ben81828 committed (verified)
Commit 0d47ca0 · 1 Parent(s): e0f7be5

Training in progress, step 2300, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9efa5a507b1e2d13741d6dd8d920f108bb4ba17026ae4bb85a703fca7207dad8
+ oid sha256:928aa21b0c49c49462d0aedc8af44db37ae597ff96ee656e5763e930bfac1a19
  size 29034840
last-checkpoint/global_step2299/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8b88893fe1583c5ce33e461bf4ce59a1626ce53ff44ba50c21267848f362c07
+ size 43429616
last-checkpoint/global_step2299/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21b29af43f75c1390f228bf54b7e627ffe91ec6cf44730afbc0ecaeafec7cc06
+ size 43429616
last-checkpoint/global_step2299/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bdc5d2fa6d94db687b3268482eb07108d2c5fe4aace0924ed3981fc45fe3ce7
+ size 43429616
last-checkpoint/global_step2299/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9eee0fa02aab662f6dd71836e00a139cf13412379a8590bd664cfe160c8e8a1d
+ size 43429616
last-checkpoint/global_step2299/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3db9a66f062c06d4ae22ce00381d04aaf88febb31a1deb876ef8f5f7e037b0b
+ size 637299
last-checkpoint/global_step2299/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d4d0f45bd74f8f11098175a2c697bf0d440274790a3e45d2430d91452939d2d
+ size 637171
last-checkpoint/global_step2299/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e17a1e9ce61b5f6d7436055412d41d8bb09874b16c91328ac0303c620f7e7754
+ size 637171
last-checkpoint/global_step2299/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a51f66021906f63ad256a9e41a3333820be76fc6433e1d490fb47829b67200bf
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2249
+ global_step2299
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1d25cbcbbaa0866ea9c7365cb49b84e805db119693e615f5a1898a6ebfe997e8
+ oid sha256:f12bf3da75454e5aae4644f2a1d46fdf90f68e680dbf5bdaa86861f825d32d80
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a883389afac12125c2c6bf62631b7de0220fdb0020d24cd0c6e8f8858dd3b362
+ oid sha256:2faef1b8798e7516fd96ee7b3363866a8f97ca2d0ec5a8dd27bbfe70b0c6a733
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:577d49de6d60035e159d9ebb1e6eabef79a55787b14ecea93a6a93c242661779
+ oid sha256:7f35f6d27fbd414dd4285d91816b37fb6b97ff10fbba4c074d56ad2a7f723033
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b5f2dd1c21e06806a9ce39eeab45734dfb8a62b829f91a86d1f65f13102d6242
+ oid sha256:ea5c64dfc7e3b2729065483dfef8e4bb0af0d9bae32df888d258ee3c2859d676
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e9cbcebe45b9faaca29ead6f9f59deeeb362fed9bbabf34da80537e86d967e85
+ oid sha256:9ba68b7498f6ef6c27be2e178ae1a286d6e6aacbd9a34a6f944fd623ea7cf386
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.22014015913009644,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-2250",
- "epoch": 1.158640226628895,
+ "epoch": 1.184393510172547,
  "eval_steps": 50,
- "global_step": 2250,
+ "global_step": 2300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -4012,11 +4012,100 @@
  "eval_steps_per_second": 0.738,
  "num_input_tokens_seen": 26310336,
  "step": 2250
+ },
+ {
+ "epoch": 1.1612155549832603,
+ "grad_norm": 8.283787630987762,
+ "learning_rate": 4.0828536862897125e-05,
+ "loss": 0.2986,
+ "num_input_tokens_seen": 26368840,
+ "step": 2255
+ },
+ {
+ "epoch": 1.1637908833376256,
+ "grad_norm": 20.215982883136522,
+ "learning_rate": 4.061921681310241e-05,
+ "loss": 0.263,
+ "num_input_tokens_seen": 26427280,
+ "step": 2260
+ },
+ {
+ "epoch": 1.1663662116919906,
+ "grad_norm": 6.802291856358878,
+ "learning_rate": 4.04100670308708e-05,
+ "loss": 0.2282,
+ "num_input_tokens_seen": 26485760,
+ "step": 2265
+ },
+ {
+ "epoch": 1.1689415400463559,
+ "grad_norm": 8.985818465549714,
+ "learning_rate": 4.0201091312412394e-05,
+ "loss": 0.2311,
+ "num_input_tokens_seen": 26544224,
+ "step": 2270
+ },
+ {
+ "epoch": 1.1715168684007211,
+ "grad_norm": 8.383045025215052,
+ "learning_rate": 3.999229345077789e-05,
+ "loss": 0.2939,
+ "num_input_tokens_seen": 26602712,
+ "step": 2275
+ },
+ {
+ "epoch": 1.1740921967550864,
+ "grad_norm": 6.558546292884369,
+ "learning_rate": 3.978367723578981e-05,
+ "loss": 0.2165,
+ "num_input_tokens_seen": 26661160,
+ "step": 2280
+ },
+ {
+ "epoch": 1.1766675251094514,
+ "grad_norm": 7.108641756919162,
+ "learning_rate": 3.957524645397359e-05,
+ "loss": 0.1823,
+ "num_input_tokens_seen": 26719648,
+ "step": 2285
+ },
+ {
+ "epoch": 1.1792428534638166,
+ "grad_norm": 11.468609977960636,
+ "learning_rate": 3.936700488848899e-05,
+ "loss": 0.2827,
+ "num_input_tokens_seen": 26778112,
+ "step": 2290
+ },
+ {
+ "epoch": 1.1818181818181819,
+ "grad_norm": 11.548102910785317,
+ "learning_rate": 3.915895631906135e-05,
+ "loss": 0.2897,
+ "num_input_tokens_seen": 26836592,
+ "step": 2295
+ },
+ {
+ "epoch": 1.184393510172547,
+ "grad_norm": 4.826501140271565,
+ "learning_rate": 3.895110452191301e-05,
+ "loss": 0.2903,
+ "num_input_tokens_seen": 26895096,
+ "step": 2300
+ },
+ {
+ "epoch": 1.184393510172547,
+ "eval_loss": 0.2940039336681366,
+ "eval_runtime": 20.1001,
+ "eval_samples_per_second": 2.985,
+ "eval_steps_per_second": 0.746,
+ "num_input_tokens_seen": 26895096,
+ "step": 2300
  }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 26310336,
+ "num_input_tokens_seen": 26895096,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -4031,7 +4120,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1736069012258816.0,
+ "total_flos": 1774654872354816.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null