ben81828 committed
Commit 560001f · verified · 1 Parent(s): 093abc7

Training in progress, step 700, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e67d46cdc279c111f3d8b6cd0c68158fe4239ea365a1c2b31f1709a759de013a
+oid sha256:086f56d8253ad2de065083ad9ed3e401f206ca6169266148720dc9056e97e11d
 size 29034840
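
Each pointer file above follows the Git LFS spec: the repository tracks only the object's SHA-256 (oid) and byte size, while the checkpoint payload itself is fetched from LFS storage. A minimal sketch for confirming that a locally downloaded adapter_model.safetensors matches the new pointer in this commit; the local path is a placeholder for wherever your clone lives, and the expected values are copied from the diff above:

```python
import hashlib
import os

# Values from the updated LFS pointer in this commit.
EXPECTED_OID = "086f56d8253ad2de065083ad9ed3e401f206ca6169266148720dc9056e97e11d"
EXPECTED_SIZE = 29034840

path = "last-checkpoint/adapter_model.safetensors"  # placeholder path inside a local clone

# Hash the file in chunks so large checkpoints need not fit in memory.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha256.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("adapter_model.safetensors matches the LFS pointer")
```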
last-checkpoint/global_step700/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bd9660af62d03296045b76792c76fdd9cd1bfc7c53492a1599f6b67d7a48317
+size 43429616
last-checkpoint/global_step700/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d56985c9ce25564c02f637b19f88557f60fa453b0df988ca8bba5e7bfa14df97
+size 43429616
last-checkpoint/global_step700/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16a39001e7b1938d07665b62309720203060c26b8f08d1b1e8f5740c71cfa763
+size 43429616
last-checkpoint/global_step700/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75b7803c640a3eb1aeb12efa6bc7a9345531f1f9a2c619024ccf46c78b577d64
+size 43429616
last-checkpoint/global_step700/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1016e4296788968087a2a2e95f3a62ee2c6085b5340c3e92171dae571481ea55
+size 637299
last-checkpoint/global_step700/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb87f12b7ce572b38ad895ddc218de834da4bf60ec37d070e1017300431fe756
+size 637171
last-checkpoint/global_step700/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19d458531685f08f79fecf5600b5b32a91ce3e7e9e5c6bf39ed77dc64a6777d6
+size 637171
last-checkpoint/global_step700/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6548dde312f71fd6a7a282d97e0df54d5ebeb16b695ea5d8f6c774bbc1ca3bb
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step650
+global_step700
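
The new global_step700 directory holds the DeepSpeed ZeRO partitions for this step (per-rank bf16 optimizer states plus model states for ranks 0-3), and last-checkpoint/latest now points at that tag. If a single consolidated fp32 state dict is needed rather than per-rank shards, DeepSpeed's zero_to_fp32 utility can merge them; a sketch, assuming DeepSpeed and PyTorch are installed and the checkpoint directory has been cloned locally (the output filename is arbitrary):

```python
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# "last-checkpoint" is the directory containing `latest` and global_step700/.
# If tag is omitted, the helper reads it from the `latest` file.
state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint", tag="global_step700"
)

# Save the merged fp32 weights under an arbitrary name.
torch.save(state_dict, "consolidated_fp32_step700.bin")
print(f"merged {len(state_dict)} parameter tensors")
```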
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
+oid sha256:e7d74de51245105e1fbf57a6707ef3538b353952485508f6e2f8f74dc5d479d4
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
+oid sha256:0617c9eb6cf7df57b2e0bb53cfe17c05f0910de56fe5b14427fe39ab54a44782
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
+oid sha256:ed68a365057022897d9645ee60902a77102f43215dcdf2ddd5d3842b6a8446d8
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
+oid sha256:63ebaa0c302cadbdfcd9f8ee2289e35ecf9c9fc8c9968fc0c05f100dac20c6b9
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:97610cd95fe06fdca35f3af5c085e6621fc4c4d7c3ecef7fbe6456b650501b2b
+oid sha256:c2f570b3fae67de7e649855487e96bf548cf918d7ba3d40d1baf265735ed69a0
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.787663459777832,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-600",
-  "epoch": 0.3347926860674736,
+  "best_metric": 0.7318872809410095,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-700",
+  "epoch": 0.3605459696111254,
   "eval_steps": 50,
-  "global_step": 650,
+  "global_step": 700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1164,11 +1164,100 @@
       "eval_steps_per_second": 0.759,
       "num_input_tokens_seen": 7602512,
       "step": 650
+    },
+    {
+      "epoch": 0.3373680144218388,
+      "grad_norm": 6.586425160827751,
+      "learning_rate": 9.620822953030652e-05,
+      "loss": 0.8131,
+      "num_input_tokens_seen": 7660968,
+      "step": 655
+    },
+    {
+      "epoch": 0.33994334277620397,
+      "grad_norm": 6.92970378602844,
+      "learning_rate": 9.612643863094163e-05,
+      "loss": 0.8348,
+      "num_input_tokens_seen": 7719448,
+      "step": 660
+    },
+    {
+      "epoch": 0.34251867113056916,
+      "grad_norm": 8.35228285894448,
+      "learning_rate": 9.604381050550038e-05,
+      "loss": 0.8289,
+      "num_input_tokens_seen": 7777928,
+      "step": 665
+    },
+    {
+      "epoch": 0.34509399948493436,
+      "grad_norm": 12.894782157020227,
+      "learning_rate": 9.596034665373916e-05,
+      "loss": 0.7758,
+      "num_input_tokens_seen": 7836424,
+      "step": 670
+    },
+    {
+      "epoch": 0.3476693278392995,
+      "grad_norm": 13.409694970235305,
+      "learning_rate": 9.587604859058334e-05,
+      "loss": 0.8189,
+      "num_input_tokens_seen": 7894904,
+      "step": 675
+    },
+    {
+      "epoch": 0.3502446561936647,
+      "grad_norm": 8.783205826578632,
+      "learning_rate": 9.579091784609984e-05,
+      "loss": 0.8221,
+      "num_input_tokens_seen": 7953432,
+      "step": 680
+    },
+    {
+      "epoch": 0.3528199845480299,
+      "grad_norm": 8.368380903445857,
+      "learning_rate": 9.570495596546926e-05,
+      "loss": 0.8378,
+      "num_input_tokens_seen": 8011888,
+      "step": 685
+    },
+    {
+      "epoch": 0.35539531290239507,
+      "grad_norm": 6.7086179135551145,
+      "learning_rate": 9.561816450895793e-05,
+      "loss": 0.7529,
+      "num_input_tokens_seen": 8070344,
+      "step": 690
+    },
+    {
+      "epoch": 0.35797064125676026,
+      "grad_norm": 8.476897088436242,
+      "learning_rate": 9.55305450518895e-05,
+      "loss": 0.7311,
+      "num_input_tokens_seen": 8128816,
+      "step": 695
+    },
+    {
+      "epoch": 0.3605459696111254,
+      "grad_norm": 10.427785019598666,
+      "learning_rate": 9.544209918461642e-05,
+      "loss": 0.774,
+      "num_input_tokens_seen": 8187320,
+      "step": 700
+    },
+    {
+      "epoch": 0.3605459696111254,
+      "eval_loss": 0.7318872809410095,
+      "eval_runtime": 19.6917,
+      "eval_samples_per_second": 3.047,
+      "eval_steps_per_second": 0.762,
+      "num_input_tokens_seen": 8187320,
+      "step": 700
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 7602512,
+  "num_input_tokens_seen": 8187320,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1183,7 +1272,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 501587549356032.0,
+  "total_flos": 540175003287552.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null