ben81828 committed
Commit c707d2c · verified · 1 Parent(s): ad1909d

Training in progress, step 1850, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:69a5ff5ee90349b6b28053c5cfac6617d1d42ea2529e5b55624923cc2833ed0a
+oid sha256:715ff69f50f84d39187c1f0fc35de081fab6e8a1a5b66268497bd57f97e40762
 size 29034840
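
The diff above changes only the Git LFS pointer (version / oid / size), not the binary weights themselves. A minimal sketch, assuming the actual blob has been fetched alongside its pointer, of checking the recorded sha256 oid and size (paths and helper names are illustrative):

import hashlib

def read_lfs_pointer(pointer_path):
    # Parse the three-line pointer: "version ...", "oid sha256:<hex>", "size <bytes>".
    fields = {}
    with open(pointer_path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields["oid"].split(":", 1)[1], int(fields["size"])

def verify_blob(blob_path, expected_oid, expected_size):
    # Hash the downloaded file in chunks and compare digest and byte count to the pointer.
    digest = hashlib.sha256()
    total = 0
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == expected_oid and total == expected_size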
last-checkpoint/global_step1850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87805fe85ef92b3cae987509afa07eb8dab695db5d5d9e764e704e3f576fa5d3
+size 43429616
last-checkpoint/global_step1850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4166835001ba7a003b620767185d08e9b98d27767cc68ead6407c2d3674c53b7
+size 43429616
last-checkpoint/global_step1850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db83a65a1f0af4d0adcac7fdfe1e0fb1054587385e45a38c98cec7af970a02e7
+size 43429616
last-checkpoint/global_step1850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f38c27227c1eea73fafe645bcb5cd3115e3dd1d151fca3e2a6ce9450a8602238
+size 43429616
last-checkpoint/global_step1850/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae68137480686f51324164cfe12b894960f5dac924d517328b9a25cc5475cb52
+size 637299
last-checkpoint/global_step1850/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75d6f58ca4c8da0e147329d82ed7539d567f755227e39ad9cc5e35d551f74abd
+size 637171
last-checkpoint/global_step1850/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e205dd3e9640f3af3ad21d7b533feac51eb10ed278346cf338f88fc343a8131d
+size 637171
last-checkpoint/global_step1850/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e4c50db8340941b9c0ee37b041dc49397ad831d065dc3058b939ae5d78b2dec
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1800
+global_step1850
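
The latest file now points DeepSpeed at the global_step1850 tag whose ZeRO shards are added above. A minimal sketch, assuming DeepSpeed is installed and the checkpoint layout is left as committed, of merging those shards back into a single fp32 state dict:

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# With no explicit tag, DeepSpeed reads last-checkpoint/latest ("global_step1850")
# and reconstructs full fp32 weights from the per-rank
# bf16_zero_pp_rank_*_optim_states.pt / zero_pp_rank_*_model_states.pt shards.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")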
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4d76f093328c54f2b94a10b8b50dc92fc99ceff9e3949d050a70042526f1d0eb
+oid sha256:7cc13c69d2b97530a3e18634e2f473678ea6880a6b34244c9c86a457f70137e1
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54b172dd00cba9f005761d85fb0804f751caf6e1cc8294d1873354a890cb9909
+oid sha256:fcfd90610e35a8f40098e1413d3e3a1658276c859045d3450f227cab64c9081a
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ec6adef733bd7630aa48eff1a6edaabc275d67293dcb0b7a64d71451405d489d
+oid sha256:663ae184d05dd950d27e1f419e0306ca3b141b9d7f075a80804cada62f64a363
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6ce1c4ba1932c0f698bef90e0e4e74ded1e0db5fc35282a0815899b8be759e67
+oid sha256:ec4f0a72927b3837a0e73cb5612acf4318c503f3ba2108b3925f750332a49b60
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e12a70de6fe1b2f3a6d61d703ac117228f27ab633a78519fb1a082b631797f25
+oid sha256:657e1e43dbcdd82c1b5b25483acdceb69febc0e5b31b87d49bda24ccdcfd4221
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3325226604938507,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1750",
-  "epoch": 0.9271182075714653,
+  "best_metric": 0.2935050129890442,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1850",
+  "epoch": 0.9528714911151172,
   "eval_steps": 50,
-  "global_step": 1800,
+  "global_step": 1850,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3211,11 +3211,100 @@
       "eval_steps_per_second": 0.778,
       "num_input_tokens_seen": 21053080,
       "step": 1800
+    },
+    {
+      "epoch": 0.9296935359258306,
+      "grad_norm": 10.926380171957325,
+      "learning_rate": 5.98824493873634e-05,
+      "loss": 0.3107,
+      "num_input_tokens_seen": 21111592,
+      "step": 1805
+    },
+    {
+      "epoch": 0.9322688642801957,
+      "grad_norm": 9.204735754102403,
+      "learning_rate": 5.9673544356647706e-05,
+      "loss": 0.3453,
+      "num_input_tokens_seen": 21170024,
+      "step": 1810
+    },
+    {
+      "epoch": 0.9348441926345609,
+      "grad_norm": 10.122487159211746,
+      "learning_rate": 5.946446374455555e-05,
+      "loss": 0.3607,
+      "num_input_tokens_seen": 21228480,
+      "step": 1815
+    },
+    {
+      "epoch": 0.9374195209889261,
+      "grad_norm": 9.590674638217761,
+      "learning_rate": 5.9255211346041526e-05,
+      "loss": 0.3375,
+      "num_input_tokens_seen": 21286944,
+      "step": 1820
+    },
+    {
+      "epoch": 0.9399948493432912,
+      "grad_norm": 5.469697379095751,
+      "learning_rate": 5.9045790959178296e-05,
+      "loss": 0.2521,
+      "num_input_tokens_seen": 21345456,
+      "step": 1825
+    },
+    {
+      "epoch": 0.9425701776976565,
+      "grad_norm": 12.20617548886948,
+      "learning_rate": 5.883620638508756e-05,
+      "loss": 0.3855,
+      "num_input_tokens_seen": 21403896,
+      "step": 1830
+    },
+    {
+      "epoch": 0.9451455060520216,
+      "grad_norm": 12.862485307076343,
+      "learning_rate": 5.8626461427871204e-05,
+      "loss": 0.2947,
+      "num_input_tokens_seen": 21462360,
+      "step": 1835
+    },
+    {
+      "epoch": 0.9477208344063868,
+      "grad_norm": 15.353261731205423,
+      "learning_rate": 5.841655989454213e-05,
+      "loss": 0.4505,
+      "num_input_tokens_seen": 21520864,
+      "step": 1840
+    },
+    {
+      "epoch": 0.950296162760752,
+      "grad_norm": 9.642536812282682,
+      "learning_rate": 5.820650559495523e-05,
+      "loss": 0.3758,
+      "num_input_tokens_seen": 21579376,
+      "step": 1845
+    },
+    {
+      "epoch": 0.9528714911151172,
+      "grad_norm": 9.811875826616257,
+      "learning_rate": 5.7996302341738164e-05,
+      "loss": 0.3221,
+      "num_input_tokens_seen": 21637848,
+      "step": 1850
+    },
+    {
+      "epoch": 0.9528714911151172,
+      "eval_loss": 0.2935050129890442,
+      "eval_runtime": 19.4702,
+      "eval_samples_per_second": 3.082,
+      "eval_steps_per_second": 0.77,
+      "num_input_tokens_seen": 21637848,
+      "step": 1850
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 21053080,
+  "num_input_tokens_seen": 21637848,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -3230,7 +3319,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1389126149734400.0,
+  "total_flos": 1427712275382272.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null