ben81828 committed · verified
Commit 1d05e69 · 1 Parent(s): 24f8bc5

Training in progress, step 1350, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3f02e158d7a0ecd4f15ebd388f3f9321f3c48f48b6c240184415bff2106b4225
+oid sha256:6aef5102eef70c31f0044c654484ffa32b1a0562551889340924debb001cc356
 size 29034840
last-checkpoint/global_step1350/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:91bca888e4c33e24509ed239e1cace5cfcc74b657e45601aa17195a818a995d9
+size 43429616
last-checkpoint/global_step1350/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4ab6b925af54924e652426e5130c56f13868f1543ea6c23dcedb092eada1390
+size 43429616
last-checkpoint/global_step1350/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4bff5c2841762d4c5f5f2f84c319b2519954e346f9b70ea4c14e9a1f980b3e7
+size 43429616
last-checkpoint/global_step1350/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e3d62e714ef5a158d1249d0f9b65bb0f82f150761b83dc2cf2e0aa76b322d75
+size 43429616
last-checkpoint/global_step1350/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf6a04ed575d4bbe95b19e28c451beac968995fe7f81b545d3f82185be8aab33
+size 637299
last-checkpoint/global_step1350/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6797229513b11978627019edbf1729dfa52f86c1151aac9777d9693a2be4536b
+size 637171
last-checkpoint/global_step1350/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3dff6f8ada3bacd5905b06ee331028a57bd1b141fb55f651aca5cbe974a3a95
+size 637171
last-checkpoint/global_step1350/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d0cfc18b2fc2a54e8761219cee72f1cb08510e09bfc522456e8dbdebcd2dde79
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1300
+global_step1350
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c364a094b8b4b8d6b015687012206e88b2233dd7d6a4f6f395d7aef77752ea67
+oid sha256:9747fe881253e52a47314f48068ef9649032bec4cb284b1b4becbb8787f37faa
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bd2e297a13ac051fc5f3cce9c34767e51a5cb4574835aa8bd1309d8cdc48053
+oid sha256:3ac02a5554a5ef9e3473dcd2926626ae41f4777354859c7d2bf0a0c1188c0583
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:028047dd1753d92e11bf971ee14a5c981a9a3ea6631f228e38475027eb5ae430
+oid sha256:9405d230cc78dac3f3b2ab887674631c15f66fedab0042ab7bc1bd83b8575344
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:000eb4cb4096582f15856f380d5f2a9a00eaecdbb95f2289a7a81a0a624fdf72
+oid sha256:b15237547030ac62d49d70a5465b2e29515e6334f62416eb16c0c6d073f7c6bf
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2d597028ddc0c214d0751afd211631bdacdc9a86d2364a66b2294abb4eba3117
+oid sha256:4c2345b3e418be58bce3abfab9e2138cf24cde9ab37ff4c2ee4ae37ffd9029bf
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.49471279978752136,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1300",
-  "epoch": 0.6695853721349472,
+  "epoch": 0.695338655678599,
   "eval_steps": 50,
-  "global_step": 1300,
+  "global_step": 1350,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2321,11 +2321,100 @@
       "eval_steps_per_second": 0.778,
       "num_input_tokens_seen": 15205064,
       "step": 1300
+    },
+    {
+      "epoch": 0.6721607004893124,
+      "grad_norm": 10.254593495214214,
+      "learning_rate": 7.925467901247996e-05,
+      "loss": 0.4589,
+      "num_input_tokens_seen": 15263560,
+      "step": 1305
+    },
+    {
+      "epoch": 0.6747360288436776,
+      "grad_norm": 10.097755490465826,
+      "learning_rate": 7.908166355425475e-05,
+      "loss": 0.457,
+      "num_input_tokens_seen": 15322016,
+      "step": 1310
+    },
+    {
+      "epoch": 0.6773113571980427,
+      "grad_norm": 10.727169120741898,
+      "learning_rate": 7.890812024415555e-05,
+      "loss": 0.51,
+      "num_input_tokens_seen": 15380504,
+      "step": 1315
+    },
+    {
+      "epoch": 0.6798866855524079,
+      "grad_norm": 11.12869176509039,
+      "learning_rate": 7.873405223211087e-05,
+      "loss": 0.4994,
+      "num_input_tokens_seen": 15438944,
+      "step": 1320
+    },
+    {
+      "epoch": 0.6824620139067731,
+      "grad_norm": 8.499688286725998,
+      "learning_rate": 7.855946267757295e-05,
+      "loss": 0.4501,
+      "num_input_tokens_seen": 15497384,
+      "step": 1325
+    },
+    {
+      "epoch": 0.6850373422611383,
+      "grad_norm": 10.352066646699223,
+      "learning_rate": 7.838435474946034e-05,
+      "loss": 0.4807,
+      "num_input_tokens_seen": 15555856,
+      "step": 1330
+    },
+    {
+      "epoch": 0.6876126706155035,
+      "grad_norm": 14.714795422962215,
+      "learning_rate": 7.820873162610044e-05,
+      "loss": 0.5112,
+      "num_input_tokens_seen": 15614368,
+      "step": 1335
+    },
+    {
+      "epoch": 0.6901879989698687,
+      "grad_norm": 8.466874504995866,
+      "learning_rate": 7.803259649517178e-05,
+      "loss": 0.4825,
+      "num_input_tokens_seen": 15672864,
+      "step": 1340
+    },
+    {
+      "epoch": 0.6927633273242338,
+      "grad_norm": 7.62934190428385,
+      "learning_rate": 7.78559525536462e-05,
+      "loss": 0.5147,
+      "num_input_tokens_seen": 15731376,
+      "step": 1345
+    },
+    {
+      "epoch": 0.695338655678599,
+      "grad_norm": 9.019045929732858,
+      "learning_rate": 7.767880300773074e-05,
+      "loss": 0.4702,
+      "num_input_tokens_seen": 15789848,
+      "step": 1350
+    },
+    {
+      "epoch": 0.695338655678599,
+      "eval_loss": 0.5247787237167358,
+      "eval_runtime": 19.436,
+      "eval_samples_per_second": 3.087,
+      "eval_steps_per_second": 0.772,
+      "num_input_tokens_seen": 15789848,
+      "step": 1350
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 15205064,
+  "num_input_tokens_seen": 15789848,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2340,7 +2429,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1003241463611392.0,
+  "total_flos": 1041830008324096.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
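Note (not part of the commit): every binary file above is stored as a Git LFS pointer (version / oid sha256 / size) rather than the checkpoint bytes themselves. The following is a minimal sketch of how one might verify that a downloaded blob matches such a pointer; the local file paths are hypothetical and only the Python standard library is assumed.

import hashlib
from pathlib import Path

def read_pointer(pointer_path: str) -> dict:
    # Parse a Git LFS pointer file: "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify(pointer_path: str, blob_path: str) -> bool:
    # Return True if the blob's SHA-256 digest and byte size match the pointer.
    fields = read_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])
    data = Path(blob_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

if __name__ == "__main__":
    # Hypothetical paths; point these at the pointer file and the downloaded blob.
    print(verify("last-checkpoint/adapter_model.safetensors.pointer",
                 "last-checkpoint/adapter_model.safetensors"))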