Training in progress, step 2500, checkpoint

- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step2499/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2499/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +93 -4
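
The files above follow the layout DeepSpeed and the Transformers Trainer write for a resumable checkpoint: per-rank ZeRO optimizer and model-state shards under global_step2499/, per-rank RNG states, the LR scheduler, the LoRA adapter weights, and trainer_state.json, all stored as Git LFS pointers. As a rough illustration only (not part of this commit), a sketch like the following could sanity-check that the shard set referenced by last-checkpoint/latest is complete for the four ranks seen here; the checkpoint directory and world size are assumptions, not values read from the repo.

from pathlib import Path

# Hypothetical sanity check for a checkpoint laid out like this commit;
# the directory path and world size are assumptions, not read from any config.
checkpoint_dir = Path("last-checkpoint")
world_size = 4  # four bf16_zero_pp_rank_* / zero_pp_rank_* shards appear above

# DeepSpeed records the active checkpoint tag (here "global_step2499") in `latest`.
tag = (checkpoint_dir / "latest").read_text().strip()
step_dir = checkpoint_dir / tag

expected = [checkpoint_dir / "scheduler.pt", checkpoint_dir / "trainer_state.json"]
for rank in range(world_size):
    expected.append(step_dir / f"bf16_zero_pp_rank_{rank}_mp_rank_00_optim_states.pt")
    expected.append(step_dir / f"zero_pp_rank_{rank}_mp_rank_00_model_states.pt")
    expected.append(checkpoint_dir / f"rng_state_{rank}.pth")

missing = [str(p) for p in expected if not p.exists()]
print("checkpoint complete" if not missing else f"missing files: {missing}")
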
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9966048df26861231f31ce0ddfe797df85f054e16dbf2091f97f17eda09a0921
 size 29034840

last-checkpoint/global_step2499/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4b433ab564c44660c5768a12dcb6f566e0fb6e921d6519e63dfb5f600dd5ba8
+size 43429616

last-checkpoint/global_step2499/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d1ed86de19f51135c95073461ad5c3b27d9f7970bef2aa4bc784b3290ffc013
+size 43429616

last-checkpoint/global_step2499/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f82875e94321d319c1175271ac5e6a226fce428c7076c7b48a75d6b5af6c04e4
+size 43429616

last-checkpoint/global_step2499/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:301bcb66e17799f3ce082f82e7c18707562d9a3990239469cb0c702779e2af6b
+size 43429616

last-checkpoint/global_step2499/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b68057968ae8a6712b220c63c51737d021b970e8bc7eae3b478483df645f816b
+size 637299

last-checkpoint/global_step2499/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e556ac7579ecbafd661a7651fefa61a7c19c100f36f7c9b8b31e0715c28cccc
+size 637171

last-checkpoint/global_step2499/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe1aa2ebfc9ea794e8a4e19de5c025e81597c8300f18fc96bb87266383e6cca9
+size 637171

last-checkpoint/global_step2499/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dab7388ad26aa2d426b25a026e976559019c9b2ae8f851138b711373b8827c01
+size 637171

last-checkpoint/latest CHANGED
@@ -1 +1 @@
-
+global_step2499

last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:0362dfd92e8da01e4a0deedcbd1c493b8162d5d1d84d5a4c1cd210c556f2cf9b
 size 15024

last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:e65c5adee1a22c5343e38495a6905880496fb22d5e3ec5b16b87aadb731969d2
 size 15024

last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f8d8858483b0c6944d55621cc2633469e3e0d04c48b6671eee92d4abab2352c2
 size 15024

last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ca42ef4f7a2f8c2285c4cf6cef585dcc0b132b21e8bb33d96d53b6db837f5e54
 size 15024

last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d6bfc8f82217a5679ff2e12a0352f5107734a71e0cf25a7278e2a7645ea398cd
 size 1064

last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.22014015913009644,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-2250",
-  "epoch": 1.
+  "epoch": 1.2874066443471541,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 2500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -4368,11 +4368,100 @@
       "eval_steps_per_second": 0.761,
       "num_input_tokens_seen": 28649256,
       "step": 2450
+    },
+    {
+      "epoch": 1.2642286891578676,
+      "grad_norm": 13.48575395754194,
+      "learning_rate": 3.2625716337267295e-05,
+      "loss": 0.2041,
+      "num_input_tokens_seen": 28707736,
+      "step": 2455
+    },
+    {
+      "epoch": 1.2668040175122328,
+      "grad_norm": 3.1641318472119986,
+      "learning_rate": 3.242613057068641e-05,
+      "loss": 0.3022,
+      "num_input_tokens_seen": 28766232,
+      "step": 2460
+    },
+    {
+      "epoch": 1.269379345866598,
+      "grad_norm": 17.472204317681474,
+      "learning_rate": 3.222686378172847e-05,
+      "loss": 0.2756,
+      "num_input_tokens_seen": 28824688,
+      "step": 2465
+    },
+    {
+      "epoch": 1.271954674220963,
+      "grad_norm": 4.051662934956441,
+      "learning_rate": 3.2027919587220516e-05,
+      "loss": 0.2395,
+      "num_input_tokens_seen": 28883176,
+      "step": 2470
+    },
+    {
+      "epoch": 1.2745300025753283,
+      "grad_norm": 5.058807389542336,
+      "learning_rate": 3.1829301598134355e-05,
+      "loss": 0.2204,
+      "num_input_tokens_seen": 28941592,
+      "step": 2475
+    },
+    {
+      "epoch": 1.2771053309296936,
+      "grad_norm": 4.820344297526228,
+      "learning_rate": 3.1631013419520855e-05,
+      "loss": 0.2477,
+      "num_input_tokens_seen": 29000064,
+      "step": 2480
+    },
+    {
+      "epoch": 1.2796806592840588,
+      "grad_norm": 6.098120285465299,
+      "learning_rate": 3.143305865044467e-05,
+      "loss": 0.2645,
+      "num_input_tokens_seen": 29058496,
+      "step": 2485
+    },
+    {
+      "epoch": 1.2822559876384239,
+      "grad_norm": 10.021136804794267,
+      "learning_rate": 3.123544088391881e-05,
+      "loss": 0.2184,
+      "num_input_tokens_seen": 29116992,
+      "step": 2490
+    },
+    {
+      "epoch": 1.284831315992789,
+      "grad_norm": 32.07716009409277,
+      "learning_rate": 3.10381637068395e-05,
+      "loss": 0.1857,
+      "num_input_tokens_seen": 29175496,
+      "step": 2495
+    },
+    {
+      "epoch": 1.2874066443471541,
+      "grad_norm": 10.697788786097991,
+      "learning_rate": 3.084123069992096e-05,
+      "loss": 0.257,
+      "num_input_tokens_seen": 29233968,
+      "step": 2500
+    },
+    {
+      "epoch": 1.2874066443471541,
+      "eval_loss": 0.2488754242658615,
+      "eval_runtime": 19.5416,
+      "eval_samples_per_second": 3.07,
+      "eval_steps_per_second": 0.768,
+      "num_input_tokens_seen": 29233968,
+      "step": 2500
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 29233968,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -4387,7 +4476,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 1928988327673856.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
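
If useful, the updated trainer_state.json can be summarized with a short script. This is a minimal sketch only: it assumes the file sits at last-checkpoint/trainer_state.json and that the per-step entries shown in the diff live under the "log_history" key the Transformers Trainer normally writes.

import json
from pathlib import Path

# Minimal sketch: summarize the checkpoint state recorded in this commit.
# The path is an assumption; "log_history" is the key the Transformers
# Trainer normally uses for the per-step log entries shown in the diff.
state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print("global_step:", state["global_step"])           # 2500 in this commit
print("best_metric:", state["best_metric"])           # 0.2201... (best eval loss so far)
print("best_checkpoint:", state["best_model_checkpoint"])

latest_eval = [e for e in state["log_history"] if "eval_loss" in e][-1]
print("latest eval_loss:", latest_eval["eval_loss"])   # 0.2488... at step 2500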