ben81828 committed on
Commit dfac202 · verified · 1 Parent(s): 9f7ac96

Training in progress, step 550, checkpoint

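Every binary file below is stored through Git LFS, so each diff only shows the pointer fields (version, oid, size) changing. As a minimal sketch, the following Python (with a hypothetical local path; the expected oid is the new value from the adapter diff below) shows how one could confirm that a downloaded file matches the SHA-256 recorded in its pointer:

import hashlib
from pathlib import Path

def sha256_of(path: Path) -> str:
    # Stream the file and return its hex SHA-256, which is what a Git LFS oid records.
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the updated adapter weights.
expected = "82e427cc7e624394004d64620be182b50bd10e2a67a34cf622de25a3f138b7e0"
local = Path("last-checkpoint/adapter_model.safetensors")
if local.exists():
    print(sha256_of(local) == expected)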
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:55382d84797424bf555d465074d1d58ecfa91cc23250d3509fdb9cd78d1cbeff
+ oid sha256:82e427cc7e624394004d64620be182b50bd10e2a67a34cf622de25a3f138b7e0
  size 29034840
last-checkpoint/global_step550/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:735c0136eff0398146cf3a3ab763ebac3d070d48f89a182a5573fd655e7afdaf
+ size 43429616
last-checkpoint/global_step550/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bf8140665399fdea0be80da5aa6d4bba2b8fc3ed75218ea7ea9597294354c6e
+ size 43429616
last-checkpoint/global_step550/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:051f89f797fb6f86de484b54f48bb5b36e77703038f5c839a589563e8769d8fc
+ size 43429616
last-checkpoint/global_step550/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:130206d3a04e9a463582348d97149ab8817702228f3aa905b7b20377e4f453d3
+ size 43429616
last-checkpoint/global_step550/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1f1e88f3bf0a1ab4f6fdcbce21f2206f381a04328feda2f67246fb4023f22be
+ size 637299
last-checkpoint/global_step550/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:096829d7819569a0a45c1f1ccd20ed09f7c621736d3582368c85eb1f4d51d913
+ size 637171
last-checkpoint/global_step550/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5fa69fa8e76e9121e0fe023594a90145c030a6aa13d4e5007cb9f1a4268ed73
+ size 637171
last-checkpoint/global_step550/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b15fb48a94b40a9b7267e646ab4d8caeff42e109740ea9800d79bc394acc6b40
+ size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step500
+ global_step550
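The latest file is what DeepSpeed reads to find the newest global_step directory when a run is resumed. As a hedged sketch, resuming from this folder with the standard Trainer API would look roughly like the line below (assuming a trainer object already configured with the same model, LoRA adapter, DeepSpeed config, and data as the original run, none of which is shown in this commit):

# `trainer` is assumed to be a transformers.Trainer set up like the original run.
trainer.train(resume_from_checkpoint="last-checkpoint")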
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bdd1f02cb20d3f4f7e0dd26fea62af57e5e71316163f926a28ed6cf89a9f3777
+ oid sha256:ae78313eb528c8d3695eebaf4de3539bd0a0bc6ee18c66af1ee183442f1758a0
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fc6d54ba2aa85e2f895439a1b787ec947b848a1c34ea5a3a28821572bf2b9fec
+ oid sha256:1b38031f60d9e88601d369ef46bcdcf2b5b03f2cb4ba93853bcb2328df7ebb7c
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b6927d26551cddd8e35b34b43e79bd58f8b6027b6a481bb6a563a3652addeb4
+ oid sha256:f58092375c93d237cd0e3149aecfbf83e2acdae46279e07a32920d01cb507e64
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f8295b1be8e66b4b30cb905dc48cfc717c027e427937b8142d00ae9de8106c6a
+ oid sha256:83cd4bbff9962da7ec6787fcea8d65df7096917f9a5902e249ba7aee8887fe5f
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8779534db128a38380f8ecdd5b697bdb59a398f35fe4ad4de1318e871fcc0e6f
+ oid sha256:5e969c9b0ecef9c1209a6397ff63db034af1cc51341323dc3dc14016347fe871
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.8268976211547852,
- "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-500",
- "epoch": 0.25753283543651817,
+ "best_metric": 0.7909801602363586,
+ "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-550",
+ "epoch": 0.28328611898017,
  "eval_steps": 50,
- "global_step": 500,
+ "global_step": 550,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -897,11 +897,100 @@
  "eval_steps_per_second": 0.764,
  "num_input_tokens_seen": 5848048,
  "step": 500
+ },
+ {
+ "epoch": 0.26010816379088336,
+ "grad_norm": 4.216936754020565,
+ "learning_rate": 9.826583551051483e-05,
+ "loss": 0.8566,
+ "num_input_tokens_seen": 5906512,
+ "step": 505
+ },
+ {
+ "epoch": 0.2626834921452485,
+ "grad_norm": 10.456282683777822,
+ "learning_rate": 9.820978249400773e-05,
+ "loss": 0.8365,
+ "num_input_tokens_seen": 5965024,
+ "step": 510
+ },
+ {
+ "epoch": 0.2652588204996137,
+ "grad_norm": 2.334974931865165,
+ "learning_rate": 9.81528544373233e-05,
+ "loss": 0.8882,
+ "num_input_tokens_seen": 6023496,
+ "step": 515
+ },
+ {
+ "epoch": 0.2678341488539789,
+ "grad_norm": 0.6948827424617825,
+ "learning_rate": 9.809505237374426e-05,
+ "loss": 0.8799,
+ "num_input_tokens_seen": 6082000,
+ "step": 520
+ },
+ {
+ "epoch": 0.2704094772083441,
+ "grad_norm": 0.8415524888602947,
+ "learning_rate": 9.80363773524172e-05,
+ "loss": 0.8758,
+ "num_input_tokens_seen": 6140480,
+ "step": 525
+ },
+ {
+ "epoch": 0.27298480556270927,
+ "grad_norm": 1.3536520282199265,
+ "learning_rate": 9.797683043833345e-05,
+ "loss": 0.8644,
+ "num_input_tokens_seen": 6198968,
+ "step": 530
+ },
+ {
+ "epoch": 0.2755601339170744,
+ "grad_norm": 4.556188528469967,
+ "learning_rate": 9.791641271230982e-05,
+ "loss": 0.8453,
+ "num_input_tokens_seen": 6257464,
+ "step": 535
+ },
+ {
+ "epoch": 0.2781354622714396,
+ "grad_norm": 2.890141630286954,
+ "learning_rate": 9.78551252709689e-05,
+ "loss": 0.8533,
+ "num_input_tokens_seen": 6315944,
+ "step": 540
+ },
+ {
+ "epoch": 0.2807107906258048,
+ "grad_norm": 4.471490037342243,
+ "learning_rate": 9.779296922671923e-05,
+ "loss": 0.8575,
+ "num_input_tokens_seen": 6374408,
+ "step": 545
+ },
+ {
+ "epoch": 0.28328611898017,
+ "grad_norm": 3.174906426420603,
+ "learning_rate": 9.77299457077351e-05,
+ "loss": 0.8666,
+ "num_input_tokens_seen": 6432936,
+ "step": 550
+ },
+ {
+ "epoch": 0.28328611898017,
+ "eval_loss": 0.7909801602363586,
+ "eval_runtime": 19.8739,
+ "eval_samples_per_second": 3.019,
+ "eval_steps_per_second": 0.755,
+ "num_input_tokens_seen": 6432936,
+ "step": 550
  }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 5848048,
+ "num_input_tokens_seen": 6432936,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -916,7 +1005,7 @@
  "attributes": {}
  }
  },
- "total_flos": 385817567035392.0,
+ "total_flos": 424412082339840.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null