ben81828 committed · Commit e463c90 · verified · 1 Parent(s): 60f46fb

Training in progress, step 500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7156b16f127d3e2296e6ada4ebf9cbb8c48381f16d088d0a874a522c40e99a16
+oid sha256:55382d84797424bf555d465074d1d58ecfa91cc23250d3509fdb9cd78d1cbeff
 size 29034840
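
Every file in this commit is stored through Git LFS, so the diff only shows the three-line pointer (version, oid, size) rather than the binary payload. Below is a minimal sketch, assuming the file on disk is still an un-smudged pointer; the path and helper name are illustrative, not part of the commit.

# Minimal sketch: parse a Git LFS pointer file (version / oid / size lines) into a dict.
def read_lfs_pointer(path):
    fields = {}
    with open(path, "r") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

pointer = read_lfs_pointer("last-checkpoint/adapter_model.safetensors")
print(pointer["oid"], pointer["size"])  # e.g. sha256:5538... 29034840
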
last-checkpoint/global_step500/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc106c6fc6d9c584cf59e5adf55bdf127c07781019ffa591f00cefaa2aa092a8
+size 43429616
last-checkpoint/global_step500/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1206ffbd9ab0ca5b2df0bd5e6d788b9cbd993d61d56e60e0d2b4e38ea1bafd79
+size 43429616
last-checkpoint/global_step500/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95b13646a15e0a5bb9ffcadaa56a12480f686f005cc3f4f5b35ece1084e33102
+size 43429616
last-checkpoint/global_step500/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc8c9ef603b49122a9ba6b122660d8ee061f9e18ccb68b40d01da50ed3e11949
+size 43429616
last-checkpoint/global_step500/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:982b6b8112b1a68d2e38cae94a225f18bd9ea392e1654dbc7cc130776bff950b
+size 637299
last-checkpoint/global_step500/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:854e2832a5dae385b495dad0465c766c553cf722b470925f5286a7230114f652
+size 637171
last-checkpoint/global_step500/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da17084af120106739c69dcfe393381f051b6adbcd329aae7c5a39f5b06e3345
+size 637171
last-checkpoint/global_step500/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7066b428556bc0b3f73235845964ecb2a136bcb07c44570c346b66a0f2f5992
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step450
+global_step500
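
The global_step500 directory holds the DeepSpeed ZeRO shards for this checkpoint: one bf16 optimizer-states file and one model-states file per data-parallel rank (ranks 0-3), while `latest` is the tag file that tells DeepSpeed which step directory to resume from. As a hedged sketch (assuming DeepSpeed is installed and the `last-checkpoint/` directory has been pulled locally), the shards can be merged into a single fp32 state dict with DeepSpeed's zero_to_fp32 utility:

# Hedged sketch: consolidate the sharded ZeRO optimizer/model states above into
# one fp32 state dict. "last-checkpoint" must contain `latest` and the
# `global_step500/` directory; the path here is illustrative.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
print(sum(p.numel() for p in state_dict.values()))  # total number of consolidated parameters
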
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7dbc6521b0b64cb12d818506108fcf257a4089ca8a9b1e453776ed3e032e7176
+oid sha256:bdd1f02cb20d3f4f7e0dd26fea62af57e5e71316163f926a28ed6cf89a9f3777
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2b13e3da1b0679cab1bab94f893e385a9a224d3335b5a6f62602f33c2be88d03
+oid sha256:fc6d54ba2aa85e2f895439a1b787ec947b848a1c34ea5a3a28821572bf2b9fec
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a24f0e0f117b5a8236e0d12594c0c358f41ef00068d4460002e95ad1cc3cb1c
+oid sha256:8b6927d26551cddd8e35b34b43e79bd58f8b6027b6a481bb6a563a3652addeb4
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e46e4eab6c4a25d84ad36ddf1357401788adeeb6388c03cefa35a63b52ee7610
+oid sha256:f8295b1be8e66b4b30cb905dc48cfc717c027e427937b8142d00ae9de8106c6a
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:581bd51b94cb26100e335b86a6da2b6d11272f4e7dfeb8188f963bb8c6dcbb27
+oid sha256:8779534db128a38380f8ecdd5b697bdb59a398f35fe4ad4de1318e871fcc0e6f
 size 1064
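
The four rng_state_*.pth files are the per-rank random-number-generator states and scheduler.pt is the learning-rate scheduler state, both updated at every save so a resumed run continues deterministically from step 500. A minimal inspection sketch, assuming PyTorch is installed and the LFS content has been fetched (not just the pointers):

# Hedged sketch: load the per-rank RNG state and the LR scheduler state saved
# with this checkpoint. weights_only=False because these are plain pickled
# Python objects, not model weights.
import torch

rng_state = torch.load("last-checkpoint/rng_state_0.pth", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", weights_only=False)
print(rng_state.keys())   # e.g. python / numpy / cpu / cuda generator states (assumed layout)
print(scheduler_state)    # scheduler state_dict (last_epoch, base LRs, ...)
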
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.8932263255119324,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-300",
-  "epoch": 0.23177955189286634,
+  "best_metric": 0.8268976211547852,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-500",
+  "epoch": 0.25753283543651817,
   "eval_steps": 50,
-  "global_step": 450,
+  "global_step": 500,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -808,11 +808,100 @@
       "eval_steps_per_second": 0.718,
       "num_input_tokens_seen": 5263304,
       "step": 450
+    },
+    {
+      "epoch": 0.23435488024723153,
+      "grad_norm": 0.7549819464827967,
+      "learning_rate": 9.877802252293474e-05,
+      "loss": 0.8891,
+      "num_input_tokens_seen": 5321760,
+      "step": 455
+    },
+    {
+      "epoch": 0.2369302086015967,
+      "grad_norm": 0.9960909370043465,
+      "learning_rate": 9.873077323567488e-05,
+      "loss": 0.9026,
+      "num_input_tokens_seen": 5380224,
+      "step": 460
+    },
+    {
+      "epoch": 0.23950553695596188,
+      "grad_norm": 1.5888386521989892,
+      "learning_rate": 9.868263945190312e-05,
+      "loss": 0.8707,
+      "num_input_tokens_seen": 5438704,
+      "step": 465
+    },
+    {
+      "epoch": 0.24208086531032708,
+      "grad_norm": 3.0542478842411587,
+      "learning_rate": 9.863362204528024e-05,
+      "loss": 0.9051,
+      "num_input_tokens_seen": 5497208,
+      "step": 470
+    },
+    {
+      "epoch": 0.24465619366469224,
+      "grad_norm": 1.2908325061552137,
+      "learning_rate": 9.858372190550533e-05,
+      "loss": 0.8711,
+      "num_input_tokens_seen": 5555704,
+      "step": 475
+    },
+    {
+      "epoch": 0.24723152201905743,
+      "grad_norm": 3.1989324866235744,
+      "learning_rate": 9.853293993829969e-05,
+      "loss": 0.885,
+      "num_input_tokens_seen": 5614160,
+      "step": 480
+    },
+    {
+      "epoch": 0.24980685037342262,
+      "grad_norm": 3.591366302378185,
+      "learning_rate": 9.848127706539039e-05,
+      "loss": 0.8615,
+      "num_input_tokens_seen": 5672640,
+      "step": 485
+    },
+    {
+      "epoch": 0.2523821787277878,
+      "grad_norm": 2.053833335696007,
+      "learning_rate": 9.842873422449354e-05,
+      "loss": 0.9057,
+      "num_input_tokens_seen": 5731072,
+      "step": 490
+    },
+    {
+      "epoch": 0.254957507082153,
+      "grad_norm": 1.4501486574941083,
+      "learning_rate": 9.837531236929726e-05,
+      "loss": 0.8818,
+      "num_input_tokens_seen": 5789544,
+      "step": 495
+    },
+    {
+      "epoch": 0.25753283543651817,
+      "grad_norm": 2.1068404021122866,
+      "learning_rate": 9.832101246944439e-05,
+      "loss": 0.8576,
+      "num_input_tokens_seen": 5848048,
+      "step": 500
+    },
+    {
+      "epoch": 0.25753283543651817,
+      "eval_loss": 0.8268976211547852,
+      "eval_runtime": 19.6346,
+      "eval_samples_per_second": 3.056,
+      "eval_steps_per_second": 0.764,
+      "num_input_tokens_seen": 5848048,
+      "step": 500
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 5263304,
+  "num_input_tokens_seen": 5848048,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -827,7 +916,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 347233811955712.0,
+  "total_flos": 385817567035392.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null