ben81828 committed
Commit 0143fcc · verified · 1 Parent(s): 53df35b

Training in progress, step 2150, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a111e8798c83c643eeba02e2a149e66e3437bb97bb843d76a35308962c3f1d78
+ oid sha256:2034eebd508e7ac8ecb0a317db22f0ed34690e812dc78be4e22f97f074447d0f
  size 29034840
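
Each entry in this diff is a Git LFS pointer: the repository stores only the version/oid/size triplet, while the binary payload lives in LFS storage. A downloaded blob can be checked against the pointer it corresponds to; the following minimal sketch uses only the Python standard library, and the local path is an assumption about where the checkout lives.

import hashlib
from pathlib import Path

def verify_lfs_object(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    """Compare a downloaded blob with the oid/size recorded in its LFS pointer."""
    path = Path(blob_path)
    if path.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# oid and size are taken from the new pointer above; the path is an assumption.
print(verify_lfs_object(
    "last-checkpoint/adapter_model.safetensors",
    "2034eebd508e7ac8ecb0a317db22f0ed34690e812dc78be4e22f97f074447d0f",
    29034840,
))
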
last-checkpoint/global_step2149/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76a834493c770996dd31bdf289d8d416720d9a0c1163a4a28ce363bfb9f56125
+ size 43429616

last-checkpoint/global_step2149/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66ae8328db5cbe4350cadbe9b03e5f94da9266623aaad8962b9c3013a29fa6c9
+ size 43429616

last-checkpoint/global_step2149/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67ffae581e43860255f53fc49344f4c9abc8aee6837d3d4073ea36f38bd660de
+ size 43429616

last-checkpoint/global_step2149/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cb9994cac84231dddba4ebe94b420877e83b181dd1edf79cf1d2a5be45661faa
+ size 43429616

last-checkpoint/global_step2149/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f000a8026012dca201de2d8edd792d3f11c18221cb0b0ca2bd6c4102d3442db7
+ size 637299

last-checkpoint/global_step2149/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93290cbef16db999968f1763b76a46df6e094b05b64c8ac94f7e82559e0e8bcf
+ size 637171

last-checkpoint/global_step2149/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf708fc6c44da8bc3d14d7da12c3598394376bcf4601fa8e8035ef28bbd430db
+ size 637171

last-checkpoint/global_step2149/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea2fbbc7294531a75063dddb972957a050968b045541575f6b4e3e81281fc0fb
+ size 637171
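
The added global_step2149 directory holds DeepSpeed ZeRO-partitioned optimizer and model states, one shard per data-parallel rank (ranks 0-3 here). If a single consolidated fp32 state dict is needed, DeepSpeed's zero_to_fp32 helper can merge the shards; the sketch below assumes DeepSpeed is installed and the checkpoint (with its LFS objects) is available locally.

# Consolidate the ZeRO shards under last-checkpoint/global_step2149 into one
# fp32 state dict (assumes a local checkout with the LFS blobs materialized).
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    "last-checkpoint",        # directory containing the "latest" tag file
    tag="global_step2149",    # matches the step recorded in last-checkpoint/latest
)
print(f"consolidated {len(state_dict)} fp32 tensors")
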
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step2099
+ global_step2149
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b105708e2c99c8661b46698b8ccc5799ac83c1f0fc6a30c2d41c9fbfb349d480
+ oid sha256:1c2f72d01585273766959f0cc9805fab753b53f20e581399855a293176ace988
  size 15024

last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bc44cd2015d8c8fc2f109f07c797876873a52f478c57b0350b8a2cf5dcb17f25
+ oid sha256:3fd1ecda2bb159be37a2a23800e098324f5b0334e7189df47c343ca6cb7605a2
  size 15024

last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9843dec201b5a542ebd69abfc596f99ad5a000cf81dab52c6a2c52a5b9224ea7
+ oid sha256:cf71c84ea2995fbc545b918d03f7f94c92293ca2e33343f177e6fd04531b7b19
  size 15024

last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2296f0efda653dd4c7e861f5a867baa09d6d8bb50e57bc69af930268b40de9ef
+ oid sha256:72c53116f0f4c80841c24cd681d5fbd5a5992b259583a4cfb493f8f3e4544d82
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7e06f479f50d6b90756aab0e5f1d2b8944be9014d46f9719fade9db1f0995f9b
+ oid sha256:2a5fa7220e971f22064d8e3cc972d3be539d9ac37e650399fd40813d74479992
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
    "best_metric": 0.2472737729549408,
    "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1950",
-   "epoch": 1.0813803759979397,
+   "epoch": 1.1071336595415915,
    "eval_steps": 50,
-   "global_step": 2100,
+   "global_step": 2150,
    "is_hyper_param_search": false,
    "is_local_process_zero": true,
    "is_world_process_zero": true,
@@ -3745,11 +3745,100 @@
      "eval_steps_per_second": 0.774,
      "num_input_tokens_seen": 24555904,
      "step": 2100
+   },
+   {
+     "epoch": 1.083955704352305,
+     "grad_norm": 10.785937986790138,
+     "learning_rate": 4.716837353150695e-05,
+     "loss": 0.2942,
+     "num_input_tokens_seen": 24614392,
+     "step": 2105
+   },
+   {
+     "epoch": 1.0865310327066702,
+     "grad_norm": 7.201431051550072,
+     "learning_rate": 4.695572353154438e-05,
+     "loss": 0.2357,
+     "num_input_tokens_seen": 24672848,
+     "step": 2110
+   },
+   {
+     "epoch": 1.0891063610610352,
+     "grad_norm": 10.470411807057468,
+     "learning_rate": 4.674312878725985e-05,
+     "loss": 0.2476,
+     "num_input_tokens_seen": 24731344,
+     "step": 2115
+   },
+   {
+     "epoch": 1.0916816894154004,
+     "grad_norm": 10.029698195020993,
+     "learning_rate": 4.653059315739188e-05,
+     "loss": 0.3875,
+     "num_input_tokens_seen": 24789832,
+     "step": 2120
+   },
+   {
+     "epoch": 1.0942570177697657,
+     "grad_norm": 5.623217865601237,
+     "learning_rate": 4.631812049960595e-05,
+     "loss": 0.3313,
+     "num_input_tokens_seen": 24848264,
+     "step": 2125
+   },
+   {
+     "epoch": 1.0968323461241307,
+     "grad_norm": 4.705669392169022,
+     "learning_rate": 4.61057146704246e-05,
+     "loss": 0.2385,
+     "num_input_tokens_seen": 24906744,
+     "step": 2130
+   },
+   {
+     "epoch": 1.099407674478496,
+     "grad_norm": 9.334998248379781,
+     "learning_rate": 4.589337952515736e-05,
+     "loss": 0.2478,
+     "num_input_tokens_seen": 24965256,
+     "step": 2135
+   },
+   {
+     "epoch": 1.1019830028328612,
+     "grad_norm": 8.45475259780102,
+     "learning_rate": 4.5681118917830835e-05,
+     "loss": 0.214,
+     "num_input_tokens_seen": 25023768,
+     "step": 2140
+   },
+   {
+     "epoch": 1.1045583311872265,
+     "grad_norm": 23.234169875193494,
+     "learning_rate": 4.546893670111866e-05,
+     "loss": 0.2806,
+     "num_input_tokens_seen": 25082248,
+     "step": 2145
+   },
+   {
+     "epoch": 1.1071336595415915,
+     "grad_norm": 7.852580632471935,
+     "learning_rate": 4.525683672627168e-05,
+     "loss": 0.2536,
+     "num_input_tokens_seen": 25140752,
+     "step": 2150
+   },
+   {
+     "epoch": 1.1071336595415915,
+     "eval_loss": 0.30806976556777954,
+     "eval_runtime": 19.3086,
+     "eval_samples_per_second": 3.107,
+     "eval_steps_per_second": 0.777,
+     "num_input_tokens_seen": 25140752,
+     "step": 2150
    }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
- "num_input_tokens_seen": 24555904,
+ "num_input_tokens_seen": 25140752,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -3764,7 +3853,7 @@
    "attributes": {}
  }
  },
- "total_flos": 1620301350436864.0,
+ "total_flos": 1658893279166464.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null