ben81828 committed
Commit 4e10492 · verified · 1 Parent(s): 4755249

Training in progress, step 2900, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:73c7f2377d2df91cf17de2733b3a5bbe82d66442455824ef9aa239771fcb5c26
+oid sha256:6efdabb869140223f58ba89adb95c0f30e574545e43fb41d7bac2b2f9150b3f4
 size 29034840
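
The pointer above stores only the blob's SHA-256 and byte size; the adapter weights themselves sit in LFS storage. A minimal sketch of checking a downloaded file against its pointer (the local paths are illustrative, not part of this commit):

import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # A Git LFS pointer has three lines: "version <url>", "oid sha256:<hex>", "size <bytes>".
    with open(pointer_path) as f:
        fields = dict(line.split(" ", 1) for line in f.read().splitlines() if line)
    expected_oid = fields["oid"].split(":", 1)[1]
    expected_size = int(fields["size"])

    # Hash the real blob in chunks and compare digest and size with the pointer.
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Hypothetical local paths; adjust to wherever the pointer and the resolved file live.
print(verify_lfs_pointer("adapter_model.safetensors.pointer", "adapter_model.safetensors"))
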
last-checkpoint/global_step2899/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe584b25009f277bbd1ff8bd68498986caeb876dde90db7fb191b1828a4b09d4
+size 43429616
last-checkpoint/global_step2899/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90b37aad3ae90c9555a3255e733ac7286f6ed53445b5a691924da68415107bc1
+size 43429616
last-checkpoint/global_step2899/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05f1397e484268e9c78c25d048eb927a0958f13b099e7e38cb6464cdf8260894
+size 43429616
last-checkpoint/global_step2899/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6a4704ace1d93a396e94eb31be253766aae3bc1786ff5cc58e88bbab5d5aae8
+size 43429616
last-checkpoint/global_step2899/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b07ba6cef623fc319725e939f863369b1349e53b309afcc6f961d7d3ec8bab6a
+size 637299
last-checkpoint/global_step2899/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:561aced656a1a87db26d9d074566496b417f0e1a340fe32041d82d5bb86ca954
+size 637171
last-checkpoint/global_step2899/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b47d8d65772e4ad33bcd2aac19666fd9bd00c11fffef415c34820da3d7491dfb
+size 637171
last-checkpoint/global_step2899/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff1814429e9f6159d228ad9418a3daad31d09f23fd7299fd0145ec2f5640a696
+size 637171
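
The global_step2899 directory added above is the DeepSpeed ZeRO side of the checkpoint: one bf16 optimizer-state shard and one model-state shard per data-parallel rank (ranks 0-3). If a single consolidated fp32 state dict is wanted, DeepSpeed's zero_to_fp32 helpers can merge the shards; a minimal sketch, assuming DeepSpeed is installed and last-checkpoint/ is the local checkpoint root:

from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# The "latest" file inside the checkpoint directory names the tag to load
# (global_step2899 after this commit); it is passed explicitly here for clarity.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step2899")
print(f"consolidated {sum(t.numel() for t in state_dict.values()):,} fp32 parameters")
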
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2849
+global_step2899
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ce92cea831a04716b4b472f1dad1cc986b2021dee9aac057217f5d455b27ec42
+oid sha256:bfc5d4e344535f1dd0ff5275633ec3d55bb6249e432442365ff24445d82ec35c
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3cddb73bbdf0f6f6a2c3182d70f7ad5d587353b164c08dd4f383b940d6b61e4e
+oid sha256:8a898928042c09dc123c1025557279997043b7f607bc91ee2ff2d4b4d2b9f1ba
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b24b508e466beb446d37377d2a04757d3bc2b4230de3ac56b25a65d7753a74c1
+oid sha256:6923d07d979aa78d66765208f598662fd5092b5227cd87920feedfb923fa375f
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4c6a18a7de8b25b21673ba2ff7efbaaae00ec8c453c7975b467c1df87b87022
+oid sha256:e9ba35b9b3c512fbe857d909557329ba47dbefe5f521014123c05901c32edb6d
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:06c7a8724990bbfa42e474ca2bea837a85b83fc6dd9afb66285c6f4108456bdd
+oid sha256:2ab87135f8169af50b16a6f8b1198988096b14bc0e5836b645aff14a930bb3f5
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.18780523538589478,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-2650",
-  "epoch": 1.467679629152717,
+  "epoch": 1.4934329126963688,
   "eval_steps": 50,
-  "global_step": 2850,
+  "global_step": 2900,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5080,11 +5080,100 @@
       "eval_steps_per_second": 0.765,
       "num_input_tokens_seen": 33327720,
       "step": 2850
+    },
+    {
+      "epoch": 1.4702549575070822,
+      "grad_norm": 8.379574283067335,
+      "learning_rate": 1.795318255808956e-05,
+      "loss": 0.2166,
+      "num_input_tokens_seen": 33386160,
+      "step": 2855
+    },
+    {
+      "epoch": 1.4728302858614473,
+      "grad_norm": 9.376274649781424,
+      "learning_rate": 1.7789962377094088e-05,
+      "loss": 0.1843,
+      "num_input_tokens_seen": 33444648,
+      "step": 2860
+    },
+    {
+      "epoch": 1.4754056142158125,
+      "grad_norm": 2.5653090157172724,
+      "learning_rate": 1.762732683007902e-05,
+      "loss": 0.1988,
+      "num_input_tokens_seen": 33503144,
+      "step": 2865
+    },
+    {
+      "epoch": 1.4779809425701778,
+      "grad_norm": 12.205543974310553,
+      "learning_rate": 1.746527886898962e-05,
+      "loss": 0.1715,
+      "num_input_tokens_seen": 33561600,
+      "step": 2870
+    },
+    {
+      "epoch": 1.4805562709245428,
+      "grad_norm": 2.016900668078144,
+      "learning_rate": 1.7303821435105998e-05,
+      "loss": 0.1577,
+      "num_input_tokens_seen": 33620056,
+      "step": 2875
+    },
+    {
+      "epoch": 1.483131599278908,
+      "grad_norm": 7.53734329898392,
+      "learning_rate": 1.714295745898989e-05,
+      "loss": 0.2204,
+      "num_input_tokens_seen": 33678512,
+      "step": 2880
+    },
+    {
+      "epoch": 1.4857069276332733,
+      "grad_norm": 25.928242388695963,
+      "learning_rate": 1.6982689860431283e-05,
+      "loss": 0.2283,
+      "num_input_tokens_seen": 33736960,
+      "step": 2885
+    },
+    {
+      "epoch": 1.4882822559876385,
+      "grad_norm": 21.45493152674021,
+      "learning_rate": 1.682302154839558e-05,
+      "loss": 0.2632,
+      "num_input_tokens_seen": 33795416,
+      "step": 2890
+    },
+    {
+      "epoch": 1.4908575843420036,
+      "grad_norm": 8.396238950090229,
+      "learning_rate": 1.6663955420970667e-05,
+      "loss": 0.1844,
+      "num_input_tokens_seen": 33853936,
+      "step": 2895
+    },
+    {
+      "epoch": 1.4934329126963688,
+      "grad_norm": 4.710042357794161,
+      "learning_rate": 1.650549436531442e-05,
+      "loss": 0.2015,
+      "num_input_tokens_seen": 33912440,
+      "step": 2900
+    },
+    {
+      "epoch": 1.4934329126963688,
+      "eval_loss": 0.23923428356647491,
+      "eval_runtime": 19.3819,
+      "eval_samples_per_second": 3.096,
+      "eval_steps_per_second": 0.774,
+      "num_input_tokens_seen": 33912440,
+      "step": 2900
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 33327720,
+  "num_input_tokens_seen": 33912440,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5099,7 +5188,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2199114391224320.0,
+  "total_flos": 2237698608201728.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null