ben81828 committed (verified)
Commit a0b137c · 1 Parent(s): c66e2f4

Training in progress, step 650, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:168cc62ce0f9bc823e9f05cfca486c4f8b12cfdb3adf1b70687137fa417f7b65
+oid sha256:e67d46cdc279c111f3d8b6cd0c68158fe4239ea365a1c2b31f1709a759de013a
 size 29034840
last-checkpoint/global_step650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14d01bfba78120d96959b055843024bd09f10360c10a51370c7a04ad4ca16e2b
+size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d43719e39bbb6e731c70986b000f026aaef64ad5e576c03421efc74a249aef17
+size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e58ddb882ce83931caddad7c8eb62c51d3a029a7669ac2a1fa9911be812fc8f
+size 43429616
last-checkpoint/global_step650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20c6a185c2c8c974c38b0c9335e9f3a0acb549e7128f771e2b77df5c5c0896e
+size 43429616
last-checkpoint/global_step650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f9450a0ced61d6c33a66cdf1b2511022f0ec24416b17e1f16ed49dc98ff62dd
+size 637299
last-checkpoint/global_step650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d01b9093c4ff061c6453535b14beda5a9d96a531472a58effac685152d63de95
+size 637171
last-checkpoint/global_step650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1a3bfb0ef98b44969e473f6d5c11a92b5a607c48dc2a278b5d72591611de4079
+size 637171
last-checkpoint/global_step650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a41c42f45b9260afbaae285e2a90de1413db2108ea931952bc69942ea6f19e1e
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step600
+global_step650
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a81e3916b1392c4c49afb171dee5415c15f5a5a5af8749b28195fcfa0596699c
+oid sha256:8044e4c53158c210a17648ba8f2dc2d25a25bbfc55f686015542618eb652a33e
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a781038dd714b87b8adb1aac8dbc8217ceb607428a992133954ad522365236e
+oid sha256:4cd85d7fa425e7888c973f1c2985ac15ca21b5e6171fe140a401c2bc75ca46ff
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9446c3db15f382a5546f13622787fc99392a5e0bc8a9ca2da1838de7ab621a37
+oid sha256:d7915667371a58f1598639e0d1c20a0c59c783c14580cd040a6631eb4ea2311e
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1f11e7a6b3faa884fc23044e3772ff9dd72c257f02e121665061e2a03d518bd9
+oid sha256:35dd78929ad7f0fbf37fdb1284e8edf0424350f6e6ce1cd5a3ee78979af3d3cb
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c97a1e2f2542883d462e18c679fb75515cd51cbf96416fbbbdc7ed7d003e43a9
+oid sha256:97610cd95fe06fdca35f3af5c085e6621fc4c4d7c3ecef7fbe6456b650501b2b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.787663459777832,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-600",
-  "epoch": 0.3090394025238218,
+  "epoch": 0.3347926860674736,
   "eval_steps": 50,
-  "global_step": 600,
+  "global_step": 650,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1075,11 +1075,100 @@
       "eval_steps_per_second": 0.765,
       "num_input_tokens_seen": 7017576,
       "step": 600
+    },
+    {
+      "epoch": 0.311614730878187,
+      "grad_norm": 6.69798043480266,
+      "learning_rate": 9.697977204284973e-05,
+      "loss": 0.8925,
+      "num_input_tokens_seen": 7076032,
+      "step": 605
+    },
+    {
+      "epoch": 0.31419005923255217,
+      "grad_norm": 5.067921055882507,
+      "learning_rate": 9.690643253456297e-05,
+      "loss": 0.8159,
+      "num_input_tokens_seen": 7134536,
+      "step": 610
+    },
+    {
+      "epoch": 0.31676538758691736,
+      "grad_norm": 7.400939684061883,
+      "learning_rate": 9.683224164278264e-05,
+      "loss": 0.826,
+      "num_input_tokens_seen": 7193032,
+      "step": 615
+    },
+    {
+      "epoch": 0.3193407159412825,
+      "grad_norm": 5.898525799199162,
+      "learning_rate": 9.675720071412365e-05,
+      "loss": 0.8187,
+      "num_input_tokens_seen": 7251568,
+      "step": 620
+    },
+    {
+      "epoch": 0.3219160442956477,
+      "grad_norm": 11.267105316774332,
+      "learning_rate": 9.66813111106296e-05,
+      "loss": 0.8524,
+      "num_input_tokens_seen": 7310072,
+      "step": 625
+    },
+    {
+      "epoch": 0.3244913726500129,
+      "grad_norm": 6.703970582399643,
+      "learning_rate": 9.660457420974819e-05,
+      "loss": 0.7966,
+      "num_input_tokens_seen": 7368560,
+      "step": 630
+    },
+    {
+      "epoch": 0.32706670100437807,
+      "grad_norm": 6.945445265294353,
+      "learning_rate": 9.652699140430608e-05,
+      "loss": 0.799,
+      "num_input_tokens_seen": 7427040,
+      "step": 635
+    },
+    {
+      "epoch": 0.32964202935874326,
+      "grad_norm": 7.0684293091171595,
+      "learning_rate": 9.644856410248369e-05,
+      "loss": 0.8477,
+      "num_input_tokens_seen": 7485552,
+      "step": 640
+    },
+    {
+      "epoch": 0.3322173577131084,
+      "grad_norm": 7.165086711244158,
+      "learning_rate": 9.636929372778963e-05,
+      "loss": 0.7867,
+      "num_input_tokens_seen": 7544040,
+      "step": 645
+    },
+    {
+      "epoch": 0.3347926860674736,
+      "grad_norm": 9.185933515393563,
+      "learning_rate": 9.628918171903485e-05,
+      "loss": 0.8367,
+      "num_input_tokens_seen": 7602512,
+      "step": 650
+    },
+    {
+      "epoch": 0.3347926860674736,
+      "eval_loss": 0.7940558791160583,
+      "eval_runtime": 19.7641,
+      "eval_samples_per_second": 3.036,
+      "eval_steps_per_second": 0.759,
+      "num_input_tokens_seen": 7602512,
+      "step": 650
     }
   ],
   "logging_steps": 5,
   "max_steps": 3882,
-  "num_input_tokens_seen": 7017576,
+  "num_input_tokens_seen": 7602512,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1094,7 +1183,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 462990180876288.0,
+  "total_flos": 501587549356032.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null