Commit 271ca2d (verified) by masatochi · Parent(s): 8d5da89

Training in progress, step 165, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5150c2196969043314f566861c4849a7f9365e1cc6a686a4cdb2edb6fa4eddb5
+oid sha256:b1f826ff0deb5ca760bab1688322cba096707f0f6506ea0db49e64851b1b6e1b
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df1a35499a98ce19e19615db2fbac7d72d4a494f93b1298df13d80ee2cf6aec3
+oid sha256:304ef3fb9b06f4e634b17acfe8df2622465f353a24e78010c8336da55dc22a2d
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3d72c4d52a605fc99c729c21c0deb735993f9bc8ecba075dedb1f93e3a6ad9c7
+oid sha256:6728ffdf9bf59bf13703e6ecc746855ce34369dd62dc3ca69a894e5a32ceaccd
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fda70a7e2255bb2a6bed4064da315669732f8ee1496404c42573741f484eb69b
+oid sha256:b9d255475980828d00dffcac4a69741da74c7e5ef4f645ab21d69e57306e3317
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07824439146647105,
+  "epoch": 0.08068952869979827,
   "eval_steps": 34,
-  "global_step": 160,
+  "global_step": 165,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1167,6 +1167,41 @@
       "learning_rate": 2.6099108277934103e-05,
       "loss": 0.9811,
       "step": 160
+    },
+    {
+      "epoch": 0.0787334189131365,
+      "grad_norm": 0.36206722259521484,
+      "learning_rate": 2.4866811044312665e-05,
+      "loss": 1.1474,
+      "step": 161
+    },
+    {
+      "epoch": 0.07922244635980194,
+      "grad_norm": 0.38807740807533264,
+      "learning_rate": 2.36601717258897e-05,
+      "loss": 0.9162,
+      "step": 162
+    },
+    {
+      "epoch": 0.07971147380646738,
+      "grad_norm": 0.36179038882255554,
+      "learning_rate": 2.2479602388887012e-05,
+      "loss": 0.8902,
+      "step": 163
+    },
+    {
+      "epoch": 0.08020050125313283,
+      "grad_norm": 0.40321770310401917,
+      "learning_rate": 2.132550619665168e-05,
+      "loss": 0.8454,
+      "step": 164
+    },
+    {
+      "epoch": 0.08068952869979827,
+      "grad_norm": 0.3098496198654175,
+      "learning_rate": 2.0198277271976052e-05,
+      "loss": 1.0062,
+      "step": 165
     }
   ],
   "logging_steps": 1,
@@ -1186,7 +1221,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.102326712880333e+17,
+  "total_flos": 7.324274422657843e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null