masatochi committed on
Commit 1832b05 · verified · 1 Parent(s): 2274625

Training in progress, step 35, checkpoint

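The files below are the usual per-checkpoint artifacts written by a Hugging Face Trainer run: adapter weights, optimizer and scheduler state, the RNG state, and trainer_state.json carrying the step-by-step log. As a rough illustration only (not part of this commit), a minimal sketch of reading the logged losses back from this checkpoint; it assumes the directory is named last-checkpoint as in the paths below and that the entries live under the standard log_history key of trainer_state.json.

```python
# Minimal sketch: print the per-step training losses and the eval result
# recorded in this checkpoint's trainer_state.json.
# Assumption: the checkpoint directory is "last-checkpoint" as in this commit.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}, epoch={state['epoch']:.6f}")
for entry in state["log_history"]:
    if "loss" in entry:            # training log entries (logging_steps=1)
        print(f"step {entry['step']:>3}: loss={entry['loss']:.4f}")
    elif "eval_loss" in entry:     # evaluation entries (eval_steps=34)
        print(f"step {entry['step']:>3}: eval_loss={entry['eval_loss']:.4f}")
```

To continue the run from this state, the standard Trainer call `trainer.train(resume_from_checkpoint="last-checkpoint")` could be used, given the same model and training arguments.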
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8793f46feb5de8084edfe2d09eb7051d2e0cf0226d4c35f19c7f3b706f020162
+oid sha256:9fcf40be826f55b07270198e0a6ead902a546dedd458e282c2ffd682c7f06eaa
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:81abc551906a3c2b0ee0bcf4e2f4b1fae525647218995a2d644aa121904655c5
+oid sha256:4eceac72edacc47b86aa1b6436a39c3f9c719c923b943c1c6d4b1f803e72ffbd
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:42700a3931f169d1a7cb24ca5c5bfdf8c30401d4efdc7d55be9e5bed753e25b1
+oid sha256:dd3b0103484cbe9033aa207b10c52e72e6e07124b9b9355e171cb765f0460e37
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cedaf8893734b19717a3bbbc716629d55965a18bdde504cf46d9182fcb60eb14
+oid sha256:666e3907648b0318630be4f0e9270d7d902986930986134493537abd333e959c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.014670823399963322,
+  "epoch": 0.017115960633290545,
   "eval_steps": 34,
-  "global_step": 30,
+  "global_step": 35,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -225,6 +225,49 @@
       "learning_rate": 0.0002,
       "loss": 1.0082,
       "step": 30
+    },
+    {
+      "epoch": 0.015159850846628768,
+      "grad_norm": 0.47625860571861267,
+      "learning_rate": 0.00019998292504580528,
+      "loss": 0.9419,
+      "step": 31
+    },
+    {
+      "epoch": 0.01564887829329421,
+      "grad_norm": 0.5290003418922424,
+      "learning_rate": 0.0001999317060143023,
+      "loss": 1.1635,
+      "step": 32
+    },
+    {
+      "epoch": 0.016137905739959654,
+      "grad_norm": 0.5592769980430603,
+      "learning_rate": 0.0001998463603967434,
+      "loss": 0.9437,
+      "step": 33
+    },
+    {
+      "epoch": 0.0166269331866251,
+      "grad_norm": 0.3583241403102875,
+      "learning_rate": 0.00019972691733857883,
+      "loss": 1.1428,
+      "step": 34
+    },
+    {
+      "epoch": 0.0166269331866251,
+      "eval_loss": 1.0231887102127075,
+      "eval_runtime": 1359.3297,
+      "eval_samples_per_second": 1.9,
+      "eval_steps_per_second": 0.633,
+      "step": 34
+    },
+    {
+      "epoch": 0.017115960633290545,
+      "grad_norm": 0.41851457953453064,
+      "learning_rate": 0.00019957341762950344,
+      "loss": 1.0976,
+      "step": 35
     }
   ],
   "logging_steps": 1,
@@ -244,7 +287,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.3316862586650624e+17,
+  "total_flos": 1.5536339684425728e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null