masatochi committed
Commit f090635 · verified · 1 Parent(s): 616e886

Training in progress, step 60, checkpoint

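The files below are what the transformers Trainer writes at a save step: the adapter weights, optimizer and LR-scheduler state, RNG state, and trainer_state.json. A minimal sketch of resuming from this directory, assuming the original training script; the base model, dataset, and arguments shown are placeholders, not taken from this repository:

# Hypothetical resume sketch; model, dataset, and output_dir are placeholders.
from transformers import AutoModelForCausalLM, Trainer, TrainingArguments

model = AutoModelForCausalLM.from_pretrained("your-base-model")
args = TrainingArguments(
    output_dir=".",
    per_device_train_batch_size=3,  # matches "train_batch_size": 3 in trainer_state.json
    logging_steps=1,                # matches "logging_steps": 1
    eval_steps=34,                  # matches "eval_steps": 34
)
trainer = Trainer(model=model, args=args, train_dataset=your_dataset)
# Picks up the optimizer, scheduler, and RNG state saved at step 60.
trainer.train(resume_from_checkpoint="last-checkpoint")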
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e0b256324c77b74ce7cf35a934a1dea4cf2f1c4e901640287d88f57a3f16226
+oid sha256:d6182a1bf66b3e9471386ce49f5b81916a920ee2586faf2b33d43c13d5947f5c
 size 83945296
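Each changed file here is a Git LFS pointer: the repository tracks only the sha256 oid and byte size, while the blob itself lives in LFS storage. A short sketch, assuming the file has been pulled locally (the path is illustrative), to check that a local copy matches the new pointer:

import hashlib

def lfs_sha256(path: str) -> str:
    # Stream the file so large checkpoints do not load fully into memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "d6182a1bf66b3e9471386ce49f5b81916a920ee2586faf2b33d43c13d5947f5c"
print(lfs_sha256("last-checkpoint/adapter_model.safetensors") == expected)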
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54788bd57376a0ecc9872c34fed24cc6327e9699d90ddaa7ba48b2e34adcbf9a
+oid sha256:d786bfb07515005a60cacddfa1a37bcf2424ba648fb856f7801dddeddf8e7fc7
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:501917edc59cb38fa8ba673663fd0069ec90987aba1d4647ea54c9fb6fe18e47
+oid sha256:1e2701e6433cbd351cde5d73f1b3cbd9cefb4bc8bfbd165ddcbca91b8495404c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:61487aeef4449a4dec50f3ae9ec76bca52908878863009a40746a21c237f51ad
+oid sha256:78248a64468e8e03af894427063f3f9a858b670b67d13949fb12f06211d294f4
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.026896509566599426,
+  "epoch": 0.029341646799926645,
   "eval_steps": 34,
-  "global_step": 55,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -408,6 +408,41 @@
       "learning_rate": 0.00018951632913550626,
       "loss": 0.9054,
       "step": 55
+    },
+    {
+      "epoch": 0.02738553701326487,
+      "grad_norm": 0.3576945662498474,
+      "learning_rate": 0.0001886773685920062,
+      "loss": 1.0002,
+      "step": 56
+    },
+    {
+      "epoch": 0.027874564459930314,
+      "grad_norm": 0.2915222942829132,
+      "learning_rate": 0.0001878081248083698,
+      "loss": 0.9491,
+      "step": 57
+    },
+    {
+      "epoch": 0.028363591906595757,
+      "grad_norm": 0.296265572309494,
+      "learning_rate": 0.00018690889463055283,
+      "loss": 1.0483,
+      "step": 58
+    },
+    {
+      "epoch": 0.0288526193532612,
+      "grad_norm": 0.3489420413970947,
+      "learning_rate": 0.00018597998514483725,
+      "loss": 0.9064,
+      "step": 59
+    },
+    {
+      "epoch": 0.029341646799926645,
+      "grad_norm": 0.37772810459136963,
+      "learning_rate": 0.00018502171357296144,
+      "loss": 0.9779,
+      "step": 60
     }
   ],
   "logging_steps": 1,
@@ -427,7 +462,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.4414248075526144e+17,
+  "total_flos": 2.6633725173301248e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null