masatochi committed on
Commit 080fd27 · verified · 1 Parent(s): 2d1961e

Training in progress, step 85, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f13ba07bc586558fb64f1b66825209f18db56ce572477ab8155107c3ff3f0a88
+oid sha256:f44a8929830688bb4f6cf3bb4f71e56ae1288a232f4677e3d86095a957322a04
 size 59827904
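
The adapter weights are tracked with Git LFS, so the diff above only swaps the pointer's object id; the payload size is unchanged at 59827904 bytes. A minimal sketch of checking a locally pulled copy against the new pointer (the local path is an assumption, not part of the commit):

# Verify a downloaded LFS object against the oid/size recorded in its pointer file.
import hashlib
import os

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so a large checkpoint does not need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "last-checkpoint/adapter_model.safetensors"  # assumed local path after `git lfs pull`
expected_oid = "f44a8929830688bb4f6cf3bb4f71e56ae1288a232f4677e3d86095a957322a04"
expected_size = 59827904

assert os.path.getsize(path) == expected_size, "size differs from the LFS pointer"
assert sha256_of(path) == expected_oid, "sha256 differs from the LFS pointer"
print("adapter_model.safetensors matches its LFS pointer")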
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:030da15cbb81bbe486c293ede63079918942591b2c0aab7e2ba0cf09fe49d1a5
+oid sha256:a8f27453575437da6f87b9c37a6ab4b7dd09e5107bfc2aaa57336c7611e95b7c
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:646ade6023f73d01b10ebe8ac45df7f64238b06f8264b4a748bbde983b0bdd8f
+oid sha256:7168b0d6c665cd748268e687051b0b7d92835f5a5784945e8c1488a08a50658e
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4bc75fc1c14b28d29d31fa9d4252536c919fc25a390fac3a1e8c09d6575b4029
+oid sha256:8d9707e13ab424365dde92daf30c033711d2caa0fada77309f65d41532581807
 size 1064
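
Besides the adapter weights, the checkpoint rolls forward the optimizer state (optimizer.pt), the RNG snapshot (rng_state.pth) and the LR scheduler state (scheduler.pt), which is what allows training to resume at step 85 instead of restarting. A rough, assumed way to inspect those pieces after cloning the repo and pulling the LFS files (not part of the commit):

import torch

# weights_only=False is needed in recent PyTorch because these are pickled
# training states, not plain tensors; only load checkpoint files you trust.
optimizer_state = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler_state = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(list(optimizer_state))   # typically "state" and "param_groups"
print(scheduler_state)         # LR scheduler state dict (small, hence the 1064-byte file)
print(type(rng_state))         # RNG snapshot used when resuming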
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.039122195733235526,
+  "epoch": 0.041567332966562745,
   "eval_steps": 34,
-  "global_step": 80,
+  "global_step": 85,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -591,6 +591,41 @@
       "learning_rate": 0.00016026346363792567,
       "loss": 9.2195,
       "step": 80
+    },
+    {
+      "epoch": 0.03961122317990097,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 9.7124,
+      "step": 81
+    },
+    {
+      "epoch": 0.040100250626566414,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00015727351400805052,
+      "loss": 9.2742,
+      "step": 82
+    },
+    {
+      "epoch": 0.04058927807323186,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00015574894393428855,
+      "loss": 11.2145,
+      "step": 83
+    },
+    {
+      "epoch": 0.0410783055198973,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00015420533564724495,
+      "loss": 8.6546,
+      "step": 84
+    },
+    {
+      "epoch": 0.041567332966562745,
+      "grad_norm": Infinity,
+      "learning_rate": 0.0001526432162877356,
+      "loss": 8.4331,
+      "step": 85
     }
   ],
   "logging_steps": 1,
@@ -610,7 +645,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.7635837407657984e+17,
+  "total_flos": 1.8738077245636608e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null