masatochi committed on
Commit 4aff1b6 · verified · Parent: 3c3f6a1

Training in progress, step 90, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d8a32db2828cd0be16ca1dc3270a14ecfcccac3e23ecb40538485602e53ee626
+oid sha256:46a287a3794fcb8dbbceecf2b88cc096624efe5b27177b5a1abef1ee12514a57
 size 59827904
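
Each checkpoint file above is stored as a Git LFS pointer, so only the sha256 oid changes between commits while the size stays the same. A minimal sketch for checking a downloaded file against the new oid from this diff (the local path is an assumption; it mirrors the repository layout):

```python
import hashlib

# Assumed local path to the downloaded LFS object from this checkpoint.
FILE_PATH = "last-checkpoint/adapter_model.safetensors"
# New oid taken from the pointer diff above.
EXPECTED_OID = "46a287a3794fcb8dbbceecf2b88cc096624efe5b27177b5a1abef1ee12514a57"

sha = hashlib.sha256()
with open(FILE_PATH, "rb") as f:
    # Hash in 1 MiB chunks to avoid loading the whole file into memory.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

print("match" if sha.hexdigest() == EXPECTED_OID else "mismatch")
```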
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67ffd950c52cfbd9f40af5c0a5c4c61abf9542c299135c6338adc11abc2b1dfe
+oid sha256:f91efb653a8a76c016afa4c81f46c30f802355e458aa26c84d7be92cc93de2f2
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7168b0d6c665cd748268e687051b0b7d92835f5a5784945e8c1488a08a50658e
+oid sha256:1d7e9b37fba6634a583fc8b1bf58a4648f316317630483e214e684618a5aa0cb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d9707e13ab424365dde92daf30c033711d2caa0fada77309f65d41532581807
+oid sha256:dfdc0543e9ce40f0d0b0ee9752d10d130598c759cc5a2bd973736f6096894d17
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.041567332966562745,
+  "epoch": 0.04401247019988997,
   "eval_steps": 34,
-  "global_step": 85,
+  "global_step": 90,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -626,6 +626,41 @@
       "learning_rate": 0.0001526432162877356,
       "loss": 8.1738,
       "step": 85
+    },
+    {
+      "epoch": 0.042056360413228196,
+      "grad_norm": Infinity,
+      "learning_rate": 0.0001510631193180907,
+      "loss": 9.8915,
+      "step": 86
+    },
+    {
+      "epoch": 0.04254538785989364,
+      "grad_norm": 2.3853981730414264e+18,
+      "learning_rate": 0.0001494655843399779,
+      "loss": 8.3371,
+      "step": 87
+    },
+    {
+      "epoch": 0.04303441530655908,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00014785115691012864,
+      "loss": 8.893,
+      "step": 88
+    },
+    {
+      "epoch": 0.04352344275322453,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00014622038835403133,
+      "loss": 8.3023,
+      "step": 89
+    },
+    {
+      "epoch": 0.04401247019988997,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00014457383557765386,
+      "loss": 8.9136,
+      "step": 90
     }
   ],
   "logging_steps": 1,
@@ -645,7 +680,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.8738077245636608e+17,
+  "total_flos": 1.9840317083615232e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null