masatochi committed · Commit 012d4b2 · verified · 1 Parent(s): 3eb275a

Training in progress, step 90, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb8f9c20ff848f97c9be76e903f682cab9928a9e7a4234368fffbc3ae82ae492
+oid sha256:8cc30f1e357080bd583aaba94cad79247586b36f29d35647d75bd19ccc7e18b8
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d6b87fbb9d48467f85b9ed2b65af9f2b02a235337a09f6a5324590cf757d7513
+oid sha256:64885af957151ce4958b0e044f7ed2e3e471dca64d4388e096a94408b85ba248
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:826e666b3f1881e003df1e799293232a6d3ede1a55e213829eb507201e9190b8
+oid sha256:fdf18d6697609a27a0b488fa23bfd024570519524cd30dfeaa261a878e17c189
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d9707e13ab424365dde92daf30c033711d2caa0fada77309f65d41532581807
+oid sha256:dfdc0543e9ce40f0d0b0ee9752d10d130598c759cc5a2bd973736f6096894d17
 size 1064
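
Each of the pointer updates above only swaps the sha256 oid of the stored blob; the recorded sizes are unchanged. As an aside, here is a minimal sketch (not part of this repo, and assuming the checkpoint files have already been fetched locally, e.g. via git lfs pull) for checking that the downloaded files match the new oids in this commit:

# Illustrative sketch: verify local checkpoint files against the Git LFS
# pointer oids recorded in this commit.
import hashlib
from pathlib import Path

# Expected sha256 oids for the new pointer contents shown above.
EXPECTED = {
    "last-checkpoint/adapter_model.safetensors": "8cc30f1e357080bd583aaba94cad79247586b36f29d35647d75bd19ccc7e18b8",
    "last-checkpoint/optimizer.pt": "64885af957151ce4958b0e044f7ed2e3e471dca64d4388e096a94408b85ba248",
    "last-checkpoint/rng_state.pth": "fdf18d6697609a27a0b488fa23bfd024570519524cd30dfeaa261a878e17c189",
    "last-checkpoint/scheduler.pt": "dfdc0543e9ce40f0d0b0ee9752d10d130598c759cc5a2bd973736f6096894d17",
}

def sha256_of(path: Path) -> str:
    # Hash the file in 1 MiB chunks to avoid loading large blobs into memory.
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

for name, expected in EXPECTED.items():
    p = Path(name)
    if not p.exists():
        print(f"{name}: missing (still an LFS pointer or not downloaded?)")
        continue
    print(f"{name}: {'OK' if sha256_of(p) == expected else 'MISMATCH'}")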
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.041567332966562745,
+  "epoch": 0.04401247019988997,
   "eval_steps": 34,
-  "global_step": 85,
+  "global_step": 90,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -626,6 +626,41 @@
       "learning_rate": 0.0001526432162877356,
       "loss": 1.0845,
       "step": 85
+    },
+    {
+      "epoch": 0.042056360413228196,
+      "grad_norm": 0.2645609378814697,
+      "learning_rate": 0.0001510631193180907,
+      "loss": 0.8387,
+      "step": 86
+    },
+    {
+      "epoch": 0.04254538785989364,
+      "grad_norm": 0.2979009747505188,
+      "learning_rate": 0.0001494655843399779,
+      "loss": 1.0789,
+      "step": 87
+    },
+    {
+      "epoch": 0.04303441530655908,
+      "grad_norm": 0.3363690674304962,
+      "learning_rate": 0.00014785115691012864,
+      "loss": 0.9624,
+      "step": 88
+    },
+    {
+      "epoch": 0.04352344275322453,
+      "grad_norm": 0.700744092464447,
+      "learning_rate": 0.00014622038835403133,
+      "loss": 1.0248,
+      "step": 89
+    },
+    {
+      "epoch": 0.04401247019988997,
+      "grad_norm": 0.4093436300754547,
+      "learning_rate": 0.00014457383557765386,
+      "loss": 0.993,
+      "step": 90
     }
   ],
   "logging_steps": 1,
@@ -645,7 +680,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.773111066217677e+17,
+  "total_flos": 3.995058775995187e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null