masatochi committed
Commit 400e015 · verified · 1 Parent(s): 6e7cbb1

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5206b64b2dbfead9aed5d4ffe61b0aefde2dd764fe08312607cb2b94fb8d7932
+oid sha256:2804ca5b5892a76645bfd34d9388ae43f9efcfd8d9f15335cdc723c5a0fed53b
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9a26a62fe8b53bf9461820fee5d4f51ec4dd3d81967cb4a1732f165d27c70131
+oid sha256:0b279f97477546bd8157a8dce653efa9e4e2aac00a9e7a0101b5793489729d25
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:14a47bb6c7de5708f6f2b75126905ee2455abe93376f11950e796f2aa144c3bf
+oid sha256:94a9b6b38714cd4775330d5c8485f5a42057200cf2746b572faa0571796877a3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3160dd3097641f3bf4d4036c0ddfd8673184925120f088b5ebecc6a1e5c953dd
+oid sha256:07b8a5b9aaab52c529e8225a8efdb5ee4b1c103decfe1101828eac45305d759c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.04645760743321719,
+  "epoch": 0.04890274466654441,
   "eval_steps": 34,
-  "global_step": 95,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -696,6 +696,41 @@
       "learning_rate": 0.00013612416661871533,
       "loss": 9.3274,
       "step": 95
+    },
+    {
+      "epoch": 0.04694663487988263,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00013439489186339282,
+      "loss": 8.6449,
+      "step": 96
+    },
+    {
+      "epoch": 0.04743566232654808,
+      "grad_norm": Infinity,
+      "learning_rate": 0.0001326538712840083,
+      "loss": 8.9065,
+      "step": 97
+    },
+    {
+      "epoch": 0.04792468977321352,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 8.659,
+      "step": 98
+    },
+    {
+      "epoch": 0.048413717219878964,
+      "grad_norm": Infinity,
+      "learning_rate": 0.00012913897468893248,
+      "loss": 8.259,
+      "step": 99
+    },
+    {
+      "epoch": 0.04890274466654441,
+      "grad_norm": 1.7481951207638434e+19,
+      "learning_rate": 0.0001273662990072083,
+      "loss": 8.2901,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -715,7 +750,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2.0942556921593856e+17,
+  "total_flos": 2.204479675957248e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null