masatochi committed
Commit 32dcd6d · verified · 1 Parent(s): 7907f8d

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:75cbc872c534a49966396c255341d9ff2c917040c39e7f926f7533577d857cc2
+oid sha256:b3aae5e22c885800d90b299eaaf44aee69e3a90e7d46275d0085e4ac6a673db3
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0679492656c17132b1f8d6498337064990d913e068a9db428d6dc866229a749a
+oid sha256:358866885ea048cd89ba52e27e6ed69c858994b3c209c6e2637fb566d18d918b
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:710b3f208bedd5f0fd57c1f63cb3947d4bf432afdfc4b07fd7ad25fb3ae74fb4
+oid sha256:58fe55f37e15eaaf48a322b8fab1abce8bcd6c4961dc9be21f59761cc27ae094
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3160dd3097641f3bf4d4036c0ddfd8673184925120f088b5ebecc6a1e5c953dd
+oid sha256:07b8a5b9aaab52c529e8225a8efdb5ee4b1c103decfe1101828eac45305d759c
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.04645760743321719,
+  "epoch": 0.04890274466654441,
   "eval_steps": 34,
-  "global_step": 95,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -696,6 +696,41 @@
       "learning_rate": 0.00013612416661871533,
       "loss": 1.187,
       "step": 95
+    },
+    {
+      "epoch": 0.04694663487988263,
+      "grad_norm": 0.4264492392539978,
+      "learning_rate": 0.00013439489186339282,
+      "loss": 1.1103,
+      "step": 96
+    },
+    {
+      "epoch": 0.04743566232654808,
+      "grad_norm": 0.40396761894226074,
+      "learning_rate": 0.0001326538712840083,
+      "loss": 1.0,
+      "step": 97
+    },
+    {
+      "epoch": 0.04792468977321352,
+      "grad_norm": 0.30181634426116943,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 0.9821,
+      "step": 98
+    },
+    {
+      "epoch": 0.048413717219878964,
+      "grad_norm": 0.37075236439704895,
+      "learning_rate": 0.00012913897468893248,
+      "loss": 1.0607,
+      "step": 99
+    },
+    {
+      "epoch": 0.04890274466654441,
+      "grad_norm": 0.39950835704803467,
+      "learning_rate": 0.0001273662990072083,
+      "loss": 1.011,
+      "step": 100
     }
   ],
   "logging_steps": 1,
@@ -715,7 +750,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.2170064857726976e+17,
+  "total_flos": 4.438954195550208e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null