masatochi committed (verified)
Commit c9042dc · 1 Parent(s): 7fa4af3

Training in progress, step 135, checkpoint

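This commit refreshes the files that transformers' Trainer writes for a mid-run checkpoint: the PEFT adapter weights (adapter_model.safetensors), optimizer and scheduler state, RNG state, and trainer_state.json. Below is a minimal sketch of a training setup consistent with the values recorded in trainer_state.json (batch size 3, logging every step, eval every 34 steps); the model, datasets, and output directory are placeholders, and the 5-step save interval is only inferred from the step-130 → step-135 jump.

```python
# Sketch only: model/tokenizer/dataset construction is omitted and every name
# here is a placeholder. Values marked "from trainer_state.json" come from the
# diff below; save_steps=5 is an assumption based on the 130 -> 135 step jump.
from transformers import Trainer, TrainingArguments

def build_trainer(model, train_dataset, eval_dataset):
    args = TrainingArguments(
        output_dir="outputs",              # checkpoints go to outputs/checkpoint-<step>
        per_device_train_batch_size=3,     # from trainer_state.json: "train_batch_size": 3
        logging_steps=1,                   # from trainer_state.json: "logging_steps": 1
        evaluation_strategy="steps",       # named eval_strategy in newer transformers releases
        eval_steps=34,                     # from trainer_state.json: "eval_steps": 34
        save_strategy="steps",
        save_steps=5,                      # assumption, see note above
    )
    return Trainer(model=model, args=args,
                   train_dataset=train_dataset, eval_dataset=eval_dataset)

# Resuming reloads optimizer.pt, scheduler.pt, rng_state.pth and trainer_state.json
# from the checkpoint directory:
#   trainer = build_trainer(model, train_ds, eval_ds)
#   trainer.train(resume_from_checkpoint="last-checkpoint")
```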
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:04219c720b69da395b407d47b273d1b6f06d909e0e16bc792d9da19239623894
+oid sha256:2693e3c807ab1261a91352b2a34141c7debf3c87e07888e0213731c63bf1238c
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5045a1eecb46de53b6cd1195ec2d1add713e18b2412bfe58472bed9fbf500eac
+oid sha256:57d38d2f83edf7948b193d522722e03c4f2b8cbed5067da59897c7f7c3822919
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f9e2e9d13f1fab954a76ecc1666482fd90d98438794e386126a6ee156ce5d270
+oid sha256:79354fda314c9103e2d55f6fd0e3e7ec5fe801812e33d2c8e4dd8c180772e09a
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb82cfc852da5eb6970b061692451307a303bfd2a3160c6d6a29266f8bb6adef
+oid sha256:6fc750a6dfb3e5c9f642238b7443b0984a56e79b7c2731a6e152ecfc3e32f4e7
 size 1064
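All four binary files above are stored through Git LFS, so the diff only touches the pointer text: each object's sha256 oid changes while its recorded byte size stays the same. A hedged sketch (paths are hypothetical) for checking a locally downloaded blob against its pointer:

```python
# Sketch, not part of the repo: a git-lfs pointer file records the object's
# sha256 and byte size, so a downloaded blob can be verified against it.
import hashlib

def read_pointer(pointer_path):
    """Parse a git-lfs pointer file into its oid and size."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return {"oid": fields["oid"].removeprefix("sha256:"), "size": int(fields["size"])}

def verify(pointer_path, blob_path):
    """Return True if blob_path matches the pointer's sha256 and size."""
    meta = read_pointer(pointer_path)
    digest = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == meta["oid"] and size == meta["size"]

# Example with hypothetical local paths:
#   verify("adapter_model.safetensors.pointer", "adapter_model.safetensors")
```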
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.06357356806650773,
+  "epoch": 0.06601870529983496,
   "eval_steps": 34,
-  "global_step": 130,
+  "global_step": 135,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -949,6 +949,41 @@
       "learning_rate": 7.263370099279172e-05,
       "loss": 1.0144,
       "step": 130
+    },
+    {
+      "epoch": 0.06406259551317317,
+      "grad_norm": 0.48772111535072327,
+      "learning_rate": 7.086102531106754e-05,
+      "loss": 1.055,
+      "step": 131
+    },
+    {
+      "epoch": 0.06455162295983861,
+      "grad_norm": 0.37441909313201904,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 0.948,
+      "step": 132
+    },
+    {
+      "epoch": 0.06504065040650407,
+      "grad_norm": 0.4598063826560974,
+      "learning_rate": 6.734612871599168e-05,
+      "loss": 1.0565,
+      "step": 133
+    },
+    {
+      "epoch": 0.06552967785316952,
+      "grad_norm": 0.34052199125289917,
+      "learning_rate": 6.560510813660719e-05,
+      "loss": 0.9724,
+      "step": 134
+    },
+    {
+      "epoch": 0.06601870529983496,
+      "grad_norm": 0.4426654279232025,
+      "learning_rate": 6.387583338128471e-05,
+      "loss": 1.1751,
+      "step": 135
     }
   ],
   "logging_steps": 1,
@@ -968,7 +1003,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 5.77064045421527e+17,
+  "total_flos": 5.992588163992781e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null