masatochi committed
Commit 1ad4fc9 (verified) · 1 Parent(s): 28f60a3

Training in progress, step 160, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c71d237e710a962dcb9e6ba5ccc36a842b75de6c36ff2691b85a6e48f49d4991
+oid sha256:5150c2196969043314f566861c4849a7f9365e1cc6a686a4cdb2edb6fa4eddb5
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b0df129bdd4dadcc2e2c280e7cf459a2eb420cd37dd7d896ef8cfbf82a4c45fd
+oid sha256:df1a35499a98ce19e19615db2fbac7d72d4a494f93b1298df13d80ee2cf6aec3
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d1849ad9db14ff92103b9d227ede45e09b064472ac17db6af688392748c6c18b
+oid sha256:3d72c4d52a605fc99c729c21c0deb735993f9bc8ecba075dedb1f93e3a6ad9c7
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:382ece00c385c691790a218c7659825f5fdd28b4e63aa1032e7b069dd2944457
+oid sha256:fda70a7e2255bb2a6bed4064da315669732f8ee1496404c42573741f484eb69b
 size 1064
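
The four files above are Git LFS pointer files (version, oid sha256:<hash>, size); only the expected digests change in this commit. A minimal sketch, assuming the actual checkpoint blobs have been pulled into a local last-checkpoint/ directory (path is an assumption, not part of the commit), of how the on-disk bytes could be checked against the new oids:

```python
import hashlib
from pathlib import Path

# Expected SHA-256 digests, copied from the updated LFS pointers above.
EXPECTED = {
    "adapter_model.safetensors": "5150c2196969043314f566861c4849a7f9365e1cc6a686a4cdb2edb6fa4eddb5",
    "optimizer.pt": "df1a35499a98ce19e19615db2fbac7d72d4a494f93b1298df13d80ee2cf6aec3",
    "rng_state.pth": "3d72c4d52a605fc99c729c21c0deb735993f9bc8ecba075dedb1f93e3a6ad9c7",
    "scheduler.pt": "fda70a7e2255bb2a6bed4064da315669732f8ee1496404c42573741f484eb69b",
}

def sha256_of(path: Path) -> str:
    """Stream the file through SHA-256 so large checkpoints never sit fully in memory."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

checkpoint_dir = Path("last-checkpoint")  # assumed local checkout location
for name, expected in EXPECTED.items():
    actual = sha256_of(checkpoint_dir / name)
    print(f"{name}: {'OK' if actual == expected else 'MISMATCH'}")
```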
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07579925423314383,
+  "epoch": 0.07824439146647105,
   "eval_steps": 34,
-  "global_step": 155,
+  "global_step": 160,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1132,6 +1132,41 @@
       "learning_rate": 3.263043563534428e-05,
       "loss": 1.0331,
       "step": 155
+    },
+    {
+      "epoch": 0.07628828167980928,
+      "grad_norm": 0.40078866481781006,
+      "learning_rate": 3.1276331403073735e-05,
+      "loss": 0.9615,
+      "step": 156
+    },
+    {
+      "epoch": 0.07677730912647472,
+      "grad_norm": 0.6274716258049011,
+      "learning_rate": 2.9945696240670906e-05,
+      "loss": 1.0867,
+      "step": 157
+    },
+    {
+      "epoch": 0.07726633657314017,
+      "grad_norm": 0.3220311403274536,
+      "learning_rate": 2.8638984558824777e-05,
+      "loss": 0.9813,
+      "step": 158
+    },
+    {
+      "epoch": 0.07775536401980561,
+      "grad_norm": 0.3146653473377228,
+      "learning_rate": 2.7356642598377603e-05,
+      "loss": 0.9991,
+      "step": 159
+    },
+    {
+      "epoch": 0.07824439146647105,
+      "grad_norm": 0.22245855629444122,
+      "learning_rate": 2.6099108277934103e-05,
+      "loss": 0.9811,
+      "step": 160
     }
   ],
   "logging_steps": 1,
@@ -1151,7 +1186,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.880379003102822e+17,
+  "total_flos": 7.102326712880333e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null