masatochi committed (verified)
Commit: 222579b
Parent(s): 1fed760

Training in progress, step 30, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:90c26a85e38465a485240c658909a17687699416af3e79f864da8043bd0f62fc
+ oid sha256:596da61471f7acd22dfed810279a826afb7b67f26080e0055da80a685e50ae03
  size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f80c9ff111119d40461ca73630d31c5328cada50105f577839c77ebc90090796
+ oid sha256:3a70f332281dffbc44dd442c0eb6fba71dece1c53fc85584adbaed85acee7fe1
  size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3a5c53b4ddb1f83c87db0874b9ca8a72613eb4fb4e16a727f1e944d64a00fe7b
+ oid sha256:72223fd40b61c72a4df43aea9c90ca091d8f4c0b5e6bb74b51346988ac0956d2
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7cbe95d5ecd33771846042e20aabf210775f4e6a78ced16f0764898d40abeba5
+ oid sha256:cedaf8893734b19717a3bbbc716629d55965a18bdde504cf46d9182fcb60eb14
  size 1064
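
Each of the four checkpoint blobs above is tracked with Git LFS, so the commit only rewrites a small pointer file: the spec version line, the sha256 oid of the blob, and its size in bytes. As a minimal sketch of how a downloaded blob could be checked against its pointer, assuming a hypothetical local path under `last-checkpoint/` (the helper names below are illustrative, not part of the repository):

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_text: str) -> dict:
    """Split a Git LFS pointer into its key/value lines."""
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_blob(pointer_text: str, blob_path: Path) -> bool:
    """Compare a local blob's sha256 and size against its LFS pointer."""
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = blob_path.read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size


# Pointer contents copied from the new side of the adapter_model.safetensors diff above.
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:596da61471f7acd22dfed810279a826afb7b67f26080e0055da80a685e50ae03\n"
    "size 59827904\n"
)

# Hypothetical local path; point this at wherever the checkpoint was downloaded.
print(verify_blob(pointer, Path("last-checkpoint/adapter_model.safetensors")))
```

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt by swapping in their oid and size from the diffs above.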
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.012225686166636102,
+ "epoch": 0.014670823399963322,
  "eval_steps": 34,
- "global_step": 25,
+ "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -190,6 +190,41 @@
  "learning_rate": 0.0001666666666666667,
  "loss": 9.1022,
  "step": 25
+ },
+ {
+ "epoch": 0.012714713613301546,
+ "grad_norm": 3.8149141971211387e+18,
+ "learning_rate": 0.00017333333333333334,
+ "loss": 8.8296,
+ "step": 26
+ },
+ {
+ "epoch": 0.013203741059966991,
+ "grad_norm": Infinity,
+ "learning_rate": 0.00018,
+ "loss": 9.8113,
+ "step": 27
+ },
+ {
+ "epoch": 0.013692768506632435,
+ "grad_norm": Infinity,
+ "learning_rate": 0.0001866666666666667,
+ "loss": 9.417,
+ "step": 28
+ },
+ {
+ "epoch": 0.014181795953297879,
+ "grad_norm": Infinity,
+ "learning_rate": 0.00019333333333333333,
+ "loss": 9.168,
+ "step": 29
+ },
+ {
+ "epoch": 0.014670823399963322,
+ "grad_norm": Infinity,
+ "learning_rate": 0.0002,
+ "loss": 10.644,
+ "step": 30
  }
  ],
  "logging_steps": 1,
@@ -209,7 +244,7 @@
  "attributes": {}
  }
  },
- "total_flos": 5.51119918989312e+16,
+ "total_flos": 6.613439027871744e+16,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null