masatochi committed
Commit 3b80b21 · verified · 1 Parent(s): db318db

Training in progress, step 40, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9fcf40be826f55b07270198e0a6ead902a546dedd458e282c2ffd682c7f06eaa
+oid sha256:099c785d00f552a53e3256c6de344c9c9b068203d4a8da86cbe7d971d6ce297a
 size 83945296
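
The binary checkpoint files in this commit are stored as Git LFS pointers, so each diff only shows the changed sha256 oid plus the unchanged size. As a minimal illustrative sketch (not part of the commit), one could verify that a locally checked-out file matches its pointer; the path, oid, and size below are taken from the hunk above, while the helper itself is just an example:

# Illustrative only: check a checked-out LFS file against the sha256 oid and
# size recorded in its pointer (new values from the hunk above).
import hashlib
import os

def sha256_of(path: str) -> str:
    """Stream the file through SHA-256 and return the hex digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()

path = "last-checkpoint/adapter_model.safetensors"
expected_oid = "099c785d00f552a53e3256c6de344c9c9b068203d4a8da86cbe7d971d6ce297a"
expected_size = 83945296

assert os.path.getsize(path) == expected_size
assert sha256_of(path) == expected_oid
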
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4eceac72edacc47b86aa1b6436a39c3f9c719c923b943c1c6d4b1f803e72ffbd
+oid sha256:d602eee55c6090dfce8deb583211d914fe5c9ccb9052febe40949d2205aad5b3
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:dd3b0103484cbe9033aa207b10c52e72e6e07124b9b9355e171cb765f0460e37
+oid sha256:ab2ed981c27f8081d81d2039e0485e05b085c6f7d5fed74c55bf1d3c4164f221
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:666e3907648b0318630be4f0e9270d7d902986930986134493537abd333e959c
+oid sha256:35e2941b1419d36fedcd8eb55488740cb386508ea401393ade4c1f5fd25ff6c8
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.017115960633290545,
+  "epoch": 0.019561097866617763,
   "eval_steps": 34,
-  "global_step": 35,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -268,6 +268,41 @@
       "learning_rate": 0.00019957341762950344,
       "loss": 1.0976,
       "step": 35
+    },
+    {
+      "epoch": 0.017604988079955988,
+      "grad_norm": 0.46739837527275085,
+      "learning_rate": 0.0001993859136895274,
+      "loss": 1.0089,
+      "step": 36
+    },
+    {
+      "epoch": 0.018094015526621432,
+      "grad_norm": 0.3970998227596283,
+      "learning_rate": 0.00019916446955107428,
+      "loss": 1.1005,
+      "step": 37
+    },
+    {
+      "epoch": 0.018583042973286876,
+      "grad_norm": 0.5996494293212891,
+      "learning_rate": 0.0001989091608371146,
+      "loss": 0.9741,
+      "step": 38
+    },
+    {
+      "epoch": 0.01907207041995232,
+      "grad_norm": 0.27929016947746277,
+      "learning_rate": 0.00019862007473534025,
+      "loss": 0.9607,
+      "step": 39
+    },
+    {
+      "epoch": 0.019561097866617763,
+      "grad_norm": 0.33169642090797424,
+      "learning_rate": 0.0001982973099683902,
+      "loss": 1.1173,
+      "step": 40
     }
   ],
   "logging_steps": 1,
@@ -287,7 +322,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.5536339684425728e+17,
+  "total_flos": 1.7755816782200832e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null