masatochi committed
Commit 40342f3 · verified · 1 Parent(s): 77bc932

Training in progress, step 185, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d07a125acf118b1129c100c6b14f4c3ec600c5d9bb5bd7f070b8a63141f90b3
+oid sha256:eb723e62fb82cd4b78aa0cca1ace2861019e61e678760115be31dba5ad1696fb
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:97ecfeeafecbec2a3a15990a51ba72578f9c86b9c0d8af9cca4c5141999cd257
+oid sha256:69404c049d0fc9a8dcd6c745ff05d502bd435ce6e5e88f9d263276b6c3e98a21
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6362b09e58588bf4bb998fe89b4e73cad56d76d2d176c7abc6cfafe3bad8a094
+oid sha256:15f3f424f914fd255fa1ff6a73e197c5d3690a5830ea92f8fc6fedd09ca07699
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b7c990948ce2aaf07d64c7544604482e458e5281aa73955448bb3e7dee4b1367
+oid sha256:31cc4c125027b06153274d1c1fcc2291ff49e04af7d8c2cae65dd480bfd90a0c
 size 1064
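
The four files above are Git LFS pointers: the diff only touches the three-line pointer (version, oid sha256, size), while the binary payload is stored in LFS. Below is a minimal Python sketch for checking that a downloaded artifact matches its pointer; it assumes the pointer text is available locally (e.g. from a clone made with GIT_LFS_SKIP_SMUDGE=1), and the paths are hypothetical.

```python
# Minimal sketch: compare a downloaded checkpoint file against the oid/size
# recorded in its Git LFS pointer. Paths below are hypothetical examples.
import hashlib
from pathlib import Path

def verify_lfs_object(pointer_path: str, object_path: str) -> bool:
    """Return True if the file matches the sha256 and size in the LFS pointer."""
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].split(":", 1)[1].strip()
    expected_size = int(fields["size"])

    data = Path(object_path).read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Example (hypothetical paths):
# verify_lfs_object("pointers/last-checkpoint/scheduler.pt",
#                   "downloads/last-checkpoint/scheduler.pt")
```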
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08802494039977994,
+  "epoch": 0.09047007763310716,
   "eval_steps": 34,
-  "global_step": 180,
+  "global_step": 185,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1315,6 +1315,41 @@
       "learning_rate": 6.75277705956443e-06,
       "loss": 8.2317,
       "step": 180
+    },
+    {
+      "epoch": 0.08851396784644538,
+      "grad_norm": Infinity,
+      "learning_rate": 6.1011639384943585e-06,
+      "loss": 8.8381,
+      "step": 181
+    },
+    {
+      "epoch": 0.08900299529311083,
+      "grad_norm": Infinity,
+      "learning_rate": 5.481617183918053e-06,
+      "loss": 9.8422,
+      "step": 182
+    },
+    {
+      "epoch": 0.08949202273977627,
+      "grad_norm": Infinity,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 8.9866,
+      "step": 183
+    },
+    {
+      "epoch": 0.08998105018644172,
+      "grad_norm": Infinity,
+      "learning_rate": 4.339558049955927e-06,
+      "loss": 8.7644,
+      "step": 184
+    },
+    {
+      "epoch": 0.09047007763310716,
+      "grad_norm": Infinity,
+      "learning_rate": 3.817435682718096e-06,
+      "loss": 9.5396,
+      "step": 185
     }
   ],
   "logging_steps": 1,
@@ -1334,7 +1369,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.9680634167230464e+17,
+  "total_flos": 4.078287400520909e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null