masatochi committed on
Commit f5d8506 · verified · 1 Parent(s): d27bfc3

Training in progress, step 180, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bacdc1b5a1323d95a65acebce6a66bfdad5fef4371ff73342931d742fdcfd505
+ oid sha256:8d07a125acf118b1129c100c6b14f4c3ec600c5d9bb5bd7f070b8a63141f90b3
  size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8380053b6f50aaf7dfe06245a314881d5f57bb9d2625d2fcea2f041563df4a1e
+ oid sha256:97ecfeeafecbec2a3a15990a51ba72578f9c86b9c0d8af9cca4c5141999cd257
  size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be9ec3198a68ec65f16686b0f1d5a6ba174d2aca875621b68207c99c69128ae9
+ oid sha256:6362b09e58588bf4bb998fe89b4e73cad56d76d2d176c7abc6cfafe3bad8a094
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:416d32fdf638b555a0fc031fb149fd18abaec9c234026f168a4a4bd45704a2a9
+ oid sha256:b7c990948ce2aaf07d64c7544604482e458e5281aa73955448bb3e7dee4b1367
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.08557980316645272,
+ "epoch": 0.08802494039977994,
  "eval_steps": 34,
- "global_step": 175,
+ "global_step": 180,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1280,6 +1280,41 @@
  "learning_rate": 1.0483670864493778e-05,
  "loss": 9.0925,
  "step": 175
+ },
+ {
+ "epoch": 0.08606883061311817,
+ "grad_norm": 2.8518316557467648e+17,
+ "learning_rate": 9.675280065387116e-06,
+ "loss": 8.1331,
+ "step": 176
+ },
+ {
+ "epoch": 0.08655785805978361,
+ "grad_norm": Infinity,
+ "learning_rate": 8.897735075391155e-06,
+ "loss": 9.1131,
+ "step": 177
+ },
+ {
+ "epoch": 0.08704688550644905,
+ "grad_norm": Infinity,
+ "learning_rate": 8.151301425407699e-06,
+ "loss": 9.5168,
+ "step": 178
+ },
+ {
+ "epoch": 0.0875359129531145,
+ "grad_norm": Infinity,
+ "learning_rate": 7.43623402184438e-06,
+ "loss": 9.3454,
+ "step": 179
+ },
+ {
+ "epoch": 0.08802494039977994,
+ "grad_norm": Infinity,
+ "learning_rate": 6.75277705956443e-06,
+ "loss": 8.2317,
+ "step": 180
  }
  ],
  "logging_steps": 1,
@@ -1299,7 +1334,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3.857839432925184e+17,
+ "total_flos": 3.9680634167230464e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null