masatochi committed
Commit e52e622 · verified · 1 Parent(s): 1772997

Training in progress, step 180, checkpoint

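The files in this commit sit under last-checkpoint/, the folder layout transformers.Trainer uses when pushing intermediate checkpoints with hub_strategy="checkpoint"; the adapter_model.safetensors entry suggests a PEFT/LoRA adapter rather than full model weights. A minimal sketch of a setup that would produce commits like this one, not the author's actual script: the batch size, logging, and eval settings are taken from the trainer_state.json diff further down, while the output directory and the 5-step save interval (global_step 175 in the parent commit, 180 here) are assumptions.

```python
from transformers import TrainingArguments

# Only the values marked "from trainer_state.json" are grounded in this commit;
# output_dir and save_steps are assumptions for illustration.
args = TrainingArguments(
    output_dir="outputs",            # assumed
    per_device_train_batch_size=3,   # "train_batch_size": 3 from trainer_state.json
    logging_steps=1,                 # "logging_steps": 1 from trainer_state.json
    eval_steps=34,                   # "eval_steps": 34 from trainer_state.json
    save_steps=5,                    # assumed: parent commit is step 175, this one is step 180
    push_to_hub=True,
    hub_strategy="checkpoint",       # pushes the latest checkpoint to the Hub under last-checkpoint/
)
# Passed to a Trainer together with a (PEFT-wrapped) model and a dataset, each save
# then shows up as a "Training in progress, step N, checkpoint" commit like this one.
```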
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1d9ef8895aecefde27b9815938ee8b70b7cb60258c13c343c6f4409007de4b8f
+oid sha256:e02a42948b13a1783128ccd70b47ddc0998cf4588b0fbb53b48b0d92c8e09f1d
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:19e5799f46aa4ac27c728572da7de1ddb0568e884989194df0340fe82104010e
+oid sha256:30306307f8da5309806edc2f06c365a1874ad8afd38a0e67925ef2af47263c38
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:350629c8726cd8603e94d6658b9108dc8785e8448290744abeabee7c4b3b48d6
+oid sha256:6c9e95e8038f40b4d739b9be60c3742d2a869eaba46d8fd74d89cee2de436ed5
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:416d32fdf638b555a0fc031fb149fd18abaec9c234026f168a4a4bd45704a2a9
+oid sha256:b7c990948ce2aaf07d64c7544604482e458e5281aa73955448bb3e7dee4b1367
 size 1064
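All four files above are stored through Git LFS, so each diff only touches the three-line pointer: the sha256 oid changes with the new payload while the size stays identical (the tensors keep their shapes between steps; only their values change). A minimal sketch, assuming the binaries have been pulled locally, of checking a downloaded file against its pointer:

```python
import hashlib
import os

def check_lfs_pointer(pointer_text: str, local_path: str) -> bool:
    """Compare a Git LFS pointer (oid sha256 + size) with a local file."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])

    h = hashlib.sha256()
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    return h.hexdigest() == expected_oid and os.path.getsize(local_path) == expected_size

# Pointer contents copied from the diff above (new version of rng_state.pth).
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:6c9e95e8038f40b4d739b9be60c3742d2a869eaba46d8fd74d89cee2de436ed5
size 14244"""
print(check_lfs_pointer(pointer, "last-checkpoint/rng_state.pth"))
```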
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08557980316645272,
+  "epoch": 0.08802494039977994,
   "eval_steps": 34,
-  "global_step": 175,
+  "global_step": 180,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1280,6 +1280,41 @@
       "learning_rate": 1.0483670864493778e-05,
       "loss": 0.9053,
       "step": 175
+    },
+    {
+      "epoch": 0.08606883061311817,
+      "grad_norm": 0.46799540519714355,
+      "learning_rate": 9.675280065387116e-06,
+      "loss": 0.9817,
+      "step": 176
+    },
+    {
+      "epoch": 0.08655785805978361,
+      "grad_norm": 0.28441375494003296,
+      "learning_rate": 8.897735075391155e-06,
+      "loss": 0.8335,
+      "step": 177
+    },
+    {
+      "epoch": 0.08704688550644905,
+      "grad_norm": 0.3706384301185608,
+      "learning_rate": 8.151301425407699e-06,
+      "loss": 0.9957,
+      "step": 178
+    },
+    {
+      "epoch": 0.0875359129531145,
+      "grad_norm": 0.2594084143638611,
+      "learning_rate": 7.43623402184438e-06,
+      "loss": 0.8361,
+      "step": 179
+    },
+    {
+      "epoch": 0.08802494039977994,
+      "grad_norm": 0.48771926760673523,
+      "learning_rate": 6.75277705956443e-06,
+      "loss": 0.9614,
+      "step": 180
     }
   ],
   "logging_steps": 1,
@@ -1299,7 +1334,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.768169842212864e+17,
+  "total_flos": 7.990117551990374e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null