masatochi committed (verified)
Commit 5aaa718 · Parent(s): d7bb349

Training in progress, step 185, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e02a42948b13a1783128ccd70b47ddc0998cf4588b0fbb53b48b0d92c8e09f1d
+oid sha256:216911c39630b8029330e697b538168e8e4a1fd44cef032ce0432f2a681df877
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:30306307f8da5309806edc2f06c365a1874ad8afd38a0e67925ef2af47263c38
+oid sha256:5edfb201cf07670aa206c2100799f9f2d9aa5a4a7afd966ce133d52f87f58e8f
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c9e95e8038f40b4d739b9be60c3742d2a869eaba46d8fd74d89cee2de436ed5
+oid sha256:44f07ee7727b2a57ec87aa201844eb2406bc5b699b1d99c8190698d036889705
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b7c990948ce2aaf07d64c7544604482e458e5281aa73955448bb3e7dee4b1367
+oid sha256:31cc4c125027b06153274d1c1fcc2291ff49e04af7d8c2cae65dd480bfd90a0c
 size 1064
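
The four files above are Git LFS pointer files: each stores only a `version` line, the `oid sha256` of the tracked blob, and its `size`, so this commit changes the hashes while the sizes stay the same. Below is a minimal sketch, assuming the actual blobs have already been downloaded to the same paths, that re-hashes a local file and compares it against the pointer fields from this commit; the helper name `verify_lfs_blob` is illustrative and not part of any LFS tooling.

```python
import hashlib

# Minimal sketch: re-hash a locally downloaded blob and compare it against the
# oid/size recorded in its LFS pointer file.
def verify_lfs_blob(path: str, expected_oid: str, expected_size: int) -> bool:
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# Values taken from the new adapter pointer in this commit.
print(verify_lfs_blob(
    "last-checkpoint/adapter_model.safetensors",
    "216911c39630b8029330e697b538168e8e4a1fd44cef032ce0432f2a681df877",
    83945296,
))
```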
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.08802494039977994,
+  "epoch": 0.09047007763310716,
   "eval_steps": 34,
-  "global_step": 180,
+  "global_step": 185,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1315,6 +1315,41 @@
       "learning_rate": 6.75277705956443e-06,
       "loss": 0.9614,
       "step": 180
+    },
+    {
+      "epoch": 0.08851396784644538,
+      "grad_norm": 0.3285332918167114,
+      "learning_rate": 6.1011639384943585e-06,
+      "loss": 1.0667,
+      "step": 181
+    },
+    {
+      "epoch": 0.08900299529311083,
+      "grad_norm": 0.293144166469574,
+      "learning_rate": 5.481617183918053e-06,
+      "loss": 1.0208,
+      "step": 182
+    },
+    {
+      "epoch": 0.08949202273977627,
+      "grad_norm": 0.39136576652526855,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 0.9498,
+      "step": 183
+    },
+    {
+      "epoch": 0.08998105018644172,
+      "grad_norm": 0.31894204020500183,
+      "learning_rate": 4.339558049955927e-06,
+      "loss": 1.06,
+      "step": 184
+    },
+    {
+      "epoch": 0.09047007763310716,
+      "grad_norm": 0.3131878972053528,
+      "learning_rate": 3.817435682718096e-06,
+      "loss": 0.9719,
+      "step": 185
     }
   ],
   "logging_steps": 1,
@@ -1334,7 +1369,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 7.990117551990374e+17,
+  "total_flos": 8.212065261767885e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null