masatochi committed · Commit 41beb88 · verified · 1 Parent(s): 9b5d7c2

Training in progress, step 195, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ad1aa5a6aae02bb7fc612c226782b2532fa37329e1010f72e27df037ab25cee6
+oid sha256:9b0dee2372e0f18f2926fc6260eec903f5561fa89a64d712e0b7d8365d17c593
 size 59827904
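Each binary checkpoint file in this commit is stored with Git LFS, so the diff above shows only the pointer changing: the oid line is the SHA-256 digest of the actual blob and the size line is its byte count. A minimal sketch (Python standard library only) for checking a downloaded file against the new pointer; the local path assumes the repository has been cloned with LFS objects fetched, which is not part of the commit itself:

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    # Stream the file so large checkpoint blobs never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# New oid from the adapter_model.safetensors pointer in this commit.
EXPECTED = "9b0dee2372e0f18f2926fc6260eec903f5561fa89a64d712e0b7d8365d17c593"
actual = lfs_sha256("last-checkpoint/adapter_model.safetensors")
print("pointer matches file:", actual == EXPECTED)

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt below, each against its own oid.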
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8fe029025b390129dfda46b9ae7d3c4ae995b75c4e24acf8877cddb5262499fa
+oid sha256:3b975acebc204f6e79db6fa817f78ef570e8350fee5fb167ce316257a8482e30
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:977ff09e0c2768597b76a3b64add1b9f5188e8ddfb34d4e801a7ae1dfcc89c53
+oid sha256:e0dfa4c978e9e36a07b5616ff9458f53247c12dc4ecf4c596cc8b6cb0fe8c5e1
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
+oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
 size 1064
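The trainer_state.json diff below adds log entries for steps 191 through 195, several of which record "grad_norm": Infinity. A minimal sketch (standard library only), assuming a local copy of last-checkpoint/trainer_state.json, for loading the state and flagging the steps whose logged gradient norm is non-finite:

import json
import math

with open("last-checkpoint/trainer_state.json") as handle:
    # json.load accepts the bare Infinity literals written into log_history.
    state = json.load(handle)

print("global_step:", state["global_step"], "epoch:", state["epoch"])

# Collect steps whose logged gradient norm is inf or NaN; entries without
# a grad_norm field are skipped via the 0.0 default.
bad_steps = [entry["step"] for entry in state["log_history"]
             if not math.isfinite(entry.get("grad_norm", 0.0))]
print("steps with non-finite grad_norm:", bad_steps)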
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09291521486643438,
+  "epoch": 0.0953603520997616,
   "eval_steps": 34,
-  "global_step": 190,
+  "global_step": 195,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1385,6 +1385,41 @@
       "learning_rate": 1.7026900316098215e-06,
       "loss": 9.3022,
       "step": 190
+    },
+    {
+      "epoch": 0.09340424231309982,
+      "grad_norm": Infinity,
+      "learning_rate": 1.3799252646597426e-06,
+      "loss": 9.6389,
+      "step": 191
+    },
+    {
+      "epoch": 0.09389326975976527,
+      "grad_norm": Infinity,
+      "learning_rate": 1.0908391628854041e-06,
+      "loss": 9.3169,
+      "step": 192
+    },
+    {
+      "epoch": 0.09438229720643071,
+      "grad_norm": 4.0833470417810227e+18,
+      "learning_rate": 8.355304489257254e-07,
+      "loss": 8.7958,
+      "step": 193
+    },
+    {
+      "epoch": 0.09487132465309615,
+      "grad_norm": Infinity,
+      "learning_rate": 6.140863104726391e-07,
+      "loss": 8.5085,
+      "step": 194
+    },
+    {
+      "epoch": 0.0953603520997616,
+      "grad_norm": Infinity,
+      "learning_rate": 4.2658237049655323e-07,
+      "loss": 9.1892,
+      "step": 195
     }
   ],
   "logging_steps": 1,
@@ -1404,7 +1439,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.188511384318771e+17,
+  "total_flos": 4.2987353681166336e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null