masatochi committed
Commit 1949d0d · verified · 1 Parent(s): 17dd6b2

Training in progress, step 195, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05041d2d031e01b8ed683099d28816ddbbe607cd11c4e2bb9c5d4e4526fd8668
+oid sha256:fc366c1271ad045a8c8905c718f8bffe3f303c03c5916603e7373b126e81849d
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d93c343e1a2d3dc0ae22ec52df62832ddc988236bc6ba2b92c04aa8e07ce53b8
+oid sha256:3661588ed8c4f860a25629ed92db7120773dde4b1466832bcd4b1f2803fb4cc5
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf11cdd535ea662ff270636f900a467408683d2b7101cb9dbc848f0681f26dea
+oid sha256:523bfe0a33577b4182bf5e3b1a37489a75b989d79cb5926278ae5a0647ffb1d8
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
+oid sha256:a35430a05f2b9748f37dd11667a782564c85a35d840d60cbaddfa2c905ab7c0a
 size 1064
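
Each of the four pointer files above is a Git LFS stub: it records only the SHA-256 digest and byte size of the real artifact, not the weights themselves. A minimal sketch, assuming the checkpoint has been pulled locally under a `last-checkpoint/` directory mirroring this repo's layout, for checking a downloaded file against its pointer (expected values copied from the new adapter pointer above):

```python
import hashlib
from pathlib import Path

def sha256_and_size(path: Path, chunk_size: int = 1 << 20):
    """Stream the file and return (hex SHA-256 digest, size in bytes)."""
    digest = hashlib.sha256()
    size = 0
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest(), size

# Expected values from the new LFS pointer for adapter_model.safetensors.
expected_oid = "fc366c1271ad045a8c8905c718f8bffe3f303c03c5916603e7373b126e81849d"
expected_size = 83945296

oid, size = sha256_and_size(Path("last-checkpoint/adapter_model.safetensors"))
assert (oid, size) == (expected_oid, expected_size), "file does not match its LFS pointer"
```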
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.09291521486643438,
+  "epoch": 0.0953603520997616,
   "eval_steps": 34,
-  "global_step": 190,
+  "global_step": 195,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1385,6 +1385,41 @@
       "learning_rate": 1.7026900316098215e-06,
       "loss": 0.8295,
       "step": 190
+    },
+    {
+      "epoch": 0.09340424231309982,
+      "grad_norm": 1.2903939485549927,
+      "learning_rate": 1.3799252646597426e-06,
+      "loss": 1.0516,
+      "step": 191
+    },
+    {
+      "epoch": 0.09389326975976527,
+      "grad_norm": 1.140723705291748,
+      "learning_rate": 1.0908391628854041e-06,
+      "loss": 0.9476,
+      "step": 192
+    },
+    {
+      "epoch": 0.09438229720643071,
+      "grad_norm": 1.051087737083435,
+      "learning_rate": 8.355304489257254e-07,
+      "loss": 0.9245,
+      "step": 193
+    },
+    {
+      "epoch": 0.09487132465309615,
+      "grad_norm": 1.2365599870681763,
+      "learning_rate": 6.140863104726391e-07,
+      "loss": 0.9907,
+      "step": 194
+    },
+    {
+      "epoch": 0.0953603520997616,
+      "grad_norm": 1.1254229545593262,
+      "learning_rate": 4.2658237049655323e-07,
+      "loss": 0.8159,
+      "step": 195
     }
   ],
   "logging_steps": 1,
@@ -1404,7 +1439,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.434012971545395e+17,
+  "total_flos": 8.655960681322906e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null