masatochi committed
Commit 1bf02b0 · verified · 1 Parent(s): d22a2b9

Training in progress, step 190, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:216911c39630b8029330e697b538168e8e4a1fd44cef032ce0432f2a681df877
+ oid sha256:a4a5c97dbf76ff33b307a5750999b650fdca855b6fccfb24cfb317d17b551db2
  size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5edfb201cf07670aa206c2100799f9f2d9aa5a4a7afd966ce133d52f87f58e8f
+ oid sha256:e14756a84c0c36abc832c0c104bd27a99e8fd2062b9726fdee254c458fe1f7b4
  size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:44f07ee7727b2a57ec87aa201844eb2406bc5b699b1d99c8190698d036889705
+ oid sha256:bf11cdd535ea662ff270636f900a467408683d2b7101cb9dbc848f0681f26dea
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:31cc4c125027b06153274d1c1fcc2291ff49e04af7d8c2cae65dd480bfd90a0c
+ oid sha256:b289b438571df9d34409287e67864402aaad98d9ecdf87ccd44b9abb7f5b6982
  size 1064
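The four binary files above are tracked through Git LFS, so the repository stores only pointer files (spec version, sha256 oid, byte size); their hashes change because the step-190 checkpoint overwrote the step-185 one. As a minimal sketch, not part of this commit, of how a downloaded blob could be checked against its pointer (the adapter path is used purely for illustration):

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_text: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid and size recorded in its LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hash>" -> "<hash>"
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    actual_oid = hashlib.sha256(blob.read_bytes()).hexdigest()
    return actual_oid == expected_oid and blob.stat().st_size == expected_size

# Pointer contents as shown in this commit; the local path is an assumption.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:a4a5c97dbf76ff33b307a5750999b650fdca855b6fccfb24cfb317d17b551db2
size 83945296"""
print(verify_lfs_pointer(pointer, "last-checkpoint/adapter_model.safetensors"))
```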
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.09047007763310716,
+ "epoch": 0.09291521486643438,
  "eval_steps": 34,
- "global_step": 185,
+ "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1350,6 +1350,41 @@
  "learning_rate": 3.817435682718096e-06,
  "loss": 0.9719,
  "step": 185
+ },
+ {
+ "epoch": 0.0909591050797726,
+ "grad_norm": 0.26452943682670593,
+ "learning_rate": 3.3281595730812575e-06,
+ "loss": 0.9504,
+ "step": 186
+ },
+ {
+ "epoch": 0.09144813252643805,
+ "grad_norm": 0.36320924758911133,
+ "learning_rate": 2.8718968083886075e-06,
+ "loss": 1.1415,
+ "step": 187
+ },
+ {
+ "epoch": 0.09193715997310349,
+ "grad_norm": 0.24845726788043976,
+ "learning_rate": 2.4488032019563402e-06,
+ "loss": 1.0171,
+ "step": 188
+ },
+ {
+ "epoch": 0.09242618741976893,
+ "grad_norm": 0.5962578058242798,
+ "learning_rate": 2.0590232398634114e-06,
+ "loss": 0.9151,
+ "step": 189
+ },
+ {
+ "epoch": 0.09291521486643438,
+ "grad_norm": 0.3705110549926758,
+ "learning_rate": 1.7026900316098215e-06,
+ "loss": 1.1048,
+ "step": 190
  }
  ],
  "logging_steps": 1,
@@ -1369,7 +1404,7 @@
  "attributes": {}
  }
  },
- "total_flos": 8.212065261767885e+17,
+ "total_flos": 8.434012971545395e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null