dimasik87 committed (verified)
Commit b871df5 · 1 Parent(s): 4de4b71

Training in progress, step 40, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:aa54b4b7166161a6f06646e52ac7e6d821f3616aa042b366ac7d4e467a9030a4
+oid sha256:2f327e5ab2022652d0e4adaf08bfaca6d4b16bb909a343e9ed57c89637eb15d5
 size 167832240
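adapter_model.safetensors follows the file layout peft uses for saved adapters, so the usual way to try this checkpoint is to attach it to its base model. A minimal sketch, assuming a causal-LM base and that the checkpoint directory also contains peft's adapter_config.json (which records base_model_name_or_path but is not part of this diff):

```python
import json

from peft import PeftModel
from transformers import AutoModelForCausalLM

# Assumption: adapter_config.json sits next to adapter_model.safetensors and
# names the base model this adapter was trained on.
with open("last-checkpoint/adapter_config.json") as f:
    base_id = json.load(f)["base_model_name_or_path"]

base = AutoModelForCausalLM.from_pretrained(base_id)        # assumed causal-LM base
model = PeftModel.from_pretrained(base, "last-checkpoint")  # loads adapter_model.safetensors
```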
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a695a50d569581f7d0018b06f6dc4cbc63cf971987aa7e844c53fd1ed04985ff
+oid sha256:3c5d477d90a6255d322631c99080b111a862467de276c9b78ed367eefd150fe0
 size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:597a0b04dd0a0cc52d9ce583fedb956e33f0756a8bca7a040554f7370a939198
+oid sha256:b28ad40fa0c0353f4ef0255340ffadb1812eaa1e26c616580e4553bfaccb7edb
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0aa0f75a1f3e346be25756b578158b09a68943f0b9f1cfe29f97939687f864ef
+oid sha256:7c792918044964431737f4cb39f3769dbfd230048b1125ac69a6439eb6c8534b
 size 1064
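All four files above are stored with Git LFS, so each diff only touches the three-line pointer file (spec version, the sha256 oid of the blob, and its size in bytes); the binaries themselves live in LFS storage. A minimal sketch of checking a downloaded file against its pointer, using the new adapter oid from this commit:

```python
import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Return the sha256 hex digest that Git LFS records as the blob's oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid taken from the updated adapter_model.safetensors pointer above.
expected = "2f327e5ab2022652d0e4adaf08bfaca6d4b16bb909a343e9ed57c89637eb15d5"
assert lfs_oid("last-checkpoint/adapter_model.safetensors") == expected
```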
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.003264047872702133,
+  "epoch": 0.0036267198585579253,
   "eval_steps": 4,
-  "global_step": 36,
+  "global_step": 40,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -339,6 +339,42 @@
       "eval_samples_per_second": 8.22,
       "eval_steps_per_second": 8.22,
       "step": 36
+    },
+    {
+      "epoch": 0.003354715869166081,
+      "grad_norm": 0.4692092835903168,
+      "learning_rate": 4.7750143528405126e-05,
+      "loss": 0.1626,
+      "step": 37
+    },
+    {
+      "epoch": 0.0034453838656300292,
+      "grad_norm": 0.4230363965034485,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 0.1752,
+      "step": 38
+    },
+    {
+      "epoch": 0.0035360518620939775,
+      "grad_norm": 0.5133092403411865,
+      "learning_rate": 3.5055195166981645e-05,
+      "loss": 0.209,
+      "step": 39
+    },
+    {
+      "epoch": 0.0036267198585579253,
+      "grad_norm": 0.3762070834636688,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 0.1563,
+      "step": 40
+    },
+    {
+      "epoch": 0.0036267198585579253,
+      "eval_loss": 0.20195676386356354,
+      "eval_runtime": 282.3618,
+      "eval_samples_per_second": 8.223,
+      "eval_steps_per_second": 8.223,
+      "step": 40
     }
   ],
   "logging_steps": 1,
@@ -358,7 +394,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.3632415595495424e+16,
+  "total_flos": 1.5116215932420096e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null