leixa committed on
Commit 76a2589 · verified · 1 Parent(s): dd2f7c2

Training in progress, step 177, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1e192170feb2bc0826bada36df604d12552545ddd3a733f64528bbdcd659856e
+ oid sha256:49d96bf1960b150602e5e76b3faa6ec34c3d4128539a376971f5bfe284307e68
  size 121155320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2a16d0b0b7e4b74842de4906cdc5794c912ca34e6ce93661123473d4ea548ad
+ oid sha256:57013b5e51a30dae40b7285217c759a1d05db1a242ab0edceebd0444817d72b8
  size 61896852
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8c76fb16654d3d39a9c1de0bea5e372f834c7386bec235e90ec17798c3df3395
+ oid sha256:fa65732a2e2e4637165a27bb72afa324ebe26d80426fca5be7655f29ac48d281
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dd37f3769f073fd69535a2b7b95e24a5f7c7faceefe96cb49af23a93832a8544
+ oid sha256:3920ee4a294e40209fd09294d2bc1408892fffa1eb0ce50b11d8a2f2e706c9ae
  size 1064
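
The four pointer diffs above swap only the sha256 oid of each Git LFS pointer; the recorded sizes are unchanged. A minimal sketch of checking a locally downloaded blob against such a pointer, assuming the pointer text and the blob have been saved to illustrative paths (these exact filenames are not part of this repo):

```python
import hashlib
from pathlib import Path

def read_lfs_pointer(pointer_path: Path) -> dict:
    """Parse a Git LFS pointer file: 'version ...', 'oid sha256:<hex>', 'size <bytes>'."""
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def matches_pointer(pointer_path: Path, blob_path: Path) -> bool:
    """True if the blob's sha256 digest and byte size match the pointer's oid and size."""
    fields = read_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = blob_path.read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# Hypothetical usage: compare a pulled checkpoint blob against the pointer from this commit.
# matches_pointer(Path("adapter_model.pointer"), Path("last-checkpoint/adapter_model.safetensors"))
```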
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 2.8085106382978724,
+ "epoch": 3.0127659574468084,
  "eval_steps": 15,
- "global_step": 165,
+ "global_step": 177,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -488,6 +488,34 @@
  "eval_samples_per_second": 89.194,
  "eval_steps_per_second": 22.524,
  "step": 165
+ },
+ {
+ "epoch": 2.8595744680851065,
+ "grad_norm": 5.334619998931885,
+ "learning_rate": 7.149145929922607e-07,
+ "loss": 0.332,
+ "step": 168
+ },
+ {
+ "epoch": 2.9106382978723406,
+ "grad_norm": 4.500619888305664,
+ "learning_rate": 3.1816191188415166e-07,
+ "loss": 0.3051,
+ "step": 171
+ },
+ {
+ "epoch": 2.9617021276595743,
+ "grad_norm": 3.653864860534668,
+ "learning_rate": 7.960384569353219e-08,
+ "loss": 0.2497,
+ "step": 174
+ },
+ {
+ "epoch": 3.0127659574468084,
+ "grad_norm": 3.714113235473633,
+ "learning_rate": 0.0,
+ "loss": 0.2474,
+ "step": 177
  }
  ],
  "logging_steps": 3,
@@ -502,12 +530,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 5491409635246080.0,
+ "total_flos": 5889895783464960.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null