leixa committed
Commit 3825d3c · verified · 1 Parent(s): ad627a3

Training in progress, step 294, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:18d3d88ed93966722511e1fd83bc9e5a29664dc928eaf28d3ef2ef1c16720d1f
+oid sha256:8dfd39d0e918f52073c6d797146c5a4cc8d71bef64bfd25f35e08d84c0cae024
 size 313820248
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cb26a28b4f259288dad56068545379587b60becf2baf1dcdeef689747c983ad
-size 159641092
+oid sha256:b2ea606a77a785082c6c5e1b7cd44520ed885f2227842265f6cd0c1ebe19417e
+size 159641284
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:24c351254b01cc68c87af0db48cd5f3e1b6768bd4a213369e7028441ce047b0d
+oid sha256:e97da99bcea15f94303530a0ce03b8a68986e36bb82581fb2057e7273237f605
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ee19ddad9c4c375a1de2d74fb4c1cf5e15d36c1ed47a2cb80f7cb0fbacb3b29e
+oid sha256:318b114b83c26bcba11815378a88e0015bce044b0002c702e0a2627e1a1d1e56
 size 1064
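Each of the checkpoint files above is tracked with Git LFS, so the diff only changes the pointer fields (oid/size), not the binary payload itself. Below is a minimal sketch of checking a downloaded blob against its pointer; the `.pointer` file path is a hypothetical local copy of the pointer text (e.g. saved from `git show`), and the blob path assumes the checkpoint was downloaded into `last-checkpoint/`.

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: Path) -> dict:
    """Parse a Git LFS pointer file (version / oid / size) into a dict."""
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


def verify_blob(pointer_path: Path, blob_path: Path) -> bool:
    """Check that a downloaded blob matches the oid and size in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    data = blob_path.read_bytes()
    return (
        len(data) == expected_size
        and hashlib.sha256(data).hexdigest() == expected_oid
    )


if __name__ == "__main__":
    # Hypothetical paths; adjust to wherever the pointer text and blob live locally.
    ok = verify_blob(
        Path("last-checkpoint/adapter_model.safetensors.pointer"),
        Path("last-checkpoint/adapter_model.safetensors"),
    )
    print("adapter_model.safetensors matches pointer:", ok)
```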
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.42857142857142855,
+  "epoch": 0.5,
   "eval_steps": 42,
-  "global_step": 252,
+  "global_step": 294,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -651,6 +651,112 @@
       "eval_samples_per_second": 28.13,
       "eval_steps_per_second": 3.523,
       "step": 252
+    },
+    {
+      "epoch": 0.4336734693877551,
+      "grad_norm": 1.393947958946228,
+      "learning_rate": 5e-05,
+      "loss": 1.4025,
+      "step": 255
+    },
+    {
+      "epoch": 0.4387755102040816,
+      "grad_norm": 1.2842843532562256,
+      "learning_rate": 4.903834726061565e-05,
+      "loss": 1.3806,
+      "step": 258
+    },
+    {
+      "epoch": 0.44387755102040816,
+      "grad_norm": 1.4686455726623535,
+      "learning_rate": 4.807705027948008e-05,
+      "loss": 1.3954,
+      "step": 261
+    },
+    {
+      "epoch": 0.4489795918367347,
+      "grad_norm": 1.3992563486099243,
+      "learning_rate": 4.711646468323129e-05,
+      "loss": 1.3417,
+      "step": 264
+    },
+    {
+      "epoch": 0.45408163265306123,
+      "grad_norm": 1.3817628622055054,
+      "learning_rate": 4.6156945835334184e-05,
+      "loss": 1.4016,
+      "step": 267
+    },
+    {
+      "epoch": 0.45918367346938777,
+      "grad_norm": 1.4665182828903198,
+      "learning_rate": 4.5198848704615914e-05,
+      "loss": 1.3706,
+      "step": 270
+    },
+    {
+      "epoch": 0.4642857142857143,
+      "grad_norm": 1.3997013568878174,
+      "learning_rate": 4.424252773394704e-05,
+      "loss": 1.4081,
+      "step": 273
+    },
+    {
+      "epoch": 0.46938775510204084,
+      "grad_norm": 1.4313600063323975,
+      "learning_rate": 4.328833670911724e-05,
+      "loss": 1.4345,
+      "step": 276
+    },
+    {
+      "epoch": 0.4744897959183674,
+      "grad_norm": 1.2777554988861084,
+      "learning_rate": 4.23366286279542e-05,
+      "loss": 1.3368,
+      "step": 279
+    },
+    {
+      "epoch": 0.47959183673469385,
+      "grad_norm": 1.297582745552063,
+      "learning_rate": 4.138775556973406e-05,
+      "loss": 1.411,
+      "step": 282
+    },
+    {
+      "epoch": 0.4846938775510204,
+      "grad_norm": 1.409166693687439,
+      "learning_rate": 4.04420685649314e-05,
+      "loss": 1.4427,
+      "step": 285
+    },
+    {
+      "epoch": 0.4897959183673469,
+      "grad_norm": 1.392500400543213,
+      "learning_rate": 3.9499917465357534e-05,
+      "loss": 1.4439,
+      "step": 288
+    },
+    {
+      "epoch": 0.49489795918367346,
+      "grad_norm": 1.3200541734695435,
+      "learning_rate": 3.856165081473474e-05,
+      "loss": 1.39,
+      "step": 291
+    },
+    {
+      "epoch": 0.5,
+      "grad_norm": 1.4324284791946411,
+      "learning_rate": 3.762761571975429e-05,
+      "loss": 1.3971,
+      "step": 294
+    },
+    {
+      "epoch": 0.5,
+      "eval_loss": 1.3641271591186523,
+      "eval_runtime": 35.1729,
+      "eval_samples_per_second": 28.147,
+      "eval_steps_per_second": 3.525,
+      "step": 294
     }
   ],
   "logging_steps": 3,
@@ -670,7 +776,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1.0207954482285773e+17,
+  "total_flos": 1.1909280229333402e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null