dada22231 committed on
Commit 702a15d · verified · 1 Parent(s): 2a24422

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:89c3cfbc71baa360ea503a04599efc9dfe299a54d04c47b2f646367599cbac9d
+ oid sha256:999f076b8f5f2d9d9faefe07d63f0a355fc27c5beef3d916a5727556d9cbc800
  size 60599872
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:af020e95108d540c6d446acbaabbd2498107f9d85933dc962f0469dddc9c4a38
+ oid sha256:3fdf4c8fc9c4532475e85c7b5d525faceeb854e0f5252e1fb3c98d5ec1bd5485
  size 121392706
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b49768a88e541f702eb75d71268964d7cc79b72046465ea5ab4d2f7b2684f932
+ oid sha256:f27e6ce44b63159e5afa22f0fea678bb8d77bbcda6ef64c9b02895897e534fdb
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d42e3a6f19e06c36f6182caeab45c4d6cf1899bf44ba738453d442d66c7fa692
+ oid sha256:136936543dc8c1c85d2dabe8b804440676128344b22e8d997ffe3bd503facb6c
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:dedb05ed73814bd7342db7ab5d4bec7aa9950516a77a467f7cfda9f6dec31cc9
+ oid sha256:5080f711d3aa0a0b66148b31e234a97245bd7d82a63fe15d54b7de9dc1ce4a27
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1c8d3317a9d670385f3523e97d1d1073e2b084502a4c464fcf0832f7fe80c1c6
+ oid sha256:0028d968992a8c63eab1e9f7d07956a002eade85d427bedc709b096cd233480d
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:370cee31176b8bff781da8f054b9870dc93c63a8623674218a84718aa7abd3af
+ oid sha256:051dee7dfbeecb34b46e8409ffafec324501f465585234624669bc8c9e863ae4
  size 1064
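
Note that each pointer diff above changes only the sha256 oid while the recorded size stays the same: the checkpoint blobs were rewritten at step 100 but keep their byte counts. The three-line format shown (version / oid / size) is the standard Git LFS pointer layout. Below is a minimal, hypothetical Python sketch of checking a locally pulled blob against such a pointer; the pointer text is taken from the adapter_model.safetensors diff in this commit, and the blob path is illustrative.

import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer ('version ...', 'oid sha256:<hex>', 'size <bytes>')."""
    fields = {}
    for line in text.splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def blob_matches_pointer(blob_path: Path, pointer_text: str) -> bool:
    """Compare a local blob's sha256 digest and size with the pointer's oid and size."""
    fields = parse_lfs_pointer(pointer_text)
    expected_oid = fields["oid"].split(":", 1)[1]  # strip the 'sha256:' prefix
    expected_size = int(fields["size"])
    data = blob_path.read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size

# New adapter pointer recorded by this commit (blob path below is hypothetical):
pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:999f076b8f5f2d9d9faefe07d63f0a355fc27c5beef3d916a5727556d9cbc800\n"
    "size 60599872\n"
)
# print(blob_matches_pointer(Path("last-checkpoint/adapter_model.safetensors"), pointer))
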
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.021306684240698814,
- "best_model_checkpoint": "miner_id_24/checkpoint-75",
- "epoch": 0.05740940078937926,
+ "best_metric": 0.01876358687877655,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.07654586771917235,
  "eval_steps": 25,
- "global_step": 75,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -564,6 +564,189 @@
  "eval_samples_per_second": 47.938,
  "eval_steps_per_second": 12.464,
  "step": 75
+ },
+ {
+ "epoch": 0.058174859466570986,
+ "grad_norm": 0.18385003507137299,
+ "learning_rate": 2.3180194846605367e-05,
+ "loss": 0.17,
+ "step": 76
+ },
+ {
+ "epoch": 0.05894031814376271,
+ "grad_norm": 0.40027400851249695,
+ "learning_rate": 2.215611672859741e-05,
+ "loss": 0.1423,
+ "step": 77
+ },
+ {
+ "epoch": 0.05970577682095443,
+ "grad_norm": 0.142598494887352,
+ "learning_rate": 2.1167208663446025e-05,
+ "loss": 0.0031,
+ "step": 78
+ },
+ {
+ "epoch": 0.060471235498146154,
+ "grad_norm": 0.06515596807003021,
+ "learning_rate": 2.0214529598676836e-05,
+ "loss": 0.0014,
+ "step": 79
+ },
+ {
+ "epoch": 0.06123669417533788,
+ "grad_norm": 0.05153204873204231,
+ "learning_rate": 1.9299099686894423e-05,
+ "loss": 0.0014,
+ "step": 80
+ },
+ {
+ "epoch": 0.0620021528525296,
+ "grad_norm": 0.014992534182965755,
+ "learning_rate": 1.842189919337732e-05,
+ "loss": 0.0005,
+ "step": 81
+ },
+ {
+ "epoch": 0.06276761152972132,
+ "grad_norm": 0.013885805383324623,
+ "learning_rate": 1.758386744638546e-05,
+ "loss": 0.0004,
+ "step": 82
+ },
+ {
+ "epoch": 0.06353307020691305,
+ "grad_norm": 0.010251723229885101,
+ "learning_rate": 1.6785901831303956e-05,
+ "loss": 0.0003,
+ "step": 83
+ },
+ {
+ "epoch": 0.06429852888410477,
+ "grad_norm": 0.0075831301510334015,
+ "learning_rate": 1.602885682970026e-05,
+ "loss": 0.0003,
+ "step": 84
+ },
+ {
+ "epoch": 0.0650639875612965,
+ "grad_norm": 0.0066162901930511,
+ "learning_rate": 1.531354310432403e-05,
+ "loss": 0.0003,
+ "step": 85
+ },
+ {
+ "epoch": 0.06582944623848822,
+ "grad_norm": 0.006637216545641422,
+ "learning_rate": 1.464072663102903e-05,
+ "loss": 0.0002,
+ "step": 86
+ },
+ {
+ "epoch": 0.06659490491567994,
+ "grad_norm": 0.011600018478929996,
+ "learning_rate": 1.4011127878547087e-05,
+ "loss": 0.0003,
+ "step": 87
+ },
+ {
+ "epoch": 0.06736036359287166,
+ "grad_norm": 0.14439114928245544,
+ "learning_rate": 1.3425421036992098e-05,
+ "loss": 0.1791,
+ "step": 88
+ },
+ {
+ "epoch": 0.06812582227006339,
+ "grad_norm": 0.1987999677658081,
+ "learning_rate": 1.2884233295920353e-05,
+ "loss": 0.1352,
+ "step": 89
+ },
+ {
+ "epoch": 0.0688912809472551,
+ "grad_norm": 0.19108176231384277,
+ "learning_rate": 1.2388144172720251e-05,
+ "loss": 0.066,
+ "step": 90
+ },
+ {
+ "epoch": 0.06965673962444684,
+ "grad_norm": 0.07157070934772491,
+ "learning_rate": 1.1937684892050604e-05,
+ "loss": 0.0013,
+ "step": 91
+ },
+ {
+ "epoch": 0.07042219830163857,
+ "grad_norm": 0.04157419875264168,
+ "learning_rate": 1.1533337816991932e-05,
+ "loss": 0.0009,
+ "step": 92
+ },
+ {
+ "epoch": 0.07118765697883028,
+ "grad_norm": 0.02740364894270897,
+ "learning_rate": 1.1175535932519987e-05,
+ "loss": 0.0007,
+ "step": 93
+ },
+ {
+ "epoch": 0.07195311565602201,
+ "grad_norm": 0.013225646689534187,
+ "learning_rate": 1.0864662381854632e-05,
+ "loss": 0.0004,
+ "step": 94
+ },
+ {
+ "epoch": 0.07271857433321373,
+ "grad_norm": 0.00674827815964818,
+ "learning_rate": 1.0601050056180447e-05,
+ "loss": 0.0002,
+ "step": 95
+ },
+ {
+ "epoch": 0.07348403301040546,
+ "grad_norm": 0.005964465904980898,
+ "learning_rate": 1.0384981238178534e-05,
+ "loss": 0.0002,
+ "step": 96
+ },
+ {
+ "epoch": 0.07424949168759717,
+ "grad_norm": 0.005700402893126011,
+ "learning_rate": 1.0216687299751144e-05,
+ "loss": 0.0002,
+ "step": 97
+ },
+ {
+ "epoch": 0.0750149503647889,
+ "grad_norm": 0.005580263212323189,
+ "learning_rate": 1.0096348454262845e-05,
+ "loss": 0.0002,
+ "step": 98
+ },
+ {
+ "epoch": 0.07578040904198062,
+ "grad_norm": 0.009917444549500942,
+ "learning_rate": 1.0024093563563546e-05,
+ "loss": 0.0002,
+ "step": 99
+ },
+ {
+ "epoch": 0.07654586771917235,
+ "grad_norm": 0.0060340710915625095,
+ "learning_rate": 1e-05,
+ "loss": 0.0002,
+ "step": 100
+ },
+ {
+ "epoch": 0.07654586771917235,
+ "eval_loss": 0.01876358687877655,
+ "eval_runtime": 1.0439,
+ "eval_samples_per_second": 47.897,
+ "eval_steps_per_second": 12.453,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -587,12 +770,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 4.482214844275098e+16,
+ "total_flos": 5.965949609128755e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null