Farouk committed
Commit 9dc55f9 · 1 Parent(s): 1254ac6

commit files to HF hub

Files changed (5)
  1. adapter_model.bin +1 -1
  2. optimizer.pt +1 -1
  3. rng_state.pth +1 -1
  4. scheduler.pt +1 -1
  5. trainer_state.json +385 -3
adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5d7b1f5770103fea0d0250e2fb9b7822b572027bb6555f2d28b25017a13c0323
+ oid sha256:a536ce89fc9d39500c1a969e2744cc280554a8b796e16792a678b6200a40722d
  size 319977229
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:459d984530ea10b4f8419a713abba31de3821ed77f2dc15f6449001f0df3ff8a
+ oid sha256:dbec28930a59cf7aa9e05fce3a2dd098474ed77a6193c292a23eb180627c86ce
  size 1279539973
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:432b393c7abadfeb6ab449be0ac900a9729da39a14f6d6d74ec5327811ab0a3c
+ oid sha256:5f392cfcc9f0c8f1518509e54b053b0e0b907c94cdf174812804a1b1d802aea5
  size 14511
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de7840bcb72f2f480fd301578d289cdfa174589e831b0d33e5772f3956b6beae
+ oid sha256:3d8d6be7898f87772ccbc5c732e900fe63a643c4595ce6af3d6bc6f811ba4b65
  size 627
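All four binary artifacts above are Git LFS pointer files: each records only the spec version, a sha256 oid, and the byte size, and this commit swaps every oid while the sizes stay unchanged. A minimal sketch, assuming the artifacts have been pulled locally (e.g. with git lfs pull), of checking a downloaded file against the oid in its pointer; the local path below is an assumption, not part of this commit:

# Recompute a file's sha256 and compare it with the oid from its LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so multi-GB artifacts need not fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid from the updated adapter_model.bin pointer in this commit
EXPECTED = "a536ce89fc9d39500c1a969e2744cc280554a8b796e16792a678b6200a40722d"

actual = sha256_of("adapter_model.bin")  # assumed local path
print("OK" if actual == EXPECTED else f"mismatch: got {actual}")

The same check applies to optimizer.pt, rng_state.pth, and scheduler.pt by substituting the new oids from their diffs.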
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
  {
  "best_metric": 0.5078858137130737,
  "best_model_checkpoint": "./output_v2/7b_cluster05_Nous-Hermes-llama-2-7b_partitioned_v3_standardized_05/checkpoint-600",
- "epoch": 2.379182156133829,
- "global_step": 800,
+ "epoch": 3.5687732342007434,
+ "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -770,11 +770,393 @@
  "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
  "mmlu_loss": 0.9420785120841573,
  "step": 800
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 0.0002,
+ "loss": 0.3678,
+ "step": 810
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 0.0002,
+ "loss": 0.316,
+ "step": 820
+ },
+ {
+ "epoch": 2.47,
+ "learning_rate": 0.0002,
+ "loss": 0.3669,
+ "step": 830
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 0.0002,
+ "loss": 0.3955,
+ "step": 840
+ },
+ {
+ "epoch": 2.53,
+ "learning_rate": 0.0002,
+ "loss": 0.3854,
+ "step": 850
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 0.0002,
+ "loss": 0.3514,
+ "step": 860
+ },
+ {
+ "epoch": 2.59,
+ "learning_rate": 0.0002,
+ "loss": 0.3491,
+ "step": 870
+ },
+ {
+ "epoch": 2.62,
+ "learning_rate": 0.0002,
+ "loss": 0.3567,
+ "step": 880
+ },
+ {
+ "epoch": 2.65,
+ "learning_rate": 0.0002,
+ "loss": 0.3839,
+ "step": 890
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 0.0002,
+ "loss": 0.3291,
+ "step": 900
+ },
+ {
+ "epoch": 2.71,
+ "learning_rate": 0.0002,
+ "loss": 0.3917,
+ "step": 910
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 0.0002,
+ "loss": 0.3812,
+ "step": 920
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 0.0002,
+ "loss": 0.3496,
+ "step": 930
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 0.0002,
+ "loss": 0.3339,
+ "step": 940
+ },
+ {
+ "epoch": 2.83,
+ "learning_rate": 0.0002,
+ "loss": 0.3565,
+ "step": 950
+ },
+ {
+ "epoch": 2.86,
+ "learning_rate": 0.0002,
+ "loss": 0.3825,
+ "step": 960
+ },
+ {
+ "epoch": 2.88,
+ "learning_rate": 0.0002,
+ "loss": 0.4028,
+ "step": 970
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 0.0002,
+ "loss": 0.3621,
+ "step": 980
+ },
+ {
+ "epoch": 2.94,
+ "learning_rate": 0.0002,
+ "loss": 0.3345,
+ "step": 990
+ },
+ {
+ "epoch": 2.97,
+ "learning_rate": 0.0002,
+ "loss": 0.4121,
+ "step": 1000
+ },
+ {
+ "epoch": 2.97,
+ "eval_loss": 0.5176346898078918,
+ "eval_runtime": 175.3431,
+ "eval_samples_per_second": 5.703,
+ "eval_steps_per_second": 2.852,
+ "step": 1000
+ },
+ {
+ "epoch": 2.97,
+ "mmlu_eval_accuracy": 0.43483776787791517,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.4375,
+ "mmlu_eval_accuracy_business_ethics": 0.6363636363636364,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+ "mmlu_eval_accuracy_college_biology": 0.3125,
+ "mmlu_eval_accuracy_college_chemistry": 0.0,
+ "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
+ "mmlu_eval_accuracy_college_mathematics": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.18181818181818182,
+ "mmlu_eval_accuracy_conceptual_physics": 0.38461538461538464,
+ "mmlu_eval_accuracy_econometrics": 0.25,
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+ "mmlu_eval_accuracy_global_facts": 0.3,
+ "mmlu_eval_accuracy_high_school_biology": 0.25,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.4444444444444444,
+ "mmlu_eval_accuracy_high_school_european_history": 0.4444444444444444,
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5714285714285714,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
+ "mmlu_eval_accuracy_high_school_physics": 0.17647058823529413,
+ "mmlu_eval_accuracy_high_school_psychology": 0.6666666666666666,
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_world_history": 0.46153846153846156,
+ "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5,
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
+ "mmlu_eval_accuracy_management": 0.36363636363636365,
+ "mmlu_eval_accuracy_marketing": 0.68,
+ "mmlu_eval_accuracy_medical_genetics": 0.7272727272727273,
+ "mmlu_eval_accuracy_miscellaneous": 0.6395348837209303,
+ "mmlu_eval_accuracy_moral_disputes": 0.5263157894736842,
+ "mmlu_eval_accuracy_moral_scenarios": 0.24,
+ "mmlu_eval_accuracy_nutrition": 0.45454545454545453,
+ "mmlu_eval_accuracy_philosophy": 0.4117647058823529,
+ "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
+ "mmlu_eval_accuracy_professional_accounting": 0.22580645161290322,
+ "mmlu_eval_accuracy_professional_law": 0.32941176470588235,
+ "mmlu_eval_accuracy_professional_medicine": 0.45161290322580644,
+ "mmlu_eval_accuracy_professional_psychology": 0.42028985507246375,
+ "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+ "mmlu_eval_accuracy_sociology": 0.5909090909090909,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+ "mmlu_eval_accuracy_virology": 0.5,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 0.9825846157073197,
+ "step": 1000
+ },
+ {
+ "epoch": 3.0,
+ "learning_rate": 0.0002,
+ "loss": 0.3798,
+ "step": 1010
+ },
+ {
+ "epoch": 3.03,
+ "learning_rate": 0.0002,
+ "loss": 0.2727,
+ "step": 1020
+ },
+ {
+ "epoch": 3.06,
+ "learning_rate": 0.0002,
+ "loss": 0.2523,
+ "step": 1030
+ },
+ {
+ "epoch": 3.09,
+ "learning_rate": 0.0002,
+ "loss": 0.2601,
+ "step": 1040
+ },
+ {
+ "epoch": 3.12,
+ "learning_rate": 0.0002,
+ "loss": 0.2679,
+ "step": 1050
+ },
+ {
+ "epoch": 3.15,
+ "learning_rate": 0.0002,
+ "loss": 0.2855,
+ "step": 1060
+ },
+ {
+ "epoch": 3.18,
+ "learning_rate": 0.0002,
+ "loss": 0.2473,
+ "step": 1070
+ },
+ {
+ "epoch": 3.21,
+ "learning_rate": 0.0002,
+ "loss": 0.2848,
+ "step": 1080
+ },
+ {
+ "epoch": 3.24,
+ "learning_rate": 0.0002,
+ "loss": 0.2793,
+ "step": 1090
+ },
+ {
+ "epoch": 3.27,
+ "learning_rate": 0.0002,
+ "loss": 0.2671,
+ "step": 1100
+ },
+ {
+ "epoch": 3.3,
+ "learning_rate": 0.0002,
+ "loss": 0.2445,
+ "step": 1110
+ },
+ {
+ "epoch": 3.33,
+ "learning_rate": 0.0002,
+ "loss": 0.3044,
+ "step": 1120
+ },
+ {
+ "epoch": 3.36,
+ "learning_rate": 0.0002,
+ "loss": 0.2651,
+ "step": 1130
+ },
+ {
+ "epoch": 3.39,
+ "learning_rate": 0.0002,
+ "loss": 0.2768,
+ "step": 1140
+ },
+ {
+ "epoch": 3.42,
+ "learning_rate": 0.0002,
+ "loss": 0.3228,
+ "step": 1150
+ },
+ {
+ "epoch": 3.45,
+ "learning_rate": 0.0002,
+ "loss": 0.3178,
+ "step": 1160
+ },
+ {
+ "epoch": 3.48,
+ "learning_rate": 0.0002,
+ "loss": 0.2958,
+ "step": 1170
+ },
+ {
+ "epoch": 3.51,
+ "learning_rate": 0.0002,
+ "loss": 0.2947,
+ "step": 1180
+ },
+ {
+ "epoch": 3.54,
+ "learning_rate": 0.0002,
+ "loss": 0.2658,
+ "step": 1190
+ },
+ {
+ "epoch": 3.57,
+ "learning_rate": 0.0002,
+ "loss": 0.2602,
+ "step": 1200
+ },
+ {
+ "epoch": 3.57,
+ "eval_loss": 0.551461935043335,
+ "eval_runtime": 175.3834,
+ "eval_samples_per_second": 5.702,
+ "eval_steps_per_second": 2.851,
+ "step": 1200
+ },
+ {
+ "epoch": 3.57,
+ "mmlu_eval_accuracy": 0.4334187959408507,
+ "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
+ "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
+ "mmlu_eval_accuracy_astronomy": 0.3125,
+ "mmlu_eval_accuracy_business_ethics": 0.7272727272727273,
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
+ "mmlu_eval_accuracy_college_biology": 0.375,
+ "mmlu_eval_accuracy_college_chemistry": 0.0,
+ "mmlu_eval_accuracy_college_computer_science": 0.36363636363636365,
+ "mmlu_eval_accuracy_college_mathematics": 0.18181818181818182,
+ "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
+ "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
+ "mmlu_eval_accuracy_computer_security": 0.09090909090909091,
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
+ "mmlu_eval_accuracy_econometrics": 0.25,
+ "mmlu_eval_accuracy_electrical_engineering": 0.375,
+ "mmlu_eval_accuracy_elementary_mathematics": 0.43902439024390244,
+ "mmlu_eval_accuracy_formal_logic": 0.2857142857142857,
+ "mmlu_eval_accuracy_global_facts": 0.4,
+ "mmlu_eval_accuracy_high_school_biology": 0.25,
+ "mmlu_eval_accuracy_high_school_chemistry": 0.45454545454545453,
+ "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
+ "mmlu_eval_accuracy_high_school_european_history": 0.5,
+ "mmlu_eval_accuracy_high_school_geography": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.4186046511627907,
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.2692307692307692,
+ "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
+ "mmlu_eval_accuracy_high_school_psychology": 0.6333333333333333,
+ "mmlu_eval_accuracy_high_school_statistics": 0.21739130434782608,
+ "mmlu_eval_accuracy_high_school_us_history": 0.7272727272727273,
+ "mmlu_eval_accuracy_high_school_world_history": 0.5384615384615384,
+ "mmlu_eval_accuracy_human_aging": 0.6086956521739131,
+ "mmlu_eval_accuracy_human_sexuality": 0.4166666666666667,
+ "mmlu_eval_accuracy_international_law": 0.7692307692307693,
+ "mmlu_eval_accuracy_jurisprudence": 0.2727272727272727,
+ "mmlu_eval_accuracy_logical_fallacies": 0.5,
+ "mmlu_eval_accuracy_machine_learning": 0.36363636363636365,
+ "mmlu_eval_accuracy_management": 0.45454545454545453,
+ "mmlu_eval_accuracy_marketing": 0.72,
+ "mmlu_eval_accuracy_medical_genetics": 0.6363636363636364,
+ "mmlu_eval_accuracy_miscellaneous": 0.627906976744186,
+ "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
+ "mmlu_eval_accuracy_nutrition": 0.45454545454545453,
+ "mmlu_eval_accuracy_philosophy": 0.35294117647058826,
+ "mmlu_eval_accuracy_prehistory": 0.4,
+ "mmlu_eval_accuracy_professional_accounting": 0.2903225806451613,
+ "mmlu_eval_accuracy_professional_law": 0.3235294117647059,
+ "mmlu_eval_accuracy_professional_medicine": 0.4838709677419355,
+ "mmlu_eval_accuracy_professional_psychology": 0.43478260869565216,
+ "mmlu_eval_accuracy_public_relations": 0.5833333333333334,
+ "mmlu_eval_accuracy_security_studies": 0.4444444444444444,
+ "mmlu_eval_accuracy_sociology": 0.6363636363636364,
+ "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
+ "mmlu_eval_accuracy_virology": 0.5,
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+ "mmlu_loss": 1.0567254588523438,
+ "step": 1200
  }
  ],
  "max_steps": 5000,
  "num_train_epochs": 15,
- "total_flos": 1.6252093169374003e+17,
+ "total_flos": 2.4398423758430208e+17,
  "trial_name": null,
  "trial_params": null
  }
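The trainer_state.json diff extends log_history from step 800 (epoch ≈2.38) to step 1200 (epoch ≈3.57): training loss at a fixed learning rate of 2e-4 drops from roughly the high-0.3 range into the mid-0.2 range, while eval_loss rises from 0.5176 at step 1000 to 0.5515 at step 1200, both above the 0.5079 best_metric, so best_model_checkpoint stays at checkpoint-600. A minimal sketch, assuming trainer_state.json is read from the current directory (in the repo it sits next to the checkpoint weights), for recovering those curves from the file; the key names are taken from the entries shown above:

# Pull the training-loss, eval-loss, and MMLU-accuracy curves out of log_history.
import json

with open("trainer_state.json") as f:  # assumed local path
    state = json.load(f)

history = state["log_history"]
train_loss = [(e["step"], e["loss"]) for e in history if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]
mmlu_acc = [(e["step"], e["mmlu_eval_accuracy"]) for e in history if "mmlu_eval_accuracy" in e]

print("best eval loss:", state["best_metric"], "from", state["best_model_checkpoint"])
print("latest train loss:", train_loss[-1])
print("eval loss by step:", eval_loss)
print("MMLU accuracy by step:", mmlu_acc)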