Farouk committed
Commit e0558a0
1 Parent(s): 6450f45

Training in progress, step 400

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:854d9775c36d691131435da90a76e8c4b814ee81dcbe15efd3d242c7247dc3a7
+oid sha256:b08f069556e42bf079fd83f2a9564a029b14d0a7789b87df13893db2d68fe7f0
 size 319977229
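
The `.bin` entries in this commit are Git LFS pointer files, so each push only changes the `oid sha256` line (the oid is the SHA-256 of the actual payload). A minimal sketch of checking a downloaded weight file against the pointer's oid, assuming the payload has been fetched locally as `adapter_model.bin`:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid recorded in the new LFS pointer for adapter_model.bin in this commit
expected_oid = "b08f069556e42bf079fd83f2a9564a029b14d0a7789b87df13893db2d68fe7f0"
assert sha256_of("adapter_model.bin") == expected_oid, "checksum mismatch"
```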
checkpoint-200/adapter_model/adapter_model/README.md CHANGED
@@ -4,6 +4,17 @@ library_name: peft
 ## Training procedure
 
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -16,5 +27,6 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: bfloat16
 ### Framework versions
 
+- PEFT 0.4.0
 
 - PEFT 0.4.0
checkpoint-200/adapter_model/adapter_model/adapter_config.json CHANGED
@@ -14,13 +14,13 @@
 "r": 64,
 "revision": null,
 "target_modules": [
-"q_proj",
+"v_proj",
 "down_proj",
+"o_proj",
 "k_proj",
 "up_proj",
-"o_proj",
-"v_proj",
-"gate_proj"
+"gate_proj",
+"q_proj"
 ],
 "task_type": "CAUSAL_LM"
 }
checkpoint-200/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0075a06493c9d9a0f18d9ad6329effee1c7f8b6f8ee3b5692cae0a760ef1f224
+oid sha256:854d9775c36d691131435da90a76e8c4b814ee81dcbe15efd3d242c7247dc3a7
 size 319977229
checkpoint-400/README.md CHANGED
@@ -4,6 +4,17 @@ library_name: peft
 ## Training procedure
 
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -16,5 +27,6 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: bfloat16
 ### Framework versions
 
+- PEFT 0.4.0
 
 - PEFT 0.4.0
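
The README hunks simply append the same `bitsandbytes` 4-bit NF4 setup again at step 400. For orientation, here is a minimal sketch of an equivalent quantization config expressed with `transformers.BitsAndBytesConfig`; this is not taken from the repo's training code, and the base model name is a placeholder, since this commit does not identify it:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the values listed in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_threshold=6.0,
    llm_int8_has_fp16_weight=False,
)

model = AutoModelForCausalLM.from_pretrained(
    "base-model-name",  # placeholder: the base model is not named in this commit
    quantization_config=bnb_config,
    device_map="auto",
)
```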
checkpoint-400/adapter_config.json CHANGED
@@ -14,13 +14,13 @@
 "r": 64,
 "revision": null,
 "target_modules": [
-"q_proj",
+"v_proj",
 "down_proj",
+"o_proj",
 "k_proj",
 "up_proj",
-"o_proj",
-"v_proj",
-"gate_proj"
+"gate_proj",
+"q_proj"
 ],
 "task_type": "CAUSAL_LM"
 }
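
The adapter_config.json change is only a reordering of `target_modules`; the set of projections targeted by LoRA is unchanged. A minimal sketch of the corresponding `peft.LoraConfig` (other LoRA hyperparameters are omitted because they fall outside the hunk shown):

```python
from peft import LoraConfig

# Same module set as in adapter_config.json; only the listing order differs.
lora_config = LoraConfig(
    r=64,
    target_modules=[
        "v_proj", "down_proj", "o_proj", "k_proj",
        "up_proj", "gate_proj", "q_proj",
    ],
    task_type="CAUSAL_LM",
)
```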
checkpoint-400/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fea1d3891f164b3061844eead6c991776a55b14d3435c3ad0b1663a43dcb8148
+oid sha256:b08f069556e42bf079fd83f2a9564a029b14d0a7789b87df13893db2d68fe7f0
 size 319977229
checkpoint-400/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:95fd7ab2c3d5dcff3a1c2ef4ec14f0ab012acc6015932532506b65c660efea7b
+oid sha256:062f57d8687321acd2038ef768dce6dd7cbb8d85b7549945e866b63cc690f41c
 size 1279539973
checkpoint-400/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f9658ef49d2aefef0930a26e4fd2970d2b99f9a383e8e6e5639424cb0a5fb467
+oid sha256:7eca4bb7a223d26bdf74bd673d266758307f55f5871745502b32db0746329971
 size 14511
checkpoint-400/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-"best_metric": 0.7662714719772339,
+"best_metric": 0.7656086683273315,
 "best_model_checkpoint": "experts/expert-16/checkpoint-400",
 "epoch": 0.1267427122940431,
 "global_step": 400,
@@ -10,7 +10,7 @@
 {
 "epoch": 0.0,
 "learning_rate": 0.0002,
-"loss": 0.8331,
+"loss": 0.8339,
 "step": 10
 },
 {
@@ -22,7 +22,7 @@
 {
 "epoch": 0.01,
 "learning_rate": 0.0002,
-"loss": 0.9038,
+"loss": 0.9041,
 "step": 30
 },
 {
@@ -34,13 +34,13 @@
 {
 "epoch": 0.02,
 "learning_rate": 0.0002,
-"loss": 0.8155,
+"loss": 0.8151,
 "step": 50
 },
 {
 "epoch": 0.02,
 "learning_rate": 0.0002,
-"loss": 0.7896,
+"loss": 0.79,
 "step": 60
 },
 {
@@ -52,105 +52,105 @@
 {
 "epoch": 0.03,
 "learning_rate": 0.0002,
-"loss": 0.8827,
+"loss": 0.8831,
 "step": 80
 },
 {
 "epoch": 0.03,
 "learning_rate": 0.0002,
-"loss": 0.861,
+"loss": 0.8607,
 "step": 90
 },
 {
 "epoch": 0.03,
 "learning_rate": 0.0002,
-"loss": 0.7879,
+"loss": 0.7876,
 "step": 100
 },
 {
 "epoch": 0.03,
 "learning_rate": 0.0002,
-"loss": 0.803,
+"loss": 0.8031,
 "step": 110
 },
 {
 "epoch": 0.04,
 "learning_rate": 0.0002,
-"loss": 0.8212,
+"loss": 0.8207,
 "step": 120
 },
 {
 "epoch": 0.04,
 "learning_rate": 0.0002,
-"loss": 0.8075,
+"loss": 0.807,
 "step": 130
 },
 {
 "epoch": 0.04,
 "learning_rate": 0.0002,
-"loss": 0.9263,
+"loss": 0.9262,
 "step": 140
 },
 {
 "epoch": 0.05,
 "learning_rate": 0.0002,
-"loss": 0.7969,
+"loss": 0.7964,
 "step": 150
 },
 {
 "epoch": 0.05,
 "learning_rate": 0.0002,
-"loss": 0.7883,
+"loss": 0.7879,
 "step": 160
 },
 {
 "epoch": 0.05,
 "learning_rate": 0.0002,
-"loss": 0.7582,
+"loss": 0.7587,
 "step": 170
 },
 {
 "epoch": 0.06,
 "learning_rate": 0.0002,
-"loss": 0.8095,
+"loss": 0.8091,
 "step": 180
 },
 {
 "epoch": 0.06,
 "learning_rate": 0.0002,
-"loss": 0.8614,
+"loss": 0.8615,
 "step": 190
 },
 {
 "epoch": 0.06,
 "learning_rate": 0.0002,
-"loss": 0.8674,
+"loss": 0.8672,
 "step": 200
 },
 {
 "epoch": 0.06,
-"eval_loss": 0.7774001359939575,
-"eval_runtime": 149.8878,
-"eval_samples_per_second": 6.672,
-"eval_steps_per_second": 3.336,
+"eval_loss": 0.7779108881950378,
+"eval_runtime": 110.9863,
+"eval_samples_per_second": 9.01,
+"eval_steps_per_second": 4.505,
 "step": 200
 },
 {
 "epoch": 0.06,
-"mmlu_eval_accuracy": 0.4759538024775667,
+"mmlu_eval_accuracy": 0.4744171116325413,
 "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
 "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
 "mmlu_eval_accuracy_astronomy": 0.4375,
 "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
-"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
 "mmlu_eval_accuracy_college_biology": 0.4375,
-"mmlu_eval_accuracy_college_chemistry": 0.25,
+"mmlu_eval_accuracy_college_chemistry": 0.125,
 "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
 "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
 "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
 "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
 "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
-"mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
+"mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
 "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
 "mmlu_eval_accuracy_electrical_engineering": 0.25,
 "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
@@ -159,16 +159,16 @@
 "mmlu_eval_accuracy_high_school_biology": 0.375,
 "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
 "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
-"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
+"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
 "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
 "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
 "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
 "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
-"mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
+"mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
 "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
 "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
-"mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
-"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
+"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
 "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
 "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
 "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
@@ -177,216 +177,216 @@
 "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
 "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
 "mmlu_eval_accuracy_management": 0.6363636363636364,
-"mmlu_eval_accuracy_marketing": 0.84,
+"mmlu_eval_accuracy_marketing": 0.88,
 "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
-"mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
+"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
 "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
-"mmlu_eval_accuracy_moral_scenarios": 0.25,
+"mmlu_eval_accuracy_moral_scenarios": 0.27,
 "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
 "mmlu_eval_accuracy_philosophy": 0.5,
 "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
 "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
-"mmlu_eval_accuracy_professional_law": 0.3058823529411765,
+"mmlu_eval_accuracy_professional_law": 0.3176470588235294,
 "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
-"mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
+"mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
 "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
 "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
-"mmlu_eval_accuracy_sociology": 0.6363636363636364,
+"mmlu_eval_accuracy_sociology": 0.6818181818181818,
 "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
 "mmlu_eval_accuracy_virology": 0.5555555555555556,
-"mmlu_eval_accuracy_world_religions": 0.6842105263157895,
-"mmlu_loss": 1.619096915108728,
+"mmlu_eval_accuracy_world_religions": 0.7368421052631579,
+"mmlu_loss": 1.5868234255450824,
 "step": 200
 },
 {
 "epoch": 0.07,
 "learning_rate": 0.0002,
-"loss": 0.8312,
+"loss": 0.8316,
 "step": 210
 },
 {
 "epoch": 0.07,
 "learning_rate": 0.0002,
-"loss": 0.8465,
+"loss": 0.8454,
 "step": 220
 },
 {
 "epoch": 0.07,
 "learning_rate": 0.0002,
-"loss": 0.8433,
+"loss": 0.8434,
 "step": 230
 },
 {
 "epoch": 0.08,
 "learning_rate": 0.0002,
-"loss": 0.8223,
+"loss": 0.821,
 "step": 240
 },
 {
 "epoch": 0.08,
 "learning_rate": 0.0002,
-"loss": 0.7884,
+"loss": 0.7893,
 "step": 250
 },
 {
 "epoch": 0.08,
 "learning_rate": 0.0002,
-"loss": 0.8233,
+"loss": 0.8242,
 "step": 260
 },
 {
 "epoch": 0.09,
 "learning_rate": 0.0002,
-"loss": 0.812,
+"loss": 0.8128,
 "step": 270
 },
 {
 "epoch": 0.09,
 "learning_rate": 0.0002,
-"loss": 0.8342,
+"loss": 0.8344,
 "step": 280
 },
 {
 "epoch": 0.09,
 "learning_rate": 0.0002,
-"loss": 0.8316,
+"loss": 0.8338,
 "step": 290
 },
 {
 "epoch": 0.1,
 "learning_rate": 0.0002,
-"loss": 0.7984,
+"loss": 0.7981,
 "step": 300
 },
 {
 "epoch": 0.1,
 "learning_rate": 0.0002,
-"loss": 0.7821,
+"loss": 0.781,
 "step": 310
 },
 {
 "epoch": 0.1,
 "learning_rate": 0.0002,
-"loss": 0.7715,
+"loss": 0.7717,
 "step": 320
 },
 {
 "epoch": 0.1,
 "learning_rate": 0.0002,
-"loss": 0.7675,
+"loss": 0.767,
 "step": 330
 },
 {
 "epoch": 0.11,
 "learning_rate": 0.0002,
-"loss": 0.793,
+"loss": 0.7925,
 "step": 340
 },
 {
 "epoch": 0.11,
 "learning_rate": 0.0002,
-"loss": 0.8223,
+"loss": 0.8226,
 "step": 350
 },
 {
 "epoch": 0.11,
 "learning_rate": 0.0002,
-"loss": 0.7916,
+"loss": 0.7912,
 "step": 360
 },
 {
 "epoch": 0.12,
 "learning_rate": 0.0002,
-"loss": 0.8094,
+"loss": 0.8093,
 "step": 370
 },
 {
 "epoch": 0.12,
 "learning_rate": 0.0002,
-"loss": 0.7655,
+"loss": 0.7648,
 "step": 380
 },
 {
 "epoch": 0.12,
 "learning_rate": 0.0002,
-"loss": 0.7868,
+"loss": 0.7866,
 "step": 390
 },
 {
 "epoch": 0.13,
 "learning_rate": 0.0002,
-"loss": 0.7983,
+"loss": 0.7976,
 "step": 400
 },
 {
 "epoch": 0.13,
-"eval_loss": 0.7662714719772339,
-"eval_runtime": 149.8239,
-"eval_samples_per_second": 6.675,
-"eval_steps_per_second": 3.337,
+"eval_loss": 0.7656086683273315,
+"eval_runtime": 110.9802,
+"eval_samples_per_second": 9.011,
+"eval_steps_per_second": 4.505,
 "step": 400
 },
 {
 "epoch": 0.13,
-"mmlu_eval_accuracy": 0.47563130411411997,
+"mmlu_eval_accuracy": 0.47124130233512024,
 "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
 "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
 "mmlu_eval_accuracy_astronomy": 0.4375,
 "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
-"mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
+"mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
 "mmlu_eval_accuracy_college_biology": 0.4375,
 "mmlu_eval_accuracy_college_chemistry": 0.125,
 "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
 "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
 "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
-"mmlu_eval_accuracy_college_physics": 0.45454545454545453,
-"mmlu_eval_accuracy_computer_security": 0.36363636363636365,
+"mmlu_eval_accuracy_college_physics": 0.36363636363636365,
+"mmlu_eval_accuracy_computer_security": 0.45454545454545453,
 "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
 "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
 "mmlu_eval_accuracy_electrical_engineering": 0.25,
-"mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
+"mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
 "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
-"mmlu_eval_accuracy_global_facts": 0.3,
+"mmlu_eval_accuracy_global_facts": 0.4,
 "mmlu_eval_accuracy_high_school_biology": 0.40625,
 "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
 "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
-"mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
+"mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
 "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
 "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
 "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
 "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
-"mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
+"mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
 "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
-"mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
-"mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
-"mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
+"mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
+"mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
+"mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
 "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
 "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
 "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
 "mmlu_eval_accuracy_international_law": 0.8461538461538461,
-"mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
+"mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
 "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
 "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
 "mmlu_eval_accuracy_management": 0.6363636363636364,
 "mmlu_eval_accuracy_marketing": 0.84,
 "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
-"mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
+"mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
 "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
-"mmlu_eval_accuracy_moral_scenarios": 0.24,
-"mmlu_eval_accuracy_nutrition": 0.6363636363636364,
+"mmlu_eval_accuracy_moral_scenarios": 0.25,
+"mmlu_eval_accuracy_nutrition": 0.6060606060606061,
 "mmlu_eval_accuracy_philosophy": 0.5,
-"mmlu_eval_accuracy_prehistory": 0.45714285714285713,
+"mmlu_eval_accuracy_prehistory": 0.4857142857142857,
 "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
 "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
 "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
 "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
 "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
-"mmlu_eval_accuracy_security_studies": 0.48148148148148145,
+"mmlu_eval_accuracy_security_studies": 0.5185185185185185,
 "mmlu_eval_accuracy_sociology": 0.6818181818181818,
 "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
 "mmlu_eval_accuracy_virology": 0.5,
 "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
-"mmlu_loss": 1.474700499685875,
+"mmlu_loss": 1.4339068503199297,
 "step": 400
 }
 ],
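
The trainer state records the evaluation loss improving from 0.7779 at step 200 to 0.7656 at step 400, which is why `best_metric` and `best_model_checkpoint` now point at checkpoint-400. A minimal sketch of pulling these numbers out of the checkpoint, assuming the standard `log_history` layout written by the Hugging Face `Trainer`:

```python
import json

# Path is relative to the root of this repository at this commit.
with open("checkpoint-400/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.7656086683273315 in this commit
print(state["best_model_checkpoint"])  # experts/expert-16/checkpoint-400

# Gather (step, eval_loss) pairs from the logged history.
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(evals)  # expected: [(200, 0.7779...), (400, 0.7656...)]
```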
checkpoint-400/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d55ea0037adc27e709c2f87c727ac3fd55b607c5afed1c76d4262111f780937e
+oid sha256:277dab2895cbf8d8b713e5f0339719c5d50d193c9afc1399ad953aa4910a2fab
 size 5819