Farouk committed on
Commit 8bb1e82 · 1 Parent(s): cef76d4

Training in progress, step 800

adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:316c05afc65201a82db1fe35558b74edc638e2bf7fc8b812ef6a0a8584071b89
+oid sha256:e76a12b40d96b039c516a755ff0fc82c6483336c390439b21ebc6df94896779f
 size 319977229
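The file above is a Git LFS pointer; only the `oid sha256:` digest changes in this commit, while the payload size stays at 319977229 bytes. As a hedged aside (not part of the commit), a downloaded `adapter_model.bin` can be checked against the new pointer by hashing it locally; the local file path is an assumption:

```python
import hashlib

# Compare a downloaded LFS blob against the "oid sha256:" value in the pointer above.
expected = "e76a12b40d96b039c516a755ff0fc82c6483336c390439b21ebc6df94896779f"

sha = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:            # assumed local file name
    for chunk in iter(lambda: f.read(1 << 20), b""):   # hash in 1 MiB chunks
        sha.update(chunk)

print(sha.hexdigest() == expected)  # True if the blob matches the pointer
```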
checkpoint-600/adapter_model/adapter_model/README.md CHANGED
@@ -4,6 +4,17 @@ library_name: peft
 ## Training procedure
 
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -16,5 +27,6 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: bfloat16
 ### Framework versions
 
+- PEFT 0.4.0
 
 - PEFT 0.4.0
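The added README lines record a 4-bit NF4 quantization setup. A minimal sketch of the equivalent `transformers.BitsAndBytesConfig` declaration, mirroring the values listed above (how the config is then wired into model loading is not shown in this commit):

```python
import torch
from transformers import BitsAndBytesConfig

# Values mirror the quantization config recorded in the README above.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,
    load_in_4bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# Typically passed as quantization_config=bnb_config to AutoModelForCausalLM.from_pretrained.
```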
checkpoint-600/adapter_model/adapter_model/adapter_config.json CHANGED
@@ -14,13 +14,13 @@
   "r": 64,
   "revision": null,
   "target_modules": [
-    "q_proj",
+    "v_proj",
     "down_proj",
+    "o_proj",
     "k_proj",
     "up_proj",
-    "o_proj",
-    "v_proj",
-    "gate_proj"
+    "gate_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
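The `adapter_config.json` change only reorders `target_modules`; the set of projection layers is unchanged. A minimal PEFT sketch that declares an equivalent LoRA target set (other hyperparameters such as `lora_alpha` and `lora_dropout` are not visible in this diff and are omitted):

```python
from peft import LoraConfig

# Same module set as the updated target_modules list; ordering does not affect the adapter.
lora_config = LoraConfig(
    r=64,
    target_modules=["v_proj", "down_proj", "o_proj", "k_proj",
                    "up_proj", "gate_proj", "q_proj"],
    task_type="CAUSAL_LM",
)
```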
checkpoint-600/adapter_model/adapter_model/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:adb9e9477e25e5f833e174c991243e847d63a5fecd0c370719b4465628a0deda
+oid sha256:316c05afc65201a82db1fe35558b74edc638e2bf7fc8b812ef6a0a8584071b89
 size 319977229
checkpoint-800/README.md CHANGED
@@ -4,6 +4,17 @@ library_name: peft
 ## Training procedure
 
 
+The following `bitsandbytes` quantization config was used during training:
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+
 The following `bitsandbytes` quantization config was used during training:
 - load_in_8bit: False
 - load_in_4bit: True
@@ -16,5 +27,6 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_compute_dtype: bfloat16
 ### Framework versions
 
+- PEFT 0.4.0
 
 - PEFT 0.4.0
checkpoint-800/adapter_config.json CHANGED
@@ -14,13 +14,13 @@
   "r": 64,
   "revision": null,
   "target_modules": [
-    "q_proj",
+    "v_proj",
     "down_proj",
+    "o_proj",
     "k_proj",
     "up_proj",
-    "o_proj",
-    "v_proj",
-    "gate_proj"
+    "gate_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
checkpoint-800/adapter_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48c98b0f1b54c6fc81b65004bc46e17f6849f7279191b69433376de49ee40588
+oid sha256:e76a12b40d96b039c516a755ff0fc82c6483336c390439b21ebc6df94896779f
 size 319977229
checkpoint-800/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:37dc9c1b7f600c52fffdec4850c5e89dabdf9ee2ac15dd84ea365744e9a23683
+oid sha256:be25d67708276096612e3d4f1c60ff2d6079396ce302ff645fda3c9e333cb0f9
 size 1279539973
checkpoint-800/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:71d6e45f0d667730d4d2f18005c58f35d563043cc958d6630f436fdad1f5953d
+oid sha256:8b2ac0bbb2415a408ba2161d5dc7b93cf5d07edaece22603a20a05406fb2f35f
 size 14511
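`rng_state.pth` is the Trainer's RNG snapshot for resuming. A hedged sketch of restoring it, assuming the usual `transformers.Trainer` layout of a torch-saved dict with `python`, `numpy`, `cpu`, and `cuda` entries (the key names are assumptions, not verified against this checkpoint):

```python
import random
import numpy as np
import torch

state = torch.load("checkpoint-800/rng_state.pth")  # path from this commit

# Key names below assume the standard Trainer snapshot format.
random.setstate(state["python"])
np.random.set_state(state["numpy"])
torch.set_rng_state(state["cpu"])
if torch.cuda.is_available() and "cuda" in state:
    torch.cuda.set_rng_state_all(state["cuda"])
```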
checkpoint-800/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "best_metric": 0.7561990022659302,
3
  "best_model_checkpoint": "experts/expert-16/checkpoint-800",
4
  "epoch": 0.2534854245880862,
5
  "global_step": 800,
@@ -10,7 +10,7 @@
10
  {
11
  "epoch": 0.0,
12
  "learning_rate": 0.0002,
13
- "loss": 0.8331,
14
  "step": 10
15
  },
16
  {
@@ -22,7 +22,7 @@
22
  {
23
  "epoch": 0.01,
24
  "learning_rate": 0.0002,
25
- "loss": 0.9038,
26
  "step": 30
27
  },
28
  {
@@ -34,13 +34,13 @@
34
  {
35
  "epoch": 0.02,
36
  "learning_rate": 0.0002,
37
- "loss": 0.8155,
38
  "step": 50
39
  },
40
  {
41
  "epoch": 0.02,
42
  "learning_rate": 0.0002,
43
- "loss": 0.7896,
44
  "step": 60
45
  },
46
  {
@@ -52,105 +52,105 @@
52
  {
53
  "epoch": 0.03,
54
  "learning_rate": 0.0002,
55
- "loss": 0.8827,
56
  "step": 80
57
  },
58
  {
59
  "epoch": 0.03,
60
  "learning_rate": 0.0002,
61
- "loss": 0.861,
62
  "step": 90
63
  },
64
  {
65
  "epoch": 0.03,
66
  "learning_rate": 0.0002,
67
- "loss": 0.7879,
68
  "step": 100
69
  },
70
  {
71
  "epoch": 0.03,
72
  "learning_rate": 0.0002,
73
- "loss": 0.803,
74
  "step": 110
75
  },
76
  {
77
  "epoch": 0.04,
78
  "learning_rate": 0.0002,
79
- "loss": 0.8212,
80
  "step": 120
81
  },
82
  {
83
  "epoch": 0.04,
84
  "learning_rate": 0.0002,
85
- "loss": 0.8075,
86
  "step": 130
87
  },
88
  {
89
  "epoch": 0.04,
90
  "learning_rate": 0.0002,
91
- "loss": 0.9263,
92
  "step": 140
93
  },
94
  {
95
  "epoch": 0.05,
96
  "learning_rate": 0.0002,
97
- "loss": 0.7969,
98
  "step": 150
99
  },
100
  {
101
  "epoch": 0.05,
102
  "learning_rate": 0.0002,
103
- "loss": 0.7883,
104
  "step": 160
105
  },
106
  {
107
  "epoch": 0.05,
108
  "learning_rate": 0.0002,
109
- "loss": 0.7582,
110
  "step": 170
111
  },
112
  {
113
  "epoch": 0.06,
114
  "learning_rate": 0.0002,
115
- "loss": 0.8095,
116
  "step": 180
117
  },
118
  {
119
  "epoch": 0.06,
120
  "learning_rate": 0.0002,
121
- "loss": 0.8614,
122
  "step": 190
123
  },
124
  {
125
  "epoch": 0.06,
126
  "learning_rate": 0.0002,
127
- "loss": 0.8674,
128
  "step": 200
129
  },
130
  {
131
  "epoch": 0.06,
132
- "eval_loss": 0.7774001359939575,
133
- "eval_runtime": 149.8878,
134
- "eval_samples_per_second": 6.672,
135
- "eval_steps_per_second": 3.336,
136
  "step": 200
137
  },
138
  {
139
  "epoch": 0.06,
140
- "mmlu_eval_accuracy": 0.4759538024775667,
141
  "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
142
  "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
143
  "mmlu_eval_accuracy_astronomy": 0.4375,
144
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
145
- "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
146
  "mmlu_eval_accuracy_college_biology": 0.4375,
147
- "mmlu_eval_accuracy_college_chemistry": 0.25,
148
  "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
149
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
150
  "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
151
  "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
152
  "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
153
- "mmlu_eval_accuracy_conceptual_physics": 0.3076923076923077,
154
  "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
155
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
156
  "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
@@ -159,16 +159,16 @@
159
  "mmlu_eval_accuracy_high_school_biology": 0.375,
160
  "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
161
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
162
- "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
163
  "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
164
  "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
165
  "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
166
  "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
167
- "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
168
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
169
  "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
170
- "mmlu_eval_accuracy_high_school_statistics": 0.391304347826087,
171
- "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
172
  "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
173
  "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
174
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
@@ -177,306 +177,306 @@
177
  "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
178
  "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
179
  "mmlu_eval_accuracy_management": 0.6363636363636364,
180
- "mmlu_eval_accuracy_marketing": 0.84,
181
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
182
- "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
183
  "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
184
- "mmlu_eval_accuracy_moral_scenarios": 0.25,
185
  "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
186
  "mmlu_eval_accuracy_philosophy": 0.5,
187
  "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
188
  "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
189
- "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
190
  "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
191
- "mmlu_eval_accuracy_professional_psychology": 0.463768115942029,
192
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
193
  "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
194
- "mmlu_eval_accuracy_sociology": 0.6363636363636364,
195
  "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
196
  "mmlu_eval_accuracy_virology": 0.5555555555555556,
197
- "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
198
- "mmlu_loss": 1.619096915108728,
199
  "step": 200
200
  },
201
  {
202
  "epoch": 0.07,
203
  "learning_rate": 0.0002,
204
- "loss": 0.8312,
205
  "step": 210
206
  },
207
  {
208
  "epoch": 0.07,
209
  "learning_rate": 0.0002,
210
- "loss": 0.8465,
211
  "step": 220
212
  },
213
  {
214
  "epoch": 0.07,
215
  "learning_rate": 0.0002,
216
- "loss": 0.8433,
217
  "step": 230
218
  },
219
  {
220
  "epoch": 0.08,
221
  "learning_rate": 0.0002,
222
- "loss": 0.8223,
223
  "step": 240
224
  },
225
  {
226
  "epoch": 0.08,
227
  "learning_rate": 0.0002,
228
- "loss": 0.7884,
229
  "step": 250
230
  },
231
  {
232
  "epoch": 0.08,
233
  "learning_rate": 0.0002,
234
- "loss": 0.8233,
235
  "step": 260
236
  },
237
  {
238
  "epoch": 0.09,
239
  "learning_rate": 0.0002,
240
- "loss": 0.812,
241
  "step": 270
242
  },
243
  {
244
  "epoch": 0.09,
245
  "learning_rate": 0.0002,
246
- "loss": 0.8342,
247
  "step": 280
248
  },
249
  {
250
  "epoch": 0.09,
251
  "learning_rate": 0.0002,
252
- "loss": 0.8316,
253
  "step": 290
254
  },
255
  {
256
  "epoch": 0.1,
257
  "learning_rate": 0.0002,
258
- "loss": 0.7984,
259
  "step": 300
260
  },
261
  {
262
  "epoch": 0.1,
263
  "learning_rate": 0.0002,
264
- "loss": 0.7821,
265
  "step": 310
266
  },
267
  {
268
  "epoch": 0.1,
269
  "learning_rate": 0.0002,
270
- "loss": 0.7715,
271
  "step": 320
272
  },
273
  {
274
  "epoch": 0.1,
275
  "learning_rate": 0.0002,
276
- "loss": 0.7675,
277
  "step": 330
278
  },
279
  {
280
  "epoch": 0.11,
281
  "learning_rate": 0.0002,
282
- "loss": 0.793,
283
  "step": 340
284
  },
285
  {
286
  "epoch": 0.11,
287
  "learning_rate": 0.0002,
288
- "loss": 0.8223,
289
  "step": 350
290
  },
291
  {
292
  "epoch": 0.11,
293
  "learning_rate": 0.0002,
294
- "loss": 0.7916,
295
  "step": 360
296
  },
297
  {
298
  "epoch": 0.12,
299
  "learning_rate": 0.0002,
300
- "loss": 0.8094,
301
  "step": 370
302
  },
303
  {
304
  "epoch": 0.12,
305
  "learning_rate": 0.0002,
306
- "loss": 0.7655,
307
  "step": 380
308
  },
309
  {
310
  "epoch": 0.12,
311
  "learning_rate": 0.0002,
312
- "loss": 0.7868,
313
  "step": 390
314
  },
315
  {
316
  "epoch": 0.13,
317
  "learning_rate": 0.0002,
318
- "loss": 0.7983,
319
  "step": 400
320
  },
321
  {
322
  "epoch": 0.13,
323
- "eval_loss": 0.7662714719772339,
324
- "eval_runtime": 149.8239,
325
- "eval_samples_per_second": 6.675,
326
- "eval_steps_per_second": 3.337,
327
  "step": 400
328
  },
329
  {
330
  "epoch": 0.13,
331
- "mmlu_eval_accuracy": 0.47563130411411997,
332
  "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
333
  "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
334
  "mmlu_eval_accuracy_astronomy": 0.4375,
335
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
336
- "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
337
  "mmlu_eval_accuracy_college_biology": 0.4375,
338
  "mmlu_eval_accuracy_college_chemistry": 0.125,
339
  "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
340
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
341
  "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
342
- "mmlu_eval_accuracy_college_physics": 0.45454545454545453,
343
- "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
344
  "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
345
  "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
346
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
347
- "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
348
  "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
349
- "mmlu_eval_accuracy_global_facts": 0.3,
350
  "mmlu_eval_accuracy_high_school_biology": 0.40625,
351
  "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
352
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
353
- "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
354
  "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
355
  "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
356
  "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
357
  "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
358
- "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
359
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
360
- "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
361
- "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
362
- "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
363
  "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
364
  "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
365
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
366
  "mmlu_eval_accuracy_international_law": 0.8461538461538461,
367
- "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
368
  "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
369
  "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
370
  "mmlu_eval_accuracy_management": 0.6363636363636364,
371
  "mmlu_eval_accuracy_marketing": 0.84,
372
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
373
- "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
374
  "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
375
- "mmlu_eval_accuracy_moral_scenarios": 0.24,
376
- "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
377
  "mmlu_eval_accuracy_philosophy": 0.5,
378
- "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
379
  "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
380
  "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
381
  "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
382
  "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
383
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
384
- "mmlu_eval_accuracy_security_studies": 0.48148148148148145,
385
  "mmlu_eval_accuracy_sociology": 0.6818181818181818,
386
  "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
387
  "mmlu_eval_accuracy_virology": 0.5,
388
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
389
- "mmlu_loss": 1.474700499685875,
390
  "step": 400
391
  },
392
  {
393
  "epoch": 0.13,
394
  "learning_rate": 0.0002,
395
- "loss": 0.8181,
396
  "step": 410
397
  },
398
  {
399
  "epoch": 0.13,
400
  "learning_rate": 0.0002,
401
- "loss": 0.8442,
402
  "step": 420
403
  },
404
  {
405
  "epoch": 0.14,
406
  "learning_rate": 0.0002,
407
- "loss": 0.8194,
408
  "step": 430
409
  },
410
  {
411
  "epoch": 0.14,
412
  "learning_rate": 0.0002,
413
- "loss": 0.8192,
414
  "step": 440
415
  },
416
  {
417
  "epoch": 0.14,
418
  "learning_rate": 0.0002,
419
- "loss": 0.8265,
420
  "step": 450
421
  },
422
  {
423
  "epoch": 0.15,
424
  "learning_rate": 0.0002,
425
- "loss": 0.8383,
426
  "step": 460
427
  },
428
  {
429
  "epoch": 0.15,
430
  "learning_rate": 0.0002,
431
- "loss": 0.8375,
432
  "step": 470
433
  },
434
  {
435
  "epoch": 0.15,
436
  "learning_rate": 0.0002,
437
- "loss": 0.808,
438
  "step": 480
439
  },
440
  {
441
  "epoch": 0.16,
442
  "learning_rate": 0.0002,
443
- "loss": 0.8209,
444
  "step": 490
445
  },
446
  {
447
  "epoch": 0.16,
448
  "learning_rate": 0.0002,
449
- "loss": 0.8144,
450
  "step": 500
451
  },
452
  {
453
  "epoch": 0.16,
454
  "learning_rate": 0.0002,
455
- "loss": 0.8465,
456
  "step": 510
457
  },
458
  {
459
  "epoch": 0.16,
460
  "learning_rate": 0.0002,
461
- "loss": 0.8437,
462
  "step": 520
463
  },
464
  {
465
  "epoch": 0.17,
466
  "learning_rate": 0.0002,
467
- "loss": 0.8091,
468
  "step": 530
469
  },
470
  {
471
  "epoch": 0.17,
472
  "learning_rate": 0.0002,
473
- "loss": 0.8501,
474
  "step": 540
475
  },
476
  {
477
  "epoch": 0.17,
478
  "learning_rate": 0.0002,
479
- "loss": 0.7731,
480
  "step": 550
481
  },
482
  {
@@ -488,19 +488,19 @@
488
  {
489
  "epoch": 0.18,
490
  "learning_rate": 0.0002,
491
- "loss": 0.749,
492
  "step": 570
493
  },
494
  {
495
  "epoch": 0.18,
496
  "learning_rate": 0.0002,
497
- "loss": 0.7947,
498
  "step": 580
499
  },
500
  {
501
  "epoch": 0.19,
502
  "learning_rate": 0.0002,
503
- "loss": 0.736,
504
  "step": 590
505
  },
506
  {
@@ -511,47 +511,47 @@
511
  },
512
  {
513
  "epoch": 0.19,
514
- "eval_loss": 0.7616425156593323,
515
- "eval_runtime": 149.4328,
516
- "eval_samples_per_second": 6.692,
517
- "eval_steps_per_second": 3.346,
518
  "step": 600
519
  },
520
  {
521
  "epoch": 0.19,
522
- "mmlu_eval_accuracy": 0.4814898904813968,
523
  "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
524
  "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
525
  "mmlu_eval_accuracy_astronomy": 0.4375,
526
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
527
- "mmlu_eval_accuracy_clinical_knowledge": 0.5172413793103449,
528
  "mmlu_eval_accuracy_college_biology": 0.4375,
529
  "mmlu_eval_accuracy_college_chemistry": 0.25,
530
- "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
531
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
532
- "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
533
  "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
534
  "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
535
  "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
536
- "mmlu_eval_accuracy_econometrics": 0.25,
537
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
538
  "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
539
  "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
540
- "mmlu_eval_accuracy_global_facts": 0.4,
541
  "mmlu_eval_accuracy_high_school_biology": 0.40625,
542
- "mmlu_eval_accuracy_high_school_chemistry": 0.3181818181818182,
543
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
544
  "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
545
  "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
546
  "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
547
- "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
548
- "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
549
- "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
550
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
551
  "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
552
- "mmlu_eval_accuracy_high_school_statistics": 0.43478260869565216,
553
  "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
554
- "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
555
  "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
556
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
557
  "mmlu_eval_accuracy_international_law": 0.8461538461538461,
@@ -559,16 +559,16 @@
559
  "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
560
  "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
561
  "mmlu_eval_accuracy_management": 0.6363636363636364,
562
- "mmlu_eval_accuracy_marketing": 0.88,
563
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
564
- "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
565
  "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
566
- "mmlu_eval_accuracy_moral_scenarios": 0.27,
567
  "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
568
- "mmlu_eval_accuracy_philosophy": 0.47058823529411764,
569
- "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
570
- "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
571
- "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
572
  "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
573
  "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
574
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
@@ -577,61 +577,61 @@
577
  "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
578
  "mmlu_eval_accuracy_virology": 0.5,
579
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
580
- "mmlu_loss": 1.5487773687658983,
581
  "step": 600
582
  },
583
  {
584
  "epoch": 0.19,
585
  "learning_rate": 0.0002,
586
- "loss": 0.7917,
587
  "step": 610
588
  },
589
  {
590
  "epoch": 0.2,
591
  "learning_rate": 0.0002,
592
- "loss": 0.7324,
593
  "step": 620
594
  },
595
  {
596
  "epoch": 0.2,
597
  "learning_rate": 0.0002,
598
- "loss": 0.7894,
599
  "step": 630
600
  },
601
  {
602
  "epoch": 0.2,
603
  "learning_rate": 0.0002,
604
- "loss": 0.7818,
605
  "step": 640
606
  },
607
  {
608
  "epoch": 0.21,
609
  "learning_rate": 0.0002,
610
- "loss": 0.8839,
611
  "step": 650
612
  },
613
  {
614
  "epoch": 0.21,
615
  "learning_rate": 0.0002,
616
- "loss": 0.7716,
617
  "step": 660
618
  },
619
  {
620
  "epoch": 0.21,
621
  "learning_rate": 0.0002,
622
- "loss": 0.8243,
623
  "step": 670
624
  },
625
  {
626
  "epoch": 0.22,
627
  "learning_rate": 0.0002,
628
- "loss": 0.7961,
629
  "step": 680
630
  },
631
  {
632
  "epoch": 0.22,
633
  "learning_rate": 0.0002,
634
- "loss": 0.742,
635
  "step": 690
636
  },
637
  {
@@ -643,88 +643,88 @@
643
  {
644
  "epoch": 0.22,
645
  "learning_rate": 0.0002,
646
- "loss": 0.814,
647
  "step": 710
648
  },
649
  {
650
  "epoch": 0.23,
651
  "learning_rate": 0.0002,
652
- "loss": 0.8581,
653
  "step": 720
654
  },
655
  {
656
  "epoch": 0.23,
657
  "learning_rate": 0.0002,
658
- "loss": 0.8023,
659
  "step": 730
660
  },
661
  {
662
  "epoch": 0.23,
663
  "learning_rate": 0.0002,
664
- "loss": 0.8199,
665
  "step": 740
666
  },
667
  {
668
  "epoch": 0.24,
669
  "learning_rate": 0.0002,
670
- "loss": 0.807,
671
  "step": 750
672
  },
673
  {
674
  "epoch": 0.24,
675
  "learning_rate": 0.0002,
676
- "loss": 0.8133,
677
  "step": 760
678
  },
679
  {
680
  "epoch": 0.24,
681
  "learning_rate": 0.0002,
682
- "loss": 0.7978,
683
  "step": 770
684
  },
685
  {
686
  "epoch": 0.25,
687
  "learning_rate": 0.0002,
688
- "loss": 0.7977,
689
  "step": 780
690
  },
691
  {
692
  "epoch": 0.25,
693
  "learning_rate": 0.0002,
694
- "loss": 0.7802,
695
  "step": 790
696
  },
697
  {
698
  "epoch": 0.25,
699
  "learning_rate": 0.0002,
700
- "loss": 0.8041,
701
  "step": 800
702
  },
703
  {
704
  "epoch": 0.25,
705
- "eval_loss": 0.7561990022659302,
706
- "eval_runtime": 149.4612,
707
- "eval_samples_per_second": 6.691,
708
- "eval_steps_per_second": 3.345,
709
  "step": 800
710
  },
711
  {
712
  "epoch": 0.25,
713
- "mmlu_eval_accuracy": 0.4774734261484596,
714
  "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
715
  "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
716
  "mmlu_eval_accuracy_astronomy": 0.4375,
717
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
718
  "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
719
- "mmlu_eval_accuracy_college_biology": 0.5,
720
  "mmlu_eval_accuracy_college_chemistry": 0.125,
721
  "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
722
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
723
- "mmlu_eval_accuracy_college_medicine": 0.3181818181818182,
724
  "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
725
  "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
726
  "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
727
- "mmlu_eval_accuracy_econometrics": 0.25,
728
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
729
  "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
730
  "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
@@ -732,43 +732,43 @@
732
  "mmlu_eval_accuracy_high_school_biology": 0.375,
733
  "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
734
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
735
- "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
736
  "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
737
- "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
738
- "mmlu_eval_accuracy_high_school_macroeconomics": 0.4883720930232558,
739
- "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
740
- "mmlu_eval_accuracy_high_school_microeconomics": 0.4230769230769231,
741
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
742
  "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
743
- "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
744
  "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
745
  "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
746
- "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
747
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
748
  "mmlu_eval_accuracy_international_law": 0.8461538461538461,
749
- "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
750
  "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
751
- "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
752
  "mmlu_eval_accuracy_management": 0.6363636363636364,
753
  "mmlu_eval_accuracy_marketing": 0.84,
754
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
755
- "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
756
- "mmlu_eval_accuracy_moral_disputes": 0.47368421052631576,
757
- "mmlu_eval_accuracy_moral_scenarios": 0.24,
758
  "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
759
  "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
760
- "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
761
- "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
762
- "mmlu_eval_accuracy_professional_law": 0.31176470588235294,
763
  "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
764
- "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
765
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
766
  "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
767
  "mmlu_eval_accuracy_sociology": 0.6363636363636364,
768
  "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
769
  "mmlu_eval_accuracy_virology": 0.5555555555555556,
770
- "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
771
- "mmlu_loss": 1.498360338394698,
772
  "step": 800
773
  }
774
  ],
 
1
  {
2
+ "best_metric": 0.7563537359237671,
3
  "best_model_checkpoint": "experts/expert-16/checkpoint-800",
4
  "epoch": 0.2534854245880862,
5
  "global_step": 800,
 
10
  {
11
  "epoch": 0.0,
12
  "learning_rate": 0.0002,
13
+ "loss": 0.8339,
14
  "step": 10
15
  },
16
  {
 
22
  {
23
  "epoch": 0.01,
24
  "learning_rate": 0.0002,
25
+ "loss": 0.9041,
26
  "step": 30
27
  },
28
  {
 
34
  {
35
  "epoch": 0.02,
36
  "learning_rate": 0.0002,
37
+ "loss": 0.8151,
38
  "step": 50
39
  },
40
  {
41
  "epoch": 0.02,
42
  "learning_rate": 0.0002,
43
+ "loss": 0.79,
44
  "step": 60
45
  },
46
  {
 
52
  {
53
  "epoch": 0.03,
54
  "learning_rate": 0.0002,
55
+ "loss": 0.8831,
56
  "step": 80
57
  },
58
  {
59
  "epoch": 0.03,
60
  "learning_rate": 0.0002,
61
+ "loss": 0.8607,
62
  "step": 90
63
  },
64
  {
65
  "epoch": 0.03,
66
  "learning_rate": 0.0002,
67
+ "loss": 0.7876,
68
  "step": 100
69
  },
70
  {
71
  "epoch": 0.03,
72
  "learning_rate": 0.0002,
73
+ "loss": 0.8031,
74
  "step": 110
75
  },
76
  {
77
  "epoch": 0.04,
78
  "learning_rate": 0.0002,
79
+ "loss": 0.8207,
80
  "step": 120
81
  },
82
  {
83
  "epoch": 0.04,
84
  "learning_rate": 0.0002,
85
+ "loss": 0.807,
86
  "step": 130
87
  },
88
  {
89
  "epoch": 0.04,
90
  "learning_rate": 0.0002,
91
+ "loss": 0.9262,
92
  "step": 140
93
  },
94
  {
95
  "epoch": 0.05,
96
  "learning_rate": 0.0002,
97
+ "loss": 0.7964,
98
  "step": 150
99
  },
100
  {
101
  "epoch": 0.05,
102
  "learning_rate": 0.0002,
103
+ "loss": 0.7879,
104
  "step": 160
105
  },
106
  {
107
  "epoch": 0.05,
108
  "learning_rate": 0.0002,
109
+ "loss": 0.7587,
110
  "step": 170
111
  },
112
  {
113
  "epoch": 0.06,
114
  "learning_rate": 0.0002,
115
+ "loss": 0.8091,
116
  "step": 180
117
  },
118
  {
119
  "epoch": 0.06,
120
  "learning_rate": 0.0002,
121
+ "loss": 0.8615,
122
  "step": 190
123
  },
124
  {
125
  "epoch": 0.06,
126
  "learning_rate": 0.0002,
127
+ "loss": 0.8672,
128
  "step": 200
129
  },
130
  {
131
  "epoch": 0.06,
132
+ "eval_loss": 0.7779108881950378,
133
+ "eval_runtime": 110.9863,
134
+ "eval_samples_per_second": 9.01,
135
+ "eval_steps_per_second": 4.505,
136
  "step": 200
137
  },
138
  {
139
  "epoch": 0.06,
140
+ "mmlu_eval_accuracy": 0.4744171116325413,
141
  "mmlu_eval_accuracy_abstract_algebra": 0.36363636363636365,
142
  "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
143
  "mmlu_eval_accuracy_astronomy": 0.4375,
144
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
145
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
146
  "mmlu_eval_accuracy_college_biology": 0.4375,
147
+ "mmlu_eval_accuracy_college_chemistry": 0.125,
148
  "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
149
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
150
  "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
151
  "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
152
  "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
153
+ "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
154
  "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
155
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
156
  "mmlu_eval_accuracy_elementary_mathematics": 0.3170731707317073,
 
159
  "mmlu_eval_accuracy_high_school_biology": 0.375,
160
  "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
161
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
162
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
163
  "mmlu_eval_accuracy_high_school_geography": 0.8636363636363636,
164
  "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
165
  "mmlu_eval_accuracy_high_school_macroeconomics": 0.4418604651162791,
166
  "mmlu_eval_accuracy_high_school_mathematics": 0.20689655172413793,
167
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
168
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
169
  "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
170
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
171
+ "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
172
  "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
173
  "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
174
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
 
177
  "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
178
  "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
179
  "mmlu_eval_accuracy_management": 0.6363636363636364,
180
+ "mmlu_eval_accuracy_marketing": 0.88,
181
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
182
+ "mmlu_eval_accuracy_miscellaneous": 0.6627906976744186,
183
  "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
184
+ "mmlu_eval_accuracy_moral_scenarios": 0.27,
185
  "mmlu_eval_accuracy_nutrition": 0.6666666666666666,
186
  "mmlu_eval_accuracy_philosophy": 0.5,
187
  "mmlu_eval_accuracy_prehistory": 0.42857142857142855,
188
  "mmlu_eval_accuracy_professional_accounting": 0.3225806451612903,
189
+ "mmlu_eval_accuracy_professional_law": 0.3176470588235294,
190
  "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
191
+ "mmlu_eval_accuracy_professional_psychology": 0.4927536231884058,
192
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
193
  "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
194
+ "mmlu_eval_accuracy_sociology": 0.6818181818181818,
195
  "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
196
  "mmlu_eval_accuracy_virology": 0.5555555555555556,
197
+ "mmlu_eval_accuracy_world_religions": 0.7368421052631579,
198
+ "mmlu_loss": 1.5868234255450824,
199
  "step": 200
200
  },
201
  {
202
  "epoch": 0.07,
203
  "learning_rate": 0.0002,
204
+ "loss": 0.8316,
205
  "step": 210
206
  },
207
  {
208
  "epoch": 0.07,
209
  "learning_rate": 0.0002,
210
+ "loss": 0.8454,
211
  "step": 220
212
  },
213
  {
214
  "epoch": 0.07,
215
  "learning_rate": 0.0002,
216
+ "loss": 0.8434,
217
  "step": 230
218
  },
219
  {
220
  "epoch": 0.08,
221
  "learning_rate": 0.0002,
222
+ "loss": 0.821,
223
  "step": 240
224
  },
225
  {
226
  "epoch": 0.08,
227
  "learning_rate": 0.0002,
228
+ "loss": 0.7893,
229
  "step": 250
230
  },
231
  {
232
  "epoch": 0.08,
233
  "learning_rate": 0.0002,
234
+ "loss": 0.8242,
235
  "step": 260
236
  },
237
  {
238
  "epoch": 0.09,
239
  "learning_rate": 0.0002,
240
+ "loss": 0.8128,
241
  "step": 270
242
  },
243
  {
244
  "epoch": 0.09,
245
  "learning_rate": 0.0002,
246
+ "loss": 0.8344,
247
  "step": 280
248
  },
249
  {
250
  "epoch": 0.09,
251
  "learning_rate": 0.0002,
252
+ "loss": 0.8338,
253
  "step": 290
254
  },
255
  {
256
  "epoch": 0.1,
257
  "learning_rate": 0.0002,
258
+ "loss": 0.7981,
259
  "step": 300
260
  },
261
  {
262
  "epoch": 0.1,
263
  "learning_rate": 0.0002,
264
+ "loss": 0.781,
265
  "step": 310
266
  },
267
  {
268
  "epoch": 0.1,
269
  "learning_rate": 0.0002,
270
+ "loss": 0.7717,
271
  "step": 320
272
  },
273
  {
274
  "epoch": 0.1,
275
  "learning_rate": 0.0002,
276
+ "loss": 0.767,
277
  "step": 330
278
  },
279
  {
280
  "epoch": 0.11,
281
  "learning_rate": 0.0002,
282
+ "loss": 0.7925,
283
  "step": 340
284
  },
285
  {
286
  "epoch": 0.11,
287
  "learning_rate": 0.0002,
288
+ "loss": 0.8226,
289
  "step": 350
290
  },
291
  {
292
  "epoch": 0.11,
293
  "learning_rate": 0.0002,
294
+ "loss": 0.7912,
295
  "step": 360
296
  },
297
  {
298
  "epoch": 0.12,
299
  "learning_rate": 0.0002,
300
+ "loss": 0.8093,
301
  "step": 370
302
  },
303
  {
304
  "epoch": 0.12,
305
  "learning_rate": 0.0002,
306
+ "loss": 0.7648,
307
  "step": 380
308
  },
309
  {
310
  "epoch": 0.12,
311
  "learning_rate": 0.0002,
312
+ "loss": 0.7866,
313
  "step": 390
314
  },
315
  {
316
  "epoch": 0.13,
317
  "learning_rate": 0.0002,
318
+ "loss": 0.7976,
319
  "step": 400
320
  },
321
  {
322
  "epoch": 0.13,
323
+ "eval_loss": 0.7656086683273315,
324
+ "eval_runtime": 110.9802,
325
+ "eval_samples_per_second": 9.011,
326
+ "eval_steps_per_second": 4.505,
327
  "step": 400
328
  },
329
  {
330
  "epoch": 0.13,
331
+ "mmlu_eval_accuracy": 0.47124130233512024,
332
  "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
333
  "mmlu_eval_accuracy_anatomy": 0.6428571428571429,
334
  "mmlu_eval_accuracy_astronomy": 0.4375,
335
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
336
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4482758620689655,
337
  "mmlu_eval_accuracy_college_biology": 0.4375,
338
  "mmlu_eval_accuracy_college_chemistry": 0.125,
339
  "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
340
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
341
  "mmlu_eval_accuracy_college_medicine": 0.45454545454545453,
342
+ "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
343
+ "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
344
  "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
345
  "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
346
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
347
+ "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
348
  "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
349
+ "mmlu_eval_accuracy_global_facts": 0.4,
350
  "mmlu_eval_accuracy_high_school_biology": 0.40625,
351
  "mmlu_eval_accuracy_high_school_chemistry": 0.22727272727272727,
352
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
353
+ "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
354
  "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
355
  "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
356
  "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
357
  "mmlu_eval_accuracy_high_school_mathematics": 0.2413793103448276,
358
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.34615384615384615,
359
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
360
+ "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
361
+ "mmlu_eval_accuracy_high_school_statistics": 0.30434782608695654,
362
+ "mmlu_eval_accuracy_high_school_us_history": 0.6363636363636364,
363
  "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
364
  "mmlu_eval_accuracy_human_aging": 0.6956521739130435,
365
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
366
  "mmlu_eval_accuracy_international_law": 0.8461538461538461,
367
+ "mmlu_eval_accuracy_jurisprudence": 0.36363636363636365,
368
  "mmlu_eval_accuracy_logical_fallacies": 0.6111111111111112,
369
  "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
370
  "mmlu_eval_accuracy_management": 0.6363636363636364,
371
  "mmlu_eval_accuracy_marketing": 0.84,
372
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
373
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
374
  "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
375
+ "mmlu_eval_accuracy_moral_scenarios": 0.25,
376
+ "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
377
  "mmlu_eval_accuracy_philosophy": 0.5,
378
+ "mmlu_eval_accuracy_prehistory": 0.4857142857142857,
379
  "mmlu_eval_accuracy_professional_accounting": 0.3870967741935484,
380
  "mmlu_eval_accuracy_professional_law": 0.3058823529411765,
381
  "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
382
  "mmlu_eval_accuracy_professional_psychology": 0.5217391304347826,
383
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
384
+ "mmlu_eval_accuracy_security_studies": 0.5185185185185185,
385
  "mmlu_eval_accuracy_sociology": 0.6818181818181818,
386
  "mmlu_eval_accuracy_us_foreign_policy": 0.5454545454545454,
387
  "mmlu_eval_accuracy_virology": 0.5,
388
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
389
+ "mmlu_loss": 1.4339068503199297,
390
  "step": 400
391
  },
392
  {
393
  "epoch": 0.13,
394
  "learning_rate": 0.0002,
395
+ "loss": 0.8182,
396
  "step": 410
397
  },
398
  {
399
  "epoch": 0.13,
400
  "learning_rate": 0.0002,
401
+ "loss": 0.8438,
402
  "step": 420
403
  },
404
  {
405
  "epoch": 0.14,
406
  "learning_rate": 0.0002,
407
+ "loss": 0.8184,
408
  "step": 430
409
  },
410
  {
411
  "epoch": 0.14,
412
  "learning_rate": 0.0002,
413
+ "loss": 0.8202,
414
  "step": 440
415
  },
416
  {
417
  "epoch": 0.14,
418
  "learning_rate": 0.0002,
419
+ "loss": 0.8264,
420
  "step": 450
421
  },
422
  {
423
  "epoch": 0.15,
424
  "learning_rate": 0.0002,
425
+ "loss": 0.8384,
426
  "step": 460
427
  },
428
  {
429
  "epoch": 0.15,
430
  "learning_rate": 0.0002,
431
+ "loss": 0.8372,
432
  "step": 470
433
  },
434
  {
435
  "epoch": 0.15,
436
  "learning_rate": 0.0002,
437
+ "loss": 0.8072,
438
  "step": 480
439
  },
440
  {
441
  "epoch": 0.16,
442
  "learning_rate": 0.0002,
443
+ "loss": 0.8214,
444
  "step": 490
445
  },
446
  {
447
  "epoch": 0.16,
448
  "learning_rate": 0.0002,
449
+ "loss": 0.814,
450
  "step": 500
451
  },
452
  {
453
  "epoch": 0.16,
454
  "learning_rate": 0.0002,
455
+ "loss": 0.847,
456
  "step": 510
457
  },
458
  {
459
  "epoch": 0.16,
460
  "learning_rate": 0.0002,
461
+ "loss": 0.8444,
462
  "step": 520
463
  },
464
  {
465
  "epoch": 0.17,
466
  "learning_rate": 0.0002,
467
+ "loss": 0.8096,
468
  "step": 530
469
  },
470
  {
471
  "epoch": 0.17,
472
  "learning_rate": 0.0002,
473
+ "loss": 0.8496,
474
  "step": 540
475
  },
476
  {
477
  "epoch": 0.17,
478
  "learning_rate": 0.0002,
479
+ "loss": 0.7729,
480
  "step": 550
481
  },
482
  {
 
488
  {
489
  "epoch": 0.18,
490
  "learning_rate": 0.0002,
491
+ "loss": 0.7478,
492
  "step": 570
493
  },
494
  {
495
  "epoch": 0.18,
496
  "learning_rate": 0.0002,
497
+ "loss": 0.7953,
498
  "step": 580
499
  },
500
  {
501
  "epoch": 0.19,
502
  "learning_rate": 0.0002,
503
+ "loss": 0.7363,
504
  "step": 590
505
  },
506
  {
 
511
  },
512
  {
513
  "epoch": 0.19,
514
+ "eval_loss": 0.7616064548492432,
515
+ "eval_runtime": 110.9404,
516
+ "eval_samples_per_second": 9.014,
517
+ "eval_steps_per_second": 4.507,
518
  "step": 600
519
  },
520
  {
521
  "epoch": 0.19,
522
+ "mmlu_eval_accuracy": 0.4749850916074463,
523
  "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
524
  "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
525
  "mmlu_eval_accuracy_astronomy": 0.4375,
526
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
527
+ "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
528
  "mmlu_eval_accuracy_college_biology": 0.4375,
529
  "mmlu_eval_accuracy_college_chemistry": 0.25,
530
+ "mmlu_eval_accuracy_college_computer_science": 0.18181818181818182,
531
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
532
+ "mmlu_eval_accuracy_college_medicine": 0.2727272727272727,
533
  "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
534
  "mmlu_eval_accuracy_computer_security": 0.36363636363636365,
535
  "mmlu_eval_accuracy_conceptual_physics": 0.4230769230769231,
536
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
537
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
538
  "mmlu_eval_accuracy_elementary_mathematics": 0.2682926829268293,
539
  "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
540
+ "mmlu_eval_accuracy_global_facts": 0.3,
541
  "mmlu_eval_accuracy_high_school_biology": 0.40625,
542
+ "mmlu_eval_accuracy_high_school_chemistry": 0.36363636363636365,
543
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
544
  "mmlu_eval_accuracy_high_school_european_history": 0.6111111111111112,
545
  "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
546
  "mmlu_eval_accuracy_high_school_government_and_politics": 0.47619047619047616,
547
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
548
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
549
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.3076923076923077,
550
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
551
  "mmlu_eval_accuracy_high_school_psychology": 0.8666666666666667,
552
+ "mmlu_eval_accuracy_high_school_statistics": 0.34782608695652173,
553
  "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
554
+ "mmlu_eval_accuracy_high_school_world_history": 0.7692307692307693,
555
  "mmlu_eval_accuracy_human_aging": 0.6521739130434783,
556
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
557
  "mmlu_eval_accuracy_international_law": 0.8461538461538461,
 
559
  "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
560
  "mmlu_eval_accuracy_machine_learning": 0.18181818181818182,
561
  "mmlu_eval_accuracy_management": 0.6363636363636364,
562
+ "mmlu_eval_accuracy_marketing": 0.84,
563
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
564
+ "mmlu_eval_accuracy_miscellaneous": 0.6511627906976745,
565
  "mmlu_eval_accuracy_moral_disputes": 0.4473684210526316,
566
+ "mmlu_eval_accuracy_moral_scenarios": 0.26,
567
  "mmlu_eval_accuracy_nutrition": 0.6060606060606061,
568
+ "mmlu_eval_accuracy_philosophy": 0.5294117647058824,
569
+ "mmlu_eval_accuracy_prehistory": 0.5142857142857142,
570
+ "mmlu_eval_accuracy_professional_accounting": 0.41935483870967744,
571
+ "mmlu_eval_accuracy_professional_law": 0.3,
572
  "mmlu_eval_accuracy_professional_medicine": 0.5483870967741935,
573
  "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
574
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
 
577
  "mmlu_eval_accuracy_us_foreign_policy": 0.6363636363636364,
578
  "mmlu_eval_accuracy_virology": 0.5,
579
  "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
580
+ "mmlu_loss": 1.5647042619341658,
581
  "step": 600
582
  },
583
  {
584
  "epoch": 0.19,
585
  "learning_rate": 0.0002,
586
+ "loss": 0.7936,
587
  "step": 610
588
  },
589
  {
590
  "epoch": 0.2,
591
  "learning_rate": 0.0002,
592
+ "loss": 0.7319,
593
  "step": 620
594
  },
595
  {
596
  "epoch": 0.2,
597
  "learning_rate": 0.0002,
598
+ "loss": 0.79,
599
  "step": 630
600
  },
601
  {
602
  "epoch": 0.2,
603
  "learning_rate": 0.0002,
604
+ "loss": 0.7806,
605
  "step": 640
606
  },
607
  {
608
  "epoch": 0.21,
609
  "learning_rate": 0.0002,
610
+ "loss": 0.8833,
611
  "step": 650
612
  },
613
  {
614
  "epoch": 0.21,
615
  "learning_rate": 0.0002,
616
+ "loss": 0.7711,
617
  "step": 660
618
  },
619
  {
620
  "epoch": 0.21,
621
  "learning_rate": 0.0002,
622
+ "loss": 0.8242,
623
  "step": 670
624
  },
625
  {
626
  "epoch": 0.22,
627
  "learning_rate": 0.0002,
628
+ "loss": 0.7948,
629
  "step": 680
630
  },
631
  {
632
  "epoch": 0.22,
633
  "learning_rate": 0.0002,
634
+ "loss": 0.7417,
635
  "step": 690
636
  },
637
  {
 
643
  {
644
  "epoch": 0.22,
645
  "learning_rate": 0.0002,
646
+ "loss": 0.8137,
647
  "step": 710
648
  },
649
  {
650
  "epoch": 0.23,
651
  "learning_rate": 0.0002,
652
+ "loss": 0.8568,
653
  "step": 720
654
  },
655
  {
656
  "epoch": 0.23,
657
  "learning_rate": 0.0002,
658
+ "loss": 0.802,
659
  "step": 730
660
  },
661
  {
662
  "epoch": 0.23,
663
  "learning_rate": 0.0002,
664
+ "loss": 0.8202,
665
  "step": 740
666
  },
667
  {
668
  "epoch": 0.24,
669
  "learning_rate": 0.0002,
670
+ "loss": 0.8077,
671
  "step": 750
672
  },
673
  {
674
  "epoch": 0.24,
675
  "learning_rate": 0.0002,
676
+ "loss": 0.814,
677
  "step": 760
678
  },
679
  {
680
  "epoch": 0.24,
681
  "learning_rate": 0.0002,
682
+ "loss": 0.7971,
683
  "step": 770
684
  },
685
  {
686
  "epoch": 0.25,
687
  "learning_rate": 0.0002,
688
+ "loss": 0.798,
689
  "step": 780
690
  },
691
  {
692
  "epoch": 0.25,
693
  "learning_rate": 0.0002,
694
+ "loss": 0.7806,
695
  "step": 790
696
  },
697
  {
698
  "epoch": 0.25,
699
  "learning_rate": 0.0002,
700
+ "loss": 0.8042,
701
  "step": 800
702
  },
703
  {
704
  "epoch": 0.25,
705
+ "eval_loss": 0.7563537359237671,
706
+ "eval_runtime": 111.023,
707
+ "eval_samples_per_second": 9.007,
708
+ "eval_steps_per_second": 4.504,
709
  "step": 800
710
  },
711
  {
712
  "epoch": 0.25,
713
+ "mmlu_eval_accuracy": 0.4796267144005645,
714
  "mmlu_eval_accuracy_abstract_algebra": 0.2727272727272727,
715
  "mmlu_eval_accuracy_anatomy": 0.7142857142857143,
716
  "mmlu_eval_accuracy_astronomy": 0.4375,
717
  "mmlu_eval_accuracy_business_ethics": 0.5454545454545454,
718
  "mmlu_eval_accuracy_clinical_knowledge": 0.4827586206896552,
719
+ "mmlu_eval_accuracy_college_biology": 0.4375,
720
  "mmlu_eval_accuracy_college_chemistry": 0.125,
721
  "mmlu_eval_accuracy_college_computer_science": 0.2727272727272727,
722
  "mmlu_eval_accuracy_college_mathematics": 0.2727272727272727,
723
+ "mmlu_eval_accuracy_college_medicine": 0.36363636363636365,
724
  "mmlu_eval_accuracy_college_physics": 0.36363636363636365,
725
  "mmlu_eval_accuracy_computer_security": 0.45454545454545453,
726
  "mmlu_eval_accuracy_conceptual_physics": 0.34615384615384615,
727
+ "mmlu_eval_accuracy_econometrics": 0.16666666666666666,
728
  "mmlu_eval_accuracy_electrical_engineering": 0.25,
729
  "mmlu_eval_accuracy_elementary_mathematics": 0.2926829268292683,
730
  "mmlu_eval_accuracy_formal_logic": 0.07142857142857142,
 
732
  "mmlu_eval_accuracy_high_school_biology": 0.375,
733
  "mmlu_eval_accuracy_high_school_chemistry": 0.2727272727272727,
734
  "mmlu_eval_accuracy_high_school_computer_science": 0.5555555555555556,
735
+ "mmlu_eval_accuracy_high_school_european_history": 0.6666666666666666,
736
  "mmlu_eval_accuracy_high_school_geography": 0.9090909090909091,
737
+ "mmlu_eval_accuracy_high_school_government_and_politics": 0.5238095238095238,
738
+ "mmlu_eval_accuracy_high_school_macroeconomics": 0.46511627906976744,
739
+ "mmlu_eval_accuracy_high_school_mathematics": 0.27586206896551724,
740
+ "mmlu_eval_accuracy_high_school_microeconomics": 0.38461538461538464,
741
  "mmlu_eval_accuracy_high_school_physics": 0.11764705882352941,
742
  "mmlu_eval_accuracy_high_school_psychology": 0.8833333333333333,
743
+ "mmlu_eval_accuracy_high_school_statistics": 0.2608695652173913,
744
  "mmlu_eval_accuracy_high_school_us_history": 0.5909090909090909,
745
  "mmlu_eval_accuracy_high_school_world_history": 0.7307692307692307,
746
+ "mmlu_eval_accuracy_human_aging": 0.7391304347826086,
747
  "mmlu_eval_accuracy_human_sexuality": 0.3333333333333333,
748
  "mmlu_eval_accuracy_international_law": 0.8461538461538461,
749
+ "mmlu_eval_accuracy_jurisprudence": 0.45454545454545453,
750
  "mmlu_eval_accuracy_logical_fallacies": 0.6666666666666666,
751
+ "mmlu_eval_accuracy_machine_learning": 0.2727272727272727,
752
  "mmlu_eval_accuracy_management": 0.6363636363636364,
753
  "mmlu_eval_accuracy_marketing": 0.84,
754
  "mmlu_eval_accuracy_medical_genetics": 0.9090909090909091,
755
+ "mmlu_eval_accuracy_miscellaneous": 0.6744186046511628,
756
+ "mmlu_eval_accuracy_moral_disputes": 0.5,
757
+ "mmlu_eval_accuracy_moral_scenarios": 0.23,
758
  "mmlu_eval_accuracy_nutrition": 0.6363636363636364,
759
  "mmlu_eval_accuracy_philosophy": 0.4411764705882353,
760
+ "mmlu_eval_accuracy_prehistory": 0.45714285714285713,
761
+ "mmlu_eval_accuracy_professional_accounting": 0.3548387096774194,
762
+ "mmlu_eval_accuracy_professional_law": 0.3,
763
  "mmlu_eval_accuracy_professional_medicine": 0.5806451612903226,
764
+ "mmlu_eval_accuracy_professional_psychology": 0.5072463768115942,
765
  "mmlu_eval_accuracy_public_relations": 0.6666666666666666,
766
  "mmlu_eval_accuracy_security_studies": 0.4074074074074074,
767
  "mmlu_eval_accuracy_sociology": 0.6363636363636364,
768
  "mmlu_eval_accuracy_us_foreign_policy": 0.7272727272727273,
769
  "mmlu_eval_accuracy_virology": 0.5555555555555556,
770
+ "mmlu_eval_accuracy_world_religions": 0.6842105263157895,
771
+ "mmlu_loss": 1.4866046660796157,
772
  "step": 800
773
  }
774
  ],
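The `trainer_state.json` diff rewrites the logged history with slightly different loss, eval, and MMLU values from the re-run; the headline change is `best_metric` (the step-800 eval loss) moving from 0.756199 to 0.756354, with `best_model_checkpoint` still pointing at `experts/expert-16/checkpoint-800`. A small sketch for reading those fields back out of the checkpoint:

```python
import json

with open("checkpoint-800/trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.7563537359237671 after this commit
print(state["best_model_checkpoint"])  # experts/expert-16/checkpoint-800

# Most recent evaluation entry in the logged history:
evals = [e for e in state["log_history"] if "eval_loss" in e]
print(evals[-1]["step"], evals[-1]["eval_loss"])
```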
checkpoint-800/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d55ea0037adc27e709c2f87c727ac3fd55b607c5afed1c76d4262111f780937e
+oid sha256:277dab2895cbf8d8b713e5f0339719c5d50d193c9afc1399ad953aa4910a2fab
 size 5819
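Taken together, the commit refreshes the step-800 adapter weights, optimizer state, RNG state, and training args. A hedged sketch of loading the pushed adapter with PEFT 0.4.0; the base model identifier is a placeholder, since the base model is not named anywhere in this diff:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

BASE_MODEL = "base-model-id"      # placeholder: not specified in this commit
ADAPTER_DIR = "checkpoint-800"    # contains adapter_config.json and adapter_model.bin

base = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    ),
    device_map="auto",
)
model = PeftModel.from_pretrained(base, ADAPTER_DIR)
```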