pakphum committed (verified)
Commit c1ae72d · 1 Parent(s): 6d55ba5

End of training

README.md CHANGED
@@ -4,6 +4,7 @@ license: llama3.2
 base_model: meta-llama/Llama-3.2-3B-Instruct
 tags:
 - llama-factory
+- lora
 - generated_from_trainer
 model-index:
 - name: qlora-llama3b-all
@@ -15,7 +16,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # qlora-llama3b-all
 
-This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on an unknown dataset.
+This model is a fine-tuned version of [meta-llama/Llama-3.2-3B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) on the train-all dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.5163
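The card above only pins the base model and the LoRA-style tags. As a rough usage sketch, an adapter like this is typically attached to the base model with `peft`; note that the repo id `pakphum/qlora-llama3b-all` below is an assumption inferred from the committer and model-index name, not something this diff confirms.

```python
# Hedged sketch: attach a LoRA/QLoRA adapter to the base Llama-3.2-3B-Instruct model.
# The adapter repo id is hypothetical (inferred from committer + model-index name).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "meta-llama/Llama-3.2-3B-Instruct"
adapter_id = "pakphum/qlora-llama3b-all"  # assumption, adjust to the actual repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

# Use the instruct model's chat template for a quick smoke test.
messages = [{"role": "user", "content": "Say hello in one sentence."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```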
 
all_results.json ADDED
@@ -0,0 +1,12 @@
+{
+    "epoch": 4.444444444444445,
+    "eval_loss": 0.5163235664367676,
+    "eval_runtime": 8.3814,
+    "eval_samples_per_second": 11.931,
+    "eval_steps_per_second": 11.931,
+    "total_flos": 1.496402435211264e+16,
+    "train_loss": 0.3140584453344345,
+    "train_runtime": 1600.6227,
+    "train_samples_per_second": 2.499,
+    "train_steps_per_second": 0.312
+}
eval_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "epoch": 4.444444444444445,
+    "eval_loss": 0.5163235664367676,
+    "eval_runtime": 8.3814,
+    "eval_samples_per_second": 11.931,
+    "eval_steps_per_second": 11.931
+}
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 4.444444444444445,
+    "total_flos": 1.496402435211264e+16,
+    "train_loss": 0.3140584453344345,
+    "train_runtime": 1600.6227,
+    "train_samples_per_second": 2.499,
+    "train_steps_per_second": 0.312
+}
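For anyone consuming these metric dumps programmatically, here is a minimal sketch (paths assume a local clone of this repo) that reads the exported JSON files back and derives a couple of rough figures from the reported throughput; the samples-per-step number is only an approximation, not a logged value.

```python
# Read the exported metrics back and derive rough throughput figures.
# Assumes train_results.json / eval_results.json sit in the current directory
# (e.g. a local clone of this repo).
import json

with open("train_results.json") as f:
    train = json.load(f)
with open("eval_results.json") as f:
    evals = json.load(f)

samples = train["train_runtime"] * train["train_samples_per_second"]  # ~4000
steps = train["train_runtime"] * train["train_steps_per_second"]      # ~500
print(f"~{samples:.0f} samples over ~{steps:.0f} optimizer steps "
      f"(~{samples / steps:.1f} samples per step)")
print(f"final train loss: {train['train_loss']:.4f}, eval loss: {evals['eval_loss']:.4f}")
```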
trainer_state.json ADDED
@@ -0,0 +1,792 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 4.444444444444445,
+  "eval_steps": 10,
+  "global_step": 500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.08888888888888889,
+      "grad_norm": 3.339972496032715,
+      "learning_rate": 4e-05,
+      "loss": 2.2556,
+      "step": 10
+    },
+    {
+      "epoch": 0.08888888888888889,
+      "eval_loss": 1.899280071258545,
+      "eval_runtime": 8.2861,
+      "eval_samples_per_second": 12.068,
+      "eval_steps_per_second": 12.068,
+      "step": 10
+    },
+    {
+      "epoch": 0.17777777777777778,
+      "grad_norm": 4.27776575088501,
+      "learning_rate": 8e-05,
+      "loss": 1.5026,
+      "step": 20
+    },
+    {
+      "epoch": 0.17777777777777778,
+      "eval_loss": 0.8436458706855774,
+      "eval_runtime": 8.2809,
+      "eval_samples_per_second": 12.076,
+      "eval_steps_per_second": 12.076,
+      "step": 20
+    },
+    {
+      "epoch": 0.26666666666666666,
+      "grad_norm": 2.9827308654785156,
+      "learning_rate": 0.00012,
+      "loss": 0.8291,
+      "step": 30
+    },
+    {
+      "epoch": 0.26666666666666666,
+      "eval_loss": 0.6633767485618591,
+      "eval_runtime": 8.2735,
+      "eval_samples_per_second": 12.087,
+      "eval_steps_per_second": 12.087,
+      "step": 30
+    },
+    {
+      "epoch": 0.35555555555555557,
+      "grad_norm": 2.1213743686676025,
+      "learning_rate": 0.00016,
+      "loss": 0.6688,
+      "step": 40
+    },
+    {
+      "epoch": 0.35555555555555557,
+      "eval_loss": 0.5723012685775757,
+      "eval_runtime": 8.273,
+      "eval_samples_per_second": 12.087,
+      "eval_steps_per_second": 12.087,
+      "step": 40
+    },
+    {
+      "epoch": 0.4444444444444444,
+      "grad_norm": 2.0819756984710693,
+      "learning_rate": 0.0002,
+      "loss": 0.6339,
+      "step": 50
+    },
+    {
+      "epoch": 0.4444444444444444,
+      "eval_loss": 0.5510777831077576,
+      "eval_runtime": 8.2675,
+      "eval_samples_per_second": 12.096,
+      "eval_steps_per_second": 12.096,
+      "step": 50
+    },
+    {
+      "epoch": 0.5333333333333333,
+      "grad_norm": 1.9045137166976929,
+      "learning_rate": 0.00019975640502598244,
+      "loss": 0.5258,
+      "step": 60
+    },
+    {
+      "epoch": 0.5333333333333333,
+      "eval_loss": 0.4759778082370758,
+      "eval_runtime": 8.2161,
+      "eval_samples_per_second": 12.171,
+      "eval_steps_per_second": 12.171,
+      "step": 60
+    },
+    {
+      "epoch": 0.6222222222222222,
+      "grad_norm": 1.4711352586746216,
+      "learning_rate": 0.00019902680687415705,
+      "loss": 0.4825,
+      "step": 70
+    },
+    {
+      "epoch": 0.6222222222222222,
+      "eval_loss": 0.4696222245693207,
+      "eval_runtime": 8.0384,
+      "eval_samples_per_second": 12.44,
+      "eval_steps_per_second": 12.44,
+      "step": 70
+    },
+    {
+      "epoch": 0.7111111111111111,
+      "grad_norm": 1.3467007875442505,
+      "learning_rate": 0.00019781476007338058,
+      "loss": 0.5488,
+      "step": 80
+    },
+    {
+      "epoch": 0.7111111111111111,
+      "eval_loss": 0.4698783755302429,
+      "eval_runtime": 8.0224,
+      "eval_samples_per_second": 12.465,
+      "eval_steps_per_second": 12.465,
+      "step": 80
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 1.3427356481552124,
+      "learning_rate": 0.0001961261695938319,
+      "loss": 0.4231,
+      "step": 90
+    },
+    {
+      "epoch": 0.8,
+      "eval_loss": 0.45969489216804504,
+      "eval_runtime": 8.0315,
+      "eval_samples_per_second": 12.451,
+      "eval_steps_per_second": 12.451,
+      "step": 90
+    },
+    {
+      "epoch": 0.8888888888888888,
+      "grad_norm": 1.3022139072418213,
+      "learning_rate": 0.00019396926207859084,
+      "loss": 0.4558,
+      "step": 100
+    },
+    {
+      "epoch": 0.8888888888888888,
+      "eval_loss": 0.4219018220901489,
+      "eval_runtime": 8.0414,
+      "eval_samples_per_second": 12.436,
+      "eval_steps_per_second": 12.436,
+      "step": 100
+    },
+    {
+      "epoch": 0.9777777777777777,
+      "grad_norm": 1.149550199508667,
+      "learning_rate": 0.0001913545457642601,
+      "loss": 0.4588,
+      "step": 110
+    },
+    {
+      "epoch": 0.9777777777777777,
+      "eval_loss": 0.41754281520843506,
+      "eval_runtime": 8.0271,
+      "eval_samples_per_second": 12.458,
+      "eval_steps_per_second": 12.458,
+      "step": 110
+    },
+    {
+      "epoch": 1.0666666666666667,
+      "grad_norm": 1.335131049156189,
+      "learning_rate": 0.00018829475928589271,
+      "loss": 0.4592,
+      "step": 120
+    },
+    {
+      "epoch": 1.0666666666666667,
+      "eval_loss": 0.4288429617881775,
+      "eval_runtime": 8.0429,
+      "eval_samples_per_second": 12.433,
+      "eval_steps_per_second": 12.433,
+      "step": 120
+    },
+    {
+      "epoch": 1.1555555555555554,
+      "grad_norm": 1.0977760553359985,
+      "learning_rate": 0.0001848048096156426,
+      "loss": 0.2996,
+      "step": 130
+    },
+    {
+      "epoch": 1.1555555555555554,
+      "eval_loss": 0.3918496072292328,
+      "eval_runtime": 8.0388,
+      "eval_samples_per_second": 12.44,
+      "eval_steps_per_second": 12.44,
+      "step": 130
+    },
+    {
+      "epoch": 1.2444444444444445,
+      "grad_norm": 1.511498212814331,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.3269,
+      "step": 140
+    },
+    {
+      "epoch": 1.2444444444444445,
+      "eval_loss": 0.41831883788108826,
+      "eval_runtime": 8.0461,
+      "eval_samples_per_second": 12.428,
+      "eval_steps_per_second": 12.428,
+      "step": 140
+    },
+    {
+      "epoch": 1.3333333333333333,
+      "grad_norm": 1.6732897758483887,
+      "learning_rate": 0.0001766044443118978,
+      "loss": 0.347,
+      "step": 150
+    },
+    {
+      "epoch": 1.3333333333333333,
+      "eval_loss": 0.43138808012008667,
+      "eval_runtime": 8.0313,
+      "eval_samples_per_second": 12.451,
+      "eval_steps_per_second": 12.451,
+      "step": 150
+    },
+    {
+      "epoch": 1.4222222222222223,
+      "grad_norm": 1.5496058464050293,
+      "learning_rate": 0.0001719339800338651,
+      "loss": 0.3251,
+      "step": 160
+    },
+    {
+      "epoch": 1.4222222222222223,
+      "eval_loss": 0.3889056444168091,
+      "eval_runtime": 8.0322,
+      "eval_samples_per_second": 12.45,
+      "eval_steps_per_second": 12.45,
+      "step": 160
+    },
+    {
+      "epoch": 1.511111111111111,
+      "grad_norm": 1.3534138202667236,
+      "learning_rate": 0.00016691306063588583,
+      "loss": 0.3035,
+      "step": 170
+    },
+    {
+      "epoch": 1.511111111111111,
+      "eval_loss": 0.37887275218963623,
+      "eval_runtime": 8.0354,
+      "eval_samples_per_second": 12.445,
+      "eval_steps_per_second": 12.445,
+      "step": 170
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 1.4648184776306152,
+      "learning_rate": 0.0001615661475325658,
+      "loss": 0.3141,
+      "step": 180
+    },
+    {
+      "epoch": 1.6,
+      "eval_loss": 0.38691258430480957,
+      "eval_runtime": 8.0374,
+      "eval_samples_per_second": 12.442,
+      "eval_steps_per_second": 12.442,
+      "step": 180
+    },
+    {
+      "epoch": 1.6888888888888889,
+      "grad_norm": 1.0812690258026123,
+      "learning_rate": 0.0001559192903470747,
+      "loss": 0.2878,
+      "step": 190
+    },
+    {
+      "epoch": 1.6888888888888889,
+      "eval_loss": 0.3909819722175598,
+      "eval_runtime": 8.0411,
+      "eval_samples_per_second": 12.436,
+      "eval_steps_per_second": 12.436,
+      "step": 190
+    },
+    {
+      "epoch": 1.7777777777777777,
+      "grad_norm": 1.936132788658142,
+      "learning_rate": 0.00015000000000000001,
+      "loss": 0.3063,
+      "step": 200
+    },
+    {
+      "epoch": 1.7777777777777777,
+      "eval_loss": 0.39576366543769836,
+      "eval_runtime": 8.0433,
+      "eval_samples_per_second": 12.433,
+      "eval_steps_per_second": 12.433,
+      "step": 200
+    },
+    {
+      "epoch": 1.8666666666666667,
+      "grad_norm": 1.551062822341919,
+      "learning_rate": 0.00014383711467890774,
+      "loss": 0.2748,
+      "step": 210
+    },
+    {
+      "epoch": 1.8666666666666667,
+      "eval_loss": 0.3819361627101898,
+      "eval_runtime": 8.0342,
+      "eval_samples_per_second": 12.447,
+      "eval_steps_per_second": 12.447,
+      "step": 210
+    },
+    {
+      "epoch": 1.9555555555555557,
+      "grad_norm": 1.2996041774749756,
+      "learning_rate": 0.00013746065934159123,
+      "loss": 0.2725,
+      "step": 220
+    },
+    {
+      "epoch": 1.9555555555555557,
+      "eval_loss": 0.4039897620677948,
+      "eval_runtime": 8.024,
+      "eval_samples_per_second": 12.463,
+      "eval_steps_per_second": 12.463,
+      "step": 220
+    },
+    {
+      "epoch": 2.0444444444444443,
+      "grad_norm": 1.0756527185440063,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 0.2897,
+      "step": 230
+    },
+    {
+      "epoch": 2.0444444444444443,
+      "eval_loss": 0.3928260803222656,
+      "eval_runtime": 8.0625,
+      "eval_samples_per_second": 12.403,
+      "eval_steps_per_second": 12.403,
+      "step": 230
+    },
+    {
+      "epoch": 2.1333333333333333,
+      "grad_norm": 0.8828668594360352,
+      "learning_rate": 0.00012419218955996676,
+      "loss": 0.1813,
+      "step": 240
+    },
+    {
+      "epoch": 2.1333333333333333,
+      "eval_loss": 0.4048071801662445,
+      "eval_runtime": 8.0422,
+      "eval_samples_per_second": 12.434,
+      "eval_steps_per_second": 12.434,
+      "step": 240
+    },
+    {
+      "epoch": 2.2222222222222223,
+      "grad_norm": 1.6169154644012451,
+      "learning_rate": 0.00011736481776669306,
+      "loss": 0.1965,
+      "step": 250
+    },
+    {
+      "epoch": 2.2222222222222223,
+      "eval_loss": 0.4035675525665283,
+      "eval_runtime": 8.0344,
+      "eval_samples_per_second": 12.447,
+      "eval_steps_per_second": 12.447,
+      "step": 250
+    },
+    {
+      "epoch": 2.311111111111111,
+      "grad_norm": 1.6981542110443115,
+      "learning_rate": 0.00011045284632676536,
+      "loss": 0.1751,
+      "step": 260
+    },
+    {
+      "epoch": 2.311111111111111,
+      "eval_loss": 0.4221409857273102,
+      "eval_runtime": 8.0296,
+      "eval_samples_per_second": 12.454,
+      "eval_steps_per_second": 12.454,
+      "step": 260
+    },
+    {
+      "epoch": 2.4,
+      "grad_norm": 1.5531835556030273,
+      "learning_rate": 0.00010348994967025012,
+      "loss": 0.1739,
+      "step": 270
+    },
+    {
+      "epoch": 2.4,
+      "eval_loss": 0.40371406078338623,
+      "eval_runtime": 8.017,
+      "eval_samples_per_second": 12.474,
+      "eval_steps_per_second": 12.474,
+      "step": 270
+    },
+    {
+      "epoch": 2.488888888888889,
+      "grad_norm": 1.6275184154510498,
+      "learning_rate": 9.651005032974994e-05,
+      "loss": 0.1629,
+      "step": 280
+    },
+    {
+      "epoch": 2.488888888888889,
+      "eval_loss": 0.41767382621765137,
+      "eval_runtime": 8.026,
+      "eval_samples_per_second": 12.46,
+      "eval_steps_per_second": 12.46,
+      "step": 280
+    },
+    {
+      "epoch": 2.5777777777777775,
+      "grad_norm": 1.6192083358764648,
+      "learning_rate": 8.954715367323468e-05,
+      "loss": 0.1919,
+      "step": 290
+    },
+    {
+      "epoch": 2.5777777777777775,
+      "eval_loss": 0.4002458155155182,
+      "eval_runtime": 8.0399,
+      "eval_samples_per_second": 12.438,
+      "eval_steps_per_second": 12.438,
+      "step": 290
+    },
+    {
+      "epoch": 2.6666666666666665,
+      "grad_norm": 1.4782391786575317,
+      "learning_rate": 8.263518223330697e-05,
+      "loss": 0.1804,
+      "step": 300
+    },
+    {
+      "epoch": 2.6666666666666665,
+      "eval_loss": 0.4098145365715027,
+      "eval_runtime": 8.0329,
+      "eval_samples_per_second": 12.449,
+      "eval_steps_per_second": 12.449,
+      "step": 300
+    },
+    {
+      "epoch": 2.7555555555555555,
+      "grad_norm": 1.2380214929580688,
+      "learning_rate": 7.580781044003324e-05,
+      "loss": 0.1569,
+      "step": 310
+    },
+    {
+      "epoch": 2.7555555555555555,
+      "eval_loss": 0.41249945759773254,
+      "eval_runtime": 8.0306,
+      "eval_samples_per_second": 12.452,
+      "eval_steps_per_second": 12.452,
+      "step": 310
+    },
+    {
+      "epoch": 2.8444444444444446,
+      "grad_norm": 1.0430532693862915,
+      "learning_rate": 6.909830056250527e-05,
+      "loss": 0.1914,
+      "step": 320
+    },
+    {
+      "epoch": 2.8444444444444446,
+      "eval_loss": 0.4051912724971771,
+      "eval_runtime": 8.0353,
+      "eval_samples_per_second": 12.445,
+      "eval_steps_per_second": 12.445,
+      "step": 320
+    },
+    {
+      "epoch": 2.9333333333333336,
+      "grad_norm": 0.8478929400444031,
+      "learning_rate": 6.25393406584088e-05,
+      "loss": 0.144,
+      "step": 330
+    },
+    {
+      "epoch": 2.9333333333333336,
+      "eval_loss": 0.4041104018688202,
+      "eval_runtime": 8.0292,
+      "eval_samples_per_second": 12.455,
+      "eval_steps_per_second": 12.455,
+      "step": 330
+    },
+    {
+      "epoch": 3.022222222222222,
+      "grad_norm": 0.7308769822120667,
+      "learning_rate": 5.616288532109225e-05,
+      "loss": 0.1738,
+      "step": 340
+    },
+    {
+      "epoch": 3.022222222222222,
+      "eval_loss": 0.42209112644195557,
+      "eval_runtime": 8.0367,
+      "eval_samples_per_second": 12.443,
+      "eval_steps_per_second": 12.443,
+      "step": 340
+    },
+    {
+      "epoch": 3.111111111111111,
+      "grad_norm": 0.47878319025039673,
+      "learning_rate": 5.000000000000002e-05,
+      "loss": 0.1087,
+      "step": 350
+    },
+    {
+      "epoch": 3.111111111111111,
+      "eval_loss": 0.42136698961257935,
+      "eval_runtime": 8.0346,
+      "eval_samples_per_second": 12.446,
+      "eval_steps_per_second": 12.446,
+      "step": 350
+    },
+    {
+      "epoch": 3.2,
+      "grad_norm": 0.8724251389503479,
+      "learning_rate": 4.4080709652925336e-05,
+      "loss": 0.0876,
+      "step": 360
+    },
+    {
+      "epoch": 3.2,
+      "eval_loss": 0.4379313588142395,
+      "eval_runtime": 8.025,
+      "eval_samples_per_second": 12.461,
+      "eval_steps_per_second": 12.461,
+      "step": 360
+    },
+    {
+      "epoch": 3.2888888888888888,
+      "grad_norm": 1.1260199546813965,
+      "learning_rate": 3.843385246743417e-05,
+      "loss": 0.0857,
+      "step": 370
+    },
+    {
+      "epoch": 3.2888888888888888,
+      "eval_loss": 0.46546536684036255,
+      "eval_runtime": 8.0261,
+      "eval_samples_per_second": 12.459,
+      "eval_steps_per_second": 12.459,
+      "step": 370
+    },
+    {
+      "epoch": 3.3777777777777778,
+      "grad_norm": 0.8650846481323242,
+      "learning_rate": 3.308693936411421e-05,
+      "loss": 0.0978,
+      "step": 380
+    },
+    {
+      "epoch": 3.3777777777777778,
+      "eval_loss": 0.4744359850883484,
+      "eval_runtime": 8.0343,
+      "eval_samples_per_second": 12.447,
+      "eval_steps_per_second": 12.447,
+      "step": 380
+    },
+    {
+      "epoch": 3.466666666666667,
+      "grad_norm": 1.4763766527175903,
+      "learning_rate": 2.8066019966134904e-05,
+      "loss": 0.0746,
+      "step": 390
+    },
+    {
+      "epoch": 3.466666666666667,
+      "eval_loss": 0.4815245568752289,
+      "eval_runtime": 8.0329,
+      "eval_samples_per_second": 12.449,
+      "eval_steps_per_second": 12.449,
+      "step": 390
+    },
+    {
+      "epoch": 3.5555555555555554,
+      "grad_norm": 0.7600903511047363,
+      "learning_rate": 2.339555568810221e-05,
+      "loss": 0.0897,
+      "step": 400
+    },
+    {
+      "epoch": 3.5555555555555554,
+      "eval_loss": 0.4889250695705414,
+      "eval_runtime": 8.0394,
+      "eval_samples_per_second": 12.439,
+      "eval_steps_per_second": 12.439,
+      "step": 400
+    },
+    {
+      "epoch": 3.6444444444444444,
+      "grad_norm": 0.7227004766464233,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.0645,
+      "step": 410
+    },
+    {
+      "epoch": 3.6444444444444444,
+      "eval_loss": 0.4995201826095581,
+      "eval_runtime": 8.0321,
+      "eval_samples_per_second": 12.45,
+      "eval_steps_per_second": 12.45,
+      "step": 410
+    },
+    {
+      "epoch": 3.7333333333333334,
+      "grad_norm": 1.2814204692840576,
+      "learning_rate": 1.5195190384357404e-05,
+      "loss": 0.0649,
+      "step": 420
+    },
+    {
+      "epoch": 3.7333333333333334,
+      "eval_loss": 0.5078675746917725,
+      "eval_runtime": 8.0419,
+      "eval_samples_per_second": 12.435,
+      "eval_steps_per_second": 12.435,
+      "step": 420
+    },
+    {
+      "epoch": 3.822222222222222,
+      "grad_norm": 1.3571559190750122,
+      "learning_rate": 1.1705240714107302e-05,
+      "loss": 0.0896,
+      "step": 430
+    },
+    {
+      "epoch": 3.822222222222222,
+      "eval_loss": 0.5097964406013489,
+      "eval_runtime": 8.034,
+      "eval_samples_per_second": 12.447,
+      "eval_steps_per_second": 12.447,
+      "step": 430
+    },
+    {
+      "epoch": 3.911111111111111,
+      "grad_norm": 1.125535488128662,
+      "learning_rate": 8.645454235739903e-06,
+      "loss": 0.0788,
+      "step": 440
+    },
+    {
+      "epoch": 3.911111111111111,
+      "eval_loss": 0.5094956755638123,
+      "eval_runtime": 8.0631,
+      "eval_samples_per_second": 12.402,
+      "eval_steps_per_second": 12.402,
+      "step": 440
+    },
+    {
+      "epoch": 4.0,
+      "grad_norm": 3.562880754470825,
+      "learning_rate": 6.030737921409169e-06,
+      "loss": 0.0886,
+      "step": 450
+    },
+    {
+      "epoch": 4.0,
+      "eval_loss": 0.51046222448349,
+      "eval_runtime": 8.1706,
+      "eval_samples_per_second": 12.239,
+      "eval_steps_per_second": 12.239,
+      "step": 450
+    },
+    {
+      "epoch": 4.088888888888889,
+      "grad_norm": 0.6980682611465454,
+      "learning_rate": 3.873830406168111e-06,
+      "loss": 0.0471,
+      "step": 460
+    },
+    {
+      "epoch": 4.088888888888889,
+      "eval_loss": 0.511073887348175,
+      "eval_runtime": 8.2725,
+      "eval_samples_per_second": 12.088,
+      "eval_steps_per_second": 12.088,
+      "step": 460
+    },
+    {
+      "epoch": 4.177777777777778,
+      "grad_norm": 0.7668414115905762,
+      "learning_rate": 2.1852399266194314e-06,
+      "loss": 0.0461,
+      "step": 470
+    },
+    {
+      "epoch": 4.177777777777778,
+      "eval_loss": 0.515235185623169,
+      "eval_runtime": 8.2096,
+      "eval_samples_per_second": 12.181,
+      "eval_steps_per_second": 12.181,
+      "step": 470
+    },
+    {
+      "epoch": 4.266666666666667,
+      "grad_norm": 1.7168527841567993,
+      "learning_rate": 9.731931258429638e-07,
+      "loss": 0.0607,
+      "step": 480
+    },
+    {
+      "epoch": 4.266666666666667,
+      "eval_loss": 0.5151567459106445,
+      "eval_runtime": 8.2315,
+      "eval_samples_per_second": 12.149,
+      "eval_steps_per_second": 12.149,
+      "step": 480
+    },
+    {
+      "epoch": 4.355555555555555,
+      "grad_norm": 0.8711584806442261,
+      "learning_rate": 2.4359497401758024e-07,
+      "loss": 0.0473,
+      "step": 490
+    },
+    {
+      "epoch": 4.355555555555555,
+      "eval_loss": 0.519190788269043,
+      "eval_runtime": 8.2622,
+      "eval_samples_per_second": 12.103,
+      "eval_steps_per_second": 12.103,
+      "step": 490
+    },
+    {
+      "epoch": 4.444444444444445,
+      "grad_norm": 1.0105704069137573,
+      "learning_rate": 0.0,
+      "loss": 0.052,
+      "step": 500
+    },
+    {
+      "epoch": 4.444444444444445,
+      "eval_loss": 0.5163235664367676,
+      "eval_runtime": 8.4465,
+      "eval_samples_per_second": 11.839,
+      "eval_steps_per_second": 11.839,
+      "step": 500
+    },
+    {
+      "epoch": 4.444444444444445,
+      "step": 500,
+      "total_flos": 1.496402435211264e+16,
+      "train_loss": 0.3140584453344345,
+      "train_runtime": 1600.6227,
+      "train_samples_per_second": 2.499,
+      "train_steps_per_second": 0.312
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 500,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.496402435211264e+16,
+  "train_batch_size": 1,
+  "trial_name": null,
+  "trial_params": null
+}
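The learning_rate values in log_history look consistent with a linear warmup over the first 50 steps to a 2e-4 peak, followed by cosine decay to zero at step 500 (e.g. step 200 logs exactly 1.5e-4, which matches the cosine formula at that point). A small sketch, under that assumed schedule, that recomputes it and compares against the logged values:

```python
# Recompute an assumed linear-warmup + cosine schedule (peak 2e-4, 50 warmup
# steps, 500 total steps) and compare it with the learning_rate values in
# trainer_state.json. The schedule shape is inferred from the logged numbers,
# not stated anywhere in this commit.
import json
import math

PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 2e-4, 50, 500

def expected_lr(step: int) -> float:
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return 0.5 * PEAK_LR * (1.0 + math.cos(math.pi * progress))

with open("trainer_state.json") as f:
    history = json.load(f)["log_history"]

for entry in history:
    if "learning_rate" in entry:
        print(f"step {entry['step']:3d}: logged {entry['learning_rate']:.3e}, "
              f"expected {expected_lr(entry['step']):.3e}")
```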
training_eval_loss.png ADDED
training_loss.png ADDED
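The two PNGs added above presumably plot this same history; here is a minimal sketch (assuming a local clone of the repo and matplotlib) that rebuilds comparable train/eval loss curves directly from trainer_state.json:

```python
# Rebuild loss curves similar to training_loss.png / training_eval_loss.png
# from log_history in trainer_state.json (assumes a local clone and matplotlib).
import json
import matplotlib.pyplot as plt

with open("trainer_state.json") as f:
    history = json.load(f)["log_history"]

train_pts = [(e["step"], e["loss"]) for e in history if "loss" in e]
eval_pts = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]

plt.plot([s for s, _ in train_pts], [l for _, l in train_pts], label="train loss")
plt.plot([s for s, _ in eval_pts], [l for _, l in eval_pts], label="eval loss")
plt.xlabel("step")
plt.ylabel("loss")
plt.legend()
plt.tight_layout()
plt.savefig("loss_curves_rebuilt.png")
```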