sameearif committed
Commit 5076306 · Parent: 2f53287

Upload folder using huggingface_hub

added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "</s>": 1,
+   "<pad>": 0,
+   "<unk>": 2
+ }
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_name_or_path": "google/mt5-base",
+   "architectures": [
+     "MT5ForConditionalGeneration"
+   ],
+   "classifier_dropout": 0.0,
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "mt5",
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "T5Tokenizer",
+   "torch_dtype": "float32",
+   "transformers_version": "4.35.0.dev0",
+   "use_cache": true,
+   "vocab_size": 250112
+ }
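
config.json identifies the checkpoint as a fine-tune of google/mt5-base (MT5ForConditionalGeneration: d_model 768, 12 encoder and 12 decoder layers, vocab_size 250112) saved in float32. A minimal loading sketch follows; the repo id is a placeholder, since the commit page does not state the final repository path, and a local path to the uploaded folder works just as well.

```python
# Minimal loading sketch. "sameearif/<this-repo>" is a placeholder: substitute the
# actual repository id this commit belongs to, or a local path to the folder.
from transformers import MT5ForConditionalGeneration, T5Tokenizer

model_path = "sameearif/<this-repo>"  # placeholder, not stated in the commit
tokenizer = T5Tokenizer.from_pretrained(model_path)              # reads spiece.model + tokenizer_config.json
model = MT5ForConditionalGeneration.from_pretrained(model_path)  # reads config.json + pytorch_model.bin
```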
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.35.0.dev0"
+ }
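
generation_config.json only pins the special-token ids (decoder_start_token_id 0, eos_token_id 1, pad_token_id 0), so generate() defaults to greedy decoding unless sampling or beam search is requested explicitly. A hedged usage sketch, continuing from the loading snippet above (the input sentence is an arbitrary example, not taken from the repository):

```python
# generate() picks up the eos/pad/decoder-start ids from generation_config.json.
inputs = tokenizer("An arbitrary example input sentence.", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```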
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:219c2d4b65e5327044fa88b1b5e3d085b5fb504c85ea760a46d230eec535465d
+ size 2329702581
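
pytorch_model.bin, like the other binaries below, is committed as a Git LFS pointer; the actual ~2.3 GB float32 weight file lives in LFS storage and is fetched on demand. One way to resolve it programmatically, again with the placeholder repo id:

```python
# Resolve the LFS pointer to the real weight file (placeholder repo id, as above).
from huggingface_hub import hf_hub_download

weights_path = hf_hub_download(repo_id="sameearif/<this-repo>", filename="pytorch_model.bin")
print(weights_path)  # local cache path to the 2,329,702,581-byte checkpoint
```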
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a4bf20501660e147da31ebbb6dddb7e92da6a31d7fa98b5cfbc78f2522df8040
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b77548800548f399126dce9b48f795d755c3720f2f4f6b475d3bf94a70aee971
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ef78f86560d809067d12bac6c09f19a462cb3af3f54d2b8acbba26e1433125d6
+ size 4309802
tokenizer_config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<pad>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "</s>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "<unk>",
+       "lstrip": true,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "</s>",
+   "extra_ids": 0,
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "T5Tokenizer",
+   "tokenizer_file": null,
+   "unk_token": "<unk>"
+ }
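
Together with added_tokens.json and special_tokens_map.json above, tokenizer_config.json fixes the T5Tokenizer special tokens at <pad>=0, </s>=1, <unk>=2 on top of the SentencePiece model in spiece.model (extra_ids is 0, so no <extra_id_*> sentinels are kept). A quick sanity check against those ids, reusing the tokenizer loaded earlier:

```python
# Confirm the special-token ids match added_tokens.json (<pad>=0, </s>=1, <unk>=2).
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.unk_token_id == 2
print(tokenizer.convert_ids_to_tokens([0, 1, 2]))  # ['<pad>', '</s>', '<unk>']
```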
trainer_state.json ADDED
@@ -0,0 +1,677 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 51890,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {"epoch": 0.05, "learning_rate": 4.95985096678872e-05, "loss": 8.2502, "step": 500},
+     {"epoch": 0.1, "learning_rate": 4.91970193357744e-05, "loss": 0.4362, "step": 1000},
+     {"epoch": 0.14, "learning_rate": 4.879552900366159e-05, "loss": 0.2711, "step": 1500},
+     {"epoch": 0.19, "learning_rate": 4.839403867154879e-05, "loss": 0.2293, "step": 2000},
+     {"epoch": 0.24, "learning_rate": 4.799254833943599e-05, "loss": 0.2035, "step": 2500},
+     {"epoch": 0.29, "learning_rate": 4.759105800732319e-05, "loss": 0.1796, "step": 3000},
+     {"epoch": 0.34, "learning_rate": 4.7189567675210385e-05, "loss": 0.1772, "step": 3500},
+     {"epoch": 0.39, "learning_rate": 4.678807734309758e-05, "loss": 0.1613, "step": 4000},
+     {"epoch": 0.43, "learning_rate": 4.6386587010984776e-05, "loss": 0.1587, "step": 4500},
+     {"epoch": 0.48, "learning_rate": 4.5985096678871974e-05, "loss": 0.1529, "step": 5000},
+     {"epoch": 0.53, "learning_rate": 4.558360634675917e-05, "loss": 0.1462, "step": 5500},
+     {"epoch": 0.58, "learning_rate": 4.518211601464637e-05, "loss": 0.1477, "step": 6000},
+     {"epoch": 0.63, "learning_rate": 4.478062568253357e-05, "loss": 0.1385, "step": 6500},
+     {"epoch": 0.67, "learning_rate": 4.437913535042076e-05, "loss": 0.137, "step": 7000},
+     {"epoch": 0.72, "learning_rate": 4.397764501830796e-05, "loss": 0.1299, "step": 7500},
+     {"epoch": 0.77, "learning_rate": 4.357615468619516e-05, "loss": 0.1336, "step": 8000},
+     {"epoch": 0.82, "learning_rate": 4.317466435408236e-05, "loss": 0.1288, "step": 8500},
+     {"epoch": 0.87, "learning_rate": 4.2773174021969556e-05, "loss": 0.1322, "step": 9000},
+     {"epoch": 0.92, "learning_rate": 4.237168368985675e-05, "loss": 0.1213, "step": 9500},
+     {"epoch": 0.96, "learning_rate": 4.1970193357743946e-05, "loss": 0.1286, "step": 10000},
+     {"epoch": 1.0, "eval_loss": 0.14025843143463135, "eval_runtime": 124.3791, "eval_samples_per_second": 89.798, "eval_steps_per_second": 11.232, "step": 10378},
+     {"epoch": 1.01, "learning_rate": 4.1568703025631145e-05, "loss": 0.1178, "step": 10500},
+     {"epoch": 1.06, "learning_rate": 4.116721269351834e-05, "loss": 0.1113, "step": 11000},
+     {"epoch": 1.11, "learning_rate": 4.076572236140554e-05, "loss": 0.1137, "step": 11500},
+     {"epoch": 1.16, "learning_rate": 4.0364232029292733e-05, "loss": 0.1077, "step": 12000},
+     {"epoch": 1.2, "learning_rate": 3.996274169717993e-05, "loss": 0.1075, "step": 12500},
+     {"epoch": 1.25, "learning_rate": 3.956125136506713e-05, "loss": 0.106, "step": 13000},
+     {"epoch": 1.3, "learning_rate": 3.915976103295433e-05, "loss": 0.107, "step": 13500},
+     {"epoch": 1.35, "learning_rate": 3.875827070084153e-05, "loss": 0.1057, "step": 14000},
+     {"epoch": 1.4, "learning_rate": 3.8356780368728726e-05, "loss": 0.0984, "step": 14500},
+     {"epoch": 1.45, "learning_rate": 3.795529003661592e-05, "loss": 0.1016, "step": 15000},
+     {"epoch": 1.49, "learning_rate": 3.7553799704503117e-05, "loss": 0.1019, "step": 15500},
+     {"epoch": 1.54, "learning_rate": 3.7152309372390315e-05, "loss": 0.1023, "step": 16000},
+     {"epoch": 1.59, "learning_rate": 3.6750819040277514e-05, "loss": 0.1004, "step": 16500},
+     {"epoch": 1.64, "learning_rate": 3.634932870816471e-05, "loss": 0.1008, "step": 17000},
+     {"epoch": 1.69, "learning_rate": 3.5947838376051904e-05, "loss": 0.0962, "step": 17500},
+     {"epoch": 1.73, "learning_rate": 3.55463480439391e-05, "loss": 0.1001, "step": 18000},
+     {"epoch": 1.78, "learning_rate": 3.51448577118263e-05, "loss": 0.0942, "step": 18500},
+     {"epoch": 1.83, "learning_rate": 3.47433673797135e-05, "loss": 0.0975, "step": 19000},
+     {"epoch": 1.88, "learning_rate": 3.43418770476007e-05, "loss": 0.1006, "step": 19500},
+     {"epoch": 1.93, "learning_rate": 3.394038671548789e-05, "loss": 0.0932, "step": 20000},
+     {"epoch": 1.98, "learning_rate": 3.353889638337509e-05, "loss": 0.0937, "step": 20500},
+     {"epoch": 2.0, "eval_loss": 0.12487868219614029, "eval_runtime": 124.206, "eval_samples_per_second": 89.923, "eval_steps_per_second": 11.247, "step": 20756},
+     {"epoch": 2.02, "learning_rate": 3.313740605126229e-05, "loss": 0.0927, "step": 21000},
+     {"epoch": 2.07, "learning_rate": 3.2735915719149485e-05, "loss": 0.0833, "step": 21500},
+     {"epoch": 2.12, "learning_rate": 3.2334425387036684e-05, "loss": 0.0838, "step": 22000},
+     {"epoch": 2.17, "learning_rate": 3.193293505492388e-05, "loss": 0.0833, "step": 22500},
+     {"epoch": 2.22, "learning_rate": 3.1531444722811074e-05, "loss": 0.0807, "step": 23000},
+     {"epoch": 2.26, "learning_rate": 3.112995439069827e-05, "loss": 0.0802, "step": 23500},
+     {"epoch": 2.31, "learning_rate": 3.072846405858547e-05, "loss": 0.0818, "step": 24000},
+     {"epoch": 2.36, "learning_rate": 3.032697372647267e-05, "loss": 0.0834, "step": 24500},
+     {"epoch": 2.41, "learning_rate": 2.9925483394359865e-05, "loss": 0.0815, "step": 25000},
+     {"epoch": 2.46, "learning_rate": 2.9523993062247064e-05, "loss": 0.0831, "step": 25500},
+     {"epoch": 2.51, "learning_rate": 2.912250273013426e-05, "loss": 0.0835, "step": 26000},
+     {"epoch": 2.55, "learning_rate": 2.8721012398021457e-05, "loss": 0.0812, "step": 26500},
+     {"epoch": 2.6, "learning_rate": 2.8319522065908656e-05, "loss": 0.0782, "step": 27000},
+     {"epoch": 2.65, "learning_rate": 2.791803173379585e-05, "loss": 0.084, "step": 27500},
+     {"epoch": 2.7, "learning_rate": 2.751654140168305e-05, "loss": 0.0821, "step": 28000},
+     {"epoch": 2.75, "learning_rate": 2.7115051069570248e-05, "loss": 0.0788, "step": 28500},
+     {"epoch": 2.79, "learning_rate": 2.6713560737457443e-05, "loss": 0.0838, "step": 29000},
+     {"epoch": 2.84, "learning_rate": 2.6312070405344642e-05, "loss": 0.0822, "step": 29500},
+     {"epoch": 2.89, "learning_rate": 2.5910580073231837e-05, "loss": 0.0801, "step": 30000},
+     {"epoch": 2.94, "learning_rate": 2.5509089741119035e-05, "loss": 0.0799, "step": 30500},
+     {"epoch": 2.99, "learning_rate": 2.5107599409006234e-05, "loss": 0.0809, "step": 31000},
+     {"epoch": 3.0, "eval_loss": 0.11599379032850266, "eval_runtime": 124.1737, "eval_samples_per_second": 89.947, "eval_steps_per_second": 11.25, "step": 31134},
+     {"epoch": 3.04, "learning_rate": 2.470610907689343e-05, "loss": 0.0754, "step": 31500},
+     {"epoch": 3.08, "learning_rate": 2.4304618744780628e-05, "loss": 0.0693, "step": 32000},
+     {"epoch": 3.13, "learning_rate": 2.3903128412667826e-05, "loss": 0.0726, "step": 32500},
+     {"epoch": 3.18, "learning_rate": 2.350163808055502e-05, "loss": 0.0689, "step": 33000},
+     {"epoch": 3.23, "learning_rate": 2.310014774844222e-05, "loss": 0.0718, "step": 33500},
+     {"epoch": 3.28, "learning_rate": 2.2698657416329415e-05, "loss": 0.07, "step": 34000},
+     {"epoch": 3.32, "learning_rate": 2.2297167084216614e-05, "loss": 0.0753, "step": 34500},
+     {"epoch": 3.37, "learning_rate": 2.1895676752103812e-05, "loss": 0.071, "step": 35000},
+     {"epoch": 3.42, "learning_rate": 2.1494186419991007e-05, "loss": 0.0688, "step": 35500},
+     {"epoch": 3.47, "learning_rate": 2.1092696087878206e-05, "loss": 0.0692, "step": 36000},
+     {"epoch": 3.52, "learning_rate": 2.06912057557654e-05, "loss": 0.0696, "step": 36500},
+     {"epoch": 3.57, "learning_rate": 2.02897154236526e-05, "loss": 0.0714, "step": 37000},
+     {"epoch": 3.61, "learning_rate": 1.9888225091539798e-05, "loss": 0.0682, "step": 37500},
+     {"epoch": 3.66, "learning_rate": 1.9486734759426993e-05, "loss": 0.0675, "step": 38000},
+     {"epoch": 3.71, "learning_rate": 1.9085244427314192e-05, "loss": 0.0689, "step": 38500},
+     {"epoch": 3.76, "learning_rate": 1.868375409520139e-05, "loss": 0.0727, "step": 39000},
+     {"epoch": 3.81, "learning_rate": 1.8282263763088585e-05, "loss": 0.0688, "step": 39500},
+     {"epoch": 3.85, "learning_rate": 1.7880773430975784e-05, "loss": 0.0677, "step": 40000},
+     {"epoch": 3.9, "learning_rate": 1.747928309886298e-05, "loss": 0.0664, "step": 40500},
+     {"epoch": 3.95, "learning_rate": 1.7077792766750178e-05, "loss": 0.0677, "step": 41000},
+     {"epoch": 4.0, "learning_rate": 1.6676302434637376e-05, "loss": 0.0712, "step": 41500},
+     {"epoch": 4.0, "eval_loss": 0.11910858005285263, "eval_runtime": 124.1925, "eval_samples_per_second": 89.933, "eval_steps_per_second": 11.249, "step": 41512},
+     {"epoch": 4.05, "learning_rate": 1.627481210252457e-05, "loss": 0.064, "step": 42000},
+     {"epoch": 4.1, "learning_rate": 1.587332177041177e-05, "loss": 0.0642, "step": 42500},
+     {"epoch": 4.14, "learning_rate": 1.547183143829897e-05, "loss": 0.0603, "step": 43000},
+     {"epoch": 4.19, "learning_rate": 1.5070341106186164e-05, "loss": 0.059, "step": 43500},
+     {"epoch": 4.24, "learning_rate": 1.4668850774073362e-05, "loss": 0.0636, "step": 44000},
+     {"epoch": 4.29, "learning_rate": 1.4267360441960559e-05, "loss": 0.0582, "step": 44500},
+     {"epoch": 4.34, "learning_rate": 1.3865870109847756e-05, "loss": 0.0648, "step": 45000},
+     {"epoch": 4.38, "learning_rate": 1.3464379777734953e-05, "loss": 0.0586, "step": 45500},
+     {"epoch": 4.43, "learning_rate": 1.3062889445622151e-05, "loss": 0.0612, "step": 46000},
+     {"epoch": 4.48, "learning_rate": 1.2661399113509348e-05, "loss": 0.0601, "step": 46500},
+     {"epoch": 4.53, "learning_rate": 1.2259908781396545e-05, "loss": 0.0618, "step": 47000},
+     {"epoch": 4.58, "learning_rate": 1.1858418449283742e-05, "loss": 0.0612, "step": 47500},
+     {"epoch": 4.63, "learning_rate": 1.145692811717094e-05, "loss": 0.0598, "step": 48000},
+     {"epoch": 4.67, "learning_rate": 1.1055437785058137e-05, "loss": 0.0609, "step": 48500},
+     {"epoch": 4.72, "learning_rate": 1.0653947452945334e-05, "loss": 0.0639, "step": 49000},
+     {"epoch": 4.77, "learning_rate": 1.0252457120832531e-05, "loss": 0.0639, "step": 49500},
+     {"epoch": 4.82, "learning_rate": 9.85096678871973e-06, "loss": 0.064, "step": 50000},
+     {"epoch": 4.87, "learning_rate": 9.449476456606926e-06, "loss": 0.0614, "step": 50500},
+     {"epoch": 4.91, "learning_rate": 9.047986124494123e-06, "loss": 0.0618, "step": 51000},
+     {"epoch": 4.96, "learning_rate": 8.64649579238132e-06, "loss": 0.0624, "step": 51500},
+     {"epoch": 5.0, "eval_loss": 0.1161259263753891, "eval_runtime": 124.2379, "eval_samples_per_second": 89.9, "eval_steps_per_second": 11.245, "step": 51890}
+   ],
+   "logging_steps": 500,
+   "max_steps": 62268,
+   "num_train_epochs": 6,
+   "save_steps": 500,
+   "total_flos": 4.977120588123341e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
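
trainer_state.json captures the training trajectory at this checkpoint: 5 of a planned 6 epochs (global_step 51890 of max_steps 62268), training loss falling from 8.25 at step 500 to roughly 0.06 by epoch 5, and eval_loss improving from 0.140 after epoch 1 to about 0.116, roughly flat from epoch 3 onward. A small sketch for extracting the per-epoch eval losses, assuming the folder has been downloaded locally:

```python
# Print eval loss per epoch from trainer_state.json (adjust the path to wherever
# the checkpoint folder was downloaded).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:.0f}: eval_loss {entry['eval_loss']:.4f} at step {entry['step']}")
```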
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e03c87d26f7ce342369e25d9b8c3a6ad70ae61b48dfadebe01808769e70830db
+ size 4155