Update version 4 of the T5 model

#43
T5_ver4/config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "/kaggle/input/mt-t5finetune/pytorch/default/1",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "classifier_dropout": 0.0,
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 6,
+   "num_heads": 8,
+   "num_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.46.3",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
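The config above describes a t5-small-sized encoder-decoder (6 encoder + 6 decoder layers, d_model 512, 8 heads, 32128-token vocabulary). As a minimal sketch, assuming the T5_ver4 files from this PR are available locally (the directory name is taken from the diff), the architecture and weights can be instantiated with transformers:

```python
from transformers import T5Config, T5ForConditionalGeneration

# Build the architecture described by config.json (randomly initialized, no weights yet).
config = T5Config.from_json_file("T5_ver4/config.json")
model = T5ForConditionalGeneration(config)
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")

# With model.safetensors in the same directory, the fine-tuned weights can be loaded instead:
# model = T5ForConditionalGeneration.from_pretrained("T5_ver4")
```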
T5_ver4/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.46.3"
+ }
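generation_config.json only pins the special-token ids; beam-search and length settings still come from task_specific_params in config.json or from arguments passed at generation time. A small sketch of reading it (again assuming the T5_ver4 directory is local):

```python
from transformers import GenerationConfig

# Reads T5_ver4/generation_config.json.
gen_config = GenerationConfig.from_pretrained("T5_ver4")
print(gen_config.decoder_start_token_id, gen_config.eos_token_id, gen_config.pad_token_id)

# model.generate(..., generation_config=gen_config) then uses these ids;
# decoding settings such as num_beams or max_length must still be supplied explicitly.
```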
T5_ver4/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2863bda13ff902e8fcc667c434ad1673c30dd9500e2da98375e83050d1a37be
+ size 242041896
T5_ver4/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:190c1282d4cc2e890532451aff138b1b8af03148beec7a1d158250a2367d3b7a
+ size 484163514
T5_ver4/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1644ca0879a0a4132791dcf4f4b3c759a066ea07d0d2c14bcea32920f2d16423
+ size 14244
T5_ver4/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40cb0df3528ee2ee7aa5866c3fa1296b783032cfa876d4a8849b9931e6e69719
+ size 1064
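optimizer.pt, scheduler.pt and rng_state.pth make this a full Trainer checkpoint, so the run can be resumed rather than restarted. As a rough sketch (assuming standard PyTorch serialization, which is what the HF Trainer uses for these files), the states can also be inspected directly:

```python
import torch

# Load on CPU so no GPU is required just to inspect the checkpoint.
optim_state = torch.load("T5_ver4/optimizer.pt", map_location="cpu")
sched_state = torch.load("T5_ver4/scheduler.pt", map_location="cpu")

print(optim_state.keys())  # typically 'state' and 'param_groups' for a torch optimizer
print(sched_state)         # LR scheduler state, e.g. last_epoch and base_lrs
```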
T5_ver4/trainer_state.json ADDED
@@ -0,0 +1,333 @@
+ {
+   "best_metric": 0.78855299949646,
+   "best_model_checkpoint": "./results_v1/checkpoint-3724",
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 3724,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.10741138560687433,
+       "grad_norm": 12224.1005859375,
+       "learning_rate": 0.0004865735767991407,
+       "loss": 0.9019,
+       "step": 100
+     },
+     {
+       "epoch": 0.21482277121374865,
+       "grad_norm": 11560.216796875,
+       "learning_rate": 0.00047314715359828143,
+       "loss": 0.9088,
+       "step": 200
+     },
+     {
+       "epoch": 0.322234156820623,
+       "grad_norm": 10229.27734375,
+       "learning_rate": 0.0004597207303974221,
+       "loss": 0.9132,
+       "step": 300
+     },
+     {
+       "epoch": 0.4296455424274973,
+       "grad_norm": 23398.751953125,
+       "learning_rate": 0.00044629430719656286,
+       "loss": 0.9093,
+       "step": 400
+     },
+     {
+       "epoch": 0.5370569280343717,
+       "grad_norm": 9462.7333984375,
+       "learning_rate": 0.00043286788399570354,
+       "loss": 0.9045,
+       "step": 500
+     },
+     {
+       "epoch": 0.644468313641246,
+       "grad_norm": 11711.173828125,
+       "learning_rate": 0.00041944146079484423,
+       "loss": 0.9007,
+       "step": 600
+     },
+     {
+       "epoch": 0.7518796992481203,
+       "grad_norm": 12136.8037109375,
+       "learning_rate": 0.00040601503759398497,
+       "loss": 0.896,
+       "step": 700
+     },
+     {
+       "epoch": 0.8592910848549946,
+       "grad_norm": 17582.23828125,
+       "learning_rate": 0.00039258861439312565,
+       "loss": 0.8989,
+       "step": 800
+     },
+     {
+       "epoch": 0.966702470461869,
+       "grad_norm": 10597.923828125,
+       "learning_rate": 0.0003791621911922664,
+       "loss": 0.8925,
+       "step": 900
+     },
+     {
+       "epoch": 1.0,
+       "eval_loss": 0.8314220905303955,
+       "eval_runtime": 18.8784,
+       "eval_samples_per_second": 423.764,
+       "eval_steps_per_second": 3.337,
+       "step": 931
+     },
+     {
+       "epoch": 1.0741138560687433,
+       "grad_norm": 10806.78125,
+       "learning_rate": 0.00036573576799140713,
+       "loss": 0.8738,
+       "step": 1000
+     },
+     {
+       "epoch": 1.1815252416756177,
+       "grad_norm": 11682.1171875,
+       "learning_rate": 0.0003523093447905478,
+       "loss": 0.8611,
+       "step": 1100
+     },
+     {
+       "epoch": 1.2889366272824918,
+       "grad_norm": 14700.29296875,
+       "learning_rate": 0.00033888292158968855,
+       "loss": 0.8755,
+       "step": 1200
+     },
+     {
+       "epoch": 1.3963480128893662,
+       "grad_norm": 11728.32421875,
+       "learning_rate": 0.00032545649838882924,
+       "loss": 0.8683,
+       "step": 1300
+     },
+     {
+       "epoch": 1.5037593984962405,
+       "grad_norm": 12242.0029296875,
+       "learning_rate": 0.0003120300751879699,
+       "loss": 0.8745,
+       "step": 1400
+     },
+     {
+       "epoch": 1.6111707841031149,
+       "grad_norm": 11073.6376953125,
+       "learning_rate": 0.00029860365198711066,
+       "loss": 0.848,
+       "step": 1500
+     },
+     {
+       "epoch": 1.7185821697099892,
+       "grad_norm": 10194.4599609375,
+       "learning_rate": 0.00028517722878625135,
+       "loss": 0.8516,
+       "step": 1600
+     },
+     {
+       "epoch": 1.8259935553168636,
+       "grad_norm": 12174.0341796875,
+       "learning_rate": 0.00027175080558539203,
+       "loss": 0.8597,
+       "step": 1700
+     },
+     {
+       "epoch": 1.933404940923738,
+       "grad_norm": 11925.640625,
+       "learning_rate": 0.00025832438238453277,
+       "loss": 0.859,
+       "step": 1800
+     },
+     {
+       "epoch": 2.0,
+       "eval_loss": 0.8080422878265381,
+       "eval_runtime": 19.478,
+       "eval_samples_per_second": 410.72,
+       "eval_steps_per_second": 3.234,
+       "step": 1862
+     },
+     {
+       "epoch": 2.0408163265306123,
+       "grad_norm": 12538.783203125,
+       "learning_rate": 0.00024489795918367346,
+       "loss": 0.8473,
+       "step": 1900
+     },
+     {
+       "epoch": 2.1482277121374866,
+       "grad_norm": 11984.9697265625,
+       "learning_rate": 0.00023147153598281417,
+       "loss": 0.8413,
+       "step": 2000
+     },
+     {
+       "epoch": 2.255639097744361,
+       "grad_norm": 12034.09765625,
+       "learning_rate": 0.00021804511278195488,
+       "loss": 0.8344,
+       "step": 2100
+     },
+     {
+       "epoch": 2.3630504833512354,
+       "grad_norm": 10639.9912109375,
+       "learning_rate": 0.00020461868958109562,
+       "loss": 0.8344,
+       "step": 2200
+     },
+     {
+       "epoch": 2.4704618689581097,
+       "grad_norm": 10943.125,
+       "learning_rate": 0.0001911922663802363,
+       "loss": 0.8309,
+       "step": 2300
+     },
+     {
+       "epoch": 2.5778732545649836,
+       "grad_norm": 12608.966796875,
+       "learning_rate": 0.00017776584317937702,
+       "loss": 0.835,
+       "step": 2400
+     },
+     {
+       "epoch": 2.685284640171858,
+       "grad_norm": 12656.138671875,
+       "learning_rate": 0.00016433941997851773,
+       "loss": 0.8338,
+       "step": 2500
+     },
+     {
+       "epoch": 2.7926960257787323,
+       "grad_norm": 13147.025390625,
+       "learning_rate": 0.00015091299677765844,
+       "loss": 0.8361,
+       "step": 2600
+     },
+     {
+       "epoch": 2.9001074113856067,
+       "grad_norm": 11629.3115234375,
+       "learning_rate": 0.00013748657357679915,
+       "loss": 0.8314,
+       "step": 2700
+     },
+     {
+       "epoch": 3.0,
+       "eval_loss": 0.793174684047699,
+       "eval_runtime": 18.8558,
+       "eval_samples_per_second": 424.272,
+       "eval_steps_per_second": 3.341,
+       "step": 2793
+     },
+     {
+       "epoch": 3.007518796992481,
+       "grad_norm": 11106.541015625,
+       "learning_rate": 0.00012406015037593984,
+       "loss": 0.8261,
+       "step": 2800
+     },
+     {
+       "epoch": 3.1149301825993554,
+       "grad_norm": 11267.052734375,
+       "learning_rate": 0.00011063372717508056,
+       "loss": 0.8172,
+       "step": 2900
+     },
+     {
+       "epoch": 3.2223415682062297,
+       "grad_norm": 10196.681640625,
+       "learning_rate": 9.720730397422128e-05,
+       "loss": 0.8162,
+       "step": 3000
+     },
+     {
+       "epoch": 3.329752953813104,
+       "grad_norm": 11563.2431640625,
+       "learning_rate": 8.378088077336197e-05,
+       "loss": 0.8181,
+       "step": 3100
+     },
+     {
+       "epoch": 3.4371643394199785,
+       "grad_norm": 12632.6240234375,
+       "learning_rate": 7.035445757250269e-05,
+       "loss": 0.8176,
+       "step": 3200
+     },
+     {
+       "epoch": 3.544575725026853,
+       "grad_norm": 13568.869140625,
+       "learning_rate": 5.692803437164339e-05,
+       "loss": 0.8165,
+       "step": 3300
+     },
+     {
+       "epoch": 3.651987110633727,
+       "grad_norm": 12489.8134765625,
+       "learning_rate": 4.35016111707841e-05,
+       "loss": 0.8205,
+       "step": 3400
+     },
+     {
+       "epoch": 3.7593984962406015,
+       "grad_norm": 10397.326171875,
+       "learning_rate": 3.007518796992481e-05,
+       "loss": 0.819,
+       "step": 3500
+     },
+     {
+       "epoch": 3.866809881847476,
+       "grad_norm": 9453.00390625,
+       "learning_rate": 1.664876476906552e-05,
+       "loss": 0.8167,
+       "step": 3600
+     },
+     {
+       "epoch": 3.9742212674543502,
+       "grad_norm": 11430.806640625,
+       "learning_rate": 3.22234156820623e-06,
+       "loss": 0.8097,
+       "step": 3700
+     },
+     {
+       "epoch": 4.0,
+       "eval_loss": 0.78855299949646,
+       "eval_runtime": 18.9148,
+       "eval_samples_per_second": 422.95,
+       "eval_steps_per_second": 3.331,
+       "step": 3724
+     }
+   ],
+   "logging_steps": 100,
+   "max_steps": 3724,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "EarlyStoppingCallback": {
+       "args": {
+         "early_stopping_patience": 2,
+         "early_stopping_threshold": 0.0
+       },
+       "attributes": {
+         "early_stopping_patience_counter": 0
+       }
+     },
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": true
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 1.6121847720443904e+16,
+   "train_batch_size": 256,
+   "trial_name": null,
+   "trial_params": null
+ }
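trainer_state.json carries the full training log: eval_loss falls from 0.831 after epoch 1 to 0.789 after epoch 4, which is also the best_metric recorded at checkpoint-3724, and early stopping (patience 2) never triggered. A small standard-library sketch for pulling the evaluation curve out of the file (local path taken from this diff):

```python
import json

with open("T5_ver4/trainer_state.json") as f:
    state = json.load(f)

# Per-epoch evaluation loss from the log history.
evals = [(e["epoch"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for epoch, loss in evals:
    print(f"epoch {epoch:.0f}: eval_loss {loss:.4f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])
```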
T5_ver4/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d475290bdaca607d453be3670a063cd0623ed70c2480b37377b40c1a7731e523
+ size 5240
T5_ver4/vi_tokenizer_32128.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0fa3156da7d3526ca0cfe1f3c769ee3ca3c502737256934f00ed394a59389c85
+ size 748144
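vi_tokenizer_32128.model is the SentencePiece vocabulary, whose 32128 pieces match vocab_size in config.json. A hedged sketch of inspecting it with the sentencepiece library (the example sentence is only a placeholder; how special and extra tokens were handled during fine-tuning is not documented in this diff):

```python
import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="T5_ver4/vi_tokenizer_32128.model")
print(sp.get_piece_size())  # expected: 32128, matching config.json

pieces = sp.encode("Xin chào thế giới", out_type=str)  # placeholder Vietnamese input
print(pieces)
print(sp.decode(pieces))
```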