nielsbantilan committed
Commit 087b4f2 · 1 Parent(s): 5048497

Upload folder using huggingface_hub

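The commit message says the folder was pushed with the huggingface_hub client. For context, a minimal sketch of what such an upload typically looks like (the local path and repo id below are placeholders, not values taken from this commit):

    # Sketch: push a local training-output folder to the Hub with huggingface_hub.
    # Assumes you are already authenticated (e.g. via `huggingface-cli login`).
    from huggingface_hub import HfApi

    api = HfApi()
    api.upload_folder(
        folder_path="./training_output",      # placeholder local folder holding checkpoints, configs, weights
        repo_id="someuser/some-model-repo",   # placeholder repo id, not this repository's actual id
        repo_type="model",
        commit_message="Upload folder using huggingface_hub",
    )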
Files changed (23)
  1. checkpoint-400/global_step400/zero_pp_rank_0_mp_rank_00_model_states.pt +1 -1
  2. checkpoint-400/global_step400/zero_pp_rank_0_mp_rank_00_optim_states.pt +1 -1
  3. checkpoint-400/global_step400/zero_pp_rank_1_mp_rank_00_model_states.pt +1 -1
  4. checkpoint-400/global_step400/zero_pp_rank_1_mp_rank_00_optim_states.pt +1 -1
  5. checkpoint-400/global_step400/zero_pp_rank_2_mp_rank_00_model_states.pt +1 -1
  6. checkpoint-400/global_step400/zero_pp_rank_2_mp_rank_00_optim_states.pt +1 -1
  7. checkpoint-400/global_step400/zero_pp_rank_3_mp_rank_00_model_states.pt +1 -1
  8. checkpoint-400/global_step400/zero_pp_rank_3_mp_rank_00_optim_states.pt +1 -1
  9. checkpoint-400/global_step400/zero_pp_rank_4_mp_rank_00_model_states.pt +1 -1
  10. checkpoint-400/global_step400/zero_pp_rank_4_mp_rank_00_optim_states.pt +1 -1
  11. checkpoint-400/global_step400/zero_pp_rank_5_mp_rank_00_model_states.pt +1 -1
  12. checkpoint-400/global_step400/zero_pp_rank_5_mp_rank_00_optim_states.pt +1 -1
  13. checkpoint-400/global_step400/zero_pp_rank_6_mp_rank_00_model_states.pt +1 -1
  14. checkpoint-400/global_step400/zero_pp_rank_6_mp_rank_00_optim_states.pt +1 -1
  15. checkpoint-400/global_step400/zero_pp_rank_7_mp_rank_00_model_states.pt +1 -1
  16. checkpoint-400/global_step400/zero_pp_rank_7_mp_rank_00_optim_states.pt +1 -1
  17. checkpoint-400/pytorch_model.bin +1 -1
  18. checkpoint-400/trainer_state.json +35 -35
  19. checkpoint-400/training_args.bin +1 -1
  20. flyte_training_config.json +1 -1
  21. pytorch_model.bin +1 -1
  22. trainer_state.json +54 -954
  23. training_args.bin +1 -1
checkpoint-400/global_step400/zero_pp_rank_0_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef0c10789bcb3eda527750b2f1fc97a2ad5ed9cd117ea557ea5876e3b4b44b50
+ oid sha256:c93ee5b8cb3c9fe770a734c41a9651c2bd2146d06ed01d86f475bc092d763245
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_0_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:12c9ba6d05015fe5405a9da936ed3e2f5ff9f7661e6429ecaf2e460a28390e0f
+ oid sha256:4f2e2c33433b4a7873e0a2c58a13adc74837e4636a63cf083630a22f825bc948
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_1_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fde5b7d09fef56b9d25a86dcfc89fc47c3d11782b9142ae1375e0b2f6da98cf2
+ oid sha256:04a2f965bbffccbc96869f8a21e9abf77a2d94b05cbd52053a6b13fefb1b242f
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_1_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d06a601056b5affbe24fa4a0a318918685d84c434bc0801a6118b8cc7cb8b868
+ oid sha256:16061aa0e674e18de87177bd697b06479c1b6217aa62f20756f0d17550a6b9d1
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_2_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f2015eb61ead1392e69766d89a0db16792e13811733af10d425ece514b3be05c
+ oid sha256:1ed6c8f793581ff40564f7448fd7cdd4c3e97f5cfb8ff6c4c8c1836eaa505532
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_2_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be6cba9a80a6b026a6e338c39e5b992d1866119a6048cb6e6948586ef4f9bcab
+ oid sha256:1777ef89643d77b7d7615d7c5cd6bf22cec543a918d0d2181691167c3b2bf662
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_3_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c72cb5735a2083a90cecb19bac5d9560f458ccacb59b050bef801b7f9eba8c19
+ oid sha256:0c52a2b7e7f349e660aa5f856d7fe448a97e68beb4de6c46968e37bc63d5fd37
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_3_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:979be6fa6ba16d52e471436be61d798561b86e39a66b245978498d3735530f42
+ oid sha256:fa2a50627546e6e379f5d592681a31a65f8c1f703301f03074ac3837ba7c78c8
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_4_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fb749e1d8758b3454505a3c583bd27630b93b41c714987a246c17e86b6e3cf9f
+ oid sha256:2ad2756edd42edb4c1db8e438125921f09897cd655083806fb1c358e230b5eaa
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_4_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:89973fee08655591a7ed78b4e0fd8dc5fe0c107b072262f490a446e7393b5a16
+ oid sha256:d3579d9a26f8311633f02b253340b832e0335042ef8cc4ab9bb6de6540f5941e
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_5_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9db75fcb1f612036494b3ad7f3f9893f60ba84ac4f0921ad84194c9af37492c6
+ oid sha256:8a32922ba3284e5a1bb06e98224a3bc3bb52e072386ea0c2ad3331a6fb19c1bc
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_5_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4631100a870e917734cf6d6120894642f41aef69bbf857a49363712a8419df7b
+ oid sha256:b200e1c68d22e4ff09e8def614520ed4cd3ee318392d9795e98b10fb4b1642c8
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_6_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f6166a921ec43052d4a68a1c89f6eac86c97f6992a468d880a1746e02389a32b
+ oid sha256:c101d52f6ff8cb5cc6220900dc099fc438ab5ace5de24773ee1b4039c222e025
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_6_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be2f16ff9afc318399b12b0373e1a435fb2f62cd9640ecb8b22068e0e32d6c9b
+ oid sha256:9480e0823bd09697f3944726f296063f9236576ab9f5765d93b20c62cf1bfb2f
  size 4163799934
checkpoint-400/global_step400/zero_pp_rank_7_mp_rank_00_model_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ae86cf4d7adb3ca6934324b24c2cd1f3b078c6e9d1c245147b7587e81a2e9b79
+ oid sha256:d739b4b9f8c2f56af45739f78a55b08b87619d78ba896d57002909378c95222b
  size 134451731
checkpoint-400/global_step400/zero_pp_rank_7_mp_rank_00_optim_states.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:debed0eca41e789b85121bc469404216f166208725234b6af79b78cf885c58d9
+ oid sha256:3441f14607a54bc3f5eff1b191fe2864d101588934f8f9c17fa4b45feec9a210
  size 4163799934
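The zero_pp_rank_*_mp_rank_00_{model,optim}_states.pt files under global_step400 are DeepSpeed ZeRO checkpoint shards, one model/optimizer pair per data-parallel rank (ranks 0-7 here). If a single consolidated fp32 state dict is wanted instead of the shards, DeepSpeed's zero_to_fp32 helper can rebuild it; a hedged sketch, assuming the DeepSpeed version that wrote these shards is installed:

    # Sketch: consolidate the ZeRO shards in checkpoint-400/global_step400 into one fp32 state dict.
    import torch
    from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

    # "checkpoint-400" is the directory layout shown in this commit; "global_step400" selects the step folder.
    state_dict = get_fp32_state_dict_from_zero_checkpoint("checkpoint-400", tag="global_step400")
    torch.save(state_dict, "checkpoint-400/consolidated_fp32.bin")  # output filename is arbitrary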
checkpoint-400/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:12d7ad2052b524441df7eb0291f23bd6f573dd8161c99830c26ce2a38a2f113b
+ oid sha256:a6ac41172c7bd6abf75e8e0e73e4f8dbe3202ec6c399179c73462afcffdd1671
  size 5686106713
checkpoint-400/trainer_state.json CHANGED
Every hunk in this file touches only the logged "loss" values inside log_history; the surrounding "epoch", "learning_rate", and "step" fields are identical in both revisions. Losses, old -> new:

  step  10: 1.9677 -> 1.9783
  step  20: 1.0371 -> 1.0744
  step  30: 0.2217 -> 0.2335
  step  40: 0.0689 -> 0.0723
  step  50: 0.0456 -> 0.046
  step  60: 0.0316 -> 0.0321
  step  70: 0.0248 -> 0.0261
  step  90: 0.0144 -> 0.0158
  step 100: 0.0113 -> 0.0124
  step 110: 0.0098 -> 0.0106
  step 120: 0.009  -> 0.0091
  step 130: 0.0076 -> 0.0083
  step 140: 0.0075 -> 0.0071
  step 150: 0.0065 -> 0.007
  step 160: 0.0064 -> 0.0063
  step 170: 0.0057 -> 0.0061
  step 180: 0.0054 -> 0.0059
  step 190: 0.0079 -> 0.0054
  step 200: 0.0063 -> 0.0052
  step 210: 0.0058 -> 0.0049
  step 220: 0.005  -> 0.0048
  step 230: 0.005  -> 0.0045
  step 260: 0.0043 -> 0.0046
  step 270: 0.0043 -> 0.0042
  step 280: 0.0042 -> 0.0043
  step 300: 0.0042 -> 0.0041
  step 310: 0.0038 -> 0.0041
  step 320: 0.0039 -> 0.0043
  step 340: 0.0038 -> 0.0041
  step 350: 0.0034 -> 0.004
  step 360: 0.0037 -> 0.004
  step 370: 0.0038 -> 0.0042
  step 380: 0.0037 -> 0.0041
  step 390: 0.0037 -> 0.0038
  step 400: 0.0036 -> 0.0041
checkpoint-400/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d27383b338ae82946f100d10bfcecddd777acad3c5731d207d2c433e024bc936
+ oid sha256:567801fbca456df7279774860aae6a5f038bd64e03f891b6ae2b93a59c8c417b
  size 5563
flyte_training_config.json CHANGED
@@ -1 +1 @@
- {"base_model": "togethercomputer/RedPajama-INCITE-Base-3B-v1", "data_path": "wikipedia", "data_name": "20220301.simple", "num_epochs": 1, "max_steps": 2001, "learning_rate": 2e-05, "weight_decay": 0.02, "warmup_ratio": 0.03, "lr_scheduler_type": "cosine", "batch_size": 16, "micro_batch_size": 1, "val_set_size": 0, "group_by_length": false, "instruction_key": "instruction", "input_key": "input", "output_key": "output", "device_map": "auto", "cache_dir": null, "optim": "adamw_torch", "model_max_length": 512, "debug_mode": false, "debug_train_data_size": 1024, "wandb_project": ""}
+ {"base_model": "togethercomputer/RedPajama-INCITE-Base-3B-v1", "data_path": "wikipedia", "data_name": "20220301.simple", "num_epochs": 1, "max_steps": 500, "learning_rate": 2e-05, "weight_decay": 0.02, "warmup_ratio": 0.03, "lr_scheduler_type": "cosine", "batch_size": 16, "micro_batch_size": 1, "val_set_size": 0, "group_by_length": false, "instruction_key": "instruction", "input_key": "input", "output_key": "output", "device_map": "auto", "cache_dir": null, "optim": "adamw_torch", "model_max_length": 512, "debug_mode": false, "debug_train_data_size": 1024, "wandb_project": ""}
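flyte_training_config.json holds the fine-tuning hyperparameters; the only change in this commit is max_steps dropping from 2001 to 500. A minimal sketch of reading such a config from Python (the TrainingConfig dataclass below is illustrative, not the actual Flyte task code):

    # Sketch: load flyte_training_config.json into a small typed object.
    import json
    from dataclasses import dataclass

    @dataclass
    class TrainingConfig:              # hypothetical container mirroring a few of the JSON keys
        base_model: str
        data_path: str
        data_name: str
        max_steps: int
        learning_rate: float
        batch_size: int
        model_max_length: int

    with open("flyte_training_config.json") as f:
        raw = json.load(f)

    cfg = TrainingConfig(**{k: raw[k] for k in TrainingConfig.__dataclass_fields__})
    print(cfg.base_model, cfg.max_steps)  # togethercomputer/RedPajama-INCITE-Base-3B-v1 500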
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6ffb1344823c7e872b10d296a16b98a5712cc86b52b776238f0b801f54d28833
+ oid sha256:0f3895af09e0538e006a1966cba065c0ffa5e3f6694c04007a381d31c326bcf4
  size 5686106713
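pytorch_model.bin at the repository root is the consolidated ~5.7 GB weight file for the fine-tuned RedPajama-INCITE-Base-3B model. Assuming the repo also carries the usual config and tokenizer files, it loads like any causal LM with transformers (the repo id below is a placeholder):

    # Sketch: load the fine-tuned weights with transformers (device_map="auto" requires accelerate).
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    repo_id = "someuser/this-model-repo"  # placeholder for the actual Hub repo id
    tokenizer = AutoTokenizer.from_pretrained(repo_id)
    model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.float16, device_map="auto")

    inputs = tokenizer("Simple English Wikipedia is", return_tensors="pt").to(model.device)
    print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))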
trainer_state.json CHANGED
The old revision of this file records the original 2001-step run; the new revision records the shortened 500-step run. The header, the re-logged values up to step 500, the final training summary, and the footer all change, and the log entries for steps 510-2001 disappear.

Header, old -> new:
  "epoch": 1334.0 -> 333.3333333333333
  "global_step": 2001 -> 500

Warmup learning rates re-logged, old -> new:
  step 10: 9.46713625058711e-06   -> 1.5357481488588927e-05
  step 20: 1.3783995508828243e-05 -> 2e-05
  step 30: 1.603472631319529e-05  -> 2e-05
  step 40: 1.7567641489142956e-05 -> 2e-05
  step 50: 1.8731528764550483e-05 -> 2e-05
  step 60: 1.9670033192067303e-05 -> 2e-05

Losses re-logged (learning_rate is 2e-05 from step 70 onward in both revisions), old -> new:
  step  10: 2.0455 -> 1.9783
  step  20: 1.425  -> 1.0744
  step  30: 0.5237 -> 0.2335
  step  40: 0.1184 -> 0.0723
  step  50: 0.0585 -> 0.046
  step  60: 0.0411 -> 0.0321
  step  70: 0.0321 -> 0.0261
  step  80: 0.0232 -> 0.0189
  step  90: 0.0182 -> 0.0158
  step 100: 0.0137 -> 0.0124
  step 110: 0.0111 -> 0.0106
  step 120: 0.0096 -> 0.0091
  step 130: 0.0085 -> 0.0083
  step 140: 0.0078 -> 0.0071
  step 160: 0.0066 -> 0.0063
  step 180: 0.0057 -> 0.0059
  step 230: 0.0048 -> 0.0045
  step 240: 0.0045 -> 0.0047
  step 260: 0.0045 -> 0.0046
  step 270: 0.0041 -> 0.0042
  step 280: 0.0044 -> 0.0043
  step 290: 0.0042 -> 0.0043
  step 300: 0.0042 -> 0.0041
  step 320: 0.0041 -> 0.0043
  step 330: 0.0041 -> 0.004
  step 340: 0.004  -> 0.0041
  step 360: 0.0038 -> 0.004
  step 370: 0.004  -> 0.0042
  step 380: 0.004  -> 0.0041
  step 390: 0.0041 -> 0.0038
  step 400: 0.0039 -> 0.0041
  step 440: 0.004  -> 0.0037
  step 450: 0.0039 -> 0.0038
  step 460: 0.0038 -> 0.0037
  step 470: 0.0037 -> 0.0039
  step 480: 0.0038 -> 0.0037
  step 490: 0.0038 -> 0.0036

Log entries present only in the old revision (steps 510-2000, learning_rate 2e-05 throughout; step: loss):
   510: 0.0038   520: 0.004    530: 0.0037   540: 0.0039   550: 0.0045
   560: 0.005    570: 0.014    580: 0.0149   590: 0.0084   600: 0.0072
   610: 0.0058   620: 0.0053   630: 0.0051   640: 0.0047   650: 0.0045
   660: 0.0043   670: 0.0041   680: 0.0039   690: 0.0038   700: 0.004
   710: 0.004    720: 0.0036   730: 0.0036   740: 0.0037   750: 0.0036
   760: 0.0034   770: 0.0035   780: 0.0035   790: 0.0035   800: 0.0034
   810: 0.0035   820: 0.0034   830: 0.0037   840: 0.0034   850: 0.0034
   860: 0.0034   870: 0.0033   880: 0.0035   890: 0.0033   900: 0.0035
   910: 0.0035   920: 0.0034   930: 0.0033   940: 0.0034   950: 0.0035
   960: 0.0034   970: 0.0035   980: 0.0034   990: 0.0035  1000: 0.0034
  1010: 0.0035  1020: 0.0034  1030: 0.0034  1040: 0.0033  1050: 0.0033
  1060: 0.0033  1070: 0.0035  1080: 0.0035  1090: 0.0035  1100: 0.0034
  1110: 0.0034  1120: 0.0034  1130: 0.0035  1140: 0.0035  1150: 0.0039
  1160: 0.0049  1170: 0.0049  1180: 0.0048  1190: 0.0048  1200: 0.0046
  1210: 0.0041  1220: 0.0038  1230: 0.0043  1240: 0.0042  1250: 0.004
  1260: 0.0037  1270: 0.0043  1280: 0.0037  1290: 0.004   1300: 0.004
  1310: 0.0051  1320: 0.0127  1330: 0.0082  1340: 0.0193  1350: 0.0072
  1360: 0.0055  1370: 0.0052  1380: 0.0045  1390: 0.0042  1400: 0.0042
  1410: 0.0037  1420: 0.0037  1430: 0.0038  1440: 0.0036  1450: 0.0037
  1460: 0.0035  1470: 0.0037  1480: 0.0035  1490: 0.0034  1500: 0.0035
  1510: 0.0035  1520: 0.0035  1530: 0.0034  1540: 0.0035  1550: 0.0034
  1560: 0.0034  1570: 0.0032  1580: 0.0032  1590: 0.0034  1600: 0.0034
  1610: 0.0033  1620: 0.0033  1630: 0.0034  1640: 0.0032  1650: 0.0032
  1660: 0.0032  1670: 0.0033  1680: 0.0033  1690: 0.0032  1700: 0.0032
  1710: 0.0032  1720: 0.0033  1730: 0.0034  1740: 0.0034  1750: 0.0032
  1760: 0.0033  1770: 0.0032  1780: 0.0032  1790: 0.0032  1800: 0.0032
  1810: 0.0032  1820: 0.0043  1830: 0.0034  1840: 0.0032  1850: 0.0034
  1860: 0.0063  1870: 0.0032  1880: 0.0061  1890: 0.0071  1900: 0.0055
  1910: 0.0053  1920: 0.0043  1930: 0.0042  1940: 0.0039  1950: 0.004
  1960: 0.0038  1970: 0.004   1980: 0.0038  1990: 0.0036  2000: 0.0036

Final summary entry, old -> new:
  "epoch": 1334.0 -> 333.33
  "step": 2001 -> 500
  "total_flos": 841939628851200.0 -> 210359990353920.0
  "train_loss": 0.02562973479824564 -> 0.07407628475874663
  "train_runtime": 85372.5158 -> 21320.65
  "train_samples_per_second": 3.0 -> 3.002
  "train_steps_per_second": 0.023 (unchanged)

Footer, old -> new:
  "max_steps": 2001 -> 500
  "num_train_epochs": 2001 -> 500
  "total_flos": 841939628851200.0 -> 210359990353920.0
  "trial_name" and "trial_params" remain null.
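As the diff above shows, trainer_state.json keeps log_history as a flat list of dicts with epoch, learning_rate, loss, and step keys, plus a final summary entry. A short sketch for pulling the loss curve back out of the file:

    # Sketch: extract the logged loss curve from trainer_state.json.
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
    for step, loss in points[:5]:
        print(f"step {step:>4}: loss {loss}")

    # Optional plot, if matplotlib happens to be installed:
    # import matplotlib.pyplot as plt
    # plt.plot(*zip(*points)); plt.xlabel("step"); plt.ylabel("loss"); plt.show()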
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a6f37d82e0ffda307f018bd024cb5411a96a38d758bc5f0c3bd5e051be3c5dc1
+ oid sha256:567801fbca456df7279774860aae6a5f038bd64e03f891b6ae2b93a59c8c417b
  size 5563