fats-fme committed · Commit 2b0a744 · verified · 1 Parent(s): d75d312

Training in progress, step 30, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ab9044e6d2214d0e9b55b01f498c588625efba45f76fa710ea1554395b794d6d
+oid sha256:af9e788e546f18a3b27058939ea4434cdddac18c862a33dfd4b9a0f72f9921d8
 size 239452242
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:294def80c0e86e2627232895f7cd12be51fbafc300e62762cd2104710191e91f
+oid sha256:9beca6c32763a3cc8051aec28baf91f153cdbe56a51f49590523253e9deac3c0
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c95ca955d9a9a4fe17edeb37a62e75a1e26aa0a8da062df4f479bc6cf74ba889
+oid sha256:b1384fee391a6ad2405d499f49efeb5a52b5f8c74d45b3ac0747ecb863c12d32
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8bb840dbc65c27ef8840855786e35bfea033749e5d7571cf601f6645aa29cc1e
+oid sha256:3fbc936ac0284bc0c88ce620e190b6b6984693d58131c42013c320d4b2c3a45e
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.24896265560165975,
+  "epoch": 0.4979253112033195,
   "eval_steps": 15,
-  "global_step": 15,
+  "global_step": 30,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -128,6 +128,119 @@
       "eval_samples_per_second": 6.02,
       "eval_steps_per_second": 1.535,
       "step": 15
+    },
+    {
+      "epoch": 0.26556016597510373,
+      "grad_norm": NaN,
+      "learning_rate": 0.00016,
+      "loss": 0.0,
+      "step": 16
+    },
+    {
+      "epoch": 0.2821576763485477,
+      "grad_norm": NaN,
+      "learning_rate": 0.00017,
+      "loss": 0.0,
+      "step": 17
+    },
+    {
+      "epoch": 0.2987551867219917,
+      "grad_norm": NaN,
+      "learning_rate": 0.00018,
+      "loss": 0.0,
+      "step": 18
+    },
+    {
+      "epoch": 0.3153526970954357,
+      "grad_norm": NaN,
+      "learning_rate": 0.00019,
+      "loss": 0.0,
+      "step": 19
+    },
+    {
+      "epoch": 0.33195020746887965,
+      "grad_norm": NaN,
+      "learning_rate": 0.0002,
+      "loss": 0.0,
+      "step": 20
+    },
+    {
+      "epoch": 0.34854771784232363,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001996917333733128,
+      "loss": 0.0,
+      "step": 21
+    },
+    {
+      "epoch": 0.3651452282157676,
+      "grad_norm": NaN,
+      "learning_rate": 0.00019876883405951377,
+      "loss": 0.0,
+      "step": 22
+    },
+    {
+      "epoch": 0.3817427385892116,
+      "grad_norm": NaN,
+      "learning_rate": 0.00019723699203976766,
+      "loss": 0.0,
+      "step": 23
+    },
+    {
+      "epoch": 0.3983402489626556,
+      "grad_norm": NaN,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.0,
+      "step": 24
+    },
+    {
+      "epoch": 0.4149377593360996,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001923879532511287,
+      "loss": 0.0,
+      "step": 25
+    },
+    {
+      "epoch": 0.4315352697095436,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001891006524188368,
+      "loss": 0.0,
+      "step": 26
+    },
+    {
+      "epoch": 0.44813278008298757,
+      "grad_norm": NaN,
+      "learning_rate": 0.00018526401643540922,
+      "loss": 0.0,
+      "step": 27
+    },
+    {
+      "epoch": 0.46473029045643155,
+      "grad_norm": NaN,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.0,
+      "step": 28
+    },
+    {
+      "epoch": 0.48132780082987553,
+      "grad_norm": NaN,
+      "learning_rate": 0.0001760405965600031,
+      "loss": 0.0,
+      "step": 29
+    },
+    {
+      "epoch": 0.4979253112033195,
+      "grad_norm": NaN,
+      "learning_rate": 0.00017071067811865476,
+      "loss": 0.0,
+      "step": 30
+    },
+    {
+      "epoch": 0.4979253112033195,
+      "eval_loss": NaN,
+      "eval_runtime": 16.7657,
+      "eval_samples_per_second": 6.084,
+      "eval_steps_per_second": 1.551,
+      "step": 30
     }
   ],
   "logging_steps": 1,
@@ -147,7 +260,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.42658589769728e+16,
+  "total_flos": 8.85317179539456e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null