dada22231 committed
Commit 744a03f · verified · 1 Parent(s): 2cbb285

Training in progress, step 50, checkpoint

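The files below make up a complete Trainer checkpoint at step 50 (adapter weights, optimizer and scheduler state, per-rank RNG states, and trainer_state.json). If one wanted to fetch exactly this state later, the download could be pinned to this commit; a minimal sketch with huggingface_hub, where the repo_id is a placeholder because the repository name is not shown on this page:

```python
from huggingface_hub import snapshot_download

# repo_id is a placeholder -- the repository name is not part of this commit view.
# revision pins the download to commit 744a03f so later checkpoints don't replace it.
local_dir = snapshot_download(
    repo_id="dada22231/<repo-name>",
    revision="744a03f",
    allow_patterns=["last-checkpoint/*"],
)
print("checkpoint downloaded to", local_dir)
```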
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:651cb403146e3bb7ba0b577227371161a04dcf9dbaf7c61180460dced926a62a
+ oid sha256:bc884815c126a9bcc057e85b4609aafc0b566add0cf47a55eadbfb55a727803d
 size 166182480
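adapter_model.safetensors is the PEFT adapter (roughly 166 MB, per the pointer's size field), so only these weights change between checkpoints while the base model stays fixed. A minimal sketch of loading the updated adapter, assuming a causal-LM base and that the checkpoint directory also contains PEFT's adapter_config.json; the base-model name is a placeholder, since the diff does not record it:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Placeholder base model -- this commit does not say which model the adapter was trained on.
base = AutoModelForCausalLM.from_pretrained("<base-model-name>")

# Applies the PEFT adapter weights from adapter_model.safetensors on top of the base model.
model = PeftModel.from_pretrained(base, "last-checkpoint")
model.eval()
```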
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:84bcab4eed33d004e8d1d06f0e18eff5e7e9fcff8a9d3c309d28b8bc542185e5
+ oid sha256:c52ae7a5fa365f7f694460996c62193712df4fdfad12b35db4105904d2b000d1
 size 332574358
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:472a802910da1680bf70feab18c4e65613f96d757c027a2ae95f02fb89274fd2
+ oid sha256:05803afa19cda5b6abb99394d678e810ac5426335c13e6fdaacfe1b4d370527b
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:e8ed6bb1dbbfd44566f2fbe9adc5637e0ba94bef602bf1d3210d3b442a180974
+ oid sha256:f6bcfd288976efa04b300ce8dfe3482dedae9984ba25cbd81fd57a72e62a6f94
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:4478fe01b2cdb72a2a7f53a49afbaa4b13af4c110a1a8fa020796beb83da19f9
+ oid sha256:2ba0d85e29096cba3540854efbe2eb24b0dcb354bd598c9b0442f1a068e97147
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:bfab690b5170c855d7cf054d8dc90b5b048878528e4a1eb1f923a3f7e795fa72
+ oid sha256:bce6c0f85e52f47ca30e0c92d68100127220279776878577c4b7dbbb15af1305
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:0f5c305e4a92be904895c02f0f0a1da666e6e7555f6043a8f089990c87f4ce88
+ oid sha256:df19ed1a9610a5422497073697cbf4575f80de47fbb46ef0cdd2779386b031fa
 size 1064
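Every file above is stored through Git LFS, so each diff only swaps a three-line pointer (spec version, sha256 oid, byte size) while the actual payload lives in LFS storage. A small sketch for checking a locally downloaded file against its pointer, using the new scheduler.pt values from this commit as the example:

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    """Return True if the file's byte length and sha256 match an LFS pointer."""
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

# oid/size copied from the updated last-checkpoint/scheduler.pt pointer above.
print(matches_lfs_pointer(
    "last-checkpoint/scheduler.pt",
    "df19ed1a9610a5422497073697cbf4575f80de47fbb46ef0cdd2779386b031fa",
    1064,
))
```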
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
- "best_metric": 1.6187465190887451,
- "best_model_checkpoint": "miner_id_24/checkpoint-25",
- "epoch": 0.012340346763744061,
+ "best_metric": 1.558501124382019,
+ "best_model_checkpoint": "miner_id_24/checkpoint-50",
+ "epoch": 0.024680693527488123,
 "eval_steps": 25,
- "global_step": 25,
+ "global_step": 50,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -198,6 +198,189 @@
 "eval_samples_per_second": 23.03,
 "eval_steps_per_second": 5.988,
 "step": 25
+ },
+ {
+ "epoch": 0.012833960634293824,
+ "grad_norm": 0.9111001491546631,
+ "learning_rate": 8.681980515339464e-05,
+ "loss": 1.643,
+ "step": 26
+ },
+ {
+ "epoch": 0.013327574504843585,
+ "grad_norm": 0.869102418422699,
+ "learning_rate": 8.571489144483944e-05,
+ "loss": 1.6734,
+ "step": 27
+ },
+ {
+ "epoch": 0.013821188375393348,
+ "grad_norm": 0.842080295085907,
+ "learning_rate": 8.457416554680877e-05,
+ "loss": 1.476,
+ "step": 28
+ },
+ {
+ "epoch": 0.014314802245943111,
+ "grad_norm": 0.835245668888092,
+ "learning_rate": 8.339895749467238e-05,
+ "loss": 1.6803,
+ "step": 29
+ },
+ {
+ "epoch": 0.014808416116492874,
+ "grad_norm": 0.7700492739677429,
+ "learning_rate": 8.219063752844926e-05,
+ "loss": 1.4141,
+ "step": 30
+ },
+ {
+ "epoch": 0.015302029987042635,
+ "grad_norm": 0.8414303064346313,
+ "learning_rate": 8.095061449516903e-05,
+ "loss": 1.5424,
+ "step": 31
+ },
+ {
+ "epoch": 0.015795643857592398,
+ "grad_norm": 0.8434069752693176,
+ "learning_rate": 7.968033420621935e-05,
+ "loss": 1.5554,
+ "step": 32
+ },
+ {
+ "epoch": 0.01628925772814216,
+ "grad_norm": 0.8102946877479553,
+ "learning_rate": 7.838127775159452e-05,
+ "loss": 1.5322,
+ "step": 33
+ },
+ {
+ "epoch": 0.016782871598691924,
+ "grad_norm": 0.777534544467926,
+ "learning_rate": 7.705495977301078e-05,
+ "loss": 1.5407,
+ "step": 34
+ },
+ {
+ "epoch": 0.017276485469241685,
+ "grad_norm": 0.8392916321754456,
+ "learning_rate": 7.570292669790186e-05,
+ "loss": 1.5694,
+ "step": 35
+ },
+ {
+ "epoch": 0.01777009933979145,
+ "grad_norm": 0.8198193907737732,
+ "learning_rate": 7.43267549363537e-05,
+ "loss": 1.5633,
+ "step": 36
+ },
+ {
+ "epoch": 0.01826371321034121,
+ "grad_norm": 0.8337418437004089,
+ "learning_rate": 7.292804904308087e-05,
+ "loss": 1.563,
+ "step": 37
+ },
+ {
+ "epoch": 0.01875732708089097,
+ "grad_norm": 0.7560647130012512,
+ "learning_rate": 7.150843984658754e-05,
+ "loss": 1.4525,
+ "step": 38
+ },
+ {
+ "epoch": 0.019250940951440736,
+ "grad_norm": 0.7717271447181702,
+ "learning_rate": 7.006958254769438e-05,
+ "loss": 1.5498,
+ "step": 39
+ },
+ {
+ "epoch": 0.019744554821990497,
+ "grad_norm": 0.7649319767951965,
+ "learning_rate": 6.861315478964841e-05,
+ "loss": 1.529,
+ "step": 40
+ },
+ {
+ "epoch": 0.020238168692540262,
+ "grad_norm": 0.747653067111969,
+ "learning_rate": 6.714085470206609e-05,
+ "loss": 1.388,
+ "step": 41
+ },
+ {
+ "epoch": 0.020731782563090023,
+ "grad_norm": 0.7593997120857239,
+ "learning_rate": 6.56543989209901e-05,
+ "loss": 1.4656,
+ "step": 42
+ },
+ {
+ "epoch": 0.021225396433639784,
+ "grad_norm": 0.7581989765167236,
+ "learning_rate": 6.415552058736854e-05,
+ "loss": 1.5301,
+ "step": 43
+ },
+ {
+ "epoch": 0.02171901030418955,
+ "grad_norm": 0.8096319437026978,
+ "learning_rate": 6.264596732629e-05,
+ "loss": 1.5115,
+ "step": 44
+ },
+ {
+ "epoch": 0.02221262417473931,
+ "grad_norm": 0.8088666200637817,
+ "learning_rate": 6.112749920933111e-05,
+ "loss": 1.5176,
+ "step": 45
+ },
+ {
+ "epoch": 0.02270623804528907,
+ "grad_norm": 0.7563244700431824,
+ "learning_rate": 5.960188670239154e-05,
+ "loss": 1.3766,
+ "step": 46
+ },
+ {
+ "epoch": 0.023199851915838836,
+ "grad_norm": 0.8245486617088318,
+ "learning_rate": 5.80709086014102e-05,
+ "loss": 1.4407,
+ "step": 47
+ },
+ {
+ "epoch": 0.023693465786388597,
+ "grad_norm": 0.8189464807510376,
+ "learning_rate": 5.653634995836856e-05,
+ "loss": 1.4207,
+ "step": 48
+ },
+ {
+ "epoch": 0.02418707965693836,
+ "grad_norm": 0.86505126953125,
+ "learning_rate": 5.500000000000001e-05,
+ "loss": 1.5054,
+ "step": 49
+ },
+ {
+ "epoch": 0.024680693527488123,
+ "grad_norm": 0.9769317507743835,
+ "learning_rate": 5.346365004163145e-05,
+ "loss": 1.505,
+ "step": 50
+ },
+ {
+ "epoch": 0.024680693527488123,
+ "eval_loss": 1.558501124382019,
+ "eval_runtime": 2.151,
+ "eval_samples_per_second": 23.245,
+ "eval_steps_per_second": 6.044,
+ "step": 50
 }
 ],
 "logging_steps": 1,
@@ -226,7 +409,7 @@
 "attributes": {}
 }
 },
- "total_flos": 8.1240447254528e+16,
+ "total_flos": 1.62480894509056e+17,
 "train_batch_size": 1,
 "trial_name": null,
 "trial_params": null