fats-fme committed · verified
Commit b003c60 · 1 Parent(s): 80c8a62

Training in progress, step 60, checkpoint

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c3b0162c125dace0461332123e8c7d602462e84e158ad2d3f071bbc125b34b8d
+oid sha256:132c6ad2f0e6ca9cbafb4299d39bae52bde8b115fc570cfa83951b10df31dc73
 size 239452242
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:56e0b4692f0d79aa027250daa798b9b99aa2daab99fce81150537a9623556468
+oid sha256:98f27784cc712643b6f32c7b360c776b909647ee0e557d8667bfc0fed90886b9
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a7c19a532ddf743881461eea886de51a0451550ba062c994babb62565733b22d
+oid sha256:3b4164ec2060cabb2b939eb68c0aa306cca823c2d53027e782ea1488cef57ea8
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7e94f4caf902c120d48372da3bb1cf8d147f3ddef7262ea875dbd118eb698ece
+oid sha256:7ee9289e194488fcfb7c8a56f41078ad0cbfc6c87530ed9f3e7891e3005bdd0b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.7468879668049793,
+  "epoch": 0.995850622406639,
   "eval_steps": 15,
-  "global_step": 45,
+  "global_step": 60,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -354,6 +354,119 @@
       "eval_samples_per_second": 6.039,
       "eval_steps_per_second": 1.539,
       "step": 45
+    },
+    {
+      "epoch": 0.7634854771784232,
+      "grad_norm": NaN,
+      "learning_rate": 5.4600950026045326e-05,
+      "loss": 0.0,
+      "step": 46
+    },
+    {
+      "epoch": 0.7800829875518672,
+      "grad_norm": NaN,
+      "learning_rate": 4.7750143528405126e-05,
+      "loss": 0.0,
+      "step": 47
+    },
+    {
+      "epoch": 0.7966804979253111,
+      "grad_norm": NaN,
+      "learning_rate": 4.12214747707527e-05,
+      "loss": 0.0,
+      "step": 48
+    },
+    {
+      "epoch": 0.8132780082987552,
+      "grad_norm": NaN,
+      "learning_rate": 3.5055195166981645e-05,
+      "loss": 0.0,
+      "step": 49
+    },
+    {
+      "epoch": 0.8298755186721992,
+      "grad_norm": NaN,
+      "learning_rate": 2.9289321881345254e-05,
+      "loss": 0.0,
+      "step": 50
+    },
+    {
+      "epoch": 0.8464730290456431,
+      "grad_norm": NaN,
+      "learning_rate": 2.3959403439996907e-05,
+      "loss": 0.0,
+      "step": 51
+    },
+    {
+      "epoch": 0.8630705394190872,
+      "grad_norm": NaN,
+      "learning_rate": 1.9098300562505266e-05,
+      "loss": 0.0,
+      "step": 52
+    },
+    {
+      "epoch": 0.8796680497925311,
+      "grad_norm": NaN,
+      "learning_rate": 1.4735983564590783e-05,
+      "loss": 0.0,
+      "step": 53
+    },
+    {
+      "epoch": 0.8962655601659751,
+      "grad_norm": NaN,
+      "learning_rate": 1.0899347581163221e-05,
+      "loss": 0.0,
+      "step": 54
+    },
+    {
+      "epoch": 0.9128630705394191,
+      "grad_norm": NaN,
+      "learning_rate": 7.612046748871327e-06,
+      "loss": 0.0,
+      "step": 55
+    },
+    {
+      "epoch": 0.9294605809128631,
+      "grad_norm": NaN,
+      "learning_rate": 4.8943483704846475e-06,
+      "loss": 0.0,
+      "step": 56
+    },
+    {
+      "epoch": 0.946058091286307,
+      "grad_norm": NaN,
+      "learning_rate": 2.7630079602323442e-06,
+      "loss": 0.0,
+      "step": 57
+    },
+    {
+      "epoch": 0.9626556016597511,
+      "grad_norm": NaN,
+      "learning_rate": 1.231165940486234e-06,
+      "loss": 0.0,
+      "step": 58
+    },
+    {
+      "epoch": 0.979253112033195,
+      "grad_norm": NaN,
+      "learning_rate": 3.0826662668720364e-07,
+      "loss": 0.0,
+      "step": 59
+    },
+    {
+      "epoch": 0.995850622406639,
+      "grad_norm": NaN,
+      "learning_rate": 0.0,
+      "loss": 0.0,
+      "step": 60
+    },
+    {
+      "epoch": 0.995850622406639,
+      "eval_loss": NaN,
+      "eval_runtime": 16.7744,
+      "eval_samples_per_second": 6.081,
+      "eval_steps_per_second": 1.55,
+      "step": 60
     }
   ],
   "logging_steps": 1,
@@ -368,12 +481,12 @@
         "should_evaluate": false,
         "should_log": false,
         "should_save": true,
-        "should_training_stop": false
+        "should_training_stop": true
       },
       "attributes": {}
     }
   },
-  "total_flos": 1.327975769309184e+17,
+  "total_flos": 1.770634359078912e+17,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null