RoyJoy committed
Commit 4c0b01e · verified · 1 Parent(s): 5d48900

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6d09592663ebce3ca76edc6b1dc7c5b52a82e8b68c909135b3f5396017d4cf08
+ oid sha256:e6ea0d357987c22e19d62bc693ad25f9f75e8c29705927966891984bcfa15006
  size 78480072
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:80691a5d1b917dce75b40a22b97196c5b35cc85d6a247be8b2b413b4a00a30bc
+ oid sha256:360c92066fe24030b748d3e1a4a79cdad39167ce2d86a5cde6185f78d0be2afd
  size 157104826
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0f3d0420f4db62c415afed106815a86180664afafea1a46eb1a620929057d64e
+ oid sha256:72e4f40153d720af5a9bc2d43b6e8c0264d4a78ff7944cbfcd5eae1a40c99950
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3fb35492667ef61a31726db189dfe16538df914ba267bbdcb060e3f1ab4c1e82
+ oid sha256:48e5a60fd5a260f9c83e80fb175afa680e039a9594b460e7b3c282c848321c17
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:32527abdcebc720c825f16a7f3420d7d7097ac042dd8a0e585710ea60c9802ab
+ oid sha256:375a1f39f77328881cc4dff15556f86b1cf1c19b312aabc309ef42062f9242cc
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:743a4ce5a39d89cf04aefa9ed018507ca517a94d5397b4693e6903bda54804c3
+ oid sha256:b0a8e7093cfc46d661e82fccdbf1eaa9abd9a2f1dda8b3d0a97ab63067e39050
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5ddc0483fb60dd989ea0520ff2e69158bc6dd74d83a5562802d3d9255cecbc12
+ oid sha256:910cff932a69f6abbc99df6b748a086af6efc7922d876a1390fb1cf725edb026
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.3951396942138672,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.8445500131960939,
+ "best_metric": 0.12606343626976013,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 1.6891000263921878,
  "eval_steps": 25,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -381,6 +381,372 @@
  "eval_samples_per_second": 72.139,
  "eval_steps_per_second": 18.756,
  "step": 50
+ },
+ {
+ "epoch": 0.8614410134600158,
+ "grad_norm": 11.373549461364746,
+ "learning_rate": 0.000172161483570762,
+ "loss": 0.8296,
+ "step": 51
+ },
+ {
+ "epoch": 0.8783320137239378,
+ "grad_norm": 8.694587707519531,
+ "learning_rate": 0.0001709553414463167,
+ "loss": 0.7572,
+ "step": 52
+ },
+ {
+ "epoch": 0.8952230139878596,
+ "grad_norm": 10.283465385437012,
+ "learning_rate": 0.00016972862587414592,
+ "loss": 0.575,
+ "step": 53
+ },
+ {
+ "epoch": 0.9121140142517815,
+ "grad_norm": 10.08366870880127,
+ "learning_rate": 0.0001684817508912824,
+ "loss": 0.3361,
+ "step": 54
+ },
+ {
+ "epoch": 0.9290050145157034,
+ "grad_norm": 4.075512886047363,
+ "learning_rate": 0.00016721513733889716,
+ "loss": 0.2184,
+ "step": 55
+ },
+ {
+ "epoch": 0.9458960147796253,
+ "grad_norm": 4.301791191101074,
+ "learning_rate": 0.00016592921272025882,
+ "loss": 0.2137,
+ "step": 56
+ },
+ {
+ "epoch": 0.9627870150435471,
+ "grad_norm": 6.267927646636963,
+ "learning_rate": 0.0001646244110564441,
+ "loss": 0.3192,
+ "step": 57
+ },
+ {
+ "epoch": 0.979678015307469,
+ "grad_norm": 4.850979804992676,
+ "learning_rate": 0.00016330117273984822,
+ "loss": 0.2671,
+ "step": 58
+ },
+ {
+ "epoch": 0.9965690155713909,
+ "grad_norm": 7.005282402038574,
+ "learning_rate": 0.0001619599443855452,
+ "loss": 0.1437,
+ "step": 59
+ },
+ {
+ "epoch": 1.0134600158353126,
+ "grad_norm": 28.618061065673828,
+ "learning_rate": 0.00016060117868054789,
+ "loss": 0.4224,
+ "step": 60
+ },
+ {
+ "epoch": 1.0303510160992346,
+ "grad_norm": 4.588587284088135,
+ "learning_rate": 0.00015922533423101844,
+ "loss": 0.2437,
+ "step": 61
+ },
+ {
+ "epoch": 1.0472420163631564,
+ "grad_norm": 5.091424942016602,
+ "learning_rate": 0.00015783287540748105,
+ "loss": 0.2079,
+ "step": 62
+ },
+ {
+ "epoch": 1.0641330166270784,
+ "grad_norm": 4.7382025718688965,
+ "learning_rate": 0.00015642427218808918,
+ "loss": 0.1574,
+ "step": 63
+ },
+ {
+ "epoch": 1.0810240168910004,
+ "grad_norm": 4.417683124542236,
+ "learning_rate": 0.000155,
+ "loss": 0.1475,
+ "step": 64
+ },
+ {
+ "epoch": 1.0979150171549221,
+ "grad_norm": 1.9370671510696411,
+ "learning_rate": 0.00015356053955890993,
+ "loss": 0.1239,
+ "step": 65
+ },
+ {
+ "epoch": 1.114806017418844,
+ "grad_norm": 4.699803829193115,
+ "learning_rate": 0.00015210637670680472,
+ "loss": 0.357,
+ "step": 66
+ },
+ {
+ "epoch": 1.1316970176827659,
+ "grad_norm": 4.869682788848877,
+ "learning_rate": 0.00015063800224798007,
+ "loss": 0.3851,
+ "step": 67
+ },
+ {
+ "epoch": 1.1485880179466879,
+ "grad_norm": 3.7498834133148193,
+ "learning_rate": 0.0001491559117833866,
+ "loss": 0.334,
+ "step": 68
+ },
+ {
+ "epoch": 1.1654790182106096,
+ "grad_norm": 4.540921211242676,
+ "learning_rate": 0.0001476606055433565,
+ "loss": 0.3964,
+ "step": 69
+ },
+ {
+ "epoch": 1.1823700184745316,
+ "grad_norm": 4.821572303771973,
+ "learning_rate": 0.00014615258821876727,
+ "loss": 0.3421,
+ "step": 70
+ },
+ {
+ "epoch": 1.1992610187384534,
+ "grad_norm": 6.549943923950195,
+ "learning_rate": 0.00014463236879070013,
+ "loss": 0.4211,
+ "step": 71
+ },
+ {
+ "epoch": 1.2161520190023754,
+ "grad_norm": 5.620640277862549,
+ "learning_rate": 0.0001431004603586504,
+ "loss": 0.4023,
+ "step": 72
+ },
+ {
+ "epoch": 1.2330430192662971,
+ "grad_norm": 6.463934421539307,
+ "learning_rate": 0.0001415573799673479,
+ "loss": 0.4277,
+ "step": 73
+ },
+ {
+ "epoch": 1.249934019530219,
+ "grad_norm": 3.2470703125,
+ "learning_rate": 0.00014000364843224562,
+ "loss": 0.3017,
+ "step": 74
+ },
+ {
+ "epoch": 1.2668250197941409,
+ "grad_norm": 3.9443600177764893,
+ "learning_rate": 0.00013843979016373573,
+ "loss": 0.3262,
+ "step": 75
+ },
+ {
+ "epoch": 1.2668250197941409,
+ "eval_loss": 0.19759434461593628,
+ "eval_runtime": 0.6939,
+ "eval_samples_per_second": 72.058,
+ "eval_steps_per_second": 18.735,
+ "step": 75
+ },
+ {
+ "epoch": 1.2837160200580628,
+ "grad_norm": 4.828883647918701,
+ "learning_rate": 0.00013686633299015251,
+ "loss": 0.3834,
+ "step": 76
+ },
+ {
+ "epoch": 1.3006070203219848,
+ "grad_norm": 5.459804534912109,
+ "learning_rate": 0.00013528380797962126,
+ "loss": 0.3427,
+ "step": 77
+ },
+ {
+ "epoch": 1.3174980205859066,
+ "grad_norm": 4.264815330505371,
+ "learning_rate": 0.00013369274926081394,
+ "loss": 0.2111,
+ "step": 78
+ },
+ {
+ "epoch": 1.3343890208498284,
+ "grad_norm": 3.133007526397705,
+ "learning_rate": 0.00013209369384267194,
+ "loss": 0.177,
+ "step": 79
+ },
+ {
+ "epoch": 1.3512800211137503,
+ "grad_norm": 2.766847848892212,
+ "learning_rate": 0.00013048718143315643,
+ "loss": 0.1618,
+ "step": 80
+ },
+ {
+ "epoch": 1.3681710213776723,
+ "grad_norm": 3.8833649158477783,
+ "learning_rate": 0.00012887375425708794,
+ "loss": 0.2155,
+ "step": 81
+ },
+ {
+ "epoch": 1.385062021641594,
+ "grad_norm": 3.8597495555877686,
+ "learning_rate": 0.00012725395687313647,
+ "loss": 0.1842,
+ "step": 82
+ },
+ {
+ "epoch": 1.4019530219055159,
+ "grad_norm": 6.810535907745361,
+ "learning_rate": 0.00012562833599002375,
+ "loss": 0.2377,
+ "step": 83
+ },
+ {
+ "epoch": 1.4188440221694378,
+ "grad_norm": 12.93388843536377,
+ "learning_rate": 0.0001239974402819999,
+ "loss": 0.2895,
+ "step": 84
+ },
+ {
+ "epoch": 1.4357350224333598,
+ "grad_norm": 3.9634480476379395,
+ "learning_rate": 0.00012236182020365673,
+ "loss": 0.2547,
+ "step": 85
+ },
+ {
+ "epoch": 1.4526260226972816,
+ "grad_norm": 4.790787220001221,
+ "learning_rate": 0.00012072202780414012,
+ "loss": 0.2652,
+ "step": 86
+ },
+ {
+ "epoch": 1.4695170229612033,
+ "grad_norm": 3.763577699661255,
+ "learning_rate": 0.00011907861654082417,
+ "loss": 0.1602,
+ "step": 87
+ },
+ {
+ "epoch": 1.4864080232251253,
+ "grad_norm": 3.399597644805908,
+ "learning_rate": 0.00011743214109250993,
+ "loss": 0.1438,
+ "step": 88
+ },
+ {
+ "epoch": 1.5032990234890473,
+ "grad_norm": 2.587632417678833,
+ "learning_rate": 0.00011578315717221234,
+ "loss": 0.1121,
+ "step": 89
+ },
+ {
+ "epoch": 1.520190023752969,
+ "grad_norm": 1.242765188217163,
+ "learning_rate": 0.00011413222133959747,
+ "loss": 0.0997,
+ "step": 90
+ },
+ {
+ "epoch": 1.5370810240168908,
+ "grad_norm": 3.1180193424224854,
+ "learning_rate": 0.0001124798908131346,
+ "loss": 0.2238,
+ "step": 91
+ },
+ {
+ "epoch": 1.5539720242808128,
+ "grad_norm": 3.6643130779266357,
+ "learning_rate": 0.00011082672328202539,
+ "loss": 0.312,
+ "step": 92
+ },
+ {
+ "epoch": 1.5708630245447348,
+ "grad_norm": 3.1813502311706543,
+ "learning_rate": 0.00010917327671797463,
+ "loss": 0.31,
+ "step": 93
+ },
+ {
+ "epoch": 1.5877540248086568,
+ "grad_norm": 2.9217069149017334,
+ "learning_rate": 0.00010752010918686544,
+ "loss": 0.2821,
+ "step": 94
+ },
+ {
+ "epoch": 1.6046450250725786,
+ "grad_norm": 3.552128314971924,
+ "learning_rate": 0.00010586777866040254,
+ "loss": 0.2903,
+ "step": 95
+ },
+ {
+ "epoch": 1.6215360253365003,
+ "grad_norm": 5.272158622741699,
+ "learning_rate": 0.0001042168428277877,
+ "loss": 0.3607,
+ "step": 96
+ },
+ {
+ "epoch": 1.6384270256004223,
+ "grad_norm": 3.992403507232666,
+ "learning_rate": 0.0001025678589074901,
+ "loss": 0.2743,
+ "step": 97
+ },
+ {
+ "epoch": 1.6553180258643443,
+ "grad_norm": 3.073756217956543,
+ "learning_rate": 0.00010092138345917588,
+ "loss": 0.2355,
+ "step": 98
+ },
+ {
+ "epoch": 1.672209026128266,
+ "grad_norm": 2.897674083709717,
+ "learning_rate": 9.927797219585989e-05,
+ "loss": 0.206,
+ "step": 99
+ },
+ {
+ "epoch": 1.6891000263921878,
+ "grad_norm": 3.1351470947265625,
+ "learning_rate": 9.763817979634326e-05,
+ "loss": 0.1611,
+ "step": 100
+ },
+ {
+ "epoch": 1.6891000263921878,
+ "eval_loss": 0.12606343626976013,
+ "eval_runtime": 0.6938,
+ "eval_samples_per_second": 72.064,
+ "eval_steps_per_second": 18.737,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -409,7 +775,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3.148081105207296e+17,
+ "total_flos": 6.296162210414592e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null