RoyJoy committed
Commit 5f18cde · verified · 1 Parent(s): 704bb3e

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:70122e986d394fbfe0435c0902609b1adb13e4ebc8237ec883d148d0c4557c37
+ oid sha256:2088566eadc56a5c8309a9531bb6fcf5a287cf6aed01561219646680e775bc7c
  size 159967880
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d6d2f4b940d8d65716ab8a594f5860b0fa57bc959d29df63fab7ce7fdccbe4f0
+ oid sha256:0afc49d3ba801d7bb515f6132b53e0e8d83661fee7451b4f10d12f29700de203
  size 320194002
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bab730537e540c34cfdf5e0f8be92b1be503e4ce60510134e44b1031525ea7ca
+ oid sha256:ec74150783c2cc872e2529bbd0d2e014450ca2c2d41574b73ae43e04a4e186f6
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:791c32da2eac8608df9f854f10b37ff45c5c0a567c1b47e67904d21876c24b8e
+ oid sha256:b450beb98468102f678848fd7b1c9ac170795ba31d2520dca8e02930e5c50342
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c23b02f10cdba37918608df459e273567fe7542d5699cce224e583af90a91670
+ oid sha256:89d66feb0ea8a54d11448a4600eb4704984427b5101713ae09a68f2ee18ad14b
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:94bbfa45675256c2e5aac009002b2ca52df3bd7639ddccdc88d4f70ac55c4b31
+ oid sha256:9a063ce9295766fdd4ede689a08b3bb1669d43ea325c566a6b0df29b077083b7
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:af0b383cec316f72188c1e51ca087e23b574e1c5f76f3184d352f214c68aea7e
+ oid sha256:c160342f903ff44fe4f09c12524bb2ea4c4fd5ceb523de4433c70f0855514c14
  size 1064
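
All of the checkpoint files above are tracked with Git LFS, so the diff only touches their three-line pointer files: a `version` line, an `oid sha256:` line (the SHA-256 of the actual blob), and a `size` line in bytes. As a minimal sketch (not part of this commit), a downloaded blob can be checked against its pointer with the standard library; the file paths below are hypothetical placeholders.

import hashlib

def parse_lfs_pointer(pointer_text: str) -> dict:
    # Each pointer line is "<key> <value>", e.g. "oid sha256:2088566e...".
    fields = {}
    for line in pointer_text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    # Compare the blob's SHA-256 and size against the pointer's oid/size.
    with open(pointer_path, "r") as f:
        fields = parse_lfs_pointer(f.read())
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    sha = hashlib.sha256()
    size = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Hypothetical paths; adjust to wherever the checkpoint was downloaded.
# print(verify_blob("adapter_model.safetensors.pointer", "adapter_model.safetensors"))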
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 1.1531009674072266,
- "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 0.012589206329223482,
+ "best_metric": 1.106641411781311,
+ "best_model_checkpoint": "miner_id_24/checkpoint-100",
+ "epoch": 0.025178412658446964,
  "eval_steps": 25,
- "global_step": 50,
+ "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -381,6 +381,372 @@
  "eval_samples_per_second": 32.832,
  "eval_steps_per_second": 8.536,
  "step": 50
+ },
+ {
+ "epoch": 0.012840990455807952,
+ "grad_norm": 0.10851637274026871,
+ "learning_rate": 9.207653525908994e-05,
+ "loss": 1.1919,
+ "step": 51
+ },
+ {
+ "epoch": 0.01309277458239242,
+ "grad_norm": 0.12280933558940887,
+ "learning_rate": 9.170690845727655e-05,
+ "loss": 1.2008,
+ "step": 52
+ },
+ {
+ "epoch": 0.01334455870897689,
+ "grad_norm": 0.11200592666864395,
+ "learning_rate": 9.132972809209626e-05,
+ "loss": 1.1743,
+ "step": 53
+ },
+ {
+ "epoch": 0.01359634283556136,
+ "grad_norm": 0.10003667324781418,
+ "learning_rate": 9.094507177988643e-05,
+ "loss": 1.1631,
+ "step": 54
+ },
+ {
+ "epoch": 0.01384812696214583,
+ "grad_norm": 0.09316058456897736,
+ "learning_rate": 9.055301867538794e-05,
+ "loss": 1.2185,
+ "step": 55
+ },
+ {
+ "epoch": 0.0140999110887303,
+ "grad_norm": 0.07689037919044495,
+ "learning_rate": 9.01536494554568e-05,
+ "loss": 1.1249,
+ "step": 56
+ },
+ {
+ "epoch": 0.014351695215314769,
+ "grad_norm": 0.0907893106341362,
+ "learning_rate": 8.974704630246239e-05,
+ "loss": 1.1772,
+ "step": 57
+ },
+ {
+ "epoch": 0.014603479341899239,
+ "grad_norm": 0.09316601604223251,
+ "learning_rate": 8.933329288737597e-05,
+ "loss": 1.1482,
+ "step": 58
+ },
+ {
+ "epoch": 0.014855263468483709,
+ "grad_norm": 0.09033600986003876,
+ "learning_rate": 8.89124743525527e-05,
+ "loss": 1.1258,
+ "step": 59
+ },
+ {
+ "epoch": 0.015107047595068179,
+ "grad_norm": 0.10970163345336914,
+ "learning_rate": 8.848467729421124e-05,
+ "loss": 1.1349,
+ "step": 60
+ },
+ {
+ "epoch": 0.015358831721652647,
+ "grad_norm": 0.10259843617677689,
+ "learning_rate": 8.804998974461371e-05,
+ "loss": 1.1576,
+ "step": 61
+ },
+ {
+ "epoch": 0.015610615848237117,
+ "grad_norm": 0.11174706369638443,
+ "learning_rate": 8.760850115395054e-05,
+ "loss": 1.1705,
+ "step": 62
+ },
+ {
+ "epoch": 0.015862399974821587,
+ "grad_norm": 0.08973610401153564,
+ "learning_rate": 8.716030237193325e-05,
+ "loss": 1.1515,
+ "step": 63
+ },
+ {
+ "epoch": 0.016114184101406057,
+ "grad_norm": 0.08280681818723679,
+ "learning_rate": 8.670548562909947e-05,
+ "loss": 1.1607,
+ "step": 64
+ },
+ {
+ "epoch": 0.016365968227990527,
+ "grad_norm": 0.09221348911523819,
+ "learning_rate": 8.624414451783364e-05,
+ "loss": 1.1482,
+ "step": 65
+ },
+ {
+ "epoch": 0.016617752354574997,
+ "grad_norm": 0.09574998915195465,
+ "learning_rate": 8.577637397310749e-05,
+ "loss": 1.241,
+ "step": 66
+ },
+ {
+ "epoch": 0.016869536481159467,
+ "grad_norm": 0.09795909374952316,
+ "learning_rate": 8.530227025294435e-05,
+ "loss": 1.1739,
+ "step": 67
+ },
+ {
+ "epoch": 0.017121320607743934,
+ "grad_norm": 0.09504640102386475,
+ "learning_rate": 8.482193091861112e-05,
+ "loss": 1.1334,
+ "step": 68
+ },
+ {
+ "epoch": 0.017373104734328404,
+ "grad_norm": 0.08822384476661682,
+ "learning_rate": 8.433545481454206e-05,
+ "loss": 1.137,
+ "step": 69
+ },
+ {
+ "epoch": 0.017624888860912874,
+ "grad_norm": 0.0878458246588707,
+ "learning_rate": 8.384294204799853e-05,
+ "loss": 1.1719,
+ "step": 70
+ },
+ {
+ "epoch": 0.017876672987497344,
+ "grad_norm": 0.08970195800065994,
+ "learning_rate": 8.334449396846886e-05,
+ "loss": 1.1298,
+ "step": 71
+ },
+ {
+ "epoch": 0.018128457114081814,
+ "grad_norm": 0.0950387567281723,
+ "learning_rate": 8.284021314681265e-05,
+ "loss": 1.1556,
+ "step": 72
+ },
+ {
+ "epoch": 0.018380241240666284,
+ "grad_norm": 0.09765107929706573,
+ "learning_rate": 8.233020335415371e-05,
+ "loss": 1.1732,
+ "step": 73
+ },
+ {
+ "epoch": 0.018632025367250754,
+ "grad_norm": 0.11165706068277359,
+ "learning_rate": 8.18145695405259e-05,
+ "loss": 1.1746,
+ "step": 74
+ },
+ {
+ "epoch": 0.018883809493835224,
+ "grad_norm": 0.15581081807613373,
+ "learning_rate": 8.129341781327658e-05,
+ "loss": 1.0963,
+ "step": 75
+ },
+ {
+ "epoch": 0.018883809493835224,
+ "eval_loss": 1.122991681098938,
+ "eval_runtime": 1.5233,
+ "eval_samples_per_second": 32.822,
+ "eval_steps_per_second": 8.534,
+ "step": 75
+ },
+ {
+ "epoch": 0.019135593620419694,
+ "grad_norm": 0.08741319179534912,
+ "learning_rate": 8.07668554152317e-05,
+ "loss": 1.1688,
+ "step": 76
+ },
+ {
+ "epoch": 0.01938737774700416,
+ "grad_norm": 0.08875293284654617,
+ "learning_rate": 8.02349907026274e-05,
+ "loss": 1.1829,
+ "step": 77
+ },
+ {
+ "epoch": 0.01963916187358863,
+ "grad_norm": 0.08861377090215683,
+ "learning_rate": 7.969793312281237e-05,
+ "loss": 1.1803,
+ "step": 78
+ },
+ {
+ "epoch": 0.0198909460001731,
+ "grad_norm": 0.09791383892297745,
+ "learning_rate": 7.915579319172573e-05,
+ "loss": 1.2001,
+ "step": 79
+ },
+ {
+ "epoch": 0.02014273012675757,
+ "grad_norm": 0.0991906151175499,
+ "learning_rate": 7.860868247115505e-05,
+ "loss": 1.1669,
+ "step": 80
+ },
+ {
+ "epoch": 0.02039451425334204,
+ "grad_norm": 0.09529578685760498,
+ "learning_rate": 7.805671354577908e-05,
+ "loss": 1.1522,
+ "step": 81
+ },
+ {
+ "epoch": 0.02064629837992651,
+ "grad_norm": 0.09793104231357574,
+ "learning_rate": 7.75e-05,
+ "loss": 1.1407,
+ "step": 82
+ },
+ {
+ "epoch": 0.02089808250651098,
+ "grad_norm": 0.09607716649770737,
+ "learning_rate": 7.693865639457011e-05,
+ "loss": 1.1291,
+ "step": 83
+ },
+ {
+ "epoch": 0.02114986663309545,
+ "grad_norm": 0.10056506842374802,
+ "learning_rate": 7.637279824301728e-05,
+ "loss": 1.1124,
+ "step": 84
+ },
+ {
+ "epoch": 0.02140165075967992,
+ "grad_norm": 0.10144215822219849,
+ "learning_rate": 7.580254198787463e-05,
+ "loss": 1.1022,
+ "step": 85
+ },
+ {
+ "epoch": 0.021653434886264387,
+ "grad_norm": 0.10326708108186722,
+ "learning_rate": 7.522800497671897e-05,
+ "loss": 1.073,
+ "step": 86
+ },
+ {
+ "epoch": 0.021905219012848857,
+ "grad_norm": 0.1260983794927597,
+ "learning_rate": 7.464930543802289e-05,
+ "loss": 1.1218,
+ "step": 87
+ },
+ {
+ "epoch": 0.022157003139433327,
+ "grad_norm": 0.10883725434541702,
+ "learning_rate": 7.406656245682565e-05,
+ "loss": 1.1167,
+ "step": 88
+ },
+ {
+ "epoch": 0.022408787266017798,
+ "grad_norm": 0.09835278987884521,
+ "learning_rate": 7.34798959502279e-05,
+ "loss": 1.1506,
+ "step": 89
+ },
+ {
+ "epoch": 0.022660571392602268,
+ "grad_norm": 0.10083261877298355,
+ "learning_rate": 7.288942664271503e-05,
+ "loss": 1.1792,
+ "step": 90
+ },
+ {
+ "epoch": 0.022912355519186738,
+ "grad_norm": 0.10250851511955261,
+ "learning_rate": 7.229527604131436e-05,
+ "loss": 1.1897,
+ "step": 91
+ },
+ {
+ "epoch": 0.023164139645771208,
+ "grad_norm": 0.1085507944226265,
+ "learning_rate": 7.16975664105915e-05,
+ "loss": 1.1402,
+ "step": 92
+ },
+ {
+ "epoch": 0.023415923772355678,
+ "grad_norm": 0.10734712332487106,
+ "learning_rate": 7.109642074749067e-05,
+ "loss": 1.0878,
+ "step": 93
+ },
+ {
+ "epoch": 0.023667707898940148,
+ "grad_norm": 0.10455948859453201,
+ "learning_rate": 7.049196275602421e-05,
+ "loss": 1.116,
+ "step": 94
+ },
+ {
+ "epoch": 0.023919492025524614,
+ "grad_norm": 0.10420308262109756,
+ "learning_rate": 6.988431682181693e-05,
+ "loss": 1.1243,
+ "step": 95
+ },
+ {
+ "epoch": 0.024171276152109084,
+ "grad_norm": 0.10482881218194962,
+ "learning_rate": 6.927360798650978e-05,
+ "loss": 1.1198,
+ "step": 96
+ },
+ {
+ "epoch": 0.024423060278693554,
+ "grad_norm": 0.11121159791946411,
+ "learning_rate": 6.865996192202884e-05,
+ "loss": 1.1097,
+ "step": 97
+ },
+ {
+ "epoch": 0.024674844405278024,
+ "grad_norm": 0.11902576684951782,
+ "learning_rate": 6.804350490472446e-05,
+ "loss": 1.1056,
+ "step": 98
+ },
+ {
+ "epoch": 0.024926628531862494,
+ "grad_norm": 0.12510505318641663,
+ "learning_rate": 6.742436378938612e-05,
+ "loss": 1.109,
+ "step": 99
+ },
+ {
+ "epoch": 0.025178412658446964,
+ "grad_norm": 0.1589539796113968,
+ "learning_rate": 6.680266598313802e-05,
+ "loss": 1.065,
+ "step": 100
+ },
+ {
+ "epoch": 0.025178412658446964,
+ "eval_loss": 1.106641411781311,
+ "eval_runtime": 1.5233,
+ "eval_samples_per_second": 32.824,
+ "eval_steps_per_second": 8.534,
+ "step": 100
  }
  ],
  "logging_steps": 1,
@@ -409,7 +775,7 @@
  "attributes": {}
  }
  },
- "total_flos": 5.391030065037312e+17,
+ "total_flos": 1.0782060130074624e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null