lesso09 committed (verified)
Commit 1e696a2 · 1 Parent(s): 08abfea

Training in progress, step 84, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4cc8e98653996ccbe8e6e7055cadbd604a38886cb78c8c9dabe55c550b2d8bd2
+ oid sha256:8390484e99a6ca25506ccaf92cb645bb0bf1ab6983a116f36fae158cd06bbfc2
  size 478211024
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2538df05ce2e3d95ac456f624673838f53b318273ce47ca1b7c8a886d4261f6d
+ oid sha256:928608a38f32520a33ef7af17d06b62dc27cd037a2bf71cb5c4720dcb06e835b
  size 243337876
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2e3484a8fae8bef26e8aceca1a4986225c025687e241c60c48ff1e088b574d9c
+ oid sha256:a889f36d0b3b0e6dbbd898a2660bec9b7db6408bce16221a02808a207d0a2e39
  size 15984
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87b0a5e2553ff606038df9001dd88a817d7fc5872b495490bae91e01fad1ec5c
+ oid sha256:5dd921a952ea9c7967e8db6ce5ee54cda5b37007fbc220247a9a4d513537e8b4
  size 15984
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:54bad5f88df8ac9b5c20646448922516fa39d7287816baa41b79627fe21c46aa
+ oid sha256:462ac9d41f9ae16f3d7503a703b674e354b5ce1c9d7b0ab6fc3347bf4f3f0629
  size 15984
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:96a326ecc1330f08d92e39047259a8624f4099061baa191d3c258d0e544825d6
+ oid sha256:92252b6027cdc118ad9dfaf1397e24f9e5791e684dfb65e9d19bca24bf212bb0
  size 15984
last-checkpoint/rng_state_4.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f646d116a4b94d580a5f63a0a9a98584c51569731fd11c856d15da201f219f9
+ oid sha256:91153713420b5f9d63d1f5a6eaee63174c1b2d5735a06a7bbdf0aaa08ff45016
  size 15984
last-checkpoint/rng_state_5.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:330623ee88337c01c8bd095f67d83bf6ad0846e58dd146fa5dde80c5db29f6f9
+ oid sha256:c7affa3a740eb7dfccbe1f59366cf137ac9d6884cf98b06d914099fee6c3e958
  size 15984
last-checkpoint/rng_state_6.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b61ef68b78f474a58968515752e2de9b80e4d7b7fc05d6b1707f02a47d657621
+ oid sha256:c9db0008f3524b7fd4944ab5a33b7a25383d04061f998c04fc1c61e76c29b40c
  size 15984
last-checkpoint/rng_state_7.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6bcd1e4f18fb0864f5598bac2c33d66314f526665ba40514ba24df7c912075fd
+ oid sha256:6901d2a29b14e7a40d89ae1c8eea7922a2dac90b3d788847d854c8acda7fea8f
  size 15984
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6ea1834b19ac5c7c4cdbdb524938202c9fe17deb719aea1234ea8bf8c83879be
+ oid sha256:7f033506de37408b1d728cf1b729bd4839fc41c8f2e858dbc7623579a6647247
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 9.761539459228516,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
- "epoch": 1.8198198198198199,
+ "epoch": 3.081081081081081,
  "eval_steps": 50,
- "global_step": 50,
+ "global_step": 84,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -373,6 +373,244 @@
  "eval_samples_per_second": 131.211,
  "eval_steps_per_second": 4.221,
  "step": 50
+ },
+ {
+ "epoch": 1.855855855855856,
+ "grad_norm": 1.0086657164815892e+18,
+ "learning_rate": 4.528945529245924e-05,
+ "loss": 8.9588,
+ "step": 51
+ },
+ {
+ "epoch": 1.8918918918918919,
+ "grad_norm": 1.4040368349708288e+17,
+ "learning_rate": 4.301797978523293e-05,
+ "loss": 9.0851,
+ "step": 52
+ },
+ {
+ "epoch": 1.9279279279279278,
+ "grad_norm": Infinity,
+ "learning_rate": 4.076719565451069e-05,
+ "loss": 10.925,
+ "step": 53
+ },
+ {
+ "epoch": 1.9639639639639639,
+ "grad_norm": Infinity,
+ "learning_rate": 3.854115896542801e-05,
+ "loss": 8.9936,
+ "step": 54
+ },
+ {
+ "epoch": 2.018018018018018,
+ "grad_norm": Infinity,
+ "learning_rate": 3.6343881186557866e-05,
+ "loss": 17.2423,
+ "step": 55
+ },
+ {
+ "epoch": 2.054054054054054,
+ "grad_norm": Infinity,
+ "learning_rate": 3.417932196097319e-05,
+ "loss": 9.8011,
+ "step": 56
+ },
+ {
+ "epoch": 2.09009009009009,
+ "grad_norm": Infinity,
+ "learning_rate": 3.205138197070241e-05,
+ "loss": 9.3979,
+ "step": 57
+ },
+ {
+ "epoch": 2.126126126126126,
+ "grad_norm": Infinity,
+ "learning_rate": 2.99638959074368e-05,
+ "loss": 9.1279,
+ "step": 58
+ },
+ {
+ "epoch": 2.1621621621621623,
+ "grad_norm": 1.0414861110829122e+19,
+ "learning_rate": 2.7920625562156932e-05,
+ "loss": 8.6924,
+ "step": 59
+ },
+ {
+ "epoch": 2.1981981981981984,
+ "grad_norm": 2.4940312447156224e+16,
+ "learning_rate": 2.5925253046130883e-05,
+ "loss": 8.528,
+ "step": 60
+ },
+ {
+ "epoch": 2.234234234234234,
+ "grad_norm": Infinity,
+ "learning_rate": 2.3981374155501056e-05,
+ "loss": 8.5312,
+ "step": 61
+ },
+ {
+ "epoch": 2.27027027027027,
+ "grad_norm": Infinity,
+ "learning_rate": 2.2092491891416384e-05,
+ "loss": 12.068,
+ "step": 62
+ },
+ {
+ "epoch": 2.3063063063063063,
+ "grad_norm": Infinity,
+ "learning_rate": 2.026201014738749e-05,
+ "loss": 9.5087,
+ "step": 63
+ },
+ {
+ "epoch": 2.3423423423423424,
+ "grad_norm": 8.642452215144907e+18,
+ "learning_rate": 1.849322757524057e-05,
+ "loss": 9.183,
+ "step": 64
+ },
+ {
+ "epoch": 2.3783783783783785,
+ "grad_norm": 7.275224349412426e+18,
+ "learning_rate": 1.6789331640723738e-05,
+ "loss": 8.6685,
+ "step": 65
+ },
+ {
+ "epoch": 2.4144144144144146,
+ "grad_norm": 6.894451927841505e+18,
+ "learning_rate": 1.515339287947842e-05,
+ "loss": 8.5355,
+ "step": 66
+ },
+ {
+ "epoch": 2.4504504504504503,
+ "grad_norm": 563174499680256.0,
+ "learning_rate": 1.3588359363726518e-05,
+ "loss": 4.2306,
+ "step": 67
+ },
+ {
+ "epoch": 2.4864864864864864,
+ "grad_norm": Infinity,
+ "learning_rate": 1.2097051389645058e-05,
+ "loss": 16.6928,
+ "step": 68
+ },
+ {
+ "epoch": 2.5225225225225225,
+ "grad_norm": Infinity,
+ "learning_rate": 1.0682156395001747e-05,
+ "loss": 9.3695,
+ "step": 69
+ },
+ {
+ "epoch": 2.5585585585585586,
+ "grad_norm": 9.359216148710687e+17,
+ "learning_rate": 9.346224116210607e-06,
+ "loss": 8.805,
+ "step": 70
+ },
+ {
+ "epoch": 2.5945945945945947,
+ "grad_norm": 4.12155610163839e+17,
+ "learning_rate": 8.091661993534535e-06,
+ "loss": 8.7406,
+ "step": 71
+ },
+ {
+ "epoch": 2.6306306306306304,
+ "grad_norm": 4.016165506934374e+16,
+ "learning_rate": 6.920730832715246e-06,
+ "loss": 8.5016,
+ "step": 72
+ },
+ {
+ "epoch": 2.6666666666666665,
+ "grad_norm": 1.49182067510136e+18,
+ "learning_rate": 5.835540730848524e-06,
+ "loss": 4.957,
+ "step": 73
+ },
+ {
+ "epoch": 2.7027027027027026,
+ "grad_norm": Infinity,
+ "learning_rate": 4.838047273846599e-06,
+ "loss": 15.8309,
+ "step": 74
+ },
+ {
+ "epoch": 2.7387387387387387,
+ "grad_norm": Infinity,
+ "learning_rate": 3.930048012340241e-06,
+ "loss": 9.5723,
+ "step": 75
+ },
+ {
+ "epoch": 2.774774774774775,
+ "grad_norm": Infinity,
+ "learning_rate": 3.113179222370911e-06,
+ "loss": 9.0965,
+ "step": 76
+ },
+ {
+ "epoch": 2.810810810810811,
+ "grad_norm": 3.073233680802513e+18,
+ "learning_rate": 2.388912956710829e-06,
+ "loss": 8.6315,
+ "step": 77
+ },
+ {
+ "epoch": 2.846846846846847,
+ "grad_norm": 6.825145861652087e+18,
+ "learning_rate": 1.7585543921243115e-06,
+ "loss": 8.6798,
+ "step": 78
+ },
+ {
+ "epoch": 2.8828828828828827,
+ "grad_norm": 7784822034399232.0,
+ "learning_rate": 1.2232394773511053e-06,
+ "loss": 8.6864,
+ "step": 79
+ },
+ {
+ "epoch": 2.918918918918919,
+ "grad_norm": Infinity,
+ "learning_rate": 7.839328860498892e-07,
+ "loss": 10.5033,
+ "step": 80
+ },
+ {
+ "epoch": 2.954954954954955,
+ "grad_norm": Infinity,
+ "learning_rate": 4.414262783912126e-07,
+ "loss": 8.1782,
+ "step": 81
+ },
+ {
+ "epoch": 3.009009009009009,
+ "grad_norm": Infinity,
+ "learning_rate": 1.9633687443230459e-07,
+ "loss": 15.877,
+ "step": 82
+ },
+ {
+ "epoch": 3.045045045045045,
+ "grad_norm": Infinity,
+ "learning_rate": 4.9106341844862845e-08,
+ "loss": 9.9209,
+ "step": 83
+ },
+ {
+ "epoch": 3.081081081081081,
+ "grad_norm": Infinity,
+ "learning_rate": 0.0,
+ "loss": 9.3869,
+ "step": 84
  }
  ],
  "logging_steps": 1,
@@ -396,12 +634,12 @@
  "should_evaluate": false,
  "should_log": false,
  "should_save": true,
- "should_training_stop": false
+ "should_training_stop": true
  },
  "attributes": {}
  }
  },
- "total_flos": 3.081994717227909e+17,
+ "total_flos": 5.181981315112632e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null