{
"best_metric": 2.118539333343506,
"best_model_checkpoint": "miner_id_24/checkpoint-200",
"epoch": 0.01761959298740199,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.809796493700995e-05,
"grad_norm": 1.3703079223632812,
"learning_rate": 1e-05,
"loss": 3.0141,
"step": 1
},
{
"epoch": 8.809796493700995e-05,
"eval_loss": 3.609485387802124,
"eval_runtime": 1534.0007,
"eval_samples_per_second": 12.463,
"eval_steps_per_second": 3.116,
"step": 1
},
{
"epoch": 0.0001761959298740199,
"grad_norm": 1.5978015661239624,
"learning_rate": 2e-05,
"loss": 3.5118,
"step": 2
},
{
"epoch": 0.0002642938948110299,
"grad_norm": 2.8979551792144775,
"learning_rate": 3e-05,
"loss": 3.7367,
"step": 3
},
{
"epoch": 0.0003523918597480398,
"grad_norm": 3.107316017150879,
"learning_rate": 4e-05,
"loss": 3.9572,
"step": 4
},
{
"epoch": 0.00044048982468504976,
"grad_norm": 3.510542631149292,
"learning_rate": 5e-05,
"loss": 4.2917,
"step": 5
},
{
"epoch": 0.0005285877896220598,
"grad_norm": 4.381110668182373,
"learning_rate": 6e-05,
"loss": 5.3797,
"step": 6
},
{
"epoch": 0.0006166857545590697,
"grad_norm": 1.7741827964782715,
"learning_rate": 7e-05,
"loss": 2.88,
"step": 7
},
{
"epoch": 0.0007047837194960796,
"grad_norm": 1.8455522060394287,
"learning_rate": 8e-05,
"loss": 2.7309,
"step": 8
},
{
"epoch": 0.0007928816844330896,
"grad_norm": 1.3065730333328247,
"learning_rate": 9e-05,
"loss": 2.3478,
"step": 9
},
{
"epoch": 0.0008809796493700995,
"grad_norm": 1.3236929178237915,
"learning_rate": 0.0001,
"loss": 2.5093,
"step": 10
},
{
"epoch": 0.0009690776143071095,
"grad_norm": 1.2378246784210205,
"learning_rate": 9.999316524962345e-05,
"loss": 2.1896,
"step": 11
},
{
"epoch": 0.0010571755792441195,
"grad_norm": 1.4242873191833496,
"learning_rate": 9.997266286704631e-05,
"loss": 1.9805,
"step": 12
},
{
"epoch": 0.0011452735441811294,
"grad_norm": 2.1129419803619385,
"learning_rate": 9.993849845741524e-05,
"loss": 2.3077,
"step": 13
},
{
"epoch": 0.0012333715091181394,
"grad_norm": 1.7962192296981812,
"learning_rate": 9.989068136093873e-05,
"loss": 2.0388,
"step": 14
},
{
"epoch": 0.0013214694740551493,
"grad_norm": 1.2544958591461182,
"learning_rate": 9.98292246503335e-05,
"loss": 2.512,
"step": 15
},
{
"epoch": 0.0014095674389921593,
"grad_norm": 1.01021409034729,
"learning_rate": 9.975414512725057e-05,
"loss": 1.9605,
"step": 16
},
{
"epoch": 0.0014976654039291692,
"grad_norm": 1.3743892908096313,
"learning_rate": 9.966546331768191e-05,
"loss": 2.2432,
"step": 17
},
{
"epoch": 0.0015857633688661792,
"grad_norm": 1.0722851753234863,
"learning_rate": 9.956320346634876e-05,
"loss": 2.0226,
"step": 18
},
{
"epoch": 0.001673861333803189,
"grad_norm": 1.3317127227783203,
"learning_rate": 9.944739353007344e-05,
"loss": 2.1351,
"step": 19
},
{
"epoch": 0.001761959298740199,
"grad_norm": 1.0619531869888306,
"learning_rate": 9.931806517013612e-05,
"loss": 2.4753,
"step": 20
},
{
"epoch": 0.001850057263677209,
"grad_norm": 1.039036512374878,
"learning_rate": 9.917525374361912e-05,
"loss": 2.6494,
"step": 21
},
{
"epoch": 0.001938155228614219,
"grad_norm": 1.1717562675476074,
"learning_rate": 9.901899829374047e-05,
"loss": 2.0948,
"step": 22
},
{
"epoch": 0.002026253193551229,
"grad_norm": 1.2023098468780518,
"learning_rate": 9.884934153917997e-05,
"loss": 2.4069,
"step": 23
},
{
"epoch": 0.002114351158488239,
"grad_norm": 1.0159027576446533,
"learning_rate": 9.86663298624003e-05,
"loss": 1.5713,
"step": 24
},
{
"epoch": 0.0022024491234252487,
"grad_norm": 0.9439099431037903,
"learning_rate": 9.847001329696653e-05,
"loss": 2.2179,
"step": 25
},
{
"epoch": 0.002290547088362259,
"grad_norm": 1.4066282510757446,
"learning_rate": 9.826044551386744e-05,
"loss": 2.1787,
"step": 26
},
{
"epoch": 0.0023786450532992686,
"grad_norm": 1.134215235710144,
"learning_rate": 9.803768380684242e-05,
"loss": 2.0327,
"step": 27
},
{
"epoch": 0.0024667430182362788,
"grad_norm": 1.1775543689727783,
"learning_rate": 9.780178907671789e-05,
"loss": 2.4197,
"step": 28
},
{
"epoch": 0.0025548409831732885,
"grad_norm": 1.0036230087280273,
"learning_rate": 9.755282581475769e-05,
"loss": 2.3988,
"step": 29
},
{
"epoch": 0.0026429389481102987,
"grad_norm": 1.5884368419647217,
"learning_rate": 9.729086208503174e-05,
"loss": 1.7062,
"step": 30
},
{
"epoch": 0.002731036913047309,
"grad_norm": 0.9893029928207397,
"learning_rate": 9.701596950580806e-05,
"loss": 2.1408,
"step": 31
},
{
"epoch": 0.0028191348779843185,
"grad_norm": 1.0743763446807861,
"learning_rate": 9.672822322997305e-05,
"loss": 1.8952,
"step": 32
},
{
"epoch": 0.0029072328429213287,
"grad_norm": 1.1220619678497314,
"learning_rate": 9.642770192448536e-05,
"loss": 2.0744,
"step": 33
},
{
"epoch": 0.0029953308078583384,
"grad_norm": 1.105539083480835,
"learning_rate": 9.611448774886924e-05,
"loss": 2.3422,
"step": 34
},
{
"epoch": 0.0030834287727953486,
"grad_norm": 1.1663483381271362,
"learning_rate": 9.578866633275288e-05,
"loss": 2.2309,
"step": 35
},
{
"epoch": 0.0031715267377323583,
"grad_norm": 1.247113585472107,
"learning_rate": 9.545032675245813e-05,
"loss": 2.1887,
"step": 36
},
{
"epoch": 0.0032596247026693685,
"grad_norm": 1.1522586345672607,
"learning_rate": 9.509956150664796e-05,
"loss": 2.2006,
"step": 37
},
{
"epoch": 0.003347722667606378,
"grad_norm": 1.322829008102417,
"learning_rate": 9.473646649103818e-05,
"loss": 2.5447,
"step": 38
},
{
"epoch": 0.0034358206325433883,
"grad_norm": 1.4406850337982178,
"learning_rate": 9.43611409721806e-05,
"loss": 2.9089,
"step": 39
},
{
"epoch": 0.003523918597480398,
"grad_norm": 1.6782419681549072,
"learning_rate": 9.397368756032445e-05,
"loss": 2.8456,
"step": 40
},
{
"epoch": 0.0036120165624174082,
"grad_norm": 1.5158904790878296,
"learning_rate": 9.357421218136386e-05,
"loss": 2.8615,
"step": 41
},
{
"epoch": 0.003700114527354418,
"grad_norm": 2.1779725551605225,
"learning_rate": 9.316282404787871e-05,
"loss": 2.5124,
"step": 42
},
{
"epoch": 0.003788212492291428,
"grad_norm": 3.116645097732544,
"learning_rate": 9.273963562927695e-05,
"loss": 2.8568,
"step": 43
},
{
"epoch": 0.003876310457228438,
"grad_norm": 1.9371808767318726,
"learning_rate": 9.230476262104677e-05,
"loss": 3.2537,
"step": 44
},
{
"epoch": 0.0039644084221654476,
"grad_norm": 2.267286539077759,
"learning_rate": 9.185832391312644e-05,
"loss": 3.2095,
"step": 45
},
{
"epoch": 0.004052506387102458,
"grad_norm": 2.8555848598480225,
"learning_rate": 9.140044155740101e-05,
"loss": 2.9137,
"step": 46
},
{
"epoch": 0.004140604352039468,
"grad_norm": 3.2448887825012207,
"learning_rate": 9.093124073433463e-05,
"loss": 2.9787,
"step": 47
},
{
"epoch": 0.004228702316976478,
"grad_norm": 3.1865904331207275,
"learning_rate": 9.045084971874738e-05,
"loss": 3.3278,
"step": 48
},
{
"epoch": 0.004316800281913488,
"grad_norm": 3.5772500038146973,
"learning_rate": 8.995939984474624e-05,
"loss": 3.124,
"step": 49
},
{
"epoch": 0.0044048982468504975,
"grad_norm": 4.340330600738525,
"learning_rate": 8.945702546981969e-05,
"loss": 3.68,
"step": 50
},
{
"epoch": 0.0044048982468504975,
"eval_loss": 2.4575276374816895,
"eval_runtime": 1543.3782,
"eval_samples_per_second": 12.387,
"eval_steps_per_second": 3.097,
"step": 50
},
{
"epoch": 0.004492996211787508,
"grad_norm": 3.2812650203704834,
"learning_rate": 8.894386393810563e-05,
"loss": 2.6602,
"step": 51
},
{
"epoch": 0.004581094176724518,
"grad_norm": 2.0209906101226807,
"learning_rate": 8.842005554284296e-05,
"loss": 2.5236,
"step": 52
},
{
"epoch": 0.004669192141661528,
"grad_norm": 2.413069725036621,
"learning_rate": 8.788574348801675e-05,
"loss": 2.3756,
"step": 53
},
{
"epoch": 0.004757290106598537,
"grad_norm": 1.528618335723877,
"learning_rate": 8.73410738492077e-05,
"loss": 2.7807,
"step": 54
},
{
"epoch": 0.004845388071535547,
"grad_norm": 4.053102493286133,
"learning_rate": 8.678619553365659e-05,
"loss": 2.5973,
"step": 55
},
{
"epoch": 0.0049334860364725576,
"grad_norm": 4.596653938293457,
"learning_rate": 8.622126023955446e-05,
"loss": 2.4458,
"step": 56
},
{
"epoch": 0.005021584001409568,
"grad_norm": 1.9208513498306274,
"learning_rate": 8.564642241456986e-05,
"loss": 1.7748,
"step": 57
},
{
"epoch": 0.005109681966346577,
"grad_norm": 1.5058166980743408,
"learning_rate": 8.506183921362443e-05,
"loss": 1.8438,
"step": 58
},
{
"epoch": 0.005197779931283587,
"grad_norm": 1.7489439249038696,
"learning_rate": 8.44676704559283e-05,
"loss": 1.9541,
"step": 59
},
{
"epoch": 0.005285877896220597,
"grad_norm": 1.639020323753357,
"learning_rate": 8.386407858128706e-05,
"loss": 2.0599,
"step": 60
},
{
"epoch": 0.0053739758611576075,
"grad_norm": 1.192241907119751,
"learning_rate": 8.32512286056924e-05,
"loss": 1.9016,
"step": 61
},
{
"epoch": 0.005462073826094618,
"grad_norm": 0.9033663272857666,
"learning_rate": 8.262928807620843e-05,
"loss": 1.8668,
"step": 62
},
{
"epoch": 0.005550171791031627,
"grad_norm": 1.055040717124939,
"learning_rate": 8.199842702516583e-05,
"loss": 1.8006,
"step": 63
},
{
"epoch": 0.005638269755968637,
"grad_norm": 0.9945392608642578,
"learning_rate": 8.135881792367686e-05,
"loss": 1.8398,
"step": 64
},
{
"epoch": 0.005726367720905647,
"grad_norm": 0.9917660355567932,
"learning_rate": 8.07106356344834e-05,
"loss": 1.6722,
"step": 65
},
{
"epoch": 0.005814465685842657,
"grad_norm": 0.8994877338409424,
"learning_rate": 8.005405736415126e-05,
"loss": 1.9905,
"step": 66
},
{
"epoch": 0.005902563650779667,
"grad_norm": 0.9745481014251709,
"learning_rate": 7.938926261462366e-05,
"loss": 1.8248,
"step": 67
},
{
"epoch": 0.005990661615716677,
"grad_norm": 0.9892241954803467,
"learning_rate": 7.871643313414718e-05,
"loss": 1.9396,
"step": 68
},
{
"epoch": 0.006078759580653687,
"grad_norm": 1.2636889219284058,
"learning_rate": 7.803575286758364e-05,
"loss": 1.7582,
"step": 69
},
{
"epoch": 0.006166857545590697,
"grad_norm": 1.0713516473770142,
"learning_rate": 7.734740790612136e-05,
"loss": 2.3583,
"step": 70
},
{
"epoch": 0.0062549555105277065,
"grad_norm": 0.9909570217132568,
"learning_rate": 7.66515864363997e-05,
"loss": 1.9061,
"step": 71
},
{
"epoch": 0.006343053475464717,
"grad_norm": 0.93809974193573,
"learning_rate": 7.594847868906076e-05,
"loss": 2.0174,
"step": 72
},
{
"epoch": 0.006431151440401727,
"grad_norm": 0.9634197354316711,
"learning_rate": 7.52382768867422e-05,
"loss": 2.0958,
"step": 73
},
{
"epoch": 0.006519249405338737,
"grad_norm": 1.0403608083724976,
"learning_rate": 7.452117519152542e-05,
"loss": 2.012,
"step": 74
},
{
"epoch": 0.006607347370275746,
"grad_norm": 0.8340011835098267,
"learning_rate": 7.379736965185368e-05,
"loss": 1.9449,
"step": 75
},
{
"epoch": 0.006695445335212756,
"grad_norm": 0.9526219964027405,
"learning_rate": 7.30670581489344e-05,
"loss": 2.333,
"step": 76
},
{
"epoch": 0.0067835433001497665,
"grad_norm": 0.966435968875885,
"learning_rate": 7.233044034264034e-05,
"loss": 2.0382,
"step": 77
},
{
"epoch": 0.006871641265086777,
"grad_norm": 1.0597312450408936,
"learning_rate": 7.158771761692464e-05,
"loss": 1.9467,
"step": 78
},
{
"epoch": 0.006959739230023787,
"grad_norm": 0.8439422249794006,
"learning_rate": 7.083909302476453e-05,
"loss": 1.6777,
"step": 79
},
{
"epoch": 0.007047837194960796,
"grad_norm": 0.9130121469497681,
"learning_rate": 7.008477123264848e-05,
"loss": 2.2626,
"step": 80
},
{
"epoch": 0.007135935159897806,
"grad_norm": 1.0774731636047363,
"learning_rate": 6.932495846462261e-05,
"loss": 2.4193,
"step": 81
},
{
"epoch": 0.0072240331248348165,
"grad_norm": 0.9519051313400269,
"learning_rate": 6.855986244591104e-05,
"loss": 1.9523,
"step": 82
},
{
"epoch": 0.007312131089771827,
"grad_norm": 1.3067713975906372,
"learning_rate": 6.778969234612584e-05,
"loss": 2.5505,
"step": 83
},
{
"epoch": 0.007400229054708836,
"grad_norm": 1.2102036476135254,
"learning_rate": 6.701465872208216e-05,
"loss": 2.4259,
"step": 84
},
{
"epoch": 0.007488327019645846,
"grad_norm": 1.0884771347045898,
"learning_rate": 6.623497346023418e-05,
"loss": 2.0779,
"step": 85
},
{
"epoch": 0.007576424984582856,
"grad_norm": 1.0241056680679321,
"learning_rate": 6.545084971874738e-05,
"loss": 2.3429,
"step": 86
},
{
"epoch": 0.007664522949519866,
"grad_norm": 1.287420392036438,
"learning_rate": 6.466250186922325e-05,
"loss": 2.9207,
"step": 87
},
{
"epoch": 0.007752620914456876,
"grad_norm": 1.1505160331726074,
"learning_rate": 6.387014543809223e-05,
"loss": 2.3306,
"step": 88
},
{
"epoch": 0.007840718879393886,
"grad_norm": 1.435824990272522,
"learning_rate": 6.307399704769099e-05,
"loss": 2.7714,
"step": 89
},
{
"epoch": 0.007928816844330895,
"grad_norm": 1.29757559299469,
"learning_rate": 6.227427435703997e-05,
"loss": 2.6654,
"step": 90
},
{
"epoch": 0.008016914809267906,
"grad_norm": 1.518862247467041,
"learning_rate": 6.147119600233758e-05,
"loss": 2.2625,
"step": 91
},
{
"epoch": 0.008105012774204915,
"grad_norm": 1.9497013092041016,
"learning_rate": 6.066498153718735e-05,
"loss": 2.633,
"step": 92
},
{
"epoch": 0.008193110739141926,
"grad_norm": 2.0879249572753906,
"learning_rate": 5.985585137257401e-05,
"loss": 2.5696,
"step": 93
},
{
"epoch": 0.008281208704078936,
"grad_norm": 1.385206937789917,
"learning_rate": 5.90440267166055e-05,
"loss": 2.8205,
"step": 94
},
{
"epoch": 0.008369306669015945,
"grad_norm": 1.7030164003372192,
"learning_rate": 5.8229729514036705e-05,
"loss": 2.6852,
"step": 95
},
{
"epoch": 0.008457404633952956,
"grad_norm": 1.7870999574661255,
"learning_rate": 5.74131823855921e-05,
"loss": 2.816,
"step": 96
},
{
"epoch": 0.008545502598889965,
"grad_norm": 1.800995945930481,
"learning_rate": 5.6594608567103456e-05,
"loss": 3.0532,
"step": 97
},
{
"epoch": 0.008633600563826976,
"grad_norm": 1.7901670932769775,
"learning_rate": 5.577423184847932e-05,
"loss": 2.8305,
"step": 98
},
{
"epoch": 0.008721698528763986,
"grad_norm": 2.5747008323669434,
"learning_rate": 5.495227651252315e-05,
"loss": 3.1613,
"step": 99
},
{
"epoch": 0.008809796493700995,
"grad_norm": 4.401782989501953,
"learning_rate": 5.4128967273616625e-05,
"loss": 3.2871,
"step": 100
},
{
"epoch": 0.008809796493700995,
"eval_loss": 2.215390682220459,
"eval_runtime": 1540.1875,
"eval_samples_per_second": 12.413,
"eval_steps_per_second": 3.104,
"step": 100
},
{
"epoch": 0.008897894458638006,
"grad_norm": 0.9050671458244324,
"learning_rate": 5.330452921628497e-05,
"loss": 2.2883,
"step": 101
},
{
"epoch": 0.008985992423575015,
"grad_norm": 1.1153374910354614,
"learning_rate": 5.247918773366112e-05,
"loss": 2.3626,
"step": 102
},
{
"epoch": 0.009074090388512025,
"grad_norm": 1.0824012756347656,
"learning_rate": 5.165316846586541e-05,
"loss": 2.0213,
"step": 103
},
{
"epoch": 0.009162188353449036,
"grad_norm": 1.352673888206482,
"learning_rate": 5.0826697238317935e-05,
"loss": 2.3118,
"step": 104
},
{
"epoch": 0.009250286318386045,
"grad_norm": 1.4655729532241821,
"learning_rate": 5e-05,
"loss": 2.1875,
"step": 105
},
{
"epoch": 0.009338384283323056,
"grad_norm": 2.1609301567077637,
"learning_rate": 4.917330276168208e-05,
"loss": 2.2598,
"step": 106
},
{
"epoch": 0.009426482248260065,
"grad_norm": 1.9131826162338257,
"learning_rate": 4.834683153413459e-05,
"loss": 1.96,
"step": 107
},
{
"epoch": 0.009514580213197074,
"grad_norm": 1.315741777420044,
"learning_rate": 4.7520812266338885e-05,
"loss": 1.8584,
"step": 108
},
{
"epoch": 0.009602678178134086,
"grad_norm": 1.335062026977539,
"learning_rate": 4.669547078371504e-05,
"loss": 1.7208,
"step": 109
},
{
"epoch": 0.009690776143071095,
"grad_norm": 1.405185580253601,
"learning_rate": 4.5871032726383386e-05,
"loss": 1.9594,
"step": 110
},
{
"epoch": 0.009778874108008106,
"grad_norm": 1.1304210424423218,
"learning_rate": 4.504772348747687e-05,
"loss": 1.7369,
"step": 111
},
{
"epoch": 0.009866972072945115,
"grad_norm": 1.1716078519821167,
"learning_rate": 4.4225768151520694e-05,
"loss": 1.6646,
"step": 112
},
{
"epoch": 0.009955070037882124,
"grad_norm": 1.1705647706985474,
"learning_rate": 4.3405391432896555e-05,
"loss": 1.9726,
"step": 113
},
{
"epoch": 0.010043168002819135,
"grad_norm": 0.96344393491745,
"learning_rate": 4.2586817614407895e-05,
"loss": 1.8153,
"step": 114
},
{
"epoch": 0.010131265967756145,
"grad_norm": 1.2785229682922363,
"learning_rate": 4.17702704859633e-05,
"loss": 2.0573,
"step": 115
},
{
"epoch": 0.010219363932693154,
"grad_norm": 0.9819148778915405,
"learning_rate": 4.095597328339452e-05,
"loss": 2.1911,
"step": 116
},
{
"epoch": 0.010307461897630165,
"grad_norm": 0.9272201657295227,
"learning_rate": 4.0144148627425993e-05,
"loss": 1.7888,
"step": 117
},
{
"epoch": 0.010395559862567174,
"grad_norm": 1.0092214345932007,
"learning_rate": 3.933501846281267e-05,
"loss": 1.7979,
"step": 118
},
{
"epoch": 0.010483657827504185,
"grad_norm": 0.9747551679611206,
"learning_rate": 3.852880399766243e-05,
"loss": 2.1713,
"step": 119
},
{
"epoch": 0.010571755792441195,
"grad_norm": 0.7432554364204407,
"learning_rate": 3.772572564296005e-05,
"loss": 1.7009,
"step": 120
},
{
"epoch": 0.010659853757378204,
"grad_norm": 0.921968936920166,
"learning_rate": 3.6926002952309016e-05,
"loss": 1.7757,
"step": 121
},
{
"epoch": 0.010747951722315215,
"grad_norm": 1.2300440073013306,
"learning_rate": 3.612985456190778e-05,
"loss": 2.2349,
"step": 122
},
{
"epoch": 0.010836049687252224,
"grad_norm": 0.9479213356971741,
"learning_rate": 3.533749813077677e-05,
"loss": 1.6906,
"step": 123
},
{
"epoch": 0.010924147652189235,
"grad_norm": 1.0780354738235474,
"learning_rate": 3.4549150281252636e-05,
"loss": 2.0537,
"step": 124
},
{
"epoch": 0.011012245617126245,
"grad_norm": 1.0302643775939941,
"learning_rate": 3.3765026539765834e-05,
"loss": 1.7242,
"step": 125
},
{
"epoch": 0.011100343582063254,
"grad_norm": 0.8660168051719666,
"learning_rate": 3.298534127791785e-05,
"loss": 2.0556,
"step": 126
},
{
"epoch": 0.011188441547000265,
"grad_norm": 0.862213134765625,
"learning_rate": 3.221030765387417e-05,
"loss": 2.1566,
"step": 127
},
{
"epoch": 0.011276539511937274,
"grad_norm": 0.8829602003097534,
"learning_rate": 3.144013755408895e-05,
"loss": 1.9982,
"step": 128
},
{
"epoch": 0.011364637476874283,
"grad_norm": 1.0032317638397217,
"learning_rate": 3.0675041535377405e-05,
"loss": 2.1343,
"step": 129
},
{
"epoch": 0.011452735441811294,
"grad_norm": 0.7568157911300659,
"learning_rate": 2.991522876735154e-05,
"loss": 1.4642,
"step": 130
},
{
"epoch": 0.011540833406748304,
"grad_norm": 0.8952288031578064,
"learning_rate": 2.916090697523549e-05,
"loss": 1.9543,
"step": 131
},
{
"epoch": 0.011628931371685315,
"grad_norm": 1.0352801084518433,
"learning_rate": 2.8412282383075363e-05,
"loss": 2.2417,
"step": 132
},
{
"epoch": 0.011717029336622324,
"grad_norm": 1.1499708890914917,
"learning_rate": 2.766955965735968e-05,
"loss": 2.2376,
"step": 133
},
{
"epoch": 0.011805127301559333,
"grad_norm": 0.9345830678939819,
"learning_rate": 2.693294185106562e-05,
"loss": 2.1228,
"step": 134
},
{
"epoch": 0.011893225266496344,
"grad_norm": 1.0170817375183105,
"learning_rate": 2.6202630348146324e-05,
"loss": 2.0897,
"step": 135
},
{
"epoch": 0.011981323231433354,
"grad_norm": 1.066669225692749,
"learning_rate": 2.547882480847461e-05,
"loss": 2.1516,
"step": 136
},
{
"epoch": 0.012069421196370363,
"grad_norm": 1.1325106620788574,
"learning_rate": 2.476172311325783e-05,
"loss": 2.1015,
"step": 137
},
{
"epoch": 0.012157519161307374,
"grad_norm": 0.9881788492202759,
"learning_rate": 2.405152131093926e-05,
"loss": 2.2977,
"step": 138
},
{
"epoch": 0.012245617126244383,
"grad_norm": 1.055762767791748,
"learning_rate": 2.3348413563600325e-05,
"loss": 2.4716,
"step": 139
},
{
"epoch": 0.012333715091181394,
"grad_norm": 1.007345199584961,
"learning_rate": 2.2652592093878666e-05,
"loss": 2.5644,
"step": 140
},
{
"epoch": 0.012421813056118404,
"grad_norm": 1.18160879611969,
"learning_rate": 2.196424713241637e-05,
"loss": 2.4766,
"step": 141
},
{
"epoch": 0.012509911021055413,
"grad_norm": 1.1100205183029175,
"learning_rate": 2.128356686585282e-05,
"loss": 2.4212,
"step": 142
},
{
"epoch": 0.012598008985992424,
"grad_norm": 1.360809564590454,
"learning_rate": 2.061073738537635e-05,
"loss": 2.4825,
"step": 143
},
{
"epoch": 0.012686106950929433,
"grad_norm": 1.160150408744812,
"learning_rate": 1.9945942635848748e-05,
"loss": 2.5554,
"step": 144
},
{
"epoch": 0.012774204915866444,
"grad_norm": 1.450110912322998,
"learning_rate": 1.928936436551661e-05,
"loss": 2.6773,
"step": 145
},
{
"epoch": 0.012862302880803454,
"grad_norm": 1.3221920728683472,
"learning_rate": 1.8641182076323148e-05,
"loss": 2.5966,
"step": 146
},
{
"epoch": 0.012950400845740463,
"grad_norm": 1.7865961790084839,
"learning_rate": 1.800157297483417e-05,
"loss": 2.9323,
"step": 147
},
{
"epoch": 0.013038498810677474,
"grad_norm": 1.967854380607605,
"learning_rate": 1.7370711923791567e-05,
"loss": 2.7435,
"step": 148
},
{
"epoch": 0.013126596775614483,
"grad_norm": 1.8304901123046875,
"learning_rate": 1.6748771394307585e-05,
"loss": 2.717,
"step": 149
},
{
"epoch": 0.013214694740551492,
"grad_norm": 2.8990800380706787,
"learning_rate": 1.6135921418712956e-05,
"loss": 3.4761,
"step": 150
},
{
"epoch": 0.013214694740551492,
"eval_loss": 2.151653289794922,
"eval_runtime": 1538.9923,
"eval_samples_per_second": 12.422,
"eval_steps_per_second": 3.106,
"step": 150
},
{
"epoch": 0.013302792705488503,
"grad_norm": 0.7825070023536682,
"learning_rate": 1.553232954407171e-05,
"loss": 2.1874,
"step": 151
},
{
"epoch": 0.013390890670425513,
"grad_norm": 0.9451290965080261,
"learning_rate": 1.4938160786375572e-05,
"loss": 2.3405,
"step": 152
},
{
"epoch": 0.013478988635362524,
"grad_norm": 0.9004181027412415,
"learning_rate": 1.435357758543015e-05,
"loss": 2.0163,
"step": 153
},
{
"epoch": 0.013567086600299533,
"grad_norm": 1.4334670305252075,
"learning_rate": 1.3778739760445552e-05,
"loss": 2.3106,
"step": 154
},
{
"epoch": 0.013655184565236542,
"grad_norm": 1.415030598640442,
"learning_rate": 1.3213804466343421e-05,
"loss": 2.3683,
"step": 155
},
{
"epoch": 0.013743282530173553,
"grad_norm": 2.0904955863952637,
"learning_rate": 1.2658926150792322e-05,
"loss": 2.5222,
"step": 156
},
{
"epoch": 0.013831380495110563,
"grad_norm": 1.351582407951355,
"learning_rate": 1.2114256511983274e-05,
"loss": 1.9877,
"step": 157
},
{
"epoch": 0.013919478460047574,
"grad_norm": 0.7900760173797607,
"learning_rate": 1.157994445715706e-05,
"loss": 1.8699,
"step": 158
},
{
"epoch": 0.014007576424984583,
"grad_norm": 0.8611205816268921,
"learning_rate": 1.1056136061894384e-05,
"loss": 1.6852,
"step": 159
},
{
"epoch": 0.014095674389921592,
"grad_norm": 0.9854053854942322,
"learning_rate": 1.0542974530180327e-05,
"loss": 1.9345,
"step": 160
},
{
"epoch": 0.014183772354858603,
"grad_norm": 0.7881184816360474,
"learning_rate": 1.0040600155253765e-05,
"loss": 1.5244,
"step": 161
},
{
"epoch": 0.014271870319795613,
"grad_norm": 0.9325931668281555,
"learning_rate": 9.549150281252633e-06,
"loss": 1.8048,
"step": 162
},
{
"epoch": 0.014359968284732622,
"grad_norm": 0.8224790692329407,
"learning_rate": 9.068759265665384e-06,
"loss": 1.7362,
"step": 163
},
{
"epoch": 0.014448066249669633,
"grad_norm": 0.9829339981079102,
"learning_rate": 8.599558442598998e-06,
"loss": 1.9421,
"step": 164
},
{
"epoch": 0.014536164214606642,
"grad_norm": 0.9197496175765991,
"learning_rate": 8.141676086873572e-06,
"loss": 2.0049,
"step": 165
},
{
"epoch": 0.014624262179543653,
"grad_norm": 0.7884261608123779,
"learning_rate": 7.695237378953223e-06,
"loss": 1.5814,
"step": 166
},
{
"epoch": 0.014712360144480663,
"grad_norm": 0.8714703917503357,
"learning_rate": 7.260364370723044e-06,
"loss": 1.6752,
"step": 167
},
{
"epoch": 0.014800458109417672,
"grad_norm": 0.7556380033493042,
"learning_rate": 6.837175952121306e-06,
"loss": 1.2927,
"step": 168
},
{
"epoch": 0.014888556074354683,
"grad_norm": 0.9977834820747375,
"learning_rate": 6.425787818636131e-06,
"loss": 1.7826,
"step": 169
},
{
"epoch": 0.014976654039291692,
"grad_norm": 0.9084951281547546,
"learning_rate": 6.026312439675552e-06,
"loss": 2.0509,
"step": 170
},
{
"epoch": 0.015064752004228703,
"grad_norm": 1.0041999816894531,
"learning_rate": 5.6388590278194096e-06,
"loss": 1.9909,
"step": 171
},
{
"epoch": 0.015152849969165712,
"grad_norm": 0.8483880162239075,
"learning_rate": 5.263533508961827e-06,
"loss": 1.7303,
"step": 172
},
{
"epoch": 0.015240947934102722,
"grad_norm": 0.9573462605476379,
"learning_rate": 4.900438493352055e-06,
"loss": 2.0127,
"step": 173
},
{
"epoch": 0.015329045899039733,
"grad_norm": 0.984149158000946,
"learning_rate": 4.549673247541875e-06,
"loss": 1.8659,
"step": 174
},
{
"epoch": 0.015417143863976742,
"grad_norm": 0.9139711260795593,
"learning_rate": 4.2113336672471245e-06,
"loss": 1.8221,
"step": 175
},
{
"epoch": 0.015505241828913751,
"grad_norm": 0.8064496517181396,
"learning_rate": 3.885512251130763e-06,
"loss": 1.4021,
"step": 176
},
{
"epoch": 0.015593339793850762,
"grad_norm": 0.9448890089988708,
"learning_rate": 3.5722980755146517e-06,
"loss": 2.1902,
"step": 177
},
{
"epoch": 0.01568143775878777,
"grad_norm": 1.2787121534347534,
"learning_rate": 3.271776770026963e-06,
"loss": 2.2281,
"step": 178
},
{
"epoch": 0.015769535723724783,
"grad_norm": 0.8766438961029053,
"learning_rate": 2.9840304941919415e-06,
"loss": 2.0466,
"step": 179
},
{
"epoch": 0.01585763368866179,
"grad_norm": 1.024119257926941,
"learning_rate": 2.7091379149682685e-06,
"loss": 2.1415,
"step": 180
},
{
"epoch": 0.0159457316535988,
"grad_norm": 0.9783895015716553,
"learning_rate": 2.4471741852423237e-06,
"loss": 2.0409,
"step": 181
},
{
"epoch": 0.016033829618535812,
"grad_norm": 0.933120846748352,
"learning_rate": 2.1982109232821178e-06,
"loss": 2.0306,
"step": 182
},
{
"epoch": 0.016121927583472823,
"grad_norm": 0.9729553461074829,
"learning_rate": 1.962316193157593e-06,
"loss": 2.0754,
"step": 183
},
{
"epoch": 0.01621002554840983,
"grad_norm": 1.0812957286834717,
"learning_rate": 1.7395544861325718e-06,
"loss": 2.2615,
"step": 184
},
{
"epoch": 0.016298123513346842,
"grad_norm": 1.0151242017745972,
"learning_rate": 1.5299867030334814e-06,
"loss": 1.9102,
"step": 185
},
{
"epoch": 0.016386221478283853,
"grad_norm": 1.2771090269088745,
"learning_rate": 1.333670137599713e-06,
"loss": 2.2552,
"step": 186
},
{
"epoch": 0.01647431944322086,
"grad_norm": 1.2217825651168823,
"learning_rate": 1.1506584608200367e-06,
"loss": 2.3867,
"step": 187
},
{
"epoch": 0.01656241740815787,
"grad_norm": 1.0359219312667847,
"learning_rate": 9.810017062595322e-07,
"loss": 2.163,
"step": 188
},
{
"epoch": 0.016650515373094883,
"grad_norm": 1.4206933975219727,
"learning_rate": 8.247462563808817e-07,
"loss": 2.5583,
"step": 189
},
{
"epoch": 0.01673861333803189,
"grad_norm": 1.1746091842651367,
"learning_rate": 6.819348298638839e-07,
"loss": 2.4067,
"step": 190
},
{
"epoch": 0.0168267113029689,
"grad_norm": 1.3580045700073242,
"learning_rate": 5.526064699265753e-07,
"loss": 2.7075,
"step": 191
},
{
"epoch": 0.016914809267905912,
"grad_norm": 1.3700116872787476,
"learning_rate": 4.367965336512403e-07,
"loss": 2.6118,
"step": 192
},
{
"epoch": 0.01700290723284292,
"grad_norm": 1.4006810188293457,
"learning_rate": 3.3453668231809286e-07,
"loss": 2.4773,
"step": 193
},
{
"epoch": 0.01709100519777993,
"grad_norm": 1.7444610595703125,
"learning_rate": 2.458548727494292e-07,
"loss": 2.501,
"step": 194
},
{
"epoch": 0.017179103162716942,
"grad_norm": 1.4544957876205444,
"learning_rate": 1.7077534966650766e-07,
"loss": 2.7903,
"step": 195
},
{
"epoch": 0.017267201127653953,
"grad_norm": 1.4317971467971802,
"learning_rate": 1.0931863906127327e-07,
"loss": 2.6364,
"step": 196
},
{
"epoch": 0.01735529909259096,
"grad_norm": 1.8673317432403564,
"learning_rate": 6.150154258476315e-08,
"loss": 2.5461,
"step": 197
},
{
"epoch": 0.01744339705752797,
"grad_norm": 1.612182378768921,
"learning_rate": 2.7337132953697554e-08,
"loss": 2.6307,
"step": 198
},
{
"epoch": 0.017531495022464982,
"grad_norm": 1.6980342864990234,
"learning_rate": 6.834750376549792e-09,
"loss": 2.5241,
"step": 199
},
{
"epoch": 0.01761959298740199,
"grad_norm": 2.5721540451049805,
"learning_rate": 0.0,
"loss": 3.2906,
"step": 200
},
{
"epoch": 0.01761959298740199,
"eval_loss": 2.118539333343506,
"eval_runtime": 1540.9802,
"eval_samples_per_second": 12.406,
"eval_steps_per_second": 3.102,
"step": 200
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 5,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.013602113004503e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}