{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.987937273823885,
"eval_steps": 500,
"global_step": 4140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0024125452352231603,
"grad_norm": 388.0,
"learning_rate": 4.830917874396135e-07,
"loss": 49.9057,
"step": 1
},
{
"epoch": 0.012062726176115802,
"grad_norm": 368.0,
"learning_rate": 2.4154589371980677e-06,
"loss": 50.2975,
"step": 5
},
{
"epoch": 0.024125452352231604,
"grad_norm": 336.0,
"learning_rate": 4.830917874396135e-06,
"loss": 50.3862,
"step": 10
},
{
"epoch": 0.03618817852834741,
"grad_norm": 177.0,
"learning_rate": 7.246376811594203e-06,
"loss": 43.802,
"step": 15
},
{
"epoch": 0.04825090470446321,
"grad_norm": 130.0,
"learning_rate": 9.66183574879227e-06,
"loss": 36.945,
"step": 20
},
{
"epoch": 0.06031363088057901,
"grad_norm": 85.0,
"learning_rate": 1.2077294685990338e-05,
"loss": 30.8863,
"step": 25
},
{
"epoch": 0.07237635705669482,
"grad_norm": 25.0,
"learning_rate": 1.4492753623188407e-05,
"loss": 26.5704,
"step": 30
},
{
"epoch": 0.08443908323281062,
"grad_norm": 18.875,
"learning_rate": 1.6908212560386476e-05,
"loss": 25.2905,
"step": 35
},
{
"epoch": 0.09650180940892641,
"grad_norm": 16.5,
"learning_rate": 1.932367149758454e-05,
"loss": 23.3032,
"step": 40
},
{
"epoch": 0.10856453558504221,
"grad_norm": 10.4375,
"learning_rate": 2.173913043478261e-05,
"loss": 21.9795,
"step": 45
},
{
"epoch": 0.12062726176115803,
"grad_norm": 6.5,
"learning_rate": 2.4154589371980676e-05,
"loss": 20.9041,
"step": 50
},
{
"epoch": 0.13268998793727382,
"grad_norm": 5.0,
"learning_rate": 2.6570048309178748e-05,
"loss": 20.1174,
"step": 55
},
{
"epoch": 0.14475271411338964,
"grad_norm": 4.59375,
"learning_rate": 2.8985507246376814e-05,
"loss": 19.4134,
"step": 60
},
{
"epoch": 0.15681544028950542,
"grad_norm": 5.21875,
"learning_rate": 3.140096618357488e-05,
"loss": 19.0099,
"step": 65
},
{
"epoch": 0.16887816646562123,
"grad_norm": 6.5625,
"learning_rate": 3.381642512077295e-05,
"loss": 18.6242,
"step": 70
},
{
"epoch": 0.18094089264173704,
"grad_norm": 11.0,
"learning_rate": 3.6231884057971014e-05,
"loss": 17.8084,
"step": 75
},
{
"epoch": 0.19300361881785283,
"grad_norm": 12.6875,
"learning_rate": 3.864734299516908e-05,
"loss": 17.1619,
"step": 80
},
{
"epoch": 0.20506634499396864,
"grad_norm": 19.875,
"learning_rate": 4.106280193236715e-05,
"loss": 15.4623,
"step": 85
},
{
"epoch": 0.21712907117008443,
"grad_norm": 31.0,
"learning_rate": 4.347826086956522e-05,
"loss": 12.9656,
"step": 90
},
{
"epoch": 0.22919179734620024,
"grad_norm": 31.625,
"learning_rate": 4.589371980676328e-05,
"loss": 9.1598,
"step": 95
},
{
"epoch": 0.24125452352231605,
"grad_norm": 23.25,
"learning_rate": 4.830917874396135e-05,
"loss": 5.018,
"step": 100
},
{
"epoch": 0.25331724969843183,
"grad_norm": 6.09375,
"learning_rate": 5.072463768115943e-05,
"loss": 2.6562,
"step": 105
},
{
"epoch": 0.26537997587454765,
"grad_norm": 2.5,
"learning_rate": 5.3140096618357496e-05,
"loss": 2.0877,
"step": 110
},
{
"epoch": 0.27744270205066346,
"grad_norm": 1.4296875,
"learning_rate": 5.555555555555556e-05,
"loss": 1.8147,
"step": 115
},
{
"epoch": 0.28950542822677927,
"grad_norm": 0.828125,
"learning_rate": 5.797101449275363e-05,
"loss": 1.646,
"step": 120
},
{
"epoch": 0.30156815440289503,
"grad_norm": 1.6953125,
"learning_rate": 6.0386473429951696e-05,
"loss": 1.5332,
"step": 125
},
{
"epoch": 0.31363088057901084,
"grad_norm": 0.8984375,
"learning_rate": 6.280193236714976e-05,
"loss": 1.4679,
"step": 130
},
{
"epoch": 0.32569360675512665,
"grad_norm": 0.6015625,
"learning_rate": 6.521739130434783e-05,
"loss": 1.4093,
"step": 135
},
{
"epoch": 0.33775633293124246,
"grad_norm": 1.03125,
"learning_rate": 6.76328502415459e-05,
"loss": 1.3534,
"step": 140
},
{
"epoch": 0.3498190591073583,
"grad_norm": 1.140625,
"learning_rate": 7.004830917874396e-05,
"loss": 1.3102,
"step": 145
},
{
"epoch": 0.3618817852834741,
"grad_norm": 1.0,
"learning_rate": 7.246376811594203e-05,
"loss": 1.2869,
"step": 150
},
{
"epoch": 0.37394451145958985,
"grad_norm": 1.1640625,
"learning_rate": 7.48792270531401e-05,
"loss": 1.2508,
"step": 155
},
{
"epoch": 0.38600723763570566,
"grad_norm": 1.3984375,
"learning_rate": 7.729468599033817e-05,
"loss": 1.2457,
"step": 160
},
{
"epoch": 0.39806996381182147,
"grad_norm": 0.99609375,
"learning_rate": 7.971014492753623e-05,
"loss": 1.2096,
"step": 165
},
{
"epoch": 0.4101326899879373,
"grad_norm": 1.359375,
"learning_rate": 8.21256038647343e-05,
"loss": 1.1968,
"step": 170
},
{
"epoch": 0.4221954161640531,
"grad_norm": 3.171875,
"learning_rate": 8.454106280193237e-05,
"loss": 1.1807,
"step": 175
},
{
"epoch": 0.43425814234016885,
"grad_norm": 0.921875,
"learning_rate": 8.695652173913044e-05,
"loss": 1.167,
"step": 180
},
{
"epoch": 0.44632086851628466,
"grad_norm": 2.03125,
"learning_rate": 8.937198067632851e-05,
"loss": 1.173,
"step": 185
},
{
"epoch": 0.4583835946924005,
"grad_norm": 5.8125,
"learning_rate": 9.178743961352657e-05,
"loss": 1.1547,
"step": 190
},
{
"epoch": 0.4704463208685163,
"grad_norm": 1.8984375,
"learning_rate": 9.420289855072463e-05,
"loss": 1.1475,
"step": 195
},
{
"epoch": 0.4825090470446321,
"grad_norm": 2.578125,
"learning_rate": 9.66183574879227e-05,
"loss": 1.1483,
"step": 200
},
{
"epoch": 0.4945717732207479,
"grad_norm": 0.98046875,
"learning_rate": 9.903381642512077e-05,
"loss": 1.1399,
"step": 205
},
{
"epoch": 0.5066344993968637,
"grad_norm": 1.5390625,
"learning_rate": 0.00010144927536231885,
"loss": 1.1287,
"step": 210
},
{
"epoch": 0.5186972255729795,
"grad_norm": 2.09375,
"learning_rate": 0.00010386473429951691,
"loss": 1.1176,
"step": 215
},
{
"epoch": 0.5307599517490953,
"grad_norm": 3.171875,
"learning_rate": 0.00010628019323671499,
"loss": 1.1057,
"step": 220
},
{
"epoch": 0.5428226779252111,
"grad_norm": 2.09375,
"learning_rate": 0.00010869565217391305,
"loss": 1.1042,
"step": 225
},
{
"epoch": 0.5548854041013269,
"grad_norm": 2.046875,
"learning_rate": 0.00011111111111111112,
"loss": 1.091,
"step": 230
},
{
"epoch": 0.5669481302774427,
"grad_norm": 1.796875,
"learning_rate": 0.00011352657004830917,
"loss": 1.0847,
"step": 235
},
{
"epoch": 0.5790108564535585,
"grad_norm": 2.140625,
"learning_rate": 0.00011594202898550725,
"loss": 1.0686,
"step": 240
},
{
"epoch": 0.5910735826296744,
"grad_norm": 1.4609375,
"learning_rate": 0.00011835748792270531,
"loss": 1.0752,
"step": 245
},
{
"epoch": 0.6031363088057901,
"grad_norm": 1.2890625,
"learning_rate": 0.00012077294685990339,
"loss": 1.06,
"step": 250
},
{
"epoch": 0.6151990349819059,
"grad_norm": 1.328125,
"learning_rate": 0.00012318840579710145,
"loss": 1.0583,
"step": 255
},
{
"epoch": 0.6272617611580217,
"grad_norm": 1.8125,
"learning_rate": 0.00012560386473429953,
"loss": 1.0598,
"step": 260
},
{
"epoch": 0.6393244873341375,
"grad_norm": 2.3125,
"learning_rate": 0.00012801932367149759,
"loss": 1.0482,
"step": 265
},
{
"epoch": 0.6513872135102533,
"grad_norm": 2.203125,
"learning_rate": 0.00013043478260869567,
"loss": 1.0542,
"step": 270
},
{
"epoch": 0.6634499396863691,
"grad_norm": 1.484375,
"learning_rate": 0.00013285024154589372,
"loss": 1.0398,
"step": 275
},
{
"epoch": 0.6755126658624849,
"grad_norm": 2.4375,
"learning_rate": 0.0001352657004830918,
"loss": 1.0327,
"step": 280
},
{
"epoch": 0.6875753920386007,
"grad_norm": 3.140625,
"learning_rate": 0.00013768115942028986,
"loss": 1.0287,
"step": 285
},
{
"epoch": 0.6996381182147166,
"grad_norm": 2.953125,
"learning_rate": 0.00014009661835748792,
"loss": 1.035,
"step": 290
},
{
"epoch": 0.7117008443908324,
"grad_norm": 2.5625,
"learning_rate": 0.000142512077294686,
"loss": 1.0149,
"step": 295
},
{
"epoch": 0.7237635705669482,
"grad_norm": 2.546875,
"learning_rate": 0.00014492753623188405,
"loss": 1.0279,
"step": 300
},
{
"epoch": 0.7358262967430639,
"grad_norm": 1.6953125,
"learning_rate": 0.00014734299516908214,
"loss": 1.0261,
"step": 305
},
{
"epoch": 0.7478890229191797,
"grad_norm": 1.8515625,
"learning_rate": 0.0001497584541062802,
"loss": 1.0113,
"step": 310
},
{
"epoch": 0.7599517490952955,
"grad_norm": 4.03125,
"learning_rate": 0.00015217391304347827,
"loss": 1.0244,
"step": 315
},
{
"epoch": 0.7720144752714113,
"grad_norm": 2.046875,
"learning_rate": 0.00015458937198067633,
"loss": 1.0105,
"step": 320
},
{
"epoch": 0.7840772014475271,
"grad_norm": 1.515625,
"learning_rate": 0.0001570048309178744,
"loss": 1.005,
"step": 325
},
{
"epoch": 0.7961399276236429,
"grad_norm": 5.6875,
"learning_rate": 0.00015942028985507247,
"loss": 1.0101,
"step": 330
},
{
"epoch": 0.8082026537997588,
"grad_norm": 1.78125,
"learning_rate": 0.00016183574879227055,
"loss": 1.0173,
"step": 335
},
{
"epoch": 0.8202653799758746,
"grad_norm": 1.1953125,
"learning_rate": 0.0001642512077294686,
"loss": 1.0132,
"step": 340
},
{
"epoch": 0.8323281061519904,
"grad_norm": 1.5859375,
"learning_rate": 0.0001666666666666667,
"loss": 1.0205,
"step": 345
},
{
"epoch": 0.8443908323281062,
"grad_norm": 1.5078125,
"learning_rate": 0.00016908212560386474,
"loss": 1.0074,
"step": 350
},
{
"epoch": 0.856453558504222,
"grad_norm": 2.265625,
"learning_rate": 0.00017149758454106283,
"loss": 0.9998,
"step": 355
},
{
"epoch": 0.8685162846803377,
"grad_norm": 1.984375,
"learning_rate": 0.00017391304347826088,
"loss": 0.9886,
"step": 360
},
{
"epoch": 0.8805790108564535,
"grad_norm": 1.2265625,
"learning_rate": 0.00017632850241545896,
"loss": 0.9902,
"step": 365
},
{
"epoch": 0.8926417370325693,
"grad_norm": 1.484375,
"learning_rate": 0.00017874396135265702,
"loss": 1.0085,
"step": 370
},
{
"epoch": 0.9047044632086851,
"grad_norm": 5.5,
"learning_rate": 0.00018115942028985507,
"loss": 1.0021,
"step": 375
},
{
"epoch": 0.916767189384801,
"grad_norm": 8.875,
"learning_rate": 0.00018357487922705313,
"loss": 1.0323,
"step": 380
},
{
"epoch": 0.9288299155609168,
"grad_norm": 1.6484375,
"learning_rate": 0.0001859903381642512,
"loss": 1.0051,
"step": 385
},
{
"epoch": 0.9408926417370326,
"grad_norm": 9.1875,
"learning_rate": 0.00018840579710144927,
"loss": 1.0105,
"step": 390
},
{
"epoch": 0.9529553679131484,
"grad_norm": 7.21875,
"learning_rate": 0.00019082125603864735,
"loss": 1.0062,
"step": 395
},
{
"epoch": 0.9650180940892642,
"grad_norm": 1.1796875,
"learning_rate": 0.0001932367149758454,
"loss": 0.9982,
"step": 400
},
{
"epoch": 0.97708082026538,
"grad_norm": 1.125,
"learning_rate": 0.0001956521739130435,
"loss": 0.9794,
"step": 405
},
{
"epoch": 0.9891435464414958,
"grad_norm": 2.46875,
"learning_rate": 0.00019806763285024154,
"loss": 0.9973,
"step": 410
},
{
"epoch": 0.9987937273823885,
"eval_loss": 2.522090435028076,
"eval_runtime": 0.3154,
"eval_samples_per_second": 31.706,
"eval_steps_per_second": 3.171,
"step": 414
},
{
"epoch": 1.0012062726176116,
"grad_norm": 2.546875,
"learning_rate": 0.00019999996445454953,
"loss": 0.9904,
"step": 415
},
{
"epoch": 1.0132689987937273,
"grad_norm": 4.5,
"learning_rate": 0.00019999872036643513,
"loss": 1.0068,
"step": 420
},
{
"epoch": 1.0253317249698433,
"grad_norm": 1.515625,
"learning_rate": 0.00019999569903106498,
"loss": 0.9834,
"step": 425
},
{
"epoch": 1.037394451145959,
"grad_norm": 3.453125,
"learning_rate": 0.00019999090050213636,
"loss": 0.9873,
"step": 430
},
{
"epoch": 1.0494571773220749,
"grad_norm": 5.9375,
"learning_rate": 0.00019998432486493205,
"loss": 0.9593,
"step": 435
},
{
"epoch": 1.0615199034981906,
"grad_norm": 1.78125,
"learning_rate": 0.00019997597223631895,
"loss": 0.9534,
"step": 440
},
{
"epoch": 1.0735826296743065,
"grad_norm": 10.9375,
"learning_rate": 0.0001999658427647457,
"loss": 0.9689,
"step": 445
},
{
"epoch": 1.0856453558504222,
"grad_norm": 1.6484375,
"learning_rate": 0.00019995393663024054,
"loss": 0.9786,
"step": 450
},
{
"epoch": 1.097708082026538,
"grad_norm": 6.15625,
"learning_rate": 0.00019994025404440743,
"loss": 0.9602,
"step": 455
},
{
"epoch": 1.1097708082026538,
"grad_norm": 4.0,
"learning_rate": 0.00019992479525042303,
"loss": 0.9507,
"step": 460
},
{
"epoch": 1.1218335343787695,
"grad_norm": 1.4140625,
"learning_rate": 0.00019990756052303173,
"loss": 0.9581,
"step": 465
},
{
"epoch": 1.1338962605548855,
"grad_norm": 2.5,
"learning_rate": 0.0001998885501685412,
"loss": 0.9755,
"step": 470
},
{
"epoch": 1.1459589867310012,
"grad_norm": 1.2109375,
"learning_rate": 0.0001998677645248168,
"loss": 0.9557,
"step": 475
},
{
"epoch": 1.158021712907117,
"grad_norm": 5.21875,
"learning_rate": 0.00019984520396127553,
"loss": 0.975,
"step": 480
},
{
"epoch": 1.1700844390832328,
"grad_norm": 1.921875,
"learning_rate": 0.00019982086887887948,
"loss": 0.9619,
"step": 485
},
{
"epoch": 1.1821471652593487,
"grad_norm": 1.1171875,
"learning_rate": 0.0001997947597101288,
"loss": 0.9619,
"step": 490
},
{
"epoch": 1.1942098914354644,
"grad_norm": 1.875,
"learning_rate": 0.00019976687691905393,
"loss": 0.9504,
"step": 495
},
{
"epoch": 1.2062726176115803,
"grad_norm": 2.46875,
"learning_rate": 0.0001997372210012073,
"loss": 0.9357,
"step": 500
},
{
"epoch": 1.218335343787696,
"grad_norm": 1.90625,
"learning_rate": 0.00019970579248365468,
"loss": 0.9435,
"step": 505
},
{
"epoch": 1.2303980699638117,
"grad_norm": 5.125,
"learning_rate": 0.0001996725919249657,
"loss": 0.9404,
"step": 510
},
{
"epoch": 1.2424607961399277,
"grad_norm": 2.34375,
"learning_rate": 0.00019963761991520387,
"loss": 0.9383,
"step": 515
},
{
"epoch": 1.2545235223160434,
"grad_norm": 3.046875,
"learning_rate": 0.00019960087707591626,
"loss": 0.9493,
"step": 520
},
{
"epoch": 1.2665862484921593,
"grad_norm": 3.125,
"learning_rate": 0.00019956236406012232,
"loss": 0.9341,
"step": 525
},
{
"epoch": 1.278648974668275,
"grad_norm": 2.0625,
"learning_rate": 0.00019952208155230234,
"loss": 0.9472,
"step": 530
},
{
"epoch": 1.290711700844391,
"grad_norm": 1.4921875,
"learning_rate": 0.00019948003026838525,
"loss": 0.9308,
"step": 535
},
{
"epoch": 1.3027744270205066,
"grad_norm": 1.3125,
"learning_rate": 0.00019943621095573586,
"loss": 0.9416,
"step": 540
},
{
"epoch": 1.3148371531966223,
"grad_norm": 2.875,
"learning_rate": 0.00019939062439314174,
"loss": 0.9424,
"step": 545
},
{
"epoch": 1.3268998793727382,
"grad_norm": 3.859375,
"learning_rate": 0.00019934327139079915,
"loss": 0.9347,
"step": 550
},
{
"epoch": 1.3389626055488542,
"grad_norm": 14.125,
"learning_rate": 0.00019929415279029873,
"loss": 0.9494,
"step": 555
},
{
"epoch": 1.3510253317249699,
"grad_norm": 1.7734375,
"learning_rate": 0.00019924326946461074,
"loss": 0.9426,
"step": 560
},
{
"epoch": 1.3630880579010856,
"grad_norm": 1.921875,
"learning_rate": 0.00019919062231806922,
"loss": 0.928,
"step": 565
},
{
"epoch": 1.3751507840772015,
"grad_norm": 3.703125,
"learning_rate": 0.0001991362122863561,
"loss": 0.942,
"step": 570
},
{
"epoch": 1.3872135102533172,
"grad_norm": 1.015625,
"learning_rate": 0.00019908004033648453,
"loss": 0.9355,
"step": 575
},
{
"epoch": 1.399276236429433,
"grad_norm": 9.375,
"learning_rate": 0.0001990221074667818,
"loss": 0.9308,
"step": 580
},
{
"epoch": 1.4113389626055488,
"grad_norm": 2.9375,
"learning_rate": 0.0001989624147068713,
"loss": 0.9226,
"step": 585
},
{
"epoch": 1.4234016887816647,
"grad_norm": 0.9140625,
"learning_rate": 0.00019890096311765465,
"loss": 0.9179,
"step": 590
},
{
"epoch": 1.4354644149577804,
"grad_norm": 2.0,
"learning_rate": 0.00019883775379129248,
"loss": 0.9161,
"step": 595
},
{
"epoch": 1.4475271411338961,
"grad_norm": 1.8828125,
"learning_rate": 0.00019877278785118517,
"loss": 0.9137,
"step": 600
},
{
"epoch": 1.459589867310012,
"grad_norm": 2.765625,
"learning_rate": 0.0001987060664519529,
"loss": 0.909,
"step": 605
},
{
"epoch": 1.471652593486128,
"grad_norm": 4.9375,
"learning_rate": 0.00019863759077941504,
"loss": 0.9055,
"step": 610
},
{
"epoch": 1.4837153196622437,
"grad_norm": 1.4609375,
"learning_rate": 0.0001985673620505692,
"loss": 0.9221,
"step": 615
},
{
"epoch": 1.4957780458383594,
"grad_norm": 10.4375,
"learning_rate": 0.00019849538151356955,
"loss": 0.9249,
"step": 620
},
{
"epoch": 1.5078407720144753,
"grad_norm": 1.046875,
"learning_rate": 0.00019842165044770452,
"loss": 0.9188,
"step": 625
},
{
"epoch": 1.5199034981905912,
"grad_norm": 1.0390625,
"learning_rate": 0.0001983461701633742,
"loss": 0.9134,
"step": 630
},
{
"epoch": 1.531966224366707,
"grad_norm": 2.6875,
"learning_rate": 0.00019826894200206715,
"loss": 0.9084,
"step": 635
},
{
"epoch": 1.5440289505428226,
"grad_norm": 1.0078125,
"learning_rate": 0.00019818996733633618,
"loss": 0.9136,
"step": 640
},
{
"epoch": 1.5560916767189386,
"grad_norm": 2.203125,
"learning_rate": 0.00019810924756977444,
"loss": 0.9372,
"step": 645
},
{
"epoch": 1.5681544028950543,
"grad_norm": 1.671875,
"learning_rate": 0.00019802678413699006,
"loss": 0.8959,
"step": 650
},
{
"epoch": 1.58021712907117,
"grad_norm": 3.578125,
"learning_rate": 0.0001979425785035809,
"loss": 0.8991,
"step": 655
},
{
"epoch": 1.5922798552472859,
"grad_norm": 1.0234375,
"learning_rate": 0.00019785663216610844,
"loss": 0.9074,
"step": 660
},
{
"epoch": 1.6043425814234018,
"grad_norm": 3.046875,
"learning_rate": 0.00019776894665207113,
"loss": 0.9085,
"step": 665
},
{
"epoch": 1.6164053075995175,
"grad_norm": 1.578125,
"learning_rate": 0.0001976795235198773,
"loss": 0.9085,
"step": 670
},
{
"epoch": 1.6284680337756332,
"grad_norm": 2.53125,
"learning_rate": 0.00019758836435881746,
"loss": 0.9026,
"step": 675
},
{
"epoch": 1.6405307599517491,
"grad_norm": 1.015625,
"learning_rate": 0.00019749547078903604,
"loss": 0.9033,
"step": 680
},
{
"epoch": 1.652593486127865,
"grad_norm": 15.25,
"learning_rate": 0.00019740084446150257,
"loss": 0.8952,
"step": 685
},
{
"epoch": 1.6646562123039808,
"grad_norm": 1.5703125,
"learning_rate": 0.00019730448705798239,
"loss": 0.921,
"step": 690
},
{
"epoch": 1.6767189384800965,
"grad_norm": 1.796875,
"learning_rate": 0.00019720640029100673,
"loss": 0.9014,
"step": 695
},
{
"epoch": 1.6887816646562124,
"grad_norm": 0.859375,
"learning_rate": 0.00019710658590384227,
"loss": 0.8948,
"step": 700
},
{
"epoch": 1.700844390832328,
"grad_norm": 2.34375,
"learning_rate": 0.00019700504567046013,
"loss": 0.8953,
"step": 705
},
{
"epoch": 1.7129071170084438,
"grad_norm": 0.85546875,
"learning_rate": 0.00019690178139550443,
"loss": 0.8933,
"step": 710
},
{
"epoch": 1.7249698431845597,
"grad_norm": 2.046875,
"learning_rate": 0.0001967967949142602,
"loss": 0.9286,
"step": 715
},
{
"epoch": 1.7370325693606756,
"grad_norm": 1.796875,
"learning_rate": 0.00019669008809262062,
"loss": 0.8984,
"step": 720
},
{
"epoch": 1.7490952955367913,
"grad_norm": 2.34375,
"learning_rate": 0.00019658166282705405,
"loss": 0.9069,
"step": 725
},
{
"epoch": 1.761158021712907,
"grad_norm": 1.078125,
"learning_rate": 0.00019647152104457013,
"loss": 0.8887,
"step": 730
},
{
"epoch": 1.773220747889023,
"grad_norm": 1.7734375,
"learning_rate": 0.00019635966470268583,
"loss": 0.8827,
"step": 735
},
{
"epoch": 1.7852834740651389,
"grad_norm": 1.7421875,
"learning_rate": 0.00019624609578939027,
"loss": 0.8757,
"step": 740
},
{
"epoch": 1.7973462002412546,
"grad_norm": 4.78125,
"learning_rate": 0.0001961308163231097,
"loss": 0.8809,
"step": 745
},
{
"epoch": 1.8094089264173703,
"grad_norm": 1.25,
"learning_rate": 0.0001960138283526715,
"loss": 0.8817,
"step": 750
},
{
"epoch": 1.8214716525934862,
"grad_norm": 2.453125,
"learning_rate": 0.00019589513395726777,
"loss": 0.881,
"step": 755
},
{
"epoch": 1.833534378769602,
"grad_norm": 2.703125,
"learning_rate": 0.0001957747352464184,
"loss": 0.8839,
"step": 760
},
{
"epoch": 1.8455971049457176,
"grad_norm": 1.234375,
"learning_rate": 0.0001956526343599335,
"loss": 0.8854,
"step": 765
},
{
"epoch": 1.8576598311218335,
"grad_norm": 1.265625,
"learning_rate": 0.00019552883346787552,
"loss": 0.8799,
"step": 770
},
{
"epoch": 1.8697225572979495,
"grad_norm": 0.953125,
"learning_rate": 0.0001954033347705206,
"loss": 0.873,
"step": 775
},
{
"epoch": 1.8817852834740652,
"grad_norm": 2.296875,
"learning_rate": 0.0001952761404983194,
"loss": 0.864,
"step": 780
},
{
"epoch": 1.8938480096501809,
"grad_norm": 1.375,
"learning_rate": 0.00019514725291185762,
"loss": 0.8806,
"step": 785
},
{
"epoch": 1.9059107358262968,
"grad_norm": 10.1875,
"learning_rate": 0.0001950166743018156,
"loss": 0.8879,
"step": 790
},
{
"epoch": 1.9179734620024127,
"grad_norm": 0.96875,
"learning_rate": 0.0001948844069889278,
"loss": 0.892,
"step": 795
},
{
"epoch": 1.9300361881785284,
"grad_norm": 16.875,
"learning_rate": 0.00019475045332394153,
"loss": 0.8922,
"step": 800
},
{
"epoch": 1.942098914354644,
"grad_norm": 2.203125,
"learning_rate": 0.00019461481568757506,
"loss": 0.876,
"step": 805
},
{
"epoch": 1.95416164053076,
"grad_norm": 1.3046875,
"learning_rate": 0.00019447749649047542,
"loss": 0.8787,
"step": 810
},
{
"epoch": 1.9662243667068757,
"grad_norm": 1.234375,
"learning_rate": 0.00019433849817317552,
"loss": 0.8625,
"step": 815
},
{
"epoch": 1.9782870928829914,
"grad_norm": 11.4375,
"learning_rate": 0.0001941978232060507,
"loss": 0.8643,
"step": 820
},
{
"epoch": 1.9903498190591074,
"grad_norm": 15.75,
"learning_rate": 0.00019405547408927502,
"loss": 0.9036,
"step": 825
},
{
"epoch": 2.0,
"eval_loss": 2.435774326324463,
"eval_runtime": 0.2379,
"eval_samples_per_second": 42.031,
"eval_steps_per_second": 4.203,
"step": 829
},
{
"epoch": 2.0024125452352233,
"grad_norm": 9.8125,
"learning_rate": 0.00019391145335277655,
"loss": 0.8698,
"step": 830
},
{
"epoch": 2.0144752714113388,
"grad_norm": 1.984375,
"learning_rate": 0.0001937657635561927,
"loss": 0.8737,
"step": 835
},
{
"epoch": 2.0265379975874547,
"grad_norm": 1.4375,
"learning_rate": 0.00019361840728882447,
"loss": 0.8449,
"step": 840
},
{
"epoch": 2.0386007237635706,
"grad_norm": 1.375,
"learning_rate": 0.0001934693871695906,
"loss": 0.853,
"step": 845
},
{
"epoch": 2.0506634499396865,
"grad_norm": 9.375,
"learning_rate": 0.00019331870584698093,
"loss": 0.8422,
"step": 850
},
{
"epoch": 2.062726176115802,
"grad_norm": 4.4375,
"learning_rate": 0.00019316636599900946,
"loss": 0.8357,
"step": 855
},
{
"epoch": 2.074788902291918,
"grad_norm": 4.25,
"learning_rate": 0.00019301237033316659,
"loss": 0.8618,
"step": 860
},
{
"epoch": 2.086851628468034,
"grad_norm": 6.125,
"learning_rate": 0.00019285672158637104,
"loss": 0.8538,
"step": 865
},
{
"epoch": 2.0989143546441498,
"grad_norm": 1.734375,
"learning_rate": 0.00019269942252492133,
"loss": 0.8476,
"step": 870
},
{
"epoch": 2.1109770808202653,
"grad_norm": 1.8046875,
"learning_rate": 0.0001925404759444465,
"loss": 0.8442,
"step": 875
},
{
"epoch": 2.123039806996381,
"grad_norm": 1.5703125,
"learning_rate": 0.0001923798846698564,
"loss": 0.8363,
"step": 880
},
{
"epoch": 2.135102533172497,
"grad_norm": 0.984375,
"learning_rate": 0.00019221765155529158,
"loss": 0.8512,
"step": 885
},
{
"epoch": 2.147165259348613,
"grad_norm": 2.4375,
"learning_rate": 0.00019205377948407258,
"loss": 0.8464,
"step": 890
},
{
"epoch": 2.1592279855247285,
"grad_norm": 1.640625,
"learning_rate": 0.00019188827136864842,
"loss": 0.8341,
"step": 895
},
{
"epoch": 2.1712907117008444,
"grad_norm": 1.21875,
"learning_rate": 0.00019172113015054532,
"loss": 0.837,
"step": 900
},
{
"epoch": 2.1833534378769603,
"grad_norm": 3.421875,
"learning_rate": 0.0001915523588003139,
"loss": 0.8768,
"step": 905
},
{
"epoch": 2.195416164053076,
"grad_norm": 1.53125,
"learning_rate": 0.00019138196031747681,
"loss": 0.8494,
"step": 910
},
{
"epoch": 2.2074788902291917,
"grad_norm": 0.8203125,
"learning_rate": 0.00019120993773047513,
"loss": 0.8934,
"step": 915
},
{
"epoch": 2.2195416164053077,
"grad_norm": 0.80859375,
"learning_rate": 0.0001910362940966147,
"loss": 0.8258,
"step": 920
},
{
"epoch": 2.2316043425814236,
"grad_norm": 6.5,
"learning_rate": 0.00019086103250201165,
"loss": 0.8256,
"step": 925
},
{
"epoch": 2.243667068757539,
"grad_norm": 0.7421875,
"learning_rate": 0.00019068415606153787,
"loss": 0.8347,
"step": 930
},
{
"epoch": 2.255729794933655,
"grad_norm": 11.4375,
"learning_rate": 0.00019050566791876516,
"loss": 0.8264,
"step": 935
},
{
"epoch": 2.267792521109771,
"grad_norm": 0.90625,
"learning_rate": 0.00019032557124590974,
"loss": 0.8232,
"step": 940
},
{
"epoch": 2.2798552472858864,
"grad_norm": 1.8515625,
"learning_rate": 0.00019014386924377582,
"loss": 0.8218,
"step": 945
},
{
"epoch": 2.2919179734620023,
"grad_norm": 0.69921875,
"learning_rate": 0.00018996056514169844,
"loss": 0.8146,
"step": 950
},
{
"epoch": 2.3039806996381182,
"grad_norm": 13.4375,
"learning_rate": 0.00018977566219748653,
"loss": 0.817,
"step": 955
},
{
"epoch": 2.316043425814234,
"grad_norm": 1.4609375,
"learning_rate": 0.0001895891636973646,
"loss": 0.8263,
"step": 960
},
{
"epoch": 2.3281061519903496,
"grad_norm": 13.0,
"learning_rate": 0.00018940107295591454,
"loss": 0.8203,
"step": 965
},
{
"epoch": 2.3401688781664656,
"grad_norm": 3.640625,
"learning_rate": 0.00018921139331601667,
"loss": 0.8183,
"step": 970
},
{
"epoch": 2.3522316043425815,
"grad_norm": 0.5,
"learning_rate": 0.0001890201281487903,
"loss": 0.8013,
"step": 975
},
{
"epoch": 2.3642943305186974,
"grad_norm": 0.61328125,
"learning_rate": 0.00018882728085353392,
"loss": 0.8068,
"step": 980
},
{
"epoch": 2.376357056694813,
"grad_norm": 6.0625,
"learning_rate": 0.0001886328548576646,
"loss": 0.801,
"step": 985
},
{
"epoch": 2.388419782870929,
"grad_norm": 1.1640625,
"learning_rate": 0.00018843685361665723,
"loss": 0.8269,
"step": 990
},
{
"epoch": 2.4004825090470447,
"grad_norm": 2.015625,
"learning_rate": 0.00018823928061398313,
"loss": 0.8141,
"step": 995
},
{
"epoch": 2.4125452352231607,
"grad_norm": 0.62109375,
"learning_rate": 0.00018804013936104792,
"loss": 0.8157,
"step": 1000
},
{
"epoch": 2.424607961399276,
"grad_norm": 0.61328125,
"learning_rate": 0.00018783943339712938,
"loss": 0.8016,
"step": 1005
},
{
"epoch": 2.436670687575392,
"grad_norm": 0.59375,
"learning_rate": 0.00018763716628931437,
"loss": 0.7957,
"step": 1010
},
{
"epoch": 2.448733413751508,
"grad_norm": 0.8046875,
"learning_rate": 0.0001874333416324356,
"loss": 0.8014,
"step": 1015
},
{
"epoch": 2.4607961399276235,
"grad_norm": 1.890625,
"learning_rate": 0.0001872279630490074,
"loss": 0.8054,
"step": 1020
},
{
"epoch": 2.4728588661037394,
"grad_norm": 1.2421875,
"learning_rate": 0.00018702103418916183,
"loss": 0.7972,
"step": 1025
},
{
"epoch": 2.4849215922798553,
"grad_norm": 1.0390625,
"learning_rate": 0.00018681255873058338,
"loss": 0.8115,
"step": 1030
},
{
"epoch": 2.4969843184559712,
"grad_norm": 2.140625,
"learning_rate": 0.00018660254037844388,
"loss": 0.8134,
"step": 1035
},
{
"epoch": 2.5090470446320867,
"grad_norm": 3.765625,
"learning_rate": 0.00018639098286533644,
"loss": 0.8286,
"step": 1040
},
{
"epoch": 2.5211097708082026,
"grad_norm": 1.3984375,
"learning_rate": 0.00018617788995120931,
"loss": 0.804,
"step": 1045
},
{
"epoch": 2.5331724969843186,
"grad_norm": 0.68359375,
"learning_rate": 0.00018596326542329888,
"loss": 0.8048,
"step": 1050
},
{
"epoch": 2.545235223160434,
"grad_norm": 0.87109375,
"learning_rate": 0.00018574711309606249,
"loss": 0.7941,
"step": 1055
},
{
"epoch": 2.55729794933655,
"grad_norm": 0.7578125,
"learning_rate": 0.00018552943681111067,
"loss": 0.795,
"step": 1060
},
{
"epoch": 2.569360675512666,
"grad_norm": 2.046875,
"learning_rate": 0.00018531024043713868,
"loss": 0.7905,
"step": 1065
},
{
"epoch": 2.581423401688782,
"grad_norm": 3.765625,
"learning_rate": 0.0001850895278698579,
"loss": 0.793,
"step": 1070
},
{
"epoch": 2.5934861278648977,
"grad_norm": 1.328125,
"learning_rate": 0.00018486730303192658,
"loss": 0.7986,
"step": 1075
},
{
"epoch": 2.605548854041013,
"grad_norm": 0.53515625,
"learning_rate": 0.00018464356987288013,
"loss": 0.7941,
"step": 1080
},
{
"epoch": 2.617611580217129,
"grad_norm": 1.6875,
"learning_rate": 0.0001844183323690608,
"loss": 0.7904,
"step": 1085
},
{
"epoch": 2.6296743063932446,
"grad_norm": 1.8515625,
"learning_rate": 0.0001841915945235472,
"loss": 0.7932,
"step": 1090
},
{
"epoch": 2.6417370325693605,
"grad_norm": 0.57421875,
"learning_rate": 0.00018396336036608307,
"loss": 0.7942,
"step": 1095
},
{
"epoch": 2.6537997587454765,
"grad_norm": 1.046875,
"learning_rate": 0.00018373363395300554,
"loss": 0.7905,
"step": 1100
},
{
"epoch": 2.6658624849215924,
"grad_norm": 0.69140625,
"learning_rate": 0.0001835024193671733,
"loss": 0.7929,
"step": 1105
},
{
"epoch": 2.6779252110977083,
"grad_norm": 0.6328125,
"learning_rate": 0.0001832697207178938,
"loss": 0.7877,
"step": 1110
},
{
"epoch": 2.689987937273824,
"grad_norm": 0.5546875,
"learning_rate": 0.0001830355421408503,
"loss": 0.7909,
"step": 1115
},
{
"epoch": 2.7020506634499397,
"grad_norm": 1.46875,
"learning_rate": 0.00018279988779802833,
"loss": 0.7815,
"step": 1120
},
{
"epoch": 2.7141133896260556,
"grad_norm": 0.515625,
"learning_rate": 0.00018256276187764197,
"loss": 0.7829,
"step": 1125
},
{
"epoch": 2.726176115802171,
"grad_norm": 0.75,
"learning_rate": 0.00018232416859405895,
"loss": 0.7816,
"step": 1130
},
{
"epoch": 2.738238841978287,
"grad_norm": 0.53515625,
"learning_rate": 0.00018208411218772615,
"loss": 0.7836,
"step": 1135
},
{
"epoch": 2.750301568154403,
"grad_norm": 0.98828125,
"learning_rate": 0.00018184259692509406,
"loss": 0.785,
"step": 1140
},
{
"epoch": 2.762364294330519,
"grad_norm": 1.4375,
"learning_rate": 0.000181599627098541,
"loss": 0.7771,
"step": 1145
},
{
"epoch": 2.7744270205066344,
"grad_norm": 0.59765625,
"learning_rate": 0.00018135520702629675,
"loss": 0.7808,
"step": 1150
},
{
"epoch": 2.7864897466827503,
"grad_norm": 0.76171875,
"learning_rate": 0.000181109341052366,
"loss": 0.7756,
"step": 1155
},
{
"epoch": 2.798552472858866,
"grad_norm": 1.1015625,
"learning_rate": 0.00018086203354645089,
"loss": 0.7838,
"step": 1160
},
{
"epoch": 2.8106151990349817,
"grad_norm": 0.388671875,
"learning_rate": 0.00018061328890387352,
"loss": 0.7856,
"step": 1165
},
{
"epoch": 2.8226779252110976,
"grad_norm": 1.328125,
"learning_rate": 0.00018036311154549784,
"loss": 0.7889,
"step": 1170
},
{
"epoch": 2.8347406513872135,
"grad_norm": 0.953125,
"learning_rate": 0.00018011150591765092,
"loss": 0.7835,
"step": 1175
},
{
"epoch": 2.8468033775633295,
"grad_norm": 1.1015625,
"learning_rate": 0.00017985847649204417,
"loss": 0.7874,
"step": 1180
},
{
"epoch": 2.8588661037394454,
"grad_norm": 1.65625,
"learning_rate": 0.0001796040277656936,
"loss": 0.7743,
"step": 1185
},
{
"epoch": 2.870928829915561,
"grad_norm": 0.68359375,
"learning_rate": 0.00017934816426084008,
"loss": 0.7652,
"step": 1190
},
{
"epoch": 2.882991556091677,
"grad_norm": 1.5703125,
"learning_rate": 0.00017909089052486898,
"loss": 0.7671,
"step": 1195
},
{
"epoch": 2.8950542822677923,
"grad_norm": 1.7421875,
"learning_rate": 0.00017883221113022916,
"loss": 0.7769,
"step": 1200
},
{
"epoch": 2.907117008443908,
"grad_norm": 1.34375,
"learning_rate": 0.00017857213067435195,
"loss": 0.7669,
"step": 1205
},
{
"epoch": 2.919179734620024,
"grad_norm": 0.8984375,
"learning_rate": 0.0001783106537795692,
"loss": 0.7656,
"step": 1210
},
{
"epoch": 2.93124246079614,
"grad_norm": 0.85546875,
"learning_rate": 0.00017804778509303138,
"loss": 0.7717,
"step": 1215
},
{
"epoch": 2.943305186972256,
"grad_norm": 1.234375,
"learning_rate": 0.00017778352928662474,
"loss": 0.7809,
"step": 1220
},
{
"epoch": 2.9553679131483714,
"grad_norm": 1.234375,
"learning_rate": 0.00017751789105688846,
"loss": 0.7736,
"step": 1225
},
{
"epoch": 2.9674306393244874,
"grad_norm": 1.2421875,
"learning_rate": 0.0001772508751249311,
"loss": 0.7744,
"step": 1230
},
{
"epoch": 2.9794933655006033,
"grad_norm": 0.6875,
"learning_rate": 0.0001769824862363467,
"loss": 0.759,
"step": 1235
},
{
"epoch": 2.9915560916767188,
"grad_norm": 0.8515625,
"learning_rate": 0.00017671272916113052,
"loss": 0.7651,
"step": 1240
},
{
"epoch": 2.9987937273823886,
"eval_loss": 2.3987040519714355,
"eval_runtime": 0.2575,
"eval_samples_per_second": 38.831,
"eval_steps_per_second": 3.883,
"step": 1243
},
{
"epoch": 3.0036188178528347,
"grad_norm": 0.828125,
"learning_rate": 0.00017644160869359404,
"loss": 0.7612,
"step": 1245
},
{
"epoch": 3.0156815440289506,
"grad_norm": 11.375,
"learning_rate": 0.00017616912965228001,
"loss": 0.7436,
"step": 1250
},
{
"epoch": 3.0277442702050665,
"grad_norm": 1.09375,
"learning_rate": 0.00017589529687987674,
"loss": 0.7548,
"step": 1255
},
{
"epoch": 3.039806996381182,
"grad_norm": 1.140625,
"learning_rate": 0.00017562011524313185,
"loss": 0.7489,
"step": 1260
},
{
"epoch": 3.051869722557298,
"grad_norm": 0.5703125,
"learning_rate": 0.00017534358963276607,
"loss": 0.7469,
"step": 1265
},
{
"epoch": 3.063932448733414,
"grad_norm": 1.265625,
"learning_rate": 0.0001750657249633861,
"loss": 0.7501,
"step": 1270
},
{
"epoch": 3.0759951749095293,
"grad_norm": 1.375,
"learning_rate": 0.00017478652617339738,
"loss": 0.7503,
"step": 1275
},
{
"epoch": 3.0880579010856453,
"grad_norm": 0.53125,
"learning_rate": 0.00017450599822491615,
"loss": 0.7587,
"step": 1280
},
{
"epoch": 3.100120627261761,
"grad_norm": 0.416015625,
"learning_rate": 0.00017422414610368157,
"loss": 0.7483,
"step": 1285
},
{
"epoch": 3.112183353437877,
"grad_norm": 0.60546875,
"learning_rate": 0.00017394097481896676,
"loss": 0.7554,
"step": 1290
},
{
"epoch": 3.1242460796139926,
"grad_norm": 0.435546875,
"learning_rate": 0.00017365648940349004,
"loss": 0.7484,
"step": 1295
},
{
"epoch": 3.1363088057901085,
"grad_norm": 0.384765625,
"learning_rate": 0.00017337069491332537,
"loss": 0.7389,
"step": 1300
},
{
"epoch": 3.1483715319662244,
"grad_norm": 0.61328125,
"learning_rate": 0.00017308359642781242,
"loss": 0.7471,
"step": 1305
},
{
"epoch": 3.1604342581423404,
"grad_norm": 0.419921875,
"learning_rate": 0.00017279519904946647,
"loss": 0.7469,
"step": 1310
},
{
"epoch": 3.172496984318456,
"grad_norm": 0.48046875,
"learning_rate": 0.00017250550790388764,
"loss": 0.7525,
"step": 1315
},
{
"epoch": 3.1845597104945718,
"grad_norm": 0.640625,
"learning_rate": 0.0001722145281396697,
"loss": 0.755,
"step": 1320
},
{
"epoch": 3.1966224366706877,
"grad_norm": 0.8125,
"learning_rate": 0.0001719222649283087,
"loss": 0.7463,
"step": 1325
},
{
"epoch": 3.2086851628468036,
"grad_norm": 0.73046875,
"learning_rate": 0.00017162872346411102,
"loss": 0.7443,
"step": 1330
},
{
"epoch": 3.220747889022919,
"grad_norm": 0.546875,
"learning_rate": 0.00017133390896410106,
"loss": 0.7454,
"step": 1335
},
{
"epoch": 3.232810615199035,
"grad_norm": 0.451171875,
"learning_rate": 0.00017103782666792844,
"loss": 0.7459,
"step": 1340
},
{
"epoch": 3.244873341375151,
"grad_norm": 1.359375,
"learning_rate": 0.00017074048183777504,
"loss": 0.7454,
"step": 1345
},
{
"epoch": 3.2569360675512664,
"grad_norm": 1.296875,
"learning_rate": 0.00017044187975826124,
"loss": 0.7462,
"step": 1350
},
{
"epoch": 3.2689987937273823,
"grad_norm": 0.42578125,
"learning_rate": 0.00017014202573635228,
"loss": 0.7441,
"step": 1355
},
{
"epoch": 3.2810615199034983,
"grad_norm": 0.61328125,
"learning_rate": 0.00016984092510126367,
"loss": 0.7378,
"step": 1360
},
{
"epoch": 3.293124246079614,
"grad_norm": 1.1640625,
"learning_rate": 0.00016953858320436672,
"loss": 0.7529,
"step": 1365
},
{
"epoch": 3.3051869722557297,
"grad_norm": 0.9921875,
"learning_rate": 0.0001692350054190932,
"loss": 0.7426,
"step": 1370
},
{
"epoch": 3.3172496984318456,
"grad_norm": 1.03125,
"learning_rate": 0.00016893019714084008,
"loss": 0.7414,
"step": 1375
},
{
"epoch": 3.3293124246079615,
"grad_norm": 0.7421875,
"learning_rate": 0.0001686241637868734,
"loss": 0.7461,
"step": 1380
},
{
"epoch": 3.341375150784077,
"grad_norm": 0.6640625,
"learning_rate": 0.0001683169107962321,
"loss": 0.7418,
"step": 1385
},
{
"epoch": 3.353437876960193,
"grad_norm": 1.0859375,
"learning_rate": 0.00016800844362963147,
"loss": 0.7371,
"step": 1390
},
{
"epoch": 3.365500603136309,
"grad_norm": 0.453125,
"learning_rate": 0.0001676987677693659,
"loss": 0.736,
"step": 1395
},
{
"epoch": 3.3775633293124248,
"grad_norm": 0.515625,
"learning_rate": 0.00016738788871921152,
"loss": 0.738,
"step": 1400
},
{
"epoch": 3.3896260554885402,
"grad_norm": 0.50390625,
"learning_rate": 0.00016707581200432845,
"loss": 0.7458,
"step": 1405
},
{
"epoch": 3.401688781664656,
"grad_norm": 0.53125,
"learning_rate": 0.00016676254317116252,
"loss": 0.7379,
"step": 1410
},
{
"epoch": 3.413751507840772,
"grad_norm": 0.3828125,
"learning_rate": 0.00016644808778734668,
"loss": 0.7328,
"step": 1415
},
{
"epoch": 3.425814234016888,
"grad_norm": 0.41796875,
"learning_rate": 0.0001661324514416022,
"loss": 0.7389,
"step": 1420
},
{
"epoch": 3.4378769601930035,
"grad_norm": 0.486328125,
"learning_rate": 0.00016581563974363902,
"loss": 0.7339,
"step": 1425
},
{
"epoch": 3.4499396863691194,
"grad_norm": 0.75,
"learning_rate": 0.00016549765832405653,
"loss": 0.7338,
"step": 1430
},
{
"epoch": 3.4620024125452353,
"grad_norm": 0.388671875,
"learning_rate": 0.00016517851283424304,
"loss": 0.7367,
"step": 1435
},
{
"epoch": 3.4740651387213513,
"grad_norm": 0.51171875,
"learning_rate": 0.0001648582089462756,
"loss": 0.7476,
"step": 1440
},
{
"epoch": 3.4861278648974667,
"grad_norm": 0.51171875,
"learning_rate": 0.00016453675235281913,
"loss": 0.7285,
"step": 1445
},
{
"epoch": 3.4981905910735827,
"grad_norm": 0.37109375,
"learning_rate": 0.00016421414876702518,
"loss": 0.7326,
"step": 1450
},
{
"epoch": 3.5102533172496986,
"grad_norm": 0.4375,
"learning_rate": 0.00016389040392243056,
"loss": 0.7314,
"step": 1455
},
{
"epoch": 3.522316043425814,
"grad_norm": 0.439453125,
"learning_rate": 0.00016356552357285522,
"loss": 0.7326,
"step": 1460
},
{
"epoch": 3.53437876960193,
"grad_norm": 0.42578125,
"learning_rate": 0.00016323951349230017,
"loss": 0.7269,
"step": 1465
},
{
"epoch": 3.546441495778046,
"grad_norm": 0.4296875,
"learning_rate": 0.0001629123794748447,
"loss": 0.7265,
"step": 1470
},
{
"epoch": 3.558504221954162,
"grad_norm": 0.373046875,
"learning_rate": 0.0001625841273345436,
"loss": 0.734,
"step": 1475
},
{
"epoch": 3.5705669481302773,
"grad_norm": 0.43359375,
"learning_rate": 0.00016225476290532374,
"loss": 0.7316,
"step": 1480
},
{
"epoch": 3.5826296743063932,
"grad_norm": 2.734375,
"learning_rate": 0.0001619242920408802,
"loss": 0.7232,
"step": 1485
},
{
"epoch": 3.594692400482509,
"grad_norm": 0.419921875,
"learning_rate": 0.00016159272061457255,
"loss": 0.7239,
"step": 1490
},
{
"epoch": 3.6067551266586246,
"grad_norm": 0.60546875,
"learning_rate": 0.0001612600545193203,
"loss": 0.7207,
"step": 1495
},
{
"epoch": 3.6188178528347406,
"grad_norm": 0.44140625,
"learning_rate": 0.0001609262996674981,
"loss": 0.7236,
"step": 1500
},
{
"epoch": 3.6308805790108565,
"grad_norm": 0.462890625,
"learning_rate": 0.00016059146199083087,
"loss": 0.736,
"step": 1505
},
{
"epoch": 3.6429433051869724,
"grad_norm": 0.71484375,
"learning_rate": 0.0001602555474402881,
"loss": 0.7294,
"step": 1510
},
{
"epoch": 3.6550060313630883,
"grad_norm": 1.5234375,
"learning_rate": 0.0001599185619859784,
"loss": 0.7288,
"step": 1515
},
{
"epoch": 3.667068757539204,
"grad_norm": 4.3125,
"learning_rate": 0.00015958051161704307,
"loss": 0.7411,
"step": 1520
},
{
"epoch": 3.6791314837153197,
"grad_norm": 0.86328125,
"learning_rate": 0.0001592414023415499,
"loss": 0.7431,
"step": 1525
},
{
"epoch": 3.691194209891435,
"grad_norm": 3.75,
"learning_rate": 0.00015890124018638638,
"loss": 0.7379,
"step": 1530
},
{
"epoch": 3.703256936067551,
"grad_norm": 0.59765625,
"learning_rate": 0.00015856003119715242,
"loss": 0.7388,
"step": 1535
},
{
"epoch": 3.715319662243667,
"grad_norm": 0.49609375,
"learning_rate": 0.00015821778143805296,
"loss": 0.7282,
"step": 1540
},
{
"epoch": 3.727382388419783,
"grad_norm": 0.98046875,
"learning_rate": 0.00015787449699179035,
"loss": 0.73,
"step": 1545
},
{
"epoch": 3.739445114595899,
"grad_norm": 0.4296875,
"learning_rate": 0.00015753018395945598,
"loss": 0.7288,
"step": 1550
},
{
"epoch": 3.7515078407720144,
"grad_norm": 0.9921875,
"learning_rate": 0.00015718484846042214,
"loss": 0.7296,
"step": 1555
},
{
"epoch": 3.7635705669481303,
"grad_norm": 0.5625,
"learning_rate": 0.00015683849663223308,
"loss": 0.7361,
"step": 1560
},
{
"epoch": 3.7756332931242462,
"grad_norm": 0.45703125,
"learning_rate": 0.00015649113463049586,
"loss": 0.7303,
"step": 1565
},
{
"epoch": 3.7876960193003617,
"grad_norm": 0.8125,
"learning_rate": 0.00015614276862877113,
"loss": 0.7263,
"step": 1570
},
{
"epoch": 3.7997587454764776,
"grad_norm": 0.484375,
"learning_rate": 0.00015579340481846336,
"loss": 0.7193,
"step": 1575
},
{
"epoch": 3.8118214716525936,
"grad_norm": 1.171875,
"learning_rate": 0.0001554430494087107,
"loss": 0.7251,
"step": 1580
},
{
"epoch": 3.8238841978287095,
"grad_norm": 0.486328125,
"learning_rate": 0.00015509170862627476,
"loss": 0.7284,
"step": 1585
},
{
"epoch": 3.835946924004825,
"grad_norm": 0.7109375,
"learning_rate": 0.00015473938871542986,
"loss": 0.722,
"step": 1590
},
{
"epoch": 3.848009650180941,
"grad_norm": 0.66796875,
"learning_rate": 0.00015438609593785202,
"loss": 0.7337,
"step": 1595
},
{
"epoch": 3.860072376357057,
"grad_norm": 0.63671875,
"learning_rate": 0.00015403183657250788,
"loss": 0.7216,
"step": 1600
},
{
"epoch": 3.8721351025331723,
"grad_norm": 0.65625,
"learning_rate": 0.0001536766169155428,
"loss": 0.7222,
"step": 1605
},
{
"epoch": 3.884197828709288,
"grad_norm": 0.8515625,
"learning_rate": 0.00015332044328016914,
"loss": 0.7214,
"step": 1610
},
{
"epoch": 3.896260554885404,
"grad_norm": 1.890625,
"learning_rate": 0.0001529633219965541,
"loss": 0.7264,
"step": 1615
},
{
"epoch": 3.90832328106152,
"grad_norm": 0.474609375,
"learning_rate": 0.00015260525941170712,
"loss": 0.7232,
"step": 1620
},
{
"epoch": 3.920386007237636,
"grad_norm": 0.703125,
"learning_rate": 0.00015224626188936705,
"loss": 0.7238,
"step": 1625
},
{
"epoch": 3.9324487334137515,
"grad_norm": 0.9296875,
"learning_rate": 0.00015188633580988926,
"loss": 0.7227,
"step": 1630
},
{
"epoch": 3.9445114595898674,
"grad_norm": 0.427734375,
"learning_rate": 0.00015152548757013182,
"loss": 0.7194,
"step": 1635
},
{
"epoch": 3.956574185765983,
"grad_norm": 0.8046875,
"learning_rate": 0.00015116372358334233,
"loss": 0.7223,
"step": 1640
},
{
"epoch": 3.9686369119420988,
"grad_norm": 0.546875,
"learning_rate": 0.00015080105027904362,
"loss": 0.7192,
"step": 1645
},
{
"epoch": 3.9806996381182147,
"grad_norm": 0.404296875,
"learning_rate": 0.00015043747410291945,
"loss": 0.7206,
"step": 1650
},
{
"epoch": 3.9927623642943306,
"grad_norm": 0.66015625,
"learning_rate": 0.00015007300151670015,
"loss": 0.7192,
"step": 1655
},
{
"epoch": 4.0,
"eval_loss": 2.397005558013916,
"eval_runtime": 0.2387,
"eval_samples_per_second": 41.901,
"eval_steps_per_second": 4.19,
"step": 1658
},
{
"epoch": 4.0048250904704465,
"grad_norm": 0.44921875,
"learning_rate": 0.00014970763899804763,
"loss": 0.7131,
"step": 1660
},
{
"epoch": 4.0168878166465625,
"grad_norm": 0.390625,
"learning_rate": 0.00014934139304044033,
"loss": 0.6955,
"step": 1665
},
{
"epoch": 4.0289505428226775,
"grad_norm": 0.474609375,
"learning_rate": 0.0001489742701530578,
"loss": 0.6989,
"step": 1670
},
{
"epoch": 4.041013268998793,
"grad_norm": 0.71484375,
"learning_rate": 0.0001486062768606649,
"loss": 0.6997,
"step": 1675
},
{
"epoch": 4.053075995174909,
"grad_norm": 1.0546875,
"learning_rate": 0.00014823741970349606,
"loss": 0.7021,
"step": 1680
},
{
"epoch": 4.065138721351025,
"grad_norm": 0.45703125,
"learning_rate": 0.0001478677052371389,
"loss": 0.7052,
"step": 1685
},
{
"epoch": 4.077201447527141,
"grad_norm": 0.828125,
"learning_rate": 0.0001474971400324177,
"loss": 0.7003,
"step": 1690
},
{
"epoch": 4.089264173703257,
"grad_norm": 0.65234375,
"learning_rate": 0.00014712573067527664,
"loss": 0.7057,
"step": 1695
},
{
"epoch": 4.101326899879373,
"grad_norm": 0.8046875,
"learning_rate": 0.00014675348376666278,
"loss": 0.7061,
"step": 1700
},
{
"epoch": 4.113389626055489,
"grad_norm": 0.390625,
"learning_rate": 0.00014638040592240877,
"loss": 0.6999,
"step": 1705
},
{
"epoch": 4.125452352231604,
"grad_norm": 1.234375,
"learning_rate": 0.00014600650377311522,
"loss": 0.6965,
"step": 1710
},
{
"epoch": 4.13751507840772,
"grad_norm": 0.8203125,
"learning_rate": 0.00014563178396403284,
"loss": 0.7121,
"step": 1715
},
{
"epoch": 4.149577804583836,
"grad_norm": 0.66015625,
"learning_rate": 0.00014525625315494435,
"loss": 0.7075,
"step": 1720
},
{
"epoch": 4.161640530759952,
"grad_norm": 0.421875,
"learning_rate": 0.00014487991802004623,
"loss": 0.7059,
"step": 1725
},
{
"epoch": 4.173703256936068,
"grad_norm": 0.5,
"learning_rate": 0.00014450278524782986,
"loss": 0.7055,
"step": 1730
},
{
"epoch": 4.185765983112184,
"grad_norm": 0.5390625,
"learning_rate": 0.00014412486154096284,
"loss": 0.7039,
"step": 1735
},
{
"epoch": 4.1978287092882995,
"grad_norm": 0.390625,
"learning_rate": 0.00014374615361616985,
"loss": 0.6998,
"step": 1740
},
{
"epoch": 4.209891435464415,
"grad_norm": 0.373046875,
"learning_rate": 0.00014336666820411327,
"loss": 0.7046,
"step": 1745
},
{
"epoch": 4.2219541616405305,
"grad_norm": 0.74609375,
"learning_rate": 0.00014298641204927342,
"loss": 0.7002,
"step": 1750
},
{
"epoch": 4.234016887816646,
"grad_norm": 0.59375,
"learning_rate": 0.00014260539190982886,
"loss": 0.6991,
"step": 1755
},
{
"epoch": 4.246079613992762,
"grad_norm": 0.361328125,
"learning_rate": 0.0001422236145575362,
"loss": 0.6973,
"step": 1760
},
{
"epoch": 4.258142340168878,
"grad_norm": 0.58203125,
"learning_rate": 0.00014184108677760984,
"loss": 0.701,
"step": 1765
},
{
"epoch": 4.270205066344994,
"grad_norm": 0.373046875,
"learning_rate": 0.00014145781536860122,
"loss": 0.6959,
"step": 1770
},
{
"epoch": 4.28226779252111,
"grad_norm": 0.453125,
"learning_rate": 0.00014107380714227806,
"loss": 0.6982,
"step": 1775
},
{
"epoch": 4.294330518697226,
"grad_norm": 0.4921875,
"learning_rate": 0.00014068906892350343,
"loss": 0.7017,
"step": 1780
},
{
"epoch": 4.306393244873341,
"grad_norm": 0.5625,
"learning_rate": 0.00014030360755011424,
"loss": 0.6932,
"step": 1785
},
{
"epoch": 4.318455971049457,
"grad_norm": 0.494140625,
"learning_rate": 0.0001399174298727998,
"loss": 0.7008,
"step": 1790
},
{
"epoch": 4.330518697225573,
"grad_norm": 0.408203125,
"learning_rate": 0.00013953054275498013,
"loss": 0.6944,
"step": 1795
},
{
"epoch": 4.342581423401689,
"grad_norm": 0.5078125,
"learning_rate": 0.00013914295307268396,
"loss": 0.6977,
"step": 1800
},
{
"epoch": 4.354644149577805,
"grad_norm": 0.515625,
"learning_rate": 0.0001387546677144263,
"loss": 0.7053,
"step": 1805
},
{
"epoch": 4.366706875753921,
"grad_norm": 0.43359375,
"learning_rate": 0.00013836569358108647,
"loss": 0.7112,
"step": 1810
},
{
"epoch": 4.378769601930037,
"grad_norm": 0.4296875,
"learning_rate": 0.00013797603758578496,
"loss": 0.7034,
"step": 1815
},
{
"epoch": 4.390832328106152,
"grad_norm": 0.4296875,
"learning_rate": 0.00013758570665376086,
"loss": 0.7081,
"step": 1820
},
{
"epoch": 4.402895054282268,
"grad_norm": 0.466796875,
"learning_rate": 0.0001371947077222488,
"loss": 0.7029,
"step": 1825
},
{
"epoch": 4.4149577804583835,
"grad_norm": 0.380859375,
"learning_rate": 0.00013680304774035538,
"loss": 0.699,
"step": 1830
},
{
"epoch": 4.427020506634499,
"grad_norm": 0.373046875,
"learning_rate": 0.00013641073366893607,
"loss": 0.7041,
"step": 1835
},
{
"epoch": 4.439083232810615,
"grad_norm": 0.412109375,
"learning_rate": 0.00013601777248047105,
"loss": 0.6954,
"step": 1840
},
{
"epoch": 4.451145958986731,
"grad_norm": 0.404296875,
"learning_rate": 0.00013562417115894172,
"loss": 0.6941,
"step": 1845
},
{
"epoch": 4.463208685162847,
"grad_norm": 0.3828125,
"learning_rate": 0.0001352299366997062,
"loss": 0.7027,
"step": 1850
},
{
"epoch": 4.475271411338962,
"grad_norm": 0.443359375,
"learning_rate": 0.0001348350761093753,
"loss": 0.6979,
"step": 1855
},
{
"epoch": 4.487334137515078,
"grad_norm": 0.40234375,
"learning_rate": 0.0001344395964056878,
"loss": 0.7028,
"step": 1860
},
{
"epoch": 4.499396863691194,
"grad_norm": 0.349609375,
"learning_rate": 0.00013404350461738586,
"loss": 0.6999,
"step": 1865
},
{
"epoch": 4.51145958986731,
"grad_norm": 0.470703125,
"learning_rate": 0.00013364680778409,
"loss": 0.6981,
"step": 1870
},
{
"epoch": 4.523522316043426,
"grad_norm": 0.359375,
"learning_rate": 0.00013324951295617398,
"loss": 0.6984,
"step": 1875
},
{
"epoch": 4.535585042219542,
"grad_norm": 0.34375,
"learning_rate": 0.00013285162719463961,
"loss": 0.6999,
"step": 1880
},
{
"epoch": 4.547647768395658,
"grad_norm": 0.59765625,
"learning_rate": 0.00013245315757099118,
"loss": 0.6966,
"step": 1885
},
{
"epoch": 4.559710494571773,
"grad_norm": 0.54296875,
"learning_rate": 0.00013205411116710972,
"loss": 0.6985,
"step": 1890
},
{
"epoch": 4.571773220747889,
"grad_norm": 0.51953125,
"learning_rate": 0.00013165449507512725,
"loss": 0.6873,
"step": 1895
},
{
"epoch": 4.583835946924005,
"grad_norm": 0.515625,
"learning_rate": 0.0001312543163973007,
"loss": 0.6906,
"step": 1900
},
{
"epoch": 4.595898673100121,
"grad_norm": 0.458984375,
"learning_rate": 0.00013085358224588565,
"loss": 0.7018,
"step": 1905
},
{
"epoch": 4.6079613992762365,
"grad_norm": 0.482421875,
"learning_rate": 0.00013045229974300993,
"loss": 0.6973,
"step": 1910
},
{
"epoch": 4.620024125452352,
"grad_norm": 0.67578125,
"learning_rate": 0.0001300504760205471,
"loss": 0.6942,
"step": 1915
},
{
"epoch": 4.632086851628468,
"grad_norm": 0.416015625,
"learning_rate": 0.0001296481182199896,
"loss": 0.692,
"step": 1920
},
{
"epoch": 4.644149577804583,
"grad_norm": 0.37109375,
"learning_rate": 0.00012924523349232189,
"loss": 0.6993,
"step": 1925
},
{
"epoch": 4.656212303980699,
"grad_norm": 0.4609375,
"learning_rate": 0.00012884182899789343,
"loss": 0.7002,
"step": 1930
},
{
"epoch": 4.668275030156815,
"grad_norm": 0.482421875,
"learning_rate": 0.0001284379119062912,
"loss": 0.694,
"step": 1935
},
{
"epoch": 4.680337756332931,
"grad_norm": 0.390625,
"learning_rate": 0.00012803348939621252,
"loss": 0.6981,
"step": 1940
},
{
"epoch": 4.692400482509047,
"grad_norm": 0.38671875,
"learning_rate": 0.0001276285686553373,
"loss": 0.6957,
"step": 1945
},
{
"epoch": 4.704463208685163,
"grad_norm": 0.67578125,
"learning_rate": 0.00012722315688020047,
"loss": 0.6988,
"step": 1950
},
{
"epoch": 4.716525934861279,
"grad_norm": 0.5703125,
"learning_rate": 0.00012681726127606376,
"loss": 0.6962,
"step": 1955
},
{
"epoch": 4.728588661037395,
"grad_norm": 0.455078125,
"learning_rate": 0.00012641088905678802,
"loss": 0.6877,
"step": 1960
},
{
"epoch": 4.74065138721351,
"grad_norm": 0.71484375,
"learning_rate": 0.0001260040474447048,
"loss": 0.6978,
"step": 1965
},
{
"epoch": 4.752714113389626,
"grad_norm": 0.48828125,
"learning_rate": 0.000125596743670488,
"loss": 0.6903,
"step": 1970
},
{
"epoch": 4.764776839565742,
"grad_norm": 0.39453125,
"learning_rate": 0.0001251889849730254,
"loss": 0.6989,
"step": 1975
},
{
"epoch": 4.776839565741858,
"grad_norm": 0.373046875,
"learning_rate": 0.00012478077859929,
"loss": 0.7012,
"step": 1980
},
{
"epoch": 4.788902291917974,
"grad_norm": 0.3671875,
"learning_rate": 0.00012437213180421122,
"loss": 0.6962,
"step": 1985
},
{
"epoch": 4.8009650180940895,
"grad_norm": 0.486328125,
"learning_rate": 0.0001239630518505459,
"loss": 0.7007,
"step": 1990
},
{
"epoch": 4.813027744270205,
"grad_norm": 0.3515625,
"learning_rate": 0.0001235535460087494,
"loss": 0.6958,
"step": 1995
},
{
"epoch": 4.825090470446321,
"grad_norm": 0.359375,
"learning_rate": 0.00012314362155684612,
"loss": 0.6963,
"step": 2000
},
{
"epoch": 4.837153196622436,
"grad_norm": 0.3359375,
"learning_rate": 0.0001227332857803004,
"loss": 0.6985,
"step": 2005
},
{
"epoch": 4.849215922798552,
"grad_norm": 0.408203125,
"learning_rate": 0.00012232254597188688,
"loss": 0.6881,
"step": 2010
},
{
"epoch": 4.861278648974668,
"grad_norm": 0.416015625,
"learning_rate": 0.00012191140943156091,
"loss": 0.695,
"step": 2015
},
{
"epoch": 4.873341375150784,
"grad_norm": 0.5,
"learning_rate": 0.00012149988346632894,
"loss": 0.6966,
"step": 2020
},
{
"epoch": 4.8854041013269,
"grad_norm": 0.443359375,
"learning_rate": 0.00012108797539011847,
"loss": 0.6937,
"step": 2025
},
{
"epoch": 4.897466827503016,
"grad_norm": 0.6796875,
"learning_rate": 0.00012067569252364809,
"loss": 0.6939,
"step": 2030
},
{
"epoch": 4.909529553679132,
"grad_norm": 0.68359375,
"learning_rate": 0.00012026304219429759,
"loss": 0.7004,
"step": 2035
},
{
"epoch": 4.921592279855247,
"grad_norm": 0.400390625,
"learning_rate": 0.0001198500317359774,
"loss": 0.6884,
"step": 2040
},
{
"epoch": 4.933655006031363,
"grad_norm": 0.361328125,
"learning_rate": 0.00011943666848899853,
"loss": 0.6988,
"step": 2045
},
{
"epoch": 4.945717732207479,
"grad_norm": 0.66796875,
"learning_rate": 0.00011902295979994192,
"loss": 0.6925,
"step": 2050
},
{
"epoch": 4.957780458383595,
"grad_norm": 0.359375,
"learning_rate": 0.00011860891302152798,
"loss": 0.7009,
"step": 2055
},
{
"epoch": 4.969843184559711,
"grad_norm": 0.36328125,
"learning_rate": 0.00011819453551248592,
"loss": 0.6949,
"step": 2060
},
{
"epoch": 4.981905910735827,
"grad_norm": 0.3515625,
"learning_rate": 0.00011777983463742285,
"loss": 0.6937,
"step": 2065
},
{
"epoch": 4.9939686369119425,
"grad_norm": 0.341796875,
"learning_rate": 0.00011736481776669306,
"loss": 0.6986,
"step": 2070
},
{
"epoch": 4.998793727382388,
"eval_loss": 2.416297197341919,
"eval_runtime": 0.261,
"eval_samples_per_second": 38.317,
"eval_steps_per_second": 3.832,
"step": 2072
},
{
"epoch": 5.0060313630880575,
"grad_norm": 0.36328125,
"learning_rate": 0.0001169494922762668,
"loss": 0.6738,
"step": 2075
},
{
"epoch": 5.018094089264173,
"grad_norm": 0.376953125,
"learning_rate": 0.00011653386554759946,
"loss": 0.6829,
"step": 2080
},
{
"epoch": 5.030156815440289,
"grad_norm": 0.462890625,
"learning_rate": 0.00011611794496750019,
"loss": 0.6748,
"step": 2085
},
{
"epoch": 5.042219541616405,
"grad_norm": 0.51953125,
"learning_rate": 0.00011570173792800066,
"loss": 0.6752,
"step": 2090
},
{
"epoch": 5.054282267792521,
"grad_norm": 0.57421875,
"learning_rate": 0.00011528525182622371,
"loss": 0.6672,
"step": 2095
},
{
"epoch": 5.066344993968637,
"grad_norm": 0.408203125,
"learning_rate": 0.00011486849406425188,
"loss": 0.6744,
"step": 2100
},
{
"epoch": 5.078407720144753,
"grad_norm": 0.41015625,
"learning_rate": 0.00011445147204899587,
"loss": 0.6868,
"step": 2105
},
{
"epoch": 5.090470446320868,
"grad_norm": 0.451171875,
"learning_rate": 0.00011403419319206284,
"loss": 0.681,
"step": 2110
},
{
"epoch": 5.102533172496984,
"grad_norm": 0.404296875,
"learning_rate": 0.00011361666490962468,
"loss": 0.6833,
"step": 2115
},
{
"epoch": 5.1145958986731,
"grad_norm": 0.41796875,
"learning_rate": 0.0001131988946222863,
"loss": 0.6677,
"step": 2120
},
{
"epoch": 5.126658624849216,
"grad_norm": 0.5703125,
"learning_rate": 0.00011278088975495369,
"loss": 0.6852,
"step": 2125
},
{
"epoch": 5.138721351025332,
"grad_norm": 0.59765625,
"learning_rate": 0.00011236265773670196,
"loss": 0.6755,
"step": 2130
},
{
"epoch": 5.150784077201448,
"grad_norm": 0.51953125,
"learning_rate": 0.00011194420600064329,
"loss": 0.6803,
"step": 2135
},
{
"epoch": 5.162846803377564,
"grad_norm": 0.5078125,
"learning_rate": 0.00011152554198379484,
"loss": 0.6776,
"step": 2140
},
{
"epoch": 5.1749095295536796,
"grad_norm": 0.5703125,
"learning_rate": 0.00011110667312694653,
"loss": 0.6764,
"step": 2145
},
{
"epoch": 5.186972255729795,
"grad_norm": 0.431640625,
"learning_rate": 0.00011068760687452895,
"loss": 0.6748,
"step": 2150
},
{
"epoch": 5.1990349819059105,
"grad_norm": 0.53515625,
"learning_rate": 0.00011026835067448082,
"loss": 0.6837,
"step": 2155
},
{
"epoch": 5.211097708082026,
"grad_norm": 0.8671875,
"learning_rate": 0.00010984891197811687,
"loss": 0.6763,
"step": 2160
},
{
"epoch": 5.223160434258142,
"grad_norm": 0.5078125,
"learning_rate": 0.00010942929823999517,
"loss": 0.685,
"step": 2165
},
{
"epoch": 5.235223160434258,
"grad_norm": 0.5234375,
"learning_rate": 0.00010900951691778481,
"loss": 0.6851,
"step": 2170
},
{
"epoch": 5.247285886610374,
"grad_norm": 0.404296875,
"learning_rate": 0.00010858957547213327,
"loss": 0.677,
"step": 2175
},
{
"epoch": 5.25934861278649,
"grad_norm": 0.404296875,
"learning_rate": 0.00010816948136653386,
"loss": 0.6815,
"step": 2180
},
{
"epoch": 5.271411338962605,
"grad_norm": 0.443359375,
"learning_rate": 0.0001077492420671931,
"loss": 0.6799,
"step": 2185
},
{
"epoch": 5.283474065138721,
"grad_norm": 0.484375,
"learning_rate": 0.000107328865042898,
"loss": 0.677,
"step": 2190
},
{
"epoch": 5.295536791314837,
"grad_norm": 0.4375,
"learning_rate": 0.00010690835776488328,
"loss": 0.6851,
"step": 2195
},
{
"epoch": 5.307599517490953,
"grad_norm": 0.59375,
"learning_rate": 0.00010648772770669861,
"loss": 0.6816,
"step": 2200
},
{
"epoch": 5.319662243667069,
"grad_norm": 0.470703125,
"learning_rate": 0.00010606698234407586,
"loss": 0.6715,
"step": 2205
},
{
"epoch": 5.331724969843185,
"grad_norm": 0.53515625,
"learning_rate": 0.00010564612915479612,
"loss": 0.6798,
"step": 2210
},
{
"epoch": 5.343787696019301,
"grad_norm": 0.44921875,
"learning_rate": 0.00010522517561855683,
"loss": 0.6787,
"step": 2215
},
{
"epoch": 5.355850422195417,
"grad_norm": 0.439453125,
"learning_rate": 0.00010480412921683888,
"loss": 0.6855,
"step": 2220
},
{
"epoch": 5.367913148371532,
"grad_norm": 0.388671875,
"learning_rate": 0.00010438299743277371,
"loss": 0.6823,
"step": 2225
},
{
"epoch": 5.379975874547648,
"grad_norm": 0.375,
"learning_rate": 0.00010396178775101014,
"loss": 0.6781,
"step": 2230
},
{
"epoch": 5.3920386007237635,
"grad_norm": 0.349609375,
"learning_rate": 0.00010354050765758147,
"loss": 0.6819,
"step": 2235
},
{
"epoch": 5.404101326899879,
"grad_norm": 0.41015625,
"learning_rate": 0.00010311916463977242,
"loss": 0.6793,
"step": 2240
},
{
"epoch": 5.416164053075995,
"grad_norm": 0.376953125,
"learning_rate": 0.00010269776618598602,
"loss": 0.6783,
"step": 2245
},
{
"epoch": 5.428226779252111,
"grad_norm": 0.37109375,
"learning_rate": 0.00010227631978561056,
"loss": 0.6899,
"step": 2250
},
{
"epoch": 5.440289505428227,
"grad_norm": 0.361328125,
"learning_rate": 0.00010185483292888654,
"loss": 0.6771,
"step": 2255
},
{
"epoch": 5.452352231604342,
"grad_norm": 0.341796875,
"learning_rate": 0.00010143331310677331,
"loss": 0.6756,
"step": 2260
},
{
"epoch": 5.464414957780458,
"grad_norm": 0.498046875,
"learning_rate": 0.00010101176781081625,
"loss": 0.6761,
"step": 2265
},
{
"epoch": 5.476477683956574,
"grad_norm": 0.8671875,
"learning_rate": 0.00010059020453301345,
"loss": 0.6769,
"step": 2270
},
{
"epoch": 5.48854041013269,
"grad_norm": 0.8046875,
"learning_rate": 0.00010016863076568254,
"loss": 0.6803,
"step": 2275
},
{
"epoch": 5.500603136308806,
"grad_norm": 0.578125,
"learning_rate": 9.974705400132764e-05,
"loss": 0.6773,
"step": 2280
},
{
"epoch": 5.512665862484922,
"grad_norm": 0.36328125,
"learning_rate": 9.932548173250607e-05,
"loss": 0.6759,
"step": 2285
},
{
"epoch": 5.524728588661038,
"grad_norm": 0.37109375,
"learning_rate": 9.890392145169531e-05,
"loss": 0.6813,
"step": 2290
},
{
"epoch": 5.536791314837153,
"grad_norm": 0.34765625,
"learning_rate": 9.848238065115975e-05,
"loss": 0.6801,
"step": 2295
},
{
"epoch": 5.548854041013269,
"grad_norm": 0.380859375,
"learning_rate": 9.806086682281758e-05,
"loss": 0.6768,
"step": 2300
},
{
"epoch": 5.560916767189385,
"grad_norm": 0.376953125,
"learning_rate": 9.763938745810757e-05,
"loss": 0.677,
"step": 2305
},
{
"epoch": 5.572979493365501,
"grad_norm": 0.345703125,
"learning_rate": 9.721795004785605e-05,
"loss": 0.6783,
"step": 2310
},
{
"epoch": 5.5850422195416165,
"grad_norm": 0.365234375,
"learning_rate": 9.679656208214366e-05,
"loss": 0.6885,
"step": 2315
},
{
"epoch": 5.597104945717732,
"grad_norm": 0.349609375,
"learning_rate": 9.637523105017229e-05,
"loss": 0.6786,
"step": 2320
},
{
"epoch": 5.609167671893848,
"grad_norm": 0.48046875,
"learning_rate": 9.595396444013205e-05,
"loss": 0.6791,
"step": 2325
},
{
"epoch": 5.621230398069963,
"grad_norm": 0.384765625,
"learning_rate": 9.553276973906786e-05,
"loss": 0.6731,
"step": 2330
},
{
"epoch": 5.633293124246079,
"grad_norm": 0.40625,
"learning_rate": 9.511165443274688e-05,
"loss": 0.6718,
"step": 2335
},
{
"epoch": 5.645355850422195,
"grad_norm": 0.42578125,
"learning_rate": 9.469062600552509e-05,
"loss": 0.6723,
"step": 2340
},
{
"epoch": 5.657418576598311,
"grad_norm": 0.431640625,
"learning_rate": 9.426969194021437e-05,
"loss": 0.6793,
"step": 2345
},
{
"epoch": 5.669481302774427,
"grad_norm": 0.36328125,
"learning_rate": 9.384885971794961e-05,
"loss": 0.6822,
"step": 2350
},
{
"epoch": 5.681544028950543,
"grad_norm": 0.361328125,
"learning_rate": 9.342813681805565e-05,
"loss": 0.6811,
"step": 2355
},
{
"epoch": 5.693606755126659,
"grad_norm": 0.421875,
"learning_rate": 9.300753071791434e-05,
"loss": 0.6811,
"step": 2360
},
{
"epoch": 5.705669481302774,
"grad_norm": 0.453125,
"learning_rate": 9.258704889283175e-05,
"loss": 0.6873,
"step": 2365
},
{
"epoch": 5.71773220747889,
"grad_norm": 0.36328125,
"learning_rate": 9.216669881590515e-05,
"loss": 0.6764,
"step": 2370
},
{
"epoch": 5.729794933655006,
"grad_norm": 0.482421875,
"learning_rate": 9.174648795789039e-05,
"loss": 0.6735,
"step": 2375
},
{
"epoch": 5.741857659831122,
"grad_norm": 0.4140625,
"learning_rate": 9.132642378706894e-05,
"loss": 0.6767,
"step": 2380
},
{
"epoch": 5.753920386007238,
"grad_norm": 0.35546875,
"learning_rate": 9.09065137691153e-05,
"loss": 0.6793,
"step": 2385
},
{
"epoch": 5.765983112183354,
"grad_norm": 0.39453125,
"learning_rate": 9.048676536696425e-05,
"loss": 0.6815,
"step": 2390
},
{
"epoch": 5.7780458383594695,
"grad_norm": 0.3828125,
"learning_rate": 9.006718604067823e-05,
"loss": 0.6889,
"step": 2395
},
{
"epoch": 5.790108564535585,
"grad_norm": 0.39453125,
"learning_rate": 8.964778324731467e-05,
"loss": 0.6778,
"step": 2400
},
{
"epoch": 5.8021712907117005,
"grad_norm": 0.3828125,
"learning_rate": 8.922856444079362e-05,
"loss": 0.6748,
"step": 2405
},
{
"epoch": 5.814234016887816,
"grad_norm": 0.51171875,
"learning_rate": 8.880953707176514e-05,
"loss": 0.6752,
"step": 2410
},
{
"epoch": 5.826296743063932,
"grad_norm": 0.6171875,
"learning_rate": 8.839070858747697e-05,
"loss": 0.6773,
"step": 2415
},
{
"epoch": 5.838359469240048,
"grad_norm": 0.458984375,
"learning_rate": 8.797208643164212e-05,
"loss": 0.6766,
"step": 2420
},
{
"epoch": 5.850422195416164,
"grad_norm": 0.5546875,
"learning_rate": 8.755367804430651e-05,
"loss": 0.6801,
"step": 2425
},
{
"epoch": 5.86248492159228,
"grad_norm": 0.53125,
"learning_rate": 8.713549086171691e-05,
"loss": 0.6732,
"step": 2430
},
{
"epoch": 5.874547647768396,
"grad_norm": 0.376953125,
"learning_rate": 8.671753231618866e-05,
"loss": 0.6822,
"step": 2435
},
{
"epoch": 5.886610373944512,
"grad_norm": 0.53125,
"learning_rate": 8.629980983597358e-05,
"loss": 0.6734,
"step": 2440
},
{
"epoch": 5.898673100120627,
"grad_norm": 0.59375,
"learning_rate": 8.5882330845128e-05,
"loss": 0.6751,
"step": 2445
},
{
"epoch": 5.910735826296743,
"grad_norm": 0.40234375,
"learning_rate": 8.546510276338078e-05,
"loss": 0.6684,
"step": 2450
},
{
"epoch": 5.922798552472859,
"grad_norm": 0.369140625,
"learning_rate": 8.504813300600141e-05,
"loss": 0.6755,
"step": 2455
},
{
"epoch": 5.934861278648975,
"grad_norm": 0.5546875,
"learning_rate": 8.463142898366834e-05,
"loss": 0.6837,
"step": 2460
},
{
"epoch": 5.946924004825091,
"grad_norm": 0.361328125,
"learning_rate": 8.42149981023371e-05,
"loss": 0.6831,
"step": 2465
},
{
"epoch": 5.958986731001207,
"grad_norm": 0.341796875,
"learning_rate": 8.37988477631088e-05,
"loss": 0.675,
"step": 2470
},
{
"epoch": 5.9710494571773225,
"grad_norm": 0.380859375,
"learning_rate": 8.33829853620986e-05,
"loss": 0.6663,
"step": 2475
},
{
"epoch": 5.9831121833534375,
"grad_norm": 0.439453125,
"learning_rate": 8.296741829030418e-05,
"loss": 0.6817,
"step": 2480
},
{
"epoch": 5.9951749095295535,
"grad_norm": 0.341796875,
"learning_rate": 8.255215393347443e-05,
"loss": 0.6737,
"step": 2485
},
{
"epoch": 6.0,
"eval_loss": 2.423604965209961,
"eval_runtime": 0.2373,
"eval_samples_per_second": 42.143,
"eval_steps_per_second": 4.214,
"step": 2487
},
{
"epoch": 6.007237635705669,
"grad_norm": 0.474609375,
"learning_rate": 8.213719967197817e-05,
"loss": 0.6732,
"step": 2490
},
{
"epoch": 6.019300361881785,
"grad_norm": 0.390625,
"learning_rate": 8.172256288067298e-05,
"loss": 0.6612,
"step": 2495
},
{
"epoch": 6.031363088057901,
"grad_norm": 0.404296875,
"learning_rate": 8.130825092877418e-05,
"loss": 0.6584,
"step": 2500
},
{
"epoch": 6.043425814234017,
"grad_norm": 0.357421875,
"learning_rate": 8.089427117972378e-05,
"loss": 0.6623,
"step": 2505
},
{
"epoch": 6.055488540410133,
"grad_norm": 0.43359375,
"learning_rate": 8.04806309910597e-05,
"loss": 0.6606,
"step": 2510
},
{
"epoch": 6.067551266586248,
"grad_norm": 0.376953125,
"learning_rate": 8.006733771428485e-05,
"loss": 0.667,
"step": 2515
},
{
"epoch": 6.079613992762364,
"grad_norm": 0.40234375,
"learning_rate": 7.965439869473664e-05,
"loss": 0.6666,
"step": 2520
},
{
"epoch": 6.09167671893848,
"grad_norm": 0.42578125,
"learning_rate": 7.924182127145642e-05,
"loss": 0.6538,
"step": 2525
},
{
"epoch": 6.103739445114596,
"grad_norm": 0.39453125,
"learning_rate": 7.882961277705895e-05,
"loss": 0.6693,
"step": 2530
},
{
"epoch": 6.115802171290712,
"grad_norm": 0.3828125,
"learning_rate": 7.841778053760211e-05,
"loss": 0.6706,
"step": 2535
},
{
"epoch": 6.127864897466828,
"grad_norm": 0.41015625,
"learning_rate": 7.800633187245673e-05,
"loss": 0.6676,
"step": 2540
},
{
"epoch": 6.139927623642944,
"grad_norm": 0.37109375,
"learning_rate": 7.759527409417653e-05,
"loss": 0.6664,
"step": 2545
},
{
"epoch": 6.151990349819059,
"grad_norm": 0.4140625,
"learning_rate": 7.718461450836804e-05,
"loss": 0.6557,
"step": 2550
},
{
"epoch": 6.164053075995175,
"grad_norm": 0.578125,
"learning_rate": 7.67743604135609e-05,
"loss": 0.6618,
"step": 2555
},
{
"epoch": 6.1761158021712905,
"grad_norm": 0.380859375,
"learning_rate": 7.636451910107806e-05,
"loss": 0.6667,
"step": 2560
},
{
"epoch": 6.1881785283474064,
"grad_norm": 0.396484375,
"learning_rate": 7.595509785490617e-05,
"loss": 0.6557,
"step": 2565
},
{
"epoch": 6.200241254523522,
"grad_norm": 0.37109375,
"learning_rate": 7.554610395156624e-05,
"loss": 0.6644,
"step": 2570
},
{
"epoch": 6.212303980699638,
"grad_norm": 0.40234375,
"learning_rate": 7.513754465998418e-05,
"loss": 0.6622,
"step": 2575
},
{
"epoch": 6.224366706875754,
"grad_norm": 0.341796875,
"learning_rate": 7.472942724136174e-05,
"loss": 0.6657,
"step": 2580
},
{
"epoch": 6.23642943305187,
"grad_norm": 0.349609375,
"learning_rate": 7.432175894904733e-05,
"loss": 0.669,
"step": 2585
},
{
"epoch": 6.248492159227985,
"grad_norm": 0.63671875,
"learning_rate": 7.391454702840722e-05,
"loss": 0.6641,
"step": 2590
},
{
"epoch": 6.260554885404101,
"grad_norm": 0.45703125,
"learning_rate": 7.350779871669669e-05,
"loss": 0.6673,
"step": 2595
},
{
"epoch": 6.272617611580217,
"grad_norm": 0.439453125,
"learning_rate": 7.310152124293146e-05,
"loss": 0.6639,
"step": 2600
},
{
"epoch": 6.284680337756333,
"grad_norm": 0.4140625,
"learning_rate": 7.269572182775921e-05,
"loss": 0.6649,
"step": 2605
},
{
"epoch": 6.296743063932449,
"grad_norm": 0.51953125,
"learning_rate": 7.229040768333115e-05,
"loss": 0.6606,
"step": 2610
},
{
"epoch": 6.308805790108565,
"grad_norm": 0.46484375,
"learning_rate": 7.188558601317396e-05,
"loss": 0.6597,
"step": 2615
},
{
"epoch": 6.320868516284681,
"grad_norm": 0.478515625,
"learning_rate": 7.14812640120618e-05,
"loss": 0.6671,
"step": 2620
},
{
"epoch": 6.332931242460796,
"grad_norm": 0.396484375,
"learning_rate": 7.107744886588824e-05,
"loss": 0.664,
"step": 2625
},
{
"epoch": 6.344993968636912,
"grad_norm": 0.34375,
"learning_rate": 7.067414775153871e-05,
"loss": 0.6565,
"step": 2630
},
{
"epoch": 6.357056694813028,
"grad_norm": 0.40625,
"learning_rate": 7.027136783676295e-05,
"loss": 0.6623,
"step": 2635
},
{
"epoch": 6.3691194209891435,
"grad_norm": 0.37890625,
"learning_rate": 6.986911628004753e-05,
"loss": 0.6641,
"step": 2640
},
{
"epoch": 6.381182147165259,
"grad_norm": 0.34375,
"learning_rate": 6.94674002304887e-05,
"loss": 0.6664,
"step": 2645
},
{
"epoch": 6.393244873341375,
"grad_norm": 0.384765625,
"learning_rate": 6.906622682766526e-05,
"loss": 0.6601,
"step": 2650
},
{
"epoch": 6.405307599517491,
"grad_norm": 0.37109375,
"learning_rate": 6.866560320151179e-05,
"loss": 0.6696,
"step": 2655
},
{
"epoch": 6.417370325693607,
"grad_norm": 0.404296875,
"learning_rate": 6.826553647219175e-05,
"loss": 0.6652,
"step": 2660
},
{
"epoch": 6.429433051869722,
"grad_norm": 0.3828125,
"learning_rate": 6.786603374997111e-05,
"loss": 0.6721,
"step": 2665
},
{
"epoch": 6.441495778045838,
"grad_norm": 0.4609375,
"learning_rate": 6.74671021350919e-05,
"loss": 0.6717,
"step": 2670
},
{
"epoch": 6.453558504221954,
"grad_norm": 0.349609375,
"learning_rate": 6.706874871764603e-05,
"loss": 0.6646,
"step": 2675
},
{
"epoch": 6.46562123039807,
"grad_norm": 0.384765625,
"learning_rate": 6.667098057744927e-05,
"loss": 0.6673,
"step": 2680
},
{
"epoch": 6.477683956574186,
"grad_norm": 0.3515625,
"learning_rate": 6.627380478391543e-05,
"loss": 0.66,
"step": 2685
},
{
"epoch": 6.489746682750302,
"grad_norm": 0.36328125,
"learning_rate": 6.587722839593073e-05,
"loss": 0.6636,
"step": 2690
},
{
"epoch": 6.501809408926418,
"grad_norm": 0.361328125,
"learning_rate": 6.548125846172836e-05,
"loss": 0.6635,
"step": 2695
},
{
"epoch": 6.513872135102533,
"grad_norm": 0.45703125,
"learning_rate": 6.508590201876317e-05,
"loss": 0.6708,
"step": 2700
},
{
"epoch": 6.525934861278649,
"grad_norm": 0.3671875,
"learning_rate": 6.469116609358654e-05,
"loss": 0.669,
"step": 2705
},
{
"epoch": 6.537997587454765,
"grad_norm": 0.359375,
"learning_rate": 6.429705770172168e-05,
"loss": 0.6638,
"step": 2710
},
{
"epoch": 6.550060313630881,
"grad_norm": 0.400390625,
"learning_rate": 6.390358384753881e-05,
"loss": 0.6574,
"step": 2715
},
{
"epoch": 6.5621230398069965,
"grad_norm": 0.365234375,
"learning_rate": 6.351075152413068e-05,
"loss": 0.6666,
"step": 2720
},
{
"epoch": 6.574185765983112,
"grad_norm": 0.37109375,
"learning_rate": 6.311856771318832e-05,
"loss": 0.6734,
"step": 2725
},
{
"epoch": 6.586248492159228,
"grad_norm": 0.38671875,
"learning_rate": 6.272703938487694e-05,
"loss": 0.6633,
"step": 2730
},
{
"epoch": 6.598311218335343,
"grad_norm": 0.36328125,
"learning_rate": 6.233617349771205e-05,
"loss": 0.6625,
"step": 2735
},
{
"epoch": 6.610373944511459,
"grad_norm": 0.41015625,
"learning_rate": 6.194597699843581e-05,
"loss": 0.6596,
"step": 2740
},
{
"epoch": 6.622436670687575,
"grad_norm": 0.384765625,
"learning_rate": 6.155645682189351e-05,
"loss": 0.6679,
"step": 2745
},
{
"epoch": 6.634499396863691,
"grad_norm": 0.37109375,
"learning_rate": 6.116761989091042e-05,
"loss": 0.6646,
"step": 2750
},
{
"epoch": 6.646562123039807,
"grad_norm": 0.443359375,
"learning_rate": 6.0779473116168627e-05,
"loss": 0.6662,
"step": 2755
},
{
"epoch": 6.658624849215923,
"grad_norm": 0.51953125,
"learning_rate": 6.039202339608432e-05,
"loss": 0.6681,
"step": 2760
},
{
"epoch": 6.670687575392039,
"grad_norm": 0.369140625,
"learning_rate": 6.000527761668513e-05,
"loss": 0.661,
"step": 2765
},
{
"epoch": 6.682750301568154,
"grad_norm": 0.47265625,
"learning_rate": 5.961924265148777e-05,
"loss": 0.6679,
"step": 2770
},
{
"epoch": 6.69481302774427,
"grad_norm": 0.5390625,
"learning_rate": 5.9233925361375864e-05,
"loss": 0.6653,
"step": 2775
},
{
"epoch": 6.706875753920386,
"grad_norm": 0.470703125,
"learning_rate": 5.884933259447798e-05,
"loss": 0.6685,
"step": 2780
},
{
"epoch": 6.718938480096502,
"grad_norm": 0.419921875,
"learning_rate": 5.8465471186046015e-05,
"loss": 0.6678,
"step": 2785
},
{
"epoch": 6.731001206272618,
"grad_norm": 0.4296875,
"learning_rate": 5.8082347958333625e-05,
"loss": 0.6661,
"step": 2790
},
{
"epoch": 6.743063932448734,
"grad_norm": 0.349609375,
"learning_rate": 5.769996972047491e-05,
"loss": 0.6664,
"step": 2795
},
{
"epoch": 6.7551266586248495,
"grad_norm": 0.375,
"learning_rate": 5.731834326836366e-05,
"loss": 0.6703,
"step": 2800
},
{
"epoch": 6.7671893848009645,
"grad_norm": 0.388671875,
"learning_rate": 5.693747538453229e-05,
"loss": 0.6661,
"step": 2805
},
{
"epoch": 6.7792521109770805,
"grad_norm": 0.357421875,
"learning_rate": 5.6557372838031384e-05,
"loss": 0.6598,
"step": 2810
},
{
"epoch": 6.791314837153196,
"grad_norm": 0.3515625,
"learning_rate": 5.6178042384309546e-05,
"loss": 0.6663,
"step": 2815
},
{
"epoch": 6.803377563329312,
"grad_norm": 0.375,
"learning_rate": 5.579949076509305e-05,
"loss": 0.6664,
"step": 2820
},
{
"epoch": 6.815440289505428,
"grad_norm": 0.349609375,
"learning_rate": 5.542172470826632e-05,
"loss": 0.6623,
"step": 2825
},
{
"epoch": 6.827503015681544,
"grad_norm": 0.349609375,
"learning_rate": 5.5044750927752106e-05,
"loss": 0.6643,
"step": 2830
},
{
"epoch": 6.83956574185766,
"grad_norm": 0.349609375,
"learning_rate": 5.466857612339229e-05,
"loss": 0.6641,
"step": 2835
},
{
"epoch": 6.851628468033776,
"grad_norm": 0.361328125,
"learning_rate": 5.429320698082887e-05,
"loss": 0.6631,
"step": 2840
},
{
"epoch": 6.863691194209892,
"grad_norm": 0.37890625,
"learning_rate": 5.391865017138493e-05,
"loss": 0.6648,
"step": 2845
},
{
"epoch": 6.875753920386007,
"grad_norm": 0.37109375,
"learning_rate": 5.354491235194635e-05,
"loss": 0.6698,
"step": 2850
},
{
"epoch": 6.887816646562123,
"grad_norm": 0.42578125,
"learning_rate": 5.3172000164843195e-05,
"loss": 0.6569,
"step": 2855
},
{
"epoch": 6.899879372738239,
"grad_norm": 0.392578125,
"learning_rate": 5.279992023773195e-05,
"loss": 0.6653,
"step": 2860
},
{
"epoch": 6.911942098914355,
"grad_norm": 0.419921875,
"learning_rate": 5.2428679183477505e-05,
"loss": 0.6678,
"step": 2865
},
{
"epoch": 6.924004825090471,
"grad_norm": 0.5625,
"learning_rate": 5.205828360003568e-05,
"loss": 0.6667,
"step": 2870
},
{
"epoch": 6.936067551266587,
"grad_norm": 0.478515625,
"learning_rate": 5.168874007033615e-05,
"loss": 0.6647,
"step": 2875
},
{
"epoch": 6.9481302774427025,
"grad_norm": 0.419921875,
"learning_rate": 5.1320055162165115e-05,
"loss": 0.6611,
"step": 2880
},
{
"epoch": 6.9601930036188175,
"grad_norm": 0.3515625,
"learning_rate": 5.0952235428048966e-05,
"loss": 0.6668,
"step": 2885
},
{
"epoch": 6.9722557297949335,
"grad_norm": 0.349609375,
"learning_rate": 5.0585287405137305e-05,
"loss": 0.6656,
"step": 2890
},
{
"epoch": 6.984318455971049,
"grad_norm": 0.3515625,
"learning_rate": 5.021921761508739e-05,
"loss": 0.6631,
"step": 2895
},
{
"epoch": 6.996381182147165,
"grad_norm": 0.349609375,
"learning_rate": 4.9854032563947714e-05,
"loss": 0.6633,
"step": 2900
},
{
"epoch": 6.998793727382388,
"eval_loss": 2.4494216442108154,
"eval_runtime": 0.2569,
"eval_samples_per_second": 38.924,
"eval_steps_per_second": 3.892,
"step": 2901
},
{
"epoch": 7.008443908323281,
"grad_norm": 0.33984375,
"learning_rate": 4.9489738742042616e-05,
"loss": 0.6572,
"step": 2905
},
{
"epoch": 7.020506634499397,
"grad_norm": 0.369140625,
"learning_rate": 4.912634262385695e-05,
"loss": 0.6561,
"step": 2910
},
{
"epoch": 7.032569360675513,
"grad_norm": 0.37109375,
"learning_rate": 4.876385066792084e-05,
"loss": 0.659,
"step": 2915
},
{
"epoch": 7.044632086851628,
"grad_norm": 0.380859375,
"learning_rate": 4.8402269316695134e-05,
"loss": 0.6543,
"step": 2920
},
{
"epoch": 7.056694813027744,
"grad_norm": 0.365234375,
"learning_rate": 4.804160499645667e-05,
"loss": 0.6483,
"step": 2925
},
{
"epoch": 7.06875753920386,
"grad_norm": 0.36328125,
"learning_rate": 4.768186411718417e-05,
"loss": 0.6515,
"step": 2930
},
{
"epoch": 7.080820265379976,
"grad_norm": 0.3671875,
"learning_rate": 4.732305307244444e-05,
"loss": 0.6613,
"step": 2935
},
{
"epoch": 7.092882991556092,
"grad_norm": 0.359375,
"learning_rate": 4.696517823927842e-05,
"loss": 0.6636,
"step": 2940
},
{
"epoch": 7.104945717732208,
"grad_norm": 0.3671875,
"learning_rate": 4.660824597808825e-05,
"loss": 0.6584,
"step": 2945
},
{
"epoch": 7.117008443908324,
"grad_norm": 0.349609375,
"learning_rate": 4.625226263252386e-05,
"loss": 0.6458,
"step": 2950
},
{
"epoch": 7.129071170084439,
"grad_norm": 0.369140625,
"learning_rate": 4.589723452937049e-05,
"loss": 0.652,
"step": 2955
},
{
"epoch": 7.141133896260555,
"grad_norm": 0.3515625,
"learning_rate": 4.554316797843609e-05,
"loss": 0.6545,
"step": 2960
},
{
"epoch": 7.1531966224366705,
"grad_norm": 0.353515625,
"learning_rate": 4.519006927243922e-05,
"loss": 0.6529,
"step": 2965
},
{
"epoch": 7.1652593486127865,
"grad_norm": 0.3515625,
"learning_rate": 4.483794468689728e-05,
"loss": 0.6543,
"step": 2970
},
{
"epoch": 7.177322074788902,
"grad_norm": 0.365234375,
"learning_rate": 4.448680048001485e-05,
"loss": 0.6566,
"step": 2975
},
{
"epoch": 7.189384800965018,
"grad_norm": 0.3671875,
"learning_rate": 4.413664289257265e-05,
"loss": 0.6579,
"step": 2980
},
{
"epoch": 7.201447527141134,
"grad_norm": 0.3515625,
"learning_rate": 4.3787478147816296e-05,
"loss": 0.6578,
"step": 2985
},
{
"epoch": 7.213510253317249,
"grad_norm": 0.345703125,
"learning_rate": 4.343931245134616e-05,
"loss": 0.6566,
"step": 2990
},
{
"epoch": 7.225572979493365,
"grad_norm": 0.416015625,
"learning_rate": 4.3092151991006654e-05,
"loss": 0.6571,
"step": 2995
},
{
"epoch": 7.237635705669481,
"grad_norm": 0.369140625,
"learning_rate": 4.274600293677647e-05,
"loss": 0.6571,
"step": 3000
},
{
"epoch": 7.249698431845597,
"grad_norm": 0.357421875,
"learning_rate": 4.240087144065895e-05,
"loss": 0.6607,
"step": 3005
},
{
"epoch": 7.261761158021713,
"grad_norm": 0.35546875,
"learning_rate": 4.2056763636572574e-05,
"loss": 0.6583,
"step": 3010
},
{
"epoch": 7.273823884197829,
"grad_norm": 0.359375,
"learning_rate": 4.1713685640242165e-05,
"loss": 0.6566,
"step": 3015
},
{
"epoch": 7.285886610373945,
"grad_norm": 0.36328125,
"learning_rate": 4.137164354908999e-05,
"loss": 0.651,
"step": 3020
},
{
"epoch": 7.297949336550061,
"grad_norm": 0.353515625,
"learning_rate": 4.103064344212748e-05,
"loss": 0.6496,
"step": 3025
},
{
"epoch": 7.310012062726176,
"grad_norm": 0.349609375,
"learning_rate": 4.069069137984731e-05,
"loss": 0.6603,
"step": 3030
},
{
"epoch": 7.322074788902292,
"grad_norm": 0.369140625,
"learning_rate": 4.035179340411541e-05,
"loss": 0.6599,
"step": 3035
},
{
"epoch": 7.334137515078408,
"grad_norm": 0.35546875,
"learning_rate": 4.001395553806391e-05,
"loss": 0.6504,
"step": 3040
},
{
"epoch": 7.3462002412545235,
"grad_norm": 0.36328125,
"learning_rate": 3.967718378598376e-05,
"loss": 0.66,
"step": 3045
},
{
"epoch": 7.3582629674306395,
"grad_norm": 0.48828125,
"learning_rate": 3.9341484133218366e-05,
"loss": 0.6586,
"step": 3050
},
{
"epoch": 7.370325693606755,
"grad_norm": 0.390625,
"learning_rate": 3.9006862546056876e-05,
"loss": 0.6655,
"step": 3055
},
{
"epoch": 7.382388419782871,
"grad_norm": 0.369140625,
"learning_rate": 3.8673324971628357e-05,
"loss": 0.657,
"step": 3060
},
{
"epoch": 7.394451145958986,
"grad_norm": 0.38671875,
"learning_rate": 3.834087733779611e-05,
"loss": 0.6577,
"step": 3065
},
{
"epoch": 7.406513872135102,
"grad_norm": 0.37109375,
"learning_rate": 3.800952555305216e-05,
"loss": 0.6587,
"step": 3070
},
{
"epoch": 7.418576598311218,
"grad_norm": 0.361328125,
"learning_rate": 3.767927550641237e-05,
"loss": 0.6615,
"step": 3075
},
{
"epoch": 7.430639324487334,
"grad_norm": 0.37109375,
"learning_rate": 3.7350133067311686e-05,
"loss": 0.6537,
"step": 3080
},
{
"epoch": 7.44270205066345,
"grad_norm": 0.388671875,
"learning_rate": 3.702210408550002e-05,
"loss": 0.6545,
"step": 3085
},
{
"epoch": 7.454764776839566,
"grad_norm": 0.357421875,
"learning_rate": 3.669519439093801e-05,
"loss": 0.6571,
"step": 3090
},
{
"epoch": 7.466827503015682,
"grad_norm": 0.359375,
"learning_rate": 3.6369409793693544e-05,
"loss": 0.6555,
"step": 3095
},
{
"epoch": 7.478890229191798,
"grad_norm": 0.359375,
"learning_rate": 3.604475608383858e-05,
"loss": 0.659,
"step": 3100
},
{
"epoch": 7.490952955367913,
"grad_norm": 0.3984375,
"learning_rate": 3.5721239031346066e-05,
"loss": 0.6569,
"step": 3105
},
{
"epoch": 7.503015681544029,
"grad_norm": 0.357421875,
"learning_rate": 3.539886438598756e-05,
"loss": 0.6622,
"step": 3110
},
{
"epoch": 7.515078407720145,
"grad_norm": 0.3515625,
"learning_rate": 3.507763787723086e-05,
"loss": 0.6545,
"step": 3115
},
{
"epoch": 7.527141133896261,
"grad_norm": 0.36328125,
"learning_rate": 3.475756521413839e-05,
"loss": 0.6543,
"step": 3120
},
{
"epoch": 7.5392038600723765,
"grad_norm": 0.46484375,
"learning_rate": 3.443865208526554e-05,
"loss": 0.6597,
"step": 3125
},
{
"epoch": 7.5512665862484925,
"grad_norm": 0.361328125,
"learning_rate": 3.412090415855963e-05,
"loss": 0.656,
"step": 3130
},
{
"epoch": 7.563329312424608,
"grad_norm": 0.349609375,
"learning_rate": 3.3804327081259304e-05,
"loss": 0.6556,
"step": 3135
},
{
"epoch": 7.575392038600723,
"grad_norm": 0.369140625,
"learning_rate": 3.348892647979389e-05,
"loss": 0.6591,
"step": 3140
},
{
"epoch": 7.587454764776839,
"grad_norm": 0.36328125,
"learning_rate": 3.317470795968376e-05,
"loss": 0.6617,
"step": 3145
},
{
"epoch": 7.599517490952955,
"grad_norm": 0.361328125,
"learning_rate": 3.2861677105440336e-05,
"loss": 0.6562,
"step": 3150
},
{
"epoch": 7.611580217129071,
"grad_norm": 0.345703125,
"learning_rate": 3.254983948046705e-05,
"loss": 0.6485,
"step": 3155
},
{
"epoch": 7.623642943305187,
"grad_norm": 0.353515625,
"learning_rate": 3.223920062696052e-05,
"loss": 0.6595,
"step": 3160
},
{
"epoch": 7.635705669481303,
"grad_norm": 0.365234375,
"learning_rate": 3.192976606581186e-05,
"loss": 0.6538,
"step": 3165
},
{
"epoch": 7.647768395657419,
"grad_norm": 0.345703125,
"learning_rate": 3.1621541296508695e-05,
"loss": 0.6535,
"step": 3170
},
{
"epoch": 7.659831121833534,
"grad_norm": 0.35546875,
"learning_rate": 3.131453179703734e-05,
"loss": 0.6542,
"step": 3175
},
{
"epoch": 7.67189384800965,
"grad_norm": 0.380859375,
"learning_rate": 3.100874302378559e-05,
"loss": 0.657,
"step": 3180
},
{
"epoch": 7.683956574185766,
"grad_norm": 0.369140625,
"learning_rate": 3.0704180411445524e-05,
"loss": 0.6587,
"step": 3185
},
{
"epoch": 7.696019300361882,
"grad_norm": 0.349609375,
"learning_rate": 3.0400849372917073e-05,
"loss": 0.6587,
"step": 3190
},
{
"epoch": 7.708082026537998,
"grad_norm": 0.36328125,
"learning_rate": 3.009875529921181e-05,
"loss": 0.6564,
"step": 3195
},
{
"epoch": 7.720144752714114,
"grad_norm": 0.375,
"learning_rate": 2.979790355935703e-05,
"loss": 0.659,
"step": 3200
},
{
"epoch": 7.7322074788902295,
"grad_norm": 0.37109375,
"learning_rate": 2.9498299500300518e-05,
"loss": 0.6536,
"step": 3205
},
{
"epoch": 7.744270205066345,
"grad_norm": 0.58984375,
"learning_rate": 2.919994844681524e-05,
"loss": 0.6572,
"step": 3210
},
{
"epoch": 7.7563329312424605,
"grad_norm": 0.349609375,
"learning_rate": 2.890285570140504e-05,
"loss": 0.6549,
"step": 3215
},
{
"epoch": 7.768395657418576,
"grad_norm": 0.353515625,
"learning_rate": 2.8607026544210114e-05,
"loss": 0.6529,
"step": 3220
},
{
"epoch": 7.780458383594692,
"grad_norm": 0.388671875,
"learning_rate": 2.8312466232913282e-05,
"loss": 0.6607,
"step": 3225
},
{
"epoch": 7.792521109770808,
"grad_norm": 0.361328125,
"learning_rate": 2.801918000264665e-05,
"loss": 0.6606,
"step": 3230
},
{
"epoch": 7.804583835946924,
"grad_norm": 0.361328125,
"learning_rate": 2.7727173065898347e-05,
"loss": 0.6537,
"step": 3235
},
{
"epoch": 7.81664656212304,
"grad_norm": 0.349609375,
"learning_rate": 2.7436450612420095e-05,
"loss": 0.6492,
"step": 3240
},
{
"epoch": 7.828709288299155,
"grad_norm": 0.3515625,
"learning_rate": 2.7147017809134822e-05,
"loss": 0.6536,
"step": 3245
},
{
"epoch": 7.840772014475271,
"grad_norm": 0.369140625,
"learning_rate": 2.6858879800044866e-05,
"loss": 0.6572,
"step": 3250
},
{
"epoch": 7.852834740651387,
"grad_norm": 0.380859375,
"learning_rate": 2.6572041706140683e-05,
"loss": 0.6598,
"step": 3255
},
{
"epoch": 7.864897466827503,
"grad_norm": 0.37109375,
"learning_rate": 2.6286508625309624e-05,
"loss": 0.6583,
"step": 3260
},
{
"epoch": 7.876960193003619,
"grad_norm": 0.40625,
"learning_rate": 2.6002285632245482e-05,
"loss": 0.6595,
"step": 3265
},
{
"epoch": 7.889022919179735,
"grad_norm": 0.34765625,
"learning_rate": 2.57193777783582e-05,
"loss": 0.6511,
"step": 3270
},
{
"epoch": 7.901085645355851,
"grad_norm": 0.349609375,
"learning_rate": 2.5437790091684244e-05,
"loss": 0.6534,
"step": 3275
},
{
"epoch": 7.913148371531967,
"grad_norm": 0.34765625,
"learning_rate": 2.515752757679707e-05,
"loss": 0.6527,
"step": 3280
},
{
"epoch": 7.9252110977080825,
"grad_norm": 0.392578125,
"learning_rate": 2.4878595214718236e-05,
"loss": 0.6579,
"step": 3285
},
{
"epoch": 7.9372738238841976,
"grad_norm": 0.3515625,
"learning_rate": 2.4600997962828987e-05,
"loss": 0.6571,
"step": 3290
},
{
"epoch": 7.9493365500603135,
"grad_norm": 0.34375,
"learning_rate": 2.432474075478194e-05,
"loss": 0.6556,
"step": 3295
},
{
"epoch": 7.961399276236429,
"grad_norm": 0.345703125,
"learning_rate": 2.404982850041363e-05,
"loss": 0.6585,
"step": 3300
},
{
"epoch": 7.973462002412545,
"grad_norm": 0.359375,
"learning_rate": 2.3776266085657018e-05,
"loss": 0.6544,
"step": 3305
},
{
"epoch": 7.985524728588661,
"grad_norm": 0.375,
"learning_rate": 2.3504058372454884e-05,
"loss": 0.6539,
"step": 3310
},
{
"epoch": 7.997587454764777,
"grad_norm": 0.3671875,
"learning_rate": 2.3233210198673218e-05,
"loss": 0.661,
"step": 3315
},
{
"epoch": 8.0,
"eval_loss": 2.462099313735962,
"eval_runtime": 0.2392,
"eval_samples_per_second": 41.81,
"eval_steps_per_second": 4.181,
"step": 3316
},
{
"epoch": 8.009650180940893,
"grad_norm": 0.365234375,
"learning_rate": 2.2963726378015327e-05,
"loss": 0.6547,
"step": 3320
},
{
"epoch": 8.021712907117008,
"grad_norm": 0.35546875,
"learning_rate": 2.269561169993637e-05,
"loss": 0.6514,
"step": 3325
},
{
"epoch": 8.033775633293125,
"grad_norm": 0.34765625,
"learning_rate": 2.242887092955801e-05,
"loss": 0.6449,
"step": 3330
},
{
"epoch": 8.04583835946924,
"grad_norm": 0.341796875,
"learning_rate": 2.2163508807583998e-05,
"loss": 0.6495,
"step": 3335
},
{
"epoch": 8.057901085645355,
"grad_norm": 0.384765625,
"learning_rate": 2.189953005021569e-05,
"loss": 0.6556,
"step": 3340
},
{
"epoch": 8.069963811821472,
"grad_norm": 0.361328125,
"learning_rate": 2.1636939349068308e-05,
"loss": 0.6537,
"step": 3345
},
{
"epoch": 8.082026537997587,
"grad_norm": 0.365234375,
"learning_rate": 2.1375741371087677e-05,
"loss": 0.6504,
"step": 3350
},
{
"epoch": 8.094089264173704,
"grad_norm": 0.3828125,
"learning_rate": 2.111594075846701e-05,
"loss": 0.6589,
"step": 3355
},
{
"epoch": 8.106151990349819,
"grad_norm": 0.359375,
"learning_rate": 2.085754212856471e-05,
"loss": 0.6536,
"step": 3360
},
{
"epoch": 8.118214716525936,
"grad_norm": 0.365234375,
"learning_rate": 2.0600550073822056e-05,
"loss": 0.6512,
"step": 3365
},
{
"epoch": 8.13027744270205,
"grad_norm": 0.369140625,
"learning_rate": 2.0344969161681792e-05,
"loss": 0.6497,
"step": 3370
},
{
"epoch": 8.142340168878167,
"grad_norm": 0.369140625,
"learning_rate": 2.0090803934506764e-05,
"loss": 0.6492,
"step": 3375
},
{
"epoch": 8.154402895054282,
"grad_norm": 0.3984375,
"learning_rate": 1.983805890949927e-05,
"loss": 0.6446,
"step": 3380
},
{
"epoch": 8.166465621230397,
"grad_norm": 0.380859375,
"learning_rate": 1.9586738578620855e-05,
"loss": 0.6518,
"step": 3385
},
{
"epoch": 8.178528347406514,
"grad_norm": 0.361328125,
"learning_rate": 1.9336847408512328e-05,
"loss": 0.6605,
"step": 3390
},
{
"epoch": 8.19059107358263,
"grad_norm": 0.353515625,
"learning_rate": 1.908838984041452e-05,
"loss": 0.6528,
"step": 3395
},
{
"epoch": 8.202653799758746,
"grad_norm": 0.3828125,
"learning_rate": 1.884137029008921e-05,
"loss": 0.6508,
"step": 3400
},
{
"epoch": 8.214716525934861,
"grad_norm": 0.349609375,
"learning_rate": 1.859579314774079e-05,
"loss": 0.6522,
"step": 3405
},
{
"epoch": 8.226779252110978,
"grad_norm": 0.3828125,
"learning_rate": 1.8351662777938127e-05,
"loss": 0.6521,
"step": 3410
},
{
"epoch": 8.238841978287093,
"grad_norm": 0.35546875,
"learning_rate": 1.810898351953699e-05,
"loss": 0.6537,
"step": 3415
},
{
"epoch": 8.250904704463208,
"grad_norm": 0.345703125,
"learning_rate": 1.7867759685603114e-05,
"loss": 0.6518,
"step": 3420
},
{
"epoch": 8.262967430639325,
"grad_norm": 0.357421875,
"learning_rate": 1.762799556333524e-05,
"loss": 0.6546,
"step": 3425
},
{
"epoch": 8.27503015681544,
"grad_norm": 0.3515625,
"learning_rate": 1.738969541398926e-05,
"loss": 0.6564,
"step": 3430
},
{
"epoch": 8.287092882991557,
"grad_norm": 0.349609375,
"learning_rate": 1.7152863472802195e-05,
"loss": 0.6515,
"step": 3435
},
{
"epoch": 8.299155609167672,
"grad_norm": 0.34375,
"learning_rate": 1.691750394891707e-05,
"loss": 0.6526,
"step": 3440
},
{
"epoch": 8.311218335343789,
"grad_norm": 0.359375,
"learning_rate": 1.668362102530815e-05,
"loss": 0.6508,
"step": 3445
},
{
"epoch": 8.323281061519904,
"grad_norm": 0.359375,
"learning_rate": 1.6451218858706374e-05,
"loss": 0.6547,
"step": 3450
},
{
"epoch": 8.335343787696019,
"grad_norm": 0.349609375,
"learning_rate": 1.6220301579525798e-05,
"loss": 0.652,
"step": 3455
},
{
"epoch": 8.347406513872135,
"grad_norm": 0.349609375,
"learning_rate": 1.59908732917899e-05,
"loss": 0.6496,
"step": 3460
},
{
"epoch": 8.35946924004825,
"grad_norm": 0.353515625,
"learning_rate": 1.5762938073058853e-05,
"loss": 0.654,
"step": 3465
},
{
"epoch": 8.371531966224367,
"grad_norm": 0.349609375,
"learning_rate": 1.5536499974356866e-05,
"loss": 0.6491,
"step": 3470
},
{
"epoch": 8.383594692400482,
"grad_norm": 0.359375,
"learning_rate": 1.5311563020100373e-05,
"loss": 0.6531,
"step": 3475
},
{
"epoch": 8.395657418576599,
"grad_norm": 0.349609375,
"learning_rate": 1.5088131208026367e-05,
"loss": 0.6545,
"step": 3480
},
{
"epoch": 8.407720144752714,
"grad_norm": 0.369140625,
"learning_rate": 1.4866208509121383e-05,
"loss": 0.6506,
"step": 3485
},
{
"epoch": 8.41978287092883,
"grad_norm": 0.349609375,
"learning_rate": 1.4645798867551008e-05,
"loss": 0.6495,
"step": 3490
},
{
"epoch": 8.431845597104946,
"grad_norm": 0.3515625,
"learning_rate": 1.442690620058964e-05,
"loss": 0.656,
"step": 3495
},
{
"epoch": 8.443908323281061,
"grad_norm": 0.345703125,
"learning_rate": 1.4209534398551016e-05,
"loss": 0.6574,
"step": 3500
},
{
"epoch": 8.455971049457178,
"grad_norm": 0.349609375,
"learning_rate": 1.3993687324718929e-05,
"loss": 0.6553,
"step": 3505
},
{
"epoch": 8.468033775633293,
"grad_norm": 0.357421875,
"learning_rate": 1.3779368815278647e-05,
"loss": 0.6513,
"step": 3510
},
{
"epoch": 8.48009650180941,
"grad_norm": 0.34375,
"learning_rate": 1.3566582679248796e-05,
"loss": 0.652,
"step": 3515
},
{
"epoch": 8.492159227985525,
"grad_norm": 0.3515625,
"learning_rate": 1.335533269841347e-05,
"loss": 0.655,
"step": 3520
},
{
"epoch": 8.50422195416164,
"grad_norm": 0.34765625,
"learning_rate": 1.314562262725526e-05,
"loss": 0.6593,
"step": 3525
},
{
"epoch": 8.516284680337757,
"grad_norm": 0.3515625,
"learning_rate": 1.2937456192888309e-05,
"loss": 0.6572,
"step": 3530
},
{
"epoch": 8.528347406513872,
"grad_norm": 0.36328125,
"learning_rate": 1.2730837094992199e-05,
"loss": 0.6437,
"step": 3535
},
{
"epoch": 8.540410132689988,
"grad_norm": 0.353515625,
"learning_rate": 1.252576900574618e-05,
"loss": 0.655,
"step": 3540
},
{
"epoch": 8.552472858866103,
"grad_norm": 0.353515625,
"learning_rate": 1.2322255569763852e-05,
"loss": 0.6562,
"step": 3545
},
{
"epoch": 8.56453558504222,
"grad_norm": 0.373046875,
"learning_rate": 1.2120300404028507e-05,
"loss": 0.6499,
"step": 3550
},
{
"epoch": 8.576598311218335,
"grad_norm": 0.345703125,
"learning_rate": 1.1919907097828653e-05,
"loss": 0.6584,
"step": 3555
},
{
"epoch": 8.588661037394452,
"grad_norm": 0.357421875,
"learning_rate": 1.1721079212694452e-05,
"loss": 0.656,
"step": 3560
},
{
"epoch": 8.600723763570567,
"grad_norm": 0.37109375,
"learning_rate": 1.1523820282334219e-05,
"loss": 0.6548,
"step": 3565
},
{
"epoch": 8.612786489746682,
"grad_norm": 0.357421875,
"learning_rate": 1.1328133812571784e-05,
"loss": 0.6494,
"step": 3570
},
{
"epoch": 8.624849215922799,
"grad_norm": 0.369140625,
"learning_rate": 1.1134023281284023e-05,
"loss": 0.6484,
"step": 3575
},
{
"epoch": 8.636911942098914,
"grad_norm": 0.359375,
"learning_rate": 1.0941492138339183e-05,
"loss": 0.6616,
"step": 3580
},
{
"epoch": 8.64897466827503,
"grad_norm": 0.345703125,
"learning_rate": 1.0750543805535518e-05,
"loss": 0.6589,
"step": 3585
},
{
"epoch": 8.661037394451146,
"grad_norm": 0.34765625,
"learning_rate": 1.0561181676540444e-05,
"loss": 0.6541,
"step": 3590
},
{
"epoch": 8.67310012062726,
"grad_norm": 0.345703125,
"learning_rate": 1.037340911683028e-05,
"loss": 0.6455,
"step": 3595
},
{
"epoch": 8.685162846803378,
"grad_norm": 0.345703125,
"learning_rate": 1.01872294636304e-05,
"loss": 0.6522,
"step": 3600
},
{
"epoch": 8.697225572979493,
"grad_norm": 0.365234375,
"learning_rate": 1.0002646025855888e-05,
"loss": 0.6539,
"step": 3605
},
{
"epoch": 8.70928829915561,
"grad_norm": 0.376953125,
"learning_rate": 9.81966208405285e-06,
"loss": 0.6543,
"step": 3610
},
{
"epoch": 8.721351025331725,
"grad_norm": 0.3671875,
"learning_rate": 9.638280890339945e-06,
"loss": 0.6546,
"step": 3615
},
{
"epoch": 8.733413751507841,
"grad_norm": 0.3515625,
"learning_rate": 9.458505668350759e-06,
"loss": 0.6543,
"step": 3620
},
{
"epoch": 8.745476477683956,
"grad_norm": 0.353515625,
"learning_rate": 9.280339613176348e-06,
"loss": 0.6538,
"step": 3625
},
{
"epoch": 8.757539203860073,
"grad_norm": 0.349609375,
"learning_rate": 9.103785891308547e-06,
"loss": 0.661,
"step": 3630
},
{
"epoch": 8.769601930036188,
"grad_norm": 0.34765625,
"learning_rate": 8.928847640583715e-06,
"loss": 0.644,
"step": 3635
},
{
"epoch": 8.781664656212303,
"grad_norm": 0.376953125,
"learning_rate": 8.755527970126853e-06,
"loss": 0.6599,
"step": 3640
},
{
"epoch": 8.79372738238842,
"grad_norm": 0.361328125,
"learning_rate": 8.58382996029652e-06,
"loss": 0.6494,
"step": 3645
},
{
"epoch": 8.805790108564535,
"grad_norm": 0.349609375,
"learning_rate": 8.413756662629879e-06,
"loss": 0.651,
"step": 3650
},
{
"epoch": 8.817852834740652,
"grad_norm": 0.349609375,
"learning_rate": 8.245311099788666e-06,
"loss": 0.6546,
"step": 3655
},
{
"epoch": 8.829915560916767,
"grad_norm": 0.359375,
"learning_rate": 8.07849626550531e-06,
"loss": 0.656,
"step": 3660
},
{
"epoch": 8.841978287092884,
"grad_norm": 0.39453125,
"learning_rate": 7.91331512452983e-06,
"loss": 0.6518,
"step": 3665
},
{
"epoch": 8.854041013268999,
"grad_norm": 0.3515625,
"learning_rate": 7.74977061257709e-06,
"loss": 0.6529,
"step": 3670
},
{
"epoch": 8.866103739445114,
"grad_norm": 0.353515625,
"learning_rate": 7.587865636274594e-06,
"loss": 0.6492,
"step": 3675
},
{
"epoch": 8.87816646562123,
"grad_norm": 0.376953125,
"learning_rate": 7.427603073110967e-06,
"loss": 0.6535,
"step": 3680
},
{
"epoch": 8.890229191797346,
"grad_norm": 0.365234375,
"learning_rate": 7.268985771384618e-06,
"loss": 0.651,
"step": 3685
},
{
"epoch": 8.902291917973463,
"grad_norm": 0.349609375,
"learning_rate": 7.1120165501533e-06,
"loss": 0.6458,
"step": 3690
},
{
"epoch": 8.914354644149578,
"grad_norm": 0.359375,
"learning_rate": 6.956698199183864e-06,
"loss": 0.6505,
"step": 3695
},
{
"epoch": 8.926417370325694,
"grad_norm": 0.359375,
"learning_rate": 6.803033478902765e-06,
"loss": 0.6451,
"step": 3700
},
{
"epoch": 8.93848009650181,
"grad_norm": 0.337890625,
"learning_rate": 6.651025120346988e-06,
"loss": 0.655,
"step": 3705
},
{
"epoch": 8.950542822677924,
"grad_norm": 0.353515625,
"learning_rate": 6.500675825115454e-06,
"loss": 0.6497,
"step": 3710
},
{
"epoch": 8.962605548854041,
"grad_norm": 0.345703125,
"learning_rate": 6.351988265321129e-06,
"loss": 0.6511,
"step": 3715
},
{
"epoch": 8.974668275030156,
"grad_norm": 0.361328125,
"learning_rate": 6.204965083543368e-06,
"loss": 0.6494,
"step": 3720
},
{
"epoch": 8.986731001206273,
"grad_norm": 0.353515625,
"learning_rate": 6.059608892781088e-06,
"loss": 0.6477,
"step": 3725
},
{
"epoch": 8.998793727382388,
"grad_norm": 0.357421875,
"learning_rate": 5.915922276406249e-06,
"loss": 0.643,
"step": 3730
},
{
"epoch": 8.998793727382388,
"eval_loss": 2.4790937900543213,
"eval_runtime": 0.2579,
"eval_samples_per_second": 38.775,
"eval_steps_per_second": 3.877,
"step": 3730
},
{
"epoch": 9.010856453558505,
"grad_norm": 0.35546875,
"learning_rate": 5.77390778811796e-06,
"loss": 0.65,
"step": 3735
},
{
"epoch": 9.02291917973462,
"grad_norm": 0.392578125,
"learning_rate": 5.633567951897145e-06,
"loss": 0.6517,
"step": 3740
},
{
"epoch": 9.034981905910735,
"grad_norm": 0.349609375,
"learning_rate": 5.494905261961581e-06,
"loss": 0.6468,
"step": 3745
},
{
"epoch": 9.047044632086852,
"grad_norm": 0.36328125,
"learning_rate": 5.357922182721687e-06,
"loss": 0.6557,
"step": 3750
},
{
"epoch": 9.059107358262967,
"grad_norm": 0.392578125,
"learning_rate": 5.222621148736595e-06,
"loss": 0.6555,
"step": 3755
},
{
"epoch": 9.071170084439084,
"grad_norm": 0.361328125,
"learning_rate": 5.08900456467103e-06,
"loss": 0.6509,
"step": 3760
},
{
"epoch": 9.083232810615199,
"grad_norm": 0.37890625,
"learning_rate": 4.957074805252437e-06,
"loss": 0.6506,
"step": 3765
},
{
"epoch": 9.095295536791316,
"grad_norm": 0.38671875,
"learning_rate": 4.826834215228826e-06,
"loss": 0.6464,
"step": 3770
},
{
"epoch": 9.10735826296743,
"grad_norm": 0.357421875,
"learning_rate": 4.698285109327161e-06,
"loss": 0.6515,
"step": 3775
},
{
"epoch": 9.119420989143546,
"grad_norm": 0.349609375,
"learning_rate": 4.5714297722121106e-06,
"loss": 0.6493,
"step": 3780
},
{
"epoch": 9.131483715319662,
"grad_norm": 0.349609375,
"learning_rate": 4.446270458445545e-06,
"loss": 0.6508,
"step": 3785
},
{
"epoch": 9.143546441495777,
"grad_norm": 0.37109375,
"learning_rate": 4.322809392446392e-06,
"loss": 0.6547,
"step": 3790
},
{
"epoch": 9.155609167671894,
"grad_norm": 0.357421875,
"learning_rate": 4.20104876845111e-06,
"loss": 0.6499,
"step": 3795
},
{
"epoch": 9.16767189384801,
"grad_norm": 0.35546875,
"learning_rate": 4.080990750474778e-06,
"loss": 0.6468,
"step": 3800
},
{
"epoch": 9.179734620024126,
"grad_norm": 0.375,
"learning_rate": 3.962637472272501e-06,
"loss": 0.6497,
"step": 3805
},
{
"epoch": 9.191797346200241,
"grad_norm": 0.357421875,
"learning_rate": 3.84599103730161e-06,
"loss": 0.6578,
"step": 3810
},
{
"epoch": 9.203860072376358,
"grad_norm": 0.3515625,
"learning_rate": 3.73105351868418e-06,
"loss": 0.6466,
"step": 3815
},
{
"epoch": 9.215922798552473,
"grad_norm": 0.361328125,
"learning_rate": 3.617826959170256e-06,
"loss": 0.6528,
"step": 3820
},
{
"epoch": 9.227985524728588,
"grad_norm": 0.349609375,
"learning_rate": 3.5063133711014882e-06,
"loss": 0.6565,
"step": 3825
},
{
"epoch": 9.240048250904705,
"grad_norm": 0.34375,
"learning_rate": 3.3965147363754555e-06,
"loss": 0.6544,
"step": 3830
},
{
"epoch": 9.25211097708082,
"grad_norm": 0.34765625,
"learning_rate": 3.2884330064103297e-06,
"loss": 0.6532,
"step": 3835
},
{
"epoch": 9.264173703256937,
"grad_norm": 0.3515625,
"learning_rate": 3.182070102110257e-06,
"loss": 0.6519,
"step": 3840
},
{
"epoch": 9.276236429433052,
"grad_norm": 0.369140625,
"learning_rate": 3.0774279138312657e-06,
"loss": 0.6529,
"step": 3845
},
{
"epoch": 9.288299155609169,
"grad_norm": 0.345703125,
"learning_rate": 2.974508301347534e-06,
"loss": 0.651,
"step": 3850
},
{
"epoch": 9.300361881785284,
"grad_norm": 0.353515625,
"learning_rate": 2.873313093818486e-06,
"loss": 0.6533,
"step": 3855
},
{
"epoch": 9.312424607961399,
"grad_norm": 0.35546875,
"learning_rate": 2.7738440897561723e-06,
"loss": 0.6514,
"step": 3860
},
{
"epoch": 9.324487334137515,
"grad_norm": 0.361328125,
"learning_rate": 2.676103056993362e-06,
"loss": 0.6581,
"step": 3865
},
{
"epoch": 9.33655006031363,
"grad_norm": 0.365234375,
"learning_rate": 2.580091732652101e-06,
"loss": 0.6529,
"step": 3870
},
{
"epoch": 9.348612786489747,
"grad_norm": 0.359375,
"learning_rate": 2.485811823112849e-06,
"loss": 0.6552,
"step": 3875
},
{
"epoch": 9.360675512665862,
"grad_norm": 0.39453125,
"learning_rate": 2.3932650039841687e-06,
"loss": 0.6576,
"step": 3880
},
{
"epoch": 9.372738238841979,
"grad_norm": 0.33984375,
"learning_rate": 2.302452920072895e-06,
"loss": 0.6513,
"step": 3885
},
{
"epoch": 9.384800965018094,
"grad_norm": 0.337890625,
"learning_rate": 2.213377185354959e-06,
"loss": 0.648,
"step": 3890
},
{
"epoch": 9.39686369119421,
"grad_norm": 0.361328125,
"learning_rate": 2.126039382946676e-06,
"loss": 0.6528,
"step": 3895
},
{
"epoch": 9.408926417370326,
"grad_norm": 0.359375,
"learning_rate": 2.0404410650765817e-06,
"loss": 0.646,
"step": 3900
},
{
"epoch": 9.420989143546441,
"grad_norm": 0.376953125,
"learning_rate": 1.9565837530579166e-06,
"loss": 0.654,
"step": 3905
},
{
"epoch": 9.433051869722558,
"grad_norm": 0.37109375,
"learning_rate": 1.874468937261531e-06,
"loss": 0.6514,
"step": 3910
},
{
"epoch": 9.445114595898673,
"grad_norm": 0.361328125,
"learning_rate": 1.7940980770894122e-06,
"loss": 0.6522,
"step": 3915
},
{
"epoch": 9.45717732207479,
"grad_norm": 0.33984375,
"learning_rate": 1.71547260094872e-06,
"loss": 0.6441,
"step": 3920
},
{
"epoch": 9.469240048250905,
"grad_norm": 0.373046875,
"learning_rate": 1.6385939062264822e-06,
"loss": 0.6546,
"step": 3925
},
{
"epoch": 9.48130277442702,
"grad_norm": 0.361328125,
"learning_rate": 1.5634633592646609e-06,
"loss": 0.6493,
"step": 3930
},
{
"epoch": 9.493365500603137,
"grad_norm": 0.341796875,
"learning_rate": 1.490082295335926e-06,
"loss": 0.6557,
"step": 3935
},
{
"epoch": 9.505428226779252,
"grad_norm": 0.404296875,
"learning_rate": 1.4184520186199202e-06,
"loss": 0.6489,
"step": 3940
},
{
"epoch": 9.517490952955368,
"grad_norm": 0.353515625,
"learning_rate": 1.348573802180053e-06,
"loss": 0.6515,
"step": 3945
},
{
"epoch": 9.529553679131483,
"grad_norm": 0.359375,
"learning_rate": 1.2804488879408993e-06,
"loss": 0.6514,
"step": 3950
},
{
"epoch": 9.5416164053076,
"grad_norm": 0.3515625,
"learning_rate": 1.214078486666137e-06,
"loss": 0.6548,
"step": 3955
},
{
"epoch": 9.553679131483715,
"grad_norm": 0.400390625,
"learning_rate": 1.1494637779369766e-06,
"loss": 0.6505,
"step": 3960
},
{
"epoch": 9.56574185765983,
"grad_norm": 0.357421875,
"learning_rate": 1.0866059101312553e-06,
"loss": 0.6445,
"step": 3965
},
{
"epoch": 9.577804583835947,
"grad_norm": 0.369140625,
"learning_rate": 1.0255060004030093e-06,
"loss": 0.6528,
"step": 3970
},
{
"epoch": 9.589867310012062,
"grad_norm": 0.359375,
"learning_rate": 9.661651346625889e-07,
"loss": 0.6577,
"step": 3975
},
{
"epoch": 9.601930036188179,
"grad_norm": 0.349609375,
"learning_rate": 9.085843675574079e-07,
"loss": 0.642,
"step": 3980
},
{
"epoch": 9.613992762364294,
"grad_norm": 0.34765625,
"learning_rate": 8.527647224531699e-07,
"loss": 0.6515,
"step": 3985
},
{
"epoch": 9.62605548854041,
"grad_norm": 0.359375,
"learning_rate": 7.987071914156596e-07,
"loss": 0.6547,
"step": 3990
},
{
"epoch": 9.638118214716526,
"grad_norm": 0.3515625,
"learning_rate": 7.464127351931805e-07,
"loss": 0.6513,
"step": 3995
},
{
"epoch": 9.650180940892643,
"grad_norm": 0.349609375,
"learning_rate": 6.958822831994005e-07,
"loss": 0.6486,
"step": 4000
},
{
"epoch": 9.662243667068758,
"grad_norm": 0.369140625,
"learning_rate": 6.471167334968886e-07,
"loss": 0.6516,
"step": 4005
},
{
"epoch": 9.674306393244873,
"grad_norm": 0.36328125,
"learning_rate": 6.001169527811268e-07,
"loss": 0.6508,
"step": 4010
},
{
"epoch": 9.68636911942099,
"grad_norm": 0.353515625,
"learning_rate": 5.548837763651115e-07,
"loss": 0.6512,
"step": 4015
},
{
"epoch": 9.698431845597105,
"grad_norm": 0.359375,
"learning_rate": 5.114180081645214e-07,
"loss": 0.6516,
"step": 4020
},
{
"epoch": 9.710494571773221,
"grad_norm": 0.349609375,
"learning_rate": 4.6972042068341714e-07,
"loss": 0.6506,
"step": 4025
},
{
"epoch": 9.722557297949336,
"grad_norm": 0.37109375,
"learning_rate": 4.2979175500050817e-07,
"loss": 0.6589,
"step": 4030
},
{
"epoch": 9.734620024125451,
"grad_norm": 0.34375,
"learning_rate": 3.9163272075599664e-07,
"loss": 0.6506,
"step": 4035
},
{
"epoch": 9.746682750301568,
"grad_norm": 0.34765625,
"learning_rate": 3.552439961389431e-07,
"loss": 0.6492,
"step": 4040
},
{
"epoch": 9.758745476477683,
"grad_norm": 0.36328125,
"learning_rate": 3.206262278752314e-07,
"loss": 0.6463,
"step": 4045
},
{
"epoch": 9.7708082026538,
"grad_norm": 0.37109375,
"learning_rate": 2.877800312160783e-07,
"loss": 0.657,
"step": 4050
},
{
"epoch": 9.782870928829915,
"grad_norm": 0.359375,
"learning_rate": 2.5670598992707516e-07,
"loss": 0.6509,
"step": 4055
},
{
"epoch": 9.794933655006032,
"grad_norm": 0.353515625,
"learning_rate": 2.274046562778409e-07,
"loss": 0.6568,
"step": 4060
},
{
"epoch": 9.806996381182147,
"grad_norm": 0.359375,
"learning_rate": 1.9987655103217428e-07,
"loss": 0.6505,
"step": 4065
},
{
"epoch": 9.819059107358264,
"grad_norm": 0.361328125,
"learning_rate": 1.7412216343885014e-07,
"loss": 0.6493,
"step": 4070
},
{
"epoch": 9.831121833534379,
"grad_norm": 0.37890625,
"learning_rate": 1.5014195122287078e-07,
"loss": 0.6527,
"step": 4075
},
{
"epoch": 9.843184559710494,
"grad_norm": 0.369140625,
"learning_rate": 1.2793634057732818e-07,
"loss": 0.656,
"step": 4080
},
{
"epoch": 9.85524728588661,
"grad_norm": 0.359375,
"learning_rate": 1.0750572615590982e-07,
"loss": 0.6508,
"step": 4085
},
{
"epoch": 9.867310012062726,
"grad_norm": 0.349609375,
"learning_rate": 8.885047106578227e-08,
"loss": 0.6539,
"step": 4090
},
{
"epoch": 9.879372738238843,
"grad_norm": 0.34765625,
"learning_rate": 7.197090686119623e-08,
"loss": 0.6515,
"step": 4095
},
{
"epoch": 9.891435464414958,
"grad_norm": 0.345703125,
"learning_rate": 5.6867333537580226e-08,
"loss": 0.6523,
"step": 4100
},
{
"epoch": 9.903498190591074,
"grad_norm": 0.353515625,
"learning_rate": 4.354001952621145e-08,
"loss": 0.6545,
"step": 4105
},
{
"epoch": 9.91556091676719,
"grad_norm": 0.359375,
"learning_rate": 3.1989201689452967e-08,
"loss": 0.6485,
"step": 4110
},
{
"epoch": 9.927623642943304,
"grad_norm": 0.36328125,
"learning_rate": 2.221508531652372e-08,
"loss": 0.6493,
"step": 4115
},
{
"epoch": 9.939686369119421,
"grad_norm": 0.37890625,
"learning_rate": 1.4217844119857048e-08,
"loss": 0.6523,
"step": 4120
},
{
"epoch": 9.951749095295536,
"grad_norm": 0.345703125,
"learning_rate": 7.997620232014225e-09,
"loss": 0.6501,
"step": 4125
},
{
"epoch": 9.963811821471653,
"grad_norm": 0.357421875,
"learning_rate": 3.554524203175369e-09,
"loss": 0.6438,
"step": 4130
},
{
"epoch": 9.975874547647768,
"grad_norm": 0.40625,
"learning_rate": 8.886349991521492e-10,
"loss": 0.6541,
"step": 4135
},
{
"epoch": 9.987937273823885,
"grad_norm": 0.365234375,
"learning_rate": 0.0,
"loss": 0.6511,
"step": 4140
},
{
"epoch": 9.987937273823885,
"eval_loss": 2.4690446853637695,
"eval_runtime": 0.2371,
"eval_samples_per_second": 42.169,
"eval_steps_per_second": 4.217,
"step": 4140
},
{
"epoch": 9.987937273823885,
"step": 4140,
"total_flos": 1.263927943748177e+19,
"train_loss": 1.320238353146447,
"train_runtime": 10095.6387,
"train_samples_per_second": 26.261,
"train_steps_per_second": 0.41
}
],
"logging_steps": 5,
"max_steps": 4140,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 100,
"total_flos": 1.263927943748177e+19,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}