{
"best_metric": 0.18780523538589478,
"best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-2650",
"epoch": 1.751995879474633,
"eval_steps": 50,
"global_step": 3401,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025753283543651817,
"grad_norm": 21.379672872549193,
"learning_rate": 2.564102564102564e-06,
"loss": 3.0388,
"num_input_tokens_seen": 58496,
"step": 5
},
{
"epoch": 0.0051506567087303634,
"grad_norm": 20.76117223991023,
"learning_rate": 5.128205128205128e-06,
"loss": 2.9831,
"num_input_tokens_seen": 116960,
"step": 10
},
{
"epoch": 0.007725985063095545,
"grad_norm": 22.5213517141881,
"learning_rate": 7.692307692307694e-06,
"loss": 2.8696,
"num_input_tokens_seen": 175448,
"step": 15
},
{
"epoch": 0.010301313417460727,
"grad_norm": 20.673071198727328,
"learning_rate": 1.0256410256410256e-05,
"loss": 2.6316,
"num_input_tokens_seen": 233944,
"step": 20
},
{
"epoch": 0.012876641771825908,
"grad_norm": 18.902291974538457,
"learning_rate": 1.282051282051282e-05,
"loss": 1.9707,
"num_input_tokens_seen": 292416,
"step": 25
},
{
"epoch": 0.01545197012619109,
"grad_norm": 8.05718270484028,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.3782,
"num_input_tokens_seen": 350904,
"step": 30
},
{
"epoch": 0.018027298480556272,
"grad_norm": 3.6465275188422344,
"learning_rate": 1.794871794871795e-05,
"loss": 1.0628,
"num_input_tokens_seen": 409384,
"step": 35
},
{
"epoch": 0.020602626834921454,
"grad_norm": 4.842154180410959,
"learning_rate": 2.0512820512820512e-05,
"loss": 0.9789,
"num_input_tokens_seen": 467864,
"step": 40
},
{
"epoch": 0.023177955189286635,
"grad_norm": 2.6799567517341396,
"learning_rate": 2.307692307692308e-05,
"loss": 0.9327,
"num_input_tokens_seen": 526384,
"step": 45
},
{
"epoch": 0.025753283543651816,
"grad_norm": 2.629272923472648,
"learning_rate": 2.564102564102564e-05,
"loss": 0.9233,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.025753283543651816,
"eval_loss": 0.9281821846961975,
"eval_runtime": 48.2484,
"eval_samples_per_second": 1.244,
"eval_steps_per_second": 0.311,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.028328611898016998,
"grad_norm": 1.2858813899048422,
"learning_rate": 2.8205128205128207e-05,
"loss": 0.897,
"num_input_tokens_seen": 643344,
"step": 55
},
{
"epoch": 0.03090394025238218,
"grad_norm": 1.177678811476692,
"learning_rate": 3.0769230769230774e-05,
"loss": 0.9169,
"num_input_tokens_seen": 701808,
"step": 60
},
{
"epoch": 0.03347926860674736,
"grad_norm": 1.2077065633120996,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.9019,
"num_input_tokens_seen": 760304,
"step": 65
},
{
"epoch": 0.036054596961112545,
"grad_norm": 1.1560644429967823,
"learning_rate": 3.58974358974359e-05,
"loss": 0.8996,
"num_input_tokens_seen": 818760,
"step": 70
},
{
"epoch": 0.03862992531547772,
"grad_norm": 0.732907212566054,
"learning_rate": 3.846153846153846e-05,
"loss": 0.9073,
"num_input_tokens_seen": 877256,
"step": 75
},
{
"epoch": 0.04120525366984291,
"grad_norm": 0.9616993870089134,
"learning_rate": 4.1025641025641023e-05,
"loss": 0.9081,
"num_input_tokens_seen": 935752,
"step": 80
},
{
"epoch": 0.043780582024208085,
"grad_norm": 0.8384067209941525,
"learning_rate": 4.358974358974359e-05,
"loss": 0.906,
"num_input_tokens_seen": 994216,
"step": 85
},
{
"epoch": 0.04635591037857327,
"grad_norm": 0.4045876972188175,
"learning_rate": 4.615384615384616e-05,
"loss": 0.8952,
"num_input_tokens_seen": 1052704,
"step": 90
},
{
"epoch": 0.04893123873293845,
"grad_norm": 0.6062678622593307,
"learning_rate": 4.871794871794872e-05,
"loss": 0.8996,
"num_input_tokens_seen": 1111176,
"step": 95
},
{
"epoch": 0.05150656708730363,
"grad_norm": 0.5316642041721752,
"learning_rate": 5.128205128205128e-05,
"loss": 0.9024,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05150656708730363,
"eval_loss": 0.911374032497406,
"eval_runtime": 19.566,
"eval_samples_per_second": 3.067,
"eval_steps_per_second": 0.767,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05408189544166881,
"grad_norm": 0.47189419512253006,
"learning_rate": 5.384615384615385e-05,
"loss": 0.9142,
"num_input_tokens_seen": 1228112,
"step": 105
},
{
"epoch": 0.056657223796033995,
"grad_norm": 0.4885000351277984,
"learning_rate": 5.6410256410256414e-05,
"loss": 0.9054,
"num_input_tokens_seen": 1286608,
"step": 110
},
{
"epoch": 0.05923255215039917,
"grad_norm": 1.0232694160031948,
"learning_rate": 5.897435897435898e-05,
"loss": 0.8997,
"num_input_tokens_seen": 1345072,
"step": 115
},
{
"epoch": 0.06180788050476436,
"grad_norm": 0.6656697152989639,
"learning_rate": 6.153846153846155e-05,
"loss": 0.8988,
"num_input_tokens_seen": 1403544,
"step": 120
},
{
"epoch": 0.06438320885912954,
"grad_norm": 0.6273175951192728,
"learning_rate": 6.410256410256412e-05,
"loss": 0.9087,
"num_input_tokens_seen": 1462024,
"step": 125
},
{
"epoch": 0.06695853721349472,
"grad_norm": 0.707089894516894,
"learning_rate": 6.666666666666667e-05,
"loss": 0.8961,
"num_input_tokens_seen": 1520528,
"step": 130
},
{
"epoch": 0.0695338655678599,
"grad_norm": 0.4633668497982238,
"learning_rate": 6.923076923076924e-05,
"loss": 0.903,
"num_input_tokens_seen": 1579024,
"step": 135
},
{
"epoch": 0.07210919392222509,
"grad_norm": 0.5052802522069755,
"learning_rate": 7.17948717948718e-05,
"loss": 0.899,
"num_input_tokens_seen": 1637504,
"step": 140
},
{
"epoch": 0.07468452227659027,
"grad_norm": 0.7577940010204668,
"learning_rate": 7.435897435897436e-05,
"loss": 0.9071,
"num_input_tokens_seen": 1696024,
"step": 145
},
{
"epoch": 0.07725985063095545,
"grad_norm": 0.5812587904219971,
"learning_rate": 7.692307692307693e-05,
"loss": 0.9045,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07725985063095545,
"eval_loss": 0.8934853076934814,
"eval_runtime": 19.8765,
"eval_samples_per_second": 3.019,
"eval_steps_per_second": 0.755,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07983517898532062,
"grad_norm": 0.5167982536583405,
"learning_rate": 7.948717948717948e-05,
"loss": 0.8992,
"num_input_tokens_seen": 1812976,
"step": 155
},
{
"epoch": 0.08241050733968582,
"grad_norm": 0.4971816797735092,
"learning_rate": 8.205128205128205e-05,
"loss": 0.8965,
"num_input_tokens_seen": 1871464,
"step": 160
},
{
"epoch": 0.08498583569405099,
"grad_norm": 0.6561749633642688,
"learning_rate": 8.461538461538461e-05,
"loss": 0.9094,
"num_input_tokens_seen": 1929928,
"step": 165
},
{
"epoch": 0.08756116404841617,
"grad_norm": 0.5010857314708574,
"learning_rate": 8.717948717948718e-05,
"loss": 0.903,
"num_input_tokens_seen": 1988432,
"step": 170
},
{
"epoch": 0.09013649240278135,
"grad_norm": 0.48794512034251364,
"learning_rate": 8.974358974358975e-05,
"loss": 0.902,
"num_input_tokens_seen": 2046920,
"step": 175
},
{
"epoch": 0.09271182075714654,
"grad_norm": 0.4040014684262414,
"learning_rate": 9.230769230769232e-05,
"loss": 0.9006,
"num_input_tokens_seen": 2105392,
"step": 180
},
{
"epoch": 0.09528714911151172,
"grad_norm": 0.5312840597942438,
"learning_rate": 9.487179487179487e-05,
"loss": 0.9042,
"num_input_tokens_seen": 2163872,
"step": 185
},
{
"epoch": 0.0978624774658769,
"grad_norm": 0.3535119366494406,
"learning_rate": 9.743589743589744e-05,
"loss": 0.9096,
"num_input_tokens_seen": 2222352,
"step": 190
},
{
"epoch": 0.10043780582024209,
"grad_norm": 0.30590378285024006,
"learning_rate": 0.0001,
"loss": 0.9037,
"num_input_tokens_seen": 2280800,
"step": 195
},
{
"epoch": 0.10301313417460727,
"grad_norm": 0.3055264226667786,
"learning_rate": 9.999954623308172e-05,
"loss": 0.904,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10301313417460727,
"eval_loss": 0.8980139493942261,
"eval_runtime": 19.316,
"eval_samples_per_second": 3.106,
"eval_steps_per_second": 0.777,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10558846252897244,
"grad_norm": 0.8828178200664915,
"learning_rate": 9.999818494056303e-05,
"loss": 0.9029,
"num_input_tokens_seen": 2397808,
"step": 205
},
{
"epoch": 0.10816379088333762,
"grad_norm": 0.4308314655260644,
"learning_rate": 9.99959161471523e-05,
"loss": 0.9005,
"num_input_tokens_seen": 2456288,
"step": 210
},
{
"epoch": 0.11073911923770281,
"grad_norm": 0.4482188659643584,
"learning_rate": 9.99927398940297e-05,
"loss": 0.9096,
"num_input_tokens_seen": 2514784,
"step": 215
},
{
"epoch": 0.11331444759206799,
"grad_norm": 0.49014741417238206,
"learning_rate": 9.998865623884635e-05,
"loss": 0.9036,
"num_input_tokens_seen": 2573240,
"step": 220
},
{
"epoch": 0.11588977594643317,
"grad_norm": 0.2774850522391394,
"learning_rate": 9.998366525572336e-05,
"loss": 0.901,
"num_input_tokens_seen": 2631672,
"step": 225
},
{
"epoch": 0.11846510430079835,
"grad_norm": 0.49390873315018263,
"learning_rate": 9.997776703525046e-05,
"loss": 0.9018,
"num_input_tokens_seen": 2690112,
"step": 230
},
{
"epoch": 0.12104043265516354,
"grad_norm": 0.3284306399258997,
"learning_rate": 9.997096168448432e-05,
"loss": 0.8934,
"num_input_tokens_seen": 2748608,
"step": 235
},
{
"epoch": 0.12361576100952872,
"grad_norm": 0.7182680023403506,
"learning_rate": 9.996324932694668e-05,
"loss": 0.8876,
"num_input_tokens_seen": 2807080,
"step": 240
},
{
"epoch": 0.1261910893638939,
"grad_norm": 0.7305499346526235,
"learning_rate": 9.995463010262206e-05,
"loss": 0.9084,
"num_input_tokens_seen": 2865520,
"step": 245
},
{
"epoch": 0.12876641771825909,
"grad_norm": 0.5773211522908436,
"learning_rate": 9.994510416795519e-05,
"loss": 0.9106,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.12876641771825909,
"eval_loss": 0.8958488702774048,
"eval_runtime": 19.507,
"eval_samples_per_second": 3.076,
"eval_steps_per_second": 0.769,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.13134174607262425,
"grad_norm": 0.44306061088962184,
"learning_rate": 9.993467169584824e-05,
"loss": 0.9012,
"num_input_tokens_seen": 2982520,
"step": 255
},
{
"epoch": 0.13391707442698944,
"grad_norm": 0.7851687259125024,
"learning_rate": 9.992333287565765e-05,
"loss": 0.9069,
"num_input_tokens_seen": 3041008,
"step": 260
},
{
"epoch": 0.13649240278135463,
"grad_norm": 0.5705235716557865,
"learning_rate": 9.991108791319066e-05,
"loss": 0.8918,
"num_input_tokens_seen": 3099464,
"step": 265
},
{
"epoch": 0.1390677311357198,
"grad_norm": 0.6202972137914602,
"learning_rate": 9.989793703070163e-05,
"loss": 0.8996,
"num_input_tokens_seen": 3157944,
"step": 270
},
{
"epoch": 0.141643059490085,
"grad_norm": 0.7583768377175583,
"learning_rate": 9.988388046688799e-05,
"loss": 0.9009,
"num_input_tokens_seen": 3216448,
"step": 275
},
{
"epoch": 0.14421838784445018,
"grad_norm": 0.7180540444266581,
"learning_rate": 9.986891847688587e-05,
"loss": 0.9059,
"num_input_tokens_seen": 3274928,
"step": 280
},
{
"epoch": 0.14679371619881534,
"grad_norm": 0.4173225854654158,
"learning_rate": 9.985305133226553e-05,
"loss": 0.8939,
"num_input_tokens_seen": 3333408,
"step": 285
},
{
"epoch": 0.14936904455318054,
"grad_norm": 0.7825855108807762,
"learning_rate": 9.983627932102638e-05,
"loss": 0.8899,
"num_input_tokens_seen": 3391896,
"step": 290
},
{
"epoch": 0.1519443729075457,
"grad_norm": 0.4850249272160501,
"learning_rate": 9.981860274759173e-05,
"loss": 0.9092,
"num_input_tokens_seen": 3450392,
"step": 295
},
{
"epoch": 0.1545197012619109,
"grad_norm": 0.3325682106309916,
"learning_rate": 9.980002193280342e-05,
"loss": 0.8901,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.1545197012619109,
"eval_loss": 0.8932263255119324,
"eval_runtime": 19.7633,
"eval_samples_per_second": 3.036,
"eval_steps_per_second": 0.759,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.15709502961627608,
"grad_norm": 0.36562722920113416,
"learning_rate": 9.978053721391578e-05,
"loss": 0.9042,
"num_input_tokens_seen": 3567368,
"step": 305
},
{
"epoch": 0.15967035797064125,
"grad_norm": 0.3765491511973325,
"learning_rate": 9.976014894458963e-05,
"loss": 0.9007,
"num_input_tokens_seen": 3625848,
"step": 310
},
{
"epoch": 0.16224568632500644,
"grad_norm": 0.5264420727347517,
"learning_rate": 9.973885749488589e-05,
"loss": 0.9036,
"num_input_tokens_seen": 3684336,
"step": 315
},
{
"epoch": 0.16482101467937163,
"grad_norm": 0.24680747784235688,
"learning_rate": 9.971666325125874e-05,
"loss": 0.8936,
"num_input_tokens_seen": 3742800,
"step": 320
},
{
"epoch": 0.1673963430337368,
"grad_norm": 0.4982571051665039,
"learning_rate": 9.969356661654876e-05,
"loss": 0.8989,
"num_input_tokens_seen": 3801280,
"step": 325
},
{
"epoch": 0.16997167138810199,
"grad_norm": 0.49943012602572584,
"learning_rate": 9.966956800997546e-05,
"loss": 0.8983,
"num_input_tokens_seen": 3859792,
"step": 330
},
{
"epoch": 0.17254699974246718,
"grad_norm": 0.37381050353079964,
"learning_rate": 9.964466786712984e-05,
"loss": 0.9038,
"num_input_tokens_seen": 3918272,
"step": 335
},
{
"epoch": 0.17512232809683234,
"grad_norm": 0.7501484170811903,
"learning_rate": 9.961886663996629e-05,
"loss": 0.8947,
"num_input_tokens_seen": 3976760,
"step": 340
},
{
"epoch": 0.17769765645119753,
"grad_norm": 0.5623847203835772,
"learning_rate": 9.959216479679458e-05,
"loss": 0.9179,
"num_input_tokens_seen": 4035240,
"step": 345
},
{
"epoch": 0.1802729848055627,
"grad_norm": 0.34381878607605765,
"learning_rate": 9.956456282227122e-05,
"loss": 0.9059,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1802729848055627,
"eval_loss": 0.8960411548614502,
"eval_runtime": 20.0734,
"eval_samples_per_second": 2.989,
"eval_steps_per_second": 0.747,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1828483131599279,
"grad_norm": 0.5786050277605497,
"learning_rate": 9.953606121739074e-05,
"loss": 0.8795,
"num_input_tokens_seen": 4152160,
"step": 355
},
{
"epoch": 0.18542364151429308,
"grad_norm": 1.3309062985901938,
"learning_rate": 9.950666049947653e-05,
"loss": 0.9143,
"num_input_tokens_seen": 4210648,
"step": 360
},
{
"epoch": 0.18799896986865824,
"grad_norm": 0.5010297124723248,
"learning_rate": 9.947636120217155e-05,
"loss": 0.9164,
"num_input_tokens_seen": 4269136,
"step": 365
},
{
"epoch": 0.19057429822302344,
"grad_norm": 0.7250841632803818,
"learning_rate": 9.944516387542852e-05,
"loss": 0.9061,
"num_input_tokens_seen": 4327664,
"step": 370
},
{
"epoch": 0.19314962657738863,
"grad_norm": 0.4506280653909736,
"learning_rate": 9.941306908550005e-05,
"loss": 0.8873,
"num_input_tokens_seen": 4386120,
"step": 375
},
{
"epoch": 0.1957249549317538,
"grad_norm": 0.6467175538087946,
"learning_rate": 9.938007741492828e-05,
"loss": 0.9038,
"num_input_tokens_seen": 4444560,
"step": 380
},
{
"epoch": 0.19830028328611898,
"grad_norm": 0.6742654767461002,
"learning_rate": 9.934618946253437e-05,
"loss": 0.9116,
"num_input_tokens_seen": 4503016,
"step": 385
},
{
"epoch": 0.20087561164048418,
"grad_norm": 0.418516338281364,
"learning_rate": 9.931140584340761e-05,
"loss": 0.9023,
"num_input_tokens_seen": 4561496,
"step": 390
},
{
"epoch": 0.20345093999484934,
"grad_norm": 0.5738919793445436,
"learning_rate": 9.92757271888942e-05,
"loss": 0.8901,
"num_input_tokens_seen": 4619944,
"step": 395
},
{
"epoch": 0.20602626834921453,
"grad_norm": 0.3600152815973316,
"learning_rate": 9.923915414658587e-05,
"loss": 0.9033,
"num_input_tokens_seen": 4678384,
"step": 400
},
{
"epoch": 0.20602626834921453,
"eval_loss": 0.906301736831665,
"eval_runtime": 19.8079,
"eval_samples_per_second": 3.029,
"eval_steps_per_second": 0.757,
"num_input_tokens_seen": 4678384,
"step": 400
},
{
"epoch": 0.2086015967035797,
"grad_norm": 0.4854922483196253,
"learning_rate": 9.920168738030807e-05,
"loss": 0.8951,
"num_input_tokens_seen": 4736904,
"step": 405
},
{
"epoch": 0.2111769250579449,
"grad_norm": 0.4537032287993717,
"learning_rate": 9.916332757010799e-05,
"loss": 0.9131,
"num_input_tokens_seen": 4795376,
"step": 410
},
{
"epoch": 0.21375225341231008,
"grad_norm": 0.46655140937482126,
"learning_rate": 9.912407541224213e-05,
"loss": 0.8923,
"num_input_tokens_seen": 4853880,
"step": 415
},
{
"epoch": 0.21632758176667524,
"grad_norm": 0.3608301258843965,
"learning_rate": 9.908393161916374e-05,
"loss": 0.9026,
"num_input_tokens_seen": 4912360,
"step": 420
},
{
"epoch": 0.21890291012104043,
"grad_norm": 0.5055648531803498,
"learning_rate": 9.904289691950979e-05,
"loss": 0.905,
"num_input_tokens_seen": 4970872,
"step": 425
},
{
"epoch": 0.22147823847540563,
"grad_norm": 0.3171606869940592,
"learning_rate": 9.900097205808789e-05,
"loss": 0.8941,
"num_input_tokens_seen": 5029304,
"step": 430
},
{
"epoch": 0.2240535668297708,
"grad_norm": 0.5798722428230844,
"learning_rate": 9.895815779586262e-05,
"loss": 0.9031,
"num_input_tokens_seen": 5087800,
"step": 435
},
{
"epoch": 0.22662889518413598,
"grad_norm": 0.5751083474979835,
"learning_rate": 9.891445490994182e-05,
"loss": 0.8975,
"num_input_tokens_seen": 5146312,
"step": 440
},
{
"epoch": 0.22920422353850115,
"grad_norm": 1.2316619525123293,
"learning_rate": 9.886986419356246e-05,
"loss": 0.901,
"num_input_tokens_seen": 5204800,
"step": 445
},
{
"epoch": 0.23177955189286634,
"grad_norm": 0.5953254293558816,
"learning_rate": 9.88243864560762e-05,
"loss": 0.9062,
"num_input_tokens_seen": 5263304,
"step": 450
},
{
"epoch": 0.23177955189286634,
"eval_loss": 0.9007609486579895,
"eval_runtime": 20.8827,
"eval_samples_per_second": 2.873,
"eval_steps_per_second": 0.718,
"num_input_tokens_seen": 5263304,
"step": 450
},
{
"epoch": 0.23435488024723153,
"grad_norm": 0.7549819464827967,
"learning_rate": 9.877802252293474e-05,
"loss": 0.8891,
"num_input_tokens_seen": 5321760,
"step": 455
},
{
"epoch": 0.2369302086015967,
"grad_norm": 0.9960909370043465,
"learning_rate": 9.873077323567488e-05,
"loss": 0.9026,
"num_input_tokens_seen": 5380224,
"step": 460
},
{
"epoch": 0.23950553695596188,
"grad_norm": 1.5888386521989892,
"learning_rate": 9.868263945190312e-05,
"loss": 0.8707,
"num_input_tokens_seen": 5438704,
"step": 465
},
{
"epoch": 0.24208086531032708,
"grad_norm": 3.0542478842411587,
"learning_rate": 9.863362204528024e-05,
"loss": 0.9051,
"num_input_tokens_seen": 5497208,
"step": 470
},
{
"epoch": 0.24465619366469224,
"grad_norm": 1.2908325061552137,
"learning_rate": 9.858372190550533e-05,
"loss": 0.8711,
"num_input_tokens_seen": 5555704,
"step": 475
},
{
"epoch": 0.24723152201905743,
"grad_norm": 3.1989324866235744,
"learning_rate": 9.853293993829969e-05,
"loss": 0.885,
"num_input_tokens_seen": 5614160,
"step": 480
},
{
"epoch": 0.24980685037342262,
"grad_norm": 3.591366302378185,
"learning_rate": 9.848127706539039e-05,
"loss": 0.8615,
"num_input_tokens_seen": 5672640,
"step": 485
},
{
"epoch": 0.2523821787277878,
"grad_norm": 2.053833335696007,
"learning_rate": 9.842873422449354e-05,
"loss": 0.9057,
"num_input_tokens_seen": 5731072,
"step": 490
},
{
"epoch": 0.254957507082153,
"grad_norm": 1.4501486574941083,
"learning_rate": 9.837531236929726e-05,
"loss": 0.8818,
"num_input_tokens_seen": 5789544,
"step": 495
},
{
"epoch": 0.25753283543651817,
"grad_norm": 2.1068404021122866,
"learning_rate": 9.832101246944439e-05,
"loss": 0.8576,
"num_input_tokens_seen": 5848048,
"step": 500
},
{
"epoch": 0.25753283543651817,
"eval_loss": 0.8268976211547852,
"eval_runtime": 19.6346,
"eval_samples_per_second": 3.056,
"eval_steps_per_second": 0.764,
"num_input_tokens_seen": 5848048,
"step": 500
},
{
"epoch": 0.26010816379088336,
"grad_norm": 4.216936754020565,
"learning_rate": 9.826583551051483e-05,
"loss": 0.8566,
"num_input_tokens_seen": 5906512,
"step": 505
},
{
"epoch": 0.2626834921452485,
"grad_norm": 10.456282683777822,
"learning_rate": 9.820978249400773e-05,
"loss": 0.8365,
"num_input_tokens_seen": 5965024,
"step": 510
},
{
"epoch": 0.2652588204996137,
"grad_norm": 2.334974931865165,
"learning_rate": 9.81528544373233e-05,
"loss": 0.8882,
"num_input_tokens_seen": 6023496,
"step": 515
},
{
"epoch": 0.2678341488539789,
"grad_norm": 0.6948827424617825,
"learning_rate": 9.809505237374426e-05,
"loss": 0.8799,
"num_input_tokens_seen": 6082000,
"step": 520
},
{
"epoch": 0.2704094772083441,
"grad_norm": 0.8415524888602947,
"learning_rate": 9.80363773524172e-05,
"loss": 0.8758,
"num_input_tokens_seen": 6140480,
"step": 525
},
{
"epoch": 0.27298480556270927,
"grad_norm": 1.3536520282199265,
"learning_rate": 9.797683043833345e-05,
"loss": 0.8644,
"num_input_tokens_seen": 6198968,
"step": 530
},
{
"epoch": 0.2755601339170744,
"grad_norm": 4.556188528469967,
"learning_rate": 9.791641271230982e-05,
"loss": 0.8453,
"num_input_tokens_seen": 6257464,
"step": 535
},
{
"epoch": 0.2781354622714396,
"grad_norm": 2.890141630286954,
"learning_rate": 9.78551252709689e-05,
"loss": 0.8533,
"num_input_tokens_seen": 6315944,
"step": 540
},
{
"epoch": 0.2807107906258048,
"grad_norm": 4.471490037342243,
"learning_rate": 9.779296922671923e-05,
"loss": 0.8575,
"num_input_tokens_seen": 6374408,
"step": 545
},
{
"epoch": 0.28328611898017,
"grad_norm": 3.174906426420603,
"learning_rate": 9.77299457077351e-05,
"loss": 0.8666,
"num_input_tokens_seen": 6432936,
"step": 550
},
{
"epoch": 0.28328611898017,
"eval_loss": 0.7909801602363586,
"eval_runtime": 19.8739,
"eval_samples_per_second": 3.019,
"eval_steps_per_second": 0.755,
"num_input_tokens_seen": 6432936,
"step": 550
},
{
"epoch": 0.28586144733453517,
"grad_norm": 3.529163852540611,
"learning_rate": 9.7666055857936e-05,
"loss": 0.8264,
"num_input_tokens_seen": 6491400,
"step": 555
},
{
"epoch": 0.28843677568890036,
"grad_norm": 4.044590312854015,
"learning_rate": 9.760130083696595e-05,
"loss": 0.8456,
"num_input_tokens_seen": 6549872,
"step": 560
},
{
"epoch": 0.2910121040432655,
"grad_norm": 4.650808013267891,
"learning_rate": 9.75356818201724e-05,
"loss": 0.8032,
"num_input_tokens_seen": 6608296,
"step": 565
},
{
"epoch": 0.2935874323976307,
"grad_norm": 6.577223054225459,
"learning_rate": 9.746919999858492e-05,
"loss": 0.8081,
"num_input_tokens_seen": 6666768,
"step": 570
},
{
"epoch": 0.2961627607519959,
"grad_norm": 7.1732684079932545,
"learning_rate": 9.740185657889357e-05,
"loss": 0.8398,
"num_input_tokens_seen": 6725248,
"step": 575
},
{
"epoch": 0.29873808910636107,
"grad_norm": 21.451661035438484,
"learning_rate": 9.733365278342696e-05,
"loss": 0.8908,
"num_input_tokens_seen": 6783680,
"step": 580
},
{
"epoch": 0.30131341746072626,
"grad_norm": 4.031699151478832,
"learning_rate": 9.726458985013017e-05,
"loss": 0.8248,
"num_input_tokens_seen": 6842144,
"step": 585
},
{
"epoch": 0.3038887458150914,
"grad_norm": 3.45579530759462,
"learning_rate": 9.719466903254215e-05,
"loss": 0.829,
"num_input_tokens_seen": 6900656,
"step": 590
},
{
"epoch": 0.3064640741694566,
"grad_norm": 4.518719062630672,
"learning_rate": 9.712389159977307e-05,
"loss": 0.8269,
"num_input_tokens_seen": 6959128,
"step": 595
},
{
"epoch": 0.3090394025238218,
"grad_norm": 12.728221405806083,
"learning_rate": 9.705225883648121e-05,
"loss": 0.7997,
"num_input_tokens_seen": 7017576,
"step": 600
},
{
"epoch": 0.3090394025238218,
"eval_loss": 0.787663459777832,
"eval_runtime": 19.6121,
"eval_samples_per_second": 3.059,
"eval_steps_per_second": 0.765,
"num_input_tokens_seen": 7017576,
"step": 600
},
{
"epoch": 0.311614730878187,
"grad_norm": 6.69798043480266,
"learning_rate": 9.697977204284973e-05,
"loss": 0.8925,
"num_input_tokens_seen": 7076032,
"step": 605
},
{
"epoch": 0.31419005923255217,
"grad_norm": 5.067921055882507,
"learning_rate": 9.690643253456297e-05,
"loss": 0.8159,
"num_input_tokens_seen": 7134536,
"step": 610
},
{
"epoch": 0.31676538758691736,
"grad_norm": 7.400939684061883,
"learning_rate": 9.683224164278264e-05,
"loss": 0.826,
"num_input_tokens_seen": 7193032,
"step": 615
},
{
"epoch": 0.3193407159412825,
"grad_norm": 5.898525799199162,
"learning_rate": 9.675720071412365e-05,
"loss": 0.8187,
"num_input_tokens_seen": 7251568,
"step": 620
},
{
"epoch": 0.3219160442956477,
"grad_norm": 11.267105316774332,
"learning_rate": 9.66813111106296e-05,
"loss": 0.8524,
"num_input_tokens_seen": 7310072,
"step": 625
},
{
"epoch": 0.3244913726500129,
"grad_norm": 6.703970582399643,
"learning_rate": 9.660457420974819e-05,
"loss": 0.7966,
"num_input_tokens_seen": 7368560,
"step": 630
},
{
"epoch": 0.32706670100437807,
"grad_norm": 6.945445265294353,
"learning_rate": 9.652699140430608e-05,
"loss": 0.799,
"num_input_tokens_seen": 7427040,
"step": 635
},
{
"epoch": 0.32964202935874326,
"grad_norm": 7.0684293091171595,
"learning_rate": 9.644856410248369e-05,
"loss": 0.8477,
"num_input_tokens_seen": 7485552,
"step": 640
},
{
"epoch": 0.3322173577131084,
"grad_norm": 7.165086711244158,
"learning_rate": 9.636929372778963e-05,
"loss": 0.7867,
"num_input_tokens_seen": 7544040,
"step": 645
},
{
"epoch": 0.3347926860674736,
"grad_norm": 9.185933515393563,
"learning_rate": 9.628918171903485e-05,
"loss": 0.8367,
"num_input_tokens_seen": 7602512,
"step": 650
},
{
"epoch": 0.3347926860674736,
"eval_loss": 0.7940558791160583,
"eval_runtime": 19.7641,
"eval_samples_per_second": 3.036,
"eval_steps_per_second": 0.759,
"num_input_tokens_seen": 7602512,
"step": 650
},
{
"epoch": 0.3373680144218388,
"grad_norm": 6.586425160827751,
"learning_rate": 9.620822953030652e-05,
"loss": 0.8131,
"num_input_tokens_seen": 7660968,
"step": 655
},
{
"epoch": 0.33994334277620397,
"grad_norm": 6.92970378602844,
"learning_rate": 9.612643863094163e-05,
"loss": 0.8348,
"num_input_tokens_seen": 7719448,
"step": 660
},
{
"epoch": 0.34251867113056916,
"grad_norm": 8.35228285894448,
"learning_rate": 9.604381050550038e-05,
"loss": 0.8289,
"num_input_tokens_seen": 7777928,
"step": 665
},
{
"epoch": 0.34509399948493436,
"grad_norm": 12.894782157020227,
"learning_rate": 9.596034665373916e-05,
"loss": 0.7758,
"num_input_tokens_seen": 7836424,
"step": 670
},
{
"epoch": 0.3476693278392995,
"grad_norm": 13.409694970235305,
"learning_rate": 9.587604859058334e-05,
"loss": 0.8189,
"num_input_tokens_seen": 7894904,
"step": 675
},
{
"epoch": 0.3502446561936647,
"grad_norm": 8.783205826578632,
"learning_rate": 9.579091784609984e-05,
"loss": 0.8221,
"num_input_tokens_seen": 7953432,
"step": 680
},
{
"epoch": 0.3528199845480299,
"grad_norm": 8.368380903445857,
"learning_rate": 9.570495596546926e-05,
"loss": 0.8378,
"num_input_tokens_seen": 8011888,
"step": 685
},
{
"epoch": 0.35539531290239507,
"grad_norm": 6.7086179135551145,
"learning_rate": 9.561816450895793e-05,
"loss": 0.7529,
"num_input_tokens_seen": 8070344,
"step": 690
},
{
"epoch": 0.35797064125676026,
"grad_norm": 8.476897088436242,
"learning_rate": 9.55305450518895e-05,
"loss": 0.7311,
"num_input_tokens_seen": 8128816,
"step": 695
},
{
"epoch": 0.3605459696111254,
"grad_norm": 10.427785019598666,
"learning_rate": 9.544209918461642e-05,
"loss": 0.774,
"num_input_tokens_seen": 8187320,
"step": 700
},
{
"epoch": 0.3605459696111254,
"eval_loss": 0.7318872809410095,
"eval_runtime": 19.6917,
"eval_samples_per_second": 3.047,
"eval_steps_per_second": 0.762,
"num_input_tokens_seen": 8187320,
"step": 700
},
{
"epoch": 0.3631212979654906,
"grad_norm": 14.492396688166755,
"learning_rate": 9.535282851249103e-05,
"loss": 0.765,
"num_input_tokens_seen": 8245776,
"step": 705
},
{
"epoch": 0.3656966263198558,
"grad_norm": 16.054951836007135,
"learning_rate": 9.526273465583646e-05,
"loss": 0.7287,
"num_input_tokens_seen": 8304280,
"step": 710
},
{
"epoch": 0.36827195467422097,
"grad_norm": 21.4994447839661,
"learning_rate": 9.517181924991716e-05,
"loss": 0.758,
"num_input_tokens_seen": 8362728,
"step": 715
},
{
"epoch": 0.37084728302858616,
"grad_norm": 10.676012702912917,
"learning_rate": 9.508008394490926e-05,
"loss": 0.795,
"num_input_tokens_seen": 8421224,
"step": 720
},
{
"epoch": 0.37342261138295135,
"grad_norm": 9.802559177691224,
"learning_rate": 9.498753040587066e-05,
"loss": 0.6901,
"num_input_tokens_seen": 8479720,
"step": 725
},
{
"epoch": 0.3759979397373165,
"grad_norm": 12.345047855457121,
"learning_rate": 9.48941603127107e-05,
"loss": 0.7618,
"num_input_tokens_seen": 8538192,
"step": 730
},
{
"epoch": 0.3785732680916817,
"grad_norm": 9.115843124142248,
"learning_rate": 9.479997536015977e-05,
"loss": 0.7481,
"num_input_tokens_seen": 8596664,
"step": 735
},
{
"epoch": 0.3811485964460469,
"grad_norm": 13.754407712653018,
"learning_rate": 9.47049772577385e-05,
"loss": 0.746,
"num_input_tokens_seen": 8655128,
"step": 740
},
{
"epoch": 0.38372392480041206,
"grad_norm": 11.612003816357428,
"learning_rate": 9.460916772972672e-05,
"loss": 0.812,
"num_input_tokens_seen": 8713624,
"step": 745
},
{
"epoch": 0.38629925315477726,
"grad_norm": 27.606819697848053,
"learning_rate": 9.451254851513222e-05,
"loss": 0.6751,
"num_input_tokens_seen": 8772104,
"step": 750
},
{
"epoch": 0.38629925315477726,
"eval_loss": 0.732211709022522,
"eval_runtime": 19.8828,
"eval_samples_per_second": 3.018,
"eval_steps_per_second": 0.754,
"num_input_tokens_seen": 8772104,
"step": 750
},
{
"epoch": 0.3888745815091424,
"grad_norm": 14.96041133173447,
"learning_rate": 9.441512136765911e-05,
"loss": 0.7772,
"num_input_tokens_seen": 8830568,
"step": 755
},
{
"epoch": 0.3914499098635076,
"grad_norm": 11.503685090619637,
"learning_rate": 9.431688805567607e-05,
"loss": 0.7114,
"num_input_tokens_seen": 8889072,
"step": 760
},
{
"epoch": 0.3940252382178728,
"grad_norm": 10.096502565612639,
"learning_rate": 9.421785036218417e-05,
"loss": 0.8463,
"num_input_tokens_seen": 8947568,
"step": 765
},
{
"epoch": 0.39660056657223797,
"grad_norm": 10.03630613489591,
"learning_rate": 9.411801008478459e-05,
"loss": 0.7822,
"num_input_tokens_seen": 9006056,
"step": 770
},
{
"epoch": 0.39917589492660316,
"grad_norm": 9.513127486957377,
"learning_rate": 9.401736903564592e-05,
"loss": 0.7628,
"num_input_tokens_seen": 9064592,
"step": 775
},
{
"epoch": 0.40175122328096835,
"grad_norm": 6.959520401923045,
"learning_rate": 9.39159290414713e-05,
"loss": 0.7589,
"num_input_tokens_seen": 9123096,
"step": 780
},
{
"epoch": 0.4043265516353335,
"grad_norm": 11.121102798997068,
"learning_rate": 9.381369194346527e-05,
"loss": 0.7564,
"num_input_tokens_seen": 9181576,
"step": 785
},
{
"epoch": 0.4069018799896987,
"grad_norm": 8.968142728241798,
"learning_rate": 9.371065959730039e-05,
"loss": 0.6934,
"num_input_tokens_seen": 9240048,
"step": 790
},
{
"epoch": 0.40947720834406387,
"grad_norm": 12.849518629503493,
"learning_rate": 9.36068338730834e-05,
"loss": 0.7314,
"num_input_tokens_seen": 9298528,
"step": 795
},
{
"epoch": 0.41205253669842906,
"grad_norm": 68.63999985815002,
"learning_rate": 9.35022166553215e-05,
"loss": 0.6911,
"num_input_tokens_seen": 9357016,
"step": 800
},
{
"epoch": 0.41205253669842906,
"eval_loss": 0.7180347442626953,
"eval_runtime": 19.3266,
"eval_samples_per_second": 3.105,
"eval_steps_per_second": 0.776,
"num_input_tokens_seen": 9357016,
"step": 800
},
{
"epoch": 0.41462786505279425,
"grad_norm": 12.26130532110871,
"learning_rate": 9.339680984288799e-05,
"loss": 0.7086,
"num_input_tokens_seen": 9415480,
"step": 805
},
{
"epoch": 0.4172031934071594,
"grad_norm": 15.883264561409183,
"learning_rate": 9.329061534898783e-05,
"loss": 0.7726,
"num_input_tokens_seen": 9473928,
"step": 810
},
{
"epoch": 0.4197785217615246,
"grad_norm": 17.855002564946467,
"learning_rate": 9.318363510112296e-05,
"loss": 0.7286,
"num_input_tokens_seen": 9532408,
"step": 815
},
{
"epoch": 0.4223538501158898,
"grad_norm": 11.119850082821575,
"learning_rate": 9.307587104105729e-05,
"loss": 0.7515,
"num_input_tokens_seen": 9590920,
"step": 820
},
{
"epoch": 0.42492917847025496,
"grad_norm": 9.916888211602318,
"learning_rate": 9.296732512478139e-05,
"loss": 0.7344,
"num_input_tokens_seen": 9649400,
"step": 825
},
{
"epoch": 0.42750450682462016,
"grad_norm": 9.805389743080928,
"learning_rate": 9.285799932247714e-05,
"loss": 0.6954,
"num_input_tokens_seen": 9707888,
"step": 830
},
{
"epoch": 0.43007983517898535,
"grad_norm": 8.068508417105404,
"learning_rate": 9.274789561848183e-05,
"loss": 0.7312,
"num_input_tokens_seen": 9766384,
"step": 835
},
{
"epoch": 0.4326551635333505,
"grad_norm": 8.152928433205195,
"learning_rate": 9.263701601125218e-05,
"loss": 0.647,
"num_input_tokens_seen": 9824896,
"step": 840
},
{
"epoch": 0.4352304918877157,
"grad_norm": 17.465963143357314,
"learning_rate": 9.252536251332813e-05,
"loss": 0.7273,
"num_input_tokens_seen": 9883408,
"step": 845
},
{
"epoch": 0.43780582024208087,
"grad_norm": 6.231784874865904,
"learning_rate": 9.24129371512962e-05,
"loss": 0.7455,
"num_input_tokens_seen": 9941896,
"step": 850
},
{
"epoch": 0.43780582024208087,
"eval_loss": 0.7039459347724915,
"eval_runtime": 19.7834,
"eval_samples_per_second": 3.033,
"eval_steps_per_second": 0.758,
"num_input_tokens_seen": 9941896,
"step": 850
},
{
"epoch": 0.44038114859644606,
"grad_norm": 8.584709523479143,
"learning_rate": 9.22997419657528e-05,
"loss": 0.6829,
"num_input_tokens_seen": 10000336,
"step": 855
},
{
"epoch": 0.44295647695081125,
"grad_norm": 9.894526327436065,
"learning_rate": 9.218577901126713e-05,
"loss": 0.6971,
"num_input_tokens_seen": 10058816,
"step": 860
},
{
"epoch": 0.4455318053051764,
"grad_norm": 6.844807331677797,
"learning_rate": 9.207105035634397e-05,
"loss": 0.7239,
"num_input_tokens_seen": 10117320,
"step": 865
},
{
"epoch": 0.4481071336595416,
"grad_norm": 9.911971184322747,
"learning_rate": 9.195555808338603e-05,
"loss": 0.7113,
"num_input_tokens_seen": 10175824,
"step": 870
},
{
"epoch": 0.45068246201390677,
"grad_norm": 11.42729729117445,
"learning_rate": 9.183930428865622e-05,
"loss": 0.685,
"num_input_tokens_seen": 10234288,
"step": 875
},
{
"epoch": 0.45325779036827196,
"grad_norm": 6.6164653318829005,
"learning_rate": 9.17222910822396e-05,
"loss": 0.6804,
"num_input_tokens_seen": 10292736,
"step": 880
},
{
"epoch": 0.45583311872263715,
"grad_norm": 9.566984327413312,
"learning_rate": 9.160452058800504e-05,
"loss": 0.7056,
"num_input_tokens_seen": 10351224,
"step": 885
},
{
"epoch": 0.4584084470770023,
"grad_norm": 8.37543764212447,
"learning_rate": 9.148599494356671e-05,
"loss": 0.7234,
"num_input_tokens_seen": 10409736,
"step": 890
},
{
"epoch": 0.4609837754313675,
"grad_norm": 8.118764978921215,
"learning_rate": 9.136671630024527e-05,
"loss": 0.7505,
"num_input_tokens_seen": 10468240,
"step": 895
},
{
"epoch": 0.4635591037857327,
"grad_norm": 8.91603360160678,
"learning_rate": 9.124668682302882e-05,
"loss": 0.7378,
"num_input_tokens_seen": 10526712,
"step": 900
},
{
"epoch": 0.4635591037857327,
"eval_loss": 0.7197856903076172,
"eval_runtime": 19.6818,
"eval_samples_per_second": 3.048,
"eval_steps_per_second": 0.762,
"num_input_tokens_seen": 10526712,
"step": 900
},
{
"epoch": 0.46613443214009787,
"grad_norm": 7.649426233722995,
"learning_rate": 9.112590869053359e-05,
"loss": 0.6794,
"num_input_tokens_seen": 10585232,
"step": 905
},
{
"epoch": 0.46870976049446306,
"grad_norm": 6.066459270532772,
"learning_rate": 9.100438409496444e-05,
"loss": 0.6817,
"num_input_tokens_seen": 10643728,
"step": 910
},
{
"epoch": 0.47128508884882825,
"grad_norm": 7.144597127673979,
"learning_rate": 9.088211524207497e-05,
"loss": 0.6503,
"num_input_tokens_seen": 10702240,
"step": 915
},
{
"epoch": 0.4738604172031934,
"grad_norm": 9.676112884447143,
"learning_rate": 9.075910435112766e-05,
"loss": 0.6903,
"num_input_tokens_seen": 10760656,
"step": 920
},
{
"epoch": 0.4764357455575586,
"grad_norm": 12.206584747037537,
"learning_rate": 9.063535365485341e-05,
"loss": 0.6611,
"num_input_tokens_seen": 10819128,
"step": 925
},
{
"epoch": 0.47901107391192377,
"grad_norm": 8.724970113237934,
"learning_rate": 9.051086539941108e-05,
"loss": 0.6361,
"num_input_tokens_seen": 10877600,
"step": 930
},
{
"epoch": 0.48158640226628896,
"grad_norm": 26.26773221921971,
"learning_rate": 9.038564184434676e-05,
"loss": 0.7006,
"num_input_tokens_seen": 10936088,
"step": 935
},
{
"epoch": 0.48416173062065415,
"grad_norm": 6.223867131390233,
"learning_rate": 9.025968526255275e-05,
"loss": 0.7012,
"num_input_tokens_seen": 10994560,
"step": 940
},
{
"epoch": 0.4867370589750193,
"grad_norm": 6.1072541360418295,
"learning_rate": 9.013299794022622e-05,
"loss": 0.6968,
"num_input_tokens_seen": 11053016,
"step": 945
},
{
"epoch": 0.4893123873293845,
"grad_norm": 8.372921238626587,
"learning_rate": 9.00055821768278e-05,
"loss": 0.6825,
"num_input_tokens_seen": 11111520,
"step": 950
},
{
"epoch": 0.4893123873293845,
"eval_loss": 0.6830747723579407,
"eval_runtime": 19.7357,
"eval_samples_per_second": 3.04,
"eval_steps_per_second": 0.76,
"num_input_tokens_seen": 11111520,
"step": 950
},
{
"epoch": 0.49188771568374967,
"grad_norm": 8.942834634941512,
"learning_rate": 8.987744028503981e-05,
"loss": 0.6957,
"num_input_tokens_seen": 11170016,
"step": 955
},
{
"epoch": 0.49446304403811486,
"grad_norm": 7.280449373939037,
"learning_rate": 8.974857459072435e-05,
"loss": 0.6531,
"num_input_tokens_seen": 11228496,
"step": 960
},
{
"epoch": 0.49703837239248005,
"grad_norm": 11.559627425451879,
"learning_rate": 8.961898743288094e-05,
"loss": 0.6201,
"num_input_tokens_seen": 11286928,
"step": 965
},
{
"epoch": 0.49961370074684525,
"grad_norm": 8.705456467164504,
"learning_rate": 8.948868116360421e-05,
"loss": 0.5677,
"num_input_tokens_seen": 11345400,
"step": 970
},
{
"epoch": 0.5021890291012104,
"grad_norm": 11.843729276311603,
"learning_rate": 8.935765814804112e-05,
"loss": 0.5763,
"num_input_tokens_seen": 11403912,
"step": 975
},
{
"epoch": 0.5047643574555756,
"grad_norm": 9.09749490323839,
"learning_rate": 8.922592076434804e-05,
"loss": 0.6348,
"num_input_tokens_seen": 11462344,
"step": 980
},
{
"epoch": 0.5073396858099408,
"grad_norm": 9.15512782816083,
"learning_rate": 8.90934714036477e-05,
"loss": 0.6541,
"num_input_tokens_seen": 11520808,
"step": 985
},
{
"epoch": 0.509915014164306,
"grad_norm": 7.3541970927874125,
"learning_rate": 8.896031246998558e-05,
"loss": 0.7012,
"num_input_tokens_seen": 11579248,
"step": 990
},
{
"epoch": 0.5124903425186711,
"grad_norm": 7.2544111988448305,
"learning_rate": 8.882644638028646e-05,
"loss": 0.6508,
"num_input_tokens_seen": 11637712,
"step": 995
},
{
"epoch": 0.5150656708730363,
"grad_norm": 8.317370673188798,
"learning_rate": 8.869187556431046e-05,
"loss": 0.5971,
"num_input_tokens_seen": 11696200,
"step": 1000
},
{
"epoch": 0.5150656708730363,
"eval_loss": 0.7078786492347717,
"eval_runtime": 19.6933,
"eval_samples_per_second": 3.047,
"eval_steps_per_second": 0.762,
"num_input_tokens_seen": 11696200,
"step": 1000
},
{
"epoch": 0.5176409992274015,
"grad_norm": 9.38238507668501,
"learning_rate": 8.855660246460895e-05,
"loss": 0.6959,
"num_input_tokens_seen": 11754720,
"step": 1005
},
{
"epoch": 0.5202163275817667,
"grad_norm": 8.602013574457626,
"learning_rate": 8.842062953648023e-05,
"loss": 0.6918,
"num_input_tokens_seen": 11813216,
"step": 1010
},
{
"epoch": 0.5227916559361319,
"grad_norm": 6.752768596735988,
"learning_rate": 8.828395924792497e-05,
"loss": 0.7269,
"num_input_tokens_seen": 11871712,
"step": 1015
},
{
"epoch": 0.525366984290497,
"grad_norm": 3.9336242665009187,
"learning_rate": 8.814659407960141e-05,
"loss": 0.7026,
"num_input_tokens_seen": 11930200,
"step": 1020
},
{
"epoch": 0.5279423126448622,
"grad_norm": 5.739197643968765,
"learning_rate": 8.800853652478028e-05,
"loss": 0.6467,
"num_input_tokens_seen": 11988704,
"step": 1025
},
{
"epoch": 0.5305176409992274,
"grad_norm": 4.583970239100745,
"learning_rate": 8.786978908929966e-05,
"loss": 0.6155,
"num_input_tokens_seen": 12047176,
"step": 1030
},
{
"epoch": 0.5330929693535926,
"grad_norm": 7.891614334520996,
"learning_rate": 8.773035429151937e-05,
"loss": 0.7849,
"num_input_tokens_seen": 12105680,
"step": 1035
},
{
"epoch": 0.5356682977079578,
"grad_norm": 10.308514200015722,
"learning_rate": 8.759023466227538e-05,
"loss": 0.6341,
"num_input_tokens_seen": 12164208,
"step": 1040
},
{
"epoch": 0.5382436260623229,
"grad_norm": 5.0434906291132995,
"learning_rate": 8.744943274483376e-05,
"loss": 0.7189,
"num_input_tokens_seen": 12222672,
"step": 1045
},
{
"epoch": 0.5408189544166881,
"grad_norm": 4.3092898145567125,
"learning_rate": 8.730795109484461e-05,
"loss": 0.6914,
"num_input_tokens_seen": 12281072,
"step": 1050
},
{
"epoch": 0.5408189544166881,
"eval_loss": 0.6824291348457336,
"eval_runtime": 19.7949,
"eval_samples_per_second": 3.031,
"eval_steps_per_second": 0.758,
"num_input_tokens_seen": 12281072,
"step": 1050
},
{
"epoch": 0.5433942827710533,
"grad_norm": 6.118699079738975,
"learning_rate": 8.716579228029562e-05,
"loss": 0.6567,
"num_input_tokens_seen": 12339544,
"step": 1055
},
{
"epoch": 0.5459696111254185,
"grad_norm": 10.911613071983485,
"learning_rate": 8.702295888146548e-05,
"loss": 0.6155,
"num_input_tokens_seen": 12397992,
"step": 1060
},
{
"epoch": 0.5485449394797837,
"grad_norm": 7.643873763070769,
"learning_rate": 8.687945349087703e-05,
"loss": 0.6029,
"num_input_tokens_seen": 12456480,
"step": 1065
},
{
"epoch": 0.5511202678341488,
"grad_norm": 8.136001935771205,
"learning_rate": 8.673527871325022e-05,
"loss": 0.6129,
"num_input_tokens_seen": 12515000,
"step": 1070
},
{
"epoch": 0.553695596188514,
"grad_norm": 11.244237678841547,
"learning_rate": 8.659043716545485e-05,
"loss": 0.6825,
"num_input_tokens_seen": 12573504,
"step": 1075
},
{
"epoch": 0.5562709245428792,
"grad_norm": 7.506618014617338,
"learning_rate": 8.644493147646302e-05,
"loss": 0.6828,
"num_input_tokens_seen": 12632008,
"step": 1080
},
{
"epoch": 0.5588462528972444,
"grad_norm": 6.1353619139783415,
"learning_rate": 8.629876428730145e-05,
"loss": 0.6286,
"num_input_tokens_seen": 12690520,
"step": 1085
},
{
"epoch": 0.5614215812516096,
"grad_norm": 4.639695384779573,
"learning_rate": 8.615193825100355e-05,
"loss": 0.6361,
"num_input_tokens_seen": 12749032,
"step": 1090
},
{
"epoch": 0.5639969096059748,
"grad_norm": 8.458071149567038,
"learning_rate": 8.600445603256123e-05,
"loss": 0.6087,
"num_input_tokens_seen": 12807504,
"step": 1095
},
{
"epoch": 0.56657223796034,
"grad_norm": 7.622756865192901,
"learning_rate": 8.585632030887658e-05,
"loss": 0.5825,
"num_input_tokens_seen": 12865992,
"step": 1100
},
{
"epoch": 0.56657223796034,
"eval_loss": 0.6431913375854492,
"eval_runtime": 19.2744,
"eval_samples_per_second": 3.113,
"eval_steps_per_second": 0.778,
"num_input_tokens_seen": 12865992,
"step": 1100
},
{
"epoch": 0.5691475663147051,
"grad_norm": 23.7865505450662,
"learning_rate": 8.57075337687132e-05,
"loss": 0.6113,
"num_input_tokens_seen": 12924448,
"step": 1105
},
{
"epoch": 0.5717228946690703,
"grad_norm": 9.766678144472763,
"learning_rate": 8.55580991126475e-05,
"loss": 0.562,
"num_input_tokens_seen": 12982912,
"step": 1110
},
{
"epoch": 0.5742982230234355,
"grad_norm": 11.601294815310595,
"learning_rate": 8.540801905301963e-05,
"loss": 0.6124,
"num_input_tokens_seen": 13041424,
"step": 1115
},
{
"epoch": 0.5768735513778007,
"grad_norm": 14.728116727049585,
"learning_rate": 8.525729631388421e-05,
"loss": 0.6788,
"num_input_tokens_seen": 13099888,
"step": 1120
},
{
"epoch": 0.5794488797321659,
"grad_norm": 9.209119741223232,
"learning_rate": 8.510593363096097e-05,
"loss": 0.5904,
"num_input_tokens_seen": 13158344,
"step": 1125
},
{
"epoch": 0.582024208086531,
"grad_norm": 8.883499835719407,
"learning_rate": 8.495393375158504e-05,
"loss": 0.5433,
"num_input_tokens_seen": 13216840,
"step": 1130
},
{
"epoch": 0.5845995364408962,
"grad_norm": 10.675613755673567,
"learning_rate": 8.480129943465709e-05,
"loss": 0.5937,
"num_input_tokens_seen": 13275328,
"step": 1135
},
{
"epoch": 0.5871748647952614,
"grad_norm": 18.416876655689737,
"learning_rate": 8.464803345059324e-05,
"loss": 0.5785,
"num_input_tokens_seen": 13333784,
"step": 1140
},
{
"epoch": 0.5897501931496266,
"grad_norm": 7.2789290785005845,
"learning_rate": 8.449413858127487e-05,
"loss": 0.5472,
"num_input_tokens_seen": 13392280,
"step": 1145
},
{
"epoch": 0.5923255215039918,
"grad_norm": 7.9221272500905355,
"learning_rate": 8.433961761999796e-05,
"loss": 0.5228,
"num_input_tokens_seen": 13450720,
"step": 1150
},
{
"epoch": 0.5923255215039918,
"eval_loss": 0.6229755282402039,
"eval_runtime": 19.3811,
"eval_samples_per_second": 3.096,
"eval_steps_per_second": 0.774,
"num_input_tokens_seen": 13450720,
"step": 1150
},
{
"epoch": 0.5949008498583569,
"grad_norm": 7.6504285821029265,
"learning_rate": 8.418447337142254e-05,
"loss": 0.5654,
"num_input_tokens_seen": 13509200,
"step": 1155
},
{
"epoch": 0.5974761782127221,
"grad_norm": 12.935585815436268,
"learning_rate": 8.402870865152172e-05,
"loss": 0.5074,
"num_input_tokens_seen": 13567656,
"step": 1160
},
{
"epoch": 0.6000515065670873,
"grad_norm": 11.833351794705523,
"learning_rate": 8.387232628753056e-05,
"loss": 0.6436,
"num_input_tokens_seen": 13626136,
"step": 1165
},
{
"epoch": 0.6026268349214525,
"grad_norm": 6.637170802158791,
"learning_rate": 8.371532911789482e-05,
"loss": 0.55,
"num_input_tokens_seen": 13684608,
"step": 1170
},
{
"epoch": 0.6052021632758177,
"grad_norm": 13.363715980324447,
"learning_rate": 8.355771999221937e-05,
"loss": 0.5399,
"num_input_tokens_seen": 13743080,
"step": 1175
},
{
"epoch": 0.6077774916301828,
"grad_norm": 14.735226460570393,
"learning_rate": 8.339950177121647e-05,
"loss": 0.5402,
"num_input_tokens_seen": 13801552,
"step": 1180
},
{
"epoch": 0.610352819984548,
"grad_norm": 15.934621739592105,
"learning_rate": 8.324067732665393e-05,
"loss": 0.5559,
"num_input_tokens_seen": 13860064,
"step": 1185
},
{
"epoch": 0.6129281483389132,
"grad_norm": 10.693551994820394,
"learning_rate": 8.308124954130289e-05,
"loss": 0.5619,
"num_input_tokens_seen": 13918552,
"step": 1190
},
{
"epoch": 0.6155034766932784,
"grad_norm": 12.596573646765963,
"learning_rate": 8.292122130888558e-05,
"loss": 0.5933,
"num_input_tokens_seen": 13977056,
"step": 1195
},
{
"epoch": 0.6180788050476436,
"grad_norm": 8.090399041458168,
"learning_rate": 8.276059553402265e-05,
"loss": 0.5078,
"num_input_tokens_seen": 14035544,
"step": 1200
},
{
"epoch": 0.6180788050476436,
"eval_loss": 0.6184359192848206,
"eval_runtime": 19.3097,
"eval_samples_per_second": 3.107,
"eval_steps_per_second": 0.777,
"num_input_tokens_seen": 14035544,
"step": 1200
},
{
"epoch": 0.6206541334020087,
"grad_norm": 5.625941133087891,
"learning_rate": 8.259937513218066e-05,
"loss": 0.5109,
"num_input_tokens_seen": 14094024,
"step": 1205
},
{
"epoch": 0.623229461756374,
"grad_norm": 11.892235969186327,
"learning_rate": 8.243756302961898e-05,
"loss": 0.4738,
"num_input_tokens_seen": 14152504,
"step": 1210
},
{
"epoch": 0.6258047901107391,
"grad_norm": 45.17960159223106,
"learning_rate": 8.227516216333679e-05,
"loss": 0.5615,
"num_input_tokens_seen": 14210992,
"step": 1215
},
{
"epoch": 0.6283801184651043,
"grad_norm": 14.930236962628644,
"learning_rate": 8.211217548101973e-05,
"loss": 0.5584,
"num_input_tokens_seen": 14269488,
"step": 1220
},
{
"epoch": 0.6309554468194695,
"grad_norm": 49.91459221869246,
"learning_rate": 8.194860594098635e-05,
"loss": 0.4856,
"num_input_tokens_seen": 14327968,
"step": 1225
},
{
"epoch": 0.6335307751738347,
"grad_norm": 14.899444451092219,
"learning_rate": 8.17844565121345e-05,
"loss": 0.5378,
"num_input_tokens_seen": 14386448,
"step": 1230
},
{
"epoch": 0.6361061035281999,
"grad_norm": 10.76781481162281,
"learning_rate": 8.161973017388744e-05,
"loss": 0.4484,
"num_input_tokens_seen": 14444912,
"step": 1235
},
{
"epoch": 0.638681431882565,
"grad_norm": 11.97619546639196,
"learning_rate": 8.145442991613963e-05,
"loss": 0.4772,
"num_input_tokens_seen": 14503392,
"step": 1240
},
{
"epoch": 0.6412567602369302,
"grad_norm": 12.878458794693833,
"learning_rate": 8.128855873920265e-05,
"loss": 0.5807,
"num_input_tokens_seen": 14561872,
"step": 1245
},
{
"epoch": 0.6438320885912954,
"grad_norm": 5.57738881271864,
"learning_rate": 8.112211965375059e-05,
"loss": 0.5268,
"num_input_tokens_seen": 14620336,
"step": 1250
},
{
"epoch": 0.6438320885912954,
"eval_loss": 0.5496931672096252,
"eval_runtime": 19.4472,
"eval_samples_per_second": 3.085,
"eval_steps_per_second": 0.771,
"num_input_tokens_seen": 14620336,
"step": 1250
},
{
"epoch": 0.6464074169456606,
"grad_norm": 7.347855237786093,
"learning_rate": 8.095511568076548e-05,
"loss": 0.6012,
"num_input_tokens_seen": 14678792,
"step": 1255
},
{
"epoch": 0.6489827453000258,
"grad_norm": 9.779072667301502,
"learning_rate": 8.078754985148247e-05,
"loss": 0.5358,
"num_input_tokens_seen": 14737272,
"step": 1260
},
{
"epoch": 0.6515580736543909,
"grad_norm": 13.80618279187987,
"learning_rate": 8.061942520733474e-05,
"loss": 0.4676,
"num_input_tokens_seen": 14795784,
"step": 1265
},
{
"epoch": 0.6541334020087561,
"grad_norm": 10.954100508877262,
"learning_rate": 8.045074479989838e-05,
"loss": 0.489,
"num_input_tokens_seen": 14854272,
"step": 1270
},
{
"epoch": 0.6567087303631213,
"grad_norm": 9.330856277290438,
"learning_rate": 8.02815116908369e-05,
"loss": 0.4505,
"num_input_tokens_seen": 14912720,
"step": 1275
},
{
"epoch": 0.6592840587174865,
"grad_norm": 12.562590417573535,
"learning_rate": 8.011172895184579e-05,
"loss": 0.4987,
"num_input_tokens_seen": 14971192,
"step": 1280
},
{
"epoch": 0.6618593870718517,
"grad_norm": 13.11335003986863,
"learning_rate": 7.994139966459664e-05,
"loss": 0.5156,
"num_input_tokens_seen": 15029656,
"step": 1285
},
{
"epoch": 0.6644347154262168,
"grad_norm": 9.518625656710672,
"learning_rate": 7.977052692068127e-05,
"loss": 0.5266,
"num_input_tokens_seen": 15088144,
"step": 1290
},
{
"epoch": 0.667010043780582,
"grad_norm": 8.95720787325786,
"learning_rate": 7.959911382155566e-05,
"loss": 0.4502,
"num_input_tokens_seen": 15146600,
"step": 1295
},
{
"epoch": 0.6695853721349472,
"grad_norm": 11.65058483213029,
"learning_rate": 7.942716347848353e-05,
"loss": 0.4578,
"num_input_tokens_seen": 15205064,
"step": 1300
},
{
"epoch": 0.6695853721349472,
"eval_loss": 0.49471279978752136,
"eval_runtime": 19.2738,
"eval_samples_per_second": 3.113,
"eval_steps_per_second": 0.778,
"num_input_tokens_seen": 15205064,
"step": 1300
},
{
"epoch": 0.6721607004893124,
"grad_norm": 10.254593495214214,
"learning_rate": 7.925467901247996e-05,
"loss": 0.4589,
"num_input_tokens_seen": 15263560,
"step": 1305
},
{
"epoch": 0.6747360288436776,
"grad_norm": 10.097755490465826,
"learning_rate": 7.908166355425475e-05,
"loss": 0.457,
"num_input_tokens_seen": 15322016,
"step": 1310
},
{
"epoch": 0.6773113571980427,
"grad_norm": 10.727169120741898,
"learning_rate": 7.890812024415555e-05,
"loss": 0.51,
"num_input_tokens_seen": 15380504,
"step": 1315
},
{
"epoch": 0.6798866855524079,
"grad_norm": 11.12869176509039,
"learning_rate": 7.873405223211087e-05,
"loss": 0.4994,
"num_input_tokens_seen": 15438944,
"step": 1320
},
{
"epoch": 0.6824620139067731,
"grad_norm": 8.499688286725998,
"learning_rate": 7.855946267757295e-05,
"loss": 0.4501,
"num_input_tokens_seen": 15497384,
"step": 1325
},
{
"epoch": 0.6850373422611383,
"grad_norm": 10.352066646699223,
"learning_rate": 7.838435474946034e-05,
"loss": 0.4807,
"num_input_tokens_seen": 15555856,
"step": 1330
},
{
"epoch": 0.6876126706155035,
"grad_norm": 14.714795422962215,
"learning_rate": 7.820873162610044e-05,
"loss": 0.5112,
"num_input_tokens_seen": 15614368,
"step": 1335
},
{
"epoch": 0.6901879989698687,
"grad_norm": 8.466874504995866,
"learning_rate": 7.803259649517178e-05,
"loss": 0.4825,
"num_input_tokens_seen": 15672864,
"step": 1340
},
{
"epoch": 0.6927633273242338,
"grad_norm": 7.62934190428385,
"learning_rate": 7.78559525536462e-05,
"loss": 0.5147,
"num_input_tokens_seen": 15731376,
"step": 1345
},
{
"epoch": 0.695338655678599,
"grad_norm": 9.019045929732858,
"learning_rate": 7.767880300773074e-05,
"loss": 0.4702,
"num_input_tokens_seen": 15789848,
"step": 1350
},
{
"epoch": 0.695338655678599,
"eval_loss": 0.5247787237167358,
"eval_runtime": 19.436,
"eval_samples_per_second": 3.087,
"eval_steps_per_second": 0.772,
"num_input_tokens_seen": 15789848,
"step": 1350
},
{
"epoch": 0.6979139840329642,
"grad_norm": 9.176622281786903,
"learning_rate": 7.750115107280959e-05,
"loss": 0.4601,
"num_input_tokens_seen": 15848328,
"step": 1355
},
{
"epoch": 0.7004893123873294,
"grad_norm": 13.657128530320302,
"learning_rate": 7.732299997338557e-05,
"loss": 0.4704,
"num_input_tokens_seen": 15906824,
"step": 1360
},
{
"epoch": 0.7030646407416946,
"grad_norm": 20.00043922120396,
"learning_rate": 7.714435294302168e-05,
"loss": 0.4937,
"num_input_tokens_seen": 15965312,
"step": 1365
},
{
"epoch": 0.7056399690960597,
"grad_norm": 10.050026656050768,
"learning_rate": 7.696521322428245e-05,
"loss": 0.4635,
"num_input_tokens_seen": 16023824,
"step": 1370
},
{
"epoch": 0.7082152974504249,
"grad_norm": 10.723911953770791,
"learning_rate": 7.678558406867498e-05,
"loss": 0.4421,
"num_input_tokens_seen": 16082280,
"step": 1375
},
{
"epoch": 0.7107906258047901,
"grad_norm": 8.12550455034103,
"learning_rate": 7.660546873659e-05,
"loss": 0.4823,
"num_input_tokens_seen": 16140800,
"step": 1380
},
{
"epoch": 0.7133659541591553,
"grad_norm": 9.105312428399467,
"learning_rate": 7.642487049724271e-05,
"loss": 0.4122,
"num_input_tokens_seen": 16199320,
"step": 1385
},
{
"epoch": 0.7159412825135205,
"grad_norm": 5.873152833761692,
"learning_rate": 7.624379262861335e-05,
"loss": 0.4372,
"num_input_tokens_seen": 16257768,
"step": 1390
},
{
"epoch": 0.7185166108678857,
"grad_norm": 10.07764587174093,
"learning_rate": 7.606223841738775e-05,
"loss": 0.4899,
"num_input_tokens_seen": 16316264,
"step": 1395
},
{
"epoch": 0.7210919392222508,
"grad_norm": 20.283363042209615,
"learning_rate": 7.588021115889777e-05,
"loss": 0.4294,
"num_input_tokens_seen": 16374784,
"step": 1400
},
{
"epoch": 0.7210919392222508,
"eval_loss": 0.47318556904792786,
"eval_runtime": 19.2484,
"eval_samples_per_second": 3.117,
"eval_steps_per_second": 0.779,
"num_input_tokens_seen": 16374784,
"step": 1400
},
{
"epoch": 0.723667267576616,
"grad_norm": 9.384799103772558,
"learning_rate": 7.569771415706126e-05,
"loss": 0.444,
"num_input_tokens_seen": 16433288,
"step": 1405
},
{
"epoch": 0.7262425959309812,
"grad_norm": 16.840151224890615,
"learning_rate": 7.551475072432237e-05,
"loss": 0.4971,
"num_input_tokens_seen": 16491784,
"step": 1410
},
{
"epoch": 0.7288179242853464,
"grad_norm": 10.190076716766358,
"learning_rate": 7.533132418159119e-05,
"loss": 0.4284,
"num_input_tokens_seen": 16550272,
"step": 1415
},
{
"epoch": 0.7313932526397116,
"grad_norm": 7.663975666748285,
"learning_rate": 7.514743785818361e-05,
"loss": 0.414,
"num_input_tokens_seen": 16608704,
"step": 1420
},
{
"epoch": 0.7339685809940767,
"grad_norm": 7.403751704217463,
"learning_rate": 7.496309509176082e-05,
"loss": 0.3715,
"num_input_tokens_seen": 16667200,
"step": 1425
},
{
"epoch": 0.7365439093484419,
"grad_norm": 15.05075115467088,
"learning_rate": 7.477829922826883e-05,
"loss": 0.4723,
"num_input_tokens_seen": 16725704,
"step": 1430
},
{
"epoch": 0.7391192377028071,
"grad_norm": 8.817475778915243,
"learning_rate": 7.459305362187756e-05,
"loss": 0.4357,
"num_input_tokens_seen": 16784192,
"step": 1435
},
{
"epoch": 0.7416945660571723,
"grad_norm": 14.070137984729548,
"learning_rate": 7.440736163492016e-05,
"loss": 0.4402,
"num_input_tokens_seen": 16842704,
"step": 1440
},
{
"epoch": 0.7442698944115375,
"grad_norm": 14.494333760708628,
"learning_rate": 7.422122663783187e-05,
"loss": 0.4457,
"num_input_tokens_seen": 16901176,
"step": 1445
},
{
"epoch": 0.7468452227659027,
"grad_norm": 8.298786058991697,
"learning_rate": 7.403465200908883e-05,
"loss": 0.4353,
"num_input_tokens_seen": 16959632,
"step": 1450
},
{
"epoch": 0.7468452227659027,
"eval_loss": 0.43503889441490173,
"eval_runtime": 19.5652,
"eval_samples_per_second": 3.067,
"eval_steps_per_second": 0.767,
"num_input_tokens_seen": 16959632,
"step": 1450
},
{
"epoch": 0.7494205511202678,
"grad_norm": 8.48162757316697,
"learning_rate": 7.38476411351468e-05,
"loss": 0.4183,
"num_input_tokens_seen": 17018072,
"step": 1455
},
{
"epoch": 0.751995879474633,
"grad_norm": 6.408395141132007,
"learning_rate": 7.366019741037973e-05,
"loss": 0.4632,
"num_input_tokens_seen": 17076560,
"step": 1460
},
{
"epoch": 0.7545712078289982,
"grad_norm": 8.824594003357785,
"learning_rate": 7.347232423701804e-05,
"loss": 0.4512,
"num_input_tokens_seen": 17135016,
"step": 1465
},
{
"epoch": 0.7571465361833634,
"grad_norm": 6.923088148907062,
"learning_rate": 7.328402502508701e-05,
"loss": 0.4121,
"num_input_tokens_seen": 17193496,
"step": 1470
},
{
"epoch": 0.7597218645377286,
"grad_norm": 7.015139822100635,
"learning_rate": 7.309530319234472e-05,
"loss": 0.5234,
"num_input_tokens_seen": 17251944,
"step": 1475
},
{
"epoch": 0.7622971928920937,
"grad_norm": 9.64129523887212,
"learning_rate": 7.290616216422017e-05,
"loss": 0.3895,
"num_input_tokens_seen": 17310456,
"step": 1480
},
{
"epoch": 0.7648725212464589,
"grad_norm": 10.31264909937419,
"learning_rate": 7.271660537375102e-05,
"loss": 0.3641,
"num_input_tokens_seen": 17368968,
"step": 1485
},
{
"epoch": 0.7674478496008241,
"grad_norm": 12.820424037076199,
"learning_rate": 7.25266362615213e-05,
"loss": 0.3867,
"num_input_tokens_seen": 17427424,
"step": 1490
},
{
"epoch": 0.7700231779551893,
"grad_norm": 19.961448856074927,
"learning_rate": 7.233625827559893e-05,
"loss": 0.3214,
"num_input_tokens_seen": 17485936,
"step": 1495
},
{
"epoch": 0.7725985063095545,
"grad_norm": 7.797571046303324,
"learning_rate": 7.21454748714732e-05,
"loss": 0.3369,
"num_input_tokens_seen": 17544440,
"step": 1500
},
{
"epoch": 0.7725985063095545,
"eval_loss": 0.3963810205459595,
"eval_runtime": 19.2433,
"eval_samples_per_second": 3.118,
"eval_steps_per_second": 0.779,
"num_input_tokens_seen": 17544440,
"step": 1500
},
{
"epoch": 0.7751738346639196,
"grad_norm": 10.358996803929989,
"learning_rate": 7.195428951199204e-05,
"loss": 0.3397,
"num_input_tokens_seen": 17602952,
"step": 1505
},
{
"epoch": 0.7777491630182848,
"grad_norm": 11.954413440576635,
"learning_rate": 7.176270566729904e-05,
"loss": 0.5039,
"num_input_tokens_seen": 17661440,
"step": 1510
},
{
"epoch": 0.78032449137265,
"grad_norm": 15.247523980815545,
"learning_rate": 7.157072681477069e-05,
"loss": 0.426,
"num_input_tokens_seen": 17719928,
"step": 1515
},
{
"epoch": 0.7828998197270152,
"grad_norm": 11.560314722738736,
"learning_rate": 7.137835643895305e-05,
"loss": 0.3587,
"num_input_tokens_seen": 17778424,
"step": 1520
},
{
"epoch": 0.7854751480813804,
"grad_norm": 9.214361802688169,
"learning_rate": 7.118559803149865e-05,
"loss": 0.4849,
"num_input_tokens_seen": 17836936,
"step": 1525
},
{
"epoch": 0.7880504764357456,
"grad_norm": 9.825924936343675,
"learning_rate": 7.099245509110299e-05,
"loss": 0.3795,
"num_input_tokens_seen": 17895392,
"step": 1530
},
{
"epoch": 0.7906258047901107,
"grad_norm": 28.893219975863104,
"learning_rate": 7.079893112344118e-05,
"loss": 0.4206,
"num_input_tokens_seen": 17953872,
"step": 1535
},
{
"epoch": 0.7932011331444759,
"grad_norm": 9.96463686082691,
"learning_rate": 7.060502964110418e-05,
"loss": 0.3567,
"num_input_tokens_seen": 18012320,
"step": 1540
},
{
"epoch": 0.7957764614988411,
"grad_norm": 16.21585017609129,
"learning_rate": 7.041075416353513e-05,
"loss": 0.4172,
"num_input_tokens_seen": 18070792,
"step": 1545
},
{
"epoch": 0.7983517898532063,
"grad_norm": 7.729155059823011,
"learning_rate": 7.02161082169654e-05,
"loss": 0.4666,
"num_input_tokens_seen": 18129304,
"step": 1550
},
{
"epoch": 0.7983517898532063,
"eval_loss": 0.42660555243492126,
"eval_runtime": 19.4313,
"eval_samples_per_second": 3.088,
"eval_steps_per_second": 0.772,
"num_input_tokens_seen": 18129304,
"step": 1550
},
{
"epoch": 0.8009271182075715,
"grad_norm": 6.105522334359041,
"learning_rate": 7.002109533435066e-05,
"loss": 0.4305,
"num_input_tokens_seen": 18187728,
"step": 1555
},
{
"epoch": 0.8035024465619367,
"grad_norm": 7.852746488368037,
"learning_rate": 6.982571905530669e-05,
"loss": 0.4529,
"num_input_tokens_seen": 18246192,
"step": 1560
},
{
"epoch": 0.8060777749163018,
"grad_norm": 5.8884808032636275,
"learning_rate": 6.962998292604517e-05,
"loss": 0.4569,
"num_input_tokens_seen": 18304632,
"step": 1565
},
{
"epoch": 0.808653103270667,
"grad_norm": 8.738504630489588,
"learning_rate": 6.943389049930931e-05,
"loss": 0.3936,
"num_input_tokens_seen": 18363136,
"step": 1570
},
{
"epoch": 0.8112284316250322,
"grad_norm": 8.410645723765466,
"learning_rate": 6.923744533430937e-05,
"loss": 0.4083,
"num_input_tokens_seen": 18421592,
"step": 1575
},
{
"epoch": 0.8138037599793974,
"grad_norm": 7.654296364994705,
"learning_rate": 6.904065099665803e-05,
"loss": 0.4564,
"num_input_tokens_seen": 18480104,
"step": 1580
},
{
"epoch": 0.8163790883337626,
"grad_norm": 17.3461741974751,
"learning_rate": 6.884351105830568e-05,
"loss": 0.3928,
"num_input_tokens_seen": 18538600,
"step": 1585
},
{
"epoch": 0.8189544166881277,
"grad_norm": 16.63308936698189,
"learning_rate": 6.864602909747563e-05,
"loss": 0.4278,
"num_input_tokens_seen": 18597104,
"step": 1590
},
{
"epoch": 0.8215297450424929,
"grad_norm": 10.83446543348463,
"learning_rate": 6.84482086985991e-05,
"loss": 0.3956,
"num_input_tokens_seen": 18655584,
"step": 1595
},
{
"epoch": 0.8241050733968581,
"grad_norm": 7.98539427128297,
"learning_rate": 6.825005345225019e-05,
"loss": 0.3834,
"num_input_tokens_seen": 18714072,
"step": 1600
},
{
"epoch": 0.8241050733968581,
"eval_loss": 0.44774264097213745,
"eval_runtime": 19.316,
"eval_samples_per_second": 3.106,
"eval_steps_per_second": 0.777,
"num_input_tokens_seen": 18714072,
"step": 1600
},
{
"epoch": 0.8266804017512233,
"grad_norm": 6.7599286769908815,
"learning_rate": 6.805156695508075e-05,
"loss": 0.4136,
"num_input_tokens_seen": 18772552,
"step": 1605
},
{
"epoch": 0.8292557301055885,
"grad_norm": 5.6959660380818,
"learning_rate": 6.7852752809755e-05,
"loss": 0.3523,
"num_input_tokens_seen": 18830992,
"step": 1610
},
{
"epoch": 0.8318310584599536,
"grad_norm": 5.054175814859553,
"learning_rate": 6.765361462488424e-05,
"loss": 0.2983,
"num_input_tokens_seen": 18889480,
"step": 1615
},
{
"epoch": 0.8344063868143188,
"grad_norm": 8.9830923866359,
"learning_rate": 6.745415601496127e-05,
"loss": 0.3987,
"num_input_tokens_seen": 18947936,
"step": 1620
},
{
"epoch": 0.836981715168684,
"grad_norm": 9.543713321488678,
"learning_rate": 6.725438060029485e-05,
"loss": 0.5012,
"num_input_tokens_seen": 19006432,
"step": 1625
},
{
"epoch": 0.8395570435230492,
"grad_norm": 9.329917566254677,
"learning_rate": 6.705429200694396e-05,
"loss": 0.36,
"num_input_tokens_seen": 19064920,
"step": 1630
},
{
"epoch": 0.8421323718774144,
"grad_norm": 4.257164474078224,
"learning_rate": 6.685389386665197e-05,
"loss": 0.3816,
"num_input_tokens_seen": 19123376,
"step": 1635
},
{
"epoch": 0.8447077002317795,
"grad_norm": 5.420005475317537,
"learning_rate": 6.665318981678072e-05,
"loss": 0.3503,
"num_input_tokens_seen": 19181864,
"step": 1640
},
{
"epoch": 0.8472830285861447,
"grad_norm": 6.544911058180563,
"learning_rate": 6.645218350024456e-05,
"loss": 0.3644,
"num_input_tokens_seen": 19240352,
"step": 1645
},
{
"epoch": 0.8498583569405099,
"grad_norm": 9.593843549483235,
"learning_rate": 6.625087856544416e-05,
"loss": 0.475,
"num_input_tokens_seen": 19298848,
"step": 1650
},
{
"epoch": 0.8498583569405099,
"eval_loss": 0.35129043459892273,
"eval_runtime": 19.4001,
"eval_samples_per_second": 3.093,
"eval_steps_per_second": 0.773,
"num_input_tokens_seen": 19298848,
"step": 1650
},
{
"epoch": 0.8524336852948751,
"grad_norm": 6.358459669534092,
"learning_rate": 6.604927866620031e-05,
"loss": 0.3414,
"num_input_tokens_seen": 19357304,
"step": 1655
},
{
"epoch": 0.8550090136492403,
"grad_norm": 9.39994214990231,
"learning_rate": 6.584738746168762e-05,
"loss": 0.4728,
"num_input_tokens_seen": 19415800,
"step": 1660
},
{
"epoch": 0.8575843420036054,
"grad_norm": 14.50676995208697,
"learning_rate": 6.564520861636808e-05,
"loss": 0.4264,
"num_input_tokens_seen": 19474256,
"step": 1665
},
{
"epoch": 0.8601596703579707,
"grad_norm": 4.791356534368335,
"learning_rate": 6.544274579992453e-05,
"loss": 0.4038,
"num_input_tokens_seen": 19532736,
"step": 1670
},
{
"epoch": 0.8627349987123358,
"grad_norm": 6.4285821594117225,
"learning_rate": 6.524000268719411e-05,
"loss": 0.3241,
"num_input_tokens_seen": 19591184,
"step": 1675
},
{
"epoch": 0.865310327066701,
"grad_norm": 8.268794944409354,
"learning_rate": 6.503698295810154e-05,
"loss": 0.3248,
"num_input_tokens_seen": 19649656,
"step": 1680
},
{
"epoch": 0.8678856554210662,
"grad_norm": 12.365745303187085,
"learning_rate": 6.483369029759229e-05,
"loss": 0.4295,
"num_input_tokens_seen": 19708144,
"step": 1685
},
{
"epoch": 0.8704609837754314,
"grad_norm": 6.3136571538884505,
"learning_rate": 6.463012839556569e-05,
"loss": 0.3785,
"num_input_tokens_seen": 19766592,
"step": 1690
},
{
"epoch": 0.8730363121297966,
"grad_norm": 6.1513513385401195,
"learning_rate": 6.442630094680805e-05,
"loss": 0.3376,
"num_input_tokens_seen": 19825064,
"step": 1695
},
{
"epoch": 0.8756116404841617,
"grad_norm": 9.59752445262446,
"learning_rate": 6.42222116509255e-05,
"loss": 0.3752,
"num_input_tokens_seen": 19883504,
"step": 1700
},
{
"epoch": 0.8756116404841617,
"eval_loss": 0.34382957220077515,
"eval_runtime": 19.3987,
"eval_samples_per_second": 3.093,
"eval_steps_per_second": 0.773,
"num_input_tokens_seen": 19883504,
"step": 1700
},
{
"epoch": 0.8781869688385269,
"grad_norm": 18.837461373103466,
"learning_rate": 6.401786421227686e-05,
"loss": 0.3453,
"num_input_tokens_seen": 19942000,
"step": 1705
},
{
"epoch": 0.8807622971928921,
"grad_norm": 10.315591123527769,
"learning_rate": 6.381326233990644e-05,
"loss": 0.3278,
"num_input_tokens_seen": 20000440,
"step": 1710
},
{
"epoch": 0.8833376255472573,
"grad_norm": 14.473376988028903,
"learning_rate": 6.360840974747666e-05,
"loss": 0.3695,
"num_input_tokens_seen": 20058920,
"step": 1715
},
{
"epoch": 0.8859129539016225,
"grad_norm": 13.569903569955356,
"learning_rate": 6.340331015320073e-05,
"loss": 0.4362,
"num_input_tokens_seen": 20117416,
"step": 1720
},
{
"epoch": 0.8884882822559876,
"grad_norm": 8.610173422350266,
"learning_rate": 6.319796727977509e-05,
"loss": 0.3178,
"num_input_tokens_seen": 20175936,
"step": 1725
},
{
"epoch": 0.8910636106103528,
"grad_norm": 13.406485921668688,
"learning_rate": 6.299238485431183e-05,
"loss": 0.3679,
"num_input_tokens_seen": 20234368,
"step": 1730
},
{
"epoch": 0.893638938964718,
"grad_norm": 10.15746393869747,
"learning_rate": 6.27865666082711e-05,
"loss": 0.3529,
"num_input_tokens_seen": 20292856,
"step": 1735
},
{
"epoch": 0.8962142673190832,
"grad_norm": 6.0390850533631,
"learning_rate": 6.258051627739337e-05,
"loss": 0.281,
"num_input_tokens_seen": 20351272,
"step": 1740
},
{
"epoch": 0.8987895956734484,
"grad_norm": 11.746443511207032,
"learning_rate": 6.237423760163156e-05,
"loss": 0.3148,
"num_input_tokens_seen": 20409744,
"step": 1745
},
{
"epoch": 0.9013649240278135,
"grad_norm": 23.680917449871366,
"learning_rate": 6.216773432508325e-05,
"loss": 0.3233,
"num_input_tokens_seen": 20468200,
"step": 1750
},
{
"epoch": 0.9013649240278135,
"eval_loss": 0.3325226604938507,
"eval_runtime": 19.3673,
"eval_samples_per_second": 3.098,
"eval_steps_per_second": 0.775,
"num_input_tokens_seen": 20468200,
"step": 1750
},
{
"epoch": 0.9039402523821787,
"grad_norm": 10.184672332401709,
"learning_rate": 6.196101019592264e-05,
"loss": 0.3731,
"num_input_tokens_seen": 20526696,
"step": 1755
},
{
"epoch": 0.9065155807365439,
"grad_norm": 14.226563654043238,
"learning_rate": 6.175406896633258e-05,
"loss": 0.3092,
"num_input_tokens_seen": 20585160,
"step": 1760
},
{
"epoch": 0.9090909090909091,
"grad_norm": 16.003520349130227,
"learning_rate": 6.154691439243644e-05,
"loss": 0.311,
"num_input_tokens_seen": 20643672,
"step": 1765
},
{
"epoch": 0.9116662374452743,
"grad_norm": 13.517061006658674,
"learning_rate": 6.133955023422991e-05,
"loss": 0.3085,
"num_input_tokens_seen": 20702152,
"step": 1770
},
{
"epoch": 0.9142415657996394,
"grad_norm": 12.015661764026639,
"learning_rate": 6.11319802555128e-05,
"loss": 0.3236,
"num_input_tokens_seen": 20760664,
"step": 1775
},
{
"epoch": 0.9168168941540046,
"grad_norm": 11.503732085983884,
"learning_rate": 6.092420822382069e-05,
"loss": 0.4067,
"num_input_tokens_seen": 20819144,
"step": 1780
},
{
"epoch": 0.9193922225083698,
"grad_norm": 24.85467047869542,
"learning_rate": 6.071623791035657e-05,
"loss": 0.3228,
"num_input_tokens_seen": 20877624,
"step": 1785
},
{
"epoch": 0.921967550862735,
"grad_norm": 7.500048546411068,
"learning_rate": 6.050807308992234e-05,
"loss": 0.2672,
"num_input_tokens_seen": 20936112,
"step": 1790
},
{
"epoch": 0.9245428792171002,
"grad_norm": 7.916795517296481,
"learning_rate": 6.02997175408504e-05,
"loss": 0.3308,
"num_input_tokens_seen": 20994608,
"step": 1795
},
{
"epoch": 0.9271182075714653,
"grad_norm": 13.438136619817039,
"learning_rate": 6.009117504493493e-05,
"loss": 0.3279,
"num_input_tokens_seen": 21053080,
"step": 1800
},
{
"epoch": 0.9271182075714653,
"eval_loss": 0.3501794636249542,
"eval_runtime": 19.2844,
"eval_samples_per_second": 3.111,
"eval_steps_per_second": 0.778,
"num_input_tokens_seen": 21053080,
"step": 1800
},
{
"epoch": 0.9296935359258306,
"grad_norm": 10.926380171957325,
"learning_rate": 5.98824493873634e-05,
"loss": 0.3107,
"num_input_tokens_seen": 21111592,
"step": 1805
},
{
"epoch": 0.9322688642801957,
"grad_norm": 9.204735754102403,
"learning_rate": 5.9673544356647706e-05,
"loss": 0.3453,
"num_input_tokens_seen": 21170024,
"step": 1810
},
{
"epoch": 0.9348441926345609,
"grad_norm": 10.122487159211746,
"learning_rate": 5.946446374455555e-05,
"loss": 0.3607,
"num_input_tokens_seen": 21228480,
"step": 1815
},
{
"epoch": 0.9374195209889261,
"grad_norm": 9.590674638217761,
"learning_rate": 5.9255211346041526e-05,
"loss": 0.3375,
"num_input_tokens_seen": 21286944,
"step": 1820
},
{
"epoch": 0.9399948493432912,
"grad_norm": 5.469697379095751,
"learning_rate": 5.9045790959178296e-05,
"loss": 0.2521,
"num_input_tokens_seen": 21345456,
"step": 1825
},
{
"epoch": 0.9425701776976565,
"grad_norm": 12.20617548886948,
"learning_rate": 5.883620638508756e-05,
"loss": 0.3855,
"num_input_tokens_seen": 21403896,
"step": 1830
},
{
"epoch": 0.9451455060520216,
"grad_norm": 12.862485307076343,
"learning_rate": 5.8626461427871204e-05,
"loss": 0.2947,
"num_input_tokens_seen": 21462360,
"step": 1835
},
{
"epoch": 0.9477208344063868,
"grad_norm": 15.353261731205423,
"learning_rate": 5.841655989454213e-05,
"loss": 0.4505,
"num_input_tokens_seen": 21520864,
"step": 1840
},
{
"epoch": 0.950296162760752,
"grad_norm": 9.642536812282682,
"learning_rate": 5.820650559495523e-05,
"loss": 0.3758,
"num_input_tokens_seen": 21579376,
"step": 1845
},
{
"epoch": 0.9528714911151172,
"grad_norm": 9.811875826616257,
"learning_rate": 5.7996302341738164e-05,
"loss": 0.3221,
"num_input_tokens_seen": 21637848,
"step": 1850
},
{
"epoch": 0.9528714911151172,
"eval_loss": 0.2935050129890442,
"eval_runtime": 19.4702,
"eval_samples_per_second": 3.082,
"eval_steps_per_second": 0.77,
"num_input_tokens_seen": 21637848,
"step": 1850
},
{
"epoch": 0.9554468194694824,
"grad_norm": 11.793922029031792,
"learning_rate": 5.778595395022226e-05,
"loss": 0.4205,
"num_input_tokens_seen": 21696328,
"step": 1855
},
{
"epoch": 0.9580221478238475,
"grad_norm": 7.242120499330675,
"learning_rate": 5.757546423837314e-05,
"loss": 0.3075,
"num_input_tokens_seen": 21754816,
"step": 1860
},
{
"epoch": 0.9605974761782127,
"grad_norm": 15.811484589504676,
"learning_rate": 5.736483702672155e-05,
"loss": 0.2604,
"num_input_tokens_seen": 21813296,
"step": 1865
},
{
"epoch": 0.9631728045325779,
"grad_norm": 13.712784450465344,
"learning_rate": 5.7154076138293914e-05,
"loss": 0.342,
"num_input_tokens_seen": 21871800,
"step": 1870
},
{
"epoch": 0.9657481328869431,
"grad_norm": 7.941289555505942,
"learning_rate": 5.694318539854297e-05,
"loss": 0.411,
"num_input_tokens_seen": 21930272,
"step": 1875
},
{
"epoch": 0.9683234612413083,
"grad_norm": 13.422362176081661,
"learning_rate": 5.673216863527836e-05,
"loss": 0.2606,
"num_input_tokens_seen": 21988736,
"step": 1880
},
{
"epoch": 0.9708987895956734,
"grad_norm": 3.5902749610502247,
"learning_rate": 5.652102967859715e-05,
"loss": 0.2647,
"num_input_tokens_seen": 22047200,
"step": 1885
},
{
"epoch": 0.9734741179500386,
"grad_norm": 10.785857611570844,
"learning_rate": 5.6309772360814295e-05,
"loss": 0.2961,
"num_input_tokens_seen": 22105664,
"step": 1890
},
{
"epoch": 0.9760494463044038,
"grad_norm": 12.758145270494738,
"learning_rate": 5.6098400516393065e-05,
"loss": 0.2776,
"num_input_tokens_seen": 22164184,
"step": 1895
},
{
"epoch": 0.978624774658769,
"grad_norm": 9.907554223635353,
"learning_rate": 5.5886917981875485e-05,
"loss": 0.3781,
"num_input_tokens_seen": 22222632,
"step": 1900
},
{
"epoch": 0.978624774658769,
"eval_loss": 0.297338604927063,
"eval_runtime": 19.4745,
"eval_samples_per_second": 3.081,
"eval_steps_per_second": 0.77,
"num_input_tokens_seen": 22222632,
"step": 1900
},
{
"epoch": 0.9812001030131342,
"grad_norm": 8.000918034870224,
"learning_rate": 5.567532859581267e-05,
"loss": 0.2966,
"num_input_tokens_seen": 22281080,
"step": 1905
},
{
"epoch": 0.9837754313674993,
"grad_norm": 7.463040505678827,
"learning_rate": 5.5463636198695166e-05,
"loss": 0.304,
"num_input_tokens_seen": 22339552,
"step": 1910
},
{
"epoch": 0.9863507597218646,
"grad_norm": 8.329553137160708,
"learning_rate": 5.5251844632883244e-05,
"loss": 0.3534,
"num_input_tokens_seen": 22398056,
"step": 1915
},
{
"epoch": 0.9889260880762297,
"grad_norm": 6.690575164415027,
"learning_rate": 5.503995774253714e-05,
"loss": 0.319,
"num_input_tokens_seen": 22456520,
"step": 1920
},
{
"epoch": 0.9915014164305949,
"grad_norm": 6.058967141203479,
"learning_rate": 5.482797937354731e-05,
"loss": 0.312,
"num_input_tokens_seen": 22515024,
"step": 1925
},
{
"epoch": 0.9940767447849601,
"grad_norm": 6.871005440128773,
"learning_rate": 5.461591337346461e-05,
"loss": 0.3001,
"num_input_tokens_seen": 22573480,
"step": 1930
},
{
"epoch": 0.9966520731393252,
"grad_norm": 7.544493120290949,
"learning_rate": 5.4403763591430436e-05,
"loss": 0.2938,
"num_input_tokens_seen": 22631960,
"step": 1935
},
{
"epoch": 0.9992274014936905,
"grad_norm": 7.715734053518216,
"learning_rate": 5.419153387810693e-05,
"loss": 0.3429,
"num_input_tokens_seen": 22690432,
"step": 1940
},
{
"epoch": 1.001545197012619,
"grad_norm": 18.765317639079587,
"learning_rate": 5.397922808560698e-05,
"loss": 0.292,
"num_input_tokens_seen": 22743048,
"step": 1945
},
{
"epoch": 1.0041205253669843,
"grad_norm": 7.403981126755249,
"learning_rate": 5.376685006742441e-05,
"loss": 0.2845,
"num_input_tokens_seen": 22801512,
"step": 1950
},
{
"epoch": 1.0041205253669843,
"eval_loss": 0.2472737729549408,
"eval_runtime": 19.468,
"eval_samples_per_second": 3.082,
"eval_steps_per_second": 0.77,
"num_input_tokens_seen": 22801512,
"step": 1950
},
{
"epoch": 1.0066958537213495,
"grad_norm": 9.441611539164802,
"learning_rate": 5.355440367836396e-05,
"loss": 0.278,
"num_input_tokens_seen": 22859952,
"step": 1955
},
{
"epoch": 1.0092711820757148,
"grad_norm": 13.17108131671136,
"learning_rate": 5.334189277447138e-05,
"loss": 0.3433,
"num_input_tokens_seen": 22918440,
"step": 1960
},
{
"epoch": 1.0118465104300798,
"grad_norm": 6.702666612997951,
"learning_rate": 5.312932121296339e-05,
"loss": 0.2929,
"num_input_tokens_seen": 22976944,
"step": 1965
},
{
"epoch": 1.014421838784445,
"grad_norm": 8.293945652706178,
"learning_rate": 5.291669285215766e-05,
"loss": 0.2804,
"num_input_tokens_seen": 23035408,
"step": 1970
},
{
"epoch": 1.0169971671388103,
"grad_norm": 9.067903045446336,
"learning_rate": 5.270401155140284e-05,
"loss": 0.2583,
"num_input_tokens_seen": 23093912,
"step": 1975
},
{
"epoch": 1.0195724954931753,
"grad_norm": 6.0243656792037905,
"learning_rate": 5.2491281171008476e-05,
"loss": 0.3232,
"num_input_tokens_seen": 23152344,
"step": 1980
},
{
"epoch": 1.0221478238475405,
"grad_norm": 11.796220731230777,
"learning_rate": 5.227850557217494e-05,
"loss": 0.2935,
"num_input_tokens_seen": 23210800,
"step": 1985
},
{
"epoch": 1.0247231522019058,
"grad_norm": 12.531746818475774,
"learning_rate": 5.2065688616923314e-05,
"loss": 0.2677,
"num_input_tokens_seen": 23269304,
"step": 1990
},
{
"epoch": 1.0272984805562708,
"grad_norm": 12.674176291351744,
"learning_rate": 5.185283416802539e-05,
"loss": 0.2697,
"num_input_tokens_seen": 23327800,
"step": 1995
},
{
"epoch": 1.029873808910636,
"grad_norm": 9.205507664261733,
"learning_rate": 5.1639946088933444e-05,
"loss": 0.2272,
"num_input_tokens_seen": 23386232,
"step": 2000
},
{
"epoch": 1.029873808910636,
"eval_loss": 0.2834003269672394,
"eval_runtime": 19.4052,
"eval_samples_per_second": 3.092,
"eval_steps_per_second": 0.773,
"num_input_tokens_seen": 23386232,
"step": 2000
},
{
"epoch": 1.0324491372650013,
"grad_norm": 11.997812805404342,
"learning_rate": 5.1427028243710174e-05,
"loss": 0.2455,
"num_input_tokens_seen": 23444712,
"step": 2005
},
{
"epoch": 1.0350244656193666,
"grad_norm": 5.4126484214019825,
"learning_rate": 5.121408449695856e-05,
"loss": 0.268,
"num_input_tokens_seen": 23503192,
"step": 2010
},
{
"epoch": 1.0375997939737316,
"grad_norm": 9.189153902836047,
"learning_rate": 5.100111871375173e-05,
"loss": 0.2721,
"num_input_tokens_seen": 23561688,
"step": 2015
},
{
"epoch": 1.0401751223280968,
"grad_norm": 13.140777565626072,
"learning_rate": 5.078813475956276e-05,
"loss": 0.3521,
"num_input_tokens_seen": 23620160,
"step": 2020
},
{
"epoch": 1.042750450682462,
"grad_norm": 12.09033726574419,
"learning_rate": 5.057513650019452e-05,
"loss": 0.3624,
"num_input_tokens_seen": 23678624,
"step": 2025
},
{
"epoch": 1.045325779036827,
"grad_norm": 16.274994870434952,
"learning_rate": 5.0362127801709614e-05,
"loss": 0.2742,
"num_input_tokens_seen": 23737064,
"step": 2030
},
{
"epoch": 1.0479011073911924,
"grad_norm": 7.84210743850582,
"learning_rate": 5.014911253036004e-05,
"loss": 0.3377,
"num_input_tokens_seen": 23795568,
"step": 2035
},
{
"epoch": 1.0504764357455576,
"grad_norm": 14.343911569057362,
"learning_rate": 4.993609455251713e-05,
"loss": 0.3059,
"num_input_tokens_seen": 23854040,
"step": 2040
},
{
"epoch": 1.0530517640999228,
"grad_norm": 13.491088361277422,
"learning_rate": 4.972307773460133e-05,
"loss": 0.2534,
"num_input_tokens_seen": 23912520,
"step": 2045
},
{
"epoch": 1.0556270924542879,
"grad_norm": 6.573215232214223,
"learning_rate": 4.9510065943012076e-05,
"loss": 0.2924,
"num_input_tokens_seen": 23971048,
"step": 2050
},
{
"epoch": 1.0556270924542879,
"eval_loss": 0.2703675329685211,
"eval_runtime": 19.3939,
"eval_samples_per_second": 3.094,
"eval_steps_per_second": 0.773,
"num_input_tokens_seen": 23971048,
"step": 2050
},
{
"epoch": 1.0582024208086531,
"grad_norm": 11.207729973488336,
"learning_rate": 4.929706304405748e-05,
"loss": 0.217,
"num_input_tokens_seen": 24029544,
"step": 2055
},
{
"epoch": 1.0607777491630184,
"grad_norm": 10.285736762582813,
"learning_rate": 4.9084072903884345e-05,
"loss": 0.2132,
"num_input_tokens_seen": 24088008,
"step": 2060
},
{
"epoch": 1.0633530775173834,
"grad_norm": 6.469754305471942,
"learning_rate": 4.887109938840783e-05,
"loss": 0.2438,
"num_input_tokens_seen": 24146480,
"step": 2065
},
{
"epoch": 1.0659284058717486,
"grad_norm": 7.546288384087562,
"learning_rate": 4.8658146363241406e-05,
"loss": 0.2623,
"num_input_tokens_seen": 24204984,
"step": 2070
},
{
"epoch": 1.0685037342261139,
"grad_norm": 4.658066041307915,
"learning_rate": 4.844521769362654e-05,
"loss": 0.2041,
"num_input_tokens_seen": 24263456,
"step": 2075
},
{
"epoch": 1.071079062580479,
"grad_norm": 6.232031131112278,
"learning_rate": 4.823231724436271e-05,
"loss": 0.246,
"num_input_tokens_seen": 24321960,
"step": 2080
},
{
"epoch": 1.0736543909348442,
"grad_norm": 10.347547308861373,
"learning_rate": 4.801944887973714e-05,
"loss": 0.2857,
"num_input_tokens_seen": 24380464,
"step": 2085
},
{
"epoch": 1.0762297192892094,
"grad_norm": 7.357894649180612,
"learning_rate": 4.7806616463454715e-05,
"loss": 0.2577,
"num_input_tokens_seen": 24438976,
"step": 2090
},
{
"epoch": 1.0788050476435747,
"grad_norm": 9.485170393194913,
"learning_rate": 4.759382385856779e-05,
"loss": 0.2523,
"num_input_tokens_seen": 24497448,
"step": 2095
},
{
"epoch": 1.0813803759979397,
"grad_norm": 6.0022045554501045,
"learning_rate": 4.738107492740619e-05,
"loss": 0.2805,
"num_input_tokens_seen": 24555904,
"step": 2100
},
{
"epoch": 1.0813803759979397,
"eval_loss": 0.3205481469631195,
"eval_runtime": 19.3676,
"eval_samples_per_second": 3.098,
"eval_steps_per_second": 0.774,
"num_input_tokens_seen": 24555904,
"step": 2100
},
{
"epoch": 1.083955704352305,
"grad_norm": 10.785937986790138,
"learning_rate": 4.716837353150695e-05,
"loss": 0.2942,
"num_input_tokens_seen": 24614392,
"step": 2105
},
{
"epoch": 1.0865310327066702,
"grad_norm": 7.201431051550072,
"learning_rate": 4.695572353154438e-05,
"loss": 0.2357,
"num_input_tokens_seen": 24672848,
"step": 2110
},
{
"epoch": 1.0891063610610352,
"grad_norm": 10.470411807057468,
"learning_rate": 4.674312878725985e-05,
"loss": 0.2476,
"num_input_tokens_seen": 24731344,
"step": 2115
},
{
"epoch": 1.0916816894154004,
"grad_norm": 10.029698195020993,
"learning_rate": 4.653059315739188e-05,
"loss": 0.3875,
"num_input_tokens_seen": 24789832,
"step": 2120
},
{
"epoch": 1.0942570177697657,
"grad_norm": 5.623217865601237,
"learning_rate": 4.631812049960595e-05,
"loss": 0.3313,
"num_input_tokens_seen": 24848264,
"step": 2125
},
{
"epoch": 1.0968323461241307,
"grad_norm": 4.705669392169022,
"learning_rate": 4.61057146704246e-05,
"loss": 0.2385,
"num_input_tokens_seen": 24906744,
"step": 2130
},
{
"epoch": 1.099407674478496,
"grad_norm": 9.334998248379781,
"learning_rate": 4.589337952515736e-05,
"loss": 0.2478,
"num_input_tokens_seen": 24965256,
"step": 2135
},
{
"epoch": 1.1019830028328612,
"grad_norm": 8.45475259780102,
"learning_rate": 4.5681118917830835e-05,
"loss": 0.214,
"num_input_tokens_seen": 25023768,
"step": 2140
},
{
"epoch": 1.1045583311872265,
"grad_norm": 23.234169875193494,
"learning_rate": 4.546893670111866e-05,
"loss": 0.2806,
"num_input_tokens_seen": 25082248,
"step": 2145
},
{
"epoch": 1.1071336595415915,
"grad_norm": 7.852580632471935,
"learning_rate": 4.525683672627168e-05,
"loss": 0.2536,
"num_input_tokens_seen": 25140752,
"step": 2150
},
{
"epoch": 1.1071336595415915,
"eval_loss": 0.30806976556777954,
"eval_runtime": 19.3086,
"eval_samples_per_second": 3.107,
"eval_steps_per_second": 0.777,
"num_input_tokens_seen": 25140752,
"step": 2150
},
{
"epoch": 1.1097089878959567,
"grad_norm": 25.69995406882716,
"learning_rate": 4.504482284304795e-05,
"loss": 0.2318,
"num_input_tokens_seen": 25199208,
"step": 2155
},
{
"epoch": 1.112284316250322,
"grad_norm": 7.248312268680657,
"learning_rate": 4.4832898899642974e-05,
"loss": 0.2371,
"num_input_tokens_seen": 25257680,
"step": 2160
},
{
"epoch": 1.114859644604687,
"grad_norm": 5.020751335158321,
"learning_rate": 4.462106874261969e-05,
"loss": 0.2517,
"num_input_tokens_seen": 25316200,
"step": 2165
},
{
"epoch": 1.1174349729590523,
"grad_norm": 6.9914691655915915,
"learning_rate": 4.440933621683882e-05,
"loss": 0.3029,
"num_input_tokens_seen": 25374656,
"step": 2170
},
{
"epoch": 1.1200103013134175,
"grad_norm": 16.522791503108277,
"learning_rate": 4.419770516538899e-05,
"loss": 0.3005,
"num_input_tokens_seen": 25433168,
"step": 2175
},
{
"epoch": 1.1225856296677827,
"grad_norm": 5.579772115611286,
"learning_rate": 4.398617942951703e-05,
"loss": 0.3059,
"num_input_tokens_seen": 25491664,
"step": 2180
},
{
"epoch": 1.1251609580221478,
"grad_norm": 5.159164617041174,
"learning_rate": 4.3774762848558124e-05,
"loss": 0.2385,
"num_input_tokens_seen": 25550128,
"step": 2185
},
{
"epoch": 1.127736286376513,
"grad_norm": 6.950405613058986,
"learning_rate": 4.356345925986637e-05,
"loss": 0.2795,
"num_input_tokens_seen": 25608624,
"step": 2190
},
{
"epoch": 1.1303116147308783,
"grad_norm": 11.442249196591066,
"learning_rate": 4.335227249874484e-05,
"loss": 0.2622,
"num_input_tokens_seen": 25667128,
"step": 2195
},
{
"epoch": 1.1328869430852433,
"grad_norm": 9.573659203425603,
"learning_rate": 4.314120639837619e-05,
"loss": 0.3184,
"num_input_tokens_seen": 25725560,
"step": 2200
},
{
"epoch": 1.1328869430852433,
"eval_loss": 0.24919146299362183,
"eval_runtime": 19.1706,
"eval_samples_per_second": 3.13,
"eval_steps_per_second": 0.782,
"num_input_tokens_seen": 25725560,
"step": 2200
},
{
"epoch": 1.1354622714396085,
"grad_norm": 18.05516134286847,
"learning_rate": 4.2930264789752974e-05,
"loss": 0.3121,
"num_input_tokens_seen": 25784048,
"step": 2205
},
{
"epoch": 1.1380375997939738,
"grad_norm": 5.276257572884321,
"learning_rate": 4.271945150160815e-05,
"loss": 0.2448,
"num_input_tokens_seen": 25842512,
"step": 2210
},
{
"epoch": 1.140612928148339,
"grad_norm": 9.772532033416248,
"learning_rate": 4.250877036034552e-05,
"loss": 0.3141,
"num_input_tokens_seen": 25900992,
"step": 2215
},
{
"epoch": 1.143188256502704,
"grad_norm": 4.984185148660987,
"learning_rate": 4.2298225189970424e-05,
"loss": 0.2589,
"num_input_tokens_seen": 25959448,
"step": 2220
},
{
"epoch": 1.1457635848570693,
"grad_norm": 5.610565971454716,
"learning_rate": 4.2087819812020155e-05,
"loss": 0.3071,
"num_input_tokens_seen": 26017936,
"step": 2225
},
{
"epoch": 1.1483389132114346,
"grad_norm": 6.203972820387514,
"learning_rate": 4.1877558045494735e-05,
"loss": 0.1913,
"num_input_tokens_seen": 26076440,
"step": 2230
},
{
"epoch": 1.1509142415657996,
"grad_norm": 6.8902811910693185,
"learning_rate": 4.166744370678748e-05,
"loss": 0.1998,
"num_input_tokens_seen": 26134896,
"step": 2235
},
{
"epoch": 1.1534895699201648,
"grad_norm": 13.51914586072303,
"learning_rate": 4.1457480609615866e-05,
"loss": 0.2382,
"num_input_tokens_seen": 26193368,
"step": 2240
},
{
"epoch": 1.15606489827453,
"grad_norm": 10.238408014223214,
"learning_rate": 4.124767256495217e-05,
"loss": 0.231,
"num_input_tokens_seen": 26251848,
"step": 2245
},
{
"epoch": 1.158640226628895,
"grad_norm": 3.473707001713829,
"learning_rate": 4.1038023380954415e-05,
"loss": 0.273,
"num_input_tokens_seen": 26310336,
"step": 2250
},
{
"epoch": 1.158640226628895,
"eval_loss": 0.22014015913009644,
"eval_runtime": 20.315,
"eval_samples_per_second": 2.953,
"eval_steps_per_second": 0.738,
"num_input_tokens_seen": 26310336,
"step": 2250
},
{
"epoch": 1.1612155549832603,
"grad_norm": 8.283787630987762,
"learning_rate": 4.0828536862897125e-05,
"loss": 0.2986,
"num_input_tokens_seen": 26368840,
"step": 2255
},
{
"epoch": 1.1637908833376256,
"grad_norm": 20.215982883136522,
"learning_rate": 4.061921681310241e-05,
"loss": 0.263,
"num_input_tokens_seen": 26427280,
"step": 2260
},
{
"epoch": 1.1663662116919906,
"grad_norm": 6.802291856358878,
"learning_rate": 4.04100670308708e-05,
"loss": 0.2282,
"num_input_tokens_seen": 26485760,
"step": 2265
},
{
"epoch": 1.1689415400463559,
"grad_norm": 8.985818465549714,
"learning_rate": 4.0201091312412394e-05,
"loss": 0.2311,
"num_input_tokens_seen": 26544224,
"step": 2270
},
{
"epoch": 1.1715168684007211,
"grad_norm": 8.383045025215052,
"learning_rate": 3.999229345077789e-05,
"loss": 0.2939,
"num_input_tokens_seen": 26602712,
"step": 2275
},
{
"epoch": 1.1740921967550864,
"grad_norm": 6.558546292884369,
"learning_rate": 3.978367723578981e-05,
"loss": 0.2165,
"num_input_tokens_seen": 26661160,
"step": 2280
},
{
"epoch": 1.1766675251094514,
"grad_norm": 7.108641756919162,
"learning_rate": 3.957524645397359e-05,
"loss": 0.1823,
"num_input_tokens_seen": 26719648,
"step": 2285
},
{
"epoch": 1.1792428534638166,
"grad_norm": 11.468609977960636,
"learning_rate": 3.936700488848899e-05,
"loss": 0.2827,
"num_input_tokens_seen": 26778112,
"step": 2290
},
{
"epoch": 1.1818181818181819,
"grad_norm": 11.548102910785317,
"learning_rate": 3.915895631906135e-05,
"loss": 0.2897,
"num_input_tokens_seen": 26836592,
"step": 2295
},
{
"epoch": 1.184393510172547,
"grad_norm": 4.826501140271565,
"learning_rate": 3.895110452191301e-05,
"loss": 0.2903,
"num_input_tokens_seen": 26895096,
"step": 2300
},
{
"epoch": 1.184393510172547,
"eval_loss": 0.2940039336681366,
"eval_runtime": 20.1001,
"eval_samples_per_second": 2.985,
"eval_steps_per_second": 0.746,
"num_input_tokens_seen": 26895096,
"step": 2300
},
{
"epoch": 1.1869688385269122,
"grad_norm": 14.75757212453522,
"learning_rate": 3.874345326969471e-05,
"loss": 0.2475,
"num_input_tokens_seen": 26953560,
"step": 2305
},
{
"epoch": 1.1895441668812774,
"grad_norm": 8.582159713261374,
"learning_rate": 3.853600633141725e-05,
"loss": 0.2921,
"num_input_tokens_seen": 27012024,
"step": 2310
},
{
"epoch": 1.1921194952356426,
"grad_norm": 5.07888266594019,
"learning_rate": 3.8328767472382896e-05,
"loss": 0.2065,
"num_input_tokens_seen": 27070520,
"step": 2315
},
{
"epoch": 1.1946948235900077,
"grad_norm": 9.693667990788615,
"learning_rate": 3.812174045411724e-05,
"loss": 0.2232,
"num_input_tokens_seen": 27128984,
"step": 2320
},
{
"epoch": 1.197270151944373,
"grad_norm": 8.720131927787088,
"learning_rate": 3.791492903430071e-05,
"loss": 0.266,
"num_input_tokens_seen": 27187432,
"step": 2325
},
{
"epoch": 1.1998454802987382,
"grad_norm": 4.72913904175421,
"learning_rate": 3.7708336966700574e-05,
"loss": 0.2451,
"num_input_tokens_seen": 27245920,
"step": 2330
},
{
"epoch": 1.2024208086531032,
"grad_norm": 7.832988091306638,
"learning_rate": 3.7501968001102634e-05,
"loss": 0.2031,
"num_input_tokens_seen": 27304384,
"step": 2335
},
{
"epoch": 1.2049961370074684,
"grad_norm": 9.913047640861075,
"learning_rate": 3.7295825883243306e-05,
"loss": 0.2605,
"num_input_tokens_seen": 27362864,
"step": 2340
},
{
"epoch": 1.2075714653618337,
"grad_norm": 15.020677650419302,
"learning_rate": 3.708991435474151e-05,
"loss": 0.2293,
"num_input_tokens_seen": 27421360,
"step": 2345
},
{
"epoch": 1.210146793716199,
"grad_norm": 15.606205008499776,
"learning_rate": 3.688423715303082e-05,
"loss": 0.2757,
"num_input_tokens_seen": 27479840,
"step": 2350
},
{
"epoch": 1.210146793716199,
"eval_loss": 0.26213815808296204,
"eval_runtime": 19.7609,
"eval_samples_per_second": 3.036,
"eval_steps_per_second": 0.759,
"num_input_tokens_seen": 27479840,
"step": 2350
},
{
"epoch": 1.212722122070564,
"grad_norm": 9.144283973474371,
"learning_rate": 3.6678798011291674e-05,
"loss": 0.242,
"num_input_tokens_seen": 27538344,
"step": 2355
},
{
"epoch": 1.2152974504249292,
"grad_norm": 12.80063906167105,
"learning_rate": 3.647360065838348e-05,
"loss": 0.2451,
"num_input_tokens_seen": 27596808,
"step": 2360
},
{
"epoch": 1.2178727787792945,
"grad_norm": 3.44448459243246,
"learning_rate": 3.6268648818777105e-05,
"loss": 0.2237,
"num_input_tokens_seen": 27655272,
"step": 2365
},
{
"epoch": 1.2204481071336595,
"grad_norm": 14.301919876267943,
"learning_rate": 3.606394621248709e-05,
"loss": 0.2033,
"num_input_tokens_seen": 27713784,
"step": 2370
},
{
"epoch": 1.2230234354880247,
"grad_norm": 11.349699284999199,
"learning_rate": 3.585949655500429e-05,
"loss": 0.2741,
"num_input_tokens_seen": 27772248,
"step": 2375
},
{
"epoch": 1.22559876384239,
"grad_norm": 4.036313729279471,
"learning_rate": 3.5655303557228335e-05,
"loss": 0.2677,
"num_input_tokens_seen": 27830704,
"step": 2380
},
{
"epoch": 1.228174092196755,
"grad_norm": 5.042006341816937,
"learning_rate": 3.545137092540035e-05,
"loss": 0.2117,
"num_input_tokens_seen": 27889160,
"step": 2385
},
{
"epoch": 1.2307494205511202,
"grad_norm": 4.191635671104207,
"learning_rate": 3.524770236103556e-05,
"loss": 0.2216,
"num_input_tokens_seen": 27947608,
"step": 2390
},
{
"epoch": 1.2333247489054855,
"grad_norm": 13.530120109841187,
"learning_rate": 3.504430156085629e-05,
"loss": 0.28,
"num_input_tokens_seen": 28006112,
"step": 2395
},
{
"epoch": 1.2359000772598505,
"grad_norm": 23.252663154179324,
"learning_rate": 3.484117221672465e-05,
"loss": 0.2766,
"num_input_tokens_seen": 28064552,
"step": 2400
},
{
"epoch": 1.2359000772598505,
"eval_loss": 0.2361450344324112,
"eval_runtime": 19.8674,
"eval_samples_per_second": 3.02,
"eval_steps_per_second": 0.755,
"num_input_tokens_seen": 28064552,
"step": 2400
},
{
"epoch": 1.2384754056142158,
"grad_norm": 16.379510205091783,
"learning_rate": 3.463831801557577e-05,
"loss": 0.1933,
"num_input_tokens_seen": 28123016,
"step": 2405
},
{
"epoch": 1.241050733968581,
"grad_norm": 3.6631986381781556,
"learning_rate": 3.443574263935062e-05,
"loss": 0.2014,
"num_input_tokens_seen": 28181464,
"step": 2410
},
{
"epoch": 1.2436260623229463,
"grad_norm": 5.654529592966364,
"learning_rate": 3.42334497649294e-05,
"loss": 0.2383,
"num_input_tokens_seen": 28239920,
"step": 2415
},
{
"epoch": 1.2462013906773113,
"grad_norm": 14.005033039787135,
"learning_rate": 3.403144306406466e-05,
"loss": 0.2074,
"num_input_tokens_seen": 28298432,
"step": 2420
},
{
"epoch": 1.2487767190316765,
"grad_norm": 7.470199127973674,
"learning_rate": 3.382972620331475e-05,
"loss": 0.2202,
"num_input_tokens_seen": 28356904,
"step": 2425
},
{
"epoch": 1.2513520473860418,
"grad_norm": 3.9239650586287484,
"learning_rate": 3.362830284397716e-05,
"loss": 0.1756,
"num_input_tokens_seen": 28415384,
"step": 2430
},
{
"epoch": 1.2539273757404068,
"grad_norm": 32.67443269914418,
"learning_rate": 3.342717664202223e-05,
"loss": 0.2564,
"num_input_tokens_seen": 28473848,
"step": 2435
},
{
"epoch": 1.256502704094772,
"grad_norm": 8.79661830656602,
"learning_rate": 3.322635124802658e-05,
"loss": 0.242,
"num_input_tokens_seen": 28532312,
"step": 2440
},
{
"epoch": 1.2590780324491373,
"grad_norm": 9.41003020617472,
"learning_rate": 3.3025830307107035e-05,
"loss": 0.3455,
"num_input_tokens_seen": 28590784,
"step": 2445
},
{
"epoch": 1.2616533608035025,
"grad_norm": 23.884422578805488,
"learning_rate": 3.2825617458854376e-05,
"loss": 0.3076,
"num_input_tokens_seen": 28649256,
"step": 2450
},
{
"epoch": 1.2616533608035025,
"eval_loss": 0.2371988147497177,
"eval_runtime": 19.6997,
"eval_samples_per_second": 3.046,
"eval_steps_per_second": 0.761,
"num_input_tokens_seen": 28649256,
"step": 2450
},
{
"epoch": 1.2642286891578676,
"grad_norm": 13.48575395754194,
"learning_rate": 3.2625716337267295e-05,
"loss": 0.2041,
"num_input_tokens_seen": 28707736,
"step": 2455
},
{
"epoch": 1.2668040175122328,
"grad_norm": 3.1641318472119986,
"learning_rate": 3.242613057068641e-05,
"loss": 0.3022,
"num_input_tokens_seen": 28766232,
"step": 2460
},
{
"epoch": 1.269379345866598,
"grad_norm": 17.472204317681474,
"learning_rate": 3.222686378172847e-05,
"loss": 0.2756,
"num_input_tokens_seen": 28824688,
"step": 2465
},
{
"epoch": 1.271954674220963,
"grad_norm": 4.051662934956441,
"learning_rate": 3.2027919587220516e-05,
"loss": 0.2395,
"num_input_tokens_seen": 28883176,
"step": 2470
},
{
"epoch": 1.2745300025753283,
"grad_norm": 5.058807389542336,
"learning_rate": 3.1829301598134355e-05,
"loss": 0.2204,
"num_input_tokens_seen": 28941592,
"step": 2475
},
{
"epoch": 1.2771053309296936,
"grad_norm": 4.820344297526228,
"learning_rate": 3.1631013419520855e-05,
"loss": 0.2477,
"num_input_tokens_seen": 29000064,
"step": 2480
},
{
"epoch": 1.2796806592840588,
"grad_norm": 6.098120285465299,
"learning_rate": 3.143305865044467e-05,
"loss": 0.2645,
"num_input_tokens_seen": 29058496,
"step": 2485
},
{
"epoch": 1.2822559876384239,
"grad_norm": 10.021136804794267,
"learning_rate": 3.123544088391881e-05,
"loss": 0.2184,
"num_input_tokens_seen": 29116992,
"step": 2490
},
{
"epoch": 1.284831315992789,
"grad_norm": 32.07716009409277,
"learning_rate": 3.10381637068395e-05,
"loss": 0.1857,
"num_input_tokens_seen": 29175496,
"step": 2495
},
{
"epoch": 1.2874066443471541,
"grad_norm": 10.697788786097991,
"learning_rate": 3.084123069992096e-05,
"loss": 0.257,
"num_input_tokens_seen": 29233968,
"step": 2500
},
{
"epoch": 1.2874066443471541,
"eval_loss": 0.2488754242658615,
"eval_runtime": 19.5416,
"eval_samples_per_second": 3.07,
"eval_steps_per_second": 0.768,
"num_input_tokens_seen": 29233968,
"step": 2500
},
{
"epoch": 1.2899819727015194,
"grad_norm": 3.8096764598668864,
"learning_rate": 3.064464543763057e-05,
"loss": 0.1617,
"num_input_tokens_seen": 29292464,
"step": 2505
},
{
"epoch": 1.2925573010558846,
"grad_norm": 8.299858124153992,
"learning_rate": 3.0448411488123862e-05,
"loss": 0.2068,
"num_input_tokens_seen": 29350952,
"step": 2510
},
{
"epoch": 1.2951326294102499,
"grad_norm": 10.959075314673317,
"learning_rate": 3.0252532413179825e-05,
"loss": 0.2188,
"num_input_tokens_seen": 29409432,
"step": 2515
},
{
"epoch": 1.2977079577646151,
"grad_norm": 7.014945794108391,
"learning_rate": 3.0057011768136223e-05,
"loss": 0.1948,
"num_input_tokens_seen": 29467880,
"step": 2520
},
{
"epoch": 1.3002832861189801,
"grad_norm": 5.3992920228602665,
"learning_rate": 2.9861853101825094e-05,
"loss": 0.2366,
"num_input_tokens_seen": 29526384,
"step": 2525
},
{
"epoch": 1.3028586144733454,
"grad_norm": 7.369149060264004,
"learning_rate": 2.966705995650827e-05,
"loss": 0.2601,
"num_input_tokens_seen": 29584880,
"step": 2530
},
{
"epoch": 1.3054339428277104,
"grad_norm": 8.85591028977132,
"learning_rate": 2.9472635867813185e-05,
"loss": 0.1913,
"num_input_tokens_seen": 29643344,
"step": 2535
},
{
"epoch": 1.3080092711820757,
"grad_norm": 6.292489461251922,
"learning_rate": 2.927858436466861e-05,
"loss": 0.2,
"num_input_tokens_seen": 29701864,
"step": 2540
},
{
"epoch": 1.310584599536441,
"grad_norm": 10.28646529424139,
"learning_rate": 2.9084908969240664e-05,
"loss": 0.2503,
"num_input_tokens_seen": 29760376,
"step": 2545
},
{
"epoch": 1.3131599278908062,
"grad_norm": 6.3979585721541135,
"learning_rate": 2.8891613196868816e-05,
"loss": 0.2192,
"num_input_tokens_seen": 29818856,
"step": 2550
},
{
"epoch": 1.3131599278908062,
"eval_loss": 0.24317465722560883,
"eval_runtime": 19.477,
"eval_samples_per_second": 3.081,
"eval_steps_per_second": 0.77,
"num_input_tokens_seen": 29818856,
"step": 2550
},
{
"epoch": 1.3157352562451712,
"grad_norm": 5.864909751734038,
"learning_rate": 2.8698700556002178e-05,
"loss": 0.2231,
"num_input_tokens_seen": 29877288,
"step": 2555
},
{
"epoch": 1.3183105845995364,
"grad_norm": 11.12053960539336,
"learning_rate": 2.8506174548135695e-05,
"loss": 0.3007,
"num_input_tokens_seen": 29935776,
"step": 2560
},
{
"epoch": 1.3208859129539017,
"grad_norm": 5.495356583672569,
"learning_rate": 2.8314038667746732e-05,
"loss": 0.1895,
"num_input_tokens_seen": 29994232,
"step": 2565
},
{
"epoch": 1.3234612413082667,
"grad_norm": 11.482142819244615,
"learning_rate": 2.81222964022315e-05,
"loss": 0.2728,
"num_input_tokens_seen": 30052760,
"step": 2570
},
{
"epoch": 1.326036569662632,
"grad_norm": 8.217608228490523,
"learning_rate": 2.793095123184193e-05,
"loss": 0.2827,
"num_input_tokens_seen": 30111200,
"step": 2575
},
{
"epoch": 1.3286118980169972,
"grad_norm": 5.535041581031614,
"learning_rate": 2.77400066296223e-05,
"loss": 0.2191,
"num_input_tokens_seen": 30169680,
"step": 2580
},
{
"epoch": 1.3311872263713624,
"grad_norm": 6.639368336077178,
"learning_rate": 2.7549466061346386e-05,
"loss": 0.2317,
"num_input_tokens_seen": 30228176,
"step": 2585
},
{
"epoch": 1.3337625547257275,
"grad_norm": 5.625387657466934,
"learning_rate": 2.7359332985454443e-05,
"loss": 0.2293,
"num_input_tokens_seen": 30286656,
"step": 2590
},
{
"epoch": 1.3363378830800927,
"grad_norm": 3.631245501478199,
"learning_rate": 2.716961085299049e-05,
"loss": 0.1903,
"num_input_tokens_seen": 30345160,
"step": 2595
},
{
"epoch": 1.338913211434458,
"grad_norm": 11.391263066967507,
"learning_rate": 2.698030310753958e-05,
"loss": 0.224,
"num_input_tokens_seen": 30403640,
"step": 2600
},
{
"epoch": 1.338913211434458,
"eval_loss": 0.20262545347213745,
"eval_runtime": 19.6125,
"eval_samples_per_second": 3.059,
"eval_steps_per_second": 0.765,
"num_input_tokens_seen": 30403640,
"step": 2600
},
{
"epoch": 1.341488539788823,
"grad_norm": 8.177384294556918,
"learning_rate": 2.6791413185165442e-05,
"loss": 0.2563,
"num_input_tokens_seen": 30462128,
"step": 2605
},
{
"epoch": 1.3440638681431882,
"grad_norm": 6.121025769450026,
"learning_rate": 2.660294451434796e-05,
"loss": 0.235,
"num_input_tokens_seen": 30520616,
"step": 2610
},
{
"epoch": 1.3466391964975535,
"grad_norm": 8.690869571731369,
"learning_rate": 2.641490051592107e-05,
"loss": 0.2113,
"num_input_tokens_seen": 30579024,
"step": 2615
},
{
"epoch": 1.3492145248519187,
"grad_norm": 7.285970612374189,
"learning_rate": 2.6227284603010604e-05,
"loss": 0.216,
"num_input_tokens_seen": 30637464,
"step": 2620
},
{
"epoch": 1.3517898532062838,
"grad_norm": 5.682668591040817,
"learning_rate": 2.6040100180972355e-05,
"loss": 0.2599,
"num_input_tokens_seen": 30695936,
"step": 2625
},
{
"epoch": 1.354365181560649,
"grad_norm": 5.815763005326374,
"learning_rate": 2.585335064733022e-05,
"loss": 0.2421,
"num_input_tokens_seen": 30754408,
"step": 2630
},
{
"epoch": 1.356940509915014,
"grad_norm": 6.8726858718673975,
"learning_rate": 2.5667039391714666e-05,
"loss": 0.2095,
"num_input_tokens_seen": 30812848,
"step": 2635
},
{
"epoch": 1.3595158382693793,
"grad_norm": 10.374721228070925,
"learning_rate": 2.5481169795801008e-05,
"loss": 0.2223,
"num_input_tokens_seen": 30871344,
"step": 2640
},
{
"epoch": 1.3620911666237445,
"grad_norm": 4.502700102041516,
"learning_rate": 2.5295745233248248e-05,
"loss": 0.2273,
"num_input_tokens_seen": 30929824,
"step": 2645
},
{
"epoch": 1.3646664949781098,
"grad_norm": 6.715540062682421,
"learning_rate": 2.511076906963763e-05,
"loss": 0.2377,
"num_input_tokens_seen": 30988344,
"step": 2650
},
{
"epoch": 1.3646664949781098,
"eval_loss": 0.18780523538589478,
"eval_runtime": 19.4103,
"eval_samples_per_second": 3.091,
"eval_steps_per_second": 0.773,
"num_input_tokens_seen": 30988344,
"step": 2650
},
{
"epoch": 1.367241823332475,
"grad_norm": 7.2025231601123085,
"learning_rate": 2.4926244662411734e-05,
"loss": 0.2109,
"num_input_tokens_seen": 31046848,
"step": 2655
},
{
"epoch": 1.36981715168684,
"grad_norm": 4.944078503740031,
"learning_rate": 2.474217536081342e-05,
"loss": 0.2544,
"num_input_tokens_seen": 31105352,
"step": 2660
},
{
"epoch": 1.3723924800412053,
"grad_norm": 20.15441745302564,
"learning_rate": 2.4558564505825088e-05,
"loss": 0.2616,
"num_input_tokens_seen": 31163848,
"step": 2665
},
{
"epoch": 1.3749678083955703,
"grad_norm": 9.97997619530482,
"learning_rate": 2.4375415430107977e-05,
"loss": 0.1722,
"num_input_tokens_seen": 31222360,
"step": 2670
},
{
"epoch": 1.3775431367499356,
"grad_norm": 2.1763725749744465,
"learning_rate": 2.4192731457941805e-05,
"loss": 0.1966,
"num_input_tokens_seen": 31280840,
"step": 2675
},
{
"epoch": 1.3801184651043008,
"grad_norm": 10.60754914489294,
"learning_rate": 2.4010515905164243e-05,
"loss": 0.2578,
"num_input_tokens_seen": 31339264,
"step": 2680
},
{
"epoch": 1.382693793458666,
"grad_norm": 15.190148047284161,
"learning_rate": 2.3828772079110907e-05,
"loss": 0.3076,
"num_input_tokens_seen": 31397744,
"step": 2685
},
{
"epoch": 1.385269121813031,
"grad_norm": 20.889251542966374,
"learning_rate": 2.3647503278555233e-05,
"loss": 0.1911,
"num_input_tokens_seen": 31456240,
"step": 2690
},
{
"epoch": 1.3878444501673963,
"grad_norm": 4.797824757647722,
"learning_rate": 2.3466712793648638e-05,
"loss": 0.2204,
"num_input_tokens_seen": 31514744,
"step": 2695
},
{
"epoch": 1.3904197785217616,
"grad_norm": 16.36975645045513,
"learning_rate": 2.3286403905860733e-05,
"loss": 0.2269,
"num_input_tokens_seen": 31573240,
"step": 2700
},
{
"epoch": 1.3904197785217616,
"eval_loss": 0.24001093208789825,
"eval_runtime": 19.3768,
"eval_samples_per_second": 3.096,
"eval_steps_per_second": 0.774,
"num_input_tokens_seen": 31573240,
"step": 2700
},
{
"epoch": 1.3929951068761266,
"grad_norm": 23.383619982845065,
"learning_rate": 2.3106579887919894e-05,
"loss": 0.2339,
"num_input_tokens_seen": 31631688,
"step": 2705
},
{
"epoch": 1.3955704352304918,
"grad_norm": 4.588108718468969,
"learning_rate": 2.2927244003753694e-05,
"loss": 0.2197,
"num_input_tokens_seen": 31690200,
"step": 2710
},
{
"epoch": 1.398145763584857,
"grad_norm": 3.4659557649616284,
"learning_rate": 2.2748399508429834e-05,
"loss": 0.1999,
"num_input_tokens_seen": 31748664,
"step": 2715
},
{
"epoch": 1.4007210919392223,
"grad_norm": 5.297859489588456,
"learning_rate": 2.2570049648096902e-05,
"loss": 0.2757,
"num_input_tokens_seen": 31807176,
"step": 2720
},
{
"epoch": 1.4032964202935874,
"grad_norm": 8.718394323594142,
"learning_rate": 2.239219765992557e-05,
"loss": 0.2128,
"num_input_tokens_seen": 31865696,
"step": 2725
},
{
"epoch": 1.4058717486479526,
"grad_norm": 11.91241028531398,
"learning_rate": 2.221484677204978e-05,
"loss": 0.2657,
"num_input_tokens_seen": 31924200,
"step": 2730
},
{
"epoch": 1.4084470770023179,
"grad_norm": 6.407371884386196,
"learning_rate": 2.203800020350819e-05,
"loss": 0.2349,
"num_input_tokens_seen": 31982712,
"step": 2735
},
{
"epoch": 1.4110224053566829,
"grad_norm": 4.998470631619502,
"learning_rate": 2.1861661164185652e-05,
"loss": 0.2188,
"num_input_tokens_seen": 32041176,
"step": 2740
},
{
"epoch": 1.4135977337110481,
"grad_norm": 6.503122447364567,
"learning_rate": 2.1685832854755115e-05,
"loss": 0.2531,
"num_input_tokens_seen": 32099656,
"step": 2745
},
{
"epoch": 1.4161730620654134,
"grad_norm": 3.8963890551505123,
"learning_rate": 2.1510518466619334e-05,
"loss": 0.1416,
"num_input_tokens_seen": 32158144,
"step": 2750
},
{
"epoch": 1.4161730620654134,
"eval_loss": 0.24715903401374817,
"eval_runtime": 19.5042,
"eval_samples_per_second": 3.076,
"eval_steps_per_second": 0.769,
"num_input_tokens_seen": 32158144,
"step": 2750
},
{
"epoch": 1.4187483904197786,
"grad_norm": 8.62691802455825,
"learning_rate": 2.133572118185313e-05,
"loss": 0.2459,
"num_input_tokens_seen": 32216656,
"step": 2755
},
{
"epoch": 1.4213237187741437,
"grad_norm": 11.518284151131217,
"learning_rate": 2.1161444173145468e-05,
"loss": 0.2656,
"num_input_tokens_seen": 32275168,
"step": 2760
},
{
"epoch": 1.423899047128509,
"grad_norm": 5.717001204241147,
"learning_rate": 2.098769060374206e-05,
"loss": 0.199,
"num_input_tokens_seen": 32333664,
"step": 2765
},
{
"epoch": 1.4264743754828741,
"grad_norm": 5.060469427631098,
"learning_rate": 2.0814463627387744e-05,
"loss": 0.2552,
"num_input_tokens_seen": 32392088,
"step": 2770
},
{
"epoch": 1.4290497038372392,
"grad_norm": 10.068742247308663,
"learning_rate": 2.0641766388269345e-05,
"loss": 0.2123,
"num_input_tokens_seen": 32450584,
"step": 2775
},
{
"epoch": 1.4316250321916044,
"grad_norm": 8.04329511861408,
"learning_rate": 2.046960202095866e-05,
"loss": 0.1827,
"num_input_tokens_seen": 32509096,
"step": 2780
},
{
"epoch": 1.4342003605459697,
"grad_norm": 5.111975388475009,
"learning_rate": 2.0297973650355435e-05,
"loss": 0.3038,
"num_input_tokens_seen": 32567560,
"step": 2785
},
{
"epoch": 1.436775688900335,
"grad_norm": 2.2165480876124666,
"learning_rate": 2.012688439163075e-05,
"loss": 0.2078,
"num_input_tokens_seen": 32626056,
"step": 2790
},
{
"epoch": 1.4393510172547,
"grad_norm": 5.208837069631258,
"learning_rate": 1.9956337350170446e-05,
"loss": 0.23,
"num_input_tokens_seen": 32684520,
"step": 2795
},
{
"epoch": 1.4419263456090652,
"grad_norm": 14.804471075670579,
"learning_rate": 1.978633562151875e-05,
"loss": 0.2162,
"num_input_tokens_seen": 32743032,
"step": 2800
},
{
"epoch": 1.4419263456090652,
"eval_loss": 0.2771253287792206,
"eval_runtime": 19.4256,
"eval_samples_per_second": 3.089,
"eval_steps_per_second": 0.772,
"num_input_tokens_seen": 32743032,
"step": 2800
},
{
"epoch": 1.4445016739634302,
"grad_norm": 5.47081802170478,
"learning_rate": 1.9616882291322043e-05,
"loss": 0.2128,
"num_input_tokens_seen": 32801504,
"step": 2805
},
{
"epoch": 1.4470770023177955,
"grad_norm": 8.153473173608221,
"learning_rate": 1.9447980435272982e-05,
"loss": 0.226,
"num_input_tokens_seen": 32860032,
"step": 2810
},
{
"epoch": 1.4496523306721607,
"grad_norm": 3.2012899866356275,
"learning_rate": 1.9279633119054524e-05,
"loss": 0.1945,
"num_input_tokens_seen": 32918472,
"step": 2815
},
{
"epoch": 1.452227659026526,
"grad_norm": 3.275737115662877,
"learning_rate": 1.9111843398284412e-05,
"loss": 0.1593,
"num_input_tokens_seen": 32976944,
"step": 2820
},
{
"epoch": 1.4548029873808912,
"grad_norm": 5.6817295869650755,
"learning_rate": 1.8944614318459604e-05,
"loss": 0.2154,
"num_input_tokens_seen": 33035424,
"step": 2825
},
{
"epoch": 1.4573783157352562,
"grad_norm": 4.248487908153702,
"learning_rate": 1.8777948914901066e-05,
"loss": 0.2266,
"num_input_tokens_seen": 33093880,
"step": 2830
},
{
"epoch": 1.4599536440896215,
"grad_norm": 4.65833753095402,
"learning_rate": 1.8611850212698678e-05,
"loss": 0.1948,
"num_input_tokens_seen": 33152328,
"step": 2835
},
{
"epoch": 1.4625289724439865,
"grad_norm": 6.9576190671694205,
"learning_rate": 1.84463212266563e-05,
"loss": 0.2159,
"num_input_tokens_seen": 33210816,
"step": 2840
},
{
"epoch": 1.4651043007983517,
"grad_norm": 28.44341760505862,
"learning_rate": 1.8281364961237013e-05,
"loss": 0.2326,
"num_input_tokens_seen": 33269320,
"step": 2845
},
{
"epoch": 1.467679629152717,
"grad_norm": 12.408239979973144,
"learning_rate": 1.8116984410508696e-05,
"loss": 0.1912,
"num_input_tokens_seen": 33327720,
"step": 2850
},
{
"epoch": 1.467679629152717,
"eval_loss": 0.2647402584552765,
"eval_runtime": 19.6149,
"eval_samples_per_second": 3.059,
"eval_steps_per_second": 0.765,
"num_input_tokens_seen": 33327720,
"step": 2850
},
{
"epoch": 1.4702549575070822,
"grad_norm": 8.379574283067335,
"learning_rate": 1.795318255808956e-05,
"loss": 0.2166,
"num_input_tokens_seen": 33386160,
"step": 2855
},
{
"epoch": 1.4728302858614473,
"grad_norm": 9.376274649781424,
"learning_rate": 1.7789962377094088e-05,
"loss": 0.1843,
"num_input_tokens_seen": 33444648,
"step": 2860
},
{
"epoch": 1.4754056142158125,
"grad_norm": 2.5653090157172724,
"learning_rate": 1.762732683007902e-05,
"loss": 0.1988,
"num_input_tokens_seen": 33503144,
"step": 2865
},
{
"epoch": 1.4779809425701778,
"grad_norm": 12.205543974310553,
"learning_rate": 1.746527886898962e-05,
"loss": 0.1715,
"num_input_tokens_seen": 33561600,
"step": 2870
},
{
"epoch": 1.4805562709245428,
"grad_norm": 2.016900668078144,
"learning_rate": 1.7303821435105998e-05,
"loss": 0.1577,
"num_input_tokens_seen": 33620056,
"step": 2875
},
{
"epoch": 1.483131599278908,
"grad_norm": 7.53734329898392,
"learning_rate": 1.714295745898989e-05,
"loss": 0.2204,
"num_input_tokens_seen": 33678512,
"step": 2880
},
{
"epoch": 1.4857069276332733,
"grad_norm": 25.928242388695963,
"learning_rate": 1.6982689860431283e-05,
"loss": 0.2283,
"num_input_tokens_seen": 33736960,
"step": 2885
},
{
"epoch": 1.4882822559876385,
"grad_norm": 21.45493152674021,
"learning_rate": 1.682302154839558e-05,
"loss": 0.2632,
"num_input_tokens_seen": 33795416,
"step": 2890
},
{
"epoch": 1.4908575843420036,
"grad_norm": 8.396238950090229,
"learning_rate": 1.6663955420970667e-05,
"loss": 0.1844,
"num_input_tokens_seen": 33853936,
"step": 2895
},
{
"epoch": 1.4934329126963688,
"grad_norm": 4.710042357794161,
"learning_rate": 1.650549436531442e-05,
"loss": 0.2015,
"num_input_tokens_seen": 33912440,
"step": 2900
},
{
"epoch": 1.4934329126963688,
"eval_loss": 0.23923428356647491,
"eval_runtime": 19.3819,
"eval_samples_per_second": 3.096,
"eval_steps_per_second": 0.774,
"num_input_tokens_seen": 33912440,
"step": 2900
},
{
"epoch": 1.496008241050734,
"grad_norm": 4.859449394658537,
"learning_rate": 1.6347641257602236e-05,
"loss": 0.2244,
"num_input_tokens_seen": 33970888,
"step": 2905
},
{
"epoch": 1.498583569405099,
"grad_norm": 3.9094863370380684,
"learning_rate": 1.6190398962974856e-05,
"loss": 0.1864,
"num_input_tokens_seen": 34029376,
"step": 2910
},
{
"epoch": 1.5011588977594643,
"grad_norm": 2.8103187330737267,
"learning_rate": 1.6033770335486305e-05,
"loss": 0.1969,
"num_input_tokens_seen": 34087864,
"step": 2915
},
{
"epoch": 1.5037342261138296,
"grad_norm": 6.033935213601098,
"learning_rate": 1.5877758218052207e-05,
"loss": 0.2449,
"num_input_tokens_seen": 34146296,
"step": 2920
},
{
"epoch": 1.5063095544681948,
"grad_norm": 4.397544925205015,
"learning_rate": 1.572236544239803e-05,
"loss": 0.155,
"num_input_tokens_seen": 34204800,
"step": 2925
},
{
"epoch": 1.5088848828225598,
"grad_norm": 5.933520955975732,
"learning_rate": 1.5567594829007835e-05,
"loss": 0.2031,
"num_input_tokens_seen": 34263296,
"step": 2930
},
{
"epoch": 1.511460211176925,
"grad_norm": 6.341695577358793,
"learning_rate": 1.541344918707299e-05,
"loss": 0.1917,
"num_input_tokens_seen": 34321792,
"step": 2935
},
{
"epoch": 1.51403553953129,
"grad_norm": 4.985028365945458,
"learning_rate": 1.525993131444123e-05,
"loss": 0.1609,
"num_input_tokens_seen": 34380264,
"step": 2940
},
{
"epoch": 1.5166108678856554,
"grad_norm": 6.463643326585979,
"learning_rate": 1.5107043997565795e-05,
"loss": 0.2217,
"num_input_tokens_seen": 34438752,
"step": 2945
},
{
"epoch": 1.5191861962400206,
"grad_norm": 15.279246366044804,
"learning_rate": 1.495479001145499e-05,
"loss": 0.2069,
"num_input_tokens_seen": 34497216,
"step": 2950
},
{
"epoch": 1.5191861962400206,
"eval_loss": 0.2638954222202301,
"eval_runtime": 19.2939,
"eval_samples_per_second": 3.11,
"eval_steps_per_second": 0.777,
"num_input_tokens_seen": 34497216,
"step": 2950
},
{
"epoch": 1.5217615245943859,
"grad_norm": 7.283233671161431,
"learning_rate": 1.4803172119621661e-05,
"loss": 0.2811,
"num_input_tokens_seen": 34555704,
"step": 2955
},
{
"epoch": 1.524336852948751,
"grad_norm": 5.1545528457087295,
"learning_rate": 1.4652193074033188e-05,
"loss": 0.2056,
"num_input_tokens_seen": 34614168,
"step": 2960
},
{
"epoch": 1.5269121813031161,
"grad_norm": 5.345680235559109,
"learning_rate": 1.4501855615061376e-05,
"loss": 0.2097,
"num_input_tokens_seen": 34672624,
"step": 2965
},
{
"epoch": 1.5294875096574814,
"grad_norm": 5.662685942725516,
"learning_rate": 1.4352162471432872e-05,
"loss": 0.1913,
"num_input_tokens_seen": 34731056,
"step": 2970
},
{
"epoch": 1.5320628380118464,
"grad_norm": 7.857317097258681,
"learning_rate": 1.420311636017953e-05,
"loss": 0.1996,
"num_input_tokens_seen": 34789552,
"step": 2975
},
{
"epoch": 1.5346381663662116,
"grad_norm": 5.32739810275719,
"learning_rate": 1.4054719986589148e-05,
"loss": 0.2102,
"num_input_tokens_seen": 34848064,
"step": 2980
},
{
"epoch": 1.537213494720577,
"grad_norm": 18.39581577669835,
"learning_rate": 1.39069760441563e-05,
"loss": 0.232,
"num_input_tokens_seen": 34906592,
"step": 2985
},
{
"epoch": 1.5397888230749421,
"grad_norm": 8.502936446963005,
"learning_rate": 1.3759887214533557e-05,
"loss": 0.2109,
"num_input_tokens_seen": 34965064,
"step": 2990
},
{
"epoch": 1.5423641514293074,
"grad_norm": 5.97229161259624,
"learning_rate": 1.3613456167482685e-05,
"loss": 0.1989,
"num_input_tokens_seen": 35023552,
"step": 2995
},
{
"epoch": 1.5449394797836724,
"grad_norm": 17.874594504088744,
"learning_rate": 1.3467685560826315e-05,
"loss": 0.2027,
"num_input_tokens_seen": 35082056,
"step": 3000
},
{
"epoch": 1.5449394797836724,
"eval_loss": 0.2371213436126709,
"eval_runtime": 19.9605,
"eval_samples_per_second": 3.006,
"eval_steps_per_second": 0.751,
"num_input_tokens_seen": 35082056,
"step": 3000
},
{
"epoch": 1.5475148081380374,
"grad_norm": 4.476150794936761,
"learning_rate": 1.3322578040399558e-05,
"loss": 0.1383,
"num_input_tokens_seen": 35140568,
"step": 3005
},
{
"epoch": 1.5500901364924027,
"grad_norm": 8.697328407960805,
"learning_rate": 1.3178136240002164e-05,
"loss": 0.2006,
"num_input_tokens_seen": 35199064,
"step": 3010
},
{
"epoch": 1.552665464846768,
"grad_norm": 5.605565161628193,
"learning_rate": 1.3034362781350524e-05,
"loss": 0.251,
"num_input_tokens_seen": 35257576,
"step": 3015
},
{
"epoch": 1.5552407932011332,
"grad_norm": 9.015316943288264,
"learning_rate": 1.289126027403023e-05,
"loss": 0.1584,
"num_input_tokens_seen": 35316064,
"step": 3020
},
{
"epoch": 1.5578161215554984,
"grad_norm": 43.17142293680494,
"learning_rate": 1.2748831315448606e-05,
"loss": 0.2173,
"num_input_tokens_seen": 35374504,
"step": 3025
},
{
"epoch": 1.5603914499098637,
"grad_norm": 79.21408258106743,
"learning_rate": 1.260707849078766e-05,
"loss": 0.2518,
"num_input_tokens_seen": 35432984,
"step": 3030
},
{
"epoch": 1.5629667782642287,
"grad_norm": 3.4648607953939714,
"learning_rate": 1.2466004372957068e-05,
"loss": 0.1936,
"num_input_tokens_seen": 35491488,
"step": 3035
},
{
"epoch": 1.5655421066185937,
"grad_norm": 2.890163536607479,
"learning_rate": 1.2325611522547547e-05,
"loss": 0.1677,
"num_input_tokens_seen": 35549984,
"step": 3040
},
{
"epoch": 1.568117434972959,
"grad_norm": 3.8910029052375856,
"learning_rate": 1.218590248778434e-05,
"loss": 0.2021,
"num_input_tokens_seen": 35608488,
"step": 3045
},
{
"epoch": 1.5706927633273242,
"grad_norm": 7.663989340971129,
"learning_rate": 1.2046879804480993e-05,
"loss": 0.1925,
"num_input_tokens_seen": 35666976,
"step": 3050
},
{
"epoch": 1.5706927633273242,
"eval_loss": 0.24840803444385529,
"eval_runtime": 20.1238,
"eval_samples_per_second": 2.982,
"eval_steps_per_second": 0.745,
"num_input_tokens_seen": 35666976,
"step": 3050
},
{
"epoch": 1.5732680916816895,
"grad_norm": 2.403638244773669,
"learning_rate": 1.1908545995993253e-05,
"loss": 0.1686,
"num_input_tokens_seen": 35725456,
"step": 3055
},
{
"epoch": 1.5758434200360547,
"grad_norm": 14.854500855372818,
"learning_rate": 1.1770903573173398e-05,
"loss": 0.2366,
"num_input_tokens_seen": 35783912,
"step": 3060
},
{
"epoch": 1.5784187483904197,
"grad_norm": 4.505603869422207,
"learning_rate": 1.1633955034324507e-05,
"loss": 0.2029,
"num_input_tokens_seen": 35842376,
"step": 3065
},
{
"epoch": 1.580994076744785,
"grad_norm": 6.561062574184342,
"learning_rate": 1.1497702865155274e-05,
"loss": 0.2116,
"num_input_tokens_seen": 35900824,
"step": 3070
},
{
"epoch": 1.58356940509915,
"grad_norm": 3.075946215121841,
"learning_rate": 1.1362149538734745e-05,
"loss": 0.1676,
"num_input_tokens_seen": 35959264,
"step": 3075
},
{
"epoch": 1.5861447334535153,
"grad_norm": 3.932942100960138,
"learning_rate": 1.1227297515447543e-05,
"loss": 0.1811,
"num_input_tokens_seen": 36017760,
"step": 3080
},
{
"epoch": 1.5887200618078805,
"grad_norm": 2.914398659228269,
"learning_rate": 1.1093149242949152e-05,
"loss": 0.1298,
"num_input_tokens_seen": 36076280,
"step": 3085
},
{
"epoch": 1.5912953901622457,
"grad_norm": 7.36077869474613,
"learning_rate": 1.0959707156121506e-05,
"loss": 0.1979,
"num_input_tokens_seen": 36134784,
"step": 3090
},
{
"epoch": 1.593870718516611,
"grad_norm": 7.180782753385591,
"learning_rate": 1.0826973677028767e-05,
"loss": 0.2539,
"num_input_tokens_seen": 36193288,
"step": 3095
},
{
"epoch": 1.596446046870976,
"grad_norm": 5.80837569754525,
"learning_rate": 1.0694951214873427e-05,
"loss": 0.2139,
"num_input_tokens_seen": 36251744,
"step": 3100
},
{
"epoch": 1.596446046870976,
"eval_loss": 0.2746882140636444,
"eval_runtime": 20.8164,
"eval_samples_per_second": 2.882,
"eval_steps_per_second": 0.721,
"num_input_tokens_seen": 36251744,
"step": 3100
},
{
"epoch": 1.5990213752253413,
"grad_norm": 7.332248506243341,
"learning_rate": 1.0563642165952497e-05,
"loss": 0.1658,
"num_input_tokens_seen": 36310200,
"step": 3105
},
{
"epoch": 1.6015967035797063,
"grad_norm": 4.227211949206946,
"learning_rate": 1.043304891361408e-05,
"loss": 0.2308,
"num_input_tokens_seen": 36368688,
"step": 3110
},
{
"epoch": 1.6041720319340715,
"grad_norm": 7.364454762284429,
"learning_rate": 1.030317382821409e-05,
"loss": 0.1838,
"num_input_tokens_seen": 36427192,
"step": 3115
},
{
"epoch": 1.6067473602884368,
"grad_norm": 3.831562577565949,
"learning_rate": 1.0174019267073225e-05,
"loss": 0.1604,
"num_input_tokens_seen": 36485656,
"step": 3120
},
{
"epoch": 1.609322688642802,
"grad_norm": 7.497731599778419,
"learning_rate": 1.0045587574434157e-05,
"loss": 0.2069,
"num_input_tokens_seen": 36544128,
"step": 3125
},
{
"epoch": 1.6118980169971673,
"grad_norm": 6.255929929298457,
"learning_rate": 9.917881081419033e-06,
"loss": 0.2161,
"num_input_tokens_seen": 36602568,
"step": 3130
},
{
"epoch": 1.6144733453515323,
"grad_norm": 5.299301385448033,
"learning_rate": 9.790902105987103e-06,
"loss": 0.2202,
"num_input_tokens_seen": 36661064,
"step": 3135
},
{
"epoch": 1.6170486737058976,
"grad_norm": 5.618502845362283,
"learning_rate": 9.66465295289274e-06,
"loss": 0.1867,
"num_input_tokens_seen": 36719544,
"step": 3140
},
{
"epoch": 1.6196240020602626,
"grad_norm": 3.7802756504457067,
"learning_rate": 9.539135913643465e-06,
"loss": 0.1811,
"num_input_tokens_seen": 36778064,
"step": 3145
},
{
"epoch": 1.6221993304146278,
"grad_norm": 5.770789580720362,
"learning_rate": 9.414353266458509e-06,
"loss": 0.204,
"num_input_tokens_seen": 36836560,
"step": 3150
},
{
"epoch": 1.6221993304146278,
"eval_loss": 0.2422773241996765,
"eval_runtime": 20.2302,
"eval_samples_per_second": 2.966,
"eval_steps_per_second": 0.741,
"num_input_tokens_seen": 36836560,
"step": 3150
},
{
"epoch": 1.624774658768993,
"grad_norm": 5.877162783758339,
"learning_rate": 9.29030727622735e-06,
"loss": 0.211,
"num_input_tokens_seen": 36895016,
"step": 3155
},
{
"epoch": 1.6273499871233583,
"grad_norm": 6.742491213588064,
"learning_rate": 9.167000194468684e-06,
"loss": 0.1973,
"num_input_tokens_seen": 36953528,
"step": 3160
},
{
"epoch": 1.6299253154777236,
"grad_norm": 3.9319298837048304,
"learning_rate": 9.044434259289452e-06,
"loss": 0.2209,
"num_input_tokens_seen": 37012056,
"step": 3165
},
{
"epoch": 1.6325006438320886,
"grad_norm": 4.3444513188690355,
"learning_rate": 8.922611695344352e-06,
"loss": 0.1527,
"num_input_tokens_seen": 37070528,
"step": 3170
},
{
"epoch": 1.6350759721864536,
"grad_norm": 6.583116555049651,
"learning_rate": 8.801534713795339e-06,
"loss": 0.189,
"num_input_tokens_seen": 37129008,
"step": 3175
},
{
"epoch": 1.6376513005408189,
"grad_norm": 2.419245633548623,
"learning_rate": 8.681205512271574e-06,
"loss": 0.1712,
"num_input_tokens_seen": 37187472,
"step": 3180
},
{
"epoch": 1.6402266288951841,
"grad_norm": 8.598240776469794,
"learning_rate": 8.561626274829498e-06,
"loss": 0.1751,
"num_input_tokens_seen": 37245936,
"step": 3185
},
{
"epoch": 1.6428019572495494,
"grad_norm": 3.6018632639200225,
"learning_rate": 8.442799171913201e-06,
"loss": 0.1963,
"num_input_tokens_seen": 37304424,
"step": 3190
},
{
"epoch": 1.6453772856039146,
"grad_norm": 2.5245912719574277,
"learning_rate": 8.324726360314995e-06,
"loss": 0.1133,
"num_input_tokens_seen": 37362888,
"step": 3195
},
{
"epoch": 1.6479526139582796,
"grad_norm": 6.895930963412961,
"learning_rate": 8.207409983136332e-06,
"loss": 0.1851,
"num_input_tokens_seen": 37421416,
"step": 3200
},
{
"epoch": 1.6479526139582796,
"eval_loss": 0.22857078909873962,
"eval_runtime": 20.4323,
"eval_samples_per_second": 2.937,
"eval_steps_per_second": 0.734,
"num_input_tokens_seen": 37421416,
"step": 3200
},
{
"epoch": 1.6505279423126449,
"grad_norm": 3.0909496285264964,
"learning_rate": 8.090852169748848e-06,
"loss": 0.2208,
"num_input_tokens_seen": 37479848,
"step": 3205
},
{
"epoch": 1.65310327066701,
"grad_norm": 4.235698681690494,
"learning_rate": 7.975055035755713e-06,
"loss": 0.2252,
"num_input_tokens_seen": 37538312,
"step": 3210
},
{
"epoch": 1.6556785990213752,
"grad_norm": 10.661168860452113,
"learning_rate": 7.860020682953284e-06,
"loss": 0.254,
"num_input_tokens_seen": 37596800,
"step": 3215
},
{
"epoch": 1.6582539273757404,
"grad_norm": 3.6874689284357736,
"learning_rate": 7.745751199292923e-06,
"loss": 0.1741,
"num_input_tokens_seen": 37655312,
"step": 3220
},
{
"epoch": 1.6608292557301056,
"grad_norm": 10.065911223324512,
"learning_rate": 7.632248658843088e-06,
"loss": 0.2133,
"num_input_tokens_seen": 37713800,
"step": 3225
},
{
"epoch": 1.663404584084471,
"grad_norm": 3.799333032004199,
"learning_rate": 7.51951512175168e-06,
"loss": 0.1509,
"num_input_tokens_seen": 37772280,
"step": 3230
},
{
"epoch": 1.665979912438836,
"grad_norm": 11.66939874014716,
"learning_rate": 7.407552634208714e-06,
"loss": 0.2229,
"num_input_tokens_seen": 37830800,
"step": 3235
},
{
"epoch": 1.6685552407932012,
"grad_norm": 5.949219050013212,
"learning_rate": 7.2963632284090756e-06,
"loss": 0.1795,
"num_input_tokens_seen": 37889280,
"step": 3240
},
{
"epoch": 1.6711305691475662,
"grad_norm": 6.566491462966474,
"learning_rate": 7.185948922515762e-06,
"loss": 0.2316,
"num_input_tokens_seen": 37947728,
"step": 3245
},
{
"epoch": 1.6737058975019314,
"grad_norm": 6.379883324544748,
"learning_rate": 7.076311720623108e-06,
"loss": 0.2072,
"num_input_tokens_seen": 38006200,
"step": 3250
},
{
"epoch": 1.6737058975019314,
"eval_loss": 0.2405671775341034,
"eval_runtime": 20.709,
"eval_samples_per_second": 2.897,
"eval_steps_per_second": 0.724,
"num_input_tokens_seen": 38006200,
"step": 3250
},
{
"epoch": 1.6762812258562967,
"grad_norm": 3.5869080263938082,
"learning_rate": 6.967453612720543e-06,
"loss": 0.2139,
"num_input_tokens_seen": 38064672,
"step": 3255
},
{
"epoch": 1.678856554210662,
"grad_norm": 6.877217191111715,
"learning_rate": 6.859376574656395e-06,
"loss": 0.2038,
"num_input_tokens_seen": 38123192,
"step": 3260
},
{
"epoch": 1.6814318825650272,
"grad_norm": 3.2559262371448146,
"learning_rate": 6.7520825681020476e-06,
"loss": 0.232,
"num_input_tokens_seen": 38181696,
"step": 3265
},
{
"epoch": 1.6840072109193922,
"grad_norm": 8.785530689010578,
"learning_rate": 6.6455735405163145e-06,
"loss": 0.1632,
"num_input_tokens_seen": 38240216,
"step": 3270
},
{
"epoch": 1.6865825392737575,
"grad_norm": 5.531242921031575,
"learning_rate": 6.539851425110138e-06,
"loss": 0.1423,
"num_input_tokens_seen": 38298688,
"step": 3275
},
{
"epoch": 1.6891578676281225,
"grad_norm": 4.0644227405062345,
"learning_rate": 6.434918140811447e-06,
"loss": 0.1673,
"num_input_tokens_seen": 38357128,
"step": 3280
},
{
"epoch": 1.6917331959824877,
"grad_norm": 2.3469449774677957,
"learning_rate": 6.330775592230365e-06,
"loss": 0.137,
"num_input_tokens_seen": 38415600,
"step": 3285
},
{
"epoch": 1.694308524336853,
"grad_norm": 13.136879672491618,
"learning_rate": 6.2274256696246314e-06,
"loss": 0.241,
"num_input_tokens_seen": 38474112,
"step": 3290
},
{
"epoch": 1.6968838526912182,
"grad_norm": 7.6753531592588295,
"learning_rate": 6.124870248865289e-06,
"loss": 0.1795,
"num_input_tokens_seen": 38532624,
"step": 3295
},
{
"epoch": 1.6994591810455835,
"grad_norm": 3.6160334005930412,
"learning_rate": 6.023111191402603e-06,
"loss": 0.2145,
"num_input_tokens_seen": 38591128,
"step": 3300
},
{
"epoch": 1.6994591810455835,
"eval_loss": 0.2692144513130188,
"eval_runtime": 19.8713,
"eval_samples_per_second": 3.019,
"eval_steps_per_second": 0.755,
"num_input_tokens_seen": 38591128,
"step": 3300
},
{
"epoch": 1.7020345093999485,
"grad_norm": 4.245599320283208,
"learning_rate": 5.922150344232358e-06,
"loss": 0.1577,
"num_input_tokens_seen": 38649640,
"step": 3305
},
{
"epoch": 1.7046098377543135,
"grad_norm": 6.439585458462034,
"learning_rate": 5.821989539862227e-06,
"loss": 0.1865,
"num_input_tokens_seen": 38708112,
"step": 3310
},
{
"epoch": 1.7071851661086788,
"grad_norm": 4.332556239205023,
"learning_rate": 5.722630596278617e-06,
"loss": 0.2041,
"num_input_tokens_seen": 38766624,
"step": 3315
},
{
"epoch": 1.709760494463044,
"grad_norm": 8.300709555053524,
"learning_rate": 5.624075316913568e-06,
"loss": 0.2379,
"num_input_tokens_seen": 38825104,
"step": 3320
},
{
"epoch": 1.7123358228174093,
"grad_norm": 3.5209088708004423,
"learning_rate": 5.526325490612106e-06,
"loss": 0.1446,
"num_input_tokens_seen": 38883560,
"step": 3325
},
{
"epoch": 1.7149111511717745,
"grad_norm": 6.083318731157017,
"learning_rate": 5.429382891599732e-06,
"loss": 0.2214,
"num_input_tokens_seen": 38942040,
"step": 3330
},
{
"epoch": 1.7174864795261395,
"grad_norm": 5.245646471007174,
"learning_rate": 5.333249279450231e-06,
"loss": 0.1735,
"num_input_tokens_seen": 39000488,
"step": 3335
},
{
"epoch": 1.7200618078805048,
"grad_norm": 5.482406255032746,
"learning_rate": 5.237926399053699e-06,
"loss": 0.1996,
"num_input_tokens_seen": 39058960,
"step": 3340
},
{
"epoch": 1.7226371362348698,
"grad_norm": 6.210583442446586,
"learning_rate": 5.143415980584948e-06,
"loss": 0.2478,
"num_input_tokens_seen": 39117416,
"step": 3345
},
{
"epoch": 1.725212464589235,
"grad_norm": 5.159144010193508,
"learning_rate": 5.049719739472009e-06,
"loss": 0.2158,
"num_input_tokens_seen": 39175888,
"step": 3350
},
{
"epoch": 1.725212464589235,
"eval_loss": 0.2447366714477539,
"eval_runtime": 20.3156,
"eval_samples_per_second": 2.953,
"eval_steps_per_second": 0.738,
"num_input_tokens_seen": 39175888,
"step": 3350
},
{
"epoch": 1.7277877929436003,
"grad_norm": 6.187206503231987,
"learning_rate": 4.956839376365069e-06,
"loss": 0.1795,
"num_input_tokens_seen": 39234336,
"step": 3355
},
{
"epoch": 1.7303631212979655,
"grad_norm": 4.18667410182292,
"learning_rate": 4.864776577105584e-06,
"loss": 0.1964,
"num_input_tokens_seen": 39292800,
"step": 3360
},
{
"epoch": 1.7329384496523308,
"grad_norm": 5.8485329747855275,
"learning_rate": 4.773533012695663e-06,
"loss": 0.1558,
"num_input_tokens_seen": 39351272,
"step": 3365
},
{
"epoch": 1.7355137780066958,
"grad_norm": 2.991559338645507,
"learning_rate": 4.683110339267732e-06,
"loss": 0.187,
"num_input_tokens_seen": 39409736,
"step": 3370
},
{
"epoch": 1.738089106361061,
"grad_norm": 3.7473435202339376,
"learning_rate": 4.5935101980545295e-06,
"loss": 0.1945,
"num_input_tokens_seen": 39468176,
"step": 3375
},
{
"epoch": 1.740664434715426,
"grad_norm": 3.664152846888827,
"learning_rate": 4.5047342153592245e-06,
"loss": 0.1475,
"num_input_tokens_seen": 39526688,
"step": 3380
},
{
"epoch": 1.7432397630697913,
"grad_norm": 2.0318595088746334,
"learning_rate": 4.416784002526003e-06,
"loss": 0.1334,
"num_input_tokens_seen": 39585152,
"step": 3385
},
{
"epoch": 1.7458150914241566,
"grad_norm": 4.706820298087635,
"learning_rate": 4.329661155910736e-06,
"loss": 0.1953,
"num_input_tokens_seen": 39643640,
"step": 3390
},
{
"epoch": 1.7483904197785218,
"grad_norm": 9.496012423646269,
"learning_rate": 4.243367256852049e-06,
"loss": 0.2158,
"num_input_tokens_seen": 39702144,
"step": 3395
},
{
"epoch": 1.750965748132887,
"grad_norm": 4.388828356289149,
"learning_rate": 4.15790387164261e-06,
"loss": 0.1488,
"num_input_tokens_seen": 39760664,
"step": 3400
},
{
"epoch": 1.750965748132887,
"eval_loss": 0.22250184416770935,
"eval_runtime": 20.8021,
"eval_samples_per_second": 2.884,
"eval_steps_per_second": 0.721,
"num_input_tokens_seen": 39760664,
"step": 3400
},
{
"epoch": 1.751995879474633,
"num_input_tokens_seen": 39772368,
"step": 3401,
"total_flos": 2624309677719552.0,
"train_loss": 9.810503915070576e-05,
"train_runtime": 113.2963,
"train_samples_per_second": 720.235,
"train_steps_per_second": 30.01
}
],
"logging_steps": 5,
"max_steps": 3400,
"num_input_tokens_seen": 39772368,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2624309677719552.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}