{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.988913525498892,
"eval_steps": 500,
"global_step": 1125,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004434589800443459,
"grad_norm": 1.911939263343811,
"learning_rate": 8.849557522123894e-06,
"loss": 2.8127,
"step": 1
},
{
"epoch": 0.022172949002217297,
"grad_norm": 2.071887731552124,
"learning_rate": 4.424778761061947e-05,
"loss": 2.8128,
"step": 5
},
{
"epoch": 0.04434589800443459,
"grad_norm": 1.1280204057693481,
"learning_rate": 8.849557522123894e-05,
"loss": 2.5671,
"step": 10
},
{
"epoch": 0.06651884700665188,
"grad_norm": 1.151498794555664,
"learning_rate": 0.00013274336283185842,
"loss": 2.3153,
"step": 15
},
{
"epoch": 0.08869179600886919,
"grad_norm": 0.7351519465446472,
"learning_rate": 0.00017699115044247788,
"loss": 2.087,
"step": 20
},
{
"epoch": 0.11086474501108648,
"grad_norm": 0.8526760339736938,
"learning_rate": 0.00022123893805309737,
"loss": 1.9278,
"step": 25
},
{
"epoch": 0.13303769401330376,
"grad_norm": 0.4035344421863556,
"learning_rate": 0.00026548672566371683,
"loss": 1.8159,
"step": 30
},
{
"epoch": 0.15521064301552107,
"grad_norm": 0.2873729169368744,
"learning_rate": 0.00030973451327433627,
"loss": 1.7511,
"step": 35
},
{
"epoch": 0.17738359201773837,
"grad_norm": 0.2770038843154907,
"learning_rate": 0.00035398230088495576,
"loss": 1.7015,
"step": 40
},
{
"epoch": 0.19955654101995565,
"grad_norm": 0.3265347480773926,
"learning_rate": 0.00039823008849557525,
"loss": 1.6655,
"step": 45
},
{
"epoch": 0.22172949002217296,
"grad_norm": 0.5982062220573425,
"learning_rate": 0.00044247787610619474,
"loss": 1.6314,
"step": 50
},
{
"epoch": 0.24390243902439024,
"grad_norm": 0.4608590602874756,
"learning_rate": 0.0004867256637168142,
"loss": 1.571,
"step": 55
},
{
"epoch": 0.2660753880266075,
"grad_norm": 0.4388026297092438,
"learning_rate": 0.0005309734513274337,
"loss": 1.5656,
"step": 60
},
{
"epoch": 0.28824833702882485,
"grad_norm": 0.3848969638347626,
"learning_rate": 0.0005752212389380532,
"loss": 1.5407,
"step": 65
},
{
"epoch": 0.31042128603104213,
"grad_norm": 0.2577425241470337,
"learning_rate": 0.0006194690265486725,
"loss": 1.5097,
"step": 70
},
{
"epoch": 0.3325942350332594,
"grad_norm": 0.38717567920684814,
"learning_rate": 0.0006637168141592921,
"loss": 1.5087,
"step": 75
},
{
"epoch": 0.35476718403547675,
"grad_norm": 0.2715625464916229,
"learning_rate": 0.0007079646017699115,
"loss": 1.5023,
"step": 80
},
{
"epoch": 0.376940133037694,
"grad_norm": 0.29234352707862854,
"learning_rate": 0.0007522123893805309,
"loss": 1.4785,
"step": 85
},
{
"epoch": 0.3991130820399113,
"grad_norm": 0.28365185856819153,
"learning_rate": 0.0007964601769911505,
"loss": 1.4721,
"step": 90
},
{
"epoch": 0.4212860310421286,
"grad_norm": 0.32057851552963257,
"learning_rate": 0.0008407079646017699,
"loss": 1.4595,
"step": 95
},
{
"epoch": 0.4434589800443459,
"grad_norm": 0.30948102474212646,
"learning_rate": 0.0008849557522123895,
"loss": 1.4668,
"step": 100
},
{
"epoch": 0.4656319290465632,
"grad_norm": 0.31636759638786316,
"learning_rate": 0.0009292035398230089,
"loss": 1.4544,
"step": 105
},
{
"epoch": 0.4878048780487805,
"grad_norm": 0.24775166809558868,
"learning_rate": 0.0009734513274336283,
"loss": 1.4405,
"step": 110
},
{
"epoch": 0.5099778270509978,
"grad_norm": 0.24200735986232758,
"learning_rate": 0.0009999903631006022,
"loss": 1.4508,
"step": 115
},
{
"epoch": 0.532150776053215,
"grad_norm": 0.306389719247818,
"learning_rate": 0.0009998819522485391,
"loss": 1.4232,
"step": 120
},
{
"epoch": 0.5543237250554324,
"grad_norm": 0.2549804449081421,
"learning_rate": 0.0009996531106254026,
"loss": 1.4249,
"step": 125
},
{
"epoch": 0.5764966740576497,
"grad_norm": 0.24480336904525757,
"learning_rate": 0.0009993038933633555,
"loss": 1.4142,
"step": 130
},
{
"epoch": 0.5986696230598669,
"grad_norm": 0.2402483969926834,
"learning_rate": 0.0009988343845952696,
"loss": 1.414,
"step": 135
},
{
"epoch": 0.6208425720620843,
"grad_norm": 0.22881096601486206,
"learning_rate": 0.000998244697434456,
"loss": 1.4312,
"step": 140
},
{
"epoch": 0.6430155210643016,
"grad_norm": 0.2914198637008667,
"learning_rate": 0.0009975349739474153,
"loss": 1.417,
"step": 145
},
{
"epoch": 0.6651884700665188,
"grad_norm": 0.2460290491580963,
"learning_rate": 0.0009967053851196099,
"loss": 1.4257,
"step": 150
},
{
"epoch": 0.6873614190687362,
"grad_norm": 0.22039610147476196,
"learning_rate": 0.0009957561308142709,
"loss": 1.4142,
"step": 155
},
{
"epoch": 0.7095343680709535,
"grad_norm": 0.24831444025039673,
"learning_rate": 0.0009946874397242474,
"loss": 1.403,
"step": 160
},
{
"epoch": 0.7317073170731707,
"grad_norm": 0.2765662372112274,
"learning_rate": 0.0009934995693169104,
"loss": 1.3855,
"step": 165
},
{
"epoch": 0.753880266075388,
"grad_norm": 0.26668626070022583,
"learning_rate": 0.0009921928057721242,
"loss": 1.3983,
"step": 170
},
{
"epoch": 0.7760532150776053,
"grad_norm": 0.305702805519104,
"learning_rate": 0.0009907674639132995,
"loss": 1.4083,
"step": 175
},
{
"epoch": 0.7982261640798226,
"grad_norm": 0.3682064414024353,
"learning_rate": 0.0009892238871315475,
"loss": 1.4011,
"step": 180
},
{
"epoch": 0.8203991130820399,
"grad_norm": 0.2574247419834137,
"learning_rate": 0.0009875624473029507,
"loss": 1.3962,
"step": 185
},
{
"epoch": 0.8425720620842572,
"grad_norm": 0.42513400316238403,
"learning_rate": 0.0009857835446989707,
"loss": 1.3924,
"step": 190
},
{
"epoch": 0.8647450110864745,
"grad_norm": 0.3164115250110626,
"learning_rate": 0.0009838876078900156,
"loss": 1.3804,
"step": 195
},
{
"epoch": 0.8869179600886918,
"grad_norm": 0.2795542776584625,
"learning_rate": 0.0009818750936421894,
"loss": 1.3943,
"step": 200
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.2589350938796997,
"learning_rate": 0.0009797464868072487,
"loss": 1.384,
"step": 205
},
{
"epoch": 0.9312638580931264,
"grad_norm": 0.9124072790145874,
"learning_rate": 0.000977502300205793,
"loss": 1.3689,
"step": 210
},
{
"epoch": 0.9534368070953437,
"grad_norm": 0.3424608111381531,
"learning_rate": 0.0009751430745037169,
"loss": 1.3664,
"step": 215
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.25026756525039673,
"learning_rate": 0.0009726693780819534,
"loss": 1.3913,
"step": 220
},
{
"epoch": 0.9977827050997783,
"grad_norm": 0.4633461833000183,
"learning_rate": 0.0009700818068995407,
"loss": 1.3936,
"step": 225
},
{
"epoch": 0.9977827050997783,
"eval_loss": 1.791922926902771,
"eval_runtime": 0.3732,
"eval_samples_per_second": 2.679,
"eval_steps_per_second": 2.679,
"step": 225
},
{
"epoch": 1.0199556541019956,
"grad_norm": 0.2957637310028076,
"learning_rate": 0.0009673809843500446,
"loss": 1.3285,
"step": 230
},
{
"epoch": 1.042128603104213,
"grad_norm": 0.29558026790618896,
"learning_rate": 0.0009645675611113716,
"loss": 1.333,
"step": 235
},
{
"epoch": 1.06430155210643,
"grad_norm": 0.30888795852661133,
"learning_rate": 0.0009616422149890085,
"loss": 1.3411,
"step": 240
},
{
"epoch": 1.0864745011086474,
"grad_norm": 2.1240732669830322,
"learning_rate": 0.0009586056507527265,
"loss": 1.3353,
"step": 245
},
{
"epoch": 1.1086474501108647,
"grad_norm": 0.4370739758014679,
"learning_rate": 0.0009554585999667896,
"loss": 1.3688,
"step": 250
},
{
"epoch": 1.130820399113082,
"grad_norm": 0.5616880059242249,
"learning_rate": 0.0009522018208137067,
"loss": 1.3541,
"step": 255
},
{
"epoch": 1.1529933481152994,
"grad_norm": 0.31425368785858154,
"learning_rate": 0.0009488360979115719,
"loss": 1.3366,
"step": 260
},
{
"epoch": 1.1751662971175167,
"grad_norm": 0.2877110242843628,
"learning_rate": 0.0009453622421250352,
"loss": 1.3446,
"step": 265
},
{
"epoch": 1.1973392461197339,
"grad_norm": 0.32708439230918884,
"learning_rate": 0.0009417810903699507,
"loss": 1.3453,
"step": 270
},
{
"epoch": 1.2195121951219512,
"grad_norm": 0.24861423671245575,
"learning_rate": 0.000938093505411748,
"loss": 1.3226,
"step": 275
},
{
"epoch": 1.2416851441241685,
"grad_norm": 0.2731972634792328,
"learning_rate": 0.0009343003756575757,
"loss": 1.3322,
"step": 280
},
{
"epoch": 1.2638580931263859,
"grad_norm": 0.3049946129322052,
"learning_rate": 0.000930402614942268,
"loss": 1.3487,
"step": 285
},
{
"epoch": 1.2860310421286032,
"grad_norm": 0.27978989481925964,
"learning_rate": 0.0009264011623081859,
"loss": 1.3274,
"step": 290
},
{
"epoch": 1.3082039911308203,
"grad_norm": 0.9313792586326599,
"learning_rate": 0.0009222969817789828,
"loss": 1.3204,
"step": 295
},
{
"epoch": 1.3303769401330376,
"grad_norm": 0.2632613778114319,
"learning_rate": 0.0009180910621273555,
"loss": 1.3382,
"step": 300
},
{
"epoch": 1.352549889135255,
"grad_norm": 0.35932329297065735,
"learning_rate": 0.0009137844166368287,
"loss": 1.3397,
"step": 305
},
{
"epoch": 1.3747228381374723,
"grad_norm": 0.31215277314186096,
"learning_rate": 0.0009093780828576379,
"loss": 1.3241,
"step": 310
},
{
"epoch": 1.3968957871396896,
"grad_norm": 1.306994080543518,
"learning_rate": 0.0009048731223567636,
"loss": 1.3411,
"step": 315
},
{
"epoch": 1.4190687361419068,
"grad_norm": 0.7964722514152527,
"learning_rate": 0.0009002706204621802,
"loss": 1.3433,
"step": 320
},
{
"epoch": 1.441241685144124,
"grad_norm": 0.32706671953201294,
"learning_rate": 0.0008955716860013812,
"loss": 1.3231,
"step": 325
},
{
"epoch": 1.4634146341463414,
"grad_norm": 0.2626619338989258,
"learning_rate": 0.0008907774510342412,
"loss": 1.333,
"step": 330
},
{
"epoch": 1.4855875831485588,
"grad_norm": 0.2566235363483429,
"learning_rate": 0.0008858890705802829,
"loss": 1.3289,
"step": 335
},
{
"epoch": 1.507760532150776,
"grad_norm": 0.22669324278831482,
"learning_rate": 0.0008809077223404109,
"loss": 1.3345,
"step": 340
},
{
"epoch": 1.5299334811529932,
"grad_norm": 0.24277335405349731,
"learning_rate": 0.0008758346064131824,
"loss": 1.3258,
"step": 345
},
{
"epoch": 1.5521064301552108,
"grad_norm": 0.23311229050159454,
"learning_rate": 0.0008706709450056802,
"loss": 1.3208,
"step": 350
},
{
"epoch": 1.5742793791574279,
"grad_norm": 0.3494793176651001,
"learning_rate": 0.0008654179821390621,
"loss": 1.3253,
"step": 355
},
{
"epoch": 1.5964523281596452,
"grad_norm": 0.22604632377624512,
"learning_rate": 0.0008600769833488522,
"loss": 1.3244,
"step": 360
},
{
"epoch": 1.6186252771618626,
"grad_norm": 0.26579421758651733,
"learning_rate": 0.0008546492353800504,
"loss": 1.3256,
"step": 365
},
{
"epoch": 1.6407982261640797,
"grad_norm": 0.2301110476255417,
"learning_rate": 0.000849136045877132,
"loss": 1.3383,
"step": 370
},
{
"epoch": 1.6629711751662972,
"grad_norm": 0.22868593037128448,
"learning_rate": 0.0008435387430690114,
"loss": 1.3194,
"step": 375
},
{
"epoch": 1.6851441241685143,
"grad_norm": 0.28906944394111633,
"learning_rate": 0.0008378586754490483,
"loss": 1.3196,
"step": 380
},
{
"epoch": 1.7073170731707317,
"grad_norm": 0.22416484355926514,
"learning_rate": 0.0008320972114501697,
"loss": 1.3281,
"step": 385
},
{
"epoch": 1.729490022172949,
"grad_norm": 0.2212500125169754,
"learning_rate": 0.0008262557391151904,
"loss": 1.3166,
"step": 390
},
{
"epoch": 1.7516629711751663,
"grad_norm": 0.24597449600696564,
"learning_rate": 0.0008203356657624068,
"loss": 1.3147,
"step": 395
},
{
"epoch": 1.7738359201773837,
"grad_norm": 0.20681482553482056,
"learning_rate": 0.0008143384176465486,
"loss": 1.3207,
"step": 400
},
{
"epoch": 1.7960088691796008,
"grad_norm": 0.22514577209949493,
"learning_rate": 0.0008082654396151675,
"loss": 1.3256,
"step": 405
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.2058994621038437,
"learning_rate": 0.0008021181947605473,
"loss": 1.3051,
"step": 410
},
{
"epoch": 1.8403547671840355,
"grad_norm": 0.24868044257164001,
"learning_rate": 0.0007958981640672172,
"loss": 1.3133,
"step": 415
},
{
"epoch": 1.8625277161862528,
"grad_norm": 0.219014972448349,
"learning_rate": 0.0007896068460551562,
"loss": 1.3016,
"step": 420
},
{
"epoch": 1.8847006651884701,
"grad_norm": 0.2263939380645752,
"learning_rate": 0.0007832457564187715,
"loss": 1.3269,
"step": 425
},
{
"epoch": 1.9068736141906872,
"grad_norm": 0.22888123989105225,
"learning_rate": 0.0007768164276617396,
"loss": 1.3297,
"step": 430
},
{
"epoch": 1.9290465631929048,
"grad_norm": 0.21448783576488495,
"learning_rate": 0.0007703204087277988,
"loss": 1.3042,
"step": 435
},
{
"epoch": 1.951219512195122,
"grad_norm": 0.217611163854599,
"learning_rate": 0.0007637592646275793,
"loss": 1.3171,
"step": 440
},
{
"epoch": 1.9733924611973392,
"grad_norm": 0.2066190540790558,
"learning_rate": 0.0007571345760615634,
"loss": 1.3131,
"step": 445
},
{
"epoch": 1.9955654101995566,
"grad_norm": 0.2094324827194214,
"learning_rate": 0.0007504479390392661,
"loss": 1.31,
"step": 450
},
{
"epoch": 2.0,
"eval_loss": 1.7869027853012085,
"eval_runtime": 0.3369,
"eval_samples_per_second": 2.969,
"eval_steps_per_second": 2.969,
"step": 451
},
{
"epoch": 2.0177383592017737,
"grad_norm": 0.2380608320236206,
"learning_rate": 0.0007437009644947268,
"loss": 1.2636,
"step": 455
},
{
"epoch": 2.0399113082039912,
"grad_norm": 0.226850226521492,
"learning_rate": 0.0007368952778984051,
"loss": 1.2298,
"step": 460
},
{
"epoch": 2.0620842572062084,
"grad_norm": 0.23007981479167938,
"learning_rate": 0.0007300325188655761,
"loss": 1.2459,
"step": 465
},
{
"epoch": 2.084257206208426,
"grad_norm": 0.21383607387542725,
"learning_rate": 0.0007231143407613156,
"loss": 1.2328,
"step": 470
},
{
"epoch": 2.106430155210643,
"grad_norm": 0.22145338356494904,
"learning_rate": 0.0007161424103021752,
"loss": 1.2326,
"step": 475
},
{
"epoch": 2.12860310421286,
"grad_norm": 0.26510247588157654,
"learning_rate": 0.0007091184071546384,
"loss": 1.2377,
"step": 480
},
{
"epoch": 2.1507760532150777,
"grad_norm": 0.23144353926181793,
"learning_rate": 0.0007020440235304592,
"loss": 1.2195,
"step": 485
},
{
"epoch": 2.172949002217295,
"grad_norm": 0.22692181169986725,
"learning_rate": 0.000694920963778976,
"loss": 1.2181,
"step": 490
},
{
"epoch": 2.1951219512195124,
"grad_norm": 0.2270960956811905,
"learning_rate": 0.0006877509439765037,
"loss": 1.2444,
"step": 495
},
{
"epoch": 2.2172949002217295,
"grad_norm": 0.22735467553138733,
"learning_rate": 0.0006805356915128977,
"loss": 1.2385,
"step": 500
},
{
"epoch": 2.2394678492239466,
"grad_norm": 0.21610836684703827,
"learning_rate": 0.0006732769446753953,
"loss": 1.2476,
"step": 505
},
{
"epoch": 2.261640798226164,
"grad_norm": 0.4567781686782837,
"learning_rate": 0.0006659764522298296,
"loss": 1.2411,
"step": 510
},
{
"epoch": 2.2838137472283813,
"grad_norm": 0.23705683648586273,
"learning_rate": 0.0006586359729993199,
"loss": 1.2334,
"step": 515
},
{
"epoch": 2.305986696230599,
"grad_norm": 0.22446921467781067,
"learning_rate": 0.0006512572754405379,
"loss": 1.2342,
"step": 520
},
{
"epoch": 2.328159645232816,
"grad_norm": 0.2318691909313202,
"learning_rate": 0.0006438421372176556,
"loss": 1.2379,
"step": 525
},
{
"epoch": 2.3503325942350335,
"grad_norm": 0.2584494650363922,
"learning_rate": 0.0006363923447740718,
"loss": 1.2371,
"step": 530
},
{
"epoch": 2.3725055432372506,
"grad_norm": 0.2136611044406891,
"learning_rate": 0.0006289096929020253,
"loss": 1.2497,
"step": 535
},
{
"epoch": 2.3946784922394677,
"grad_norm": 0.22401590645313263,
"learning_rate": 0.000621395984310197,
"loss": 1.2508,
"step": 540
},
{
"epoch": 2.4168514412416853,
"grad_norm": 0.21865229308605194,
"learning_rate": 0.0006138530291894032,
"loss": 1.2494,
"step": 545
},
{
"epoch": 2.4390243902439024,
"grad_norm": 0.23078739643096924,
"learning_rate": 0.0006062826447764884,
"loss": 1.2474,
"step": 550
},
{
"epoch": 2.4611973392461195,
"grad_norm": 0.24809886515140533,
"learning_rate": 0.0005986866549165184,
"loss": 1.2472,
"step": 555
},
{
"epoch": 2.483370288248337,
"grad_norm": 0.23055674135684967,
"learning_rate": 0.000591066889623383,
"loss": 1.23,
"step": 560
},
{
"epoch": 2.505543237250554,
"grad_norm": 0.22716328501701355,
"learning_rate": 0.000583425184638912,
"loss": 1.2495,
"step": 565
},
{
"epoch": 2.5277161862527717,
"grad_norm": 0.21761415898799896,
"learning_rate": 0.0005757633809906107,
"loss": 1.2448,
"step": 570
},
{
"epoch": 2.549889135254989,
"grad_norm": 0.2366757094860077,
"learning_rate": 0.0005680833245481234,
"loss": 1.2481,
"step": 575
},
{
"epoch": 2.5720620842572064,
"grad_norm": 0.23113702237606049,
"learning_rate": 0.0005603868655785279,
"loss": 1.2422,
"step": 580
},
{
"epoch": 2.5942350332594235,
"grad_norm": 0.2284475862979889,
"learning_rate": 0.0005526758583005735,
"loss": 1.2354,
"step": 585
},
{
"epoch": 2.6164079822616406,
"grad_norm": 0.23871304094791412,
"learning_rate": 0.0005449521604379652,
"loss": 1.2573,
"step": 590
},
{
"epoch": 2.638580931263858,
"grad_norm": 0.2440497726202011,
"learning_rate": 0.0005372176327718029,
"loss": 1.2634,
"step": 595
},
{
"epoch": 2.6607538802660753,
"grad_norm": 0.23553554713726044,
"learning_rate": 0.0005294741386922863,
"loss": 1.2494,
"step": 600
},
{
"epoch": 2.682926829268293,
"grad_norm": 0.22893203794956207,
"learning_rate": 0.000521723543749789,
"loss": 1.2317,
"step": 605
},
{
"epoch": 2.70509977827051,
"grad_norm": 0.21567869186401367,
"learning_rate": 0.0005139677152054135,
"loss": 1.2267,
"step": 610
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.21539655327796936,
"learning_rate": 0.000506208521581133,
"loss": 1.2314,
"step": 615
},
{
"epoch": 2.7494456762749446,
"grad_norm": 0.263122022151947,
"learning_rate": 0.0004984478322096308,
"loss": 1.2504,
"step": 620
},
{
"epoch": 2.7716186252771617,
"grad_norm": 0.21988996863365173,
"learning_rate": 0.0004906875167839433,
"loss": 1.2424,
"step": 625
},
{
"epoch": 2.7937915742793793,
"grad_norm": 0.22908321022987366,
"learning_rate": 0.00048292944490701606,
"loss": 1.2374,
"step": 630
},
{
"epoch": 2.8159645232815964,
"grad_norm": 0.21693512797355652,
"learning_rate": 0.00047517548564128293,
"loss": 1.2368,
"step": 635
},
{
"epoch": 2.8381374722838135,
"grad_norm": 0.22369663417339325,
"learning_rate": 0.00046742750705837356,
"loss": 1.2227,
"step": 640
},
{
"epoch": 2.860310421286031,
"grad_norm": 0.21551957726478577,
"learning_rate": 0.0004596873757890612,
"loss": 1.2353,
"step": 645
},
{
"epoch": 2.882483370288248,
"grad_norm": 0.21659965813159943,
"learning_rate": 0.00045195695657355636,
"loss": 1.242,
"step": 650
},
{
"epoch": 2.9046563192904657,
"grad_norm": 0.2131386399269104,
"learning_rate": 0.00044423811181225727,
"loss": 1.2372,
"step": 655
},
{
"epoch": 2.926829268292683,
"grad_norm": 0.21243086457252502,
"learning_rate": 0.0004365327011170628,
"loss": 1.2492,
"step": 660
},
{
"epoch": 2.9490022172949004,
"grad_norm": 0.2212466597557068,
"learning_rate": 0.0004288425808633575,
"loss": 1.2548,
"step": 665
},
{
"epoch": 2.9711751662971175,
"grad_norm": 0.21596314013004303,
"learning_rate": 0.0004211696037427772,
"loss": 1.231,
"step": 670
},
{
"epoch": 2.9933481152993346,
"grad_norm": 0.20963598787784576,
"learning_rate": 0.0004135156183168613,
"loss": 1.2416,
"step": 675
},
{
"epoch": 2.9977827050997785,
"eval_loss": 1.824194312095642,
"eval_runtime": 0.3515,
"eval_samples_per_second": 2.845,
"eval_steps_per_second": 2.845,
"step": 676
},
{
"epoch": 3.015521064301552,
"grad_norm": 0.23751798272132874,
"learning_rate": 0.0004058824685716997,
"loss": 1.1799,
"step": 680
},
{
"epoch": 3.0376940133037693,
"grad_norm": 0.22644291818141937,
"learning_rate": 0.0003982719934736832,
"loss": 1.1569,
"step": 685
},
{
"epoch": 3.059866962305987,
"grad_norm": 0.24408124387264252,
"learning_rate": 0.0003906860265264622,
"loss": 1.1543,
"step": 690
},
{
"epoch": 3.082039911308204,
"grad_norm": 0.23074336349964142,
"learning_rate": 0.00038312639532922245,
"loss": 1.1437,
"step": 695
},
{
"epoch": 3.104212860310421,
"grad_norm": 0.2394222617149353,
"learning_rate": 0.00037559492113638205,
"loss": 1.1394,
"step": 700
},
{
"epoch": 3.1263858093126387,
"grad_norm": 0.23385775089263916,
"learning_rate": 0.00036809341841881815,
"loss": 1.1507,
"step": 705
},
{
"epoch": 3.1485587583148558,
"grad_norm": 0.23921357095241547,
"learning_rate": 0.00036062369442672724,
"loss": 1.1554,
"step": 710
},
{
"epoch": 3.1707317073170733,
"grad_norm": 0.2213825136423111,
"learning_rate": 0.00035318754875422585,
"loss": 1.1523,
"step": 715
},
{
"epoch": 3.1929046563192904,
"grad_norm": 0.24610434472560883,
"learning_rate": 0.0003457867729057942,
"loss": 1.1484,
"step": 720
},
{
"epoch": 3.2150776053215075,
"grad_norm": 0.23285223543643951,
"learning_rate": 0.0003384231498646706,
"loss": 1.1599,
"step": 725
},
{
"epoch": 3.237250554323725,
"grad_norm": 0.23643791675567627,
"learning_rate": 0.0003310984536632975,
"loss": 1.1655,
"step": 730
},
{
"epoch": 3.259423503325942,
"grad_norm": 0.23265090584754944,
"learning_rate": 0.0003238144489559248,
"loss": 1.1772,
"step": 735
},
{
"epoch": 3.2815964523281598,
"grad_norm": 0.23318685591220856,
"learning_rate": 0.00031657289059347184,
"loss": 1.1607,
"step": 740
},
{
"epoch": 3.303769401330377,
"grad_norm": 0.23179373145103455,
"learning_rate": 0.00030937552320075114,
"loss": 1.1541,
"step": 745
},
{
"epoch": 3.3259423503325944,
"grad_norm": 0.22854293882846832,
"learning_rate": 0.0003022240807561569,
"loss": 1.1573,
"step": 750
},
{
"epoch": 3.3481152993348116,
"grad_norm": 0.24048267304897308,
"learning_rate": 0.0002951202861739173,
"loss": 1.134,
"step": 755
},
{
"epoch": 3.3702882483370287,
"grad_norm": 0.24298301339149475,
"learning_rate": 0.0002880658508890125,
"loss": 1.1615,
"step": 760
},
{
"epoch": 3.3924611973392462,
"grad_norm": 0.24603115022182465,
"learning_rate": 0.0002810624744448588,
"loss": 1.1458,
"step": 765
},
{
"epoch": 3.4146341463414633,
"grad_norm": 0.2374211847782135,
"learning_rate": 0.000274111844083857,
"loss": 1.1604,
"step": 770
},
{
"epoch": 3.436807095343681,
"grad_norm": 0.23238298296928406,
"learning_rate": 0.0002672156343409053,
"loss": 1.1525,
"step": 775
},
{
"epoch": 3.458980044345898,
"grad_norm": 0.24065588414669037,
"learning_rate": 0.00026037550663997176,
"loss": 1.1626,
"step": 780
},
{
"epoch": 3.481152993348115,
"grad_norm": 0.23232793807983398,
"learning_rate": 0.00025359310889382737,
"loss": 1.1567,
"step": 785
},
{
"epoch": 3.5033259423503327,
"grad_norm": 0.31952565908432007,
"learning_rate": 0.0002468700751070346,
"loss": 1.1441,
"step": 790
},
{
"epoch": 3.52549889135255,
"grad_norm": 0.25355201959609985,
"learning_rate": 0.00024020802498228333,
"loss": 1.1578,
"step": 795
},
{
"epoch": 3.5476718403547673,
"grad_norm": 0.23360273241996765,
"learning_rate": 0.00023360856353017617,
"loss": 1.1624,
"step": 800
},
{
"epoch": 3.5698447893569845,
"grad_norm": 0.24496670067310333,
"learning_rate": 0.00022707328068255166,
"loss": 1.1608,
"step": 805
},
{
"epoch": 3.5920177383592016,
"grad_norm": 0.23215603828430176,
"learning_rate": 0.00022060375090944025,
"loss": 1.1561,
"step": 810
},
{
"epoch": 3.614190687361419,
"grad_norm": 0.24374577403068542,
"learning_rate": 0.00021420153283974535,
"loss": 1.1582,
"step": 815
},
{
"epoch": 3.6363636363636362,
"grad_norm": 0.23796550929546356,
"learning_rate": 0.00020786816888574095,
"loss": 1.1714,
"step": 820
},
{
"epoch": 3.658536585365854,
"grad_norm": 0.33616843819618225,
"learning_rate": 0.00020160518487147579,
"loss": 1.155,
"step": 825
},
{
"epoch": 3.680709534368071,
"grad_norm": 0.23069876432418823,
"learning_rate": 0.00019541408966517566,
"loss": 1.1552,
"step": 830
},
{
"epoch": 3.7028824833702885,
"grad_norm": 0.23669025301933289,
"learning_rate": 0.00018929637481572713,
"loss": 1.1598,
"step": 835
},
{
"epoch": 3.7250554323725056,
"grad_norm": 0.23340646922588348,
"learning_rate": 0.0001832535141933373,
"loss": 1.1478,
"step": 840
},
{
"epoch": 3.7472283813747227,
"grad_norm": 0.24212823808193207,
"learning_rate": 0.00017728696363445117,
"loss": 1.1521,
"step": 845
},
{
"epoch": 3.7694013303769403,
"grad_norm": 0.243617445230484,
"learning_rate": 0.0001713981605910137,
"loss": 1.1534,
"step": 850
},
{
"epoch": 3.7915742793791574,
"grad_norm": 0.23238743841648102,
"learning_rate": 0.0001655885237841611,
"loss": 1.1595,
"step": 855
},
{
"epoch": 3.8137472283813745,
"grad_norm": 0.2364315241575241,
"learning_rate": 0.00015985945286242452,
"loss": 1.1499,
"step": 860
},
{
"epoch": 3.835920177383592,
"grad_norm": 0.23698101937770844,
"learning_rate": 0.00015421232806452916,
"loss": 1.1564,
"step": 865
},
{
"epoch": 3.858093126385809,
"grad_norm": 0.2341204732656479,
"learning_rate": 0.00014864850988687017,
"loss": 1.1575,
"step": 870
},
{
"epoch": 3.8802660753880267,
"grad_norm": 0.2593097984790802,
"learning_rate": 0.0001431693387557424,
"loss": 1.1644,
"step": 875
},
{
"epoch": 3.902439024390244,
"grad_norm": 0.24039191007614136,
"learning_rate": 0.0001377761347044079,
"loss": 1.1549,
"step": 880
},
{
"epoch": 3.9246119733924614,
"grad_norm": 0.23400068283081055,
"learning_rate": 0.00013247019705507596,
"loss": 1.1712,
"step": 885
},
{
"epoch": 3.9467849223946785,
"grad_norm": 0.23080703616142273,
"learning_rate": 0.00012725280410587166,
"loss": 1.1687,
"step": 890
},
{
"epoch": 3.9689578713968956,
"grad_norm": 0.23793019354343414,
"learning_rate": 0.00012212521282287093,
"loss": 1.1562,
"step": 895
},
{
"epoch": 3.991130820399113,
"grad_norm": 0.254978746175766,
"learning_rate": 0.00011708865853727369,
"loss": 1.1447,
"step": 900
},
{
"epoch": 4.0,
"eval_loss": 1.9029209613800049,
"eval_runtime": 0.3379,
"eval_samples_per_second": 2.959,
"eval_steps_per_second": 2.959,
"step": 902
},
{
"epoch": 4.013303769401331,
"grad_norm": 0.22599312663078308,
"learning_rate": 0.00011214435464779005,
"loss": 1.1088,
"step": 905
},
{
"epoch": 4.035476718403547,
"grad_norm": 0.26651766896247864,
"learning_rate": 0.00010729349232831092,
"loss": 1.0774,
"step": 910
},
{
"epoch": 4.057649667405765,
"grad_norm": 0.2449249029159546,
"learning_rate": 0.00010253724024093103,
"loss": 1.0788,
"step": 915
},
{
"epoch": 4.0798226164079825,
"grad_norm": 0.24662241339683533,
"learning_rate": 9.787674425439719e-05,
"loss": 1.0857,
"step": 920
},
{
"epoch": 4.101995565410199,
"grad_norm": 0.24599789083003998,
"learning_rate": 9.331312716804791e-05,
"loss": 1.0937,
"step": 925
},
{
"epoch": 4.124168514412417,
"grad_norm": 0.25388607382774353,
"learning_rate": 8.884748844130986e-05,
"loss": 1.0834,
"step": 930
},
{
"epoch": 4.146341463414634,
"grad_norm": 0.24923333525657654,
"learning_rate": 8.448090392881796e-05,
"loss": 1.0861,
"step": 935
},
{
"epoch": 4.168514412416852,
"grad_norm": 0.24948322772979736,
"learning_rate": 8.021442562122194e-05,
"loss": 1.0852,
"step": 940
},
{
"epoch": 4.1906873614190685,
"grad_norm": 0.25804150104522705,
"learning_rate": 7.604908139174255e-05,
"loss": 1.0998,
"step": 945
},
{
"epoch": 4.212860310421286,
"grad_norm": 0.2608419954776764,
"learning_rate": 7.198587474853863e-05,
"loss": 1.081,
"step": 950
},
{
"epoch": 4.235033259423504,
"grad_norm": 0.2649877965450287,
"learning_rate": 6.802578459294235e-05,
"loss": 1.0946,
"step": 955
},
{
"epoch": 4.25720620842572,
"grad_norm": 0.25245440006256104,
"learning_rate": 6.416976498362431e-05,
"loss": 1.0893,
"step": 960
},
{
"epoch": 4.279379157427938,
"grad_norm": 0.2566092908382416,
"learning_rate": 6.041874490674415e-05,
"loss": 1.0925,
"step": 965
},
{
"epoch": 4.301552106430155,
"grad_norm": 0.25820818543434143,
"learning_rate": 5.6773628052139036e-05,
"loss": 1.0999,
"step": 970
},
{
"epoch": 4.323725055432373,
"grad_norm": 0.24905657768249512,
"learning_rate": 5.3235292595609106e-05,
"loss": 1.0727,
"step": 975
},
{
"epoch": 4.34589800443459,
"grad_norm": 0.2517815828323364,
"learning_rate": 4.9804590987348854e-05,
"loss": 1.0929,
"step": 980
},
{
"epoch": 4.368070953436807,
"grad_norm": 0.25075092911720276,
"learning_rate": 4.648234974657578e-05,
"loss": 1.1038,
"step": 985
},
{
"epoch": 4.390243902439025,
"grad_norm": 0.25245392322540283,
"learning_rate": 4.326936926240682e-05,
"loss": 1.0721,
"step": 990
},
{
"epoch": 4.412416851441241,
"grad_norm": 0.2557605504989624,
"learning_rate": 4.0166423601029736e-05,
"loss": 1.0926,
"step": 995
},
{
"epoch": 4.434589800443459,
"grad_norm": 0.24609370529651642,
"learning_rate": 3.717426031921639e-05,
"loss": 1.0866,
"step": 1000
},
{
"epoch": 4.4567627494456765,
"grad_norm": 0.2574557363986969,
"learning_rate": 3.429360028422307e-05,
"loss": 1.0874,
"step": 1005
},
{
"epoch": 4.478935698447893,
"grad_norm": 0.256829172372818,
"learning_rate": 3.152513750011921e-05,
"loss": 1.092,
"step": 1010
},
{
"epoch": 4.501108647450111,
"grad_norm": 0.25330841541290283,
"learning_rate": 2.8869538940589802e-05,
"loss": 1.0949,
"step": 1015
},
{
"epoch": 4.523281596452328,
"grad_norm": 0.25692063570022583,
"learning_rate": 2.6327444388249076e-05,
"loss": 1.0946,
"step": 1020
},
{
"epoch": 4.545454545454545,
"grad_norm": 0.25190240144729614,
"learning_rate": 2.3899466280504933e-05,
"loss": 1.0901,
"step": 1025
},
{
"epoch": 4.5676274944567625,
"grad_norm": 0.25231993198394775,
"learning_rate": 2.158618956201158e-05,
"loss": 1.095,
"step": 1030
},
{
"epoch": 4.58980044345898,
"grad_norm": 0.25305166840553284,
"learning_rate": 1.9388171543745393e-05,
"loss": 1.0747,
"step": 1035
},
{
"epoch": 4.611973392461198,
"grad_norm": 0.2522623538970947,
"learning_rate": 1.730594176873851e-05,
"loss": 1.0927,
"step": 1040
},
{
"epoch": 4.634146341463414,
"grad_norm": 0.2547568678855896,
"learning_rate": 1.5340001884502576e-05,
"loss": 1.1009,
"step": 1045
},
{
"epoch": 4.656319290465632,
"grad_norm": 0.25178787112236023,
"learning_rate": 1.3490825522172012e-05,
"loss": 1.0879,
"step": 1050
},
{
"epoch": 4.678492239467849,
"grad_norm": 0.2568508982658386,
"learning_rate": 1.1758858182397692e-05,
"loss": 1.0887,
"step": 1055
},
{
"epoch": 4.700665188470067,
"grad_norm": 0.2606378197669983,
"learning_rate": 1.014451712801806e-05,
"loss": 1.0881,
"step": 1060
},
{
"epoch": 4.722838137472284,
"grad_norm": 0.24862989783287048,
"learning_rate": 8.648191283532336e-06,
"loss": 1.0887,
"step": 1065
},
{
"epoch": 4.745011086474501,
"grad_norm": 0.24630777537822723,
"learning_rate": 7.270241141401568e-06,
"loss": 1.0745,
"step": 1070
},
{
"epoch": 4.767184035476719,
"grad_norm": 0.26346486806869507,
"learning_rate": 6.010998675199553e-06,
"loss": 1.1047,
"step": 1075
},
{
"epoch": 4.789356984478935,
"grad_norm": 0.25308722257614136,
"learning_rate": 4.870767259633868e-06,
"loss": 1.0981,
"step": 1080
},
{
"epoch": 4.811529933481153,
"grad_norm": 0.2649281322956085,
"learning_rate": 3.849821597457892e-06,
"loss": 1.0827,
"step": 1085
},
{
"epoch": 4.8337028824833705,
"grad_norm": 0.2485639750957489,
"learning_rate": 2.948407653289409e-06,
"loss": 1.0849,
"step": 1090
},
{
"epoch": 4.855875831485587,
"grad_norm": 0.25217127799987793,
"learning_rate": 2.166742594353288e-06,
"loss": 1.0861,
"step": 1095
},
{
"epoch": 4.878048780487805,
"grad_norm": 0.2509874403476715,
"learning_rate": 1.5050147381619473e-06,
"loss": 1.1002,
"step": 1100
},
{
"epoch": 4.900221729490022,
"grad_norm": 0.2537370026111603,
"learning_rate": 9.633835071463092e-07,
"loss": 1.0815,
"step": 1105
},
{
"epoch": 4.922394678492239,
"grad_norm": 0.25824105739593506,
"learning_rate": 5.419793902477488e-07,
"loss": 1.085,
"step": 1110
},
{
"epoch": 4.9445676274944566,
"grad_norm": 0.2534651756286621,
"learning_rate": 2.4090391148112736e-07,
"loss": 1.0952,
"step": 1115
},
{
"epoch": 4.966740576496674,
"grad_norm": 0.25321847200393677,
"learning_rate": 6.022960547563683e-08,
"loss": 1.0899,
"step": 1120
},
{
"epoch": 4.988913525498892,
"grad_norm": 0.2509351968765259,
"learning_rate": 0.0,
"loss": 1.098,
"step": 1125
},
{
"epoch": 4.988913525498892,
"eval_loss": 1.9855170249938965,
"eval_runtime": 0.3384,
"eval_samples_per_second": 2.955,
"eval_steps_per_second": 2.955,
"step": 1125
},
{
"epoch": 4.988913525498892,
"step": 1125,
"total_flos": 1.6629843858229821e+18,
"train_loss": 1.2764637470245361,
"train_runtime": 3608.211,
"train_samples_per_second": 9.984,
"train_steps_per_second": 0.312
}
],
"logging_steps": 5,
"max_steps": 1125,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6629843858229821e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}