{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.09291521486643438,
  "eval_steps": 34,
  "global_step": 190,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004890274466654441,
      "grad_norm": 0.42223185300827026,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.2369,
      "step": 1
    },
    {
      "epoch": 0.0004890274466654441,
      "eval_loss": 1.3036277294158936,
      "eval_runtime": 1355.2246,
      "eval_samples_per_second": 1.906,
      "eval_steps_per_second": 0.635,
      "step": 1
    },
    {
      "epoch": 0.0009780548933308881,
      "grad_norm": 0.49382174015045166,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.3544,
      "step": 2
    },
    {
      "epoch": 0.0014670823399963323,
      "grad_norm": 0.43727555871009827,
      "learning_rate": 2e-05,
      "loss": 1.403,
      "step": 3
    },
    {
      "epoch": 0.0019561097866617762,
      "grad_norm": 0.3653078079223633,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 1.189,
      "step": 4
    },
    {
      "epoch": 0.0024451372333272204,
      "grad_norm": 0.3689245283603668,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.1686,
      "step": 5
    },
    {
      "epoch": 0.0029341646799926646,
      "grad_norm": 0.44767728447914124,
      "learning_rate": 4e-05,
      "loss": 1.2578,
      "step": 6
    },
    {
      "epoch": 0.0034231921266581087,
      "grad_norm": 0.4529785215854645,
      "learning_rate": 4.666666666666667e-05,
      "loss": 1.2452,
      "step": 7
    },
    {
      "epoch": 0.0039122195733235525,
      "grad_norm": 0.6146576404571533,
      "learning_rate": 5.333333333333333e-05,
      "loss": 1.5423,
      "step": 8
    },
    {
      "epoch": 0.004401247019988997,
      "grad_norm": 0.42456191778182983,
      "learning_rate": 6e-05,
      "loss": 1.1323,
      "step": 9
    },
    {
      "epoch": 0.004890274466654441,
      "grad_norm": 0.4658707082271576,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.2595,
      "step": 10
    },
    {
      "epoch": 0.005379301913319885,
      "grad_norm": 0.6462928652763367,
      "learning_rate": 7.333333333333333e-05,
      "loss": 1.3136,
      "step": 11
    },
    {
      "epoch": 0.005868329359985329,
      "grad_norm": 0.514143168926239,
      "learning_rate": 8e-05,
      "loss": 1.1999,
      "step": 12
    },
    {
      "epoch": 0.006357356806650773,
      "grad_norm": 0.43792784214019775,
      "learning_rate": 8.666666666666667e-05,
      "loss": 1.0675,
      "step": 13
    },
    {
      "epoch": 0.0068463842533162175,
      "grad_norm": 0.6212848424911499,
      "learning_rate": 9.333333333333334e-05,
      "loss": 1.3066,
      "step": 14
    },
    {
      "epoch": 0.007335411699981661,
      "grad_norm": 0.6186118125915527,
      "learning_rate": 0.0001,
      "loss": 1.2355,
      "step": 15
    },
    {
      "epoch": 0.007824439146647105,
      "grad_norm": 0.6825079917907715,
      "learning_rate": 0.00010666666666666667,
      "loss": 1.2258,
      "step": 16
    },
    {
      "epoch": 0.00831346659331255,
      "grad_norm": 0.3936300575733185,
      "learning_rate": 0.00011333333333333334,
      "loss": 1.1337,
      "step": 17
    },
    {
      "epoch": 0.008802494039977994,
      "grad_norm": 0.5038918852806091,
      "learning_rate": 0.00012,
      "loss": 1.1278,
      "step": 18
    },
    {
      "epoch": 0.009291521486643438,
      "grad_norm": 0.5894134640693665,
      "learning_rate": 0.00012666666666666666,
      "loss": 1.1902,
      "step": 19
    },
    {
      "epoch": 0.009780548933308882,
      "grad_norm": 1.1452070474624634,
      "learning_rate": 0.00013333333333333334,
      "loss": 1.1023,
      "step": 20
    },
    {
      "epoch": 0.010269576379974325,
      "grad_norm": 0.4289817214012146,
      "learning_rate": 0.00014,
      "loss": 1.0195,
      "step": 21
    },
    {
      "epoch": 0.01075860382663977,
      "grad_norm": 1.290226936340332,
      "learning_rate": 0.00014666666666666666,
      "loss": 0.9534,
      "step": 22
    },
    {
      "epoch": 0.011247631273305215,
      "grad_norm": 0.5022335648536682,
      "learning_rate": 0.00015333333333333334,
      "loss": 0.8952,
      "step": 23
    },
    {
      "epoch": 0.011736658719970658,
      "grad_norm": 0.5421992540359497,
      "learning_rate": 0.00016,
      "loss": 0.8553,
      "step": 24
    },
    {
      "epoch": 0.012225686166636102,
      "grad_norm": 1.568458080291748,
      "learning_rate": 0.0001666666666666667,
      "loss": 0.9764,
      "step": 25
    },
    {
      "epoch": 0.012714713613301546,
      "grad_norm": 0.6190416812896729,
      "learning_rate": 0.00017333333333333334,
      "loss": 0.9819,
      "step": 26
    },
    {
      "epoch": 0.013203741059966991,
      "grad_norm": 0.4747830927371979,
      "learning_rate": 0.00018,
      "loss": 0.9395,
      "step": 27
    },
    {
      "epoch": 0.013692768506632435,
      "grad_norm": 0.9486120343208313,
      "learning_rate": 0.0001866666666666667,
      "loss": 0.9669,
      "step": 28
    },
    {
      "epoch": 0.014181795953297879,
      "grad_norm": 0.4615766108036041,
      "learning_rate": 0.00019333333333333333,
      "loss": 0.9341,
      "step": 29
    },
    {
      "epoch": 0.014670823399963322,
      "grad_norm": 0.46146905422210693,
      "learning_rate": 0.0002,
      "loss": 1.0082,
      "step": 30
    },
    {
      "epoch": 0.015159850846628768,
      "grad_norm": 0.47625860571861267,
      "learning_rate": 0.00019998292504580528,
      "loss": 0.9419,
      "step": 31
    },
    {
      "epoch": 0.01564887829329421,
      "grad_norm": 0.5290003418922424,
      "learning_rate": 0.0001999317060143023,
      "loss": 1.1635,
      "step": 32
    },
    {
      "epoch": 0.016137905739959654,
      "grad_norm": 0.5592769980430603,
      "learning_rate": 0.0001998463603967434,
      "loss": 0.9437,
      "step": 33
    },
    {
      "epoch": 0.0166269331866251,
      "grad_norm": 0.3583241403102875,
      "learning_rate": 0.00019972691733857883,
      "loss": 1.1428,
      "step": 34
    },
    {
      "epoch": 0.0166269331866251,
      "eval_loss": 1.0231887102127075,
      "eval_runtime": 1359.3297,
      "eval_samples_per_second": 1.9,
      "eval_steps_per_second": 0.633,
      "step": 34
    },
    {
      "epoch": 0.017115960633290545,
      "grad_norm": 0.41851457953453064,
      "learning_rate": 0.00019957341762950344,
      "loss": 1.0976,
      "step": 35
    },
    {
      "epoch": 0.017604988079955988,
      "grad_norm": 0.46739837527275085,
      "learning_rate": 0.0001993859136895274,
      "loss": 1.0089,
      "step": 36
    },
    {
      "epoch": 0.018094015526621432,
      "grad_norm": 0.3970998227596283,
      "learning_rate": 0.00019916446955107428,
      "loss": 1.1005,
      "step": 37
    },
    {
      "epoch": 0.018583042973286876,
      "grad_norm": 0.5996494293212891,
      "learning_rate": 0.0001989091608371146,
      "loss": 0.9741,
      "step": 38
    },
    {
      "epoch": 0.01907207041995232,
      "grad_norm": 0.27929016947746277,
      "learning_rate": 0.00019862007473534025,
      "loss": 0.9607,
      "step": 39
    },
    {
      "epoch": 0.019561097866617763,
      "grad_norm": 0.33169642090797424,
      "learning_rate": 0.0001982973099683902,
      "loss": 1.1173,
      "step": 40
    },
    {
      "epoch": 0.020050125313283207,
      "grad_norm": 0.3042043149471283,
      "learning_rate": 0.0001979409767601366,
      "loss": 1.1178,
      "step": 41
    },
    {
      "epoch": 0.02053915275994865,
      "grad_norm": 0.3394615650177002,
      "learning_rate": 0.00019755119679804367,
      "loss": 1.0167,
      "step": 42
    },
    {
      "epoch": 0.021028180206614098,
      "grad_norm": 0.3241017758846283,
      "learning_rate": 0.0001971281031916114,
      "loss": 1.0089,
      "step": 43
    },
    {
      "epoch": 0.02151720765327954,
      "grad_norm": 0.6870384812355042,
      "learning_rate": 0.00019667184042691875,
      "loss": 1.0362,
      "step": 44
    },
    {
      "epoch": 0.022006235099944985,
      "grad_norm": 0.31295979022979736,
      "learning_rate": 0.00019618256431728194,
      "loss": 1.0032,
      "step": 45
    },
    {
      "epoch": 0.02249526254661043,
      "grad_norm": 0.32893508672714233,
      "learning_rate": 0.0001956604419500441,
      "loss": 1.0651,
      "step": 46
    },
    {
      "epoch": 0.022984289993275873,
      "grad_norm": 0.28745728731155396,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.9031,
      "step": 47
    },
    {
      "epoch": 0.023473317439941317,
      "grad_norm": 0.558315098285675,
      "learning_rate": 0.00019451838281608197,
      "loss": 1.0545,
      "step": 48
    },
    {
      "epoch": 0.02396234488660676,
      "grad_norm": 0.5743781924247742,
      "learning_rate": 0.00019389883606150566,
      "loss": 1.0951,
      "step": 49
    },
    {
      "epoch": 0.024451372333272204,
      "grad_norm": 0.41075870394706726,
      "learning_rate": 0.00019324722294043558,
      "loss": 1.0124,
      "step": 50
    },
    {
      "epoch": 0.024940399779937648,
      "grad_norm": 0.2487930953502655,
      "learning_rate": 0.00019256376597815564,
      "loss": 0.9894,
      "step": 51
    },
    {
      "epoch": 0.02542942722660309,
      "grad_norm": 0.39657795429229736,
      "learning_rate": 0.00019184869857459232,
      "loss": 1.138,
      "step": 52
    },
    {
      "epoch": 0.02591845467326854,
      "grad_norm": 0.5362446308135986,
      "learning_rate": 0.00019110226492460885,
      "loss": 1.0342,
      "step": 53
    },
    {
      "epoch": 0.026407482119933982,
      "grad_norm": 0.5089200139045715,
      "learning_rate": 0.0001903247199346129,
      "loss": 1.1193,
      "step": 54
    },
    {
      "epoch": 0.026896509566599426,
      "grad_norm": 0.28259485960006714,
      "learning_rate": 0.00018951632913550626,
      "loss": 0.9054,
      "step": 55
    },
    {
      "epoch": 0.02738553701326487,
      "grad_norm": 0.3576945662498474,
      "learning_rate": 0.0001886773685920062,
      "loss": 1.0002,
      "step": 56
    },
    {
      "epoch": 0.027874564459930314,
      "grad_norm": 0.2915222942829132,
      "learning_rate": 0.0001878081248083698,
      "loss": 0.9491,
      "step": 57
    },
    {
      "epoch": 0.028363591906595757,
      "grad_norm": 0.296265572309494,
      "learning_rate": 0.00018690889463055283,
      "loss": 1.0483,
      "step": 58
    },
    {
      "epoch": 0.0288526193532612,
      "grad_norm": 0.3489420413970947,
      "learning_rate": 0.00018597998514483725,
      "loss": 0.9064,
      "step": 59
    },
    {
      "epoch": 0.029341646799926645,
      "grad_norm": 0.37772810459136963,
      "learning_rate": 0.00018502171357296144,
      "loss": 0.9779,
      "step": 60
    },
    {
      "epoch": 0.02983067424659209,
      "grad_norm": 0.5371522903442383,
      "learning_rate": 0.00018403440716378928,
      "loss": 1.0268,
      "step": 61
    },
    {
      "epoch": 0.030319701693257536,
      "grad_norm": 0.3227771520614624,
      "learning_rate": 0.00018301840308155507,
      "loss": 1.0642,
      "step": 62
    },
    {
      "epoch": 0.03080872913992298,
      "grad_norm": 0.4422720670700073,
      "learning_rate": 0.00018197404829072215,
      "loss": 1.1083,
      "step": 63
    },
    {
      "epoch": 0.03129775658658842,
      "grad_norm": 0.5397651791572571,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.2271,
      "step": 64
    },
    {
      "epoch": 0.03178678403325386,
      "grad_norm": 0.4943908452987671,
      "learning_rate": 0.000179801722728024,
      "loss": 0.9873,
      "step": 65
    },
    {
      "epoch": 0.03227581147991931,
      "grad_norm": 0.31046292185783386,
      "learning_rate": 0.00017867449380334834,
      "loss": 0.9235,
      "step": 66
    },
    {
      "epoch": 0.03276483892658476,
      "grad_norm": 0.4786432981491089,
      "learning_rate": 0.00017752039761111297,
      "loss": 1.0804,
      "step": 67
    },
    {
      "epoch": 0.0332538663732502,
      "grad_norm": 0.38078072667121887,
      "learning_rate": 0.00017633982827411032,
      "loss": 1.0508,
      "step": 68
    },
    {
      "epoch": 0.0332538663732502,
      "eval_loss": 1.0058449506759644,
      "eval_runtime": 1360.1032,
      "eval_samples_per_second": 1.899,
      "eval_steps_per_second": 0.633,
      "step": 68
    },
    {
      "epoch": 0.033742893819915645,
      "grad_norm": 0.302775114774704,
      "learning_rate": 0.00017513318895568737,
      "loss": 1.0186,
      "step": 69
    },
    {
      "epoch": 0.03423192126658109,
      "grad_norm": 0.7890788316726685,
      "learning_rate": 0.00017390089172206592,
      "loss": 0.9441,
      "step": 70
    },
    {
      "epoch": 0.03472094871324653,
      "grad_norm": 0.3507513999938965,
      "learning_rate": 0.00017264335740162242,
      "loss": 0.9604,
      "step": 71
    },
    {
      "epoch": 0.035209976159911976,
      "grad_norm": 0.39182308316230774,
      "learning_rate": 0.00017136101544117525,
      "loss": 1.0598,
      "step": 72
    },
    {
      "epoch": 0.03569900360657742,
      "grad_norm": 0.3877043128013611,
      "learning_rate": 0.0001700543037593291,
      "loss": 0.8719,
      "step": 73
    },
    {
      "epoch": 0.036188031053242864,
      "grad_norm": 0.6628935933113098,
      "learning_rate": 0.00016872366859692627,
      "loss": 0.9241,
      "step": 74
    },
    {
      "epoch": 0.03667705849990831,
      "grad_norm": 0.45674100518226624,
      "learning_rate": 0.00016736956436465573,
      "loss": 1.2833,
      "step": 75
    },
    {
      "epoch": 0.03716608594657375,
      "grad_norm": 0.5053292512893677,
      "learning_rate": 0.0001659924534878723,
      "loss": 0.9678,
      "step": 76
    },
    {
      "epoch": 0.037655113393239195,
      "grad_norm": 0.31924471259117126,
      "learning_rate": 0.00016459280624867874,
      "loss": 0.9702,
      "step": 77
    },
    {
      "epoch": 0.03814414083990464,
      "grad_norm": 0.37664365768432617,
      "learning_rate": 0.0001631711006253251,
      "loss": 0.8382,
      "step": 78
    },
    {
      "epoch": 0.03863316828657008,
      "grad_norm": 0.3583241403102875,
      "learning_rate": 0.0001617278221289793,
      "loss": 1.0149,
      "step": 79
    },
    {
      "epoch": 0.039122195733235526,
      "grad_norm": 0.3891322910785675,
      "learning_rate": 0.00016026346363792567,
      "loss": 1.2247,
      "step": 80
    },
    {
      "epoch": 0.03961122317990097,
      "grad_norm": 0.32489892840385437,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.9108,
      "step": 81
    },
    {
      "epoch": 0.040100250626566414,
      "grad_norm": 0.3395017981529236,
      "learning_rate": 0.00015727351400805052,
      "loss": 0.9968,
      "step": 82
    },
    {
      "epoch": 0.04058927807323186,
      "grad_norm": 0.34815898537635803,
      "learning_rate": 0.00015574894393428855,
      "loss": 0.967,
      "step": 83
    },
    {
      "epoch": 0.0410783055198973,
      "grad_norm": 0.4931004047393799,
      "learning_rate": 0.00015420533564724495,
      "loss": 1.0232,
      "step": 84
    },
    {
      "epoch": 0.041567332966562745,
      "grad_norm": 0.45658788084983826,
      "learning_rate": 0.0001526432162877356,
      "loss": 1.0845,
      "step": 85
    },
    {
      "epoch": 0.042056360413228196,
      "grad_norm": 0.2645609378814697,
      "learning_rate": 0.0001510631193180907,
      "loss": 0.8387,
      "step": 86
    },
    {
      "epoch": 0.04254538785989364,
      "grad_norm": 0.2979009747505188,
      "learning_rate": 0.0001494655843399779,
      "loss": 1.0789,
      "step": 87
    },
    {
      "epoch": 0.04303441530655908,
      "grad_norm": 0.3363690674304962,
      "learning_rate": 0.00014785115691012864,
      "loss": 0.9624,
      "step": 88
    },
    {
      "epoch": 0.04352344275322453,
      "grad_norm": 0.700744092464447,
      "learning_rate": 0.00014622038835403133,
      "loss": 1.0248,
      "step": 89
    },
    {
      "epoch": 0.04401247019988997,
      "grad_norm": 0.4093436300754547,
      "learning_rate": 0.00014457383557765386,
      "loss": 0.993,
      "step": 90
    },
    {
      "epoch": 0.044501497646555414,
      "grad_norm": 0.39897996187210083,
      "learning_rate": 0.0001429120608772609,
      "loss": 1.0264,
      "step": 91
    },
    {
      "epoch": 0.04499052509322086,
      "grad_norm": 0.2525433897972107,
      "learning_rate": 0.00014123563174739037,
      "loss": 1.0285,
      "step": 92
    },
    {
      "epoch": 0.0454795525398863,
      "grad_norm": 0.2693867087364197,
      "learning_rate": 0.00013954512068705424,
      "loss": 0.9909,
      "step": 93
    },
    {
      "epoch": 0.045968579986551746,
      "grad_norm": 0.4188447594642639,
      "learning_rate": 0.00013784110500423104,
      "loss": 0.9365,
      "step": 94
    },
    {
      "epoch": 0.04645760743321719,
      "grad_norm": 0.3923894762992859,
      "learning_rate": 0.00013612416661871533,
      "loss": 1.187,
      "step": 95
    },
    {
      "epoch": 0.04694663487988263,
      "grad_norm": 0.4264492392539978,
      "learning_rate": 0.00013439489186339282,
      "loss": 1.1103,
      "step": 96
    },
    {
      "epoch": 0.04743566232654808,
      "grad_norm": 0.40396761894226074,
      "learning_rate": 0.0001326538712840083,
      "loss": 1.0,
      "step": 97
    },
    {
      "epoch": 0.04792468977321352,
      "grad_norm": 0.30181634426116943,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.9821,
      "step": 98
    },
    {
      "epoch": 0.048413717219878964,
      "grad_norm": 0.37075236439704895,
      "learning_rate": 0.00012913897468893248,
      "loss": 1.0607,
      "step": 99
    },
    {
      "epoch": 0.04890274466654441,
      "grad_norm": 0.39950835704803467,
      "learning_rate": 0.0001273662990072083,
      "loss": 1.011,
      "step": 100
    },
    {
      "epoch": 0.04939177211320985,
      "grad_norm": 0.3369726240634918,
      "learning_rate": 0.00012558427775944357,
      "loss": 0.9987,
      "step": 101
    },
    {
      "epoch": 0.049880799559875295,
      "grad_norm": 0.33174750208854675,
      "learning_rate": 0.00012379351950426187,
      "loss": 0.9603,
      "step": 102
    },
    {
      "epoch": 0.049880799559875295,
      "eval_loss": 0.9992830753326416,
      "eval_runtime": 1359.1213,
      "eval_samples_per_second": 1.9,
      "eval_steps_per_second": 0.633,
      "step": 102
    },
    {
      "epoch": 0.05036982700654074,
      "grad_norm": 0.4101943373680115,
      "learning_rate": 0.00012199463578396688,
      "loss": 0.931,
      "step": 103
    },
    {
      "epoch": 0.05085885445320618,
      "grad_norm": 0.39311182498931885,
      "learning_rate": 0.00012018824091570103,
      "loss": 1.1224,
      "step": 104
    },
    {
      "epoch": 0.051347881899871634,
      "grad_norm": 0.3011833429336548,
      "learning_rate": 0.00011837495178165706,
      "loss": 1.0653,
      "step": 105
    },
    {
      "epoch": 0.05183690934653708,
      "grad_norm": 0.28122061491012573,
      "learning_rate": 0.000116555387618413,
      "loss": 1.0055,
      "step": 106
    },
    {
      "epoch": 0.05232593679320252,
      "grad_norm": 0.5028234720230103,
      "learning_rate": 0.00011473016980546377,
      "loss": 0.9866,
      "step": 107
    },
    {
      "epoch": 0.052814964239867965,
      "grad_norm": 0.3072775602340698,
      "learning_rate": 0.00011289992165302035,
      "loss": 1.1557,
      "step": 108
    },
    {
      "epoch": 0.05330399168653341,
      "grad_norm": 0.2929151654243469,
      "learning_rate": 0.00011106526818915008,
      "loss": 1.04,
      "step": 109
    },
    {
      "epoch": 0.05379301913319885,
      "grad_norm": 0.35853561758995056,
      "learning_rate": 0.00010922683594633021,
      "loss": 1.0056,
      "step": 110
    },
    {
      "epoch": 0.054282046579864296,
      "grad_norm": 0.2755361795425415,
      "learning_rate": 0.00010738525274748741,
      "loss": 0.9208,
      "step": 111
    },
    {
      "epoch": 0.05477107402652974,
      "grad_norm": 0.4574558734893799,
      "learning_rate": 0.000105541147491597,
      "loss": 0.8838,
      "step": 112
    },
    {
      "epoch": 0.05526010147319518,
      "grad_norm": 0.4807765781879425,
      "learning_rate": 0.00010369514993891452,
      "loss": 0.8996,
      "step": 113
    },
    {
      "epoch": 0.05574912891986063,
      "grad_norm": 0.3702511489391327,
      "learning_rate": 0.00010184789049591299,
      "loss": 0.9956,
      "step": 114
    },
    {
      "epoch": 0.05623815636652607,
      "grad_norm": 0.6011176705360413,
      "learning_rate": 0.0001,
      "loss": 1.077,
      "step": 115
    },
    {
      "epoch": 0.056727183813191515,
      "grad_norm": 0.2759286165237427,
      "learning_rate": 9.815210950408704e-05,
      "loss": 1.0208,
      "step": 116
    },
    {
      "epoch": 0.05721621125985696,
      "grad_norm": 0.35885995626449585,
      "learning_rate": 9.630485006108553e-05,
      "loss": 0.8758,
      "step": 117
    },
    {
      "epoch": 0.0577052387065224,
      "grad_norm": 0.3405105471611023,
      "learning_rate": 9.4458852508403e-05,
      "loss": 1.012,
      "step": 118
    },
    {
      "epoch": 0.058194266153187846,
      "grad_norm": 0.3069765865802765,
      "learning_rate": 9.261474725251261e-05,
      "loss": 1.0491,
      "step": 119
    },
    {
      "epoch": 0.05868329359985329,
      "grad_norm": 0.4522247016429901,
      "learning_rate": 9.077316405366981e-05,
      "loss": 0.9517,
      "step": 120
    },
    {
      "epoch": 0.05917232104651873,
      "grad_norm": 0.5270655751228333,
      "learning_rate": 8.893473181084994e-05,
      "loss": 1.0467,
      "step": 121
    },
    {
      "epoch": 0.05966134849318418,
      "grad_norm": 0.46659836173057556,
      "learning_rate": 8.710007834697969e-05,
      "loss": 0.924,
      "step": 122
    },
    {
      "epoch": 0.06015037593984962,
      "grad_norm": 0.3675633668899536,
      "learning_rate": 8.526983019453623e-05,
      "loss": 1.0624,
      "step": 123
    },
    {
      "epoch": 0.06063940338651507,
      "grad_norm": 0.38594844937324524,
      "learning_rate": 8.344461238158699e-05,
      "loss": 0.9479,
      "step": 124
    },
    {
      "epoch": 0.061128430833180515,
      "grad_norm": 0.34377649426460266,
      "learning_rate": 8.162504821834295e-05,
      "loss": 1.0532,
      "step": 125
    },
    {
      "epoch": 0.06161745827984596,
      "grad_norm": 0.39318689703941345,
      "learning_rate": 7.9811759084299e-05,
      "loss": 1.06,
      "step": 126
    },
    {
      "epoch": 0.0621064857265114,
      "grad_norm": 0.386799693107605,
      "learning_rate": 7.800536421603317e-05,
      "loss": 1.0903,
      "step": 127
    },
    {
      "epoch": 0.06259551317317684,
      "grad_norm": 0.4709027409553528,
      "learning_rate": 7.620648049573815e-05,
      "loss": 0.9893,
      "step": 128
    },
    {
      "epoch": 0.06308454061984228,
      "grad_norm": 0.38078197836875916,
      "learning_rate": 7.441572224055644e-05,
      "loss": 1.0638,
      "step": 129
    },
    {
      "epoch": 0.06357356806650773,
      "grad_norm": 0.2914995849132538,
      "learning_rate": 7.263370099279172e-05,
      "loss": 1.0144,
      "step": 130
    },
    {
      "epoch": 0.06406259551317317,
      "grad_norm": 0.48772111535072327,
      "learning_rate": 7.086102531106754e-05,
      "loss": 1.055,
      "step": 131
    },
    {
      "epoch": 0.06455162295983861,
      "grad_norm": 0.37441909313201904,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.948,
      "step": 132
    },
    {
      "epoch": 0.06504065040650407,
      "grad_norm": 0.4598063826560974,
      "learning_rate": 6.734612871599168e-05,
      "loss": 1.0565,
      "step": 133
    },
    {
      "epoch": 0.06552967785316952,
      "grad_norm": 0.34052199125289917,
      "learning_rate": 6.560510813660719e-05,
      "loss": 0.9724,
      "step": 134
    },
    {
      "epoch": 0.06601870529983496,
      "grad_norm": 0.4426654279232025,
      "learning_rate": 6.387583338128471e-05,
      "loss": 1.1751,
      "step": 135
    },
    {
      "epoch": 0.0665077327465004,
      "grad_norm": 0.3435130715370178,
      "learning_rate": 6.215889499576898e-05,
      "loss": 1.0164,
      "step": 136
    },
    {
      "epoch": 0.0665077327465004,
      "eval_loss": 0.9951372146606445,
      "eval_runtime": 1359.5911,
      "eval_samples_per_second": 1.9,
      "eval_steps_per_second": 0.633,
      "step": 136
    },
    {
      "epoch": 0.06699676019316585,
      "grad_norm": 0.2642524540424347,
      "learning_rate": 6.0454879312945754e-05,
      "loss": 0.9332,
      "step": 137
    },
    {
      "epoch": 0.06748578763983129,
      "grad_norm": 0.30863985419273376,
      "learning_rate": 5.876436825260967e-05,
      "loss": 1.0283,
      "step": 138
    },
    {
      "epoch": 0.06797481508649673,
      "grad_norm": 0.5168886780738831,
      "learning_rate": 5.708793912273911e-05,
      "loss": 0.6723,
      "step": 139
    },
    {
      "epoch": 0.06846384253316218,
      "grad_norm": 0.4818655848503113,
      "learning_rate": 5.542616442234618e-05,
      "loss": 1.1226,
      "step": 140
    },
    {
      "epoch": 0.06895286997982762,
      "grad_norm": 0.3270304501056671,
      "learning_rate": 5.37796116459687e-05,
      "loss": 0.818,
      "step": 141
    },
    {
      "epoch": 0.06944189742649307,
      "grad_norm": 0.44965583086013794,
      "learning_rate": 5.214884308987136e-05,
      "loss": 0.865,
      "step": 142
    },
    {
      "epoch": 0.06993092487315851,
      "grad_norm": 0.25816816091537476,
      "learning_rate": 5.0534415660022136e-05,
      "loss": 0.9777,
      "step": 143
    },
    {
      "epoch": 0.07041995231982395,
      "grad_norm": 0.293589323759079,
      "learning_rate": 4.893688068190932e-05,
      "loss": 1.0442,
      "step": 144
    },
    {
      "epoch": 0.0709089797664894,
      "grad_norm": 0.4365968704223633,
      "learning_rate": 4.735678371226441e-05,
      "loss": 0.8951,
      "step": 145
    },
    {
      "epoch": 0.07139800721315484,
      "grad_norm": 0.4603347182273865,
      "learning_rate": 4.5794664352755055e-05,
      "loss": 1.0377,
      "step": 146
    },
    {
      "epoch": 0.07188703465982028,
      "grad_norm": 0.28372839093208313,
      "learning_rate": 4.425105606571145e-05,
      "loss": 1.008,
      "step": 147
    },
    {
      "epoch": 0.07237606210648573,
      "grad_norm": 0.29044821858406067,
      "learning_rate": 4.272648599194948e-05,
      "loss": 1.0118,
      "step": 148
    },
    {
      "epoch": 0.07286508955315117,
      "grad_norm": 0.3254542350769043,
      "learning_rate": 4.12214747707527e-05,
      "loss": 1.0088,
      "step": 149
    },
    {
      "epoch": 0.07335411699981662,
      "grad_norm": 0.3154352605342865,
      "learning_rate": 3.973653636207437e-05,
      "loss": 1.0273,
      "step": 150
    },
    {
      "epoch": 0.07384314444648206,
      "grad_norm": 0.3300507664680481,
      "learning_rate": 3.8272177871020723e-05,
      "loss": 1.0187,
      "step": 151
    },
    {
      "epoch": 0.0743321718931475,
      "grad_norm": 0.4003622531890869,
      "learning_rate": 3.682889937467493e-05,
      "loss": 0.9066,
      "step": 152
    },
    {
      "epoch": 0.07482119933981295,
      "grad_norm": 0.3900095522403717,
      "learning_rate": 3.540719375132129e-05,
      "loss": 0.866,
      "step": 153
    },
    {
      "epoch": 0.07531022678647839,
      "grad_norm": 0.54018634557724,
      "learning_rate": 3.400754651212776e-05,
      "loss": 0.981,
      "step": 154
    },
    {
      "epoch": 0.07579925423314383,
      "grad_norm": 0.3802890479564667,
      "learning_rate": 3.263043563534428e-05,
      "loss": 1.0331,
      "step": 155
    },
    {
      "epoch": 0.07628828167980928,
      "grad_norm": 0.40078866481781006,
      "learning_rate": 3.1276331403073735e-05,
      "loss": 0.9615,
      "step": 156
    },
    {
      "epoch": 0.07677730912647472,
      "grad_norm": 0.6274716258049011,
      "learning_rate": 2.9945696240670906e-05,
      "loss": 1.0867,
      "step": 157
    },
    {
      "epoch": 0.07726633657314017,
      "grad_norm": 0.3220311403274536,
      "learning_rate": 2.8638984558824777e-05,
      "loss": 0.9813,
      "step": 158
    },
    {
      "epoch": 0.07775536401980561,
      "grad_norm": 0.3146653473377228,
      "learning_rate": 2.7356642598377603e-05,
      "loss": 0.9991,
      "step": 159
    },
    {
      "epoch": 0.07824439146647105,
      "grad_norm": 0.22245855629444122,
      "learning_rate": 2.6099108277934103e-05,
      "loss": 0.9811,
      "step": 160
    },
    {
      "epoch": 0.0787334189131365,
      "grad_norm": 0.36206722259521484,
      "learning_rate": 2.4866811044312665e-05,
      "loss": 1.1474,
      "step": 161
    },
    {
      "epoch": 0.07922244635980194,
      "grad_norm": 0.38807740807533264,
      "learning_rate": 2.36601717258897e-05,
      "loss": 0.9162,
      "step": 162
    },
    {
      "epoch": 0.07971147380646738,
      "grad_norm": 0.36179038882255554,
      "learning_rate": 2.2479602388887012e-05,
      "loss": 0.8902,
      "step": 163
    },
    {
      "epoch": 0.08020050125313283,
      "grad_norm": 0.40321770310401917,
      "learning_rate": 2.132550619665168e-05,
      "loss": 0.8454,
      "step": 164
    },
    {
      "epoch": 0.08068952869979827,
      "grad_norm": 0.3098496198654175,
      "learning_rate": 2.0198277271976052e-05,
      "loss": 1.0062,
      "step": 165
    },
    {
      "epoch": 0.08117855614646372,
      "grad_norm": 0.45435985922813416,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.8418,
      "step": 166
    },
    {
      "epoch": 0.08166758359312916,
      "grad_norm": 0.37857669591903687,
      "learning_rate": 1.8025951709277898e-05,
      "loss": 1.0043,
      "step": 167
    },
    {
      "epoch": 0.0821566110397946,
      "grad_norm": 0.3529188930988312,
      "learning_rate": 1.6981596918444953e-05,
      "loss": 1.1446,
      "step": 168
    },
    {
      "epoch": 0.08264563848646005,
      "grad_norm": 0.40138813853263855,
      "learning_rate": 1.5965592836210743e-05,
      "loss": 1.0077,
      "step": 169
    },
    {
      "epoch": 0.08313466593312549,
      "grad_norm": 0.3948013186454773,
      "learning_rate": 1.4978286427038601e-05,
      "loss": 0.843,
      "step": 170
    },
    {
      "epoch": 0.08313466593312549,
      "eval_loss": 0.9932751059532166,
      "eval_runtime": 1360.6949,
      "eval_samples_per_second": 1.898,
      "eval_steps_per_second": 0.633,
      "step": 170
    },
    {
      "epoch": 0.08362369337979095,
      "grad_norm": 0.4719301164150238,
      "learning_rate": 1.4020014855162755e-05,
      "loss": 1.0868,
      "step": 171
    },
    {
      "epoch": 0.08411272082645639,
      "grad_norm": 0.3227783143520355,
      "learning_rate": 1.3091105369447165e-05,
      "loss": 0.9113,
      "step": 172
    },
    {
      "epoch": 0.08460174827312184,
      "grad_norm": 0.37850451469421387,
      "learning_rate": 1.2191875191630209e-05,
      "loss": 1.0702,
      "step": 173
    },
    {
      "epoch": 0.08509077571978728,
      "grad_norm": 0.29281488060951233,
      "learning_rate": 1.1322631407993811e-05,
      "loss": 0.978,
      "step": 174
    },
    {
      "epoch": 0.08557980316645272,
      "grad_norm": 0.2620236277580261,
      "learning_rate": 1.0483670864493778e-05,
      "loss": 0.9053,
      "step": 175
    },
    {
      "epoch": 0.08606883061311817,
      "grad_norm": 0.46799540519714355,
      "learning_rate": 9.675280065387116e-06,
      "loss": 0.9817,
      "step": 176
    },
    {
      "epoch": 0.08655785805978361,
      "grad_norm": 0.28441375494003296,
      "learning_rate": 8.897735075391155e-06,
      "loss": 0.8335,
      "step": 177
    },
    {
      "epoch": 0.08704688550644905,
      "grad_norm": 0.3706384301185608,
      "learning_rate": 8.151301425407699e-06,
      "loss": 0.9957,
      "step": 178
    },
    {
      "epoch": 0.0875359129531145,
      "grad_norm": 0.2594084143638611,
      "learning_rate": 7.43623402184438e-06,
      "loss": 0.8361,
      "step": 179
    },
    {
      "epoch": 0.08802494039977994,
      "grad_norm": 0.48771926760673523,
      "learning_rate": 6.75277705956443e-06,
      "loss": 0.9614,
      "step": 180
    },
    {
      "epoch": 0.08851396784644538,
      "grad_norm": 0.3285332918167114,
      "learning_rate": 6.1011639384943585e-06,
      "loss": 1.0667,
      "step": 181
    },
    {
      "epoch": 0.08900299529311083,
      "grad_norm": 0.293144166469574,
      "learning_rate": 5.481617183918053e-06,
      "loss": 1.0208,
      "step": 182
    },
    {
      "epoch": 0.08949202273977627,
      "grad_norm": 0.39136576652526855,
      "learning_rate": 4.8943483704846475e-06,
      "loss": 0.9498,
      "step": 183
    },
    {
      "epoch": 0.08998105018644172,
      "grad_norm": 0.31894204020500183,
      "learning_rate": 4.339558049955927e-06,
      "loss": 1.06,
      "step": 184
    },
    {
      "epoch": 0.09047007763310716,
      "grad_norm": 0.3131878972053528,
      "learning_rate": 3.817435682718096e-06,
      "loss": 0.9719,
      "step": 185
    },
    {
      "epoch": 0.0909591050797726,
      "grad_norm": 0.26452943682670593,
      "learning_rate": 3.3281595730812575e-06,
      "loss": 0.9504,
      "step": 186
    },
    {
      "epoch": 0.09144813252643805,
      "grad_norm": 0.36320924758911133,
      "learning_rate": 2.8718968083886075e-06,
      "loss": 1.1415,
      "step": 187
    },
    {
      "epoch": 0.09193715997310349,
      "grad_norm": 0.24845726788043976,
      "learning_rate": 2.4488032019563402e-06,
      "loss": 1.0171,
      "step": 188
    },
    {
      "epoch": 0.09242618741976893,
      "grad_norm": 0.5962578058242798,
      "learning_rate": 2.0590232398634114e-06,
      "loss": 0.9151,
      "step": 189
    },
    {
      "epoch": 0.09291521486643438,
      "grad_norm": 0.3705110549926758,
      "learning_rate": 1.7026900316098215e-06,
      "loss": 1.1048,
      "step": 190
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.434012971545395e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}