{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.04401247019988997,
  "eval_steps": 34,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0004890274466654441,
      "grad_norm": 0.42223185300827026,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.2369,
      "step": 1
    },
    {
      "epoch": 0.0004890274466654441,
      "eval_loss": 1.3036277294158936,
      "eval_runtime": 1355.2246,
      "eval_samples_per_second": 1.906,
      "eval_steps_per_second": 0.635,
      "step": 1
    },
    {
      "epoch": 0.0009780548933308881,
      "grad_norm": 0.49382174015045166,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.3544,
      "step": 2
    },
    {
      "epoch": 0.0014670823399963323,
      "grad_norm": 0.43727555871009827,
      "learning_rate": 2e-05,
      "loss": 1.403,
      "step": 3
    },
    {
      "epoch": 0.0019561097866617762,
      "grad_norm": 0.3653078079223633,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 1.189,
      "step": 4
    },
    {
      "epoch": 0.0024451372333272204,
      "grad_norm": 0.3689245283603668,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 1.1686,
      "step": 5
    },
    {
      "epoch": 0.0029341646799926646,
      "grad_norm": 0.44767728447914124,
      "learning_rate": 4e-05,
      "loss": 1.2578,
      "step": 6
    },
    {
      "epoch": 0.0034231921266581087,
      "grad_norm": 0.4529785215854645,
      "learning_rate": 4.666666666666667e-05,
      "loss": 1.2452,
      "step": 7
    },
    {
      "epoch": 0.0039122195733235525,
      "grad_norm": 0.6146576404571533,
      "learning_rate": 5.333333333333333e-05,
      "loss": 1.5423,
      "step": 8
    },
    {
      "epoch": 0.004401247019988997,
      "grad_norm": 0.42456191778182983,
      "learning_rate": 6e-05,
      "loss": 1.1323,
      "step": 9
    },
    {
      "epoch": 0.004890274466654441,
      "grad_norm": 0.4658707082271576,
      "learning_rate": 6.666666666666667e-05,
      "loss": 1.2595,
      "step": 10
    },
    {
      "epoch": 0.005379301913319885,
      "grad_norm": 0.6462928652763367,
      "learning_rate": 7.333333333333333e-05,
      "loss": 1.3136,
      "step": 11
    },
    {
      "epoch": 0.005868329359985329,
      "grad_norm": 0.514143168926239,
      "learning_rate": 8e-05,
      "loss": 1.1999,
      "step": 12
    },
    {
      "epoch": 0.006357356806650773,
      "grad_norm": 0.43792784214019775,
      "learning_rate": 8.666666666666667e-05,
      "loss": 1.0675,
      "step": 13
    },
    {
      "epoch": 0.0068463842533162175,
      "grad_norm": 0.6212848424911499,
      "learning_rate": 9.333333333333334e-05,
      "loss": 1.3066,
      "step": 14
    },
    {
      "epoch": 0.007335411699981661,
      "grad_norm": 0.6186118125915527,
      "learning_rate": 0.0001,
      "loss": 1.2355,
      "step": 15
    },
    {
      "epoch": 0.007824439146647105,
      "grad_norm": 0.6825079917907715,
      "learning_rate": 0.00010666666666666667,
      "loss": 1.2258,
      "step": 16
    },
    {
      "epoch": 0.00831346659331255,
      "grad_norm": 0.3936300575733185,
      "learning_rate": 0.00011333333333333334,
      "loss": 1.1337,
      "step": 17
    },
    {
      "epoch": 0.008802494039977994,
      "grad_norm": 0.5038918852806091,
      "learning_rate": 0.00012,
      "loss": 1.1278,
      "step": 18
    },
    {
      "epoch": 0.009291521486643438,
      "grad_norm": 0.5894134640693665,
      "learning_rate": 0.00012666666666666666,
      "loss": 1.1902,
      "step": 19
    },
    {
      "epoch": 0.009780548933308882,
      "grad_norm": 1.1452070474624634,
      "learning_rate": 0.00013333333333333334,
      "loss": 1.1023,
      "step": 20
    },
    {
      "epoch": 0.010269576379974325,
      "grad_norm": 0.4289817214012146,
      "learning_rate": 0.00014,
      "loss": 1.0195,
      "step": 21
    },
    {
      "epoch": 0.01075860382663977,
      "grad_norm": 1.290226936340332,
      "learning_rate": 0.00014666666666666666,
      "loss": 0.9534,
      "step": 22
    },
    {
      "epoch": 0.011247631273305215,
      "grad_norm": 0.5022335648536682,
      "learning_rate": 0.00015333333333333334,
      "loss": 0.8952,
      "step": 23
    },
    {
      "epoch": 0.011736658719970658,
      "grad_norm": 0.5421992540359497,
      "learning_rate": 0.00016,
      "loss": 0.8553,
      "step": 24
    },
    {
      "epoch": 0.012225686166636102,
      "grad_norm": 1.568458080291748,
      "learning_rate": 0.0001666666666666667,
      "loss": 0.9764,
      "step": 25
    },
    {
      "epoch": 0.012714713613301546,
      "grad_norm": 0.6190416812896729,
      "learning_rate": 0.00017333333333333334,
      "loss": 0.9819,
      "step": 26
    },
    {
      "epoch": 0.013203741059966991,
      "grad_norm": 0.4747830927371979,
      "learning_rate": 0.00018,
      "loss": 0.9395,
      "step": 27
    },
    {
      "epoch": 0.013692768506632435,
      "grad_norm": 0.9486120343208313,
      "learning_rate": 0.0001866666666666667,
      "loss": 0.9669,
      "step": 28
    },
    {
      "epoch": 0.014181795953297879,
      "grad_norm": 0.4615766108036041,
      "learning_rate": 0.00019333333333333333,
      "loss": 0.9341,
      "step": 29
    },
    {
      "epoch": 0.014670823399963322,
      "grad_norm": 0.46146905422210693,
      "learning_rate": 0.0002,
      "loss": 1.0082,
      "step": 30
    },
    {
      "epoch": 0.015159850846628768,
      "grad_norm": 0.47625860571861267,
      "learning_rate": 0.00019998292504580528,
      "loss": 0.9419,
      "step": 31
    },
    {
      "epoch": 0.01564887829329421,
      "grad_norm": 0.5290003418922424,
      "learning_rate": 0.0001999317060143023,
      "loss": 1.1635,
      "step": 32
    },
    {
      "epoch": 0.016137905739959654,
      "grad_norm": 0.5592769980430603,
      "learning_rate": 0.0001998463603967434,
      "loss": 0.9437,
      "step": 33
    },
    {
      "epoch": 0.0166269331866251,
      "grad_norm": 0.3583241403102875,
      "learning_rate": 0.00019972691733857883,
      "loss": 1.1428,
      "step": 34
    },
    {
      "epoch": 0.0166269331866251,
      "eval_loss": 1.0231887102127075,
      "eval_runtime": 1359.3297,
      "eval_samples_per_second": 1.9,
      "eval_steps_per_second": 0.633,
      "step": 34
    },
    {
      "epoch": 0.017115960633290545,
      "grad_norm": 0.41851457953453064,
      "learning_rate": 0.00019957341762950344,
      "loss": 1.0976,
      "step": 35
    },
    {
      "epoch": 0.017604988079955988,
      "grad_norm": 0.46739837527275085,
      "learning_rate": 0.0001993859136895274,
      "loss": 1.0089,
      "step": 36
    },
    {
      "epoch": 0.018094015526621432,
      "grad_norm": 0.3970998227596283,
      "learning_rate": 0.00019916446955107428,
      "loss": 1.1005,
      "step": 37
    },
    {
      "epoch": 0.018583042973286876,
      "grad_norm": 0.5996494293212891,
      "learning_rate": 0.0001989091608371146,
      "loss": 0.9741,
      "step": 38
    },
    {
      "epoch": 0.01907207041995232,
      "grad_norm": 0.27929016947746277,
      "learning_rate": 0.00019862007473534025,
      "loss": 0.9607,
      "step": 39
    },
    {
      "epoch": 0.019561097866617763,
      "grad_norm": 0.33169642090797424,
      "learning_rate": 0.0001982973099683902,
      "loss": 1.1173,
      "step": 40
    },
    {
      "epoch": 0.020050125313283207,
      "grad_norm": 0.3042043149471283,
      "learning_rate": 0.0001979409767601366,
      "loss": 1.1178,
      "step": 41
    },
    {
      "epoch": 0.02053915275994865,
      "grad_norm": 0.3394615650177002,
      "learning_rate": 0.00019755119679804367,
      "loss": 1.0167,
      "step": 42
    },
    {
      "epoch": 0.021028180206614098,
      "grad_norm": 0.3241017758846283,
      "learning_rate": 0.0001971281031916114,
      "loss": 1.0089,
      "step": 43
    },
    {
      "epoch": 0.02151720765327954,
      "grad_norm": 0.6870384812355042,
      "learning_rate": 0.00019667184042691875,
      "loss": 1.0362,
      "step": 44
    },
    {
      "epoch": 0.022006235099944985,
      "grad_norm": 0.31295979022979736,
      "learning_rate": 0.00019618256431728194,
      "loss": 1.0032,
      "step": 45
    },
    {
      "epoch": 0.02249526254661043,
      "grad_norm": 0.32893508672714233,
      "learning_rate": 0.0001956604419500441,
      "loss": 1.0651,
      "step": 46
    },
    {
      "epoch": 0.022984289993275873,
      "grad_norm": 0.28745728731155396,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.9031,
      "step": 47
    },
    {
      "epoch": 0.023473317439941317,
      "grad_norm": 0.558315098285675,
      "learning_rate": 0.00019451838281608197,
      "loss": 1.0545,
      "step": 48
    },
    {
      "epoch": 0.02396234488660676,
      "grad_norm": 0.5743781924247742,
      "learning_rate": 0.00019389883606150566,
      "loss": 1.0951,
      "step": 49
    },
    {
      "epoch": 0.024451372333272204,
      "grad_norm": 0.41075870394706726,
      "learning_rate": 0.00019324722294043558,
      "loss": 1.0124,
      "step": 50
    },
    {
      "epoch": 0.024940399779937648,
      "grad_norm": 0.2487930953502655,
      "learning_rate": 0.00019256376597815564,
      "loss": 0.9894,
      "step": 51
    },
    {
      "epoch": 0.02542942722660309,
      "grad_norm": 0.39657795429229736,
      "learning_rate": 0.00019184869857459232,
      "loss": 1.138,
      "step": 52
    },
    {
      "epoch": 0.02591845467326854,
      "grad_norm": 0.5362446308135986,
      "learning_rate": 0.00019110226492460885,
      "loss": 1.0342,
      "step": 53
    },
    {
      "epoch": 0.026407482119933982,
      "grad_norm": 0.5089200139045715,
      "learning_rate": 0.0001903247199346129,
      "loss": 1.1193,
      "step": 54
    },
    {
      "epoch": 0.026896509566599426,
      "grad_norm": 0.28259485960006714,
      "learning_rate": 0.00018951632913550626,
      "loss": 0.9054,
      "step": 55
    },
    {
      "epoch": 0.02738553701326487,
      "grad_norm": 0.3576945662498474,
      "learning_rate": 0.0001886773685920062,
      "loss": 1.0002,
      "step": 56
    },
    {
      "epoch": 0.027874564459930314,
      "grad_norm": 0.2915222942829132,
      "learning_rate": 0.0001878081248083698,
      "loss": 0.9491,
      "step": 57
    },
    {
      "epoch": 0.028363591906595757,
      "grad_norm": 0.296265572309494,
      "learning_rate": 0.00018690889463055283,
      "loss": 1.0483,
      "step": 58
    },
    {
      "epoch": 0.0288526193532612,
      "grad_norm": 0.3489420413970947,
      "learning_rate": 0.00018597998514483725,
      "loss": 0.9064,
      "step": 59
    },
    {
      "epoch": 0.029341646799926645,
      "grad_norm": 0.37772810459136963,
      "learning_rate": 0.00018502171357296144,
      "loss": 0.9779,
      "step": 60
    },
    {
      "epoch": 0.02983067424659209,
      "grad_norm": 0.5371522903442383,
      "learning_rate": 0.00018403440716378928,
      "loss": 1.0268,
      "step": 61
    },
    {
      "epoch": 0.030319701693257536,
      "grad_norm": 0.3227771520614624,
      "learning_rate": 0.00018301840308155507,
      "loss": 1.0642,
      "step": 62
    },
    {
      "epoch": 0.03080872913992298,
      "grad_norm": 0.4422720670700073,
      "learning_rate": 0.00018197404829072215,
      "loss": 1.1083,
      "step": 63
    },
    {
      "epoch": 0.03129775658658842,
      "grad_norm": 0.5397651791572571,
      "learning_rate": 0.00018090169943749476,
      "loss": 1.2271,
      "step": 64
    },
    {
      "epoch": 0.03178678403325386,
      "grad_norm": 0.4943908452987671,
      "learning_rate": 0.000179801722728024,
      "loss": 0.9873,
      "step": 65
    },
    {
      "epoch": 0.03227581147991931,
      "grad_norm": 0.31046292185783386,
      "learning_rate": 0.00017867449380334834,
      "loss": 0.9235,
      "step": 66
    },
    {
      "epoch": 0.03276483892658476,
      "grad_norm": 0.4786432981491089,
      "learning_rate": 0.00017752039761111297,
      "loss": 1.0804,
      "step": 67
    },
    {
      "epoch": 0.0332538663732502,
      "grad_norm": 0.38078072667121887,
      "learning_rate": 0.00017633982827411032,
      "loss": 1.0508,
      "step": 68
    },
    {
      "epoch": 0.0332538663732502,
      "eval_loss": 1.0058449506759644,
      "eval_runtime": 1360.1032,
      "eval_samples_per_second": 1.899,
      "eval_steps_per_second": 0.633,
      "step": 68
    },
    {
      "epoch": 0.033742893819915645,
      "grad_norm": 0.302775114774704,
      "learning_rate": 0.00017513318895568737,
      "loss": 1.0186,
      "step": 69
    },
    {
      "epoch": 0.03423192126658109,
      "grad_norm": 0.7890788316726685,
      "learning_rate": 0.00017390089172206592,
      "loss": 0.9441,
      "step": 70
    },
    {
      "epoch": 0.03472094871324653,
      "grad_norm": 0.3507513999938965,
      "learning_rate": 0.00017264335740162242,
      "loss": 0.9604,
      "step": 71
    },
    {
      "epoch": 0.035209976159911976,
      "grad_norm": 0.39182308316230774,
      "learning_rate": 0.00017136101544117525,
      "loss": 1.0598,
      "step": 72
    },
    {
      "epoch": 0.03569900360657742,
      "grad_norm": 0.3877043128013611,
      "learning_rate": 0.0001700543037593291,
      "loss": 0.8719,
      "step": 73
    },
    {
      "epoch": 0.036188031053242864,
      "grad_norm": 0.6628935933113098,
      "learning_rate": 0.00016872366859692627,
      "loss": 0.9241,
      "step": 74
    },
    {
      "epoch": 0.03667705849990831,
      "grad_norm": 0.45674100518226624,
      "learning_rate": 0.00016736956436465573,
      "loss": 1.2833,
      "step": 75
    },
    {
      "epoch": 0.03716608594657375,
      "grad_norm": 0.5053292512893677,
      "learning_rate": 0.0001659924534878723,
      "loss": 0.9678,
      "step": 76
    },
    {
      "epoch": 0.037655113393239195,
      "grad_norm": 0.31924471259117126,
      "learning_rate": 0.00016459280624867874,
      "loss": 0.9702,
      "step": 77
    },
    {
      "epoch": 0.03814414083990464,
      "grad_norm": 0.37664365768432617,
      "learning_rate": 0.0001631711006253251,
      "loss": 0.8382,
      "step": 78
    },
    {
      "epoch": 0.03863316828657008,
      "grad_norm": 0.3583241403102875,
      "learning_rate": 0.0001617278221289793,
      "loss": 1.0149,
      "step": 79
    },
    {
      "epoch": 0.039122195733235526,
      "grad_norm": 0.3891322910785675,
      "learning_rate": 0.00016026346363792567,
      "loss": 1.2247,
      "step": 80
    },
    {
      "epoch": 0.03961122317990097,
      "grad_norm": 0.32489892840385437,
      "learning_rate": 0.00015877852522924732,
      "loss": 0.9108,
      "step": 81
    },
    {
      "epoch": 0.040100250626566414,
      "grad_norm": 0.3395017981529236,
      "learning_rate": 0.00015727351400805052,
      "loss": 0.9968,
      "step": 82
    },
    {
      "epoch": 0.04058927807323186,
      "grad_norm": 0.34815898537635803,
      "learning_rate": 0.00015574894393428855,
      "loss": 0.967,
      "step": 83
    },
    {
      "epoch": 0.0410783055198973,
      "grad_norm": 0.4931004047393799,
      "learning_rate": 0.00015420533564724495,
      "loss": 1.0232,
      "step": 84
    },
    {
      "epoch": 0.041567332966562745,
      "grad_norm": 0.45658788084983826,
      "learning_rate": 0.0001526432162877356,
      "loss": 1.0845,
      "step": 85
    },
    {
      "epoch": 0.042056360413228196,
      "grad_norm": 0.2645609378814697,
      "learning_rate": 0.0001510631193180907,
      "loss": 0.8387,
      "step": 86
    },
    {
      "epoch": 0.04254538785989364,
      "grad_norm": 0.2979009747505188,
      "learning_rate": 0.0001494655843399779,
      "loss": 1.0789,
      "step": 87
    },
    {
      "epoch": 0.04303441530655908,
      "grad_norm": 0.3363690674304962,
      "learning_rate": 0.00014785115691012864,
      "loss": 0.9624,
      "step": 88
    },
    {
      "epoch": 0.04352344275322453,
      "grad_norm": 0.700744092464447,
      "learning_rate": 0.00014622038835403133,
      "loss": 1.0248,
      "step": 89
    },
    {
      "epoch": 0.04401247019988997,
      "grad_norm": 0.4093436300754547,
      "learning_rate": 0.00014457383557765386,
      "loss": 0.993,
      "step": 90
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.995058775995187e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}