{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2442002442002442,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.002442002442002442,
      "grad_norm": 4.003821849822998,
      "learning_rate": 1e-05,
      "loss": 3.5448,
      "step": 1
    },
    {
      "epoch": 0.002442002442002442,
      "eval_loss": 3.4570086002349854,
      "eval_runtime": 17.1854,
      "eval_samples_per_second": 20.075,
      "eval_steps_per_second": 2.56,
      "step": 1
    },
    {
      "epoch": 0.004884004884004884,
      "grad_norm": 4.212690353393555,
      "learning_rate": 2e-05,
      "loss": 3.29,
      "step": 2
    },
    {
      "epoch": 0.007326007326007326,
      "grad_norm": 3.684267282485962,
      "learning_rate": 3e-05,
      "loss": 3.2101,
      "step": 3
    },
    {
      "epoch": 0.009768009768009768,
      "grad_norm": 4.348191738128662,
      "learning_rate": 4e-05,
      "loss": 3.6773,
      "step": 4
    },
    {
      "epoch": 0.01221001221001221,
      "grad_norm": 3.9922070503234863,
      "learning_rate": 5e-05,
      "loss": 3.4306,
      "step": 5
    },
    {
      "epoch": 0.014652014652014652,
      "grad_norm": 3.5863325595855713,
      "learning_rate": 6e-05,
      "loss": 3.3782,
      "step": 6
    },
    {
      "epoch": 0.017094017094017096,
      "grad_norm": 3.6543312072753906,
      "learning_rate": 7e-05,
      "loss": 3.2914,
      "step": 7
    },
    {
      "epoch": 0.019536019536019536,
      "grad_norm": 2.883704900741577,
      "learning_rate": 8e-05,
      "loss": 2.8062,
      "step": 8
    },
    {
      "epoch": 0.02197802197802198,
      "grad_norm": 2.7370340824127197,
      "learning_rate": 9e-05,
      "loss": 2.8026,
      "step": 9
    },
    {
      "epoch": 0.02197802197802198,
      "eval_loss": 2.863778591156006,
      "eval_runtime": 17.1862,
      "eval_samples_per_second": 20.074,
      "eval_steps_per_second": 2.56,
      "step": 9
    },
    {
      "epoch": 0.02442002442002442,
      "grad_norm": 3.3165810108184814,
      "learning_rate": 0.0001,
      "loss": 2.9095,
      "step": 10
    },
    {
      "epoch": 0.026862026862026864,
      "grad_norm": 3.8215725421905518,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.8289,
      "step": 11
    },
    {
      "epoch": 0.029304029304029304,
      "grad_norm": 3.539818286895752,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.2962,
      "step": 12
    },
    {
      "epoch": 0.031746031746031744,
      "grad_norm": 3.0036020278930664,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.569,
      "step": 13
    },
    {
      "epoch": 0.03418803418803419,
      "grad_norm": 3.063443422317505,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.6549,
      "step": 14
    },
    {
      "epoch": 0.03663003663003663,
      "grad_norm": 2.7809972763061523,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.7653,
      "step": 15
    },
    {
      "epoch": 0.03907203907203907,
      "grad_norm": 2.9072892665863037,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.6655,
      "step": 16
    },
    {
      "epoch": 0.04151404151404151,
      "grad_norm": 2.6445059776306152,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.7984,
      "step": 17
    },
    {
      "epoch": 0.04395604395604396,
      "grad_norm": 2.6554043292999268,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.6775,
      "step": 18
    },
    {
      "epoch": 0.04395604395604396,
      "eval_loss": 2.5405185222625732,
      "eval_runtime": 17.192,
      "eval_samples_per_second": 20.067,
      "eval_steps_per_second": 2.559,
      "step": 18
    },
    {
      "epoch": 0.0463980463980464,
      "grad_norm": 2.496406316757202,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.4901,
      "step": 19
    },
    {
      "epoch": 0.04884004884004884,
      "grad_norm": 2.5684244632720947,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.7073,
      "step": 20
    },
    {
      "epoch": 0.05128205128205128,
      "grad_norm": 2.4029531478881836,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.1923,
      "step": 21
    },
    {
      "epoch": 0.05372405372405373,
      "grad_norm": 2.1948258876800537,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.4754,
      "step": 22
    },
    {
      "epoch": 0.05616605616605617,
      "grad_norm": 2.6035783290863037,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.8365,
      "step": 23
    },
    {
      "epoch": 0.05860805860805861,
      "grad_norm": 2.6996240615844727,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.1656,
      "step": 24
    },
    {
      "epoch": 0.06105006105006105,
      "grad_norm": 2.1867291927337646,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.509,
      "step": 25
    },
    {
      "epoch": 0.06349206349206349,
      "grad_norm": 2.629974842071533,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.4488,
      "step": 26
    },
    {
      "epoch": 0.06593406593406594,
      "grad_norm": 2.655991554260254,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.2719,
      "step": 27
    },
    {
      "epoch": 0.06593406593406594,
      "eval_loss": 2.3683769702911377,
      "eval_runtime": 17.208,
      "eval_samples_per_second": 20.049,
      "eval_steps_per_second": 2.557,
      "step": 27
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 2.4463114738464355,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.4659,
      "step": 28
    },
    {
      "epoch": 0.07081807081807082,
      "grad_norm": 2.276737928390503,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.4116,
      "step": 29
    },
    {
      "epoch": 0.07326007326007326,
      "grad_norm": 2.5003745555877686,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.1826,
      "step": 30
    },
    {
      "epoch": 0.0757020757020757,
      "grad_norm": 2.833949565887451,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.4994,
      "step": 31
    },
    {
      "epoch": 0.07814407814407814,
      "grad_norm": 2.667140007019043,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.4033,
      "step": 32
    },
    {
      "epoch": 0.08058608058608059,
      "grad_norm": 2.864898920059204,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.1158,
      "step": 33
    },
    {
      "epoch": 0.08302808302808302,
      "grad_norm": 3.267824172973633,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.0373,
      "step": 34
    },
    {
      "epoch": 0.08547008547008547,
      "grad_norm": 2.865539789199829,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.4525,
      "step": 35
    },
    {
      "epoch": 0.08791208791208792,
      "grad_norm": 2.8213765621185303,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.3295,
      "step": 36
    },
    {
      "epoch": 0.08791208791208792,
      "eval_loss": 2.2654080390930176,
      "eval_runtime": 17.1512,
      "eval_samples_per_second": 20.115,
      "eval_steps_per_second": 2.565,
      "step": 36
    },
    {
      "epoch": 0.09035409035409035,
      "grad_norm": 2.6522955894470215,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.0216,
      "step": 37
    },
    {
      "epoch": 0.0927960927960928,
      "grad_norm": 2.883388042449951,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.2312,
      "step": 38
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 2.54302978515625,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.3645,
      "step": 39
    },
    {
      "epoch": 0.09768009768009768,
      "grad_norm": 2.6332242488861084,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.2103,
      "step": 40
    },
    {
      "epoch": 0.10012210012210013,
      "grad_norm": 2.973139762878418,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.3439,
      "step": 41
    },
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 2.7934212684631348,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.1581,
      "step": 42
    },
    {
      "epoch": 0.10500610500610501,
      "grad_norm": 2.8419196605682373,
      "learning_rate": 7.033683215379002e-05,
      "loss": 2.3719,
      "step": 43
    },
    {
      "epoch": 0.10744810744810745,
      "grad_norm": 2.6125094890594482,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.1408,
      "step": 44
    },
    {
      "epoch": 0.10989010989010989,
      "grad_norm": 2.8555796146392822,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.3435,
      "step": 45
    },
    {
      "epoch": 0.10989010989010989,
      "eval_loss": 2.1842544078826904,
      "eval_runtime": 17.2121,
      "eval_samples_per_second": 20.044,
      "eval_steps_per_second": 2.556,
      "step": 45
    },
    {
      "epoch": 0.11233211233211234,
      "grad_norm": 3.3376097679138184,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.1704,
      "step": 46
    },
    {
      "epoch": 0.11477411477411477,
      "grad_norm": 3.228142499923706,
      "learning_rate": 6.378186779084995e-05,
      "loss": 2.2675,
      "step": 47
    },
    {
      "epoch": 0.11721611721611722,
      "grad_norm": 3.246316432952881,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.2922,
      "step": 48
    },
    {
      "epoch": 0.11965811965811966,
      "grad_norm": 3.411884307861328,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.1232,
      "step": 49
    },
    {
      "epoch": 0.1221001221001221,
      "grad_norm": 2.7644128799438477,
      "learning_rate": 5.868240888334653e-05,
      "loss": 1.9756,
      "step": 50
    },
    {
      "epoch": 0.12454212454212454,
      "grad_norm": 2.9831814765930176,
      "learning_rate": 5.695865504800327e-05,
      "loss": 2.1691,
      "step": 51
    },
    {
      "epoch": 0.12698412698412698,
      "grad_norm": 2.948953628540039,
      "learning_rate": 5.522642316338268e-05,
      "loss": 2.2066,
      "step": 52
    },
    {
      "epoch": 0.12942612942612944,
      "grad_norm": 3.319122552871704,
      "learning_rate": 5.348782368720626e-05,
      "loss": 2.0857,
      "step": 53
    },
    {
      "epoch": 0.13186813186813187,
      "grad_norm": 3.127619743347168,
      "learning_rate": 5.174497483512506e-05,
      "loss": 1.9764,
      "step": 54
    },
    {
      "epoch": 0.13186813186813187,
      "eval_loss": 2.127415180206299,
      "eval_runtime": 17.2075,
      "eval_samples_per_second": 20.049,
      "eval_steps_per_second": 2.557,
      "step": 54
    },
    {
      "epoch": 0.1343101343101343,
      "grad_norm": 3.369476795196533,
      "learning_rate": 5e-05,
      "loss": 2.194,
      "step": 55
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 3.0237653255462646,
      "learning_rate": 4.825502516487497e-05,
      "loss": 2.1449,
      "step": 56
    },
    {
      "epoch": 0.1391941391941392,
      "grad_norm": 2.8375723361968994,
      "learning_rate": 4.6512176312793736e-05,
      "loss": 2.2385,
      "step": 57
    },
    {
      "epoch": 0.14163614163614163,
      "grad_norm": 3.1608798503875732,
      "learning_rate": 4.477357683661734e-05,
      "loss": 1.7792,
      "step": 58
    },
    {
      "epoch": 0.14407814407814407,
      "grad_norm": 3.247514247894287,
      "learning_rate": 4.3041344951996746e-05,
      "loss": 2.1278,
      "step": 59
    },
    {
      "epoch": 0.14652014652014653,
      "grad_norm": 3.289715051651001,
      "learning_rate": 4.131759111665349e-05,
      "loss": 2.3176,
      "step": 60
    },
    {
      "epoch": 0.14896214896214896,
      "grad_norm": 3.2936837673187256,
      "learning_rate": 3.960441545911204e-05,
      "loss": 2.072,
      "step": 61
    },
    {
      "epoch": 0.1514041514041514,
      "grad_norm": 3.1719565391540527,
      "learning_rate": 3.790390522001662e-05,
      "loss": 1.916,
      "step": 62
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 2.834185838699341,
      "learning_rate": 3.6218132209150045e-05,
      "loss": 2.0461,
      "step": 63
    },
    {
      "epoch": 0.15384615384615385,
      "eval_loss": 2.0899388790130615,
      "eval_runtime": 17.2055,
      "eval_samples_per_second": 20.052,
      "eval_steps_per_second": 2.557,
      "step": 63
    },
    {
      "epoch": 0.1562881562881563,
      "grad_norm": 3.3529248237609863,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 2.0755,
      "step": 64
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 3.64978289604187,
      "learning_rate": 3.289899283371657e-05,
      "loss": 2.2543,
      "step": 65
    },
    {
      "epoch": 0.16117216117216118,
      "grad_norm": 2.9989237785339355,
      "learning_rate": 3.12696703292044e-05,
      "loss": 1.9864,
      "step": 66
    },
    {
      "epoch": 0.16361416361416362,
      "grad_norm": 3.2978744506835938,
      "learning_rate": 2.9663167846209998e-05,
      "loss": 2.1456,
      "step": 67
    },
    {
      "epoch": 0.16605616605616605,
      "grad_norm": 3.1260225772857666,
      "learning_rate": 2.8081442660546125e-05,
      "loss": 2.1197,
      "step": 68
    },
    {
      "epoch": 0.1684981684981685,
      "grad_norm": 2.8456709384918213,
      "learning_rate": 2.6526421860705473e-05,
      "loss": 2.1985,
      "step": 69
    },
    {
      "epoch": 0.17094017094017094,
      "grad_norm": 3.1655471324920654,
      "learning_rate": 2.500000000000001e-05,
      "loss": 2.1985,
      "step": 70
    },
    {
      "epoch": 0.17338217338217338,
      "grad_norm": 3.4631283283233643,
      "learning_rate": 2.350403678833976e-05,
      "loss": 2.0562,
      "step": 71
    },
    {
      "epoch": 0.17582417582417584,
      "grad_norm": 2.9049081802368164,
      "learning_rate": 2.2040354826462668e-05,
      "loss": 2.0665,
      "step": 72
    },
    {
      "epoch": 0.17582417582417584,
      "eval_loss": 2.0659754276275635,
      "eval_runtime": 17.2028,
      "eval_samples_per_second": 20.055,
      "eval_steps_per_second": 2.558,
      "step": 72
    },
    {
      "epoch": 0.17826617826617827,
      "grad_norm": 3.1705820560455322,
      "learning_rate": 2.061073738537635e-05,
      "loss": 2.2104,
      "step": 73
    },
    {
      "epoch": 0.1807081807081807,
      "grad_norm": 3.19238543510437,
      "learning_rate": 1.9216926233717085e-05,
      "loss": 1.8963,
      "step": 74
    },
    {
      "epoch": 0.18315018315018314,
      "grad_norm": 2.8358798027038574,
      "learning_rate": 1.7860619515673033e-05,
      "loss": 2.1482,
      "step": 75
    },
    {
      "epoch": 0.1855921855921856,
      "grad_norm": 3.2111284732818604,
      "learning_rate": 1.6543469682057106e-05,
      "loss": 2.0634,
      "step": 76
    },
    {
      "epoch": 0.18803418803418803,
      "grad_norm": 3.5797221660614014,
      "learning_rate": 1.526708147705013e-05,
      "loss": 1.8014,
      "step": 77
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 3.0201733112335205,
      "learning_rate": 1.4033009983067452e-05,
      "loss": 2.0404,
      "step": 78
    },
    {
      "epoch": 0.19291819291819293,
      "grad_norm": 3.203850507736206,
      "learning_rate": 1.2842758726130283e-05,
      "loss": 2.1296,
      "step": 79
    },
    {
      "epoch": 0.19536019536019536,
      "grad_norm": 2.728576898574829,
      "learning_rate": 1.1697777844051105e-05,
      "loss": 1.9965,
      "step": 80
    },
    {
      "epoch": 0.1978021978021978,
      "grad_norm": 3.051905632019043,
      "learning_rate": 1.0599462319663905e-05,
      "loss": 2.2437,
      "step": 81
    },
    {
      "epoch": 0.1978021978021978,
      "eval_loss": 2.0485291481018066,
      "eval_runtime": 17.19,
      "eval_samples_per_second": 20.07,
      "eval_steps_per_second": 2.56,
      "step": 81
    },
    {
      "epoch": 0.20024420024420025,
      "grad_norm": 3.3389062881469727,
      "learning_rate": 9.549150281252633e-06,
      "loss": 2.0766,
      "step": 82
    },
    {
      "epoch": 0.2026862026862027,
      "grad_norm": 3.4000632762908936,
      "learning_rate": 8.548121372247918e-06,
      "loss": 2.0668,
      "step": 83
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 3.469282627105713,
      "learning_rate": 7.597595192178702e-06,
      "loss": 1.9469,
      "step": 84
    },
    {
      "epoch": 0.20757020757020758,
      "grad_norm": 3.1487700939178467,
      "learning_rate": 6.698729810778065e-06,
      "loss": 2.0561,
      "step": 85
    },
    {
      "epoch": 0.21001221001221002,
      "grad_norm": 3.3516573905944824,
      "learning_rate": 5.852620357053651e-06,
      "loss": 2.4972,
      "step": 86
    },
    {
      "epoch": 0.21245421245421245,
      "grad_norm": 3.0450797080993652,
      "learning_rate": 5.060297685041659e-06,
      "loss": 2.0687,
      "step": 87
    },
    {
      "epoch": 0.2148962148962149,
      "grad_norm": 3.4229252338409424,
      "learning_rate": 4.322727117869951e-06,
      "loss": 2.1243,
      "step": 88
    },
    {
      "epoch": 0.21733821733821734,
      "grad_norm": 3.3112401962280273,
      "learning_rate": 3.6408072716606346e-06,
      "loss": 2.0881,
      "step": 89
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 2.852926015853882,
      "learning_rate": 3.0153689607045845e-06,
      "loss": 2.0004,
      "step": 90
    },
    {
      "epoch": 0.21978021978021978,
      "eval_loss": 2.040705919265747,
      "eval_runtime": 17.1884,
      "eval_samples_per_second": 20.072,
      "eval_steps_per_second": 2.56,
      "step": 90
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 3.144771099090576,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 2.2776,
      "step": 91
    },
    {
      "epoch": 0.22466422466422467,
      "grad_norm": 2.85107159614563,
      "learning_rate": 1.9369152030840556e-06,
      "loss": 2.1482,
      "step": 92
    },
    {
      "epoch": 0.2271062271062271,
      "grad_norm": 3.379453182220459,
      "learning_rate": 1.4852136862001764e-06,
      "loss": 2.0163,
      "step": 93
    },
    {
      "epoch": 0.22954822954822954,
      "grad_norm": 3.250808000564575,
      "learning_rate": 1.0926199633097157e-06,
      "loss": 2.114,
      "step": 94
    },
    {
      "epoch": 0.231990231990232,
      "grad_norm": 3.544097423553467,
      "learning_rate": 7.596123493895991e-07,
      "loss": 1.875,
      "step": 95
    },
    {
      "epoch": 0.23443223443223443,
      "grad_norm": 3.2743308544158936,
      "learning_rate": 4.865965629214819e-07,
      "loss": 1.9819,
      "step": 96
    },
    {
      "epoch": 0.23687423687423687,
      "grad_norm": 3.108639717102051,
      "learning_rate": 2.7390523158633554e-07,
      "loss": 2.3488,
      "step": 97
    },
    {
      "epoch": 0.23931623931623933,
      "grad_norm": 3.2740273475646973,
      "learning_rate": 1.2179748700879012e-07,
      "loss": 2.3454,
      "step": 98
    },
    {
      "epoch": 0.24175824175824176,
      "grad_norm": 2.951927661895752,
      "learning_rate": 3.04586490452119e-08,
      "loss": 2.0124,
      "step": 99
    },
    {
      "epoch": 0.24175824175824176,
      "eval_loss": 2.0391764640808105,
      "eval_runtime": 17.1917,
      "eval_samples_per_second": 20.068,
      "eval_steps_per_second": 2.559,
      "step": 99
    },
    {
      "epoch": 0.2442002442002442,
      "grad_norm": 3.4022247791290283,
      "learning_rate": 0.0,
      "loss": 2.0964,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.96755069075456e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}