{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1662510390689942,
  "eval_steps": 9,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0016625103906899418,
      "grad_norm": NaN,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.0016625103906899418,
      "eval_loss": NaN,
      "eval_runtime": 97.6098,
      "eval_samples_per_second": 5.194,
      "eval_steps_per_second": 0.656,
      "step": 1
    },
    {
      "epoch": 0.0033250207813798837,
      "grad_norm": NaN,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0,
      "step": 2
    },
    {
      "epoch": 0.004987531172069825,
      "grad_norm": NaN,
      "learning_rate": 6e-06,
      "loss": 0.0,
      "step": 3
    },
    {
      "epoch": 0.006650041562759767,
      "grad_norm": NaN,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.0,
      "step": 4
    },
    {
      "epoch": 0.00831255195344971,
      "grad_norm": NaN,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 5
    },
    {
      "epoch": 0.00997506234413965,
      "grad_norm": NaN,
      "learning_rate": 1.2e-05,
      "loss": 0.0,
      "step": 6
    },
    {
      "epoch": 0.011637572734829594,
      "grad_norm": NaN,
      "learning_rate": 1.4e-05,
      "loss": 0.0,
      "step": 7
    },
    {
      "epoch": 0.013300083125519535,
      "grad_norm": NaN,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 0.0,
      "step": 8
    },
    {
      "epoch": 0.014962593516209476,
      "grad_norm": NaN,
      "learning_rate": 1.8e-05,
      "loss": 0.0,
      "step": 9
    },
    {
      "epoch": 0.014962593516209476,
      "eval_loss": NaN,
      "eval_runtime": 96.6004,
      "eval_samples_per_second": 5.248,
      "eval_steps_per_second": 0.663,
      "step": 9
    },
    {
      "epoch": 0.01662510390689942,
      "grad_norm": NaN,
      "learning_rate": 2e-05,
      "loss": 0.0,
      "step": 10
    },
    {
      "epoch": 0.01828761429758936,
      "grad_norm": NaN,
      "learning_rate": 1.999390827019096e-05,
      "loss": 0.0,
      "step": 11
    },
    {
      "epoch": 0.0199501246882793,
      "grad_norm": NaN,
      "learning_rate": 1.9975640502598243e-05,
      "loss": 0.0,
      "step": 12
    },
    {
      "epoch": 0.021612635078969242,
      "grad_norm": NaN,
      "learning_rate": 1.9945218953682736e-05,
      "loss": 0.0,
      "step": 13
    },
    {
      "epoch": 0.023275145469659187,
      "grad_norm": NaN,
      "learning_rate": 1.9902680687415704e-05,
      "loss": 0.0,
      "step": 14
    },
    {
      "epoch": 0.02493765586034913,
      "grad_norm": NaN,
      "learning_rate": 1.9848077530122083e-05,
      "loss": 0.0,
      "step": 15
    },
    {
      "epoch": 0.02660016625103907,
      "grad_norm": NaN,
      "learning_rate": 1.9781476007338058e-05,
      "loss": 0.0,
      "step": 16
    },
    {
      "epoch": 0.02826267664172901,
      "grad_norm": NaN,
      "learning_rate": 1.9702957262759964e-05,
      "loss": 0.0,
      "step": 17
    },
    {
      "epoch": 0.029925187032418952,
      "grad_norm": NaN,
      "learning_rate": 1.961261695938319e-05,
      "loss": 0.0,
      "step": 18
    },
    {
      "epoch": 0.029925187032418952,
      "eval_loss": NaN,
      "eval_runtime": 96.6099,
      "eval_samples_per_second": 5.248,
      "eval_steps_per_second": 0.662,
      "step": 18
    },
    {
      "epoch": 0.03158769742310889,
      "grad_norm": NaN,
      "learning_rate": 1.9510565162951538e-05,
      "loss": 0.0,
      "step": 19
    },
    {
      "epoch": 0.03325020781379884,
      "grad_norm": NaN,
      "learning_rate": 1.9396926207859085e-05,
      "loss": 0.0,
      "step": 20
    },
    {
      "epoch": 0.034912718204488775,
      "grad_norm": NaN,
      "learning_rate": 1.9271838545667876e-05,
      "loss": 0.0,
      "step": 21
    },
    {
      "epoch": 0.03657522859517872,
      "grad_norm": NaN,
      "learning_rate": 1.913545457642601e-05,
      "loss": 0.0,
      "step": 22
    },
    {
      "epoch": 0.038237738985868665,
      "grad_norm": NaN,
      "learning_rate": 1.8987940462991673e-05,
      "loss": 0.0,
      "step": 23
    },
    {
      "epoch": 0.0399002493765586,
      "grad_norm": NaN,
      "learning_rate": 1.8829475928589272e-05,
      "loss": 0.0,
      "step": 24
    },
    {
      "epoch": 0.04156275976724855,
      "grad_norm": NaN,
      "learning_rate": 1.866025403784439e-05,
      "loss": 0.0,
      "step": 25
    },
    {
      "epoch": 0.043225270157938485,
      "grad_norm": NaN,
      "learning_rate": 1.848048096156426e-05,
      "loss": 0.0,
      "step": 26
    },
    {
      "epoch": 0.04488778054862843,
      "grad_norm": NaN,
      "learning_rate": 1.8290375725550417e-05,
      "loss": 0.0,
      "step": 27
    },
    {
      "epoch": 0.04488778054862843,
      "eval_loss": NaN,
      "eval_runtime": 96.6279,
      "eval_samples_per_second": 5.247,
      "eval_steps_per_second": 0.662,
      "step": 27
    },
    {
      "epoch": 0.046550290939318374,
      "grad_norm": NaN,
      "learning_rate": 1.8090169943749477e-05,
      "loss": 0.0,
      "step": 28
    },
    {
      "epoch": 0.04821280133000831,
      "grad_norm": NaN,
      "learning_rate": 1.788010753606722e-05,
      "loss": 0.0,
      "step": 29
    },
    {
      "epoch": 0.04987531172069826,
      "grad_norm": NaN,
      "learning_rate": 1.766044443118978e-05,
      "loss": 0.0,
      "step": 30
    },
    {
      "epoch": 0.051537822111388194,
      "grad_norm": NaN,
      "learning_rate": 1.7431448254773943e-05,
      "loss": 0.0,
      "step": 31
    },
    {
      "epoch": 0.05320033250207814,
      "grad_norm": NaN,
      "learning_rate": 1.7193398003386514e-05,
      "loss": 0.0,
      "step": 32
    },
    {
      "epoch": 0.05486284289276808,
      "grad_norm": NaN,
      "learning_rate": 1.6946583704589973e-05,
      "loss": 0.0,
      "step": 33
    },
    {
      "epoch": 0.05652535328345802,
      "grad_norm": NaN,
      "learning_rate": 1.6691306063588583e-05,
      "loss": 0.0,
      "step": 34
    },
    {
      "epoch": 0.058187863674147966,
      "grad_norm": NaN,
      "learning_rate": 1.6427876096865394e-05,
      "loss": 0.0,
      "step": 35
    },
    {
      "epoch": 0.059850374064837904,
      "grad_norm": NaN,
      "learning_rate": 1.6156614753256583e-05,
      "loss": 0.0,
      "step": 36
    },
    {
      "epoch": 0.059850374064837904,
      "eval_loss": NaN,
      "eval_runtime": 96.6597,
      "eval_samples_per_second": 5.245,
      "eval_steps_per_second": 0.662,
      "step": 36
    },
    {
      "epoch": 0.06151288445552785,
      "grad_norm": NaN,
      "learning_rate": 1.5877852522924733e-05,
      "loss": 0.0,
      "step": 37
    },
    {
      "epoch": 0.06317539484621779,
      "grad_norm": NaN,
      "learning_rate": 1.5591929034707468e-05,
      "loss": 0.0,
      "step": 38
    },
    {
      "epoch": 0.06483790523690773,
      "grad_norm": NaN,
      "learning_rate": 1.529919264233205e-05,
      "loss": 0.0,
      "step": 39
    },
    {
      "epoch": 0.06650041562759768,
      "grad_norm": NaN,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.0,
      "step": 40
    },
    {
      "epoch": 0.06816292601828762,
      "grad_norm": NaN,
      "learning_rate": 1.469471562785891e-05,
      "loss": 0.0,
      "step": 41
    },
    {
      "epoch": 0.06982543640897755,
      "grad_norm": NaN,
      "learning_rate": 1.4383711467890776e-05,
      "loss": 0.0,
      "step": 42
    },
    {
      "epoch": 0.0714879467996675,
      "grad_norm": NaN,
      "learning_rate": 1.4067366430758004e-05,
      "loss": 0.0,
      "step": 43
    },
    {
      "epoch": 0.07315045719035744,
      "grad_norm": NaN,
      "learning_rate": 1.3746065934159123e-05,
      "loss": 0.0,
      "step": 44
    },
    {
      "epoch": 0.07481296758104738,
      "grad_norm": NaN,
      "learning_rate": 1.342020143325669e-05,
      "loss": 0.0,
      "step": 45
    },
    {
      "epoch": 0.07481296758104738,
      "eval_loss": NaN,
      "eval_runtime": 96.5875,
      "eval_samples_per_second": 5.249,
      "eval_steps_per_second": 0.663,
      "step": 45
    },
    {
      "epoch": 0.07647547797173733,
      "grad_norm": NaN,
      "learning_rate": 1.3090169943749475e-05,
      "loss": 0.0,
      "step": 46
    },
    {
      "epoch": 0.07813798836242726,
      "grad_norm": NaN,
      "learning_rate": 1.2756373558169992e-05,
      "loss": 0.0,
      "step": 47
    },
    {
      "epoch": 0.0798004987531172,
      "grad_norm": NaN,
      "learning_rate": 1.2419218955996677e-05,
      "loss": 0.0,
      "step": 48
    },
    {
      "epoch": 0.08146300914380715,
      "grad_norm": NaN,
      "learning_rate": 1.2079116908177592e-05,
      "loss": 0.0,
      "step": 49
    },
    {
      "epoch": 0.0831255195344971,
      "grad_norm": NaN,
      "learning_rate": 1.1736481776669307e-05,
      "loss": 0.0,
      "step": 50
    },
    {
      "epoch": 0.08478802992518704,
      "grad_norm": NaN,
      "learning_rate": 1.1391731009600655e-05,
      "loss": 0.0,
      "step": 51
    },
    {
      "epoch": 0.08645054031587697,
      "grad_norm": NaN,
      "learning_rate": 1.1045284632676535e-05,
      "loss": 0.0,
      "step": 52
    },
    {
      "epoch": 0.08811305070656691,
      "grad_norm": NaN,
      "learning_rate": 1.0697564737441254e-05,
      "loss": 0.0,
      "step": 53
    },
    {
      "epoch": 0.08977556109725686,
      "grad_norm": NaN,
      "learning_rate": 1.0348994967025012e-05,
      "loss": 0.0,
      "step": 54
    },
    {
      "epoch": 0.08977556109725686,
      "eval_loss": NaN,
      "eval_runtime": 96.6415,
      "eval_samples_per_second": 5.246,
      "eval_steps_per_second": 0.662,
      "step": 54
    },
    {
      "epoch": 0.0914380714879468,
      "grad_norm": NaN,
      "learning_rate": 1e-05,
      "loss": 0.0,
      "step": 55
    },
    {
      "epoch": 0.09310058187863675,
      "grad_norm": NaN,
      "learning_rate": 9.651005032974994e-06,
      "loss": 0.0,
      "step": 56
    },
    {
      "epoch": 0.09476309226932668,
      "grad_norm": NaN,
      "learning_rate": 9.302435262558748e-06,
      "loss": 0.0,
      "step": 57
    },
    {
      "epoch": 0.09642560266001662,
      "grad_norm": NaN,
      "learning_rate": 8.954715367323468e-06,
      "loss": 0.0,
      "step": 58
    },
    {
      "epoch": 0.09808811305070657,
      "grad_norm": NaN,
      "learning_rate": 8.60826899039935e-06,
      "loss": 0.0,
      "step": 59
    },
    {
      "epoch": 0.09975062344139651,
      "grad_norm": NaN,
      "learning_rate": 8.263518223330698e-06,
      "loss": 0.0,
      "step": 60
    },
    {
      "epoch": 0.10141313383208644,
      "grad_norm": NaN,
      "learning_rate": 7.92088309182241e-06,
      "loss": 0.0,
      "step": 61
    },
    {
      "epoch": 0.10307564422277639,
      "grad_norm": NaN,
      "learning_rate": 7.580781044003324e-06,
      "loss": 0.0,
      "step": 62
    },
    {
      "epoch": 0.10473815461346633,
      "grad_norm": NaN,
      "learning_rate": 7.243626441830009e-06,
      "loss": 0.0,
      "step": 63
    },
    {
      "epoch": 0.10473815461346633,
      "eval_loss": NaN,
      "eval_runtime": 96.6267,
      "eval_samples_per_second": 5.247,
      "eval_steps_per_second": 0.662,
      "step": 63
    },
    {
      "epoch": 0.10640066500415628,
      "grad_norm": NaN,
      "learning_rate": 6.909830056250527e-06,
      "loss": 0.0,
      "step": 64
    },
    {
      "epoch": 0.10806317539484622,
      "grad_norm": NaN,
      "learning_rate": 6.579798566743314e-06,
      "loss": 0.0,
      "step": 65
    },
    {
      "epoch": 0.10972568578553615,
      "grad_norm": NaN,
      "learning_rate": 6.25393406584088e-06,
      "loss": 0.0,
      "step": 66
    },
    {
      "epoch": 0.1113881961762261,
      "grad_norm": NaN,
      "learning_rate": 5.932633569242e-06,
      "loss": 0.0,
      "step": 67
    },
    {
      "epoch": 0.11305070656691604,
      "grad_norm": NaN,
      "learning_rate": 5.616288532109225e-06,
      "loss": 0.0,
      "step": 68
    },
    {
      "epoch": 0.11471321695760599,
      "grad_norm": NaN,
      "learning_rate": 5.305284372141095e-06,
      "loss": 0.0,
      "step": 69
    },
    {
      "epoch": 0.11637572734829593,
      "grad_norm": NaN,
      "learning_rate": 5.000000000000003e-06,
      "loss": 0.0,
      "step": 70
    },
    {
      "epoch": 0.11803823773898586,
      "grad_norm": NaN,
      "learning_rate": 4.700807357667953e-06,
      "loss": 0.0,
      "step": 71
    },
    {
      "epoch": 0.11970074812967581,
      "grad_norm": NaN,
      "learning_rate": 4.408070965292534e-06,
      "loss": 0.0,
      "step": 72
    },
    {
      "epoch": 0.11970074812967581,
      "eval_loss": NaN,
      "eval_runtime": 96.6383,
      "eval_samples_per_second": 5.246,
      "eval_steps_per_second": 0.662,
      "step": 72
    },
    {
      "epoch": 0.12136325852036575,
      "grad_norm": NaN,
      "learning_rate": 4.12214747707527e-06,
      "loss": 0.0,
      "step": 73
    },
    {
      "epoch": 0.1230257689110557,
      "grad_norm": NaN,
      "learning_rate": 3.8433852467434175e-06,
      "loss": 0.0,
      "step": 74
    },
    {
      "epoch": 0.12468827930174564,
      "grad_norm": NaN,
      "learning_rate": 3.5721239031346067e-06,
      "loss": 0.0,
      "step": 75
    },
    {
      "epoch": 0.12635078969243557,
      "grad_norm": NaN,
      "learning_rate": 3.308693936411421e-06,
      "loss": 0.0,
      "step": 76
    },
    {
      "epoch": 0.12801330008312553,
      "grad_norm": NaN,
      "learning_rate": 3.0534162954100264e-06,
      "loss": 0.0,
      "step": 77
    },
    {
      "epoch": 0.12967581047381546,
      "grad_norm": NaN,
      "learning_rate": 2.8066019966134907e-06,
      "loss": 0.0,
      "step": 78
    },
    {
      "epoch": 0.1313383208645054,
      "grad_norm": NaN,
      "learning_rate": 2.5685517452260566e-06,
      "loss": 0.0,
      "step": 79
    },
    {
      "epoch": 0.13300083125519535,
      "grad_norm": NaN,
      "learning_rate": 2.339555568810221e-06,
      "loss": 0.0,
      "step": 80
    },
    {
      "epoch": 0.13466334164588528,
      "grad_norm": NaN,
      "learning_rate": 2.119892463932781e-06,
      "loss": 0.0,
      "step": 81
    },
    {
      "epoch": 0.13466334164588528,
      "eval_loss": NaN,
      "eval_runtime": 96.6256,
      "eval_samples_per_second": 5.247,
      "eval_steps_per_second": 0.662,
      "step": 81
    },
    {
      "epoch": 0.13632585203657524,
      "grad_norm": NaN,
      "learning_rate": 1.9098300562505266e-06,
      "loss": 0.0,
      "step": 82
    },
    {
      "epoch": 0.13798836242726517,
      "grad_norm": NaN,
      "learning_rate": 1.709624274449584e-06,
      "loss": 0.0,
      "step": 83
    },
    {
      "epoch": 0.1396508728179551,
      "grad_norm": NaN,
      "learning_rate": 1.5195190384357405e-06,
      "loss": 0.0,
      "step": 84
    },
    {
      "epoch": 0.14131338320864506,
      "grad_norm": NaN,
      "learning_rate": 1.339745962155613e-06,
      "loss": 0.0,
      "step": 85
    },
    {
      "epoch": 0.142975893599335,
      "grad_norm": NaN,
      "learning_rate": 1.1705240714107301e-06,
      "loss": 0.0,
      "step": 86
    },
    {
      "epoch": 0.14463840399002495,
      "grad_norm": NaN,
      "learning_rate": 1.012059537008332e-06,
      "loss": 0.0,
      "step": 87
    },
    {
      "epoch": 0.14630091438071488,
      "grad_norm": NaN,
      "learning_rate": 8.645454235739903e-07,
      "loss": 0.0,
      "step": 88
    },
    {
      "epoch": 0.1479634247714048,
      "grad_norm": NaN,
      "learning_rate": 7.281614543321269e-07,
      "loss": 0.0,
      "step": 89
    },
    {
      "epoch": 0.14962593516209477,
      "grad_norm": NaN,
      "learning_rate": 6.030737921409169e-07,
      "loss": 0.0,
      "step": 90
    },
    {
      "epoch": 0.14962593516209477,
      "eval_loss": NaN,
      "eval_runtime": 96.6211,
      "eval_samples_per_second": 5.247,
      "eval_steps_per_second": 0.662,
      "step": 90
    },
    {
      "epoch": 0.1512884455527847,
      "grad_norm": NaN,
      "learning_rate": 4.894348370484648e-07,
      "loss": 0.0,
      "step": 91
    },
    {
      "epoch": 0.15295095594347466,
      "grad_norm": NaN,
      "learning_rate": 3.8738304061681107e-07,
      "loss": 0.0,
      "step": 92
    },
    {
      "epoch": 0.1546134663341646,
      "grad_norm": NaN,
      "learning_rate": 2.970427372400353e-07,
      "loss": 0.0,
      "step": 93
    },
    {
      "epoch": 0.15627597672485452,
      "grad_norm": NaN,
      "learning_rate": 2.1852399266194312e-07,
      "loss": 0.0,
      "step": 94
    },
    {
      "epoch": 0.15793848711554448,
      "grad_norm": NaN,
      "learning_rate": 1.519224698779198e-07,
      "loss": 0.0,
      "step": 95
    },
    {
      "epoch": 0.1596009975062344,
      "grad_norm": NaN,
      "learning_rate": 9.731931258429638e-08,
      "loss": 0.0,
      "step": 96
    },
    {
      "epoch": 0.16126350789692437,
      "grad_norm": NaN,
      "learning_rate": 5.4781046317267103e-08,
      "loss": 0.0,
      "step": 97
    },
    {
      "epoch": 0.1629260182876143,
      "grad_norm": NaN,
      "learning_rate": 2.4359497401758026e-08,
      "loss": 0.0,
      "step": 98
    },
    {
      "epoch": 0.16458852867830423,
      "grad_norm": NaN,
      "learning_rate": 6.091729809042379e-09,
      "loss": 0.0,
      "step": 99
    },
    {
      "epoch": 0.16458852867830423,
      "eval_loss": NaN,
      "eval_runtime": 96.6138,
      "eval_samples_per_second": 5.248,
      "eval_steps_per_second": 0.662,
      "step": 99
    },
    {
      "epoch": 0.1662510390689942,
      "grad_norm": NaN,
      "learning_rate": 0.0,
      "loss": 0.0,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3869079534239744e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}