{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9977241693218024,
  "eval_steps": 69,
  "global_step": 274,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
    {
      "epoch": 0.0036413290851160674,
      "grad_norm": null,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.0,
      "step": 1
    },
    {
      "epoch": 0.0036413290851160674,
      "eval_loss": null,
      "eval_runtime": 55.4813,
      "eval_samples_per_second": 8.345,
      "eval_steps_per_second": 2.091,
      "step": 1
    },
|
{ |
|
"epoch": 0.007282658170232135, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.000000000000001e-06, |
|
"loss": 0.0, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.010923987255348202, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.2e-05, |
|
"loss": 0.0, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.01456531634046427, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.6000000000000003e-05, |
|
"loss": 0.0, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.018206645425580335, |
|
"grad_norm": NaN, |
|
"learning_rate": 2e-05, |
|
"loss": 0.0, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.021847974510696404, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.4e-05, |
|
"loss": 0.0, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.02548930359581247, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.8000000000000003e-05, |
|
"loss": 0.0, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.02913063268092854, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.2000000000000005e-05, |
|
"loss": 0.0, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.032771961766044605, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.6e-05, |
|
"loss": 0.0, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.03641329085116067, |
|
"grad_norm": NaN, |
|
"learning_rate": 4e-05, |
|
"loss": 0.0, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.04005461993627674, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.4000000000000006e-05, |
|
"loss": 0.0, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.04369594902139281, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.8e-05, |
|
"loss": 0.0, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.047337278106508875, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.2000000000000004e-05, |
|
"loss": 0.0, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.05097860719162494, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.6000000000000006e-05, |
|
"loss": 0.0, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.05461993627674101, |
|
"grad_norm": NaN, |
|
"learning_rate": 6e-05, |
|
"loss": 0.0, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.05826126536185708, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.400000000000001e-05, |
|
"loss": 0.0, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.061902594446973144, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.800000000000001e-05, |
|
"loss": 0.0, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.06554392353208921, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.2e-05, |
|
"loss": 0.0, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.06918525261720528, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.6e-05, |
|
"loss": 0.0, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.07282658170232134, |
|
"grad_norm": NaN, |
|
"learning_rate": 8e-05, |
|
"loss": 0.0, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.07646791078743742, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.4e-05, |
|
"loss": 0.0, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.08010923987255349, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.800000000000001e-05, |
|
"loss": 0.0, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.08375056895766955, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.200000000000001e-05, |
|
"loss": 0.0, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.08739189804278562, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.6e-05, |
|
"loss": 0.0, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.09103322712790168, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001, |
|
"loss": 0.0, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.09467455621301775, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010400000000000001, |
|
"loss": 0.0, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.09831588529813381, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010800000000000001, |
|
"loss": 0.0, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.10195721438324988, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011200000000000001, |
|
"loss": 0.0, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.10559854346836596, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000116, |
|
"loss": 0.0, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.10923987255348203, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012, |
|
"loss": 0.0, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.11288120163859809, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000124, |
|
"loss": 0.0, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.11652253072371416, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012800000000000002, |
|
"loss": 0.0, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.12016385980883022, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000132, |
|
"loss": 0.0, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.12380518889394629, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00013600000000000003, |
|
"loss": 0.0, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.12744651797906237, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00014, |
|
"loss": 0.0, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.13108784706417842, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000144, |
|
"loss": 0.0, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.1347291761492945, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000148, |
|
"loss": 0.0, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.13837050523441055, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000152, |
|
"loss": 0.0, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.14201183431952663, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015600000000000002, |
|
"loss": 0.0, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.14565316340464268, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016, |
|
"loss": 0.0, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.14929449248975876, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000164, |
|
"loss": 0.0, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.15293582157487484, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000168, |
|
"loss": 0.0, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.1565771506599909, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000172, |
|
"loss": 0.0, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.16021847974510697, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017600000000000002, |
|
"loss": 0.0, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.16385980883022302, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018, |
|
"loss": 0.0, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.1675011379153391, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018400000000000003, |
|
"loss": 0.0, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.17114246700045516, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000188, |
|
"loss": 0.0, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.17478379608557124, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000192, |
|
"loss": 0.0, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.1784251251706873, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000196, |
|
"loss": 0.0, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.18206645425580337, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0002, |
|
"loss": 0.0, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.18570778334091945, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019999016517595753, |
|
"loss": 0.0, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.1893491124260355, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019996066263830531, |
|
"loss": 0.0, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.19299044151115158, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001999114981900887, |
|
"loss": 0.0, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.19663177059626763, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019984268150178167, |
|
"loss": 0.0, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.2002730996813837, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019975422610938462, |
|
"loss": 0.0, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.20391442876649976, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019964614941176195, |
|
"loss": 0.0, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.20755575785161584, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001995184726672197, |
|
"loss": 0.0, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.21119708693673192, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019937122098932428, |
|
"loss": 0.0, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.21483841602184797, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019920442334196248, |
|
"loss": 0.0, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.21847974510696405, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019901811253364456, |
|
"loss": 0.0, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.2221210741920801, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019881232521105089, |
|
"loss": 0.0, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.22576240327719618, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001985871018518236, |
|
"loss": 0.0, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.22940373236231223, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019834248675660486, |
|
"loss": 0.0, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.23304506144742831, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019807852804032305, |
|
"loss": 0.0, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.23668639053254437, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019779527762272877, |
|
"loss": 0.0, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.24032771961766045, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019749279121818235, |
|
"loss": 0.0, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.24396904870277653, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001971711283246951, |
|
"loss": 0.0, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.24761037778789258, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019683035221222618, |
|
"loss": 0.0, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.25125170687300863, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001964705299102376, |
|
"loss": 0.0, |
|
"step": 69 |
|
}, |
|
    {
      "epoch": 0.25125170687300863,
      "eval_loss": null,
      "eval_runtime": 55.3493,
      "eval_samples_per_second": 8.365,
      "eval_steps_per_second": 2.096,
      "step": 69
    },
|
{ |
|
"epoch": 0.25489303595812474, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019609173219450998, |
|
"loss": 0.0, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.2585343650432408, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001956940335732209, |
|
"loss": 0.0, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.26217569412835684, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019527751227228963, |
|
"loss": 0.0, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.2658170232134729, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001948422502199903, |
|
"loss": 0.0, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.269458352298589, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019438833303083678, |
|
"loss": 0.0, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.27309968138370505, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001939158499887428, |
|
"loss": 0.0, |
|
"step": 75 |
|
}, |
|
{ |
|
"epoch": 0.2767410104688211, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019342489402945998, |
|
"loss": 0.0, |
|
"step": 76 |
|
}, |
|
{ |
|
"epoch": 0.2803823395539372, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019291556172229785, |
|
"loss": 0.0, |
|
"step": 77 |
|
}, |
|
{ |
|
"epoch": 0.28402366863905326, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001923879532511287, |
|
"loss": 0.0, |
|
"step": 78 |
|
}, |
|
{ |
|
"epoch": 0.2876649977241693, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019184217239468212, |
|
"loss": 0.0, |
|
"step": 79 |
|
}, |
|
{ |
|
"epoch": 0.29130632680928537, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019127832650613189, |
|
"loss": 0.0, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.2949476558944015, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00019069652649198005, |
|
"loss": 0.0, |
|
"step": 81 |
|
}, |
|
{ |
|
"epoch": 0.2985889849795175, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001900968867902419, |
|
"loss": 0.0, |
|
"step": 82 |
|
}, |
|
{ |
|
"epoch": 0.3022303140646336, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018947952534793661, |
|
"loss": 0.0, |
|
"step": 83 |
|
}, |
|
{ |
|
"epoch": 0.3058716431497497, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018884456359788724, |
|
"loss": 0.0, |
|
"step": 84 |
|
}, |
|
{ |
|
"epoch": 0.30951297223486574, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001881921264348355, |
|
"loss": 0.0, |
|
"step": 85 |
|
}, |
|
{ |
|
"epoch": 0.3131543013199818, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018752234219087538, |
|
"loss": 0.0, |
|
"step": 86 |
|
}, |
|
{ |
|
"epoch": 0.31679563040509784, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018683534261021057, |
|
"loss": 0.0, |
|
"step": 87 |
|
}, |
|
{ |
|
"epoch": 0.32043695949021395, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018613126282324092, |
|
"loss": 0.0, |
|
"step": 88 |
|
}, |
|
{ |
|
"epoch": 0.32407828857533, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018541024131998274, |
|
"loss": 0.0, |
|
"step": 89 |
|
}, |
|
{ |
|
"epoch": 0.32771961766044605, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018467241992282843, |
|
"loss": 0.0, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.33136094674556216, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018391794375865024, |
|
"loss": 0.0, |
|
"step": 91 |
|
}, |
|
{ |
|
"epoch": 0.3350022758306782, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018314696123025454, |
|
"loss": 0.0, |
|
"step": 92 |
|
}, |
|
{ |
|
"epoch": 0.33864360491579426, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018235962398719147, |
|
"loss": 0.0, |
|
"step": 93 |
|
}, |
|
{ |
|
"epoch": 0.3422849340009103, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018155608689592604, |
|
"loss": 0.0, |
|
"step": 94 |
|
}, |
|
{ |
|
"epoch": 0.3459262630860264, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00018073650800937624, |
|
"loss": 0.0, |
|
"step": 95 |
|
}, |
|
{ |
|
"epoch": 0.34956759217114247, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017990104853582493, |
|
"loss": 0.0, |
|
"step": 96 |
|
}, |
|
{ |
|
"epoch": 0.3532089212562585, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017904987280721035, |
|
"loss": 0.0, |
|
"step": 97 |
|
}, |
|
{ |
|
"epoch": 0.3568502503413746, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000178183148246803, |
|
"loss": 0.0, |
|
"step": 98 |
|
}, |
|
{ |
|
"epoch": 0.3604915794264907, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001773010453362737, |
|
"loss": 0.0, |
|
"step": 99 |
|
}, |
|
{ |
|
"epoch": 0.36413290851160673, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017640373758216077, |
|
"loss": 0.0, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.3677742375967228, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001754914014817416, |
|
"loss": 0.0, |
|
"step": 101 |
|
}, |
|
{ |
|
"epoch": 0.3714155666818389, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017456421648831655, |
|
"loss": 0.0, |
|
"step": 102 |
|
}, |
|
{ |
|
"epoch": 0.37505689576695495, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017362236497591094, |
|
"loss": 0.0, |
|
"step": 103 |
|
}, |
|
{ |
|
"epoch": 0.378698224852071, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001726660322034027, |
|
"loss": 0.0, |
|
"step": 104 |
|
}, |
|
{ |
|
"epoch": 0.38233955393718705, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017169540627808274, |
|
"loss": 0.0, |
|
"step": 105 |
|
}, |
|
{ |
|
"epoch": 0.38598088302230316, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00017071067811865476, |
|
"loss": 0.0, |
|
"step": 106 |
|
}, |
|
{ |
|
"epoch": 0.3896222121074192, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016971204141768233, |
|
"loss": 0.0, |
|
"step": 107 |
|
}, |
|
{ |
|
"epoch": 0.39326354119253526, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016869969260349018, |
|
"loss": 0.0, |
|
"step": 108 |
|
}, |
|
{ |
|
"epoch": 0.39690487027765137, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016767383080152742, |
|
"loss": 0.0, |
|
"step": 109 |
|
}, |
|
{ |
|
"epoch": 0.4005461993627674, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001666346577952004, |
|
"loss": 0.0, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.40418752844788347, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016558237798618245, |
|
"loss": 0.0, |
|
"step": 111 |
|
}, |
|
{ |
|
"epoch": 0.4078288575329995, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016451719835420877, |
|
"loss": 0.0, |
|
"step": 112 |
|
}, |
|
{ |
|
"epoch": 0.41147018661811563, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016343932841636456, |
|
"loss": 0.0, |
|
"step": 113 |
|
}, |
|
{ |
|
"epoch": 0.4151115157032317, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016234898018587337, |
|
"loss": 0.0, |
|
"step": 114 |
|
}, |
|
{ |
|
"epoch": 0.41875284478834773, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016124636813039502, |
|
"loss": 0.0, |
|
"step": 115 |
|
}, |
|
{ |
|
"epoch": 0.42239417387346384, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00016013170912984058, |
|
"loss": 0.0, |
|
"step": 116 |
|
}, |
|
{ |
|
"epoch": 0.4260355029585799, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015900522243371282, |
|
"loss": 0.0, |
|
"step": 117 |
|
}, |
|
{ |
|
"epoch": 0.42967683204369594, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001578671296179806, |
|
"loss": 0.0, |
|
"step": 118 |
|
}, |
|
{ |
|
"epoch": 0.433318161128812, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015671765454149559, |
|
"loss": 0.0, |
|
"step": 119 |
|
}, |
|
{ |
|
"epoch": 0.4369594902139281, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015555702330196023, |
|
"loss": 0.0, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.44060081929904416, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015438546419145488, |
|
"loss": 0.0, |
|
"step": 121 |
|
}, |
|
{ |
|
"epoch": 0.4442421483841602, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015320320765153367, |
|
"loss": 0.0, |
|
"step": 122 |
|
}, |
|
{ |
|
"epoch": 0.44788347746927626, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015201048622789747, |
|
"loss": 0.0, |
|
"step": 123 |
|
}, |
|
{ |
|
"epoch": 0.45152480655439237, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00015080753452465296, |
|
"loss": 0.0, |
|
"step": 124 |
|
}, |
|
{ |
|
"epoch": 0.4551661356395084, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001495945891581668, |
|
"loss": 0.0, |
|
"step": 125 |
|
}, |
|
{ |
|
"epoch": 0.45880746472462447, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000148371888710524, |
|
"loss": 0.0, |
|
"step": 126 |
|
}, |
|
{ |
|
"epoch": 0.4624487938097406, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001471396736825998, |
|
"loss": 0.0, |
|
"step": 127 |
|
}, |
|
{ |
|
"epoch": 0.46609012289485663, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00014589818644675378, |
|
"loss": 0.0, |
|
"step": 128 |
|
}, |
|
{ |
|
"epoch": 0.4697314519799727, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00014464767119915629, |
|
"loss": 0.0, |
|
"step": 129 |
|
}, |
|
{ |
|
"epoch": 0.47337278106508873, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00014338837391175582, |
|
"loss": 0.0, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.47701411015020484, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001421205422838971, |
|
"loss": 0.0, |
|
"step": 131 |
|
}, |
|
{ |
|
"epoch": 0.4806554392353209, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00014084442569359964, |
|
"loss": 0.0, |
|
"step": 132 |
|
}, |
|
{ |
|
"epoch": 0.48429676832043694, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001395602751485059, |
|
"loss": 0.0, |
|
"step": 133 |
|
}, |
|
{ |
|
"epoch": 0.48793809740555305, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000138268343236509, |
|
"loss": 0.0, |
|
"step": 134 |
|
}, |
|
{ |
|
"epoch": 0.4915794264906691, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00013696888407606952, |
|
"loss": 0.0, |
|
"step": 135 |
|
}, |
|
{ |
|
"epoch": 0.49522075557578515, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001356621532662313, |
|
"loss": 0.0, |
|
"step": 136 |
|
}, |
|
{ |
|
"epoch": 0.4988620846609012, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001343484078363461, |
|
"loss": 0.0, |
|
"step": 137 |
|
}, |
|
{ |
|
"epoch": 0.5025034137460173, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00013302790619551674, |
|
"loss": 0.0, |
|
"step": 138 |
|
}, |
|
    {
      "epoch": 0.5025034137460173,
      "eval_loss": null,
      "eval_runtime": 55.2669,
      "eval_samples_per_second": 8.378,
      "eval_steps_per_second": 2.099,
      "step": 138
    },
|
{ |
|
"epoch": 0.5061447428311333, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00013170090808176883, |
|
"loss": 0.0, |
|
"step": 139 |
|
}, |
|
{ |
|
"epoch": 0.5097860719162495, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00013036767451096148, |
|
"loss": 0.0, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.5134274010013655, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012902846772544624, |
|
"loss": 0.0, |
|
"step": 141 |
|
}, |
|
{ |
|
"epoch": 0.5170687300864816, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012768355114248494, |
|
"loss": 0.0, |
|
"step": 142 |
|
}, |
|
{ |
|
"epoch": 0.5207100591715976, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012633318930243648, |
|
"loss": 0.0, |
|
"step": 143 |
|
}, |
|
{ |
|
"epoch": 0.5243513882567137, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001249776478167227, |
|
"loss": 0.0, |
|
"step": 144 |
|
}, |
|
{ |
|
"epoch": 0.5279927173418297, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012361719331558345, |
|
"loss": 0.0, |
|
"step": 145 |
|
}, |
|
{ |
|
"epoch": 0.5316340464269458, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00012225209339563145, |
|
"loss": 0.0, |
|
"step": 146 |
|
}, |
|
{ |
|
"epoch": 0.535275375512062, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000120882616567217, |
|
"loss": 0.0, |
|
"step": 147 |
|
}, |
|
{ |
|
"epoch": 0.538916704597178, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011950903220161285, |
|
"loss": 0.0, |
|
"step": 148 |
|
}, |
|
{ |
|
"epoch": 0.542558033682294, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011813161047802985, |
|
"loss": 0.0, |
|
"step": 149 |
|
}, |
|
{ |
|
"epoch": 0.5461993627674101, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011675062233047364, |
|
"loss": 0.0, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.5498406918525262, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.000115366339394453, |
|
"loss": 0.0, |
|
"step": 151 |
|
}, |
|
{ |
|
"epoch": 0.5534820209376422, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011397903395354996, |
|
"loss": 0.0, |
|
"step": 152 |
|
}, |
|
{ |
|
"epoch": 0.5571233500227583, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011258897888586255, |
|
"loss": 0.0, |
|
"step": 153 |
|
}, |
|
{ |
|
"epoch": 0.5607646791078744, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00011119644761033078, |
|
"loss": 0.0, |
|
"step": 154 |
|
}, |
|
{ |
|
"epoch": 0.5644060081929905, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001098017140329561, |
|
"loss": 0.0, |
|
"step": 155 |
|
}, |
|
{ |
|
"epoch": 0.5680473372781065, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010840505249292476, |
|
"loss": 0.0, |
|
"step": 156 |
|
}, |
|
{ |
|
"epoch": 0.5716886663632226, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010700673770864673, |
|
"loss": 0.0, |
|
"step": 157 |
|
}, |
|
{ |
|
"epoch": 0.5753299954483386, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010560704472371919, |
|
"loss": 0.0, |
|
"step": 158 |
|
}, |
|
{ |
|
"epoch": 0.5789713245334547, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010420624885282653, |
|
"loss": 0.0, |
|
"step": 159 |
|
}, |
|
{ |
|
"epoch": 0.5826126536185707, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001028046256275869, |
|
"loss": 0.0, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.5862539827036869, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.00010140245074235624, |
|
"loss": 0.0, |
|
"step": 161 |
|
}, |
|
{ |
|
"epoch": 0.589895311788803, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0001, |
|
"loss": 0.0, |
|
"step": 162 |
|
}, |
|
{ |
|
"epoch": 0.593536640873919, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.859754925764378e-05, |
|
"loss": 0.0, |
|
"step": 163 |
|
}, |
|
{ |
|
"epoch": 0.597177969959035, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.719537437241312e-05, |
|
"loss": 0.0, |
|
"step": 164 |
|
}, |
|
{ |
|
"epoch": 0.6008192990441511, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.579375114717351e-05, |
|
"loss": 0.0, |
|
"step": 165 |
|
}, |
|
{ |
|
"epoch": 0.6044606281292672, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.439295527628081e-05, |
|
"loss": 0.0, |
|
"step": 166 |
|
}, |
|
{ |
|
"epoch": 0.6081019572143832, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.299326229135326e-05, |
|
"loss": 0.0, |
|
"step": 167 |
|
}, |
|
{ |
|
"epoch": 0.6117432862994994, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.159494750707526e-05, |
|
"loss": 0.0, |
|
"step": 168 |
|
}, |
|
{ |
|
"epoch": 0.6153846153846154, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.019828596704394e-05, |
|
"loss": 0.0, |
|
"step": 169 |
|
}, |
|
{ |
|
"epoch": 0.6190259444697315, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.880355238966923e-05, |
|
"loss": 0.0, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.6226672735548475, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.741102111413748e-05, |
|
"loss": 0.0, |
|
"step": 171 |
|
}, |
|
{ |
|
"epoch": 0.6263086026399636, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.602096604645009e-05, |
|
"loss": 0.0, |
|
"step": 172 |
|
}, |
|
{ |
|
"epoch": 0.6299499317250796, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.463366060554698e-05, |
|
"loss": 0.0, |
|
"step": 173 |
|
}, |
|
{ |
|
"epoch": 0.6335912608101957, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.324937766952638e-05, |
|
"loss": 0.0, |
|
"step": 174 |
|
}, |
|
{ |
|
"epoch": 0.6372325898953118, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.186838952197018e-05, |
|
"loss": 0.0, |
|
"step": 175 |
|
}, |
|
{ |
|
"epoch": 0.6408739189804279, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.049096779838719e-05, |
|
"loss": 0.0, |
|
"step": 176 |
|
}, |
|
{ |
|
"epoch": 0.6445152480655439, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.911738343278304e-05, |
|
"loss": 0.0, |
|
"step": 177 |
|
}, |
|
{ |
|
"epoch": 0.64815657715066, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.774790660436858e-05, |
|
"loss": 0.0, |
|
"step": 178 |
|
}, |
|
{ |
|
"epoch": 0.651797906235776, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.63828066844166e-05, |
|
"loss": 0.0, |
|
"step": 179 |
|
}, |
|
{ |
|
"epoch": 0.6554392353208921, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.502235218327731e-05, |
|
"loss": 0.0, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.6590805644060082, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.366681069756352e-05, |
|
"loss": 0.0, |
|
"step": 181 |
|
}, |
|
{ |
|
"epoch": 0.6627218934911243, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.231644885751507e-05, |
|
"loss": 0.0, |
|
"step": 182 |
|
}, |
|
{ |
|
"epoch": 0.6663632225762404, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.097153227455379e-05, |
|
"loss": 0.0, |
|
"step": 183 |
|
}, |
|
{ |
|
"epoch": 0.6700045516613564, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.963232548903853e-05, |
|
"loss": 0.0, |
|
"step": 184 |
|
}, |
|
{ |
|
"epoch": 0.6736458807464725, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.829909191823121e-05, |
|
"loss": 0.0, |
|
"step": 185 |
|
}, |
|
{ |
|
"epoch": 0.6772872098315885, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.697209380448333e-05, |
|
"loss": 0.0, |
|
"step": 186 |
|
}, |
|
{ |
|
"epoch": 0.6809285389167046, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.565159216365389e-05, |
|
"loss": 0.0, |
|
"step": 187 |
|
}, |
|
{ |
|
"epoch": 0.6845698680018206, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.43378467337687e-05, |
|
"loss": 0.0, |
|
"step": 188 |
|
}, |
|
{ |
|
"epoch": 0.6882111970869367, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.30311159239305e-05, |
|
"loss": 0.0, |
|
"step": 189 |
|
}, |
|
{ |
|
"epoch": 0.6918525261720528, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.173165676349103e-05, |
|
"loss": 0.0, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.6954938552571689, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.043972485149414e-05, |
|
"loss": 0.0, |
|
"step": 191 |
|
}, |
|
{ |
|
"epoch": 0.6991351843422849, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.9155574306400395e-05, |
|
"loss": 0.0, |
|
"step": 192 |
|
}, |
|
{ |
|
"epoch": 0.702776513427401, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.787945771610296e-05, |
|
"loss": 0.0, |
|
"step": 193 |
|
}, |
|
{ |
|
"epoch": 0.706417842512517, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.6611626088244194e-05, |
|
"loss": 0.0, |
|
"step": 194 |
|
}, |
|
{ |
|
"epoch": 0.7100591715976331, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.5352328800843724e-05, |
|
"loss": 0.0, |
|
"step": 195 |
|
}, |
|
{ |
|
"epoch": 0.7137005006827492, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.410181355324622e-05, |
|
"loss": 0.0, |
|
"step": 196 |
|
}, |
|
{ |
|
"epoch": 0.7173418297678653, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.286032631740023e-05, |
|
"loss": 0.0, |
|
"step": 197 |
|
}, |
|
{ |
|
"epoch": 0.7209831588529814, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.162811128947602e-05, |
|
"loss": 0.0, |
|
"step": 198 |
|
}, |
|
{ |
|
"epoch": 0.7246244879380974, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.0405410841833253e-05, |
|
"loss": 0.0, |
|
"step": 199 |
|
}, |
|
{ |
|
"epoch": 0.7282658170232135, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.919246547534708e-05, |
|
"loss": 0.0, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.7319071461083295, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.7989513772102537e-05, |
|
"loss": 0.0, |
|
"step": 201 |
|
}, |
|
{ |
|
"epoch": 0.7355484751934456, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.6796792348466356e-05, |
|
"loss": 0.0, |
|
"step": 202 |
|
}, |
|
{ |
|
"epoch": 0.7391898042785616, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.561453580854516e-05, |
|
"loss": 0.0, |
|
"step": 203 |
|
}, |
|
{ |
|
"epoch": 0.7428311333636778, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.444297669803981e-05, |
|
"loss": 0.0, |
|
"step": 204 |
|
}, |
|
{ |
|
"epoch": 0.7464724624487938, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.328234545850442e-05, |
|
"loss": 0.0, |
|
"step": 205 |
|
}, |
|
{ |
|
"epoch": 0.7501137915339099, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.213287038201943e-05, |
|
"loss": 0.0, |
|
"step": 206 |
|
}, |
|
{ |
|
"epoch": 0.7537551206190259, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.0994777566287204e-05, |
|
"loss": 0.0, |
|
"step": 207 |
|
}, |
|
    {
      "epoch": 0.7537551206190259,
      "eval_loss": null,
      "eval_runtime": 55.3157,
      "eval_samples_per_second": 8.37,
      "eval_steps_per_second": 2.097,
      "step": 207
    },
|
{ |
|
"epoch": 0.757396449704142, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.9868290870159405e-05, |
|
"loss": 0.0, |
|
"step": 208 |
|
}, |
|
{ |
|
"epoch": 0.761037778789258, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.875363186960499e-05, |
|
"loss": 0.0, |
|
"step": 209 |
|
}, |
|
{ |
|
"epoch": 0.7646791078743741, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.7651019814126654e-05, |
|
"loss": 0.0, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.7683204369594903, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.6560671583635467e-05, |
|
"loss": 0.0, |
|
"step": 211 |
|
}, |
|
{ |
|
"epoch": 0.7719617660446063, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.548280164579126e-05, |
|
"loss": 0.0, |
|
"step": 212 |
|
}, |
|
{ |
|
"epoch": 0.7756030951297224, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.4417622013817595e-05, |
|
"loss": 0.0, |
|
"step": 213 |
|
}, |
|
{ |
|
"epoch": 0.7792444242148384, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.336534220479961e-05, |
|
"loss": 0.0, |
|
"step": 214 |
|
}, |
|
{ |
|
"epoch": 0.7828857532999545, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.2326169198472556e-05, |
|
"loss": 0.0, |
|
"step": 215 |
|
}, |
|
{ |
|
"epoch": 0.7865270823850705, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.130030739650983e-05, |
|
"loss": 0.0, |
|
"step": 216 |
|
}, |
|
{ |
|
"epoch": 0.7901684114701866, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.0287958582317676e-05, |
|
"loss": 0.0, |
|
"step": 217 |
|
}, |
|
{ |
|
"epoch": 0.7938097405553027, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.9289321881345254e-05, |
|
"loss": 0.0, |
|
"step": 218 |
|
}, |
|
{ |
|
"epoch": 0.7974510696404188, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.8304593721917285e-05, |
|
"loss": 0.0, |
|
"step": 219 |
|
}, |
|
{ |
|
"epoch": 0.8010923987255348, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.7333967796597315e-05, |
|
"loss": 0.0, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.8047337278106509, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.6377635024089087e-05, |
|
"loss": 0.0, |
|
"step": 221 |
|
}, |
|
{ |
|
"epoch": 0.8083750568957669, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5435783511683443e-05, |
|
"loss": 0.0, |
|
"step": 222 |
|
}, |
|
{ |
|
"epoch": 0.812016385980883, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.450859851825842e-05, |
|
"loss": 0.0, |
|
"step": 223 |
|
}, |
|
{ |
|
"epoch": 0.815657715065999, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.3596262417839255e-05, |
|
"loss": 0.0, |
|
"step": 224 |
|
}, |
|
{ |
|
"epoch": 0.8192990441511152, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.26989546637263e-05, |
|
"loss": 0.0, |
|
"step": 225 |
|
}, |
|
{ |
|
"epoch": 0.8229403732362313, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.181685175319702e-05, |
|
"loss": 0.0, |
|
"step": 226 |
|
}, |
|
{ |
|
"epoch": 0.8265817023213473, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.095012719278966e-05, |
|
"loss": 0.0, |
|
"step": 227 |
|
}, |
|
{ |
|
"epoch": 0.8302230314064634, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.009895146417512e-05, |
|
"loss": 0.0, |
|
"step": 228 |
|
}, |
|
{ |
|
"epoch": 0.8338643604915794, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.926349199062376e-05, |
|
"loss": 0.0, |
|
"step": 229 |
|
}, |
|
{ |
|
"epoch": 0.8375056895766955, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.8443913104073983e-05, |
|
"loss": 0.0, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.8411470186618115, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.7640376012808536e-05, |
|
"loss": 0.0, |
|
"step": 231 |
|
}, |
|
{ |
|
"epoch": 0.8447883477469277, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.6853038769745467e-05, |
|
"loss": 0.0, |
|
"step": 232 |
|
}, |
|
{ |
|
"epoch": 0.8484296768320437, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.6082056241349786e-05, |
|
"loss": 0.0, |
|
"step": 233 |
|
}, |
|
{ |
|
"epoch": 0.8520710059171598, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.5327580077171587e-05, |
|
"loss": 0.0, |
|
"step": 234 |
|
}, |
|
{ |
|
"epoch": 0.8557123350022758, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.4589758680017263e-05, |
|
"loss": 0.0, |
|
"step": 235 |
|
}, |
|
{ |
|
"epoch": 0.8593536640873919, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.3868737176759106e-05, |
|
"loss": 0.0, |
|
"step": 236 |
|
}, |
|
{ |
|
"epoch": 0.8629949931725079, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.3164657389789458e-05, |
|
"loss": 0.0, |
|
"step": 237 |
|
}, |
|
{ |
|
"epoch": 0.866636322257624, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.2477657809124631e-05, |
|
"loss": 0.0, |
|
"step": 238 |
|
}, |
|
{ |
|
"epoch": 0.8702776513427402, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.1807873565164506e-05, |
|
"loss": 0.0, |
|
"step": 239 |
|
}, |
|
{ |
|
"epoch": 0.8739189804278562, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.1155436402112785e-05, |
|
"loss": 0.0, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.8775603095129723, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.0520474652063394e-05, |
|
"loss": 0.0, |
|
"step": 241 |
|
}, |
|
{ |
|
"epoch": 0.8812016385980883, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.903113209758096e-06, |
|
"loss": 0.0, |
|
"step": 242 |
|
}, |
|
{ |
|
"epoch": 0.8848429676832044, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.303473508019944e-06, |
|
"loss": 0.0, |
|
"step": 243 |
|
}, |
|
{ |
|
"epoch": 0.8884842967683204, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.72167349386811e-06, |
|
"loss": 0.0, |
|
"step": 244 |
|
}, |
|
{ |
|
"epoch": 0.8921256258534365, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.157827605317892e-06, |
|
"loss": 0.0, |
|
"step": 245 |
|
}, |
|
{ |
|
"epoch": 0.8957669549385525, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.612046748871327e-06, |
|
"loss": 0.0, |
|
"step": 246 |
|
}, |
|
{ |
|
"epoch": 0.8994082840236687, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.084438277702188e-06, |
|
"loss": 0.0, |
|
"step": 247 |
|
}, |
|
{ |
|
"epoch": 0.9030496131087847, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.5751059705400295e-06, |
|
"loss": 0.0, |
|
"step": 248 |
|
}, |
|
{ |
|
"epoch": 0.9066909421939008, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.084150011257239e-06, |
|
"loss": 0.0, |
|
"step": 249 |
|
}, |
|
{ |
|
"epoch": 0.9103322712790168, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.611666969163243e-06, |
|
"loss": 0.0, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.9139736003641329, |
|
"grad_norm": NaN, |
|
"learning_rate": 5.157749780009735e-06, |
|
"loss": 0.0, |
|
"step": 251 |
|
}, |
|
{ |
|
"epoch": 0.9176149294492489, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.722487727710368e-06, |
|
"loss": 0.0, |
|
"step": 252 |
|
}, |
|
{ |
|
"epoch": 0.921256258534365, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.305966426779118e-06, |
|
"loss": 0.0, |
|
"step": 253 |
|
}, |
|
{ |
|
"epoch": 0.9248975876194812, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.908267805490051e-06, |
|
"loss": 0.0, |
|
"step": 254 |
|
}, |
|
{ |
|
"epoch": 0.9285389167045972, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.529470089762421e-06, |
|
"loss": 0.0, |
|
"step": 255 |
|
}, |
|
{ |
|
"epoch": 0.9321802457897133, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.169647787773866e-06, |
|
"loss": 0.0, |
|
"step": 256 |
|
}, |
|
{ |
|
"epoch": 0.9358215748748293, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.8288716753049005e-06, |
|
"loss": 0.0, |
|
"step": 257 |
|
}, |
|
{ |
|
"epoch": 0.9394629039599454, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.5072087818176382e-06, |
|
"loss": 0.0, |
|
"step": 258 |
|
}, |
|
{ |
|
"epoch": 0.9431042330450614, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.20472237727124e-06, |
|
"loss": 0.0, |
|
"step": 259 |
|
}, |
|
{ |
|
"epoch": 0.9467455621301775, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.921471959676957e-06, |
|
"loss": 0.0, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.9503868912152936, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.657513243395159e-06, |
|
"loss": 0.0, |
|
"step": 261 |
|
}, |
|
{ |
|
"epoch": 0.9540282203004097, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.4128981481764115e-06, |
|
"loss": 0.0, |
|
"step": 262 |
|
}, |
|
{ |
|
"epoch": 0.9576695493855257, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.1876747889491223e-06, |
|
"loss": 0.0, |
|
"step": 263 |
|
}, |
|
{ |
|
"epoch": 0.9613108784706418, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.818874663554357e-07, |
|
"loss": 0.0, |
|
"step": 264 |
|
}, |
|
{ |
|
"epoch": 0.9649522075557578, |
|
"grad_norm": NaN, |
|
"learning_rate": 7.955766580375335e-07, |
|
"loss": 0.0, |
|
"step": 265 |
|
}, |
|
{ |
|
"epoch": 0.9685935366408739, |
|
"grad_norm": NaN, |
|
"learning_rate": 6.287790106757396e-07, |
|
"loss": 0.0, |
|
"step": 266 |
|
}, |
|
{ |
|
"epoch": 0.9722348657259899, |
|
"grad_norm": NaN, |
|
"learning_rate": 4.815273327803182e-07, |
|
"loss": 0.0, |
|
"step": 267 |
|
}, |
|
{ |
|
"epoch": 0.9758761948111061, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.5385058823809156e-07, |
|
"loss": 0.0, |
|
"step": 268 |
|
}, |
|
{ |
|
"epoch": 0.9795175238962222, |
|
"grad_norm": NaN, |
|
"learning_rate": 2.457738906153972e-07, |
|
"loss": 0.0, |
|
"step": 269 |
|
}, |
|
{ |
|
"epoch": 0.9831588529813382, |
|
"grad_norm": NaN, |
|
"learning_rate": 1.5731849821833954e-07, |
|
"loss": 0.0, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.9868001820664543, |
|
"grad_norm": NaN, |
|
"learning_rate": 8.850180991131219e-08, |
|
"loss": 0.0, |
|
"step": 271 |
|
}, |
|
{ |
|
"epoch": 0.9904415111515703, |
|
"grad_norm": NaN, |
|
"learning_rate": 3.933736169471347e-08, |
|
"loss": 0.0, |
|
"step": 272 |
|
}, |
|
{ |
|
"epoch": 0.9940828402366864, |
|
"grad_norm": NaN, |
|
"learning_rate": 9.834824042498358e-09, |
|
"loss": 0.0, |
|
"step": 273 |
|
}, |
|
{ |
|
"epoch": 0.9977241693218024, |
|
"grad_norm": NaN, |
|
"learning_rate": 0.0, |
|
"loss": 0.0, |
|
"step": 274 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 274, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 69, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 7.706308131705324e+17, |
|
"train_batch_size": 2, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
|
|