{
  "best_metric": 0.47318556904792786,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-1400",
  "epoch": 0.7210919392222508,
  "eval_steps": 50,
  "global_step": 1400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025753283543651817,
      "grad_norm": 21.379672872549193,
      "learning_rate": 2.564102564102564e-06,
      "loss": 3.0388,
      "num_input_tokens_seen": 58496,
      "step": 5
    },
    {
      "epoch": 0.0051506567087303634,
      "grad_norm": 20.76117223991023,
      "learning_rate": 5.128205128205128e-06,
      "loss": 2.9831,
      "num_input_tokens_seen": 116960,
      "step": 10
    },
    {
      "epoch": 0.007725985063095545,
      "grad_norm": 22.5213517141881,
      "learning_rate": 7.692307692307694e-06,
      "loss": 2.8696,
      "num_input_tokens_seen": 175448,
      "step": 15
    },
    {
      "epoch": 0.010301313417460727,
      "grad_norm": 20.673071198727328,
      "learning_rate": 1.0256410256410256e-05,
      "loss": 2.6316,
      "num_input_tokens_seen": 233944,
      "step": 20
    },
    {
      "epoch": 0.012876641771825908,
      "grad_norm": 18.902291974538457,
      "learning_rate": 1.282051282051282e-05,
      "loss": 1.9707,
      "num_input_tokens_seen": 292416,
      "step": 25
    },
    {
      "epoch": 0.01545197012619109,
      "grad_norm": 8.05718270484028,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 1.3782,
      "num_input_tokens_seen": 350904,
      "step": 30
    },
    {
      "epoch": 0.018027298480556272,
      "grad_norm": 3.6465275188422344,
      "learning_rate": 1.794871794871795e-05,
      "loss": 1.0628,
      "num_input_tokens_seen": 409384,
      "step": 35
    },
    {
      "epoch": 0.020602626834921454,
      "grad_norm": 4.842154180410959,
      "learning_rate": 2.0512820512820512e-05,
      "loss": 0.9789,
      "num_input_tokens_seen": 467864,
      "step": 40
    },
    {
      "epoch": 0.023177955189286635,
      "grad_norm": 2.6799567517341396,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.9327,
      "num_input_tokens_seen": 526384,
      "step": 45
    },
    {
      "epoch": 0.025753283543651816,
      "grad_norm": 2.629272923472648,
      "learning_rate": 2.564102564102564e-05,
      "loss": 0.9233,
      "num_input_tokens_seen": 584856,
      "step": 50
    },
    {
      "epoch": 0.025753283543651816,
      "eval_loss": 0.9281821846961975,
      "eval_runtime": 48.2484,
      "eval_samples_per_second": 1.244,
      "eval_steps_per_second": 0.311,
      "num_input_tokens_seen": 584856,
      "step": 50
    },
    {
      "epoch": 0.028328611898016998,
      "grad_norm": 1.2858813899048422,
      "learning_rate": 2.8205128205128207e-05,
      "loss": 0.897,
      "num_input_tokens_seen": 643344,
      "step": 55
    },
    {
      "epoch": 0.03090394025238218,
      "grad_norm": 1.177678811476692,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.9169,
      "num_input_tokens_seen": 701808,
      "step": 60
    },
    {
      "epoch": 0.03347926860674736,
      "grad_norm": 1.2077065633120996,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.9019,
      "num_input_tokens_seen": 760304,
      "step": 65
    },
    {
      "epoch": 0.036054596961112545,
      "grad_norm": 1.1560644429967823,
      "learning_rate": 3.58974358974359e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 818760,
      "step": 70
    },
    {
      "epoch": 0.03862992531547772,
      "grad_norm": 0.732907212566054,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.9073,
      "num_input_tokens_seen": 877256,
      "step": 75
    },
    {
      "epoch": 0.04120525366984291,
      "grad_norm": 0.9616993870089134,
      "learning_rate": 4.1025641025641023e-05,
      "loss": 0.9081,
      "num_input_tokens_seen": 935752,
      "step": 80
    },
    {
      "epoch": 0.043780582024208085,
      "grad_norm": 0.8384067209941525,
      "learning_rate": 4.358974358974359e-05,
      "loss": 0.906,
      "num_input_tokens_seen": 994216,
      "step": 85
    },
    {
      "epoch": 0.04635591037857327,
      "grad_norm": 0.4045876972188175,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.8952,
      "num_input_tokens_seen": 1052704,
      "step": 90
    },
    {
      "epoch": 0.04893123873293845,
      "grad_norm": 0.6062678622593307,
      "learning_rate": 4.871794871794872e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 1111176,
      "step": 95
    },
    {
      "epoch": 0.05150656708730363,
      "grad_norm": 0.5316642041721752,
      "learning_rate": 5.128205128205128e-05,
      "loss": 0.9024,
      "num_input_tokens_seen": 1169664,
      "step": 100
    },
    {
      "epoch": 0.05150656708730363,
      "eval_loss": 0.911374032497406,
      "eval_runtime": 19.566,
      "eval_samples_per_second": 3.067,
      "eval_steps_per_second": 0.767,
      "num_input_tokens_seen": 1169664,
      "step": 100
    },
    {
      "epoch": 0.05408189544166881,
      "grad_norm": 0.47189419512253006,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.9142,
      "num_input_tokens_seen": 1228112,
      "step": 105
    },
    {
      "epoch": 0.056657223796033995,
      "grad_norm": 0.4885000351277984,
      "learning_rate": 5.6410256410256414e-05,
      "loss": 0.9054,
      "num_input_tokens_seen": 1286608,
      "step": 110
    },
    {
      "epoch": 0.05923255215039917,
      "grad_norm": 1.0232694160031948,
      "learning_rate": 5.897435897435898e-05,
      "loss": 0.8997,
      "num_input_tokens_seen": 1345072,
      "step": 115
    },
    {
      "epoch": 0.06180788050476436,
      "grad_norm": 0.6656697152989639,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.8988,
      "num_input_tokens_seen": 1403544,
      "step": 120
    },
    {
      "epoch": 0.06438320885912954,
      "grad_norm": 0.6273175951192728,
      "learning_rate": 6.410256410256412e-05,
      "loss": 0.9087,
      "num_input_tokens_seen": 1462024,
      "step": 125
    },
    {
      "epoch": 0.06695853721349472,
      "grad_norm": 0.707089894516894,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.8961,
      "num_input_tokens_seen": 1520528,
      "step": 130
    },
    {
      "epoch": 0.0695338655678599,
      "grad_norm": 0.4633668497982238,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.903,
      "num_input_tokens_seen": 1579024,
      "step": 135
    },
    {
      "epoch": 0.07210919392222509,
      "grad_norm": 0.5052802522069755,
      "learning_rate": 7.17948717948718e-05,
      "loss": 0.899,
      "num_input_tokens_seen": 1637504,
      "step": 140
    },
    {
      "epoch": 0.07468452227659027,
      "grad_norm": 0.7577940010204668,
      "learning_rate": 7.435897435897436e-05,
      "loss": 0.9071,
      "num_input_tokens_seen": 1696024,
      "step": 145
    },
    {
      "epoch": 0.07725985063095545,
      "grad_norm": 0.5812587904219971,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.9045,
      "num_input_tokens_seen": 1754512,
      "step": 150
    },
    {
      "epoch": 0.07725985063095545,
      "eval_loss": 0.8934853076934814,
      "eval_runtime": 19.8765,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.755,
      "num_input_tokens_seen": 1754512,
      "step": 150
    },
    {
      "epoch": 0.07983517898532062,
      "grad_norm": 0.5167982536583405,
      "learning_rate": 7.948717948717948e-05,
      "loss": 0.8992,
      "num_input_tokens_seen": 1812976,
      "step": 155
    },
    {
      "epoch": 0.08241050733968582,
      "grad_norm": 0.4971816797735092,
      "learning_rate": 8.205128205128205e-05,
      "loss": 0.8965,
      "num_input_tokens_seen": 1871464,
      "step": 160
    },
    {
      "epoch": 0.08498583569405099,
      "grad_norm": 0.6561749633642688,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.9094,
      "num_input_tokens_seen": 1929928,
      "step": 165
    },
    {
      "epoch": 0.08756116404841617,
      "grad_norm": 0.5010857314708574,
      "learning_rate": 8.717948717948718e-05,
      "loss": 0.903,
      "num_input_tokens_seen": 1988432,
      "step": 170
    },
    {
      "epoch": 0.09013649240278135,
      "grad_norm": 0.48794512034251364,
      "learning_rate": 8.974358974358975e-05,
      "loss": 0.902,
      "num_input_tokens_seen": 2046920,
      "step": 175
    },
    {
      "epoch": 0.09271182075714654,
      "grad_norm": 0.4040014684262414,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.9006,
      "num_input_tokens_seen": 2105392,
      "step": 180
    },
    {
      "epoch": 0.09528714911151172,
      "grad_norm": 0.5312840597942438,
      "learning_rate": 9.487179487179487e-05,
      "loss": 0.9042,
      "num_input_tokens_seen": 2163872,
      "step": 185
    },
    {
      "epoch": 0.0978624774658769,
      "grad_norm": 0.3535119366494406,
      "learning_rate": 9.743589743589744e-05,
      "loss": 0.9096,
      "num_input_tokens_seen": 2222352,
      "step": 190
    },
    {
      "epoch": 0.10043780582024209,
      "grad_norm": 0.30590378285024006,
      "learning_rate": 0.0001,
      "loss": 0.9037,
      "num_input_tokens_seen": 2280800,
      "step": 195
    },
    {
      "epoch": 0.10301313417460727,
      "grad_norm": 0.3055264226667786,
      "learning_rate": 9.999954623308172e-05,
      "loss": 0.904,
      "num_input_tokens_seen": 2339304,
      "step": 200
    },
    {
      "epoch": 0.10301313417460727,
      "eval_loss": 0.8980139493942261,
      "eval_runtime": 19.316,
      "eval_samples_per_second": 3.106,
      "eval_steps_per_second": 0.777,
      "num_input_tokens_seen": 2339304,
      "step": 200
    },
    {
      "epoch": 0.10558846252897244,
      "grad_norm": 0.8828178200664915,
      "learning_rate": 9.999818494056303e-05,
      "loss": 0.9029,
      "num_input_tokens_seen": 2397808,
      "step": 205
    },
    {
      "epoch": 0.10816379088333762,
      "grad_norm": 0.4308314655260644,
      "learning_rate": 9.99959161471523e-05,
      "loss": 0.9005,
      "num_input_tokens_seen": 2456288,
      "step": 210
    },
    {
      "epoch": 0.11073911923770281,
      "grad_norm": 0.4482188659643584,
      "learning_rate": 9.99927398940297e-05,
      "loss": 0.9096,
      "num_input_tokens_seen": 2514784,
      "step": 215
    },
    {
      "epoch": 0.11331444759206799,
      "grad_norm": 0.49014741417238206,
      "learning_rate": 9.998865623884635e-05,
      "loss": 0.9036,
      "num_input_tokens_seen": 2573240,
      "step": 220
    },
    {
      "epoch": 0.11588977594643317,
      "grad_norm": 0.2774850522391394,
      "learning_rate": 9.998366525572336e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 2631672,
      "step": 225
    },
    {
      "epoch": 0.11846510430079835,
      "grad_norm": 0.49390873315018263,
      "learning_rate": 9.997776703525046e-05,
      "loss": 0.9018,
      "num_input_tokens_seen": 2690112,
      "step": 230
    },
    {
      "epoch": 0.12104043265516354,
      "grad_norm": 0.3284306399258997,
      "learning_rate": 9.997096168448432e-05,
      "loss": 0.8934,
      "num_input_tokens_seen": 2748608,
      "step": 235
    },
    {
      "epoch": 0.12361576100952872,
      "grad_norm": 0.7182680023403506,
      "learning_rate": 9.996324932694668e-05,
      "loss": 0.8876,
      "num_input_tokens_seen": 2807080,
      "step": 240
    },
    {
      "epoch": 0.1261910893638939,
      "grad_norm": 0.7305499346526235,
      "learning_rate": 9.995463010262206e-05,
      "loss": 0.9084,
      "num_input_tokens_seen": 2865520,
      "step": 245
    },
    {
      "epoch": 0.12876641771825909,
      "grad_norm": 0.5773211522908436,
      "learning_rate": 9.994510416795519e-05,
      "loss": 0.9106,
      "num_input_tokens_seen": 2924016,
      "step": 250
    },
    {
      "epoch": 0.12876641771825909,
      "eval_loss": 0.8958488702774048,
      "eval_runtime": 19.507,
      "eval_samples_per_second": 3.076,
      "eval_steps_per_second": 0.769,
      "num_input_tokens_seen": 2924016,
      "step": 250
    },
    {
      "epoch": 0.13134174607262425,
      "grad_norm": 0.44306061088962184,
      "learning_rate": 9.993467169584824e-05,
      "loss": 0.9012,
      "num_input_tokens_seen": 2982520,
      "step": 255
    },
    {
      "epoch": 0.13391707442698944,
      "grad_norm": 0.7851687259125024,
      "learning_rate": 9.992333287565765e-05,
      "loss": 0.9069,
      "num_input_tokens_seen": 3041008,
      "step": 260
    },
    {
      "epoch": 0.13649240278135463,
      "grad_norm": 0.5705235716557865,
      "learning_rate": 9.991108791319066e-05,
      "loss": 0.8918,
      "num_input_tokens_seen": 3099464,
      "step": 265
    },
    {
      "epoch": 0.1390677311357198,
      "grad_norm": 0.6202972137914602,
      "learning_rate": 9.989793703070163e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 3157944,
      "step": 270
    },
    {
      "epoch": 0.141643059490085,
      "grad_norm": 0.7583768377175583,
      "learning_rate": 9.988388046688799e-05,
      "loss": 0.9009,
      "num_input_tokens_seen": 3216448,
      "step": 275
    },
    {
      "epoch": 0.14421838784445018,
      "grad_norm": 0.7180540444266581,
      "learning_rate": 9.986891847688587e-05,
      "loss": 0.9059,
      "num_input_tokens_seen": 3274928,
      "step": 280
    },
    {
      "epoch": 0.14679371619881534,
      "grad_norm": 0.4173225854654158,
      "learning_rate": 9.985305133226553e-05,
      "loss": 0.8939,
      "num_input_tokens_seen": 3333408,
      "step": 285
    },
    {
      "epoch": 0.14936904455318054,
      "grad_norm": 0.7825855108807762,
      "learning_rate": 9.983627932102638e-05,
      "loss": 0.8899,
      "num_input_tokens_seen": 3391896,
      "step": 290
    },
    {
      "epoch": 0.1519443729075457,
      "grad_norm": 0.4850249272160501,
      "learning_rate": 9.981860274759173e-05,
      "loss": 0.9092,
      "num_input_tokens_seen": 3450392,
      "step": 295
    },
    {
      "epoch": 0.1545197012619109,
      "grad_norm": 0.3325682106309916,
      "learning_rate": 9.980002193280342e-05,
      "loss": 0.8901,
      "num_input_tokens_seen": 3508888,
      "step": 300
    },
    {
      "epoch": 0.1545197012619109,
      "eval_loss": 0.8932263255119324,
      "eval_runtime": 19.7633,
      "eval_samples_per_second": 3.036,
      "eval_steps_per_second": 0.759,
      "num_input_tokens_seen": 3508888,
      "step": 300
    },
    {
      "epoch": 0.15709502961627608,
      "grad_norm": 0.36562722920113416,
      "learning_rate": 9.978053721391578e-05,
      "loss": 0.9042,
      "num_input_tokens_seen": 3567368,
      "step": 305
    },
    {
      "epoch": 0.15967035797064125,
      "grad_norm": 0.3765491511973325,
      "learning_rate": 9.976014894458963e-05,
      "loss": 0.9007,
      "num_input_tokens_seen": 3625848,
      "step": 310
    },
    {
      "epoch": 0.16224568632500644,
      "grad_norm": 0.5264420727347517,
      "learning_rate": 9.973885749488589e-05,
      "loss": 0.9036,
      "num_input_tokens_seen": 3684336,
      "step": 315
    },
    {
      "epoch": 0.16482101467937163,
      "grad_norm": 0.24680747784235688,
      "learning_rate": 9.971666325125874e-05,
      "loss": 0.8936,
      "num_input_tokens_seen": 3742800,
      "step": 320
    },
    {
      "epoch": 0.1673963430337368,
      "grad_norm": 0.4982571051665039,
      "learning_rate": 9.969356661654876e-05,
      "loss": 0.8989,
      "num_input_tokens_seen": 3801280,
      "step": 325
    },
    {
      "epoch": 0.16997167138810199,
      "grad_norm": 0.49943012602572584,
      "learning_rate": 9.966956800997546e-05,
      "loss": 0.8983,
      "num_input_tokens_seen": 3859792,
      "step": 330
    },
    {
      "epoch": 0.17254699974246718,
      "grad_norm": 0.37381050353079964,
      "learning_rate": 9.964466786712984e-05,
      "loss": 0.9038,
      "num_input_tokens_seen": 3918272,
      "step": 335
    },
    {
      "epoch": 0.17512232809683234,
      "grad_norm": 0.7501484170811903,
      "learning_rate": 9.961886663996629e-05,
      "loss": 0.8947,
      "num_input_tokens_seen": 3976760,
      "step": 340
    },
    {
      "epoch": 0.17769765645119753,
      "grad_norm": 0.5623847203835772,
      "learning_rate": 9.959216479679458e-05,
      "loss": 0.9179,
      "num_input_tokens_seen": 4035240,
      "step": 345
    },
    {
      "epoch": 0.1802729848055627,
      "grad_norm": 0.34381878607605765,
      "learning_rate": 9.956456282227122e-05,
      "loss": 0.9059,
      "num_input_tokens_seen": 4093688,
      "step": 350
    },
    {
      "epoch": 0.1802729848055627,
      "eval_loss": 0.8960411548614502,
      "eval_runtime": 20.0734,
      "eval_samples_per_second": 2.989,
      "eval_steps_per_second": 0.747,
      "num_input_tokens_seen": 4093688,
      "step": 350
    },
    {
      "epoch": 0.1828483131599279,
      "grad_norm": 0.5786050277605497,
      "learning_rate": 9.953606121739074e-05,
      "loss": 0.8795,
      "num_input_tokens_seen": 4152160,
      "step": 355
    },
    {
      "epoch": 0.18542364151429308,
      "grad_norm": 1.3309062985901938,
      "learning_rate": 9.950666049947653e-05,
      "loss": 0.9143,
      "num_input_tokens_seen": 4210648,
      "step": 360
    },
    {
      "epoch": 0.18799896986865824,
      "grad_norm": 0.5010297124723248,
      "learning_rate": 9.947636120217155e-05,
      "loss": 0.9164,
      "num_input_tokens_seen": 4269136,
      "step": 365
    },
    {
      "epoch": 0.19057429822302344,
      "grad_norm": 0.7250841632803818,
      "learning_rate": 9.944516387542852e-05,
      "loss": 0.9061,
      "num_input_tokens_seen": 4327664,
      "step": 370
    },
    {
      "epoch": 0.19314962657738863,
      "grad_norm": 0.4506280653909736,
      "learning_rate": 9.941306908550005e-05,
      "loss": 0.8873,
      "num_input_tokens_seen": 4386120,
      "step": 375
    },
    {
      "epoch": 0.1957249549317538,
      "grad_norm": 0.6467175538087946,
      "learning_rate": 9.938007741492828e-05,
      "loss": 0.9038,
      "num_input_tokens_seen": 4444560,
      "step": 380
    },
    {
      "epoch": 0.19830028328611898,
      "grad_norm": 0.6742654767461002,
      "learning_rate": 9.934618946253437e-05,
      "loss": 0.9116,
      "num_input_tokens_seen": 4503016,
      "step": 385
    },
    {
      "epoch": 0.20087561164048418,
      "grad_norm": 0.418516338281364,
      "learning_rate": 9.931140584340761e-05,
      "loss": 0.9023,
      "num_input_tokens_seen": 4561496,
      "step": 390
    },
    {
      "epoch": 0.20345093999484934,
      "grad_norm": 0.5738919793445436,
      "learning_rate": 9.92757271888942e-05,
      "loss": 0.8901,
      "num_input_tokens_seen": 4619944,
      "step": 395
    },
    {
      "epoch": 0.20602626834921453,
      "grad_norm": 0.3600152815973316,
      "learning_rate": 9.923915414658587e-05,
      "loss": 0.9033,
      "num_input_tokens_seen": 4678384,
      "step": 400
    },
    {
      "epoch": 0.20602626834921453,
      "eval_loss": 0.906301736831665,
      "eval_runtime": 19.8079,
      "eval_samples_per_second": 3.029,
      "eval_steps_per_second": 0.757,
      "num_input_tokens_seen": 4678384,
      "step": 400
    },
    {
      "epoch": 0.2086015967035797,
      "grad_norm": 0.4854922483196253,
      "learning_rate": 9.920168738030807e-05,
      "loss": 0.8951,
      "num_input_tokens_seen": 4736904,
      "step": 405
    },
    {
      "epoch": 0.2111769250579449,
      "grad_norm": 0.4537032287993717,
      "learning_rate": 9.916332757010799e-05,
      "loss": 0.9131,
      "num_input_tokens_seen": 4795376,
      "step": 410
    },
    {
      "epoch": 0.21375225341231008,
      "grad_norm": 0.46655140937482126,
      "learning_rate": 9.912407541224213e-05,
      "loss": 0.8923,
      "num_input_tokens_seen": 4853880,
      "step": 415
    },
    {
      "epoch": 0.21632758176667524,
      "grad_norm": 0.3608301258843965,
      "learning_rate": 9.908393161916374e-05,
      "loss": 0.9026,
      "num_input_tokens_seen": 4912360,
      "step": 420
    },
    {
      "epoch": 0.21890291012104043,
      "grad_norm": 0.5055648531803498,
      "learning_rate": 9.904289691950979e-05,
      "loss": 0.905,
      "num_input_tokens_seen": 4970872,
      "step": 425
    },
    {
      "epoch": 0.22147823847540563,
      "grad_norm": 0.3171606869940592,
      "learning_rate": 9.900097205808789e-05,
      "loss": 0.8941,
      "num_input_tokens_seen": 5029304,
      "step": 430
    },
    {
      "epoch": 0.2240535668297708,
      "grad_norm": 0.5798722428230844,
      "learning_rate": 9.895815779586262e-05,
      "loss": 0.9031,
      "num_input_tokens_seen": 5087800,
      "step": 435
    },
    {
      "epoch": 0.22662889518413598,
      "grad_norm": 0.5751083474979835,
      "learning_rate": 9.891445490994182e-05,
      "loss": 0.8975,
      "num_input_tokens_seen": 5146312,
      "step": 440
    },
    {
      "epoch": 0.22920422353850115,
      "grad_norm": 1.2316619525123293,
      "learning_rate": 9.886986419356246e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 5204800,
      "step": 445
    },
    {
      "epoch": 0.23177955189286634,
      "grad_norm": 0.5953254293558816,
      "learning_rate": 9.88243864560762e-05,
      "loss": 0.9062,
      "num_input_tokens_seen": 5263304,
      "step": 450
    },
    {
      "epoch": 0.23177955189286634,
      "eval_loss": 0.9007609486579895,
      "eval_runtime": 20.8827,
      "eval_samples_per_second": 2.873,
      "eval_steps_per_second": 0.718,
      "num_input_tokens_seen": 5263304,
      "step": 450
    },
    {
      "epoch": 0.23435488024723153,
      "grad_norm": 0.7549819464827967,
      "learning_rate": 9.877802252293474e-05,
      "loss": 0.8891,
      "num_input_tokens_seen": 5321760,
      "step": 455
    },
    {
      "epoch": 0.2369302086015967,
      "grad_norm": 0.9960909370043465,
      "learning_rate": 9.873077323567488e-05,
      "loss": 0.9026,
      "num_input_tokens_seen": 5380224,
      "step": 460
    },
    {
      "epoch": 0.23950553695596188,
      "grad_norm": 1.5888386521989892,
      "learning_rate": 9.868263945190312e-05,
      "loss": 0.8707,
      "num_input_tokens_seen": 5438704,
      "step": 465
    },
    {
      "epoch": 0.24208086531032708,
      "grad_norm": 3.0542478842411587,
      "learning_rate": 9.863362204528024e-05,
      "loss": 0.9051,
      "num_input_tokens_seen": 5497208,
      "step": 470
    },
    {
      "epoch": 0.24465619366469224,
      "grad_norm": 1.2908325061552137,
      "learning_rate": 9.858372190550533e-05,
      "loss": 0.8711,
      "num_input_tokens_seen": 5555704,
      "step": 475
    },
    {
      "epoch": 0.24723152201905743,
      "grad_norm": 3.1989324866235744,
      "learning_rate": 9.853293993829969e-05,
      "loss": 0.885,
      "num_input_tokens_seen": 5614160,
      "step": 480
    },
    {
      "epoch": 0.24980685037342262,
      "grad_norm": 3.591366302378185,
      "learning_rate": 9.848127706539039e-05,
      "loss": 0.8615,
      "num_input_tokens_seen": 5672640,
      "step": 485
    },
    {
      "epoch": 0.2523821787277878,
      "grad_norm": 2.053833335696007,
      "learning_rate": 9.842873422449354e-05,
      "loss": 0.9057,
      "num_input_tokens_seen": 5731072,
      "step": 490
    },
    {
      "epoch": 0.254957507082153,
      "grad_norm": 1.4501486574941083,
      "learning_rate": 9.837531236929726e-05,
      "loss": 0.8818,
      "num_input_tokens_seen": 5789544,
      "step": 495
    },
    {
      "epoch": 0.25753283543651817,
      "grad_norm": 2.1068404021122866,
      "learning_rate": 9.832101246944439e-05,
      "loss": 0.8576,
      "num_input_tokens_seen": 5848048,
      "step": 500
    },
    {
      "epoch": 0.25753283543651817,
      "eval_loss": 0.8268976211547852,
      "eval_runtime": 19.6346,
      "eval_samples_per_second": 3.056,
      "eval_steps_per_second": 0.764,
      "num_input_tokens_seen": 5848048,
      "step": 500
    },
    {
      "epoch": 0.26010816379088336,
      "grad_norm": 4.216936754020565,
      "learning_rate": 9.826583551051483e-05,
      "loss": 0.8566,
      "num_input_tokens_seen": 5906512,
      "step": 505
    },
    {
      "epoch": 0.2626834921452485,
      "grad_norm": 10.456282683777822,
      "learning_rate": 9.820978249400773e-05,
      "loss": 0.8365,
      "num_input_tokens_seen": 5965024,
      "step": 510
    },
    {
      "epoch": 0.2652588204996137,
      "grad_norm": 2.334974931865165,
      "learning_rate": 9.81528544373233e-05,
      "loss": 0.8882,
      "num_input_tokens_seen": 6023496,
      "step": 515
    },
    {
      "epoch": 0.2678341488539789,
      "grad_norm": 0.6948827424617825,
      "learning_rate": 9.809505237374426e-05,
      "loss": 0.8799,
      "num_input_tokens_seen": 6082000,
      "step": 520
    },
    {
      "epoch": 0.2704094772083441,
      "grad_norm": 0.8415524888602947,
      "learning_rate": 9.80363773524172e-05,
      "loss": 0.8758,
      "num_input_tokens_seen": 6140480,
      "step": 525
    },
    {
      "epoch": 0.27298480556270927,
      "grad_norm": 1.3536520282199265,
      "learning_rate": 9.797683043833345e-05,
      "loss": 0.8644,
      "num_input_tokens_seen": 6198968,
      "step": 530
    },
    {
      "epoch": 0.2755601339170744,
      "grad_norm": 4.556188528469967,
      "learning_rate": 9.791641271230982e-05,
      "loss": 0.8453,
      "num_input_tokens_seen": 6257464,
      "step": 535
    },
    {
      "epoch": 0.2781354622714396,
      "grad_norm": 2.890141630286954,
      "learning_rate": 9.78551252709689e-05,
      "loss": 0.8533,
      "num_input_tokens_seen": 6315944,
      "step": 540
    },
    {
      "epoch": 0.2807107906258048,
      "grad_norm": 4.471490037342243,
      "learning_rate": 9.779296922671923e-05,
      "loss": 0.8575,
      "num_input_tokens_seen": 6374408,
      "step": 545
    },
    {
      "epoch": 0.28328611898017,
      "grad_norm": 3.174906426420603,
      "learning_rate": 9.77299457077351e-05,
      "loss": 0.8666,
      "num_input_tokens_seen": 6432936,
      "step": 550
    },
    {
      "epoch": 0.28328611898017,
      "eval_loss": 0.7909801602363586,
      "eval_runtime": 19.8739,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.755,
      "num_input_tokens_seen": 6432936,
      "step": 550
    },
    {
      "epoch": 0.28586144733453517,
      "grad_norm": 3.529163852540611,
      "learning_rate": 9.7666055857936e-05,
      "loss": 0.8264,
      "num_input_tokens_seen": 6491400,
      "step": 555
    },
    {
      "epoch": 0.28843677568890036,
      "grad_norm": 4.044590312854015,
      "learning_rate": 9.760130083696595e-05,
      "loss": 0.8456,
      "num_input_tokens_seen": 6549872,
      "step": 560
    },
    {
      "epoch": 0.2910121040432655,
      "grad_norm": 4.650808013267891,
      "learning_rate": 9.75356818201724e-05,
      "loss": 0.8032,
      "num_input_tokens_seen": 6608296,
      "step": 565
    },
    {
      "epoch": 0.2935874323976307,
      "grad_norm": 6.577223054225459,
      "learning_rate": 9.746919999858492e-05,
      "loss": 0.8081,
      "num_input_tokens_seen": 6666768,
      "step": 570
    },
    {
      "epoch": 0.2961627607519959,
      "grad_norm": 7.1732684079932545,
      "learning_rate": 9.740185657889357e-05,
      "loss": 0.8398,
      "num_input_tokens_seen": 6725248,
      "step": 575
    },
    {
      "epoch": 0.29873808910636107,
      "grad_norm": 21.451661035438484,
      "learning_rate": 9.733365278342696e-05,
      "loss": 0.8908,
      "num_input_tokens_seen": 6783680,
      "step": 580
    },
    {
      "epoch": 0.30131341746072626,
      "grad_norm": 4.031699151478832,
      "learning_rate": 9.726458985013017e-05,
      "loss": 0.8248,
      "num_input_tokens_seen": 6842144,
      "step": 585
    },
    {
      "epoch": 0.3038887458150914,
      "grad_norm": 3.45579530759462,
      "learning_rate": 9.719466903254215e-05,
      "loss": 0.829,
      "num_input_tokens_seen": 6900656,
      "step": 590
    },
    {
      "epoch": 0.3064640741694566,
      "grad_norm": 4.518719062630672,
      "learning_rate": 9.712389159977307e-05,
      "loss": 0.8269,
      "num_input_tokens_seen": 6959128,
      "step": 595
    },
    {
      "epoch": 0.3090394025238218,
      "grad_norm": 12.728221405806083,
      "learning_rate": 9.705225883648121e-05,
      "loss": 0.7997,
      "num_input_tokens_seen": 7017576,
      "step": 600
    },
    {
      "epoch": 0.3090394025238218,
      "eval_loss": 0.787663459777832,
      "eval_runtime": 19.6121,
      "eval_samples_per_second": 3.059,
      "eval_steps_per_second": 0.765,
      "num_input_tokens_seen": 7017576,
      "step": 600
    },
    {
      "epoch": 0.311614730878187,
      "grad_norm": 6.69798043480266,
      "learning_rate": 9.697977204284973e-05,
      "loss": 0.8925,
      "num_input_tokens_seen": 7076032,
      "step": 605
    },
    {
      "epoch": 0.31419005923255217,
      "grad_norm": 5.067921055882507,
      "learning_rate": 9.690643253456297e-05,
      "loss": 0.8159,
      "num_input_tokens_seen": 7134536,
      "step": 610
    },
    {
      "epoch": 0.31676538758691736,
      "grad_norm": 7.400939684061883,
      "learning_rate": 9.683224164278264e-05,
      "loss": 0.826,
      "num_input_tokens_seen": 7193032,
      "step": 615
    },
    {
      "epoch": 0.3193407159412825,
      "grad_norm": 5.898525799199162,
      "learning_rate": 9.675720071412365e-05,
      "loss": 0.8187,
      "num_input_tokens_seen": 7251568,
      "step": 620
    },
    {
      "epoch": 0.3219160442956477,
      "grad_norm": 11.267105316774332,
      "learning_rate": 9.66813111106296e-05,
      "loss": 0.8524,
      "num_input_tokens_seen": 7310072,
      "step": 625
    },
    {
      "epoch": 0.3244913726500129,
      "grad_norm": 6.703970582399643,
      "learning_rate": 9.660457420974819e-05,
      "loss": 0.7966,
      "num_input_tokens_seen": 7368560,
      "step": 630
    },
    {
      "epoch": 0.32706670100437807,
      "grad_norm": 6.945445265294353,
      "learning_rate": 9.652699140430608e-05,
      "loss": 0.799,
      "num_input_tokens_seen": 7427040,
      "step": 635
    },
    {
      "epoch": 0.32964202935874326,
      "grad_norm": 7.0684293091171595,
      "learning_rate": 9.644856410248369e-05,
      "loss": 0.8477,
      "num_input_tokens_seen": 7485552,
      "step": 640
    },
    {
      "epoch": 0.3322173577131084,
      "grad_norm": 7.165086711244158,
      "learning_rate": 9.636929372778963e-05,
      "loss": 0.7867,
      "num_input_tokens_seen": 7544040,
      "step": 645
    },
    {
      "epoch": 0.3347926860674736,
      "grad_norm": 9.185933515393563,
      "learning_rate": 9.628918171903485e-05,
      "loss": 0.8367,
      "num_input_tokens_seen": 7602512,
      "step": 650
    },
    {
      "epoch": 0.3347926860674736,
      "eval_loss": 0.7940558791160583,
      "eval_runtime": 19.7641,
      "eval_samples_per_second": 3.036,
      "eval_steps_per_second": 0.759,
      "num_input_tokens_seen": 7602512,
      "step": 650
    },
    {
      "epoch": 0.3373680144218388,
      "grad_norm": 6.586425160827751,
      "learning_rate": 9.620822953030652e-05,
      "loss": 0.8131,
      "num_input_tokens_seen": 7660968,
      "step": 655
    },
    {
      "epoch": 0.33994334277620397,
      "grad_norm": 6.92970378602844,
      "learning_rate": 9.612643863094163e-05,
      "loss": 0.8348,
      "num_input_tokens_seen": 7719448,
      "step": 660
    },
    {
      "epoch": 0.34251867113056916,
      "grad_norm": 8.35228285894448,
      "learning_rate": 9.604381050550038e-05,
      "loss": 0.8289,
      "num_input_tokens_seen": 7777928,
      "step": 665
    },
    {
      "epoch": 0.34509399948493436,
      "grad_norm": 12.894782157020227,
      "learning_rate": 9.596034665373916e-05,
      "loss": 0.7758,
      "num_input_tokens_seen": 7836424,
      "step": 670
    },
    {
      "epoch": 0.3476693278392995,
      "grad_norm": 13.409694970235305,
      "learning_rate": 9.587604859058334e-05,
      "loss": 0.8189,
      "num_input_tokens_seen": 7894904,
      "step": 675
    },
    {
      "epoch": 0.3502446561936647,
      "grad_norm": 8.783205826578632,
      "learning_rate": 9.579091784609984e-05,
      "loss": 0.8221,
      "num_input_tokens_seen": 7953432,
      "step": 680
    },
    {
      "epoch": 0.3528199845480299,
      "grad_norm": 8.368380903445857,
      "learning_rate": 9.570495596546926e-05,
      "loss": 0.8378,
      "num_input_tokens_seen": 8011888,
      "step": 685
    },
    {
      "epoch": 0.35539531290239507,
      "grad_norm": 6.7086179135551145,
      "learning_rate": 9.561816450895793e-05,
      "loss": 0.7529,
      "num_input_tokens_seen": 8070344,
      "step": 690
    },
    {
      "epoch": 0.35797064125676026,
      "grad_norm": 8.476897088436242,
      "learning_rate": 9.55305450518895e-05,
      "loss": 0.7311,
      "num_input_tokens_seen": 8128816,
      "step": 695
    },
    {
      "epoch": 0.3605459696111254,
      "grad_norm": 10.427785019598666,
      "learning_rate": 9.544209918461642e-05,
      "loss": 0.774,
      "num_input_tokens_seen": 8187320,
      "step": 700
    },
    {
      "epoch": 0.3605459696111254,
      "eval_loss": 0.7318872809410095,
      "eval_runtime": 19.6917,
      "eval_samples_per_second": 3.047,
      "eval_steps_per_second": 0.762,
      "num_input_tokens_seen": 8187320,
      "step": 700
    },
    {
      "epoch": 0.3631212979654906,
      "grad_norm": 14.492396688166755,
      "learning_rate": 9.535282851249103e-05,
      "loss": 0.765,
      "num_input_tokens_seen": 8245776,
      "step": 705
    },
    {
      "epoch": 0.3656966263198558,
      "grad_norm": 16.054951836007135,
      "learning_rate": 9.526273465583646e-05,
      "loss": 0.7287,
      "num_input_tokens_seen": 8304280,
      "step": 710
    },
    {
      "epoch": 0.36827195467422097,
      "grad_norm": 21.4994447839661,
      "learning_rate": 9.517181924991716e-05,
      "loss": 0.758,
      "num_input_tokens_seen": 8362728,
      "step": 715
    },
    {
      "epoch": 0.37084728302858616,
      "grad_norm": 10.676012702912917,
      "learning_rate": 9.508008394490926e-05,
      "loss": 0.795,
      "num_input_tokens_seen": 8421224,
      "step": 720
    },
    {
      "epoch": 0.37342261138295135,
      "grad_norm": 9.802559177691224,
      "learning_rate": 9.498753040587066e-05,
      "loss": 0.6901,
      "num_input_tokens_seen": 8479720,
      "step": 725
    },
    {
      "epoch": 0.3759979397373165,
      "grad_norm": 12.345047855457121,
      "learning_rate": 9.48941603127107e-05,
      "loss": 0.7618,
      "num_input_tokens_seen": 8538192,
      "step": 730
    },
    {
      "epoch": 0.3785732680916817,
      "grad_norm": 9.115843124142248,
      "learning_rate": 9.479997536015977e-05,
      "loss": 0.7481,
      "num_input_tokens_seen": 8596664,
      "step": 735
    },
    {
      "epoch": 0.3811485964460469,
      "grad_norm": 13.754407712653018,
      "learning_rate": 9.47049772577385e-05,
      "loss": 0.746,
      "num_input_tokens_seen": 8655128,
      "step": 740
    },
    {
      "epoch": 0.38372392480041206,
      "grad_norm": 11.612003816357428,
      "learning_rate": 9.460916772972672e-05,
      "loss": 0.812,
      "num_input_tokens_seen": 8713624,
      "step": 745
    },
    {
      "epoch": 0.38629925315477726,
      "grad_norm": 27.606819697848053,
      "learning_rate": 9.451254851513222e-05,
      "loss": 0.6751,
      "num_input_tokens_seen": 8772104,
      "step": 750
    },
    {
      "epoch": 0.38629925315477726,
      "eval_loss": 0.732211709022522,
      "eval_runtime": 19.8828,
      "eval_samples_per_second": 3.018,
      "eval_steps_per_second": 0.754,
      "num_input_tokens_seen": 8772104,
      "step": 750
    },
    {
      "epoch": 0.3888745815091424,
      "grad_norm": 14.96041133173447,
      "learning_rate": 9.441512136765911e-05,
      "loss": 0.7772,
      "num_input_tokens_seen": 8830568,
      "step": 755
    },
    {
      "epoch": 0.3914499098635076,
      "grad_norm": 11.503685090619637,
      "learning_rate": 9.431688805567607e-05,
      "loss": 0.7114,
      "num_input_tokens_seen": 8889072,
      "step": 760
    },
    {
      "epoch": 0.3940252382178728,
      "grad_norm": 10.096502565612639,
      "learning_rate": 9.421785036218417e-05,
      "loss": 0.8463,
      "num_input_tokens_seen": 8947568,
      "step": 765
    },
    {
      "epoch": 0.39660056657223797,
      "grad_norm": 10.03630613489591,
      "learning_rate": 9.411801008478459e-05,
      "loss": 0.7822,
      "num_input_tokens_seen": 9006056,
      "step": 770
    },
    {
      "epoch": 0.39917589492660316,
      "grad_norm": 9.513127486957377,
      "learning_rate": 9.401736903564592e-05,
      "loss": 0.7628,
      "num_input_tokens_seen": 9064592,
      "step": 775
    },
    {
      "epoch": 0.40175122328096835,
      "grad_norm": 6.959520401923045,
      "learning_rate": 9.39159290414713e-05,
      "loss": 0.7589,
      "num_input_tokens_seen": 9123096,
      "step": 780
    },
    {
      "epoch": 0.4043265516353335,
      "grad_norm": 11.121102798997068,
      "learning_rate": 9.381369194346527e-05,
      "loss": 0.7564,
      "num_input_tokens_seen": 9181576,
      "step": 785
    },
    {
      "epoch": 0.4069018799896987,
      "grad_norm": 8.968142728241798,
      "learning_rate": 9.371065959730039e-05,
      "loss": 0.6934,
      "num_input_tokens_seen": 9240048,
      "step": 790
    },
    {
      "epoch": 0.40947720834406387,
      "grad_norm": 12.849518629503493,
      "learning_rate": 9.36068338730834e-05,
      "loss": 0.7314,
      "num_input_tokens_seen": 9298528,
      "step": 795
    },
    {
      "epoch": 0.41205253669842906,
      "grad_norm": 68.63999985815002,
      "learning_rate": 9.35022166553215e-05,
      "loss": 0.6911,
      "num_input_tokens_seen": 9357016,
      "step": 800
    },
    {
      "epoch": 0.41205253669842906,
      "eval_loss": 0.7180347442626953,
      "eval_runtime": 19.3266,
      "eval_samples_per_second": 3.105,
      "eval_steps_per_second": 0.776,
      "num_input_tokens_seen": 9357016,
      "step": 800
    },
    {
      "epoch": 0.41462786505279425,
      "grad_norm": 12.26130532110871,
      "learning_rate": 9.339680984288799e-05,
      "loss": 0.7086,
      "num_input_tokens_seen": 9415480,
      "step": 805
    },
    {
      "epoch": 0.4172031934071594,
      "grad_norm": 15.883264561409183,
      "learning_rate": 9.329061534898783e-05,
      "loss": 0.7726,
      "num_input_tokens_seen": 9473928,
      "step": 810
    },
    {
      "epoch": 0.4197785217615246,
      "grad_norm": 17.855002564946467,
      "learning_rate": 9.318363510112296e-05,
      "loss": 0.7286,
      "num_input_tokens_seen": 9532408,
      "step": 815
    },
    {
      "epoch": 0.4223538501158898,
      "grad_norm": 11.119850082821575,
      "learning_rate": 9.307587104105729e-05,
      "loss": 0.7515,
      "num_input_tokens_seen": 9590920,
      "step": 820
    },
    {
      "epoch": 0.42492917847025496,
      "grad_norm": 9.916888211602318,
      "learning_rate": 9.296732512478139e-05,
      "loss": 0.7344,
      "num_input_tokens_seen": 9649400,
      "step": 825
    },
    {
      "epoch": 0.42750450682462016,
      "grad_norm": 9.805389743080928,
      "learning_rate": 9.285799932247714e-05,
      "loss": 0.6954,
      "num_input_tokens_seen": 9707888,
      "step": 830
    },
    {
      "epoch": 0.43007983517898535,
      "grad_norm": 8.068508417105404,
      "learning_rate": 9.274789561848183e-05,
      "loss": 0.7312,
      "num_input_tokens_seen": 9766384,
      "step": 835
    },
    {
      "epoch": 0.4326551635333505,
      "grad_norm": 8.152928433205195,
      "learning_rate": 9.263701601125218e-05,
      "loss": 0.647,
      "num_input_tokens_seen": 9824896,
      "step": 840
    },
    {
      "epoch": 0.4352304918877157,
      "grad_norm": 17.465963143357314,
      "learning_rate": 9.252536251332813e-05,
      "loss": 0.7273,
      "num_input_tokens_seen": 9883408,
      "step": 845
    },
    {
      "epoch": 0.43780582024208087,
      "grad_norm": 6.231784874865904,
      "learning_rate": 9.24129371512962e-05,
      "loss": 0.7455,
      "num_input_tokens_seen": 9941896,
      "step": 850
    },
    {
      "epoch": 0.43780582024208087,
      "eval_loss": 0.7039459347724915,
      "eval_runtime": 19.7834,
      "eval_samples_per_second": 3.033,
      "eval_steps_per_second": 0.758,
      "num_input_tokens_seen": 9941896,
      "step": 850
    },
    {
      "epoch": 0.44038114859644606,
      "grad_norm": 8.584709523479143,
      "learning_rate": 9.22997419657528e-05,
      "loss": 0.6829,
      "num_input_tokens_seen": 10000336,
      "step": 855
    },
    {
      "epoch": 0.44295647695081125,
      "grad_norm": 9.894526327436065,
      "learning_rate": 9.218577901126713e-05,
      "loss": 0.6971,
      "num_input_tokens_seen": 10058816,
      "step": 860
    },
    {
      "epoch": 0.4455318053051764,
      "grad_norm": 6.844807331677797,
      "learning_rate": 9.207105035634397e-05,
      "loss": 0.7239,
      "num_input_tokens_seen": 10117320,
      "step": 865
    },
    {
      "epoch": 0.4481071336595416,
      "grad_norm": 9.911971184322747,
      "learning_rate": 9.195555808338603e-05,
      "loss": 0.7113,
      "num_input_tokens_seen": 10175824,
      "step": 870
    },
    {
      "epoch": 0.45068246201390677,
      "grad_norm": 11.42729729117445,
      "learning_rate": 9.183930428865622e-05,
      "loss": 0.685,
      "num_input_tokens_seen": 10234288,
      "step": 875
    },
    {
      "epoch": 0.45325779036827196,
      "grad_norm": 6.6164653318829005,
      "learning_rate": 9.17222910822396e-05,
      "loss": 0.6804,
      "num_input_tokens_seen": 10292736,
      "step": 880
    },
    {
      "epoch": 0.45583311872263715,
      "grad_norm": 9.566984327413312,
      "learning_rate": 9.160452058800504e-05,
      "loss": 0.7056,
      "num_input_tokens_seen": 10351224,
      "step": 885
    },
    {
      "epoch": 0.4584084470770023,
      "grad_norm": 8.37543764212447,
      "learning_rate": 9.148599494356671e-05,
      "loss": 0.7234,
      "num_input_tokens_seen": 10409736,
      "step": 890
    },
    {
      "epoch": 0.4609837754313675,
      "grad_norm": 8.118764978921215,
      "learning_rate": 9.136671630024527e-05,
      "loss": 0.7505,
      "num_input_tokens_seen": 10468240,
      "step": 895
    },
    {
      "epoch": 0.4635591037857327,
      "grad_norm": 8.91603360160678,
      "learning_rate": 9.124668682302882e-05,
      "loss": 0.7378,
      "num_input_tokens_seen": 10526712,
      "step": 900
    },
    {
      "epoch": 0.4635591037857327,
      "eval_loss": 0.7197856903076172,
      "eval_runtime": 19.6818,
      "eval_samples_per_second": 3.048,
      "eval_steps_per_second": 0.762,
      "num_input_tokens_seen": 10526712,
      "step": 900
    },
    {
      "epoch": 0.46613443214009787,
      "grad_norm": 7.649426233722995,
      "learning_rate": 9.112590869053359e-05,
      "loss": 0.6794,
      "num_input_tokens_seen": 10585232,
      "step": 905
    },
    {
      "epoch": 0.46870976049446306,
      "grad_norm": 6.066459270532772,
      "learning_rate": 9.100438409496444e-05,
      "loss": 0.6817,
      "num_input_tokens_seen": 10643728,
      "step": 910
    },
    {
      "epoch": 0.47128508884882825,
      "grad_norm": 7.144597127673979,
      "learning_rate": 9.088211524207497e-05,
      "loss": 0.6503,
      "num_input_tokens_seen": 10702240,
      "step": 915
    },
    {
      "epoch": 0.4738604172031934,
      "grad_norm": 9.676112884447143,
      "learning_rate": 9.075910435112766e-05,
      "loss": 0.6903,
      "num_input_tokens_seen": 10760656,
      "step": 920
    },
    {
      "epoch": 0.4764357455575586,
      "grad_norm": 12.206584747037537,
      "learning_rate": 9.063535365485341e-05,
      "loss": 0.6611,
      "num_input_tokens_seen": 10819128,
      "step": 925
    },
    {
      "epoch": 0.47901107391192377,
      "grad_norm": 8.724970113237934,
      "learning_rate": 9.051086539941108e-05,
      "loss": 0.6361,
      "num_input_tokens_seen": 10877600,
      "step": 930
    },
    {
      "epoch": 0.48158640226628896,
      "grad_norm": 26.26773221921971,
      "learning_rate": 9.038564184434676e-05,
      "loss": 0.7006,
      "num_input_tokens_seen": 10936088,
      "step": 935
    },
    {
      "epoch": 0.48416173062065415,
      "grad_norm": 6.223867131390233,
      "learning_rate": 9.025968526255275e-05,
      "loss": 0.7012,
      "num_input_tokens_seen": 10994560,
      "step": 940
    },
    {
      "epoch": 0.4867370589750193,
      "grad_norm": 6.1072541360418295,
      "learning_rate": 9.013299794022622e-05,
      "loss": 0.6968,
      "num_input_tokens_seen": 11053016,
      "step": 945
    },
    {
      "epoch": 0.4893123873293845,
      "grad_norm": 8.372921238626587,
      "learning_rate": 9.00055821768278e-05,
      "loss": 0.6825,
      "num_input_tokens_seen": 11111520,
      "step": 950
    },
    {
      "epoch": 0.4893123873293845,
      "eval_loss": 0.6830747723579407,
      "eval_runtime": 19.7357,
      "eval_samples_per_second": 3.04,
      "eval_steps_per_second": 0.76,
      "num_input_tokens_seen": 11111520,
      "step": 950
    },
    {
      "epoch": 0.49188771568374967,
      "grad_norm": 8.942834634941512,
      "learning_rate": 8.987744028503981e-05,
      "loss": 0.6957,
      "num_input_tokens_seen": 11170016,
      "step": 955
    },
    {
      "epoch": 0.49446304403811486,
      "grad_norm": 7.280449373939037,
      "learning_rate": 8.974857459072435e-05,
      "loss": 0.6531,
      "num_input_tokens_seen": 11228496,
      "step": 960
    },
    {
      "epoch": 0.49703837239248005,
      "grad_norm": 11.559627425451879,
      "learning_rate": 8.961898743288094e-05,
      "loss": 0.6201,
      "num_input_tokens_seen": 11286928,
      "step": 965
    },
    {
      "epoch": 0.49961370074684525,
      "grad_norm": 8.705456467164504,
      "learning_rate": 8.948868116360421e-05,
      "loss": 0.5677,
      "num_input_tokens_seen": 11345400,
      "step": 970
    },
    {
      "epoch": 0.5021890291012104,
      "grad_norm": 11.843729276311603,
      "learning_rate": 8.935765814804112e-05,
      "loss": 0.5763,
      "num_input_tokens_seen": 11403912,
      "step": 975
    },
    {
      "epoch": 0.5047643574555756,
      "grad_norm": 9.09749490323839,
      "learning_rate": 8.922592076434804e-05,
      "loss": 0.6348,
      "num_input_tokens_seen": 11462344,
      "step": 980
    },
    {
      "epoch": 0.5073396858099408,
      "grad_norm": 9.15512782816083,
      "learning_rate": 8.90934714036477e-05,
      "loss": 0.6541,
      "num_input_tokens_seen": 11520808,
      "step": 985
    },
    {
      "epoch": 0.509915014164306,
      "grad_norm": 7.3541970927874125,
      "learning_rate": 8.896031246998558e-05,
      "loss": 0.7012,
      "num_input_tokens_seen": 11579248,
      "step": 990
    },
    {
      "epoch": 0.5124903425186711,
      "grad_norm": 7.2544111988448305,
      "learning_rate": 8.882644638028646e-05,
      "loss": 0.6508,
      "num_input_tokens_seen": 11637712,
      "step": 995
    },
    {
      "epoch": 0.5150656708730363,
      "grad_norm": 8.317370673188798,
      "learning_rate": 8.869187556431046e-05,
      "loss": 0.5971,
      "num_input_tokens_seen": 11696200,
      "step": 1000
    },
    {
      "epoch": 0.5150656708730363,
      "eval_loss": 0.7078786492347717,
      "eval_runtime": 19.6933,
      "eval_samples_per_second": 3.047,
      "eval_steps_per_second": 0.762,
      "num_input_tokens_seen": 11696200,
      "step": 1000
    },
    {
      "epoch": 0.5176409992274015,
      "grad_norm": 9.38238507668501,
      "learning_rate": 8.855660246460895e-05,
      "loss": 0.6959,
      "num_input_tokens_seen": 11754720,
      "step": 1005
    },
    {
      "epoch": 0.5202163275817667,
      "grad_norm": 8.602013574457626,
      "learning_rate": 8.842062953648023e-05,
      "loss": 0.6918,
      "num_input_tokens_seen": 11813216,
      "step": 1010
    },
    {
      "epoch": 0.5227916559361319,
      "grad_norm": 6.752768596735988,
      "learning_rate": 8.828395924792497e-05,
      "loss": 0.7269,
      "num_input_tokens_seen": 11871712,
      "step": 1015
    },
    {
      "epoch": 0.525366984290497,
      "grad_norm": 3.9336242665009187,
      "learning_rate": 8.814659407960141e-05,
      "loss": 0.7026,
      "num_input_tokens_seen": 11930200,
      "step": 1020
    },
    {
      "epoch": 0.5279423126448622,
      "grad_norm": 5.739197643968765,
      "learning_rate": 8.800853652478028e-05,
      "loss": 0.6467,
      "num_input_tokens_seen": 11988704,
      "step": 1025
    },
    {
      "epoch": 0.5305176409992274,
      "grad_norm": 4.583970239100745,
      "learning_rate": 8.786978908929966e-05,
      "loss": 0.6155,
      "num_input_tokens_seen": 12047176,
      "step": 1030
    },
    {
      "epoch": 0.5330929693535926,
      "grad_norm": 7.891614334520996,
      "learning_rate": 8.773035429151937e-05,
      "loss": 0.7849,
      "num_input_tokens_seen": 12105680,
      "step": 1035
    },
    {
      "epoch": 0.5356682977079578,
      "grad_norm": 10.308514200015722,
      "learning_rate": 8.759023466227538e-05,
      "loss": 0.6341,
      "num_input_tokens_seen": 12164208,
      "step": 1040
    },
    {
      "epoch": 0.5382436260623229,
      "grad_norm": 5.0434906291132995,
      "learning_rate": 8.744943274483376e-05,
      "loss": 0.7189,
      "num_input_tokens_seen": 12222672,
      "step": 1045
    },
    {
      "epoch": 0.5408189544166881,
      "grad_norm": 4.3092898145567125,
      "learning_rate": 8.730795109484461e-05,
      "loss": 0.6914,
      "num_input_tokens_seen": 12281072,
      "step": 1050
    },
    {
      "epoch": 0.5408189544166881,
      "eval_loss": 0.6824291348457336,
      "eval_runtime": 19.7949,
      "eval_samples_per_second": 3.031,
      "eval_steps_per_second": 0.758,
      "num_input_tokens_seen": 12281072,
      "step": 1050
    },
    {
      "epoch": 0.5433942827710533,
      "grad_norm": 6.118699079738975,
      "learning_rate": 8.716579228029562e-05,
      "loss": 0.6567,
      "num_input_tokens_seen": 12339544,
      "step": 1055
    },
    {
      "epoch": 0.5459696111254185,
      "grad_norm": 10.911613071983485,
      "learning_rate": 8.702295888146548e-05,
      "loss": 0.6155,
      "num_input_tokens_seen": 12397992,
      "step": 1060
    },
    {
      "epoch": 0.5485449394797837,
      "grad_norm": 7.643873763070769,
      "learning_rate": 8.687945349087703e-05,
      "loss": 0.6029,
      "num_input_tokens_seen": 12456480,
      "step": 1065
    },
    {
      "epoch": 0.5511202678341488,
      "grad_norm": 8.136001935771205,
      "learning_rate": 8.673527871325022e-05,
      "loss": 0.6129,
      "num_input_tokens_seen": 12515000,
      "step": 1070
    },
    {
      "epoch": 0.553695596188514,
      "grad_norm": 11.244237678841547,
      "learning_rate": 8.659043716545485e-05,
      "loss": 0.6825,
      "num_input_tokens_seen": 12573504,
      "step": 1075
    },
    {
      "epoch": 0.5562709245428792,
      "grad_norm": 7.506618014617338,
      "learning_rate": 8.644493147646302e-05,
      "loss": 0.6828,
      "num_input_tokens_seen": 12632008,
      "step": 1080
    },
    {
      "epoch": 0.5588462528972444,
      "grad_norm": 6.1353619139783415,
      "learning_rate": 8.629876428730145e-05,
      "loss": 0.6286,
      "num_input_tokens_seen": 12690520,
      "step": 1085
    },
    {
      "epoch": 0.5614215812516096,
      "grad_norm": 4.639695384779573,
      "learning_rate": 8.615193825100355e-05,
      "loss": 0.6361,
      "num_input_tokens_seen": 12749032,
      "step": 1090
    },
    {
      "epoch": 0.5639969096059748,
      "grad_norm": 8.458071149567038,
      "learning_rate": 8.600445603256123e-05,
      "loss": 0.6087,
      "num_input_tokens_seen": 12807504,
      "step": 1095
    },
    {
      "epoch": 0.56657223796034,
      "grad_norm": 7.622756865192901,
      "learning_rate": 8.585632030887658e-05,
      "loss": 0.5825,
      "num_input_tokens_seen": 12865992,
      "step": 1100
    },
    {
      "epoch": 0.56657223796034,
      "eval_loss": 0.6431913375854492,
      "eval_runtime": 19.2744,
      "eval_samples_per_second": 3.113,
      "eval_steps_per_second": 0.778,
      "num_input_tokens_seen": 12865992,
      "step": 1100
    },
    {
      "epoch": 0.5691475663147051,
      "grad_norm": 23.7865505450662,
      "learning_rate": 8.57075337687132e-05,
      "loss": 0.6113,
      "num_input_tokens_seen": 12924448,
      "step": 1105
    },
    {
      "epoch": 0.5717228946690703,
      "grad_norm": 9.766678144472763,
      "learning_rate": 8.55580991126475e-05,
      "loss": 0.562,
      "num_input_tokens_seen": 12982912,
      "step": 1110
    },
    {
      "epoch": 0.5742982230234355,
      "grad_norm": 11.601294815310595,
      "learning_rate": 8.540801905301963e-05,
      "loss": 0.6124,
      "num_input_tokens_seen": 13041424,
      "step": 1115
    },
    {
      "epoch": 0.5768735513778007,
      "grad_norm": 14.728116727049585,
      "learning_rate": 8.525729631388421e-05,
      "loss": 0.6788,
      "num_input_tokens_seen": 13099888,
      "step": 1120
    },
    {
      "epoch": 0.5794488797321659,
      "grad_norm": 9.209119741223232,
      "learning_rate": 8.510593363096097e-05,
      "loss": 0.5904,
      "num_input_tokens_seen": 13158344,
      "step": 1125
    },
    {
      "epoch": 0.582024208086531,
      "grad_norm": 8.883499835719407,
      "learning_rate": 8.495393375158504e-05,
      "loss": 0.5433,
      "num_input_tokens_seen": 13216840,
      "step": 1130
    },
    {
      "epoch": 0.5845995364408962,
      "grad_norm": 10.675613755673567,
      "learning_rate": 8.480129943465709e-05,
      "loss": 0.5937,
      "num_input_tokens_seen": 13275328,
      "step": 1135
    },
    {
      "epoch": 0.5871748647952614,
      "grad_norm": 18.416876655689737,
      "learning_rate": 8.464803345059324e-05,
      "loss": 0.5785,
      "num_input_tokens_seen": 13333784,
      "step": 1140
    },
    {
      "epoch": 0.5897501931496266,
      "grad_norm": 7.2789290785005845,
      "learning_rate": 8.449413858127487e-05,
      "loss": 0.5472,
      "num_input_tokens_seen": 13392280,
      "step": 1145
    },
    {
      "epoch": 0.5923255215039918,
      "grad_norm": 7.9221272500905355,
      "learning_rate": 8.433961761999796e-05,
      "loss": 0.5228,
      "num_input_tokens_seen": 13450720,
      "step": 1150
    },
    {
      "epoch": 0.5923255215039918,
      "eval_loss": 0.6229755282402039,
      "eval_runtime": 19.3811,
      "eval_samples_per_second": 3.096,
      "eval_steps_per_second": 0.774,
      "num_input_tokens_seen": 13450720,
      "step": 1150
    },
    {
      "epoch": 0.5949008498583569,
      "grad_norm": 7.6504285821029265,
      "learning_rate": 8.418447337142254e-05,
      "loss": 0.5654,
      "num_input_tokens_seen": 13509200,
      "step": 1155
    },
    {
      "epoch": 0.5974761782127221,
      "grad_norm": 12.935585815436268,
      "learning_rate": 8.402870865152172e-05,
      "loss": 0.5074,
      "num_input_tokens_seen": 13567656,
      "step": 1160
    },
    {
      "epoch": 0.6000515065670873,
      "grad_norm": 11.833351794705523,
      "learning_rate": 8.387232628753056e-05,
      "loss": 0.6436,
      "num_input_tokens_seen": 13626136,
      "step": 1165
    },
    {
      "epoch": 0.6026268349214525,
      "grad_norm": 6.637170802158791,
      "learning_rate": 8.371532911789482e-05,
      "loss": 0.55,
      "num_input_tokens_seen": 13684608,
      "step": 1170
    },
    {
      "epoch": 0.6052021632758177,
      "grad_norm": 13.363715980324447,
      "learning_rate": 8.355771999221937e-05,
      "loss": 0.5399,
      "num_input_tokens_seen": 13743080,
      "step": 1175
    },
    {
      "epoch": 0.6077774916301828,
      "grad_norm": 14.735226460570393,
      "learning_rate": 8.339950177121647e-05,
      "loss": 0.5402,
      "num_input_tokens_seen": 13801552,
      "step": 1180
    },
    {
      "epoch": 0.610352819984548,
      "grad_norm": 15.934621739592105,
      "learning_rate": 8.324067732665393e-05,
      "loss": 0.5559,
      "num_input_tokens_seen": 13860064,
      "step": 1185
    },
    {
      "epoch": 0.6129281483389132,
      "grad_norm": 10.693551994820394,
      "learning_rate": 8.308124954130289e-05,
      "loss": 0.5619,
      "num_input_tokens_seen": 13918552,
      "step": 1190
    },
    {
      "epoch": 0.6155034766932784,
      "grad_norm": 12.596573646765963,
      "learning_rate": 8.292122130888558e-05,
      "loss": 0.5933,
      "num_input_tokens_seen": 13977056,
      "step": 1195
    },
    {
      "epoch": 0.6180788050476436,
      "grad_norm": 8.090399041458168,
      "learning_rate": 8.276059553402265e-05,
      "loss": 0.5078,
      "num_input_tokens_seen": 14035544,
      "step": 1200
    },
    {
      "epoch": 0.6180788050476436,
      "eval_loss": 0.6184359192848206,
      "eval_runtime": 19.3097,
      "eval_samples_per_second": 3.107,
      "eval_steps_per_second": 0.777,
      "num_input_tokens_seen": 14035544,
      "step": 1200
    },
    {
      "epoch": 0.6206541334020087,
      "grad_norm": 5.625941133087891,
      "learning_rate": 8.259937513218066e-05,
      "loss": 0.5109,
      "num_input_tokens_seen": 14094024,
      "step": 1205
    },
    {
      "epoch": 0.623229461756374,
      "grad_norm": 11.892235969186327,
      "learning_rate": 8.243756302961898e-05,
      "loss": 0.4738,
      "num_input_tokens_seen": 14152504,
      "step": 1210
    },
    {
      "epoch": 0.6258047901107391,
      "grad_norm": 45.17960159223106,
      "learning_rate": 8.227516216333679e-05,
      "loss": 0.5615,
      "num_input_tokens_seen": 14210992,
      "step": 1215
    },
    {
      "epoch": 0.6283801184651043,
      "grad_norm": 14.930236962628644,
      "learning_rate": 8.211217548101973e-05,
      "loss": 0.5584,
      "num_input_tokens_seen": 14269488,
      "step": 1220
    },
    {
      "epoch": 0.6309554468194695,
      "grad_norm": 49.91459221869246,
      "learning_rate": 8.194860594098635e-05,
      "loss": 0.4856,
      "num_input_tokens_seen": 14327968,
      "step": 1225
    },
    {
      "epoch": 0.6335307751738347,
      "grad_norm": 14.899444451092219,
      "learning_rate": 8.17844565121345e-05,
      "loss": 0.5378,
      "num_input_tokens_seen": 14386448,
      "step": 1230
    },
    {
      "epoch": 0.6361061035281999,
      "grad_norm": 10.76781481162281,
      "learning_rate": 8.161973017388744e-05,
      "loss": 0.4484,
      "num_input_tokens_seen": 14444912,
      "step": 1235
    },
    {
      "epoch": 0.638681431882565,
      "grad_norm": 11.97619546639196,
      "learning_rate": 8.145442991613963e-05,
      "loss": 0.4772,
      "num_input_tokens_seen": 14503392,
      "step": 1240
    },
    {
      "epoch": 0.6412567602369302,
      "grad_norm": 12.878458794693833,
      "learning_rate": 8.128855873920265e-05,
      "loss": 0.5807,
      "num_input_tokens_seen": 14561872,
      "step": 1245
    },
    {
      "epoch": 0.6438320885912954,
      "grad_norm": 5.57738881271864,
      "learning_rate": 8.112211965375059e-05,
      "loss": 0.5268,
      "num_input_tokens_seen": 14620336,
      "step": 1250
    },
    {
      "epoch": 0.6438320885912954,
      "eval_loss": 0.5496931672096252,
      "eval_runtime": 19.4472,
      "eval_samples_per_second": 3.085,
      "eval_steps_per_second": 0.771,
      "num_input_tokens_seen": 14620336,
      "step": 1250
    },
    {
      "epoch": 0.6464074169456606,
      "grad_norm": 7.347855237786093,
      "learning_rate": 8.095511568076548e-05,
      "loss": 0.6012,
      "num_input_tokens_seen": 14678792,
      "step": 1255
    },
    {
      "epoch": 0.6489827453000258,
      "grad_norm": 9.779072667301502,
      "learning_rate": 8.078754985148247e-05,
      "loss": 0.5358,
      "num_input_tokens_seen": 14737272,
      "step": 1260
    },
    {
      "epoch": 0.6515580736543909,
      "grad_norm": 13.80618279187987,
      "learning_rate": 8.061942520733474e-05,
      "loss": 0.4676,
      "num_input_tokens_seen": 14795784,
      "step": 1265
    },
    {
      "epoch": 0.6541334020087561,
      "grad_norm": 10.954100508877262,
      "learning_rate": 8.045074479989838e-05,
      "loss": 0.489,
      "num_input_tokens_seen": 14854272,
      "step": 1270
    },
    {
      "epoch": 0.6567087303631213,
      "grad_norm": 9.330856277290438,
      "learning_rate": 8.02815116908369e-05,
      "loss": 0.4505,
      "num_input_tokens_seen": 14912720,
      "step": 1275
    },
    {
      "epoch": 0.6592840587174865,
      "grad_norm": 12.562590417573535,
      "learning_rate": 8.011172895184579e-05,
      "loss": 0.4987,
      "num_input_tokens_seen": 14971192,
      "step": 1280
    },
    {
      "epoch": 0.6618593870718517,
      "grad_norm": 13.11335003986863,
      "learning_rate": 7.994139966459664e-05,
      "loss": 0.5156,
      "num_input_tokens_seen": 15029656,
      "step": 1285
    },
    {
      "epoch": 0.6644347154262168,
      "grad_norm": 9.518625656710672,
      "learning_rate": 7.977052692068127e-05,
      "loss": 0.5266,
      "num_input_tokens_seen": 15088144,
      "step": 1290
    },
    {
      "epoch": 0.667010043780582,
      "grad_norm": 8.95720787325786,
      "learning_rate": 7.959911382155566e-05,
      "loss": 0.4502,
      "num_input_tokens_seen": 15146600,
      "step": 1295
    },
    {
      "epoch": 0.6695853721349472,
      "grad_norm": 11.65058483213029,
      "learning_rate": 7.942716347848353e-05,
      "loss": 0.4578,
      "num_input_tokens_seen": 15205064,
      "step": 1300
    },
    {
      "epoch": 0.6695853721349472,
      "eval_loss": 0.49471279978752136,
      "eval_runtime": 19.2738,
      "eval_samples_per_second": 3.113,
      "eval_steps_per_second": 0.778,
      "num_input_tokens_seen": 15205064,
      "step": 1300
    },
    {
      "epoch": 0.6721607004893124,
      "grad_norm": 10.254593495214214,
      "learning_rate": 7.925467901247996e-05,
      "loss": 0.4589,
      "num_input_tokens_seen": 15263560,
      "step": 1305
    },
    {
      "epoch": 0.6747360288436776,
      "grad_norm": 10.097755490465826,
      "learning_rate": 7.908166355425475e-05,
      "loss": 0.457,
      "num_input_tokens_seen": 15322016,
      "step": 1310
    },
    {
      "epoch": 0.6773113571980427,
      "grad_norm": 10.727169120741898,
      "learning_rate": 7.890812024415555e-05,
      "loss": 0.51,
      "num_input_tokens_seen": 15380504,
      "step": 1315
    },
    {
      "epoch": 0.6798866855524079,
      "grad_norm": 11.12869176509039,
      "learning_rate": 7.873405223211087e-05,
      "loss": 0.4994,
      "num_input_tokens_seen": 15438944,
      "step": 1320
    },
    {
      "epoch": 0.6824620139067731,
      "grad_norm": 8.499688286725998,
      "learning_rate": 7.855946267757295e-05,
      "loss": 0.4501,
      "num_input_tokens_seen": 15497384,
      "step": 1325
    },
    {
      "epoch": 0.6850373422611383,
      "grad_norm": 10.352066646699223,
      "learning_rate": 7.838435474946034e-05,
      "loss": 0.4807,
      "num_input_tokens_seen": 15555856,
      "step": 1330
    },
    {
      "epoch": 0.6876126706155035,
      "grad_norm": 14.714795422962215,
      "learning_rate": 7.820873162610044e-05,
      "loss": 0.5112,
      "num_input_tokens_seen": 15614368,
      "step": 1335
    },
    {
      "epoch": 0.6901879989698687,
      "grad_norm": 8.466874504995866,
      "learning_rate": 7.803259649517178e-05,
      "loss": 0.4825,
      "num_input_tokens_seen": 15672864,
      "step": 1340
    },
    {
      "epoch": 0.6927633273242338,
      "grad_norm": 7.62934190428385,
      "learning_rate": 7.78559525536462e-05,
      "loss": 0.5147,
      "num_input_tokens_seen": 15731376,
      "step": 1345
    },
    {
      "epoch": 0.695338655678599,
      "grad_norm": 9.019045929732858,
      "learning_rate": 7.767880300773074e-05,
      "loss": 0.4702,
      "num_input_tokens_seen": 15789848,
      "step": 1350
    },
    {
      "epoch": 0.695338655678599,
      "eval_loss": 0.5247787237167358,
      "eval_runtime": 19.436,
      "eval_samples_per_second": 3.087,
      "eval_steps_per_second": 0.772,
      "num_input_tokens_seen": 15789848,
      "step": 1350
    },
    {
      "epoch": 0.6979139840329642,
      "grad_norm": 9.176622281786903,
      "learning_rate": 7.750115107280959e-05,
      "loss": 0.4601,
      "num_input_tokens_seen": 15848328,
      "step": 1355
    },
    {
      "epoch": 0.7004893123873294,
      "grad_norm": 13.657128530320302,
      "learning_rate": 7.732299997338557e-05,
      "loss": 0.4704,
      "num_input_tokens_seen": 15906824,
      "step": 1360
    },
    {
      "epoch": 0.7030646407416946,
      "grad_norm": 20.00043922120396,
      "learning_rate": 7.714435294302168e-05,
      "loss": 0.4937,
      "num_input_tokens_seen": 15965312,
      "step": 1365
    },
    {
      "epoch": 0.7056399690960597,
      "grad_norm": 10.050026656050768,
      "learning_rate": 7.696521322428245e-05,
      "loss": 0.4635,
      "num_input_tokens_seen": 16023824,
      "step": 1370
    },
    {
      "epoch": 0.7082152974504249,
      "grad_norm": 10.723911953770791,
      "learning_rate": 7.678558406867498e-05,
      "loss": 0.4421,
      "num_input_tokens_seen": 16082280,
      "step": 1375
    },
    {
      "epoch": 0.7107906258047901,
      "grad_norm": 8.12550455034103,
      "learning_rate": 7.660546873659e-05,
      "loss": 0.4823,
      "num_input_tokens_seen": 16140800,
      "step": 1380
    },
    {
      "epoch": 0.7133659541591553,
      "grad_norm": 9.105312428399467,
      "learning_rate": 7.642487049724271e-05,
      "loss": 0.4122,
      "num_input_tokens_seen": 16199320,
      "step": 1385
    },
    {
      "epoch": 0.7159412825135205,
      "grad_norm": 5.873152833761692,
      "learning_rate": 7.624379262861335e-05,
      "loss": 0.4372,
      "num_input_tokens_seen": 16257768,
      "step": 1390
    },
    {
      "epoch": 0.7185166108678857,
      "grad_norm": 10.07764587174093,
      "learning_rate": 7.606223841738775e-05,
      "loss": 0.4899,
      "num_input_tokens_seen": 16316264,
      "step": 1395
    },
    {
      "epoch": 0.7210919392222508,
      "grad_norm": 20.283363042209615,
      "learning_rate": 7.588021115889777e-05,
      "loss": 0.4294,
      "num_input_tokens_seen": 16374784,
      "step": 1400
    },
    {
      "epoch": 0.7210919392222508,
      "eval_loss": 0.47318556904792786,
      "eval_runtime": 19.2484,
      "eval_samples_per_second": 3.117,
      "eval_steps_per_second": 0.779,
      "num_input_tokens_seen": 16374784,
      "step": 1400
    }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
  "num_input_tokens_seen": 16374784,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1080424859697152.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}