{
  "best_metric": 0.7318872809410095,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-700",
  "epoch": 0.3605459696111254,
  "eval_steps": 50,
  "global_step": 700,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025753283543651817,
      "grad_norm": 21.379672872549193,
      "learning_rate": 2.564102564102564e-06,
      "loss": 3.0388,
      "num_input_tokens_seen": 58496,
      "step": 5
    },
    {
      "epoch": 0.0051506567087303634,
      "grad_norm": 20.76117223991023,
      "learning_rate": 5.128205128205128e-06,
      "loss": 2.9831,
      "num_input_tokens_seen": 116960,
      "step": 10
    },
    {
      "epoch": 0.007725985063095545,
      "grad_norm": 22.5213517141881,
      "learning_rate": 7.692307692307694e-06,
      "loss": 2.8696,
      "num_input_tokens_seen": 175448,
      "step": 15
    },
    {
      "epoch": 0.010301313417460727,
      "grad_norm": 20.673071198727328,
      "learning_rate": 1.0256410256410256e-05,
      "loss": 2.6316,
      "num_input_tokens_seen": 233944,
      "step": 20
    },
    {
      "epoch": 0.012876641771825908,
      "grad_norm": 18.902291974538457,
      "learning_rate": 1.282051282051282e-05,
      "loss": 1.9707,
      "num_input_tokens_seen": 292416,
      "step": 25
    },
    {
      "epoch": 0.01545197012619109,
      "grad_norm": 8.05718270484028,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 1.3782,
      "num_input_tokens_seen": 350904,
      "step": 30
    },
    {
      "epoch": 0.018027298480556272,
      "grad_norm": 3.6465275188422344,
      "learning_rate": 1.794871794871795e-05,
      "loss": 1.0628,
      "num_input_tokens_seen": 409384,
      "step": 35
    },
    {
      "epoch": 0.020602626834921454,
      "grad_norm": 4.842154180410959,
      "learning_rate": 2.0512820512820512e-05,
      "loss": 0.9789,
      "num_input_tokens_seen": 467864,
      "step": 40
    },
    {
      "epoch": 0.023177955189286635,
      "grad_norm": 2.6799567517341396,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.9327,
      "num_input_tokens_seen": 526384,
      "step": 45
    },
    {
      "epoch": 0.025753283543651816,
      "grad_norm": 2.629272923472648,
      "learning_rate": 2.564102564102564e-05,
      "loss": 0.9233,
      "num_input_tokens_seen": 584856,
      "step": 50
    },
    {
      "epoch": 0.025753283543651816,
      "eval_loss": 0.9281821846961975,
      "eval_runtime": 48.2484,
      "eval_samples_per_second": 1.244,
      "eval_steps_per_second": 0.311,
      "num_input_tokens_seen": 584856,
      "step": 50
    },
    {
      "epoch": 0.028328611898016998,
      "grad_norm": 1.2858813899048422,
      "learning_rate": 2.8205128205128207e-05,
      "loss": 0.897,
      "num_input_tokens_seen": 643344,
      "step": 55
    },
    {
      "epoch": 0.03090394025238218,
      "grad_norm": 1.177678811476692,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.9169,
      "num_input_tokens_seen": 701808,
      "step": 60
    },
    {
      "epoch": 0.03347926860674736,
      "grad_norm": 1.2077065633120996,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.9019,
      "num_input_tokens_seen": 760304,
      "step": 65
    },
    {
      "epoch": 0.036054596961112545,
      "grad_norm": 1.1560644429967823,
      "learning_rate": 3.58974358974359e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 818760,
      "step": 70
    },
    {
      "epoch": 0.03862992531547772,
      "grad_norm": 0.732907212566054,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.9073,
      "num_input_tokens_seen": 877256,
      "step": 75
    },
    {
      "epoch": 0.04120525366984291,
      "grad_norm": 0.9616993870089134,
      "learning_rate": 4.1025641025641023e-05,
      "loss": 0.9081,
      "num_input_tokens_seen": 935752,
      "step": 80
    },
    {
      "epoch": 0.043780582024208085,
      "grad_norm": 0.8384067209941525,
      "learning_rate": 4.358974358974359e-05,
      "loss": 0.906,
      "num_input_tokens_seen": 994216,
      "step": 85
    },
    {
      "epoch": 0.04635591037857327,
      "grad_norm": 0.4045876972188175,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.8952,
      "num_input_tokens_seen": 1052704,
      "step": 90
    },
    {
      "epoch": 0.04893123873293845,
      "grad_norm": 0.6062678622593307,
      "learning_rate": 4.871794871794872e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 1111176,
      "step": 95
    },
    {
      "epoch": 0.05150656708730363,
      "grad_norm": 0.5316642041721752,
      "learning_rate": 5.128205128205128e-05,
      "loss": 0.9024,
      "num_input_tokens_seen": 1169664,
      "step": 100
    },
    {
      "epoch": 0.05150656708730363,
      "eval_loss": 0.911374032497406,
      "eval_runtime": 19.566,
      "eval_samples_per_second": 3.067,
      "eval_steps_per_second": 0.767,
      "num_input_tokens_seen": 1169664,
      "step": 100
    },
    {
      "epoch": 0.05408189544166881,
      "grad_norm": 0.47189419512253006,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.9142,
      "num_input_tokens_seen": 1228112,
      "step": 105
    },
    {
      "epoch": 0.056657223796033995,
      "grad_norm": 0.4885000351277984,
      "learning_rate": 5.6410256410256414e-05,
      "loss": 0.9054,
      "num_input_tokens_seen": 1286608,
      "step": 110
    },
    {
      "epoch": 0.05923255215039917,
      "grad_norm": 1.0232694160031948,
      "learning_rate": 5.897435897435898e-05,
      "loss": 0.8997,
      "num_input_tokens_seen": 1345072,
      "step": 115
    },
    {
      "epoch": 0.06180788050476436,
      "grad_norm": 0.6656697152989639,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.8988,
      "num_input_tokens_seen": 1403544,
      "step": 120
    },
    {
      "epoch": 0.06438320885912954,
      "grad_norm": 0.6273175951192728,
      "learning_rate": 6.410256410256412e-05,
      "loss": 0.9087,
      "num_input_tokens_seen": 1462024,
      "step": 125
    },
    {
      "epoch": 0.06695853721349472,
      "grad_norm": 0.707089894516894,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.8961,
      "num_input_tokens_seen": 1520528,
      "step": 130
    },
    {
      "epoch": 0.0695338655678599,
      "grad_norm": 0.4633668497982238,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.903,
      "num_input_tokens_seen": 1579024,
      "step": 135
    },
    {
      "epoch": 0.07210919392222509,
      "grad_norm": 0.5052802522069755,
      "learning_rate": 7.17948717948718e-05,
      "loss": 0.899,
      "num_input_tokens_seen": 1637504,
      "step": 140
    },
    {
      "epoch": 0.07468452227659027,
      "grad_norm": 0.7577940010204668,
      "learning_rate": 7.435897435897436e-05,
      "loss": 0.9071,
      "num_input_tokens_seen": 1696024,
      "step": 145
    },
    {
      "epoch": 0.07725985063095545,
      "grad_norm": 0.5812587904219971,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.9045,
      "num_input_tokens_seen": 1754512,
      "step": 150
    },
    {
      "epoch": 0.07725985063095545,
      "eval_loss": 0.8934853076934814,
      "eval_runtime": 19.8765,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.755,
      "num_input_tokens_seen": 1754512,
      "step": 150
    },
    {
      "epoch": 0.07983517898532062,
      "grad_norm": 0.5167982536583405,
      "learning_rate": 7.948717948717948e-05,
      "loss": 0.8992,
      "num_input_tokens_seen": 1812976,
      "step": 155
    },
    {
      "epoch": 0.08241050733968582,
      "grad_norm": 0.4971816797735092,
      "learning_rate": 8.205128205128205e-05,
      "loss": 0.8965,
      "num_input_tokens_seen": 1871464,
      "step": 160
    },
    {
      "epoch": 0.08498583569405099,
      "grad_norm": 0.6561749633642688,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.9094,
      "num_input_tokens_seen": 1929928,
      "step": 165
    },
    {
      "epoch": 0.08756116404841617,
      "grad_norm": 0.5010857314708574,
      "learning_rate": 8.717948717948718e-05,
      "loss": 0.903,
      "num_input_tokens_seen": 1988432,
      "step": 170
    },
    {
      "epoch": 0.09013649240278135,
      "grad_norm": 0.48794512034251364,
      "learning_rate": 8.974358974358975e-05,
      "loss": 0.902,
      "num_input_tokens_seen": 2046920,
      "step": 175
    },
    {
      "epoch": 0.09271182075714654,
      "grad_norm": 0.4040014684262414,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.9006,
      "num_input_tokens_seen": 2105392,
      "step": 180
    },
    {
      "epoch": 0.09528714911151172,
      "grad_norm": 0.5312840597942438,
      "learning_rate": 9.487179487179487e-05,
      "loss": 0.9042,
      "num_input_tokens_seen": 2163872,
      "step": 185
    },
    {
      "epoch": 0.0978624774658769,
      "grad_norm": 0.3535119366494406,
      "learning_rate": 9.743589743589744e-05,
      "loss": 0.9096,
      "num_input_tokens_seen": 2222352,
      "step": 190
    },
    {
      "epoch": 0.10043780582024209,
      "grad_norm": 0.30590378285024006,
      "learning_rate": 0.0001,
      "loss": 0.9037,
      "num_input_tokens_seen": 2280800,
      "step": 195
    },
    {
      "epoch": 0.10301313417460727,
      "grad_norm": 0.3055264226667786,
      "learning_rate": 9.999954623308172e-05,
      "loss": 0.904,
      "num_input_tokens_seen": 2339304,
      "step": 200
    },
    {
      "epoch": 0.10301313417460727,
      "eval_loss": 0.8980139493942261,
      "eval_runtime": 19.316,
      "eval_samples_per_second": 3.106,
      "eval_steps_per_second": 0.777,
      "num_input_tokens_seen": 2339304,
      "step": 200
    },
    {
      "epoch": 0.10558846252897244,
      "grad_norm": 0.8828178200664915,
      "learning_rate": 9.999818494056303e-05,
      "loss": 0.9029,
      "num_input_tokens_seen": 2397808,
      "step": 205
    },
    {
      "epoch": 0.10816379088333762,
      "grad_norm": 0.4308314655260644,
      "learning_rate": 9.99959161471523e-05,
      "loss": 0.9005,
      "num_input_tokens_seen": 2456288,
      "step": 210
    },
    {
      "epoch": 0.11073911923770281,
      "grad_norm": 0.4482188659643584,
      "learning_rate": 9.99927398940297e-05,
      "loss": 0.9096,
      "num_input_tokens_seen": 2514784,
      "step": 215
    },
    {
      "epoch": 0.11331444759206799,
      "grad_norm": 0.49014741417238206,
      "learning_rate": 9.998865623884635e-05,
      "loss": 0.9036,
      "num_input_tokens_seen": 2573240,
      "step": 220
    },
    {
      "epoch": 0.11588977594643317,
      "grad_norm": 0.2774850522391394,
      "learning_rate": 9.998366525572336e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 2631672,
      "step": 225
    },
    {
      "epoch": 0.11846510430079835,
      "grad_norm": 0.49390873315018263,
      "learning_rate": 9.997776703525046e-05,
      "loss": 0.9018,
      "num_input_tokens_seen": 2690112,
      "step": 230
    },
    {
      "epoch": 0.12104043265516354,
      "grad_norm": 0.3284306399258997,
      "learning_rate": 9.997096168448432e-05,
      "loss": 0.8934,
      "num_input_tokens_seen": 2748608,
      "step": 235
    },
    {
      "epoch": 0.12361576100952872,
      "grad_norm": 0.7182680023403506,
      "learning_rate": 9.996324932694668e-05,
      "loss": 0.8876,
      "num_input_tokens_seen": 2807080,
      "step": 240
    },
    {
      "epoch": 0.1261910893638939,
      "grad_norm": 0.7305499346526235,
      "learning_rate": 9.995463010262206e-05,
      "loss": 0.9084,
      "num_input_tokens_seen": 2865520,
      "step": 245
    },
    {
      "epoch": 0.12876641771825909,
      "grad_norm": 0.5773211522908436,
      "learning_rate": 9.994510416795519e-05,
      "loss": 0.9106,
      "num_input_tokens_seen": 2924016,
      "step": 250
    },
    {
      "epoch": 0.12876641771825909,
      "eval_loss": 0.8958488702774048,
      "eval_runtime": 19.507,
      "eval_samples_per_second": 3.076,
      "eval_steps_per_second": 0.769,
      "num_input_tokens_seen": 2924016,
      "step": 250
    },
    {
      "epoch": 0.13134174607262425,
      "grad_norm": 0.44306061088962184,
      "learning_rate": 9.993467169584824e-05,
      "loss": 0.9012,
      "num_input_tokens_seen": 2982520,
      "step": 255
    },
    {
      "epoch": 0.13391707442698944,
      "grad_norm": 0.7851687259125024,
      "learning_rate": 9.992333287565765e-05,
      "loss": 0.9069,
      "num_input_tokens_seen": 3041008,
      "step": 260
    },
    {
      "epoch": 0.13649240278135463,
      "grad_norm": 0.5705235716557865,
      "learning_rate": 9.991108791319066e-05,
      "loss": 0.8918,
      "num_input_tokens_seen": 3099464,
      "step": 265
    },
    {
      "epoch": 0.1390677311357198,
      "grad_norm": 0.6202972137914602,
      "learning_rate": 9.989793703070163e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 3157944,
      "step": 270
    },
    {
      "epoch": 0.141643059490085,
      "grad_norm": 0.7583768377175583,
      "learning_rate": 9.988388046688799e-05,
      "loss": 0.9009,
      "num_input_tokens_seen": 3216448,
      "step": 275
    },
    {
      "epoch": 0.14421838784445018,
      "grad_norm": 0.7180540444266581,
      "learning_rate": 9.986891847688587e-05,
      "loss": 0.9059,
      "num_input_tokens_seen": 3274928,
      "step": 280
    },
    {
      "epoch": 0.14679371619881534,
      "grad_norm": 0.4173225854654158,
      "learning_rate": 9.985305133226553e-05,
      "loss": 0.8939,
      "num_input_tokens_seen": 3333408,
      "step": 285
    },
    {
      "epoch": 0.14936904455318054,
      "grad_norm": 0.7825855108807762,
      "learning_rate": 9.983627932102638e-05,
      "loss": 0.8899,
      "num_input_tokens_seen": 3391896,
      "step": 290
    },
    {
      "epoch": 0.1519443729075457,
      "grad_norm": 0.4850249272160501,
      "learning_rate": 9.981860274759173e-05,
      "loss": 0.9092,
      "num_input_tokens_seen": 3450392,
      "step": 295
    },
    {
      "epoch": 0.1545197012619109,
      "grad_norm": 0.3325682106309916,
      "learning_rate": 9.980002193280342e-05,
      "loss": 0.8901,
      "num_input_tokens_seen": 3508888,
      "step": 300
    },
    {
      "epoch": 0.1545197012619109,
      "eval_loss": 0.8932263255119324,
      "eval_runtime": 19.7633,
      "eval_samples_per_second": 3.036,
      "eval_steps_per_second": 0.759,
      "num_input_tokens_seen": 3508888,
      "step": 300
    },
    {
      "epoch": 0.15709502961627608,
      "grad_norm": 0.36562722920113416,
      "learning_rate": 9.978053721391578e-05,
      "loss": 0.9042,
      "num_input_tokens_seen": 3567368,
      "step": 305
    },
    {
      "epoch": 0.15967035797064125,
      "grad_norm": 0.3765491511973325,
      "learning_rate": 9.976014894458963e-05,
      "loss": 0.9007,
      "num_input_tokens_seen": 3625848,
      "step": 310
    },
    {
      "epoch": 0.16224568632500644,
      "grad_norm": 0.5264420727347517,
      "learning_rate": 9.973885749488589e-05,
      "loss": 0.9036,
      "num_input_tokens_seen": 3684336,
      "step": 315
    },
    {
      "epoch": 0.16482101467937163,
      "grad_norm": 0.24680747784235688,
      "learning_rate": 9.971666325125874e-05,
      "loss": 0.8936,
      "num_input_tokens_seen": 3742800,
      "step": 320
    },
    {
      "epoch": 0.1673963430337368,
      "grad_norm": 0.4982571051665039,
      "learning_rate": 9.969356661654876e-05,
      "loss": 0.8989,
      "num_input_tokens_seen": 3801280,
      "step": 325
    },
    {
      "epoch": 0.16997167138810199,
      "grad_norm": 0.49943012602572584,
      "learning_rate": 9.966956800997546e-05,
      "loss": 0.8983,
      "num_input_tokens_seen": 3859792,
      "step": 330
    },
    {
      "epoch": 0.17254699974246718,
      "grad_norm": 0.37381050353079964,
      "learning_rate": 9.964466786712984e-05,
      "loss": 0.9038,
      "num_input_tokens_seen": 3918272,
      "step": 335
    },
    {
      "epoch": 0.17512232809683234,
      "grad_norm": 0.7501484170811903,
      "learning_rate": 9.961886663996629e-05,
      "loss": 0.8947,
      "num_input_tokens_seen": 3976760,
      "step": 340
    },
    {
      "epoch": 0.17769765645119753,
      "grad_norm": 0.5623847203835772,
      "learning_rate": 9.959216479679458e-05,
      "loss": 0.9179,
      "num_input_tokens_seen": 4035240,
      "step": 345
    },
    {
      "epoch": 0.1802729848055627,
      "grad_norm": 0.34381878607605765,
      "learning_rate": 9.956456282227122e-05,
      "loss": 0.9059,
      "num_input_tokens_seen": 4093688,
      "step": 350
    },
    {
      "epoch": 0.1802729848055627,
      "eval_loss": 0.8960411548614502,
      "eval_runtime": 20.0734,
      "eval_samples_per_second": 2.989,
      "eval_steps_per_second": 0.747,
      "num_input_tokens_seen": 4093688,
      "step": 350
    },
    {
      "epoch": 0.1828483131599279,
      "grad_norm": 0.5786050277605497,
      "learning_rate": 9.953606121739074e-05,
      "loss": 0.8795,
      "num_input_tokens_seen": 4152160,
      "step": 355
    },
    {
      "epoch": 0.18542364151429308,
      "grad_norm": 1.3309062985901938,
      "learning_rate": 9.950666049947653e-05,
      "loss": 0.9143,
      "num_input_tokens_seen": 4210648,
      "step": 360
    },
    {
      "epoch": 0.18799896986865824,
      "grad_norm": 0.5010297124723248,
      "learning_rate": 9.947636120217155e-05,
      "loss": 0.9164,
      "num_input_tokens_seen": 4269136,
      "step": 365
    },
    {
      "epoch": 0.19057429822302344,
      "grad_norm": 0.7250841632803818,
      "learning_rate": 9.944516387542852e-05,
      "loss": 0.9061,
      "num_input_tokens_seen": 4327664,
      "step": 370
    },
    {
      "epoch": 0.19314962657738863,
      "grad_norm": 0.4506280653909736,
      "learning_rate": 9.941306908550005e-05,
      "loss": 0.8873,
      "num_input_tokens_seen": 4386120,
      "step": 375
    },
    {
      "epoch": 0.1957249549317538,
      "grad_norm": 0.6467175538087946,
      "learning_rate": 9.938007741492828e-05,
      "loss": 0.9038,
      "num_input_tokens_seen": 4444560,
      "step": 380
    },
    {
      "epoch": 0.19830028328611898,
      "grad_norm": 0.6742654767461002,
      "learning_rate": 9.934618946253437e-05,
      "loss": 0.9116,
      "num_input_tokens_seen": 4503016,
      "step": 385
    },
    {
      "epoch": 0.20087561164048418,
      "grad_norm": 0.418516338281364,
      "learning_rate": 9.931140584340761e-05,
      "loss": 0.9023,
      "num_input_tokens_seen": 4561496,
      "step": 390
    },
    {
      "epoch": 0.20345093999484934,
      "grad_norm": 0.5738919793445436,
      "learning_rate": 9.92757271888942e-05,
      "loss": 0.8901,
      "num_input_tokens_seen": 4619944,
      "step": 395
    },
    {
      "epoch": 0.20602626834921453,
      "grad_norm": 0.3600152815973316,
      "learning_rate": 9.923915414658587e-05,
      "loss": 0.9033,
      "num_input_tokens_seen": 4678384,
      "step": 400
    },
    {
      "epoch": 0.20602626834921453,
      "eval_loss": 0.906301736831665,
      "eval_runtime": 19.8079,
      "eval_samples_per_second": 3.029,
      "eval_steps_per_second": 0.757,
      "num_input_tokens_seen": 4678384,
      "step": 400
    },
    {
      "epoch": 0.2086015967035797,
      "grad_norm": 0.4854922483196253,
      "learning_rate": 9.920168738030807e-05,
      "loss": 0.8951,
      "num_input_tokens_seen": 4736904,
      "step": 405
    },
    {
      "epoch": 0.2111769250579449,
      "grad_norm": 0.4537032287993717,
      "learning_rate": 9.916332757010799e-05,
      "loss": 0.9131,
      "num_input_tokens_seen": 4795376,
      "step": 410
    },
    {
      "epoch": 0.21375225341231008,
      "grad_norm": 0.46655140937482126,
      "learning_rate": 9.912407541224213e-05,
      "loss": 0.8923,
      "num_input_tokens_seen": 4853880,
      "step": 415
    },
    {
      "epoch": 0.21632758176667524,
      "grad_norm": 0.3608301258843965,
      "learning_rate": 9.908393161916374e-05,
      "loss": 0.9026,
      "num_input_tokens_seen": 4912360,
      "step": 420
    },
    {
      "epoch": 0.21890291012104043,
      "grad_norm": 0.5055648531803498,
      "learning_rate": 9.904289691950979e-05,
      "loss": 0.905,
      "num_input_tokens_seen": 4970872,
      "step": 425
    },
    {
      "epoch": 0.22147823847540563,
      "grad_norm": 0.3171606869940592,
      "learning_rate": 9.900097205808789e-05,
      "loss": 0.8941,
      "num_input_tokens_seen": 5029304,
      "step": 430
    },
    {
      "epoch": 0.2240535668297708,
      "grad_norm": 0.5798722428230844,
      "learning_rate": 9.895815779586262e-05,
      "loss": 0.9031,
      "num_input_tokens_seen": 5087800,
      "step": 435
    },
    {
      "epoch": 0.22662889518413598,
      "grad_norm": 0.5751083474979835,
      "learning_rate": 9.891445490994182e-05,
      "loss": 0.8975,
      "num_input_tokens_seen": 5146312,
      "step": 440
    },
    {
      "epoch": 0.22920422353850115,
      "grad_norm": 1.2316619525123293,
      "learning_rate": 9.886986419356246e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 5204800,
      "step": 445
    },
    {
      "epoch": 0.23177955189286634,
      "grad_norm": 0.5953254293558816,
      "learning_rate": 9.88243864560762e-05,
      "loss": 0.9062,
      "num_input_tokens_seen": 5263304,
      "step": 450
    },
    {
      "epoch": 0.23177955189286634,
      "eval_loss": 0.9007609486579895,
      "eval_runtime": 20.8827,
      "eval_samples_per_second": 2.873,
      "eval_steps_per_second": 0.718,
      "num_input_tokens_seen": 5263304,
      "step": 450
    },
    {
      "epoch": 0.23435488024723153,
      "grad_norm": 0.7549819464827967,
      "learning_rate": 9.877802252293474e-05,
      "loss": 0.8891,
      "num_input_tokens_seen": 5321760,
      "step": 455
    },
    {
      "epoch": 0.2369302086015967,
      "grad_norm": 0.9960909370043465,
      "learning_rate": 9.873077323567488e-05,
      "loss": 0.9026,
      "num_input_tokens_seen": 5380224,
      "step": 460
    },
    {
      "epoch": 0.23950553695596188,
      "grad_norm": 1.5888386521989892,
      "learning_rate": 9.868263945190312e-05,
      "loss": 0.8707,
      "num_input_tokens_seen": 5438704,
      "step": 465
    },
    {
      "epoch": 0.24208086531032708,
      "grad_norm": 3.0542478842411587,
      "learning_rate": 9.863362204528024e-05,
      "loss": 0.9051,
      "num_input_tokens_seen": 5497208,
      "step": 470
    },
    {
      "epoch": 0.24465619366469224,
      "grad_norm": 1.2908325061552137,
      "learning_rate": 9.858372190550533e-05,
      "loss": 0.8711,
      "num_input_tokens_seen": 5555704,
      "step": 475
    },
    {
      "epoch": 0.24723152201905743,
      "grad_norm": 3.1989324866235744,
      "learning_rate": 9.853293993829969e-05,
      "loss": 0.885,
      "num_input_tokens_seen": 5614160,
      "step": 480
    },
    {
      "epoch": 0.24980685037342262,
      "grad_norm": 3.591366302378185,
      "learning_rate": 9.848127706539039e-05,
      "loss": 0.8615,
      "num_input_tokens_seen": 5672640,
      "step": 485
    },
    {
      "epoch": 0.2523821787277878,
      "grad_norm": 2.053833335696007,
      "learning_rate": 9.842873422449354e-05,
      "loss": 0.9057,
      "num_input_tokens_seen": 5731072,
      "step": 490
    },
    {
      "epoch": 0.254957507082153,
      "grad_norm": 1.4501486574941083,
      "learning_rate": 9.837531236929726e-05,
      "loss": 0.8818,
      "num_input_tokens_seen": 5789544,
      "step": 495
    },
    {
      "epoch": 0.25753283543651817,
      "grad_norm": 2.1068404021122866,
      "learning_rate": 9.832101246944439e-05,
      "loss": 0.8576,
      "num_input_tokens_seen": 5848048,
      "step": 500
    },
    {
      "epoch": 0.25753283543651817,
      "eval_loss": 0.8268976211547852,
      "eval_runtime": 19.6346,
      "eval_samples_per_second": 3.056,
      "eval_steps_per_second": 0.764,
      "num_input_tokens_seen": 5848048,
      "step": 500
    },
    {
      "epoch": 0.26010816379088336,
      "grad_norm": 4.216936754020565,
      "learning_rate": 9.826583551051483e-05,
      "loss": 0.8566,
      "num_input_tokens_seen": 5906512,
      "step": 505
    },
    {
      "epoch": 0.2626834921452485,
      "grad_norm": 10.456282683777822,
      "learning_rate": 9.820978249400773e-05,
      "loss": 0.8365,
      "num_input_tokens_seen": 5965024,
      "step": 510
    },
    {
      "epoch": 0.2652588204996137,
      "grad_norm": 2.334974931865165,
      "learning_rate": 9.81528544373233e-05,
      "loss": 0.8882,
      "num_input_tokens_seen": 6023496,
      "step": 515
    },
    {
      "epoch": 0.2678341488539789,
      "grad_norm": 0.6948827424617825,
      "learning_rate": 9.809505237374426e-05,
      "loss": 0.8799,
      "num_input_tokens_seen": 6082000,
      "step": 520
    },
    {
      "epoch": 0.2704094772083441,
      "grad_norm": 0.8415524888602947,
      "learning_rate": 9.80363773524172e-05,
      "loss": 0.8758,
      "num_input_tokens_seen": 6140480,
      "step": 525
    },
    {
      "epoch": 0.27298480556270927,
      "grad_norm": 1.3536520282199265,
      "learning_rate": 9.797683043833345e-05,
      "loss": 0.8644,
      "num_input_tokens_seen": 6198968,
      "step": 530
    },
    {
      "epoch": 0.2755601339170744,
      "grad_norm": 4.556188528469967,
      "learning_rate": 9.791641271230982e-05,
      "loss": 0.8453,
      "num_input_tokens_seen": 6257464,
      "step": 535
    },
    {
      "epoch": 0.2781354622714396,
      "grad_norm": 2.890141630286954,
      "learning_rate": 9.78551252709689e-05,
      "loss": 0.8533,
      "num_input_tokens_seen": 6315944,
      "step": 540
    },
    {
      "epoch": 0.2807107906258048,
      "grad_norm": 4.471490037342243,
      "learning_rate": 9.779296922671923e-05,
      "loss": 0.8575,
      "num_input_tokens_seen": 6374408,
      "step": 545
    },
    {
      "epoch": 0.28328611898017,
      "grad_norm": 3.174906426420603,
      "learning_rate": 9.77299457077351e-05,
      "loss": 0.8666,
      "num_input_tokens_seen": 6432936,
      "step": 550
    },
    {
      "epoch": 0.28328611898017,
      "eval_loss": 0.7909801602363586,
      "eval_runtime": 19.8739,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.755,
      "num_input_tokens_seen": 6432936,
      "step": 550
    },
    {
      "epoch": 0.28586144733453517,
      "grad_norm": 3.529163852540611,
      "learning_rate": 9.7666055857936e-05,
      "loss": 0.8264,
      "num_input_tokens_seen": 6491400,
      "step": 555
    },
    {
      "epoch": 0.28843677568890036,
      "grad_norm": 4.044590312854015,
      "learning_rate": 9.760130083696595e-05,
      "loss": 0.8456,
      "num_input_tokens_seen": 6549872,
      "step": 560
    },
    {
      "epoch": 0.2910121040432655,
      "grad_norm": 4.650808013267891,
      "learning_rate": 9.75356818201724e-05,
      "loss": 0.8032,
      "num_input_tokens_seen": 6608296,
      "step": 565
    },
    {
      "epoch": 0.2935874323976307,
      "grad_norm": 6.577223054225459,
      "learning_rate": 9.746919999858492e-05,
      "loss": 0.8081,
      "num_input_tokens_seen": 6666768,
      "step": 570
    },
    {
      "epoch": 0.2961627607519959,
      "grad_norm": 7.1732684079932545,
      "learning_rate": 9.740185657889357e-05,
      "loss": 0.8398,
      "num_input_tokens_seen": 6725248,
      "step": 575
    },
    {
      "epoch": 0.29873808910636107,
      "grad_norm": 21.451661035438484,
      "learning_rate": 9.733365278342696e-05,
      "loss": 0.8908,
      "num_input_tokens_seen": 6783680,
      "step": 580
    },
    {
      "epoch": 0.30131341746072626,
      "grad_norm": 4.031699151478832,
      "learning_rate": 9.726458985013017e-05,
      "loss": 0.8248,
      "num_input_tokens_seen": 6842144,
      "step": 585
    },
    {
      "epoch": 0.3038887458150914,
      "grad_norm": 3.45579530759462,
      "learning_rate": 9.719466903254215e-05,
      "loss": 0.829,
      "num_input_tokens_seen": 6900656,
      "step": 590
    },
    {
      "epoch": 0.3064640741694566,
      "grad_norm": 4.518719062630672,
      "learning_rate": 9.712389159977307e-05,
      "loss": 0.8269,
      "num_input_tokens_seen": 6959128,
      "step": 595
    },
    {
      "epoch": 0.3090394025238218,
      "grad_norm": 12.728221405806083,
      "learning_rate": 9.705225883648121e-05,
      "loss": 0.7997,
      "num_input_tokens_seen": 7017576,
      "step": 600
    },
    {
      "epoch": 0.3090394025238218,
      "eval_loss": 0.787663459777832,
      "eval_runtime": 19.6121,
      "eval_samples_per_second": 3.059,
      "eval_steps_per_second": 0.765,
      "num_input_tokens_seen": 7017576,
      "step": 600
    },
    {
      "epoch": 0.311614730878187,
      "grad_norm": 6.69798043480266,
      "learning_rate": 9.697977204284973e-05,
      "loss": 0.8925,
      "num_input_tokens_seen": 7076032,
      "step": 605
    },
    {
      "epoch": 0.31419005923255217,
      "grad_norm": 5.067921055882507,
      "learning_rate": 9.690643253456297e-05,
      "loss": 0.8159,
      "num_input_tokens_seen": 7134536,
      "step": 610
    },
    {
      "epoch": 0.31676538758691736,
      "grad_norm": 7.400939684061883,
      "learning_rate": 9.683224164278264e-05,
      "loss": 0.826,
      "num_input_tokens_seen": 7193032,
      "step": 615
    },
    {
      "epoch": 0.3193407159412825,
      "grad_norm": 5.898525799199162,
      "learning_rate": 9.675720071412365e-05,
      "loss": 0.8187,
      "num_input_tokens_seen": 7251568,
      "step": 620
    },
    {
      "epoch": 0.3219160442956477,
      "grad_norm": 11.267105316774332,
      "learning_rate": 9.66813111106296e-05,
      "loss": 0.8524,
      "num_input_tokens_seen": 7310072,
      "step": 625
    },
    {
      "epoch": 0.3244913726500129,
      "grad_norm": 6.703970582399643,
      "learning_rate": 9.660457420974819e-05,
      "loss": 0.7966,
      "num_input_tokens_seen": 7368560,
      "step": 630
    },
    {
      "epoch": 0.32706670100437807,
      "grad_norm": 6.945445265294353,
      "learning_rate": 9.652699140430608e-05,
      "loss": 0.799,
      "num_input_tokens_seen": 7427040,
      "step": 635
    },
    {
      "epoch": 0.32964202935874326,
      "grad_norm": 7.0684293091171595,
      "learning_rate": 9.644856410248369e-05,
      "loss": 0.8477,
      "num_input_tokens_seen": 7485552,
      "step": 640
    },
    {
      "epoch": 0.3322173577131084,
      "grad_norm": 7.165086711244158,
      "learning_rate": 9.636929372778963e-05,
      "loss": 0.7867,
      "num_input_tokens_seen": 7544040,
      "step": 645
    },
    {
      "epoch": 0.3347926860674736,
      "grad_norm": 9.185933515393563,
      "learning_rate": 9.628918171903485e-05,
      "loss": 0.8367,
      "num_input_tokens_seen": 7602512,
      "step": 650
    },
    {
      "epoch": 0.3347926860674736,
      "eval_loss": 0.7940558791160583,
      "eval_runtime": 19.7641,
      "eval_samples_per_second": 3.036,
      "eval_steps_per_second": 0.759,
      "num_input_tokens_seen": 7602512,
      "step": 650
    },
    {
      "epoch": 0.3373680144218388,
      "grad_norm": 6.586425160827751,
      "learning_rate": 9.620822953030652e-05,
      "loss": 0.8131,
      "num_input_tokens_seen": 7660968,
      "step": 655
    },
    {
      "epoch": 0.33994334277620397,
      "grad_norm": 6.92970378602844,
      "learning_rate": 9.612643863094163e-05,
      "loss": 0.8348,
      "num_input_tokens_seen": 7719448,
      "step": 660
    },
    {
      "epoch": 0.34251867113056916,
      "grad_norm": 8.35228285894448,
      "learning_rate": 9.604381050550038e-05,
      "loss": 0.8289,
      "num_input_tokens_seen": 7777928,
      "step": 665
    },
    {
      "epoch": 0.34509399948493436,
      "grad_norm": 12.894782157020227,
      "learning_rate": 9.596034665373916e-05,
      "loss": 0.7758,
      "num_input_tokens_seen": 7836424,
      "step": 670
    },
    {
      "epoch": 0.3476693278392995,
      "grad_norm": 13.409694970235305,
      "learning_rate": 9.587604859058334e-05,
      "loss": 0.8189,
      "num_input_tokens_seen": 7894904,
      "step": 675
    },
    {
      "epoch": 0.3502446561936647,
      "grad_norm": 8.783205826578632,
      "learning_rate": 9.579091784609984e-05,
      "loss": 0.8221,
      "num_input_tokens_seen": 7953432,
      "step": 680
    },
    {
      "epoch": 0.3528199845480299,
      "grad_norm": 8.368380903445857,
      "learning_rate": 9.570495596546926e-05,
      "loss": 0.8378,
      "num_input_tokens_seen": 8011888,
      "step": 685
    },
    {
      "epoch": 0.35539531290239507,
      "grad_norm": 6.7086179135551145,
      "learning_rate": 9.561816450895793e-05,
      "loss": 0.7529,
      "num_input_tokens_seen": 8070344,
      "step": 690
    },
    {
      "epoch": 0.35797064125676026,
      "grad_norm": 8.476897088436242,
      "learning_rate": 9.55305450518895e-05,
      "loss": 0.7311,
      "num_input_tokens_seen": 8128816,
      "step": 695
    },
    {
      "epoch": 0.3605459696111254,
      "grad_norm": 10.427785019598666,
      "learning_rate": 9.544209918461642e-05,
      "loss": 0.774,
      "num_input_tokens_seen": 8187320,
      "step": 700
    },
    {
      "epoch": 0.3605459696111254,
      "eval_loss": 0.7318872809410095,
      "eval_runtime": 19.6917,
      "eval_samples_per_second": 3.047,
      "eval_steps_per_second": 0.762,
      "num_input_tokens_seen": 8187320,
      "step": 700
    }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
  "num_input_tokens_seen": 8187320,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 540175003287552.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}