{
  "best_metric": 0.8934853076934814,
  "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-150",
  "epoch": 0.12876641771825909,
  "eval_steps": 50,
  "global_step": 250,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025753283543651817,
      "grad_norm": 21.379672872549193,
      "learning_rate": 2.564102564102564e-06,
      "loss": 3.0388,
      "num_input_tokens_seen": 58496,
      "step": 5
    },
    {
      "epoch": 0.0051506567087303634,
      "grad_norm": 20.76117223991023,
      "learning_rate": 5.128205128205128e-06,
      "loss": 2.9831,
      "num_input_tokens_seen": 116960,
      "step": 10
    },
    {
      "epoch": 0.007725985063095545,
      "grad_norm": 22.5213517141881,
      "learning_rate": 7.692307692307694e-06,
      "loss": 2.8696,
      "num_input_tokens_seen": 175448,
      "step": 15
    },
    {
      "epoch": 0.010301313417460727,
      "grad_norm": 20.673071198727328,
      "learning_rate": 1.0256410256410256e-05,
      "loss": 2.6316,
      "num_input_tokens_seen": 233944,
      "step": 20
    },
    {
      "epoch": 0.012876641771825908,
      "grad_norm": 18.902291974538457,
      "learning_rate": 1.282051282051282e-05,
      "loss": 1.9707,
      "num_input_tokens_seen": 292416,
      "step": 25
    },
    {
      "epoch": 0.01545197012619109,
      "grad_norm": 8.05718270484028,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 1.3782,
      "num_input_tokens_seen": 350904,
      "step": 30
    },
    {
      "epoch": 0.018027298480556272,
      "grad_norm": 3.6465275188422344,
      "learning_rate": 1.794871794871795e-05,
      "loss": 1.0628,
      "num_input_tokens_seen": 409384,
      "step": 35
    },
    {
      "epoch": 0.020602626834921454,
      "grad_norm": 4.842154180410959,
      "learning_rate": 2.0512820512820512e-05,
      "loss": 0.9789,
      "num_input_tokens_seen": 467864,
      "step": 40
    },
    {
      "epoch": 0.023177955189286635,
      "grad_norm": 2.6799567517341396,
      "learning_rate": 2.307692307692308e-05,
      "loss": 0.9327,
      "num_input_tokens_seen": 526384,
      "step": 45
    },
    {
      "epoch": 0.025753283543651816,
      "grad_norm": 2.629272923472648,
      "learning_rate": 2.564102564102564e-05,
      "loss": 0.9233,
      "num_input_tokens_seen": 584856,
      "step": 50
    },
    {
      "epoch": 0.025753283543651816,
      "eval_loss": 0.9281821846961975,
      "eval_runtime": 48.2484,
      "eval_samples_per_second": 1.244,
      "eval_steps_per_second": 0.311,
      "num_input_tokens_seen": 584856,
      "step": 50
    },
    {
      "epoch": 0.028328611898016998,
      "grad_norm": 1.2858813899048422,
      "learning_rate": 2.8205128205128207e-05,
      "loss": 0.897,
      "num_input_tokens_seen": 643344,
      "step": 55
    },
    {
      "epoch": 0.03090394025238218,
      "grad_norm": 1.177678811476692,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 0.9169,
      "num_input_tokens_seen": 701808,
      "step": 60
    },
    {
      "epoch": 0.03347926860674736,
      "grad_norm": 1.2077065633120996,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.9019,
      "num_input_tokens_seen": 760304,
      "step": 65
    },
    {
      "epoch": 0.036054596961112545,
      "grad_norm": 1.1560644429967823,
      "learning_rate": 3.58974358974359e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 818760,
      "step": 70
    },
    {
      "epoch": 0.03862992531547772,
      "grad_norm": 0.732907212566054,
      "learning_rate": 3.846153846153846e-05,
      "loss": 0.9073,
      "num_input_tokens_seen": 877256,
      "step": 75
    },
    {
      "epoch": 0.04120525366984291,
      "grad_norm": 0.9616993870089134,
      "learning_rate": 4.1025641025641023e-05,
      "loss": 0.9081,
      "num_input_tokens_seen": 935752,
      "step": 80
    },
    {
      "epoch": 0.043780582024208085,
      "grad_norm": 0.8384067209941525,
      "learning_rate": 4.358974358974359e-05,
      "loss": 0.906,
      "num_input_tokens_seen": 994216,
      "step": 85
    },
    {
      "epoch": 0.04635591037857327,
      "grad_norm": 0.4045876972188175,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.8952,
      "num_input_tokens_seen": 1052704,
      "step": 90
    },
    {
      "epoch": 0.04893123873293845,
      "grad_norm": 0.6062678622593307,
      "learning_rate": 4.871794871794872e-05,
      "loss": 0.8996,
      "num_input_tokens_seen": 1111176,
      "step": 95
    },
    {
      "epoch": 0.05150656708730363,
      "grad_norm": 0.5316642041721752,
      "learning_rate": 5.128205128205128e-05,
      "loss": 0.9024,
      "num_input_tokens_seen": 1169664,
      "step": 100
    },
    {
      "epoch": 0.05150656708730363,
      "eval_loss": 0.911374032497406,
      "eval_runtime": 19.566,
      "eval_samples_per_second": 3.067,
      "eval_steps_per_second": 0.767,
      "num_input_tokens_seen": 1169664,
      "step": 100
    },
    {
      "epoch": 0.05408189544166881,
      "grad_norm": 0.47189419512253006,
      "learning_rate": 5.384615384615385e-05,
      "loss": 0.9142,
      "num_input_tokens_seen": 1228112,
      "step": 105
    },
    {
      "epoch": 0.056657223796033995,
      "grad_norm": 0.4885000351277984,
      "learning_rate": 5.6410256410256414e-05,
      "loss": 0.9054,
      "num_input_tokens_seen": 1286608,
      "step": 110
    },
    {
      "epoch": 0.05923255215039917,
      "grad_norm": 1.0232694160031948,
      "learning_rate": 5.897435897435898e-05,
      "loss": 0.8997,
      "num_input_tokens_seen": 1345072,
      "step": 115
    },
    {
      "epoch": 0.06180788050476436,
      "grad_norm": 0.6656697152989639,
      "learning_rate": 6.153846153846155e-05,
      "loss": 0.8988,
      "num_input_tokens_seen": 1403544,
      "step": 120
    },
    {
      "epoch": 0.06438320885912954,
      "grad_norm": 0.6273175951192728,
      "learning_rate": 6.410256410256412e-05,
      "loss": 0.9087,
      "num_input_tokens_seen": 1462024,
      "step": 125
    },
    {
      "epoch": 0.06695853721349472,
      "grad_norm": 0.707089894516894,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.8961,
      "num_input_tokens_seen": 1520528,
      "step": 130
    },
    {
      "epoch": 0.0695338655678599,
      "grad_norm": 0.4633668497982238,
      "learning_rate": 6.923076923076924e-05,
      "loss": 0.903,
      "num_input_tokens_seen": 1579024,
      "step": 135
    },
    {
      "epoch": 0.07210919392222509,
      "grad_norm": 0.5052802522069755,
      "learning_rate": 7.17948717948718e-05,
      "loss": 0.899,
      "num_input_tokens_seen": 1637504,
      "step": 140
    },
    {
      "epoch": 0.07468452227659027,
      "grad_norm": 0.7577940010204668,
      "learning_rate": 7.435897435897436e-05,
      "loss": 0.9071,
      "num_input_tokens_seen": 1696024,
      "step": 145
    },
    {
      "epoch": 0.07725985063095545,
      "grad_norm": 0.5812587904219971,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.9045,
      "num_input_tokens_seen": 1754512,
      "step": 150
    },
    {
      "epoch": 0.07725985063095545,
      "eval_loss": 0.8934853076934814,
      "eval_runtime": 19.8765,
      "eval_samples_per_second": 3.019,
      "eval_steps_per_second": 0.755,
      "num_input_tokens_seen": 1754512,
      "step": 150
    },
    {
      "epoch": 0.07983517898532062,
      "grad_norm": 0.5167982536583405,
      "learning_rate": 7.948717948717948e-05,
      "loss": 0.8992,
      "num_input_tokens_seen": 1812976,
      "step": 155
    },
    {
      "epoch": 0.08241050733968582,
      "grad_norm": 0.4971816797735092,
      "learning_rate": 8.205128205128205e-05,
      "loss": 0.8965,
      "num_input_tokens_seen": 1871464,
      "step": 160
    },
    {
      "epoch": 0.08498583569405099,
      "grad_norm": 0.6561749633642688,
      "learning_rate": 8.461538461538461e-05,
      "loss": 0.9094,
      "num_input_tokens_seen": 1929928,
      "step": 165
    },
    {
      "epoch": 0.08756116404841617,
      "grad_norm": 0.5010857314708574,
      "learning_rate": 8.717948717948718e-05,
      "loss": 0.903,
      "num_input_tokens_seen": 1988432,
      "step": 170
    },
    {
      "epoch": 0.09013649240278135,
      "grad_norm": 0.48794512034251364,
      "learning_rate": 8.974358974358975e-05,
      "loss": 0.902,
      "num_input_tokens_seen": 2046920,
      "step": 175
    },
    {
      "epoch": 0.09271182075714654,
      "grad_norm": 0.4040014684262414,
      "learning_rate": 9.230769230769232e-05,
      "loss": 0.9006,
      "num_input_tokens_seen": 2105392,
      "step": 180
    },
    {
      "epoch": 0.09528714911151172,
      "grad_norm": 0.5312840597942438,
      "learning_rate": 9.487179487179487e-05,
      "loss": 0.9042,
      "num_input_tokens_seen": 2163872,
      "step": 185
    },
    {
      "epoch": 0.0978624774658769,
      "grad_norm": 0.3535119366494406,
      "learning_rate": 9.743589743589744e-05,
      "loss": 0.9096,
      "num_input_tokens_seen": 2222352,
      "step": 190
    },
    {
      "epoch": 0.10043780582024209,
      "grad_norm": 0.30590378285024006,
      "learning_rate": 0.0001,
      "loss": 0.9037,
      "num_input_tokens_seen": 2280800,
      "step": 195
    },
    {
      "epoch": 0.10301313417460727,
      "grad_norm": 0.3055264226667786,
      "learning_rate": 9.999954623308172e-05,
      "loss": 0.904,
      "num_input_tokens_seen": 2339304,
      "step": 200
    },
    {
      "epoch": 0.10301313417460727,
      "eval_loss": 0.8980139493942261,
      "eval_runtime": 19.316,
      "eval_samples_per_second": 3.106,
      "eval_steps_per_second": 0.777,
      "num_input_tokens_seen": 2339304,
      "step": 200
    },
    {
      "epoch": 0.10558846252897244,
      "grad_norm": 0.8828178200664915,
      "learning_rate": 9.999818494056303e-05,
      "loss": 0.9029,
      "num_input_tokens_seen": 2397808,
      "step": 205
    },
    {
      "epoch": 0.10816379088333762,
      "grad_norm": 0.4308314655260644,
      "learning_rate": 9.99959161471523e-05,
      "loss": 0.9005,
      "num_input_tokens_seen": 2456288,
      "step": 210
    },
    {
      "epoch": 0.11073911923770281,
      "grad_norm": 0.4482188659643584,
      "learning_rate": 9.99927398940297e-05,
      "loss": 0.9096,
      "num_input_tokens_seen": 2514784,
      "step": 215
    },
    {
      "epoch": 0.11331444759206799,
      "grad_norm": 0.49014741417238206,
      "learning_rate": 9.998865623884635e-05,
      "loss": 0.9036,
      "num_input_tokens_seen": 2573240,
      "step": 220
    },
    {
      "epoch": 0.11588977594643317,
      "grad_norm": 0.2774850522391394,
      "learning_rate": 9.998366525572336e-05,
      "loss": 0.901,
      "num_input_tokens_seen": 2631672,
      "step": 225
    },
    {
      "epoch": 0.11846510430079835,
      "grad_norm": 0.49390873315018263,
      "learning_rate": 9.997776703525046e-05,
      "loss": 0.9018,
      "num_input_tokens_seen": 2690112,
      "step": 230
    },
    {
      "epoch": 0.12104043265516354,
      "grad_norm": 0.3284306399258997,
      "learning_rate": 9.997096168448432e-05,
      "loss": 0.8934,
      "num_input_tokens_seen": 2748608,
      "step": 235
    },
    {
      "epoch": 0.12361576100952872,
      "grad_norm": 0.7182680023403506,
      "learning_rate": 9.996324932694668e-05,
      "loss": 0.8876,
      "num_input_tokens_seen": 2807080,
      "step": 240
    },
    {
      "epoch": 0.1261910893638939,
      "grad_norm": 0.7305499346526235,
      "learning_rate": 9.995463010262206e-05,
      "loss": 0.9084,
      "num_input_tokens_seen": 2865520,
      "step": 245
    },
    {
      "epoch": 0.12876641771825909,
      "grad_norm": 0.5773211522908436,
      "learning_rate": 9.994510416795519e-05,
      "loss": 0.9106,
      "num_input_tokens_seen": 2924016,
      "step": 250
    },
    {
      "epoch": 0.12876641771825909,
      "eval_loss": 0.8958488702774048,
      "eval_runtime": 19.507,
      "eval_samples_per_second": 3.076,
      "eval_steps_per_second": 0.769,
      "num_input_tokens_seen": 2924016,
      "step": 250
    }
  ],
  "logging_steps": 5,
  "max_steps": 3882,
  "num_input_tokens_seen": 2924016,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 192878045298688.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}