{
"best_metric": 0.8932263255119324,
"best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4/lora/sft/checkpoint-300",
"epoch": 0.20602626834921453,
"eval_steps": 50,
"global_step": 400,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0025753283543651817,
"grad_norm": 21.379672872549193,
"learning_rate": 2.564102564102564e-06,
"loss": 3.0388,
"num_input_tokens_seen": 58496,
"step": 5
},
{
"epoch": 0.0051506567087303634,
"grad_norm": 20.76117223991023,
"learning_rate": 5.128205128205128e-06,
"loss": 2.9831,
"num_input_tokens_seen": 116960,
"step": 10
},
{
"epoch": 0.007725985063095545,
"grad_norm": 22.5213517141881,
"learning_rate": 7.692307692307694e-06,
"loss": 2.8696,
"num_input_tokens_seen": 175448,
"step": 15
},
{
"epoch": 0.010301313417460727,
"grad_norm": 20.673071198727328,
"learning_rate": 1.0256410256410256e-05,
"loss": 2.6316,
"num_input_tokens_seen": 233944,
"step": 20
},
{
"epoch": 0.012876641771825908,
"grad_norm": 18.902291974538457,
"learning_rate": 1.282051282051282e-05,
"loss": 1.9707,
"num_input_tokens_seen": 292416,
"step": 25
},
{
"epoch": 0.01545197012619109,
"grad_norm": 8.05718270484028,
"learning_rate": 1.5384615384615387e-05,
"loss": 1.3782,
"num_input_tokens_seen": 350904,
"step": 30
},
{
"epoch": 0.018027298480556272,
"grad_norm": 3.6465275188422344,
"learning_rate": 1.794871794871795e-05,
"loss": 1.0628,
"num_input_tokens_seen": 409384,
"step": 35
},
{
"epoch": 0.020602626834921454,
"grad_norm": 4.842154180410959,
"learning_rate": 2.0512820512820512e-05,
"loss": 0.9789,
"num_input_tokens_seen": 467864,
"step": 40
},
{
"epoch": 0.023177955189286635,
"grad_norm": 2.6799567517341396,
"learning_rate": 2.307692307692308e-05,
"loss": 0.9327,
"num_input_tokens_seen": 526384,
"step": 45
},
{
"epoch": 0.025753283543651816,
"grad_norm": 2.629272923472648,
"learning_rate": 2.564102564102564e-05,
"loss": 0.9233,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.025753283543651816,
"eval_loss": 0.9281821846961975,
"eval_runtime": 48.2484,
"eval_samples_per_second": 1.244,
"eval_steps_per_second": 0.311,
"num_input_tokens_seen": 584856,
"step": 50
},
{
"epoch": 0.028328611898016998,
"grad_norm": 1.2858813899048422,
"learning_rate": 2.8205128205128207e-05,
"loss": 0.897,
"num_input_tokens_seen": 643344,
"step": 55
},
{
"epoch": 0.03090394025238218,
"grad_norm": 1.177678811476692,
"learning_rate": 3.0769230769230774e-05,
"loss": 0.9169,
"num_input_tokens_seen": 701808,
"step": 60
},
{
"epoch": 0.03347926860674736,
"grad_norm": 1.2077065633120996,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.9019,
"num_input_tokens_seen": 760304,
"step": 65
},
{
"epoch": 0.036054596961112545,
"grad_norm": 1.1560644429967823,
"learning_rate": 3.58974358974359e-05,
"loss": 0.8996,
"num_input_tokens_seen": 818760,
"step": 70
},
{
"epoch": 0.03862992531547772,
"grad_norm": 0.732907212566054,
"learning_rate": 3.846153846153846e-05,
"loss": 0.9073,
"num_input_tokens_seen": 877256,
"step": 75
},
{
"epoch": 0.04120525366984291,
"grad_norm": 0.9616993870089134,
"learning_rate": 4.1025641025641023e-05,
"loss": 0.9081,
"num_input_tokens_seen": 935752,
"step": 80
},
{
"epoch": 0.043780582024208085,
"grad_norm": 0.8384067209941525,
"learning_rate": 4.358974358974359e-05,
"loss": 0.906,
"num_input_tokens_seen": 994216,
"step": 85
},
{
"epoch": 0.04635591037857327,
"grad_norm": 0.4045876972188175,
"learning_rate": 4.615384615384616e-05,
"loss": 0.8952,
"num_input_tokens_seen": 1052704,
"step": 90
},
{
"epoch": 0.04893123873293845,
"grad_norm": 0.6062678622593307,
"learning_rate": 4.871794871794872e-05,
"loss": 0.8996,
"num_input_tokens_seen": 1111176,
"step": 95
},
{
"epoch": 0.05150656708730363,
"grad_norm": 0.5316642041721752,
"learning_rate": 5.128205128205128e-05,
"loss": 0.9024,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05150656708730363,
"eval_loss": 0.911374032497406,
"eval_runtime": 19.566,
"eval_samples_per_second": 3.067,
"eval_steps_per_second": 0.767,
"num_input_tokens_seen": 1169664,
"step": 100
},
{
"epoch": 0.05408189544166881,
"grad_norm": 0.47189419512253006,
"learning_rate": 5.384615384615385e-05,
"loss": 0.9142,
"num_input_tokens_seen": 1228112,
"step": 105
},
{
"epoch": 0.056657223796033995,
"grad_norm": 0.4885000351277984,
"learning_rate": 5.6410256410256414e-05,
"loss": 0.9054,
"num_input_tokens_seen": 1286608,
"step": 110
},
{
"epoch": 0.05923255215039917,
"grad_norm": 1.0232694160031948,
"learning_rate": 5.897435897435898e-05,
"loss": 0.8997,
"num_input_tokens_seen": 1345072,
"step": 115
},
{
"epoch": 0.06180788050476436,
"grad_norm": 0.6656697152989639,
"learning_rate": 6.153846153846155e-05,
"loss": 0.8988,
"num_input_tokens_seen": 1403544,
"step": 120
},
{
"epoch": 0.06438320885912954,
"grad_norm": 0.6273175951192728,
"learning_rate": 6.410256410256412e-05,
"loss": 0.9087,
"num_input_tokens_seen": 1462024,
"step": 125
},
{
"epoch": 0.06695853721349472,
"grad_norm": 0.707089894516894,
"learning_rate": 6.666666666666667e-05,
"loss": 0.8961,
"num_input_tokens_seen": 1520528,
"step": 130
},
{
"epoch": 0.0695338655678599,
"grad_norm": 0.4633668497982238,
"learning_rate": 6.923076923076924e-05,
"loss": 0.903,
"num_input_tokens_seen": 1579024,
"step": 135
},
{
"epoch": 0.07210919392222509,
"grad_norm": 0.5052802522069755,
"learning_rate": 7.17948717948718e-05,
"loss": 0.899,
"num_input_tokens_seen": 1637504,
"step": 140
},
{
"epoch": 0.07468452227659027,
"grad_norm": 0.7577940010204668,
"learning_rate": 7.435897435897436e-05,
"loss": 0.9071,
"num_input_tokens_seen": 1696024,
"step": 145
},
{
"epoch": 0.07725985063095545,
"grad_norm": 0.5812587904219971,
"learning_rate": 7.692307692307693e-05,
"loss": 0.9045,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07725985063095545,
"eval_loss": 0.8934853076934814,
"eval_runtime": 19.8765,
"eval_samples_per_second": 3.019,
"eval_steps_per_second": 0.755,
"num_input_tokens_seen": 1754512,
"step": 150
},
{
"epoch": 0.07983517898532062,
"grad_norm": 0.5167982536583405,
"learning_rate": 7.948717948717948e-05,
"loss": 0.8992,
"num_input_tokens_seen": 1812976,
"step": 155
},
{
"epoch": 0.08241050733968582,
"grad_norm": 0.4971816797735092,
"learning_rate": 8.205128205128205e-05,
"loss": 0.8965,
"num_input_tokens_seen": 1871464,
"step": 160
},
{
"epoch": 0.08498583569405099,
"grad_norm": 0.6561749633642688,
"learning_rate": 8.461538461538461e-05,
"loss": 0.9094,
"num_input_tokens_seen": 1929928,
"step": 165
},
{
"epoch": 0.08756116404841617,
"grad_norm": 0.5010857314708574,
"learning_rate": 8.717948717948718e-05,
"loss": 0.903,
"num_input_tokens_seen": 1988432,
"step": 170
},
{
"epoch": 0.09013649240278135,
"grad_norm": 0.48794512034251364,
"learning_rate": 8.974358974358975e-05,
"loss": 0.902,
"num_input_tokens_seen": 2046920,
"step": 175
},
{
"epoch": 0.09271182075714654,
"grad_norm": 0.4040014684262414,
"learning_rate": 9.230769230769232e-05,
"loss": 0.9006,
"num_input_tokens_seen": 2105392,
"step": 180
},
{
"epoch": 0.09528714911151172,
"grad_norm": 0.5312840597942438,
"learning_rate": 9.487179487179487e-05,
"loss": 0.9042,
"num_input_tokens_seen": 2163872,
"step": 185
},
{
"epoch": 0.0978624774658769,
"grad_norm": 0.3535119366494406,
"learning_rate": 9.743589743589744e-05,
"loss": 0.9096,
"num_input_tokens_seen": 2222352,
"step": 190
},
{
"epoch": 0.10043780582024209,
"grad_norm": 0.30590378285024006,
"learning_rate": 0.0001,
"loss": 0.9037,
"num_input_tokens_seen": 2280800,
"step": 195
},
{
"epoch": 0.10301313417460727,
"grad_norm": 0.3055264226667786,
"learning_rate": 9.999954623308172e-05,
"loss": 0.904,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10301313417460727,
"eval_loss": 0.8980139493942261,
"eval_runtime": 19.316,
"eval_samples_per_second": 3.106,
"eval_steps_per_second": 0.777,
"num_input_tokens_seen": 2339304,
"step": 200
},
{
"epoch": 0.10558846252897244,
"grad_norm": 0.8828178200664915,
"learning_rate": 9.999818494056303e-05,
"loss": 0.9029,
"num_input_tokens_seen": 2397808,
"step": 205
},
{
"epoch": 0.10816379088333762,
"grad_norm": 0.4308314655260644,
"learning_rate": 9.99959161471523e-05,
"loss": 0.9005,
"num_input_tokens_seen": 2456288,
"step": 210
},
{
"epoch": 0.11073911923770281,
"grad_norm": 0.4482188659643584,
"learning_rate": 9.99927398940297e-05,
"loss": 0.9096,
"num_input_tokens_seen": 2514784,
"step": 215
},
{
"epoch": 0.11331444759206799,
"grad_norm": 0.49014741417238206,
"learning_rate": 9.998865623884635e-05,
"loss": 0.9036,
"num_input_tokens_seen": 2573240,
"step": 220
},
{
"epoch": 0.11588977594643317,
"grad_norm": 0.2774850522391394,
"learning_rate": 9.998366525572336e-05,
"loss": 0.901,
"num_input_tokens_seen": 2631672,
"step": 225
},
{
"epoch": 0.11846510430079835,
"grad_norm": 0.49390873315018263,
"learning_rate": 9.997776703525046e-05,
"loss": 0.9018,
"num_input_tokens_seen": 2690112,
"step": 230
},
{
"epoch": 0.12104043265516354,
"grad_norm": 0.3284306399258997,
"learning_rate": 9.997096168448432e-05,
"loss": 0.8934,
"num_input_tokens_seen": 2748608,
"step": 235
},
{
"epoch": 0.12361576100952872,
"grad_norm": 0.7182680023403506,
"learning_rate": 9.996324932694668e-05,
"loss": 0.8876,
"num_input_tokens_seen": 2807080,
"step": 240
},
{
"epoch": 0.1261910893638939,
"grad_norm": 0.7305499346526235,
"learning_rate": 9.995463010262206e-05,
"loss": 0.9084,
"num_input_tokens_seen": 2865520,
"step": 245
},
{
"epoch": 0.12876641771825909,
"grad_norm": 0.5773211522908436,
"learning_rate": 9.994510416795519e-05,
"loss": 0.9106,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.12876641771825909,
"eval_loss": 0.8958488702774048,
"eval_runtime": 19.507,
"eval_samples_per_second": 3.076,
"eval_steps_per_second": 0.769,
"num_input_tokens_seen": 2924016,
"step": 250
},
{
"epoch": 0.13134174607262425,
"grad_norm": 0.44306061088962184,
"learning_rate": 9.993467169584824e-05,
"loss": 0.9012,
"num_input_tokens_seen": 2982520,
"step": 255
},
{
"epoch": 0.13391707442698944,
"grad_norm": 0.7851687259125024,
"learning_rate": 9.992333287565765e-05,
"loss": 0.9069,
"num_input_tokens_seen": 3041008,
"step": 260
},
{
"epoch": 0.13649240278135463,
"grad_norm": 0.5705235716557865,
"learning_rate": 9.991108791319066e-05,
"loss": 0.8918,
"num_input_tokens_seen": 3099464,
"step": 265
},
{
"epoch": 0.1390677311357198,
"grad_norm": 0.6202972137914602,
"learning_rate": 9.989793703070163e-05,
"loss": 0.8996,
"num_input_tokens_seen": 3157944,
"step": 270
},
{
"epoch": 0.141643059490085,
"grad_norm": 0.7583768377175583,
"learning_rate": 9.988388046688799e-05,
"loss": 0.9009,
"num_input_tokens_seen": 3216448,
"step": 275
},
{
"epoch": 0.14421838784445018,
"grad_norm": 0.7180540444266581,
"learning_rate": 9.986891847688587e-05,
"loss": 0.9059,
"num_input_tokens_seen": 3274928,
"step": 280
},
{
"epoch": 0.14679371619881534,
"grad_norm": 0.4173225854654158,
"learning_rate": 9.985305133226553e-05,
"loss": 0.8939,
"num_input_tokens_seen": 3333408,
"step": 285
},
{
"epoch": 0.14936904455318054,
"grad_norm": 0.7825855108807762,
"learning_rate": 9.983627932102638e-05,
"loss": 0.8899,
"num_input_tokens_seen": 3391896,
"step": 290
},
{
"epoch": 0.1519443729075457,
"grad_norm": 0.4850249272160501,
"learning_rate": 9.981860274759173e-05,
"loss": 0.9092,
"num_input_tokens_seen": 3450392,
"step": 295
},
{
"epoch": 0.1545197012619109,
"grad_norm": 0.3325682106309916,
"learning_rate": 9.980002193280342e-05,
"loss": 0.8901,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.1545197012619109,
"eval_loss": 0.8932263255119324,
"eval_runtime": 19.7633,
"eval_samples_per_second": 3.036,
"eval_steps_per_second": 0.759,
"num_input_tokens_seen": 3508888,
"step": 300
},
{
"epoch": 0.15709502961627608,
"grad_norm": 0.36562722920113416,
"learning_rate": 9.978053721391578e-05,
"loss": 0.9042,
"num_input_tokens_seen": 3567368,
"step": 305
},
{
"epoch": 0.15967035797064125,
"grad_norm": 0.3765491511973325,
"learning_rate": 9.976014894458963e-05,
"loss": 0.9007,
"num_input_tokens_seen": 3625848,
"step": 310
},
{
"epoch": 0.16224568632500644,
"grad_norm": 0.5264420727347517,
"learning_rate": 9.973885749488589e-05,
"loss": 0.9036,
"num_input_tokens_seen": 3684336,
"step": 315
},
{
"epoch": 0.16482101467937163,
"grad_norm": 0.24680747784235688,
"learning_rate": 9.971666325125874e-05,
"loss": 0.8936,
"num_input_tokens_seen": 3742800,
"step": 320
},
{
"epoch": 0.1673963430337368,
"grad_norm": 0.4982571051665039,
"learning_rate": 9.969356661654876e-05,
"loss": 0.8989,
"num_input_tokens_seen": 3801280,
"step": 325
},
{
"epoch": 0.16997167138810199,
"grad_norm": 0.49943012602572584,
"learning_rate": 9.966956800997546e-05,
"loss": 0.8983,
"num_input_tokens_seen": 3859792,
"step": 330
},
{
"epoch": 0.17254699974246718,
"grad_norm": 0.37381050353079964,
"learning_rate": 9.964466786712984e-05,
"loss": 0.9038,
"num_input_tokens_seen": 3918272,
"step": 335
},
{
"epoch": 0.17512232809683234,
"grad_norm": 0.7501484170811903,
"learning_rate": 9.961886663996629e-05,
"loss": 0.8947,
"num_input_tokens_seen": 3976760,
"step": 340
},
{
"epoch": 0.17769765645119753,
"grad_norm": 0.5623847203835772,
"learning_rate": 9.959216479679458e-05,
"loss": 0.9179,
"num_input_tokens_seen": 4035240,
"step": 345
},
{
"epoch": 0.1802729848055627,
"grad_norm": 0.34381878607605765,
"learning_rate": 9.956456282227122e-05,
"loss": 0.9059,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1802729848055627,
"eval_loss": 0.8960411548614502,
"eval_runtime": 20.0734,
"eval_samples_per_second": 2.989,
"eval_steps_per_second": 0.747,
"num_input_tokens_seen": 4093688,
"step": 350
},
{
"epoch": 0.1828483131599279,
"grad_norm": 0.5786050277605497,
"learning_rate": 9.953606121739074e-05,
"loss": 0.8795,
"num_input_tokens_seen": 4152160,
"step": 355
},
{
"epoch": 0.18542364151429308,
"grad_norm": 1.3309062985901938,
"learning_rate": 9.950666049947653e-05,
"loss": 0.9143,
"num_input_tokens_seen": 4210648,
"step": 360
},
{
"epoch": 0.18799896986865824,
"grad_norm": 0.5010297124723248,
"learning_rate": 9.947636120217155e-05,
"loss": 0.9164,
"num_input_tokens_seen": 4269136,
"step": 365
},
{
"epoch": 0.19057429822302344,
"grad_norm": 0.7250841632803818,
"learning_rate": 9.944516387542852e-05,
"loss": 0.9061,
"num_input_tokens_seen": 4327664,
"step": 370
},
{
"epoch": 0.19314962657738863,
"grad_norm": 0.4506280653909736,
"learning_rate": 9.941306908550005e-05,
"loss": 0.8873,
"num_input_tokens_seen": 4386120,
"step": 375
},
{
"epoch": 0.1957249549317538,
"grad_norm": 0.6467175538087946,
"learning_rate": 9.938007741492828e-05,
"loss": 0.9038,
"num_input_tokens_seen": 4444560,
"step": 380
},
{
"epoch": 0.19830028328611898,
"grad_norm": 0.6742654767461002,
"learning_rate": 9.934618946253437e-05,
"loss": 0.9116,
"num_input_tokens_seen": 4503016,
"step": 385
},
{
"epoch": 0.20087561164048418,
"grad_norm": 0.418516338281364,
"learning_rate": 9.931140584340761e-05,
"loss": 0.9023,
"num_input_tokens_seen": 4561496,
"step": 390
},
{
"epoch": 0.20345093999484934,
"grad_norm": 0.5738919793445436,
"learning_rate": 9.92757271888942e-05,
"loss": 0.8901,
"num_input_tokens_seen": 4619944,
"step": 395
},
{
"epoch": 0.20602626834921453,
"grad_norm": 0.3600152815973316,
"learning_rate": 9.923915414658587e-05,
"loss": 0.9033,
"num_input_tokens_seen": 4678384,
"step": 400
},
{
"epoch": 0.20602626834921453,
"eval_loss": 0.906301736831665,
"eval_runtime": 19.8079,
"eval_samples_per_second": 3.029,
"eval_steps_per_second": 0.757,
"num_input_tokens_seen": 4678384,
"step": 400
}
],
"logging_steps": 5,
"max_steps": 3882,
"num_input_tokens_seen": 4678384,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 308638862540800.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}