{
  "best_metric": 0.8587530820711519,
  "best_model_checkpoint": "relevant_model/run-1/checkpoint-35610",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 35610,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 4.101665655728474e-06,
      "loss": 0.5111,
      "step": 500
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.078498947704027e-06,
      "loss": 0.4216,
      "step": 1000
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.0553322396795785e-06,
      "loss": 0.4711,
      "step": 1500
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.0321655316551306e-06,
      "loss": 0.4472,
      "step": 2000
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.0089988236306835e-06,
      "loss": 0.4429,
      "step": 2500
    },
    {
      "epoch": 0.17,
      "learning_rate": 3.985832115606236e-06,
      "loss": 0.5083,
      "step": 3000
    },
    {
      "epoch": 0.2,
      "learning_rate": 3.962665407581788e-06,
      "loss": 0.469,
      "step": 3500
    },
    {
      "epoch": 0.22,
      "learning_rate": 3.93949869955734e-06,
      "loss": 0.4424,
      "step": 4000
    },
    {
      "epoch": 0.25,
      "learning_rate": 3.916331991532892e-06,
      "loss": 0.472,
      "step": 4500
    },
    {
      "epoch": 0.28,
      "learning_rate": 3.893165283508445e-06,
      "loss": 0.4344,
      "step": 5000
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.869998575483997e-06,
      "loss": 0.4255,
      "step": 5500
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.846831867459549e-06,
      "loss": 0.4293,
      "step": 6000
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.823665159435101e-06,
      "loss": 0.4581,
      "step": 6500
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.8004984514106537e-06,
      "loss": 0.4235,
      "step": 7000
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.777331743386206e-06,
      "loss": 0.4334,
      "step": 7500
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.7541650353617583e-06,
      "loss": 0.3703,
      "step": 8000
    },
    {
      "epoch": 0.48,
      "learning_rate": 3.7309983273373104e-06,
      "loss": 0.4415,
      "step": 8500
    },
    {
      "epoch": 0.51,
      "learning_rate": 3.7078316193128626e-06,
      "loss": 0.4234,
      "step": 9000
    },
    {
      "epoch": 0.53,
      "learning_rate": 3.684664911288415e-06,
      "loss": 0.4184,
      "step": 9500
    },
    {
      "epoch": 0.56,
      "learning_rate": 3.661498203263967e-06,
      "loss": 0.3912,
      "step": 10000
    },
    {
      "epoch": 0.59,
      "learning_rate": 3.6383314952395193e-06,
      "loss": 0.4158,
      "step": 10500
    },
    {
      "epoch": 0.62,
      "learning_rate": 3.615164787215072e-06,
      "loss": 0.3966,
      "step": 11000
    },
    {
      "epoch": 0.65,
      "learning_rate": 3.591998079190624e-06,
      "loss": 0.4171,
      "step": 11500
    },
    {
      "epoch": 0.67,
      "learning_rate": 3.568831371166176e-06,
      "loss": 0.381,
      "step": 12000
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.5456646631417285e-06,
      "loss": 0.3867,
      "step": 12500
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.5224979551172806e-06,
      "loss": 0.4438,
      "step": 13000
    },
    {
      "epoch": 0.76,
      "learning_rate": 3.4993312470928328e-06,
      "loss": 0.3806,
      "step": 13500
    },
    {
      "epoch": 0.79,
      "learning_rate": 3.4761645390683853e-06,
      "loss": 0.391,
      "step": 14000
    },
    {
      "epoch": 0.81,
      "learning_rate": 3.4529978310439374e-06,
      "loss": 0.3947,
      "step": 14500
    },
    {
      "epoch": 0.84,
      "learning_rate": 3.42983112301949e-06,
      "loss": 0.397,
      "step": 15000
    },
    {
      "epoch": 0.87,
      "learning_rate": 3.406664414995042e-06,
      "loss": 0.4048,
      "step": 15500
    },
    {
      "epoch": 0.9,
      "learning_rate": 3.383497706970594e-06,
      "loss": 0.4648,
      "step": 16000
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.3603309989461466e-06,
      "loss": 0.3751,
      "step": 16500
    },
    {
      "epoch": 0.95,
      "learning_rate": 3.3371642909216987e-06,
      "loss": 0.3951,
      "step": 17000
    },
    {
      "epoch": 0.98,
      "learning_rate": 3.313997582897251e-06,
      "loss": 0.4386,
      "step": 17500
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.8416666666666667,
      "eval_loss": 0.3926413059234619,
      "eval_runtime": 7.7212,
      "eval_samples_per_second": 512.357,
      "eval_steps_per_second": 8.03,
      "step": 17805
    },
    {
      "epoch": 1.01,
      "learning_rate": 3.2908308748728034e-06,
      "loss": 0.3639,
      "step": 18000
    },
    {
      "epoch": 1.04,
      "learning_rate": 3.2676641668483555e-06,
      "loss": 0.372,
      "step": 18500
    },
    {
      "epoch": 1.07,
      "learning_rate": 3.2444974588239076e-06,
      "loss": 0.3249,
      "step": 19000
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.22133075079946e-06,
      "loss": 0.3586,
      "step": 19500
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.1981640427750122e-06,
      "loss": 0.3595,
      "step": 20000
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.1749973347505643e-06,
      "loss": 0.3577,
      "step": 20500
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.151830626726117e-06,
      "loss": 0.3505,
      "step": 21000
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.128663918701669e-06,
      "loss": 0.3448,
      "step": 21500
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.105497210677221e-06,
      "loss": 0.3726,
      "step": 22000
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.0823305026527736e-06,
      "loss": 0.3522,
      "step": 22500
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.0591637946283257e-06,
      "loss": 0.3814,
      "step": 23000
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.0359970866038778e-06,
      "loss": 0.3641,
      "step": 23500
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.0128303785794303e-06,
      "loss": 0.343,
      "step": 24000
    },
    {
      "epoch": 1.38,
      "learning_rate": 2.9896636705549824e-06,
      "loss": 0.331,
      "step": 24500
    },
    {
      "epoch": 1.4,
      "learning_rate": 2.966496962530535e-06,
      "loss": 0.3737,
      "step": 25000
    },
    {
      "epoch": 1.43,
      "learning_rate": 2.943330254506087e-06,
      "loss": 0.3437,
      "step": 25500
    },
    {
      "epoch": 1.46,
      "learning_rate": 2.920163546481639e-06,
      "loss": 0.3648,
      "step": 26000
    },
    {
      "epoch": 1.49,
      "learning_rate": 2.8969968384571917e-06,
      "loss": 0.3421,
      "step": 26500
    },
    {
      "epoch": 1.52,
      "learning_rate": 2.8738301304327438e-06,
      "loss": 0.3344,
      "step": 27000
    },
    {
      "epoch": 1.54,
      "learning_rate": 2.850663422408296e-06,
      "loss": 0.3533,
      "step": 27500
    },
    {
      "epoch": 1.57,
      "learning_rate": 2.8274967143838484e-06,
      "loss": 0.371,
      "step": 28000
    },
    {
      "epoch": 1.6,
      "learning_rate": 2.8043300063594005e-06,
      "loss": 0.3526,
      "step": 28500
    },
    {
      "epoch": 1.63,
      "learning_rate": 2.7811632983349526e-06,
      "loss": 0.3539,
      "step": 29000
    },
    {
      "epoch": 1.66,
      "learning_rate": 2.757996590310505e-06,
      "loss": 0.3692,
      "step": 29500
    },
    {
      "epoch": 1.68,
      "learning_rate": 2.7348298822860572e-06,
      "loss": 0.3104,
      "step": 30000
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.7116631742616094e-06,
      "loss": 0.3371,
      "step": 30500
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.688496466237162e-06,
      "loss": 0.3818,
      "step": 31000
    },
    {
      "epoch": 1.77,
      "learning_rate": 2.665329758212714e-06,
      "loss": 0.384,
      "step": 31500
    },
    {
      "epoch": 1.8,
      "learning_rate": 2.6421630501882657e-06,
      "loss": 0.3249,
      "step": 32000
    },
    {
      "epoch": 1.83,
      "learning_rate": 2.6189963421638186e-06,
      "loss": 0.3385,
      "step": 32500
    },
    {
      "epoch": 1.85,
      "learning_rate": 2.5958296341393703e-06,
      "loss": 0.3837,
      "step": 33000
    },
    {
      "epoch": 1.88,
      "learning_rate": 2.5726629261149224e-06,
      "loss": 0.3224,
      "step": 33500
    },
    {
      "epoch": 1.91,
      "learning_rate": 2.549496218090475e-06,
      "loss": 0.3555,
      "step": 34000
    },
    {
      "epoch": 1.94,
      "learning_rate": 2.526329510066027e-06,
      "loss": 0.3555,
      "step": 34500
    },
    {
      "epoch": 1.97,
      "learning_rate": 2.50316280204158e-06,
      "loss": 0.3487,
      "step": 35000
    },
    {
      "epoch": 1.99,
      "learning_rate": 2.4799960940171317e-06,
      "loss": 0.3847,
      "step": 35500
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.8587530820711519,
      "eval_loss": 0.37445908784866333,
      "eval_runtime": 7.6778,
      "eval_samples_per_second": 515.253,
      "eval_steps_per_second": 8.075,
      "step": 35610
    }
  ],
  "logging_steps": 500,
  "max_steps": 89025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 5581990155809868.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 4.124832363752922e-06,
    "num_train_epochs": 5,
    "per_device_train_batch_size": 4,
    "seed": 40
  }
}