{
  "best_metric": 33.144610036959996,
  "best_model_checkpoint": "/root/turkic_qa/en_uzn_models/orig_uzn_roberta_base_model/checkpoint-5814",
  "epoch": 10.0,
  "eval_steps": 500,
  "global_step": 6460,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.0,
      "step": 646,
      "train_exact_match": 6.993006993006993,
      "train_f1": 13.61593527592794,
      "train_runtime": 10.7947,
      "train_samples_per_second": 109.498,
      "train_steps_per_second": 3.983
    },
    {
      "epoch": 1.0,
      "grad_norm": 12.138856887817383,
      "learning_rate": 5e-06,
      "loss": 4.6327,
      "step": 646
    },
    {
      "epoch": 1.0,
      "eval_exact_match": 6.59375,
      "eval_f1": 13.607989660328585,
      "eval_runtime": 35.2774,
      "eval_samples_per_second": 107.803,
      "eval_steps_per_second": 3.855,
      "step": 646
    },
    {
      "epoch": 2.0,
      "step": 1292,
      "train_exact_match": 13.986013986013987,
      "train_f1": 22.14711239214366,
      "train_runtime": 10.8509,
      "train_samples_per_second": 109.669,
      "train_steps_per_second": 3.963
    },
    {
      "epoch": 2.0,
      "grad_norm": 15.265068054199219,
      "learning_rate": 1e-05,
      "loss": 3.5453,
      "step": 1292
    },
    {
      "epoch": 2.0,
      "eval_exact_match": 11.40625,
      "eval_f1": 18.47470115180993,
      "eval_runtime": 34.4859,
      "eval_samples_per_second": 110.277,
      "eval_steps_per_second": 3.944,
      "step": 1292
    },
    {
      "epoch": 3.0,
      "step": 1938,
      "train_exact_match": 23.276723276723278,
      "train_f1": 32.096631211616916,
      "train_runtime": 11.0171,
      "train_samples_per_second": 110.102,
      "train_steps_per_second": 3.994
    },
    {
      "epoch": 3.0,
      "grad_norm": 14.558008193969727,
      "learning_rate": 8.750000000000001e-06,
      "loss": 3.1006,
      "step": 1938
    },
    {
      "epoch": 3.0,
      "eval_exact_match": 17.84375,
      "eval_f1": 25.580935818007237,
      "eval_runtime": 34.7591,
      "eval_samples_per_second": 109.41,
      "eval_steps_per_second": 3.913,
      "step": 1938
    },
    {
      "epoch": 4.0,
      "step": 2584,
      "train_exact_match": 31.16883116883117,
      "train_f1": 39.17139975564133,
      "train_runtime": 11.0165,
      "train_samples_per_second": 109.381,
      "train_steps_per_second": 3.994
    },
    {
      "epoch": 4.0,
      "grad_norm": 17.505403518676758,
      "learning_rate": 7.500000000000001e-06,
      "loss": 2.7652,
      "step": 2584
    },
    {
      "epoch": 4.0,
      "eval_exact_match": 19.40625,
      "eval_f1": 27.64189036545174,
      "eval_runtime": 34.3705,
      "eval_samples_per_second": 110.647,
      "eval_steps_per_second": 3.957,
      "step": 2584
    },
    {
      "epoch": 5.0,
      "step": 3230,
      "train_exact_match": 36.663336663336665,
      "train_f1": 46.09688684971856,
      "train_runtime": 10.8186,
      "train_samples_per_second": 109.349,
      "train_steps_per_second": 3.975
    },
    {
      "epoch": 5.0,
      "grad_norm": 20.703981399536133,
      "learning_rate": 6.25e-06,
      "loss": 2.5139,
      "step": 3230
    },
    {
      "epoch": 5.0,
      "eval_exact_match": 20.4375,
      "eval_f1": 29.34032966285343,
      "eval_runtime": 34.5691,
      "eval_samples_per_second": 110.011,
      "eval_steps_per_second": 3.934,
      "step": 3230
    },
    {
      "epoch": 6.0,
      "step": 3876,
      "train_exact_match": 41.658341658341655,
      "train_f1": 50.86006517307416,
      "train_runtime": 10.7595,
      "train_samples_per_second": 110.042,
      "train_steps_per_second": 3.996
    },
    {
      "epoch": 6.0,
      "grad_norm": 20.977209091186523,
      "learning_rate": 5e-06,
      "loss": 2.3079,
      "step": 3876
    },
    {
      "epoch": 6.0,
      "eval_exact_match": 21.5,
      "eval_f1": 30.58201123722236,
      "eval_runtime": 34.5751,
      "eval_samples_per_second": 109.992,
      "eval_steps_per_second": 3.933,
      "step": 3876
    },
    {
      "epoch": 7.0,
      "step": 4522,
      "train_exact_match": 47.45254745254745,
      "train_f1": 56.694415842055,
      "train_runtime": 10.7135,
      "train_samples_per_second": 109.021,
      "train_steps_per_second": 3.92
    },
    {
      "epoch": 7.0,
      "grad_norm": 22.69150161743164,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 2.111,
      "step": 4522
    },
    {
      "epoch": 7.0,
      "eval_exact_match": 22.03125,
      "eval_f1": 31.858315646855917,
      "eval_runtime": 34.9801,
      "eval_samples_per_second": 108.719,
      "eval_steps_per_second": 3.888,
      "step": 4522
    },
    {
      "epoch": 8.0,
      "step": 5168,
      "train_exact_match": 50.74925074925075,
      "train_f1": 60.56020993278913,
      "train_runtime": 10.9489,
      "train_samples_per_second": 108.595,
      "train_steps_per_second": 3.927
    },
    {
      "epoch": 8.0,
      "grad_norm": 27.088655471801758,
      "learning_rate": 2.5e-06,
      "loss": 1.9761,
      "step": 5168
    },
    {
      "epoch": 8.0,
      "eval_exact_match": 22.71875,
      "eval_f1": 32.80883643467974,
      "eval_runtime": 35.0553,
      "eval_samples_per_second": 108.486,
      "eval_steps_per_second": 3.88,
      "step": 5168
    },
    {
      "epoch": 9.0,
      "step": 5814,
      "train_exact_match": 53.84615384615385,
      "train_f1": 63.610163638389785,
      "train_runtime": 11.1443,
      "train_samples_per_second": 108.755,
      "train_steps_per_second": 3.948
    },
    {
      "epoch": 9.0,
      "grad_norm": 33.555702209472656,
      "learning_rate": 1.25e-06,
      "loss": 1.8691,
      "step": 5814
    },
    {
      "epoch": 9.0,
      "eval_exact_match": 22.8125,
      "eval_f1": 33.144610036959996,
      "eval_runtime": 35.2983,
      "eval_samples_per_second": 107.739,
      "eval_steps_per_second": 3.853,
      "step": 5814
    },
    {
      "epoch": 10.0,
      "step": 6460,
      "train_exact_match": 55.344655344655344,
      "train_f1": 64.42442903231023,
      "train_runtime": 10.756,
      "train_samples_per_second": 110.636,
      "train_steps_per_second": 3.998
    },
    {
      "epoch": 10.0,
      "grad_norm": 31.045358657836914,
      "learning_rate": 0.0,
      "loss": 1.8088,
      "step": 6460
    },
    {
      "epoch": 10.0,
      "eval_exact_match": 22.84375,
      "eval_f1": 33.05957357823847,
      "eval_runtime": 34.2632,
      "eval_samples_per_second": 110.994,
      "eval_steps_per_second": 3.969,
      "step": 6460
    },
    {
      "epoch": 10.0,
      "step": 6460,
      "total_flos": 1.770872117833728e+16,
      "train_loss": 2.663083348702351,
      "train_runtime": 2359.5642,
      "train_samples_per_second": 76.59,
      "train_steps_per_second": 2.738
    }
  ],
  "logging_steps": 500,
  "max_steps": 6460,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1.770872117833728e+16,
  "train_batch_size": 28,
  "trial_name": null,
  "trial_params": null
}