{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 79.98765432098766,
  "global_step": 4800,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 1.66,
      "learning_rate": 3.7125e-06,
      "loss": 14.5868,
      "step": 100
    },
    {
      "epoch": 3.33,
      "learning_rate": 7.4625e-06,
      "loss": 6.8756,
      "step": 200
    },
    {
      "epoch": 4.99,
      "learning_rate": 1.1212499999999998e-05,
      "loss": 4.2978,
      "step": 300
    },
    {
      "epoch": 6.66,
      "learning_rate": 1.49625e-05,
      "loss": 3.6126,
      "step": 400
    },
    {
      "epoch": 8.33,
      "learning_rate": 1.8712499999999997e-05,
      "loss": 3.1674,
      "step": 500
    },
    {
      "epoch": 8.33,
      "eval_loss": 3.0295047760009766,
      "eval_runtime": 32.2697,
      "eval_samples_per_second": 27.642,
      "eval_steps_per_second": 3.471,
      "eval_wer": 1.0,
      "step": 500
    },
    {
      "epoch": 9.99,
      "learning_rate": 2.2462499999999997e-05,
      "loss": 2.8989,
      "step": 600
    },
    {
      "epoch": 11.66,
      "learning_rate": 2.6212499999999997e-05,
      "loss": 2.8318,
      "step": 700
    },
    {
      "epoch": 13.33,
      "learning_rate": 2.99625e-05,
      "loss": 2.7744,
      "step": 800
    },
    {
      "epoch": 14.99,
      "learning_rate": 3.37125e-05,
      "loss": 2.7043,
      "step": 900
    },
    {
      "epoch": 16.66,
      "learning_rate": 3.7462499999999996e-05,
      "loss": 2.6987,
      "step": 1000
    },
    {
      "epoch": 16.66,
      "eval_loss": 2.687849760055542,
      "eval_runtime": 32.1535,
      "eval_samples_per_second": 27.742,
      "eval_steps_per_second": 3.483,
      "eval_wer": 1.0,
      "step": 1000
    },
    {
      "epoch": 18.33,
      "learning_rate": 4.12125e-05,
      "loss": 2.6565,
      "step": 1100
    },
    {
      "epoch": 19.99,
      "learning_rate": 4.4962499999999995e-05,
      "loss": 2.4086,
      "step": 1200
    },
    {
      "epoch": 21.66,
      "learning_rate": 4.871249999999999e-05,
      "loss": 1.7625,
      "step": 1300
    },
    {
      "epoch": 23.33,
      "learning_rate": 5.2462499999999994e-05,
      "loss": 1.4648,
      "step": 1400
    },
    {
      "epoch": 24.99,
      "learning_rate": 5.62125e-05,
      "loss": 1.3454,
      "step": 1500
    },
    {
      "epoch": 24.99,
      "eval_loss": 0.6813644766807556,
      "eval_runtime": 32.0646,
      "eval_samples_per_second": 27.819,
      "eval_steps_per_second": 3.493,
      "eval_wer": 0.698066935949221,
      "step": 1500
    },
    {
      "epoch": 26.66,
      "learning_rate": 5.9962499999999994e-05,
      "loss": 1.2913,
      "step": 1600
    },
    {
      "epoch": 28.33,
      "learning_rate": 6.37125e-05,
      "loss": 1.2416,
      "step": 1700
    },
    {
      "epoch": 29.99,
      "learning_rate": 6.746249999999999e-05,
      "loss": 1.1899,
      "step": 1800
    },
    {
      "epoch": 31.66,
      "learning_rate": 7.121249999999999e-05,
      "loss": 1.1745,
      "step": 1900
    },
    {
      "epoch": 33.33,
      "learning_rate": 7.492499999999999e-05,
      "loss": 1.1227,
      "step": 2000
    },
    {
      "epoch": 33.33,
      "eval_loss": 0.5790585875511169,
      "eval_runtime": 32.0814,
      "eval_samples_per_second": 27.804,
      "eval_steps_per_second": 3.491,
      "eval_wer": 0.6513271783035199,
      "step": 2000
    },
    {
      "epoch": 34.99,
      "learning_rate": 7.2375e-05,
      "loss": 1.0795,
      "step": 2100
    },
    {
      "epoch": 36.66,
      "learning_rate": 6.972321428571428e-05,
      "loss": 1.0646,
      "step": 2200
    },
    {
      "epoch": 38.33,
      "learning_rate": 6.704464285714285e-05,
      "loss": 1.05,
      "step": 2300
    },
    {
      "epoch": 39.99,
      "learning_rate": 6.436607142857142e-05,
      "loss": 1.0149,
      "step": 2400
    },
    {
      "epoch": 41.66,
      "learning_rate": 6.16875e-05,
      "loss": 0.9972,
      "step": 2500
    },
    {
      "epoch": 41.66,
      "eval_loss": 0.5235142111778259,
      "eval_runtime": 31.8053,
      "eval_samples_per_second": 28.046,
      "eval_steps_per_second": 3.521,
      "eval_wer": 0.5718407386035776,
      "step": 2500
    },
    {
      "epoch": 43.33,
      "learning_rate": 5.9008928571428565e-05,
      "loss": 0.9722,
      "step": 2600
    },
    {
      "epoch": 44.99,
      "learning_rate": 5.633035714285714e-05,
      "loss": 0.9401,
      "step": 2700
    },
    {
      "epoch": 46.66,
      "learning_rate": 5.3651785714285706e-05,
      "loss": 0.9439,
      "step": 2800
    },
    {
      "epoch": 48.33,
      "learning_rate": 5.0973214285714276e-05,
      "loss": 0.923,
      "step": 2900
    },
    {
      "epoch": 49.99,
      "learning_rate": 4.829464285714285e-05,
      "loss": 0.9123,
      "step": 3000
    },
    {
      "epoch": 49.99,
      "eval_loss": 0.5104396939277649,
      "eval_runtime": 31.7859,
      "eval_samples_per_second": 28.063,
      "eval_steps_per_second": 3.524,
      "eval_wer": 0.5633294864396999,
      "step": 3000
    },
    {
      "epoch": 51.66,
      "learning_rate": 4.561607142857142e-05,
      "loss": 0.8979,
      "step": 3100
    },
    {
      "epoch": 53.33,
      "learning_rate": 4.29375e-05,
      "loss": 0.8814,
      "step": 3200
    },
    {
      "epoch": 54.99,
      "learning_rate": 4.025892857142857e-05,
      "loss": 0.8548,
      "step": 3300
    },
    {
      "epoch": 56.66,
      "learning_rate": 3.760714285714286e-05,
      "loss": 0.8515,
      "step": 3400
    },
    {
      "epoch": 58.33,
      "learning_rate": 3.492857142857142e-05,
      "loss": 0.836,
      "step": 3500
    },
    {
      "epoch": 58.33,
      "eval_loss": 0.49266090989112854,
      "eval_runtime": 31.7464,
      "eval_samples_per_second": 28.098,
      "eval_steps_per_second": 3.528,
      "eval_wer": 0.5579919215233698,
      "step": 3500
    },
    {
      "epoch": 59.99,
      "learning_rate": 3.225e-05,
      "loss": 0.8045,
      "step": 3600
    },
    {
      "epoch": 61.66,
      "learning_rate": 2.9571428571428568e-05,
      "loss": 0.8036,
      "step": 3700
    },
    {
      "epoch": 63.33,
      "learning_rate": 2.689285714285714e-05,
      "loss": 0.7924,
      "step": 3800
    },
    {
      "epoch": 64.99,
      "learning_rate": 2.4214285714285712e-05,
      "loss": 0.7837,
      "step": 3900
    },
    {
      "epoch": 66.66,
      "learning_rate": 2.1535714285714285e-05,
      "loss": 0.7725,
      "step": 4000
    },
    {
      "epoch": 66.66,
      "eval_loss": 0.5077508687973022,
      "eval_runtime": 31.8976,
      "eval_samples_per_second": 27.964,
      "eval_steps_per_second": 3.511,
      "eval_wer": 0.5778995960761685,
      "step": 4000
    },
    {
      "epoch": 68.33,
      "learning_rate": 1.888392857142857e-05,
      "loss": 0.7655,
      "step": 4100
    },
    {
      "epoch": 69.99,
      "learning_rate": 1.6205357142857143e-05,
      "loss": 0.741,
      "step": 4200
    },
    {
      "epoch": 71.66,
      "learning_rate": 1.3526785714285713e-05,
      "loss": 0.7499,
      "step": 4300
    },
    {
      "epoch": 73.33,
      "learning_rate": 1.0848214285714287e-05,
      "loss": 0.7479,
      "step": 4400
    },
    {
      "epoch": 74.99,
      "learning_rate": 8.169642857142857e-06,
      "loss": 0.7297,
      "step": 4500
    },
    {
      "epoch": 74.99,
      "eval_loss": 0.4939458966255188,
      "eval_runtime": 31.9903,
      "eval_samples_per_second": 27.883,
      "eval_steps_per_second": 3.501,
      "eval_wer": 0.5737160992498558,
      "step": 4500
    },
    {
      "epoch": 76.66,
      "learning_rate": 5.491071428571429e-06,
      "loss": 0.7318,
      "step": 4600
    },
    {
      "epoch": 78.33,
      "learning_rate": 2.8124999999999998e-06,
      "loss": 0.7232,
      "step": 4700
    },
    {
      "epoch": 79.99,
      "learning_rate": 1.3392857142857142e-07,
      "loss": 0.7195,
      "step": 4800
    },
    {
      "epoch": 79.99,
      "step": 4800,
      "total_flos": 2.017023736432276e+19,
      "train_loss": 1.795632479985555,
      "train_runtime": 8730.4816,
      "train_samples_per_second": 17.786,
      "train_steps_per_second": 0.55
    }
  ],
  "max_steps": 4800,
  "num_train_epochs": 80,
  "total_flos": 2.017023736432276e+19,
  "trial_name": null,
  "trial_params": null
}