|
{
  "best_metric": 0.18060417473316193,
  "best_model_checkpoint": "/content/drive/MyDrive/W210 Capstone - Lyric Generation with Melody/loaf/models/kwsylgen/bart/bart-finetuned-kwsylgen-64-simple_input_BARTlarge/checkpoint-6500",
  "epoch": 2.3322569070685324,
  "eval_steps": 500,
  "global_step": 6500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "grad_norm": 0.9277993440628052,
      "learning_rate": 1.8815931108719056e-05,
      "loss": 2.0641,
      "step": 500
    },
    {
      "epoch": 0.18,
      "eval_loss": 0.24505409598350525,
      "eval_runtime": 128.1235,
      "eval_samples_per_second": 184.845,
      "eval_steps_per_second": 2.896,
      "step": 500
    },
    {
      "epoch": 0.36,
      "grad_norm": 1.0129226446151733,
      "learning_rate": 1.7619901925606986e-05,
      "loss": 0.2194,
      "step": 1000
    },
    {
      "epoch": 0.36,
      "eval_loss": 0.22281154990196228,
      "eval_runtime": 128.9637,
      "eval_samples_per_second": 183.641,
      "eval_steps_per_second": 2.877,
      "step": 1000
    },
    {
      "epoch": 0.54,
      "grad_norm": 1.0302517414093018,
      "learning_rate": 1.6423872742494916e-05,
      "loss": 0.1989,
      "step": 1500
    },
    {
      "epoch": 0.54,
      "eval_loss": 0.20858530700206757,
      "eval_runtime": 129.0216,
      "eval_samples_per_second": 183.558,
      "eval_steps_per_second": 2.875,
      "step": 1500
    },
    {
      "epoch": 0.72,
      "grad_norm": 176.42080688476562,
      "learning_rate": 1.5227843559382852e-05,
      "loss": 0.1888,
      "step": 2000
    },
    {
      "epoch": 0.72,
      "eval_loss": 0.20265720784664154,
      "eval_runtime": 129.0285,
      "eval_samples_per_second": 183.549,
      "eval_steps_per_second": 2.875,
      "step": 2000
    },
    {
      "epoch": 0.9,
      "grad_norm": 0.7003317475318909,
      "learning_rate": 1.4031814376270783e-05,
      "loss": 0.177,
      "step": 2500
    },
    {
      "epoch": 0.9,
      "eval_loss": 0.19755715131759644,
      "eval_runtime": 129.1437,
      "eval_samples_per_second": 183.385,
      "eval_steps_per_second": 2.873,
      "step": 2500
    },
    {
      "epoch": 1.08,
      "grad_norm": 0.9357886910438538,
      "learning_rate": 1.2835785193158714e-05,
      "loss": 0.1703,
      "step": 3000
    },
    {
      "epoch": 1.08,
      "eval_loss": 0.19327057898044586,
      "eval_runtime": 128.8805,
      "eval_samples_per_second": 183.759,
      "eval_steps_per_second": 2.879,
      "step": 3000
    },
    {
      "epoch": 1.26,
      "grad_norm": 0.7597601413726807,
      "learning_rate": 1.1639756010046645e-05,
      "loss": 0.1647,
      "step": 3500
    },
    {
      "epoch": 1.26,
      "eval_loss": 0.19279009103775024,
      "eval_runtime": 130.5884,
      "eval_samples_per_second": 181.356,
      "eval_steps_per_second": 2.841,
      "step": 3500
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.7547380924224854,
      "learning_rate": 1.0443726826934577e-05,
      "loss": 0.159,
      "step": 4000
    },
    {
      "epoch": 1.44,
      "eval_loss": 0.1890030950307846,
      "eval_runtime": 130.7667,
      "eval_samples_per_second": 181.109,
      "eval_steps_per_second": 2.837,
      "step": 4000
    },
    {
      "epoch": 1.61,
      "grad_norm": 1.0234203338623047,
      "learning_rate": 9.24769764382251e-06,
      "loss": 0.1538,
      "step": 4500
    },
    {
      "epoch": 1.61,
      "eval_loss": 0.18637944757938385,
      "eval_runtime": 129.4971,
      "eval_samples_per_second": 182.884,
      "eval_steps_per_second": 2.865,
      "step": 4500
    },
    {
      "epoch": 1.79,
      "grad_norm": 0.8639469742774963,
      "learning_rate": 8.051668460710443e-06,
      "loss": 0.151,
      "step": 5000
    },
    {
      "epoch": 1.79,
      "eval_loss": 0.18570180237293243,
      "eval_runtime": 130.9952,
      "eval_samples_per_second": 180.793,
      "eval_steps_per_second": 2.832,
      "step": 5000
    },
    {
      "epoch": 1.97,
      "grad_norm": 1.0679283142089844,
      "learning_rate": 6.855639277598374e-06,
      "loss": 0.1471,
      "step": 5500
    },
    {
      "epoch": 1.97,
      "eval_loss": 0.1828351765871048,
      "eval_runtime": 130.7048,
      "eval_samples_per_second": 181.195,
      "eval_steps_per_second": 2.838,
      "step": 5500
    },
    {
      "epoch": 2.15,
      "grad_norm": 0.7938817143440247,
      "learning_rate": 5.6596100944863066e-06,
      "loss": 0.1436,
      "step": 6000
    },
    {
      "epoch": 2.15,
      "eval_loss": 0.18144695460796356,
      "eval_runtime": 129.7461,
      "eval_samples_per_second": 182.533,
      "eval_steps_per_second": 2.859,
      "step": 6000
    },
    {
      "epoch": 2.33,
      "grad_norm": 0.6742356419563293,
      "learning_rate": 4.463580911374238e-06,
      "loss": 0.1435,
      "step": 6500
    },
    {
      "epoch": 2.33,
      "eval_loss": 0.18060417473316193,
      "eval_runtime": 128.241,
      "eval_samples_per_second": 184.676,
      "eval_steps_per_second": 2.893,
      "step": 6500
    }
  ],
  "logging_steps": 500,
  "max_steps": 8361,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 5.63409272218583e+16,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}
|
|