Llama2-7B-DPO / trainer_state.json
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9956483899042646,
  "global_step": 143,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.07,
      "learning_rate": 9.63768115942029e-06,
      "loss": 0.6928,
      "step": 10
    },
    {
      "epoch": 0.14,
      "learning_rate": 8.91304347826087e-06,
      "loss": 0.693,
      "step": 20
    },
    {
      "epoch": 0.21,
      "learning_rate": 8.188405797101449e-06,
      "loss": 0.6929,
      "step": 30
    },
    {
      "epoch": 0.28,
      "learning_rate": 7.4637681159420295e-06,
      "loss": 0.6905,
      "step": 40
    },
    {
      "epoch": 0.35,
      "learning_rate": 6.739130434782609e-06,
      "loss": 0.6894,
      "step": 50
    },
    {
      "epoch": 0.42,
      "learning_rate": 6.014492753623189e-06,
      "loss": 0.6882,
      "step": 60
    },
    {
      "epoch": 0.49,
      "learning_rate": 5.289855072463769e-06,
      "loss": 0.6945,
      "step": 70
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.565217391304348e-06,
      "loss": 0.6872,
      "step": 80
    },
    {
      "epoch": 0.63,
      "learning_rate": 3.840579710144928e-06,
      "loss": 0.6933,
      "step": 90
    },
    {
      "epoch": 0.7,
      "learning_rate": 3.1159420289855073e-06,
      "loss": 0.6883,
      "step": 100
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.391304347826087e-06,
      "loss": 0.6851,
      "step": 110
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.6666666666666667e-06,
      "loss": 0.6935,
      "step": 120
    },
    {
      "epoch": 0.91,
      "learning_rate": 9.420289855072465e-07,
      "loss": 0.6867,
      "step": 130
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.173913043478261e-07,
      "loss": 0.6839,
      "step": 140
    },
    {
      "epoch": 1.0,
      "step": 143,
      "total_flos": 0.0,
      "train_loss": 0.68989242040194,
      "train_runtime": 1214.1618,
      "train_samples_per_second": 7.57,
      "train_steps_per_second": 0.118
    }
  ],
  "max_steps": 143,
  "num_train_epochs": 1,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}
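
This is the standard trainer_state.json layout written by the Hugging Face transformers Trainer: each log_history entry records the step, epoch, learning rate, and training loss, and the final entry holds the run summary (train_loss, train_runtime, throughput). A minimal sketch of reading the file back, assuming it has been downloaded locally (the path below is an assumption, not part of this repo's code):

import json

# Assumed local path; point it at wherever the checkpoint folder was downloaded.
path = "Llama2-7B-DPO/trainer_state.json"

with open(path) as f:
    state = json.load(f)

# Intermediate entries carry "loss" and "learning_rate" every 10 steps;
# the last entry carries the run summary instead.
for entry in state["log_history"]:
    if "loss" in entry:
        print(f"step {entry['step']:>4}  lr {entry['learning_rate']:.3e}  loss {entry['loss']:.4f}")
    elif "train_loss" in entry:
        print(f"summary: train_loss {entry['train_loss']:.4f}, "
              f"runtime {entry['train_runtime']:.1f}s, "
              f"{entry['train_samples_per_second']} samples/s")

The logged learning rates are consistent with a linear schedule decaying to zero over the 143 steps from a peak of roughly 1e-5 after a short warmup, but the schedule itself is not stored in this file, so treat that reading as an inference from the values above.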