Training in progress, step 6, checkpoint
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.00014019428591456327,
  "eval_steps": 3,
  "global_step": 6,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 2.3365714319093876e-05,
      "grad_norm": 0.8085529804229736,
      "learning_rate": 2e-05,
      "loss": 2.9772,
      "step": 1
    },
    {
      "epoch": 2.3365714319093876e-05,
      "eval_loss": 0.8430490493774414,
      "eval_runtime": 1868.3465,
      "eval_samples_per_second": 9.645,
      "eval_steps_per_second": 4.823,
      "step": 1
    },
    {
      "epoch": 4.673142863818775e-05,
      "grad_norm": 0.8253940939903259,
      "learning_rate": 4e-05,
      "loss": 2.9822,
      "step": 2
    },
    {
      "epoch": 7.009714295728164e-05,
      "grad_norm": 0.7106623649597168,
      "learning_rate": 6e-05,
      "loss": 3.1731,
      "step": 3
    },
    {
      "epoch": 7.009714295728164e-05,
      "eval_loss": 0.8419922590255737,
      "eval_runtime": 935.4078,
      "eval_samples_per_second": 19.265,
      "eval_steps_per_second": 9.633,
      "step": 3
    },
    {
      "epoch": 9.34628572763755e-05,
      "grad_norm": 0.8510690927505493,
      "learning_rate": 8e-05,
      "loss": 2.6167,
      "step": 4
    },
    {
      "epoch": 0.00011682857159546939,
      "grad_norm": 0.9762678742408752,
      "learning_rate": 0.0001,
      "loss": 3.4279,
      "step": 5
    },
    {
      "epoch": 0.00014019428591456327,
      "grad_norm": 0.885900616645813,
      "learning_rate": 0.00012,
      "loss": 3.2001,
      "step": 6
    },
    {
      "epoch": 0.00014019428591456327,
      "eval_loss": 0.8309927582740784,
      "eval_runtime": 943.7787,
      "eval_samples_per_second": 19.095,
      "eval_steps_per_second": 9.548,
      "step": 6
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1628733135912960.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
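
This is the standard trainer_state.json that the Hugging Face transformers Trainer writes into each checkpoint directory; per the settings above, losses are logged every step (logging_steps 1) and evaluation and saving happen every 3 steps (eval_steps and save_steps 3), which is why this snapshot was taken at global_step 6. Below is a minimal Python sketch, using only the standard library, for loading such a file and summarizing its log_history; the checkpoint-6 path is an assumption and should be adjusted to wherever the checkpoint was actually saved.

import json

# Assumed path: <output_dir>/checkpoint-6/trainer_state.json (not recorded in
# the file itself); point this at the actual checkpoint directory.
with open("checkpoint-6/trainer_state.json") as f:
    state = json.load(f)

print(f"global_step={state['global_step']}  epoch={state['epoch']:.6g}")

# Each log_history entry is either a training log (has "loss") or an
# evaluation log (has "eval_loss"); both carry the step they refer to.
for record in state["log_history"]:
    step = record["step"]
    if "loss" in record:
        print(f"step {step}: train loss {record['loss']:.4f}, "
              f"lr {record['learning_rate']:.2e}")
    if "eval_loss" in record:
        print(f"step {step}: eval loss {record['eval_loss']:.4f}")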