{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.11682242990654206,
"eval_steps": 4,
"global_step": 25,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.004672897196261682,
"grad_norm": 0.021811505779623985,
"learning_rate": 1.0000000000000002e-06,
"loss": 10.3773,
"step": 1
},
{
"epoch": 0.004672897196261682,
"eval_loss": 10.380165100097656,
"eval_runtime": 0.4183,
"eval_samples_per_second": 215.178,
"eval_steps_per_second": 54.99,
"step": 1
},
{
"epoch": 0.009345794392523364,
"grad_norm": 0.027132729068398476,
"learning_rate": 2.0000000000000003e-06,
"loss": 10.3817,
"step": 2
},
{
"epoch": 0.014018691588785047,
"grad_norm": 0.03020065277814865,
"learning_rate": 3e-06,
"loss": 10.3781,
"step": 3
},
{
"epoch": 0.018691588785046728,
"grad_norm": 0.022099485620856285,
"learning_rate": 4.000000000000001e-06,
"loss": 10.3826,
"step": 4
},
{
"epoch": 0.018691588785046728,
"eval_loss": 10.380165100097656,
"eval_runtime": 0.4296,
"eval_samples_per_second": 209.519,
"eval_steps_per_second": 53.544,
"step": 4
},
{
"epoch": 0.02336448598130841,
"grad_norm": 0.025594104081392288,
"learning_rate": 5e-06,
"loss": 10.381,
"step": 5
},
{
"epoch": 0.028037383177570093,
"grad_norm": 0.03467366844415665,
"learning_rate": 6e-06,
"loss": 10.3765,
"step": 6
},
{
"epoch": 0.03271028037383177,
"grad_norm": 0.022812705487012863,
"learning_rate": 7e-06,
"loss": 10.3822,
"step": 7
},
{
"epoch": 0.037383177570093455,
"grad_norm": 0.026599343866109848,
"learning_rate": 8.000000000000001e-06,
"loss": 10.3811,
"step": 8
},
{
"epoch": 0.037383177570093455,
"eval_loss": 10.380156517028809,
"eval_runtime": 0.4169,
"eval_samples_per_second": 215.905,
"eval_steps_per_second": 55.176,
"step": 8
},
{
"epoch": 0.04205607476635514,
"grad_norm": 0.023074598982930183,
"learning_rate": 9e-06,
"loss": 10.3728,
"step": 9
},
{
"epoch": 0.04672897196261682,
"grad_norm": 0.032919738441705704,
"learning_rate": 1e-05,
"loss": 10.3786,
"step": 10
},
{
"epoch": 0.0514018691588785,
"grad_norm": 0.02511332742869854,
"learning_rate": 9.890738003669029e-06,
"loss": 10.3747,
"step": 11
},
{
"epoch": 0.056074766355140186,
"grad_norm": 0.024660587310791016,
"learning_rate": 9.567727288213005e-06,
"loss": 10.3836,
"step": 12
},
{
"epoch": 0.056074766355140186,
"eval_loss": 10.38013744354248,
"eval_runtime": 0.4437,
"eval_samples_per_second": 202.859,
"eval_steps_per_second": 51.842,
"step": 12
},
{
"epoch": 0.06074766355140187,
"grad_norm": 0.023776650428771973,
"learning_rate": 9.045084971874738e-06,
"loss": 10.3729,
"step": 13
},
{
"epoch": 0.06542056074766354,
"grad_norm": 0.02619120106101036,
"learning_rate": 8.345653031794292e-06,
"loss": 10.3764,
"step": 14
},
{
"epoch": 0.07009345794392523,
"grad_norm": 0.022596610710024834,
"learning_rate": 7.500000000000001e-06,
"loss": 10.3796,
"step": 15
},
{
"epoch": 0.07476635514018691,
"grad_norm": 0.02786189876496792,
"learning_rate": 6.545084971874738e-06,
"loss": 10.3807,
"step": 16
},
{
"epoch": 0.07476635514018691,
"eval_loss": 10.380114555358887,
"eval_runtime": 0.4279,
"eval_samples_per_second": 210.353,
"eval_steps_per_second": 53.757,
"step": 16
},
{
"epoch": 0.0794392523364486,
"grad_norm": 0.026685988530516624,
"learning_rate": 5.522642316338268e-06,
"loss": 10.3827,
"step": 17
},
{
"epoch": 0.08411214953271028,
"grad_norm": 0.025408007204532623,
"learning_rate": 4.477357683661734e-06,
"loss": 10.3776,
"step": 18
},
{
"epoch": 0.08878504672897196,
"grad_norm": 0.02390649914741516,
"learning_rate": 3.4549150281252635e-06,
"loss": 10.3809,
"step": 19
},
{
"epoch": 0.09345794392523364,
"grad_norm": 0.028136681765317917,
"learning_rate": 2.5000000000000015e-06,
"loss": 10.3781,
"step": 20
},
{
"epoch": 0.09345794392523364,
"eval_loss": 10.380105018615723,
"eval_runtime": 0.549,
"eval_samples_per_second": 163.933,
"eval_steps_per_second": 41.894,
"step": 20
},
{
"epoch": 0.09813084112149532,
"grad_norm": 0.021048307418823242,
"learning_rate": 1.6543469682057105e-06,
"loss": 10.376,
"step": 21
},
{
"epoch": 0.102803738317757,
"grad_norm": 0.021575015038251877,
"learning_rate": 9.549150281252633e-07,
"loss": 10.3777,
"step": 22
},
{
"epoch": 0.10747663551401869,
"grad_norm": 0.02331606112420559,
"learning_rate": 4.322727117869951e-07,
"loss": 10.3831,
"step": 23
},
{
"epoch": 0.11214953271028037,
"grad_norm": 0.02492584101855755,
"learning_rate": 1.0926199633097156e-07,
"loss": 10.3764,
"step": 24
},
{
"epoch": 0.11214953271028037,
"eval_loss": 10.380101203918457,
"eval_runtime": 0.4245,
"eval_samples_per_second": 212.025,
"eval_steps_per_second": 54.184,
"step": 24
},
{
"epoch": 0.11682242990654206,
"grad_norm": 0.028461946174502373,
"learning_rate": 0.0,
"loss": 10.3803,
"step": 25
}
],
"logging_steps": 1,
"max_steps": 25,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 653780582400.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}
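
Note: the JSON above is the trainer state written alongside a Trainer checkpoint. As a minimal sketch (assuming it is saved under the standard name "trainer_state.json" next to the checkpoint weights), the "log_history" list can be split into training records (which carry "loss") and evaluation records (which carry "eval_loss") like this; the filename and the printout are illustrative, not part of the checkpoint itself:

import json

# Assumption: the file shown above is stored as "trainer_state.json"
# in the checkpoint directory (the usual Trainer layout).
with open("trainer_state.json") as f:
    state = json.load(f)

# log_history mixes two record types: training steps (key "loss")
# and evaluation runs (key "eval_loss"); separate them by key.
train_loss = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
eval_loss = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]

print(f"trained {state['global_step']}/{state['max_steps']} steps "
      f"(epoch {state['epoch']:.4f})")
print("last train loss:", train_loss[-1])   # e.g. (25, 10.3803) for this file
print("last eval loss:", eval_loss[-1])     # e.g. (24, 10.380101203918457)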