{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.032609138711123795,
"eval_steps": 50,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00016304569355561895,
"eval_loss": 0.9726263880729675,
"eval_runtime": 94.6739,
"eval_samples_per_second": 27.283,
"eval_steps_per_second": 13.647,
"step": 1
},
{
"epoch": 0.0008152284677780948,
"grad_norm": 0.07707637548446655,
"learning_rate": 5e-05,
"loss": 0.6907,
"step": 5
},
{
"epoch": 0.0016304569355561896,
"grad_norm": 0.055902931839227676,
"learning_rate": 0.0001,
"loss": 0.676,
"step": 10
},
{
"epoch": 0.0024456854033342843,
"grad_norm": 0.07526875287294388,
"learning_rate": 9.98292246503335e-05,
"loss": 0.701,
"step": 15
},
{
"epoch": 0.003260913871112379,
"grad_norm": 0.11501318961381912,
"learning_rate": 9.931806517013612e-05,
"loss": 0.7658,
"step": 20
},
{
"epoch": 0.004076142338890474,
"grad_norm": 0.12424055486917496,
"learning_rate": 9.847001329696653e-05,
"loss": 0.8096,
"step": 25
},
{
"epoch": 0.004891370806668569,
"grad_norm": 0.1637168526649475,
"learning_rate": 9.729086208503174e-05,
"loss": 0.8079,
"step": 30
},
{
"epoch": 0.005706599274446664,
"grad_norm": 0.17115910351276398,
"learning_rate": 9.578866633275288e-05,
"loss": 0.8622,
"step": 35
},
{
"epoch": 0.006521827742224758,
"grad_norm": 0.3511226177215576,
"learning_rate": 9.397368756032445e-05,
"loss": 0.9294,
"step": 40
},
{
"epoch": 0.0073370562100028534,
"grad_norm": 0.2993008494377136,
"learning_rate": 9.185832391312644e-05,
"loss": 0.9747,
"step": 45
},
{
"epoch": 0.008152284677780949,
"grad_norm": 0.6940411925315857,
"learning_rate": 8.945702546981969e-05,
"loss": 1.188,
"step": 50
},
{
"epoch": 0.008152284677780949,
"eval_loss": 0.8416158556938171,
"eval_runtime": 94.5672,
"eval_samples_per_second": 27.314,
"eval_steps_per_second": 13.662,
"step": 50
},
{
"epoch": 0.008967513145559042,
"grad_norm": 0.11171204596757889,
"learning_rate": 8.678619553365659e-05,
"loss": 0.6554,
"step": 55
},
{
"epoch": 0.009782741613337137,
"grad_norm": 0.13064613938331604,
"learning_rate": 8.386407858128706e-05,
"loss": 0.6926,
"step": 60
},
{
"epoch": 0.010597970081115233,
"grad_norm": 0.11693071573972702,
"learning_rate": 8.07106356344834e-05,
"loss": 0.7102,
"step": 65
},
{
"epoch": 0.011413198548893328,
"grad_norm": 0.1502353698015213,
"learning_rate": 7.734740790612136e-05,
"loss": 0.6994,
"step": 70
},
{
"epoch": 0.012228427016671423,
"grad_norm": 0.15267403423786163,
"learning_rate": 7.379736965185368e-05,
"loss": 0.7595,
"step": 75
},
{
"epoch": 0.013043655484449516,
"grad_norm": 0.2149164080619812,
"learning_rate": 7.008477123264848e-05,
"loss": 0.762,
"step": 80
},
{
"epoch": 0.013858883952227612,
"grad_norm": 0.2367481142282486,
"learning_rate": 6.623497346023418e-05,
"loss": 0.8421,
"step": 85
},
{
"epoch": 0.014674112420005707,
"grad_norm": 0.35609787702560425,
"learning_rate": 6.227427435703997e-05,
"loss": 0.7825,
"step": 90
},
{
"epoch": 0.015489340887783802,
"grad_norm": 0.5247498750686646,
"learning_rate": 5.8229729514036705e-05,
"loss": 0.9606,
"step": 95
},
{
"epoch": 0.016304569355561897,
"grad_norm": 1.1298545598983765,
"learning_rate": 5.4128967273616625e-05,
"loss": 1.2002,
"step": 100
},
{
"epoch": 0.016304569355561897,
"eval_loss": 0.8084781765937805,
"eval_runtime": 94.6181,
"eval_samples_per_second": 27.299,
"eval_steps_per_second": 13.655,
"step": 100
},
{
"epoch": 0.017119797823339993,
"grad_norm": 0.1418055295944214,
"learning_rate": 5e-05,
"loss": 0.5892,
"step": 105
},
{
"epoch": 0.017935026291118084,
"grad_norm": 0.1185089647769928,
"learning_rate": 4.5871032726383386e-05,
"loss": 0.5924,
"step": 110
},
{
"epoch": 0.01875025475889618,
"grad_norm": 0.13358411192893982,
"learning_rate": 4.17702704859633e-05,
"loss": 0.6427,
"step": 115
},
{
"epoch": 0.019565483226674275,
"grad_norm": 0.15520331263542175,
"learning_rate": 3.772572564296005e-05,
"loss": 0.7182,
"step": 120
},
{
"epoch": 0.02038071169445237,
"grad_norm": 0.20835325121879578,
"learning_rate": 3.3765026539765834e-05,
"loss": 0.7745,
"step": 125
},
{
"epoch": 0.021195940162230465,
"grad_norm": 0.15979139506816864,
"learning_rate": 2.991522876735154e-05,
"loss": 0.7509,
"step": 130
},
{
"epoch": 0.02201116863000856,
"grad_norm": 0.26534563302993774,
"learning_rate": 2.6202630348146324e-05,
"loss": 0.8185,
"step": 135
},
{
"epoch": 0.022826397097786656,
"grad_norm": 0.2851661145687103,
"learning_rate": 2.2652592093878666e-05,
"loss": 0.8134,
"step": 140
},
{
"epoch": 0.02364162556556475,
"grad_norm": 0.6048436760902405,
"learning_rate": 1.928936436551661e-05,
"loss": 0.8977,
"step": 145
},
{
"epoch": 0.024456854033342846,
"grad_norm": 0.7918521761894226,
"learning_rate": 1.6135921418712956e-05,
"loss": 1.1297,
"step": 150
},
{
"epoch": 0.024456854033342846,
"eval_loss": 0.7911348342895508,
"eval_runtime": 94.5989,
"eval_samples_per_second": 27.305,
"eval_steps_per_second": 13.658,
"step": 150
},
{
"epoch": 0.025272082501120938,
"grad_norm": 0.0935765951871872,
"learning_rate": 1.3213804466343421e-05,
"loss": 0.6047,
"step": 155
},
{
"epoch": 0.026087310968899033,
"grad_norm": 0.13173741102218628,
"learning_rate": 1.0542974530180327e-05,
"loss": 0.6212,
"step": 160
},
{
"epoch": 0.026902539436677128,
"grad_norm": 0.13724230229854584,
"learning_rate": 8.141676086873572e-06,
"loss": 0.7117,
"step": 165
},
{
"epoch": 0.027717767904455223,
"grad_norm": 0.13333597779273987,
"learning_rate": 6.026312439675552e-06,
"loss": 0.678,
"step": 170
},
{
"epoch": 0.02853299637223332,
"grad_norm": 0.1563432365655899,
"learning_rate": 4.2113336672471245e-06,
"loss": 0.8257,
"step": 175
},
{
"epoch": 0.029348224840011414,
"grad_norm": 0.17387519776821136,
"learning_rate": 2.7091379149682685e-06,
"loss": 0.8124,
"step": 180
},
{
"epoch": 0.03016345330778951,
"grad_norm": 0.2212686836719513,
"learning_rate": 1.5299867030334814e-06,
"loss": 0.7535,
"step": 185
},
{
"epoch": 0.030978681775567604,
"grad_norm": 0.25696220993995667,
"learning_rate": 6.819348298638839e-07,
"loss": 0.7698,
"step": 190
},
{
"epoch": 0.0317939102433457,
"grad_norm": 0.4329439699649811,
"learning_rate": 1.7077534966650766e-07,
"loss": 0.9063,
"step": 195
},
{
"epoch": 0.032609138711123795,
"grad_norm": 0.7288855910301208,
"learning_rate": 0.0,
"loss": 1.0139,
"step": 200
},
{
"epoch": 0.032609138711123795,
"eval_loss": 0.7942183017730713,
"eval_runtime": 94.3849,
"eval_samples_per_second": 27.367,
"eval_steps_per_second": 13.689,
"step": 200
}
],
"logging_steps": 5,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.7574856463745024e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}