{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.3118706566444476,
"eval_steps": 3526,
"global_step": 18500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03545596369309318,
"grad_norm": 1.6790207624435425,
"learning_rate": 0.0001964544036306907,
"loss": 4.1281,
"step": 500
},
{
"epoch": 0.07091192738618636,
"grad_norm": 1.736142635345459,
"learning_rate": 0.00019290880726138138,
"loss": 3.4072,
"step": 1000
},
{
"epoch": 0.10636789107927953,
"grad_norm": 1.8541169166564941,
"learning_rate": 0.00018936321089207204,
"loss": 3.1885,
"step": 1500
},
{
"epoch": 0.14182385477237272,
"grad_norm": 2.1542677879333496,
"learning_rate": 0.00018581761452276274,
"loss": 3.1089,
"step": 2000
},
{
"epoch": 0.1772798184654659,
"grad_norm": 1.8087749481201172,
"learning_rate": 0.00018227201815345344,
"loss": 3.0863,
"step": 2500
},
{
"epoch": 0.21273578215855907,
"grad_norm": 2.463923692703247,
"learning_rate": 0.0001787264217841441,
"loss": 3.0656,
"step": 3000
},
{
"epoch": 0.24819174585165224,
"grad_norm": 2.494748115539551,
"learning_rate": 0.00017518082541483478,
"loss": 3.0086,
"step": 3500
},
{
"epoch": 0.2500354559636931,
"eval_loss": 2.7105393409729004,
"eval_runtime": 360.9098,
"eval_samples_per_second": 9.77,
"eval_steps_per_second": 9.77,
"step": 3526
},
{
"epoch": 0.28364770954474544,
"grad_norm": 1.3893351554870605,
"learning_rate": 0.00017163522904552547,
"loss": 2.9823,
"step": 4000
},
{
"epoch": 0.3191036732378386,
"grad_norm": 1.878633975982666,
"learning_rate": 0.00016808963267621617,
"loss": 2.8684,
"step": 4500
},
{
"epoch": 0.3545596369309318,
"grad_norm": 3.777602434158325,
"learning_rate": 0.0001645440363069068,
"loss": 2.802,
"step": 5000
},
{
"epoch": 0.39001560062402496,
"grad_norm": 1.610077977180481,
"learning_rate": 0.0001609984399375975,
"loss": 2.894,
"step": 5500
},
{
"epoch": 0.42547156431711813,
"grad_norm": 2.231553316116333,
"learning_rate": 0.0001574528435682882,
"loss": 2.8238,
"step": 6000
},
{
"epoch": 0.4609275280102113,
"grad_norm": 3.5686066150665283,
"learning_rate": 0.00015390724719897887,
"loss": 2.8877,
"step": 6500
},
{
"epoch": 0.4963834917033045,
"grad_norm": 1.6852627992630005,
"learning_rate": 0.00015036165082966954,
"loss": 2.8127,
"step": 7000
},
{
"epoch": 0.5000709119273862,
"eval_loss": 2.5279786586761475,
"eval_runtime": 360.7362,
"eval_samples_per_second": 9.774,
"eval_steps_per_second": 9.774,
"step": 7052
},
{
"epoch": 0.5318394553963977,
"grad_norm": 2.0495705604553223,
"learning_rate": 0.00014681605446036024,
"loss": 2.7904,
"step": 7500
},
{
"epoch": 0.5672954190894909,
"grad_norm": 1.7553592920303345,
"learning_rate": 0.00014327045809105093,
"loss": 2.7686,
"step": 8000
},
{
"epoch": 0.602751382782584,
"grad_norm": 1.814765453338623,
"learning_rate": 0.0001397248617217416,
"loss": 2.7501,
"step": 8500
},
{
"epoch": 0.6382073464756772,
"grad_norm": 2.8568713665008545,
"learning_rate": 0.00013617926535243227,
"loss": 2.7077,
"step": 9000
},
{
"epoch": 0.6736633101687703,
"grad_norm": 2.317953109741211,
"learning_rate": 0.00013263366898312297,
"loss": 2.7284,
"step": 9500
},
{
"epoch": 0.7091192738618636,
"grad_norm": 2.4004650115966797,
"learning_rate": 0.00012908807261381367,
"loss": 2.6601,
"step": 10000
},
{
"epoch": 0.7445752375549567,
"grad_norm": 2.3228759765625,
"learning_rate": 0.00012554247624450433,
"loss": 2.717,
"step": 10500
},
{
"epoch": 0.7501063678910793,
"eval_loss": 2.42594575881958,
"eval_runtime": 360.6749,
"eval_samples_per_second": 9.776,
"eval_steps_per_second": 9.776,
"step": 10578
},
{
"epoch": 0.7800312012480499,
"grad_norm": 2.101666212081909,
"learning_rate": 0.00012199687987519502,
"loss": 2.692,
"step": 11000
},
{
"epoch": 0.8154871649411432,
"grad_norm": 2.441512107849121,
"learning_rate": 0.0001184512835058857,
"loss": 2.6627,
"step": 11500
},
{
"epoch": 0.8509431286342363,
"grad_norm": 1.934930443763733,
"learning_rate": 0.00011490568713657637,
"loss": 2.6634,
"step": 12000
},
{
"epoch": 0.8863990923273295,
"grad_norm": 2.1375555992126465,
"learning_rate": 0.00011136009076726705,
"loss": 2.6631,
"step": 12500
},
{
"epoch": 0.9218550560204226,
"grad_norm": 2.178725242614746,
"learning_rate": 0.00010781449439795775,
"loss": 2.6373,
"step": 13000
},
{
"epoch": 0.9573110197135158,
"grad_norm": 1.6436994075775146,
"learning_rate": 0.00010426889802864843,
"loss": 2.5462,
"step": 13500
},
{
"epoch": 0.992766983406609,
"grad_norm": 3.6108076572418213,
"learning_rate": 0.0001007233016593391,
"loss": 2.6291,
"step": 14000
},
{
"epoch": 1.0001418238547723,
"eval_loss": 2.368666410446167,
"eval_runtime": 360.6633,
"eval_samples_per_second": 9.776,
"eval_steps_per_second": 9.776,
"step": 14104
},
{
"epoch": 1.0282229470997022,
"grad_norm": 2.3566112518310547,
"learning_rate": 9.717770529002978e-05,
"loss": 2.678,
"step": 14500
},
{
"epoch": 1.0636789107927953,
"grad_norm": 2.204242467880249,
"learning_rate": 9.363210892072048e-05,
"loss": 2.587,
"step": 15000
},
{
"epoch": 1.0991348744858884,
"grad_norm": 1.9366542100906372,
"learning_rate": 9.008651255141115e-05,
"loss": 2.6107,
"step": 15500
},
{
"epoch": 1.1345908381789818,
"grad_norm": 2.088714838027954,
"learning_rate": 8.654091618210185e-05,
"loss": 2.5692,
"step": 16000
},
{
"epoch": 1.1700468018720749,
"grad_norm": 2.3588242530822754,
"learning_rate": 8.299531981279251e-05,
"loss": 2.5354,
"step": 16500
},
{
"epoch": 1.205502765565168,
"grad_norm": 2.398696184158325,
"learning_rate": 7.94497234434832e-05,
"loss": 2.5996,
"step": 17000
},
{
"epoch": 1.2409587292582613,
"grad_norm": 1.8444238901138306,
"learning_rate": 7.590412707417388e-05,
"loss": 2.6186,
"step": 17500
},
{
"epoch": 1.2501772798184656,
"eval_loss": 2.3247604370117188,
"eval_runtime": 360.8279,
"eval_samples_per_second": 9.772,
"eval_steps_per_second": 9.772,
"step": 17630
},
{
"epoch": 1.2764146929513545,
"grad_norm": 2.0656793117523193,
"learning_rate": 7.235853070486456e-05,
"loss": 2.5647,
"step": 18000
},
{
"epoch": 1.3118706566444476,
"grad_norm": 2.8596479892730713,
"learning_rate": 6.881293433555525e-05,
"loss": 2.555,
"step": 18500
}
],
"logging_steps": 500,
"max_steps": 28204,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.0374916972544e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}