{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0819672131147542,
  "eval_steps": 9,
  "global_step": 99,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01092896174863388,
      "eval_loss": 2.294912338256836,
      "eval_runtime": 9.8437,
      "eval_samples_per_second": 15.644,
      "eval_steps_per_second": 2.032,
      "step": 1
    },
    {
      "epoch": 0.03278688524590164,
      "grad_norm": 1.559897541999817,
      "learning_rate": 1.5e-05,
      "loss": 8.8979,
      "step": 3
    },
    {
      "epoch": 0.06557377049180328,
      "grad_norm": 1.6194515228271484,
      "learning_rate": 3e-05,
      "loss": 9.1763,
      "step": 6
    },
    {
      "epoch": 0.09836065573770492,
      "grad_norm": 1.691707968711853,
      "learning_rate": 4.5e-05,
      "loss": 9.2363,
      "step": 9
    },
    {
      "epoch": 0.09836065573770492,
      "eval_loss": 2.229104518890381,
      "eval_runtime": 9.9458,
      "eval_samples_per_second": 15.484,
      "eval_steps_per_second": 2.011,
      "step": 9
    },
    {
      "epoch": 0.13114754098360656,
      "grad_norm": 1.64967942237854,
      "learning_rate": 4.993910125649561e-05,
      "loss": 8.735,
      "step": 12
    },
    {
      "epoch": 0.16393442622950818,
      "grad_norm": 1.896277666091919,
      "learning_rate": 4.962019382530521e-05,
      "loss": 8.176,
      "step": 15
    },
    {
      "epoch": 0.19672131147540983,
      "grad_norm": 1.1657590866088867,
      "learning_rate": 4.9031542398457974e-05,
      "loss": 7.7598,
      "step": 18
    },
    {
      "epoch": 0.19672131147540983,
      "eval_loss": 1.973254680633545,
      "eval_runtime": 9.9953,
      "eval_samples_per_second": 15.407,
      "eval_steps_per_second": 2.001,
      "step": 18
    },
    {
      "epoch": 0.22950819672131148,
      "grad_norm": 1.325817584991455,
      "learning_rate": 4.817959636416969e-05,
      "loss": 8.0191,
      "step": 21
    },
    {
      "epoch": 0.26229508196721313,
      "grad_norm": 1.2216711044311523,
      "learning_rate": 4.707368982147318e-05,
      "loss": 7.6623,
      "step": 24
    },
    {
      "epoch": 0.29508196721311475,
      "grad_norm": 1.25918710231781,
      "learning_rate": 4.572593931387604e-05,
      "loss": 7.7173,
      "step": 27
    },
    {
      "epoch": 0.29508196721311475,
      "eval_loss": 1.913491129875183,
      "eval_runtime": 9.9988,
      "eval_samples_per_second": 15.402,
      "eval_steps_per_second": 2.0,
      "step": 27
    },
    {
      "epoch": 0.32786885245901637,
      "grad_norm": 1.1863789558410645,
      "learning_rate": 4.415111107797445e-05,
      "loss": 7.6305,
      "step": 30
    },
    {
      "epoch": 0.36065573770491804,
      "grad_norm": 1.1477130651474,
      "learning_rate": 4.2366459261474933e-05,
      "loss": 7.6748,
      "step": 33
    },
    {
      "epoch": 0.39344262295081966,
      "grad_norm": 1.2501188516616821,
      "learning_rate": 4.039153688314145e-05,
      "loss": 7.5313,
      "step": 36
    },
    {
      "epoch": 0.39344262295081966,
      "eval_loss": 1.8740694522857666,
      "eval_runtime": 10.0026,
      "eval_samples_per_second": 15.396,
      "eval_steps_per_second": 1.999,
      "step": 36
    },
    {
      "epoch": 0.4262295081967213,
      "grad_norm": 1.2684975862503052,
      "learning_rate": 3.824798160583012e-05,
      "loss": 7.5737,
      "step": 39
    },
    {
      "epoch": 0.45901639344262296,
      "grad_norm": 1.303396224975586,
      "learning_rate": 3.5959278669726935e-05,
      "loss": 7.6169,
      "step": 42
    },
    {
      "epoch": 0.4918032786885246,
      "grad_norm": 1.1348557472229004,
      "learning_rate": 3.355050358314172e-05,
      "loss": 7.3236,
      "step": 45
    },
    {
      "epoch": 0.4918032786885246,
      "eval_loss": 1.8486707210540771,
      "eval_runtime": 9.9999,
      "eval_samples_per_second": 15.4,
      "eval_steps_per_second": 2.0,
      "step": 45
    },
    {
      "epoch": 0.5245901639344263,
      "grad_norm": 1.3024663925170898,
      "learning_rate": 3.104804738999169e-05,
      "loss": 7.5219,
      "step": 48
    },
    {
      "epoch": 0.5573770491803278,
      "grad_norm": 1.4016789197921753,
      "learning_rate": 2.8479327524001636e-05,
      "loss": 7.6823,
      "step": 51
    },
    {
      "epoch": 0.5901639344262295,
      "grad_norm": 1.232653260231018,
      "learning_rate": 2.587248741756253e-05,
      "loss": 7.2973,
      "step": 54
    },
    {
      "epoch": 0.5901639344262295,
      "eval_loss": 1.835157871246338,
      "eval_runtime": 10.0097,
      "eval_samples_per_second": 15.385,
      "eval_steps_per_second": 1.998,
      "step": 54
    },
    {
      "epoch": 0.6229508196721312,
      "grad_norm": 1.5029542446136475,
      "learning_rate": 2.3256088156396868e-05,
      "loss": 7.4675,
      "step": 57
    },
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 1.2838174104690552,
      "learning_rate": 2.0658795558326743e-05,
      "loss": 7.3278,
      "step": 60
    },
    {
      "epoch": 0.6885245901639344,
      "grad_norm": 1.2395374774932861,
      "learning_rate": 1.8109066104575023e-05,
      "loss": 7.4121,
      "step": 63
    },
    {
      "epoch": 0.6885245901639344,
      "eval_loss": 1.8278913497924805,
      "eval_runtime": 10.0008,
      "eval_samples_per_second": 15.399,
      "eval_steps_per_second": 2.0,
      "step": 63
    },
    {
      "epoch": 0.7213114754098361,
      "grad_norm": 1.1322687864303589,
      "learning_rate": 1.56348351646022e-05,
      "loss": 7.2278,
      "step": 66
    },
    {
      "epoch": 0.7540983606557377,
      "grad_norm": 1.2498345375061035,
      "learning_rate": 1.3263210930352737e-05,
      "loss": 7.2407,
      "step": 69
    },
    {
      "epoch": 0.7868852459016393,
      "grad_norm": 1.4177753925323486,
      "learning_rate": 1.1020177413231334e-05,
      "loss": 7.0907,
      "step": 72
    },
    {
      "epoch": 0.7868852459016393,
      "eval_loss": 1.8231874704360962,
      "eval_runtime": 10.0062,
      "eval_samples_per_second": 15.39,
      "eval_steps_per_second": 1.999,
      "step": 72
    },
    {
      "epoch": 0.819672131147541,
      "grad_norm": 1.3069887161254883,
      "learning_rate": 8.930309757836517e-06,
      "loss": 7.4671,
      "step": 75
    },
    {
      "epoch": 0.8524590163934426,
      "grad_norm": 1.2993086576461792,
      "learning_rate": 7.016504991533726e-06,
      "loss": 7.5022,
      "step": 78
    },
    {
      "epoch": 0.8852459016393442,
      "grad_norm": 1.28661048412323,
      "learning_rate": 5.299731159831953e-06,
      "loss": 7.0951,
      "step": 81
    },
    {
      "epoch": 0.8852459016393442,
      "eval_loss": 1.8206969499588013,
      "eval_runtime": 10.005,
      "eval_samples_per_second": 15.392,
      "eval_steps_per_second": 1.999,
      "step": 81
    },
    {
      "epoch": 0.9180327868852459,
      "grad_norm": 1.2159041166305542,
      "learning_rate": 3.798797596089351e-06,
      "loss": 7.4236,
      "step": 84
    },
    {
      "epoch": 0.9508196721311475,
      "grad_norm": 1.4966245889663696,
      "learning_rate": 2.5301488425208296e-06,
      "loss": 7.1669,
      "step": 87
    },
    {
      "epoch": 0.9836065573770492,
      "grad_norm": 1.1949121952056885,
      "learning_rate": 1.5076844803522922e-06,
      "loss": 7.4799,
      "step": 90
    },
    {
      "epoch": 0.9836065573770492,
      "eval_loss": 1.8195555210113525,
      "eval_runtime": 10.013,
      "eval_samples_per_second": 15.38,
      "eval_steps_per_second": 1.997,
      "step": 90
    },
    {
      "epoch": 1.0163934426229508,
      "grad_norm": 1.1719740629196167,
      "learning_rate": 7.426068431000882e-07,
      "loss": 7.2704,
      "step": 93
    },
    {
      "epoch": 1.0491803278688525,
      "grad_norm": 1.204927921295166,
      "learning_rate": 2.4329828146074095e-07,
      "loss": 7.3601,
      "step": 96
    },
    {
      "epoch": 1.0819672131147542,
      "grad_norm": 1.423250675201416,
      "learning_rate": 1.522932452260595e-08,
      "loss": 7.4071,
      "step": 99
    },
    {
      "epoch": 1.0819672131147542,
      "eval_loss": 1.8196653127670288,
      "eval_runtime": 10.0108,
      "eval_samples_per_second": 15.383,
      "eval_steps_per_second": 1.998,
      "step": 99
    }
  ],
  "logging_steps": 3,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 9,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3150846313299968e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}