{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.078412153883852, "eval_steps": 5, "global_step": 20, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0039206076941926, "grad_norm": 1.108206868171692, "learning_rate": 1e-05, "loss": 0.9312, "step": 1 }, { "epoch": 0.0039206076941926, "eval_loss": 1.099448561668396, "eval_runtime": 64.3877, "eval_samples_per_second": 6.678, "eval_steps_per_second": 3.339, "step": 1 }, { "epoch": 0.0078412153883852, "grad_norm": 1.2041150331497192, "learning_rate": 2e-05, "loss": 1.0575, "step": 2 }, { "epoch": 0.0117618230825778, "grad_norm": 1.2153946161270142, "learning_rate": 3e-05, "loss": 1.0726, "step": 3 }, { "epoch": 0.0156824307767704, "grad_norm": 1.1603235006332397, "learning_rate": 4e-05, "loss": 1.0766, "step": 4 }, { "epoch": 0.019603038470963, "grad_norm": 1.2363840341567993, "learning_rate": 5e-05, "loss": 1.0818, "step": 5 }, { "epoch": 0.019603038470963, "eval_loss": 0.9847230315208435, "eval_runtime": 6.9547, "eval_samples_per_second": 61.829, "eval_steps_per_second": 30.914, "step": 5 }, { "epoch": 0.0235236461651556, "grad_norm": 1.3062018156051636, "learning_rate": 6e-05, "loss": 1.1497, "step": 6 }, { "epoch": 0.0274442538593482, "grad_norm": 0.8951370120048523, "learning_rate": 7e-05, "loss": 0.9674, "step": 7 }, { "epoch": 0.0313648615535408, "grad_norm": 0.9190327525138855, "learning_rate": 8e-05, "loss": 0.8878, "step": 8 }, { "epoch": 0.035285469247733396, "grad_norm": 0.7922409772872925, "learning_rate": 9e-05, "loss": 0.7617, "step": 9 }, { "epoch": 0.039206076941926, "grad_norm": 0.9028505086898804, "learning_rate": 0.0001, "loss": 0.8743, "step": 10 }, { "epoch": 0.039206076941926, "eval_loss": 0.7476094365119934, "eval_runtime": 6.9611, "eval_samples_per_second": 61.772, "eval_steps_per_second": 30.886, "step": 10 }, { "epoch": 0.0431266846361186, "grad_norm": 0.7901967167854309, "learning_rate": 9.755282581475769e-05, "loss": 0.7681, "step": 11 }, { "epoch": 0.0470472923303112, "grad_norm": 0.7195383310317993, "learning_rate": 9.045084971874738e-05, "loss": 0.7462, "step": 12 }, { "epoch": 0.0509679000245038, "grad_norm": 0.7095159292221069, "learning_rate": 7.938926261462366e-05, "loss": 0.6836, "step": 13 }, { "epoch": 0.0548885077186964, "grad_norm": 0.5935850143432617, "learning_rate": 6.545084971874738e-05, "loss": 0.5753, "step": 14 }, { "epoch": 0.058809115412888996, "grad_norm": 0.5951647758483887, "learning_rate": 5e-05, "loss": 0.5539, "step": 15 }, { "epoch": 0.058809115412888996, "eval_loss": 0.5578025579452515, "eval_runtime": 6.9612, "eval_samples_per_second": 61.771, "eval_steps_per_second": 30.886, "step": 15 }, { "epoch": 0.0627297231070816, "grad_norm": 0.633608877658844, "learning_rate": 3.4549150281252636e-05, "loss": 0.5572, "step": 16 }, { "epoch": 0.0666503308012742, "grad_norm": 0.6298178434371948, "learning_rate": 2.061073738537635e-05, "loss": 0.5276, "step": 17 }, { "epoch": 0.07057093849546679, "grad_norm": 0.6569718718528748, "learning_rate": 9.549150281252633e-06, "loss": 0.5219, "step": 18 }, { "epoch": 0.07449154618965939, "grad_norm": 0.6659846305847168, "learning_rate": 2.4471741852423237e-06, "loss": 0.5346, "step": 19 }, { "epoch": 0.078412153883852, "grad_norm": 0.695641815662384, "learning_rate": 0.0, "loss": 0.4932, "step": 20 }, { "epoch": 0.078412153883852, "eval_loss": 0.5049919486045837, "eval_runtime": 6.9646, "eval_samples_per_second": 61.741, "eval_steps_per_second": 30.871, "step": 20 } 
], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": true }, "attributes": {} } }, "total_flos": 5501993254649856.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }