{ "best_metric": null, "best_model_checkpoint": null, "epoch": 0.004300381658872225, "eval_steps": 5, "global_step": 15, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.0002866921105914817, "grad_norm": 2.3434934616088867, "learning_rate": 1e-05, "loss": 2.3633, "step": 1 }, { "epoch": 0.0002866921105914817, "eval_loss": 2.5438878536224365, "eval_runtime": 193.7172, "eval_samples_per_second": 30.328, "eval_steps_per_second": 15.166, "step": 1 }, { "epoch": 0.0005733842211829634, "grad_norm": 1.747134804725647, "learning_rate": 2e-05, "loss": 2.4938, "step": 2 }, { "epoch": 0.000860076331774445, "grad_norm": 1.7026760578155518, "learning_rate": 3e-05, "loss": 2.338, "step": 3 }, { "epoch": 0.0011467684423659267, "grad_norm": 1.9191794395446777, "learning_rate": 4e-05, "loss": 2.6796, "step": 4 }, { "epoch": 0.0014334605529574083, "grad_norm": 1.7047806978225708, "learning_rate": 5e-05, "loss": 2.4081, "step": 5 }, { "epoch": 0.0014334605529574083, "eval_loss": 2.463175058364868, "eval_runtime": 48.5493, "eval_samples_per_second": 121.011, "eval_steps_per_second": 60.516, "step": 5 }, { "epoch": 0.00172015266354889, "grad_norm": 1.7617462873458862, "learning_rate": 6e-05, "loss": 2.2562, "step": 6 }, { "epoch": 0.0020068447741403714, "grad_norm": 1.7860451936721802, "learning_rate": 7e-05, "loss": 2.4667, "step": 7 }, { "epoch": 0.0022935368847318534, "grad_norm": 1.786962628364563, "learning_rate": 8e-05, "loss": 2.3607, "step": 8 }, { "epoch": 0.002580228995323335, "grad_norm": 1.9283440113067627, "learning_rate": 9e-05, "loss": 2.2896, "step": 9 }, { "epoch": 0.0028669211059148166, "grad_norm": 1.9662737846374512, "learning_rate": 0.0001, "loss": 2.1083, "step": 10 }, { "epoch": 0.0028669211059148166, "eval_loss": 2.109614610671997, "eval_runtime": 48.5494, "eval_samples_per_second": 121.011, "eval_steps_per_second": 60.516, "step": 10 }, { "epoch": 0.003153613216506298, "grad_norm": 1.9880238771438599, "learning_rate": 9.755282581475769e-05, "loss": 2.2276, "step": 11 }, { "epoch": 0.00344030532709778, "grad_norm": 1.6718554496765137, "learning_rate": 9.045084971874738e-05, "loss": 2.0454, "step": 12 }, { "epoch": 0.0037269974376892617, "grad_norm": 1.6015005111694336, "learning_rate": 7.938926261462366e-05, "loss": 1.7847, "step": 13 }, { "epoch": 0.004013689548280743, "grad_norm": 1.6074224710464478, "learning_rate": 6.545084971874738e-05, "loss": 1.807, "step": 14 }, { "epoch": 0.004300381658872225, "grad_norm": 1.5804719924926758, "learning_rate": 5e-05, "loss": 1.5977, "step": 15 }, { "epoch": 0.004300381658872225, "eval_loss": 1.6562390327453613, "eval_runtime": 52.2266, "eval_samples_per_second": 112.491, "eval_steps_per_second": 56.255, "step": 15 } ], "logging_steps": 1, "max_steps": 20, "num_input_tokens_seen": 0, "num_train_epochs": 1, "save_steps": 5, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1547541468413952.0, "train_batch_size": 2, "trial_name": null, "trial_params": null }