{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.0036865510011290063,
  "eval_steps": 5,
  "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0001843275500564503,
      "grad_norm": 0.10575726628303528,
      "learning_rate": 1e-05,
      "loss": 6.9357,
      "step": 1
    },
    {
      "epoch": 0.0001843275500564503,
      "eval_loss": 6.940097808837891,
      "eval_runtime": 41.232,
      "eval_samples_per_second": 221.6,
      "eval_steps_per_second": 110.812,
      "step": 1
    },
    {
      "epoch": 0.0003686551001129006,
      "grad_norm": 0.09431350976228714,
      "learning_rate": 2e-05,
      "loss": 6.9415,
      "step": 2
    },
    {
      "epoch": 0.0005529826501693509,
      "grad_norm": 0.09502771496772766,
      "learning_rate": 3e-05,
      "loss": 6.943,
      "step": 3
    },
    {
      "epoch": 0.0007373102002258012,
      "grad_norm": 0.09998767077922821,
      "learning_rate": 4e-05,
      "loss": 6.9373,
      "step": 4
    },
    {
      "epoch": 0.0009216377502822516,
      "grad_norm": 0.10265155881643295,
      "learning_rate": 5e-05,
      "loss": 6.9364,
      "step": 5
    },
    {
      "epoch": 0.0009216377502822516,
      "eval_loss": 6.939851760864258,
      "eval_runtime": 16.2124,
      "eval_samples_per_second": 563.582,
      "eval_steps_per_second": 281.822,
      "step": 5
    },
    {
      "epoch": 0.0011059653003387018,
      "grad_norm": 0.10776941478252411,
      "learning_rate": 6e-05,
      "loss": 6.9381,
      "step": 6
    },
    {
      "epoch": 0.0012902928503951523,
      "grad_norm": 0.1045922115445137,
      "learning_rate": 7e-05,
      "loss": 6.9417,
      "step": 7
    },
    {
      "epoch": 0.0014746204004516025,
      "grad_norm": 0.09844892472028732,
      "learning_rate": 8e-05,
      "loss": 6.9342,
      "step": 8
    },
    {
      "epoch": 0.0016589479505080527,
      "grad_norm": 0.10482911020517349,
      "learning_rate": 9e-05,
      "loss": 6.9423,
      "step": 9
    },
    {
      "epoch": 0.0018432755005645032,
      "grad_norm": 0.10163327306509018,
      "learning_rate": 0.0001,
      "loss": 6.9399,
      "step": 10
    },
    {
      "epoch": 0.0018432755005645032,
      "eval_loss": 6.9390177726745605,
      "eval_runtime": 12.1573,
      "eval_samples_per_second": 751.565,
      "eval_steps_per_second": 375.823,
      "step": 10
    },
    {
      "epoch": 0.0020276030506209534,
      "grad_norm": 0.10278677940368652,
      "learning_rate": 9.755282581475769e-05,
      "loss": 6.9418,
      "step": 11
    },
    {
      "epoch": 0.0022119306006774036,
      "grad_norm": 0.105440154671669,
      "learning_rate": 9.045084971874738e-05,
      "loss": 6.9387,
      "step": 12
    },
    {
      "epoch": 0.002396258150733854,
      "grad_norm": 0.09692682325839996,
      "learning_rate": 7.938926261462366e-05,
      "loss": 6.9364,
      "step": 13
    },
    {
      "epoch": 0.0025805857007903045,
      "grad_norm": 0.10328719764947891,
      "learning_rate": 6.545084971874738e-05,
      "loss": 6.9379,
      "step": 14
    },
    {
      "epoch": 0.0027649132508467547,
      "grad_norm": 0.09780015796422958,
      "learning_rate": 5e-05,
      "loss": 6.934,
      "step": 15
    },
    {
      "epoch": 0.0027649132508467547,
      "eval_loss": 6.937956809997559,
      "eval_runtime": 12.06,
      "eval_samples_per_second": 757.629,
      "eval_steps_per_second": 378.856,
      "step": 15
    },
    {
      "epoch": 0.002949240800903205,
      "grad_norm": 0.10987330228090286,
      "learning_rate": 3.4549150281252636e-05,
      "loss": 6.9401,
      "step": 16
    },
    {
      "epoch": 0.003133568350959655,
      "grad_norm": 0.11077705770730972,
      "learning_rate": 2.061073738537635e-05,
      "loss": 6.9358,
      "step": 17
    },
    {
      "epoch": 0.0033178959010161054,
      "grad_norm": 0.10548257827758789,
      "learning_rate": 9.549150281252633e-06,
      "loss": 6.937,
      "step": 18
    },
    {
      "epoch": 0.003502223451072556,
      "grad_norm": 0.1043916642665863,
      "learning_rate": 2.4471741852423237e-06,
      "loss": 6.9403,
      "step": 19
    },
    {
      "epoch": 0.0036865510011290063,
      "grad_norm": 0.10964982956647873,
      "learning_rate": 0.0,
      "loss": 6.9381,
      "step": 20
    },
    {
      "epoch": 0.0036865510011290063,
      "eval_loss": 6.937661170959473,
      "eval_runtime": 12.1995,
      "eval_samples_per_second": 748.968,
      "eval_steps_per_second": 374.525,
      "step": 20
    }
  ],
  "logging_steps": 1,
  "max_steps": 20,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 5,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 105796012032.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
|