{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9874476987447699,
"eval_steps": 500,
"global_step": 59,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016736401673640166,
"grad_norm": 38.17963514831367,
"learning_rate": 8.333333333333333e-08,
"logits/chosen": -2.289388656616211,
"logits/rejected": -2.266869068145752,
"logps/chosen": -490.1561584472656,
"logps/rejected": -577.6099853515625,
"loss": 0.6931,
"rewards/accuracies": 0.0,
"rewards/chosen": 0.0,
"rewards/margins": 0.0,
"rewards/rejected": 0.0,
"step": 1
},
{
"epoch": 0.16736401673640167,
"grad_norm": 39.09010964537688,
"learning_rate": 4.930057285201027e-07,
"logits/chosen": -2.251622438430786,
"logits/rejected": -2.1955909729003906,
"logps/chosen": -519.5778198242188,
"logps/rejected": -626.5214233398438,
"loss": 0.6765,
"rewards/accuracies": 0.5208333134651184,
"rewards/chosen": -0.12538284063339233,
"rewards/margins": 0.08053015172481537,
"rewards/rejected": -0.2059129923582077,
"step": 10
},
{
"epoch": 0.33472803347280333,
"grad_norm": 37.575241185625224,
"learning_rate": 4.187457503795526e-07,
"logits/chosen": -2.1704227924346924,
"logits/rejected": -2.113105297088623,
"logps/chosen": -574.7396240234375,
"logps/rejected": -690.5613403320312,
"loss": 0.6614,
"rewards/accuracies": 0.643750011920929,
"rewards/chosen": -0.6970238089561462,
"rewards/margins": 0.36311453580856323,
"rewards/rejected": -1.0601383447647095,
"step": 20
},
{
"epoch": 0.502092050209205,
"grad_norm": 40.16374886117498,
"learning_rate": 2.8691164100062034e-07,
"logits/chosen": -2.12542462348938,
"logits/rejected": -2.090336561203003,
"logps/chosen": -564.1171264648438,
"logps/rejected": -661.2869873046875,
"loss": 0.6174,
"rewards/accuracies": 0.640625,
"rewards/chosen": -0.5616307854652405,
"rewards/margins": 0.25331956148147583,
"rewards/rejected": -0.8149503469467163,
"step": 30
},
{
"epoch": 0.6694560669456067,
"grad_norm": 53.94348043999881,
"learning_rate": 1.4248369943086995e-07,
"logits/chosen": -2.1451804637908936,
"logits/rejected": -2.0768136978149414,
"logps/chosen": -549.3674926757812,
"logps/rejected": -696.8201293945312,
"loss": 0.5933,
"rewards/accuracies": 0.703125,
"rewards/chosen": -0.43148988485336304,
"rewards/margins": 0.4352260231971741,
"rewards/rejected": -0.8667157888412476,
"step": 40
},
{
"epoch": 0.8368200836820083,
"grad_norm": 48.7316594517855,
"learning_rate": 3.473909705816111e-08,
"logits/chosen": -2.0674972534179688,
"logits/rejected": -2.0488924980163574,
"logps/chosen": -601.2271118164062,
"logps/rejected": -747.360595703125,
"loss": 0.5873,
"rewards/accuracies": 0.684374988079071,
"rewards/chosen": -0.7331333756446838,
"rewards/margins": 0.4694816470146179,
"rewards/rejected": -1.2026150226593018,
"step": 50
},
{
"epoch": 0.9874476987447699,
"step": 59,
"total_flos": 0.0,
"train_loss": 0.6273458044407731,
"train_runtime": 1643.7461,
"train_samples_per_second": 9.298,
"train_steps_per_second": 0.036
}
],
"logging_steps": 10,
"max_steps": 59,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}