{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 161,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 4.59084677091884,
      "learning_rate": 2.941176470588235e-08,
      "logits/chosen": 0.16597579419612885,
      "logits/rejected": -0.893264651298523,
      "logps/chosen": -687.0694580078125,
      "logps/rejected": -1150.8621826171875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "grad_norm": 4.2570451246729855,
      "learning_rate": 2.941176470588235e-07,
      "logits/chosen": 0.3311888873577118,
      "logits/rejected": -0.695980429649353,
      "logps/chosen": -518.3226928710938,
      "logps/rejected": -964.5950317382812,
      "loss": 0.6931,
      "rewards/accuracies": 0.4722222089767456,
      "rewards/chosen": 0.00023680418962612748,
      "rewards/margins": 0.0004964147228747606,
      "rewards/rejected": -0.0002596105041448027,
      "step": 10
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.6335223260874243,
      "learning_rate": 4.994647308096508e-07,
      "logits/chosen": 0.1593589186668396,
      "logits/rejected": -0.6873095631599426,
      "logps/chosen": -594.7459716796875,
      "logps/rejected": -906.0856323242188,
      "loss": 0.6921,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0020452039316296577,
      "rewards/margins": 0.0012601878261193633,
      "rewards/rejected": 0.0007850162801332772,
      "step": 20
    },
    {
      "epoch": 0.19,
      "grad_norm": 3.767508932992487,
      "learning_rate": 4.900124635964822e-07,
      "logits/chosen": 0.19252721965312958,
      "logits/rejected": -0.6640821099281311,
      "logps/chosen": -542.6539916992188,
      "logps/rejected": -986.5949096679688,
      "loss": 0.6864,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": 0.00898707564920187,
      "rewards/margins": 0.01296447217464447,
      "rewards/rejected": -0.003977396059781313,
      "step": 30
    },
    {
      "epoch": 0.25,
      "grad_norm": 3.787975298295214,
      "learning_rate": 4.691816889268769e-07,
      "logits/chosen": 0.12474404275417328,
      "logits/rejected": -0.6657639741897583,
      "logps/chosen": -521.1658935546875,
      "logps/rejected": -978.1348876953125,
      "loss": 0.6777,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.024259410798549652,
      "rewards/margins": 0.02917664125561714,
      "rewards/rejected": -0.004917231388390064,
      "step": 40
    },
    {
      "epoch": 0.31,
      "grad_norm": 3.7293054585623944,
      "learning_rate": 4.379599518697443e-07,
      "logits/chosen": 0.1471071094274521,
      "logits/rejected": -0.7825916409492493,
      "logps/chosen": -521.8971557617188,
      "logps/rejected": -1019.1888427734375,
      "loss": 0.6663,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.0357883982360363,
      "rewards/margins": 0.05407026410102844,
      "rewards/rejected": -0.01828186586499214,
      "step": 50
    },
    {
      "epoch": 0.37,
      "grad_norm": 3.713443091206878,
      "learning_rate": 3.978274120908956e-07,
      "logits/chosen": 0.2528793215751648,
      "logits/rejected": -0.5950145721435547,
      "logps/chosen": -513.4190673828125,
      "logps/rejected": -923.5573120117188,
      "loss": 0.6465,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.05145885795354843,
      "rewards/margins": 0.11846508085727692,
      "rewards/rejected": -0.06700621545314789,
      "step": 60
    },
    {
      "epoch": 0.43,
      "grad_norm": 3.6423407771754426,
      "learning_rate": 3.506866724646843e-07,
      "logits/chosen": 0.12498810142278671,
      "logits/rejected": -0.5959888100624084,
      "logps/chosen": -588.9588623046875,
      "logps/rejected": -933.2869873046875,
      "loss": 0.6387,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.05268377810716629,
      "rewards/margins": 0.18767260015010834,
      "rewards/rejected": -0.13498882949352264,
      "step": 70
    },
    {
      "epoch": 0.5,
      "grad_norm": 4.012585827479405,
      "learning_rate": 2.987725805040321e-07,
      "logits/chosen": 0.22346019744873047,
      "logits/rejected": -0.7820445895195007,
      "logps/chosen": -509.46636962890625,
      "logps/rejected": -1001.2806396484375,
      "loss": 0.6114,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.06713758409023285,
      "rewards/margins": 0.2159689962863922,
      "rewards/rejected": -0.14883141219615936,
      "step": 80
    },
    {
      "epoch": 0.56,
      "grad_norm": 4.135114682631198,
      "learning_rate": 2.445462787413597e-07,
      "logits/chosen": 0.21334119141101837,
      "logits/rejected": -0.5833973288536072,
      "logps/chosen": -570.7240600585938,
      "logps/rejected": -869.2347412109375,
      "loss": 0.6137,
      "rewards/accuracies": 0.831250011920929,
      "rewards/chosen": 0.06728974729776382,
      "rewards/margins": 0.33051320910453796,
      "rewards/rejected": -0.26322346925735474,
      "step": 90
    },
    {
      "epoch": 0.62,
      "grad_norm": 3.7781884105465315,
      "learning_rate": 1.9057852691845675e-07,
      "logits/chosen": 0.09849376976490021,
      "logits/rejected": -0.5272496938705444,
      "logps/chosen": -549.6043090820312,
      "logps/rejected": -869.72998046875,
      "loss": 0.6083,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.04291980713605881,
      "rewards/margins": 0.2984945476055145,
      "rewards/rejected": -0.2555747330188751,
      "step": 100
    },
    {
      "epoch": 0.62,
      "eval_logits/chosen": 0.483458012342453,
      "eval_logits/rejected": -0.3319720923900604,
      "eval_logps/chosen": -424.87884521484375,
      "eval_logps/rejected": -934.056640625,
      "eval_loss": 0.6257418394088745,
      "eval_rewards/accuracies": 0.7647058963775635,
      "eval_rewards/chosen": 0.04604706913232803,
      "eval_rewards/margins": 0.15231524407863617,
      "eval_rewards/rejected": -0.10626816749572754,
      "eval_runtime": 61.5744,
      "eval_samples_per_second": 8.445,
      "eval_steps_per_second": 0.276,
      "step": 100
    },
    {
      "epoch": 0.68,
      "grad_norm": 4.581420382507564,
      "learning_rate": 1.3942782744524973e-07,
      "logits/chosen": 0.13494840264320374,
      "logits/rejected": -0.6365220546722412,
      "logps/chosen": -582.8129272460938,
      "logps/rejected": -940.0499267578125,
      "loss": 0.5918,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.056283652782440186,
      "rewards/margins": 0.4604174494743347,
      "rewards/rejected": -0.40413379669189453,
      "step": 110
    },
    {
      "epoch": 0.75,
      "grad_norm": 4.661011326004169,
      "learning_rate": 9.351913195398522e-08,
      "logits/chosen": 0.19021789729595184,
      "logits/rejected": -0.5815246105194092,
      "logps/chosen": -587.3818359375,
      "logps/rejected": -930.6414184570312,
      "loss": 0.5884,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 0.04712503030896187,
      "rewards/margins": 0.40363869071006775,
      "rewards/rejected": -0.35651373863220215,
      "step": 120
    },
    {
      "epoch": 0.81,
      "grad_norm": 5.358183039543505,
      "learning_rate": 5.502887922677959e-08,
      "logits/chosen": 0.12769225239753723,
      "logits/rejected": -0.5939640402793884,
      "logps/chosen": -566.1625366210938,
      "logps/rejected": -957.2125854492188,
      "loss": 0.5788,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 0.04238845035433769,
      "rewards/margins": 0.35202011466026306,
      "rewards/rejected": -0.30963170528411865,
      "step": 130
    },
    {
      "epoch": 0.87,
      "grad_norm": 4.092331782234157,
      "learning_rate": 2.5781814616827933e-08,
      "logits/chosen": 0.15436235070228577,
      "logits/rejected": -0.7000457048416138,
      "logps/chosen": -537.4251708984375,
      "logps/rejected": -1025.563720703125,
      "loss": 0.5739,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": 0.042118191719055176,
      "rewards/margins": 0.514610767364502,
      "rewards/rejected": -0.4724925458431244,
      "step": 140
    },
    {
      "epoch": 0.93,
      "grad_norm": 4.210158802402911,
      "learning_rate": 7.1644825466846415e-09,
      "logits/chosen": 0.20159873366355896,
      "logits/rejected": -0.6283941268920898,
      "logps/chosen": -500.183837890625,
      "logps/rejected": -933.0728759765625,
      "loss": 0.5726,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.05504927039146423,
      "rewards/margins": 0.4628073275089264,
      "rewards/rejected": -0.40775808691978455,
      "step": 150
    },
    {
      "epoch": 0.99,
      "grad_norm": 5.053548631311742,
      "learning_rate": 5.949323002271688e-11,
      "logits/chosen": 0.21686339378356934,
      "logits/rejected": -0.6202417612075806,
      "logps/chosen": -521.2774047851562,
      "logps/rejected": -943.6966552734375,
      "loss": 0.5866,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.04219362139701843,
      "rewards/margins": 0.3843230903148651,
      "rewards/rejected": -0.3421294689178467,
      "step": 160
    },
    {
      "epoch": 1.0,
      "step": 161,
      "total_flos": 0.0,
      "train_loss": 0.62646086023461,
      "train_runtime": 2476.5181,
      "train_samples_per_second": 4.151,
      "train_steps_per_second": 0.065
    }
  ],
  "logging_steps": 10,
  "max_steps": 161,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}