{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9968652037617555,
  "eval_steps": 500,
  "global_step": 159,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.125e-08,
      "logits/chosen": -2.6679039001464844,
      "logits/rejected": -2.5546512603759766,
      "logps/chosen": -164.941650390625,
      "logps/pi_response": -79.30451965332031,
      "logps/ref_response": -79.30451965332031,
      "logps/rejected": -140.9091339111328,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.06,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.6873955726623535,
      "logits/rejected": -2.689854145050049,
      "logps/chosen": -237.33799743652344,
      "logps/pi_response": -119.61322784423828,
      "logps/ref_response": -118.72599029541016,
      "logps/rejected": -235.4971466064453,
      "loss": 0.6916,
      "rewards/accuracies": 0.4722222089767456,
      "rewards/chosen": -0.011896245181560516,
      "rewards/margins": 0.0006968590896576643,
      "rewards/rejected": -0.012593105435371399,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.990353313429303e-07,
      "logits/chosen": -2.7025179862976074,
      "logits/rejected": -2.67500638961792,
      "logps/chosen": -270.3321838378906,
      "logps/pi_response": -143.97360229492188,
      "logps/ref_response": -130.33164978027344,
      "logps/rejected": -248.6990966796875,
      "loss": 0.6777,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": -0.16961950063705444,
      "rewards/margins": 0.03370685502886772,
      "rewards/rejected": -0.20332637429237366,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.882681251368548e-07,
      "logits/chosen": -2.5037894248962402,
      "logits/rejected": -2.4394562244415283,
      "logps/chosen": -244.659423828125,
      "logps/pi_response": -130.29275512695312,
      "logps/ref_response": -112.08052825927734,
      "logps/rejected": -252.48519897460938,
      "loss": 0.6661,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": -0.18323533236980438,
      "rewards/margins": 0.1224108338356018,
      "rewards/rejected": -0.3056461215019226,
      "step": 30
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.6604720940421207e-07,
      "logits/chosen": -2.298877239227295,
      "logits/rejected": -2.2800426483154297,
      "logps/chosen": -256.75372314453125,
      "logps/pi_response": -161.98574829101562,
      "logps/ref_response": -110.90777587890625,
      "logps/rejected": -324.766845703125,
      "loss": 0.6473,
      "rewards/accuracies": 0.6812499761581421,
      "rewards/chosen": -0.49192649126052856,
      "rewards/margins": 0.17362619936466217,
      "rewards/rejected": -0.6655526757240295,
      "step": 40
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.3344075855595097e-07,
      "logits/chosen": -2.197845935821533,
      "logits/rejected": -2.1729886531829834,
      "logps/chosen": -286.7721252441406,
      "logps/pi_response": -180.2733917236328,
      "logps/ref_response": -117.65016174316406,
      "logps/rejected": -298.1932678222656,
      "loss": 0.6403,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": -0.5857284069061279,
      "rewards/margins": 0.10038965940475464,
      "rewards/rejected": -0.6861181259155273,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.920161866827889e-07,
      "logits/chosen": -2.1232476234436035,
      "logits/rejected": -2.0888590812683105,
      "logps/chosen": -276.8944396972656,
      "logps/pi_response": -186.1301727294922,
      "logps/ref_response": -108.0089340209961,
      "logps/rejected": -310.871826171875,
      "loss": 0.6327,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.5667150020599365,
      "rewards/margins": 0.3353308141231537,
      "rewards/rejected": -0.9020459055900574,
      "step": 60
    },
    {
      "epoch": 0.44,
      "learning_rate": 3.4376480090239047e-07,
      "logits/chosen": -2.0895802974700928,
      "logits/rejected": -2.040750503540039,
      "logps/chosen": -284.739501953125,
      "logps/pi_response": -197.33822631835938,
      "logps/ref_response": -112.66800689697266,
      "logps/rejected": -308.96185302734375,
      "loss": 0.6129,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": -0.746911883354187,
      "rewards/margins": 0.3234737515449524,
      "rewards/rejected": -1.0703856945037842,
      "step": 70
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.910060778827554e-07,
      "logits/chosen": -2.077698230743408,
      "logits/rejected": -2.019683361053467,
      "logps/chosen": -285.592529296875,
      "logps/pi_response": -201.0194549560547,
      "logps/ref_response": -101.95613098144531,
      "logps/rejected": -329.99365234375,
      "loss": 0.6269,
      "rewards/accuracies": 0.706250011920929,
      "rewards/chosen": -0.8359489440917969,
      "rewards/margins": 0.2896420359611511,
      "rewards/rejected": -1.1255908012390137,
      "step": 80
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.3627616503391812e-07,
      "logits/chosen": -2.1262636184692383,
      "logits/rejected": -2.0831663608551025,
      "logps/chosen": -301.2969970703125,
      "logps/pi_response": -201.43801879882812,
      "logps/ref_response": -100.02299499511719,
      "logps/rejected": -323.3294677734375,
      "loss": 0.6113,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -0.8298277854919434,
      "rewards/margins": 0.32933610677719116,
      "rewards/rejected": -1.1591640710830688,
      "step": 90
    },
    {
      "epoch": 0.63,
      "learning_rate": 1.8220596619089573e-07,
      "logits/chosen": -2.0351803302764893,
      "logits/rejected": -2.0266597270965576,
      "logps/chosen": -320.48626708984375,
      "logps/pi_response": -233.0988311767578,
      "logps/ref_response": -112.04389953613281,
      "logps/rejected": -384.39801025390625,
      "loss": 0.5891,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -1.0577332973480225,
      "rewards/margins": 0.3904644846916199,
      "rewards/rejected": -1.4481977224349976,
      "step": 100
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.3139467229135998e-07,
      "logits/chosen": -2.0701494216918945,
      "logits/rejected": -2.0804686546325684,
      "logps/chosen": -326.40399169921875,
      "logps/pi_response": -230.00381469726562,
      "logps/ref_response": -100.44270324707031,
      "logps/rejected": -366.92144775390625,
      "loss": 0.6036,
      "rewards/accuracies": 0.6499999761581421,
      "rewards/chosen": -1.151390552520752,
      "rewards/margins": 0.32332736253738403,
      "rewards/rejected": -1.4747179746627808,
      "step": 110
    },
    {
      "epoch": 0.75,
      "learning_rate": 8.628481651367875e-08,
      "logits/chosen": -2.0780344009399414,
      "logits/rejected": -2.005913019180298,
      "logps/chosen": -350.05859375,
      "logps/pi_response": -259.2313537597656,
      "logps/ref_response": -129.4988250732422,
      "logps/rejected": -373.9602355957031,
      "loss": 0.5968,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -1.1450227499008179,
      "rewards/margins": 0.2739308774471283,
      "rewards/rejected": -1.4189536571502686,
      "step": 120
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.904486005914027e-08,
      "logits/chosen": -2.1151814460754395,
      "logits/rejected": -2.09941029548645,
      "logps/chosen": -314.6081848144531,
      "logps/pi_response": -237.43270874023438,
      "logps/ref_response": -112.079345703125,
      "logps/rejected": -380.1162414550781,
      "loss": 0.5854,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -1.1245102882385254,
      "rewards/margins": 0.32735899090766907,
      "rewards/rejected": -1.4518693685531616,
      "step": 130
    },
    {
      "epoch": 0.88,
      "learning_rate": 2.1464952759020856e-08,
      "logits/chosen": -2.0996551513671875,
      "logits/rejected": -2.0452041625976562,
      "logps/chosen": -353.1548767089844,
      "logps/pi_response": -273.06121826171875,
      "logps/ref_response": -119.2865219116211,
      "logps/rejected": -395.5834045410156,
      "loss": 0.5958,
      "rewards/accuracies": 0.6937500238418579,
      "rewards/chosen": -1.241917371749878,
      "rewards/margins": 0.4515906274318695,
      "rewards/rejected": -1.6935079097747803,
      "step": 140
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.8708793644441086e-09,
      "logits/chosen": -2.0539727210998535,
      "logits/rejected": -2.024879217147827,
      "logps/chosen": -331.5842590332031,
      "logps/pi_response": -237.0673370361328,
      "logps/ref_response": -101.13041687011719,
      "logps/rejected": -359.00762939453125,
      "loss": 0.5711,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -1.1688032150268555,
      "rewards/margins": 0.34362536668777466,
      "rewards/rejected": -1.5124285221099854,
      "step": 150
    },
    {
      "epoch": 1.0,
      "step": 159,
      "total_flos": 0.0,
      "train_loss": 0.6222286434293544,
      "train_runtime": 4182.97,
      "train_samples_per_second": 4.872,
      "train_steps_per_second": 0.038
    }
  ],
  "logging_steps": 10,
  "max_steps": 159,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|