{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9994767137624281,
  "eval_steps": 100,
  "global_step": 955,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
|
{ |
|
"epoch": 0.010465724751439037, |
|
"grad_norm": 10.83441036661972, |
|
"learning_rate": 5.208333333333333e-08, |
|
"logits/chosen": -2.6288294792175293, |
|
"logits/rejected": -2.5920276641845703, |
|
"logps/chosen": -269.02484130859375, |
|
"logps/rejected": -247.2714385986328, |
|
"loss": 0.6932, |
|
"rewards/accuracies": 0.4625000059604645, |
|
"rewards/chosen": 0.00044025605893693864, |
|
"rewards/margins": 0.000744122255127877, |
|
"rewards/rejected": -0.0003038661670871079, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.020931449502878074, |
|
"grad_norm": 12.596112829643161, |
|
"learning_rate": 1.0416666666666667e-07, |
|
"logits/chosen": -2.613401412963867, |
|
"logits/rejected": -2.5758602619171143, |
|
"logps/chosen": -283.1111755371094, |
|
"logps/rejected": -282.2903747558594, |
|
"loss": 0.6929, |
|
"rewards/accuracies": 0.48750001192092896, |
|
"rewards/chosen": -0.00027975160628557205, |
|
"rewards/margins": -0.0001261378638446331, |
|
"rewards/rejected": -0.00015361374244093895, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.03139717425431711, |
|
"grad_norm": 12.23952141074508, |
|
"learning_rate": 1.5624999999999999e-07, |
|
"logits/chosen": -2.6915743350982666, |
|
"logits/rejected": -2.667309045791626, |
|
"logps/chosen": -270.18572998046875, |
|
"logps/rejected": -276.73675537109375, |
|
"loss": 0.6921, |
|
"rewards/accuracies": 0.5687500238418579, |
|
"rewards/chosen": 0.0009639340569265187, |
|
"rewards/margins": 0.0015827405732125044, |
|
"rewards/rejected": -0.0006188066909089684, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.04186289900575615, |
|
"grad_norm": 11.873929193021679, |
|
"learning_rate": 2.0833333333333333e-07, |
|
"logits/chosen": -2.6651158332824707, |
|
"logits/rejected": -2.5901150703430176, |
|
"logps/chosen": -290.4391784667969, |
|
"logps/rejected": -282.10711669921875, |
|
"loss": 0.69, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.005596032831817865, |
|
"rewards/margins": 0.006025379989296198, |
|
"rewards/rejected": -0.0004293472447898239, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.052328623757195186, |
|
"grad_norm": 13.181894776310466, |
|
"learning_rate": 2.604166666666667e-07, |
|
"logits/chosen": -2.671447277069092, |
|
"logits/rejected": -2.5876405239105225, |
|
"logps/chosen": -266.19891357421875, |
|
"logps/rejected": -236.5002899169922, |
|
"loss": 0.686, |
|
"rewards/accuracies": 0.643750011920929, |
|
"rewards/chosen": 0.013971127569675446, |
|
"rewards/margins": 0.01311213057488203, |
|
"rewards/rejected": 0.0008589973440393806, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.06279434850863422, |
|
"grad_norm": 11.864289091919504, |
|
"learning_rate": 3.1249999999999997e-07, |
|
"logits/chosen": -2.6260297298431396, |
|
"logits/rejected": -2.5904088020324707, |
|
"logps/chosen": -299.61639404296875, |
|
"logps/rejected": -274.44915771484375, |
|
"loss": 0.679, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": 0.04407883808016777, |
|
"rewards/margins": 0.03104178048670292, |
|
"rewards/rejected": 0.013037058524787426, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.07326007326007326, |
|
"grad_norm": 11.92324421228381, |
|
"learning_rate": 3.645833333333333e-07, |
|
"logits/chosen": -2.5302624702453613, |
|
"logits/rejected": -2.5223822593688965, |
|
"logps/chosen": -257.5154113769531, |
|
"logps/rejected": -262.79608154296875, |
|
"loss": 0.6701, |
|
"rewards/accuracies": 0.6937500238418579, |
|
"rewards/chosen": 0.021234791725873947, |
|
"rewards/margins": 0.06698025017976761, |
|
"rewards/rejected": -0.04574545472860336, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.0837257980115123, |
|
"grad_norm": 14.017084052960483, |
|
"learning_rate": 4.1666666666666667e-07, |
|
"logits/chosen": -2.550006151199341, |
|
"logits/rejected": -2.477769613265991, |
|
"logps/chosen": -274.51092529296875, |
|
"logps/rejected": -261.02435302734375, |
|
"loss": 0.6505, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": 0.015317601151764393, |
|
"rewards/margins": 0.11243382841348648, |
|
"rewards/rejected": -0.09711623191833496, |
|
"step": 80 |
|
}, |
|
{ |
|
"epoch": 0.09419152276295134, |
|
"grad_norm": 15.68510838988432, |
|
"learning_rate": 4.6874999999999996e-07, |
|
"logits/chosen": -2.498664140701294, |
|
"logits/rejected": -2.4632701873779297, |
|
"logps/chosen": -261.152587890625, |
|
"logps/rejected": -277.7818603515625, |
|
"loss": 0.6428, |
|
"rewards/accuracies": 0.625, |
|
"rewards/chosen": -0.05554679036140442, |
|
"rewards/margins": 0.07454034686088562, |
|
"rewards/rejected": -0.13008712232112885, |
|
"step": 90 |
|
}, |
|
{ |
|
"epoch": 0.10465724751439037, |
|
"grad_norm": 13.973992370085414, |
|
"learning_rate": 4.999732492681437e-07, |
|
"logits/chosen": -2.4807047843933105, |
|
"logits/rejected": -2.405395030975342, |
|
"logps/chosen": -315.41790771484375, |
|
"logps/rejected": -313.3694763183594, |
|
"loss": 0.6267, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.15593726933002472, |
|
"rewards/margins": 0.17772409319877625, |
|
"rewards/rejected": -0.33366137742996216, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.10465724751439037, |
|
"eval_logits/chosen": -2.488471031188965, |
|
"eval_logits/rejected": -2.4228012561798096, |
|
"eval_logps/chosen": -298.8784484863281, |
|
"eval_logps/rejected": -298.6604919433594, |
|
"eval_loss": 0.6219407916069031, |
|
"eval_rewards/accuracies": 0.7123016119003296, |
|
"eval_rewards/chosen": -0.16910475492477417, |
|
"eval_rewards/margins": 0.2154317945241928, |
|
"eval_rewards/rejected": -0.3845365047454834, |
|
"eval_runtime": 187.9022, |
|
"eval_samples_per_second": 10.644, |
|
"eval_steps_per_second": 0.335, |
|
"step": 100 |
|
}, |
|
{ |
|
"epoch": 0.1151229722658294, |
|
"grad_norm": 15.96260202543978, |
|
"learning_rate": 4.996723692767926e-07, |
|
"logits/chosen": -2.491821765899658, |
|
"logits/rejected": -2.4371533393859863, |
|
"logps/chosen": -274.3268127441406, |
|
"logps/rejected": -287.5362854003906, |
|
"loss": 0.6362, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -0.32872217893600464, |
|
"rewards/margins": 0.17391590774059296, |
|
"rewards/rejected": -0.5026381015777588, |
|
"step": 110 |
|
}, |
|
{ |
|
"epoch": 0.12558869701726844, |
|
"grad_norm": 17.158979848789446, |
|
"learning_rate": 4.990375746213598e-07, |
|
"logits/chosen": -2.429931879043579, |
|
"logits/rejected": -2.371150493621826, |
|
"logps/chosen": -276.56256103515625, |
|
"logps/rejected": -327.0640563964844, |
|
"loss": 0.6013, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.13984180986881256, |
|
"rewards/margins": 0.24633511900901794, |
|
"rewards/rejected": -0.3861769139766693, |
|
"step": 120 |
|
}, |
|
{ |
|
"epoch": 0.1360544217687075, |
|
"grad_norm": 22.882706219149167, |
|
"learning_rate": 4.980697142834314e-07, |
|
"logits/chosen": -2.407017230987549, |
|
"logits/rejected": -2.352510452270508, |
|
"logps/chosen": -307.400390625, |
|
"logps/rejected": -317.0360107421875, |
|
"loss": 0.5918, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.26127806305885315, |
|
"rewards/margins": 0.364439994096756, |
|
"rewards/rejected": -0.6257181167602539, |
|
"step": 130 |
|
}, |
|
{ |
|
"epoch": 0.14652014652014653, |
|
"grad_norm": 17.994424262965605, |
|
"learning_rate": 4.967700826904229e-07, |
|
"logits/chosen": -2.3387370109558105, |
|
"logits/rejected": -2.2937257289886475, |
|
"logps/chosen": -318.0648498535156, |
|
"logps/rejected": -351.2364807128906, |
|
"loss": 0.5806, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.2697739601135254, |
|
"rewards/margins": 0.3528417944908142, |
|
"rewards/rejected": -0.6226157546043396, |
|
"step": 140 |
|
}, |
|
{ |
|
"epoch": 0.15698587127158556, |
|
"grad_norm": 23.796525050931013, |
|
"learning_rate": 4.951404179843962e-07, |
|
"logits/chosen": -2.4819083213806152, |
|
"logits/rejected": -2.419260025024414, |
|
"logps/chosen": -314.9351501464844, |
|
"logps/rejected": -308.2325439453125, |
|
"loss": 0.5866, |
|
"rewards/accuracies": 0.668749988079071, |
|
"rewards/chosen": -0.2703876793384552, |
|
"rewards/margins": 0.3419578969478607, |
|
"rewards/rejected": -0.6123455762863159, |
|
"step": 150 |
|
}, |
|
{ |
|
"epoch": 0.1674515960230246, |
|
"grad_norm": 22.081896847350087, |
|
"learning_rate": 4.931828996974498e-07, |
|
"logits/chosen": -2.057835817337036, |
|
"logits/rejected": -1.8778709173202515, |
|
"logps/chosen": -326.67999267578125, |
|
"logps/rejected": -331.5979919433594, |
|
"loss": 0.5645, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.30867281556129456, |
|
"rewards/margins": 0.44759130477905273, |
|
"rewards/rejected": -0.7562640905380249, |
|
"step": 160 |
|
}, |
|
{ |
|
"epoch": 0.17791732077446362, |
|
"grad_norm": 29.732483466418604, |
|
"learning_rate": 4.909001458367866e-07, |
|
"logits/chosen": -1.0668941736221313, |
|
"logits/rejected": -0.83452308177948, |
|
"logps/chosen": -359.36517333984375, |
|
"logps/rejected": -380.9996032714844, |
|
"loss": 0.5579, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.8089600801467896, |
|
"rewards/margins": 0.5925653576850891, |
|
"rewards/rejected": -1.4015254974365234, |
|
"step": 170 |
|
}, |
|
{ |
|
"epoch": 0.18838304552590268, |
|
"grad_norm": 32.71624465909897, |
|
"learning_rate": 4.882952093833627e-07, |
|
"logits/chosen": -0.9632415771484375, |
|
"logits/rejected": -0.6905760765075684, |
|
"logps/chosen": -316.61285400390625, |
|
"logps/rejected": -393.2853698730469, |
|
"loss": 0.5255, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.7666488289833069, |
|
"rewards/margins": 0.698399007320404, |
|
"rewards/rejected": -1.465047836303711, |
|
"step": 180 |
|
}, |
|
{ |
|
"epoch": 0.1988487702773417, |
|
"grad_norm": 40.766965114697804, |
|
"learning_rate": 4.853715742087946e-07, |
|
"logits/chosen": -0.8538354635238647, |
|
"logits/rejected": -0.7357890009880066, |
|
"logps/chosen": -324.13482666015625, |
|
"logps/rejected": -404.38104248046875, |
|
"loss": 0.5416, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.8815537691116333, |
|
"rewards/margins": 0.6634948253631592, |
|
"rewards/rejected": -1.5450485944747925, |
|
"step": 190 |
|
}, |
|
{ |
|
"epoch": 0.20931449502878074, |
|
"grad_norm": 29.235739332049928, |
|
"learning_rate": 4.821331504159906e-07, |
|
"logits/chosen": -1.136308193206787, |
|
"logits/rejected": -0.8372336626052856, |
|
"logps/chosen": -400.89337158203125, |
|
"logps/rejected": -403.34661865234375, |
|
"loss": 0.5618, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.9023653268814087, |
|
"rewards/margins": 0.5599400997161865, |
|
"rewards/rejected": -1.4623053073883057, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.20931449502878074, |
|
"eval_logits/chosen": -1.164330244064331, |
|
"eval_logits/rejected": -0.902479887008667, |
|
"eval_logps/chosen": -368.7673034667969, |
|
"eval_logps/rejected": -409.5042724609375, |
|
"eval_loss": 0.5558701753616333, |
|
"eval_rewards/accuracies": 0.75, |
|
"eval_rewards/chosen": -0.8679934144020081, |
|
"eval_rewards/margins": 0.6249809265136719, |
|
"eval_rewards/rejected": -1.4929742813110352, |
|
"eval_runtime": 187.9171, |
|
"eval_samples_per_second": 10.643, |
|
"eval_steps_per_second": 0.335, |
|
"step": 200 |
|
}, |
|
{ |
|
"epoch": 0.21978021978021978, |
|
"grad_norm": 31.58922442348176, |
|
"learning_rate": 4.785842691097342e-07, |
|
"logits/chosen": -1.2115342617034912, |
|
"logits/rejected": -0.8818065524101257, |
|
"logps/chosen": -376.1970520019531, |
|
"logps/rejected": -423.3211364746094, |
|
"loss": 0.5707, |
|
"rewards/accuracies": 0.675000011920929, |
|
"rewards/chosen": -0.9338651895523071, |
|
"rewards/margins": 0.5767671465873718, |
|
"rewards/rejected": -1.5106322765350342, |
|
"step": 210 |
|
}, |
|
{ |
|
"epoch": 0.2302459445316588, |
|
"grad_norm": 32.894317779850255, |
|
"learning_rate": 4.7472967660421603e-07, |
|
"logits/chosen": -1.2553176879882812, |
|
"logits/rejected": -0.8748141527175903, |
|
"logps/chosen": -373.59405517578125, |
|
"logps/rejected": -387.2649841308594, |
|
"loss": 0.5356, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.8638602495193481, |
|
"rewards/margins": 0.6470237970352173, |
|
"rewards/rejected": -1.5108838081359863, |
|
"step": 220 |
|
}, |
|
{ |
|
"epoch": 0.24071166928309787, |
|
"grad_norm": 28.08326614419892, |
|
"learning_rate": 4.705745280752585e-07, |
|
"logits/chosen": -0.6662174463272095, |
|
"logits/rejected": -0.4035705029964447, |
|
"logps/chosen": -383.9234313964844, |
|
"logps/rejected": -422.6239318847656, |
|
"loss": 0.5459, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -0.9519649744033813, |
|
"rewards/margins": 0.5889177918434143, |
|
"rewards/rejected": -1.5408827066421509, |
|
"step": 230 |
|
}, |
|
{ |
|
"epoch": 0.25117739403453687, |
|
"grad_norm": 39.48203486696788, |
|
"learning_rate": 4.6612438066572555e-07, |
|
"logits/chosen": -0.6064160466194153, |
|
"logits/rejected": -0.3868633806705475, |
|
"logps/chosen": -335.01751708984375, |
|
"logps/rejected": -390.9185791015625, |
|
"loss": 0.5273, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.674433708190918, |
|
"rewards/margins": 0.6930244565010071, |
|
"rewards/rejected": -1.3674581050872803, |
|
"step": 240 |
|
}, |
|
{ |
|
"epoch": 0.2616431187859759, |
|
"grad_norm": 34.237893602613276, |
|
"learning_rate": 4.6138518605333664e-07, |
|
"logits/chosen": -0.47975221276283264, |
|
"logits/rejected": -0.14331252872943878, |
|
"logps/chosen": -361.3214111328125, |
|
"logps/rejected": -415.11041259765625, |
|
"loss": 0.5078, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -0.8353713750839233, |
|
"rewards/margins": 0.7399575114250183, |
|
"rewards/rejected": -1.575329065322876, |
|
"step": 250 |
|
}, |
|
{ |
|
"epoch": 0.272108843537415, |
|
"grad_norm": 32.902973510279786, |
|
"learning_rate": 4.5636328249082514e-07, |
|
"logits/chosen": -0.4277305603027344, |
|
"logits/rejected": 0.006294989492744207, |
|
"logps/chosen": -380.130615234375, |
|
"logps/rejected": -431.3246154785156, |
|
"loss": 0.5335, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -0.9116414189338684, |
|
"rewards/margins": 0.5790916681289673, |
|
"rewards/rejected": -1.490733027458191, |
|
"step": 260 |
|
}, |
|
{ |
|
"epoch": 0.282574568288854, |
|
"grad_norm": 32.87398996289449, |
|
"learning_rate": 4.510653863290871e-07, |
|
"logits/chosen": -0.31390756368637085, |
|
"logits/rejected": 0.07414981722831726, |
|
"logps/chosen": -358.8498229980469, |
|
"logps/rejected": -385.8482360839844, |
|
"loss": 0.5059, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.5919984579086304, |
|
"rewards/margins": 0.7309141159057617, |
|
"rewards/rejected": -1.322912573814392, |
|
"step": 270 |
|
}, |
|
{ |
|
"epoch": 0.29304029304029305, |
|
"grad_norm": 29.98028421270958, |
|
"learning_rate": 4.4549858303465737e-07, |
|
"logits/chosen": 0.725542426109314, |
|
"logits/rejected": 1.1469298601150513, |
|
"logps/chosen": -435.123046875, |
|
"logps/rejected": -490.77728271484375, |
|
"loss": 0.5528, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.388789415359497, |
|
"rewards/margins": 0.686213493347168, |
|
"rewards/rejected": -2.075002670288086, |
|
"step": 280 |
|
}, |
|
{ |
|
"epoch": 0.3035060177917321, |
|
"grad_norm": 58.327111920842306, |
|
"learning_rate": 4.396703177135261e-07, |
|
"logits/chosen": 0.6794191598892212, |
|
"logits/rejected": 1.2331650257110596, |
|
"logps/chosen": -412.5553283691406, |
|
"logps/rejected": -441.51885986328125, |
|
"loss": 0.5419, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.3977099657058716, |
|
"rewards/margins": 0.6473307609558105, |
|
"rewards/rejected": -2.0450408458709717, |
|
"step": 290 |
|
}, |
|
{ |
|
"epoch": 0.3139717425431711, |
|
"grad_norm": 32.108741797348614, |
|
"learning_rate": 4.335883851539693e-07, |
|
"logits/chosen": 0.3234609365463257, |
|
"logits/rejected": 0.596695065498352, |
|
"logps/chosen": -386.80084228515625, |
|
"logps/rejected": -449.28826904296875, |
|
"loss": 0.5298, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -1.177787184715271, |
|
"rewards/margins": 0.6378301382064819, |
|
"rewards/rejected": -1.815617322921753, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.3139717425431711, |
|
"eval_logits/chosen": 0.10990963876247406, |
|
"eval_logits/rejected": 0.5848421454429626, |
|
"eval_logps/chosen": -387.000732421875, |
|
"eval_logps/rejected": -442.802734375, |
|
"eval_loss": 0.5283085703849792, |
|
"eval_rewards/accuracies": 0.7678571343421936, |
|
"eval_rewards/chosen": -1.0503278970718384, |
|
"eval_rewards/margins": 0.7756310105323792, |
|
"eval_rewards/rejected": -1.8259588479995728, |
|
"eval_runtime": 188.0257, |
|
"eval_samples_per_second": 10.637, |
|
"eval_steps_per_second": 0.335, |
|
"step": 300 |
|
}, |
|
{ |
|
"epoch": 0.32443746729461015, |
|
"grad_norm": 36.14740359571429, |
|
"learning_rate": 4.272609194017105e-07, |
|
"logits/chosen": -0.09506132453680038, |
|
"logits/rejected": 0.9149357080459595, |
|
"logps/chosen": -386.4206237792969, |
|
"logps/rejected": -420.0350646972656, |
|
"loss": 0.5151, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.026588797569275, |
|
"rewards/margins": 0.846285343170166, |
|
"rewards/rejected": -1.8728742599487305, |
|
"step": 310 |
|
}, |
|
{ |
|
"epoch": 0.3349031920460492, |
|
"grad_norm": 81.36672999217505, |
|
"learning_rate": 4.2069638288135547e-07, |
|
"logits/chosen": 0.712912380695343, |
|
"logits/rejected": 1.3047354221343994, |
|
"logps/chosen": -368.98211669921875, |
|
"logps/rejected": -457.4791564941406, |
|
"loss": 0.539, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.1596534252166748, |
|
"rewards/margins": 0.9091463088989258, |
|
"rewards/rejected": -2.0687994956970215, |
|
"step": 320 |
|
}, |
|
{ |
|
"epoch": 0.3453689167974882, |
|
"grad_norm": 37.1171085131941, |
|
"learning_rate": 4.139035550786494e-07, |
|
"logits/chosen": 0.6312053203582764, |
|
"logits/rejected": 1.20461106300354, |
|
"logps/chosen": -362.6842041015625, |
|
"logps/rejected": -418.4046936035156, |
|
"loss": 0.5228, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.0634013414382935, |
|
"rewards/margins": 0.7995752096176147, |
|
"rewards/rejected": -1.8629766702651978, |
|
"step": 330 |
|
}, |
|
{ |
|
"epoch": 0.35583464154892724, |
|
"grad_norm": 37.627503772606644, |
|
"learning_rate": 4.0689152079869306e-07, |
|
"logits/chosen": 0.016367901116609573, |
|
"logits/rejected": 0.5447463989257812, |
|
"logps/chosen": -359.12408447265625, |
|
"logps/rejected": -400.4968566894531, |
|
"loss": 0.5483, |
|
"rewards/accuracies": 0.7250000238418579, |
|
"rewards/chosen": -0.8963689804077148, |
|
"rewards/margins": 0.7486964464187622, |
|
"rewards/rejected": -1.6450653076171875, |
|
"step": 340 |
|
}, |
|
{ |
|
"epoch": 0.3663003663003663, |
|
"grad_norm": 33.01557559410967, |
|
"learning_rate": 3.99669658015821e-07, |
|
"logits/chosen": -0.16072605550289154, |
|
"logits/rejected": 0.38478875160217285, |
|
"logps/chosen": -390.49713134765625, |
|
"logps/rejected": -430.6659240722656, |
|
"loss": 0.5069, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.992621123790741, |
|
"rewards/margins": 0.7531676292419434, |
|
"rewards/rejected": -1.745788812637329, |
|
"step": 350 |
|
}, |
|
{ |
|
"epoch": 0.37676609105180536, |
|
"grad_norm": 44.68187947620173, |
|
"learning_rate": 3.92247625331392e-07, |
|
"logits/chosen": -0.1652616709470749, |
|
"logits/rejected": 0.44031864404678345, |
|
"logps/chosen": -382.75506591796875, |
|
"logps/rejected": -438.5421447753906, |
|
"loss": 0.4836, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -0.9687082171440125, |
|
"rewards/margins": 0.7890796065330505, |
|
"rewards/rejected": -1.7577879428863525, |
|
"step": 360 |
|
}, |
|
{ |
|
"epoch": 0.3872318158032444, |
|
"grad_norm": 33.273260026493126, |
|
"learning_rate": 3.846353490562664e-07, |
|
"logits/chosen": 0.03936319425702095, |
|
"logits/rejected": 0.5893961191177368, |
|
"logps/chosen": -384.0589294433594, |
|
"logps/rejected": -427.94195556640625, |
|
"loss": 0.5164, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -1.2078273296356201, |
|
"rewards/margins": 0.715100109577179, |
|
"rewards/rejected": -1.9229274988174438, |
|
"step": 370 |
|
}, |
|
{ |
|
"epoch": 0.3976975405546834, |
|
"grad_norm": 34.01539660056953, |
|
"learning_rate": 3.768430099352445e-07, |
|
"logits/chosen": -0.5641449689865112, |
|
"logits/rejected": 0.1418592482805252, |
|
"logps/chosen": -361.6178894042969, |
|
"logps/rejected": -431.11676025390625, |
|
"loss": 0.5157, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.0697083473205566, |
|
"rewards/margins": 0.7781006693840027, |
|
"rewards/rejected": -1.847809076309204, |
|
"step": 380 |
|
}, |
|
{ |
|
"epoch": 0.40816326530612246, |
|
"grad_norm": 47.794540434199334, |
|
"learning_rate": 3.6888102953122304e-07, |
|
"logits/chosen": -0.6706582307815552, |
|
"logits/rejected": -0.07904417812824249, |
|
"logps/chosen": -381.03082275390625, |
|
"logps/rejected": -446.5023498535156, |
|
"loss": 0.527, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -0.9941921234130859, |
|
"rewards/margins": 0.7871489524841309, |
|
"rewards/rejected": -1.7813411951065063, |
|
"step": 390 |
|
}, |
|
{ |
|
"epoch": 0.4186289900575615, |
|
"grad_norm": 105.50541790205155, |
|
"learning_rate": 3.607600562872785e-07, |
|
"logits/chosen": -0.7714785933494568, |
|
"logits/rejected": -0.36160725355148315, |
|
"logps/chosen": -372.08929443359375, |
|
"logps/rejected": -427.34033203125, |
|
"loss": 0.5585, |
|
"rewards/accuracies": 0.6812499761581421, |
|
"rewards/chosen": -1.0209808349609375, |
|
"rewards/margins": 0.5754439830780029, |
|
"rewards/rejected": -1.5964248180389404, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.4186289900575615, |
|
"eval_logits/chosen": -1.2259753942489624, |
|
"eval_logits/rejected": -0.6461271643638611, |
|
"eval_logps/chosen": -368.9012145996094, |
|
"eval_logps/rejected": -419.9412841796875, |
|
"eval_loss": 0.5200169682502747, |
|
"eval_rewards/accuracies": 0.7638888955116272, |
|
"eval_rewards/chosen": -0.8693323731422424, |
|
"eval_rewards/margins": 0.7280117869377136, |
|
"eval_rewards/rejected": -1.5973442792892456, |
|
"eval_runtime": 188.0525, |
|
"eval_samples_per_second": 10.635, |
|
"eval_steps_per_second": 0.335, |
|
"step": 400 |
|
}, |
|
{ |
|
"epoch": 0.4290947148090005, |
|
"grad_norm": 26.241716083541785, |
|
"learning_rate": 3.5249095128531856e-07, |
|
"logits/chosen": -0.8230039477348328, |
|
"logits/rejected": -0.25722962617874146, |
|
"logps/chosen": -365.2613830566406, |
|
"logps/rejected": -401.42303466796875, |
|
"loss": 0.533, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -0.935257613658905, |
|
"rewards/margins": 0.6104739904403687, |
|
"rewards/rejected": -1.545731782913208, |
|
"step": 410 |
|
}, |
|
{ |
|
"epoch": 0.43956043956043955, |
|
"grad_norm": 39.03130812586807, |
|
"learning_rate": 3.4408477372034736e-07, |
|
"logits/chosen": -0.37245553731918335, |
|
"logits/rejected": 0.7487915754318237, |
|
"logps/chosen": -372.9959716796875, |
|
"logps/rejected": -427.50543212890625, |
|
"loss": 0.5211, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -0.9082446098327637, |
|
"rewards/margins": 0.8466314077377319, |
|
"rewards/rejected": -1.7548761367797852, |
|
"step": 420 |
|
}, |
|
{ |
|
"epoch": 0.4500261643118786, |
|
"grad_norm": 36.69584086608421, |
|
"learning_rate": 3.3555276610977276e-07, |
|
"logits/chosen": 0.6589815020561218, |
|
"logits/rejected": 1.4469521045684814, |
|
"logps/chosen": -358.0375671386719, |
|
"logps/rejected": -443.81805419921875, |
|
"loss": 0.5084, |
|
"rewards/accuracies": 0.75, |
|
"rewards/chosen": -1.042842149734497, |
|
"rewards/margins": 0.8935502767562866, |
|
"rewards/rejected": -1.9363921880722046, |
|
"step": 430 |
|
}, |
|
{ |
|
"epoch": 0.4604918890633176, |
|
"grad_norm": 51.30602318273095, |
|
"learning_rate": 3.269063392575352e-07, |
|
"logits/chosen": 0.5997369885444641, |
|
"logits/rejected": 1.3968632221221924, |
|
"logps/chosen": -401.80572509765625, |
|
"logps/rejected": -433.399169921875, |
|
"loss": 0.5186, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.0663107633590698, |
|
"rewards/margins": 0.7133516073226929, |
|
"rewards/rejected": -1.7796624898910522, |
|
"step": 440 |
|
}, |
|
{ |
|
"epoch": 0.47095761381475665, |
|
"grad_norm": 33.49322912748958, |
|
"learning_rate": 3.1815705699316964e-07, |
|
"logits/chosen": 0.1546940952539444, |
|
"logits/rejected": 0.9945381879806519, |
|
"logps/chosen": -377.54681396484375, |
|
"logps/rejected": -482.41571044921875, |
|
"loss": 0.5122, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -0.9515895843505859, |
|
"rewards/margins": 1.089389443397522, |
|
"rewards/rejected": -2.0409789085388184, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.48142333856619574, |
|
"grad_norm": 30.664524114008092, |
|
"learning_rate": 3.0931662070620794e-07, |
|
"logits/chosen": -0.1982288807630539, |
|
"logits/rejected": 0.6370102167129517, |
|
"logps/chosen": -359.25714111328125, |
|
"logps/rejected": -424.4283142089844, |
|
"loss": 0.5352, |
|
"rewards/accuracies": 0.71875, |
|
"rewards/chosen": -1.0179884433746338, |
|
"rewards/margins": 0.7760909795761108, |
|
"rewards/rejected": -1.7940794229507446, |
|
"step": 460 |
|
}, |
|
{ |
|
"epoch": 0.49188906331763477, |
|
"grad_norm": 46.179707835042336, |
|
"learning_rate": 3.003968536966078e-07, |
|
"logits/chosen": -0.19681891798973083, |
|
"logits/rejected": 0.48038846254348755, |
|
"logps/chosen": -375.6156311035156, |
|
"logps/rejected": -413.18896484375, |
|
"loss": 0.4836, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.9182689785957336, |
|
"rewards/margins": 0.7142398357391357, |
|
"rewards/rejected": -1.6325088739395142, |
|
"step": 470 |
|
}, |
|
{ |
|
"epoch": 0.5023547880690737, |
|
"grad_norm": 39.61595548058563, |
|
"learning_rate": 2.9140968536213693e-07, |
|
"logits/chosen": 0.666913628578186, |
|
"logits/rejected": 1.8080024719238281, |
|
"logps/chosen": -383.5103454589844, |
|
"logps/rejected": -473.51617431640625, |
|
"loss": 0.508, |
|
"rewards/accuracies": 0.8062499761581421, |
|
"rewards/chosen": -1.1629759073257446, |
|
"rewards/margins": 1.0592198371887207, |
|
"rewards/rejected": -2.2221953868865967, |
|
"step": 480 |
|
}, |
|
{ |
|
"epoch": 0.5128205128205128, |
|
"grad_norm": 39.54220015635658, |
|
"learning_rate": 2.823671352438608e-07, |
|
"logits/chosen": 0.3436744213104248, |
|
"logits/rejected": 1.8912662267684937, |
|
"logps/chosen": -413.87078857421875, |
|
"logps/rejected": -469.492431640625, |
|
"loss": 0.4843, |
|
"rewards/accuracies": 0.8500000238418579, |
|
"rewards/chosen": -1.0193939208984375, |
|
"rewards/margins": 1.1113532781600952, |
|
"rewards/rejected": -2.1307473182678223, |
|
"step": 490 |
|
}, |
|
{ |
|
"epoch": 0.5232862375719518, |
|
"grad_norm": 33.25619580338524, |
|
"learning_rate": 2.73281296951072e-07, |
|
"logits/chosen": 0.5848696827888489, |
|
"logits/rejected": 1.3165467977523804, |
|
"logps/chosen": -379.54254150390625, |
|
"logps/rejected": -430.6253967285156, |
|
"loss": 0.5074, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.2089840173721313, |
|
"rewards/margins": 0.8229929804801941, |
|
"rewards/rejected": -2.0319769382476807, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.5232862375719518, |
|
"eval_logits/chosen": 0.16514113545417786, |
|
"eval_logits/rejected": 0.9655360579490662, |
|
"eval_logps/chosen": -402.58990478515625, |
|
"eval_logps/rejected": -472.5686950683594, |
|
"eval_loss": 0.5042572021484375, |
|
"eval_rewards/accuracies": 0.7698412537574768, |
|
"eval_rewards/chosen": -1.2062196731567383, |
|
"eval_rewards/margins": 0.9173988699913025, |
|
"eval_rewards/rejected": -2.1236186027526855, |
|
"eval_runtime": 188.128, |
|
"eval_samples_per_second": 10.631, |
|
"eval_steps_per_second": 0.335, |
|
"step": 500 |
|
}, |
|
{ |
|
"epoch": 0.533751962323391, |
|
"grad_norm": 37.202500300291135, |
|
"learning_rate": 2.641643219871597e-07, |
|
"logits/chosen": -0.01917283609509468, |
|
"logits/rejected": 0.7179991006851196, |
|
"logps/chosen": -378.31353759765625, |
|
"logps/rejected": -470.5415954589844, |
|
"loss": 0.4773, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": -0.9437530636787415, |
|
"rewards/margins": 1.1235778331756592, |
|
"rewards/rejected": -2.067330837249756, |
|
"step": 510 |
|
}, |
|
{ |
|
"epoch": 0.54421768707483, |
|
"grad_norm": 35.63609405402596, |
|
"learning_rate": 2.550284034980507e-07, |
|
"logits/chosen": 0.1290065050125122, |
|
"logits/rejected": 0.8995389938354492, |
|
"logps/chosen": -391.45245361328125, |
|
"logps/rejected": -450.935302734375, |
|
"loss": 0.5197, |
|
"rewards/accuracies": 0.699999988079071, |
|
"rewards/chosen": -1.1938081979751587, |
|
"rewards/margins": 0.8160873651504517, |
|
"rewards/rejected": -2.0098955631256104, |
|
"step": 520 |
|
}, |
|
{ |
|
"epoch": 0.554683411826269, |
|
"grad_norm": 37.764443273543485, |
|
"learning_rate": 2.4588575996495794e-07, |
|
"logits/chosen": -0.4371569752693176, |
|
"logits/rejected": 0.2857760488986969, |
|
"logps/chosen": -350.2786560058594, |
|
"logps/rejected": -428.732666015625, |
|
"loss": 0.505, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -0.8153169751167297, |
|
"rewards/margins": 0.9217718839645386, |
|
"rewards/rejected": -1.7370887994766235, |
|
"step": 530 |
|
}, |
|
{ |
|
"epoch": 0.565149136577708, |
|
"grad_norm": 34.380579779251335, |
|
"learning_rate": 2.367486188632446e-07, |
|
"logits/chosen": -0.2316119372844696, |
|
"logits/rejected": 0.5598622560501099, |
|
"logps/chosen": -385.16131591796875, |
|
"logps/rejected": -442.238525390625, |
|
"loss": 0.5165, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -0.9481117129325867, |
|
"rewards/margins": 0.8683555722236633, |
|
"rewards/rejected": -1.816467523574829, |
|
"step": 540 |
|
}, |
|
{ |
|
"epoch": 0.5756148613291471, |
|
"grad_norm": 44.258921212465225, |
|
"learning_rate": 2.276292003092593e-07, |
|
"logits/chosen": 0.2254778891801834, |
|
"logits/rejected": 1.186446189880371, |
|
"logps/chosen": -401.5592346191406, |
|
"logps/rejected": -446.5083923339844, |
|
"loss": 0.5346, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.3028686046600342, |
|
"rewards/margins": 0.8693955540657043, |
|
"rewards/rejected": -2.172264575958252, |
|
"step": 550 |
|
}, |
|
{ |
|
"epoch": 0.5860805860805861, |
|
"grad_norm": 58.17960656157151, |
|
"learning_rate": 2.185397007170141e-07, |
|
"logits/chosen": 0.4589771628379822, |
|
"logits/rejected": 1.423572301864624, |
|
"logps/chosen": -409.08038330078125, |
|
"logps/rejected": -472.00299072265625, |
|
"loss": 0.5248, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -1.262632131576538, |
|
"rewards/margins": 0.9083721041679382, |
|
"rewards/rejected": -2.171004056930542, |
|
"step": 560 |
|
}, |
|
{ |
|
"epoch": 0.5965463108320251, |
|
"grad_norm": 37.58413657059095, |
|
"learning_rate": 2.094922764865619e-07, |
|
"logits/chosen": 0.26942554116249084, |
|
"logits/rejected": 1.2135722637176514, |
|
"logps/chosen": -423.8333435058594, |
|
"logps/rejected": -494.722412109375, |
|
"loss": 0.5078, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.44338858127594, |
|
"rewards/margins": 0.8417580723762512, |
|
"rewards/rejected": -2.285146474838257, |
|
"step": 570 |
|
}, |
|
{ |
|
"epoch": 0.6070120355834642, |
|
"grad_norm": 37.085175613726506, |
|
"learning_rate": 2.0049902774588797e-07, |
|
"logits/chosen": 0.0792941227555275, |
|
"logits/rejected": 0.9457789659500122, |
|
"logps/chosen": -383.8653259277344, |
|
"logps/rejected": -429.4107971191406, |
|
"loss": 0.5141, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.3052608966827393, |
|
"rewards/margins": 0.8605607748031616, |
|
"rewards/rejected": -2.1658215522766113, |
|
"step": 580 |
|
}, |
|
{ |
|
"epoch": 0.6174777603349032, |
|
"grad_norm": 36.23072166325168, |
|
"learning_rate": 1.9157198216806238e-07, |
|
"logits/chosen": -0.4192202687263489, |
|
"logits/rejected": 0.5567474961280823, |
|
"logps/chosen": -407.47918701171875, |
|
"logps/rejected": -483.21771240234375, |
|
"loss": 0.4922, |
|
"rewards/accuracies": 0.6875, |
|
"rewards/chosen": -1.2952667474746704, |
|
"rewards/margins": 0.8493944406509399, |
|
"rewards/rejected": -2.1446611881256104, |
|
"step": 590 |
|
}, |
|
{ |
|
"epoch": 0.6279434850863422, |
|
"grad_norm": 41.952323384763105, |
|
"learning_rate": 1.8272307888529274e-07, |
|
"logits/chosen": 0.15778926014900208, |
|
"logits/rejected": 0.9704158902168274, |
|
"logps/chosen": -409.7381286621094, |
|
"logps/rejected": -479.462158203125, |
|
"loss": 0.4678, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.2721447944641113, |
|
"rewards/margins": 0.9216303825378418, |
|
"rewards/rejected": -2.193774938583374, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.6279434850863422, |
|
"eval_logits/chosen": -0.1275799423456192, |
|
"eval_logits/rejected": 0.7528029680252075, |
|
"eval_logps/chosen": -405.3749694824219, |
|
"eval_logps/rejected": -480.077392578125, |
|
"eval_loss": 0.4995136857032776, |
|
"eval_rewards/accuracies": 0.7698412537574768, |
|
"eval_rewards/chosen": -1.234070062637329, |
|
"eval_rewards/margins": 0.9646352529525757, |
|
"eval_rewards/rejected": -2.1987051963806152, |
|
"eval_runtime": 188.2618, |
|
"eval_samples_per_second": 10.624, |
|
"eval_steps_per_second": 0.335, |
|
"step": 600 |
|
}, |
|
{ |
|
"epoch": 0.6384092098377813, |
|
"grad_norm": 36.8980365038612, |
|
"learning_rate": 1.7396415252139288e-07, |
|
"logits/chosen": -0.17294003069400787, |
|
"logits/rejected": 1.0471775531768799, |
|
"logps/chosen": -429.52301025390625, |
|
"logps/rejected": -468.57666015625, |
|
"loss": 0.504, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.2395267486572266, |
|
"rewards/margins": 0.9991617202758789, |
|
"rewards/rejected": -2.2386884689331055, |
|
"step": 610 |
|
}, |
|
{ |
|
"epoch": 0.6488749345892203, |
|
"grad_norm": 37.2641783147775, |
|
"learning_rate": 1.6530691736402316e-07, |
|
"logits/chosen": 0.1285095065832138, |
|
"logits/rejected": 0.9715364575386047, |
|
"logps/chosen": -385.4737243652344, |
|
"logps/rejected": -448.80560302734375, |
|
"loss": 0.4733, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.1445059776306152, |
|
"rewards/margins": 0.9794843792915344, |
|
"rewards/rejected": -2.123990297317505, |
|
"step": 620 |
|
}, |
|
{ |
|
"epoch": 0.6593406593406593, |
|
"grad_norm": 60.096239521040786, |
|
"learning_rate": 1.5676295169786864e-07, |
|
"logits/chosen": 0.053101230412721634, |
|
"logits/rejected": 1.1824990510940552, |
|
"logps/chosen": -383.7541809082031, |
|
"logps/rejected": -444.8204650878906, |
|
"loss": 0.4758, |
|
"rewards/accuracies": 0.800000011920929, |
|
"rewards/chosen": -1.0906901359558105, |
|
"rewards/margins": 0.9977191686630249, |
|
"rewards/rejected": -2.088409423828125, |
|
"step": 630 |
|
}, |
|
{ |
|
"epoch": 0.6698063840920984, |
|
"grad_norm": 46.96867253083772, |
|
"learning_rate": 1.483436823197092e-07, |
|
"logits/chosen": 0.7413724064826965, |
|
"logits/rejected": 1.8815301656723022, |
|
"logps/chosen": -407.17962646484375, |
|
"logps/rejected": -458.6732482910156, |
|
"loss": 0.4872, |
|
"rewards/accuracies": 0.7875000238418579, |
|
"rewards/chosen": -1.401586651802063, |
|
"rewards/margins": 0.8984085321426392, |
|
"rewards/rejected": -2.299994945526123, |
|
"step": 640 |
|
}, |
|
{ |
|
"epoch": 0.6802721088435374, |
|
"grad_norm": 42.88319624739259, |
|
"learning_rate": 1.4006036925609243e-07, |
|
"logits/chosen": 0.10141663253307343, |
|
"logits/rejected": 0.8694413900375366, |
|
"logps/chosen": -378.79437255859375, |
|
"logps/rejected": -466.408935546875, |
|
"loss": 0.4933, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.1111141443252563, |
|
"rewards/margins": 0.8358948826789856, |
|
"rewards/rejected": -1.9470090866088867, |
|
"step": 650 |
|
}, |
|
{ |
|
"epoch": 0.6907378335949764, |
|
"grad_norm": 51.88763965072434, |
|
"learning_rate": 1.319240907040458e-07, |
|
"logits/chosen": 0.04605213552713394, |
|
"logits/rejected": 1.151568055152893, |
|
"logps/chosen": -388.32391357421875, |
|
"logps/rejected": -469.0081481933594, |
|
"loss": 0.4886, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -1.061607003211975, |
|
"rewards/margins": 1.0539276599884033, |
|
"rewards/rejected": -2.115534782409668, |
|
"step": 660 |
|
}, |
|
{ |
|
"epoch": 0.7012035583464155, |
|
"grad_norm": 55.401157791020196, |
|
"learning_rate": 1.239457282149695e-07, |
|
"logits/chosen": -0.006102992687374353, |
|
"logits/rejected": 0.8945139050483704, |
|
"logps/chosen": -399.7294616699219, |
|
"logps/rejected": -500.3643493652344, |
|
"loss": 0.4712, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.1118131875991821, |
|
"rewards/margins": 1.06088387966156, |
|
"rewards/rejected": -2.172697067260742, |
|
"step": 670 |
|
}, |
|
{ |
|
"epoch": 0.7116692830978545, |
|
"grad_norm": 40.22762583896388, |
|
"learning_rate": 1.1613595214152711e-07, |
|
"logits/chosen": 0.2633221745491028, |
|
"logits/rejected": 1.1040217876434326, |
|
"logps/chosen": -423.1806640625, |
|
"logps/rejected": -509.38189697265625, |
|
"loss": 0.5052, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.1152422428131104, |
|
"rewards/margins": 1.0996170043945312, |
|
"rewards/rejected": -2.2148590087890625, |
|
"step": 680 |
|
}, |
|
{ |
|
"epoch": 0.7221350078492935, |
|
"grad_norm": 30.575855567467922, |
|
"learning_rate": 1.0850520736699362e-07, |
|
"logits/chosen": 0.04858311265707016, |
|
"logits/rejected": 1.2780332565307617, |
|
"logps/chosen": -390.74359130859375, |
|
"logps/rejected": -458.5567932128906, |
|
"loss": 0.4558, |
|
"rewards/accuracies": 0.793749988079071, |
|
"rewards/chosen": -1.2008898258209229, |
|
"rewards/margins": 1.0576915740966797, |
|
"rewards/rejected": -2.2585813999176025, |
|
"step": 690 |
|
}, |
|
{ |
|
"epoch": 0.7326007326007326, |
|
"grad_norm": 34.53526618452247, |
|
"learning_rate": 1.0106369933615042e-07, |
|
"logits/chosen": 0.4477645456790924, |
|
"logits/rejected": 1.1958597898483276, |
|
"logps/chosen": -393.6428527832031, |
|
"logps/rejected": -463.24969482421875, |
|
"loss": 0.4767, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -1.3954741954803467, |
|
"rewards/margins": 0.8515383005142212, |
|
"rewards/rejected": -2.2470126152038574, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.7326007326007326, |
|
"eval_logits/chosen": 0.3566991090774536, |
|
"eval_logits/rejected": 1.195650577545166, |
|
"eval_logps/chosen": -416.3518371582031, |
|
"eval_logps/rejected": -496.01458740234375, |
|
"eval_loss": 0.49741485714912415, |
|
"eval_rewards/accuracies": 0.761904776096344, |
|
"eval_rewards/chosen": -1.3438389301300049, |
|
"eval_rewards/margins": 1.014238715171814, |
|
"eval_rewards/rejected": -2.3580777645111084, |
|
"eval_runtime": 187.7233, |
|
"eval_samples_per_second": 10.654, |
|
"eval_steps_per_second": 0.336, |
|
"step": 700 |
|
}, |
|
{ |
|
"epoch": 0.7430664573521716, |
|
"grad_norm": 45.7676677868217, |
|
"learning_rate": 9.382138040640714e-08, |
|
"logits/chosen": 0.588351845741272, |
|
"logits/rejected": 1.5306826829910278, |
|
"logps/chosen": -412.3795471191406, |
|
"logps/rejected": -485.49188232421875, |
|
"loss": 0.5374, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.5390466451644897, |
|
"rewards/margins": 0.8864930272102356, |
|
"rewards/rejected": -2.42553973197937, |
|
"step": 710 |
|
}, |
|
{ |
|
"epoch": 0.7535321821036107, |
|
"grad_norm": 36.760991249638835, |
|
"learning_rate": 8.678793653740632e-08, |
|
"logits/chosen": 0.5254176259040833, |
|
"logits/rejected": 1.342352032661438, |
|
"logps/chosen": -417.776611328125, |
|
"logps/rejected": -491.72088623046875, |
|
"loss": 0.4792, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.340393304824829, |
|
"rewards/margins": 1.1020629405975342, |
|
"rewards/rejected": -2.442456007003784, |
|
"step": 720 |
|
}, |
|
{ |
|
"epoch": 0.7639979068550498, |
|
"grad_norm": 39.73391988291906, |
|
"learning_rate": 7.997277433690983e-08, |
|
"logits/chosen": 0.5563046932220459, |
|
"logits/rejected": 1.2839845418930054, |
|
"logps/chosen": -424.78240966796875, |
|
"logps/rejected": -473.6419372558594, |
|
"loss": 0.4991, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.5942018032073975, |
|
"rewards/margins": 0.7723394632339478, |
|
"rewards/rejected": -2.3665413856506348, |
|
"step": 730 |
|
}, |
|
{ |
|
"epoch": 0.7744636316064888, |
|
"grad_norm": 51.17374712345499, |
|
"learning_rate": 7.338500848029602e-08, |
|
"logits/chosen": 0.4739949107170105, |
|
"logits/rejected": 1.098067045211792, |
|
"logps/chosen": -395.0653076171875, |
|
"logps/rejected": -454.8160705566406, |
|
"loss": 0.5093, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.4219887256622314, |
|
"rewards/margins": 0.8037675619125366, |
|
"rewards/rejected": -2.2257564067840576, |
|
"step": 740 |
|
}, |
|
{ |
|
"epoch": 0.7849293563579278, |
|
"grad_norm": 35.40234763834154, |
|
"learning_rate": 6.70334495204884e-08, |
|
"logits/chosen": 0.38877877593040466, |
|
"logits/rejected": 1.059231162071228, |
|
"logps/chosen": -398.44549560546875, |
|
"logps/rejected": -491.61737060546875, |
|
"loss": 0.4763, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.5158002376556396, |
|
"rewards/margins": 0.9638466835021973, |
|
"rewards/rejected": -2.479646921157837, |
|
"step": 750 |
|
}, |
|
{ |
|
"epoch": 0.7953950811093669, |
|
"grad_norm": 32.10738245200245, |
|
"learning_rate": 6.092659210462231e-08, |
|
"logits/chosen": 0.5303295850753784, |
|
"logits/rejected": 1.1202110052108765, |
|
"logps/chosen": -394.3402404785156, |
|
"logps/rejected": -486.60028076171875, |
|
"loss": 0.5102, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.5299334526062012, |
|
"rewards/margins": 0.9059597849845886, |
|
"rewards/rejected": -2.4358930587768555, |
|
"step": 760 |
|
}, |
|
{ |
|
"epoch": 0.8058608058608059, |
|
"grad_norm": 27.247225736152675, |
|
"learning_rate": 5.507260361320737e-08, |
|
"logits/chosen": 0.18302218616008759, |
|
"logits/rejected": 0.5413929224014282, |
|
"logps/chosen": -445.942138671875, |
|
"logps/rejected": -551.6734008789062, |
|
"loss": 0.464, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -1.4259153604507446, |
|
"rewards/margins": 0.8710676431655884, |
|
"rewards/rejected": -2.296983242034912, |
|
"step": 770 |
|
}, |
|
{ |
|
"epoch": 0.8163265306122449, |
|
"grad_norm": 45.84319205475222, |
|
"learning_rate": 4.947931323697982e-08, |
|
"logits/chosen": 0.2840246260166168, |
|
"logits/rejected": 0.9958732724189758, |
|
"logps/chosen": -458.2361755371094, |
|
"logps/rejected": -497.19073486328125, |
|
"loss": 0.5094, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.3927518129348755, |
|
"rewards/margins": 0.9035183787345886, |
|
"rewards/rejected": -2.2962703704833984, |
|
"step": 780 |
|
}, |
|
{ |
|
"epoch": 0.826792255363684, |
|
"grad_norm": 44.089647753755735, |
|
"learning_rate": 4.415420150605398e-08, |
|
"logits/chosen": 0.5449953079223633, |
|
"logits/rejected": 1.7322685718536377, |
|
"logps/chosen": -432.45721435546875, |
|
"logps/rejected": -500.68536376953125, |
|
"loss": 0.5157, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.5388188362121582, |
|
"rewards/margins": 1.0153836011886597, |
|
"rewards/rejected": -2.5542023181915283, |
|
"step": 790 |
|
}, |
|
{ |
|
"epoch": 0.837257980115123, |
|
"grad_norm": 34.70483028107009, |
|
"learning_rate": 3.9104390285376374e-08, |
|
"logits/chosen": 0.6709474325180054, |
|
"logits/rejected": 1.429652452468872, |
|
"logps/chosen": -424.94525146484375, |
|
"logps/rejected": -514.93359375, |
|
"loss": 0.475, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.588392972946167, |
|
"rewards/margins": 0.9277172088623047, |
|
"rewards/rejected": -2.5161101818084717, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.837257980115123, |
|
"eval_logits/chosen": 0.2101772576570511, |
|
"eval_logits/rejected": 1.0101326704025269, |
|
"eval_logps/chosen": -421.8183898925781, |
|
"eval_logps/rejected": -499.10052490234375, |
|
"eval_loss": 0.49712884426116943, |
|
"eval_rewards/accuracies": 0.7638888955116272, |
|
"eval_rewards/chosen": -1.3985040187835693, |
|
"eval_rewards/margins": 0.9904327392578125, |
|
"eval_rewards/rejected": -2.388936758041382, |
|
"eval_runtime": 187.4518, |
|
"eval_samples_per_second": 10.669, |
|
"eval_steps_per_second": 0.336, |
|
"step": 800 |
|
}, |
|
{ |
|
"epoch": 0.847723704866562, |
|
"grad_norm": 177.69409876949015, |
|
"learning_rate": 3.433663324986208e-08, |
|
"logits/chosen": 0.23488643765449524, |
|
"logits/rejected": 1.2444018125534058, |
|
"logps/chosen": -428.610595703125, |
|
"logps/rejected": -474.4007263183594, |
|
"loss": 0.5007, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.373721718788147, |
|
"rewards/margins": 0.9057692289352417, |
|
"rewards/rejected": -2.2794909477233887, |
|
"step": 810 |
|
}, |
|
{ |
|
"epoch": 0.858189429618001, |
|
"grad_norm": 34.88776442194179, |
|
"learning_rate": 2.9857306851953897e-08, |
|
"logits/chosen": 0.18579676747322083, |
|
"logits/rejected": 1.0101568698883057, |
|
"logps/chosen": -445.45135498046875, |
|
"logps/rejected": -517.8641357421875, |
|
"loss": 0.4978, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.4067695140838623, |
|
"rewards/margins": 0.9408528208732605, |
|
"rewards/rejected": -2.3476223945617676, |
|
"step": 820 |
|
}, |
|
{ |
|
"epoch": 0.8686551543694401, |
|
"grad_norm": 38.51135078969842, |
|
"learning_rate": 2.567240179368185e-08, |
|
"logits/chosen": 0.36057838797569275, |
|
"logits/rejected": 1.117890477180481, |
|
"logps/chosen": -402.0654602050781, |
|
"logps/rejected": -492.4375915527344, |
|
"loss": 0.5064, |
|
"rewards/accuracies": 0.7124999761581421, |
|
"rewards/chosen": -1.439205288887024, |
|
"rewards/margins": 0.9872032999992371, |
|
"rewards/rejected": -2.4264087677001953, |
|
"step": 830 |
|
}, |
|
{ |
|
"epoch": 0.8791208791208791, |
|
"grad_norm": 36.67834755885448, |
|
"learning_rate": 2.1787515014630357e-08, |
|
"logits/chosen": 0.33748364448547363, |
|
"logits/rejected": 1.2341878414154053, |
|
"logps/chosen": -429.6908264160156, |
|
"logps/rejected": -508.3697204589844, |
|
"loss": 0.4633, |
|
"rewards/accuracies": 0.7437499761581421, |
|
"rewards/chosen": -1.4741687774658203, |
|
"rewards/margins": 0.9488083124160767, |
|
"rewards/rejected": -2.4229774475097656, |
|
"step": 840 |
|
}, |
|
{ |
|
"epoch": 0.8895866038723181, |
|
"grad_norm": 43.50042104660291, |
|
"learning_rate": 1.820784220652766e-08, |
|
"logits/chosen": 0.6195130944252014, |
|
"logits/rejected": 1.6208534240722656, |
|
"logps/chosen": -424.727294921875, |
|
"logps/rejected": -481.82470703125, |
|
"loss": 0.4617, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.4389230012893677, |
|
"rewards/margins": 1.0447529554367065, |
|
"rewards/rejected": -2.483675956726074, |
|
"step": 850 |
|
}, |
|
{ |
|
"epoch": 0.9000523286237572, |
|
"grad_norm": 39.02433201892746, |
|
"learning_rate": 1.4938170864468636e-08, |
|
"logits/chosen": 0.5546199679374695, |
|
"logits/rejected": 1.5712369680404663, |
|
"logps/chosen": -434.83526611328125, |
|
"logps/rejected": -505.6844787597656, |
|
"loss": 0.4839, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -1.4654333591461182, |
|
"rewards/margins": 0.9778777360916138, |
|
"rewards/rejected": -2.4433112144470215, |
|
"step": 860 |
|
}, |
|
{ |
|
"epoch": 0.9105180533751962, |
|
"grad_norm": 59.667427453114435, |
|
"learning_rate": 1.1982873884064465e-08, |
|
"logits/chosen": 0.2385740727186203, |
|
"logits/rejected": 1.2312796115875244, |
|
"logps/chosen": -410.1748046875, |
|
"logps/rejected": -481.6756286621094, |
|
"loss": 0.4943, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.3840436935424805, |
|
"rewards/margins": 0.967445969581604, |
|
"rewards/rejected": -2.351489543914795, |
|
"step": 870 |
|
}, |
|
{ |
|
"epoch": 0.9209837781266352, |
|
"grad_norm": 34.34973812097979, |
|
"learning_rate": 9.345903713082304e-09, |
|
"logits/chosen": 0.22545938193798065, |
|
"logits/rejected": 1.0729210376739502, |
|
"logps/chosen": -431.2491149902344, |
|
"logps/rejected": -526.138427734375, |
|
"loss": 0.4804, |
|
"rewards/accuracies": 0.768750011920929, |
|
"rewards/chosen": -1.2711529731750488, |
|
"rewards/margins": 1.043867826461792, |
|
"rewards/rejected": -2.3150203227996826, |
|
"step": 880 |
|
}, |
|
{ |
|
"epoch": 0.9314495028780743, |
|
"grad_norm": 31.959454214655377, |
|
"learning_rate": 7.030787065396865e-09, |
|
"logits/chosen": 0.2903442978858948, |
|
"logits/rejected": 1.0575335025787354, |
|
"logps/chosen": -420.6573181152344, |
|
"logps/rejected": -513.2061767578125, |
|
"loss": 0.4855, |
|
"rewards/accuracies": 0.737500011920929, |
|
"rewards/chosen": -1.3298420906066895, |
|
"rewards/margins": 1.0002529621124268, |
|
"rewards/rejected": -2.330094814300537, |
|
"step": 890 |
|
}, |
|
{ |
|
"epoch": 0.9419152276295133, |
|
"grad_norm": 47.23727995191594, |
|
"learning_rate": 5.04062020432286e-09, |
|
"logits/chosen": 0.19465875625610352, |
|
"logits/rejected": 0.8790876269340515, |
|
"logps/chosen": -413.4163513183594, |
|
"logps/rejected": -506.9383850097656, |
|
"loss": 0.4828, |
|
"rewards/accuracies": 0.762499988079071, |
|
"rewards/chosen": -1.3302490711212158, |
|
"rewards/margins": 0.9251013994216919, |
|
"rewards/rejected": -2.2553505897521973, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.9419152276295133, |
|
"eval_logits/chosen": 0.251442551612854, |
|
"eval_logits/rejected": 1.1202069520950317, |
|
"eval_logps/chosen": -418.4964599609375, |
|
"eval_logps/rejected": -499.6516418457031, |
|
"eval_loss": 0.4963185489177704, |
|
"eval_rewards/accuracies": 0.7638888955116272, |
|
"eval_rewards/chosen": -1.3652852773666382, |
|
"eval_rewards/margins": 1.0291632413864136, |
|
"eval_rewards/rejected": -2.394448757171631, |
|
"eval_runtime": 187.5429, |
|
"eval_samples_per_second": 10.664, |
|
"eval_steps_per_second": 0.336, |
|
"step": 900 |
|
}, |
|
{ |
|
"epoch": 0.9523809523809523, |
|
"grad_norm": 57.54106835407393, |
|
"learning_rate": 3.3780648016376866e-09, |
|
"logits/chosen": 0.4812156558036804, |
|
"logits/rejected": 1.2290699481964111, |
|
"logps/chosen": -363.21429443359375, |
|
"logps/rejected": -442.1210021972656, |
|
"loss": 0.4842, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.3979285955429077, |
|
"rewards/margins": 0.894494891166687, |
|
"rewards/rejected": -2.2924234867095947, |
|
"step": 910 |
|
}, |
|
{ |
|
"epoch": 0.9628466771323915, |
|
"grad_norm": 46.08332660505321, |
|
"learning_rate": 2.0453443778310766e-09, |
|
"logits/chosen": 0.28524526953697205, |
|
"logits/rejected": 1.2063854932785034, |
|
"logps/chosen": -419.14453125, |
|
"logps/rejected": -487.4471740722656, |
|
"loss": 0.4991, |
|
"rewards/accuracies": 0.7562500238418579, |
|
"rewards/chosen": -1.3250755071640015, |
|
"rewards/margins": 1.0333658456802368, |
|
"rewards/rejected": -2.358441114425659, |
|
"step": 920 |
|
}, |
|
{ |
|
"epoch": 0.9733124018838305, |
|
"grad_norm": 43.54975464877693, |
|
"learning_rate": 1.0442413283435758e-09, |
|
"logits/chosen": 0.056365132331848145, |
|
"logits/rejected": 1.318570852279663, |
|
"logps/chosen": -439.2395935058594, |
|
"logps/rejected": -490.8741760253906, |
|
"loss": 0.488, |
|
"rewards/accuracies": 0.7749999761581421, |
|
"rewards/chosen": -1.2440545558929443, |
|
"rewards/margins": 1.0225216150283813, |
|
"rewards/rejected": -2.2665762901306152, |
|
"step": 930 |
|
}, |
|
{ |
|
"epoch": 0.9837781266352695, |
|
"grad_norm": 42.097145146923, |
|
"learning_rate": 3.760945397705828e-10, |
|
"logits/chosen": 0.23059086501598358, |
|
"logits/rejected": 1.1359683275222778, |
|
"logps/chosen": -389.34002685546875, |
|
"logps/rejected": -485.71234130859375, |
|
"loss": 0.4755, |
|
"rewards/accuracies": 0.731249988079071, |
|
"rewards/chosen": -1.2615879774093628, |
|
"rewards/margins": 0.9983237981796265, |
|
"rewards/rejected": -2.2599120140075684, |
|
"step": 940 |
|
}, |
|
{ |
|
"epoch": 0.9942438513867086, |
|
"grad_norm": 38.985040795872834, |
|
"learning_rate": 4.17975992204056e-11, |
|
"logits/chosen": 0.29284530878067017, |
|
"logits/rejected": 0.8657740354537964, |
|
"logps/chosen": -441.49786376953125, |
|
"logps/rejected": -506.05804443359375, |
|
"loss": 0.4913, |
|
"rewards/accuracies": 0.706250011920929, |
|
"rewards/chosen": -1.386546015739441, |
|
"rewards/margins": 0.8242311477661133, |
|
"rewards/rejected": -2.2107772827148438, |
|
"step": 950 |
|
}, |
|
    {
      "epoch": 0.9994767137624281,
      "step": 955,
      "total_flos": 0.0,
      "train_loss": 0.1815465917137905,
      "train_runtime": 7518.522,
      "train_samples_per_second": 8.131,
      "train_steps_per_second": 0.127
    }
  ],
  "logging_steps": 10,
  "max_steps": 955,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}