{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.968,
  "eval_steps": 100,
  "global_step": 248,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 2.0000000000000002e-07,
      "logits/chosen": -0.18450471758842468,
      "logits/rejected": -0.11320095509290695,
      "logps/chosen": -164.21368408203125,
      "logps/rejected": -62.20133590698242,
      "loss": 0.0011,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16,
      "learning_rate": 2.0000000000000003e-06,
      "logits/chosen": -0.19920343160629272,
      "logits/rejected": 0.12002943456172943,
      "logps/chosen": -144.63650512695312,
      "logps/rejected": -83.57919311523438,
      "loss": 0.0013,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": -0.0008703459752723575,
      "rewards/margins": -0.0009600308840163052,
      "rewards/rejected": 8.96848359843716e-05,
      "step": 10
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.000000000000001e-06,
      "logits/chosen": -0.03189752623438835,
      "logits/rejected": 0.1474052220582962,
      "logps/chosen": -144.80337524414062,
      "logps/rejected": -82.944580078125,
      "loss": 0.0012,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0006064423942007124,
      "rewards/margins": -0.0002433264598948881,
      "rewards/rejected": -0.00036311594885773957,
      "step": 20
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.993800445762451e-06,
      "logits/chosen": -0.13454397022724152,
      "logits/rejected": 0.1784980446100235,
      "logps/chosen": -137.59768676757812,
      "logps/rejected": -79.26807403564453,
      "loss": 0.0013,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.00021711275621782988,
      "rewards/margins": -0.0008902138797566295,
      "rewards/rejected": 0.0006731009343639016,
      "step": 30
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.944388344834205e-06,
      "logits/chosen": -0.12394044548273087,
      "logits/rejected": 0.17124100029468536,
      "logps/chosen": -147.41329956054688,
      "logps/rejected": -83.2558822631836,
      "loss": 0.0011,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": 0.002327266614884138,
      "rewards/margins": 0.001547815278172493,
      "rewards/rejected": 0.0007794516277499497,
      "step": 40
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.8465431931347904e-06,
      "logits/chosen": -0.0875336229801178,
      "logits/rejected": 0.07007980346679688,
      "logps/chosen": -155.75111389160156,
      "logps/rejected": -95.81137084960938,
      "loss": 0.0013,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 0.0015659708296880126,
      "rewards/margins": 0.0006253436440601945,
      "rewards/rejected": 0.0009406275348737836,
      "step": 50
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.702203692102539e-06,
      "logits/chosen": -0.0579068660736084,
      "logits/rejected": 0.17608948051929474,
      "logps/chosen": -147.31118774414062,
      "logps/rejected": -89.31456756591797,
      "loss": 0.0011,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.0032808627001941204,
      "rewards/margins": 0.002143878024071455,
      "rewards/rejected": 0.001136984908953309,
      "step": 60
    },
    {
      "epoch": 1.12,
      "learning_rate": 4.514229781074239e-06,
      "logits/chosen": -0.07274739444255829,
      "logits/rejected": 0.09790283441543579,
      "logps/chosen": -151.98243713378906,
      "logps/rejected": -94.0592269897461,
      "loss": 0.001,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": 0.00562315946444869,
      "rewards/margins": 0.003317736554890871,
      "rewards/rejected": 0.002305423142388463,
      "step": 70
    },
    {
      "epoch": 1.28,
      "learning_rate": 4.286345970517195e-06,
      "logits/chosen": -0.13577523827552795,
      "logits/rejected": 0.07853694260120392,
      "logps/chosen": -155.74014282226562,
      "logps/rejected": -85.44630432128906,
      "loss": 0.0011,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": 0.005103850271552801,
      "rewards/margins": 0.0037234011106193066,
      "rewards/rejected": 0.0013804491609334946,
      "step": 80
    },
    {
      "epoch": 1.44,
      "learning_rate": 4.023067544670082e-06,
      "logits/chosen": -0.11358518898487091,
      "logits/rejected": 0.16092757880687714,
      "logps/chosen": -149.1753387451172,
      "logps/rejected": -91.94307708740234,
      "loss": 0.0011,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": 0.006153634749352932,
      "rewards/margins": 0.0030088797211647034,
      "rewards/rejected": 0.0031447552610188723,
      "step": 90
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.7296110958116845e-06,
      "logits/chosen": -0.16605840623378754,
      "logits/rejected": 0.10877911746501923,
      "logps/chosen": -144.11111450195312,
      "logps/rejected": -77.82760620117188,
      "loss": 0.001,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.007716761436313391,
      "rewards/margins": 0.004192883148789406,
      "rewards/rejected": 0.003523878753185272,
      "step": 100
    },
    {
      "epoch": 1.6,
      "eval_logits/chosen": -0.0515277236700058,
      "eval_logits/rejected": 0.04684555158019066,
      "eval_logps/chosen": -307.1495361328125,
      "eval_logps/rejected": -279.2278137207031,
      "eval_loss": 0.0017420838121324778,
      "eval_rewards/accuracies": 0.49000000953674316,
      "eval_rewards/chosen": -0.003111687721684575,
      "eval_rewards/margins": -0.00043085336801595986,
      "eval_rewards/rejected": -0.0026808343827724457,
      "eval_runtime": 523.062,
      "eval_samples_per_second": 3.824,
      "eval_steps_per_second": 0.956,
      "step": 100
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.4117911628292944e-06,
      "logits/chosen": -0.03756412863731384,
      "logits/rejected": 0.11871184408664703,
      "logps/chosen": -139.15431213378906,
      "logps/rejected": -83.21810150146484,
      "loss": 0.001,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.010171065106987953,
      "rewards/margins": 0.004307509399950504,
      "rewards/rejected": 0.005863555707037449,
      "step": 110
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.075905022087675e-06,
      "logits/chosen": -0.21418194472789764,
      "logits/rejected": 0.10314931720495224,
      "logps/chosen": -133.09384155273438,
      "logps/rejected": -81.83766174316406,
      "loss": 0.0011,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.007402367889881134,
      "rewards/margins": 0.0029455057810992002,
      "rewards/rejected": 0.0044568623416125774,
      "step": 120
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.728607913349464e-06,
      "logits/chosen": -0.08502141386270523,
      "logits/rejected": 0.15310132503509521,
      "logps/chosen": -149.8968048095703,
      "logps/rejected": -92.3510971069336,
      "loss": 0.0011,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.011875095777213573,
      "rewards/margins": 0.00466475635766983,
      "rewards/rejected": 0.007210339419543743,
      "step": 130
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.376781173017589e-06,
      "logits/chosen": -0.1818578690290451,
      "logits/rejected": 0.06916014850139618,
      "logps/chosen": -137.927001953125,
      "logps/rejected": -76.7030258178711,
      "loss": 0.001,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.010994290933012962,
      "rewards/margins": 0.006979555822908878,
      "rewards/rejected": 0.004014736507087946,
      "step": 140
    },
    {
      "epoch": 2.4,
      "learning_rate": 2.0273958875043877e-06,
      "logits/chosen": -0.04663145914673805,
      "logits/rejected": 0.18785881996154785,
      "logps/chosen": -160.57423400878906,
      "logps/rejected": -94.46165466308594,
      "loss": 0.001,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.01304453331977129,
      "rewards/margins": 0.006584475748240948,
      "rewards/rejected": 0.006460057105869055,
      "step": 150
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.6873747682962393e-06,
      "logits/chosen": -0.12049274146556854,
      "logits/rejected": 0.10580462217330933,
      "logps/chosen": -142.25698852539062,
      "logps/rejected": -88.6644515991211,
      "loss": 0.001,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.011662685312330723,
      "rewards/margins": 0.005346192512661219,
      "rewards/rejected": 0.006316492799669504,
      "step": 160
    },
    {
      "epoch": 2.72,
      "learning_rate": 1.363454985517803e-06,
      "logits/chosen": -0.109579898416996,
      "logits/rejected": 0.09508191049098969,
      "logps/chosen": -131.90289306640625,
      "logps/rejected": -86.8044662475586,
      "loss": 0.0009,
      "rewards/accuracies": 0.5687500238418579,
      "rewards/chosen": 0.012221863493323326,
      "rewards/margins": 0.0077283428981900215,
      "rewards/rejected": 0.004493518732488155,
      "step": 170
    },
    {
      "epoch": 2.88,
      "learning_rate": 1.062054677808238e-06,
      "logits/chosen": -0.12257204204797745,
      "logits/rejected": 0.05640065670013428,
      "logps/chosen": -155.1592254638672,
      "logps/rejected": -79.97932434082031,
      "loss": 0.001,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.013236519880592823,
      "rewards/margins": 0.0076187909580767155,
      "rewards/rejected": 0.005617729388177395,
      "step": 180
    },
    {
      "epoch": 3.04,
      "learning_rate": 7.891457834794711e-07,
      "logits/chosen": -0.12836693227291107,
      "logits/rejected": 0.06110278517007828,
      "logps/chosen": -134.49652099609375,
      "logps/rejected": -80.5157241821289,
      "loss": 0.001,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": 0.012557892128825188,
      "rewards/margins": 0.006546019576489925,
      "rewards/rejected": 0.006011873483657837,
      "step": 190
    },
    {
      "epoch": 3.2,
      "learning_rate": 5.501357126768117e-07,
      "logits/chosen": -0.21034152805805206,
      "logits/rejected": 0.12967748939990997,
      "logps/chosen": -141.66494750976562,
      "logps/rejected": -86.77215576171875,
      "loss": 0.0011,
      "rewards/accuracies": 0.59375,
      "rewards/chosen": 0.013257928192615509,
      "rewards/margins": 0.005711545702069998,
      "rewards/rejected": 0.0075463829562067986,
      "step": 200
    },
    {
      "epoch": 3.2,
      "eval_logits/chosen": -0.09329269826412201,
      "eval_logits/rejected": 0.007055533118546009,
      "eval_logps/chosen": -307.246826171875,
      "eval_logps/rejected": -279.3705139160156,
      "eval_loss": 0.0018617328023537993,
      "eval_rewards/accuracies": 0.5005000233650208,
      "eval_rewards/chosen": -0.004084425512701273,
      "eval_rewards/margins": 2.3556031010230072e-05,
      "eval_rewards/rejected": -0.004107981454581022,
      "eval_runtime": 520.9971,
      "eval_samples_per_second": 3.839,
      "eval_steps_per_second": 0.96,
      "step": 200
    },
    {
      "epoch": 3.36,
      "learning_rate": 3.4976020508682345e-07,
      "logits/chosen": -0.19708134233951569,
      "logits/rejected": 0.14637216925621033,
      "logps/chosen": -155.29025268554688,
      "logps/rejected": -90.92948150634766,
      "loss": 0.0009,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": 0.016783371567726135,
      "rewards/margins": 0.00943558569997549,
      "rewards/rejected": 0.007347785867750645,
      "step": 210
    },
    {
      "epoch": 3.52,
      "learning_rate": 1.9198949610721273e-07,
      "logits/chosen": -0.11682029813528061,
      "logits/rejected": -0.014275476336479187,
      "logps/chosen": -132.5178985595703,
      "logps/rejected": -68.00812530517578,
      "loss": 0.001,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.012291627004742622,
      "rewards/margins": 0.007276636548340321,
      "rewards/rejected": 0.005014990922063589,
      "step": 220
    },
    {
      "epoch": 3.68,
      "learning_rate": 7.994965069994143e-08,
      "logits/chosen": -0.09643147885799408,
      "logits/rejected": 0.07124640792608261,
      "logps/chosen": -136.13864135742188,
      "logps/rejected": -82.87481689453125,
      "loss": 0.0009,
      "rewards/accuracies": 0.668749988079071,
      "rewards/chosen": 0.013331139460206032,
      "rewards/margins": 0.00686370674520731,
      "rewards/rejected": 0.006467430852353573,
      "step": 230
    },
    {
      "epoch": 3.84,
      "learning_rate": 1.5860623616664183e-08,
      "logits/chosen": -0.1297198235988617,
      "logits/rejected": 0.03874051570892334,
      "logps/chosen": -161.54931640625,
      "logps/rejected": -92.08390808105469,
      "loss": 0.0008,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": 0.013919742777943611,
      "rewards/margins": 0.008374048396945,
      "rewards/rejected": 0.0055456943809986115,
      "step": 240
    },
    {
      "epoch": 3.97,
      "step": 248,
      "total_flos": 0.0,
      "train_loss": 0.0010581601813863663,
      "train_runtime": 3353.9187,
      "train_samples_per_second": 1.193,
      "train_steps_per_second": 0.074
    }
  ],
  "logging_steps": 10,
  "max_steps": 248,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}