|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.985781990521327,
  "eval_steps": 50,
  "global_step": 315,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0947867298578199,
      "grad_norm": 45.73923520041355,
      "learning_rate": 1.5624999999999999e-07,
      "logits/chosen": -2.8428008556365967,
      "logits/rejected": -2.6068577766418457,
      "logps/chosen": -368.2735290527344,
      "logps/rejected": -498.0081481933594,
      "loss": 0.6856,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.0027675286401063204,
      "rewards/margins": 0.014056036248803139,
      "rewards/rejected": -0.011288506910204887,
      "step": 10
    },
    {
      "epoch": 0.1895734597156398,
      "grad_norm": 16.393339414177866,
      "learning_rate": 3.1249999999999997e-07,
      "logits/chosen": -2.853433132171631,
      "logits/rejected": -2.6817729473114014,
      "logps/chosen": -370.88690185546875,
      "logps/rejected": -610.00048828125,
      "loss": 0.473,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.09298940747976303,
      "rewards/margins": 0.63857501745224,
      "rewards/rejected": -0.5455856323242188,
      "step": 20
    },
    {
      "epoch": 0.2843601895734597,
      "grad_norm": 4.343562060345208,
      "learning_rate": 4.6874999999999996e-07,
      "logits/chosen": -2.9111626148223877,
      "logits/rejected": -2.6944823265075684,
      "logps/chosen": -330.89703369140625,
      "logps/rejected": -913.5744018554688,
      "loss": 0.1487,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.38449177145957947,
      "rewards/margins": 4.493498802185059,
      "rewards/rejected": -4.109007358551025,
      "step": 30
    },
    {
      "epoch": 0.3791469194312796,
      "grad_norm": 1.5210747026486233,
      "learning_rate": 4.990147841143461e-07,
      "logits/chosen": -2.918667793273926,
      "logits/rejected": -2.744393825531006,
      "logps/chosen": -352.75640869140625,
      "logps/rejected": -1823.7945556640625,
      "loss": 0.0266,
      "rewards/accuracies": 1.0,
      "rewards/chosen": 0.18624749779701233,
      "rewards/margins": 13.044224739074707,
      "rewards/rejected": -12.857976913452148,
      "step": 40
    },
    {
      "epoch": 0.47393364928909953,
      "grad_norm": 3.5117794451703537,
      "learning_rate": 4.950256493879794e-07,
      "logits/chosen": -2.8872039318084717,
      "logits/rejected": -2.6208763122558594,
      "logps/chosen": -432.4601135253906,
      "logps/rejected": -3069.329345703125,
      "loss": 0.009,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.5042247176170349,
      "rewards/margins": 25.241214752197266,
      "rewards/rejected": -25.745441436767578,
      "step": 50
    },
    {
      "epoch": 0.47393364928909953,
      "eval_logits/chosen": -2.932264804840088,
      "eval_logits/rejected": -2.640563726425171,
      "eval_logps/chosen": -430.4127197265625,
      "eval_logps/rejected": -4133.1240234375,
      "eval_loss": 0.002647999208420515,
      "eval_rewards/accuracies": 0.9959677457809448,
      "eval_rewards/chosen": -0.6343097686767578,
      "eval_rewards/margins": 34.28904724121094,
      "eval_rewards/rejected": -34.92335510253906,
      "eval_runtime": 193.1739,
      "eval_samples_per_second": 20.215,
      "eval_steps_per_second": 0.321,
      "step": 50
    },
    {
      "epoch": 0.5687203791469194,
      "grad_norm": 2.067910467176069,
      "learning_rate": 4.88020090697132e-07,
      "logits/chosen": -2.941565990447998,
      "logits/rejected": -2.5026259422302246,
      "logps/chosen": -496.873046875,
      "logps/rejected": -3218.004638671875,
      "loss": 0.0218,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.0619714260101318,
      "rewards/margins": 25.82952880859375,
      "rewards/rejected": -26.89150047302246,
      "step": 60
    },
    {
      "epoch": 0.6635071090047393,
      "grad_norm": 27.786761703670376,
      "learning_rate": 4.780843509929904e-07,
      "logits/chosen": -1.2228466272354126,
      "logits/rejected": 0.15184922516345978,
      "logps/chosen": -490.0467834472656,
      "logps/rejected": -3536.85302734375,
      "loss": 0.0127,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -1.3228168487548828,
      "rewards/margins": 29.06142807006836,
      "rewards/rejected": -30.384246826171875,
      "step": 70
    },
    {
      "epoch": 0.7582938388625592,
      "grad_norm": 1.926082688082676,
      "learning_rate": 4.6534074564712217e-07,
      "logits/chosen": -0.27225083112716675,
      "logits/rejected": 1.7922433614730835,
      "logps/chosen": -459.4078063964844,
      "logps/rejected": -3558.192626953125,
      "loss": 0.0027,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.0403556823730469,
      "rewards/margins": 29.467926025390625,
      "rewards/rejected": -30.508281707763672,
      "step": 80
    },
    {
      "epoch": 0.8530805687203792,
      "grad_norm": 1.213238062829923,
      "learning_rate": 4.4994615667026846e-07,
      "logits/chosen": -0.5399178266525269,
      "logits/rejected": 2.844102382659912,
      "logps/chosen": -458.8783264160156,
      "logps/rejected": -3624.69921875,
      "loss": 0.0043,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -0.9510987997055054,
      "rewards/margins": 30.12051773071289,
      "rewards/rejected": -31.071613311767578,
      "step": 90
    },
    {
      "epoch": 0.9478672985781991,
      "grad_norm": 2.2220593339773402,
      "learning_rate": 4.320901013934887e-07,
      "logits/chosen": 0.34000855684280396,
      "logits/rejected": 3.4551188945770264,
      "logps/chosen": -497.6631774902344,
      "logps/rejected": -4114.6044921875,
      "loss": 0.0158,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.247301459312439,
      "rewards/margins": 34.767173767089844,
      "rewards/rejected": -36.01448059082031,
      "step": 100
    },
    {
      "epoch": 0.9478672985781991,
      "eval_logits/chosen": -0.5345638394355774,
      "eval_logits/rejected": 3.056351661682129,
      "eval_logps/chosen": -493.986572265625,
      "eval_logps/rejected": -4983.6845703125,
      "eval_loss": 0.0003322066040709615,
      "eval_rewards/accuracies": 1.0,
      "eval_rewards/chosen": -1.2700488567352295,
      "eval_rewards/margins": 42.158912658691406,
      "eval_rewards/rejected": -43.42896270751953,
      "eval_runtime": 192.4704,
      "eval_samples_per_second": 20.289,
      "eval_steps_per_second": 0.322,
      "step": 100
    },
    {
      "epoch": 1.042654028436019,
      "grad_norm": 0.22005560080220604,
      "learning_rate": 4.119923993874379e-07,
      "logits/chosen": -0.7820493578910828,
      "logits/rejected": 3.120879650115967,
      "logps/chosen": -517.9307861328125,
      "logps/rejected": -3928.251953125,
      "loss": 0.006,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.4943805932998657,
      "rewards/margins": 32.6540641784668,
      "rewards/rejected": -34.14844512939453,
      "step": 110
    },
    {
      "epoch": 1.1374407582938388,
      "grad_norm": 0.12162920408221319,
      "learning_rate": 3.899004663415083e-07,
      "logits/chosen": -0.34208816289901733,
      "logits/rejected": 3.0167593955993652,
      "logps/chosen": -538.0582275390625,
      "logps/rejected": -4409.0302734375,
      "loss": 0.0011,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.8046563863754272,
      "rewards/margins": 36.82363510131836,
      "rewards/rejected": -38.62828826904297,
      "step": 120
    },
    {
      "epoch": 1.2322274881516588,
      "grad_norm": 0.09818677325286322,
      "learning_rate": 3.6608626821692824e-07,
      "logits/chosen": 0.6416308879852295,
      "logits/rejected": 3.524348735809326,
      "logps/chosen": -605.2796630859375,
      "logps/rejected": -4457.169921875,
      "loss": 0.0005,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.267808437347412,
      "rewards/margins": 37.17861557006836,
      "rewards/rejected": -39.4464225769043,
      "step": 130
    },
    {
      "epoch": 1.3270142180094786,
      "grad_norm": 0.4137355125115123,
      "learning_rate": 3.408429731701635e-07,
      "logits/chosen": 1.477095603942871,
      "logits/rejected": 4.739525318145752,
      "logps/chosen": -659.0659790039062,
      "logps/rejected": -4712.701171875,
      "loss": 0.0002,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.851088762283325,
      "rewards/margins": 39.070613861083984,
      "rewards/rejected": -41.92170333862305,
      "step": 140
    },
    {
      "epoch": 1.4218009478672986,
      "grad_norm": 0.038412138392737674,
      "learning_rate": 3.144813424636031e-07,
      "logits/chosen": 1.158630132675171,
      "logits/rejected": 4.032771110534668,
      "logps/chosen": -702.2916259765625,
      "logps/rejected": -4613.56201171875,
      "loss": 0.0007,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.405531406402588,
      "rewards/margins": 37.08515930175781,
      "rewards/rejected": -40.490692138671875,
      "step": 150
    },
    {
      "epoch": 1.4218009478672986,
      "eval_logits/chosen": 0.7923636436462402,
      "eval_logits/rejected": 4.171131134033203,
      "eval_logps/chosen": -666.1113891601562,
      "eval_logps/rejected": -5907.1767578125,
      "eval_loss": 0.0005207537906244397,
      "eval_rewards/accuracies": 0.9979838728904724,
      "eval_rewards/chosen": -2.9912962913513184,
      "eval_rewards/margins": 49.67258834838867,
      "eval_rewards/rejected": -52.66389083862305,
      "eval_runtime": 192.4622,
      "eval_samples_per_second": 20.29,
      "eval_steps_per_second": 0.322,
      "step": 150
    },
    {
      "epoch": 1.5165876777251186,
      "grad_norm": 0.010481668812329812,
      "learning_rate": 2.8732590479375165e-07,
      "logits/chosen": 0.743170440196991,
      "logits/rejected": 3.9760921001434326,
      "logps/chosen": -743.1651611328125,
      "logps/rejected": -4806.0869140625,
      "loss": 0.0027,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.295560121536255,
      "rewards/margins": 39.42099380493164,
      "rewards/rejected": -42.716552734375,
      "step": 160
    },
    {
      "epoch": 1.6113744075829384,
      "grad_norm": 0.09321288882655707,
      "learning_rate": 2.597109611334169e-07,
      "logits/chosen": 1.2140353918075562,
      "logits/rejected": 4.648254871368408,
      "logps/chosen": -728.6572265625,
      "logps/rejected": -4531.54150390625,
      "loss": 0.0008,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.5468170642852783,
      "rewards/margins": 37.09358596801758,
      "rewards/rejected": -40.640403747558594,
      "step": 170
    },
    {
      "epoch": 1.7061611374407581,
      "grad_norm": 0.10729010910304806,
      "learning_rate": 2.3197646927086694e-07,
      "logits/chosen": 1.2630093097686768,
      "logits/rejected": 4.423764228820801,
      "logps/chosen": -708.4100341796875,
      "logps/rejected": -4733.248046875,
      "loss": 0.0002,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.5483551025390625,
      "rewards/margins": 38.949485778808594,
      "rewards/rejected": -42.497840881347656,
      "step": 180
    },
    {
      "epoch": 1.8009478672985781,
      "grad_norm": 0.24468759681680258,
      "learning_rate": 2.0446385870993467e-07,
      "logits/chosen": 1.4046242237091064,
      "logits/rejected": 4.30928897857666,
      "logps/chosen": -701.5442504882812,
      "logps/rejected": -5078.294921875,
      "loss": 0.0023,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.5222976207733154,
      "rewards/margins": 41.7343635559082,
      "rewards/rejected": -45.25666427612305,
      "step": 190
    },
    {
      "epoch": 1.8957345971563981,
      "grad_norm": 0.3903600557186432,
      "learning_rate": 1.775118274523545e-07,
      "logits/chosen": 1.221166729927063,
      "logits/rejected": 4.167235851287842,
      "logps/chosen": -732.6544189453125,
      "logps/rejected": -4501.24951171875,
      "loss": 0.0015,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -3.7073051929473877,
      "rewards/margins": 36.32033157348633,
      "rewards/rejected": -40.02763366699219,
      "step": 200
    },
    {
      "epoch": 1.8957345971563981,
      "eval_logits/chosen": 1.0487316846847534,
      "eval_logits/rejected": 4.071253299713135,
      "eval_logps/chosen": -726.3069458007812,
      "eval_logps/rejected": -6069.36572265625,
      "eval_loss": 0.0006494168192148209,
      "eval_rewards/accuracies": 0.9979838728904724,
      "eval_rewards/chosen": -3.593252658843994,
      "eval_rewards/margins": 50.6925163269043,
      "eval_rewards/rejected": -54.285770416259766,
      "eval_runtime": 194.2327,
      "eval_samples_per_second": 20.105,
      "eval_steps_per_second": 0.319,
      "step": 200
    },
    {
      "epoch": 1.9905213270142181,
      "grad_norm": 0.0026076475546211236,
      "learning_rate": 1.514521724066537e-07,
      "logits/chosen": 1.5519434213638306,
      "logits/rejected": 4.584723472595215,
      "logps/chosen": -740.3926391601562,
      "logps/rejected": -4949.19970703125,
      "loss": 0.0011,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.849459409713745,
      "rewards/margins": 40.864036560058594,
      "rewards/rejected": -44.71349334716797,
      "step": 210
    },
    {
      "epoch": 2.085308056872038,
      "grad_norm": 0.06001104822177154,
      "learning_rate": 1.266057047539568e-07,
      "logits/chosen": 1.105302095413208,
      "logits/rejected": 3.880047559738159,
      "logps/chosen": -792.5735473632812,
      "logps/rejected": -5075.94384765625,
      "loss": 0.0002,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.032340049743652,
      "rewards/margins": 41.21745681762695,
      "rewards/rejected": -45.24979782104492,
      "step": 220
    },
    {
      "epoch": 2.1800947867298577,
      "grad_norm": 0.015291332826346503,
      "learning_rate": 1.032783005551884e-07,
      "logits/chosen": 1.3159544467926025,
      "logits/rejected": 4.874353885650635,
      "logps/chosen": -772.8294677734375,
      "logps/rejected": -4771.970703125,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.134366035461426,
      "rewards/margins": 38.77143096923828,
      "rewards/rejected": -42.90580368041992,
      "step": 230
    },
    {
      "epoch": 2.2748815165876777,
      "grad_norm": 0.019790975881220592,
      "learning_rate": 8.175713521924976e-08,
      "logits/chosen": 1.721247673034668,
      "logits/rejected": 4.219584941864014,
      "logps/chosen": -779.8192138671875,
      "logps/rejected": -5218.1943359375,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.025120735168457,
      "rewards/margins": 42.607852935791016,
      "rewards/rejected": -46.632972717285156,
      "step": 240
    },
    {
      "epoch": 2.3696682464454977,
      "grad_norm": 0.029539030378801425,
      "learning_rate": 6.230714818829733e-08,
      "logits/chosen": 1.274677038192749,
      "logits/rejected": 4.052763938903809,
      "logps/chosen": -783.1781005859375,
      "logps/rejected": -5148.7236328125,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.11904764175415,
      "rewards/margins": 41.87529754638672,
      "rewards/rejected": -45.99434280395508,
      "step": 250
    },
    {
      "epoch": 2.3696682464454977,
      "eval_logits/chosen": 1.259900450706482,
      "eval_logits/rejected": 4.24884557723999,
      "eval_logps/chosen": -760.6976928710938,
      "eval_logps/rejected": -6404.8037109375,
      "eval_loss": 0.0009418130503036082,
      "eval_rewards/accuracies": 0.9979838728904724,
      "eval_rewards/chosen": -3.937159299850464,
      "eval_rewards/margins": 53.70299530029297,
      "eval_rewards/rejected": -57.640159606933594,
      "eval_runtime": 193.6285,
      "eval_samples_per_second": 20.167,
      "eval_steps_per_second": 0.32,
      "step": 250
    },
    {
      "epoch": 2.4644549763033177,
      "grad_norm": 0.002101891811001152,
      "learning_rate": 4.516778136213037e-08,
      "logits/chosen": 1.4295393228530884,
      "logits/rejected": 4.259391784667969,
      "logps/chosen": -791.4283447265625,
      "logps/rejected": -5040.88037109375,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.1635847091674805,
      "rewards/margins": 40.99254608154297,
      "rewards/rejected": -45.15613555908203,
      "step": 260
    },
    {
      "epoch": 2.5592417061611377,
      "grad_norm": 0.007793292774055604,
      "learning_rate": 3.055003141378948e-08,
      "logits/chosen": 1.430039644241333,
      "logits/rejected": 4.239804267883301,
      "logps/chosen": -771.2034301757812,
      "logps/rejected": -5207.9248046875,
      "loss": 0.0001,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.072695255279541,
      "rewards/margins": 42.79798126220703,
      "rewards/rejected": -46.87067794799805,
      "step": 270
    },
    {
      "epoch": 2.654028436018957,
      "grad_norm": 0.005849603176213166,
      "learning_rate": 1.8633852284264508e-08,
      "logits/chosen": 1.6255613565444946,
      "logits/rejected": 4.967244625091553,
      "logps/chosen": -777.3895874023438,
      "logps/rejected": -4971.68994140625,
      "loss": 0.0001,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.9707741737365723,
      "rewards/margins": 40.53221893310547,
      "rewards/rejected": -44.502994537353516,
      "step": 280
    },
    {
      "epoch": 2.748815165876777,
      "grad_norm": 0.0343313276372006,
      "learning_rate": 9.56593983327919e-09,
      "logits/chosen": 1.3324038982391357,
      "logits/rejected": 4.286895275115967,
      "logps/chosen": -791.8960571289062,
      "logps/rejected": -5403.97119140625,
      "loss": 0.0,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.294528007507324,
      "rewards/margins": 44.62022399902344,
      "rewards/rejected": -48.91474914550781,
      "step": 290
    },
    {
      "epoch": 2.843601895734597,
      "grad_norm": 0.052240981983852935,
      "learning_rate": 3.4579259185321398e-09,
      "logits/chosen": 1.404039740562439,
      "logits/rejected": 3.9035141468048096,
      "logps/chosen": -799.1031494140625,
      "logps/rejected": -5045.74658203125,
      "loss": 0.0001,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.179960250854492,
      "rewards/margins": 40.62794876098633,
      "rewards/rejected": -44.80791473388672,
      "step": 300
    },
    {
      "epoch": 2.843601895734597,
      "eval_logits/chosen": 1.2520259618759155,
      "eval_logits/rejected": 4.261307716369629,
      "eval_logps/chosen": -763.88330078125,
      "eval_logps/rejected": -6457.25146484375,
      "eval_loss": 0.0009674043976701796,
      "eval_rewards/accuracies": 0.9979838728904724,
      "eval_rewards/chosen": -3.969015598297119,
      "eval_rewards/margins": 54.195613861083984,
      "eval_rewards/rejected": -58.16463851928711,
      "eval_runtime": 192.9853,
      "eval_samples_per_second": 20.235,
      "eval_steps_per_second": 0.321,
      "step": 300
    },
    {
      "epoch": 2.938388625592417,
      "grad_norm": 0.08742324749616634,
      "learning_rate": 3.850041354441502e-10,
      "logits/chosen": 1.2420690059661865,
      "logits/rejected": 3.870642900466919,
      "logps/chosen": -808.8611450195312,
      "logps/rejected": -5520.935546875,
      "loss": 0.0001,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.237000465393066,
      "rewards/margins": 45.446434020996094,
      "rewards/rejected": -49.68342971801758,
      "step": 310
    },
    {
      "epoch": 2.985781990521327,
      "step": 315,
      "total_flos": 0.0,
      "train_loss": 0.04501082229597535,
      "train_runtime": 9068.4154,
      "train_samples_per_second": 4.466,
      "train_steps_per_second": 0.035
    }
  ],
  "logging_steps": 10,
  "max_steps": 315,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}