{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.10666666666666667,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "completion_length": 485.49220275878906,
      "epoch": 0.0010666666666666667,
      "grad_norm": 0.13112049806907955,
      "kl": 0.0,
      "learning_rate": 7.142857142857142e-08,
      "loss": -0.0,
      "reward": 0.3281250111758709,
      "reward_std": 0.4913413915783167,
      "rewards/equation_reward_func": 0.05729166814126074,
      "rewards/format_reward_func": 0.27083334047347307,
      "step": 2
    },
    {
      "completion_length": 530.2265796661377,
      "epoch": 0.0021333333333333334,
      "grad_norm": 0.12198138838063947,
      "kl": 0.0003826618194580078,
      "learning_rate": 1.4285714285714285e-07,
      "loss": 0.0,
      "reward": 0.299479172565043,
      "reward_std": 0.44007157534360886,
      "rewards/equation_reward_func": 0.03385416744276881,
      "rewards/format_reward_func": 0.26562500977888703,
      "step": 4
    },
    {
      "completion_length": 496.8776264190674,
      "epoch": 0.0032,
      "grad_norm": 0.12162717174644112,
      "kl": 0.0003865957260131836,
      "learning_rate": 2.1428571428571426e-07,
      "loss": 0.0,
      "reward": 0.2916666781529784,
      "reward_std": 0.47719811648130417,
      "rewards/equation_reward_func": 0.05468750116415322,
      "rewards/format_reward_func": 0.23697917303070426,
      "step": 6
    },
    {
      "completion_length": 504.77865982055664,
      "epoch": 0.004266666666666667,
      "grad_norm": 0.13232346241260942,
      "kl": 0.0003762245178222656,
      "learning_rate": 2.857142857142857e-07,
      "loss": 0.0,
      "reward": 0.33593750558793545,
      "reward_std": 0.4751614350825548,
      "rewards/equation_reward_func": 0.04947916720993817,
      "rewards/format_reward_func": 0.28645834140479565,
      "step": 8
    },
    {
      "completion_length": 475.7057456970215,
      "epoch": 0.005333333333333333,
      "grad_norm": 0.13843718324607834,
      "kl": 0.0003968477249145508,
      "learning_rate": 3.5714285714285716e-07,
      "loss": 0.0,
      "reward": 0.3828125102445483,
      "reward_std": 0.5227206833660603,
      "rewards/equation_reward_func": 0.0885416679084301,
      "rewards/format_reward_func": 0.2942708395421505,
      "step": 10
    },
    {
      "completion_length": 475.98699378967285,
      "epoch": 0.0064,
      "grad_norm": 0.14337833822484186,
      "kl": 0.0004818439483642578,
      "learning_rate": 4.285714285714285e-07,
      "loss": 0.0,
      "reward": 0.33333334140479565,
      "reward_std": 0.4693184234201908,
      "rewards/equation_reward_func": 0.05468750139698386,
      "rewards/format_reward_func": 0.2786458395421505,
      "step": 12
    },
    {
      "completion_length": 472.8099060058594,
      "epoch": 0.007466666666666667,
      "grad_norm": 0.129867140491159,
      "kl": 0.0007684230804443359,
      "learning_rate": 5e-07,
      "loss": 0.0,
      "reward": 0.45052084885537624,
      "reward_std": 0.5166866518557072,
      "rewards/equation_reward_func": 0.041666667675599456,
      "rewards/format_reward_func": 0.40885418094694614,
      "step": 14
    },
    {
      "completion_length": 464.46875762939453,
      "epoch": 0.008533333333333334,
      "grad_norm": 0.1279190002551803,
      "kl": 0.0013058185577392578,
      "learning_rate": 4.999740409224932e-07,
      "loss": 0.0,
      "reward": 0.5052083488553762,
      "reward_std": 0.5728582534939051,
      "rewards/equation_reward_func": 0.06510416814126074,
      "rewards/format_reward_func": 0.4401041753590107,
      "step": 16
    },
    {
      "completion_length": 480.2291717529297,
      "epoch": 0.0096,
      "grad_norm": 0.10647649710010443,
      "kl": 0.00380706787109375,
      "learning_rate": 4.998961690809627e-07,
      "loss": 0.0,
      "reward": 0.6588541902601719,
      "reward_std": 0.5287479311227798,
      "rewards/equation_reward_func": 0.05468750139698386,
      "rewards/format_reward_func": 0.6041666828095913,
      "step": 18
    },
    {
      "completion_length": 493.8073043823242,
      "epoch": 0.010666666666666666,
      "grad_norm": 0.10522760864642984,
      "kl": 0.004913330078125,
      "learning_rate": 4.997664006472578e-07,
      "loss": 0.0,
      "reward": 0.7734375223517418,
      "reward_std": 0.4910791157744825,
      "rewards/equation_reward_func": 0.07031250116415322,
      "rewards/format_reward_func": 0.7031250186264515,
      "step": 20
    },
    {
      "completion_length": 455.6510524749756,
      "epoch": 0.011733333333333333,
      "grad_norm": 0.09917661844432689,
      "kl": 0.008411407470703125,
      "learning_rate": 4.995847625707292e-07,
      "loss": 0.0,
      "reward": 0.7812500186264515,
      "reward_std": 0.4674575887620449,
      "rewards/equation_reward_func": 0.0651041679084301,
      "rewards/format_reward_func": 0.7161458507180214,
      "step": 22
    },
    {
      "completion_length": 464.50782012939453,
      "epoch": 0.0128,
      "grad_norm": 0.10189992043189328,
      "kl": 0.0059833526611328125,
      "learning_rate": 4.993512925726318e-07,
      "loss": 0.0,
      "reward": 0.8619791865348816,
      "reward_std": 0.49650320410728455,
      "rewards/equation_reward_func": 0.08854166860692203,
      "rewards/format_reward_func": 0.7734375223517418,
      "step": 24
    },
    {
      "completion_length": 447.40626335144043,
      "epoch": 0.013866666666666666,
      "grad_norm": 0.09219816177682034,
      "kl": 0.006900787353515625,
      "learning_rate": 4.990660391382923e-07,
      "loss": 0.0,
      "reward": 0.960937537252903,
      "reward_std": 0.4377214591950178,
      "rewards/equation_reward_func": 0.11718750256113708,
      "rewards/format_reward_func": 0.8437500260770321,
      "step": 26
    },
    {
      "completion_length": 436.3099117279053,
      "epoch": 0.014933333333333333,
      "grad_norm": 0.07907793746945187,
      "kl": 0.009281158447265625,
      "learning_rate": 4.987290615070384e-07,
      "loss": 0.0,
      "reward": 0.9713542014360428,
      "reward_std": 0.3975960807874799,
      "rewards/equation_reward_func": 0.09895833535119891,
      "rewards/format_reward_func": 0.872395858168602,
      "step": 28
    },
    {
      "completion_length": 428.1354293823242,
      "epoch": 0.016,
      "grad_norm": 0.0845150241699145,
      "kl": 0.011430740356445312,
      "learning_rate": 4.983404296598978e-07,
      "loss": 0.0,
      "reward": 0.9531250298023224,
      "reward_std": 0.359499204903841,
      "rewards/equation_reward_func": 0.0703125016298145,
      "rewards/format_reward_func": 0.8828125223517418,
      "step": 30
    },
    {
      "completion_length": 434.62500953674316,
      "epoch": 0.017066666666666667,
      "grad_norm": 0.08196142586101564,
      "kl": 0.010782241821289062,
      "learning_rate": 4.979002243050646e-07,
      "loss": 0.0,
      "reward": 1.0260416977107525,
      "reward_std": 0.30062979739159346,
      "rewards/equation_reward_func": 0.09635416860692203,
      "rewards/format_reward_func": 0.9296875260770321,
      "step": 32
    },
    {
      "completion_length": 438.08595085144043,
      "epoch": 0.018133333333333335,
      "grad_norm": 0.08432790923176402,
      "kl": 0.012115478515625,
      "learning_rate": 4.974085368611381e-07,
      "loss": 0.0,
      "reward": 1.049479205161333,
      "reward_std": 0.3121222285553813,
      "rewards/equation_reward_func": 0.11197917000390589,
      "rewards/format_reward_func": 0.9375000223517418,
      "step": 34
    },
    {
      "completion_length": 421.16407203674316,
      "epoch": 0.0192,
      "grad_norm": 0.080639508238748,
      "kl": 0.01397705078125,
      "learning_rate": 4.968654694381379e-07,
      "loss": 0.0,
      "reward": 1.0598958618938923,
      "reward_std": 0.27779901027679443,
      "rewards/equation_reward_func": 0.10416666907258332,
      "rewards/format_reward_func": 0.9557291865348816,
      "step": 36
    },
    {
      "completion_length": 405.12240982055664,
      "epoch": 0.020266666666666665,
      "grad_norm": 0.07681322754981268,
      "kl": 0.013866424560546875,
      "learning_rate": 4.962711348162987e-07,
      "loss": 0.0,
      "reward": 1.0390625409781933,
      "reward_std": 0.2664716215804219,
      "rewards/equation_reward_func": 0.08593750279396772,
      "rewards/format_reward_func": 0.9531250186264515,
      "step": 38
    },
    {
      "completion_length": 400.0104274749756,
      "epoch": 0.021333333333333333,
      "grad_norm": 0.08198714493646655,
      "kl": 0.015293121337890625,
      "learning_rate": 4.956256564226487e-07,
      "loss": 0.0,
      "reward": 1.1067708730697632,
      "reward_std": 0.28682188084349036,
      "rewards/equation_reward_func": 0.14322917023673654,
      "rewards/format_reward_func": 0.9635416828095913,
      "step": 40
    },
    {
      "completion_length": 400.78386306762695,
      "epoch": 0.0224,
      "grad_norm": 0.0855015587257399,
      "kl": 0.018611907958984375,
      "learning_rate": 4.949291683053768e-07,
      "loss": 0.0,
      "reward": 1.0937500223517418,
      "reward_std": 0.24716421775519848,
      "rewards/equation_reward_func": 0.11197916930541396,
      "rewards/format_reward_func": 0.9817708469927311,
      "step": 42
    },
    {
      "completion_length": 408.34115409851074,
      "epoch": 0.023466666666666667,
      "grad_norm": 0.07249139521720419,
      "kl": 0.017139434814453125,
      "learning_rate": 4.941818151059955e-07,
      "loss": 0.0,
      "reward": 1.0546875298023224,
      "reward_std": 0.24979113461449742,
      "rewards/equation_reward_func": 0.0963541695382446,
      "rewards/format_reward_func": 0.9583333544433117,
      "step": 44
    },
    {
      "completion_length": 388.62761306762695,
      "epoch": 0.024533333333333334,
      "grad_norm": 0.07507762465990464,
      "kl": 0.017795562744140625,
      "learning_rate": 4.933837520293017e-07,
      "loss": 0.0,
      "reward": 1.0781250484287739,
      "reward_std": 0.2523620016872883,
      "rewards/equation_reward_func": 0.11197917046956718,
      "rewards/format_reward_func": 0.9661458507180214,
      "step": 46
    },
    {
      "completion_length": 399.57032585144043,
      "epoch": 0.0256,
      "grad_norm": 0.06604121980517051,
      "kl": 0.017971038818359375,
      "learning_rate": 4.925351448111454e-07,
      "loss": 0.0,
      "reward": 1.0468750335276127,
      "reward_std": 0.21127380011603236,
      "rewards/equation_reward_func": 0.07552083511836827,
      "rewards/format_reward_func": 0.9713541865348816,
      "step": 48
    },
    {
      "completion_length": 392.4687557220459,
      "epoch": 0.02666666666666667,
      "grad_norm": 0.0903310628098169,
      "kl": 0.0193939208984375,
      "learning_rate": 4.91636169684011e-07,
      "loss": 0.0,
      "reward": 1.1093750409781933,
      "reward_std": 0.29062134958803654,
      "rewards/equation_reward_func": 0.13541667209938169,
      "rewards/format_reward_func": 0.9739583432674408,
      "step": 50
    },
    {
      "completion_length": 374.1067810058594,
      "epoch": 0.027733333333333332,
      "grad_norm": 0.07170688038365744,
      "kl": 0.02114105224609375,
      "learning_rate": 4.906870133404186e-07,
      "loss": 0.0,
      "reward": 1.0833333693444729,
      "reward_std": 0.2505181049928069,
      "rewards/equation_reward_func": 0.11458333535119891,
      "rewards/format_reward_func": 0.9687500074505806,
      "step": 52
    },
    {
      "completion_length": 385.36980056762695,
      "epoch": 0.0288,
      "grad_norm": 0.08750349012682264,
      "kl": 0.02417755126953125,
      "learning_rate": 4.896878728941531e-07,
      "loss": 0.0,
      "reward": 1.1432292014360428,
      "reward_std": 0.3026517196558416,
      "rewards/equation_reward_func": 0.16666667186655104,
      "rewards/format_reward_func": 0.9765625186264515,
      "step": 54
    },
    {
      "completion_length": 382.93751335144043,
      "epoch": 0.029866666666666666,
      "grad_norm": 0.08790417727425984,
      "kl": 0.018756866455078125,
      "learning_rate": 4.886389558393284e-07,
      "loss": 0.0,
      "reward": 1.1223958693444729,
      "reward_std": 0.2850013840943575,
      "rewards/equation_reward_func": 0.14322917023673654,
      "rewards/format_reward_func": 0.979166679084301,
      "step": 56
    },
    {
      "completion_length": 403.513032913208,
      "epoch": 0.030933333333333334,
      "grad_norm": 0.07614614875477466,
      "kl": 0.02037811279296875,
      "learning_rate": 4.875404800072976e-07,
      "loss": 0.0,
      "reward": 1.1432292088866234,
      "reward_std": 0.2669796203263104,
      "rewards/equation_reward_func": 0.16145833977498114,
      "rewards/format_reward_func": 0.9817708469927311,
      "step": 58
    },
    {
      "completion_length": 382.6432418823242,
      "epoch": 0.032,
      "grad_norm": 0.0923539372935242,
      "kl": 0.02227783203125,
      "learning_rate": 4.86392673521415e-07,
      "loss": 0.0,
      "reward": 1.1536458693444729,
      "reward_std": 0.31879409588873386,
      "rewards/equation_reward_func": 0.1796875053551048,
      "rewards/format_reward_func": 0.9739583432674408,
      "step": 60
    },
    {
      "completion_length": 358.4349060058594,
      "epoch": 0.03306666666666667,
      "grad_norm": 0.10173008574938576,
      "kl": 0.02350616455078125,
      "learning_rate": 4.851957747496606e-07,
      "loss": 0.0,
      "reward": 1.1562500447034836,
      "reward_std": 0.2772155348211527,
      "rewards/equation_reward_func": 0.16927083814516664,
      "rewards/format_reward_func": 0.9869791753590107,
      "step": 62
    },
    {
      "completion_length": 364.18490409851074,
      "epoch": 0.034133333333333335,
      "grad_norm": 0.08496355305518047,
      "kl": 0.02630615234375,
      "learning_rate": 4.839500322551386e-07,
      "loss": 0.0,
      "reward": 1.1093750447034836,
      "reward_std": 0.24286148557439446,
      "rewards/equation_reward_func": 0.12760417093522847,
      "rewards/format_reward_func": 0.9817708432674408,
      "step": 64
    },
    {
      "completion_length": 373.833345413208,
      "epoch": 0.0352,
      "grad_norm": 0.09458816516251609,
      "kl": 0.02667999267578125,
      "learning_rate": 4.826557047444563e-07,
      "loss": 0.0,
      "reward": 1.1848958656191826,
      "reward_std": 0.3138170298188925,
      "rewards/equation_reward_func": 0.20572917233221233,
      "rewards/format_reward_func": 0.979166679084301,
      "step": 66
    },
    {
      "completion_length": 352.2083435058594,
      "epoch": 0.03626666666666667,
      "grad_norm": 0.08416786947789269,
      "kl": 0.03083038330078125,
      "learning_rate": 4.813130610139993e-07,
      "loss": 0.0,
      "reward": 1.1744792088866234,
      "reward_std": 0.2558550937101245,
      "rewards/equation_reward_func": 0.18489583861082792,
      "rewards/format_reward_func": 0.9895833432674408,
      "step": 68
    },
    {
      "completion_length": 377.62500953674316,
      "epoch": 0.037333333333333336,
      "grad_norm": 0.07539913544982475,
      "kl": 0.03000640869140625,
      "learning_rate": 4.799223798941089e-07,
      "loss": 0.0,
      "reward": 1.070312537252903,
      "reward_std": 0.17193882586434484,
      "rewards/equation_reward_func": 0.08072916814126074,
      "rewards/format_reward_func": 0.9895833432674408,
      "step": 70
    },
    {
      "completion_length": 370.4531364440918,
      "epoch": 0.0384,
      "grad_norm": 0.09187391682605524,
      "kl": 0.03436279296875,
      "learning_rate": 4.78483950191177e-07,
      "loss": 0.0,
      "reward": 1.1562500298023224,
      "reward_std": 0.2756755482405424,
      "rewards/equation_reward_func": 0.1770833362825215,
      "rewards/format_reward_func": 0.979166679084301,
      "step": 72
    },
    {
      "completion_length": 373.04688453674316,
      "epoch": 0.039466666666666664,
      "grad_norm": 0.09537515327502834,
      "kl": 0.03740692138671875,
      "learning_rate": 4.769980706276687e-07,
      "loss": 0.0,
      "reward": 1.1354167088866234,
      "reward_std": 0.25582731096073985,
      "rewards/equation_reward_func": 0.15885417256504297,
      "rewards/format_reward_func": 0.9765625186264515,
      "step": 74
    },
    {
      "completion_length": 387.8567810058594,
      "epoch": 0.04053333333333333,
      "grad_norm": 0.08000596022141122,
      "kl": 0.0389404296875,
      "learning_rate": 4.7546504978008595e-07,
      "loss": 0.0,
      "reward": 1.1328125335276127,
      "reward_std": 0.30721960263326764,
      "rewards/equation_reward_func": 0.1666666700039059,
      "rewards/format_reward_func": 0.9661458507180214,
      "step": 76
    },
    {
      "completion_length": 390.2448024749756,
      "epoch": 0.0416,
      "grad_norm": 0.07863626089779173,
      "kl": 0.0442047119140625,
      "learning_rate": 4.738852060148848e-07,
      "loss": 0.0,
      "reward": 1.127604205161333,
      "reward_std": 0.26901706866919994,
      "rewards/equation_reward_func": 0.15364583814516664,
      "rewards/format_reward_func": 0.9739583507180214,
      "step": 78
    },
    {
      "completion_length": 379.2161521911621,
      "epoch": 0.042666666666666665,
      "grad_norm": 0.08601071973954262,
      "kl": 0.0434417724609375,
      "learning_rate": 4.722588674223593e-07,
      "loss": 0.0,
      "reward": 1.1380208656191826,
      "reward_std": 0.2812240272760391,
      "rewards/equation_reward_func": 0.164062503259629,
      "rewards/format_reward_func": 0.9739583469927311,
      "step": 80
    },
    {
      "completion_length": 371.59115409851074,
      "epoch": 0.04373333333333333,
      "grad_norm": 0.07568126962421509,
      "kl": 0.0497894287109375,
      "learning_rate": 4.70586371748506e-07,
      "loss": 0.0,
      "reward": 1.1380208656191826,
      "reward_std": 0.25603401800617576,
      "rewards/equation_reward_func": 0.15885417070239782,
      "rewards/format_reward_func": 0.9791666828095913,
      "step": 82
    },
    {
      "completion_length": 381.30469703674316,
      "epoch": 0.0448,
      "grad_norm": 0.08065111561901392,
      "kl": 0.0525665283203125,
      "learning_rate": 4.6886806632488363e-07,
      "loss": 0.0001,
      "reward": 1.169270858168602,
      "reward_std": 0.25962691847234964,
      "rewards/equation_reward_func": 0.19531250419095159,
      "rewards/format_reward_func": 0.9739583432674408,
      "step": 84
    },
    {
      "completion_length": 350.8020944595337,
      "epoch": 0.04586666666666667,
      "grad_norm": 0.09857801804683694,
      "kl": 0.0528717041015625,
      "learning_rate": 4.6710430799648143e-07,
      "loss": 0.0001,
      "reward": 1.1796875447034836,
      "reward_std": 0.2887058644555509,
      "rewards/equation_reward_func": 0.1979166748933494,
      "rewards/format_reward_func": 0.9817708507180214,
      "step": 86
    },
    {
      "completion_length": 353.0573024749756,
      "epoch": 0.046933333333333334,
      "grad_norm": 0.09077630030450431,
      "kl": 0.059906005859375,
      "learning_rate": 4.652954630476127e-07,
      "loss": 0.0001,
      "reward": 1.2239583730697632,
      "reward_std": 0.30759388813748956,
      "rewards/equation_reward_func": 0.2500000069849193,
      "rewards/format_reward_func": 0.9739583507180214,
      "step": 88
    },
    {
      "completion_length": 359.1666784286499,
      "epoch": 0.048,
      "grad_norm": 0.09945645690703775,
      "kl": 0.0612030029296875,
      "learning_rate": 4.6344190712584713e-07,
      "loss": 0.0001,
      "reward": 1.2630208805203438,
      "reward_std": 0.27152396691963077,
      "rewards/equation_reward_func": 0.27604167466051877,
      "rewards/format_reward_func": 0.9869791753590107,
      "step": 90
    },
    {
      "completion_length": 349.7083396911621,
      "epoch": 0.04906666666666667,
      "grad_norm": 0.08132855652754273,
      "kl": 0.06951904296875,
      "learning_rate": 4.615440251639995e-07,
      "loss": 0.0001,
      "reward": 1.2395833656191826,
      "reward_std": 0.23549415357410908,
      "rewards/equation_reward_func": 0.25520834024064243,
      "rewards/format_reward_func": 0.9843750074505806,
      "step": 92
    },
    {
      "completion_length": 340.52084159851074,
      "epoch": 0.050133333333333335,
      "grad_norm": 0.08537044318099979,
      "kl": 0.075592041015625,
      "learning_rate": 4.596022113001894e-07,
      "loss": 0.0001,
      "reward": 1.2916667014360428,
      "reward_std": 0.3320260518230498,
      "rewards/equation_reward_func": 0.312500006519258,
      "rewards/format_reward_func": 0.9791666828095913,
      "step": 94
    },
    {
      "completion_length": 327.13282012939453,
      "epoch": 0.0512,
      "grad_norm": 0.09240179037114699,
      "kl": 0.070465087890625,
      "learning_rate": 4.576168687959895e-07,
      "loss": 0.0001,
      "reward": 1.3255208805203438,
      "reward_std": 0.386608456261456,
      "rewards/equation_reward_func": 0.3567708469927311,
      "rewards/format_reward_func": 0.9687500074505806,
      "step": 96
    },
    {
      "completion_length": 364.68751335144043,
      "epoch": 0.05226666666666667,
      "grad_norm": 0.10186126690814491,
      "kl": 0.080108642578125,
      "learning_rate": 4.555884099526793e-07,
      "loss": 0.0001,
      "reward": 1.1770833656191826,
      "reward_std": 0.2261401410214603,
      "rewards/equation_reward_func": 0.19010417093522847,
      "rewards/format_reward_func": 0.986979179084301,
      "step": 98
    },
    {
      "completion_length": 378.69271755218506,
      "epoch": 0.05333333333333334,
      "grad_norm": 0.08283978901098998,
      "kl": 0.070831298828125,
      "learning_rate": 4.5351725602562174e-07,
      "loss": 0.0001,
      "reward": 1.2083333805203438,
      "reward_std": 0.2539973724633455,
      "rewards/equation_reward_func": 0.22656251001171768,
      "rewards/format_reward_func": 0.9817708469927311,
      "step": 100
    },
    {
      "completion_length": 349.2812604904175,
      "epoch": 0.0544,
      "grad_norm": 0.10236681253344297,
      "kl": 0.0821533203125,
      "learning_rate": 4.514038371367791e-07,
      "loss": 0.0001,
      "reward": 1.3098958730697632,
      "reward_std": 0.3604734097607434,
      "rewards/equation_reward_func": 0.3437500107102096,
      "rewards/format_reward_func": 0.9661458432674408,
      "step": 102
    },
    {
      "completion_length": 357.3619899749756,
      "epoch": 0.055466666666666664,
      "grad_norm": 0.09738517323229856,
      "kl": 0.086883544921875,
      "learning_rate": 4.4924859218538936e-07,
      "loss": 0.0001,
      "reward": 1.3046875447034836,
      "reward_std": 0.3255553734488785,
      "rewards/equation_reward_func": 0.33072917233221233,
      "rewards/format_reward_func": 0.9739583469927311,
      "step": 104
    },
    {
      "completion_length": 365.2760524749756,
      "epoch": 0.05653333333333333,
      "grad_norm": 0.08975705250496918,
      "kl": 0.09002685546875,
      "learning_rate": 4.470519687568185e-07,
      "loss": 0.0001,
      "reward": 1.2968750484287739,
      "reward_std": 0.33681244123727083,
      "rewards/equation_reward_func": 0.3203125132713467,
      "rewards/format_reward_func": 0.9765625074505806,
      "step": 106
    },
    {
      "completion_length": 380.40365505218506,
      "epoch": 0.0576,
      "grad_norm": 0.08944591293143872,
      "kl": 0.085113525390625,
      "learning_rate": 4.4481442302960923e-07,
      "loss": 0.0001,
      "reward": 1.2135417126119137,
      "reward_std": 0.2771795648150146,
      "rewards/equation_reward_func": 0.23697917629033327,
      "rewards/format_reward_func": 0.9765625149011612,
      "step": 108
    },
    {
      "completion_length": 389.94271659851074,
      "epoch": 0.058666666666666666,
      "grad_norm": 0.09619766220094576,
      "kl": 0.09130859375,
      "learning_rate": 4.4253641968074505e-07,
      "loss": 0.0001,
      "reward": 1.2500000335276127,
      "reward_std": 0.2892899289727211,
      "rewards/equation_reward_func": 0.28645833977498114,
      "rewards/format_reward_func": 0.963541679084301,
      "step": 110
    },
    {
      "completion_length": 381.99219512939453,
      "epoch": 0.05973333333333333,
      "grad_norm": 0.10498087313979376,
      "kl": 0.09423828125,
      "learning_rate": 4.402184317891501e-07,
      "loss": 0.0001,
      "reward": 1.3229167088866234,
      "reward_std": 0.32423597015440464,
      "rewards/equation_reward_func": 0.35156250768341124,
      "rewards/format_reward_func": 0.9713541865348816,
      "step": 112
    },
    {
      "completion_length": 391.36719608306885,
      "epoch": 0.0608,
      "grad_norm": 0.09626210263887228,
      "kl": 0.097381591796875,
      "learning_rate": 4.37860940737443e-07,
      "loss": 0.0001,
      "reward": 1.2500000298023224,
      "reward_std": 0.3341089729219675,
      "rewards/equation_reward_func": 0.28385417186655104,
      "rewards/format_reward_func": 0.9661458469927311,
      "step": 114
    },
    {
      "completion_length": 379.2812614440918,
      "epoch": 0.06186666666666667,
      "grad_norm": 0.0919023125255689,
      "kl": 0.09576416015625,
      "learning_rate": 4.354644361119671e-07,
      "loss": 0.0001,
      "reward": 1.2968750447034836,
      "reward_std": 0.30213321885094047,
      "rewards/equation_reward_func": 0.3255208437331021,
      "rewards/format_reward_func": 0.9713541865348816,
      "step": 116
    },
    {
      "completion_length": 377.5989694595337,
      "epoch": 0.06293333333333333,
      "grad_norm": 0.1258891080084215,
      "kl": 0.1126708984375,
      "learning_rate": 4.3302941560111716e-07,
      "loss": 0.0001,
      "reward": 1.3880208656191826,
      "reward_std": 0.29082584474235773,
      "rewards/equation_reward_func": 0.4192708428017795,
      "rewards/format_reward_func": 0.9687500149011612,
      "step": 118
    },
    {
      "completion_length": 406.2239742279053,
      "epoch": 0.064,
      "grad_norm": 0.07341804992437591,
      "kl": 0.094940185546875,
      "learning_rate": 4.3055638489198236e-07,
      "loss": 0.0001,
      "reward": 1.3072916977107525,
      "reward_std": 0.3007106310687959,
      "rewards/equation_reward_func": 0.3359375118743628,
      "rewards/format_reward_func": 0.9713541865348816,
      "step": 120
    },
    {
      "completion_length": 440.3489694595337,
      "epoch": 0.06506666666666666,
      "grad_norm": 0.07202980759381214,
      "kl": 0.102081298828125,
      "learning_rate": 4.280458575653296e-07,
      "loss": 0.0001,
      "reward": 1.2942708879709244,
      "reward_std": 0.2781888456083834,
      "rewards/equation_reward_func": 0.34635417629033327,
      "rewards/format_reward_func": 0.9479166828095913,
      "step": 122
    },
    {
      "completion_length": 373.723970413208,
      "epoch": 0.06613333333333334,
      "grad_norm": 0.12005799306967507,
      "kl": 0.109588623046875,
      "learning_rate": 4.2549835498894665e-07,
      "loss": 0.0001,
      "reward": 1.3619792126119137,
      "reward_std": 0.30349841713905334,
      "rewards/equation_reward_func": 0.39583334419876337,
      "rewards/format_reward_func": 0.9661458544433117,
      "step": 124
    },
    {
      "completion_length": 424.8880310058594,
      "epoch": 0.0672,
      "grad_norm": 0.08141812528362076,
      "kl": 0.11883544921875,
      "learning_rate": 4.229144062093679e-07,
      "loss": 0.0001,
      "reward": 1.3072916939854622,
      "reward_std": 0.34696589363738894,
      "rewards/equation_reward_func": 0.37760417303070426,
      "rewards/format_reward_func": 0.9296875186264515,
      "step": 126
    },
    {
      "completion_length": 425.73959732055664,
      "epoch": 0.06826666666666667,
      "grad_norm": 0.08953036809534468,
      "kl": 0.10595703125,
      "learning_rate": 4.2029454784200675e-07,
      "loss": 0.0001,
      "reward": 1.3203125335276127,
      "reward_std": 0.2961498526856303,
      "rewards/equation_reward_func": 0.3697916716337204,
      "rewards/format_reward_func": 0.9505208544433117,
      "step": 128
    },
    {
      "completion_length": 456.8411560058594,
      "epoch": 0.06933333333333333,
      "grad_norm": 0.10202840286205975,
      "kl": 0.128875732421875,
      "learning_rate": 4.1763932395971433e-07,
      "loss": 0.0001,
      "reward": 1.2786458767950535,
      "reward_std": 0.3274143426679075,
      "rewards/equation_reward_func": 0.3463541774544865,
      "rewards/format_reward_func": 0.9322916902601719,
      "step": 130
    },
    {
      "completion_length": 391.82552909851074,
      "epoch": 0.0704,
      "grad_norm": 0.09343731649452018,
      "kl": 0.116180419921875,
      "learning_rate": 4.1494928597979117e-07,
      "loss": 0.0001,
      "reward": 1.4427083693444729,
      "reward_std": 0.2739125872030854,
      "rewards/equation_reward_func": 0.48958334792405367,
      "rewards/format_reward_func": 0.9531250149011612,
      "step": 132
    },
    {
      "completion_length": 431.3073043823242,
      "epoch": 0.07146666666666666,
      "grad_norm": 0.09948838006778335,
      "kl": 0.113861083984375,
      "learning_rate": 4.122249925494726e-07,
      "loss": 0.0001,
      "reward": 1.3281250447034836,
      "reward_std": 0.267287774477154,
      "rewards/equation_reward_func": 0.37239584675990045,
      "rewards/format_reward_func": 0.955729179084301,
      "step": 134
    },
    {
      "completion_length": 422.07032203674316,
      "epoch": 0.07253333333333334,
      "grad_norm": 0.07206798558431624,
      "kl": 0.13238525390625,
      "learning_rate": 4.094670094299131e-07,
      "loss": 0.0001,
      "reward": 1.3750000409781933,
      "reward_std": 0.2928238473832607,
      "rewards/equation_reward_func": 0.42447917675599456,
      "rewards/format_reward_func": 0.9505208544433117,
      "step": 136
    },
    {
      "completion_length": 443.54688358306885,
      "epoch": 0.0736,
      "grad_norm": 0.10976069905088891,
      "kl": 0.109039306640625,
      "learning_rate": 4.066759093786931e-07,
      "loss": 0.0001,
      "reward": 1.2630208693444729,
      "reward_std": 0.2776200850494206,
      "rewards/equation_reward_func": 0.33593750884756446,
      "rewards/format_reward_func": 0.927083358168602,
      "step": 138
    },
    {
      "completion_length": 398.4505367279053,
      "epoch": 0.07466666666666667,
      "grad_norm": 0.084796835919473,
      "kl": 0.12408447265625,
      "learning_rate": 4.038522720308732e-07,
      "loss": 0.0001,
      "reward": 1.4375000484287739,
      "reward_std": 0.2496197698637843,
      "rewards/equation_reward_func": 0.4739583432674408,
      "rewards/format_reward_func": 0.9635416902601719,
      "step": 140
    },
    {
      "completion_length": 375.6875104904175,
      "epoch": 0.07573333333333333,
      "grad_norm": 0.06578890547258132,
      "kl": 0.134796142578125,
      "learning_rate": 4.009966837786194e-07,
      "loss": 0.0001,
      "reward": 1.4114583730697632,
      "reward_std": 0.2610441828146577,
      "rewards/equation_reward_func": 0.4453125111758709,
      "rewards/format_reward_func": 0.9661458507180214,
      "step": 142
    },
    {
      "completion_length": 368.48959255218506,
      "epoch": 0.0768,
      "grad_norm": 0.10902428052363637,
      "kl": 0.136627197265625,
      "learning_rate": 3.981097376494259e-07,
      "loss": 0.0001,
      "reward": 1.4687500521540642,
      "reward_std": 0.25434603728353977,
      "rewards/equation_reward_func": 0.49218752002343535,
      "rewards/format_reward_func": 0.9765625223517418,
      "step": 144
    },
    {
      "completion_length": 406.8750114440918,
      "epoch": 0.07786666666666667,
      "grad_norm": 0.1079461409291905,
      "kl": 0.129119873046875,
      "learning_rate": 3.951920331829592e-07,
      "loss": 0.0001,
      "reward": 1.3281250484287739,
      "reward_std": 0.25655436515808105,
      "rewards/equation_reward_func": 0.372395841171965,
      "rewards/format_reward_func": 0.9557291902601719,
      "step": 146
    },
    {
      "completion_length": 401.2213668823242,
      "epoch": 0.07893333333333333,
      "grad_norm": 0.1067111189421742,
      "kl": 0.132171630859375,
      "learning_rate": 3.922441763065506e-07,
      "loss": 0.0001,
      "reward": 1.3906250447034836,
      "reward_std": 0.249761619605124,
      "rewards/equation_reward_func": 0.4270833439659327,
      "rewards/format_reward_func": 0.9635416902601719,
      "step": 148
    },
    {
      "completion_length": 450.89844512939453,
      "epoch": 0.08,
      "grad_norm": 0.07018564082166065,
      "kl": 0.112640380859375,
      "learning_rate": 3.8926677920936093e-07,
      "loss": 0.0001,
      "reward": 1.1901042126119137,
      "reward_std": 0.21367743890732527,
      "rewards/equation_reward_func": 0.23177083814516664,
      "rewards/format_reward_func": 0.9583333507180214,
      "step": 150
    },
    {
      "completion_length": 319.31250762939453,
      "epoch": 0.08106666666666666,
      "grad_norm": 0.09931506831300466,
      "kl": 0.16046142578125,
      "learning_rate": 3.862604602152464e-07,
      "loss": 0.0002,
      "reward": 1.5156250521540642,
      "reward_std": 0.17254623072221875,
      "rewards/equation_reward_func": 0.5416666828095913,
      "rewards/format_reward_func": 0.9739583432674408,
      "step": 152
    },
    {
      "completion_length": 372.6979274749756,
      "epoch": 0.08213333333333334,
      "grad_norm": 0.08423289509476255,
      "kl": 0.1458740234375,
      "learning_rate": 3.8322584365434934e-07,
      "loss": 0.0001,
      "reward": 1.4036458805203438,
      "reward_std": 0.23620562674477696,
      "rewards/equation_reward_func": 0.43229168234393,
      "rewards/format_reward_func": 0.971354179084301,
      "step": 154
    },
    {
      "completion_length": 383.4323024749756,
      "epoch": 0.0832,
      "grad_norm": 0.11684907563681693,
      "kl": 0.13134765625,
      "learning_rate": 3.8016355973344173e-07,
      "loss": 0.0001,
      "reward": 1.3619792014360428,
      "reward_std": 0.2366077760234475,
      "rewards/equation_reward_func": 0.39062501303851604,
      "rewards/format_reward_func": 0.9713541828095913,
      "step": 156
    },
    {
      "completion_length": 421.96876335144043,
      "epoch": 0.08426666666666667,
      "grad_norm": 0.1641382478416818,
      "kl": 0.124755859375,
      "learning_rate": 3.7707424440504863e-07,
      "loss": 0.0001,
      "reward": 1.296875037252903,
      "reward_std": 0.23676540749147534,
      "rewards/equation_reward_func": 0.3333333421032876,
      "rewards/format_reward_func": 0.963541679084301,
      "step": 158
    },
    {
      "completion_length": 365.9166793823242,
      "epoch": 0.08533333333333333,
      "grad_norm": 0.07817184043452526,
      "kl": 0.1395263671875,
      "learning_rate": 3.739585392353787e-07,
      "loss": 0.0001,
      "reward": 1.3880208767950535,
      "reward_std": 0.19910774566233158,
      "rewards/equation_reward_func": 0.4140625100117177,
      "rewards/format_reward_func": 0.9739583469927311,
      "step": 160
    },
    {
      "completion_length": 336.37500953674316,
      "epoch": 0.0864,
      "grad_norm": 0.08666435211660832,
      "kl": 0.155364990234375,
      "learning_rate": 3.7081709127108767e-07,
      "loss": 0.0002,
      "reward": 1.5182291939854622,
      "reward_std": 0.20631011482328176,
      "rewards/equation_reward_func": 0.5312500149011612,
      "rewards/format_reward_func": 0.986979179084301,
      "step": 162
    },
    {
      "completion_length": 360.15625953674316,
      "epoch": 0.08746666666666666,
      "grad_norm": 0.07995177005809427,
      "kl": 0.134307861328125,
      "learning_rate": 3.6765055290490513e-07,
      "loss": 0.0001,
      "reward": 1.367187537252903,
      "reward_std": 0.23405077820643783,
      "rewards/equation_reward_func": 0.3776041774544865,
      "rewards/format_reward_func": 0.9895833432674408,
      "step": 164
    },
    {
      "completion_length": 316.8932418823242,
      "epoch": 0.08853333333333334,
      "grad_norm": 0.15304933704233734,
      "kl": 0.155242919921875,
      "learning_rate": 3.644595817401501e-07,
      "loss": 0.0002,
      "reward": 1.533854216337204,
      "reward_std": 0.25381680950522423,
      "rewards/equation_reward_func": 0.5442708469927311,
      "rewards/format_reward_func": 0.9895833432674408,
      "step": 166
    },
    {
      "completion_length": 388.1224002838135,
      "epoch": 0.0896,
      "grad_norm": 0.08385643880585995,
      "kl": 0.141143798828125,
      "learning_rate": 3.6124484045416483e-07,
      "loss": 0.0001,
      "reward": 1.3723958767950535,
      "reward_std": 0.24668778479099274,
      "rewards/equation_reward_func": 0.40885417466051877,
      "rewards/format_reward_func": 0.9635416828095913,
      "step": 168
    },
    {
      "completion_length": 351.93750858306885,
      "epoch": 0.09066666666666667,
      "grad_norm": 0.07697038389931672,
      "kl": 0.14190673828125,
      "learning_rate": 3.580069966606949e-07,
      "loss": 0.0001,
      "reward": 1.4479166939854622,
      "reward_std": 0.2045988291501999,
      "rewards/equation_reward_func": 0.4557291753590107,
      "rewards/format_reward_func": 0.9921875037252903,
      "step": 170
    },
    {
      "completion_length": 334.4765729904175,
      "epoch": 0.09173333333333333,
      "grad_norm": 0.0735056203449723,
      "kl": 0.144317626953125,
      "learning_rate": 3.547467227712444e-07,
      "loss": 0.0001,
      "reward": 1.4973958656191826,
      "reward_std": 0.19876712281256914,
      "rewards/equation_reward_func": 0.5130208474583924,
      "rewards/format_reward_func": 0.9843750074505806,
      "step": 172
    },
    {
      "completion_length": 327.29948711395264,
      "epoch": 0.0928,
      "grad_norm": 0.12548498564602414,
      "kl": 0.1490478515625,
      "learning_rate": 3.5146469585543386e-07,
      "loss": 0.0001,
      "reward": 1.4947917088866234,
      "reward_std": 0.22728270338848233,
      "rewards/equation_reward_func": 0.5078125204890966,
      "rewards/format_reward_func": 0.9869791753590107,
      "step": 174
    },
    {
      "completion_length": 398.49220085144043,
      "epoch": 0.09386666666666667,
      "grad_norm": 0.08293639399157755,
      "kl": 0.149139404296875,
      "learning_rate": 3.481615975003922e-07,
      "loss": 0.0001,
      "reward": 1.3489583730697632,
      "reward_std": 0.20350094605237246,
      "rewards/equation_reward_func": 0.3645833428017795,
      "rewards/format_reward_func": 0.9843750111758709,
      "step": 176
    },
    {
      "completion_length": 324.3489713668823,
      "epoch": 0.09493333333333333,
      "grad_norm": 0.09550896801162942,
      "kl": 0.155364990234375,
      "learning_rate": 3.448381136692089e-07,
      "loss": 0.0002,
      "reward": 1.4583333730697632,
      "reward_std": 0.1914014257490635,
      "rewards/equation_reward_func": 0.4661458458285779,
      "rewards/format_reward_func": 0.9921875037252903,
      "step": 178
    },
    {
      "completion_length": 363.1067838668823,
      "epoch": 0.096,
      "grad_norm": 0.06570582721768603,
      "kl": 0.131988525390625,
      "learning_rate": 3.4149493455847897e-07,
      "loss": 0.0001,
      "reward": 1.4010417014360428,
      "reward_std": 0.15116061177104712,
      "rewards/equation_reward_func": 0.41927084419876337,
      "rewards/format_reward_func": 0.9817708432674408,
      "step": 180
    },
    {
      "completion_length": 350.994797706604,
      "epoch": 0.09706666666666666,
      "grad_norm": 0.08114840800090561,
      "kl": 0.165863037109375,
      "learning_rate": 3.3813275445496766e-07,
      "loss": 0.0002,
      "reward": 1.4375000447034836,
      "reward_std": 0.2402082341723144,
      "rewards/equation_reward_func": 0.45572917768731713,
      "rewards/format_reward_func": 0.9817708469927311,
      "step": 182
    },
    {
      "completion_length": 341.2786521911621,
      "epoch": 0.09813333333333334,
      "grad_norm": 0.09604192137372529,
      "kl": 0.140167236328125,
      "learning_rate": 3.347522715914262e-07,
      "loss": 0.0001,
      "reward": 1.4739583805203438,
      "reward_std": 0.2517684092745185,
      "rewards/equation_reward_func": 0.48958334303461015,
      "rewards/format_reward_func": 0.9843750111758709,
      "step": 184
    },
    {
      "completion_length": 334.8333444595337,
      "epoch": 0.0992,
      "grad_norm": 0.06092803443628096,
      "kl": 0.15545654296875,
      "learning_rate": 3.313541880015877e-07,
      "loss": 0.0002,
      "reward": 1.4869792014360428,
      "reward_std": 0.17553172213956714,
      "rewards/equation_reward_func": 0.49218751210719347,
      "rewards/format_reward_func": 0.9947916716337204,
      "step": 186
    },
    {
      "completion_length": 296.9895906448364,
      "epoch": 0.10026666666666667,
      "grad_norm": 0.08698608388272158,
      "kl": 0.15386962890625,
      "learning_rate": 3.279392093743747e-07,
      "loss": 0.0002,
      "reward": 1.484375037252903,
      "reward_std": 0.18876954959705472,
      "rewards/equation_reward_func": 0.5130208488553762,
      "rewards/format_reward_func": 0.971354179084301,
      "step": 188
    },
    {
      "completion_length": 369.6171989440918,
      "epoch": 0.10133333333333333,
      "grad_norm": 0.07100886524919588,
      "kl": 0.1356201171875,
      "learning_rate": 3.245080449073459e-07,
      "loss": 0.0001,
      "reward": 1.3932291939854622,
      "reward_std": 0.22231243178248405,
      "rewards/equation_reward_func": 0.40885417722165585,
      "rewards/format_reward_func": 0.9843750149011612,
      "step": 190
    },
    {
      "completion_length": 340.66928005218506,
      "epoch": 0.1024,
      "grad_norm": 0.06938271896378453,
      "kl": 0.164703369140625,
      "learning_rate": 3.210614071594162e-07,
      "loss": 0.0002,
      "reward": 1.4322917088866234,
      "reward_std": 0.21930959541350603,
      "rewards/equation_reward_func": 0.44791667885147035,
      "rewards/format_reward_func": 0.9843750111758709,
      "step": 192
    },
    {
      "completion_length": 366.10157108306885,
      "epoch": 0.10346666666666667,
      "grad_norm": 0.0640261989037358,
      "kl": 0.1395263671875,
      "learning_rate": 3.1760001190287695e-07,
      "loss": 0.0001,
      "reward": 1.3567708730697632,
      "reward_std": 0.14457962242886424,
      "rewards/equation_reward_func": 0.3671875111758709,
      "rewards/format_reward_func": 0.9895833395421505,
      "step": 194
    },
    {
      "completion_length": 335.52344608306885,
      "epoch": 0.10453333333333334,
      "grad_norm": 0.07693618401025876,
      "kl": 0.152862548828125,
      "learning_rate": 3.141245779747502e-07,
      "loss": 0.0002,
      "reward": 1.3723958805203438,
      "reward_std": 0.19283229811117053,
      "rewards/equation_reward_func": 0.3828125139698386,
      "rewards/format_reward_func": 0.9895833395421505,
      "step": 196
    },
    {
      "completion_length": 317.8932409286499,
      "epoch": 0.1056,
      "grad_norm": 0.08196224183163763,
      "kl": 0.158447265625,
      "learning_rate": 3.106358271275056e-07,
      "loss": 0.0002,
      "reward": 1.4947917126119137,
      "reward_std": 0.19350500591099262,
      "rewards/equation_reward_func": 0.5104166797827929,
      "rewards/format_reward_func": 0.9843750074505806,
      "step": 198
    },
    {
      "completion_length": 317.8619861602783,
      "epoch": 0.10666666666666667,
      "grad_norm": 0.09403807304980984,
      "kl": 0.15289306640625,
      "learning_rate": 3.0713448387917227e-07,
      "loss": 0.0002,
      "reward": 1.4609375223517418,
      "reward_std": 0.18717782059684396,
      "rewards/equation_reward_func": 0.46354167303070426,
      "rewards/format_reward_func": 0.9973958358168602,
      "step": 200
    }
  ],
  "logging_steps": 2,
  "max_steps": 450,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}