|
{ |
|
"best_metric": null, |
|
"best_model_checkpoint": null, |
|
"epoch": 0.24, |
|
"eval_steps": 500, |
|
"global_step": 450, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"completion_length": 485.49220275878906, |
|
"epoch": 0.0010666666666666667, |
|
"grad_norm": 0.13112049806907955, |
|
"kl": 0.0, |
|
"learning_rate": 7.142857142857142e-08, |
|
"loss": -0.0, |
|
"reward": 0.3281250111758709, |
|
"reward_std": 0.4913413915783167, |
|
"rewards/equation_reward_func": 0.05729166814126074, |
|
"rewards/format_reward_func": 0.27083334047347307, |
|
"step": 2 |
|
}, |
|
{ |
|
"completion_length": 530.2265796661377, |
|
"epoch": 0.0021333333333333334, |
|
"grad_norm": 0.12198138838063947, |
|
"kl": 0.0003826618194580078, |
|
"learning_rate": 1.4285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.299479172565043, |
|
"reward_std": 0.44007157534360886, |
|
"rewards/equation_reward_func": 0.03385416744276881, |
|
"rewards/format_reward_func": 0.26562500977888703, |
|
"step": 4 |
|
}, |
|
{ |
|
"completion_length": 496.8776264190674, |
|
"epoch": 0.0032, |
|
"grad_norm": 0.12162717174644112, |
|
"kl": 0.0003865957260131836, |
|
"learning_rate": 2.1428571428571426e-07, |
|
"loss": 0.0, |
|
"reward": 0.2916666781529784, |
|
"reward_std": 0.47719811648130417, |
|
"rewards/equation_reward_func": 0.05468750116415322, |
|
"rewards/format_reward_func": 0.23697917303070426, |
|
"step": 6 |
|
}, |
|
{ |
|
"completion_length": 504.77865982055664, |
|
"epoch": 0.004266666666666667, |
|
"grad_norm": 0.13232346241260942, |
|
"kl": 0.0003762245178222656, |
|
"learning_rate": 2.857142857142857e-07, |
|
"loss": 0.0, |
|
"reward": 0.33593750558793545, |
|
"reward_std": 0.4751614350825548, |
|
"rewards/equation_reward_func": 0.04947916720993817, |
|
"rewards/format_reward_func": 0.28645834140479565, |
|
"step": 8 |
|
}, |
|
{ |
|
"completion_length": 475.7057456970215, |
|
"epoch": 0.005333333333333333, |
|
"grad_norm": 0.13843718324607834, |
|
"kl": 0.0003968477249145508, |
|
"learning_rate": 3.5714285714285716e-07, |
|
"loss": 0.0, |
|
"reward": 0.3828125102445483, |
|
"reward_std": 0.5227206833660603, |
|
"rewards/equation_reward_func": 0.0885416679084301, |
|
"rewards/format_reward_func": 0.2942708395421505, |
|
"step": 10 |
|
}, |
|
{ |
|
"completion_length": 475.98699378967285, |
|
"epoch": 0.0064, |
|
"grad_norm": 0.14337833822484186, |
|
"kl": 0.0004818439483642578, |
|
"learning_rate": 4.285714285714285e-07, |
|
"loss": 0.0, |
|
"reward": 0.33333334140479565, |
|
"reward_std": 0.4693184234201908, |
|
"rewards/equation_reward_func": 0.05468750139698386, |
|
"rewards/format_reward_func": 0.2786458395421505, |
|
"step": 12 |
|
}, |
|
{ |
|
"completion_length": 472.8099060058594, |
|
"epoch": 0.007466666666666667, |
|
"grad_norm": 0.129867140491159, |
|
"kl": 0.0007684230804443359, |
|
"learning_rate": 5e-07, |
|
"loss": 0.0, |
|
"reward": 0.45052084885537624, |
|
"reward_std": 0.5166866518557072, |
|
"rewards/equation_reward_func": 0.041666667675599456, |
|
"rewards/format_reward_func": 0.40885418094694614, |
|
"step": 14 |
|
}, |
|
{ |
|
"completion_length": 464.46875762939453, |
|
"epoch": 0.008533333333333334, |
|
"grad_norm": 0.1279190002551803, |
|
"kl": 0.0013058185577392578, |
|
"learning_rate": 4.999740409224932e-07, |
|
"loss": 0.0, |
|
"reward": 0.5052083488553762, |
|
"reward_std": 0.5728582534939051, |
|
"rewards/equation_reward_func": 0.06510416814126074, |
|
"rewards/format_reward_func": 0.4401041753590107, |
|
"step": 16 |
|
}, |
|
{ |
|
"completion_length": 480.2291717529297, |
|
"epoch": 0.0096, |
|
"grad_norm": 0.10647649710010443, |
|
"kl": 0.00380706787109375, |
|
"learning_rate": 4.998961690809627e-07, |
|
"loss": 0.0, |
|
"reward": 0.6588541902601719, |
|
"reward_std": 0.5287479311227798, |
|
"rewards/equation_reward_func": 0.05468750139698386, |
|
"rewards/format_reward_func": 0.6041666828095913, |
|
"step": 18 |
|
}, |
|
{ |
|
"completion_length": 493.8073043823242, |
|
"epoch": 0.010666666666666666, |
|
"grad_norm": 0.10522760864642984, |
|
"kl": 0.004913330078125, |
|
"learning_rate": 4.997664006472578e-07, |
|
"loss": 0.0, |
|
"reward": 0.7734375223517418, |
|
"reward_std": 0.4910791157744825, |
|
"rewards/equation_reward_func": 0.07031250116415322, |
|
"rewards/format_reward_func": 0.7031250186264515, |
|
"step": 20 |
|
}, |
|
{ |
|
"completion_length": 455.6510524749756, |
|
"epoch": 0.011733333333333333, |
|
"grad_norm": 0.09917661844432689, |
|
"kl": 0.008411407470703125, |
|
"learning_rate": 4.995847625707292e-07, |
|
"loss": 0.0, |
|
"reward": 0.7812500186264515, |
|
"reward_std": 0.4674575887620449, |
|
"rewards/equation_reward_func": 0.0651041679084301, |
|
"rewards/format_reward_func": 0.7161458507180214, |
|
"step": 22 |
|
}, |
|
{ |
|
"completion_length": 464.50782012939453, |
|
"epoch": 0.0128, |
|
"grad_norm": 0.10189992043189328, |
|
"kl": 0.0059833526611328125, |
|
"learning_rate": 4.993512925726318e-07, |
|
"loss": 0.0, |
|
"reward": 0.8619791865348816, |
|
"reward_std": 0.49650320410728455, |
|
"rewards/equation_reward_func": 0.08854166860692203, |
|
"rewards/format_reward_func": 0.7734375223517418, |
|
"step": 24 |
|
}, |
|
{ |
|
"completion_length": 447.40626335144043, |
|
"epoch": 0.013866666666666666, |
|
"grad_norm": 0.09219816177682034, |
|
"kl": 0.006900787353515625, |
|
"learning_rate": 4.990660391382923e-07, |
|
"loss": 0.0, |
|
"reward": 0.960937537252903, |
|
"reward_std": 0.4377214591950178, |
|
"rewards/equation_reward_func": 0.11718750256113708, |
|
"rewards/format_reward_func": 0.8437500260770321, |
|
"step": 26 |
|
}, |
|
{ |
|
"completion_length": 436.3099117279053, |
|
"epoch": 0.014933333333333333, |
|
"grad_norm": 0.07907793746945187, |
|
"kl": 0.009281158447265625, |
|
"learning_rate": 4.987290615070384e-07, |
|
"loss": 0.0, |
|
"reward": 0.9713542014360428, |
|
"reward_std": 0.3975960807874799, |
|
"rewards/equation_reward_func": 0.09895833535119891, |
|
"rewards/format_reward_func": 0.872395858168602, |
|
"step": 28 |
|
}, |
|
{ |
|
"completion_length": 428.1354293823242, |
|
"epoch": 0.016, |
|
"grad_norm": 0.0845150241699145, |
|
"kl": 0.011430740356445312, |
|
"learning_rate": 4.983404296598978e-07, |
|
"loss": 0.0, |
|
"reward": 0.9531250298023224, |
|
"reward_std": 0.359499204903841, |
|
"rewards/equation_reward_func": 0.0703125016298145, |
|
"rewards/format_reward_func": 0.8828125223517418, |
|
"step": 30 |
|
}, |
|
{ |
|
"completion_length": 434.62500953674316, |
|
"epoch": 0.017066666666666667, |
|
"grad_norm": 0.08196142586101564, |
|
"kl": 0.010782241821289062, |
|
"learning_rate": 4.979002243050646e-07, |
|
"loss": 0.0, |
|
"reward": 1.0260416977107525, |
|
"reward_std": 0.30062979739159346, |
|
"rewards/equation_reward_func": 0.09635416860692203, |
|
"rewards/format_reward_func": 0.9296875260770321, |
|
"step": 32 |
|
}, |
|
{ |
|
"completion_length": 438.08595085144043, |
|
"epoch": 0.018133333333333335, |
|
"grad_norm": 0.08432790923176402, |
|
"kl": 0.012115478515625, |
|
"learning_rate": 4.974085368611381e-07, |
|
"loss": 0.0, |
|
"reward": 1.049479205161333, |
|
"reward_std": 0.3121222285553813, |
|
"rewards/equation_reward_func": 0.11197917000390589, |
|
"rewards/format_reward_func": 0.9375000223517418, |
|
"step": 34 |
|
}, |
|
{ |
|
"completion_length": 421.16407203674316, |
|
"epoch": 0.0192, |
|
"grad_norm": 0.080639508238748, |
|
"kl": 0.01397705078125, |
|
"learning_rate": 4.968654694381379e-07, |
|
"loss": 0.0, |
|
"reward": 1.0598958618938923, |
|
"reward_std": 0.27779901027679443, |
|
"rewards/equation_reward_func": 0.10416666907258332, |
|
"rewards/format_reward_func": 0.9557291865348816, |
|
"step": 36 |
|
}, |
|
{ |
|
"completion_length": 405.12240982055664, |
|
"epoch": 0.020266666666666665, |
|
"grad_norm": 0.07681322754981268, |
|
"kl": 0.013866424560546875, |
|
"learning_rate": 4.962711348162987e-07, |
|
"loss": 0.0, |
|
"reward": 1.0390625409781933, |
|
"reward_std": 0.2664716215804219, |
|
"rewards/equation_reward_func": 0.08593750279396772, |
|
"rewards/format_reward_func": 0.9531250186264515, |
|
"step": 38 |
|
}, |
|
{ |
|
"completion_length": 400.0104274749756, |
|
"epoch": 0.021333333333333333, |
|
"grad_norm": 0.08198714493646655, |
|
"kl": 0.015293121337890625, |
|
"learning_rate": 4.956256564226487e-07, |
|
"loss": 0.0, |
|
"reward": 1.1067708730697632, |
|
"reward_std": 0.28682188084349036, |
|
"rewards/equation_reward_func": 0.14322917023673654, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 40 |
|
}, |
|
{ |
|
"completion_length": 400.78386306762695, |
|
"epoch": 0.0224, |
|
"grad_norm": 0.0855015587257399, |
|
"kl": 0.018611907958984375, |
|
"learning_rate": 4.949291683053768e-07, |
|
"loss": 0.0, |
|
"reward": 1.0937500223517418, |
|
"reward_std": 0.24716421775519848, |
|
"rewards/equation_reward_func": 0.11197916930541396, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 42 |
|
}, |
|
{ |
|
"completion_length": 408.34115409851074, |
|
"epoch": 0.023466666666666667, |
|
"grad_norm": 0.07249139521720419, |
|
"kl": 0.017139434814453125, |
|
"learning_rate": 4.941818151059955e-07, |
|
"loss": 0.0, |
|
"reward": 1.0546875298023224, |
|
"reward_std": 0.24979113461449742, |
|
"rewards/equation_reward_func": 0.0963541695382446, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 44 |
|
}, |
|
{ |
|
"completion_length": 388.62761306762695, |
|
"epoch": 0.024533333333333334, |
|
"grad_norm": 0.07507762465990464, |
|
"kl": 0.017795562744140625, |
|
"learning_rate": 4.933837520293017e-07, |
|
"loss": 0.0, |
|
"reward": 1.0781250484287739, |
|
"reward_std": 0.2523620016872883, |
|
"rewards/equation_reward_func": 0.11197917046956718, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 46 |
|
}, |
|
{ |
|
"completion_length": 399.57032585144043, |
|
"epoch": 0.0256, |
|
"grad_norm": 0.06604121980517051, |
|
"kl": 0.017971038818359375, |
|
"learning_rate": 4.925351448111454e-07, |
|
"loss": 0.0, |
|
"reward": 1.0468750335276127, |
|
"reward_std": 0.21127380011603236, |
|
"rewards/equation_reward_func": 0.07552083511836827, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 48 |
|
}, |
|
{ |
|
"completion_length": 392.4687557220459, |
|
"epoch": 0.02666666666666667, |
|
"grad_norm": 0.0903310628098169, |
|
"kl": 0.0193939208984375, |
|
"learning_rate": 4.91636169684011e-07, |
|
"loss": 0.0, |
|
"reward": 1.1093750409781933, |
|
"reward_std": 0.29062134958803654, |
|
"rewards/equation_reward_func": 0.13541667209938169, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 50 |
|
}, |
|
{ |
|
"completion_length": 374.1067810058594, |
|
"epoch": 0.027733333333333332, |
|
"grad_norm": 0.07170688038365744, |
|
"kl": 0.02114105224609375, |
|
"learning_rate": 4.906870133404186e-07, |
|
"loss": 0.0, |
|
"reward": 1.0833333693444729, |
|
"reward_std": 0.2505181049928069, |
|
"rewards/equation_reward_func": 0.11458333535119891, |
|
"rewards/format_reward_func": 0.9687500074505806, |
|
"step": 52 |
|
}, |
|
{ |
|
"completion_length": 385.36980056762695, |
|
"epoch": 0.0288, |
|
"grad_norm": 0.08750349012682264, |
|
"kl": 0.02417755126953125, |
|
"learning_rate": 4.896878728941531e-07, |
|
"loss": 0.0, |
|
"reward": 1.1432292014360428, |
|
"reward_std": 0.3026517196558416, |
|
"rewards/equation_reward_func": 0.16666667186655104, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 54 |
|
}, |
|
{ |
|
"completion_length": 382.93751335144043, |
|
"epoch": 0.029866666666666666, |
|
"grad_norm": 0.08790417727425984, |
|
"kl": 0.018756866455078125, |
|
"learning_rate": 4.886389558393284e-07, |
|
"loss": 0.0, |
|
"reward": 1.1223958693444729, |
|
"reward_std": 0.2850013840943575, |
|
"rewards/equation_reward_func": 0.14322917023673654, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 56 |
|
}, |
|
{ |
|
"completion_length": 403.513032913208, |
|
"epoch": 0.030933333333333334, |
|
"grad_norm": 0.07614614875477466, |
|
"kl": 0.02037811279296875, |
|
"learning_rate": 4.875404800072976e-07, |
|
"loss": 0.0, |
|
"reward": 1.1432292088866234, |
|
"reward_std": 0.2669796203263104, |
|
"rewards/equation_reward_func": 0.16145833977498114, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 58 |
|
}, |
|
{ |
|
"completion_length": 382.6432418823242, |
|
"epoch": 0.032, |
|
"grad_norm": 0.0923539372935242, |
|
"kl": 0.02227783203125, |
|
"learning_rate": 4.86392673521415e-07, |
|
"loss": 0.0, |
|
"reward": 1.1536458693444729, |
|
"reward_std": 0.31879409588873386, |
|
"rewards/equation_reward_func": 0.1796875053551048, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 60 |
|
}, |
|
{ |
|
"completion_length": 358.4349060058594, |
|
"epoch": 0.03306666666666667, |
|
"grad_norm": 0.10173008574938576, |
|
"kl": 0.02350616455078125, |
|
"learning_rate": 4.851957747496606e-07, |
|
"loss": 0.0, |
|
"reward": 1.1562500447034836, |
|
"reward_std": 0.2772155348211527, |
|
"rewards/equation_reward_func": 0.16927083814516664, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 62 |
|
}, |
|
{ |
|
"completion_length": 364.18490409851074, |
|
"epoch": 0.034133333333333335, |
|
"grad_norm": 0.08496355305518047, |
|
"kl": 0.02630615234375, |
|
"learning_rate": 4.839500322551386e-07, |
|
"loss": 0.0, |
|
"reward": 1.1093750447034836, |
|
"reward_std": 0.24286148557439446, |
|
"rewards/equation_reward_func": 0.12760417093522847, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 64 |
|
}, |
|
{ |
|
"completion_length": 373.833345413208, |
|
"epoch": 0.0352, |
|
"grad_norm": 0.09458816516251609, |
|
"kl": 0.02667999267578125, |
|
"learning_rate": 4.826557047444563e-07, |
|
"loss": 0.0, |
|
"reward": 1.1848958656191826, |
|
"reward_std": 0.3138170298188925, |
|
"rewards/equation_reward_func": 0.20572917233221233, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 66 |
|
}, |
|
{ |
|
"completion_length": 352.2083435058594, |
|
"epoch": 0.03626666666666667, |
|
"grad_norm": 0.08416786947789269, |
|
"kl": 0.03083038330078125, |
|
"learning_rate": 4.813130610139993e-07, |
|
"loss": 0.0, |
|
"reward": 1.1744792088866234, |
|
"reward_std": 0.2558550937101245, |
|
"rewards/equation_reward_func": 0.18489583861082792, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 68 |
|
}, |
|
{ |
|
"completion_length": 377.62500953674316, |
|
"epoch": 0.037333333333333336, |
|
"grad_norm": 0.07539913544982475, |
|
"kl": 0.03000640869140625, |
|
"learning_rate": 4.799223798941089e-07, |
|
"loss": 0.0, |
|
"reward": 1.070312537252903, |
|
"reward_std": 0.17193882586434484, |
|
"rewards/equation_reward_func": 0.08072916814126074, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 70 |
|
}, |
|
{ |
|
"completion_length": 370.4531364440918, |
|
"epoch": 0.0384, |
|
"grad_norm": 0.09187391682605524, |
|
"kl": 0.03436279296875, |
|
"learning_rate": 4.78483950191177e-07, |
|
"loss": 0.0, |
|
"reward": 1.1562500298023224, |
|
"reward_std": 0.2756755482405424, |
|
"rewards/equation_reward_func": 0.1770833362825215, |
|
"rewards/format_reward_func": 0.979166679084301, |
|
"step": 72 |
|
}, |
|
{ |
|
"completion_length": 373.04688453674316, |
|
"epoch": 0.039466666666666664, |
|
"grad_norm": 0.09537515327502834, |
|
"kl": 0.03740692138671875, |
|
"learning_rate": 4.769980706276687e-07, |
|
"loss": 0.0, |
|
"reward": 1.1354167088866234, |
|
"reward_std": 0.25582731096073985, |
|
"rewards/equation_reward_func": 0.15885417256504297, |
|
"rewards/format_reward_func": 0.9765625186264515, |
|
"step": 74 |
|
}, |
|
{ |
|
"completion_length": 387.8567810058594, |
|
"epoch": 0.04053333333333333, |
|
"grad_norm": 0.08000596022141122, |
|
"kl": 0.0389404296875, |
|
"learning_rate": 4.7546504978008595e-07, |
|
"loss": 0.0, |
|
"reward": 1.1328125335276127, |
|
"reward_std": 0.30721960263326764, |
|
"rewards/equation_reward_func": 0.1666666700039059, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 76 |
|
}, |
|
{ |
|
"completion_length": 390.2448024749756, |
|
"epoch": 0.0416, |
|
"grad_norm": 0.07863626089779173, |
|
"kl": 0.0442047119140625, |
|
"learning_rate": 4.738852060148848e-07, |
|
"loss": 0.0, |
|
"reward": 1.127604205161333, |
|
"reward_std": 0.26901706866919994, |
|
"rewards/equation_reward_func": 0.15364583814516664, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 78 |
|
}, |
|
{ |
|
"completion_length": 379.2161521911621, |
|
"epoch": 0.042666666666666665, |
|
"grad_norm": 0.08601071973954262, |
|
"kl": 0.0434417724609375, |
|
"learning_rate": 4.722588674223593e-07, |
|
"loss": 0.0, |
|
"reward": 1.1380208656191826, |
|
"reward_std": 0.2812240272760391, |
|
"rewards/equation_reward_func": 0.164062503259629, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 80 |
|
}, |
|
{ |
|
"completion_length": 371.59115409851074, |
|
"epoch": 0.04373333333333333, |
|
"grad_norm": 0.07568126962421509, |
|
"kl": 0.0497894287109375, |
|
"learning_rate": 4.70586371748506e-07, |
|
"loss": 0.0, |
|
"reward": 1.1380208656191826, |
|
"reward_std": 0.25603401800617576, |
|
"rewards/equation_reward_func": 0.15885417070239782, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 82 |
|
}, |
|
{ |
|
"completion_length": 381.30469703674316, |
|
"epoch": 0.0448, |
|
"grad_norm": 0.08065111561901392, |
|
"kl": 0.0525665283203125, |
|
"learning_rate": 4.6886806632488363e-07, |
|
"loss": 0.0001, |
|
"reward": 1.169270858168602, |
|
"reward_std": 0.25962691847234964, |
|
"rewards/equation_reward_func": 0.19531250419095159, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 84 |
|
}, |
|
{ |
|
"completion_length": 350.8020944595337, |
|
"epoch": 0.04586666666666667, |
|
"grad_norm": 0.09857801804683694, |
|
"kl": 0.0528717041015625, |
|
"learning_rate": 4.6710430799648143e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1796875447034836, |
|
"reward_std": 0.2887058644555509, |
|
"rewards/equation_reward_func": 0.1979166748933494, |
|
"rewards/format_reward_func": 0.9817708507180214, |
|
"step": 86 |
|
}, |
|
{ |
|
"completion_length": 353.0573024749756, |
|
"epoch": 0.046933333333333334, |
|
"grad_norm": 0.09077630030450431, |
|
"kl": 0.059906005859375, |
|
"learning_rate": 4.652954630476127e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2239583730697632, |
|
"reward_std": 0.30759388813748956, |
|
"rewards/equation_reward_func": 0.2500000069849193, |
|
"rewards/format_reward_func": 0.9739583507180214, |
|
"step": 88 |
|
}, |
|
{ |
|
"completion_length": 359.1666784286499, |
|
"epoch": 0.048, |
|
"grad_norm": 0.09945645690703775, |
|
"kl": 0.0612030029296875, |
|
"learning_rate": 4.6344190712584713e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2630208805203438, |
|
"reward_std": 0.27152396691963077, |
|
"rewards/equation_reward_func": 0.27604167466051877, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 90 |
|
}, |
|
{ |
|
"completion_length": 349.7083396911621, |
|
"epoch": 0.04906666666666667, |
|
"grad_norm": 0.08132855652754273, |
|
"kl": 0.06951904296875, |
|
"learning_rate": 4.615440251639995e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2395833656191826, |
|
"reward_std": 0.23549415357410908, |
|
"rewards/equation_reward_func": 0.25520834024064243, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 92 |
|
}, |
|
{ |
|
"completion_length": 340.52084159851074, |
|
"epoch": 0.050133333333333335, |
|
"grad_norm": 0.08537044318099979, |
|
"kl": 0.075592041015625, |
|
"learning_rate": 4.596022113001894e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2916667014360428, |
|
"reward_std": 0.3320260518230498, |
|
"rewards/equation_reward_func": 0.312500006519258, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 94 |
|
}, |
|
{ |
|
"completion_length": 327.13282012939453, |
|
"epoch": 0.0512, |
|
"grad_norm": 0.09240179037114699, |
|
"kl": 0.070465087890625, |
|
"learning_rate": 4.576168687959895e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3255208805203438, |
|
"reward_std": 0.386608456261456, |
|
"rewards/equation_reward_func": 0.3567708469927311, |
|
"rewards/format_reward_func": 0.9687500074505806, |
|
"step": 96 |
|
}, |
|
{ |
|
"completion_length": 364.68751335144043, |
|
"epoch": 0.05226666666666667, |
|
"grad_norm": 0.10186126690814491, |
|
"kl": 0.080108642578125, |
|
"learning_rate": 4.555884099526793e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1770833656191826, |
|
"reward_std": 0.2261401410214603, |
|
"rewards/equation_reward_func": 0.19010417093522847, |
|
"rewards/format_reward_func": 0.986979179084301, |
|
"step": 98 |
|
}, |
|
{ |
|
"completion_length": 378.69271755218506, |
|
"epoch": 0.05333333333333334, |
|
"grad_norm": 0.08283978901098998, |
|
"kl": 0.070831298828125, |
|
"learning_rate": 4.5351725602562174e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2083333805203438, |
|
"reward_std": 0.2539973724633455, |
|
"rewards/equation_reward_func": 0.22656251001171768, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 100 |
|
}, |
|
{ |
|
"completion_length": 349.2812604904175, |
|
"epoch": 0.0544, |
|
"grad_norm": 0.10236681253344297, |
|
"kl": 0.0821533203125, |
|
"learning_rate": 4.514038371367791e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3098958730697632, |
|
"reward_std": 0.3604734097607434, |
|
"rewards/equation_reward_func": 0.3437500107102096, |
|
"rewards/format_reward_func": 0.9661458432674408, |
|
"step": 102 |
|
}, |
|
{ |
|
"completion_length": 357.3619899749756, |
|
"epoch": 0.055466666666666664, |
|
"grad_norm": 0.09738517323229856, |
|
"kl": 0.086883544921875, |
|
"learning_rate": 4.4924859218538936e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3046875447034836, |
|
"reward_std": 0.3255553734488785, |
|
"rewards/equation_reward_func": 0.33072917233221233, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 104 |
|
}, |
|
{ |
|
"completion_length": 365.2760524749756, |
|
"epoch": 0.05653333333333333, |
|
"grad_norm": 0.08975705250496918, |
|
"kl": 0.09002685546875, |
|
"learning_rate": 4.470519687568185e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2968750484287739, |
|
"reward_std": 0.33681244123727083, |
|
"rewards/equation_reward_func": 0.3203125132713467, |
|
"rewards/format_reward_func": 0.9765625074505806, |
|
"step": 106 |
|
}, |
|
{ |
|
"completion_length": 380.40365505218506, |
|
"epoch": 0.0576, |
|
"grad_norm": 0.08944591293143872, |
|
"kl": 0.085113525390625, |
|
"learning_rate": 4.4481442302960923e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2135417126119137, |
|
"reward_std": 0.2771795648150146, |
|
"rewards/equation_reward_func": 0.23697917629033327, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 108 |
|
}, |
|
{ |
|
"completion_length": 389.94271659851074, |
|
"epoch": 0.058666666666666666, |
|
"grad_norm": 0.09619766220094576, |
|
"kl": 0.09130859375, |
|
"learning_rate": 4.4253641968074505e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2500000335276127, |
|
"reward_std": 0.2892899289727211, |
|
"rewards/equation_reward_func": 0.28645833977498114, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 110 |
|
}, |
|
{ |
|
"completion_length": 381.99219512939453, |
|
"epoch": 0.05973333333333333, |
|
"grad_norm": 0.10498087313979376, |
|
"kl": 0.09423828125, |
|
"learning_rate": 4.402184317891501e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3229167088866234, |
|
"reward_std": 0.32423597015440464, |
|
"rewards/equation_reward_func": 0.35156250768341124, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 112 |
|
}, |
|
{ |
|
"completion_length": 391.36719608306885, |
|
"epoch": 0.0608, |
|
"grad_norm": 0.09626210263887228, |
|
"kl": 0.097381591796875, |
|
"learning_rate": 4.37860940737443e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2500000298023224, |
|
"reward_std": 0.3341089729219675, |
|
"rewards/equation_reward_func": 0.28385417186655104, |
|
"rewards/format_reward_func": 0.9661458469927311, |
|
"step": 114 |
|
}, |
|
{ |
|
"completion_length": 379.2812614440918, |
|
"epoch": 0.06186666666666667, |
|
"grad_norm": 0.0919023125255689, |
|
"kl": 0.09576416015625, |
|
"learning_rate": 4.354644361119671e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2968750447034836, |
|
"reward_std": 0.30213321885094047, |
|
"rewards/equation_reward_func": 0.3255208437331021, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 116 |
|
}, |
|
{ |
|
"completion_length": 377.5989694595337, |
|
"epoch": 0.06293333333333333, |
|
"grad_norm": 0.1258891080084215, |
|
"kl": 0.1126708984375, |
|
"learning_rate": 4.3302941560111716e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3880208656191826, |
|
"reward_std": 0.29082584474235773, |
|
"rewards/equation_reward_func": 0.4192708428017795, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 118 |
|
}, |
|
{ |
|
"completion_length": 406.2239742279053, |
|
"epoch": 0.064, |
|
"grad_norm": 0.07341804992437591, |
|
"kl": 0.094940185546875, |
|
"learning_rate": 4.3055638489198236e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3072916977107525, |
|
"reward_std": 0.3007106310687959, |
|
"rewards/equation_reward_func": 0.3359375118743628, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 120 |
|
}, |
|
{ |
|
"completion_length": 440.3489694595337, |
|
"epoch": 0.06506666666666666, |
|
"grad_norm": 0.07202980759381214, |
|
"kl": 0.102081298828125, |
|
"learning_rate": 4.280458575653296e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2942708879709244, |
|
"reward_std": 0.2781888456083834, |
|
"rewards/equation_reward_func": 0.34635417629033327, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 122 |
|
}, |
|
{ |
|
"completion_length": 373.723970413208, |
|
"epoch": 0.06613333333333334, |
|
"grad_norm": 0.12005799306967507, |
|
"kl": 0.109588623046875, |
|
"learning_rate": 4.2549835498894665e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3619792126119137, |
|
"reward_std": 0.30349841713905334, |
|
"rewards/equation_reward_func": 0.39583334419876337, |
|
"rewards/format_reward_func": 0.9661458544433117, |
|
"step": 124 |
|
}, |
|
{ |
|
"completion_length": 424.8880310058594, |
|
"epoch": 0.0672, |
|
"grad_norm": 0.08141812528362076, |
|
"kl": 0.11883544921875, |
|
"learning_rate": 4.229144062093679e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3072916939854622, |
|
"reward_std": 0.34696589363738894, |
|
"rewards/equation_reward_func": 0.37760417303070426, |
|
"rewards/format_reward_func": 0.9296875186264515, |
|
"step": 126 |
|
}, |
|
{ |
|
"completion_length": 425.73959732055664, |
|
"epoch": 0.06826666666666667, |
|
"grad_norm": 0.08953036809534468, |
|
"kl": 0.10595703125, |
|
"learning_rate": 4.2029454784200675e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3203125335276127, |
|
"reward_std": 0.2961498526856303, |
|
"rewards/equation_reward_func": 0.3697916716337204, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 128 |
|
}, |
|
{ |
|
"completion_length": 456.8411560058594, |
|
"epoch": 0.06933333333333333, |
|
"grad_norm": 0.10202840286205975, |
|
"kl": 0.128875732421875, |
|
"learning_rate": 4.1763932395971433e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2786458767950535, |
|
"reward_std": 0.3274143426679075, |
|
"rewards/equation_reward_func": 0.3463541774544865, |
|
"rewards/format_reward_func": 0.9322916902601719, |
|
"step": 130 |
|
}, |
|
{ |
|
"completion_length": 391.82552909851074, |
|
"epoch": 0.0704, |
|
"grad_norm": 0.09343731649452018, |
|
"kl": 0.116180419921875, |
|
"learning_rate": 4.1494928597979117e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4427083693444729, |
|
"reward_std": 0.2739125872030854, |
|
"rewards/equation_reward_func": 0.48958334792405367, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 132 |
|
}, |
|
{ |
|
"completion_length": 431.3073043823242, |
|
"epoch": 0.07146666666666666, |
|
"grad_norm": 0.09948838006778335, |
|
"kl": 0.113861083984375, |
|
"learning_rate": 4.122249925494726e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3281250447034836, |
|
"reward_std": 0.267287774477154, |
|
"rewards/equation_reward_func": 0.37239584675990045, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 134 |
|
}, |
|
{ |
|
"completion_length": 422.07032203674316, |
|
"epoch": 0.07253333333333334, |
|
"grad_norm": 0.07206798558431624, |
|
"kl": 0.13238525390625, |
|
"learning_rate": 4.094670094299131e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3750000409781933, |
|
"reward_std": 0.2928238473832607, |
|
"rewards/equation_reward_func": 0.42447917675599456, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 136 |
|
}, |
|
{ |
|
"completion_length": 443.54688358306885, |
|
"epoch": 0.0736, |
|
"grad_norm": 0.10976069905088891, |
|
"kl": 0.109039306640625, |
|
"learning_rate": 4.066759093786931e-07, |
|
"loss": 0.0001, |
|
"reward": 1.2630208693444729, |
|
"reward_std": 0.2776200850494206, |
|
"rewards/equation_reward_func": 0.33593750884756446, |
|
"rewards/format_reward_func": 0.927083358168602, |
|
"step": 138 |
|
}, |
|
{ |
|
"completion_length": 398.4505367279053, |
|
"epoch": 0.07466666666666667, |
|
"grad_norm": 0.084796835919473, |
|
"kl": 0.12408447265625, |
|
"learning_rate": 4.038522720308732e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4375000484287739, |
|
"reward_std": 0.2496197698637843, |
|
"rewards/equation_reward_func": 0.4739583432674408, |
|
"rewards/format_reward_func": 0.9635416902601719, |
|
"step": 140 |
|
}, |
|
{ |
|
"completion_length": 375.6875104904175, |
|
"epoch": 0.07573333333333333, |
|
"grad_norm": 0.06578890547258132, |
|
"kl": 0.134796142578125, |
|
"learning_rate": 4.009966837786194e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4114583730697632, |
|
"reward_std": 0.2610441828146577, |
|
"rewards/equation_reward_func": 0.4453125111758709, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 142 |
|
}, |
|
{ |
|
"completion_length": 368.48959255218506, |
|
"epoch": 0.0768, |
|
"grad_norm": 0.10902428052363637, |
|
"kl": 0.136627197265625, |
|
"learning_rate": 3.981097376494259e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4687500521540642, |
|
"reward_std": 0.25434603728353977, |
|
"rewards/equation_reward_func": 0.49218752002343535, |
|
"rewards/format_reward_func": 0.9765625223517418, |
|
"step": 144 |
|
}, |
|
{ |
|
"completion_length": 406.8750114440918, |
|
"epoch": 0.07786666666666667, |
|
"grad_norm": 0.1079461409291905, |
|
"kl": 0.129119873046875, |
|
"learning_rate": 3.951920331829592e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3281250484287739, |
|
"reward_std": 0.25655436515808105, |
|
"rewards/equation_reward_func": 0.372395841171965, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 146 |
|
}, |
|
{ |
|
"completion_length": 401.2213668823242, |
|
"epoch": 0.07893333333333333, |
|
"grad_norm": 0.1067111189421742, |
|
"kl": 0.132171630859375, |
|
"learning_rate": 3.922441763065506e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3906250447034836, |
|
"reward_std": 0.249761619605124, |
|
"rewards/equation_reward_func": 0.4270833439659327, |
|
"rewards/format_reward_func": 0.9635416902601719, |
|
"step": 148 |
|
}, |
|
{ |
|
"completion_length": 450.89844512939453, |
|
"epoch": 0.08, |
|
"grad_norm": 0.07018564082166065, |
|
"kl": 0.112640380859375, |
|
"learning_rate": 3.8926677920936093e-07, |
|
"loss": 0.0001, |
|
"reward": 1.1901042126119137, |
|
"reward_std": 0.21367743890732527, |
|
"rewards/equation_reward_func": 0.23177083814516664, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 150 |
|
}, |
|
{ |
|
"completion_length": 319.31250762939453, |
|
"epoch": 0.08106666666666666, |
|
"grad_norm": 0.09931506831300466, |
|
"kl": 0.16046142578125, |
|
"learning_rate": 3.862604602152464e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5156250521540642, |
|
"reward_std": 0.17254623072221875, |
|
"rewards/equation_reward_func": 0.5416666828095913, |
|
"rewards/format_reward_func": 0.9739583432674408, |
|
"step": 152 |
|
}, |
|
{ |
|
"completion_length": 372.6979274749756, |
|
"epoch": 0.08213333333333334, |
|
"grad_norm": 0.08423289509476255, |
|
"kl": 0.1458740234375, |
|
"learning_rate": 3.8322584365434934e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4036458805203438, |
|
"reward_std": 0.23620562674477696, |
|
"rewards/equation_reward_func": 0.43229168234393, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 154 |
|
}, |
|
{ |
|
"completion_length": 383.4323024749756, |
|
"epoch": 0.0832, |
|
"grad_norm": 0.11684907563681693, |
|
"kl": 0.13134765625, |
|
"learning_rate": 3.8016355973344173e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3619792014360428, |
|
"reward_std": 0.2366077760234475, |
|
"rewards/equation_reward_func": 0.39062501303851604, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 156 |
|
}, |
|
{ |
|
"completion_length": 421.96876335144043, |
|
"epoch": 0.08426666666666667, |
|
"grad_norm": 0.1641382478416818, |
|
"kl": 0.124755859375, |
|
"learning_rate": 3.7707424440504863e-07, |
|
"loss": 0.0001, |
|
"reward": 1.296875037252903, |
|
"reward_std": 0.23676540749147534, |
|
"rewards/equation_reward_func": 0.3333333421032876, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 158 |
|
}, |
|
{ |
|
"completion_length": 365.9166793823242, |
|
"epoch": 0.08533333333333333, |
|
"grad_norm": 0.07817184043452526, |
|
"kl": 0.1395263671875, |
|
"learning_rate": 3.739585392353787e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3880208767950535, |
|
"reward_std": 0.19910774566233158, |
|
"rewards/equation_reward_func": 0.4140625100117177, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 160 |
|
}, |
|
{ |
|
"completion_length": 336.37500953674316, |
|
"epoch": 0.0864, |
|
"grad_norm": 0.08666435211660832, |
|
"kl": 0.155364990234375, |
|
"learning_rate": 3.7081709127108767e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5182291939854622, |
|
"reward_std": 0.20631011482328176, |
|
"rewards/equation_reward_func": 0.5312500149011612, |
|
"rewards/format_reward_func": 0.986979179084301, |
|
"step": 162 |
|
}, |
|
{ |
|
"completion_length": 360.15625953674316, |
|
"epoch": 0.08746666666666666, |
|
"grad_norm": 0.07995177005809427, |
|
"kl": 0.134307861328125, |
|
"learning_rate": 3.6765055290490513e-07, |
|
"loss": 0.0001, |
|
"reward": 1.367187537252903, |
|
"reward_std": 0.23405077820643783, |
|
"rewards/equation_reward_func": 0.3776041774544865, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 164 |
|
}, |
|
{ |
|
"completion_length": 316.8932418823242, |
|
"epoch": 0.08853333333333334, |
|
"grad_norm": 0.15304933704233734, |
|
"kl": 0.155242919921875, |
|
"learning_rate": 3.644595817401501e-07, |
|
"loss": 0.0002, |
|
"reward": 1.533854216337204, |
|
"reward_std": 0.25381680950522423, |
|
"rewards/equation_reward_func": 0.5442708469927311, |
|
"rewards/format_reward_func": 0.9895833432674408, |
|
"step": 166 |
|
}, |
|
{ |
|
"completion_length": 388.1224002838135, |
|
"epoch": 0.0896, |
|
"grad_norm": 0.08385643880585995, |
|
"kl": 0.141143798828125, |
|
"learning_rate": 3.6124484045416483e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3723958767950535, |
|
"reward_std": 0.24668778479099274, |
|
"rewards/equation_reward_func": 0.40885417466051877, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 168 |
|
}, |
|
{ |
|
"completion_length": 351.93750858306885, |
|
"epoch": 0.09066666666666667, |
|
"grad_norm": 0.07697038389931672, |
|
"kl": 0.14190673828125, |
|
"learning_rate": 3.580069966606949e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4479166939854622, |
|
"reward_std": 0.2045988291501999, |
|
"rewards/equation_reward_func": 0.4557291753590107, |
|
"rewards/format_reward_func": 0.9921875037252903, |
|
"step": 170 |
|
}, |
|
{ |
|
"completion_length": 334.4765729904175, |
|
"epoch": 0.09173333333333333, |
|
"grad_norm": 0.0735056203449723, |
|
"kl": 0.144317626953125, |
|
"learning_rate": 3.547467227712444e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4973958656191826, |
|
"reward_std": 0.19876712281256914, |
|
"rewards/equation_reward_func": 0.5130208474583924, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 172 |
|
}, |
|
{ |
|
"completion_length": 327.29948711395264, |
|
"epoch": 0.0928, |
|
"grad_norm": 0.12548498564602414, |
|
"kl": 0.1490478515625, |
|
"learning_rate": 3.5146469585543386e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4947917088866234, |
|
"reward_std": 0.22728270338848233, |
|
"rewards/equation_reward_func": 0.5078125204890966, |
|
"rewards/format_reward_func": 0.9869791753590107, |
|
"step": 174 |
|
}, |
|
{ |
|
"completion_length": 398.49220085144043, |
|
"epoch": 0.09386666666666667, |
|
"grad_norm": 0.08293639399157755, |
|
"kl": 0.149139404296875, |
|
"learning_rate": 3.481615975003922e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3489583730697632, |
|
"reward_std": 0.20350094605237246, |
|
"rewards/equation_reward_func": 0.3645833428017795, |
|
"rewards/format_reward_func": 0.9843750111758709, |
|
"step": 176 |
|
}, |
|
{ |
|
"completion_length": 324.3489713668823, |
|
"epoch": 0.09493333333333333, |
|
"grad_norm": 0.09550896801162942, |
|
"kl": 0.155364990234375, |
|
"learning_rate": 3.448381136692089e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4583333730697632, |
|
"reward_std": 0.1914014257490635, |
|
"rewards/equation_reward_func": 0.4661458458285779, |
|
"rewards/format_reward_func": 0.9921875037252903, |
|
"step": 178 |
|
}, |
|
{ |
|
"completion_length": 363.1067838668823, |
|
"epoch": 0.096, |
|
"grad_norm": 0.06570582721768603, |
|
"kl": 0.131988525390625, |
|
"learning_rate": 3.4149493455847897e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4010417014360428, |
|
"reward_std": 0.15116061177104712, |
|
"rewards/equation_reward_func": 0.41927084419876337, |
|
"rewards/format_reward_func": 0.9817708432674408, |
|
"step": 180 |
|
}, |
|
{ |
|
"completion_length": 350.994797706604, |
|
"epoch": 0.09706666666666666, |
|
"grad_norm": 0.08114840800090561, |
|
"kl": 0.165863037109375, |
|
"learning_rate": 3.3813275445496766e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4375000447034836, |
|
"reward_std": 0.2402082341723144, |
|
"rewards/equation_reward_func": 0.45572917768731713, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 182 |
|
}, |
|
{ |
|
"completion_length": 341.2786521911621, |
|
"epoch": 0.09813333333333334, |
|
"grad_norm": 0.09604192137372529, |
|
"kl": 0.140167236328125, |
|
"learning_rate": 3.347522715914262e-07, |
|
"loss": 0.0001, |
|
"reward": 1.4739583805203438, |
|
"reward_std": 0.2517684092745185, |
|
"rewards/equation_reward_func": 0.48958334303461015, |
|
"rewards/format_reward_func": 0.9843750111758709, |
|
"step": 184 |
|
}, |
|
{ |
|
"completion_length": 334.8333444595337, |
|
"epoch": 0.0992, |
|
"grad_norm": 0.06092803443628096, |
|
"kl": 0.15545654296875, |
|
"learning_rate": 3.313541880015877e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4869792014360428, |
|
"reward_std": 0.17553172213956714, |
|
"rewards/equation_reward_func": 0.49218751210719347, |
|
"rewards/format_reward_func": 0.9947916716337204, |
|
"step": 186 |
|
}, |
|
{ |
|
"completion_length": 296.9895906448364, |
|
"epoch": 0.10026666666666667, |
|
"grad_norm": 0.08698608388272158, |
|
"kl": 0.15386962890625, |
|
"learning_rate": 3.279392093743747e-07, |
|
"loss": 0.0002, |
|
"reward": 1.484375037252903, |
|
"reward_std": 0.18876954959705472, |
|
"rewards/equation_reward_func": 0.5130208488553762, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 188 |
|
}, |
|
{ |
|
"completion_length": 369.6171989440918, |
|
"epoch": 0.10133333333333333, |
|
"grad_norm": 0.07100886524919588, |
|
"kl": 0.1356201171875, |
|
"learning_rate": 3.245080449073459e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3932291939854622, |
|
"reward_std": 0.22231243178248405, |
|
"rewards/equation_reward_func": 0.40885417722165585, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 190 |
|
}, |
|
{ |
|
"completion_length": 340.66928005218506, |
|
"epoch": 0.1024, |
|
"grad_norm": 0.06938271896378453, |
|
"kl": 0.164703369140625, |
|
"learning_rate": 3.210614071594162e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4322917088866234, |
|
"reward_std": 0.21930959541350603, |
|
"rewards/equation_reward_func": 0.44791667885147035, |
|
"rewards/format_reward_func": 0.9843750111758709, |
|
"step": 192 |
|
}, |
|
{ |
|
"completion_length": 366.10157108306885, |
|
"epoch": 0.10346666666666667, |
|
"grad_norm": 0.0640261989037358, |
|
"kl": 0.1395263671875, |
|
"learning_rate": 3.1760001190287695e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3567708730697632, |
|
"reward_std": 0.14457962242886424, |
|
"rewards/equation_reward_func": 0.3671875111758709, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 194 |
|
}, |
|
{ |
|
"completion_length": 335.52344608306885, |
|
"epoch": 0.10453333333333334, |
|
"grad_norm": 0.07693618401025876, |
|
"kl": 0.152862548828125, |
|
"learning_rate": 3.141245779747502e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3723958805203438, |
|
"reward_std": 0.19283229811117053, |
|
"rewards/equation_reward_func": 0.3828125139698386, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 196 |
|
}, |
|
{ |
|
"completion_length": 317.8932409286499, |
|
"epoch": 0.1056, |
|
"grad_norm": 0.08196224183163763, |
|
"kl": 0.158447265625, |
|
"learning_rate": 3.106358271275056e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4947917126119137, |
|
"reward_std": 0.19350500591099262, |
|
"rewards/equation_reward_func": 0.5104166797827929, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 198 |
|
}, |
|
{ |
|
"completion_length": 317.8619861602783, |
|
"epoch": 0.10666666666666667, |
|
"grad_norm": 0.09403807304980984, |
|
"kl": 0.15289306640625, |
|
"learning_rate": 3.0713448387917227e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4609375223517418, |
|
"reward_std": 0.18717782059684396, |
|
"rewards/equation_reward_func": 0.46354167303070426, |
|
"rewards/format_reward_func": 0.9973958358168602, |
|
"step": 200 |
|
}, |
|
{ |
|
"completion_length": 349.03907012939453, |
|
"epoch": 0.10773333333333333, |
|
"grad_norm": 0.0618629735347176, |
|
"kl": 0.14788818359375, |
|
"learning_rate": 3.0362127536287636e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3906250447034836, |
|
"reward_std": 0.20409536687657237, |
|
"rewards/equation_reward_func": 0.4088541797827929, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 202 |
|
}, |
|
{ |
|
"completion_length": 280.3645906448364, |
|
"epoch": 0.1088, |
|
"grad_norm": 0.06963964620642316, |
|
"kl": 0.168182373046875, |
|
"learning_rate": 3.0009693117583523e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5182292088866234, |
|
"reward_std": 0.12625272339209914, |
|
"rewards/equation_reward_func": 0.5234375149011612, |
|
"rewards/format_reward_func": 0.9947916716337204, |
|
"step": 204 |
|
}, |
|
{ |
|
"completion_length": 307.3724031448364, |
|
"epoch": 0.10986666666666667, |
|
"grad_norm": 0.07039292185299532, |
|
"kl": 0.175628662109375, |
|
"learning_rate": 2.965621832278401e-07, |
|
"loss": 0.0002, |
|
"reward": 1.486979205161333, |
|
"reward_std": 0.15833014715462923, |
|
"rewards/equation_reward_func": 0.49739584419876337, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 206 |
|
}, |
|
{ |
|
"completion_length": 341.1041774749756, |
|
"epoch": 0.11093333333333333, |
|
"grad_norm": 0.06143705194536295, |
|
"kl": 0.154327392578125, |
|
"learning_rate": 2.9301776558925875e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4218750447034836, |
|
"reward_std": 0.15025723353028297, |
|
"rewards/equation_reward_func": 0.4375000144354999, |
|
"rewards/format_reward_func": 0.9843750074505806, |
|
"step": 208 |
|
}, |
|
{ |
|
"completion_length": 334.9270935058594, |
|
"epoch": 0.112, |
|
"grad_norm": 0.0858887137019435, |
|
"kl": 0.15777587890625, |
|
"learning_rate": 2.894644143385885e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4739583730697632, |
|
"reward_std": 0.24965603277087212, |
|
"rewards/equation_reward_func": 0.4895833469927311, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 210 |
|
}, |
|
{ |
|
"completion_length": 322.8906316757202, |
|
"epoch": 0.11306666666666666, |
|
"grad_norm": 0.0904494730503501, |
|
"kl": 0.16424560546875, |
|
"learning_rate": 2.859028674095937e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4479167088866234, |
|
"reward_std": 0.17492430424317718, |
|
"rewards/equation_reward_func": 0.4635416748933494, |
|
"rewards/format_reward_func": 0.9843750149011612, |
|
"step": 212 |
|
}, |
|
{ |
|
"completion_length": 296.30469512939453, |
|
"epoch": 0.11413333333333334, |
|
"grad_norm": 0.08632111011343167, |
|
"kl": 0.1639404296875, |
|
"learning_rate": 2.823338644380566e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5156250521540642, |
|
"reward_std": 0.14193214289844036, |
|
"rewards/equation_reward_func": 0.5338541865348816, |
|
"rewards/format_reward_func": 0.9817708469927311, |
|
"step": 214 |
|
}, |
|
{ |
|
"completion_length": 306.0208387374878, |
|
"epoch": 0.1152, |
|
"grad_norm": 0.08745683085319692, |
|
"kl": 0.159393310546875, |
|
"learning_rate": 2.7875814660817504e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5364583805203438, |
|
"reward_std": 0.22032672306522727, |
|
"rewards/equation_reward_func": 0.557291679084301, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 216 |
|
}, |
|
{ |
|
"completion_length": 351.677095413208, |
|
"epoch": 0.11626666666666667, |
|
"grad_norm": 0.0850761586470191, |
|
"kl": 0.1666259765625, |
|
"learning_rate": 2.751764564986396e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3958333656191826, |
|
"reward_std": 0.28290195716544986, |
|
"rewards/equation_reward_func": 0.4062500118743628, |
|
"rewards/format_reward_func": 0.9895833395421505, |
|
"step": 218 |
|
}, |
|
{ |
|
"completion_length": 308.17448806762695, |
|
"epoch": 0.11733333333333333, |
|
"grad_norm": 0.07684504845856611, |
|
"kl": 0.173187255859375, |
|
"learning_rate": 2.715895379284194e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5104166977107525, |
|
"reward_std": 0.18647652165964246, |
|
"rewards/equation_reward_func": 0.5390625081490725, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 220 |
|
}, |
|
{ |
|
"completion_length": 328.5859479904175, |
|
"epoch": 0.1184, |
|
"grad_norm": 0.11203271187490091, |
|
"kl": 0.16510009765625, |
|
"learning_rate": 2.6799813580229174e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4557292014360428, |
|
"reward_std": 0.2715247953310609, |
|
"rewards/equation_reward_func": 0.4895833469927311, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 222 |
|
}, |
|
{ |
|
"completion_length": 388.0573043823242, |
|
"epoch": 0.11946666666666667, |
|
"grad_norm": 0.06225243124003096, |
|
"kl": 0.14501953125, |
|
"learning_rate": 2.6440299595614606e-07, |
|
"loss": 0.0001, |
|
"reward": 1.3333333730697632, |
|
"reward_std": 0.22618821496143937, |
|
"rewards/equation_reward_func": 0.35937500931322575, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 224 |
|
}, |
|
{ |
|
"completion_length": 303.80209159851074, |
|
"epoch": 0.12053333333333334, |
|
"grad_norm": 0.08582274740530478, |
|
"kl": 0.18316650390625, |
|
"learning_rate": 2.6080486500209347e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5052083618938923, |
|
"reward_std": 0.1736113135702908, |
|
"rewards/equation_reward_func": 0.5234375176951289, |
|
"rewards/format_reward_func": 0.9817708395421505, |
|
"step": 226 |
|
}, |
|
{ |
|
"completion_length": 366.50261306762695, |
|
"epoch": 0.1216, |
|
"grad_norm": 0.08747456062041993, |
|
"kl": 0.17315673828125, |
|
"learning_rate": 2.572044901734166e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3489583805203438, |
|
"reward_std": 0.2056382312439382, |
|
"rewards/equation_reward_func": 0.3880208423361182, |
|
"rewards/format_reward_func": 0.9609375260770321, |
|
"step": 228 |
|
}, |
|
{ |
|
"completion_length": 335.09636306762695, |
|
"epoch": 0.12266666666666666, |
|
"grad_norm": 0.34141856790128705, |
|
"kl": 0.56512451171875, |
|
"learning_rate": 2.536026191693893e-07, |
|
"loss": 0.0006, |
|
"reward": 1.3645833805203438, |
|
"reward_std": 0.15194532042369246, |
|
"rewards/equation_reward_func": 0.3906250118743628, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 230 |
|
}, |
|
{ |
|
"completion_length": 338.17969703674316, |
|
"epoch": 0.12373333333333333, |
|
"grad_norm": 0.06484061977942189, |
|
"kl": 0.17218017578125, |
|
"learning_rate": 2.5e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4270833693444729, |
|
"reward_std": 0.15773996990174055, |
|
"rewards/equation_reward_func": 0.4479166741948575, |
|
"rewards/format_reward_func": 0.9791666828095913, |
|
"step": 232 |
|
}, |
|
{ |
|
"completion_length": 369.4713649749756, |
|
"epoch": 0.1248, |
|
"grad_norm": 0.06533716217495784, |
|
"kl": 0.172607421875, |
|
"learning_rate": 2.4639738083061073e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3229167014360428, |
|
"reward_std": 0.1891809026710689, |
|
"rewards/equation_reward_func": 0.3619791816454381, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 234 |
|
}, |
|
{ |
|
"completion_length": 326.32552909851074, |
|
"epoch": 0.12586666666666665, |
|
"grad_norm": 0.11951442156862668, |
|
"kl": 0.16986083984375, |
|
"learning_rate": 2.4279550982658345e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4348958805203438, |
|
"reward_std": 0.20815222803503275, |
|
"rewards/equation_reward_func": 0.45833334419876337, |
|
"rewards/format_reward_func": 0.9765625149011612, |
|
"step": 236 |
|
}, |
|
{ |
|
"completion_length": 288.45052909851074, |
|
"epoch": 0.12693333333333334, |
|
"grad_norm": 0.08691549270327721, |
|
"kl": 0.19256591796875, |
|
"learning_rate": 2.3919513499790646e-07, |
|
"loss": 0.0002, |
|
"reward": 1.5260417088866234, |
|
"reward_std": 0.1767690358683467, |
|
"rewards/equation_reward_func": 0.5520833525806665, |
|
"rewards/format_reward_func": 0.9739583469927311, |
|
"step": 238 |
|
}, |
|
{ |
|
"completion_length": 286.3697991371155, |
|
"epoch": 0.128, |
|
"grad_norm": 0.07559248926700043, |
|
"kl": 0.19488525390625, |
|
"learning_rate": 2.3559700404385394e-07, |
|
"loss": 0.0002, |
|
"reward": 1.528645858168602, |
|
"reward_std": 0.22208327893167734, |
|
"rewards/equation_reward_func": 0.5520833469927311, |
|
"rewards/format_reward_func": 0.9765625037252903, |
|
"step": 240 |
|
}, |
|
{ |
|
"completion_length": 340.2682399749756, |
|
"epoch": 0.12906666666666666, |
|
"grad_norm": 0.10530634140083081, |
|
"kl": 0.189453125, |
|
"learning_rate": 2.3200186419770823e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3984375149011612, |
|
"reward_std": 0.23324821423739195, |
|
"rewards/equation_reward_func": 0.4348958395421505, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 242 |
|
}, |
|
{ |
|
"completion_length": 362.66927909851074, |
|
"epoch": 0.13013333333333332, |
|
"grad_norm": 1.3010084453655495, |
|
"kl": 0.183837890625, |
|
"learning_rate": 2.284104620715807e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2968750298023224, |
|
"reward_std": 0.2373127401806414, |
|
"rewards/equation_reward_func": 0.3515625090803951, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 244 |
|
}, |
|
{ |
|
"completion_length": 340.72917652130127, |
|
"epoch": 0.1312, |
|
"grad_norm": 0.06959692685962483, |
|
"kl": 0.18695068359375, |
|
"learning_rate": 2.2482354350136043e-07, |
|
"loss": 0.0002, |
|
"reward": 1.354166716337204, |
|
"reward_std": 0.19205195363610983, |
|
"rewards/equation_reward_func": 0.3802083437331021, |
|
"rewards/format_reward_func": 0.9739583544433117, |
|
"step": 246 |
|
}, |
|
{ |
|
"completion_length": 340.98438453674316, |
|
"epoch": 0.13226666666666667, |
|
"grad_norm": 0.0765593787053769, |
|
"kl": 0.202880859375, |
|
"learning_rate": 2.2124185339182496e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3203125447034836, |
|
"reward_std": 0.20337719656527042, |
|
"rewards/equation_reward_func": 0.36979167396202683, |
|
"rewards/format_reward_func": 0.950520858168602, |
|
"step": 248 |
|
}, |
|
{ |
|
"completion_length": 324.2135548591614, |
|
"epoch": 0.13333333333333333, |
|
"grad_norm": 0.06964123521038948, |
|
"kl": 0.191162109375, |
|
"learning_rate": 2.1766613556194344e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4114583618938923, |
|
"reward_std": 0.1772644561715424, |
|
"rewards/equation_reward_func": 0.44010417722165585, |
|
"rewards/format_reward_func": 0.971354179084301, |
|
"step": 250 |
|
}, |
|
{ |
|
"completion_length": 345.8020935058594, |
|
"epoch": 0.1344, |
|
"grad_norm": 0.07570707850632788, |
|
"kl": 0.1973876953125, |
|
"learning_rate": 2.1409713259040628e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3802083656191826, |
|
"reward_std": 0.29142854968085885, |
|
"rewards/equation_reward_func": 0.4427083432674408, |
|
"rewards/format_reward_func": 0.9375000186264515, |
|
"step": 252 |
|
}, |
|
{ |
|
"completion_length": 320.135422706604, |
|
"epoch": 0.13546666666666668, |
|
"grad_norm": 0.09393829326895221, |
|
"kl": 0.2281494140625, |
|
"learning_rate": 2.105355856614115e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3932292126119137, |
|
"reward_std": 0.26339731831103563, |
|
"rewards/equation_reward_func": 0.4479166797827929, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 254 |
|
}, |
|
{ |
|
"completion_length": 296.93751096725464, |
|
"epoch": 0.13653333333333334, |
|
"grad_norm": 0.10209820757740125, |
|
"kl": 0.21038818359375, |
|
"learning_rate": 2.069822344107413e-07, |
|
"loss": 0.0002, |
|
"reward": 1.458333384245634, |
|
"reward_std": 0.2565371445380151, |
|
"rewards/equation_reward_func": 0.518229179084301, |
|
"rewards/format_reward_func": 0.9401041902601719, |
|
"step": 256 |
|
}, |
|
{ |
|
"completion_length": 349.19271659851074, |
|
"epoch": 0.1376, |
|
"grad_norm": 0.08766590131943175, |
|
"kl": 0.20196533203125, |
|
"learning_rate": 2.034378167721599e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3046875409781933, |
|
"reward_std": 0.26743903663009405, |
|
"rewards/equation_reward_func": 0.369791679084301, |
|
"rewards/format_reward_func": 0.9348958544433117, |
|
"step": 258 |
|
}, |
|
{ |
|
"completion_length": 313.64844703674316, |
|
"epoch": 0.13866666666666666, |
|
"grad_norm": 0.06846377481636261, |
|
"kl": 0.21148681640625, |
|
"learning_rate": 1.9990306882416485e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4401041865348816, |
|
"reward_std": 0.18869125936180353, |
|
"rewards/equation_reward_func": 0.47916667885147035, |
|
"rewards/format_reward_func": 0.9609375260770321, |
|
"step": 260 |
|
}, |
|
{ |
|
"completion_length": 364.0937614440918, |
|
"epoch": 0.13973333333333332, |
|
"grad_norm": 0.09910364824859053, |
|
"kl": 0.19482421875, |
|
"learning_rate": 1.9637872463712362e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3229167088866234, |
|
"reward_std": 0.28169540874660015, |
|
"rewards/equation_reward_func": 0.40364584513008595, |
|
"rewards/format_reward_func": 0.9192708507180214, |
|
"step": 262 |
|
}, |
|
{ |
|
"completion_length": 299.85417461395264, |
|
"epoch": 0.1408, |
|
"grad_norm": 0.08446825243053117, |
|
"kl": 0.21746826171875, |
|
"learning_rate": 1.9286551612082773e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4270833618938923, |
|
"reward_std": 0.254016081802547, |
|
"rewards/equation_reward_func": 0.4921875118743628, |
|
"rewards/format_reward_func": 0.9348958469927311, |
|
"step": 264 |
|
}, |
|
{ |
|
"completion_length": 310.96875762939453, |
|
"epoch": 0.14186666666666667, |
|
"grad_norm": 0.08911856620012026, |
|
"kl": 0.20257568359375, |
|
"learning_rate": 1.8936417287249446e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4843750521540642, |
|
"reward_std": 0.2710961364209652, |
|
"rewards/equation_reward_func": 0.5364583488553762, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 266 |
|
}, |
|
{ |
|
"completion_length": 370.25782585144043, |
|
"epoch": 0.14293333333333333, |
|
"grad_norm": 0.09626183381346134, |
|
"kl": 0.20556640625, |
|
"learning_rate": 1.8587542202524985e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2786458618938923, |
|
"reward_std": 0.3155774394981563, |
|
"rewards/equation_reward_func": 0.3567708437331021, |
|
"rewards/format_reward_func": 0.9218750186264515, |
|
"step": 268 |
|
}, |
|
{ |
|
"completion_length": 332.5781316757202, |
|
"epoch": 0.144, |
|
"grad_norm": 0.08565923133416391, |
|
"kl": 0.19805908203125, |
|
"learning_rate": 1.82399988097123e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4322917088866234, |
|
"reward_std": 0.2691522967070341, |
|
"rewards/equation_reward_func": 0.4713541753590107, |
|
"rewards/format_reward_func": 0.9609375111758709, |
|
"step": 270 |
|
}, |
|
{ |
|
"completion_length": 319.74740505218506, |
|
"epoch": 0.14506666666666668, |
|
"grad_norm": 0.08562597807288291, |
|
"kl": 0.20941162109375, |
|
"learning_rate": 1.7893859284058378e-07, |
|
"loss": 0.0002, |
|
"reward": 1.437500026077032, |
|
"reward_std": 0.15510809421539307, |
|
"rewards/equation_reward_func": 0.48177084047347307, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 272 |
|
}, |
|
{ |
|
"completion_length": 330.8229260444641, |
|
"epoch": 0.14613333333333334, |
|
"grad_norm": 0.09727298428810927, |
|
"kl": 0.21234130859375, |
|
"learning_rate": 1.7549195509265407e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3671875335276127, |
|
"reward_std": 0.2629071534611285, |
|
"rewards/equation_reward_func": 0.4348958428017795, |
|
"rewards/format_reward_func": 0.932291679084301, |
|
"step": 274 |
|
}, |
|
{ |
|
"completion_length": 326.29167795181274, |
|
"epoch": 0.1472, |
|
"grad_norm": 0.08109494925679199, |
|
"kl": 652.1973876953125, |
|
"learning_rate": 1.7206079062562536e-07, |
|
"loss": 0.6501, |
|
"reward": 1.388020858168602, |
|
"reward_std": 0.2561843590810895, |
|
"rewards/equation_reward_func": 0.45052084513008595, |
|
"rewards/format_reward_func": 0.9375000223517418, |
|
"step": 276 |
|
}, |
|
{ |
|
"completion_length": 272.9349036216736, |
|
"epoch": 0.14826666666666666, |
|
"grad_norm": 0.13757973385264483, |
|
"kl": 0.20831298828125, |
|
"learning_rate": 1.6864581199841226e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4973958730697632, |
|
"reward_std": 0.25729979015886784, |
|
"rewards/equation_reward_func": 0.5338541837409139, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 278 |
|
}, |
|
{ |
|
"completion_length": 315.5833430290222, |
|
"epoch": 0.14933333333333335, |
|
"grad_norm": 0.08374109208415109, |
|
"kl": 0.2430419921875, |
|
"learning_rate": 1.6524772840857388e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4166667014360428, |
|
"reward_std": 0.29498640540987253, |
|
"rewards/equation_reward_func": 0.4869791779201478, |
|
"rewards/format_reward_func": 0.9296875149011612, |
|
"step": 280 |
|
}, |
|
{ |
|
"completion_length": 315.5911560058594, |
|
"epoch": 0.1504, |
|
"grad_norm": 0.10137195504364034, |
|
"kl": 0.20843505859375, |
|
"learning_rate": 1.6186724554503237e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4010417088866234, |
|
"reward_std": 0.28228656528517604, |
|
"rewards/equation_reward_func": 0.46354168001562357, |
|
"rewards/format_reward_func": 0.9375000186264515, |
|
"step": 282 |
|
}, |
|
{ |
|
"completion_length": 290.7968854904175, |
|
"epoch": 0.15146666666666667, |
|
"grad_norm": 0.09905811066649715, |
|
"kl": 0.2109375, |
|
"learning_rate": 1.5850506544152103e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4635417014360428, |
|
"reward_std": 0.22374329762533307, |
|
"rewards/equation_reward_func": 0.5000000149011612, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 284 |
|
}, |
|
{ |
|
"completion_length": 320.25261402130127, |
|
"epoch": 0.15253333333333333, |
|
"grad_norm": 0.09912686512752485, |
|
"kl": 0.2152099609375, |
|
"learning_rate": 1.5516188633079107e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3958333767950535, |
|
"reward_std": 0.2674556761048734, |
|
"rewards/equation_reward_func": 0.4531250149011612, |
|
"rewards/format_reward_func": 0.9427083544433117, |
|
"step": 286 |
|
}, |
|
{ |
|
"completion_length": 344.84896755218506, |
|
"epoch": 0.1536, |
|
"grad_norm": 0.10445550891293186, |
|
"kl": 0.220947265625, |
|
"learning_rate": 1.5183840249960784e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2552083767950535, |
|
"reward_std": 0.3165745525620878, |
|
"rewards/equation_reward_func": 0.32291667559184134, |
|
"rewards/format_reward_func": 0.9322916865348816, |
|
"step": 288 |
|
}, |
|
{ |
|
"completion_length": 356.31511402130127, |
|
"epoch": 0.15466666666666667, |
|
"grad_norm": 2.7822894146719332, |
|
"kl": 4.81689453125, |
|
"learning_rate": 1.4853530414456612e-07, |
|
"loss": 0.0048, |
|
"reward": 1.322916705161333, |
|
"reward_std": 0.2766005671583116, |
|
"rewards/equation_reward_func": 0.38541667675599456, |
|
"rewards/format_reward_func": 0.9375000260770321, |
|
"step": 290 |
|
}, |
|
{ |
|
"completion_length": 333.315110206604, |
|
"epoch": 0.15573333333333333, |
|
"grad_norm": 0.06794837428942249, |
|
"kl": 0.20697021484375, |
|
"learning_rate": 1.4525327722875568e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3854167126119137, |
|
"reward_std": 0.23598269233480096, |
|
"rewards/equation_reward_func": 0.4531250102445483, |
|
"rewards/format_reward_func": 0.9322916865348816, |
|
"step": 292 |
|
}, |
|
{ |
|
"completion_length": 345.68230056762695, |
|
"epoch": 0.1568, |
|
"grad_norm": 0.7164246784066609, |
|
"kl": 0.811279296875, |
|
"learning_rate": 1.4199300333930515e-07, |
|
"loss": 0.0008, |
|
"reward": 1.4036458805203438, |
|
"reward_std": 0.2555072852410376, |
|
"rewards/equation_reward_func": 0.4661458469927311, |
|
"rewards/format_reward_func": 0.9375000260770321, |
|
"step": 294 |
|
}, |
|
{ |
|
"completion_length": 347.70834255218506, |
|
"epoch": 0.15786666666666666, |
|
"grad_norm": 0.10342795992816751, |
|
"kl": 0.212646484375, |
|
"learning_rate": 1.3875515954583523e-07, |
|
"loss": 0.0002, |
|
"reward": 1.338541705161333, |
|
"reward_std": 0.23221872048452497, |
|
"rewards/equation_reward_func": 0.40364584419876337, |
|
"rewards/format_reward_func": 0.934895858168602, |
|
"step": 296 |
|
}, |
|
{ |
|
"completion_length": 364.0052127838135, |
|
"epoch": 0.15893333333333334, |
|
"grad_norm": 0.08244692798356867, |
|
"kl": 0.215087890625, |
|
"learning_rate": 1.3554041825985e-07, |
|
"loss": 0.0002, |
|
"reward": 1.338541705161333, |
|
"reward_std": 0.2457712898030877, |
|
"rewards/equation_reward_func": 0.4062500100117177, |
|
"rewards/format_reward_func": 0.9322916865348816, |
|
"step": 298 |
|
}, |
|
{ |
|
"completion_length": 331.8255319595337, |
|
"epoch": 0.16, |
|
"grad_norm": 0.09987532231840503, |
|
"kl": 0.21429443359375, |
|
"learning_rate": 1.323494470950949e-07, |
|
"loss": 0.0002, |
|
"reward": 1.346354216337204, |
|
"reward_std": 0.2734911320731044, |
|
"rewards/equation_reward_func": 0.419270841171965, |
|
"rewards/format_reward_func": 0.9270833507180214, |
|
"step": 300 |
|
}, |
|
{ |
|
"completion_length": 307.08594703674316, |
|
"epoch": 0.16106666666666666, |
|
"grad_norm": 0.1125414635061611, |
|
"kl": 0.23858642578125, |
|
"learning_rate": 1.2918290872891236e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4453125521540642, |
|
"reward_std": 0.21242208778858185, |
|
"rewards/equation_reward_func": 0.5000000167638063, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 302 |
|
}, |
|
{ |
|
"completion_length": 328.62761211395264, |
|
"epoch": 0.16213333333333332, |
|
"grad_norm": 0.09030164033344525, |
|
"kl": 0.2169189453125, |
|
"learning_rate": 1.260414607646213e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3593750223517418, |
|
"reward_std": 0.23800909472629428, |
|
"rewards/equation_reward_func": 0.4322916760575026, |
|
"rewards/format_reward_func": 0.9270833469927311, |
|
"step": 304 |
|
}, |
|
{ |
|
"completion_length": 327.34115505218506, |
|
"epoch": 0.1632, |
|
"grad_norm": 0.084605395523196, |
|
"kl": 0.2135009765625, |
|
"learning_rate": 1.2292575559495143e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4036458805203438, |
|
"reward_std": 0.22087143966928124, |
|
"rewards/equation_reward_func": 0.46875000884756446, |
|
"rewards/format_reward_func": 0.9348958469927311, |
|
"step": 306 |
|
}, |
|
{ |
|
"completion_length": 351.6718807220459, |
|
"epoch": 0.16426666666666667, |
|
"grad_norm": 0.07930872672272365, |
|
"kl": 0.20831298828125, |
|
"learning_rate": 1.1983644026655835e-07, |
|
"loss": 0.0002, |
|
"reward": 1.2734375484287739, |
|
"reward_std": 0.2579080620780587, |
|
"rewards/equation_reward_func": 0.3489583432674408, |
|
"rewards/format_reward_func": 0.9244791939854622, |
|
"step": 308 |
|
}, |
|
{ |
|
"completion_length": 319.75261306762695, |
|
"epoch": 0.16533333333333333, |
|
"grad_norm": 0.08243203802642829, |
|
"kl": 0.60150146484375, |
|
"learning_rate": 1.1677415634565066e-07, |
|
"loss": 0.0006, |
|
"reward": 1.3984375298023224, |
|
"reward_std": 0.2197747863829136, |
|
"rewards/equation_reward_func": 0.4765625111758709, |
|
"rewards/format_reward_func": 0.9218750223517418, |
|
"step": 310 |
|
}, |
|
{ |
|
"completion_length": 323.96094512939453, |
|
"epoch": 0.1664, |
|
"grad_norm": 0.11257163282471587, |
|
"kl": 0.22088623046875, |
|
"learning_rate": 1.1373953978475353e-07, |
|
"loss": 0.0002, |
|
"reward": 1.4427083805203438, |
|
"reward_std": 0.24971541296690702, |
|
"rewards/equation_reward_func": 0.4921875118743628, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 312 |
|
}, |
|
{ |
|
"completion_length": 318.0625071525574, |
|
"epoch": 0.16746666666666668, |
|
"grad_norm": 0.13460921785647972, |
|
"kl": 0.208740234375, |
|
"learning_rate": 1.1073322079063913e-07, |
|
"loss": 0.0002, |
|
"reward": 1.408854205161333, |
|
"reward_std": 0.24213178781792521, |
|
"rewards/equation_reward_func": 0.4635416781529784, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 314 |
|
}, |
|
{ |
|
"completion_length": 319.69532012939453, |
|
"epoch": 0.16853333333333334, |
|
"grad_norm": 0.06764575402513974, |
|
"kl": 0.19781494140625, |
|
"learning_rate": 1.0775582369344946e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3697917275130749, |
|
"reward_std": 0.19613029761239886, |
|
"rewards/equation_reward_func": 0.41145834838971496, |
|
"rewards/format_reward_func": 0.9583333469927311, |
|
"step": 316 |
|
}, |
|
{ |
|
"completion_length": 326.3984441757202, |
|
"epoch": 0.1696, |
|
"grad_norm": 0.09588186769190044, |
|
"kl": 0.20263671875, |
|
"learning_rate": 1.0480796681704077e-07, |
|
"loss": 0.0002, |
|
"reward": 1.3697916939854622, |
|
"reward_std": 0.2743687550537288, |
|
"rewards/equation_reward_func": 0.4218750102445483, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 318 |
|
}, |
|
{ |
|
"completion_length": 259.299485206604, |
|
"epoch": 0.17066666666666666, |
|
"grad_norm": 0.11223918390782062, |
|
"kl": 0.21942138671875, |
|
"learning_rate": 1.018902623505741e-07, |
|
"loss": 0.0002, |
|
"reward": 1.6145833730697632, |
|
"reward_std": 0.23305691732093692, |
|
"rewards/equation_reward_func": 0.6510416809469461, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 320 |
|
}, |
|
{ |
|
"completion_length": 284.2135534286499, |
|
"epoch": 0.17173333333333332, |
|
"grad_norm": 0.08458399836341833, |
|
"kl": 0.20318603515625, |
|
"learning_rate": 9.900331622138063e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5026042014360428, |
|
"reward_std": 0.19581556040793657, |
|
"rewards/equation_reward_func": 0.5494791748933494, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 322 |
|
}, |
|
{ |
|
"completion_length": 292.7161521911621, |
|
"epoch": 0.1728, |
|
"grad_norm": 0.08816589688045734, |
|
"kl": 0.2020263671875, |
|
"learning_rate": 9.614772796912681e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5286458656191826, |
|
"reward_std": 0.19881888292729855, |
|
"rewards/equation_reward_func": 0.580729179084301, |
|
"rewards/format_reward_func": 0.9479166753590107, |
|
"step": 324 |
|
}, |
|
{ |
|
"completion_length": 293.8697986602783, |
|
"epoch": 0.17386666666666667, |
|
"grad_norm": 0.10028674350174616, |
|
"kl": 0.199462890625, |
|
"learning_rate": 9.332409062130686e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3984375558793545, |
|
"reward_std": 0.19026571605354548, |
|
"rewards/equation_reward_func": 0.42708334350027144, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 326 |
|
}, |
|
{ |
|
"completion_length": 279.73438262939453, |
|
"epoch": 0.17493333333333333, |
|
"grad_norm": 0.1228325067153052, |
|
"kl": 0.2115478515625, |
|
"learning_rate": 9.053299057008699e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4843750447034836, |
|
"reward_std": 0.2166175846941769, |
|
"rewards/equation_reward_func": 0.5312500144354999, |
|
"rewards/format_reward_func": 0.9531250186264515, |
|
"step": 328 |
|
}, |
|
{ |
|
"completion_length": 340.1328229904175, |
|
"epoch": 0.176, |
|
"grad_norm": 0.060609232054425886, |
|
"kl": 0.201171875, |
|
"learning_rate": 8.777500745052743e-08, |
|
"loss": 0.0002, |
|
"reward": 1.2682292014360428, |
|
"reward_std": 0.18949319841340184, |
|
"rewards/equation_reward_func": 0.3098958395421505, |
|
"rewards/format_reward_func": 0.9583333469927311, |
|
"step": 330 |
|
}, |
|
{ |
|
"completion_length": 296.57292556762695, |
|
"epoch": 0.17706666666666668, |
|
"grad_norm": 0.724192154071362, |
|
"kl": 2.50811767578125, |
|
"learning_rate": 8.505071402020892e-08, |
|
"loss": 0.0025, |
|
"reward": 1.473958384245634, |
|
"reward_std": 0.2744275564327836, |
|
"rewards/equation_reward_func": 0.5182291809469461, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 332 |
|
}, |
|
{ |
|
"completion_length": 314.63542556762695, |
|
"epoch": 0.17813333333333334, |
|
"grad_norm": 0.07744283195119217, |
|
"kl": 0.21636962890625, |
|
"learning_rate": 8.236067604028562e-08, |
|
"loss": 0.0002, |
|
"reward": 1.385416716337204, |
|
"reward_std": 0.24492605542764068, |
|
"rewards/equation_reward_func": 0.4453125149011612, |
|
"rewards/format_reward_func": 0.9401041939854622, |
|
"step": 334 |
|
}, |
|
{ |
|
"completion_length": 283.77605056762695, |
|
"epoch": 0.1792, |
|
"grad_norm": 0.09014034608663256, |
|
"kl": 0.21405029296875, |
|
"learning_rate": 7.970545215799327e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4609375409781933, |
|
"reward_std": 0.2396250907331705, |
|
"rewards/equation_reward_func": 0.5208333446644247, |
|
"rewards/format_reward_func": 0.9401041902601719, |
|
"step": 336 |
|
}, |
|
{ |
|
"completion_length": 326.2578239440918, |
|
"epoch": 0.18026666666666666, |
|
"grad_norm": 0.08812085562876558, |
|
"kl": 0.203857421875, |
|
"learning_rate": 7.708559379063204e-08, |
|
"loss": 0.0002, |
|
"reward": 1.377604216337204, |
|
"reward_std": 0.2484951955266297, |
|
"rewards/equation_reward_func": 0.45572918234393, |
|
"rewards/format_reward_func": 0.9218750186264515, |
|
"step": 338 |
|
}, |
|
{ |
|
"completion_length": 290.2395906448364, |
|
"epoch": 0.18133333333333335, |
|
"grad_norm": 0.12096313121694759, |
|
"kl": 0.96221923828125, |
|
"learning_rate": 7.45016450110534e-08, |
|
"loss": 0.001, |
|
"reward": 1.4557292126119137, |
|
"reward_std": 0.17546244524419308, |
|
"rewards/equation_reward_func": 0.5078125062864274, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 340 |
|
}, |
|
{ |
|
"completion_length": 277.1953191757202, |
|
"epoch": 0.1824, |
|
"grad_norm": 0.10079414583051031, |
|
"kl": 0.2108154296875, |
|
"learning_rate": 7.195414243467029e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5416667088866234, |
|
"reward_std": 0.21704197628423572, |
|
"rewards/equation_reward_func": 0.5781250186264515, |
|
"rewards/format_reward_func": 0.963541679084301, |
|
"step": 342 |
|
}, |
|
{ |
|
"completion_length": 323.82813262939453, |
|
"epoch": 0.18346666666666667, |
|
"grad_norm": 0.09237849268052975, |
|
"kl": 0.20794677734375, |
|
"learning_rate": 6.944361510801763e-08, |
|
"loss": 0.0002, |
|
"reward": 1.367187537252903, |
|
"reward_std": 0.27509107533842325, |
|
"rewards/equation_reward_func": 0.43229168257676065, |
|
"rewards/format_reward_func": 0.9348958544433117, |
|
"step": 344 |
|
}, |
|
{ |
|
"completion_length": 325.0052185058594, |
|
"epoch": 0.18453333333333333, |
|
"grad_norm": 0.11420262344533028, |
|
"kl": 0.19384765625, |
|
"learning_rate": 6.697058439888283e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3619792088866234, |
|
"reward_std": 0.22549985628575087, |
|
"rewards/equation_reward_func": 0.40625001210719347, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 346 |
|
}, |
|
{ |
|
"completion_length": 281.9427156448364, |
|
"epoch": 0.1856, |
|
"grad_norm": 0.06077341740641138, |
|
"kl": 0.2020263671875, |
|
"learning_rate": 6.453556388803288e-08, |
|
"loss": 0.0002, |
|
"reward": 1.505208358168602, |
|
"reward_std": 0.2182565974071622, |
|
"rewards/equation_reward_func": 0.5546875118743628, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 348 |
|
}, |
|
{ |
|
"completion_length": 277.53646516799927, |
|
"epoch": 0.18666666666666668, |
|
"grad_norm": 0.07280350150631636, |
|
"kl": 0.217041015625, |
|
"learning_rate": 6.213905926255697e-08, |
|
"loss": 0.0002, |
|
"reward": 1.531250026077032, |
|
"reward_std": 0.20672065950930119, |
|
"rewards/equation_reward_func": 0.5677083488553762, |
|
"rewards/format_reward_func": 0.9635416828095913, |
|
"step": 350 |
|
}, |
|
{ |
|
"completion_length": 312.04167127609253, |
|
"epoch": 0.18773333333333334, |
|
"grad_norm": 0.1087978791138447, |
|
"kl": 0.2020263671875, |
|
"learning_rate": 5.978156821084987e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4140625447034836, |
|
"reward_std": 0.28301327722147107, |
|
"rewards/equation_reward_func": 0.466145847691223, |
|
"rewards/format_reward_func": 0.947916679084301, |
|
"step": 352 |
|
}, |
|
{ |
|
"completion_length": 317.19793128967285, |
|
"epoch": 0.1888, |
|
"grad_norm": 0.07417618395975106, |
|
"kl": 0.2265625, |
|
"learning_rate": 5.7463580319254853e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3203125298023224, |
|
"reward_std": 0.26750255608931184, |
|
"rewards/equation_reward_func": 0.38281251210719347, |
|
"rewards/format_reward_func": 0.9375000186264515, |
|
"step": 354 |
|
}, |
|
{ |
|
"completion_length": 293.9921941757202, |
|
"epoch": 0.18986666666666666, |
|
"grad_norm": 0.07468400690550668, |
|
"kl": 0.20404052734375, |
|
"learning_rate": 5.518557697039081e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4609375447034836, |
|
"reward_std": 0.1771034407429397, |
|
"rewards/equation_reward_func": 0.5026041809469461, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 356 |
|
}, |
|
{ |
|
"completion_length": 280.30730056762695, |
|
"epoch": 0.19093333333333334, |
|
"grad_norm": 0.10781941802386472, |
|
"kl": 0.21038818359375, |
|
"learning_rate": 5.294803124318145e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4713541939854622, |
|
"reward_std": 0.25494540203362703, |
|
"rewards/equation_reward_func": 0.5130208432674408, |
|
"rewards/format_reward_func": 0.9583333469927311, |
|
"step": 358 |
|
}, |
|
{ |
|
"completion_length": 282.8307361602783, |
|
"epoch": 0.192, |
|
"grad_norm": 0.07731622217050184, |
|
"kl": 0.234375, |
|
"learning_rate": 5.07514078146106e-08, |
|
"loss": 0.0002, |
|
"reward": 1.455729190260172, |
|
"reward_std": 0.1868862328119576, |
|
"rewards/equation_reward_func": 0.5052083507180214, |
|
"rewards/format_reward_func": 0.950520858168602, |
|
"step": 360 |
|
}, |
|
{ |
|
"completion_length": 282.46094703674316, |
|
"epoch": 0.19306666666666666, |
|
"grad_norm": 0.1170891309726923, |
|
"kl": 0.21356201171875, |
|
"learning_rate": 4.859616286322094e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4296875558793545, |
|
"reward_std": 0.25303210131824017, |
|
"rewards/equation_reward_func": 0.47916667722165585, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 362 |
|
}, |
|
{ |
|
"completion_length": 292.97917556762695, |
|
"epoch": 0.19413333333333332, |
|
"grad_norm": 0.13632866604679422, |
|
"kl": 0.21417236328125, |
|
"learning_rate": 4.648274397437829e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4192708656191826, |
|
"reward_std": 0.2526449547149241, |
|
"rewards/equation_reward_func": 0.4765625155996531, |
|
"rewards/format_reward_func": 0.9427083544433117, |
|
"step": 364 |
|
}, |
|
{ |
|
"completion_length": 255.17970085144043, |
|
"epoch": 0.1952, |
|
"grad_norm": 0.09399601024713211, |
|
"kl": 0.19903564453125, |
|
"learning_rate": 4.4411590047320617e-08, |
|
"loss": 0.0002, |
|
"reward": 1.6093750186264515, |
|
"reward_std": 0.17325606709346175, |
|
"rewards/equation_reward_func": 0.632812513737008, |
|
"rewards/format_reward_func": 0.9765625111758709, |
|
"step": 366 |
|
}, |
|
{ |
|
"completion_length": 305.31511306762695, |
|
"epoch": 0.19626666666666667, |
|
"grad_norm": 0.09591492950535363, |
|
"kl": 0.20355224609375, |
|
"learning_rate": 4.2383131204010494e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3203125335276127, |
|
"reward_std": 0.27175394957885146, |
|
"rewards/equation_reward_func": 0.3723958432674408, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 368 |
|
}, |
|
{ |
|
"completion_length": 274.8046979904175, |
|
"epoch": 0.19733333333333333, |
|
"grad_norm": 0.07448380610650404, |
|
"kl": 0.22686767578125, |
|
"learning_rate": 4.039778869981064e-08, |
|
"loss": 0.0002, |
|
"reward": 1.408854205161333, |
|
"reward_std": 0.1444006934762001, |
|
"rewards/equation_reward_func": 0.4427083423361182, |
|
"rewards/format_reward_func": 0.9661458432674408, |
|
"step": 370 |
|
}, |
|
{ |
|
"completion_length": 295.8073043823242, |
|
"epoch": 0.1984, |
|
"grad_norm": 0.08046455188518299, |
|
"kl": 0.20953369140625, |
|
"learning_rate": 3.845597483600049e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3984375335276127, |
|
"reward_std": 0.2535385196097195, |
|
"rewards/equation_reward_func": 0.4505208421032876, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 372 |
|
}, |
|
{ |
|
"completion_length": 281.377610206604, |
|
"epoch": 0.19946666666666665, |
|
"grad_norm": 0.1231734086836591, |
|
"kl": 0.2137451171875, |
|
"learning_rate": 3.655809287415284e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4322916939854622, |
|
"reward_std": 0.2503278241492808, |
|
"rewards/equation_reward_func": 0.48958334792405367, |
|
"rewards/format_reward_func": 0.9427083544433117, |
|
"step": 374 |
|
}, |
|
{ |
|
"completion_length": 254.83854961395264, |
|
"epoch": 0.20053333333333334, |
|
"grad_norm": 0.08564861254444861, |
|
"kl": 0.20831298828125, |
|
"learning_rate": 3.4704536952387285e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5598958618938923, |
|
"reward_std": 0.18554534064605832, |
|
"rewards/equation_reward_func": 0.5937500074505806, |
|
"rewards/format_reward_func": 0.9661458507180214, |
|
"step": 376 |
|
}, |
|
{ |
|
"completion_length": 322.82292556762695, |
|
"epoch": 0.2016, |
|
"grad_norm": 0.13685877620537756, |
|
"kl": 0.1998291015625, |
|
"learning_rate": 3.2895692003518575e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3541667088866234, |
|
"reward_std": 0.24434165563434362, |
|
"rewards/equation_reward_func": 0.40625001350417733, |
|
"rewards/format_reward_func": 0.9479166865348816, |
|
"step": 378 |
|
}, |
|
{ |
|
"completion_length": 267.2968854904175, |
|
"epoch": 0.20266666666666666, |
|
"grad_norm": 0.10129770118592953, |
|
"kl": 0.20574951171875, |
|
"learning_rate": 3.113193367511635e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5260417014360428, |
|
"reward_std": 0.1942262640222907, |
|
"rewards/equation_reward_func": 0.570312513038516, |
|
"rewards/format_reward_func": 0.9557291828095913, |
|
"step": 380 |
|
}, |
|
{ |
|
"completion_length": 299.36719369888306, |
|
"epoch": 0.20373333333333332, |
|
"grad_norm": 0.14156955034396038, |
|
"kl": 0.23870849609375, |
|
"learning_rate": 2.9413628251493934e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4817708730697632, |
|
"reward_std": 0.2450651628896594, |
|
"rewards/equation_reward_func": 0.5286458479240537, |
|
"rewards/format_reward_func": 0.9531250149011612, |
|
"step": 382 |
|
}, |
|
{ |
|
"completion_length": 305.47396659851074, |
|
"epoch": 0.2048, |
|
"grad_norm": 0.1385039320261207, |
|
"kl": 0.21966552734375, |
|
"learning_rate": 2.774113257764066e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3437500298023224, |
|
"reward_std": 0.14954586559906602, |
|
"rewards/equation_reward_func": 0.39322917722165585, |
|
"rewards/format_reward_func": 0.9505208507180214, |
|
"step": 384 |
|
}, |
|
{ |
|
"completion_length": 283.994797706604, |
|
"epoch": 0.20586666666666667, |
|
"grad_norm": 0.08046202628694818, |
|
"kl": 0.20733642578125, |
|
"learning_rate": 2.611479398511518e-08, |
|
"loss": 0.0002, |
|
"reward": 1.429687526077032, |
|
"reward_std": 0.20751357544213533, |
|
"rewards/equation_reward_func": 0.4739583395421505, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 386 |
|
}, |
|
{ |
|
"completion_length": 299.1197986602783, |
|
"epoch": 0.20693333333333333, |
|
"grad_norm": 0.09252027726323123, |
|
"kl": 0.20440673828125, |
|
"learning_rate": 2.4534950219914057e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4296875447034836, |
|
"reward_std": 0.28528111474588513, |
|
"rewards/equation_reward_func": 0.48958334559574723, |
|
"rewards/format_reward_func": 0.9401041828095913, |
|
"step": 388 |
|
}, |
|
{ |
|
"completion_length": 304.86459159851074, |
|
"epoch": 0.208, |
|
"grad_norm": 0.1258367102892705, |
|
"kl": 0.209228515625, |
|
"learning_rate": 2.300192937233128e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4218750298023224, |
|
"reward_std": 0.25267517613247037, |
|
"rewards/equation_reward_func": 0.4843750155996531, |
|
"rewards/format_reward_func": 0.9375000149011612, |
|
"step": 390 |
|
}, |
|
{ |
|
"completion_length": 258.6849012374878, |
|
"epoch": 0.20906666666666668, |
|
"grad_norm": 0.0745793407744394, |
|
"kl": 0.21881103515625, |
|
"learning_rate": 2.1516049808822935e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5729167088866234, |
|
"reward_std": 0.18264030944555998, |
|
"rewards/equation_reward_func": 0.6250000149011612, |
|
"rewards/format_reward_func": 0.947916679084301, |
|
"step": 392 |
|
}, |
|
{ |
|
"completion_length": 315.58073902130127, |
|
"epoch": 0.21013333333333334, |
|
"grad_norm": 0.08894574865715046, |
|
"kl": 0.212890625, |
|
"learning_rate": 2.007762010589098e-08, |
|
"loss": 0.0002, |
|
"reward": 1.375000026077032, |
|
"reward_std": 0.20314896712079644, |
|
"rewards/equation_reward_func": 0.4244791748933494, |
|
"rewards/format_reward_func": 0.9505208469927311, |
|
"step": 394 |
|
}, |
|
{ |
|
"completion_length": 285.76042652130127, |
|
"epoch": 0.2112, |
|
"grad_norm": 0.0978921012598675, |
|
"kl": 0.21484375, |
|
"learning_rate": 1.8686938986000627e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4947917312383652, |
|
"reward_std": 0.22043491480872035, |
|
"rewards/equation_reward_func": 0.5234375174622983, |
|
"rewards/format_reward_func": 0.9713541828095913, |
|
"step": 396 |
|
}, |
|
{ |
|
"completion_length": 249.42448902130127, |
|
"epoch": 0.21226666666666666, |
|
"grad_norm": 0.11183349657491888, |
|
"kl": 0.24554443359375, |
|
"learning_rate": 1.734429525554365e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5598958730697632, |
|
"reward_std": 0.22952570766210556, |
|
"rewards/equation_reward_func": 0.6145833507180214, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 398 |
|
}, |
|
{ |
|
"completion_length": 305.55209159851074, |
|
"epoch": 0.21333333333333335, |
|
"grad_norm": 0.1522170775819166, |
|
"kl": 0.2047119140625, |
|
"learning_rate": 1.604996774486145e-08, |
|
"loss": 0.0002, |
|
"reward": 1.440104205161333, |
|
"reward_std": 0.25300154415890574, |
|
"rewards/equation_reward_func": 0.494791679084301, |
|
"rewards/format_reward_func": 0.9453125149011612, |
|
"step": 400 |
|
}, |
|
{ |
|
"completion_length": 315.6744909286499, |
|
"epoch": 0.2144, |
|
"grad_norm": 0.12335986687631752, |
|
"kl": 0.213134765625, |
|
"learning_rate": 1.4804225250339281e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3333333916962147, |
|
"reward_std": 0.239916508551687, |
|
"rewards/equation_reward_func": 0.3802083432674408, |
|
"rewards/format_reward_func": 0.9531250260770321, |
|
"step": 402 |
|
}, |
|
{ |
|
"completion_length": 291.2291741371155, |
|
"epoch": 0.21546666666666667, |
|
"grad_norm": 0.10616474958419292, |
|
"kl": 0.27178955078125, |
|
"learning_rate": 1.360732647858498e-08, |
|
"loss": 0.0003, |
|
"reward": 1.4479167014360428, |
|
"reward_std": 0.2474421444348991, |
|
"rewards/equation_reward_func": 0.497395847691223, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 404 |
|
}, |
|
{ |
|
"completion_length": 257.3671932220459, |
|
"epoch": 0.21653333333333333, |
|
"grad_norm": 0.10220249166840606, |
|
"kl": 0.21807861328125, |
|
"learning_rate": 1.2459519992702311e-08, |
|
"loss": 0.0002, |
|
"reward": 1.5755208805203438, |
|
"reward_std": 0.21677069226279855, |
|
"rewards/equation_reward_func": 0.6041666846722364, |
|
"rewards/format_reward_func": 0.9713541865348816, |
|
"step": 406 |
|
}, |
|
{ |
|
"completion_length": 285.91146659851074, |
|
"epoch": 0.2176, |
|
"grad_norm": 0.10190078603346825, |
|
"kl": 0.2186279296875, |
|
"learning_rate": 1.1361044160671629e-08, |
|
"loss": 0.0002, |
|
"reward": 1.4010417014360428, |
|
"reward_std": 0.19971515564247966, |
|
"rewards/equation_reward_func": 0.44791668094694614, |
|
"rewards/format_reward_func": 0.9531250223517418, |
|
"step": 408 |
|
}, |
|
{ |
|
"completion_length": 308.1119918823242, |
|
"epoch": 0.21866666666666668, |
|
"grad_norm": 0.11265474290252611, |
|
"kl": 0.2017822265625, |
|
"learning_rate": 1.0312127105846947e-08, |
|
"loss": 0.0002, |
|
"reward": 1.3802083656191826, |
|
"reward_std": 0.27934338385239244, |
|
"rewards/equation_reward_func": 0.4453125107102096, |
|
"rewards/format_reward_func": 0.934895858168602, |
|
"step": 410 |
|
}, |
|
{ |
|
"completion_length": 297.32292461395264, |
|
"epoch": 0.21973333333333334, |
|
"grad_norm": 0.12758393552068756, |
|
"kl": 0.24029541015625, |
|
"learning_rate": 9.312986659581301e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3802083730697632, |
|
"reward_std": 0.2152186674065888, |
|
"rewards/equation_reward_func": 0.4244791779201478, |
|
"rewards/format_reward_func": 0.9557291902601719, |
|
"step": 412 |
|
}, |
|
{ |
|
"completion_length": 277.3385486602783, |
|
"epoch": 0.2208, |
|
"grad_norm": 0.09214969419850558, |
|
"kl": 0.21759033203125, |
|
"learning_rate": 8.363830315988945e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4765625447034836, |
|
"reward_std": 0.24217339418828487, |
|
"rewards/equation_reward_func": 0.5156250167638063, |
|
"rewards/format_reward_func": 0.9609375223517418, |
|
"step": 414 |
|
}, |
|
{ |
|
"completion_length": 308.1849021911621, |
|
"epoch": 0.22186666666666666, |
|
"grad_norm": 0.07929086654561879, |
|
"kl": 0.2041015625, |
|
"learning_rate": 7.46485518885462e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3723958730697632, |
|
"reward_std": 0.18584083206951618, |
|
"rewards/equation_reward_func": 0.4166666786186397, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 416 |
|
}, |
|
{ |
|
"completion_length": 301.53125762939453, |
|
"epoch": 0.22293333333333334, |
|
"grad_norm": 0.06780911669525314, |
|
"kl": 0.20703125, |
|
"learning_rate": 6.616247970698319e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4192708656191826, |
|
"reward_std": 0.1916296579875052, |
|
"rewards/equation_reward_func": 0.46093751303851604, |
|
"rewards/format_reward_func": 0.9583333507180214, |
|
"step": 418 |
|
}, |
|
{ |
|
"completion_length": 307.27865409851074, |
|
"epoch": 0.224, |
|
"grad_norm": 0.11080570975859302, |
|
"kl": 0.2071533203125, |
|
"learning_rate": 5.8181848940044855e-09, |
|
"loss": 0.0002, |
|
"reward": 1.3567708730697632, |
|
"reward_std": 0.2836426943540573, |
|
"rewards/equation_reward_func": 0.41406251257285476, |
|
"rewards/format_reward_func": 0.9427083544433117, |
|
"step": 420 |
|
}, |
|
{ |
|
"completion_length": 250.89844417572021, |
|
"epoch": 0.22506666666666666, |
|
"grad_norm": 0.07513225181520254, |
|
"kl": 0.2593994140625, |
|
"learning_rate": 5.070831694623135e-09, |
|
"loss": 0.0003, |
|
"reward": 1.5364583656191826, |
|
"reward_std": 0.15577324572950602, |
|
"rewards/equation_reward_func": 0.5677083488553762, |
|
"rewards/format_reward_func": 0.9687500149011612, |
|
"step": 422 |
|
}, |
|
{ |
|
"completion_length": 269.37500953674316, |
|
"epoch": 0.22613333333333333, |
|
"grad_norm": 0.1018666761789586, |
|
"kl": 0.2789306640625, |
|
"learning_rate": 4.374343577351336e-09, |
|
"loss": 0.0003, |
|
"reward": 1.5338542088866234, |
|
"reward_std": 0.21989819640293717, |
|
"rewards/equation_reward_func": 0.5677083441987634, |
|
"rewards/format_reward_func": 0.9661458432674408, |
|
"step": 424 |
|
}, |
|
{ |
|
"completion_length": 255.07552909851074, |
|
"epoch": 0.2272, |
|
"grad_norm": 0.08471160288097286, |
|
"kl": 0.204833984375, |
|
"learning_rate": 3.7288651837012745e-09, |
|
"loss": 0.0002, |
|
"reward": 1.5546875447034836, |
|
"reward_std": 0.18895011255517602, |
|
"rewards/equation_reward_func": 0.5911458488553762, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 426 |
|
}, |
|
{ |
|
"completion_length": 324.56511211395264, |
|
"epoch": 0.22826666666666667, |
|
"grad_norm": 0.11348881355059917, |
|
"kl": 0.2518310546875, |
|
"learning_rate": 3.134530561862081e-09, |
|
"loss": 0.0003, |
|
"reward": 1.335937537252903, |
|
"reward_std": 0.30545433703809977, |
|
"rewards/equation_reward_func": 0.41406250884756446, |
|
"rewards/format_reward_func": 0.9218750186264515, |
|
"step": 428 |
|
}, |
|
{ |
|
"completion_length": 284.5234456062317, |
|
"epoch": 0.22933333333333333, |
|
"grad_norm": 0.06295532187463092, |
|
"kl": 0.2379150390625, |
|
"learning_rate": 2.5914631388619103e-09, |
|
"loss": 0.0002, |
|
"reward": 1.445312537252903, |
|
"reward_std": 0.16647559916600585, |
|
"rewards/equation_reward_func": 0.4843750111758709, |
|
"rewards/format_reward_func": 0.9609375186264515, |
|
"step": 430 |
|
}, |
|
{ |
|
"completion_length": 274.03386211395264, |
|
"epoch": 0.2304, |
|
"grad_norm": 0.09139534269326195, |
|
"kl": 0.19775390625, |
|
"learning_rate": 2.0997756949353297e-09, |
|
"loss": 0.0002, |
|
"reward": 1.4739583656191826, |
|
"reward_std": 0.21223737625405192, |
|
"rewards/equation_reward_func": 0.5078125074505806, |
|
"rewards/format_reward_func": 0.9661458469927311, |
|
"step": 432 |
|
}, |
|
{ |
|
"completion_length": 291.30469846725464, |
|
"epoch": 0.23146666666666665, |
|
"grad_norm": 0.10394164013405915, |
|
"kl": 0.20989990234375, |
|
"learning_rate": 1.6595703401020844e-09, |
|
"loss": 0.0002, |
|
"reward": 1.447916716337204, |
|
"reward_std": 0.2555937790311873, |
|
"rewards/equation_reward_func": 0.4921875139698386, |
|
"rewards/format_reward_func": 0.955729179084301, |
|
"step": 434 |
|
}, |
|
{ |
|
"completion_length": 304.8177194595337, |
|
"epoch": 0.23253333333333334, |
|
"grad_norm": 0.08324319886519, |
|
"kl": 0.19122314453125, |
|
"learning_rate": 1.2709384929615596e-09, |
|
"loss": 0.0002, |
|
"reward": 1.453125037252903, |
|
"reward_std": 0.24181979056447744, |
|
"rewards/equation_reward_func": 0.4947916865348816, |
|
"rewards/format_reward_func": 0.9583333544433117, |
|
"step": 436 |
|
}, |
|
{ |
|
"completion_length": 288.1406316757202, |
|
"epoch": 0.2336, |
|
"grad_norm": 0.09377210238406529, |
|
"kl": 0.23077392578125, |
|
"learning_rate": 9.339608617077165e-10, |
|
"loss": 0.0002, |
|
"reward": 1.4505208805203438, |
|
"reward_std": 0.2581090102903545, |
|
"rewards/equation_reward_func": 0.5052083465270698, |
|
"rewards/format_reward_func": 0.9453125186264515, |
|
"step": 438 |
|
}, |
|
{ |
|
"completion_length": 284.34375858306885, |
|
"epoch": 0.23466666666666666, |
|
"grad_norm": 0.09287842944659493, |
|
"kl": 0.67633056640625, |
|
"learning_rate": 6.487074273681114e-10, |
|
"loss": 0.0007, |
|
"reward": 1.471354205161333, |
|
"reward_std": 0.21115159848704934, |
|
"rewards/equation_reward_func": 0.5208333460614085, |
|
"rewards/format_reward_func": 0.9505208544433117, |
|
"step": 440 |
|
}, |
|
{ |
|
"completion_length": 279.61459159851074, |
|
"epoch": 0.23573333333333332, |
|
"grad_norm": 0.08429063807950674, |
|
"kl": 0.21234130859375, |
|
"learning_rate": 4.152374292708538e-10, |
|
"loss": 0.0002, |
|
"reward": 1.4270833730697632, |
|
"reward_std": 0.1764170629903674, |
|
"rewards/equation_reward_func": 0.46354168094694614, |
|
"rewards/format_reward_func": 0.9635416865348816, |
|
"step": 442 |
|
}, |
|
{ |
|
"completion_length": 256.75782203674316, |
|
"epoch": 0.2368, |
|
"grad_norm": 0.09914280924883595, |
|
"kl": 0.2081298828125, |
|
"learning_rate": 2.3359935274214204e-10, |
|
"loss": 0.0002, |
|
"reward": 1.546875037252903, |
|
"reward_std": 0.1623109932988882, |
|
"rewards/equation_reward_func": 0.5859375167638063, |
|
"rewards/format_reward_func": 0.9609375149011612, |
|
"step": 444 |
|
}, |
|
{ |
|
"completion_length": 299.05209159851074, |
|
"epoch": 0.23786666666666667, |
|
"grad_norm": 0.09385477005078556, |
|
"kl": 0.22528076171875, |
|
"learning_rate": 1.0383091903720665e-10, |
|
"loss": 0.0002, |
|
"reward": 1.3385417126119137, |
|
"reward_std": 0.21043815184384584, |
|
"rewards/equation_reward_func": 0.38281250931322575, |
|
"rewards/format_reward_func": 0.9557291977107525, |
|
"step": 446 |
|
}, |
|
{ |
|
"completion_length": 300.3750114440918, |
|
"epoch": 0.23893333333333333, |
|
"grad_norm": 0.08540311506458771, |
|
"kl": 0.20751953125, |
|
"learning_rate": 2.595907750671533e-11, |
|
"loss": 0.0002, |
|
"reward": 1.4140625335276127, |
|
"reward_std": 0.2364980010315776, |
|
"rewards/equation_reward_func": 0.47656251303851604, |
|
"rewards/format_reward_func": 0.9375000149011612, |
|
"step": 448 |
|
}, |
|
{ |
|
"completion_length": 310.57032203674316, |
|
"epoch": 0.24, |
|
"grad_norm": 0.08723686852514427, |
|
"kl": 0.20452880859375, |
|
"learning_rate": 0.0, |
|
"loss": 0.0002, |
|
"reward": 1.3619791977107525, |
|
"reward_std": 0.22422056132927537, |
|
"rewards/equation_reward_func": 0.4140625144354999, |
|
"rewards/format_reward_func": 0.9479166828095913, |
|
"step": 450 |
|
}, |
|
{ |
|
"epoch": 0.24, |
|
"step": 450, |
|
"total_flos": 0.0, |
|
"train_loss": 0.003079434129682015, |
|
"train_runtime": 21082.1063, |
|
"train_samples_per_second": 0.512, |
|
"train_steps_per_second": 0.021 |
|
} |
|
], |
|
"logging_steps": 2, |
|
"max_steps": 450, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 25, |
|
"stateful_callbacks": { |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 0.0, |
|
"train_batch_size": 1, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |
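Usage note (appended after the JSON, not part of the checkpoint state): the metrics above live in the "log_history" array, so the reward curves can be recovered directly from this file. Below is a minimal sketch, assuming the file is saved locally as trainer_state.json (the path is an assumption); it only reads keys that actually appear in the entries ("step", "reward", "rewards/equation_reward_func", "rewards/format_reward_func").

# Minimal sketch: summarize the reward trajectory recorded in log_history.
import json

with open("trainer_state.json") as f:  # assumed path to this file
    state = json.load(f)

# Keep only the per-step logging entries (the final summary entry has no "reward").
history = [e for e in state["log_history"] if "reward" in e]

steps = [e["step"] for e in history]
rewards = [e["reward"] for e in history]
eq_reward = [e["rewards/equation_reward_func"] for e in history]
fmt_reward = [e["rewards/format_reward_func"] for e in history]

print(f"logged points: {len(steps)} (steps {steps[0]}..{steps[-1]})")
print(f"final reward: {rewards[-1]:.3f}  "
      f"equation: {eq_reward[-1]:.3f}  format: {fmt_reward[-1]:.3f}")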