combined_sft_10000_mcq_1epoch / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 30,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.011111111111111112,
"grad_norm": 0.6721189022064209,
"learning_rate": 9.99695413509548e-05,
"loss": 0.4836,
"step": 10
},
{
"epoch": 0.022222222222222223,
"grad_norm": 0.06062021106481552,
"learning_rate": 9.987820251299122e-05,
"loss": 0.0217,
"step": 20
},
{
"epoch": 0.03333333333333333,
"grad_norm": 0.034004177898168564,
"learning_rate": 9.972609476841367e-05,
"loss": 0.0043,
"step": 30
},
{
"epoch": 0.03333333333333333,
"eval_loss": 0.004450319334864616,
"eval_runtime": 393.1048,
"eval_samples_per_second": 5.088,
"eval_steps_per_second": 0.254,
"step": 30
},
{
"epoch": 0.044444444444444446,
"grad_norm": 0.014907660894095898,
"learning_rate": 9.951340343707852e-05,
"loss": 0.0043,
"step": 40
},
{
"epoch": 0.05555555555555555,
"grad_norm": 0.007317671552300453,
"learning_rate": 9.924038765061042e-05,
"loss": 0.0039,
"step": 50
},
{
"epoch": 0.06666666666666667,
"grad_norm": 0.03429165109992027,
"learning_rate": 9.890738003669029e-05,
"loss": 0.0041,
"step": 60
},
{
"epoch": 0.06666666666666667,
"eval_loss": 0.003960395231842995,
"eval_runtime": 404.1276,
"eval_samples_per_second": 4.949,
"eval_steps_per_second": 0.247,
"step": 60
},
{
"epoch": 0.07777777777777778,
"grad_norm": 0.7178479433059692,
"learning_rate": 9.851478631379982e-05,
"loss": 0.0042,
"step": 70
},
{
"epoch": 0.08888888888888889,
"grad_norm": 0.08405599743127823,
"learning_rate": 9.806308479691595e-05,
"loss": 0.0039,
"step": 80
},
{
"epoch": 0.1,
"grad_norm": 0.01760088838636875,
"learning_rate": 9.755282581475769e-05,
"loss": 0.0042,
"step": 90
},
{
"epoch": 0.1,
"eval_loss": 0.003934871405363083,
"eval_runtime": 392.97,
"eval_samples_per_second": 5.089,
"eval_steps_per_second": 0.254,
"step": 90
},
{
"epoch": 0.1111111111111111,
"grad_norm": 0.009712091647088528,
"learning_rate": 9.698463103929542e-05,
"loss": 0.0039,
"step": 100
},
{
"epoch": 0.12222222222222222,
"grad_norm": 0.00771428132429719,
"learning_rate": 9.635919272833938e-05,
"loss": 0.0039,
"step": 110
},
{
"epoch": 0.13333333333333333,
"grad_norm": 0.023663334548473358,
"learning_rate": 9.567727288213005e-05,
"loss": 0.0038,
"step": 120
},
{
"epoch": 0.13333333333333333,
"eval_loss": 0.00376720423810184,
"eval_runtime": 391.7943,
"eval_samples_per_second": 5.105,
"eval_steps_per_second": 0.255,
"step": 120
},
{
"epoch": 0.14444444444444443,
"grad_norm": 0.004410896450281143,
"learning_rate": 9.493970231495835e-05,
"loss": 0.0039,
"step": 130
},
{
"epoch": 0.15555555555555556,
"grad_norm": 0.014762775972485542,
"learning_rate": 9.414737964294636e-05,
"loss": 0.0038,
"step": 140
},
{
"epoch": 0.16666666666666666,
"grad_norm": 0.016578922048211098,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0036,
"step": 150
},
{
"epoch": 0.16666666666666666,
"eval_loss": 0.0037360445130616426,
"eval_runtime": 394.3937,
"eval_samples_per_second": 5.071,
"eval_steps_per_second": 0.254,
"step": 150
},
{
"epoch": 0.17777777777777778,
"grad_norm": 0.012592969462275505,
"learning_rate": 9.24024048078213e-05,
"loss": 0.0038,
"step": 160
},
{
"epoch": 0.18888888888888888,
"grad_norm": 0.013751073740422726,
"learning_rate": 9.145187862775209e-05,
"loss": 0.0039,
"step": 170
},
{
"epoch": 0.2,
"grad_norm": 0.01368357427418232,
"learning_rate": 9.045084971874738e-05,
"loss": 0.0038,
"step": 180
},
{
"epoch": 0.2,
"eval_loss": 0.003743910463526845,
"eval_runtime": 397.88,
"eval_samples_per_second": 5.027,
"eval_steps_per_second": 0.251,
"step": 180
},
{
"epoch": 0.2111111111111111,
"grad_norm": 0.004404901526868343,
"learning_rate": 8.940053768033609e-05,
"loss": 0.0038,
"step": 190
},
{
"epoch": 0.2222222222222222,
"grad_norm": 0.030225256457924843,
"learning_rate": 8.83022221559489e-05,
"loss": 0.0039,
"step": 200
},
{
"epoch": 0.23333333333333334,
"grad_norm": 0.009082062169909477,
"learning_rate": 8.715724127386972e-05,
"loss": 0.0039,
"step": 210
},
{
"epoch": 0.23333333333333334,
"eval_loss": 0.0037830513902008533,
"eval_runtime": 397.6789,
"eval_samples_per_second": 5.029,
"eval_steps_per_second": 0.251,
"step": 210
},
{
"epoch": 0.24444444444444444,
"grad_norm": 0.009521303698420525,
"learning_rate": 8.596699001693255e-05,
"loss": 0.0038,
"step": 220
},
{
"epoch": 0.25555555555555554,
"grad_norm": 0.007651821710169315,
"learning_rate": 8.473291852294987e-05,
"loss": 0.0038,
"step": 230
},
{
"epoch": 0.26666666666666666,
"grad_norm": 0.009911688044667244,
"learning_rate": 8.345653031794292e-05,
"loss": 0.0038,
"step": 240
},
{
"epoch": 0.26666666666666666,
"eval_loss": 0.003697082633152604,
"eval_runtime": 397.7693,
"eval_samples_per_second": 5.028,
"eval_steps_per_second": 0.251,
"step": 240
},
{
"epoch": 0.2777777777777778,
"grad_norm": 0.022675497457385063,
"learning_rate": 8.213938048432697e-05,
"loss": 0.0037,
"step": 250
},
{
"epoch": 0.28888888888888886,
"grad_norm": 0.012265544384717941,
"learning_rate": 8.07830737662829e-05,
"loss": 0.0036,
"step": 260
},
{
"epoch": 0.3,
"grad_norm": 0.05352924019098282,
"learning_rate": 7.938926261462366e-05,
"loss": 0.0034,
"step": 270
},
{
"epoch": 0.3,
"eval_loss": 0.0031155734322965145,
"eval_runtime": 397.2379,
"eval_samples_per_second": 5.035,
"eval_steps_per_second": 0.252,
"step": 270
},
{
"epoch": 0.3111111111111111,
"grad_norm": 0.02856474742293358,
"learning_rate": 7.795964517353735e-05,
"loss": 0.0029,
"step": 280
},
{
"epoch": 0.32222222222222224,
"grad_norm": 0.07519371062517166,
"learning_rate": 7.649596321166024e-05,
"loss": 0.0026,
"step": 290
},
{
"epoch": 0.3333333333333333,
"grad_norm": 0.02294323220849037,
"learning_rate": 7.500000000000001e-05,
"loss": 0.0032,
"step": 300
},
{
"epoch": 0.3333333333333333,
"eval_loss": 0.0026329518295824528,
"eval_runtime": 394.8427,
"eval_samples_per_second": 5.065,
"eval_steps_per_second": 0.253,
"step": 300
},
{
"epoch": 0.34444444444444444,
"grad_norm": 0.013690228573977947,
"learning_rate": 7.347357813929454e-05,
"loss": 0.0029,
"step": 310
},
{
"epoch": 0.35555555555555557,
"grad_norm": 0.02574954181909561,
"learning_rate": 7.191855733945387e-05,
"loss": 0.0023,
"step": 320
},
{
"epoch": 0.36666666666666664,
"grad_norm": 0.02459203265607357,
"learning_rate": 7.033683215379002e-05,
"loss": 0.0027,
"step": 330
},
{
"epoch": 0.36666666666666664,
"eval_loss": 0.0024726453702896833,
"eval_runtime": 396.4992,
"eval_samples_per_second": 5.044,
"eval_steps_per_second": 0.252,
"step": 330
},
{
"epoch": 0.37777777777777777,
"grad_norm": 0.01764465682208538,
"learning_rate": 6.873032967079561e-05,
"loss": 0.0025,
"step": 340
},
{
"epoch": 0.3888888888888889,
"grad_norm": 0.013812178745865822,
"learning_rate": 6.710100716628344e-05,
"loss": 0.0022,
"step": 350
},
{
"epoch": 0.4,
"grad_norm": 0.03405527397990227,
"learning_rate": 6.545084971874738e-05,
"loss": 0.0022,
"step": 360
},
{
"epoch": 0.4,
"eval_loss": 0.002381704282015562,
"eval_runtime": 396.4549,
"eval_samples_per_second": 5.045,
"eval_steps_per_second": 0.252,
"step": 360
},
{
"epoch": 0.4111111111111111,
"grad_norm": 0.020748196169734,
"learning_rate": 6.378186779084995e-05,
"loss": 0.0024,
"step": 370
},
{
"epoch": 0.4222222222222222,
"grad_norm": 0.013957368209958076,
"learning_rate": 6.209609477998338e-05,
"loss": 0.0025,
"step": 380
},
{
"epoch": 0.43333333333333335,
"grad_norm": 0.03190489485859871,
"learning_rate": 6.0395584540887963e-05,
"loss": 0.002,
"step": 390
},
{
"epoch": 0.43333333333333335,
"eval_loss": 0.0022392040118575096,
"eval_runtime": 395.9075,
"eval_samples_per_second": 5.052,
"eval_steps_per_second": 0.253,
"step": 390
},
{
"epoch": 0.4444444444444444,
"grad_norm": 0.02455313876271248,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0023,
"step": 400
},
{
"epoch": 0.45555555555555555,
"grad_norm": 0.012559180147945881,
"learning_rate": 5.695865504800327e-05,
"loss": 0.0023,
"step": 410
},
{
"epoch": 0.4666666666666667,
"grad_norm": 0.018574975430965424,
"learning_rate": 5.522642316338268e-05,
"loss": 0.0025,
"step": 420
},
{
"epoch": 0.4666666666666667,
"eval_loss": 0.002195958746597171,
"eval_runtime": 400.9447,
"eval_samples_per_second": 4.988,
"eval_steps_per_second": 0.249,
"step": 420
},
{
"epoch": 0.4777777777777778,
"grad_norm": 0.035168472677469254,
"learning_rate": 5.348782368720626e-05,
"loss": 0.0023,
"step": 430
},
{
"epoch": 0.4888888888888889,
"grad_norm": 0.025580240413546562,
"learning_rate": 5.174497483512506e-05,
"loss": 0.002,
"step": 440
},
{
"epoch": 0.5,
"grad_norm": 0.018627658486366272,
"learning_rate": 5e-05,
"loss": 0.0023,
"step": 450
},
{
"epoch": 0.5,
"eval_loss": 0.0020566468592733145,
"eval_runtime": 395.5489,
"eval_samples_per_second": 5.056,
"eval_steps_per_second": 0.253,
"step": 450
},
{
"epoch": 0.5111111111111111,
"grad_norm": 0.025099800899624825,
"learning_rate": 4.825502516487497e-05,
"loss": 0.0022,
"step": 460
},
{
"epoch": 0.5222222222222223,
"grad_norm": 0.017753081396222115,
"learning_rate": 4.6512176312793736e-05,
"loss": 0.002,
"step": 470
},
{
"epoch": 0.5333333333333333,
"grad_norm": 0.01829727366566658,
"learning_rate": 4.477357683661734e-05,
"loss": 0.0015,
"step": 480
},
{
"epoch": 0.5333333333333333,
"eval_loss": 0.0018324151169508696,
"eval_runtime": 392.5068,
"eval_samples_per_second": 5.095,
"eval_steps_per_second": 0.255,
"step": 480
},
{
"epoch": 0.5444444444444444,
"grad_norm": 0.020837554708123207,
"learning_rate": 4.3041344951996746e-05,
"loss": 0.0018,
"step": 490
},
{
"epoch": 0.5555555555555556,
"grad_norm": 0.027814585715532303,
"learning_rate": 4.131759111665349e-05,
"loss": 0.002,
"step": 500
},
{
"epoch": 0.5666666666666667,
"grad_norm": 0.040902189910411835,
"learning_rate": 3.960441545911204e-05,
"loss": 0.0017,
"step": 510
},
{
"epoch": 0.5666666666666667,
"eval_loss": 0.0016835549613460898,
"eval_runtime": 396.5527,
"eval_samples_per_second": 5.043,
"eval_steps_per_second": 0.252,
"step": 510
},
{
"epoch": 0.5777777777777777,
"grad_norm": 0.09094793349504471,
"learning_rate": 3.790390522001662e-05,
"loss": 0.0018,
"step": 520
},
{
"epoch": 0.5888888888888889,
"grad_norm": 0.03364603593945503,
"learning_rate": 3.6218132209150045e-05,
"loss": 0.0021,
"step": 530
},
{
"epoch": 0.6,
"grad_norm": 0.04592595249414444,
"learning_rate": 3.4549150281252636e-05,
"loss": 0.0024,
"step": 540
},
{
"epoch": 0.6,
"eval_loss": 0.0019755202811211348,
"eval_runtime": 396.0145,
"eval_samples_per_second": 5.05,
"eval_steps_per_second": 0.253,
"step": 540
},
{
"epoch": 0.6111111111111112,
"grad_norm": 0.031017672270536423,
"learning_rate": 3.289899283371657e-05,
"loss": 0.0018,
"step": 550
},
{
"epoch": 0.6222222222222222,
"grad_norm": 0.033711861819028854,
"learning_rate": 3.12696703292044e-05,
"loss": 0.0014,
"step": 560
},
{
"epoch": 0.6333333333333333,
"grad_norm": 0.01970149576663971,
"learning_rate": 2.9663167846209998e-05,
"loss": 0.0019,
"step": 570
},
{
"epoch": 0.6333333333333333,
"eval_loss": 0.0018198461038991809,
"eval_runtime": 396.0095,
"eval_samples_per_second": 5.05,
"eval_steps_per_second": 0.253,
"step": 570
},
{
"epoch": 0.6444444444444445,
"grad_norm": 0.02405315265059471,
"learning_rate": 2.8081442660546125e-05,
"loss": 0.0017,
"step": 580
},
{
"epoch": 0.6555555555555556,
"grad_norm": 0.03547287359833717,
"learning_rate": 2.6526421860705473e-05,
"loss": 0.0016,
"step": 590
},
{
"epoch": 0.6666666666666666,
"grad_norm": 0.02788299135863781,
"learning_rate": 2.500000000000001e-05,
"loss": 0.0015,
"step": 600
},
{
"epoch": 0.6666666666666666,
"eval_loss": 0.001557975192554295,
"eval_runtime": 396.8861,
"eval_samples_per_second": 5.039,
"eval_steps_per_second": 0.252,
"step": 600
},
{
"epoch": 0.6777777777777778,
"grad_norm": 0.0368446409702301,
"learning_rate": 2.350403678833976e-05,
"loss": 0.002,
"step": 610
},
{
"epoch": 0.6888888888888889,
"grad_norm": 0.023159069940447807,
"learning_rate": 2.2040354826462668e-05,
"loss": 0.0012,
"step": 620
},
{
"epoch": 0.7,
"grad_norm": 0.023195048794150352,
"learning_rate": 2.061073738537635e-05,
"loss": 0.0018,
"step": 630
},
{
"epoch": 0.7,
"eval_loss": 0.0015326091088354588,
"eval_runtime": 395.8645,
"eval_samples_per_second": 5.052,
"eval_steps_per_second": 0.253,
"step": 630
},
{
"epoch": 0.7111111111111111,
"grad_norm": 0.028873443603515625,
"learning_rate": 1.9216926233717085e-05,
"loss": 0.0013,
"step": 640
},
{
"epoch": 0.7222222222222222,
"grad_norm": 0.014128613285720348,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0021,
"step": 650
},
{
"epoch": 0.7333333333333333,
"grad_norm": 0.020189054310321808,
"learning_rate": 1.6543469682057106e-05,
"loss": 0.0014,
"step": 660
},
{
"epoch": 0.7333333333333333,
"eval_loss": 0.0015458085108548403,
"eval_runtime": 395.6978,
"eval_samples_per_second": 5.054,
"eval_steps_per_second": 0.253,
"step": 660
},
{
"epoch": 0.7444444444444445,
"grad_norm": 0.017600564286112785,
"learning_rate": 1.526708147705013e-05,
"loss": 0.0014,
"step": 670
},
{
"epoch": 0.7555555555555555,
"grad_norm": 0.021280810236930847,
"learning_rate": 1.4033009983067452e-05,
"loss": 0.0015,
"step": 680
},
{
"epoch": 0.7666666666666667,
"grad_norm": 0.04216349124908447,
"learning_rate": 1.2842758726130283e-05,
"loss": 0.0015,
"step": 690
},
{
"epoch": 0.7666666666666667,
"eval_loss": 0.0015286254929378629,
"eval_runtime": 396.6873,
"eval_samples_per_second": 5.042,
"eval_steps_per_second": 0.252,
"step": 690
},
{
"epoch": 0.7777777777777778,
"grad_norm": 0.02726539596915245,
"learning_rate": 1.1697777844051105e-05,
"loss": 0.0017,
"step": 700
},
{
"epoch": 0.7888888888888889,
"grad_norm": 0.03630968928337097,
"learning_rate": 1.0599462319663905e-05,
"loss": 0.0013,
"step": 710
},
{
"epoch": 0.8,
"grad_norm": 0.035689592361450195,
"learning_rate": 9.549150281252633e-06,
"loss": 0.0013,
"step": 720
},
{
"epoch": 0.8,
"eval_loss": 0.0014311277773231268,
"eval_runtime": 395.8604,
"eval_samples_per_second": 5.052,
"eval_steps_per_second": 0.253,
"step": 720
},
{
"epoch": 0.8111111111111111,
"grad_norm": 0.02306349016726017,
"learning_rate": 8.548121372247918e-06,
"loss": 0.0015,
"step": 730
},
{
"epoch": 0.8222222222222222,
"grad_norm": 0.047643404453992844,
"learning_rate": 7.597595192178702e-06,
"loss": 0.0013,
"step": 740
},
{
"epoch": 0.8333333333333334,
"grad_norm": 0.019335538148880005,
"learning_rate": 6.698729810778065e-06,
"loss": 0.0014,
"step": 750
},
{
"epoch": 0.8333333333333334,
"eval_loss": 0.0013907089596614242,
"eval_runtime": 392.5143,
"eval_samples_per_second": 5.095,
"eval_steps_per_second": 0.255,
"step": 750
},
{
"epoch": 0.8444444444444444,
"grad_norm": 0.03517748415470123,
"learning_rate": 5.852620357053651e-06,
"loss": 0.0013,
"step": 760
},
{
"epoch": 0.8555555555555555,
"grad_norm": 0.019817780703306198,
"learning_rate": 5.060297685041659e-06,
"loss": 0.0018,
"step": 770
},
{
"epoch": 0.8666666666666667,
"grad_norm": 0.035485655069351196,
"learning_rate": 4.322727117869951e-06,
"loss": 0.0017,
"step": 780
},
{
"epoch": 0.8666666666666667,
"eval_loss": 0.0013683486031368375,
"eval_runtime": 394.086,
"eval_samples_per_second": 5.075,
"eval_steps_per_second": 0.254,
"step": 780
},
{
"epoch": 0.8777777777777778,
"grad_norm": 0.025880370289087296,
"learning_rate": 3.6408072716606346e-06,
"loss": 0.0011,
"step": 790
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.019467687234282494,
"learning_rate": 3.0153689607045845e-06,
"loss": 0.0014,
"step": 800
},
{
"epoch": 0.9,
"grad_norm": 0.037629082798957825,
"learning_rate": 2.4471741852423237e-06,
"loss": 0.0016,
"step": 810
},
{
"epoch": 0.9,
"eval_loss": 0.0013409304665401578,
"eval_runtime": 394.4753,
"eval_samples_per_second": 5.07,
"eval_steps_per_second": 0.254,
"step": 810
},
{
"epoch": 0.9111111111111111,
"grad_norm": 0.017934681847691536,
"learning_rate": 1.9369152030840556e-06,
"loss": 0.0011,
"step": 820
},
{
"epoch": 0.9222222222222223,
"grad_norm": 0.023800263181328773,
"learning_rate": 1.4852136862001764e-06,
"loss": 0.0011,
"step": 830
},
{
"epoch": 0.9333333333333333,
"grad_norm": 0.03997348994016647,
"learning_rate": 1.0926199633097157e-06,
"loss": 0.0017,
"step": 840
},
{
"epoch": 0.9333333333333333,
"eval_loss": 0.001337591209448874,
"eval_runtime": 392.7054,
"eval_samples_per_second": 5.093,
"eval_steps_per_second": 0.255,
"step": 840
},
{
"epoch": 0.9444444444444444,
"grad_norm": 0.019138654693961143,
"learning_rate": 7.596123493895991e-07,
"loss": 0.0018,
"step": 850
},
{
"epoch": 0.9555555555555556,
"grad_norm": 0.03981081023812294,
"learning_rate": 4.865965629214819e-07,
"loss": 0.0013,
"step": 860
},
{
"epoch": 0.9666666666666667,
"grad_norm": 0.053155794739723206,
"learning_rate": 2.7390523158633554e-07,
"loss": 0.0011,
"step": 870
},
{
"epoch": 0.9666666666666667,
"eval_loss": 0.0013414380373433232,
"eval_runtime": 396.5291,
"eval_samples_per_second": 5.044,
"eval_steps_per_second": 0.252,
"step": 870
},
{
"epoch": 0.9777777777777777,
"grad_norm": 0.038830071687698364,
"learning_rate": 1.2179748700879012e-07,
"loss": 0.0014,
"step": 880
},
{
"epoch": 0.9888888888888889,
"grad_norm": 0.021814431995153427,
"learning_rate": 3.04586490452119e-08,
"loss": 0.0011,
"step": 890
},
{
"epoch": 1.0,
"grad_norm": 0.03686220571398735,
"learning_rate": 0.0,
"loss": 0.0015,
"step": 900
},
{
"epoch": 1.0,
"eval_loss": 0.001334603875875473,
"eval_runtime": 396.2672,
"eval_samples_per_second": 5.047,
"eval_steps_per_second": 0.252,
"step": 900
},
{
"epoch": 1.0,
"step": 900,
"total_flos": 1.6603645655625236e+18,
"train_loss": 0.007967362246579594,
"train_runtime": 23186.8308,
"train_samples_per_second": 0.776,
"train_steps_per_second": 0.039
}
],
"logging_steps": 10,
"max_steps": 900,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.6603645655625236e+18,
"train_batch_size": 10,
"trial_name": null,
"trial_params": null
}