{
"best_metric": 1.5205299854278564,
"best_model_checkpoint": "led-large-annual-report-QLoRA-fine-tuned-v0.9.5-openai\\checkpoint-975",
"epoch": 2.028,
"eval_steps": 25,
"global_step": 1014,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01,
"grad_norm": 0.5776566863059998,
"learning_rate": 0.00019959946595460615,
"loss": 2.9761,
"step": 5
},
{
"epoch": 0.02,
"grad_norm": 0.7108611464500427,
"learning_rate": 0.00019893190921228305,
"loss": 2.5028,
"step": 10
},
{
"epoch": 0.03,
"grad_norm": 0.6665928959846497,
"learning_rate": 0.0001983978638184246,
"loss": 2.7385,
"step": 15
},
{
"epoch": 0.04,
"grad_norm": 0.6933648586273193,
"learning_rate": 0.00019773030707610146,
"loss": 2.5519,
"step": 20
},
{
"epoch": 0.05,
"grad_norm": 0.6314701437950134,
"learning_rate": 0.0001970627503337784,
"loss": 2.2413,
"step": 25
},
{
"epoch": 0.05,
"eval_loss": 2.0085484981536865,
"eval_runtime": 157.7525,
"eval_samples_per_second": 0.33,
"eval_steps_per_second": 0.165,
"step": 25
},
{
"epoch": 0.06,
"grad_norm": 2.063861131668091,
"learning_rate": 0.0001963951935914553,
"loss": 2.6807,
"step": 30
},
{
"epoch": 0.07,
"grad_norm": 0.7584943175315857,
"learning_rate": 0.0001957276368491322,
"loss": 2.1435,
"step": 35
},
{
"epoch": 0.08,
"grad_norm": 0.650688886642456,
"learning_rate": 0.0001950600801068091,
"loss": 2.0311,
"step": 40
},
{
"epoch": 0.09,
"grad_norm": 0.6848713159561157,
"learning_rate": 0.000194392523364486,
"loss": 2.0928,
"step": 45
},
{
"epoch": 0.1,
"grad_norm": 0.915006697177887,
"learning_rate": 0.0001937249666221629,
"loss": 2.2454,
"step": 50
},
{
"epoch": 0.1,
"eval_loss": 1.8175970315933228,
"eval_runtime": 160.1948,
"eval_samples_per_second": 0.325,
"eval_steps_per_second": 0.162,
"step": 50
},
{
"epoch": 0.11,
"grad_norm": 0.63294917345047,
"learning_rate": 0.0001930574098798398,
"loss": 1.8694,
"step": 55
},
{
"epoch": 0.12,
"grad_norm": 0.6055558323860168,
"learning_rate": 0.0001923898531375167,
"loss": 2.0452,
"step": 60
},
{
"epoch": 0.13,
"grad_norm": 0.7194655537605286,
"learning_rate": 0.0001917222963951936,
"loss": 1.9801,
"step": 65
},
{
"epoch": 0.14,
"grad_norm": 0.7426390051841736,
"learning_rate": 0.00019105473965287052,
"loss": 1.9021,
"step": 70
},
{
"epoch": 0.15,
"grad_norm": 1.0124174356460571,
"learning_rate": 0.0001903871829105474,
"loss": 1.8887,
"step": 75
},
{
"epoch": 0.15,
"eval_loss": 1.7658432722091675,
"eval_runtime": 157.6865,
"eval_samples_per_second": 0.33,
"eval_steps_per_second": 0.165,
"step": 75
},
{
"epoch": 0.16,
"grad_norm": 0.6850053071975708,
"learning_rate": 0.00018971962616822432,
"loss": 1.9158,
"step": 80
},
{
"epoch": 0.17,
"grad_norm": 0.9443833827972412,
"learning_rate": 0.00018905206942590122,
"loss": 1.9566,
"step": 85
},
{
"epoch": 0.18,
"grad_norm": 0.9081319570541382,
"learning_rate": 0.00018838451268357812,
"loss": 1.6911,
"step": 90
},
{
"epoch": 0.19,
"grad_norm": 1.0233992338180542,
"learning_rate": 0.00018771695594125502,
"loss": 1.9792,
"step": 95
},
{
"epoch": 0.2,
"grad_norm": 0.8909695148468018,
"learning_rate": 0.00018704939919893192,
"loss": 2.096,
"step": 100
},
{
"epoch": 0.2,
"eval_loss": 1.723463773727417,
"eval_runtime": 155.3531,
"eval_samples_per_second": 0.335,
"eval_steps_per_second": 0.167,
"step": 100
},
{
"epoch": 0.21,
"grad_norm": 0.6819004416465759,
"learning_rate": 0.00018638184245660882,
"loss": 1.9699,
"step": 105
},
{
"epoch": 0.22,
"grad_norm": 1.0499932765960693,
"learning_rate": 0.00018571428571428572,
"loss": 1.4622,
"step": 110
},
{
"epoch": 0.23,
"grad_norm": 1.1806385517120361,
"learning_rate": 0.00018504672897196262,
"loss": 2.0254,
"step": 115
},
{
"epoch": 0.24,
"grad_norm": 0.8019367456436157,
"learning_rate": 0.00018437917222963952,
"loss": 1.7028,
"step": 120
},
{
"epoch": 0.25,
"grad_norm": 0.5862939953804016,
"learning_rate": 0.00018371161548731644,
"loss": 1.8961,
"step": 125
},
{
"epoch": 0.25,
"eval_loss": 1.6857578754425049,
"eval_runtime": 155.2512,
"eval_samples_per_second": 0.335,
"eval_steps_per_second": 0.167,
"step": 125
},
{
"epoch": 0.26,
"grad_norm": 0.8203465342521667,
"learning_rate": 0.00018304405874499332,
"loss": 1.695,
"step": 130
},
{
"epoch": 0.27,
"grad_norm": 0.9199301600456238,
"learning_rate": 0.00018237650200267024,
"loss": 1.6875,
"step": 135
},
{
"epoch": 0.28,
"grad_norm": 1.0262317657470703,
"learning_rate": 0.00018170894526034714,
"loss": 1.9771,
"step": 140
},
{
"epoch": 0.29,
"grad_norm": 0.6566195487976074,
"learning_rate": 0.00018104138851802404,
"loss": 1.794,
"step": 145
},
{
"epoch": 0.3,
"grad_norm": 0.6329429745674133,
"learning_rate": 0.00018037383177570094,
"loss": 1.4936,
"step": 150
},
{
"epoch": 0.3,
"eval_loss": 1.6933786869049072,
"eval_runtime": 154.938,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 150
},
{
"epoch": 0.31,
"grad_norm": 1.1340160369873047,
"learning_rate": 0.00017970627503337784,
"loss": 1.9533,
"step": 155
},
{
"epoch": 0.32,
"grad_norm": 0.8708927631378174,
"learning_rate": 0.00017903871829105474,
"loss": 1.8068,
"step": 160
},
{
"epoch": 0.33,
"grad_norm": 0.7619530558586121,
"learning_rate": 0.00017837116154873167,
"loss": 1.6662,
"step": 165
},
{
"epoch": 0.34,
"grad_norm": 0.6751542687416077,
"learning_rate": 0.00017770360480640854,
"loss": 1.7322,
"step": 170
},
{
"epoch": 0.35,
"grad_norm": 1.106812596321106,
"learning_rate": 0.00017703604806408544,
"loss": 1.9186,
"step": 175
},
{
"epoch": 0.35,
"eval_loss": 1.7012258768081665,
"eval_runtime": 155.0194,
"eval_samples_per_second": 0.335,
"eval_steps_per_second": 0.168,
"step": 175
},
{
"epoch": 0.36,
"grad_norm": 0.7777801752090454,
"learning_rate": 0.00017636849132176237,
"loss": 1.7517,
"step": 180
},
{
"epoch": 0.37,
"grad_norm": 2.1003479957580566,
"learning_rate": 0.00017570093457943927,
"loss": 1.8307,
"step": 185
},
{
"epoch": 0.38,
"grad_norm": 0.6046275496482849,
"learning_rate": 0.00017503337783711614,
"loss": 1.5722,
"step": 190
},
{
"epoch": 0.39,
"grad_norm": 0.6874721050262451,
"learning_rate": 0.00017436582109479307,
"loss": 1.5634,
"step": 195
},
{
"epoch": 0.4,
"grad_norm": 0.6428207159042358,
"learning_rate": 0.00017369826435246997,
"loss": 1.8532,
"step": 200
},
{
"epoch": 0.4,
"eval_loss": 1.669812560081482,
"eval_runtime": 154.6164,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 200
},
{
"epoch": 0.41,
"grad_norm": 0.7773897051811218,
"learning_rate": 0.00017303070761014687,
"loss": 1.591,
"step": 205
},
{
"epoch": 0.42,
"grad_norm": 0.6522992849349976,
"learning_rate": 0.00017236315086782377,
"loss": 1.6156,
"step": 210
},
{
"epoch": 0.43,
"grad_norm": 0.6386374831199646,
"learning_rate": 0.00017169559412550067,
"loss": 1.5089,
"step": 215
},
{
"epoch": 0.44,
"grad_norm": 0.5616068840026855,
"learning_rate": 0.0001710280373831776,
"loss": 1.3961,
"step": 220
},
{
"epoch": 0.45,
"grad_norm": 0.6595784425735474,
"learning_rate": 0.00017036048064085447,
"loss": 1.6172,
"step": 225
},
{
"epoch": 0.45,
"eval_loss": 1.6555695533752441,
"eval_runtime": 154.8952,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 225
},
{
"epoch": 0.46,
"grad_norm": 0.5686245560646057,
"learning_rate": 0.00016969292389853137,
"loss": 1.7398,
"step": 230
},
{
"epoch": 0.47,
"grad_norm": 0.5429339408874512,
"learning_rate": 0.0001690253671562083,
"loss": 1.7083,
"step": 235
},
{
"epoch": 0.48,
"grad_norm": 0.5100717544555664,
"learning_rate": 0.0001683578104138852,
"loss": 1.803,
"step": 240
},
{
"epoch": 0.49,
"grad_norm": 0.8772192597389221,
"learning_rate": 0.00016769025367156207,
"loss": 1.7454,
"step": 245
},
{
"epoch": 0.5,
"grad_norm": 0.696287989616394,
"learning_rate": 0.000167022696929239,
"loss": 1.7838,
"step": 250
},
{
"epoch": 0.5,
"eval_loss": 1.6382060050964355,
"eval_runtime": 154.9708,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 250
},
{
"epoch": 0.51,
"grad_norm": 0.7773283123970032,
"learning_rate": 0.0001663551401869159,
"loss": 1.7747,
"step": 255
},
{
"epoch": 0.52,
"grad_norm": 0.6426253318786621,
"learning_rate": 0.0001656875834445928,
"loss": 1.6671,
"step": 260
},
{
"epoch": 0.53,
"grad_norm": 0.5842812061309814,
"learning_rate": 0.0001650200267022697,
"loss": 1.8503,
"step": 265
},
{
"epoch": 0.54,
"grad_norm": 0.5060431361198425,
"learning_rate": 0.0001643524699599466,
"loss": 1.9078,
"step": 270
},
{
"epoch": 0.55,
"grad_norm": 0.7145740985870361,
"learning_rate": 0.00016368491321762352,
"loss": 2.0422,
"step": 275
},
{
"epoch": 0.55,
"eval_loss": 1.6412526369094849,
"eval_runtime": 154.8473,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 275
},
{
"epoch": 0.56,
"grad_norm": 0.6752652525901794,
"learning_rate": 0.0001630173564753004,
"loss": 1.6336,
"step": 280
},
{
"epoch": 0.57,
"grad_norm": 0.7111806869506836,
"learning_rate": 0.0001623497997329773,
"loss": 1.6739,
"step": 285
},
{
"epoch": 0.58,
"grad_norm": 0.6902967691421509,
"learning_rate": 0.00016168224299065422,
"loss": 1.7994,
"step": 290
},
{
"epoch": 0.59,
"grad_norm": 0.5196375250816345,
"learning_rate": 0.00016101468624833112,
"loss": 1.5847,
"step": 295
},
{
"epoch": 0.6,
"grad_norm": 0.9794080853462219,
"learning_rate": 0.000160347129506008,
"loss": 1.8875,
"step": 300
},
{
"epoch": 0.6,
"eval_loss": 1.6254016160964966,
"eval_runtime": 154.7578,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 300
},
{
"epoch": 0.61,
"grad_norm": 0.5826653242111206,
"learning_rate": 0.00015967957276368492,
"loss": 1.6773,
"step": 305
},
{
"epoch": 0.62,
"grad_norm": 0.5551023483276367,
"learning_rate": 0.00015901201602136182,
"loss": 1.685,
"step": 310
},
{
"epoch": 0.63,
"grad_norm": 0.6069286465644836,
"learning_rate": 0.00015834445927903872,
"loss": 1.7293,
"step": 315
},
{
"epoch": 0.64,
"grad_norm": 0.8002230525016785,
"learning_rate": 0.00015767690253671562,
"loss": 1.8484,
"step": 320
},
{
"epoch": 0.65,
"grad_norm": 0.7304033041000366,
"learning_rate": 0.00015700934579439252,
"loss": 1.687,
"step": 325
},
{
"epoch": 0.65,
"eval_loss": 1.605177402496338,
"eval_runtime": 154.868,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 325
},
{
"epoch": 0.66,
"grad_norm": 0.46438586711883545,
"learning_rate": 0.00015634178905206945,
"loss": 1.5457,
"step": 330
},
{
"epoch": 0.67,
"grad_norm": 1.024244785308838,
"learning_rate": 0.00015567423230974635,
"loss": 1.6972,
"step": 335
},
{
"epoch": 0.68,
"grad_norm": 0.506373941898346,
"learning_rate": 0.00015500667556742322,
"loss": 1.4549,
"step": 340
},
{
"epoch": 0.69,
"grad_norm": 0.7029207944869995,
"learning_rate": 0.00015433911882510015,
"loss": 1.6963,
"step": 345
},
{
"epoch": 0.7,
"grad_norm": 0.5588846802711487,
"learning_rate": 0.00015367156208277705,
"loss": 1.6778,
"step": 350
},
{
"epoch": 0.7,
"eval_loss": 1.6053338050842285,
"eval_runtime": 154.828,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 350
},
{
"epoch": 0.71,
"grad_norm": 0.5997027158737183,
"learning_rate": 0.00015300400534045395,
"loss": 1.5144,
"step": 355
},
{
"epoch": 0.72,
"grad_norm": 0.5491402745246887,
"learning_rate": 0.00015233644859813085,
"loss": 1.6086,
"step": 360
},
{
"epoch": 0.73,
"grad_norm": 0.6617192625999451,
"learning_rate": 0.00015166889185580775,
"loss": 1.5582,
"step": 365
},
{
"epoch": 0.74,
"grad_norm": 0.6810077428817749,
"learning_rate": 0.00015100133511348465,
"loss": 1.7498,
"step": 370
},
{
"epoch": 0.75,
"grad_norm": 0.6304115653038025,
"learning_rate": 0.00015033377837116155,
"loss": 1.5276,
"step": 375
},
{
"epoch": 0.75,
"eval_loss": 1.5960314273834229,
"eval_runtime": 154.8615,
"eval_samples_per_second": 0.336,
"eval_steps_per_second": 0.168,
"step": 375
},
{
"epoch": 0.76,
"grad_norm": 0.6610834002494812,
"learning_rate": 0.00014966622162883845,
"loss": 1.5969,
"step": 380
},
{
"epoch": 0.77,
"grad_norm": 0.8158746957778931,
"learning_rate": 0.00014899866488651538,
"loss": 1.5529,
"step": 385
},
{
"epoch": 0.78,
"grad_norm": 0.519496500492096,
"learning_rate": 0.00014833110814419228,
"loss": 1.6754,
"step": 390
},
{
"epoch": 0.79,
"grad_norm": 0.6644089221954346,
"learning_rate": 0.00014766355140186915,
"loss": 1.7254,
"step": 395
},
{
"epoch": 0.8,
"grad_norm": 0.7224368453025818,
"learning_rate": 0.00014699599465954608,
"loss": 1.4391,
"step": 400
},
{
"epoch": 0.8,
"eval_loss": 1.5978156328201294,
"eval_runtime": 153.9476,
"eval_samples_per_second": 0.338,
"eval_steps_per_second": 0.169,
"step": 400
},
{
"epoch": 0.81,
"grad_norm": 0.6566179394721985,
"learning_rate": 0.00014632843791722298,
"loss": 1.5994,
"step": 405
},
{
"epoch": 0.82,
"grad_norm": 1.0222816467285156,
"learning_rate": 0.00014566088117489988,
"loss": 1.4374,
"step": 410
},
{
"epoch": 0.83,
"grad_norm": 0.5965130925178528,
"learning_rate": 0.00014499332443257678,
"loss": 1.5863,
"step": 415
},
{
"epoch": 0.84,
"grad_norm": 0.5560926795005798,
"learning_rate": 0.00014432576769025368,
"loss": 1.5581,
"step": 420
},
{
"epoch": 0.85,
"grad_norm": 0.7057550549507141,
"learning_rate": 0.00014365821094793058,
"loss": 1.7263,
"step": 425
},
{
"epoch": 0.85,
"eval_loss": 1.6051459312438965,
"eval_runtime": 153.7143,
"eval_samples_per_second": 0.338,
"eval_steps_per_second": 0.169,
"step": 425
},
{
"epoch": 0.86,
"grad_norm": 1.132186770439148,
"learning_rate": 0.00014299065420560748,
"loss": 1.6684,
"step": 430
},
{
"epoch": 0.87,
"grad_norm": 0.5418440103530884,
"learning_rate": 0.00014232309746328438,
"loss": 1.6537,
"step": 435
},
{
"epoch": 0.88,
"grad_norm": 0.7945486903190613,
"learning_rate": 0.0001416555407209613,
"loss": 1.5685,
"step": 440
},
{
"epoch": 0.89,
"grad_norm": 0.6951822638511658,
"learning_rate": 0.0001409879839786382,
"loss": 1.2469,
"step": 445
},
{
"epoch": 0.9,
"grad_norm": 0.6232516765594482,
"learning_rate": 0.00014032042723631508,
"loss": 1.6128,
"step": 450
},
{
"epoch": 0.9,
"eval_loss": 1.5824800729751587,
"eval_runtime": 153.4919,
"eval_samples_per_second": 0.339,
"eval_steps_per_second": 0.169,
"step": 450
},
{
"epoch": 0.91,
"grad_norm": 0.5635793209075928,
"learning_rate": 0.000139652870493992,
"loss": 1.4892,
"step": 455
},
{
"epoch": 0.92,
"grad_norm": 0.7134138941764832,
"learning_rate": 0.0001389853137516689,
"loss": 1.6564,
"step": 460
},
{
"epoch": 0.93,
"grad_norm": 1.0067154169082642,
"learning_rate": 0.0001383177570093458,
"loss": 1.8738,
"step": 465
},
{
"epoch": 0.94,
"grad_norm": 0.7665852904319763,
"learning_rate": 0.0001376502002670227,
"loss": 1.7151,
"step": 470
},
{
"epoch": 0.95,
"grad_norm": 0.5606282949447632,
"learning_rate": 0.0001369826435246996,
"loss": 1.7477,
"step": 475
},
{
"epoch": 0.95,
"eval_loss": 1.5813050270080566,
"eval_runtime": 153.7414,
"eval_samples_per_second": 0.338,
"eval_steps_per_second": 0.169,
"step": 475
},
{
"epoch": 0.96,
"grad_norm": 0.48252391815185547,
"learning_rate": 0.0001363150867823765,
"loss": 1.6785,
"step": 480
},
{
"epoch": 0.97,
"grad_norm": 0.7147005200386047,
"learning_rate": 0.0001356475300400534,
"loss": 1.5719,
"step": 485
},
{
"epoch": 0.98,
"grad_norm": 0.5606821775436401,
"learning_rate": 0.0001349799732977303,
"loss": 1.6884,
"step": 490
},
{
"epoch": 0.99,
"grad_norm": 0.6102766394615173,
"learning_rate": 0.0001343124165554072,
"loss": 1.5512,
"step": 495
},
{
"epoch": 1.0,
"grad_norm": 0.7451562285423279,
"learning_rate": 0.00013364485981308413,
"loss": 1.5853,
"step": 500
},
{
"epoch": 1.0,
"eval_loss": 1.5768074989318848,
"eval_runtime": 153.3623,
"eval_samples_per_second": 0.339,
"eval_steps_per_second": 0.17,
"step": 500
},
{
"epoch": 1.01,
"grad_norm": 0.6127219200134277,
"learning_rate": 0.000132977303070761,
"loss": 1.5375,
"step": 505
},
{
"epoch": 1.02,
"grad_norm": 0.6121686100959778,
"learning_rate": 0.00013230974632843793,
"loss": 1.4151,
"step": 510
},
{
"epoch": 1.03,
"grad_norm": 0.8484262824058533,
"learning_rate": 0.00013164218958611483,
"loss": 1.6334,
"step": 515
},
{
"epoch": 1.04,
"grad_norm": 1.115262508392334,
"learning_rate": 0.00013097463284379173,
"loss": 1.5492,
"step": 520
},
{
"epoch": 1.05,
"grad_norm": 0.6433502435684204,
"learning_rate": 0.00013030707610146863,
"loss": 1.54,
"step": 525
},
{
"epoch": 1.05,
"eval_loss": 1.5698615312576294,
"eval_runtime": 154.4611,
"eval_samples_per_second": 0.337,
"eval_steps_per_second": 0.168,
"step": 525
},
{
"epoch": 1.06,
"grad_norm": 0.6583875417709351,
"learning_rate": 0.00012963951935914553,
"loss": 1.5267,
"step": 530
},
{
"epoch": 1.07,
"grad_norm": 0.7508483529090881,
"learning_rate": 0.00012897196261682243,
"loss": 1.7802,
"step": 535
},
{
"epoch": 1.08,
"grad_norm": 0.9875255227088928,
"learning_rate": 0.00012830440587449936,
"loss": 1.4288,
"step": 540
},
{
"epoch": 1.09,
"grad_norm": 0.5857439637184143,
"learning_rate": 0.00012763684913217623,
"loss": 1.4952,
"step": 545
},
{
"epoch": 1.1,
"grad_norm": 0.4867228865623474,
"learning_rate": 0.00012696929238985313,
"loss": 1.3511,
"step": 550
},
{
"epoch": 1.1,
"eval_loss": 1.5703368186950684,
"eval_runtime": 155.0663,
"eval_samples_per_second": 0.335,
"eval_steps_per_second": 0.168,
"step": 550
},
{
"epoch": 1.11,
"grad_norm": 1.0664533376693726,
"learning_rate": 0.00012630173564753006,
"loss": 1.5299,
"step": 555
},
{
"epoch": 1.12,
"grad_norm": 0.5459120869636536,
"learning_rate": 0.00012563417890520696,
"loss": 1.6358,
"step": 560
},
{
"epoch": 1.13,
"grad_norm": 0.7361356616020203,
"learning_rate": 0.00012496662216288386,
"loss": 1.1664,
"step": 565
},
{
"epoch": 1.1400000000000001,
"grad_norm": 0.6312634348869324,
"learning_rate": 0.00012429906542056076,
"loss": 1.4422,
"step": 570
},
{
"epoch": 1.15,
"grad_norm": 0.6251769065856934,
"learning_rate": 0.00012363150867823766,
"loss": 1.4539,
"step": 575
},
{
"epoch": 1.15,
"eval_loss": 1.582599401473999,
"eval_runtime": 161.8119,
"eval_samples_per_second": 0.321,
"eval_steps_per_second": 0.161,
"step": 575
},
{
"epoch": 1.16,
"grad_norm": 0.7663933634757996,
"learning_rate": 0.00012296395193591456,
"loss": 1.4286,
"step": 580
},
{
"epoch": 1.17,
"grad_norm": 0.5115808844566345,
"learning_rate": 0.00012229639519359146,
"loss": 1.5661,
"step": 585
},
{
"epoch": 1.18,
"grad_norm": 0.5785268545150757,
"learning_rate": 0.00012162883845126836,
"loss": 1.6004,
"step": 590
},
{
"epoch": 1.19,
"grad_norm": 1.1874048709869385,
"learning_rate": 0.00012096128170894527,
"loss": 1.4923,
"step": 595
},
{
"epoch": 1.2,
"grad_norm": 0.708734393119812,
"learning_rate": 0.00012029372496662217,
"loss": 1.6751,
"step": 600
},
{
"epoch": 1.2,
"eval_loss": 1.568034291267395,
"eval_runtime": 157.401,
"eval_samples_per_second": 0.33,
"eval_steps_per_second": 0.165,
"step": 600
},
{
"epoch": 1.21,
"grad_norm": 0.9826223850250244,
"learning_rate": 0.00011962616822429906,
"loss": 1.5187,
"step": 605
},
{
"epoch": 1.22,
"grad_norm": 0.5815021991729736,
"learning_rate": 0.00011895861148197598,
"loss": 1.3991,
"step": 610
},
{
"epoch": 1.23,
"grad_norm": 0.7048662304878235,
"learning_rate": 0.00011829105473965287,
"loss": 1.316,
"step": 615
},
{
"epoch": 1.24,
"grad_norm": 0.667458176612854,
"learning_rate": 0.00011762349799732977,
"loss": 1.6166,
"step": 620
},
{
"epoch": 1.25,
"grad_norm": 0.7350333333015442,
"learning_rate": 0.00011695594125500668,
"loss": 1.5397,
"step": 625
},
{
"epoch": 1.25,
"eval_loss": 1.5718026161193848,
"eval_runtime": 156.5658,
"eval_samples_per_second": 0.332,
"eval_steps_per_second": 0.166,
"step": 625
},
{
"epoch": 1.26,
"grad_norm": 0.6255879402160645,
"learning_rate": 0.00011628838451268358,
"loss": 1.3613,
"step": 630
},
{
"epoch": 1.27,
"grad_norm": 0.6177966594696045,
"learning_rate": 0.0001156208277703605,
"loss": 1.2452,
"step": 635
},
{
"epoch": 1.28,
"grad_norm": 0.5699389576911926,
"learning_rate": 0.0001149532710280374,
"loss": 1.2252,
"step": 640
},
{
"epoch": 1.29,
"grad_norm": 0.8304943442344666,
"learning_rate": 0.00011428571428571428,
"loss": 1.371,
"step": 645
},
{
"epoch": 1.3,
"grad_norm": 0.8348840475082397,
"learning_rate": 0.0001136181575433912,
"loss": 1.5561,
"step": 650
},
{
"epoch": 1.3,
"eval_loss": 1.5528948307037354,
"eval_runtime": 155.2207,
"eval_samples_per_second": 0.335,
"eval_steps_per_second": 0.168,
"step": 650
},
{
"epoch": 1.31,
"grad_norm": 0.6180741786956787,
"learning_rate": 0.0001129506008010681,
"loss": 1.3885,
"step": 655
},
{
"epoch": 1.32,
"grad_norm": 0.9591236114501953,
"learning_rate": 0.000112283044058745,
"loss": 1.8704,
"step": 660
},
{
"epoch": 1.33,
"grad_norm": 0.5324267148971558,
"learning_rate": 0.00011161548731642191,
"loss": 1.3346,
"step": 665
},
{
"epoch": 1.34,
"grad_norm": 0.519713819026947,
"learning_rate": 0.0001109479305740988,
"loss": 1.553,
"step": 670
},
{
"epoch": 1.35,
"grad_norm": 0.7792118191719055,
"learning_rate": 0.0001102803738317757,
"loss": 1.4646,
"step": 675
},
{
"epoch": 1.35,
"eval_loss": 1.5499815940856934,
"eval_runtime": 157.3901,
"eval_samples_per_second": 0.33,
"eval_steps_per_second": 0.165,
"step": 675
},
{
"epoch": 1.3599999999999999,
"grad_norm": 0.7332251667976379,
"learning_rate": 0.00010961281708945261,
"loss": 1.5366,
"step": 680
},
{
"epoch": 1.37,
"grad_norm": 1.0482912063598633,
"learning_rate": 0.00010894526034712951,
"loss": 1.7866,
"step": 685
},
{
"epoch": 1.38,
"grad_norm": 0.6060436964035034,
"learning_rate": 0.00010827770360480642,
"loss": 1.3853,
"step": 690
},
{
"epoch": 1.3900000000000001,
"grad_norm": 0.6550512313842773,
"learning_rate": 0.00010761014686248332,
"loss": 1.765,
"step": 695
},
{
"epoch": 1.4,
"grad_norm": 0.8347830176353455,
"learning_rate": 0.00010694259012016021,
"loss": 1.4687,
"step": 700
},
{
"epoch": 1.4,
"eval_loss": 1.5612692832946777,
"eval_runtime": 166.6597,
"eval_samples_per_second": 0.312,
"eval_steps_per_second": 0.156,
"step": 700
},
{
"epoch": 1.41,
"grad_norm": 0.6563287973403931,
"learning_rate": 0.00010627503337783712,
"loss": 1.3366,
"step": 705
},
{
"epoch": 1.42,
"grad_norm": 0.6529929637908936,
"learning_rate": 0.00010560747663551402,
"loss": 1.6132,
"step": 710
},
{
"epoch": 1.43,
"grad_norm": 0.504224956035614,
"learning_rate": 0.00010493991989319092,
"loss": 1.4062,
"step": 715
},
{
"epoch": 1.44,
"grad_norm": 0.5341633558273315,
"learning_rate": 0.00010427236315086784,
"loss": 1.3092,
"step": 720
},
{
"epoch": 1.45,
"grad_norm": 0.8322250843048096,
"learning_rate": 0.00010360480640854472,
"loss": 1.4273,
"step": 725
},
{
"epoch": 1.45,
"eval_loss": 1.5545457601547241,
"eval_runtime": 161.785,
"eval_samples_per_second": 0.321,
"eval_steps_per_second": 0.161,
"step": 725
},
{
"epoch": 1.46,
"grad_norm": 0.694214940071106,
"learning_rate": 0.00010293724966622162,
"loss": 1.3338,
"step": 730
},
{
"epoch": 1.47,
"grad_norm": 0.83758944272995,
"learning_rate": 0.00010226969292389854,
"loss": 1.4813,
"step": 735
},
{
"epoch": 1.48,
"grad_norm": 0.7225409150123596,
"learning_rate": 0.00010160213618157544,
"loss": 1.4857,
"step": 740
},
{
"epoch": 1.49,
"grad_norm": 0.6755008697509766,
"learning_rate": 0.00010093457943925234,
"loss": 1.452,
"step": 745
},
{
"epoch": 1.5,
"grad_norm": 0.8779104351997375,
"learning_rate": 0.00010026702269692925,
"loss": 1.5225,
"step": 750
},
{
"epoch": 1.5,
"eval_loss": 1.5489907264709473,
"eval_runtime": 157.1309,
"eval_samples_per_second": 0.331,
"eval_steps_per_second": 0.165,
"step": 750
},
{
"epoch": 1.51,
"grad_norm": 0.7593478560447693,
"learning_rate": 9.959946595460614e-05,
"loss": 1.7504,
"step": 755
},
{
"epoch": 1.52,
"grad_norm": 0.7336363792419434,
"learning_rate": 9.893190921228305e-05,
"loss": 1.7467,
"step": 760
},
{
"epoch": 1.53,
"grad_norm": 0.6226593852043152,
"learning_rate": 9.826435246995995e-05,
"loss": 1.2953,
"step": 765
},
{
"epoch": 1.54,
"grad_norm": 0.652702808380127,
"learning_rate": 9.759679572763686e-05,
"loss": 1.6913,
"step": 770
},
{
"epoch": 1.55,
"grad_norm": 0.6243285536766052,
"learning_rate": 9.692923898531375e-05,
"loss": 1.5129,
"step": 775
},
{
"epoch": 1.55,
"eval_loss": 1.5401288270950317,
"eval_runtime": 159.6754,
"eval_samples_per_second": 0.326,
"eval_steps_per_second": 0.163,
"step": 775
},
{
"epoch": 1.56,
"grad_norm": 0.5181707739830017,
"learning_rate": 9.626168224299066e-05,
"loss": 1.4,
"step": 780
},
{
"epoch": 1.5699999999999998,
"grad_norm": 0.4900369346141815,
"learning_rate": 9.559412550066756e-05,
"loss": 1.4891,
"step": 785
},
{
"epoch": 1.58,
"grad_norm": 0.7415319085121155,
"learning_rate": 9.492656875834446e-05,
"loss": 1.2679,
"step": 790
},
{
"epoch": 1.5899999999999999,
"grad_norm": 0.6447709798812866,
"learning_rate": 9.425901201602136e-05,
"loss": 1.4957,
"step": 795
},
{
"epoch": 1.6,
"grad_norm": 0.6303768754005432,
"learning_rate": 9.359145527369826e-05,
"loss": 1.4617,
"step": 800
},
{
"epoch": 1.6,
"eval_loss": 1.5425684452056885,
"eval_runtime": 157.5599,
"eval_samples_per_second": 0.33,
"eval_steps_per_second": 0.165,
"step": 800
},
{
"epoch": 1.6099999999999999,
"grad_norm": 0.6443194150924683,
"learning_rate": 9.292389853137518e-05,
"loss": 1.4427,
"step": 805
},
{
"epoch": 1.62,
"grad_norm": 0.6477059125900269,
"learning_rate": 9.225634178905206e-05,
"loss": 1.3323,
"step": 810
},
{
"epoch": 1.63,
"grad_norm": 1.2460426092147827,
"learning_rate": 9.158878504672898e-05,
"loss": 1.6938,
"step": 815
},
{
"epoch": 1.6400000000000001,
"grad_norm": 0.5094366073608398,
"learning_rate": 9.092122830440588e-05,
"loss": 1.3493,
"step": 820
},
{
"epoch": 1.65,
"grad_norm": 0.6189996004104614,
"learning_rate": 9.025367156208279e-05,
"loss": 1.5123,
"step": 825
},
{
"epoch": 1.65,
"eval_loss": 1.5375796556472778,
"eval_runtime": 156.9638,
"eval_samples_per_second": 0.331,
"eval_steps_per_second": 0.166,
"step": 825
},
{
"epoch": 1.6600000000000001,
"grad_norm": 0.8235365748405457,
"learning_rate": 8.958611481975968e-05,
"loss": 1.3337,
"step": 830
},
{
"epoch": 1.67,
"grad_norm": 0.6116329431533813,
"learning_rate": 8.891855807743659e-05,
"loss": 1.2533,
"step": 835
},
{
"epoch": 1.6800000000000002,
"grad_norm": 0.597701370716095,
"learning_rate": 8.825100133511349e-05,
"loss": 1.4036,
"step": 840
},
{
"epoch": 1.69,
"grad_norm": 0.5337836742401123,
"learning_rate": 8.758344459279039e-05,
"loss": 1.2977,
"step": 845
},
{
"epoch": 1.7,
"grad_norm": 0.5650852918624878,
"learning_rate": 8.691588785046729e-05,
"loss": 1.4909,
"step": 850
},
{
"epoch": 1.7,
"eval_loss": 1.5355829000473022,
"eval_runtime": 156.0274,
"eval_samples_per_second": 0.333,
"eval_steps_per_second": 0.167,
"step": 850
},
{
"epoch": 1.71,
"grad_norm": 0.7378519773483276,
"learning_rate": 8.62483311081442e-05,
"loss": 1.6229,
"step": 855
},
{
"epoch": 1.72,
"grad_norm": 0.6510607600212097,
"learning_rate": 8.55807743658211e-05,
"loss": 1.3392,
"step": 860
},
{
"epoch": 1.73,
"grad_norm": 0.6023868918418884,
"learning_rate": 8.4913217623498e-05,
"loss": 1.5289,
"step": 865
},
{
"epoch": 1.74,
"grad_norm": 0.7228203415870667,
"learning_rate": 8.42456608811749e-05,
"loss": 1.5144,
"step": 870
},
{
"epoch": 1.75,
"grad_norm": 0.7221407294273376,
"learning_rate": 8.35781041388518e-05,
"loss": 1.2201,
"step": 875
},
{
"epoch": 1.75,
"eval_loss": 1.5325042009353638,
"eval_runtime": 157.2229,
"eval_samples_per_second": 0.331,
"eval_steps_per_second": 0.165,
"step": 875
},
{
"epoch": 1.76,
"grad_norm": 0.48412030935287476,
"learning_rate": 8.29105473965287e-05,
"loss": 1.5154,
"step": 880
},
{
"epoch": 1.77,
"grad_norm": 0.7840531468391418,
"learning_rate": 8.22429906542056e-05,
"loss": 1.7819,
"step": 885
},
{
"epoch": 1.78,
"grad_norm": 0.6077267527580261,
"learning_rate": 8.157543391188252e-05,
"loss": 1.4317,
"step": 890
},
{
"epoch": 1.79,
"grad_norm": 0.6933810114860535,
"learning_rate": 8.090787716955942e-05,
"loss": 1.7069,
"step": 895
},
{
"epoch": 1.8,
"grad_norm": 0.7501831650733948,
"learning_rate": 8.024032042723632e-05,
"loss": 1.2877,
"step": 900
},
{
"epoch": 1.8,
"eval_loss": 1.5253818035125732,
"eval_runtime": 156.1433,
"eval_samples_per_second": 0.333,
"eval_steps_per_second": 0.167,
"step": 900
},
{
"epoch": 1.81,
"grad_norm": 0.6081863045692444,
"learning_rate": 7.957276368491322e-05,
"loss": 1.2527,
"step": 905
},
{
"epoch": 1.8199999999999998,
"grad_norm": 0.6956006288528442,
"learning_rate": 7.890520694259013e-05,
"loss": 1.4184,
"step": 910
},
{
"epoch": 1.83,
"grad_norm": 0.5345895886421204,
"learning_rate": 7.823765020026703e-05,
"loss": 1.6186,
"step": 915
},
{
"epoch": 1.8399999999999999,
"grad_norm": 0.682368814945221,
"learning_rate": 7.757009345794393e-05,
"loss": 1.3062,
"step": 920
},
{
"epoch": 1.85,
"grad_norm": 0.7534874677658081,
"learning_rate": 7.690253671562083e-05,
"loss": 1.3955,
"step": 925
},
{
"epoch": 1.85,
"eval_loss": 1.5264804363250732,
"eval_runtime": 157.3189,
"eval_samples_per_second": 0.331,
"eval_steps_per_second": 0.165,
"step": 925
},
{
"epoch": 1.8599999999999999,
"grad_norm": 0.8998626470565796,
"learning_rate": 7.623497997329774e-05,
"loss": 1.6625,
"step": 930
},
{
"epoch": 1.87,
"grad_norm": 0.5620250105857849,
"learning_rate": 7.556742323097463e-05,
"loss": 1.6022,
"step": 935
},
{
"epoch": 1.88,
"grad_norm": 0.656494677066803,
"learning_rate": 7.489986648865154e-05,
"loss": 1.3404,
"step": 940
},
{
"epoch": 1.8900000000000001,
"grad_norm": 1.0331978797912598,
"learning_rate": 7.423230974632844e-05,
"loss": 1.6219,
"step": 945
},
{
"epoch": 1.9,
"grad_norm": 0.7671311497688293,
"learning_rate": 7.356475300400534e-05,
"loss": 1.5102,
"step": 950
},
{
"epoch": 1.9,
"eval_loss": 1.5233145952224731,
"eval_runtime": 156.2243,
"eval_samples_per_second": 0.333,
"eval_steps_per_second": 0.166,
"step": 950
},
{
"epoch": 1.9100000000000001,
"grad_norm": 0.514800488948822,
"learning_rate": 7.289719626168224e-05,
"loss": 1.4231,
"step": 955
},
{
"epoch": 1.92,
"grad_norm": 1.1229217052459717,
"learning_rate": 7.222963951935914e-05,
"loss": 1.3732,
"step": 960
},
{
"epoch": 1.9300000000000002,
"grad_norm": 0.7795677185058594,
"learning_rate": 7.156208277703606e-05,
"loss": 1.242,
"step": 965
},
{
"epoch": 1.94,
"grad_norm": 0.6444641351699829,
"learning_rate": 7.089452603471294e-05,
"loss": 1.437,
"step": 970
},
{
"epoch": 1.95,
"grad_norm": 0.698694109916687,
"learning_rate": 7.022696929238986e-05,
"loss": 1.4972,
"step": 975
},
{
"epoch": 1.95,
"eval_loss": 1.5205299854278564,
"eval_runtime": 157.1769,
"eval_samples_per_second": 0.331,
"eval_steps_per_second": 0.165,
"step": 975
},
{
"epoch": 1.96,
"grad_norm": 0.7859231233596802,
"learning_rate": 6.955941255006676e-05,
"loss": 1.4448,
"step": 980
},
{
"epoch": 1.97,
"grad_norm": 0.6304051876068115,
"learning_rate": 6.889185580774367e-05,
"loss": 1.5132,
"step": 985
},
{
"epoch": 1.98,
"grad_norm": 0.6905663013458252,
"learning_rate": 6.822429906542056e-05,
"loss": 1.5612,
"step": 990
},
{
"epoch": 1.99,
"grad_norm": 0.6258041262626648,
"learning_rate": 6.755674232309747e-05,
"loss": 1.4587,
"step": 995
},
{
"epoch": 2.0,
"grad_norm": 0.7253485918045044,
"learning_rate": 6.688918558077437e-05,
"loss": 1.4498,
"step": 1000
},
{
"epoch": 2.0,
"eval_loss": 1.5251318216323853,
"eval_runtime": 156.2874,
"eval_samples_per_second": 0.333,
"eval_steps_per_second": 0.166,
"step": 1000
},
{
"epoch": 2.01,
"grad_norm": 0.5841794013977051,
"learning_rate": 6.622162883845127e-05,
"loss": 1.5441,
"step": 1005
},
{
"epoch": 2.02,
"grad_norm": 0.7779578566551208,
"learning_rate": 6.555407209612817e-05,
"loss": 1.4619,
"step": 1010
},
{
"epoch": 2.028,
"eval_loss": 1.5253530740737915,
"eval_runtime": 168.5711,
"eval_samples_per_second": 0.308,
"eval_steps_per_second": 0.154,
"step": 1014
}
],
"logging_steps": 5,
"max_steps": 1500,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 25,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.503998201987072e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}