{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 19.23076923076923,
"eval_steps": 1000,
"global_step": 5000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09615384615384616,
"grad_norm": 14.387816429138184,
"learning_rate": 4.6000000000000004e-07,
"loss": 1.0674,
"step": 25
},
{
"epoch": 0.19230769230769232,
"grad_norm": 11.297602653503418,
"learning_rate": 9.600000000000001e-07,
"loss": 0.9654,
"step": 50
},
{
"epoch": 0.28846153846153844,
"grad_norm": 8.031381607055664,
"learning_rate": 1.46e-06,
"loss": 0.7994,
"step": 75
},
{
"epoch": 0.38461538461538464,
"grad_norm": 6.947944641113281,
"learning_rate": 1.9600000000000003e-06,
"loss": 0.5243,
"step": 100
},
{
"epoch": 0.4807692307692308,
"grad_norm": 5.577055931091309,
"learning_rate": 2.46e-06,
"loss": 0.4378,
"step": 125
},
{
"epoch": 0.5769230769230769,
"grad_norm": 5.237943649291992,
"learning_rate": 2.96e-06,
"loss": 0.39,
"step": 150
},
{
"epoch": 0.6730769230769231,
"grad_norm": 5.476370811462402,
"learning_rate": 3.46e-06,
"loss": 0.3755,
"step": 175
},
{
"epoch": 0.7692307692307693,
"grad_norm": 6.1683244705200195,
"learning_rate": 3.96e-06,
"loss": 0.3851,
"step": 200
},
{
"epoch": 0.8653846153846154,
"grad_norm": 5.542613506317139,
"learning_rate": 4.4600000000000005e-06,
"loss": 0.3461,
"step": 225
},
{
"epoch": 0.9615384615384616,
"grad_norm": 7.256478786468506,
"learning_rate": 4.960000000000001e-06,
"loss": 0.3369,
"step": 250
},
{
"epoch": 1.0576923076923077,
"grad_norm": 5.407237529754639,
"learning_rate": 5.460000000000001e-06,
"loss": 0.2975,
"step": 275
},
{
"epoch": 1.1538461538461537,
"grad_norm": 5.3987579345703125,
"learning_rate": 5.9600000000000005e-06,
"loss": 0.2739,
"step": 300
},
{
"epoch": 1.25,
"grad_norm": 4.13232946395874,
"learning_rate": 6.460000000000001e-06,
"loss": 0.2519,
"step": 325
},
{
"epoch": 1.3461538461538463,
"grad_norm": 4.822619438171387,
"learning_rate": 6.96e-06,
"loss": 0.2663,
"step": 350
},
{
"epoch": 1.4423076923076923,
"grad_norm": 4.323586463928223,
"learning_rate": 7.4600000000000006e-06,
"loss": 0.2482,
"step": 375
},
{
"epoch": 1.5384615384615383,
"grad_norm": 4.278296947479248,
"learning_rate": 7.960000000000002e-06,
"loss": 0.2525,
"step": 400
},
{
"epoch": 1.6346153846153846,
"grad_norm": 5.396093368530273,
"learning_rate": 8.46e-06,
"loss": 0.2575,
"step": 425
},
{
"epoch": 1.7307692307692308,
"grad_norm": 4.330433368682861,
"learning_rate": 8.96e-06,
"loss": 0.2502,
"step": 450
},
{
"epoch": 1.8269230769230769,
"grad_norm": 4.134779930114746,
"learning_rate": 9.460000000000001e-06,
"loss": 0.2615,
"step": 475
},
{
"epoch": 1.9230769230769231,
"grad_norm": 4.702469348907471,
"learning_rate": 9.960000000000001e-06,
"loss": 0.2542,
"step": 500
},
{
"epoch": 2.019230769230769,
"grad_norm": 2.5487704277038574,
"learning_rate": 9.94888888888889e-06,
"loss": 0.2502,
"step": 525
},
{
"epoch": 2.1153846153846154,
"grad_norm": 3.5824356079101562,
"learning_rate": 9.893333333333334e-06,
"loss": 0.1373,
"step": 550
},
{
"epoch": 2.2115384615384617,
"grad_norm": 3.7268402576446533,
"learning_rate": 9.837777777777778e-06,
"loss": 0.1367,
"step": 575
},
{
"epoch": 2.3076923076923075,
"grad_norm": 3.243474245071411,
"learning_rate": 9.782222222222222e-06,
"loss": 0.1298,
"step": 600
},
{
"epoch": 2.4038461538461537,
"grad_norm": 3.1370737552642822,
"learning_rate": 9.726666666666668e-06,
"loss": 0.135,
"step": 625
},
{
"epoch": 2.5,
"grad_norm": 2.788139581680298,
"learning_rate": 9.671111111111112e-06,
"loss": 0.1239,
"step": 650
},
{
"epoch": 2.5961538461538463,
"grad_norm": 3.4849274158477783,
"learning_rate": 9.615555555555558e-06,
"loss": 0.1226,
"step": 675
},
{
"epoch": 2.6923076923076925,
"grad_norm": 3.447479009628296,
"learning_rate": 9.56e-06,
"loss": 0.1281,
"step": 700
},
{
"epoch": 2.7884615384615383,
"grad_norm": 3.737367868423462,
"learning_rate": 9.504444444444446e-06,
"loss": 0.121,
"step": 725
},
{
"epoch": 2.8846153846153846,
"grad_norm": 3.7365550994873047,
"learning_rate": 9.44888888888889e-06,
"loss": 0.1284,
"step": 750
},
{
"epoch": 2.980769230769231,
"grad_norm": 4.205287933349609,
"learning_rate": 9.393333333333334e-06,
"loss": 0.1393,
"step": 775
},
{
"epoch": 3.076923076923077,
"grad_norm": 2.6460373401641846,
"learning_rate": 9.33777777777778e-06,
"loss": 0.0834,
"step": 800
},
{
"epoch": 3.173076923076923,
"grad_norm": 2.1553828716278076,
"learning_rate": 9.282222222222222e-06,
"loss": 0.0541,
"step": 825
},
{
"epoch": 3.269230769230769,
"grad_norm": 1.773935079574585,
"learning_rate": 9.226666666666668e-06,
"loss": 0.0559,
"step": 850
},
{
"epoch": 3.3653846153846154,
"grad_norm": 2.047421455383301,
"learning_rate": 9.171111111111112e-06,
"loss": 0.0618,
"step": 875
},
{
"epoch": 3.4615384615384617,
"grad_norm": 2.5151689052581787,
"learning_rate": 9.115555555555556e-06,
"loss": 0.0604,
"step": 900
},
{
"epoch": 3.5576923076923075,
"grad_norm": 1.7141085863113403,
"learning_rate": 9.060000000000001e-06,
"loss": 0.0567,
"step": 925
},
{
"epoch": 3.6538461538461537,
"grad_norm": 2.3757855892181396,
"learning_rate": 9.004444444444445e-06,
"loss": 0.0567,
"step": 950
},
{
"epoch": 3.75,
"grad_norm": 2.048637866973877,
"learning_rate": 8.94888888888889e-06,
"loss": 0.0595,
"step": 975
},
{
"epoch": 3.8461538461538463,
"grad_norm": 2.78825306892395,
"learning_rate": 8.893333333333333e-06,
"loss": 0.0547,
"step": 1000
},
{
"epoch": 3.8461538461538463,
"eval_loss": 0.2570088803768158,
"eval_runtime": 418.585,
"eval_samples_per_second": 8.698,
"eval_steps_per_second": 0.545,
"eval_wer": 0.16873172769038006,
"step": 1000
},
{
"epoch": 3.9423076923076925,
"grad_norm": 2.7837064266204834,
"learning_rate": 8.83777777777778e-06,
"loss": 0.0566,
"step": 1025
},
{
"epoch": 4.038461538461538,
"grad_norm": 1.4186588525772095,
"learning_rate": 8.782222222222223e-06,
"loss": 0.0472,
"step": 1050
},
{
"epoch": 4.134615384615385,
"grad_norm": 1.4111158847808838,
"learning_rate": 8.726666666666667e-06,
"loss": 0.026,
"step": 1075
},
{
"epoch": 4.230769230769231,
"grad_norm": 0.8355940580368042,
"learning_rate": 8.671111111111113e-06,
"loss": 0.025,
"step": 1100
},
{
"epoch": 4.326923076923077,
"grad_norm": 2.8829832077026367,
"learning_rate": 8.615555555555555e-06,
"loss": 0.0281,
"step": 1125
},
{
"epoch": 4.423076923076923,
"grad_norm": 1.8035304546356201,
"learning_rate": 8.560000000000001e-06,
"loss": 0.0275,
"step": 1150
},
{
"epoch": 4.519230769230769,
"grad_norm": 2.558990240097046,
"learning_rate": 8.504444444444445e-06,
"loss": 0.0284,
"step": 1175
},
{
"epoch": 4.615384615384615,
"grad_norm": 3.0063929557800293,
"learning_rate": 8.448888888888889e-06,
"loss": 0.0262,
"step": 1200
},
{
"epoch": 4.711538461538462,
"grad_norm": 1.181315541267395,
"learning_rate": 8.393333333333335e-06,
"loss": 0.0224,
"step": 1225
},
{
"epoch": 4.8076923076923075,
"grad_norm": 1.4534687995910645,
"learning_rate": 8.337777777777777e-06,
"loss": 0.026,
"step": 1250
},
{
"epoch": 4.903846153846154,
"grad_norm": 1.941493034362793,
"learning_rate": 8.282222222222223e-06,
"loss": 0.0276,
"step": 1275
},
{
"epoch": 5.0,
"grad_norm": 1.048115849494934,
"learning_rate": 8.226666666666667e-06,
"loss": 0.0278,
"step": 1300
},
{
"epoch": 5.096153846153846,
"grad_norm": 1.3306798934936523,
"learning_rate": 8.171111111111113e-06,
"loss": 0.0136,
"step": 1325
},
{
"epoch": 5.1923076923076925,
"grad_norm": 1.5832746028900146,
"learning_rate": 8.115555555555557e-06,
"loss": 0.0128,
"step": 1350
},
{
"epoch": 5.288461538461538,
"grad_norm": 1.175854206085205,
"learning_rate": 8.06e-06,
"loss": 0.0115,
"step": 1375
},
{
"epoch": 5.384615384615385,
"grad_norm": 1.0961897373199463,
"learning_rate": 8.004444444444445e-06,
"loss": 0.0128,
"step": 1400
},
{
"epoch": 5.480769230769231,
"grad_norm": 1.1296463012695312,
"learning_rate": 7.948888888888889e-06,
"loss": 0.0116,
"step": 1425
},
{
"epoch": 5.576923076923077,
"grad_norm": 1.3500127792358398,
"learning_rate": 7.893333333333335e-06,
"loss": 0.0127,
"step": 1450
},
{
"epoch": 5.673076923076923,
"grad_norm": 1.6575067043304443,
"learning_rate": 7.837777777777779e-06,
"loss": 0.0126,
"step": 1475
},
{
"epoch": 5.769230769230769,
"grad_norm": 2.729482412338257,
"learning_rate": 7.782222222222223e-06,
"loss": 0.0125,
"step": 1500
},
{
"epoch": 5.865384615384615,
"grad_norm": 1.4455863237380981,
"learning_rate": 7.726666666666667e-06,
"loss": 0.0122,
"step": 1525
},
{
"epoch": 5.961538461538462,
"grad_norm": 1.5944123268127441,
"learning_rate": 7.67111111111111e-06,
"loss": 0.0147,
"step": 1550
},
{
"epoch": 6.0576923076923075,
"grad_norm": 0.8240692615509033,
"learning_rate": 7.6155555555555564e-06,
"loss": 0.0094,
"step": 1575
},
{
"epoch": 6.153846153846154,
"grad_norm": 0.41078874468803406,
"learning_rate": 7.5600000000000005e-06,
"loss": 0.0063,
"step": 1600
},
{
"epoch": 6.25,
"grad_norm": 0.2937760651111603,
"learning_rate": 7.504444444444445e-06,
"loss": 0.008,
"step": 1625
},
{
"epoch": 6.346153846153846,
"grad_norm": 0.2572005093097687,
"learning_rate": 7.44888888888889e-06,
"loss": 0.006,
"step": 1650
},
{
"epoch": 6.4423076923076925,
"grad_norm": 0.5730597376823425,
"learning_rate": 7.393333333333333e-06,
"loss": 0.0083,
"step": 1675
},
{
"epoch": 6.538461538461538,
"grad_norm": 0.9147136211395264,
"learning_rate": 7.337777777777778e-06,
"loss": 0.0066,
"step": 1700
},
{
"epoch": 6.634615384615385,
"grad_norm": 0.45330244302749634,
"learning_rate": 7.282222222222222e-06,
"loss": 0.0065,
"step": 1725
},
{
"epoch": 6.730769230769231,
"grad_norm": 0.6328800916671753,
"learning_rate": 7.226666666666667e-06,
"loss": 0.0078,
"step": 1750
},
{
"epoch": 6.826923076923077,
"grad_norm": 0.2872777581214905,
"learning_rate": 7.171111111111112e-06,
"loss": 0.0079,
"step": 1775
},
{
"epoch": 6.923076923076923,
"grad_norm": 1.7963801622390747,
"learning_rate": 7.115555555555557e-06,
"loss": 0.0078,
"step": 1800
},
{
"epoch": 7.019230769230769,
"grad_norm": 0.2437676042318344,
"learning_rate": 7.06e-06,
"loss": 0.0068,
"step": 1825
},
{
"epoch": 7.115384615384615,
"grad_norm": 0.20286525785923004,
"learning_rate": 7.004444444444445e-06,
"loss": 0.0039,
"step": 1850
},
{
"epoch": 7.211538461538462,
"grad_norm": 0.16271603107452393,
"learning_rate": 6.948888888888889e-06,
"loss": 0.0038,
"step": 1875
},
{
"epoch": 7.3076923076923075,
"grad_norm": 0.12690773606300354,
"learning_rate": 6.893333333333334e-06,
"loss": 0.0037,
"step": 1900
},
{
"epoch": 7.403846153846154,
"grad_norm": 0.41975799202919006,
"learning_rate": 6.837777777777779e-06,
"loss": 0.0042,
"step": 1925
},
{
"epoch": 7.5,
"grad_norm": 0.2555113732814789,
"learning_rate": 6.782222222222222e-06,
"loss": 0.0033,
"step": 1950
},
{
"epoch": 7.596153846153846,
"grad_norm": 1.6203404664993286,
"learning_rate": 6.726666666666667e-06,
"loss": 0.0049,
"step": 1975
},
{
"epoch": 7.6923076923076925,
"grad_norm": 0.16367614269256592,
"learning_rate": 6.671111111111112e-06,
"loss": 0.0034,
"step": 2000
},
{
"epoch": 7.6923076923076925,
"eval_loss": 0.31435152888298035,
"eval_runtime": 414.9289,
"eval_samples_per_second": 8.775,
"eval_steps_per_second": 0.549,
"eval_wer": 0.15889368416167804,
"step": 2000
},
{
"epoch": 7.788461538461538,
"grad_norm": 0.1244119256734848,
"learning_rate": 6.615555555555556e-06,
"loss": 0.0035,
"step": 2025
},
{
"epoch": 7.884615384615385,
"grad_norm": 0.7467623353004456,
"learning_rate": 6.560000000000001e-06,
"loss": 0.0042,
"step": 2050
},
{
"epoch": 7.980769230769231,
"grad_norm": 0.26792147755622864,
"learning_rate": 6.504444444444446e-06,
"loss": 0.0043,
"step": 2075
},
{
"epoch": 8.076923076923077,
"grad_norm": 0.0888567715883255,
"learning_rate": 6.448888888888889e-06,
"loss": 0.0029,
"step": 2100
},
{
"epoch": 8.173076923076923,
"grad_norm": 0.22752462327480316,
"learning_rate": 6.393333333333334e-06,
"loss": 0.003,
"step": 2125
},
{
"epoch": 8.26923076923077,
"grad_norm": 0.11363103985786438,
"learning_rate": 6.3377777777777786e-06,
"loss": 0.0023,
"step": 2150
},
{
"epoch": 8.365384615384615,
"grad_norm": 0.4889778196811676,
"learning_rate": 6.282222222222223e-06,
"loss": 0.0024,
"step": 2175
},
{
"epoch": 8.461538461538462,
"grad_norm": 0.08544881641864777,
"learning_rate": 6.2266666666666675e-06,
"loss": 0.0023,
"step": 2200
},
{
"epoch": 8.557692307692308,
"grad_norm": 0.1377352476119995,
"learning_rate": 6.171111111111112e-06,
"loss": 0.0032,
"step": 2225
},
{
"epoch": 8.653846153846153,
"grad_norm": 0.49440711736679077,
"learning_rate": 6.1155555555555555e-06,
"loss": 0.0022,
"step": 2250
},
{
"epoch": 8.75,
"grad_norm": 0.10170517861843109,
"learning_rate": 6.0600000000000004e-06,
"loss": 0.0029,
"step": 2275
},
{
"epoch": 8.846153846153847,
"grad_norm": 0.0798933133482933,
"learning_rate": 6.004444444444445e-06,
"loss": 0.0025,
"step": 2300
},
{
"epoch": 8.942307692307692,
"grad_norm": 0.06800112873315811,
"learning_rate": 5.948888888888889e-06,
"loss": 0.0023,
"step": 2325
},
{
"epoch": 9.038461538461538,
"grad_norm": 0.05597899481654167,
"learning_rate": 5.893333333333334e-06,
"loss": 0.0019,
"step": 2350
},
{
"epoch": 9.134615384615385,
"grad_norm": 0.055818960070610046,
"learning_rate": 5.837777777777777e-06,
"loss": 0.0016,
"step": 2375
},
{
"epoch": 9.23076923076923,
"grad_norm": 0.07546485215425491,
"learning_rate": 5.782222222222222e-06,
"loss": 0.002,
"step": 2400
},
{
"epoch": 9.326923076923077,
"grad_norm": 0.06564825028181076,
"learning_rate": 5.726666666666667e-06,
"loss": 0.0016,
"step": 2425
},
{
"epoch": 9.423076923076923,
"grad_norm": 0.0399746373295784,
"learning_rate": 5.671111111111112e-06,
"loss": 0.0016,
"step": 2450
},
{
"epoch": 9.51923076923077,
"grad_norm": 0.059612762182950974,
"learning_rate": 5.615555555555556e-06,
"loss": 0.0017,
"step": 2475
},
{
"epoch": 9.615384615384615,
"grad_norm": 0.06947290152311325,
"learning_rate": 5.560000000000001e-06,
"loss": 0.0016,
"step": 2500
},
{
"epoch": 9.711538461538462,
"grad_norm": 0.989651083946228,
"learning_rate": 5.504444444444444e-06,
"loss": 0.0017,
"step": 2525
},
{
"epoch": 9.807692307692308,
"grad_norm": 0.07500547170639038,
"learning_rate": 5.448888888888889e-06,
"loss": 0.0016,
"step": 2550
},
{
"epoch": 9.903846153846153,
"grad_norm": 0.09042783081531525,
"learning_rate": 5.393333333333334e-06,
"loss": 0.0017,
"step": 2575
},
{
"epoch": 10.0,
"grad_norm": 0.3791869282722473,
"learning_rate": 5.337777777777779e-06,
"loss": 0.0017,
"step": 2600
},
{
"epoch": 10.096153846153847,
"grad_norm": 0.043280228972435,
"learning_rate": 5.282222222222223e-06,
"loss": 0.0013,
"step": 2625
},
{
"epoch": 10.192307692307692,
"grad_norm": 0.04278066009283066,
"learning_rate": 5.226666666666667e-06,
"loss": 0.0014,
"step": 2650
},
{
"epoch": 10.288461538461538,
"grad_norm": 0.043745964765548706,
"learning_rate": 5.171111111111111e-06,
"loss": 0.0013,
"step": 2675
},
{
"epoch": 10.384615384615385,
"grad_norm": 0.05145561695098877,
"learning_rate": 5.115555555555556e-06,
"loss": 0.0013,
"step": 2700
},
{
"epoch": 10.48076923076923,
"grad_norm": 0.04810141772031784,
"learning_rate": 5.060000000000001e-06,
"loss": 0.0013,
"step": 2725
},
{
"epoch": 10.576923076923077,
"grad_norm": 0.046568796038627625,
"learning_rate": 5.004444444444445e-06,
"loss": 0.002,
"step": 2750
},
{
"epoch": 10.673076923076923,
"grad_norm": 0.05206083878874779,
"learning_rate": 4.94888888888889e-06,
"loss": 0.0013,
"step": 2775
},
{
"epoch": 10.76923076923077,
"grad_norm": 0.04524935781955719,
"learning_rate": 4.893333333333334e-06,
"loss": 0.0013,
"step": 2800
},
{
"epoch": 10.865384615384615,
"grad_norm": 0.05039019510149956,
"learning_rate": 4.837777777777778e-06,
"loss": 0.0012,
"step": 2825
},
{
"epoch": 10.961538461538462,
"grad_norm": 0.048185594379901886,
"learning_rate": 4.7822222222222226e-06,
"loss": 0.0024,
"step": 2850
},
{
"epoch": 11.057692307692308,
"grad_norm": 0.04220257326960564,
"learning_rate": 4.7266666666666674e-06,
"loss": 0.0012,
"step": 2875
},
{
"epoch": 11.153846153846153,
"grad_norm": 0.04200848937034607,
"learning_rate": 4.6711111111111115e-06,
"loss": 0.0011,
"step": 2900
},
{
"epoch": 11.25,
"grad_norm": 0.04376620426774025,
"learning_rate": 4.6155555555555555e-06,
"loss": 0.0011,
"step": 2925
},
{
"epoch": 11.346153846153847,
"grad_norm": 0.03870835155248642,
"learning_rate": 4.56e-06,
"loss": 0.0011,
"step": 2950
},
{
"epoch": 11.442307692307692,
"grad_norm": 0.03991697356104851,
"learning_rate": 4.504444444444444e-06,
"loss": 0.0011,
"step": 2975
},
{
"epoch": 11.538461538461538,
"grad_norm": 0.03659369796514511,
"learning_rate": 4.448888888888889e-06,
"loss": 0.0011,
"step": 3000
},
{
"epoch": 11.538461538461538,
"eval_loss": 0.33577749133110046,
"eval_runtime": 414.076,
"eval_samples_per_second": 8.793,
"eval_steps_per_second": 0.551,
"eval_wer": 0.16153881850665924,
"step": 3000
},
{
"epoch": 11.634615384615385,
"grad_norm": 0.035982731729745865,
"learning_rate": 4.393333333333334e-06,
"loss": 0.0011,
"step": 3025
},
{
"epoch": 11.73076923076923,
"grad_norm": 0.04060469567775726,
"learning_rate": 4.337777777777778e-06,
"loss": 0.0011,
"step": 3050
},
{
"epoch": 11.826923076923077,
"grad_norm": 0.03556708246469498,
"learning_rate": 4.282222222222222e-06,
"loss": 0.0011,
"step": 3075
},
{
"epoch": 11.923076923076923,
"grad_norm": 0.037598781287670135,
"learning_rate": 4.226666666666667e-06,
"loss": 0.001,
"step": 3100
},
{
"epoch": 12.01923076923077,
"grad_norm": 0.03427188843488693,
"learning_rate": 4.171111111111111e-06,
"loss": 0.0011,
"step": 3125
},
{
"epoch": 12.115384615384615,
"grad_norm": 0.03573931008577347,
"learning_rate": 4.115555555555556e-06,
"loss": 0.0009,
"step": 3150
},
{
"epoch": 12.211538461538462,
"grad_norm": 0.03210974112153053,
"learning_rate": 4.060000000000001e-06,
"loss": 0.0009,
"step": 3175
},
{
"epoch": 12.307692307692308,
"grad_norm": 0.036103058606386185,
"learning_rate": 4.004444444444445e-06,
"loss": 0.001,
"step": 3200
},
{
"epoch": 12.403846153846153,
"grad_norm": 0.036252211779356,
"learning_rate": 3.948888888888889e-06,
"loss": 0.001,
"step": 3225
},
{
"epoch": 12.5,
"grad_norm": 0.03289297968149185,
"learning_rate": 3.893333333333333e-06,
"loss": 0.0009,
"step": 3250
},
{
"epoch": 12.596153846153847,
"grad_norm": 0.035281531512737274,
"learning_rate": 3.837777777777778e-06,
"loss": 0.001,
"step": 3275
},
{
"epoch": 12.692307692307692,
"grad_norm": 0.036654986441135406,
"learning_rate": 3.782222222222223e-06,
"loss": 0.0009,
"step": 3300
},
{
"epoch": 12.788461538461538,
"grad_norm": 0.028989532962441444,
"learning_rate": 3.726666666666667e-06,
"loss": 0.0009,
"step": 3325
},
{
"epoch": 12.884615384615385,
"grad_norm": 0.033068809658288956,
"learning_rate": 3.6711111111111113e-06,
"loss": 0.0009,
"step": 3350
},
{
"epoch": 12.98076923076923,
"grad_norm": 0.035067297518253326,
"learning_rate": 3.615555555555556e-06,
"loss": 0.0009,
"step": 3375
},
{
"epoch": 13.076923076923077,
"grad_norm": 0.03311024233698845,
"learning_rate": 3.5600000000000002e-06,
"loss": 0.0008,
"step": 3400
},
{
"epoch": 13.173076923076923,
"grad_norm": 0.0331607460975647,
"learning_rate": 3.5044444444444447e-06,
"loss": 0.0009,
"step": 3425
},
{
"epoch": 13.26923076923077,
"grad_norm": 0.031973276287317276,
"learning_rate": 3.4488888888888896e-06,
"loss": 0.0015,
"step": 3450
},
{
"epoch": 13.365384615384615,
"grad_norm": 0.031224718317389488,
"learning_rate": 3.3933333333333336e-06,
"loss": 0.0008,
"step": 3475
},
{
"epoch": 13.461538461538462,
"grad_norm": 0.030508123338222504,
"learning_rate": 3.337777777777778e-06,
"loss": 0.0008,
"step": 3500
},
{
"epoch": 13.557692307692308,
"grad_norm": 0.03208326920866966,
"learning_rate": 3.282222222222223e-06,
"loss": 0.0009,
"step": 3525
},
{
"epoch": 13.653846153846153,
"grad_norm": 0.02819848246872425,
"learning_rate": 3.226666666666667e-06,
"loss": 0.0008,
"step": 3550
},
{
"epoch": 13.75,
"grad_norm": 0.02973184920847416,
"learning_rate": 3.1711111111111114e-06,
"loss": 0.0009,
"step": 3575
},
{
"epoch": 13.846153846153847,
"grad_norm": 0.026373526081442833,
"learning_rate": 3.1155555555555555e-06,
"loss": 0.0008,
"step": 3600
},
{
"epoch": 13.942307692307692,
"grad_norm": 0.030721429735422134,
"learning_rate": 3.0600000000000003e-06,
"loss": 0.0008,
"step": 3625
},
{
"epoch": 14.038461538461538,
"grad_norm": 0.027194401249289513,
"learning_rate": 3.004444444444445e-06,
"loss": 0.0008,
"step": 3650
},
{
"epoch": 14.134615384615385,
"grad_norm": 0.02467515505850315,
"learning_rate": 2.948888888888889e-06,
"loss": 0.0008,
"step": 3675
},
{
"epoch": 14.23076923076923,
"grad_norm": 0.02789357490837574,
"learning_rate": 2.8933333333333337e-06,
"loss": 0.0008,
"step": 3700
},
{
"epoch": 14.326923076923077,
"grad_norm": 0.02929651364684105,
"learning_rate": 2.837777777777778e-06,
"loss": 0.0008,
"step": 3725
},
{
"epoch": 14.423076923076923,
"grad_norm": 0.02698478475213051,
"learning_rate": 2.7822222222222222e-06,
"loss": 0.0008,
"step": 3750
},
{
"epoch": 14.51923076923077,
"grad_norm": 0.026549380272626877,
"learning_rate": 2.726666666666667e-06,
"loss": 0.0008,
"step": 3775
},
{
"epoch": 14.615384615384615,
"grad_norm": 0.025898369029164314,
"learning_rate": 2.6711111111111116e-06,
"loss": 0.0008,
"step": 3800
},
{
"epoch": 14.711538461538462,
"grad_norm": 0.024109981954097748,
"learning_rate": 2.6155555555555556e-06,
"loss": 0.0008,
"step": 3825
},
{
"epoch": 14.807692307692308,
"grad_norm": 0.028975041583180428,
"learning_rate": 2.56e-06,
"loss": 0.0007,
"step": 3850
},
{
"epoch": 14.903846153846153,
"grad_norm": 0.029065443202853203,
"learning_rate": 2.504444444444445e-06,
"loss": 0.0008,
"step": 3875
},
{
"epoch": 15.0,
"grad_norm": 0.03024737536907196,
"learning_rate": 2.448888888888889e-06,
"loss": 0.0013,
"step": 3900
},
{
"epoch": 15.096153846153847,
"grad_norm": 0.020746273919939995,
"learning_rate": 2.3933333333333334e-06,
"loss": 0.0007,
"step": 3925
},
{
"epoch": 15.192307692307692,
"grad_norm": 0.022498076781630516,
"learning_rate": 2.337777777777778e-06,
"loss": 0.0007,
"step": 3950
},
{
"epoch": 15.288461538461538,
"grad_norm": 0.025430675595998764,
"learning_rate": 2.2822222222222223e-06,
"loss": 0.0007,
"step": 3975
},
{
"epoch": 15.384615384615385,
"grad_norm": 0.024518582969903946,
"learning_rate": 2.226666666666667e-06,
"loss": 0.0007,
"step": 4000
},
{
"epoch": 15.384615384615385,
"eval_loss": 0.34604960680007935,
"eval_runtime": 414.6061,
"eval_samples_per_second": 8.782,
"eval_steps_per_second": 0.55,
"eval_wer": 0.1614460067752564,
"step": 4000
},
{
"epoch": 15.48076923076923,
"grad_norm": 0.028795845806598663,
"learning_rate": 2.1711111111111113e-06,
"loss": 0.0007,
"step": 4025
},
{
"epoch": 15.576923076923077,
"grad_norm": 0.02770942822098732,
"learning_rate": 2.1155555555555557e-06,
"loss": 0.0007,
"step": 4050
},
{
"epoch": 15.673076923076923,
"grad_norm": 0.026291629299521446,
"learning_rate": 2.06e-06,
"loss": 0.0008,
"step": 4075
},
{
"epoch": 15.76923076923077,
"grad_norm": 0.024974102154374123,
"learning_rate": 2.0044444444444446e-06,
"loss": 0.0007,
"step": 4100
},
{
"epoch": 15.865384615384615,
"grad_norm": 0.02725864201784134,
"learning_rate": 1.948888888888889e-06,
"loss": 0.0007,
"step": 4125
},
{
"epoch": 15.961538461538462,
"grad_norm": 0.027727024629712105,
"learning_rate": 1.8933333333333333e-06,
"loss": 0.0007,
"step": 4150
},
{
"epoch": 16.057692307692307,
"grad_norm": 0.023184822872281075,
"learning_rate": 1.837777777777778e-06,
"loss": 0.0007,
"step": 4175
},
{
"epoch": 16.153846153846153,
"grad_norm": 0.020365368574857712,
"learning_rate": 1.7822222222222225e-06,
"loss": 0.0007,
"step": 4200
},
{
"epoch": 16.25,
"grad_norm": 0.023292195051908493,
"learning_rate": 1.7266666666666667e-06,
"loss": 0.0006,
"step": 4225
},
{
"epoch": 16.346153846153847,
"grad_norm": 0.02580893039703369,
"learning_rate": 1.6711111111111112e-06,
"loss": 0.0006,
"step": 4250
},
{
"epoch": 16.442307692307693,
"grad_norm": 0.02440100722014904,
"learning_rate": 1.6155555555555559e-06,
"loss": 0.0007,
"step": 4275
},
{
"epoch": 16.53846153846154,
"grad_norm": 0.02583976835012436,
"learning_rate": 1.56e-06,
"loss": 0.0007,
"step": 4300
},
{
"epoch": 16.634615384615383,
"grad_norm": 0.024059804156422615,
"learning_rate": 1.5044444444444446e-06,
"loss": 0.0007,
"step": 4325
},
{
"epoch": 16.73076923076923,
"grad_norm": 0.023279106244444847,
"learning_rate": 1.4488888888888892e-06,
"loss": 0.0007,
"step": 4350
},
{
"epoch": 16.826923076923077,
"grad_norm": 0.022877462208271027,
"learning_rate": 1.3933333333333335e-06,
"loss": 0.0007,
"step": 4375
},
{
"epoch": 16.923076923076923,
"grad_norm": 0.02401687018573284,
"learning_rate": 1.337777777777778e-06,
"loss": 0.0006,
"step": 4400
},
{
"epoch": 17.01923076923077,
"grad_norm": 0.021333323791623116,
"learning_rate": 1.2822222222222222e-06,
"loss": 0.0006,
"step": 4425
},
{
"epoch": 17.115384615384617,
"grad_norm": 0.019655166193842888,
"learning_rate": 1.2266666666666666e-06,
"loss": 0.0006,
"step": 4450
},
{
"epoch": 17.21153846153846,
"grad_norm": 0.02106211706995964,
"learning_rate": 1.171111111111111e-06,
"loss": 0.0006,
"step": 4475
},
{
"epoch": 17.307692307692307,
"grad_norm": 0.022571563720703125,
"learning_rate": 1.1155555555555558e-06,
"loss": 0.0006,
"step": 4500
},
{
"epoch": 17.403846153846153,
"grad_norm": 0.023800240829586983,
"learning_rate": 1.06e-06,
"loss": 0.0006,
"step": 4525
},
{
"epoch": 17.5,
"grad_norm": 0.0228471290320158,
"learning_rate": 1.0044444444444445e-06,
"loss": 0.0006,
"step": 4550
},
{
"epoch": 17.596153846153847,
"grad_norm": 0.021810224279761314,
"learning_rate": 9.488888888888889e-07,
"loss": 0.0006,
"step": 4575
},
{
"epoch": 17.692307692307693,
"grad_norm": 0.02282741293311119,
"learning_rate": 8.933333333333334e-07,
"loss": 0.0008,
"step": 4600
},
{
"epoch": 17.78846153846154,
"grad_norm": 0.024272125214338303,
"learning_rate": 8.37777777777778e-07,
"loss": 0.0006,
"step": 4625
},
{
"epoch": 17.884615384615383,
"grad_norm": 0.026012342423200607,
"learning_rate": 7.822222222222223e-07,
"loss": 0.0006,
"step": 4650
},
{
"epoch": 17.98076923076923,
"grad_norm": 0.02298705466091633,
"learning_rate": 7.266666666666668e-07,
"loss": 0.0006,
"step": 4675
},
{
"epoch": 18.076923076923077,
"grad_norm": 0.021737568080425262,
"learning_rate": 6.711111111111111e-07,
"loss": 0.0006,
"step": 4700
},
{
"epoch": 18.173076923076923,
"grad_norm": 0.021294375881552696,
"learning_rate": 6.155555555555556e-07,
"loss": 0.0006,
"step": 4725
},
{
"epoch": 18.26923076923077,
"grad_norm": 0.0215039923787117,
"learning_rate": 5.6e-07,
"loss": 0.0006,
"step": 4750
},
{
"epoch": 18.365384615384617,
"grad_norm": 0.02031785622239113,
"learning_rate": 5.044444444444445e-07,
"loss": 0.0006,
"step": 4775
},
{
"epoch": 18.46153846153846,
"grad_norm": 0.02249339409172535,
"learning_rate": 4.488888888888889e-07,
"loss": 0.0006,
"step": 4800
},
{
"epoch": 18.557692307692307,
"grad_norm": 0.022298617288470268,
"learning_rate": 3.9333333333333336e-07,
"loss": 0.0006,
"step": 4825
},
{
"epoch": 18.653846153846153,
"grad_norm": 0.019332217052578926,
"learning_rate": 3.3777777777777777e-07,
"loss": 0.0006,
"step": 4850
},
{
"epoch": 18.75,
"grad_norm": 0.02518359012901783,
"learning_rate": 2.822222222222222e-07,
"loss": 0.0006,
"step": 4875
},
{
"epoch": 18.846153846153847,
"grad_norm": 0.022782141342759132,
"learning_rate": 2.266666666666667e-07,
"loss": 0.0006,
"step": 4900
},
{
"epoch": 18.942307692307693,
"grad_norm": 0.023741982877254486,
"learning_rate": 1.7111111111111114e-07,
"loss": 0.0006,
"step": 4925
},
{
"epoch": 19.03846153846154,
"grad_norm": 0.02083681896328926,
"learning_rate": 1.1555555555555556e-07,
"loss": 0.0006,
"step": 4950
},
{
"epoch": 19.134615384615383,
"grad_norm": 0.020423738285899162,
"learning_rate": 6.000000000000001e-08,
"loss": 0.0006,
"step": 4975
},
{
"epoch": 19.23076923076923,
"grad_norm": 0.0213633980602026,
"learning_rate": 4.444444444444445e-09,
"loss": 0.0006,
"step": 5000
},
{
"epoch": 19.23076923076923,
"eval_loss": 0.35056987404823303,
"eval_runtime": 416.1932,
"eval_samples_per_second": 8.748,
"eval_steps_per_second": 0.548,
"eval_wer": 0.16172444196946495,
"step": 5000
},
{
"epoch": 19.23076923076923,
"step": 5000,
"total_flos": 4.616818092859392e+19,
"train_loss": 0.05540717707388103,
"train_runtime": 18195.7149,
"train_samples_per_second": 8.793,
"train_steps_per_second": 0.275
}
],
"logging_steps": 25,
"max_steps": 5000,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.616818092859392e+19,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}