ViT-NIH-Chest-X-ray-dataset-small / trainer_state.json
{
"best_metric": 0.0013051489368081093,
"best_model_checkpoint": "./ViT-NIH-Chest-X-ray-dataset-small/checkpoint-2400",
"epoch": 9.0,
"eval_steps": 100,
"global_step": 2439,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03690036900369004,
"grad_norm": 1.528047800064087,
"learning_rate": 0.000199179991799918,
"loss": 0.0667,
"step": 10
},
{
"epoch": 0.07380073800738007,
"grad_norm": 0.4356657862663269,
"learning_rate": 0.000198359983599836,
"loss": 0.0497,
"step": 20
},
{
"epoch": 0.11070110701107011,
"grad_norm": 0.7035173177719116,
"learning_rate": 0.000197539975399754,
"loss": 0.0452,
"step": 30
},
{
"epoch": 0.14760147601476015,
"grad_norm": 0.6605591773986816,
"learning_rate": 0.000196719967199672,
"loss": 0.0513,
"step": 40
},
{
"epoch": 0.18450184501845018,
"grad_norm": 0.3567582964897156,
"learning_rate": 0.00019589995899959,
"loss": 0.0582,
"step": 50
},
{
"epoch": 0.22140221402214022,
"grad_norm": 0.8019290566444397,
"learning_rate": 0.000195079950799508,
"loss": 0.066,
"step": 60
},
{
"epoch": 0.25830258302583026,
"grad_norm": 0.3395977020263672,
"learning_rate": 0.000194259942599426,
"loss": 0.0461,
"step": 70
},
{
"epoch": 0.2952029520295203,
"grad_norm": 1.1884530782699585,
"learning_rate": 0.000193439934399344,
"loss": 0.055,
"step": 80
},
{
"epoch": 0.33210332103321033,
"grad_norm": 0.3934822082519531,
"learning_rate": 0.000192619926199262,
"loss": 0.0341,
"step": 90
},
{
"epoch": 0.36900369003690037,
"grad_norm": 0.19823391735553741,
"learning_rate": 0.00019179991799918,
"loss": 0.0271,
"step": 100
},
{
"epoch": 0.36900369003690037,
"eval_accuracy": 0.8583815028901735,
"eval_loss": 0.03472097963094711,
"eval_runtime": 53.2441,
"eval_samples_per_second": 32.492,
"eval_steps_per_second": 4.076,
"step": 100
},
{
"epoch": 0.4059040590405904,
"grad_norm": 0.6334025859832764,
"learning_rate": 0.00019097990979909799,
"loss": 0.0347,
"step": 110
},
{
"epoch": 0.44280442804428044,
"grad_norm": 0.4480656683444977,
"learning_rate": 0.000190159901599016,
"loss": 0.038,
"step": 120
},
{
"epoch": 0.4797047970479705,
"grad_norm": 0.35815778374671936,
"learning_rate": 0.000189339893398934,
"loss": 0.0308,
"step": 130
},
{
"epoch": 0.5166051660516605,
"grad_norm": 0.5924233794212341,
"learning_rate": 0.000188519885198852,
"loss": 0.0329,
"step": 140
},
{
"epoch": 0.5535055350553506,
"grad_norm": 0.8932539224624634,
"learning_rate": 0.00018769987699876998,
"loss": 0.0447,
"step": 150
},
{
"epoch": 0.5904059040590406,
"grad_norm": 0.4765835702419281,
"learning_rate": 0.000186879868798688,
"loss": 0.0335,
"step": 160
},
{
"epoch": 0.6273062730627307,
"grad_norm": 1.848928451538086,
"learning_rate": 0.00018605986059860597,
"loss": 0.0353,
"step": 170
},
{
"epoch": 0.6642066420664207,
"grad_norm": 0.5017064809799194,
"learning_rate": 0.000185239852398524,
"loss": 0.0365,
"step": 180
},
{
"epoch": 0.7011070110701108,
"grad_norm": 0.4620348811149597,
"learning_rate": 0.00018441984419844198,
"loss": 0.0284,
"step": 190
},
{
"epoch": 0.7380073800738007,
"grad_norm": 0.716968834400177,
"learning_rate": 0.00018359983599836,
"loss": 0.0334,
"step": 200
},
{
"epoch": 0.7380073800738007,
"eval_accuracy": 0.8624277456647399,
"eval_loss": 0.02910423092544079,
"eval_runtime": 53.0242,
"eval_samples_per_second": 32.627,
"eval_steps_per_second": 4.092,
"step": 200
},
{
"epoch": 0.7749077490774908,
"grad_norm": 0.6060870289802551,
"learning_rate": 0.00018277982779827797,
"loss": 0.0383,
"step": 210
},
{
"epoch": 0.8118081180811808,
"grad_norm": 1.053126335144043,
"learning_rate": 0.000181959819598196,
"loss": 0.0521,
"step": 220
},
{
"epoch": 0.8487084870848709,
"grad_norm": 0.48799070715904236,
"learning_rate": 0.00018113981139811398,
"loss": 0.0529,
"step": 230
},
{
"epoch": 0.8856088560885609,
"grad_norm": 0.8857760429382324,
"learning_rate": 0.00018031980319803198,
"loss": 0.0359,
"step": 240
},
{
"epoch": 0.922509225092251,
"grad_norm": 0.2952769696712494,
"learning_rate": 0.00017949979499794997,
"loss": 0.0457,
"step": 250
},
{
"epoch": 0.959409594095941,
"grad_norm": 0.39872637391090393,
"learning_rate": 0.000178679786797868,
"loss": 0.0573,
"step": 260
},
{
"epoch": 0.996309963099631,
"grad_norm": 0.5376998782157898,
"learning_rate": 0.00017785977859778598,
"loss": 0.0352,
"step": 270
},
{
"epoch": 1.033210332103321,
"grad_norm": 0.32833996415138245,
"learning_rate": 0.00017703977039770397,
"loss": 0.0444,
"step": 280
},
{
"epoch": 1.070110701107011,
"grad_norm": 0.6235490441322327,
"learning_rate": 0.00017621976219762197,
"loss": 0.0347,
"step": 290
},
{
"epoch": 1.1070110701107012,
"grad_norm": 0.612684428691864,
"learning_rate": 0.00017539975399754,
"loss": 0.0438,
"step": 300
},
{
"epoch": 1.1070110701107012,
"eval_accuracy": 0.8606936416184972,
"eval_loss": 0.03517782315611839,
"eval_runtime": 53.0738,
"eval_samples_per_second": 32.596,
"eval_steps_per_second": 4.089,
"step": 300
},
{
"epoch": 1.1439114391143912,
"grad_norm": 0.29079392552375793,
"learning_rate": 0.00017457974579745798,
"loss": 0.0546,
"step": 310
},
{
"epoch": 1.1808118081180812,
"grad_norm": 0.3669208884239197,
"learning_rate": 0.00017375973759737597,
"loss": 0.03,
"step": 320
},
{
"epoch": 1.2177121771217712,
"grad_norm": 0.27689820528030396,
"learning_rate": 0.00017293972939729397,
"loss": 0.0287,
"step": 330
},
{
"epoch": 1.2546125461254611,
"grad_norm": 0.2193935513496399,
"learning_rate": 0.00017211972119721199,
"loss": 0.0336,
"step": 340
},
{
"epoch": 1.2915129151291513,
"grad_norm": 0.1442354917526245,
"learning_rate": 0.00017129971299712998,
"loss": 0.0301,
"step": 350
},
{
"epoch": 1.3284132841328413,
"grad_norm": 0.49084681272506714,
"learning_rate": 0.00017047970479704797,
"loss": 0.0383,
"step": 360
},
{
"epoch": 1.3653136531365313,
"grad_norm": 0.6412920951843262,
"learning_rate": 0.00016965969659696597,
"loss": 0.0338,
"step": 370
},
{
"epoch": 1.4022140221402215,
"grad_norm": 0.5825220942497253,
"learning_rate": 0.00016883968839688399,
"loss": 0.0344,
"step": 380
},
{
"epoch": 1.4391143911439115,
"grad_norm": 0.5842616558074951,
"learning_rate": 0.00016801968019680198,
"loss": 0.0358,
"step": 390
},
{
"epoch": 1.4760147601476015,
"grad_norm": 0.17261776328086853,
"learning_rate": 0.00016719967199671997,
"loss": 0.0215,
"step": 400
},
{
"epoch": 1.4760147601476015,
"eval_accuracy": 0.8745664739884393,
"eval_loss": 0.03190147876739502,
"eval_runtime": 53.163,
"eval_samples_per_second": 32.541,
"eval_steps_per_second": 4.082,
"step": 400
},
{
"epoch": 1.5129151291512914,
"grad_norm": 0.42338722944259644,
"learning_rate": 0.00016637966379663796,
"loss": 0.0343,
"step": 410
},
{
"epoch": 1.5498154981549814,
"grad_norm": 0.6431716680526733,
"learning_rate": 0.00016555965559655598,
"loss": 0.0308,
"step": 420
},
{
"epoch": 1.5867158671586716,
"grad_norm": 0.9911162257194519,
"learning_rate": 0.00016473964739647395,
"loss": 0.0357,
"step": 430
},
{
"epoch": 1.6236162361623616,
"grad_norm": 0.7283934354782104,
"learning_rate": 0.00016391963919639197,
"loss": 0.0548,
"step": 440
},
{
"epoch": 1.6605166051660518,
"grad_norm": 0.3784200847148895,
"learning_rate": 0.00016309963099630996,
"loss": 0.0411,
"step": 450
},
{
"epoch": 1.6974169741697418,
"grad_norm": 0.4938986599445343,
"learning_rate": 0.00016227962279622798,
"loss": 0.0384,
"step": 460
},
{
"epoch": 1.7343173431734318,
"grad_norm": 0.7969509363174438,
"learning_rate": 0.00016145961459614595,
"loss": 0.0282,
"step": 470
},
{
"epoch": 1.7712177121771218,
"grad_norm": 0.7997636198997498,
"learning_rate": 0.00016063960639606397,
"loss": 0.0299,
"step": 480
},
{
"epoch": 1.8081180811808117,
"grad_norm": 0.13115979731082916,
"learning_rate": 0.00015981959819598196,
"loss": 0.0361,
"step": 490
},
{
"epoch": 1.8450184501845017,
"grad_norm": 0.9753024578094482,
"learning_rate": 0.00015899958999589995,
"loss": 0.0267,
"step": 500
},
{
"epoch": 1.8450184501845017,
"eval_accuracy": 0.8797687861271676,
"eval_loss": 0.02772846817970276,
"eval_runtime": 52.9913,
"eval_samples_per_second": 32.647,
"eval_steps_per_second": 4.095,
"step": 500
},
{
"epoch": 1.881918819188192,
"grad_norm": 0.5084354877471924,
"learning_rate": 0.00015817958179581795,
"loss": 0.0432,
"step": 510
},
{
"epoch": 1.918819188191882,
"grad_norm": 0.2513779103755951,
"learning_rate": 0.00015735957359573597,
"loss": 0.034,
"step": 520
},
{
"epoch": 1.9557195571955721,
"grad_norm": 0.284483402967453,
"learning_rate": 0.00015653956539565396,
"loss": 0.0492,
"step": 530
},
{
"epoch": 1.992619926199262,
"grad_norm": 0.3686124086380005,
"learning_rate": 0.00015571955719557195,
"loss": 0.0313,
"step": 540
},
{
"epoch": 2.029520295202952,
"grad_norm": 0.49193885922431946,
"learning_rate": 0.00015489954899548995,
"loss": 0.0346,
"step": 550
},
{
"epoch": 2.066420664206642,
"grad_norm": 0.48066815733909607,
"learning_rate": 0.00015407954079540797,
"loss": 0.0204,
"step": 560
},
{
"epoch": 2.103321033210332,
"grad_norm": 0.4960450530052185,
"learning_rate": 0.00015325953259532596,
"loss": 0.0208,
"step": 570
},
{
"epoch": 2.140221402214022,
"grad_norm": 0.3585696220397949,
"learning_rate": 0.00015243952439524395,
"loss": 0.0226,
"step": 580
},
{
"epoch": 2.177121771217712,
"grad_norm": 0.383513480424881,
"learning_rate": 0.00015161951619516195,
"loss": 0.024,
"step": 590
},
{
"epoch": 2.2140221402214024,
"grad_norm": 0.45566025376319885,
"learning_rate": 0.00015079950799507997,
"loss": 0.0266,
"step": 600
},
{
"epoch": 2.2140221402214024,
"eval_accuracy": 0.9115606936416185,
"eval_loss": 0.017673172056674957,
"eval_runtime": 53.2922,
"eval_samples_per_second": 32.463,
"eval_steps_per_second": 4.072,
"step": 600
},
{
"epoch": 2.2509225092250924,
"grad_norm": 0.09787905216217041,
"learning_rate": 0.00014997949979499796,
"loss": 0.0173,
"step": 610
},
{
"epoch": 2.2878228782287824,
"grad_norm": 0.4203137159347534,
"learning_rate": 0.00014915949159491595,
"loss": 0.0208,
"step": 620
},
{
"epoch": 2.3247232472324724,
"grad_norm": 0.27736109495162964,
"learning_rate": 0.00014833948339483394,
"loss": 0.019,
"step": 630
},
{
"epoch": 2.3616236162361623,
"grad_norm": 0.1169864758849144,
"learning_rate": 0.00014751947519475196,
"loss": 0.0144,
"step": 640
},
{
"epoch": 2.3985239852398523,
"grad_norm": 0.4632050096988678,
"learning_rate": 0.00014669946699466993,
"loss": 0.022,
"step": 650
},
{
"epoch": 2.4354243542435423,
"grad_norm": 0.25868573784828186,
"learning_rate": 0.00014587945879458795,
"loss": 0.0225,
"step": 660
},
{
"epoch": 2.4723247232472323,
"grad_norm": 0.7463150024414062,
"learning_rate": 0.00014505945059450594,
"loss": 0.0218,
"step": 670
},
{
"epoch": 2.5092250922509223,
"grad_norm": 0.4195776581764221,
"learning_rate": 0.00014423944239442396,
"loss": 0.022,
"step": 680
},
{
"epoch": 2.5461254612546127,
"grad_norm": 1.2269337177276611,
"learning_rate": 0.00014341943419434193,
"loss": 0.0216,
"step": 690
},
{
"epoch": 2.5830258302583027,
"grad_norm": 0.44495096802711487,
"learning_rate": 0.00014259942599425995,
"loss": 0.014,
"step": 700
},
{
"epoch": 2.5830258302583027,
"eval_accuracy": 0.9497109826589596,
"eval_loss": 0.01273352187126875,
"eval_runtime": 53.0795,
"eval_samples_per_second": 32.593,
"eval_steps_per_second": 4.088,
"step": 700
},
{
"epoch": 2.6199261992619927,
"grad_norm": 0.16129033267498016,
"learning_rate": 0.00014177941779417794,
"loss": 0.0211,
"step": 710
},
{
"epoch": 2.6568265682656826,
"grad_norm": 0.639427900314331,
"learning_rate": 0.00014095940959409593,
"loss": 0.0245,
"step": 720
},
{
"epoch": 2.6937269372693726,
"grad_norm": 1.1732105016708374,
"learning_rate": 0.00014013940139401393,
"loss": 0.0188,
"step": 730
},
{
"epoch": 2.7306273062730626,
"grad_norm": 0.15935984253883362,
"learning_rate": 0.00013931939319393195,
"loss": 0.0225,
"step": 740
},
{
"epoch": 2.767527675276753,
"grad_norm": 0.44544655084609985,
"learning_rate": 0.00013849938499384994,
"loss": 0.0181,
"step": 750
},
{
"epoch": 2.804428044280443,
"grad_norm": 0.04136790707707405,
"learning_rate": 0.00013767937679376793,
"loss": 0.0193,
"step": 760
},
{
"epoch": 2.841328413284133,
"grad_norm": 0.3928181231021881,
"learning_rate": 0.00013685936859368593,
"loss": 0.0314,
"step": 770
},
{
"epoch": 2.878228782287823,
"grad_norm": 0.4807884097099304,
"learning_rate": 0.00013603936039360395,
"loss": 0.0204,
"step": 780
},
{
"epoch": 2.915129151291513,
"grad_norm": 0.11494464427232742,
"learning_rate": 0.00013521935219352194,
"loss": 0.0163,
"step": 790
},
{
"epoch": 2.952029520295203,
"grad_norm": 0.4516555964946747,
"learning_rate": 0.00013439934399343993,
"loss": 0.0207,
"step": 800
},
{
"epoch": 2.952029520295203,
"eval_accuracy": 0.9410404624277456,
"eval_loss": 0.014368296600878239,
"eval_runtime": 53.187,
"eval_samples_per_second": 32.527,
"eval_steps_per_second": 4.08,
"step": 800
},
{
"epoch": 2.988929889298893,
"grad_norm": 0.25021785497665405,
"learning_rate": 0.00013357933579335793,
"loss": 0.018,
"step": 810
},
{
"epoch": 3.025830258302583,
"grad_norm": 0.6319343447685242,
"learning_rate": 0.00013275932759327595,
"loss": 0.0212,
"step": 820
},
{
"epoch": 3.062730627306273,
"grad_norm": 0.16027286648750305,
"learning_rate": 0.00013193931939319394,
"loss": 0.0119,
"step": 830
},
{
"epoch": 3.0996309963099633,
"grad_norm": 0.27029338479042053,
"learning_rate": 0.00013111931119311193,
"loss": 0.011,
"step": 840
},
{
"epoch": 3.1365313653136533,
"grad_norm": 0.3908526301383972,
"learning_rate": 0.00013029930299302992,
"loss": 0.0113,
"step": 850
},
{
"epoch": 3.1734317343173433,
"grad_norm": 0.08731468766927719,
"learning_rate": 0.00012947929479294794,
"loss": 0.0088,
"step": 860
},
{
"epoch": 3.2103321033210332,
"grad_norm": 0.19361713528633118,
"learning_rate": 0.00012865928659286594,
"loss": 0.0138,
"step": 870
},
{
"epoch": 3.2472324723247232,
"grad_norm": 0.11760518699884415,
"learning_rate": 0.00012783927839278393,
"loss": 0.0128,
"step": 880
},
{
"epoch": 3.284132841328413,
"grad_norm": 0.5687548518180847,
"learning_rate": 0.00012701927019270192,
"loss": 0.0114,
"step": 890
},
{
"epoch": 3.321033210332103,
"grad_norm": 0.35156571865081787,
"learning_rate": 0.00012619926199261994,
"loss": 0.0115,
"step": 900
},
{
"epoch": 3.321033210332103,
"eval_accuracy": 0.9653179190751445,
"eval_loss": 0.009723745286464691,
"eval_runtime": 53.0303,
"eval_samples_per_second": 32.623,
"eval_steps_per_second": 4.092,
"step": 900
},
{
"epoch": 3.357933579335793,
"grad_norm": 0.22674980759620667,
"learning_rate": 0.0001253792537925379,
"loss": 0.0126,
"step": 910
},
{
"epoch": 3.3948339483394836,
"grad_norm": 0.23142710328102112,
"learning_rate": 0.00012455924559245593,
"loss": 0.007,
"step": 920
},
{
"epoch": 3.4317343173431736,
"grad_norm": 0.11926586925983429,
"learning_rate": 0.00012373923739237392,
"loss": 0.0111,
"step": 930
},
{
"epoch": 3.4686346863468636,
"grad_norm": 0.12741585075855255,
"learning_rate": 0.00012291922919229194,
"loss": 0.0149,
"step": 940
},
{
"epoch": 3.5055350553505535,
"grad_norm": 0.8535834550857544,
"learning_rate": 0.0001220992209922099,
"loss": 0.0098,
"step": 950
},
{
"epoch": 3.5424354243542435,
"grad_norm": 0.2683662474155426,
"learning_rate": 0.00012127921279212793,
"loss": 0.0107,
"step": 960
},
{
"epoch": 3.5793357933579335,
"grad_norm": 0.2808018624782562,
"learning_rate": 0.00012045920459204592,
"loss": 0.0129,
"step": 970
},
{
"epoch": 3.6162361623616235,
"grad_norm": 1.0000771284103394,
"learning_rate": 0.00011963919639196393,
"loss": 0.0148,
"step": 980
},
{
"epoch": 3.6531365313653135,
"grad_norm": 0.16155269742012024,
"learning_rate": 0.00011881918819188192,
"loss": 0.0118,
"step": 990
},
{
"epoch": 3.6900369003690034,
"grad_norm": 0.053899142891168594,
"learning_rate": 0.00011799917999179993,
"loss": 0.0113,
"step": 1000
},
{
"epoch": 3.6900369003690034,
"eval_accuracy": 0.9710982658959537,
"eval_loss": 0.007686651311814785,
"eval_runtime": 52.7853,
"eval_samples_per_second": 32.774,
"eval_steps_per_second": 4.111,
"step": 1000
},
{
"epoch": 3.726937269372694,
"grad_norm": 0.23413535952568054,
"learning_rate": 0.0001171791717917179,
"loss": 0.0118,
"step": 1010
},
{
"epoch": 3.763837638376384,
"grad_norm": 0.19596587121486664,
"learning_rate": 0.00011635916359163593,
"loss": 0.0069,
"step": 1020
},
{
"epoch": 3.800738007380074,
"grad_norm": 0.28500398993492126,
"learning_rate": 0.0001155391553915539,
"loss": 0.009,
"step": 1030
},
{
"epoch": 3.837638376383764,
"grad_norm": 0.3002331554889679,
"learning_rate": 0.00011471914719147193,
"loss": 0.0096,
"step": 1040
},
{
"epoch": 3.874538745387454,
"grad_norm": 0.21130523085594177,
"learning_rate": 0.0001138991389913899,
"loss": 0.0122,
"step": 1050
},
{
"epoch": 3.911439114391144,
"grad_norm": 0.14506787061691284,
"learning_rate": 0.00011307913079130793,
"loss": 0.0095,
"step": 1060
},
{
"epoch": 3.948339483394834,
"grad_norm": 0.3295513987541199,
"learning_rate": 0.0001122591225912259,
"loss": 0.0122,
"step": 1070
},
{
"epoch": 3.985239852398524,
"grad_norm": 0.07300284504890442,
"learning_rate": 0.00011143911439114391,
"loss": 0.0067,
"step": 1080
},
{
"epoch": 4.022140221402214,
"grad_norm": 0.03413034602999687,
"learning_rate": 0.0001106191061910619,
"loss": 0.0119,
"step": 1090
},
{
"epoch": 4.059040590405904,
"grad_norm": 0.06467791646718979,
"learning_rate": 0.00010979909799097991,
"loss": 0.0054,
"step": 1100
},
{
"epoch": 4.059040590405904,
"eval_accuracy": 0.9843930635838151,
"eval_loss": 0.00683161336928606,
"eval_runtime": 52.9373,
"eval_samples_per_second": 32.68,
"eval_steps_per_second": 4.099,
"step": 1100
},
{
"epoch": 4.095940959409594,
"grad_norm": 0.09930551052093506,
"learning_rate": 0.0001089790897908979,
"loss": 0.0057,
"step": 1110
},
{
"epoch": 4.132841328413284,
"grad_norm": 0.11587420105934143,
"learning_rate": 0.00010815908159081591,
"loss": 0.0065,
"step": 1120
},
{
"epoch": 4.169741697416974,
"grad_norm": 0.042286355048418045,
"learning_rate": 0.0001073390733907339,
"loss": 0.0067,
"step": 1130
},
{
"epoch": 4.206642066420664,
"grad_norm": 0.16917578876018524,
"learning_rate": 0.00010651906519065191,
"loss": 0.0039,
"step": 1140
},
{
"epoch": 4.243542435424354,
"grad_norm": 0.16002944111824036,
"learning_rate": 0.0001056990569905699,
"loss": 0.0066,
"step": 1150
},
{
"epoch": 4.280442804428044,
"grad_norm": 0.049849651753902435,
"learning_rate": 0.00010487904879048791,
"loss": 0.0049,
"step": 1160
},
{
"epoch": 4.317343173431734,
"grad_norm": 0.5795335173606873,
"learning_rate": 0.0001040590405904059,
"loss": 0.007,
"step": 1170
},
{
"epoch": 4.354243542435424,
"grad_norm": 0.01594790630042553,
"learning_rate": 0.00010323903239032391,
"loss": 0.0062,
"step": 1180
},
{
"epoch": 4.391143911439114,
"grad_norm": 0.12844012677669525,
"learning_rate": 0.0001024190241902419,
"loss": 0.0045,
"step": 1190
},
{
"epoch": 4.428044280442805,
"grad_norm": 0.053635355085134506,
"learning_rate": 0.00010159901599015991,
"loss": 0.0047,
"step": 1200
},
{
"epoch": 4.428044280442805,
"eval_accuracy": 0.9849710982658959,
"eval_loss": 0.004636078607290983,
"eval_runtime": 52.7596,
"eval_samples_per_second": 32.79,
"eval_steps_per_second": 4.113,
"step": 1200
},
{
"epoch": 4.464944649446495,
"grad_norm": 0.048139333724975586,
"learning_rate": 0.0001007790077900779,
"loss": 0.0051,
"step": 1210
},
{
"epoch": 4.501845018450185,
"grad_norm": 0.05954507365822792,
"learning_rate": 9.995899958999591e-05,
"loss": 0.0076,
"step": 1220
},
{
"epoch": 4.538745387453875,
"grad_norm": 0.07452525943517685,
"learning_rate": 9.91389913899139e-05,
"loss": 0.0041,
"step": 1230
},
{
"epoch": 4.575645756457565,
"grad_norm": 0.008173154667019844,
"learning_rate": 9.83189831898319e-05,
"loss": 0.0047,
"step": 1240
},
{
"epoch": 4.612546125461255,
"grad_norm": 0.062416139990091324,
"learning_rate": 9.74989749897499e-05,
"loss": 0.0058,
"step": 1250
},
{
"epoch": 4.649446494464945,
"grad_norm": 0.3318597972393036,
"learning_rate": 9.66789667896679e-05,
"loss": 0.0052,
"step": 1260
},
{
"epoch": 4.686346863468635,
"grad_norm": 0.1266857534646988,
"learning_rate": 9.58589585895859e-05,
"loss": 0.0079,
"step": 1270
},
{
"epoch": 4.723247232472325,
"grad_norm": 0.061126936227083206,
"learning_rate": 9.50389503895039e-05,
"loss": 0.0051,
"step": 1280
},
{
"epoch": 4.760147601476015,
"grad_norm": 0.09749143570661545,
"learning_rate": 9.42189421894219e-05,
"loss": 0.0078,
"step": 1290
},
{
"epoch": 4.797047970479705,
"grad_norm": 0.34020960330963135,
"learning_rate": 9.33989339893399e-05,
"loss": 0.0056,
"step": 1300
},
{
"epoch": 4.797047970479705,
"eval_accuracy": 0.9901734104046243,
"eval_loss": 0.0040424894541502,
"eval_runtime": 52.7044,
"eval_samples_per_second": 32.825,
"eval_steps_per_second": 4.117,
"step": 1300
},
{
"epoch": 4.833948339483395,
"grad_norm": 0.19669051468372345,
"learning_rate": 9.25789257892579e-05,
"loss": 0.0075,
"step": 1310
},
{
"epoch": 4.870848708487085,
"grad_norm": 0.09900686144828796,
"learning_rate": 9.175891758917589e-05,
"loss": 0.0077,
"step": 1320
},
{
"epoch": 4.907749077490775,
"grad_norm": 0.1532040238380432,
"learning_rate": 9.09389093890939e-05,
"loss": 0.0067,
"step": 1330
},
{
"epoch": 4.944649446494465,
"grad_norm": 0.051097236573696136,
"learning_rate": 9.011890118901189e-05,
"loss": 0.0039,
"step": 1340
},
{
"epoch": 4.9815498154981555,
"grad_norm": 0.05327645316720009,
"learning_rate": 8.92988929889299e-05,
"loss": 0.0055,
"step": 1350
},
{
"epoch": 5.018450184501845,
"grad_norm": 0.026003248989582062,
"learning_rate": 8.847888478884789e-05,
"loss": 0.0055,
"step": 1360
},
{
"epoch": 5.055350553505535,
"grad_norm": 0.11153847724199295,
"learning_rate": 8.76588765887659e-05,
"loss": 0.0044,
"step": 1370
},
{
"epoch": 5.092250922509225,
"grad_norm": 0.02700236812233925,
"learning_rate": 8.683886838868389e-05,
"loss": 0.0028,
"step": 1380
},
{
"epoch": 5.129151291512915,
"grad_norm": 0.07289482653141022,
"learning_rate": 8.60188601886019e-05,
"loss": 0.0029,
"step": 1390
},
{
"epoch": 5.166051660516605,
"grad_norm": 0.0497610978782177,
"learning_rate": 8.519885198851989e-05,
"loss": 0.0026,
"step": 1400
},
{
"epoch": 5.166051660516605,
"eval_accuracy": 0.992485549132948,
"eval_loss": 0.003214773256331682,
"eval_runtime": 52.8552,
"eval_samples_per_second": 32.731,
"eval_steps_per_second": 4.106,
"step": 1400
},
{
"epoch": 5.202952029520295,
"grad_norm": 0.15320487320423126,
"learning_rate": 8.43788437884379e-05,
"loss": 0.0041,
"step": 1410
},
{
"epoch": 5.239852398523985,
"grad_norm": 0.018703853711485863,
"learning_rate": 8.355883558835589e-05,
"loss": 0.0041,
"step": 1420
},
{
"epoch": 5.276752767527675,
"grad_norm": 0.007155160885304213,
"learning_rate": 8.273882738827388e-05,
"loss": 0.0028,
"step": 1430
},
{
"epoch": 5.313653136531365,
"grad_norm": 0.026553437113761902,
"learning_rate": 8.191881918819189e-05,
"loss": 0.0046,
"step": 1440
},
{
"epoch": 5.350553505535055,
"grad_norm": 0.031090274453163147,
"learning_rate": 8.109881098810988e-05,
"loss": 0.0044,
"step": 1450
},
{
"epoch": 5.387453874538745,
"grad_norm": 0.1262507289648056,
"learning_rate": 8.027880278802789e-05,
"loss": 0.0033,
"step": 1460
},
{
"epoch": 5.424354243542435,
"grad_norm": 0.17291103303432465,
"learning_rate": 7.945879458794588e-05,
"loss": 0.0043,
"step": 1470
},
{
"epoch": 5.461254612546125,
"grad_norm": 0.036809444427490234,
"learning_rate": 7.863878638786389e-05,
"loss": 0.0038,
"step": 1480
},
{
"epoch": 5.498154981549815,
"grad_norm": 0.027763400226831436,
"learning_rate": 7.781877818778188e-05,
"loss": 0.002,
"step": 1490
},
{
"epoch": 5.535055350553505,
"grad_norm": 0.10087448358535767,
"learning_rate": 7.699876998769989e-05,
"loss": 0.0037,
"step": 1500
},
{
"epoch": 5.535055350553505,
"eval_accuracy": 0.9936416184971099,
"eval_loss": 0.002714467002078891,
"eval_runtime": 52.8635,
"eval_samples_per_second": 32.726,
"eval_steps_per_second": 4.105,
"step": 1500
},
{
"epoch": 5.571955719557195,
"grad_norm": 0.04997282847762108,
"learning_rate": 7.617876178761788e-05,
"loss": 0.0024,
"step": 1510
},
{
"epoch": 5.608856088560886,
"grad_norm": 0.015307437628507614,
"learning_rate": 7.535875358753589e-05,
"loss": 0.0038,
"step": 1520
},
{
"epoch": 5.645756457564576,
"grad_norm": 0.0023889692965894938,
"learning_rate": 7.453874538745388e-05,
"loss": 0.0038,
"step": 1530
},
{
"epoch": 5.682656826568266,
"grad_norm": 0.040821630507707596,
"learning_rate": 7.371873718737188e-05,
"loss": 0.0041,
"step": 1540
},
{
"epoch": 5.719557195571956,
"grad_norm": 0.09734688699245453,
"learning_rate": 7.289872898728988e-05,
"loss": 0.0038,
"step": 1550
},
{
"epoch": 5.756457564575646,
"grad_norm": 0.016573524102568626,
"learning_rate": 7.207872078720787e-05,
"loss": 0.0025,
"step": 1560
},
{
"epoch": 5.793357933579336,
"grad_norm": 0.009829455986618996,
"learning_rate": 7.125871258712588e-05,
"loss": 0.0041,
"step": 1570
},
{
"epoch": 5.830258302583026,
"grad_norm": 0.07733350247144699,
"learning_rate": 7.043870438704387e-05,
"loss": 0.0024,
"step": 1580
},
{
"epoch": 5.867158671586716,
"grad_norm": 0.04763353243470192,
"learning_rate": 6.961869618696188e-05,
"loss": 0.0025,
"step": 1590
},
{
"epoch": 5.904059040590406,
"grad_norm": 0.009967833757400513,
"learning_rate": 6.879868798687987e-05,
"loss": 0.0039,
"step": 1600
},
{
"epoch": 5.904059040590406,
"eval_accuracy": 0.9976878612716763,
"eval_loss": 0.0022707099560648203,
"eval_runtime": 53.0061,
"eval_samples_per_second": 32.638,
"eval_steps_per_second": 4.094,
"step": 1600
},
{
"epoch": 5.940959409594096,
"grad_norm": 0.020813655108213425,
"learning_rate": 6.797867978679788e-05,
"loss": 0.0029,
"step": 1610
},
{
"epoch": 5.977859778597786,
"grad_norm": 0.062338754534721375,
"learning_rate": 6.715867158671587e-05,
"loss": 0.0023,
"step": 1620
},
{
"epoch": 6.014760147601476,
"grad_norm": 0.018345272168517113,
"learning_rate": 6.633866338663388e-05,
"loss": 0.0029,
"step": 1630
},
{
"epoch": 6.051660516605166,
"grad_norm": 0.023753851652145386,
"learning_rate": 6.551865518655187e-05,
"loss": 0.0018,
"step": 1640
},
{
"epoch": 6.088560885608856,
"grad_norm": 0.016533376649022102,
"learning_rate": 6.469864698646987e-05,
"loss": 0.0025,
"step": 1650
},
{
"epoch": 6.125461254612546,
"grad_norm": 0.008262280374765396,
"learning_rate": 6.387863878638787e-05,
"loss": 0.0025,
"step": 1660
},
{
"epoch": 6.162361623616236,
"grad_norm": 0.010828894563019276,
"learning_rate": 6.305863058630586e-05,
"loss": 0.0018,
"step": 1670
},
{
"epoch": 6.199261992619927,
"grad_norm": 0.01505562849342823,
"learning_rate": 6.223862238622387e-05,
"loss": 0.0017,
"step": 1680
},
{
"epoch": 6.236162361623617,
"grad_norm": 0.0033724599052220583,
"learning_rate": 6.141861418614186e-05,
"loss": 0.0018,
"step": 1690
},
{
"epoch": 6.273062730627307,
"grad_norm": 0.026863617822527885,
"learning_rate": 6.0598605986059866e-05,
"loss": 0.0019,
"step": 1700
},
{
"epoch": 6.273062730627307,
"eval_accuracy": 0.9971098265895953,
"eval_loss": 0.0018508523935452104,
"eval_runtime": 53.4671,
"eval_samples_per_second": 32.356,
"eval_steps_per_second": 4.059,
"step": 1700
},
{
"epoch": 6.3099630996309966,
"grad_norm": 0.011449555866420269,
"learning_rate": 5.9778597785977866e-05,
"loss": 0.0019,
"step": 1710
},
{
"epoch": 6.3468634686346865,
"grad_norm": 0.04869832843542099,
"learning_rate": 5.8958589585895866e-05,
"loss": 0.0022,
"step": 1720
},
{
"epoch": 6.3837638376383765,
"grad_norm": 0.2211851179599762,
"learning_rate": 5.813858138581386e-05,
"loss": 0.0021,
"step": 1730
},
{
"epoch": 6.4206642066420665,
"grad_norm": 0.021321425214409828,
"learning_rate": 5.731857318573186e-05,
"loss": 0.0017,
"step": 1740
},
{
"epoch": 6.4575645756457565,
"grad_norm": 0.014219704084098339,
"learning_rate": 5.649856498564986e-05,
"loss": 0.0026,
"step": 1750
},
{
"epoch": 6.4944649446494465,
"grad_norm": 0.011448672972619534,
"learning_rate": 5.567855678556786e-05,
"loss": 0.0026,
"step": 1760
},
{
"epoch": 6.531365313653136,
"grad_norm": 0.008601737208664417,
"learning_rate": 5.485854858548586e-05,
"loss": 0.0016,
"step": 1770
},
{
"epoch": 6.568265682656826,
"grad_norm": 0.013017091900110245,
"learning_rate": 5.403854038540386e-05,
"loss": 0.0023,
"step": 1780
},
{
"epoch": 6.605166051660516,
"grad_norm": 0.008623339235782623,
"learning_rate": 5.321853218532186e-05,
"loss": 0.0019,
"step": 1790
},
{
"epoch": 6.642066420664206,
"grad_norm": 0.027804844081401825,
"learning_rate": 5.239852398523986e-05,
"loss": 0.0019,
"step": 1800
},
{
"epoch": 6.642066420664206,
"eval_accuracy": 0.9988439306358381,
"eval_loss": 0.0016573916655033827,
"eval_runtime": 53.3851,
"eval_samples_per_second": 32.406,
"eval_steps_per_second": 4.065,
"step": 1800
},
{
"epoch": 6.678966789667896,
"grad_norm": 0.027201993390917778,
"learning_rate": 5.1578515785157857e-05,
"loss": 0.0021,
"step": 1810
},
{
"epoch": 6.715867158671586,
"grad_norm": 0.07681945711374283,
"learning_rate": 5.0758507585075856e-05,
"loss": 0.0022,
"step": 1820
},
{
"epoch": 6.752767527675276,
"grad_norm": 0.010648882016539574,
"learning_rate": 4.9938499384993856e-05,
"loss": 0.0017,
"step": 1830
},
{
"epoch": 6.789667896678967,
"grad_norm": 0.01609162986278534,
"learning_rate": 4.9118491184911856e-05,
"loss": 0.0016,
"step": 1840
},
{
"epoch": 6.826568265682657,
"grad_norm": 0.013306355103850365,
"learning_rate": 4.829848298482985e-05,
"loss": 0.0016,
"step": 1850
},
{
"epoch": 6.863468634686347,
"grad_norm": 0.015135644935071468,
"learning_rate": 4.747847478474785e-05,
"loss": 0.002,
"step": 1860
},
{
"epoch": 6.900369003690037,
"grad_norm": 0.019170399755239487,
"learning_rate": 4.665846658466585e-05,
"loss": 0.0015,
"step": 1870
},
{
"epoch": 6.937269372693727,
"grad_norm": 0.02915087901055813,
"learning_rate": 4.583845838458385e-05,
"loss": 0.0012,
"step": 1880
},
{
"epoch": 6.974169741697417,
"grad_norm": 0.023285195231437683,
"learning_rate": 4.501845018450185e-05,
"loss": 0.0019,
"step": 1890
},
{
"epoch": 7.011070110701107,
"grad_norm": 0.013183875940740108,
"learning_rate": 4.419844198441985e-05,
"loss": 0.0016,
"step": 1900
},
{
"epoch": 7.011070110701107,
"eval_accuracy": 1.0,
"eval_loss": 0.0015279006911441684,
"eval_runtime": 53.4443,
"eval_samples_per_second": 32.37,
"eval_steps_per_second": 4.06,
"step": 1900
},
{
"epoch": 7.047970479704797,
"grad_norm": 0.008983751758933067,
"learning_rate": 4.337843378433785e-05,
"loss": 0.0015,
"step": 1910
},
{
"epoch": 7.084870848708487,
"grad_norm": 0.003578983247280121,
"learning_rate": 4.2558425584255847e-05,
"loss": 0.0013,
"step": 1920
},
{
"epoch": 7.121771217712177,
"grad_norm": 0.009858568198978901,
"learning_rate": 4.1738417384173846e-05,
"loss": 0.0017,
"step": 1930
},
{
"epoch": 7.158671586715867,
"grad_norm": 0.01723896525800228,
"learning_rate": 4.0918409184091846e-05,
"loss": 0.0011,
"step": 1940
},
{
"epoch": 7.195571955719557,
"grad_norm": 0.007069133687764406,
"learning_rate": 4.0098400984009846e-05,
"loss": 0.0012,
"step": 1950
},
{
"epoch": 7.232472324723247,
"grad_norm": 0.006838188972324133,
"learning_rate": 3.927839278392784e-05,
"loss": 0.0021,
"step": 1960
},
{
"epoch": 7.269372693726937,
"grad_norm": 0.011245744302868843,
"learning_rate": 3.845838458384584e-05,
"loss": 0.0018,
"step": 1970
},
{
"epoch": 7.306273062730627,
"grad_norm": 0.002784464741125703,
"learning_rate": 3.763837638376384e-05,
"loss": 0.0013,
"step": 1980
},
{
"epoch": 7.343173431734318,
"grad_norm": 0.007723368238657713,
"learning_rate": 3.681836818368184e-05,
"loss": 0.0017,
"step": 1990
},
{
"epoch": 7.380073800738008,
"grad_norm": 0.016839824616909027,
"learning_rate": 3.599835998359984e-05,
"loss": 0.002,
"step": 2000
},
{
"epoch": 7.380073800738008,
"eval_accuracy": 1.0,
"eval_loss": 0.0014427828136831522,
"eval_runtime": 53.204,
"eval_samples_per_second": 32.516,
"eval_steps_per_second": 4.079,
"step": 2000
},
{
"epoch": 7.416974169741698,
"grad_norm": 0.015248976647853851,
"learning_rate": 3.517835178351784e-05,
"loss": 0.0018,
"step": 2010
},
{
"epoch": 7.453874538745388,
"grad_norm": 0.013244880363345146,
"learning_rate": 3.435834358343584e-05,
"loss": 0.0022,
"step": 2020
},
{
"epoch": 7.490774907749078,
"grad_norm": 0.009770199656486511,
"learning_rate": 3.353833538335384e-05,
"loss": 0.0017,
"step": 2030
},
{
"epoch": 7.527675276752768,
"grad_norm": 0.010310194455087185,
"learning_rate": 3.2718327183271836e-05,
"loss": 0.0018,
"step": 2040
},
{
"epoch": 7.564575645756458,
"grad_norm": 0.0034243548288941383,
"learning_rate": 3.1898318983189836e-05,
"loss": 0.0014,
"step": 2050
},
{
"epoch": 7.601476014760148,
"grad_norm": 0.005529376212507486,
"learning_rate": 3.1078310783107836e-05,
"loss": 0.0013,
"step": 2060
},
{
"epoch": 7.638376383763838,
"grad_norm": 0.005842520389705896,
"learning_rate": 3.0258302583025832e-05,
"loss": 0.0017,
"step": 2070
},
{
"epoch": 7.675276752767528,
"grad_norm": 0.00041994985076598823,
"learning_rate": 2.9438294382943832e-05,
"loss": 0.0016,
"step": 2080
},
{
"epoch": 7.712177121771218,
"grad_norm": 0.005925205536186695,
"learning_rate": 2.861828618286183e-05,
"loss": 0.0013,
"step": 2090
},
{
"epoch": 7.749077490774908,
"grad_norm": 0.00783997867256403,
"learning_rate": 2.779827798277983e-05,
"loss": 0.0013,
"step": 2100
},
{
"epoch": 7.749077490774908,
"eval_accuracy": 1.0,
"eval_loss": 0.0013818982988595963,
"eval_runtime": 53.3908,
"eval_samples_per_second": 32.403,
"eval_steps_per_second": 4.064,
"step": 2100
},
{
"epoch": 7.785977859778598,
"grad_norm": 0.0075693195685744286,
"learning_rate": 2.6978269782697828e-05,
"loss": 0.0018,
"step": 2110
},
{
"epoch": 7.822878228782288,
"grad_norm": 0.00795042049139738,
"learning_rate": 2.6158261582615827e-05,
"loss": 0.0011,
"step": 2120
},
{
"epoch": 7.8597785977859775,
"grad_norm": 0.00859429594129324,
"learning_rate": 2.5338253382533827e-05,
"loss": 0.0015,
"step": 2130
},
{
"epoch": 7.8966789667896675,
"grad_norm": 0.005215910263359547,
"learning_rate": 2.4518245182451827e-05,
"loss": 0.0016,
"step": 2140
},
{
"epoch": 7.9335793357933575,
"grad_norm": 0.0038353356067091227,
"learning_rate": 2.3698236982369827e-05,
"loss": 0.0015,
"step": 2150
},
{
"epoch": 7.970479704797048,
"grad_norm": 0.00975708942860365,
"learning_rate": 2.2878228782287826e-05,
"loss": 0.0015,
"step": 2160
},
{
"epoch": 8.007380073800737,
"grad_norm": 0.006485125049948692,
"learning_rate": 2.2058220582205823e-05,
"loss": 0.0012,
"step": 2170
},
{
"epoch": 8.044280442804428,
"grad_norm": 0.013848377391695976,
"learning_rate": 2.1238212382123822e-05,
"loss": 0.0014,
"step": 2180
},
{
"epoch": 8.081180811808117,
"grad_norm": 0.012979693710803986,
"learning_rate": 2.0418204182041822e-05,
"loss": 0.0015,
"step": 2190
},
{
"epoch": 8.118081180811808,
"grad_norm": 0.005968283861875534,
"learning_rate": 1.9598195981959822e-05,
"loss": 0.0015,
"step": 2200
},
{
"epoch": 8.118081180811808,
"eval_accuracy": 1.0,
"eval_loss": 0.0013393920380622149,
"eval_runtime": 53.2752,
"eval_samples_per_second": 32.473,
"eval_steps_per_second": 4.073,
"step": 2200
},
{
"epoch": 8.154981549815497,
"grad_norm": 0.010278232395648956,
"learning_rate": 1.877818778187782e-05,
"loss": 0.0009,
"step": 2210
},
{
"epoch": 8.191881918819188,
"grad_norm": 0.007073582150042057,
"learning_rate": 1.795817958179582e-05,
"loss": 0.0016,
"step": 2220
},
{
"epoch": 8.228782287822877,
"grad_norm": 0.0020103720016777515,
"learning_rate": 1.7138171381713817e-05,
"loss": 0.0014,
"step": 2230
},
{
"epoch": 8.265682656826568,
"grad_norm": 0.006469924468547106,
"learning_rate": 1.6318163181631817e-05,
"loss": 0.0014,
"step": 2240
},
{
"epoch": 8.302583025830259,
"grad_norm": 0.006924558896571398,
"learning_rate": 1.5498154981549817e-05,
"loss": 0.0013,
"step": 2250
},
{
"epoch": 8.339483394833948,
"grad_norm": 0.010701497085392475,
"learning_rate": 1.4678146781467817e-05,
"loss": 0.0013,
"step": 2260
},
{
"epoch": 8.376383763837639,
"grad_norm": 0.012286316603422165,
"learning_rate": 1.3858138581385816e-05,
"loss": 0.0016,
"step": 2270
},
{
"epoch": 8.413284132841328,
"grad_norm": 0.010458541102707386,
"learning_rate": 1.3038130381303814e-05,
"loss": 0.0018,
"step": 2280
},
{
"epoch": 8.450184501845019,
"grad_norm": 0.010043789632618427,
"learning_rate": 1.2218122181221812e-05,
"loss": 0.0012,
"step": 2290
},
{
"epoch": 8.487084870848708,
"grad_norm": 0.00660703144967556,
"learning_rate": 1.1398113981139812e-05,
"loss": 0.0011,
"step": 2300
},
{
"epoch": 8.487084870848708,
"eval_accuracy": 1.0,
"eval_loss": 0.0013166143326088786,
"eval_runtime": 53.3458,
"eval_samples_per_second": 32.43,
"eval_steps_per_second": 4.068,
"step": 2300
},
{
"epoch": 8.523985239852399,
"grad_norm": 0.006171096581965685,
"learning_rate": 1.057810578105781e-05,
"loss": 0.0014,
"step": 2310
},
{
"epoch": 8.560885608856088,
"grad_norm": 0.005463199689984322,
"learning_rate": 9.75809758097581e-06,
"loss": 0.0013,
"step": 2320
},
{
"epoch": 8.597785977859779,
"grad_norm": 0.008069201372563839,
"learning_rate": 8.93808938089381e-06,
"loss": 0.0015,
"step": 2330
},
{
"epoch": 8.634686346863468,
"grad_norm": 0.007250032387673855,
"learning_rate": 8.118081180811808e-06,
"loss": 0.0017,
"step": 2340
},
{
"epoch": 8.671586715867159,
"grad_norm": 0.011325874365866184,
"learning_rate": 7.298072980729807e-06,
"loss": 0.0014,
"step": 2350
},
{
"epoch": 8.708487084870848,
"grad_norm": 0.019912155345082283,
"learning_rate": 6.478064780647806e-06,
"loss": 0.0018,
"step": 2360
},
{
"epoch": 8.745387453874539,
"grad_norm": 0.0008021511603146791,
"learning_rate": 5.658056580565806e-06,
"loss": 0.0012,
"step": 2370
},
{
"epoch": 8.782287822878228,
"grad_norm": 0.01182828564196825,
"learning_rate": 4.838048380483805e-06,
"loss": 0.0016,
"step": 2380
},
{
"epoch": 8.819188191881919,
"grad_norm": 0.004345746710896492,
"learning_rate": 4.018040180401804e-06,
"loss": 0.001,
"step": 2390
},
{
"epoch": 8.85608856088561,
"grad_norm": 0.0077620395459234715,
"learning_rate": 3.198031980319803e-06,
"loss": 0.0013,
"step": 2400
},
{
"epoch": 8.85608856088561,
"eval_accuracy": 1.0,
"eval_loss": 0.0013051489368081093,
"eval_runtime": 52.9588,
"eval_samples_per_second": 32.667,
"eval_steps_per_second": 4.098,
"step": 2400
},
{
"epoch": 8.892988929889299,
"grad_norm": 0.01009498443454504,
"learning_rate": 2.3780237802378026e-06,
"loss": 0.0015,
"step": 2410
},
{
"epoch": 8.92988929889299,
"grad_norm": 0.00728220259770751,
"learning_rate": 1.5580155801558017e-06,
"loss": 0.0015,
"step": 2420
},
{
"epoch": 8.966789667896679,
"grad_norm": 0.014378752559423447,
"learning_rate": 7.380073800738008e-07,
"loss": 0.0014,
"step": 2430
},
{
"epoch": 9.0,
"step": 2439,
"total_flos": 3.017424657591816e+18,
"train_loss": 0.01402124272053813,
"train_runtime": 3085.6034,
"train_samples_per_second": 12.618,
"train_steps_per_second": 0.79
}
],
"logging_steps": 10,
"max_steps": 2439,
"num_input_tokens_seen": 0,
"num_train_epochs": 9,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.017424657591816e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}
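
A minimal sketch (not part of the repository) of how this state file can be inspected: it loads the JSON above and plots the logged training loss, eval loss, and eval accuracy against the global step. The only assumption is that the file has been downloaded locally as "trainer_state.json"; every field name used (log_history, step, loss, eval_loss, eval_accuracy) appears in the file itself.

import json

import matplotlib.pyplot as plt

# Load the trainer state saved by the Hugging Face Trainer.
with open("trainer_state.json") as f:
    state = json.load(f)

train_steps, train_loss = [], []
eval_steps, eval_loss, eval_acc = [], [], []

for entry in state["log_history"]:
    if "loss" in entry:
        # Training log entry (written every logging_steps = 10 steps).
        train_steps.append(entry["step"])
        train_loss.append(entry["loss"])
    elif "eval_loss" in entry:
        # Evaluation log entry (written every eval_steps = 100 steps).
        eval_steps.append(entry["step"])
        eval_loss.append(entry["eval_loss"])
        eval_acc.append(entry["eval_accuracy"])

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
ax1.plot(train_steps, train_loss, label="train loss")
ax1.plot(eval_steps, eval_loss, label="eval loss")
ax1.set_xlabel("step")
ax1.set_ylabel("loss")
ax1.legend()
ax2.plot(eval_steps, eval_acc, color="tab:green")
ax2.set_xlabel("step")
ax2.set_ylabel("eval accuracy")
fig.tight_layout()
plt.show()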