Training in progress, step 150, checkpoint
{
"best_metric": 0.09180089831352234,
"best_model_checkpoint": "miner_id_24/checkpoint-150",
"epoch": 0.14584346135148274,
"eval_steps": 25,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0009722897423432182,
"grad_norm": 0.8657536506652832,
"learning_rate": 2.9999999999999997e-05,
"loss": 0.2454,
"step": 1
},
{
"epoch": 0.0009722897423432182,
"eval_loss": 0.49785929918289185,
"eval_runtime": 3.5592,
"eval_samples_per_second": 14.048,
"eval_steps_per_second": 1.967,
"step": 1
},
{
"epoch": 0.0019445794846864365,
"grad_norm": 0.915185272693634,
"learning_rate": 5.9999999999999995e-05,
"loss": 0.2505,
"step": 2
},
{
"epoch": 0.002916869227029655,
"grad_norm": 0.9991545081138611,
"learning_rate": 8.999999999999999e-05,
"loss": 0.2478,
"step": 3
},
{
"epoch": 0.003889158969372873,
"grad_norm": 0.44825488328933716,
"learning_rate": 0.00011999999999999999,
"loss": 0.21,
"step": 4
},
{
"epoch": 0.004861448711716091,
"grad_norm": 0.28227540850639343,
"learning_rate": 0.00015,
"loss": 0.1547,
"step": 5
},
{
"epoch": 0.00583373845405931,
"grad_norm": 0.32708004117012024,
"learning_rate": 0.00017999999999999998,
"loss": 0.1404,
"step": 6
},
{
"epoch": 0.006806028196402528,
"grad_norm": 0.288328617811203,
"learning_rate": 0.00020999999999999998,
"loss": 0.1264,
"step": 7
},
{
"epoch": 0.007778317938745746,
"grad_norm": 0.25696828961372375,
"learning_rate": 0.00023999999999999998,
"loss": 0.1166,
"step": 8
},
{
"epoch": 0.008750607681088965,
"grad_norm": 0.23258011043071747,
"learning_rate": 0.00027,
"loss": 0.1004,
"step": 9
},
{
"epoch": 0.009722897423432183,
"grad_norm": 0.23922766745090485,
"learning_rate": 0.0003,
"loss": 0.0788,
"step": 10
},
{
"epoch": 0.0106951871657754,
"grad_norm": 0.387058287858963,
"learning_rate": 0.0002999794957488703,
"loss": 0.0928,
"step": 11
},
{
"epoch": 0.01166747690811862,
"grad_norm": 0.17249208688735962,
"learning_rate": 0.0002999179886011389,
"loss": 0.074,
"step": 12
},
{
"epoch": 0.012639766650461837,
"grad_norm": 0.1309785097837448,
"learning_rate": 0.0002998154953722457,
"loss": 0.0584,
"step": 13
},
{
"epoch": 0.013612056392805057,
"grad_norm": 0.1812700778245926,
"learning_rate": 0.00029967204408281613,
"loss": 0.0645,
"step": 14
},
{
"epoch": 0.014584346135148274,
"grad_norm": 0.12967181205749512,
"learning_rate": 0.00029948767395100045,
"loss": 0.0599,
"step": 15
},
{
"epoch": 0.015556635877491492,
"grad_norm": 0.12690943479537964,
"learning_rate": 0.0002992624353817517,
"loss": 0.0532,
"step": 16
},
{
"epoch": 0.01652892561983471,
"grad_norm": 0.14767378568649292,
"learning_rate": 0.0002989963899530457,
"loss": 0.0614,
"step": 17
},
{
"epoch": 0.01750121536217793,
"grad_norm": 0.11901306360960007,
"learning_rate": 0.00029868961039904624,
"loss": 0.0528,
"step": 18
},
{
"epoch": 0.018473505104521146,
"grad_norm": 0.13424420356750488,
"learning_rate": 0.00029834218059022024,
"loss": 0.0627,
"step": 19
},
{
"epoch": 0.019445794846864366,
"grad_norm": 0.13712000846862793,
"learning_rate": 0.00029795419551040833,
"loss": 0.0539,
"step": 20
},
{
"epoch": 0.020418084589207585,
"grad_norm": 0.16320212185382843,
"learning_rate": 0.00029752576123085736,
"loss": 0.0543,
"step": 21
},
{
"epoch": 0.0213903743315508,
"grad_norm": 0.11892902851104736,
"learning_rate": 0.0002970569948812214,
"loss": 0.0406,
"step": 22
},
{
"epoch": 0.02236266407389402,
"grad_norm": 0.17433710396289825,
"learning_rate": 0.0002965480246175399,
"loss": 0.0654,
"step": 23
},
{
"epoch": 0.02333495381623724,
"grad_norm": 0.18646477162837982,
"learning_rate": 0.0002959989895872009,
"loss": 0.0608,
"step": 24
},
{
"epoch": 0.024307243558580455,
"grad_norm": 0.16546477377414703,
"learning_rate": 0.0002954100398908995,
"loss": 0.07,
"step": 25
},
{
"epoch": 0.024307243558580455,
"eval_loss": 0.12305433303117752,
"eval_runtime": 3.8062,
"eval_samples_per_second": 13.136,
"eval_steps_per_second": 1.839,
"step": 25
},
{
"epoch": 0.025279533300923675,
"grad_norm": 0.156415656208992,
"learning_rate": 0.0002947813365416023,
"loss": 0.0802,
"step": 26
},
{
"epoch": 0.026251823043266894,
"grad_norm": 0.1979251652956009,
"learning_rate": 0.0002941130514205272,
"loss": 0.1002,
"step": 27
},
{
"epoch": 0.027224112785610113,
"grad_norm": 0.153579980134964,
"learning_rate": 0.0002934053672301536,
"loss": 0.0814,
"step": 28
},
{
"epoch": 0.02819640252795333,
"grad_norm": 0.2506410479545593,
"learning_rate": 0.00029265847744427303,
"loss": 0.0807,
"step": 29
},
{
"epoch": 0.02916869227029655,
"grad_norm": 0.1667831689119339,
"learning_rate": 0.00029187258625509513,
"loss": 0.0692,
"step": 30
},
{
"epoch": 0.030140982012639768,
"grad_norm": 0.1767091304063797,
"learning_rate": 0.00029104790851742417,
"loss": 0.0929,
"step": 31
},
{
"epoch": 0.031113271754982984,
"grad_norm": 0.1997700184583664,
"learning_rate": 0.0002901846696899191,
"loss": 0.1169,
"step": 32
},
{
"epoch": 0.03208556149732621,
"grad_norm": 0.4836466610431671,
"learning_rate": 0.00028928310577345606,
"loss": 0.1228,
"step": 33
},
{
"epoch": 0.03305785123966942,
"grad_norm": 0.20822934806346893,
"learning_rate": 0.0002883434632466077,
"loss": 0.1195,
"step": 34
},
{
"epoch": 0.03403014098201264,
"grad_norm": 0.2419443577528,
"learning_rate": 0.00028736599899825856,
"loss": 0.1413,
"step": 35
},
{
"epoch": 0.03500243072435586,
"grad_norm": 0.3413419723510742,
"learning_rate": 0.00028635098025737434,
"loss": 0.1595,
"step": 36
},
{
"epoch": 0.03597472046669908,
"grad_norm": 0.30608636140823364,
"learning_rate": 0.00028529868451994384,
"loss": 0.1827,
"step": 37
},
{
"epoch": 0.03694701020904229,
"grad_norm": 0.30667898058891296,
"learning_rate": 0.0002842093994731145,
"loss": 0.161,
"step": 38
},
{
"epoch": 0.037919299951385516,
"grad_norm": 0.3554874360561371,
"learning_rate": 0.00028308342291654174,
"loss": 0.1801,
"step": 39
},
{
"epoch": 0.03889158969372873,
"grad_norm": 0.3086106777191162,
"learning_rate": 0.00028192106268097334,
"loss": 0.1888,
"step": 40
},
{
"epoch": 0.03986387943607195,
"grad_norm": 0.26156407594680786,
"learning_rate": 0.00028072263654409154,
"loss": 0.1606,
"step": 41
},
{
"epoch": 0.04083616917841517,
"grad_norm": 0.35756373405456543,
"learning_rate": 0.0002794884721436361,
"loss": 0.2412,
"step": 42
},
{
"epoch": 0.041808458920758386,
"grad_norm": 0.3686249852180481,
"learning_rate": 0.00027821890688783083,
"loss": 0.2308,
"step": 43
},
{
"epoch": 0.0427807486631016,
"grad_norm": 0.4308541417121887,
"learning_rate": 0.0002769142878631403,
"loss": 0.2267,
"step": 44
},
{
"epoch": 0.043753038405444825,
"grad_norm": 0.5061919093132019,
"learning_rate": 0.00027557497173937923,
"loss": 0.3109,
"step": 45
},
{
"epoch": 0.04472532814778804,
"grad_norm": 0.43579161167144775,
"learning_rate": 0.000274201324672203,
"loss": 0.2456,
"step": 46
},
{
"epoch": 0.045697617890131256,
"grad_norm": 0.6933690905570984,
"learning_rate": 0.00027279372220300385,
"loss": 0.4574,
"step": 47
},
{
"epoch": 0.04666990763247448,
"grad_norm": 0.6277015209197998,
"learning_rate": 0.0002713525491562421,
"loss": 0.4584,
"step": 48
},
{
"epoch": 0.047642197374817695,
"grad_norm": 0.8218262195587158,
"learning_rate": 0.00026987819953423867,
"loss": 0.5391,
"step": 49
},
{
"epoch": 0.04861448711716091,
"grad_norm": 1.0224953889846802,
"learning_rate": 0.00026837107640945905,
"loss": 0.5288,
"step": 50
},
{
"epoch": 0.04861448711716091,
"eval_loss": 0.1336946040391922,
"eval_runtime": 4.0118,
"eval_samples_per_second": 12.463,
"eval_steps_per_second": 1.745,
"step": 50
},
{
"epoch": 0.049586776859504134,
"grad_norm": 1.1134988069534302,
"learning_rate": 0.0002668315918143169,
"loss": 0.114,
"step": 51
},
{
"epoch": 0.05055906660184735,
"grad_norm": 0.23551532626152039,
"learning_rate": 0.00026526016662852886,
"loss": 0.0835,
"step": 52
},
{
"epoch": 0.051531356344190565,
"grad_norm": 0.24570299685001373,
"learning_rate": 0.00026365723046405023,
"loss": 0.0685,
"step": 53
},
{
"epoch": 0.05250364608653379,
"grad_norm": 0.19383001327514648,
"learning_rate": 0.0002620232215476231,
"loss": 0.05,
"step": 54
},
{
"epoch": 0.053475935828877004,
"grad_norm": 0.23110246658325195,
"learning_rate": 0.0002603585866009697,
"loss": 0.0522,
"step": 55
},
{
"epoch": 0.05444822557122023,
"grad_norm": 0.18834188580513,
"learning_rate": 0.00025866378071866334,
"loss": 0.0732,
"step": 56
},
{
"epoch": 0.05542051531356344,
"grad_norm": 0.14947369694709778,
"learning_rate": 0.00025693926724370956,
"loss": 0.0514,
"step": 57
},
{
"epoch": 0.05639280505590666,
"grad_norm": 0.13261213898658752,
"learning_rate": 0.00025518551764087326,
"loss": 0.0529,
"step": 58
},
{
"epoch": 0.05736509479824988,
"grad_norm": 0.2039453238248825,
"learning_rate": 0.00025340301136778483,
"loss": 0.0555,
"step": 59
},
{
"epoch": 0.0583373845405931,
"grad_norm": 0.18386036157608032,
"learning_rate": 0.00025159223574386114,
"loss": 0.067,
"step": 60
},
{
"epoch": 0.05930967428293631,
"grad_norm": 0.11324752122163773,
"learning_rate": 0.0002497536858170772,
"loss": 0.0413,
"step": 61
},
{
"epoch": 0.060281964025279536,
"grad_norm": 0.11208714544773102,
"learning_rate": 0.00024788786422862526,
"loss": 0.0496,
"step": 62
},
{
"epoch": 0.06125425376762275,
"grad_norm": 0.09385313838720322,
"learning_rate": 0.00024599528107549745,
"loss": 0.047,
"step": 63
},
{
"epoch": 0.06222654350996597,
"grad_norm": 0.08256314694881439,
"learning_rate": 0.00024407645377103054,
"loss": 0.0368,
"step": 64
},
{
"epoch": 0.06319883325230918,
"grad_norm": 0.08847391605377197,
"learning_rate": 0.00024213190690345018,
"loss": 0.0375,
"step": 65
},
{
"epoch": 0.06417112299465241,
"grad_norm": 0.1306406408548355,
"learning_rate": 0.00024016217209245374,
"loss": 0.048,
"step": 66
},
{
"epoch": 0.06514341273699563,
"grad_norm": 0.11775556206703186,
"learning_rate": 0.00023816778784387094,
"loss": 0.042,
"step": 67
},
{
"epoch": 0.06611570247933884,
"grad_norm": 0.08764883130788803,
"learning_rate": 0.0002361492994024415,
"loss": 0.0364,
"step": 68
},
{
"epoch": 0.06708799222168206,
"grad_norm": 0.12429194152355194,
"learning_rate": 0.0002341072586027509,
"loss": 0.0485,
"step": 69
},
{
"epoch": 0.06806028196402528,
"grad_norm": 0.09290876984596252,
"learning_rate": 0.00023204222371836405,
"loss": 0.0435,
"step": 70
},
{
"epoch": 0.06903257170636849,
"grad_norm": 0.11098179966211319,
"learning_rate": 0.00022995475930919905,
"loss": 0.0558,
"step": 71
},
{
"epoch": 0.07000486144871172,
"grad_norm": 0.11447007209062576,
"learning_rate": 0.00022784543606718227,
"loss": 0.0694,
"step": 72
},
{
"epoch": 0.07097715119105494,
"grad_norm": 0.09141000360250473,
"learning_rate": 0.00022571483066022657,
"loss": 0.038,
"step": 73
},
{
"epoch": 0.07194944093339815,
"grad_norm": 0.109103262424469,
"learning_rate": 0.0002235635255745762,
"loss": 0.0489,
"step": 74
},
{
"epoch": 0.07292173067574137,
"grad_norm": 0.1455768644809723,
"learning_rate": 0.00022139210895556104,
"loss": 0.0818,
"step": 75
},
{
"epoch": 0.07292173067574137,
"eval_loss": 0.10055980086326599,
"eval_runtime": 3.9854,
"eval_samples_per_second": 12.546,
"eval_steps_per_second": 1.756,
"step": 75
},
{
"epoch": 0.07389402041808459,
"grad_norm": 0.15560583770275116,
"learning_rate": 0.00021920117444680317,
"loss": 0.094,
"step": 76
},
{
"epoch": 0.0748663101604278,
"grad_norm": 0.15243405103683472,
"learning_rate": 0.00021699132102792097,
"loss": 0.0811,
"step": 77
},
{
"epoch": 0.07583859990277103,
"grad_norm": 0.14389897882938385,
"learning_rate": 0.0002147631528507739,
"loss": 0.0955,
"step": 78
},
{
"epoch": 0.07681088964511425,
"grad_norm": 0.21825049817562103,
"learning_rate": 0.00021251727907429355,
"loss": 0.1112,
"step": 79
},
{
"epoch": 0.07778317938745746,
"grad_norm": 0.18719053268432617,
"learning_rate": 0.0002102543136979454,
"loss": 0.1203,
"step": 80
},
{
"epoch": 0.07875546912980068,
"grad_norm": 0.2221420407295227,
"learning_rate": 0.0002079748753938678,
"loss": 0.162,
"step": 81
},
{
"epoch": 0.0797277588721439,
"grad_norm": 0.17710459232330322,
"learning_rate": 0.0002056795873377331,
"loss": 0.0792,
"step": 82
},
{
"epoch": 0.08070004861448711,
"grad_norm": 0.2508675754070282,
"learning_rate": 0.00020336907703837748,
"loss": 0.1702,
"step": 83
},
{
"epoch": 0.08167233835683034,
"grad_norm": 0.34161412715911865,
"learning_rate": 0.00020104397616624645,
"loss": 0.1894,
"step": 84
},
{
"epoch": 0.08264462809917356,
"grad_norm": 0.22855708003044128,
"learning_rate": 0.00019870492038070252,
"loss": 0.1288,
"step": 85
},
{
"epoch": 0.08361691784151677,
"grad_norm": 0.24569354951381683,
"learning_rate": 0.0001963525491562421,
"loss": 0.1454,
"step": 86
},
{
"epoch": 0.08458920758385999,
"grad_norm": 0.2825170159339905,
"learning_rate": 0.0001939875056076697,
"loss": 0.1396,
"step": 87
},
{
"epoch": 0.0855614973262032,
"grad_norm": 0.3594852685928345,
"learning_rate": 0.00019161043631427666,
"loss": 0.1615,
"step": 88
},
{
"epoch": 0.08653378706854643,
"grad_norm": 0.35308271646499634,
"learning_rate": 0.00018922199114307294,
"loss": 0.2612,
"step": 89
},
{
"epoch": 0.08750607681088965,
"grad_norm": 0.36867859959602356,
"learning_rate": 0.00018682282307111987,
"loss": 0.2518,
"step": 90
},
{
"epoch": 0.08847836655323287,
"grad_norm": 0.3401321470737457,
"learning_rate": 0.00018441358800701273,
"loss": 0.2185,
"step": 91
},
{
"epoch": 0.08945065629557608,
"grad_norm": 0.3880937397480011,
"learning_rate": 0.00018199494461156203,
"loss": 0.2246,
"step": 92
},
{
"epoch": 0.0904229460379193,
"grad_norm": 0.3770473897457123,
"learning_rate": 0.000179567554117722,
"loss": 0.1804,
"step": 93
},
{
"epoch": 0.09139523578026251,
"grad_norm": 0.31104758381843567,
"learning_rate": 0.00017713208014981648,
"loss": 0.1733,
"step": 94
},
{
"epoch": 0.09236752552260574,
"grad_norm": 0.4791819453239441,
"learning_rate": 0.00017468918854211007,
"loss": 0.2412,
"step": 95
},
{
"epoch": 0.09333981526494896,
"grad_norm": 0.45134127140045166,
"learning_rate": 0.00017223954715677627,
"loss": 0.2741,
"step": 96
},
{
"epoch": 0.09431210500729217,
"grad_norm": 0.5456284880638123,
"learning_rate": 0.00016978382570131034,
"loss": 0.4073,
"step": 97
},
{
"epoch": 0.09528439474963539,
"grad_norm": 0.6170369982719421,
"learning_rate": 0.00016732269554543794,
"loss": 0.3737,
"step": 98
},
{
"epoch": 0.0962566844919786,
"grad_norm": 0.6821739673614502,
"learning_rate": 0.00016485682953756942,
"loss": 0.4857,
"step": 99
},
{
"epoch": 0.09722897423432182,
"grad_norm": 0.7691540122032166,
"learning_rate": 0.00016238690182084986,
"loss": 0.4505,
"step": 100
},
{
"epoch": 0.09722897423432182,
"eval_loss": 0.09960367530584335,
"eval_runtime": 3.9183,
"eval_samples_per_second": 12.76,
"eval_steps_per_second": 1.786,
"step": 100
},
{
"epoch": 0.09820126397666505,
"grad_norm": 0.1519579142332077,
"learning_rate": 0.0001599135876488549,
"loss": 0.06,
"step": 101
},
{
"epoch": 0.09917355371900827,
"grad_norm": 0.1643187403678894,
"learning_rate": 0.00015743756320098332,
"loss": 0.0632,
"step": 102
},
{
"epoch": 0.10014584346135148,
"grad_norm": 0.11599410325288773,
"learning_rate": 0.0001549595053975962,
"loss": 0.0648,
"step": 103
},
{
"epoch": 0.1011181332036947,
"grad_norm": 0.12460173666477203,
"learning_rate": 0.00015248009171495378,
"loss": 0.0614,
"step": 104
},
{
"epoch": 0.10209042294603791,
"grad_norm": 0.07840460538864136,
"learning_rate": 0.00015,
"loss": 0.0461,
"step": 105
},
{
"epoch": 0.10306271268838113,
"grad_norm": 0.09137758612632751,
"learning_rate": 0.00014751990828504622,
"loss": 0.047,
"step": 106
},
{
"epoch": 0.10403500243072436,
"grad_norm": 0.08994504064321518,
"learning_rate": 0.00014504049460240375,
"loss": 0.0392,
"step": 107
},
{
"epoch": 0.10500729217306758,
"grad_norm": 0.0770004540681839,
"learning_rate": 0.00014256243679901663,
"loss": 0.0453,
"step": 108
},
{
"epoch": 0.10597958191541079,
"grad_norm": 0.08238277584314346,
"learning_rate": 0.00014008641235114508,
"loss": 0.0397,
"step": 109
},
{
"epoch": 0.10695187165775401,
"grad_norm": 0.12823158502578735,
"learning_rate": 0.00013761309817915014,
"loss": 0.0375,
"step": 110
},
{
"epoch": 0.10792416140009722,
"grad_norm": 0.07921775430440903,
"learning_rate": 0.00013514317046243058,
"loss": 0.0399,
"step": 111
},
{
"epoch": 0.10889645114244045,
"grad_norm": 0.09790435433387756,
"learning_rate": 0.00013267730445456208,
"loss": 0.0307,
"step": 112
},
{
"epoch": 0.10986874088478367,
"grad_norm": 0.09248471260070801,
"learning_rate": 0.00013021617429868963,
"loss": 0.0403,
"step": 113
},
{
"epoch": 0.11084103062712689,
"grad_norm": 0.09994786232709885,
"learning_rate": 0.00012776045284322368,
"loss": 0.0428,
"step": 114
},
{
"epoch": 0.1118133203694701,
"grad_norm": 0.08010675758123398,
"learning_rate": 0.00012531081145788987,
"loss": 0.0354,
"step": 115
},
{
"epoch": 0.11278561011181332,
"grad_norm": 0.11402561515569687,
"learning_rate": 0.00012286791985018355,
"loss": 0.059,
"step": 116
},
{
"epoch": 0.11375789985415653,
"grad_norm": 0.08841165900230408,
"learning_rate": 0.00012043244588227796,
"loss": 0.0342,
"step": 117
},
{
"epoch": 0.11473018959649976,
"grad_norm": 0.08822940289974213,
"learning_rate": 0.00011800505538843798,
"loss": 0.0445,
"step": 118
},
{
"epoch": 0.11570247933884298,
"grad_norm": 0.1175551563501358,
"learning_rate": 0.00011558641199298727,
"loss": 0.0471,
"step": 119
},
{
"epoch": 0.1166747690811862,
"grad_norm": 0.15154576301574707,
"learning_rate": 0.00011317717692888012,
"loss": 0.079,
"step": 120
},
{
"epoch": 0.11764705882352941,
"grad_norm": 0.10886827856302261,
"learning_rate": 0.00011077800885692702,
"loss": 0.0548,
"step": 121
},
{
"epoch": 0.11861934856587263,
"grad_norm": 0.1060369536280632,
"learning_rate": 0.00010838956368572334,
"loss": 0.042,
"step": 122
},
{
"epoch": 0.11959163830821584,
"grad_norm": 0.11565347760915756,
"learning_rate": 0.0001060124943923303,
"loss": 0.0596,
"step": 123
},
{
"epoch": 0.12056392805055907,
"grad_norm": 0.11722910404205322,
"learning_rate": 0.0001036474508437579,
"loss": 0.0562,
"step": 124
},
{
"epoch": 0.12153621779290229,
"grad_norm": 0.12110079824924469,
"learning_rate": 0.00010129507961929748,
"loss": 0.0742,
"step": 125
},
{
"epoch": 0.12153621779290229,
"eval_loss": 0.09679846465587616,
"eval_runtime": 4.0215,
"eval_samples_per_second": 12.433,
"eval_steps_per_second": 1.741,
"step": 125
},
{
"epoch": 0.1225085075352455,
"grad_norm": 0.0917026549577713,
"learning_rate": 9.895602383375353e-05,
"loss": 0.0453,
"step": 126
},
{
"epoch": 0.12348079727758872,
"grad_norm": 0.18777845799922943,
"learning_rate": 9.663092296162251e-05,
"loss": 0.0661,
"step": 127
},
{
"epoch": 0.12445308701993194,
"grad_norm": 0.10484489798545837,
"learning_rate": 9.432041266226686e-05,
"loss": 0.0516,
"step": 128
},
{
"epoch": 0.12542537676227516,
"grad_norm": 0.14925585687160492,
"learning_rate": 9.202512460613219e-05,
"loss": 0.0574,
"step": 129
},
{
"epoch": 0.12639766650461837,
"grad_norm": 0.12964065372943878,
"learning_rate": 8.97456863020546e-05,
"loss": 0.084,
"step": 130
},
{
"epoch": 0.1273699562469616,
"grad_norm": 0.14066071808338165,
"learning_rate": 8.748272092570646e-05,
"loss": 0.0826,
"step": 131
},
{
"epoch": 0.12834224598930483,
"grad_norm": 0.17143459618091583,
"learning_rate": 8.523684714922608e-05,
"loss": 0.1232,
"step": 132
},
{
"epoch": 0.12931453573164803,
"grad_norm": 0.14534994959831238,
"learning_rate": 8.300867897207903e-05,
"loss": 0.0783,
"step": 133
},
{
"epoch": 0.13028682547399126,
"grad_norm": 0.17438603937625885,
"learning_rate": 8.079882555319684e-05,
"loss": 0.1032,
"step": 134
},
{
"epoch": 0.13125911521633446,
"grad_norm": 0.21413007378578186,
"learning_rate": 7.860789104443896e-05,
"loss": 0.1421,
"step": 135
},
{
"epoch": 0.1322314049586777,
"grad_norm": 0.20025412738323212,
"learning_rate": 7.643647442542382e-05,
"loss": 0.1545,
"step": 136
},
{
"epoch": 0.1332036947010209,
"grad_norm": 0.22498586773872375,
"learning_rate": 7.428516933977347e-05,
"loss": 0.1427,
"step": 137
},
{
"epoch": 0.13417598444336412,
"grad_norm": 0.22663052380084991,
"learning_rate": 7.215456393281776e-05,
"loss": 0.1396,
"step": 138
},
{
"epoch": 0.13514827418570735,
"grad_norm": 0.23404861986637115,
"learning_rate": 7.004524069080096e-05,
"loss": 0.1501,
"step": 139
},
{
"epoch": 0.13612056392805055,
"grad_norm": 0.3112930953502655,
"learning_rate": 6.795777628163599e-05,
"loss": 0.1979,
"step": 140
},
{
"epoch": 0.13709285367039378,
"grad_norm": 0.38309967517852783,
"learning_rate": 6.58927413972491e-05,
"loss": 0.2962,
"step": 141
},
{
"epoch": 0.13806514341273698,
"grad_norm": 0.3294512927532196,
"learning_rate": 6.385070059755846e-05,
"loss": 0.1978,
"step": 142
},
{
"epoch": 0.13903743315508021,
"grad_norm": 0.3819890320301056,
"learning_rate": 6.183221215612904e-05,
"loss": 0.2528,
"step": 143
},
{
"epoch": 0.14000972289742344,
"grad_norm": 0.4122176170349121,
"learning_rate": 5.983782790754623e-05,
"loss": 0.2841,
"step": 144
},
{
"epoch": 0.14098201263976665,
"grad_norm": 0.4129266142845154,
"learning_rate": 5.786809309654982e-05,
"loss": 0.2905,
"step": 145
},
{
"epoch": 0.14195430238210988,
"grad_norm": 0.47997596859931946,
"learning_rate": 5.592354622896944e-05,
"loss": 0.3009,
"step": 146
},
{
"epoch": 0.14292659212445308,
"grad_norm": 0.5365499258041382,
"learning_rate": 5.40047189245025e-05,
"loss": 0.3335,
"step": 147
},
{
"epoch": 0.1438988818667963,
"grad_norm": 0.5594547390937805,
"learning_rate": 5.211213577137469e-05,
"loss": 0.3667,
"step": 148
},
{
"epoch": 0.14487117160913954,
"grad_norm": 0.778071939945221,
"learning_rate": 5.024631418292274e-05,
"loss": 0.5081,
"step": 149
},
{
"epoch": 0.14584346135148274,
"grad_norm": 0.9648370146751404,
"learning_rate": 4.840776425613886e-05,
"loss": 0.533,
"step": 150
},
{
"epoch": 0.14584346135148274,
"eval_loss": 0.09180089831352234,
"eval_runtime": 3.7999,
"eval_samples_per_second": 13.158,
"eval_steps_per_second": 1.842,
"step": 150
}
],
"logging_steps": 1,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 50,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 1,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 8.748837118358323e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}
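
The state above is the plain JSON written by the Hugging Face Trainer, so it can be inspected without any of the original training code. Below is a minimal sketch, assuming the file sits at checkpoint-150/trainer_state.json (a hypothetical path) and that the logged learning rates came from a 10-step linear warmup to a peak of 3e-4 followed by cosine decay to max_steps=200; that schedule is inferred from the numbers in log_history, not stated anywhere in the checkpoint.

    # Sketch only: path, warmup length, and peak LR are assumptions, not values
    # recorded in the checkpoint itself.
    import json
    import math

    import matplotlib.pyplot as plt

    with open("checkpoint-150/trainer_state.json") as f:  # assumed location
        state = json.load(f)

    # Split the log into training entries (have "loss") and eval entries (have "eval_loss").
    train_log = [e for e in state["log_history"] if "loss" in e]
    eval_log = [e for e in state["log_history"] if "eval_loss" in e]

    print(f"best eval_loss {state['best_metric']:.4f} at {state['best_model_checkpoint']}")

    # Training loss is logged every step, eval loss every 25 steps.
    plt.plot([e["step"] for e in train_log], [e["loss"] for e in train_log], label="train loss")
    plt.plot([e["step"] for e in eval_log], [e["eval_loss"] for e in eval_log], "o-", label="eval loss")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend()
    plt.savefig("loss_curve.png")

    def lr_at(step, peak=3e-4, warmup=10, max_steps=200):
        """Reconstruct the learning rate under the assumed warmup + cosine schedule."""
        if step <= warmup:
            return peak * step / warmup
        progress = (step - warmup) / (max_steps - warmup)
        return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(lr_at(150))  # ~4.84e-05, matching the value logged at step 150

Under that assumption the reconstructed rate at step 150 comes out to about 4.84e-05, which matches the final logged entry, and the eval loss at step 150 (0.0918) equals the recorded best_metric. That is consistent with this checkpoint being flagged as best_model_checkpoint and with the EarlyStoppingCallback patience counter still sitting at 0.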