{
  "best_metric": 0.01876358687877655,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 0.07654586771917235,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0007654586771917235,
      "grad_norm": 15.716177940368652,
      "learning_rate": 2.5e-05,
      "loss": 0.6301,
      "step": 1
    },
    {
      "epoch": 0.0007654586771917235,
      "eval_loss": 0.9535714983940125,
      "eval_runtime": 1.8763,
      "eval_samples_per_second": 26.648,
      "eval_steps_per_second": 6.929,
      "step": 1
    },
    {
      "epoch": 0.001530917354383447,
      "grad_norm": 28.1146240234375,
      "learning_rate": 5e-05,
      "loss": 1.6204,
      "step": 2
    },
    {
      "epoch": 0.0022963760315751706,
      "grad_norm": 3.1468186378479004,
      "learning_rate": 7.500000000000001e-05,
      "loss": 0.4088,
      "step": 3
    },
    {
      "epoch": 0.003061834708766894,
      "grad_norm": 2.8593838214874268,
      "learning_rate": 0.0001,
      "loss": 0.3243,
      "step": 4
    },
    {
      "epoch": 0.0038272933859586173,
      "grad_norm": 2.0387535095214844,
      "learning_rate": 9.997590643643647e-05,
      "loss": 0.1949,
      "step": 5
    },
    {
      "epoch": 0.004592752063150341,
      "grad_norm": 2.075502395629883,
      "learning_rate": 9.990365154573717e-05,
      "loss": 0.1607,
      "step": 6
    },
    {
      "epoch": 0.005358210740342064,
      "grad_norm": 2.105217695236206,
      "learning_rate": 9.978331270024886e-05,
      "loss": 0.108,
      "step": 7
    },
    {
      "epoch": 0.006123669417533788,
      "grad_norm": 1.4741127490997314,
      "learning_rate": 9.961501876182148e-05,
      "loss": 0.0612,
      "step": 8
    },
    {
      "epoch": 0.006889128094725512,
      "grad_norm": 3.5471949577331543,
      "learning_rate": 9.939894994381957e-05,
      "loss": 0.0807,
      "step": 9
    },
    {
      "epoch": 0.007654586771917235,
      "grad_norm": 2.4321608543395996,
      "learning_rate": 9.913533761814537e-05,
      "loss": 0.0468,
      "step": 10
    },
    {
      "epoch": 0.008420045449108958,
      "grad_norm": 2.2056820392608643,
      "learning_rate": 9.882446406748002e-05,
      "loss": 0.0375,
      "step": 11
    },
    {
      "epoch": 0.009185504126300682,
      "grad_norm": 1.6821849346160889,
      "learning_rate": 9.846666218300807e-05,
      "loss": 0.0243,
      "step": 12
    },
    {
      "epoch": 0.009950962803492405,
      "grad_norm": 1.2000023126602173,
      "learning_rate": 9.80623151079494e-05,
      "loss": 0.3485,
      "step": 13
    },
    {
      "epoch": 0.010716421480684128,
      "grad_norm": 1.9993274211883545,
      "learning_rate": 9.761185582727977e-05,
      "loss": 0.4763,
      "step": 14
    },
    {
      "epoch": 0.011481880157875853,
      "grad_norm": 3.4421091079711914,
      "learning_rate": 9.711576670407965e-05,
      "loss": 0.8174,
      "step": 15
    },
    {
      "epoch": 0.012247338835067576,
      "grad_norm": 0.3677566945552826,
      "learning_rate": 9.657457896300791e-05,
      "loss": 0.0088,
      "step": 16
    },
    {
      "epoch": 0.013012797512259299,
      "grad_norm": 0.11690377444028854,
      "learning_rate": 9.598887212145291e-05,
      "loss": 0.0054,
      "step": 17
    },
    {
      "epoch": 0.013778256189451023,
      "grad_norm": 0.09429043531417847,
      "learning_rate": 9.535927336897098e-05,
      "loss": 0.0047,
      "step": 18
    },
    {
      "epoch": 0.014543714866642746,
      "grad_norm": 0.36489200592041016,
      "learning_rate": 9.468645689567598e-05,
      "loss": 0.0057,
      "step": 19
    },
    {
      "epoch": 0.01530917354383447,
      "grad_norm": 0.42153462767601013,
      "learning_rate": 9.397114317029975e-05,
      "loss": 0.0062,
      "step": 20
    },
    {
      "epoch": 0.016074632221026192,
      "grad_norm": 0.28647613525390625,
      "learning_rate": 9.321409816869605e-05,
      "loss": 0.0051,
      "step": 21
    },
    {
      "epoch": 0.016840090898217915,
      "grad_norm": 0.0691399872303009,
      "learning_rate": 9.241613255361455e-05,
      "loss": 0.0021,
      "step": 22
    },
    {
      "epoch": 0.01760554957540964,
      "grad_norm": 0.08754007518291473,
      "learning_rate": 9.157810080662269e-05,
      "loss": 0.0027,
      "step": 23
    },
    {
      "epoch": 0.018371008252601365,
      "grad_norm": 0.15579411387443542,
      "learning_rate": 9.070090031310558e-05,
      "loss": 0.0033,
      "step": 24
    },
    {
      "epoch": 0.019136466929793088,
      "grad_norm": 0.08465871959924698,
      "learning_rate": 8.978547040132317e-05,
      "loss": 0.0018,
      "step": 25
    },
    {
      "epoch": 0.019136466929793088,
      "eval_loss": 0.08919362723827362,
      "eval_runtime": 1.0392,
      "eval_samples_per_second": 48.113,
      "eval_steps_per_second": 12.509,
      "step": 25
    },
    {
      "epoch": 0.01990192560698481,
      "grad_norm": 0.8272204995155334,
      "learning_rate": 8.883279133655399e-05,
      "loss": 0.2644,
      "step": 26
    },
    {
      "epoch": 0.020667384284176533,
      "grad_norm": 0.919032096862793,
      "learning_rate": 8.78438832714026e-05,
      "loss": 0.564,
      "step": 27
    },
    {
      "epoch": 0.021432842961368256,
      "grad_norm": 1.0027246475219727,
      "learning_rate": 8.681980515339464e-05,
      "loss": 0.1129,
      "step": 28
    },
    {
      "epoch": 0.02219830163855998,
      "grad_norm": 0.8935683369636536,
      "learning_rate": 8.57616535910292e-05,
      "loss": 0.017,
      "step": 29
    },
    {
      "epoch": 0.022963760315751706,
      "grad_norm": 0.1873510479927063,
      "learning_rate": 8.467056167950311e-05,
      "loss": 0.0033,
      "step": 30
    },
    {
      "epoch": 0.02372921899294343,
      "grad_norm": 0.2691366970539093,
      "learning_rate": 8.354769778736406e-05,
      "loss": 0.0028,
      "step": 31
    },
    {
      "epoch": 0.02449467767013515,
      "grad_norm": 0.675572395324707,
      "learning_rate": 8.239426430539243e-05,
      "loss": 0.0078,
      "step": 32
    },
    {
      "epoch": 0.025260136347326875,
      "grad_norm": 0.48143887519836426,
      "learning_rate": 8.12114963590511e-05,
      "loss": 0.0062,
      "step": 33
    },
    {
      "epoch": 0.026025595024518598,
      "grad_norm": 0.1117522120475769,
      "learning_rate": 8.000066048588211e-05,
      "loss": 0.0016,
      "step": 34
    },
    {
      "epoch": 0.02679105370171032,
      "grad_norm": 0.5607677102088928,
      "learning_rate": 7.876305327926657e-05,
      "loss": 0.0062,
      "step": 35
    },
    {
      "epoch": 0.027556512378902047,
      "grad_norm": 0.13989481329917908,
      "learning_rate": 7.75e-05,
      "loss": 0.0013,
      "step": 36
    },
    {
      "epoch": 0.02832197105609377,
      "grad_norm": 0.00945358444005251,
      "learning_rate": 7.62128531571699e-05,
      "loss": 0.0003,
      "step": 37
    },
    {
      "epoch": 0.029087429733285493,
      "grad_norm": 0.3890470862388611,
      "learning_rate": 7.490299105985507e-05,
      "loss": 0.2334,
      "step": 38
    },
    {
      "epoch": 0.029852888410477216,
      "grad_norm": 0.9241925477981567,
      "learning_rate": 7.357181634119777e-05,
      "loss": 0.2837,
      "step": 39
    },
    {
      "epoch": 0.03061834708766894,
      "grad_norm": 0.7805806398391724,
      "learning_rate": 7.222075445642904e-05,
      "loss": 0.1911,
      "step": 40
    },
    {
      "epoch": 0.03138380576486066,
      "grad_norm": 0.13290484249591827,
      "learning_rate": 7.085125215645552e-05,
      "loss": 0.0025,
      "step": 41
    },
    {
      "epoch": 0.032149264442052385,
      "grad_norm": 0.6546358466148376,
      "learning_rate": 6.946477593864228e-05,
      "loss": 0.0129,
      "step": 42
    },
    {
      "epoch": 0.03291472311924411,
      "grad_norm": 2.0483906269073486,
      "learning_rate": 6.80628104764508e-05,
      "loss": 0.0307,
      "step": 43
    },
    {
      "epoch": 0.03368018179643583,
      "grad_norm": 1.0244156122207642,
      "learning_rate": 6.664685702961344e-05,
      "loss": 0.0139,
      "step": 44
    },
    {
      "epoch": 0.03444564047362755,
      "grad_norm": 0.13140460848808289,
      "learning_rate": 6.52184318365468e-05,
      "loss": 0.0022,
      "step": 45
    },
    {
      "epoch": 0.03521109915081928,
      "grad_norm": 0.0883515477180481,
      "learning_rate": 6.377906449072578e-05,
      "loss": 0.0016,
      "step": 46
    },
    {
      "epoch": 0.035976557828011006,
      "grad_norm": 0.04970284551382065,
      "learning_rate": 6.23302963027565e-05,
      "loss": 0.0009,
      "step": 47
    },
    {
      "epoch": 0.03674201650520273,
      "grad_norm": 0.0229946281760931,
      "learning_rate": 6.087367864990233e-05,
      "loss": 0.0005,
      "step": 48
    },
    {
      "epoch": 0.03750747518239445,
      "grad_norm": 0.010964008048176765,
      "learning_rate": 5.941077131483025e-05,
      "loss": 0.0003,
      "step": 49
    },
    {
      "epoch": 0.038272933859586175,
      "grad_norm": 0.009133069775998592,
      "learning_rate": 5.794314081535644e-05,
      "loss": 0.0003,
      "step": 50
    },
    {
      "epoch": 0.038272933859586175,
      "eval_loss": 0.037851009517908096,
      "eval_runtime": 0.9779,
      "eval_samples_per_second": 51.128,
      "eval_steps_per_second": 13.293,
      "step": 50
    },
    {
      "epoch": 0.0390383925367779,
      "grad_norm": 0.3454943597316742,
      "learning_rate": 5.6472358726979935e-05,
      "loss": 0.2051,
      "step": 51
    },
    {
      "epoch": 0.03980385121396962,
      "grad_norm": 1.5023900270462036,
      "learning_rate": 5.500000000000001e-05,
      "loss": 0.3154,
      "step": 52
    },
    {
      "epoch": 0.040569309891161344,
      "grad_norm": 0.16861777007579803,
      "learning_rate": 5.352764127302008e-05,
      "loss": 0.024,
      "step": 53
    },
    {
      "epoch": 0.04133476856835307,
      "grad_norm": 0.01335059478878975,
      "learning_rate": 5.205685918464356e-05,
      "loss": 0.0005,
      "step": 54
    },
    {
      "epoch": 0.04210022724554479,
      "grad_norm": 0.01417592540383339,
      "learning_rate": 5.058922868516978e-05,
      "loss": 0.0006,
      "step": 55
    },
    {
      "epoch": 0.04286568592273651,
      "grad_norm": 0.010599881410598755,
      "learning_rate": 4.912632135009769e-05,
      "loss": 0.0004,
      "step": 56
    },
    {
      "epoch": 0.043631144599928236,
      "grad_norm": 0.010219581425189972,
      "learning_rate": 4.7669703697243516e-05,
      "loss": 0.0004,
      "step": 57
    },
    {
      "epoch": 0.04439660327711996,
      "grad_norm": 0.008394899778068066,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 0.0004,
      "step": 58
    },
    {
      "epoch": 0.04516206195431169,
      "grad_norm": 0.010442732833325863,
      "learning_rate": 4.478156816345321e-05,
      "loss": 0.0004,
      "step": 59
    },
    {
      "epoch": 0.04592752063150341,
      "grad_norm": 0.01015259325504303,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 0.0004,
      "step": 60
    },
    {
      "epoch": 0.046692979308695134,
      "grad_norm": 0.013029181398451328,
      "learning_rate": 4.19371895235492e-05,
      "loss": 0.0003,
      "step": 61
    },
    {
      "epoch": 0.04745843798588686,
      "grad_norm": 0.0073064109310507774,
      "learning_rate": 4.053522406135775e-05,
      "loss": 0.0003,
      "step": 62
    },
    {
      "epoch": 0.04822389666307858,
      "grad_norm": 0.2842812240123749,
      "learning_rate": 3.9148747843544495e-05,
      "loss": 0.1657,
      "step": 63
    },
    {
      "epoch": 0.0489893553402703,
      "grad_norm": 0.26676300168037415,
      "learning_rate": 3.777924554357096e-05,
      "loss": 0.1517,
      "step": 64
    },
    {
      "epoch": 0.049754814017462026,
      "grad_norm": 0.5113205909729004,
      "learning_rate": 3.642818365880224e-05,
      "loss": 0.1416,
      "step": 65
    },
    {
      "epoch": 0.05052027269465375,
      "grad_norm": 0.19968412816524506,
      "learning_rate": 3.509700894014496e-05,
      "loss": 0.0041,
      "step": 66
    },
    {
      "epoch": 0.05128573137184547,
      "grad_norm": 0.08529309928417206,
      "learning_rate": 3.378714684283011e-05,
      "loss": 0.0021,
      "step": 67
    },
    {
      "epoch": 0.052051190049037195,
      "grad_norm": 0.03840894624590874,
      "learning_rate": 3.250000000000001e-05,
      "loss": 0.0011,
      "step": 68
    },
    {
      "epoch": 0.05281664872622892,
      "grad_norm": 0.02306171879172325,
      "learning_rate": 3.123694672073344e-05,
      "loss": 0.0006,
      "step": 69
    },
    {
      "epoch": 0.05358210740342064,
      "grad_norm": 0.012097193859517574,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 0.0004,
      "step": 70
    },
    {
      "epoch": 0.054347566080612364,
      "grad_norm": 0.010973786003887653,
      "learning_rate": 2.8788503640948912e-05,
      "loss": 0.0004,
      "step": 71
    },
    {
      "epoch": 0.055113024757804094,
      "grad_norm": 0.01180847268551588,
      "learning_rate": 2.760573569460757e-05,
      "loss": 0.0004,
      "step": 72
    },
    {
      "epoch": 0.05587848343499582,
      "grad_norm": 0.010331504046916962,
      "learning_rate": 2.645230221263596e-05,
      "loss": 0.0004,
      "step": 73
    },
    {
      "epoch": 0.05664394211218754,
      "grad_norm": 0.012231193482875824,
      "learning_rate": 2.53294383204969e-05,
      "loss": 0.0004,
      "step": 74
    },
    {
      "epoch": 0.05740940078937926,
      "grad_norm": 0.009561908431351185,
      "learning_rate": 2.423834640897079e-05,
      "loss": 0.0003,
      "step": 75
    },
    {
      "epoch": 0.05740940078937926,
      "eval_loss": 0.021306684240698814,
      "eval_runtime": 1.043,
      "eval_samples_per_second": 47.938,
      "eval_steps_per_second": 12.464,
      "step": 75
    },
    {
      "epoch": 0.058174859466570986,
      "grad_norm": 0.18385003507137299,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 0.17,
      "step": 76
    },
    {
      "epoch": 0.05894031814376271,
      "grad_norm": 0.40027400851249695,
      "learning_rate": 2.215611672859741e-05,
      "loss": 0.1423,
      "step": 77
    },
    {
      "epoch": 0.05970577682095443,
      "grad_norm": 0.142598494887352,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 0.0031,
      "step": 78
    },
    {
      "epoch": 0.060471235498146154,
      "grad_norm": 0.06515596807003021,
      "learning_rate": 2.0214529598676836e-05,
      "loss": 0.0014,
      "step": 79
    },
    {
      "epoch": 0.06123669417533788,
      "grad_norm": 0.05153204873204231,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 0.0014,
      "step": 80
    },
    {
      "epoch": 0.0620021528525296,
      "grad_norm": 0.014992534182965755,
      "learning_rate": 1.842189919337732e-05,
      "loss": 0.0005,
      "step": 81
    },
    {
      "epoch": 0.06276761152972132,
      "grad_norm": 0.013885805383324623,
      "learning_rate": 1.758386744638546e-05,
      "loss": 0.0004,
      "step": 82
    },
    {
      "epoch": 0.06353307020691305,
      "grad_norm": 0.010251723229885101,
      "learning_rate": 1.6785901831303956e-05,
      "loss": 0.0003,
      "step": 83
    },
    {
      "epoch": 0.06429852888410477,
      "grad_norm": 0.0075831301510334015,
      "learning_rate": 1.602885682970026e-05,
      "loss": 0.0003,
      "step": 84
    },
    {
      "epoch": 0.0650639875612965,
      "grad_norm": 0.0066162901930511,
      "learning_rate": 1.531354310432403e-05,
      "loss": 0.0003,
      "step": 85
    },
    {
      "epoch": 0.06582944623848822,
      "grad_norm": 0.006637216545641422,
      "learning_rate": 1.464072663102903e-05,
      "loss": 0.0002,
      "step": 86
    },
    {
      "epoch": 0.06659490491567994,
      "grad_norm": 0.011600018478929996,
      "learning_rate": 1.4011127878547087e-05,
      "loss": 0.0003,
      "step": 87
    },
    {
      "epoch": 0.06736036359287166,
      "grad_norm": 0.14439114928245544,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 0.1791,
      "step": 88
    },
    {
      "epoch": 0.06812582227006339,
      "grad_norm": 0.1987999677658081,
      "learning_rate": 1.2884233295920353e-05,
      "loss": 0.1352,
      "step": 89
    },
    {
      "epoch": 0.0688912809472551,
      "grad_norm": 0.19108176231384277,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 0.066,
      "step": 90
    },
    {
      "epoch": 0.06965673962444684,
      "grad_norm": 0.07157070934772491,
      "learning_rate": 1.1937684892050604e-05,
      "loss": 0.0013,
      "step": 91
    },
    {
      "epoch": 0.07042219830163857,
      "grad_norm": 0.04157419875264168,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 0.0009,
      "step": 92
    },
    {
      "epoch": 0.07118765697883028,
      "grad_norm": 0.02740364894270897,
      "learning_rate": 1.1175535932519987e-05,
      "loss": 0.0007,
      "step": 93
    },
    {
      "epoch": 0.07195311565602201,
      "grad_norm": 0.013225646689534187,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 0.0004,
      "step": 94
    },
    {
      "epoch": 0.07271857433321373,
      "grad_norm": 0.00674827815964818,
      "learning_rate": 1.0601050056180447e-05,
      "loss": 0.0002,
      "step": 95
    },
    {
      "epoch": 0.07348403301040546,
      "grad_norm": 0.005964465904980898,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 0.0002,
      "step": 96
    },
    {
      "epoch": 0.07424949168759717,
      "grad_norm": 0.005700402893126011,
      "learning_rate": 1.0216687299751144e-05,
      "loss": 0.0002,
      "step": 97
    },
    {
      "epoch": 0.0750149503647889,
      "grad_norm": 0.005580263212323189,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 0.0002,
      "step": 98
    },
    {
      "epoch": 0.07578040904198062,
      "grad_norm": 0.009917444549500942,
      "learning_rate": 1.0024093563563546e-05,
      "loss": 0.0002,
      "step": 99
    },
    {
      "epoch": 0.07654586771917235,
      "grad_norm": 0.0060340710915625095,
      "learning_rate": 1e-05,
      "loss": 0.0002,
      "step": 100
    },
    {
      "epoch": 0.07654586771917235,
      "eval_loss": 0.01876358687877655,
      "eval_runtime": 1.0439,
      "eval_samples_per_second": 47.897,
      "eval_steps_per_second": 12.453,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.965949609128755e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}