{
  "best_metric": 1.4987461566925049,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 1.1424501424501425,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011396011396011397,
      "grad_norm": 1.275281310081482,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 2.4206,
      "step": 1
    },
    {
      "epoch": 0.011396011396011397,
      "eval_loss": 3.1777703762054443,
      "eval_runtime": 2.6727,
      "eval_samples_per_second": 18.707,
      "eval_steps_per_second": 2.619,
      "step": 1
    },
    {
      "epoch": 0.022792022792022793,
      "grad_norm": 1.8708691596984863,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 2.5791,
      "step": 2
    },
    {
      "epoch": 0.03418803418803419,
      "grad_norm": 2.057396411895752,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.6549,
      "step": 3
    },
    {
      "epoch": 0.045584045584045586,
      "grad_norm": 2.621896505355835,
      "learning_rate": 0.00011999999999999999,
      "loss": 2.8508,
      "step": 4
    },
    {
      "epoch": 0.05698005698005698,
      "grad_norm": 2.667181968688965,
      "learning_rate": 0.00015,
      "loss": 2.6154,
      "step": 5
    },
    {
      "epoch": 0.06837606837606838,
      "grad_norm": 2.1751344203948975,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.5356,
      "step": 6
    },
    {
      "epoch": 0.07977207977207977,
      "grad_norm": 1.9212430715560913,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.646,
      "step": 7
    },
    {
      "epoch": 0.09116809116809117,
      "grad_norm": 1.482764720916748,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.2598,
      "step": 8
    },
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 1.449442744255066,
      "learning_rate": 0.00027,
      "loss": 2.1986,
      "step": 9
    },
    {
      "epoch": 0.11396011396011396,
      "grad_norm": 1.9114185571670532,
      "learning_rate": 0.0003,
      "loss": 2.2509,
      "step": 10
    },
    {
      "epoch": 0.12535612535612536,
      "grad_norm": 1.7590088844299316,
      "learning_rate": 0.0002999731384004606,
      "loss": 2.2411,
      "step": 11
    },
    {
      "epoch": 0.13675213675213677,
      "grad_norm": 1.9783275127410889,
      "learning_rate": 0.0002998925632224497,
      "loss": 2.2038,
      "step": 12
    },
    {
      "epoch": 0.14814814814814814,
      "grad_norm": 2.0008552074432373,
      "learning_rate": 0.00029975830332434265,
      "loss": 1.913,
      "step": 13
    },
    {
      "epoch": 0.15954415954415954,
      "grad_norm": 2.6259801387786865,
      "learning_rate": 0.00029957040679194776,
      "loss": 2.0676,
      "step": 14
    },
    {
      "epoch": 0.17094017094017094,
      "grad_norm": 1.8901782035827637,
      "learning_rate": 0.00029932894092128383,
      "loss": 1.7778,
      "step": 15
    },
    {
      "epoch": 0.18233618233618235,
      "grad_norm": 1.8770512342453003,
      "learning_rate": 0.0002990339921944777,
      "loss": 1.7791,
      "step": 16
    },
    {
      "epoch": 0.19373219373219372,
      "grad_norm": 2.1568357944488525,
      "learning_rate": 0.00029868566624879054,
      "loss": 2.251,
      "step": 17
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 2.0423424243927,
      "learning_rate": 0.00029828408783878324,
      "loss": 1.6777,
      "step": 18
    },
    {
      "epoch": 0.21652421652421652,
      "grad_norm": 2.2600088119506836,
      "learning_rate": 0.00029782940079163485,
      "loss": 1.3782,
      "step": 19
    },
    {
      "epoch": 0.22792022792022792,
      "grad_norm": 6.212785243988037,
      "learning_rate": 0.00029732176795563037,
      "loss": 1.7217,
      "step": 20
    },
    {
      "epoch": 0.23931623931623933,
      "grad_norm": 4.51749324798584,
      "learning_rate": 0.0002967613711418359,
      "loss": 2.2409,
      "step": 21
    },
    {
      "epoch": 0.25071225071225073,
      "grad_norm": 8.102594375610352,
      "learning_rate": 0.000296148411058982,
      "loss": 2.9289,
      "step": 22
    },
    {
      "epoch": 0.2621082621082621,
      "grad_norm": 4.413826942443848,
      "learning_rate": 0.00029548310724157904,
      "loss": 2.52,
      "step": 23
    },
    {
      "epoch": 0.27350427350427353,
      "grad_norm": 3.2592685222625732,
      "learning_rate": 0.0002947656979712899,
      "loss": 2.3453,
      "step": 24
    },
    {
      "epoch": 0.2849002849002849,
      "grad_norm": 2.090911626815796,
      "learning_rate": 0.0002939964401915884,
      "loss": 2.3068,
      "step": 25
    },
    {
      "epoch": 0.2849002849002849,
      "eval_loss": 2.0359325408935547,
      "eval_runtime": 1.6925,
      "eval_samples_per_second": 29.542,
      "eval_steps_per_second": 4.136,
      "step": 25
    },
    {
      "epoch": 0.2962962962962963,
      "grad_norm": 1.3033101558685303,
      "learning_rate": 0.0002931756094157332,
      "loss": 2.0119,
      "step": 26
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 1.3732004165649414,
      "learning_rate": 0.0002923034996280924,
      "loss": 1.9276,
      "step": 27
    },
    {
      "epoch": 0.3190883190883191,
      "grad_norm": 1.2793781757354736,
      "learning_rate": 0.0002913804231788509,
      "loss": 2.0441,
      "step": 28
    },
    {
      "epoch": 0.33048433048433046,
      "grad_norm": 1.2502461671829224,
      "learning_rate": 0.00029040671067214087,
      "loss": 1.7199,
      "step": 29
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 1.2199724912643433,
      "learning_rate": 0.0002893827108476348,
      "loss": 1.7756,
      "step": 30
    },
    {
      "epoch": 0.35327635327635326,
      "grad_norm": 1.1835788488388062,
      "learning_rate": 0.000288308790455642,
      "loss": 1.8096,
      "step": 31
    },
    {
      "epoch": 0.3646723646723647,
      "grad_norm": 1.323184609413147,
      "learning_rate": 0.00028718533412575606,
      "loss": 1.6928,
      "step": 32
    },
    {
      "epoch": 0.37606837606837606,
      "grad_norm": 1.272740364074707,
      "learning_rate": 0.00028601274422909733,
      "loss": 1.6588,
      "step": 33
    },
    {
      "epoch": 0.38746438746438744,
      "grad_norm": 1.4608358144760132,
      "learning_rate": 0.00028479144073420234,
      "loss": 1.6605,
      "step": 34
    },
    {
      "epoch": 0.39886039886039887,
      "grad_norm": 1.4448308944702148,
      "learning_rate": 0.0002835218610566095,
      "loss": 1.823,
      "step": 35
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 1.501624584197998,
      "learning_rate": 0.0002822044599021973,
      "loss": 1.5703,
      "step": 36
    },
    {
      "epoch": 0.42165242165242167,
      "grad_norm": 1.6337703466415405,
      "learning_rate": 0.0002808397091043291,
      "loss": 1.507,
      "step": 37
    },
    {
      "epoch": 0.43304843304843305,
      "grad_norm": 1.7467942237854004,
      "learning_rate": 0.00027942809745486343,
      "loss": 1.6569,
      "step": 38
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 1.4048912525177002,
      "learning_rate": 0.0002779701305290915,
      "loss": 1.1612,
      "step": 39
    },
    {
      "epoch": 0.45584045584045585,
      "grad_norm": 1.558631420135498,
      "learning_rate": 0.00027646633050466265,
      "loss": 1.2638,
      "step": 40
    },
    {
      "epoch": 0.4672364672364672,
      "grad_norm": 2.038233995437622,
      "learning_rate": 0.0002749172359745641,
      "loss": 1.366,
      "step": 41
    },
    {
      "epoch": 0.47863247863247865,
      "grad_norm": 4.787012100219727,
      "learning_rate": 0.0002733234017542215,
      "loss": 2.2797,
      "step": 42
    },
    {
      "epoch": 0.49002849002849,
      "grad_norm": 4.053765296936035,
      "learning_rate": 0.0002716853986827888,
      "loss": 2.4131,
      "step": 43
    },
    {
      "epoch": 0.5014245014245015,
      "grad_norm": 2.653860569000244,
      "learning_rate": 0.0002700038134187002,
      "loss": 2.3959,
      "step": 44
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 2.266941785812378,
      "learning_rate": 0.00026827924822955487,
      "loss": 2.1461,
      "step": 45
    },
    {
      "epoch": 0.5242165242165242,
      "grad_norm": 1.615462303161621,
      "learning_rate": 0.0002665123207764128,
      "loss": 1.9849,
      "step": 46
    },
    {
      "epoch": 0.5356125356125356,
      "grad_norm": 1.3153661489486694,
      "learning_rate": 0.00026470366389257614,
      "loss": 1.7058,
      "step": 47
    },
    {
      "epoch": 0.5470085470085471,
      "grad_norm": 1.0891388654708862,
      "learning_rate": 0.0002628539253569372,
      "loss": 1.9861,
      "step": 48
    },
    {
      "epoch": 0.5584045584045584,
      "grad_norm": 1.2085473537445068,
      "learning_rate": 0.00026096376766197307,
      "loss": 1.9597,
      "step": 49
    },
    {
      "epoch": 0.5698005698005698,
      "grad_norm": 1.1210774183273315,
      "learning_rate": 0.00025903386777647154,
      "loss": 1.8633,
      "step": 50
    },
    {
      "epoch": 0.5698005698005698,
      "eval_loss": 1.7979708909988403,
      "eval_runtime": 1.6988,
      "eval_samples_per_second": 29.433,
      "eval_steps_per_second": 4.121,
      "step": 50
    },
    {
      "epoch": 0.5811965811965812,
      "grad_norm": 1.1083546876907349,
      "learning_rate": 0.0002570649169030708,
      "loss": 1.9054,
      "step": 51
    },
    {
      "epoch": 0.5925925925925926,
      "grad_norm": 1.1118803024291992,
      "learning_rate": 0.0002550576202307026,
      "loss": 1.5566,
      "step": 52
    },
    {
      "epoch": 0.603988603988604,
      "grad_norm": 1.1292729377746582,
      "learning_rate": 0.00025301269668202516,
      "loss": 1.5418,
      "step": 53
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.0905343294143677,
      "learning_rate": 0.0002509308786559378,
      "loss": 1.567,
      "step": 54
    },
    {
      "epoch": 0.6267806267806267,
      "grad_norm": 1.1679775714874268,
      "learning_rate": 0.00024881291176526903,
      "loss": 1.478,
      "step": 55
    },
    {
      "epoch": 0.6381766381766382,
      "grad_norm": 1.0859359502792358,
      "learning_rate": 0.00024665955456973154,
      "loss": 1.3628,
      "step": 56
    },
    {
      "epoch": 0.6495726495726496,
      "grad_norm": 1.2914154529571533,
      "learning_rate": 0.00024447157830424066,
      "loss": 1.4341,
      "step": 57
    },
    {
      "epoch": 0.6609686609686609,
      "grad_norm": 4.928417205810547,
      "learning_rate": 0.00024224976660269302,
      "loss": 1.7669,
      "step": 58
    },
    {
      "epoch": 0.6723646723646723,
      "grad_norm": 1.7731802463531494,
      "learning_rate": 0.0002399949152173043,
      "loss": 1.3646,
      "step": 59
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 1.7798770666122437,
      "learning_rate": 0.00023770783173360704,
      "loss": 1.32,
      "step": 60
    },
    {
      "epoch": 0.6951566951566952,
      "grad_norm": 1.3400565385818481,
      "learning_rate": 0.00023538933528120988,
      "loss": 1.0693,
      "step": 61
    },
    {
      "epoch": 0.7065527065527065,
      "grad_norm": 1.7840746641159058,
      "learning_rate": 0.00023304025624042263,
      "loss": 1.2359,
      "step": 62
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 3.244560956954956,
      "learning_rate": 0.00023066143594485178,
      "loss": 2.1066,
      "step": 63
    },
    {
      "epoch": 0.7293447293447294,
      "grad_norm": 1.5248583555221558,
      "learning_rate": 0.00022825372638007267,
      "loss": 2.2681,
      "step": 64
    },
    {
      "epoch": 0.7407407407407407,
      "grad_norm": 1.579367995262146,
      "learning_rate": 0.0002258179898784871,
      "loss": 2.1084,
      "step": 65
    },
    {
      "epoch": 0.7521367521367521,
      "grad_norm": 1.5203423500061035,
      "learning_rate": 0.00022335509881047497,
      "loss": 2.0729,
      "step": 66
    },
    {
      "epoch": 0.7635327635327636,
      "grad_norm": 1.4482111930847168,
      "learning_rate": 0.00022086593527195062,
      "loss": 2.0057,
      "step": 67
    },
    {
      "epoch": 0.7749287749287749,
      "grad_norm": 1.1762452125549316,
      "learning_rate": 0.00021835139076843623,
      "loss": 1.811,
      "step": 68
    },
    {
      "epoch": 0.7863247863247863,
      "grad_norm": 0.9151526093482971,
      "learning_rate": 0.00021581236589576476,
      "loss": 1.5962,
      "step": 69
    },
    {
      "epoch": 0.7977207977207977,
      "grad_norm": 1.0164101123809814,
      "learning_rate": 0.00021324977001752757,
      "loss": 1.5083,
      "step": 70
    },
    {
      "epoch": 0.8091168091168092,
      "grad_norm": 0.9672901630401611,
      "learning_rate": 0.00021066452093938153,
      "loss": 1.5546,
      "step": 71
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 1.0712361335754395,
      "learning_rate": 0.0002080575445803326,
      "loss": 1.5119,
      "step": 72
    },
    {
      "epoch": 0.8319088319088319,
      "grad_norm": 1.0604687929153442,
      "learning_rate": 0.00020542977464111352,
      "loss": 1.6416,
      "step": 73
    },
    {
      "epoch": 0.8433048433048433,
      "grad_norm": 1.0448590517044067,
      "learning_rate": 0.00020278215226977493,
      "loss": 1.5965,
      "step": 74
    },
    {
      "epoch": 0.8547008547008547,
      "grad_norm": 1.0953928232192993,
      "learning_rate": 0.0002001156257246085,
      "loss": 1.5314,
      "step": 75
    },
    {
      "epoch": 0.8547008547008547,
      "eval_loss": 1.6335612535476685,
      "eval_runtime": 1.6991,
      "eval_samples_per_second": 29.427,
      "eval_steps_per_second": 4.12,
      "step": 75
    },
    {
      "epoch": 0.8660968660968661,
      "grad_norm": 1.022580862045288,
      "learning_rate": 0.00019743115003452357,
      "loss": 1.394,
      "step": 76
    },
    {
      "epoch": 0.8774928774928775,
      "grad_norm": 1.150123119354248,
      "learning_rate": 0.0001947296866569998,
      "loss": 1.0808,
      "step": 77
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 1.2115788459777832,
      "learning_rate": 0.00019201220313373607,
      "loss": 1.4386,
      "step": 78
    },
    {
      "epoch": 0.9002849002849003,
      "grad_norm": 1.132363200187683,
      "learning_rate": 0.00018927967274412098,
      "loss": 1.1949,
      "step": 79
    },
    {
      "epoch": 0.9116809116809117,
      "grad_norm": 1.3680353164672852,
      "learning_rate": 0.00018653307415664877,
      "loss": 1.2202,
      "step": 80
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 1.075006127357483,
      "learning_rate": 0.00018377339107840412,
      "loss": 0.9873,
      "step": 81
    },
    {
      "epoch": 0.9344729344729344,
      "grad_norm": 1.3361048698425293,
      "learning_rate": 0.0001810016119027429,
      "loss": 1.2343,
      "step": 82
    },
    {
      "epoch": 0.9458689458689459,
      "grad_norm": 1.9565190076828003,
      "learning_rate": 0.00017821872935529505,
      "loss": 1.0822,
      "step": 83
    },
    {
      "epoch": 0.9572649572649573,
      "grad_norm": 3.1362149715423584,
      "learning_rate": 0.0001754257401384145,
      "loss": 1.6614,
      "step": 84
    },
    {
      "epoch": 0.9686609686609686,
      "grad_norm": 1.214998722076416,
      "learning_rate": 0.00017262364457420608,
      "loss": 1.8554,
      "step": 85
    },
    {
      "epoch": 0.98005698005698,
      "grad_norm": 1.4069725275039673,
      "learning_rate": 0.00016981344624625536,
      "loss": 1.6906,
      "step": 86
    },
    {
      "epoch": 0.9914529914529915,
      "grad_norm": 1.7144482135772705,
      "learning_rate": 0.0001669961516401905,
      "loss": 1.095,
      "step": 87
    },
    {
      "epoch": 1.0056980056980056,
      "grad_norm": 2.2465782165527344,
      "learning_rate": 0.00016417276978320468,
      "loss": 2.0945,
      "step": 88
    },
    {
      "epoch": 1.017094017094017,
      "grad_norm": 1.1649279594421387,
      "learning_rate": 0.00016134431188266851,
      "loss": 1.9323,
      "step": 89
    },
    {
      "epoch": 1.0284900284900285,
      "grad_norm": 1.0408798456192017,
      "learning_rate": 0.00015851179096396112,
      "loss": 1.8263,
      "step": 90
    },
    {
      "epoch": 1.03988603988604,
      "grad_norm": 1.0593069791793823,
      "learning_rate": 0.00015567622150765057,
      "loss": 1.8236,
      "step": 91
    },
    {
      "epoch": 1.0512820512820513,
      "grad_norm": 0.940461277961731,
      "learning_rate": 0.00015283861908615284,
      "loss": 1.4745,
      "step": 92
    },
    {
      "epoch": 1.0626780626780628,
      "grad_norm": 0.8671309351921082,
      "learning_rate": 0.00015,
      "loss": 1.3637,
      "step": 93
    },
    {
      "epoch": 1.074074074074074,
      "grad_norm": 0.7851026058197021,
      "learning_rate": 0.00014716138091384716,
      "loss": 1.3622,
      "step": 94
    },
    {
      "epoch": 1.0854700854700854,
      "grad_norm": 0.8758441805839539,
      "learning_rate": 0.00014432377849234946,
      "loss": 1.4627,
      "step": 95
    },
    {
      "epoch": 1.0968660968660968,
      "grad_norm": 0.9090460538864136,
      "learning_rate": 0.00014148820903603888,
      "loss": 1.245,
      "step": 96
    },
    {
      "epoch": 1.1082621082621082,
      "grad_norm": 0.8923598527908325,
      "learning_rate": 0.00013865568811733151,
      "loss": 1.3085,
      "step": 97
    },
    {
      "epoch": 1.1196581196581197,
      "grad_norm": 0.9927690625190735,
      "learning_rate": 0.00013582723021679532,
      "loss": 1.2563,
      "step": 98
    },
    {
      "epoch": 1.131054131054131,
      "grad_norm": 1.0350650548934937,
      "learning_rate": 0.0001330038483598095,
      "loss": 1.1181,
      "step": 99
    },
    {
      "epoch": 1.1424501424501425,
      "grad_norm": 1.156041145324707,
      "learning_rate": 0.00013018655375374467,
      "loss": 1.1825,
      "step": 100
    },
    {
      "epoch": 1.1424501424501425,
      "eval_loss": 1.4987461566925049,
      "eval_runtime": 1.8846,
      "eval_samples_per_second": 26.531,
      "eval_steps_per_second": 3.714,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 176,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.4663032422465536e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}