{
  "best_metric": 10.526249885559082,
  "best_model_checkpoint": "miner_id_24/checkpoint-100",
  "epoch": 1.5285171102661597,
  "eval_steps": 25,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015209125475285171,
      "grad_norm": 7.798121452331543,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 44.4974,
      "step": 1
    },
    {
      "epoch": 0.015209125475285171,
      "eval_loss": 11.139165878295898,
      "eval_runtime": 0.5313,
      "eval_samples_per_second": 94.102,
      "eval_steps_per_second": 13.174,
      "step": 1
    },
    {
      "epoch": 0.030418250950570342,
      "grad_norm": 7.954409599304199,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 44.5234,
      "step": 2
    },
    {
      "epoch": 0.045627376425855515,
      "grad_norm": 7.833451747894287,
      "learning_rate": 8.999999999999999e-05,
      "loss": 44.5026,
      "step": 3
    },
    {
      "epoch": 0.060836501901140684,
      "grad_norm": 7.779505729675293,
      "learning_rate": 0.00011999999999999999,
      "loss": 44.4479,
      "step": 4
    },
    {
      "epoch": 0.07604562737642585,
      "grad_norm": 7.852802753448486,
      "learning_rate": 0.00015,
      "loss": 44.4609,
      "step": 5
    },
    {
      "epoch": 0.09125475285171103,
      "grad_norm": 7.819211006164551,
      "learning_rate": 0.00017999999999999998,
      "loss": 44.3568,
      "step": 6
    },
    {
      "epoch": 0.10646387832699619,
      "grad_norm": 7.94288969039917,
      "learning_rate": 0.00020999999999999998,
      "loss": 44.3203,
      "step": 7
    },
    {
      "epoch": 0.12167300380228137,
      "grad_norm": 7.820436000823975,
      "learning_rate": 0.00023999999999999998,
      "loss": 44.2292,
      "step": 8
    },
    {
      "epoch": 0.13688212927756654,
      "grad_norm": 7.844394207000732,
      "learning_rate": 0.00027,
      "loss": 44.1641,
      "step": 9
    },
    {
      "epoch": 0.1520912547528517,
      "grad_norm": 7.6544904708862305,
      "learning_rate": 0.0003,
      "loss": 44.0807,
      "step": 10
    },
    {
      "epoch": 0.16730038022813687,
      "grad_norm": 7.4691572189331055,
      "learning_rate": 0.00029995027012714694,
      "loss": 43.9766,
      "step": 11
    },
    {
      "epoch": 0.18250950570342206,
      "grad_norm": 7.065072059631348,
      "learning_rate": 0.00029980111348272456,
      "loss": 43.8542,
      "step": 12
    },
    {
      "epoch": 0.19771863117870722,
      "grad_norm": 6.962889671325684,
      "learning_rate": 0.00029955262896727894,
      "loss": 43.7083,
      "step": 13
    },
    {
      "epoch": 0.21292775665399238,
      "grad_norm": 6.819710731506348,
      "learning_rate": 0.00029920498134218835,
      "loss": 43.612,
      "step": 14
    },
    {
      "epoch": 0.22813688212927757,
      "grad_norm": 6.688031196594238,
      "learning_rate": 0.0002987584011204152,
      "loss": 43.474,
      "step": 15
    },
    {
      "epoch": 0.24334600760456274,
      "grad_norm": 5.722296714782715,
      "learning_rate": 0.0002982131844136615,
      "loss": 43.4219,
      "step": 16
    },
    {
      "epoch": 0.2585551330798479,
      "grad_norm": 5.815008163452148,
      "learning_rate": 0.0002975696927360274,
      "loss": 43.3333,
      "step": 17
    },
    {
      "epoch": 0.2737642585551331,
      "grad_norm": 5.559881687164307,
      "learning_rate": 0.0002968283527643036,
      "loss": 43.1901,
      "step": 18
    },
    {
      "epoch": 0.2889733840304182,
      "grad_norm": 5.232143878936768,
      "learning_rate": 0.00029598965605505737,
      "loss": 43.1224,
      "step": 19
    },
    {
      "epoch": 0.3041825095057034,
      "grad_norm": 5.151193618774414,
      "learning_rate": 0.000295054158718698,
      "loss": 43.0365,
      "step": 20
    },
    {
      "epoch": 0.3193916349809886,
      "grad_norm": 4.557214260101318,
      "learning_rate": 0.0002940224810507402,
      "loss": 43.0156,
      "step": 21
    },
    {
      "epoch": 0.33460076045627374,
      "grad_norm": 4.298803329467773,
      "learning_rate": 0.00029289530712050735,
      "loss": 42.8672,
      "step": 22
    },
    {
      "epoch": 0.34980988593155893,
      "grad_norm": 3.9553070068359375,
      "learning_rate": 0.0002916733843175492,
      "loss": 42.8229,
      "step": 23
    },
    {
      "epoch": 0.3650190114068441,
      "grad_norm": 3.7043850421905518,
      "learning_rate": 0.000290357522856074,
      "loss": 42.75,
      "step": 24
    },
    {
      "epoch": 0.38022813688212925,
      "grad_norm": 3.5806026458740234,
      "learning_rate": 0.0002889485952377242,
      "loss": 42.6536,
      "step": 25
    },
    {
      "epoch": 0.38022813688212925,
      "eval_loss": 10.657498359680176,
      "eval_runtime": 0.1066,
      "eval_samples_per_second": 469.222,
      "eval_steps_per_second": 65.691,
      "step": 25
    },
    {
      "epoch": 0.39543726235741444,
      "grad_norm": 3.0404555797576904,
      "learning_rate": 0.0002874475356730507,
      "loss": 42.724,
      "step": 26
    },
    {
      "epoch": 0.41064638783269963,
      "grad_norm": 3.1695425510406494,
      "learning_rate": 0.0002858553394620707,
      "loss": 42.6042,
      "step": 27
    },
    {
      "epoch": 0.42585551330798477,
      "grad_norm": 2.890155076980591,
      "learning_rate": 0.0002841730623343193,
      "loss": 42.6042,
      "step": 28
    },
    {
      "epoch": 0.44106463878326996,
      "grad_norm": 2.8616456985473633,
      "learning_rate": 0.00028240181974883207,
      "loss": 42.5703,
      "step": 29
    },
    {
      "epoch": 0.45627376425855515,
      "grad_norm": 2.712928056716919,
      "learning_rate": 0.00028054278615452326,
      "loss": 42.5104,
      "step": 30
    },
    {
      "epoch": 0.4714828897338403,
      "grad_norm": 2.436995267868042,
      "learning_rate": 0.0002785971942114498,
      "loss": 42.5052,
      "step": 31
    },
    {
      "epoch": 0.4866920152091255,
      "grad_norm": 2.1528892517089844,
      "learning_rate": 0.0002765663339734778,
      "loss": 42.5156,
      "step": 32
    },
    {
      "epoch": 0.5019011406844106,
      "grad_norm": 1.9085866212844849,
      "learning_rate": 0.0002744515520328928,
      "loss": 42.4583,
      "step": 33
    },
    {
      "epoch": 0.5171102661596958,
      "grad_norm": 2.139317274093628,
      "learning_rate": 0.00027225425062752165,
      "loss": 42.388,
      "step": 34
    },
    {
      "epoch": 0.532319391634981,
      "grad_norm": 1.8673893213272095,
      "learning_rate": 0.0002699758867109579,
      "loss": 42.3281,
      "step": 35
    },
    {
      "epoch": 0.5475285171102662,
      "grad_norm": 1.6618345975875854,
      "learning_rate": 0.0002676179709865066,
      "loss": 42.3125,
      "step": 36
    },
    {
      "epoch": 0.5627376425855514,
      "grad_norm": 1.54766047000885,
      "learning_rate": 0.00026518206690549,
      "loss": 42.2943,
      "step": 37
    },
    {
      "epoch": 0.5779467680608364,
      "grad_norm": 1.4217829704284668,
      "learning_rate": 0.0002626697896305779,
      "loss": 42.2943,
      "step": 38
    },
    {
      "epoch": 0.5931558935361216,
      "grad_norm": 1.5864824056625366,
      "learning_rate": 0.00026008280496482984,
      "loss": 42.3333,
      "step": 39
    },
    {
      "epoch": 0.6083650190114068,
      "grad_norm": 1.3134839534759521,
      "learning_rate": 0.000257422828247159,
      "loss": 42.25,
      "step": 40
    },
    {
      "epoch": 0.623574144486692,
      "grad_norm": 1.3444600105285645,
      "learning_rate": 0.00025469162321495147,
      "loss": 42.3021,
      "step": 41
    },
    {
      "epoch": 0.6387832699619772,
      "grad_norm": 1.1616184711456299,
      "learning_rate": 0.00025189100083459397,
      "loss": 42.2057,
      "step": 42
    },
    {
      "epoch": 0.6539923954372624,
      "grad_norm": 1.3812938928604126,
      "learning_rate": 0.00024902281810068475,
      "loss": 42.263,
      "step": 43
    },
    {
      "epoch": 0.6692015209125475,
      "grad_norm": 1.0146691799163818,
      "learning_rate": 0.0002460889768047263,
      "loss": 42.1901,
      "step": 44
    },
    {
      "epoch": 0.6844106463878327,
      "grad_norm": 1.0053883790969849,
      "learning_rate": 0.0002430914222741134,
      "loss": 42.2161,
      "step": 45
    },
    {
      "epoch": 0.6996197718631179,
      "grad_norm": 0.9144092202186584,
      "learning_rate": 0.00024003214208225522,
      "loss": 42.2161,
      "step": 46
    },
    {
      "epoch": 0.714828897338403,
      "grad_norm": 0.8486371636390686,
      "learning_rate": 0.00023691316473068452,
      "loss": 42.2214,
      "step": 47
    },
    {
      "epoch": 0.7300380228136882,
      "grad_norm": 1.0502363443374634,
      "learning_rate": 0.00023373655830402968,
      "loss": 42.3255,
      "step": 48
    },
    {
      "epoch": 0.7452471482889734,
      "grad_norm": 1.3109233379364014,
      "learning_rate": 0.00023050442909874007,
      "loss": 42.3125,
      "step": 49
    },
    {
      "epoch": 0.7604562737642585,
      "grad_norm": 0.9757567644119263,
      "learning_rate": 0.00022721892022647462,
      "loss": 42.1979,
      "step": 50
    },
    {
      "epoch": 0.7604562737642585,
      "eval_loss": 10.535416603088379,
      "eval_runtime": 0.1079,
      "eval_samples_per_second": 463.538,
      "eval_steps_per_second": 64.895,
      "step": 50
    },
    {
      "epoch": 0.7756653992395437,
      "grad_norm": 0.7378947138786316,
      "learning_rate": 0.00022388221019307967,
      "loss": 42.1719,
      "step": 51
    },
    {
      "epoch": 0.7908745247148289,
      "grad_norm": 0.8719134330749512,
      "learning_rate": 0.000220496511454098,
      "loss": 42.1641,
      "step": 52
    },
    {
      "epoch": 0.8060836501901141,
      "grad_norm": 0.7421266436576843,
      "learning_rate": 0.00021706406894776709,
      "loss": 42.2005,
      "step": 53
    },
    {
      "epoch": 0.8212927756653993,
      "grad_norm": 0.8768603205680847,
      "learning_rate": 0.0002135871586064791,
      "loss": 42.1771,
      "step": 54
    },
    {
      "epoch": 0.8365019011406845,
      "grad_norm": 0.6931475400924683,
      "learning_rate": 0.00021006808584768998,
      "loss": 42.1979,
      "step": 55
    },
    {
      "epoch": 0.8517110266159695,
      "grad_norm": 0.5837717652320862,
      "learning_rate": 0.00020650918404527775,
      "loss": 42.1406,
      "step": 56
    },
    {
      "epoch": 0.8669201520912547,
      "grad_norm": 0.712273895740509,
      "learning_rate": 0.00020291281298236423,
      "loss": 42.1328,
      "step": 57
    },
    {
      "epoch": 0.8821292775665399,
      "grad_norm": 0.8226441740989685,
      "learning_rate": 0.00019928135728662522,
      "loss": 42.1745,
      "step": 58
    },
    {
      "epoch": 0.8973384030418251,
      "grad_norm": 0.617861270904541,
      "learning_rate": 0.0001956172248491277,
      "loss": 42.1719,
      "step": 59
    },
    {
      "epoch": 0.9125475285171103,
      "grad_norm": 0.6812199950218201,
      "learning_rate": 0.00019192284522774142,
      "loss": 42.1484,
      "step": 60
    },
    {
      "epoch": 0.9277566539923955,
      "grad_norm": 0.6634241342544556,
      "learning_rate": 0.00018820066803618428,
      "loss": 42.1615,
      "step": 61
    },
    {
      "epoch": 0.9429657794676806,
      "grad_norm": 0.5809543132781982,
      "learning_rate": 0.00018445316131976934,
      "loss": 42.1432,
      "step": 62
    },
    {
      "epoch": 0.9581749049429658,
      "grad_norm": 0.8210413455963135,
      "learning_rate": 0.00018068280991893014,
      "loss": 42.1901,
      "step": 63
    },
    {
      "epoch": 0.973384030418251,
      "grad_norm": 1.388875961303711,
      "learning_rate": 0.00017689211382161034,
      "loss": 42.2708,
      "step": 64
    },
    {
      "epoch": 0.9885931558935361,
      "grad_norm": 0.802690327167511,
      "learning_rate": 0.00017308358650560928,
      "loss": 42.1328,
      "step": 65
    },
    {
      "epoch": 1.0114068441064639,
      "grad_norm": 0.6896817088127136,
      "learning_rate": 0.00016925975327198266,
      "loss": 42.1927,
      "step": 66
    },
    {
      "epoch": 1.026615969581749,
      "grad_norm": 0.818332850933075,
      "learning_rate": 0.00016542314957060405,
      "loss": 42.1562,
      "step": 67
    },
    {
      "epoch": 1.0418250950570342,
      "grad_norm": 0.49196258187294006,
      "learning_rate": 0.00016157631931899697,
      "loss": 42.125,
      "step": 68
    },
    {
      "epoch": 1.0570342205323193,
      "grad_norm": 0.43189793825149536,
      "learning_rate": 0.00015772181321555196,
      "loss": 42.1172,
      "step": 69
    },
    {
      "epoch": 1.0722433460076046,
      "grad_norm": 0.3118878901004791,
      "learning_rate": 0.0001538621870482483,
      "loss": 42.1068,
      "step": 70
    },
    {
      "epoch": 1.0874524714828897,
      "grad_norm": 0.5249186158180237,
      "learning_rate": 0.00015,
      "loss": 42.151,
      "step": 71
    },
    {
      "epoch": 1.102661596958175,
      "grad_norm": 0.6453397274017334,
      "learning_rate": 0.00014613781295175172,
      "loss": 42.1589,
      "step": 72
    },
    {
      "epoch": 1.11787072243346,
      "grad_norm": 0.34862664341926575,
      "learning_rate": 0.000142278186784448,
      "loss": 42.125,
      "step": 73
    },
    {
      "epoch": 1.1330798479087452,
      "grad_norm": 0.37730568647384644,
      "learning_rate": 0.00013842368068100303,
      "loss": 42.1198,
      "step": 74
    },
    {
      "epoch": 1.1482889733840305,
      "grad_norm": 0.39992350339889526,
      "learning_rate": 0.00013457685042939592,
      "loss": 42.138,
      "step": 75
    },
    {
      "epoch": 1.1482889733840305,
      "eval_loss": 10.527915954589844,
      "eval_runtime": 0.106,
      "eval_samples_per_second": 471.538,
      "eval_steps_per_second": 66.015,
      "step": 75
    },
    {
      "epoch": 1.1634980988593155,
      "grad_norm": 0.9189149141311646,
      "learning_rate": 0.00013074024672801731,
      "loss": 42.1536,
      "step": 76
    },
    {
      "epoch": 1.1787072243346008,
      "grad_norm": 0.5288547873497009,
      "learning_rate": 0.0001269164134943907,
      "loss": 42.1667,
      "step": 77
    },
    {
      "epoch": 1.193916349809886,
      "grad_norm": 0.5245852470397949,
      "learning_rate": 0.00012310788617838966,
      "loss": 42.1432,
      "step": 78
    },
    {
      "epoch": 1.209125475285171,
      "grad_norm": 0.670550525188446,
      "learning_rate": 0.0001193171900810699,
      "loss": 42.1536,
      "step": 79
    },
    {
      "epoch": 1.2243346007604563,
      "grad_norm": 0.6620810031890869,
      "learning_rate": 0.00011554683868023067,
      "loss": 42.1406,
      "step": 80
    },
    {
      "epoch": 1.2395437262357414,
      "grad_norm": 0.7172536253929138,
      "learning_rate": 0.0001117993319638157,
      "loss": 42.1667,
      "step": 81
    },
    {
      "epoch": 1.2547528517110267,
      "grad_norm": 0.6294752359390259,
      "learning_rate": 0.00010807715477225858,
      "loss": 42.1797,
      "step": 82
    },
    {
      "epoch": 1.2699619771863118,
      "grad_norm": 0.5978144407272339,
      "learning_rate": 0.00010438277515087233,
      "loss": 42.1667,
      "step": 83
    },
    {
      "epoch": 1.285171102661597,
      "grad_norm": 0.5058898329734802,
      "learning_rate": 0.00010071864271337478,
      "loss": 42.125,
      "step": 84
    },
    {
      "epoch": 1.3003802281368821,
      "grad_norm": 0.5284583568572998,
      "learning_rate": 9.708718701763577e-05,
      "loss": 42.112,
      "step": 85
    },
    {
      "epoch": 1.3155893536121672,
      "grad_norm": 0.5570880770683289,
      "learning_rate": 9.34908159547222e-05,
      "loss": 42.1302,
      "step": 86
    },
    {
      "epoch": 1.3307984790874525,
      "grad_norm": 0.44924110174179077,
      "learning_rate": 8.993191415231e-05,
      "loss": 42.1536,
      "step": 87
    },
    {
      "epoch": 1.3460076045627376,
      "grad_norm": 0.5590141415596008,
      "learning_rate": 8.641284139352091e-05,
      "loss": 42.1406,
      "step": 88
    },
    {
      "epoch": 1.3612167300380227,
      "grad_norm": 0.3548166751861572,
      "learning_rate": 8.293593105223287e-05,
      "loss": 42.1042,
      "step": 89
    },
    {
      "epoch": 1.376425855513308,
      "grad_norm": 0.46111956238746643,
      "learning_rate": 7.950348854590204e-05,
      "loss": 42.112,
      "step": 90
    },
    {
      "epoch": 1.3916349809885933,
      "grad_norm": 0.2855987846851349,
      "learning_rate": 7.611778980692035e-05,
      "loss": 42.1094,
      "step": 91
    },
    {
      "epoch": 1.4068441064638784,
      "grad_norm": 0.3709195852279663,
      "learning_rate": 7.278107977352543e-05,
      "loss": 42.138,
      "step": 92
    },
    {
      "epoch": 1.4220532319391634,
      "grad_norm": 0.8047066330909729,
      "learning_rate": 6.949557090125994e-05,
      "loss": 42.1667,
      "step": 93
    },
    {
      "epoch": 1.4372623574144487,
      "grad_norm": 0.5303740501403809,
      "learning_rate": 6.626344169597031e-05,
      "loss": 42.125,
      "step": 94
    },
    {
      "epoch": 1.4524714828897338,
      "grad_norm": 0.41722556948661804,
      "learning_rate": 6.308683526931545e-05,
      "loss": 42.125,
      "step": 95
    },
    {
      "epoch": 1.467680608365019,
      "grad_norm": 1.1801074743270874,
      "learning_rate": 5.996785791774478e-05,
      "loss": 42.2188,
      "step": 96
    },
    {
      "epoch": 1.4828897338403042,
      "grad_norm": 0.8621243238449097,
      "learning_rate": 5.690857772588657e-05,
      "loss": 42.1849,
      "step": 97
    },
    {
      "epoch": 1.4980988593155893,
      "grad_norm": 1.1932792663574219,
      "learning_rate": 5.391102319527373e-05,
      "loss": 42.2187,
      "step": 98
    },
    {
      "epoch": 1.5133079847908744,
      "grad_norm": 0.38025224208831787,
      "learning_rate": 5.0977181899315214e-05,
      "loss": 42.1276,
      "step": 99
    },
    {
      "epoch": 1.5285171102661597,
      "grad_norm": 0.39083558320999146,
      "learning_rate": 4.8108999165406026e-05,
      "loss": 42.1458,
      "step": 100
    },
    {
      "epoch": 1.5285171102661597,
      "eval_loss": 10.526249885559082,
      "eval_runtime": 0.1068,
      "eval_samples_per_second": 467.988,
      "eval_steps_per_second": 65.518,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 132,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 795475968000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}