{
  "best_metric": 3.6610631942749023,
  "best_model_checkpoint": "multilingual-e5-small-aligned-readability/checkpoint-40644",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 40644,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03690581635665781,
      "grad_norm": 48.44145965576172,
      "learning_rate": 4.938490306072237e-05,
      "loss": 25.4936,
      "step": 500
    },
    {
      "epoch": 0.07381163271331562,
      "grad_norm": 56.23558807373047,
      "learning_rate": 4.876980612144474e-05,
      "loss": 5.5206,
      "step": 1000
    },
    {
      "epoch": 0.11071744906997343,
      "grad_norm": 22.196273803710938,
      "learning_rate": 4.815470918216711e-05,
      "loss": 4.8622,
      "step": 1500
    },
    {
      "epoch": 0.14762326542663123,
      "grad_norm": 49.71085739135742,
      "learning_rate": 4.7539612242889484e-05,
      "loss": 4.7329,
      "step": 2000
    },
    {
      "epoch": 0.18452908178328906,
      "grad_norm": 33.54679489135742,
      "learning_rate": 4.692451530361185e-05,
      "loss": 4.4192,
      "step": 2500
    },
    {
      "epoch": 0.22143489813994685,
      "grad_norm": 33.129676818847656,
      "learning_rate": 4.6309418364334224e-05,
      "loss": 4.4362,
      "step": 3000
    },
    {
      "epoch": 0.2583407144966047,
      "grad_norm": 32.080562591552734,
      "learning_rate": 4.5694321425056594e-05,
      "loss": 4.2238,
      "step": 3500
    },
    {
      "epoch": 0.29524653085326247,
      "grad_norm": 49.822025299072266,
      "learning_rate": 4.507922448577896e-05,
      "loss": 4.207,
      "step": 4000
    },
    {
      "epoch": 0.33215234720992026,
      "grad_norm": 38.24595642089844,
      "learning_rate": 4.4464127546501335e-05,
      "loss": 4.1541,
      "step": 4500
    },
    {
      "epoch": 0.3690581635665781,
      "grad_norm": 38.85857391357422,
      "learning_rate": 4.38490306072237e-05,
      "loss": 4.1168,
      "step": 5000
    },
    {
      "epoch": 0.4059639799232359,
      "grad_norm": 26.093944549560547,
      "learning_rate": 4.323393366794607e-05,
      "loss": 4.035,
      "step": 5500
    },
    {
      "epoch": 0.4428697962798937,
      "grad_norm": 38.26423263549805,
      "learning_rate": 4.261883672866844e-05,
      "loss": 4.0707,
      "step": 6000
    },
    {
      "epoch": 0.4797756126365515,
      "grad_norm": 30.129150390625,
      "learning_rate": 4.200373978939081e-05,
      "loss": 4.0642,
      "step": 6500
    },
    {
      "epoch": 0.5166814289932093,
      "grad_norm": 28.189598083496094,
      "learning_rate": 4.138864285011318e-05,
      "loss": 4.0913,
      "step": 7000
    },
    {
      "epoch": 0.5535872453498671,
      "grad_norm": 25.67119789123535,
      "learning_rate": 4.077354591083555e-05,
      "loss": 3.8925,
      "step": 7500
    },
    {
      "epoch": 0.5904930617065249,
      "grad_norm": 40.868289947509766,
      "learning_rate": 4.015844897155792e-05,
      "loss": 4.0213,
      "step": 8000
    },
    {
      "epoch": 0.6273988780631827,
      "grad_norm": 26.44133186340332,
      "learning_rate": 3.954335203228029e-05,
      "loss": 4.0,
      "step": 8500
    },
    {
      "epoch": 0.6643046944198405,
      "grad_norm": 25.9945068359375,
      "learning_rate": 3.892825509300266e-05,
      "loss": 3.9923,
      "step": 9000
    },
    {
      "epoch": 0.7012105107764984,
      "grad_norm": 44.50141143798828,
      "learning_rate": 3.8313158153725024e-05,
      "loss": 3.8293,
      "step": 9500
    },
    {
      "epoch": 0.7381163271331562,
      "grad_norm": 43.48475646972656,
      "learning_rate": 3.76980612144474e-05,
      "loss": 3.8908,
      "step": 10000
    },
    {
      "epoch": 0.775022143489814,
      "grad_norm": 34.09233093261719,
      "learning_rate": 3.708296427516977e-05,
      "loss": 3.8289,
      "step": 10500
    },
    {
      "epoch": 0.8119279598464718,
      "grad_norm": 19.0194034576416,
      "learning_rate": 3.6467867335892135e-05,
      "loss": 3.9371,
      "step": 11000
    },
    {
      "epoch": 0.8488337762031296,
      "grad_norm": 23.901264190673828,
      "learning_rate": 3.585277039661451e-05,
      "loss": 3.9056,
      "step": 11500
    },
    {
      "epoch": 0.8857395925597874,
      "grad_norm": 17.51069450378418,
      "learning_rate": 3.5237673457336876e-05,
      "loss": 3.8314,
      "step": 12000
    },
    {
      "epoch": 0.9226454089164452,
      "grad_norm": 41.49451446533203,
      "learning_rate": 3.4622576518059246e-05,
      "loss": 3.8817,
      "step": 12500
    },
    {
      "epoch": 0.959551225273103,
      "grad_norm": 54.885257720947266,
      "learning_rate": 3.400747957878162e-05,
      "loss": 3.7882,
      "step": 13000
    },
    {
      "epoch": 0.9964570416297609,
      "grad_norm": 23.55073356628418,
      "learning_rate": 3.3392382639503986e-05,
      "loss": 3.8356,
      "step": 13500
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.8022446632385254,
      "eval_mse": 3.80224462961076,
      "eval_runtime": 54.6351,
      "eval_samples_per_second": 1763.299,
      "eval_steps_per_second": 220.426,
      "step": 13548
    },
    {
      "epoch": 1.0333628579864187,
      "grad_norm": 19.580780029296875,
      "learning_rate": 3.277728570022636e-05,
      "loss": 3.3048,
      "step": 14000
    },
    {
      "epoch": 1.0702686743430765,
      "grad_norm": 18.51485824584961,
      "learning_rate": 3.216218876094873e-05,
      "loss": 3.1909,
      "step": 14500
    },
    {
      "epoch": 1.1071744906997343,
      "grad_norm": 24.523353576660156,
      "learning_rate": 3.15470918216711e-05,
      "loss": 3.134,
      "step": 15000
    },
    {
      "epoch": 1.144080307056392,
      "grad_norm": 23.871919631958008,
      "learning_rate": 3.093199488239347e-05,
      "loss": 3.182,
      "step": 15500
    },
    {
      "epoch": 1.1809861234130499,
      "grad_norm": 27.006973266601562,
      "learning_rate": 3.0316897943115834e-05,
      "loss": 3.3002,
      "step": 16000
    },
    {
      "epoch": 1.2178919397697077,
      "grad_norm": 46.61240005493164,
      "learning_rate": 2.9701801003838208e-05,
      "loss": 3.1962,
      "step": 16500
    },
    {
      "epoch": 1.2547977561263655,
      "grad_norm": 23.656471252441406,
      "learning_rate": 2.9086704064560578e-05,
      "loss": 3.2212,
      "step": 17000
    },
    {
      "epoch": 1.2917035724830233,
      "grad_norm": 38.43106460571289,
      "learning_rate": 2.8471607125282945e-05,
      "loss": 3.2474,
      "step": 17500
    },
    {
      "epoch": 1.328609388839681,
      "grad_norm": 19.142786026000977,
      "learning_rate": 2.7856510186005312e-05,
      "loss": 3.1591,
      "step": 18000
    },
    {
      "epoch": 1.3655152051963388,
      "grad_norm": 30.900583267211914,
      "learning_rate": 2.7241413246727686e-05,
      "loss": 3.1354,
      "step": 18500
    },
    {
      "epoch": 1.4024210215529966,
      "grad_norm": 25.125627517700195,
      "learning_rate": 2.6626316307450056e-05,
      "loss": 3.1656,
      "step": 19000
    },
    {
      "epoch": 1.4393268379096544,
      "grad_norm": 28.680898666381836,
      "learning_rate": 2.6011219368172423e-05,
      "loss": 3.1617,
      "step": 19500
    },
    {
      "epoch": 1.4762326542663124,
      "grad_norm": 28.307249069213867,
      "learning_rate": 2.5396122428894797e-05,
      "loss": 3.1673,
      "step": 20000
    },
    {
      "epoch": 1.51313847062297,
      "grad_norm": 27.13144302368164,
      "learning_rate": 2.4781025489617167e-05,
      "loss": 3.1657,
      "step": 20500
    },
    {
      "epoch": 1.550044286979628,
      "grad_norm": 17.530426025390625,
      "learning_rate": 2.4165928550339534e-05,
      "loss": 3.1608,
      "step": 21000
    },
    {
      "epoch": 1.5869501033362858,
      "grad_norm": 51.65472412109375,
      "learning_rate": 2.3550831611061904e-05,
      "loss": 3.1323,
      "step": 21500
    },
    {
      "epoch": 1.6238559196929436,
      "grad_norm": 23.71245574951172,
      "learning_rate": 2.2935734671784274e-05,
      "loss": 3.2361,
      "step": 22000
    },
    {
      "epoch": 1.6607617360496014,
      "grad_norm": 17.434144973754883,
      "learning_rate": 2.2320637732506645e-05,
      "loss": 3.1969,
      "step": 22500
    },
    {
      "epoch": 1.6976675524062592,
      "grad_norm": 22.121740341186523,
      "learning_rate": 2.1705540793229015e-05,
      "loss": 3.1437,
      "step": 23000
    },
    {
      "epoch": 1.734573368762917,
      "grad_norm": 19.105876922607422,
      "learning_rate": 2.1090443853951382e-05,
      "loss": 3.1227,
      "step": 23500
    },
    {
      "epoch": 1.7714791851195748,
      "grad_norm": 27.000789642333984,
      "learning_rate": 2.0475346914673755e-05,
      "loss": 3.066,
      "step": 24000
    },
    {
      "epoch": 1.8083850014762326,
      "grad_norm": 24.040668487548828,
      "learning_rate": 1.9860249975396122e-05,
      "loss": 3.1173,
      "step": 24500
    },
    {
      "epoch": 1.8452908178328906,
      "grad_norm": 29.920883178710938,
      "learning_rate": 1.9245153036118493e-05,
      "loss": 3.0847,
      "step": 25000
    },
    {
      "epoch": 1.8821966341895484,
      "grad_norm": 44.850303649902344,
      "learning_rate": 1.8630056096840863e-05,
      "loss": 3.0993,
      "step": 25500
    },
    {
      "epoch": 1.9191024505462062,
      "grad_norm": 66.01287078857422,
      "learning_rate": 1.8014959157563233e-05,
      "loss": 3.0649,
      "step": 26000
    },
    {
      "epoch": 1.956008266902864,
      "grad_norm": 57.569190979003906,
      "learning_rate": 1.7399862218285603e-05,
      "loss": 3.1672,
      "step": 26500
    },
    {
      "epoch": 1.9929140832595218,
      "grad_norm": 16.41364860534668,
      "learning_rate": 1.678476527900797e-05,
      "loss": 3.1173,
      "step": 27000
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.7406532764434814,
      "eval_mse": 3.7406532746764647,
      "eval_runtime": 53.1552,
      "eval_samples_per_second": 1812.39,
      "eval_steps_per_second": 226.563,
      "step": 27096
    },
    {
      "epoch": 2.0298198996161796,
      "grad_norm": 16.53102684020996,
      "learning_rate": 1.6169668339730344e-05,
      "loss": 2.6256,
      "step": 27500
    },
    {
      "epoch": 2.0667257159728374,
      "grad_norm": 23.657825469970703,
      "learning_rate": 1.555457140045271e-05,
      "loss": 2.6492,
      "step": 28000
    },
    {
      "epoch": 2.103631532329495,
      "grad_norm": 20.9498233795166,
      "learning_rate": 1.4939474461175081e-05,
      "loss": 2.6146,
      "step": 28500
    },
    {
      "epoch": 2.140537348686153,
      "grad_norm": 16.935983657836914,
      "learning_rate": 1.4324377521897453e-05,
      "loss": 2.6738,
      "step": 29000
    },
    {
      "epoch": 2.1774431650428108,
      "grad_norm": 35.98014450073242,
      "learning_rate": 1.3709280582619822e-05,
      "loss": 2.5908,
      "step": 29500
    },
    {
      "epoch": 2.2143489813994686,
      "grad_norm": 30.703969955444336,
      "learning_rate": 1.3094183643342192e-05,
      "loss": 2.6565,
      "step": 30000
    },
    {
      "epoch": 2.2512547977561264,
      "grad_norm": 40.74715805053711,
      "learning_rate": 1.2479086704064562e-05,
      "loss": 2.6106,
      "step": 30500
    },
    {
      "epoch": 2.288160614112784,
      "grad_norm": 18.857763290405273,
      "learning_rate": 1.186398976478693e-05,
      "loss": 2.6383,
      "step": 31000
    },
    {
      "epoch": 2.325066430469442,
      "grad_norm": 68.10604095458984,
      "learning_rate": 1.1248892825509301e-05,
      "loss": 2.5259,
      "step": 31500
    },
    {
      "epoch": 2.3619722468260997,
      "grad_norm": 37.21859359741211,
      "learning_rate": 1.0633795886231671e-05,
      "loss": 2.5619,
      "step": 32000
    },
    {
      "epoch": 2.3988780631827575,
      "grad_norm": 58.106285095214844,
      "learning_rate": 1.001869894695404e-05,
      "loss": 2.6748,
      "step": 32500
    },
    {
      "epoch": 2.4357838795394153,
      "grad_norm": 47.5319938659668,
      "learning_rate": 9.40360200767641e-06,
      "loss": 2.6488,
      "step": 33000
    },
    {
      "epoch": 2.472689695896073,
      "grad_norm": 20.28361701965332,
      "learning_rate": 8.78850506839878e-06,
      "loss": 2.6175,
      "step": 33500
    },
    {
      "epoch": 2.509595512252731,
      "grad_norm": 19.851940155029297,
      "learning_rate": 8.17340812912115e-06,
      "loss": 2.6142,
      "step": 34000
    },
    {
      "epoch": 2.5465013286093887,
      "grad_norm": 32.45155334472656,
      "learning_rate": 7.55831118984352e-06,
      "loss": 2.5369,
      "step": 34500
    },
    {
      "epoch": 2.5834071449660465,
      "grad_norm": 60.2480354309082,
      "learning_rate": 6.94321425056589e-06,
      "loss": 2.5508,
      "step": 35000
    },
    {
      "epoch": 2.6203129613227043,
      "grad_norm": 16.603755950927734,
      "learning_rate": 6.328117311288259e-06,
      "loss": 2.6182,
      "step": 35500
    },
    {
      "epoch": 2.657218777679362,
      "grad_norm": 20.604549407958984,
      "learning_rate": 5.713020372010629e-06,
      "loss": 2.6126,
      "step": 36000
    },
    {
      "epoch": 2.69412459403602,
      "grad_norm": 18.057249069213867,
      "learning_rate": 5.097923432732999e-06,
      "loss": 2.6001,
      "step": 36500
    },
    {
      "epoch": 2.7310304103926777,
      "grad_norm": 22.73322296142578,
      "learning_rate": 4.482826493455368e-06,
      "loss": 2.4779,
      "step": 37000
    },
    {
      "epoch": 2.7679362267493355,
      "grad_norm": 101.31739807128906,
      "learning_rate": 3.8677295541777385e-06,
      "loss": 2.4957,
      "step": 37500
    },
    {
      "epoch": 2.8048420431059933,
      "grad_norm": 25.522369384765625,
      "learning_rate": 3.2526326149001084e-06,
      "loss": 2.5047,
      "step": 38000
    },
    {
      "epoch": 2.841747859462651,
      "grad_norm": 14.60265827178955,
      "learning_rate": 2.6375356756224782e-06,
      "loss": 2.5191,
      "step": 38500
    },
    {
      "epoch": 2.878653675819309,
      "grad_norm": 39.825443267822266,
      "learning_rate": 2.022438736344848e-06,
      "loss": 2.5414,
      "step": 39000
    },
    {
      "epoch": 2.9155594921759667,
      "grad_norm": 19.467710494995117,
      "learning_rate": 1.4073417970672177e-06,
      "loss": 2.564,
      "step": 39500
    },
    {
      "epoch": 2.952465308532625,
      "grad_norm": 57.59708023071289,
      "learning_rate": 7.922448577895876e-07,
      "loss": 2.5478,
      "step": 40000
    },
    {
      "epoch": 2.9893711248892827,
      "grad_norm": 15.459783554077148,
      "learning_rate": 1.771479185119575e-07,
      "loss": 2.6344,
      "step": 40500
    },
    {
      "epoch": 3.0,
      "eval_loss": 3.6610631942749023,
      "eval_mse": 3.6610634619705245,
      "eval_runtime": 53.3029,
      "eval_samples_per_second": 1807.368,
      "eval_steps_per_second": 225.935,
      "step": 40644
    },
    {
      "epoch": 3.0,
      "step": 40644,
      "total_flos": 4.283504864539085e+16,
      "train_loss": 3.5571271605219925,
      "train_runtime": 3169.0778,
      "train_samples_per_second": 820.783,
      "train_steps_per_second": 12.825
    }
  ],
  "logging_steps": 500,
  "max_steps": 40644,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.283504864539085e+16,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}