{ |
|
"best_metric": 0.05362105742096901, |
|
"best_model_checkpoint": "miner_id_24/checkpoint-50", |
|
"epoch": 0.47206923682140045, |
|
"eval_steps": 50, |
|
"global_step": 75, |
|
"is_hyper_param_search": false, |
|
"is_local_process_zero": true, |
|
"is_world_process_zero": true, |
|
"log_history": [ |
|
{ |
|
"epoch": 0.006294256490952006, |
|
"grad_norm": 3.8400495052337646, |
|
"learning_rate": 5e-06, |
|
"loss": 3.0839, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.006294256490952006, |
|
"eval_loss": 3.1272127628326416, |
|
"eval_runtime": 84.4157, |
|
"eval_samples_per_second": 6.35, |
|
"eval_steps_per_second": 0.794, |
|
"step": 1 |
|
}, |
|
{ |
|
"epoch": 0.012588512981904013, |
|
"grad_norm": 3.7963645458221436, |
|
"learning_rate": 1e-05, |
|
"loss": 3.289, |
|
"step": 2 |
|
}, |
|
{ |
|
"epoch": 0.01888276947285602, |
|
"grad_norm": 3.8076391220092773, |
|
"learning_rate": 1.5e-05, |
|
"loss": 3.2079, |
|
"step": 3 |
|
}, |
|
{ |
|
"epoch": 0.025177025963808025, |
|
"grad_norm": 3.608992099761963, |
|
"learning_rate": 2e-05, |
|
"loss": 3.0601, |
|
"step": 4 |
|
}, |
|
{ |
|
"epoch": 0.03147128245476003, |
|
"grad_norm": 3.9987127780914307, |
|
"learning_rate": 2.5e-05, |
|
"loss": 3.2286, |
|
"step": 5 |
|
}, |
|
{ |
|
"epoch": 0.03776553894571204, |
|
"grad_norm": 3.7046732902526855, |
|
"learning_rate": 3e-05, |
|
"loss": 3.1094, |
|
"step": 6 |
|
}, |
|
{ |
|
"epoch": 0.044059795436664044, |
|
"grad_norm": 3.8715567588806152, |
|
"learning_rate": 3.5e-05, |
|
"loss": 2.9081, |
|
"step": 7 |
|
}, |
|
{ |
|
"epoch": 0.05035405192761605, |
|
"grad_norm": 3.759075403213501, |
|
"learning_rate": 4e-05, |
|
"loss": 2.7651, |
|
"step": 8 |
|
}, |
|
{ |
|
"epoch": 0.05664830841856806, |
|
"grad_norm": 3.834105968475342, |
|
"learning_rate": 4.5e-05, |
|
"loss": 2.5933, |
|
"step": 9 |
|
}, |
|
{ |
|
"epoch": 0.06294256490952006, |
|
"grad_norm": 3.9589412212371826, |
|
"learning_rate": 5e-05, |
|
"loss": 2.3404, |
|
"step": 10 |
|
}, |
|
{ |
|
"epoch": 0.06923682140047208, |
|
"grad_norm": 4.193765163421631, |
|
"learning_rate": 5.500000000000001e-05, |
|
"loss": 2.3012, |
|
"step": 11 |
|
}, |
|
{ |
|
"epoch": 0.07553107789142408, |
|
"grad_norm": 5.016157627105713, |
|
"learning_rate": 6e-05, |
|
"loss": 2.025, |
|
"step": 12 |
|
}, |
|
{ |
|
"epoch": 0.08182533438237609, |
|
"grad_norm": 4.501615524291992, |
|
"learning_rate": 6.500000000000001e-05, |
|
"loss": 1.8619, |
|
"step": 13 |
|
}, |
|
{ |
|
"epoch": 0.08811959087332809, |
|
"grad_norm": 4.059515953063965, |
|
"learning_rate": 7e-05, |
|
"loss": 1.7482, |
|
"step": 14 |
|
}, |
|
{ |
|
"epoch": 0.0944138473642801, |
|
"grad_norm": 3.349575996398926, |
|
"learning_rate": 7.500000000000001e-05, |
|
"loss": 1.4586, |
|
"step": 15 |
|
}, |
|
{ |
|
"epoch": 0.1007081038552321, |
|
"grad_norm": 3.5398507118225098, |
|
"learning_rate": 8e-05, |
|
"loss": 1.3738, |
|
"step": 16 |
|
}, |
|
{ |
|
"epoch": 0.10700236034618411, |
|
"grad_norm": 2.90134859085083, |
|
"learning_rate": 8.5e-05, |
|
"loss": 1.2573, |
|
"step": 17 |
|
}, |
|
{ |
|
"epoch": 0.11329661683713611, |
|
"grad_norm": 2.85213303565979, |
|
"learning_rate": 9e-05, |
|
"loss": 1.2735, |
|
"step": 18 |
|
}, |
|
{ |
|
"epoch": 0.11959087332808813, |
|
"grad_norm": 2.873847007751465, |
|
"learning_rate": 9.5e-05, |
|
"loss": 1.0826, |
|
"step": 19 |
|
}, |
|
{ |
|
"epoch": 0.12588512981904013, |
|
"grad_norm": 2.7670559883117676, |
|
"learning_rate": 0.0001, |
|
"loss": 1.1229, |
|
"step": 20 |
|
}, |
|
{ |
|
"epoch": 0.13217938630999213, |
|
"grad_norm": 3.1859798431396484, |
|
"learning_rate": 9.991845519630678e-05, |
|
"loss": 1.0617, |
|
"step": 21 |
|
}, |
|
{ |
|
"epoch": 0.13847364280094415, |
|
"grad_norm": 2.4608633518218994, |
|
"learning_rate": 9.967408676742751e-05, |
|
"loss": 0.8707, |
|
"step": 22 |
|
}, |
|
{ |
|
"epoch": 0.14476789929189615, |
|
"grad_norm": 2.742429256439209, |
|
"learning_rate": 9.926769179238466e-05, |
|
"loss": 0.852, |
|
"step": 23 |
|
}, |
|
{ |
|
"epoch": 0.15106215578284815, |
|
"grad_norm": 2.2563023567199707, |
|
"learning_rate": 9.870059584711668e-05, |
|
"loss": 0.7373, |
|
"step": 24 |
|
}, |
|
{ |
|
"epoch": 0.15735641227380015, |
|
"grad_norm": 2.2105352878570557, |
|
"learning_rate": 9.797464868072488e-05, |
|
"loss": 0.7149, |
|
"step": 25 |
|
}, |
|
{ |
|
"epoch": 0.16365066876475218, |
|
"grad_norm": 2.4090940952301025, |
|
"learning_rate": 9.709221818197624e-05, |
|
"loss": 0.7451, |
|
"step": 26 |
|
}, |
|
{ |
|
"epoch": 0.16994492525570418, |
|
"grad_norm": 2.5231480598449707, |
|
"learning_rate": 9.60561826557425e-05, |
|
"loss": 0.6176, |
|
"step": 27 |
|
}, |
|
{ |
|
"epoch": 0.17623918174665618, |
|
"grad_norm": 2.0671744346618652, |
|
"learning_rate": 9.486992143456792e-05, |
|
"loss": 0.4402, |
|
"step": 28 |
|
}, |
|
{ |
|
"epoch": 0.18253343823760818, |
|
"grad_norm": 2.357484817504883, |
|
"learning_rate": 9.353730385598887e-05, |
|
"loss": 0.4541, |
|
"step": 29 |
|
}, |
|
{ |
|
"epoch": 0.1888276947285602, |
|
"grad_norm": 2.6663615703582764, |
|
"learning_rate": 9.206267664155907e-05, |
|
"loss": 0.3951, |
|
"step": 30 |
|
}, |
|
{ |
|
"epoch": 0.1951219512195122, |
|
"grad_norm": 2.5575287342071533, |
|
"learning_rate": 9.045084971874738e-05, |
|
"loss": 0.3535, |
|
"step": 31 |
|
}, |
|
{ |
|
"epoch": 0.2014162077104642, |
|
"grad_norm": 2.6381635665893555, |
|
"learning_rate": 8.870708053195413e-05, |
|
"loss": 0.3087, |
|
"step": 32 |
|
}, |
|
{ |
|
"epoch": 0.2077104642014162, |
|
"grad_norm": 2.8394503593444824, |
|
"learning_rate": 8.683705689382024e-05, |
|
"loss": 0.2934, |
|
"step": 33 |
|
}, |
|
{ |
|
"epoch": 0.21400472069236823, |
|
"grad_norm": 2.78173565864563, |
|
"learning_rate": 8.484687843276469e-05, |
|
"loss": 0.2449, |
|
"step": 34 |
|
}, |
|
{ |
|
"epoch": 0.22029897718332023, |
|
"grad_norm": 2.9417877197265625, |
|
"learning_rate": 8.274303669726426e-05, |
|
"loss": 0.2123, |
|
"step": 35 |
|
}, |
|
{ |
|
"epoch": 0.22659323367427223, |
|
"grad_norm": 2.85414981842041, |
|
"learning_rate": 8.053239398177191e-05, |
|
"loss": 0.2024, |
|
"step": 36 |
|
}, |
|
{ |
|
"epoch": 0.23288749016522423, |
|
"grad_norm": 2.734037160873413, |
|
"learning_rate": 7.822216094333847e-05, |
|
"loss": 0.1753, |
|
"step": 37 |
|
}, |
|
{ |
|
"epoch": 0.23918174665617625, |
|
"grad_norm": 4.297616958618164, |
|
"learning_rate": 7.58198730819481e-05, |
|
"loss": 0.2276, |
|
"step": 38 |
|
}, |
|
{ |
|
"epoch": 0.24547600314712825, |
|
"grad_norm": 2.606386184692383, |
|
"learning_rate": 7.333336616128369e-05, |
|
"loss": 0.1276, |
|
"step": 39 |
|
}, |
|
{ |
|
"epoch": 0.25177025963808025, |
|
"grad_norm": 1.5647878646850586, |
|
"learning_rate": 7.077075065009433e-05, |
|
"loss": 0.0612, |
|
"step": 40 |
|
}, |
|
{ |
|
"epoch": 0.25806451612903225, |
|
"grad_norm": 2.8499035835266113, |
|
"learning_rate": 6.814038526753205e-05, |
|
"loss": 0.0934, |
|
"step": 41 |
|
}, |
|
{ |
|
"epoch": 0.26435877261998425, |
|
"grad_norm": 2.563603162765503, |
|
"learning_rate": 6.545084971874738e-05, |
|
"loss": 0.1466, |
|
"step": 42 |
|
}, |
|
{ |
|
"epoch": 0.27065302911093625, |
|
"grad_norm": 1.9947185516357422, |
|
"learning_rate": 6.271091670967436e-05, |
|
"loss": 0.057, |
|
"step": 43 |
|
}, |
|
{ |
|
"epoch": 0.2769472856018883, |
|
"grad_norm": 2.5543155670166016, |
|
"learning_rate": 5.992952333228728e-05, |
|
"loss": 0.062, |
|
"step": 44 |
|
}, |
|
{ |
|
"epoch": 0.2832415420928403, |
|
"grad_norm": 1.8335676193237305, |
|
"learning_rate": 5.7115741913664264e-05, |
|
"loss": 0.0785, |
|
"step": 45 |
|
}, |
|
{ |
|
"epoch": 0.2895357985837923, |
|
"grad_norm": 1.778093695640564, |
|
"learning_rate": 5.427875042394199e-05, |
|
"loss": 0.0491, |
|
"step": 46 |
|
}, |
|
{ |
|
"epoch": 0.2958300550747443, |
|
"grad_norm": 1.2079533338546753, |
|
"learning_rate": 5.142780253968481e-05, |
|
"loss": 0.0373, |
|
"step": 47 |
|
}, |
|
{ |
|
"epoch": 0.3021243115656963, |
|
"grad_norm": 1.9942814111709595, |
|
"learning_rate": 4.85721974603152e-05, |
|
"loss": 0.0423, |
|
"step": 48 |
|
}, |
|
{ |
|
"epoch": 0.3084185680566483, |
|
"grad_norm": 1.2081289291381836, |
|
"learning_rate": 4.5721249576058027e-05, |
|
"loss": 0.0258, |
|
"step": 49 |
|
}, |
|
{ |
|
"epoch": 0.3147128245476003, |
|
"grad_norm": 2.220339059829712, |
|
"learning_rate": 4.288425808633575e-05, |
|
"loss": 0.0285, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.3147128245476003, |
|
"eval_loss": 0.05362105742096901, |
|
"eval_runtime": 85.2365, |
|
"eval_samples_per_second": 6.288, |
|
"eval_steps_per_second": 0.786, |
|
"step": 50 |
|
}, |
|
{ |
|
"epoch": 0.3210070810385523, |
|
"grad_norm": 3.1018424034118652, |
|
"learning_rate": 4.007047666771274e-05, |
|
"loss": 0.0904, |
|
"step": 51 |
|
}, |
|
{ |
|
"epoch": 0.32730133752950435, |
|
"grad_norm": 2.7917895317077637, |
|
"learning_rate": 3.728908329032567e-05, |
|
"loss": 0.0776, |
|
"step": 52 |
|
}, |
|
{ |
|
"epoch": 0.33359559402045635, |
|
"grad_norm": 1.141679048538208, |
|
"learning_rate": 3.4549150281252636e-05, |
|
"loss": 0.0297, |
|
"step": 53 |
|
}, |
|
{ |
|
"epoch": 0.33988985051140835, |
|
"grad_norm": 1.4085471630096436, |
|
"learning_rate": 3.1859614732467954e-05, |
|
"loss": 0.0158, |
|
"step": 54 |
|
}, |
|
{ |
|
"epoch": 0.34618410700236035, |
|
"grad_norm": 2.6473264694213867, |
|
"learning_rate": 2.9229249349905684e-05, |
|
"loss": 0.0374, |
|
"step": 55 |
|
}, |
|
{ |
|
"epoch": 0.35247836349331235, |
|
"grad_norm": 2.7476418018341064, |
|
"learning_rate": 2.6666633838716314e-05, |
|
"loss": 0.0185, |
|
"step": 56 |
|
}, |
|
{ |
|
"epoch": 0.35877261998426435, |
|
"grad_norm": 1.146089792251587, |
|
"learning_rate": 2.418012691805191e-05, |
|
"loss": 0.0159, |
|
"step": 57 |
|
}, |
|
{ |
|
"epoch": 0.36506687647521635, |
|
"grad_norm": 1.5934761762619019, |
|
"learning_rate": 2.1777839056661554e-05, |
|
"loss": 0.0249, |
|
"step": 58 |
|
}, |
|
{ |
|
"epoch": 0.37136113296616835, |
|
"grad_norm": 1.0182127952575684, |
|
"learning_rate": 1.946760601822809e-05, |
|
"loss": 0.0249, |
|
"step": 59 |
|
}, |
|
{ |
|
"epoch": 0.3776553894571204, |
|
"grad_norm": 1.0180975198745728, |
|
"learning_rate": 1.725696330273575e-05, |
|
"loss": 0.0168, |
|
"step": 60 |
|
}, |
|
{ |
|
"epoch": 0.3839496459480724, |
|
"grad_norm": 1.1062073707580566, |
|
"learning_rate": 1.5153121567235335e-05, |
|
"loss": 0.0155, |
|
"step": 61 |
|
}, |
|
{ |
|
"epoch": 0.3902439024390244, |
|
"grad_norm": 0.5176264643669128, |
|
"learning_rate": 1.3162943106179749e-05, |
|
"loss": 0.0067, |
|
"step": 62 |
|
}, |
|
{ |
|
"epoch": 0.3965381589299764, |
|
"grad_norm": 0.4311802387237549, |
|
"learning_rate": 1.1292919468045877e-05, |
|
"loss": 0.0103, |
|
"step": 63 |
|
}, |
|
{ |
|
"epoch": 0.4028324154209284, |
|
"grad_norm": 0.7234724760055542, |
|
"learning_rate": 9.549150281252633e-06, |
|
"loss": 0.011, |
|
"step": 64 |
|
}, |
|
{ |
|
"epoch": 0.4091266719118804, |
|
"grad_norm": 0.7512171864509583, |
|
"learning_rate": 7.937323358440935e-06, |
|
"loss": 0.0119, |
|
"step": 65 |
|
}, |
|
{ |
|
"epoch": 0.4154209284028324, |
|
"grad_norm": 0.8448026180267334, |
|
"learning_rate": 6.462696144011149e-06, |
|
"loss": 0.0143, |
|
"step": 66 |
|
}, |
|
{ |
|
"epoch": 0.4217151848937844, |
|
"grad_norm": 0.37998679280281067, |
|
"learning_rate": 5.13007856543209e-06, |
|
"loss": 0.004, |
|
"step": 67 |
|
}, |
|
{ |
|
"epoch": 0.42800944138473646, |
|
"grad_norm": 0.6252229809761047, |
|
"learning_rate": 3.9438173442575e-06, |
|
"loss": 0.0072, |
|
"step": 68 |
|
}, |
|
{ |
|
"epoch": 0.43430369787568845, |
|
"grad_norm": 0.4169199466705322, |
|
"learning_rate": 2.9077818180237693e-06, |
|
"loss": 0.0064, |
|
"step": 69 |
|
}, |
|
{ |
|
"epoch": 0.44059795436664045, |
|
"grad_norm": 0.2969546318054199, |
|
"learning_rate": 2.0253513192751373e-06, |
|
"loss": 0.005, |
|
"step": 70 |
|
}, |
|
{ |
|
"epoch": 0.44689221085759245, |
|
"grad_norm": 0.6241343021392822, |
|
"learning_rate": 1.2994041528833266e-06, |
|
"loss": 0.0097, |
|
"step": 71 |
|
}, |
|
{ |
|
"epoch": 0.45318646734854445, |
|
"grad_norm": 0.4060995578765869, |
|
"learning_rate": 7.323082076153509e-07, |
|
"loss": 0.0054, |
|
"step": 72 |
|
}, |
|
{ |
|
"epoch": 0.45948072383949645, |
|
"grad_norm": 0.5652337670326233, |
|
"learning_rate": 3.2591323257248893e-07, |
|
"loss": 0.0094, |
|
"step": 73 |
|
}, |
|
{ |
|
"epoch": 0.46577498033044845, |
|
"grad_norm": 0.6753122806549072, |
|
"learning_rate": 8.15448036932176e-08, |
|
"loss": 0.0068, |
|
"step": 74 |
|
}, |
|
{ |
|
"epoch": 0.47206923682140045, |
|
"grad_norm": 0.3501550257205963, |
|
"learning_rate": 0.0, |
|
"loss": 0.004, |
|
"step": 75 |
|
} |
|
], |
|
"logging_steps": 1, |
|
"max_steps": 75, |
|
"num_input_tokens_seen": 0, |
|
"num_train_epochs": 1, |
|
"save_steps": 50, |
|
"stateful_callbacks": { |
|
"EarlyStoppingCallback": { |
|
"args": { |
|
"early_stopping_patience": 5, |
|
"early_stopping_threshold": 0.0 |
|
}, |
|
"attributes": { |
|
"early_stopping_patience_counter": 0 |
|
} |
|
}, |
|
"TrainerControl": { |
|
"args": { |
|
"should_epoch_stop": false, |
|
"should_evaluate": false, |
|
"should_log": false, |
|
"should_save": true, |
|
"should_training_stop": true |
|
}, |
|
"attributes": {} |
|
} |
|
}, |
|
"total_flos": 4.451401010774016e+17, |
|
"train_batch_size": 8, |
|
"trial_name": null, |
|
"trial_params": null |
|
} |