{
  "best_metric": 11.832261085510254,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.09005966452774963,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0018011932905549926,
      "grad_norm": 0.4836229085922241,
      "learning_rate": 5e-05,
      "loss": 11.9408,
      "step": 1
    },
    {
      "epoch": 0.0018011932905549926,
      "eval_loss": 11.944238662719727,
      "eval_runtime": 29.629,
      "eval_samples_per_second": 126.228,
      "eval_steps_per_second": 15.795,
      "step": 1
    },
    {
      "epoch": 0.003602386581109985,
      "grad_norm": 0.4692229926586151,
      "learning_rate": 0.0001,
      "loss": 11.9448,
      "step": 2
    },
    {
      "epoch": 0.005403579871664978,
      "grad_norm": 0.4656652510166168,
      "learning_rate": 9.989294616193017e-05,
      "loss": 11.9426,
      "step": 3
    },
    {
      "epoch": 0.00720477316221997,
      "grad_norm": 0.47113367915153503,
      "learning_rate": 9.957224306869053e-05,
      "loss": 11.9406,
      "step": 4
    },
    {
      "epoch": 0.009005966452774964,
      "grad_norm": 0.49717453122138977,
      "learning_rate": 9.903926402016153e-05,
      "loss": 11.935,
      "step": 5
    },
    {
      "epoch": 0.010807159743329957,
      "grad_norm": 0.4713899493217468,
      "learning_rate": 9.829629131445342e-05,
      "loss": 11.93,
      "step": 6
    },
    {
      "epoch": 0.01260835303388495,
      "grad_norm": 0.49840787053108215,
      "learning_rate": 9.73465064747553e-05,
      "loss": 11.931,
      "step": 7
    },
    {
      "epoch": 0.01440954632443994,
      "grad_norm": 0.4253399968147278,
      "learning_rate": 9.619397662556435e-05,
      "loss": 11.9331,
      "step": 8
    },
    {
      "epoch": 0.016210739614994935,
      "grad_norm": 0.4758889377117157,
      "learning_rate": 9.484363707663442e-05,
      "loss": 11.9204,
      "step": 9
    },
    {
      "epoch": 0.018011932905549928,
      "grad_norm": 0.47827309370040894,
      "learning_rate": 9.330127018922194e-05,
      "loss": 11.9184,
      "step": 10
    },
    {
      "epoch": 0.01981312619610492,
      "grad_norm": 0.4822375774383545,
      "learning_rate": 9.157348061512727e-05,
      "loss": 11.9224,
      "step": 11
    },
    {
      "epoch": 0.021614319486659914,
      "grad_norm": 0.4610523283481598,
      "learning_rate": 8.966766701456177e-05,
      "loss": 11.9071,
      "step": 12
    },
    {
      "epoch": 0.023415512777214906,
      "grad_norm": 0.40992528200149536,
      "learning_rate": 8.759199037394887e-05,
      "loss": 11.9184,
      "step": 13
    },
    {
      "epoch": 0.0252167060677699,
      "grad_norm": 0.426434189081192,
      "learning_rate": 8.535533905932738e-05,
      "loss": 11.9042,
      "step": 14
    },
    {
      "epoch": 0.02701789935832489,
      "grad_norm": 0.44922393560409546,
      "learning_rate": 8.296729075500344e-05,
      "loss": 11.8979,
      "step": 15
    },
    {
      "epoch": 0.02881909264887988,
      "grad_norm": 0.5032673478126526,
      "learning_rate": 8.043807145043604e-05,
      "loss": 11.8922,
      "step": 16
    },
    {
      "epoch": 0.030620285939434874,
      "grad_norm": 0.5120476484298706,
      "learning_rate": 7.777851165098012e-05,
      "loss": 11.8911,
      "step": 17
    },
    {
      "epoch": 0.03242147922998987,
      "grad_norm": 0.5485901236534119,
      "learning_rate": 7.500000000000001e-05,
      "loss": 11.8884,
      "step": 18
    },
    {
      "epoch": 0.03422267252054486,
      "grad_norm": 0.5173953175544739,
      "learning_rate": 7.211443451095007e-05,
      "loss": 11.8938,
      "step": 19
    },
    {
      "epoch": 0.036023865811099856,
      "grad_norm": 0.5765029191970825,
      "learning_rate": 6.91341716182545e-05,
      "loss": 11.8828,
      "step": 20
    },
    {
      "epoch": 0.037825059101654845,
      "grad_norm": 0.6179419159889221,
      "learning_rate": 6.607197326515808e-05,
      "loss": 11.8809,
      "step": 21
    },
    {
      "epoch": 0.03962625239220984,
      "grad_norm": 0.5752896070480347,
      "learning_rate": 6.294095225512603e-05,
      "loss": 11.8891,
      "step": 22
    },
    {
      "epoch": 0.04142744568276483,
      "grad_norm": 0.6997311115264893,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 11.8762,
      "step": 23
    },
    {
      "epoch": 0.04322863897331983,
      "grad_norm": 0.6879186630249023,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 11.8733,
      "step": 24
    },
    {
      "epoch": 0.04502983226387482,
      "grad_norm": 0.654765248298645,
      "learning_rate": 5.327015646150716e-05,
      "loss": 11.8922,
      "step": 25
    },
    {
      "epoch": 0.04502983226387482,
      "eval_loss": 11.866266250610352,
      "eval_runtime": 38.7127,
      "eval_samples_per_second": 96.609,
      "eval_steps_per_second": 12.089,
      "step": 25
    },
    {
      "epoch": 0.04683102555442981,
      "grad_norm": 0.6474030017852783,
      "learning_rate": 5e-05,
      "loss": 11.8627,
      "step": 26
    },
    {
      "epoch": 0.0486322188449848,
      "grad_norm": 0.6563072204589844,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 11.8594,
      "step": 27
    },
    {
      "epoch": 0.0504334121355398,
      "grad_norm": 0.6200706362724304,
      "learning_rate": 4.347369038899744e-05,
      "loss": 11.8628,
      "step": 28
    },
    {
      "epoch": 0.05223460542609479,
      "grad_norm": 0.7235188484191895,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 11.8635,
      "step": 29
    },
    {
      "epoch": 0.05403579871664978,
      "grad_norm": 0.7191481590270996,
      "learning_rate": 3.705904774487396e-05,
      "loss": 11.8525,
      "step": 30
    },
    {
      "epoch": 0.05583699200720477,
      "grad_norm": 0.6996354460716248,
      "learning_rate": 3.392802673484193e-05,
      "loss": 11.8521,
      "step": 31
    },
    {
      "epoch": 0.05763818529775976,
      "grad_norm": 0.6822149753570557,
      "learning_rate": 3.086582838174551e-05,
      "loss": 11.8534,
      "step": 32
    },
    {
      "epoch": 0.05943937858831476,
      "grad_norm": 0.7697290182113647,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 11.8328,
      "step": 33
    },
    {
      "epoch": 0.06124057187886975,
      "grad_norm": 0.7364133596420288,
      "learning_rate": 2.500000000000001e-05,
      "loss": 11.8511,
      "step": 34
    },
    {
      "epoch": 0.06304176516942474,
      "grad_norm": 0.8046404719352722,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 11.8493,
      "step": 35
    },
    {
      "epoch": 0.06484295845997974,
      "grad_norm": 0.7927674651145935,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 11.8398,
      "step": 36
    },
    {
      "epoch": 0.06664415175053473,
      "grad_norm": 0.7487473487854004,
      "learning_rate": 1.703270924499656e-05,
      "loss": 11.85,
      "step": 37
    },
    {
      "epoch": 0.06844534504108972,
      "grad_norm": 0.8775519132614136,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 11.8442,
      "step": 38
    },
    {
      "epoch": 0.07024653833164471,
      "grad_norm": 0.8876802921295166,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 11.8308,
      "step": 39
    },
    {
      "epoch": 0.07204773162219971,
      "grad_norm": 0.7719692587852478,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 11.8375,
      "step": 40
    },
    {
      "epoch": 0.0738489249127547,
      "grad_norm": 0.7725573182106018,
      "learning_rate": 8.426519384872733e-06,
      "loss": 11.8272,
      "step": 41
    },
    {
      "epoch": 0.07565011820330969,
      "grad_norm": 0.8153325319290161,
      "learning_rate": 6.698729810778065e-06,
      "loss": 11.8334,
      "step": 42
    },
    {
      "epoch": 0.07745131149386468,
      "grad_norm": 0.7966391444206238,
      "learning_rate": 5.156362923365588e-06,
      "loss": 11.8349,
      "step": 43
    },
    {
      "epoch": 0.07925250478441968,
      "grad_norm": 0.8197851181030273,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 11.8284,
      "step": 44
    },
    {
      "epoch": 0.08105369807497467,
      "grad_norm": 0.7913408279418945,
      "learning_rate": 2.653493525244721e-06,
      "loss": 11.8388,
      "step": 45
    },
    {
      "epoch": 0.08285489136552966,
      "grad_norm": 0.8576236963272095,
      "learning_rate": 1.70370868554659e-06,
      "loss": 11.8242,
      "step": 46
    },
    {
      "epoch": 0.08465608465608465,
      "grad_norm": 0.8047311902046204,
      "learning_rate": 9.607359798384785e-07,
      "loss": 11.8493,
      "step": 47
    },
    {
      "epoch": 0.08645727794663965,
      "grad_norm": 0.662825882434845,
      "learning_rate": 4.277569313094809e-07,
      "loss": 11.8526,
      "step": 48
    },
    {
      "epoch": 0.08825847123719464,
      "grad_norm": 0.8610819578170776,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 11.832,
      "step": 49
    },
    {
      "epoch": 0.09005966452774963,
      "grad_norm": 0.7742445468902588,
      "learning_rate": 0.0,
      "loss": 11.8454,
      "step": 50
    },
    {
      "epoch": 0.09005966452774963,
      "eval_loss": 11.832261085510254,
      "eval_runtime": 15.1468,
      "eval_samples_per_second": 246.917,
      "eval_steps_per_second": 30.898,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 417735062323200.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}