{
  "best_metric": 2.1184535026550293,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.08787346221441125,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0017574692442882249,
      "grad_norm": 8.197124481201172,
      "learning_rate": 5e-05,
      "loss": 41.5996,
      "step": 1
    },
    {
      "epoch": 0.0017574692442882249,
      "eval_loss": 2.67146635055542,
      "eval_runtime": 1.9945,
      "eval_samples_per_second": 25.069,
      "eval_steps_per_second": 6.518,
      "step": 1
    },
    {
      "epoch": 0.0035149384885764497,
      "grad_norm": 8.175345420837402,
      "learning_rate": 0.0001,
      "loss": 41.7819,
      "step": 2
    },
    {
      "epoch": 0.005272407732864675,
      "grad_norm": 7.782145977020264,
      "learning_rate": 9.990365154573717e-05,
      "loss": 41.4354,
      "step": 3
    },
    {
      "epoch": 0.007029876977152899,
      "grad_norm": 6.955953598022461,
      "learning_rate": 9.961501876182148e-05,
      "loss": 41.4572,
      "step": 4
    },
    {
      "epoch": 0.008787346221441126,
      "grad_norm": 6.188976764678955,
      "learning_rate": 9.913533761814537e-05,
      "loss": 40.7239,
      "step": 5
    },
    {
      "epoch": 0.01054481546572935,
      "grad_norm": 6.627973556518555,
      "learning_rate": 9.846666218300807e-05,
      "loss": 40.2346,
      "step": 6
    },
    {
      "epoch": 0.012302284710017574,
      "grad_norm": 7.199131488800049,
      "learning_rate": 9.761185582727977e-05,
      "loss": 40.0303,
      "step": 7
    },
    {
      "epoch": 0.014059753954305799,
      "grad_norm": 6.509369850158691,
      "learning_rate": 9.657457896300791e-05,
      "loss": 39.3336,
      "step": 8
    },
    {
      "epoch": 0.015817223198594025,
      "grad_norm": 5.756955623626709,
      "learning_rate": 9.535927336897098e-05,
      "loss": 38.5489,
      "step": 9
    },
    {
      "epoch": 0.01757469244288225,
      "grad_norm": 5.405374050140381,
      "learning_rate": 9.397114317029975e-05,
      "loss": 39.4141,
      "step": 10
    },
    {
      "epoch": 0.019332161687170474,
      "grad_norm": 5.918015003204346,
      "learning_rate": 9.241613255361455e-05,
      "loss": 38.2517,
      "step": 11
    },
    {
      "epoch": 0.0210896309314587,
      "grad_norm": 6.271076679229736,
      "learning_rate": 9.070090031310558e-05,
      "loss": 37.0873,
      "step": 12
    },
    {
      "epoch": 0.022847100175746926,
      "grad_norm": 7.2467522621154785,
      "learning_rate": 8.883279133655399e-05,
      "loss": 37.3231,
      "step": 13
    },
    {
      "epoch": 0.02460456942003515,
      "grad_norm": 6.07332706451416,
      "learning_rate": 8.681980515339464e-05,
      "loss": 36.9542,
      "step": 14
    },
    {
      "epoch": 0.026362038664323375,
      "grad_norm": 5.141885280609131,
      "learning_rate": 8.467056167950311e-05,
      "loss": 36.7673,
      "step": 15
    },
    {
      "epoch": 0.028119507908611598,
      "grad_norm": 4.824089050292969,
      "learning_rate": 8.239426430539243e-05,
      "loss": 36.6982,
      "step": 16
    },
    {
      "epoch": 0.029876977152899824,
      "grad_norm": 4.891728401184082,
      "learning_rate": 8.000066048588211e-05,
      "loss": 36.8259,
      "step": 17
    },
    {
      "epoch": 0.03163444639718805,
      "grad_norm": 4.810305118560791,
      "learning_rate": 7.75e-05,
      "loss": 36.0425,
      "step": 18
    },
    {
      "epoch": 0.033391915641476276,
      "grad_norm": 4.6905517578125,
      "learning_rate": 7.490299105985507e-05,
      "loss": 36.3803,
      "step": 19
    },
    {
      "epoch": 0.0351493848857645,
      "grad_norm": 4.8916730880737305,
      "learning_rate": 7.222075445642904e-05,
      "loss": 35.6215,
      "step": 20
    },
    {
      "epoch": 0.03690685413005272,
      "grad_norm": 5.4649200439453125,
      "learning_rate": 6.946477593864228e-05,
      "loss": 35.9459,
      "step": 21
    },
    {
      "epoch": 0.03866432337434095,
      "grad_norm": 4.950930118560791,
      "learning_rate": 6.664685702961344e-05,
      "loss": 35.3764,
      "step": 22
    },
    {
      "epoch": 0.040421792618629174,
      "grad_norm": 4.891223907470703,
      "learning_rate": 6.377906449072578e-05,
      "loss": 35.791,
      "step": 23
    },
    {
      "epoch": 0.0421792618629174,
      "grad_norm": 4.985620975494385,
      "learning_rate": 6.087367864990233e-05,
      "loss": 35.3101,
      "step": 24
    },
    {
      "epoch": 0.043936731107205626,
      "grad_norm": 7.151154518127441,
      "learning_rate": 5.794314081535644e-05,
      "loss": 34.113,
      "step": 25
    },
    {
      "epoch": 0.043936731107205626,
      "eval_loss": 2.184957981109619,
      "eval_runtime": 2.0461,
      "eval_samples_per_second": 24.437,
      "eval_steps_per_second": 6.354,
      "step": 25
    },
    {
      "epoch": 0.04569420035149385,
      "grad_norm": 6.682544231414795,
      "learning_rate": 5.500000000000001e-05,
      "loss": 35.104,
      "step": 26
    },
    {
      "epoch": 0.04745166959578207,
      "grad_norm": 5.638045787811279,
      "learning_rate": 5.205685918464356e-05,
      "loss": 35.0024,
      "step": 27
    },
    {
      "epoch": 0.0492091388400703,
      "grad_norm": 5.364726543426514,
      "learning_rate": 4.912632135009769e-05,
      "loss": 34.663,
      "step": 28
    },
    {
      "epoch": 0.050966608084358524,
      "grad_norm": 5.238322734832764,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 35.3332,
      "step": 29
    },
    {
      "epoch": 0.05272407732864675,
      "grad_norm": 4.853617191314697,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 34.6261,
      "step": 30
    },
    {
      "epoch": 0.054481546572934976,
      "grad_norm": 4.672550678253174,
      "learning_rate": 4.053522406135775e-05,
      "loss": 34.2556,
      "step": 31
    },
    {
      "epoch": 0.056239015817223195,
      "grad_norm": 5.044317722320557,
      "learning_rate": 3.777924554357096e-05,
      "loss": 35.2622,
      "step": 32
    },
    {
      "epoch": 0.05799648506151142,
      "grad_norm": 4.753660202026367,
      "learning_rate": 3.509700894014496e-05,
      "loss": 34.329,
      "step": 33
    },
    {
      "epoch": 0.05975395430579965,
      "grad_norm": 5.342644691467285,
      "learning_rate": 3.250000000000001e-05,
      "loss": 33.9036,
      "step": 34
    },
    {
      "epoch": 0.061511423550087874,
      "grad_norm": 5.310150146484375,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 34.2209,
      "step": 35
    },
    {
      "epoch": 0.0632688927943761,
      "grad_norm": 5.2009806632995605,
      "learning_rate": 2.760573569460757e-05,
      "loss": 34.3841,
      "step": 36
    },
    {
      "epoch": 0.06502636203866433,
      "grad_norm": 6.031763076782227,
      "learning_rate": 2.53294383204969e-05,
      "loss": 34.0815,
      "step": 37
    },
    {
      "epoch": 0.06678383128295255,
      "grad_norm": 6.2134504318237305,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 34.21,
      "step": 38
    },
    {
      "epoch": 0.06854130052724078,
      "grad_norm": 5.3948974609375,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 34.3695,
      "step": 39
    },
    {
      "epoch": 0.070298769771529,
      "grad_norm": 5.243184566497803,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 33.6793,
      "step": 40
    },
    {
      "epoch": 0.07205623901581722,
      "grad_norm": 4.971773624420166,
      "learning_rate": 1.758386744638546e-05,
      "loss": 34.1816,
      "step": 41
    },
    {
      "epoch": 0.07381370826010544,
      "grad_norm": 4.651072025299072,
      "learning_rate": 1.602885682970026e-05,
      "loss": 33.9473,
      "step": 42
    },
    {
      "epoch": 0.07557117750439367,
      "grad_norm": 4.51092529296875,
      "learning_rate": 1.464072663102903e-05,
      "loss": 34.2455,
      "step": 43
    },
    {
      "epoch": 0.0773286467486819,
      "grad_norm": 4.373141288757324,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 34.1921,
      "step": 44
    },
    {
      "epoch": 0.07908611599297012,
      "grad_norm": 4.692596912384033,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 34.3758,
      "step": 45
    },
    {
      "epoch": 0.08084358523725835,
      "grad_norm": 4.683785438537598,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 33.96,
      "step": 46
    },
    {
      "epoch": 0.08260105448154657,
      "grad_norm": 5.398478031158447,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 34.4189,
      "step": 47
    },
    {
      "epoch": 0.0843585237258348,
      "grad_norm": 5.291502952575684,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 34.3325,
      "step": 48
    },
    {
      "epoch": 0.08611599297012303,
      "grad_norm": 5.439122200012207,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 33.6364,
      "step": 49
    },
    {
      "epoch": 0.08787346221441125,
      "grad_norm": 7.769702434539795,
      "learning_rate": 1e-05,
      "loss": 33.6672,
      "step": 50
    },
    {
      "epoch": 0.08787346221441125,
      "eval_loss": 2.1184535026550293,
      "eval_runtime": 2.0483,
      "eval_samples_per_second": 24.411,
      "eval_steps_per_second": 6.347,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.967137622687744e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}