{
  "best_metric": 4.716582775115967,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 2.245125348189415,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04456824512534819,
      "grad_norm": 3.7572438716888428,
      "learning_rate": 5e-05,
      "loss": 10.7898,
      "step": 1
    },
    {
      "epoch": 0.04456824512534819,
      "eval_loss": 11.917471885681152,
      "eval_runtime": 2.4732,
      "eval_samples_per_second": 20.217,
      "eval_steps_per_second": 5.256,
      "step": 1
    },
    {
      "epoch": 0.08913649025069638,
      "grad_norm": 2.1436166763305664,
      "learning_rate": 0.0001,
      "loss": 10.8556,
      "step": 2
    },
    {
      "epoch": 0.13370473537604458,
      "grad_norm": 1.6581848859786987,
      "learning_rate": 9.990365154573717e-05,
      "loss": 11.401,
      "step": 3
    },
    {
      "epoch": 0.17827298050139276,
      "grad_norm": 2.0293407440185547,
      "learning_rate": 9.961501876182148e-05,
      "loss": 11.9143,
      "step": 4
    },
    {
      "epoch": 0.22284122562674094,
      "grad_norm": 2.4636528491973877,
      "learning_rate": 9.913533761814537e-05,
      "loss": 12.2427,
      "step": 5
    },
    {
      "epoch": 0.26740947075208915,
      "grad_norm": 6.925570487976074,
      "learning_rate": 9.846666218300807e-05,
      "loss": 8.1847,
      "step": 6
    },
    {
      "epoch": 0.31197771587743733,
      "grad_norm": 4.652174949645996,
      "learning_rate": 9.761185582727977e-05,
      "loss": 7.1735,
      "step": 7
    },
    {
      "epoch": 0.3565459610027855,
      "grad_norm": 2.5405783653259277,
      "learning_rate": 9.657457896300791e-05,
      "loss": 8.5635,
      "step": 8
    },
    {
      "epoch": 0.4011142061281337,
      "grad_norm": 3.3183186054229736,
      "learning_rate": 9.535927336897098e-05,
      "loss": 9.1195,
      "step": 9
    },
    {
      "epoch": 0.4456824512534819,
      "grad_norm": 3.9773972034454346,
      "learning_rate": 9.397114317029975e-05,
      "loss": 9.513,
      "step": 10
    },
    {
      "epoch": 0.49025069637883006,
      "grad_norm": 5.126943588256836,
      "learning_rate": 9.241613255361455e-05,
      "loss": 10.8839,
      "step": 11
    },
    {
      "epoch": 0.5348189415041783,
      "grad_norm": 6.313504695892334,
      "learning_rate": 9.070090031310558e-05,
      "loss": 4.9107,
      "step": 12
    },
    {
      "epoch": 0.5793871866295265,
      "grad_norm": 3.481841564178467,
      "learning_rate": 8.883279133655399e-05,
      "loss": 6.6301,
      "step": 13
    },
    {
      "epoch": 0.6239554317548747,
      "grad_norm": 3.6504743099212646,
      "learning_rate": 8.681980515339464e-05,
      "loss": 7.2363,
      "step": 14
    },
    {
      "epoch": 0.6685236768802229,
      "grad_norm": 3.431692361831665,
      "learning_rate": 8.467056167950311e-05,
      "loss": 6.7311,
      "step": 15
    },
    {
      "epoch": 0.713091922005571,
      "grad_norm": 4.575716495513916,
      "learning_rate": 8.239426430539243e-05,
      "loss": 7.6788,
      "step": 16
    },
    {
      "epoch": 0.7576601671309192,
      "grad_norm": 4.351408958435059,
      "learning_rate": 8.000066048588211e-05,
      "loss": 4.4246,
      "step": 17
    },
    {
      "epoch": 0.8022284122562674,
      "grad_norm": 3.999706983566284,
      "learning_rate": 7.75e-05,
      "loss": 5.5048,
      "step": 18
    },
    {
      "epoch": 0.8467966573816156,
      "grad_norm": 2.2049319744110107,
      "learning_rate": 7.490299105985507e-05,
      "loss": 6.6618,
      "step": 19
    },
    {
      "epoch": 0.8913649025069638,
      "grad_norm": 2.0348222255706787,
      "learning_rate": 7.222075445642904e-05,
      "loss": 6.2686,
      "step": 20
    },
    {
      "epoch": 0.935933147632312,
      "grad_norm": 2.430169105529785,
      "learning_rate": 6.946477593864228e-05,
      "loss": 6.3663,
      "step": 21
    },
    {
      "epoch": 0.9805013927576601,
      "grad_norm": 4.576208114624023,
      "learning_rate": 6.664685702961344e-05,
      "loss": 7.0645,
      "step": 22
    },
    {
      "epoch": 1.033426183844011,
      "grad_norm": 5.862423896789551,
      "learning_rate": 6.377906449072578e-05,
      "loss": 8.5183,
      "step": 23
    },
    {
      "epoch": 1.0779944289693593,
      "grad_norm": 3.1182539463043213,
      "learning_rate": 6.087367864990233e-05,
      "loss": 5.0484,
      "step": 24
    },
    {
      "epoch": 1.1225626740947074,
      "grad_norm": 2.3813588619232178,
      "learning_rate": 5.794314081535644e-05,
      "loss": 5.7541,
      "step": 25
    },
    {
      "epoch": 1.1225626740947074,
      "eval_loss": 5.417140483856201,
      "eval_runtime": 1.8352,
      "eval_samples_per_second": 27.245,
      "eval_steps_per_second": 7.084,
      "step": 25
    },
    {
      "epoch": 1.1671309192200556,
      "grad_norm": 2.453505516052246,
      "learning_rate": 5.500000000000001e-05,
      "loss": 5.583,
      "step": 26
    },
    {
      "epoch": 1.2116991643454038,
      "grad_norm": 2.5713016986846924,
      "learning_rate": 5.205685918464356e-05,
      "loss": 5.6186,
      "step": 27
    },
    {
      "epoch": 1.2562674094707522,
      "grad_norm": 1.9322009086608887,
      "learning_rate": 4.912632135009769e-05,
      "loss": 3.9696,
      "step": 28
    },
    {
      "epoch": 1.3008356545961002,
      "grad_norm": 2.791020631790161,
      "learning_rate": 4.6220935509274235e-05,
      "loss": 4.4587,
      "step": 29
    },
    {
      "epoch": 1.3454038997214486,
      "grad_norm": 2.149622917175293,
      "learning_rate": 4.3353142970386564e-05,
      "loss": 4.8004,
      "step": 30
    },
    {
      "epoch": 1.3899721448467965,
      "grad_norm": 2.2228457927703857,
      "learning_rate": 4.053522406135775e-05,
      "loss": 5.1657,
      "step": 31
    },
    {
      "epoch": 1.434540389972145,
      "grad_norm": 2.323864221572876,
      "learning_rate": 3.777924554357096e-05,
      "loss": 5.0189,
      "step": 32
    },
    {
      "epoch": 1.479108635097493,
      "grad_norm": 2.922153949737549,
      "learning_rate": 3.509700894014496e-05,
      "loss": 5.52,
      "step": 33
    },
    {
      "epoch": 1.5236768802228413,
      "grad_norm": 2.770920515060425,
      "learning_rate": 3.250000000000001e-05,
      "loss": 4.2389,
      "step": 34
    },
    {
      "epoch": 1.5682451253481893,
      "grad_norm": 2.918221950531006,
      "learning_rate": 2.9999339514117912e-05,
      "loss": 4.0683,
      "step": 35
    },
    {
      "epoch": 1.6128133704735377,
      "grad_norm": 2.5188300609588623,
      "learning_rate": 2.760573569460757e-05,
      "loss": 5.1571,
      "step": 36
    },
    {
      "epoch": 1.6573816155988856,
      "grad_norm": 2.6808626651763916,
      "learning_rate": 2.53294383204969e-05,
      "loss": 5.0721,
      "step": 37
    },
    {
      "epoch": 1.701949860724234,
      "grad_norm": 1.907468318939209,
      "learning_rate": 2.3180194846605367e-05,
      "loss": 5.1194,
      "step": 38
    },
    {
      "epoch": 1.7465181058495822,
      "grad_norm": 1.6851441860198975,
      "learning_rate": 2.1167208663446025e-05,
      "loss": 3.4621,
      "step": 39
    },
    {
      "epoch": 1.7910863509749304,
      "grad_norm": 2.436414957046509,
      "learning_rate": 1.9299099686894423e-05,
      "loss": 4.29,
      "step": 40
    },
    {
      "epoch": 1.8356545961002786,
      "grad_norm": 1.9236414432525635,
      "learning_rate": 1.758386744638546e-05,
      "loss": 4.988,
      "step": 41
    },
    {
      "epoch": 1.8802228412256268,
      "grad_norm": 1.9346405267715454,
      "learning_rate": 1.602885682970026e-05,
      "loss": 4.5542,
      "step": 42
    },
    {
      "epoch": 1.924791086350975,
      "grad_norm": 2.173877000808716,
      "learning_rate": 1.464072663102903e-05,
      "loss": 4.9275,
      "step": 43
    },
    {
      "epoch": 1.9693593314763231,
      "grad_norm": 3.563979148864746,
      "learning_rate": 1.3425421036992098e-05,
      "loss": 5.8462,
      "step": 44
    },
    {
      "epoch": 2.022284122562674,
      "grad_norm": 3.204674005508423,
      "learning_rate": 1.2388144172720251e-05,
      "loss": 7.8565,
      "step": 45
    },
    {
      "epoch": 2.066852367688022,
      "grad_norm": 1.8876923322677612,
      "learning_rate": 1.1533337816991932e-05,
      "loss": 3.5769,
      "step": 46
    },
    {
      "epoch": 2.1114206128133706,
      "grad_norm": 1.8125336170196533,
      "learning_rate": 1.0864662381854632e-05,
      "loss": 4.7022,
      "step": 47
    },
    {
      "epoch": 2.1559888579387185,
      "grad_norm": 1.8523550033569336,
      "learning_rate": 1.0384981238178534e-05,
      "loss": 4.4587,
      "step": 48
    },
    {
      "epoch": 2.200557103064067,
      "grad_norm": 2.4004316329956055,
      "learning_rate": 1.0096348454262845e-05,
      "loss": 4.7921,
      "step": 49
    },
    {
      "epoch": 2.245125348189415,
      "grad_norm": 1.909448504447937,
      "learning_rate": 1e-05,
      "loss": 2.6394,
      "step": 50
    },
    {
      "epoch": 2.245125348189415,
      "eval_loss": 4.716582775115967,
      "eval_runtime": 1.8513,
      "eval_samples_per_second": 27.009,
      "eval_steps_per_second": 7.022,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.624057810649088e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}