{
  "best_metric": 1.3382925987243652,
  "best_model_checkpoint": "miner_id_24/checkpoint-50",
  "epoch": 0.06685050555694827,
  "eval_steps": 25,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0013370101111389655,
      "grad_norm": 45.61884689331055,
      "learning_rate": 5e-05,
      "loss": 1.8975,
      "step": 1
    },
    {
      "epoch": 0.0013370101111389655,
      "eval_loss": 2.1031837463378906,
      "eval_runtime": 144.5887,
      "eval_samples_per_second": 34.851,
      "eval_steps_per_second": 4.357,
      "step": 1
    },
    {
      "epoch": 0.002674020222277931,
      "grad_norm": 61.574256896972656,
      "learning_rate": 0.0001,
      "loss": 1.8337,
      "step": 2
    },
    {
      "epoch": 0.004011030333416896,
      "grad_norm": 47.025516510009766,
      "learning_rate": 9.989294616193017e-05,
      "loss": 1.7141,
      "step": 3
    },
    {
      "epoch": 0.005348040444555862,
      "grad_norm": 42.46583938598633,
      "learning_rate": 9.957224306869053e-05,
      "loss": 1.5738,
      "step": 4
    },
    {
      "epoch": 0.0066850505556948275,
      "grad_norm": 38.542701721191406,
      "learning_rate": 9.903926402016153e-05,
      "loss": 1.3803,
      "step": 5
    },
    {
      "epoch": 0.008022060666833792,
      "grad_norm": 38.241188049316406,
      "learning_rate": 9.829629131445342e-05,
      "loss": 1.4181,
      "step": 6
    },
    {
      "epoch": 0.009359070777972758,
      "grad_norm": 37.101680755615234,
      "learning_rate": 9.73465064747553e-05,
      "loss": 1.3047,
      "step": 7
    },
    {
      "epoch": 0.010696080889111724,
      "grad_norm": 36.86508560180664,
      "learning_rate": 9.619397662556435e-05,
      "loss": 1.3025,
      "step": 8
    },
    {
      "epoch": 0.01203309100025069,
      "grad_norm": 35.759864807128906,
      "learning_rate": 9.484363707663442e-05,
      "loss": 1.4198,
      "step": 9
    },
    {
      "epoch": 0.013370101111389655,
      "grad_norm": 37.84111404418945,
      "learning_rate": 9.330127018922194e-05,
      "loss": 1.3385,
      "step": 10
    },
    {
      "epoch": 0.01470711122252862,
      "grad_norm": 42.640438079833984,
      "learning_rate": 9.157348061512727e-05,
      "loss": 1.2823,
      "step": 11
    },
    {
      "epoch": 0.016044121333667585,
      "grad_norm": 44.959407806396484,
      "learning_rate": 8.966766701456177e-05,
      "loss": 1.3878,
      "step": 12
    },
    {
      "epoch": 0.01738113144480655,
      "grad_norm": 45.82082748413086,
      "learning_rate": 8.759199037394887e-05,
      "loss": 1.5677,
      "step": 13
    },
    {
      "epoch": 0.018718141555945516,
      "grad_norm": 28.606582641601562,
      "learning_rate": 8.535533905932738e-05,
      "loss": 1.6885,
      "step": 14
    },
    {
      "epoch": 0.02005515166708448,
      "grad_norm": 23.147939682006836,
      "learning_rate": 8.296729075500344e-05,
      "loss": 1.4806,
      "step": 15
    },
    {
      "epoch": 0.021392161778223447,
      "grad_norm": 23.436861038208008,
      "learning_rate": 8.043807145043604e-05,
      "loss": 1.4649,
      "step": 16
    },
    {
      "epoch": 0.022729171889362413,
      "grad_norm": 26.963802337646484,
      "learning_rate": 7.777851165098012e-05,
      "loss": 1.3901,
      "step": 17
    },
    {
      "epoch": 0.02406618200050138,
      "grad_norm": 28.463054656982422,
      "learning_rate": 7.500000000000001e-05,
      "loss": 1.3727,
      "step": 18
    },
    {
      "epoch": 0.025403192111640344,
      "grad_norm": 28.883792877197266,
      "learning_rate": 7.211443451095007e-05,
      "loss": 1.1623,
      "step": 19
    },
    {
      "epoch": 0.02674020222277931,
      "grad_norm": 30.714143753051758,
      "learning_rate": 6.91341716182545e-05,
      "loss": 1.3107,
      "step": 20
    },
    {
      "epoch": 0.028077212333918276,
      "grad_norm": 32.90729522705078,
      "learning_rate": 6.607197326515808e-05,
      "loss": 1.3789,
      "step": 21
    },
    {
      "epoch": 0.02941422244505724,
      "grad_norm": 33.87862014770508,
      "learning_rate": 6.294095225512603e-05,
      "loss": 1.3426,
      "step": 22
    },
    {
      "epoch": 0.030751232556196207,
      "grad_norm": 34.535404205322266,
      "learning_rate": 5.9754516100806423e-05,
      "loss": 1.2615,
      "step": 23
    },
    {
      "epoch": 0.03208824266733517,
      "grad_norm": 45.40268325805664,
      "learning_rate": 5.6526309611002594e-05,
      "loss": 1.3269,
      "step": 24
    },
    {
      "epoch": 0.033425252778474135,
      "grad_norm": 45.20197296142578,
      "learning_rate": 5.327015646150716e-05,
      "loss": 1.2515,
      "step": 25
    },
    {
      "epoch": 0.033425252778474135,
      "eval_loss": 1.3706095218658447,
      "eval_runtime": 145.5182,
      "eval_samples_per_second": 34.628,
      "eval_steps_per_second": 4.329,
      "step": 25
    },
    {
      "epoch": 0.0347622628896131,
      "grad_norm": 25.990808486938477,
      "learning_rate": 5e-05,
      "loss": 1.5636,
      "step": 26
    },
    {
      "epoch": 0.036099273000752066,
      "grad_norm": 22.69860076904297,
      "learning_rate": 4.6729843538492847e-05,
      "loss": 1.5672,
      "step": 27
    },
    {
      "epoch": 0.03743628311189103,
      "grad_norm": 22.71676254272461,
      "learning_rate": 4.347369038899744e-05,
      "loss": 1.43,
      "step": 28
    },
    {
      "epoch": 0.03877329322303,
      "grad_norm": 23.096960067749023,
      "learning_rate": 4.0245483899193595e-05,
      "loss": 1.4142,
      "step": 29
    },
    {
      "epoch": 0.04011030333416896,
      "grad_norm": 23.61298942565918,
      "learning_rate": 3.705904774487396e-05,
      "loss": 1.326,
      "step": 30
    },
    {
      "epoch": 0.04144731344530793,
      "grad_norm": 25.191864013671875,
      "learning_rate": 3.392802673484193e-05,
      "loss": 1.3509,
      "step": 31
    },
    {
      "epoch": 0.042784323556446895,
      "grad_norm": 25.648237228393555,
      "learning_rate": 3.086582838174551e-05,
      "loss": 1.333,
      "step": 32
    },
    {
      "epoch": 0.04412133366758586,
      "grad_norm": 27.153898239135742,
      "learning_rate": 2.7885565489049946e-05,
      "loss": 1.3001,
      "step": 33
    },
    {
      "epoch": 0.045458343778724826,
      "grad_norm": 28.19595718383789,
      "learning_rate": 2.500000000000001e-05,
      "loss": 1.4561,
      "step": 34
    },
    {
      "epoch": 0.04679535388986379,
      "grad_norm": 29.54447364807129,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 1.3213,
      "step": 35
    },
    {
      "epoch": 0.04813236400100276,
      "grad_norm": 31.437786102294922,
      "learning_rate": 1.9561928549563968e-05,
      "loss": 1.1158,
      "step": 36
    },
    {
      "epoch": 0.04946937411214172,
      "grad_norm": 37.75790023803711,
      "learning_rate": 1.703270924499656e-05,
      "loss": 1.2325,
      "step": 37
    },
    {
      "epoch": 0.05080638422328069,
      "grad_norm": 31.451086044311523,
      "learning_rate": 1.4644660940672627e-05,
      "loss": 1.3948,
      "step": 38
    },
    {
      "epoch": 0.052143394334419654,
      "grad_norm": 19.551162719726562,
      "learning_rate": 1.2408009626051137e-05,
      "loss": 1.5541,
      "step": 39
    },
    {
      "epoch": 0.05348040444555862,
      "grad_norm": 20.893918991088867,
      "learning_rate": 1.0332332985438248e-05,
      "loss": 1.5098,
      "step": 40
    },
    {
      "epoch": 0.054817414556697586,
      "grad_norm": 21.392484664916992,
      "learning_rate": 8.426519384872733e-06,
      "loss": 1.4131,
      "step": 41
    },
    {
      "epoch": 0.05615442466783655,
      "grad_norm": 21.886232376098633,
      "learning_rate": 6.698729810778065e-06,
      "loss": 1.348,
      "step": 42
    },
    {
      "epoch": 0.05749143477897552,
      "grad_norm": 22.548765182495117,
      "learning_rate": 5.156362923365588e-06,
      "loss": 1.2932,
      "step": 43
    },
    {
      "epoch": 0.05882844489011448,
      "grad_norm": 24.45055389404297,
      "learning_rate": 3.8060233744356633e-06,
      "loss": 1.2246,
      "step": 44
    },
    {
      "epoch": 0.06016545500125345,
      "grad_norm": 25.212177276611328,
      "learning_rate": 2.653493525244721e-06,
      "loss": 1.2447,
      "step": 45
    },
    {
      "epoch": 0.061502465112392414,
      "grad_norm": 25.975236892700195,
      "learning_rate": 1.70370868554659e-06,
      "loss": 1.2924,
      "step": 46
    },
    {
      "epoch": 0.06283947522353138,
      "grad_norm": 28.950206756591797,
      "learning_rate": 9.607359798384785e-07,
      "loss": 1.317,
      "step": 47
    },
    {
      "epoch": 0.06417648533467034,
      "grad_norm": 29.081235885620117,
      "learning_rate": 4.277569313094809e-07,
      "loss": 1.136,
      "step": 48
    },
    {
      "epoch": 0.06551349544580931,
      "grad_norm": 33.672969818115234,
      "learning_rate": 1.0705383806982606e-07,
      "loss": 1.2239,
      "step": 49
    },
    {
      "epoch": 0.06685050555694827,
      "grad_norm": 46.237857818603516,
      "learning_rate": 0.0,
      "loss": 1.2762,
      "step": 50
    },
    {
      "epoch": 0.06685050555694827,
      "eval_loss": 1.3382925987243652,
      "eval_runtime": 292.4772,
      "eval_samples_per_second": 17.229,
      "eval_steps_per_second": 2.154,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 1,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.658021338284032e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}