{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.010654165778819518,
  "eval_steps": 9,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00021308331557639036,
      "grad_norm": 1.4914582967758179,
      "learning_rate": 1e-05,
      "loss": 2.9025,
      "step": 1
    },
    {
      "epoch": 0.00021308331557639036,
      "eval_loss": 3.0053186416625977,
      "eval_runtime": 481.0243,
      "eval_samples_per_second": 8.216,
      "eval_steps_per_second": 1.027,
      "step": 1
    },
    {
      "epoch": 0.0004261666311527807,
      "grad_norm": 1.2396098375320435,
      "learning_rate": 2e-05,
      "loss": 2.8422,
      "step": 2
    },
    {
      "epoch": 0.0006392499467291711,
      "grad_norm": 1.7862789630889893,
      "learning_rate": 3e-05,
      "loss": 2.9618,
      "step": 3
    },
    {
      "epoch": 0.0008523332623055614,
      "grad_norm": 1.4219273328781128,
      "learning_rate": 4e-05,
      "loss": 2.9024,
      "step": 4
    },
    {
      "epoch": 0.001065416577881952,
      "grad_norm": 1.5405254364013672,
      "learning_rate": 5e-05,
      "loss": 2.8993,
      "step": 5
    },
    {
      "epoch": 0.0012784998934583422,
      "grad_norm": 1.578925371170044,
      "learning_rate": 6e-05,
      "loss": 2.9408,
      "step": 6
    },
    {
      "epoch": 0.0014915832090347326,
      "grad_norm": 1.885631799697876,
      "learning_rate": 7e-05,
      "loss": 2.9361,
      "step": 7
    },
    {
      "epoch": 0.0017046665246111229,
      "grad_norm": 1.7301489114761353,
      "learning_rate": 8e-05,
      "loss": 2.776,
      "step": 8
    },
    {
      "epoch": 0.0019177498401875134,
      "grad_norm": 1.9182066917419434,
      "learning_rate": 9e-05,
      "loss": 2.7626,
      "step": 9
    },
    {
      "epoch": 0.0019177498401875134,
      "eval_loss": 2.742213487625122,
      "eval_runtime": 484.7163,
      "eval_samples_per_second": 8.153,
      "eval_steps_per_second": 1.019,
      "step": 9
    },
    {
      "epoch": 0.002130833155763904,
      "grad_norm": 1.6169171333312988,
      "learning_rate": 0.0001,
      "loss": 2.5475,
      "step": 10
    },
    {
      "epoch": 0.002343916471340294,
      "grad_norm": 1.6501270532608032,
      "learning_rate": 9.99695413509548e-05,
      "loss": 2.6424,
      "step": 11
    },
    {
      "epoch": 0.0025569997869166845,
      "grad_norm": 1.6458073854446411,
      "learning_rate": 9.987820251299122e-05,
      "loss": 2.7299,
      "step": 12
    },
    {
      "epoch": 0.002770083102493075,
      "grad_norm": 2.196349859237671,
      "learning_rate": 9.972609476841367e-05,
      "loss": 2.3784,
      "step": 13
    },
    {
      "epoch": 0.002983166418069465,
      "grad_norm": 2.144585132598877,
      "learning_rate": 9.951340343707852e-05,
      "loss": 2.4827,
      "step": 14
    },
    {
      "epoch": 0.0031962497336458554,
      "grad_norm": 2.0523390769958496,
      "learning_rate": 9.924038765061042e-05,
      "loss": 2.4431,
      "step": 15
    },
    {
      "epoch": 0.0034093330492222457,
      "grad_norm": 2.20808744430542,
      "learning_rate": 9.890738003669029e-05,
      "loss": 2.6465,
      "step": 16
    },
    {
      "epoch": 0.0036224163647986364,
      "grad_norm": 2.1431615352630615,
      "learning_rate": 9.851478631379982e-05,
      "loss": 2.7287,
      "step": 17
    },
    {
      "epoch": 0.0038354996803750267,
      "grad_norm": 2.179090738296509,
      "learning_rate": 9.806308479691595e-05,
      "loss": 2.4462,
      "step": 18
    },
    {
      "epoch": 0.0038354996803750267,
      "eval_loss": 2.446166753768921,
      "eval_runtime": 484.7504,
      "eval_samples_per_second": 8.153,
      "eval_steps_per_second": 1.019,
      "step": 18
    },
    {
      "epoch": 0.004048582995951417,
      "grad_norm": 1.9910677671432495,
      "learning_rate": 9.755282581475769e-05,
      "loss": 2.5489,
      "step": 19
    },
    {
      "epoch": 0.004261666311527808,
      "grad_norm": 1.7879835367202759,
      "learning_rate": 9.698463103929542e-05,
      "loss": 2.3464,
      "step": 20
    },
    {
      "epoch": 0.004474749627104198,
      "grad_norm": 1.7158172130584717,
      "learning_rate": 9.635919272833938e-05,
      "loss": 2.3559,
      "step": 21
    },
    {
      "epoch": 0.004687832942680588,
      "grad_norm": 1.5184911489486694,
      "learning_rate": 9.567727288213005e-05,
      "loss": 2.583,
      "step": 22
    },
    {
      "epoch": 0.004900916258256978,
      "grad_norm": 1.3161325454711914,
      "learning_rate": 9.493970231495835e-05,
      "loss": 2.4803,
      "step": 23
    },
    {
      "epoch": 0.005113999573833369,
      "grad_norm": 1.6111788749694824,
      "learning_rate": 9.414737964294636e-05,
      "loss": 2.3079,
      "step": 24
    },
    {
      "epoch": 0.005327082889409759,
      "grad_norm": 1.6040608882904053,
      "learning_rate": 9.330127018922194e-05,
      "loss": 2.3188,
      "step": 25
    },
    {
      "epoch": 0.00554016620498615,
      "grad_norm": 1.7765617370605469,
      "learning_rate": 9.24024048078213e-05,
      "loss": 2.1691,
      "step": 26
    },
    {
      "epoch": 0.00575324952056254,
      "grad_norm": 1.7104406356811523,
      "learning_rate": 9.145187862775209e-05,
      "loss": 2.3399,
      "step": 27
    },
    {
      "epoch": 0.00575324952056254,
      "eval_loss": 2.3587300777435303,
      "eval_runtime": 485.0287,
      "eval_samples_per_second": 8.148,
      "eval_steps_per_second": 1.018,
      "step": 27
    },
    {
      "epoch": 0.00596633283613893,
      "grad_norm": 1.2958590984344482,
      "learning_rate": 9.045084971874738e-05,
      "loss": 2.336,
      "step": 28
    },
    {
      "epoch": 0.006179416151715321,
      "grad_norm": 1.8951802253723145,
      "learning_rate": 8.940053768033609e-05,
      "loss": 2.5203,
      "step": 29
    },
    {
      "epoch": 0.006392499467291711,
      "grad_norm": 1.4188854694366455,
      "learning_rate": 8.83022221559489e-05,
      "loss": 2.5081,
      "step": 30
    },
    {
      "epoch": 0.0066055827828681015,
      "grad_norm": 1.756216287612915,
      "learning_rate": 8.715724127386972e-05,
      "loss": 2.2355,
      "step": 31
    },
    {
      "epoch": 0.006818666098444491,
      "grad_norm": 1.6971544027328491,
      "learning_rate": 8.596699001693255e-05,
      "loss": 2.2753,
      "step": 32
    },
    {
      "epoch": 0.007031749414020882,
      "grad_norm": 1.730682611465454,
      "learning_rate": 8.473291852294987e-05,
      "loss": 2.2489,
      "step": 33
    },
    {
      "epoch": 0.007244832729597273,
      "grad_norm": 1.500222086906433,
      "learning_rate": 8.345653031794292e-05,
      "loss": 2.5747,
      "step": 34
    },
    {
      "epoch": 0.007457916045173663,
      "grad_norm": 1.689780592918396,
      "learning_rate": 8.213938048432697e-05,
      "loss": 2.4923,
      "step": 35
    },
    {
      "epoch": 0.0076709993607500535,
      "grad_norm": 1.5094739198684692,
      "learning_rate": 8.07830737662829e-05,
      "loss": 2.3113,
      "step": 36
    },
    {
      "epoch": 0.0076709993607500535,
      "eval_loss": 2.3216378688812256,
      "eval_runtime": 485.2683,
      "eval_samples_per_second": 8.144,
      "eval_steps_per_second": 1.018,
      "step": 36
    },
    {
      "epoch": 0.007884082676326443,
      "grad_norm": 1.8050920963287354,
      "learning_rate": 7.938926261462366e-05,
      "loss": 2.1219,
      "step": 37
    },
    {
      "epoch": 0.008097165991902834,
      "grad_norm": 1.7702151536941528,
      "learning_rate": 7.795964517353735e-05,
      "loss": 2.3288,
      "step": 38
    },
    {
      "epoch": 0.008310249307479225,
      "grad_norm": 1.9148733615875244,
      "learning_rate": 7.649596321166024e-05,
      "loss": 2.2052,
      "step": 39
    },
    {
      "epoch": 0.008523332623055616,
      "grad_norm": 1.9198733568191528,
      "learning_rate": 7.500000000000001e-05,
      "loss": 2.0524,
      "step": 40
    },
    {
      "epoch": 0.008736415938632005,
      "grad_norm": 1.6441048383712769,
      "learning_rate": 7.347357813929454e-05,
      "loss": 2.3595,
      "step": 41
    },
    {
      "epoch": 0.008949499254208395,
      "grad_norm": 1.6988778114318848,
      "learning_rate": 7.191855733945387e-05,
      "loss": 2.2509,
      "step": 42
    },
    {
      "epoch": 0.009162582569784786,
      "grad_norm": 1.6815834045410156,
      "learning_rate": 7.033683215379002e-05,
      "loss": 2.285,
      "step": 43
    },
    {
      "epoch": 0.009375665885361177,
      "grad_norm": 1.5567946434020996,
      "learning_rate": 6.873032967079561e-05,
      "loss": 2.2638,
      "step": 44
    },
    {
      "epoch": 0.009588749200937566,
      "grad_norm": 1.5238242149353027,
      "learning_rate": 6.710100716628344e-05,
      "loss": 2.455,
      "step": 45
    },
    {
      "epoch": 0.009588749200937566,
      "eval_loss": 2.2901864051818848,
      "eval_runtime": 484.6308,
      "eval_samples_per_second": 8.155,
      "eval_steps_per_second": 1.019,
      "step": 45
    },
    {
      "epoch": 0.009801832516513957,
      "grad_norm": 1.698690414428711,
      "learning_rate": 6.545084971874738e-05,
      "loss": 2.5036,
      "step": 46
    },
    {
      "epoch": 0.010014915832090347,
      "grad_norm": 1.5626423358917236,
      "learning_rate": 6.378186779084995e-05,
      "loss": 2.3447,
      "step": 47
    },
    {
      "epoch": 0.010227999147666738,
      "grad_norm": 1.6634929180145264,
      "learning_rate": 6.209609477998338e-05,
      "loss": 2.2132,
      "step": 48
    },
    {
      "epoch": 0.010441082463243129,
      "grad_norm": 1.5750799179077148,
      "learning_rate": 6.0395584540887963e-05,
      "loss": 2.4226,
      "step": 49
    },
    {
      "epoch": 0.010654165778819518,
      "grad_norm": 1.4968082904815674,
      "learning_rate": 5.868240888334653e-05,
      "loss": 2.3799,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.70943641780224e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}