{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9997597886139803,
  "eval_steps": 500,
  "global_step": 2081,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024021138601969732,
      "grad_norm": 0.4454003870487213,
      "learning_rate": 2e-05,
      "loss": 2.2416,
      "step": 50
    },
    {
      "epoch": 0.048042277203939464,
      "grad_norm": 0.5095623731613159,
      "learning_rate": 1.9507631708517972e-05,
      "loss": 1.7858,
      "step": 100
    },
    {
      "epoch": 0.0720634158059092,
      "grad_norm": 0.2806667685508728,
      "learning_rate": 1.9015263417035946e-05,
      "loss": 1.6981,
      "step": 150
    },
    {
      "epoch": 0.09608455440787893,
      "grad_norm": 0.28293704986572266,
      "learning_rate": 1.8522895125553916e-05,
      "loss": 1.7156,
      "step": 200
    },
    {
      "epoch": 0.12010569300984866,
      "grad_norm": 0.31131088733673096,
      "learning_rate": 1.8030526834071887e-05,
      "loss": 1.7039,
      "step": 250
    },
    {
      "epoch": 0.1441268316118184,
      "grad_norm": 0.34635475277900696,
      "learning_rate": 1.7538158542589857e-05,
      "loss": 1.7054,
      "step": 300
    },
    {
      "epoch": 0.16814797021378813,
      "grad_norm": 0.4496716856956482,
      "learning_rate": 1.704579025110783e-05,
      "loss": 1.6978,
      "step": 350
    },
    {
      "epoch": 0.19216910881575786,
      "grad_norm": 0.34168896079063416,
      "learning_rate": 1.6553421959625802e-05,
      "loss": 1.6664,
      "step": 400
    },
    {
      "epoch": 0.2161902474177276,
      "grad_norm": 0.3718103766441345,
      "learning_rate": 1.6061053668143772e-05,
      "loss": 1.6731,
      "step": 450
    },
    {
      "epoch": 0.24021138601969733,
      "grad_norm": 0.3723597228527069,
      "learning_rate": 1.5568685376661746e-05,
      "loss": 1.671,
      "step": 500
    },
    {
      "epoch": 0.2642325246216671,
      "grad_norm": 0.3750188648700714,
      "learning_rate": 1.5076317085179715e-05,
      "loss": 1.6883,
      "step": 550
    },
    {
      "epoch": 0.2882536632236368,
      "grad_norm": 0.35369986295700073,
      "learning_rate": 1.4583948793697687e-05,
      "loss": 1.6777,
      "step": 600
    },
    {
      "epoch": 0.3122748018256065,
      "grad_norm": 0.4338513910770416,
      "learning_rate": 1.4091580502215658e-05,
      "loss": 1.6664,
      "step": 650
    },
    {
      "epoch": 0.33629594042757627,
      "grad_norm": 0.36728423833847046,
      "learning_rate": 1.359921221073363e-05,
      "loss": 1.6343,
      "step": 700
    },
    {
      "epoch": 0.360317079029546,
      "grad_norm": 0.4234764277935028,
      "learning_rate": 1.3106843919251602e-05,
      "loss": 1.7187,
      "step": 750
    },
    {
      "epoch": 0.3843382176315157,
      "grad_norm": 0.41642165184020996,
      "learning_rate": 1.2614475627769574e-05,
      "loss": 1.7164,
      "step": 800
    },
    {
      "epoch": 0.40835935623348546,
      "grad_norm": 0.42162206768989563,
      "learning_rate": 1.2122107336287543e-05,
      "loss": 1.7247,
      "step": 850
    },
    {
      "epoch": 0.4323804948354552,
      "grad_norm": 0.42779281735420227,
      "learning_rate": 1.1629739044805515e-05,
      "loss": 1.644,
      "step": 900
    },
    {
      "epoch": 0.45640163343742496,
      "grad_norm": 0.417507141828537,
      "learning_rate": 1.1137370753323486e-05,
      "loss": 1.6829,
      "step": 950
    },
    {
      "epoch": 0.48042277203939465,
      "grad_norm": 0.6434239149093628,
      "learning_rate": 1.0645002461841458e-05,
      "loss": 1.6499,
      "step": 1000
    },
    {
      "epoch": 0.5044439106413644,
      "grad_norm": 0.4976539611816406,
      "learning_rate": 1.015263417035943e-05,
      "loss": 1.6782,
      "step": 1050
    },
    {
      "epoch": 0.5284650492433342,
      "grad_norm": 0.428740531206131,
      "learning_rate": 9.6602658788774e-06,
      "loss": 1.659,
      "step": 1100
    },
    {
      "epoch": 0.5524861878453039,
      "grad_norm": 0.46981343626976013,
      "learning_rate": 9.167897587395373e-06,
      "loss": 1.6811,
      "step": 1150
    },
    {
      "epoch": 0.5765073264472736,
      "grad_norm": 0.4911981225013733,
      "learning_rate": 8.675529295913345e-06,
      "loss": 1.6418,
      "step": 1200
    },
    {
      "epoch": 0.6005284650492433,
      "grad_norm": 0.4720385670661926,
      "learning_rate": 8.183161004431315e-06,
      "loss": 1.6594,
      "step": 1250
    },
    {
      "epoch": 0.624549603651213,
      "grad_norm": 0.7791111469268799,
      "learning_rate": 7.690792712949286e-06,
      "loss": 1.6814,
      "step": 1300
    },
    {
      "epoch": 0.6485707422531828,
      "grad_norm": 0.5131087303161621,
      "learning_rate": 7.198424421467258e-06,
      "loss": 1.6509,
      "step": 1350
    },
    {
      "epoch": 0.6725918808551525,
      "grad_norm": 0.4304710328578949,
      "learning_rate": 6.70605612998523e-06,
      "loss": 1.6751,
      "step": 1400
    },
    {
      "epoch": 0.6966130194571223,
      "grad_norm": 0.4925595819950104,
      "learning_rate": 6.213687838503201e-06,
      "loss": 1.6418,
      "step": 1450
    },
    {
      "epoch": 0.720634158059092,
      "grad_norm": 0.45314958691596985,
      "learning_rate": 5.721319547021172e-06,
      "loss": 1.6763,
      "step": 1500
    },
    {
      "epoch": 0.7446552966610618,
      "grad_norm": 0.5018033981323242,
      "learning_rate": 5.228951255539144e-06,
      "loss": 1.6763,
      "step": 1550
    },
    {
      "epoch": 0.7686764352630314,
      "grad_norm": 0.41523104906082153,
      "learning_rate": 4.7365829640571156e-06,
      "loss": 1.6727,
      "step": 1600
    },
    {
      "epoch": 0.7926975738650012,
      "grad_norm": 0.4479306936264038,
      "learning_rate": 4.244214672575087e-06,
      "loss": 1.6588,
      "step": 1650
    },
    {
      "epoch": 0.8167187124669709,
      "grad_norm": 0.504392147064209,
      "learning_rate": 3.751846381093058e-06,
      "loss": 1.6946,
      "step": 1700
    },
    {
      "epoch": 0.8407398510689407,
      "grad_norm": 0.48174819350242615,
      "learning_rate": 3.2594780896110296e-06,
      "loss": 1.7111,
      "step": 1750
    },
    {
      "epoch": 0.8647609896709104,
      "grad_norm": 0.6309037804603577,
      "learning_rate": 2.767109798129001e-06,
      "loss": 1.651,
      "step": 1800
    },
    {
      "epoch": 0.8887821282728802,
      "grad_norm": 0.5154911279678345,
      "learning_rate": 2.2747415066469722e-06,
      "loss": 1.696,
      "step": 1850
    },
    {
      "epoch": 0.9128032668748499,
      "grad_norm": 0.39096760749816895,
      "learning_rate": 1.7823732151649436e-06,
      "loss": 1.6385,
      "step": 1900
    },
    {
      "epoch": 0.9368244054768196,
      "grad_norm": 0.4384596645832062,
      "learning_rate": 1.290004923682915e-06,
      "loss": 1.6523,
      "step": 1950
    },
    {
      "epoch": 0.9608455440787893,
      "grad_norm": 0.48874229192733765,
      "learning_rate": 7.976366322008864e-07,
      "loss": 1.6501,
      "step": 2000
    },
    {
      "epoch": 0.984866682680759,
      "grad_norm": 0.3902250826358795,
      "learning_rate": 3.0526834071885774e-07,
      "loss": 1.6395,
      "step": 2050
    }
  ],
  "logging_steps": 50,
  "max_steps": 2081,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.5068479218407424e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}