{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.2312272385687034,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 4e-05,
      "loss": 1.0185,
      "step": 20
    },
    {
      "epoch": 0.01,
      "learning_rate": 8e-05,
      "loss": 0.8649,
      "step": 40
    },
    {
      "epoch": 0.01,
      "learning_rate": 0.00012,
      "loss": 0.7784,
      "step": 60
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.00016,
      "loss": 0.7386,
      "step": 80
    },
    {
      "epoch": 0.02,
      "learning_rate": 0.0002,
      "loss": 0.7037,
      "step": 100
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001990530303030303,
      "loss": 0.7019,
      "step": 120
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.0001981060606060606,
      "loss": 0.7117,
      "step": 140
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019715909090909094,
      "loss": 0.672,
      "step": 160
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.00019621212121212123,
      "loss": 0.664,
      "step": 180
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.00019526515151515152,
      "loss": 0.6666,
      "step": 200
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0001943181818181818,
      "loss": 0.6685,
      "step": 220
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019337121212121213,
      "loss": 0.6788,
      "step": 240
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019242424242424245,
      "loss": 0.6673,
      "step": 260
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.00019147727272727274,
      "loss": 0.6628,
      "step": 280
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00019053030303030303,
      "loss": 0.6643,
      "step": 300
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.00018958333333333332,
      "loss": 0.6607,
      "step": 320
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018863636363636364,
      "loss": 0.6706,
      "step": 340
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00018768939393939396,
      "loss": 0.6709,
      "step": 360
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018674242424242425,
      "loss": 0.6616,
      "step": 380
    },
    {
      "epoch": 0.09,
      "learning_rate": 0.00018579545454545454,
      "loss": 0.6566,
      "step": 400
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018484848484848484,
      "loss": 0.6513,
      "step": 420
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.00018390151515151518,
      "loss": 0.6797,
      "step": 440
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00018295454545454547,
      "loss": 0.6599,
      "step": 460
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00018200757575757577,
      "loss": 0.6561,
      "step": 480
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00018106060606060606,
      "loss": 0.662,
      "step": 500
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.00018011363636363638,
      "loss": 0.6629,
      "step": 520
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0001791666666666667,
      "loss": 0.6475,
      "step": 540
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017821969696969699,
      "loss": 0.6607,
      "step": 560
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.00017727272727272728,
      "loss": 0.6512,
      "step": 580
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00017632575757575757,
      "loss": 0.6484,
      "step": 600
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0001753787878787879,
      "loss": 0.6403,
      "step": 620
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001744318181818182,
      "loss": 0.6537,
      "step": 640
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0001734848484848485,
      "loss": 0.6516,
      "step": 660
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0001725378787878788,
      "loss": 0.6577,
      "step": 680
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00017159090909090908,
      "loss": 0.6374,
      "step": 700
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0001706439393939394,
      "loss": 0.6551,
      "step": 720
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.00016969696969696972,
      "loss": 0.6388,
      "step": 740
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.00016875,
      "loss": 0.64,
      "step": 760
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001678030303030303,
      "loss": 0.6579,
      "step": 780
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0001668560606060606,
      "loss": 0.6525,
      "step": 800
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016590909090909094,
      "loss": 0.6261,
      "step": 820
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.00016496212121212123,
      "loss": 0.6351,
      "step": 840
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.00016401515151515152,
      "loss": 0.6537,
      "step": 860
    },
    {
      "epoch": 0.2,
      "learning_rate": 0.0001630681818181818,
      "loss": 0.6448,
      "step": 880
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00016212121212121213,
      "loss": 0.638,
      "step": 900
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00016117424242424245,
      "loss": 0.6503,
      "step": 920
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00016022727272727274,
      "loss": 0.6378,
      "step": 940
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00015928030303030303,
      "loss": 0.643,
      "step": 960
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015833333333333332,
      "loss": 0.6235,
      "step": 980
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00015738636363636364,
      "loss": 0.647,
      "step": 1000
    }
  ],
  "logging_steps": 20,
  "max_steps": 4324,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 1.585613047974052e+17,
  "trial_name": null,
  "trial_params": null
}