{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0476190476190474,
  "eval_steps": 50,
  "global_step": 16,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 1.869409203529358,
      "learning_rate": 5.000000000000001e-07,
      "loss": 5.0509,
      "step": 1
    },
    {
      "epoch": 0.19047619047619047,
      "eval_loss": 4.9529218673706055,
      "eval_runtime": 0.6553,
      "eval_samples_per_second": 13.734,
      "eval_steps_per_second": 3.052,
      "step": 1
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 1.981248378753662,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 4.6691,
      "step": 2
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 2.0131425857543945,
      "learning_rate": 1.5e-06,
      "loss": 4.9716,
      "step": 3
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 2.0012943744659424,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 5.3829,
      "step": 4
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 1.8608438968658447,
      "learning_rate": 2.5e-06,
      "loss": 5.1384,
      "step": 5
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 3.8675358295440674,
      "learning_rate": 3e-06,
      "loss": 9.6879,
      "step": 6
    },
    {
      "epoch": 1.3333333333333333,
      "grad_norm": 1.9214471578598022,
      "learning_rate": 3.5e-06,
      "loss": 5.1229,
      "step": 7
    },
    {
      "epoch": 1.5238095238095237,
      "grad_norm": 2.0400009155273438,
      "learning_rate": 4.000000000000001e-06,
      "loss": 4.7002,
      "step": 8
    },
    {
      "epoch": 1.7142857142857144,
      "grad_norm": 1.9419265985488892,
      "learning_rate": 4.5e-06,
      "loss": 5.2229,
      "step": 9
    },
    {
      "epoch": 1.9047619047619047,
      "grad_norm": 1.9313769340515137,
      "learning_rate": 5e-06,
      "loss": 5.2099,
      "step": 10
    },
    {
      "epoch": 2.0952380952380953,
      "grad_norm": 3.5392918586730957,
      "learning_rate": 4.665063509461098e-06,
      "loss": 8.8352,
      "step": 11
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 1.8467406034469604,
      "learning_rate": 3.7500000000000005e-06,
      "loss": 4.8335,
      "step": 12
    },
    {
      "epoch": 2.4761904761904763,
      "grad_norm": 2.1296801567077637,
      "learning_rate": 2.5e-06,
      "loss": 5.1836,
      "step": 13
    },
    {
      "epoch": 2.6666666666666665,
      "grad_norm": 1.7837193012237549,
      "learning_rate": 1.2500000000000007e-06,
      "loss": 4.7968,
      "step": 14
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 2.0977296829223633,
      "learning_rate": 3.3493649053890325e-07,
      "loss": 5.1121,
      "step": 15
    },
    {
      "epoch": 3.0476190476190474,
      "grad_norm": 3.3787589073181152,
      "learning_rate": 0.0,
      "loss": 8.0154,
      "step": 16
    }
  ],
  "logging_steps": 1,
  "max_steps": 16,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 50,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4309761310851072.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}