{
  "best_metric": 0.17235368490219116,
  "best_model_checkpoint": "autotrain-dhurg-4zloz/checkpoint-100",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04,
      "grad_norm": 8.779764175415039,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.7021,
      "step": 2
    },
    {
      "epoch": 0.08,
      "grad_norm": 11.571272850036621,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 0.7476,
      "step": 4
    },
    {
      "epoch": 0.12,
      "grad_norm": 7.2595109939575195,
      "learning_rate": 2e-05,
      "loss": 0.6407,
      "step": 6
    },
    {
      "epoch": 0.16,
      "grad_norm": 22.257381439208984,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.7677,
      "step": 8
    },
    {
      "epoch": 0.2,
      "grad_norm": 9.527205467224121,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.6452,
      "step": 10
    },
    {
      "epoch": 0.24,
      "grad_norm": 6.193709373474121,
      "learning_rate": 4e-05,
      "loss": 0.4157,
      "step": 12
    },
    {
      "epoch": 0.28,
      "grad_norm": 8.290445327758789,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.6082,
      "step": 14
    },
    {
      "epoch": 0.32,
      "grad_norm": 15.103638648986816,
      "learning_rate": 4.962962962962963e-05,
      "loss": 0.8635,
      "step": 16
    },
    {
      "epoch": 0.36,
      "grad_norm": 9.055012702941895,
      "learning_rate": 4.888888888888889e-05,
      "loss": 0.4496,
      "step": 18
    },
    {
      "epoch": 0.4,
      "grad_norm": 7.674185276031494,
      "learning_rate": 4.814814814814815e-05,
      "loss": 0.4544,
      "step": 20
    },
    {
      "epoch": 0.44,
      "grad_norm": 5.750149250030518,
      "learning_rate": 4.740740740740741e-05,
      "loss": 0.4023,
      "step": 22
    },
    {
      "epoch": 0.48,
      "grad_norm": 9.68493938446045,
      "learning_rate": 4.666666666666667e-05,
      "loss": 0.4952,
      "step": 24
    },
    {
      "epoch": 0.52,
      "grad_norm": 5.293528079986572,
      "learning_rate": 4.592592592592593e-05,
      "loss": 0.5546,
      "step": 26
    },
    {
      "epoch": 0.56,
      "grad_norm": 7.6056647300720215,
      "learning_rate": 4.518518518518519e-05,
      "loss": 0.3908,
      "step": 28
    },
    {
      "epoch": 0.6,
      "grad_norm": 6.408599853515625,
      "learning_rate": 4.4444444444444447e-05,
      "loss": 0.2509,
      "step": 30
    },
    {
      "epoch": 0.64,
      "grad_norm": 6.033527374267578,
      "learning_rate": 4.3703703703703705e-05,
      "loss": 0.2029,
      "step": 32
    },
    {
      "epoch": 0.68,
      "grad_norm": 11.087029457092285,
      "learning_rate": 4.296296296296296e-05,
      "loss": 0.5078,
      "step": 34
    },
    {
      "epoch": 0.72,
      "grad_norm": 6.722639560699463,
      "learning_rate": 4.222222222222222e-05,
      "loss": 0.4449,
      "step": 36
    },
    {
      "epoch": 0.76,
      "grad_norm": 5.349610805511475,
      "learning_rate": 4.148148148148148e-05,
      "loss": 0.3874,
      "step": 38
    },
    {
      "epoch": 0.8,
      "grad_norm": 7.29227352142334,
      "learning_rate": 4.074074074074074e-05,
      "loss": 0.5953,
      "step": 40
    },
    {
      "epoch": 0.84,
      "grad_norm": 8.242330551147461,
      "learning_rate": 4e-05,
      "loss": 0.3056,
      "step": 42
    },
    {
      "epoch": 0.88,
      "grad_norm": 3.6587088108062744,
      "learning_rate": 3.925925925925926e-05,
      "loss": 0.1099,
      "step": 44
    },
    {
      "epoch": 0.92,
      "grad_norm": 3.0559661388397217,
      "learning_rate": 3.851851851851852e-05,
      "loss": 0.37,
      "step": 46
    },
    {
      "epoch": 0.96,
      "grad_norm": 4.720924377441406,
      "learning_rate": 3.777777777777778e-05,
      "loss": 0.1703,
      "step": 48
    },
    {
      "epoch": 1.0,
      "grad_norm": 3.9827725887298584,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.3503,
      "step": 50
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.95,
      "eval_auc": 0.964,
      "eval_f1": 0.9494949494949495,
      "eval_loss": 0.21780452132225037,
      "eval_precision": 0.9591836734693877,
      "eval_recall": 0.94,
      "eval_runtime": 26.0018,
      "eval_samples_per_second": 3.846,
      "eval_steps_per_second": 0.269,
      "step": 50
    },
    {
      "epoch": 1.04,
      "grad_norm": 2.406909465789795,
      "learning_rate": 3.62962962962963e-05,
      "loss": 0.1008,
      "step": 52
    },
    {
      "epoch": 1.08,
      "grad_norm": 11.02933406829834,
      "learning_rate": 3.555555555555556e-05,
      "loss": 0.2756,
      "step": 54
    },
    {
      "epoch": 1.12,
      "grad_norm": 8.120884895324707,
      "learning_rate": 3.481481481481482e-05,
      "loss": 0.2622,
      "step": 56
    },
    {
      "epoch": 1.16,
      "grad_norm": 5.108714580535889,
      "learning_rate": 3.4074074074074077e-05,
      "loss": 0.1122,
      "step": 58
    },
    {
      "epoch": 1.2,
      "grad_norm": 13.36482048034668,
      "learning_rate": 3.3333333333333335e-05,
      "loss": 0.2356,
      "step": 60
    },
    {
      "epoch": 1.24,
      "grad_norm": 4.833813190460205,
      "learning_rate": 3.25925925925926e-05,
      "loss": 0.1811,
      "step": 62
    },
    {
      "epoch": 1.28,
      "grad_norm": 4.81235933303833,
      "learning_rate": 3.185185185185185e-05,
      "loss": 0.1474,
      "step": 64
    },
    {
      "epoch": 1.32,
      "grad_norm": 0.43874427676200867,
      "learning_rate": 3.111111111111111e-05,
      "loss": 0.2348,
      "step": 66
    },
    {
      "epoch": 1.3599999999999999,
      "grad_norm": 1.1349278688430786,
      "learning_rate": 3.037037037037037e-05,
      "loss": 0.0234,
      "step": 68
    },
    {
      "epoch": 1.4,
      "grad_norm": 2.6060400009155273,
      "learning_rate": 2.962962962962963e-05,
      "loss": 0.1018,
      "step": 70
    },
    {
      "epoch": 1.44,
      "grad_norm": 6.894038200378418,
      "learning_rate": 2.8888888888888888e-05,
      "loss": 0.3433,
      "step": 72
    },
    {
      "epoch": 1.48,
      "grad_norm": 0.4274143576622009,
      "learning_rate": 2.814814814814815e-05,
      "loss": 0.3128,
      "step": 74
    },
    {
      "epoch": 1.52,
      "grad_norm": 4.964171886444092,
      "learning_rate": 2.7407407407407408e-05,
      "loss": 0.1867,
      "step": 76
    },
    {
      "epoch": 1.56,
      "grad_norm": 3.660466194152832,
      "learning_rate": 2.6666666666666667e-05,
      "loss": 0.1936,
      "step": 78
    },
    {
      "epoch": 1.6,
      "grad_norm": 9.657711029052734,
      "learning_rate": 2.5925925925925925e-05,
      "loss": 0.2673,
      "step": 80
    },
    {
      "epoch": 1.6400000000000001,
      "grad_norm": 7.678964614868164,
      "learning_rate": 2.5185185185185183e-05,
      "loss": 0.2198,
      "step": 82
    },
    {
      "epoch": 1.6800000000000002,
      "grad_norm": 2.8848860263824463,
      "learning_rate": 2.4444444444444445e-05,
      "loss": 0.2738,
      "step": 84
    },
    {
      "epoch": 1.72,
      "grad_norm": 13.012591361999512,
      "learning_rate": 2.3703703703703707e-05,
      "loss": 0.2112,
      "step": 86
    },
    {
      "epoch": 1.76,
      "grad_norm": 8.288191795349121,
      "learning_rate": 2.2962962962962965e-05,
      "loss": 0.1981,
      "step": 88
    },
    {
      "epoch": 1.8,
      "grad_norm": 9.876968383789062,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.3836,
      "step": 90
    },
    {
      "epoch": 1.8399999999999999,
      "grad_norm": 0.2542683184146881,
      "learning_rate": 2.148148148148148e-05,
      "loss": 0.2617,
      "step": 92
    },
    {
      "epoch": 1.88,
      "grad_norm": 1.3072165250778198,
      "learning_rate": 2.074074074074074e-05,
      "loss": 0.2146,
      "step": 94
    },
    {
      "epoch": 1.92,
      "grad_norm": 5.043093681335449,
      "learning_rate": 2e-05,
      "loss": 0.0965,
      "step": 96
    },
    {
      "epoch": 1.96,
      "grad_norm": 7.184999942779541,
      "learning_rate": 1.925925925925926e-05,
      "loss": 0.232,
      "step": 98
    },
    {
      "epoch": 2.0,
      "grad_norm": 7.763317584991455,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.3054,
      "step": 100
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.94,
      "eval_auc": 0.9796,
      "eval_f1": 0.94,
      "eval_loss": 0.17235368490219116,
      "eval_precision": 0.94,
      "eval_recall": 0.94,
      "eval_runtime": 24.1401,
      "eval_samples_per_second": 4.142,
      "eval_steps_per_second": 0.29,
      "step": 100
    }
  ],
  "logging_steps": 2,
  "max_steps": 150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.19935916916736e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|