{
  "best_metric": 5.248876094818115,
  "best_model_checkpoint": "autotrain-htyqd-ivazp/checkpoint-64",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 64,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.046875,
      "grad_norm": 14.052762985229492,
      "learning_rate": 1.5625e-06,
      "loss": 3.6996,
      "step": 3
    },
    {
      "epoch": 0.09375,
      "grad_norm": 14.165870666503906,
      "learning_rate": 3.90625e-06,
      "loss": 3.6546,
      "step": 6
    },
    {
      "epoch": 0.140625,
      "grad_norm": 16.012378692626953,
      "learning_rate": 6.25e-06,
      "loss": 3.4308,
      "step": 9
    },
    {
      "epoch": 0.1875,
      "grad_norm": 14.69151496887207,
      "learning_rate": 8.59375e-06,
      "loss": 2.9723,
      "step": 12
    },
    {
      "epoch": 0.234375,
      "grad_norm": 14.49859619140625,
      "learning_rate": 1.09375e-05,
      "loss": 2.55,
      "step": 15
    },
    {
      "epoch": 0.28125,
      "grad_norm": 9.837970733642578,
      "learning_rate": 1.3281250000000001e-05,
      "loss": 2.1128,
      "step": 18
    },
    {
      "epoch": 0.328125,
      "grad_norm": 5.071118354797363,
      "learning_rate": 1.5625e-05,
      "loss": 1.6801,
      "step": 21
    },
    {
      "epoch": 0.375,
      "grad_norm": 2.0291266441345215,
      "learning_rate": 1.796875e-05,
      "loss": 1.6157,
      "step": 24
    },
    {
      "epoch": 0.421875,
      "grad_norm": 3.643156051635742,
      "learning_rate": 2.0312500000000002e-05,
      "loss": 1.4422,
      "step": 27
    },
    {
      "epoch": 0.46875,
      "grad_norm": 2.610175609588623,
      "learning_rate": 2.2656250000000002e-05,
      "loss": 1.5487,
      "step": 30
    },
    {
      "epoch": 0.515625,
      "grad_norm": 2.2735087871551514,
      "learning_rate": 2.5e-05,
      "loss": 1.5584,
      "step": 33
    },
    {
      "epoch": 0.5625,
      "grad_norm": 3.6203994750976562,
      "learning_rate": 2.734375e-05,
      "loss": 1.4387,
      "step": 36
    },
    {
      "epoch": 0.609375,
      "grad_norm": 1.6243354082107544,
      "learning_rate": 2.96875e-05,
      "loss": 1.4952,
      "step": 39
    },
    {
      "epoch": 0.65625,
      "grad_norm": 1.4888181686401367,
      "learning_rate": 3.203125e-05,
      "loss": 1.3624,
      "step": 42
    },
    {
      "epoch": 0.703125,
      "grad_norm": 2.144364595413208,
      "learning_rate": 3.4375e-05,
      "loss": 1.5163,
      "step": 45
    },
    {
      "epoch": 0.75,
      "grad_norm": 1.7939876317977905,
      "learning_rate": 3.671875e-05,
      "loss": 1.2903,
      "step": 48
    },
    {
      "epoch": 0.796875,
      "grad_norm": 1.5901564359664917,
      "learning_rate": 3.90625e-05,
      "loss": 1.2762,
      "step": 51
    },
    {
      "epoch": 0.84375,
      "grad_norm": 1.5492233037948608,
      "learning_rate": 4.140625e-05,
      "loss": 1.3743,
      "step": 54
    },
    {
      "epoch": 0.890625,
      "grad_norm": 1.4556649923324585,
      "learning_rate": 4.375e-05,
      "loss": 1.4039,
      "step": 57
    },
    {
      "epoch": 0.9375,
      "grad_norm": 2.0221164226531982,
      "learning_rate": 4.609375e-05,
      "loss": 1.464,
      "step": 60
    },
    {
      "epoch": 0.984375,
      "grad_norm": 1.9775513410568237,
      "learning_rate": 4.8437500000000005e-05,
      "loss": 1.385,
      "step": 63
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.0026870229007633587,
      "eval_f1": 0.0,
      "eval_loss": 5.248876094818115,
      "eval_precision": 0.0,
      "eval_recall": 0.0,
      "eval_runtime": 0.4245,
      "eval_samples_per_second": 327.45,
      "eval_steps_per_second": 21.202,
      "step": 64
    }
  ],
  "logging_steps": 3,
  "max_steps": 640,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 16636128375552.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}