{
"best_metric": 0.8475336322869955,
"best_model_checkpoint": "videomae-base-finetuned-ucf101-subset/checkpoint-390",
"epoch": 2.3282051282051284,
"eval_steps": 500,
"global_step": 390,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.02564102564102564,
"grad_norm": 9.303430557250977,
"learning_rate": 6.41025641025641e-07,
"loss": 0.191,
"step": 10
},
{
"epoch": 0.05128205128205128,
"grad_norm": 17.580381393432617,
"learning_rate": 1.282051282051282e-06,
"loss": 0.1535,
"step": 20
},
{
"epoch": 0.07692307692307693,
"grad_norm": 3.0345261096954346,
"learning_rate": 1.9230769230769234e-06,
"loss": 0.1446,
"step": 30
},
{
"epoch": 0.10256410256410256,
"grad_norm": 5.7094550132751465,
"learning_rate": 2.564102564102564e-06,
"loss": 0.1564,
"step": 40
},
{
"epoch": 0.1282051282051282,
"grad_norm": 1.9542657136917114,
"learning_rate": 3.205128205128206e-06,
"loss": 0.1486,
"step": 50
},
{
"epoch": 0.15384615384615385,
"grad_norm": 8.122542381286621,
"learning_rate": 3.846153846153847e-06,
"loss": 0.1577,
"step": 60
},
{
"epoch": 0.1794871794871795,
"grad_norm": 9.375791549682617,
"learning_rate": 4.487179487179488e-06,
"loss": 0.1942,
"step": 70
},
{
"epoch": 0.20512820512820512,
"grad_norm": 11.179394721984863,
"learning_rate": 4.967948717948718e-06,
"loss": 0.2429,
"step": 80
},
{
"epoch": 0.23076923076923078,
"grad_norm": 11.36959457397461,
"learning_rate": 4.807692307692308e-06,
"loss": 0.1481,
"step": 90
},
{
"epoch": 0.2564102564102564,
"grad_norm": 1.9242831468582153,
"learning_rate": 4.647435897435898e-06,
"loss": 0.1894,
"step": 100
},
{
"epoch": 0.28205128205128205,
"grad_norm": 3.764237642288208,
"learning_rate": 4.487179487179488e-06,
"loss": 0.1557,
"step": 110
},
{
"epoch": 0.3076923076923077,
"grad_norm": 2.1338346004486084,
"learning_rate": 4.326923076923077e-06,
"loss": 0.1373,
"step": 120
},
{
"epoch": 0.3333333333333333,
"grad_norm": 1.327778935432434,
"learning_rate": 4.166666666666667e-06,
"loss": 0.1658,
"step": 130
},
{
"epoch": 0.33589743589743587,
"eval_accuracy": 0.8251121076233184,
"eval_loss": 0.5804738998413086,
"eval_runtime": 56.2864,
"eval_samples_per_second": 3.962,
"eval_steps_per_second": 0.497,
"step": 131
},
{
"epoch": 1.023076923076923,
"grad_norm": 0.7002243399620056,
"learning_rate": 4.006410256410257e-06,
"loss": 0.1178,
"step": 140
},
{
"epoch": 1.0487179487179488,
"grad_norm": 1.249237298965454,
"learning_rate": 3.846153846153847e-06,
"loss": 0.1735,
"step": 150
},
{
"epoch": 1.0743589743589743,
"grad_norm": 8.169507026672363,
"learning_rate": 3.6858974358974363e-06,
"loss": 0.1527,
"step": 160
},
{
"epoch": 1.1,
"grad_norm": 4.84148645401001,
"learning_rate": 3.5256410256410263e-06,
"loss": 0.1313,
"step": 170
},
{
"epoch": 1.1256410256410256,
"grad_norm": 2.1306402683258057,
"learning_rate": 3.365384615384616e-06,
"loss": 0.0957,
"step": 180
},
{
"epoch": 1.1512820512820512,
"grad_norm": 1.54770827293396,
"learning_rate": 3.205128205128206e-06,
"loss": 0.1137,
"step": 190
},
{
"epoch": 1.176923076923077,
"grad_norm": 13.978232383728027,
"learning_rate": 3.044871794871795e-06,
"loss": 0.1326,
"step": 200
},
{
"epoch": 1.2025641025641025,
"grad_norm": 4.250465393066406,
"learning_rate": 2.8846153846153845e-06,
"loss": 0.0616,
"step": 210
},
{
"epoch": 1.2282051282051283,
"grad_norm": 1.337228775024414,
"learning_rate": 2.7243589743589744e-06,
"loss": 0.0948,
"step": 220
},
{
"epoch": 1.2538461538461538,
"grad_norm": 11.519142150878906,
"learning_rate": 2.564102564102564e-06,
"loss": 0.0906,
"step": 230
},
{
"epoch": 1.2794871794871794,
"grad_norm": 9.989028930664062,
"learning_rate": 2.403846153846154e-06,
"loss": 0.1384,
"step": 240
},
{
"epoch": 1.3051282051282052,
"grad_norm": 0.850464940071106,
"learning_rate": 2.243589743589744e-06,
"loss": 0.1329,
"step": 250
},
{
"epoch": 1.3307692307692307,
"grad_norm": 2.935410976409912,
"learning_rate": 2.0833333333333334e-06,
"loss": 0.1039,
"step": 260
},
{
"epoch": 1.3358974358974358,
"eval_accuracy": 0.8251121076233184,
"eval_loss": 0.5684685707092285,
"eval_runtime": 60.4317,
"eval_samples_per_second": 3.69,
"eval_steps_per_second": 0.463,
"step": 262
},
{
"epoch": 2.0205128205128204,
"grad_norm": 2.507692575454712,
"learning_rate": 1.9230769230769234e-06,
"loss": 0.1267,
"step": 270
},
{
"epoch": 2.046153846153846,
"grad_norm": 9.046551704406738,
"learning_rate": 1.7628205128205131e-06,
"loss": 0.0909,
"step": 280
},
{
"epoch": 2.071794871794872,
"grad_norm": 1.059535264968872,
"learning_rate": 1.602564102564103e-06,
"loss": 0.1108,
"step": 290
},
{
"epoch": 2.0974358974358975,
"grad_norm": 7.851165294647217,
"learning_rate": 1.4423076923076922e-06,
"loss": 0.0695,
"step": 300
},
{
"epoch": 2.123076923076923,
"grad_norm": 1.073171615600586,
"learning_rate": 1.282051282051282e-06,
"loss": 0.1031,
"step": 310
},
{
"epoch": 2.1487179487179486,
"grad_norm": 2.39035964012146,
"learning_rate": 1.121794871794872e-06,
"loss": 0.115,
"step": 320
},
{
"epoch": 2.174358974358974,
"grad_norm": 1.7656793594360352,
"learning_rate": 9.615384615384617e-07,
"loss": 0.0919,
"step": 330
},
{
"epoch": 2.2,
"grad_norm": 19.47994613647461,
"learning_rate": 8.012820512820515e-07,
"loss": 0.1017,
"step": 340
},
{
"epoch": 2.2256410256410257,
"grad_norm": 14.330218315124512,
"learning_rate": 6.41025641025641e-07,
"loss": 0.0826,
"step": 350
},
{
"epoch": 2.2512820512820513,
"grad_norm": 3.4437031745910645,
"learning_rate": 4.807692307692308e-07,
"loss": 0.1271,
"step": 360
},
{
"epoch": 2.276923076923077,
"grad_norm": 3.954479932785034,
"learning_rate": 3.205128205128205e-07,
"loss": 0.1132,
"step": 370
},
{
"epoch": 2.3025641025641024,
"grad_norm": 0.6898751854896545,
"learning_rate": 1.6025641025641025e-07,
"loss": 0.0906,
"step": 380
},
{
"epoch": 2.3282051282051284,
"grad_norm": 2.044902801513672,
"learning_rate": 0.0,
"loss": 0.0832,
"step": 390
},
{
"epoch": 2.3282051282051284,
"eval_accuracy": 0.8475336322869955,
"eval_loss": 0.5565493702888489,
"eval_runtime": 62.3378,
"eval_samples_per_second": 3.577,
"eval_steps_per_second": 0.449,
"step": 390
},
{
"epoch": 2.3282051282051284,
"step": 390,
"total_flos": 3.8861995625761997e+18,
"train_loss": 0.12900116260235125,
"train_runtime": 2004.3611,
"train_samples_per_second": 1.557,
"train_steps_per_second": 0.195
},
{
"epoch": 2.3282051282051284,
"eval_accuracy": 0.6438848920863309,
"eval_loss": 2.050309658050537,
"eval_runtime": 149.2566,
"eval_samples_per_second": 3.725,
"eval_steps_per_second": 0.469,
"step": 390
},
{
"epoch": 2.3282051282051284,
"eval_accuracy": 0.639344262295082,
"eval_loss": 2.076159954071045,
"eval_runtime": 145.7908,
"eval_samples_per_second": 3.766,
"eval_steps_per_second": 0.473,
"step": 390
}
],
"logging_steps": 10,
"max_steps": 390,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.8861995625761997e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}