{
"best_metric": 0.11816192560175055,
"best_model_checkpoint": "videomae-base-finetuned-ssv2-finetuned-ucf101-subset\\checkpoint-433",
"epoch": 1.0,
"eval_steps": 500,
"global_step": 433,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.023094688221709007,
"grad_norm": 25.757522583007812,
"learning_rate": 1.1363636363636365e-05,
"loss": 5.5037,
"step": 10
},
{
"epoch": 0.046189376443418015,
"grad_norm": 25.24021339416504,
"learning_rate": 2.272727272727273e-05,
"loss": 5.2613,
"step": 20
},
{
"epoch": 0.06928406466512702,
"grad_norm": 22.77121353149414,
"learning_rate": 3.409090909090909e-05,
"loss": 5.0758,
"step": 30
},
{
"epoch": 0.09237875288683603,
"grad_norm": 31.911174774169922,
"learning_rate": 4.545454545454546e-05,
"loss": 5.2159,
"step": 40
},
{
"epoch": 0.11547344110854503,
"grad_norm": 26.340246200561523,
"learning_rate": 4.922879177377892e-05,
"loss": 5.3368,
"step": 50
},
{
"epoch": 0.13856812933025403,
"grad_norm": 23.175561904907227,
"learning_rate": 4.7943444730077124e-05,
"loss": 5.0119,
"step": 60
},
{
"epoch": 0.16166281755196305,
"grad_norm": 27.339330673217773,
"learning_rate": 4.6658097686375325e-05,
"loss": 5.1378,
"step": 70
},
{
"epoch": 0.18475750577367206,
"grad_norm": 23.49224281311035,
"learning_rate": 4.537275064267352e-05,
"loss": 5.3206,
"step": 80
},
{
"epoch": 0.20785219399538107,
"grad_norm": 23.86919403076172,
"learning_rate": 4.408740359897173e-05,
"loss": 5.2974,
"step": 90
},
{
"epoch": 0.23094688221709006,
"grad_norm": 17.22815704345703,
"learning_rate": 4.280205655526993e-05,
"loss": 5.3859,
"step": 100
},
{
"epoch": 0.2540415704387991,
"grad_norm": 18.21339988708496,
"learning_rate": 4.151670951156812e-05,
"loss": 5.2278,
"step": 110
},
{
"epoch": 0.27713625866050806,
"grad_norm": 18.38885498046875,
"learning_rate": 4.0231362467866324e-05,
"loss": 5.5348,
"step": 120
},
{
"epoch": 0.3002309468822171,
"grad_norm": 17.503589630126953,
"learning_rate": 3.8946015424164526e-05,
"loss": 5.282,
"step": 130
},
{
"epoch": 0.3233256351039261,
"grad_norm": 19.329565048217773,
"learning_rate": 3.766066838046273e-05,
"loss": 4.8012,
"step": 140
},
{
"epoch": 0.3464203233256351,
"grad_norm": 19.029603958129883,
"learning_rate": 3.637532133676093e-05,
"loss": 4.6088,
"step": 150
},
{
"epoch": 0.3695150115473441,
"grad_norm": 20.106794357299805,
"learning_rate": 3.508997429305913e-05,
"loss": 4.7802,
"step": 160
},
{
"epoch": 0.39260969976905313,
"grad_norm": 22.443622589111328,
"learning_rate": 3.380462724935733e-05,
"loss": 4.9326,
"step": 170
},
{
"epoch": 0.41570438799076215,
"grad_norm": 19.69178581237793,
"learning_rate": 3.251928020565553e-05,
"loss": 5.1273,
"step": 180
},
{
"epoch": 0.4387990762124711,
"grad_norm": 16.72547721862793,
"learning_rate": 3.1233933161953726e-05,
"loss": 4.9395,
"step": 190
},
{
"epoch": 0.4618937644341801,
"grad_norm": 21.63971710205078,
"learning_rate": 2.994858611825193e-05,
"loss": 4.3805,
"step": 200
},
{
"epoch": 0.48498845265588914,
"grad_norm": 16.757349014282227,
"learning_rate": 2.866323907455013e-05,
"loss": 5.0902,
"step": 210
},
{
"epoch": 0.5080831408775982,
"grad_norm": 16.207763671875,
"learning_rate": 2.737789203084833e-05,
"loss": 5.0768,
"step": 220
},
{
"epoch": 0.5311778290993071,
"grad_norm": 21.602184295654297,
"learning_rate": 2.6092544987146534e-05,
"loss": 4.511,
"step": 230
},
{
"epoch": 0.5542725173210161,
"grad_norm": 15.503689765930176,
"learning_rate": 2.480719794344473e-05,
"loss": 5.3379,
"step": 240
},
{
"epoch": 0.5773672055427251,
"grad_norm": 16.484134674072266,
"learning_rate": 2.3521850899742933e-05,
"loss": 5.0716,
"step": 250
},
{
"epoch": 0.6004618937644342,
"grad_norm": 28.777589797973633,
"learning_rate": 2.2236503856041134e-05,
"loss": 5.3695,
"step": 260
},
{
"epoch": 0.6235565819861432,
"grad_norm": 33.19585037231445,
"learning_rate": 2.095115681233933e-05,
"loss": 5.2579,
"step": 270
},
{
"epoch": 0.6466512702078522,
"grad_norm": 14.972123146057129,
"learning_rate": 1.9665809768637533e-05,
"loss": 4.9582,
"step": 280
},
{
"epoch": 0.6697459584295612,
"grad_norm": 15.122587203979492,
"learning_rate": 1.8380462724935734e-05,
"loss": 5.4649,
"step": 290
},
{
"epoch": 0.6928406466512702,
"grad_norm": 15.101243019104004,
"learning_rate": 1.7095115681233935e-05,
"loss": 5.5053,
"step": 300
},
{
"epoch": 0.7159353348729792,
"grad_norm": 14.751006126403809,
"learning_rate": 1.5809768637532136e-05,
"loss": 5.2739,
"step": 310
},
{
"epoch": 0.7390300230946882,
"grad_norm": 18.56909942626953,
"learning_rate": 1.4524421593830334e-05,
"loss": 5.0441,
"step": 320
},
{
"epoch": 0.7621247113163973,
"grad_norm": 20.036487579345703,
"learning_rate": 1.3239074550128535e-05,
"loss": 4.6066,
"step": 330
},
{
"epoch": 0.7852193995381063,
"grad_norm": 15.724655151367188,
"learning_rate": 1.1953727506426736e-05,
"loss": 4.63,
"step": 340
},
{
"epoch": 0.8083140877598153,
"grad_norm": 27.94047737121582,
"learning_rate": 1.0668380462724936e-05,
"loss": 4.8191,
"step": 350
},
{
"epoch": 0.8314087759815243,
"grad_norm": 18.26154899597168,
"learning_rate": 9.383033419023137e-06,
"loss": 4.3258,
"step": 360
},
{
"epoch": 0.8545034642032333,
"grad_norm": 21.948999404907227,
"learning_rate": 8.097686375321336e-06,
"loss": 5.2392,
"step": 370
},
{
"epoch": 0.8775981524249422,
"grad_norm": 18.45208740234375,
"learning_rate": 6.812339331619537e-06,
"loss": 5.2617,
"step": 380
},
{
"epoch": 0.9006928406466512,
"grad_norm": 23.426355361938477,
"learning_rate": 5.526992287917738e-06,
"loss": 4.9801,
"step": 390
},
{
"epoch": 0.9237875288683602,
"grad_norm": 23.681772232055664,
"learning_rate": 4.241645244215939e-06,
"loss": 5.2054,
"step": 400
},
{
"epoch": 0.9468822170900693,
"grad_norm": 25.422988891601562,
"learning_rate": 2.956298200514139e-06,
"loss": 4.9923,
"step": 410
},
{
"epoch": 0.9699769053117783,
"grad_norm": 25.6311092376709,
"learning_rate": 1.6709511568123394e-06,
"loss": 5.4941,
"step": 420
},
{
"epoch": 0.9930715935334873,
"grad_norm": 21.09837532043457,
"learning_rate": 3.8560411311053987e-07,
"loss": 5.2022,
"step": 430
},
{
"epoch": 1.0,
"eval_accuracy": 0.11816192560175055,
"eval_loss": 4.614536762237549,
"eval_runtime": 1618.8123,
"eval_samples_per_second": 0.282,
"eval_steps_per_second": 0.282,
"step": 433
},
{
"epoch": 1.0,
"step": 433,
"total_flos": 5.405323753225912e+17,
"train_loss": 5.075382356004957,
"train_runtime": 5788.9979,
"train_samples_per_second": 0.075,
"train_steps_per_second": 0.075
},
{
"epoch": 1.0,
"eval_accuracy": 0.10382513661202186,
"eval_loss": 4.723412990570068,
"eval_runtime": 2027.4251,
"eval_samples_per_second": 0.271,
"eval_steps_per_second": 0.271,
"step": 433
},
{
"epoch": 1.0,
"eval_accuracy": 0.10382513661202186,
"eval_loss": 4.723412990570068,
"eval_runtime": 1903.4725,
"eval_samples_per_second": 0.288,
"eval_steps_per_second": 0.288,
"step": 433
}
],
"logging_steps": 10,
"max_steps": 433,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 5.405323753225912e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}