{
  "best_metric": 0.5925925925925926,
  "best_model_checkpoint": "beit-base-patch16-224-pt22k-ft22k-finetuned-conspiracy_imagery_2/checkpoint-76",
  "epoch": 5.901639344262295,
  "eval_steps": 500,
  "global_step": 90,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.6557377049180327,
      "grad_norm": 6.740944862365723,
      "learning_rate": 4.938271604938271e-05,
      "loss": 1.9515,
      "step": 10
    },
    {
      "epoch": 0.9836065573770492,
      "eval_accuracy": 0.42592592592592593,
      "eval_loss": 1.3559091091156006,
      "eval_runtime": 3.4851,
      "eval_samples_per_second": 30.989,
      "eval_steps_per_second": 2.009,
      "step": 15
    },
    {
      "epoch": 1.3114754098360657,
      "grad_norm": 6.367587566375732,
      "learning_rate": 4.3209876543209875e-05,
      "loss": 1.3689,
      "step": 20
    },
    {
      "epoch": 1.9672131147540983,
      "grad_norm": 6.214643478393555,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 1.1593,
      "step": 30
    },
    {
      "epoch": 1.9672131147540983,
      "eval_accuracy": 0.5648148148148148,
      "eval_loss": 1.1501442193984985,
      "eval_runtime": 3.5137,
      "eval_samples_per_second": 30.737,
      "eval_steps_per_second": 1.992,
      "step": 30
    },
    {
      "epoch": 2.6229508196721314,
      "grad_norm": 5.606908798217773,
      "learning_rate": 3.08641975308642e-05,
      "loss": 1.0288,
      "step": 40
    },
    {
      "epoch": 2.9508196721311473,
      "eval_accuracy": 0.5462962962962963,
      "eval_loss": 1.1120755672454834,
      "eval_runtime": 3.3763,
      "eval_samples_per_second": 31.987,
      "eval_steps_per_second": 2.073,
      "step": 45
    },
    {
      "epoch": 3.278688524590164,
      "grad_norm": 5.3310627937316895,
      "learning_rate": 2.4691358024691357e-05,
      "loss": 0.9132,
      "step": 50
    },
    {
      "epoch": 3.9344262295081966,
      "grad_norm": 5.54246187210083,
      "learning_rate": 1.8518518518518518e-05,
      "loss": 0.8302,
      "step": 60
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.5740740740740741,
      "eval_loss": 1.1271188259124756,
      "eval_runtime": 3.3391,
      "eval_samples_per_second": 32.344,
      "eval_steps_per_second": 2.096,
      "step": 61
    },
    {
      "epoch": 4.590163934426229,
      "grad_norm": 5.834253787994385,
      "learning_rate": 1.2345679012345678e-05,
      "loss": 0.7957,
      "step": 70
    },
    {
      "epoch": 4.983606557377049,
      "eval_accuracy": 0.5925925925925926,
      "eval_loss": 1.0559544563293457,
      "eval_runtime": 4.0977,
      "eval_samples_per_second": 26.356,
      "eval_steps_per_second": 1.708,
      "step": 76
    },
    {
      "epoch": 5.245901639344262,
      "grad_norm": 6.654218673706055,
      "learning_rate": 6.172839506172839e-06,
      "loss": 0.8094,
      "step": 80
    },
    {
      "epoch": 5.901639344262295,
      "grad_norm": 7.636894702911377,
      "learning_rate": 0.0,
      "loss": 0.7633,
      "step": 90
    },
    {
      "epoch": 5.901639344262295,
      "eval_accuracy": 0.5833333333333334,
      "eval_loss": 1.08096182346344,
      "eval_runtime": 3.983,
      "eval_samples_per_second": 27.115,
      "eval_steps_per_second": 1.757,
      "step": 90
    },
    {
      "epoch": 5.901639344262295,
      "step": 90,
      "total_flos": 4.4386319544827904e+17,
      "train_loss": 1.0689217143588596,
      "train_runtime": 507.807,
      "train_samples_per_second": 11.461,
      "train_steps_per_second": 0.177
    }
  ],
  "logging_steps": 10,
  "max_steps": 90,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.4386319544827904e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}