{
  "best_metric": 0.9821428571428571,
  "best_model_checkpoint": "vit-large-patch16-224-dungeon-geo-morphs-0-4-28Nov24-006/checkpoint-60",
  "epoch": 32.0,
  "eval_steps": 10,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 4.0,
      "grad_norm": 24.072080612182617,
      "learning_rate": 9.861111111111112e-06,
      "loss": 1.6194,
      "step": 10
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.6214285714285714,
      "eval_loss": 1.232202172279358,
      "eval_runtime": 7.6519,
      "eval_samples_per_second": 73.185,
      "eval_steps_per_second": 9.148,
      "step": 10
    },
    {
      "epoch": 8.0,
      "grad_norm": 17.304100036621094,
      "learning_rate": 8.472222222222223e-06,
      "loss": 0.7978,
      "step": 20
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.925,
      "eval_loss": 0.5918735861778259,
      "eval_runtime": 7.3658,
      "eval_samples_per_second": 76.027,
      "eval_steps_per_second": 9.503,
      "step": 20
    },
    {
      "epoch": 12.0,
      "grad_norm": 11.8755464553833,
      "learning_rate": 7.083333333333335e-06,
      "loss": 0.2576,
      "step": 30
    },
    {
      "epoch": 12.0,
      "eval_accuracy": 0.9678571428571429,
      "eval_loss": 0.2720572352409363,
      "eval_runtime": 7.6632,
      "eval_samples_per_second": 73.077,
      "eval_steps_per_second": 9.135,
      "step": 30
    },
    {
      "epoch": 16.0,
      "grad_norm": 2.4268698692321777,
      "learning_rate": 5.694444444444445e-06,
      "loss": 0.0723,
      "step": 40
    },
    {
      "epoch": 16.0,
      "eval_accuracy": 0.9785714285714285,
      "eval_loss": 0.15481743216514587,
      "eval_runtime": 7.6991,
      "eval_samples_per_second": 72.736,
      "eval_steps_per_second": 9.092,
      "step": 40
    },
    {
      "epoch": 20.0,
      "grad_norm": 1.032953143119812,
      "learning_rate": 4.305555555555556e-06,
      "loss": 0.0202,
      "step": 50
    },
    {
      "epoch": 20.0,
      "eval_accuracy": 0.9767857142857143,
      "eval_loss": 0.10659553855657578,
      "eval_runtime": 8.3338,
      "eval_samples_per_second": 67.196,
      "eval_steps_per_second": 8.4,
      "step": 50
    },
    {
      "epoch": 24.0,
      "grad_norm": 0.3209587633609772,
      "learning_rate": 2.916666666666667e-06,
      "loss": 0.0067,
      "step": 60
    },
    {
      "epoch": 24.0,
      "eval_accuracy": 0.9821428571428571,
      "eval_loss": 0.07470609247684479,
      "eval_runtime": 8.1625,
      "eval_samples_per_second": 68.606,
      "eval_steps_per_second": 8.576,
      "step": 60
    },
    {
      "epoch": 28.0,
      "grad_norm": 0.22312191128730774,
      "learning_rate": 1.527777777777778e-06,
      "loss": 0.0035,
      "step": 70
    },
    {
      "epoch": 28.0,
      "eval_accuracy": 0.9767857142857143,
      "eval_loss": 0.07538952678442001,
      "eval_runtime": 7.554,
      "eval_samples_per_second": 74.133,
      "eval_steps_per_second": 9.267,
      "step": 70
    },
    {
      "epoch": 32.0,
      "grad_norm": 0.1788581758737564,
      "learning_rate": 1.3888888888888888e-07,
      "loss": 0.0027,
      "step": 80
    },
    {
      "epoch": 32.0,
      "eval_accuracy": 0.9785714285714285,
      "eval_loss": 0.07296328991651535,
      "eval_runtime": 7.9971,
      "eval_samples_per_second": 70.026,
      "eval_steps_per_second": 8.753,
      "step": 80
    },
    {
      "epoch": 32.0,
      "step": 80,
      "total_flos": 7.012786101918106e+17,
      "train_loss": 0.34752421248704196,
      "train_runtime": 465.741,
      "train_samples_per_second": 6.871,
      "train_steps_per_second": 0.172
    }
  ],
  "logging_steps": 10,
  "max_steps": 80,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 40,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.012786101918106e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}