from datasets import DatasetDict, load_dataset, load_from_disk
from transformers import ViTForImageClassification, ViTImageProcessor, Trainer, TrainingArguments
from PIL import Image
from torch.optim import AdamW
from torch.optim.lr_scheduler import StepLR
import torch
import numpy as np
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
import os

# Model and run configuration
MODEL_NAME = "/Users/XXXXRT/vit_pretrain/vit-base-patch16-384"
SIZE = "base"
PATCH = 16
IMAGE_SIZE = 384
BATCH_SIZE = 8
OPTIMIZER = "AdamW"
SCHEDULER = "StepLR"

# ISIC-2019 images and label CSVs
IMAGE_PATH = '/Users/XXXXRT/ISIC-2019'
TRAIN_CSV_PATH = '/Users/XXXXRT/ISIC-2019/train_labels.csv'
TEST_CSV_PATH = '/Users/XXXXRT/ISIC-2019/test_labels.csv'

# Cache for the preprocessed dataset (local path kept as an alternative)
# processed_dataset_path = f"/Users/XXXXRT/ISIC-2019/dataset-{IMAGE_SIZE}"
processed_dataset_path = f"/Volumes/T9 APFS/ML Dataset/dataset-{IMAGE_SIZE}"

# Prefer Apple MPS, then CUDA, then CPU
if torch.backends.mps.is_available():
    device = torch.device("mps")
elif torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

processor = ViTImageProcessor.from_pretrained(MODEL_NAME)

# Diagnosis columns in the ISIC-2019 ground-truth CSVs (multi-hot targets)
LABEL_COLUMNS = ["MEL", "NV", "BCC", "AK", "BKL", "DF", "VASC", "SCC", "UNK"]


def preprocess_image_train(example):
    """Load a training image, run the ViT processor, and attach the multi-hot label vector."""
    image = Image.open(os.path.join(IMAGE_PATH, 'train', example["image"])).convert("RGB")
    example["pixel_values"] = processor(images=image, return_tensors="pt")["pixel_values"].squeeze(0).numpy()
    example["labels"] = np.array([example[c] for c in LABEL_COLUMNS], dtype=np.float32)
    return example


def preprocess_image_test(example):
    """Same as preprocess_image_train, but reads from the test image directory."""
    image = Image.open(os.path.join(IMAGE_PATH, 'test', example["image"])).convert("RGB")
    example["pixel_values"] = processor(images=image, return_tensors="pt")["pixel_values"].squeeze(0).numpy()
    example["labels"] = np.array([example[c] for c in LABEL_COLUMNS], dtype=np.float32)
    return example


# Reuse the preprocessed dataset if it has already been saved to disk
if os.path.exists(processed_dataset_path):
    dataset = load_from_disk(processed_dataset_path)
    print("LOADED")
else:
    train_dataset = load_dataset('csv', data_files=TRAIN_CSV_PATH)["train"]
    test_dataset = load_dataset('csv', data_files=TEST_CSV_PATH)["train"]
    train_dataset = train_dataset.map(preprocess_image_train, batched=False, num_proc=2)
    test_dataset = test_dataset.map(preprocess_image_test, batched=False, num_proc=2)
    dataset = DatasetDict({
        'train': train_dataset,
        'test': test_dataset
    })
    dataset.save_to_disk(processed_dataset_path, num_proc=2)
    print(f"SAVED TO {processed_dataset_path}")

train_dataset = dataset['train']
test_dataset = dataset['test']

num_labels = 9

# problem_type="multi_label_classification" makes the classification head train with BCEWithLogitsLoss
model = ViTForImageClassification.from_pretrained(
    MODEL_NAME,
    num_labels=num_labels,
    problem_type="multi_label_classification"
).to(device)

RUN_NAME = f"vit-{SIZE}-patch{PATCH}-{IMAGE_SIZE}-bs{BATCH_SIZE}-{OPTIMIZER}-{SCHEDULER}-labels-{num_labels}"

training_args = TrainingArguments(
    output_dir=f"/Users/XXXXRT/ISIC-2019/logs/{RUN_NAME}",
    evaluation_strategy="epoch",
    learning_rate=5e-5,
    per_device_train_batch_size=BATCH_SIZE,
    per_device_eval_batch_size=BATCH_SIZE,
    num_train_epochs=5,
    save_strategy="epoch",
    logging_dir=f"/Users/XXXXRT/ISIC-2019/logs/{RUN_NAME}/logs",
    logging_steps=50,
    report_to="tensorboard"
)


def compute_metrics(pred):
    logits, labels = pred
    # The model outputs raw logits; apply a sigmoid so the 0.5 threshold acts on probabilities
    probs = 1 / (1 + np.exp(-logits))
    predictions = (probs >= 0.5).astype(int)
    f1 = f1_score(labels, predictions, average="macro")
    accuracy = accuracy_score(labels, predictions)
    recall = recall_score(labels, predictions, average="macro")
    precision = precision_score(labels, predictions, average="macro")
    return {
        "accuracy": accuracy,
        "f1": f1,
        "recall": recall,
        "precision": precision,
    }


learning_rate = 5e-5
weight_decay = 0.01
step_size = 100
gamma = 0.1

optimizer = AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=test_dataset,
    compute_metrics=compute_metrics,
    optimizers=(optimizer, scheduler)
)

trainer.train()
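
# --- Optional post-training step (a minimal sketch, not part of the original script) ---
# Assumes training has finished. SAVE_DIR is a hypothetical output location chosen here
# for illustration; trainer.save_model / processor.save_pretrained write the weights,
# config, and preprocessing settings, and trainer.predict reports the test-set metrics
# produced by compute_metrics above.
SAVE_DIR = f"/Users/XXXXRT/ISIC-2019/logs/{RUN_NAME}/final"

trainer.save_model(SAVE_DIR)           # model weights + config
processor.save_pretrained(SAVE_DIR)    # image processor settings for later inference

test_output = trainer.predict(test_dataset)
print(test_output.metrics)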