XXXXRT666 committed
Commit 1de27a5
Parent(s): d5f531d

train_vit.py +52 -27

train_vit.py CHANGED
@@ -1,5 +1,5 @@
 from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
-from transformers import ViTForImageClassification, ViTImageProcessor, Trainer, TrainingArguments
+from transformers import ViTForImageClassification, ViTImageProcessor, Trainer, TrainingArguments, ViTConfig
 from PIL import Image
 from torch.optim import AdamW
 from torch.optim.lr_scheduler import StepLR
@@ -9,23 +9,35 @@ from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
 import os
 
 
-MODEL_NAME = "/
+MODEL_NAME = "google/vit-base-patch16-224"
 SIZE = "base"
 PATCH = 16
-IMAGE_SIZE =
-BATCH_SIZE =
+IMAGE_SIZE = 224
+BATCH_SIZE = 128
 OPTIMIZER = "AdamW"
 SCHEDULER = "StepLR"
+NAME = 'no-pretrain'
 
-IMAGE_PATH = '/
-TRAIN_CSV_PATH = '/
-TEST_CSV_PATH = '/
+IMAGE_PATH = '/root/autodl-tmp/ISIC-2019'
+TRAIN_CSV_PATH = '/root/autodl-tmp/ISIC-2019/train_labels.csv'
+TEST_CSV_PATH = '/root/autodl-tmp/ISIC-2019/test_labels.csv'
 
-
-
+checkpoint_dir = "/root/autodl-tmp/ISIC-2019/logs/vit-base-patch16-224-bs128-AdamW-StepLR-lables-9"
+
+if os.path.isdir(checkpoint_dir) and any(os.scandir(checkpoint_dir)):
+    checkpoint = max([os.path.join(checkpoint_dir, d) for d in os.listdir(checkpoint_dir) if "checkpoint" in d], key=os.path.getctime)
+else:
+    checkpoint = None
+
+checkpoint = "/root/autodl-tmp/ISIC-2019/logs/vit-base-patch16-224-bs128-AdamW-StepLR-lables-9-train-linear/checkpoint-895"
+checkpoint = None
+print(f"Resuming training from checkpoint {checkpoint}")
+
+processed_dataset_path = f"/root/autodl-tmp/ISIC-2019/dataset-{IMAGE_SIZE}"
+# processed_dataset_path = f"/root/autodl-tmp/dataset-{IMAGE_SIZE}"
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-device = torch.device("mps")
+# device = torch.device("mps")
 
 processor = ViTImageProcessor.from_pretrained(MODEL_NAME)
 
@@ -48,11 +60,11 @@ def preprocess_image_test(example):
     return example
 
 if os.path.exists(processed_dataset_path):
-    dataset = load_from_disk(processed_dataset_path)
+    dataset = load_from_disk(processed_dataset_path, keep_in_memory=True)
     print("LOADED")
 else:
-    train_dataset = load_dataset('csv', data_files=TRAIN_CSV_PATH)["train"]
-    test_dataset = load_dataset('csv', data_files=TEST_CSV_PATH)["train"]
+    train_dataset = load_dataset('csv', data_files=TRAIN_CSV_PATH, keep_in_memory=True)["train"]
+    test_dataset = load_dataset('csv', data_files=TEST_CSV_PATH, keep_in_memory=True)["train"]
 
     train_dataset = train_dataset.map(preprocess_image_train, batched=False, num_proc=2)
     test_dataset = test_dataset.map(preprocess_image_test, batched=False, num_proc=2)
@@ -69,24 +81,35 @@ test_dataset = dataset['test']
 
 num_labels = 9
 
-model = ViTForImageClassification.from_pretrained(
-
-
-
-
+# model = ViTForImageClassification.from_pretrained(
+#     MODEL_NAME,
+#     num_labels=num_labels,
+#     problem_type="multi_label_classification",
+#     ignore_mismatched_sizes=True
+# ).to(device)
+
+# for param in model.parameters():
+#     param.requires_grad = False
+
+# for param in model.classifier.parameters():
+#     param.requires_grad = True
 
+config = ViTConfig(image_size=IMAGE_SIZE, num_labels=num_labels, problem_type="multi_label_classification", patch_size=PATCH)
+model = ViTForImageClassification(config)
 
 training_args = TrainingArguments(
-    output_dir=f"/
+    output_dir=f"/root/autodl-tmp/ISIC-2019/logs/vit-{SIZE}-patch{PATCH}-{IMAGE_SIZE}-bs{BATCH_SIZE}-{OPTIMIZER}-{SCHEDULER}-lables-{num_labels}-{NAME}",
     evaluation_strategy="epoch",
     learning_rate=5e-5,
     per_device_train_batch_size=BATCH_SIZE,
     per_device_eval_batch_size=BATCH_SIZE,
-    num_train_epochs=
+    num_train_epochs=20,
     save_strategy="epoch",
-    logging_dir=f"/
-    logging_steps=
-    report_to="tensorboard"
+    logging_dir=f"/root/autodl-tmp/ISIC-2019/logs/vit-{SIZE}-patch{PATCH}-{IMAGE_SIZE}-bs{BATCH_SIZE}-{OPTIMIZER}-{SCHEDULER}-lables-{num_labels}-{NAME}/logs",
+    logging_steps=10,
+    report_to="tensorboard",
+    fp16=True,
+    dataloader_num_workers=8
 )
 
 
@@ -107,10 +130,10 @@ def compute_metrics(pred):
 
 learning_rate = 5e-5
 weight_decay = 0.01
-step_size =
+step_size = 1000
 gamma = 0.1
 optimizer = AdamW(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
-scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)
+# scheduler = StepLR(optimizer, step_size=step_size, gamma=gamma)
 
 
 trainer = Trainer(
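Note: the hunk header above references compute_metrics(pred), whose body is unchanged and therefore not shown in this diff. For orientation only, a minimal sketch of what such a multi-label metric function typically looks like, assuming sigmoid activations thresholded at 0.5 (hypothetical; the actual implementation in train_vit.py may differ):

import numpy as np
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score

def compute_metrics(pred):
    # pred is a transformers EvalPrediction: raw logits plus multi-hot labels
    probs = 1 / (1 + np.exp(-pred.predictions))   # sigmoid over logits
    preds = (probs > 0.5).astype(int)             # threshold to multi-hot predictions
    labels = pred.label_ids.astype(int)
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1_score(labels, preds, average="macro", zero_division=0),
        "precision": precision_score(labels, preds, average="macro", zero_division=0),
        "recall": recall_score(labels, preds, average="macro", zero_division=0),
    }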
@@ -119,8 +142,10 @@ trainer = Trainer(
     train_dataset=train_dataset,
     eval_dataset=test_dataset,
     compute_metrics=compute_metrics,
-    optimizers=(optimizer,
+    optimizers=(optimizer, None)
+    # optimizers=(optimizer, scheduler)
 )
 
 
-trainer.train()
+# trainer.train()
+trainer.train(resume_from_checkpoint=checkpoint)
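Note: with optimizers=(optimizer, None) the Trainer keeps the custom AdamW but, since no scheduler is passed, builds its default one (lr_scheduler_type is "linear" unless overridden in TrainingArguments). A rough sketch of the equivalent explicit construction, assuming num_training_steps is computed from epochs and steps per epoch:

from transformers import get_scheduler

# Roughly what the Trainer creates when the scheduler slot is None
scheduler = get_scheduler(
    "linear",                               # default lr_scheduler_type
    optimizer=optimizer,
    num_warmup_steps=0,                     # TrainingArguments default warmup
    num_training_steps=num_training_steps,  # epochs * steps per epoch
)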
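Note: as committed, checkpoint is reassigned to None just before the print, so trainer.train(resume_from_checkpoint=checkpoint) starts a fresh run and the earlier checkpoint-discovery block is effectively dead code. If auto-resume is wanted instead, Trainer also accepts a boolean:

# Let the Trainer locate the most recent checkpoint in output_dir itself
trainer.train(resume_from_checkpoint=True)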
|