nehulagrawal
committed
Upload 4 files
- main.py +34 -0
- model.pth +3 -0
- output.png +0 -0
- watermark_remover.py +43 -0
main.py
ADDED
@@ -0,0 +1,34 @@
import torch
from torchvision import transforms
from PIL import Image
from watermark_remover import WatermarkRemover
import numpy as np

image_path = "path to your test image"  # Replace with the path to your test image
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the trained model
model = WatermarkRemover().to(device)
model_path = "path to your model.pth"  # Replace with the path to your saved model
model.load_state_dict(torch.load(model_path, map_location=device))
model.eval()

transform = transforms.Compose([transforms.Resize((256, 256)),
                                transforms.ToTensor()])
watermarked_image = Image.open(image_path).convert("RGB")
original_size = watermarked_image.size
input_tensor = transform(watermarked_image).unsqueeze(0).to(device)

with torch.no_grad():
    output_tensor = model(input_tensor)

predicted_image = output_tensor.squeeze(0).cpu().permute(1, 2, 0).clamp(0, 1).numpy()
predicted_pil = Image.fromarray((predicted_image * 255).astype(np.uint8))
predicted_pil = predicted_pil.resize(original_size, Image.Resampling.LANCZOS)
predicted_pil.save("predicted_image.jpg", quality=100)
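main.py handles a single image. A minimal sketch for running the same pipeline over a folder of images, assuming the model, transform, and device objects defined above and using hypothetical folder names watermarked/ and cleaned/ (placeholders, not part of this upload):

from pathlib import Path

input_dir = Path("watermarked")   # hypothetical folder of watermarked images
output_dir = Path("cleaned")      # hypothetical folder for the cleaned results
output_dir.mkdir(exist_ok=True)

for image_file in sorted(input_dir.glob("*.jpg")):
    image = Image.open(image_file).convert("RGB")
    size = image.size
    tensor = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        out = model(tensor)
    out = out.squeeze(0).cpu().permute(1, 2, 0).clamp(0, 1).numpy()
    result = Image.fromarray((out * 255).astype(np.uint8))
    result = result.resize(size, Image.Resampling.LANCZOS)
    result.save(output_dir / image_file.name, quality=100)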
model.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dfceb0199f80877802c6f0c5abf412f39ee82a634cbbcf072d05ce7c06e33d3b
size 125529866
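model.pth is tracked with Git LFS, so what sits in the repository is only a pointer file: the oid line is the SHA-256 of the real weights and size is their byte count. A small sketch to confirm that a downloaded model.pth matches this pointer (only standard-library calls, nothing else assumed):

import hashlib
import os

path = "model.pth"  # the actual weights fetched via Git LFS or the Hub
expected_oid = "dfceb0199f80877802c6f0c5abf412f39ee82a634cbbcf072d05ce7c06e33d3b"
expected_size = 125529866

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest.hexdigest() == expected_oid, "sha256 mismatch"
print("model.pth matches the LFS pointer")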
output.png
ADDED
watermark_remover.py
ADDED
@@ -0,0 +1,43 @@
import torch
import torch.nn as nn


class WatermarkRemover(nn.Module):
    """U-Net style encoder-decoder that maps a watermarked RGB image to a clean RGB image."""

    def __init__(self):
        super(WatermarkRemover, self).__init__()
        # Encoder: each block doubles the channel count; spatial size is halved in forward() by max pooling
        self.enc1 = self.conv_block(3, 64)
        self.enc2 = self.conv_block(64, 128)
        self.enc3 = self.conv_block(128, 256)
        self.enc4 = self.conv_block(256, 512)

        self.bottleneck = self.conv_block(512, 1024)

        # Decoder: input channels account for the skip connection concatenated in forward()
        self.dec4 = self.conv_block(1024 + 512, 512)
        self.dec3 = self.conv_block(512 + 256, 256)
        self.dec2 = self.conv_block(256 + 128, 128)
        self.dec1 = self.conv_block(128 + 64, 64)

        # 1x1 convolution back to 3 output channels (RGB)
        self.final_layer = nn.Conv2d(64, 3, kernel_size=1)

    def conv_block(self, in_channels, out_channels):
        # Two 3x3 convolutions with ReLU; padding=1 keeps the spatial size unchanged
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        # Encoder path with 2x2 max pooling between stages
        e1 = self.enc1(x)
        e2 = self.enc2(nn.MaxPool2d(2)(e1))
        e3 = self.enc3(nn.MaxPool2d(2)(e2))
        e4 = self.enc4(nn.MaxPool2d(2)(e3))

        b = self.bottleneck(nn.MaxPool2d(2)(e4))

        # Decoder path: upsample, concatenate the matching encoder features, then convolve
        d4 = self.dec4(torch.cat((nn.Upsample(scale_factor=2)(b), e4), dim=1))
        d3 = self.dec3(torch.cat((nn.Upsample(scale_factor=2)(d4), e3), dim=1))
        d2 = self.dec2(torch.cat((nn.Upsample(scale_factor=2)(d3), e2), dim=1))
        d1 = self.dec1(torch.cat((nn.Upsample(scale_factor=2)(d2), e1), dim=1))

        return self.final_layer(d1)
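Because the network pools by 2 four times and concatenates each upsampled feature map with its encoder counterpart, the input height and width must be divisible by 16; the 256x256 resize in main.py satisfies this. A purely illustrative shape check:

import torch
from watermark_remover import WatermarkRemover

model = WatermarkRemover()
dummy = torch.randn(1, 3, 256, 256)   # batch of one 256x256 RGB image
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: torch.Size([1, 3, 256, 256])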