muneebable committed: resize images
app.py CHANGED
@@ -145,6 +145,15 @@ def style_transfer(content_image, style_image, alpha, beta, conv1_1, conv2_1, co
 
     return im_convert(target)
 
+
+# Function to resize image while maintaining aspect ratio
+def resize_image(image_path, max_size=400):
+    img = Image.open(image_path).convert('RGB')
+    ratio = max_size / max(img.size)
+    new_size = tuple([int(x*ratio) for x in img.size])
+    img = img.resize(new_size, Image.ANTIALIAS)
+    return np.array(img)
+
 # Example images
 examples = [
     ["https://huggingface.co/spaces/muneebable/vgg-style-transfer/resolve/main/assets/content_1.jpg",
@@ -163,13 +172,16 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 vgg.to(device)
 
 
+# Resize example images
+resized_examples = [[resize_image(content), resize_image(style)] for content, style in examples]
+
 # Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# Neural Style Transfer")
     with gr.Row():
         with gr.Column():
-            content_input = gr.Image(label="Content Image")
-            style_input = gr.Image(label="Style Image")
+            content_input = gr.Image(label="Content Image", type="numpy", image_mode="RGB", shape=(400, 400))
+            style_input = gr.Image(label="Style Image", type="numpy", image_mode="RGB", shape=(400, 400))
         with gr.Column():
             output_image = gr.Image(label="Output Image")
 
@@ -206,7 +218,7 @@ with gr.Blocks() as demo:
     )
 
     gr.Examples(
-        examples,
+        resized_examples,
         inputs=[content_input, style_input]
     )
 
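A note on the committed helper, separate from the diff itself: resize_image hands its argument straight to Image.open, which expects a local path or file object, while the entries in examples are HTTP URLs, and the Image.ANTIALIAS constant was removed in Pillow 10 in favour of Image.Resampling.LANCZOS. Below is a minimal sketch of a URL-aware variant under those assumptions; the name resize_image_from_url and the use of the requests library are illustrative and not part of this commit.

import io

import numpy as np
import requests
from PIL import Image

def resize_image_from_url(src, max_size=400):
    # Illustrative variant, not part of this commit: fetch over HTTP when
    # given a URL, otherwise treat src as a local path or file object.
    if isinstance(src, str) and src.startswith(("http://", "https://")):
        resp = requests.get(src, timeout=30)
        resp.raise_for_status()
        img = Image.open(io.BytesIO(resp.content)).convert("RGB")
    else:
        img = Image.open(src).convert("RGB")
    # Same aspect-ratio-preserving resize as the committed helper, using
    # Image.Resampling.LANCZOS, the replacement for the removed ANTIALIAS alias.
    ratio = max_size / max(img.size)
    new_size = tuple(int(x * ratio) for x in img.size)
    img = img.resize(new_size, Image.Resampling.LANCZOS)
    return np.array(img)

Similarly, if the Space runs a Gradio 4.x build, the shape= argument passed to gr.Image above is a Gradio 3.x parameter that newer releases no longer accept.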