hpc-yekin
committed on
Commit · c80b748
1 Parent(s): 8a6f0b6
bug fix
app.py CHANGED
@@ -32,23 +32,15 @@ clipaway = CLIPAway(
 )
 
 def dilate_mask(mask, kernel_size=5, iterations=5):
-    mask = mask.convert("L")
+    mask = mask.convert("L").resize((512, 512), Image.NEAREST)
     kernel = np.ones((kernel_size, kernel_size), np.uint8)
     mask = cv2.dilate(np.array(mask), kernel, iterations=iterations)
     return Image.fromarray(mask)
 
-def combine_masks(uploaded_mask, sketched_mask):
-    if uploaded_mask is not None:
-        return uploaded_mask
-    elif sketched_mask is not None:
-        return sketched_mask
-    else:
-        raise ValueError("Please provide a mask")
-
 @spaces.GPU
 def remove_obj(image, uploaded_mask, seed):
-    image_pil
-    mask = dilate_mask(
+    image_pil = image["image"].resize((512, 512), Image.ANTIALIAS)
+    mask = dilate_mask(uploaded_mask)
     seed = int(seed)
     latents = torch.randn((1, 4, 64, 64), generator=torch.Generator().manual_seed(seed)).to("cuda")
     final_image = clipaway.generate(
@@ -62,22 +54,21 @@ examples = [
     ["gradio_examples/images/1.jpg", "gradio_examples/masks/1.png", 42],
     ["gradio_examples/images/2.jpg", "gradio_examples/masks/2.png", 42],
     ["gradio_examples/images/3.jpg", "gradio_examples/masks/3.png", 464],
-    ["gradio_examples/images/4.jpg", "gradio_examples/masks/4.png", 2024],
 ]
 
-with gr.Blocks() as demo:
+with gr.Blocks(theme="gradio/monochrome") as demo:
     gr.Markdown("<h1 style='text-align:center'>CLIPAway: Harmonizing Focused Embeddings for Removing Objects via Diffusion Models</h1>")
     gr.Markdown("""
         <div style='display:flex; justify-content:center; align-items:center;'>
-            <a href='https://arxiv.org/abs/2406.09368' style="margin-right:10px;
-            <a href='https://yigitekin.github.io/CLIPAway/' style="margin:10px;
-            <a href='https://github.com/YigitEkin/CLIPAway' style="margin-left:10px;
+            <a href='https://arxiv.org/abs/2406.09368' style="margin-right:10px;">Paper</a> |
+            <a href='https://yigitekin.github.io/CLIPAway/' style="margin:10px;">Project Website</a> |
+            <a href='https://github.com/YigitEkin/CLIPAway' style="margin-left:10px;">GitHub</a>
         </div>
     """)
     gr.Markdown("""
        This application allows you to remove objects from images using the CLIPAway method with diffusion models.
        To use this tool:
-       1. Upload an image.
+       1. Upload an image. (NOTE: We expect a 512x512 image, if you upload a different size, it will be resized to 512x512 which can affect the results.)
        2. Upload a pre-defined mask if you have one. (If you don't have a mask, and want to sketch one,
        we have provided a gradio demo in our github repository. <br/> Unfortunately, we cannot provide it here due to the compatibility issues with zerogpu.)
        3. Set the seed for reproducibility (default is 42).