Update app.py
app.py CHANGED
@@ -51,6 +51,23 @@ def nms(x, t, s):
     z[y > t] = 255
     return z
 
+def HWC3(x):
+    assert x.dtype == np.uint8
+    if x.ndim == 2:
+        x = x[:, :, None]
+    assert x.ndim == 3
+    H, W, C = x.shape
+    assert C == 1 or C == 3 or C == 4
+    if C == 3:
+        return x
+    if C == 1:
+        return np.concatenate([x, x, x], axis=2)
+    if C == 4:
+        color = x[:, :, 0:3].astype(np.float32)
+        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
+        y = color * alpha + 255.0 * (1.0 - alpha)
+        y = y.clip(0, 255).astype(np.uint8)
+        return y
 
 DESCRIPTION = '''# Scribble SDXL 🖋️🌄
 sketch to image with SDXL, using [@xinsir](https://huggingface.co/xinsir) [scribble sdxl controlnet](https://huggingface.co/xinsir/controlnet-scribble-sdxl-1.0)
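The HWC3 helper added above is the normalization utility used in lllyasviel's ControlNet annotator code: it promotes any uint8 image to 3-channel RGB, duplicating a single channel and compositing an RGBA image over a white background. A minimal sanity check (hypothetical, not part of the commit):

    import numpy as np

    edges = np.zeros((64, 64), dtype=np.uint8)  # e.g. what cv2.Canny returns
    rgb = HWC3(edges)
    assert rgb.shape == (64, 64, 3)             # single channel duplicated to RGB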
@@ -131,7 +148,10 @@ controlnet = ControlNetModel.from_pretrained(
     "xinsir/controlnet-scribble-sdxl-1.0",
     torch_dtype=torch.float16
 )
-
+controlnet_canny = ControlNetModel.from_pretrained(
+    "xinsir/controlnet-canny-sdxl-1.0",
+    torch_dtype=torch.float16
+)
 # when test with other base model, you need to change the vae also.
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
@@ -144,6 +164,16 @@ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
 )
 pipe.to(device)
 # Load model.
+pipe_canny = StableDiffusionXLControlNetPipeline.from_pretrained(
+    "stabilityai/stable-diffusion-xl-base-1.0",
+    controlnet=controlnet_canny,
+    vae=vae,
+    safety_checker=None,
+    torch_dtype=torch.float16,
+    scheduler=eulera_scheduler,
+)
+
+pipe_canny.to(device)
 
 MAX_SEED = np.iinfo(np.int32).max
 processor = HEDdetector.from_pretrained('lllyasviel/Annotators')
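This instantiates the SDXL UNet and text encoders a second time; only the ControlNet weights differ, and the VAE instance is shared because it is passed in explicitly. Assuming a diffusers version that provides DiffusionPipeline.from_pipe, a possible alternative would share all already-loaded components (a sketch, not part of this commit):

    # Hypothetical: reuse the scribble pipeline's components, swap only the ControlNet.
    pipe_canny = StableDiffusionXLControlNetPipeline.from_pipe(
        pipe, controlnet=controlnet_canny
    )
    pipe_canny.to(device)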
@@ -180,6 +210,7 @@ def run(
     controlnet_conditioning_scale: float = 1.0,
     seed: int = 0,
     use_hed: bool = False,
+    use_canny: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ) -> PIL.Image.Image:
     width, height = image['composite'].size
@@ -187,7 +218,13 @@ def run(
     new_width, new_height = int(width * ratio), int(height * ratio)
     image = image['composite'].resize((new_width, new_height))
 
-    if not use_hed:
+    if use_canny:
+        controlnet_img = np.array(image)
+        controlnet_img = cv2.Canny(controlnet_img, 100, 200)
+        controlnet_img = HWC3(controlnet_img)
+        image = Image.fromarray(controlnet_img)
+
+    elif not use_hed:
         controlnet_img = image
     else:
         controlnet_img = processor(image, scribble=False)
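In the new branch, 100 and 200 are the low and high hysteresis thresholds of cv2.Canny, which returns a single-channel uint8 edge map; HWC3 then expands it to the 3-channel RGB input the ControlNet expects. A standalone preview of the same preprocessing (hypothetical filenames, not part of the commit):

    import cv2
    import numpy as np
    from PIL import Image

    img = np.array(Image.open("sketch.png").convert("RGB"))
    edges = cv2.Canny(img, 100, 200)       # (H, W) uint8, values 0 or 255
    Image.fromarray(HWC3(edges)).save("canny_preview.png")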
@@ -206,7 +243,8 @@ def run(
     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
 
     generator = torch.Generator(device=device).manual_seed(seed)
-    out = pipe(
+    if use_canny:
+        out = pipe_canny(
         prompt=prompt,
         negative_prompt=negative_prompt,
         image=image,
@@ -217,6 +255,17 @@ def run(
         width=new_width,
         height=new_height,
     ).images[0]
+    else:
+        out = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=image,
+            num_inference_steps=num_steps,
+            generator=generator,
+            controlnet_conditioning_scale=controlnet_conditioning_scale,
+            guidance_scale=guidance_scale,
+            width=new_width,
+            height=new_height,).images[0]
 
     return (controlnet_img, out)
 
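The two branches pass identical arguments and differ only in the pipeline object, so an equivalent formulation would select the pipeline first (a hypothetical refactor, not part of this commit):

    active_pipe = pipe_canny if use_canny else pipe
    out = active_pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        image=image,
        num_inference_steps=num_steps,
        generator=generator,
        controlnet_conditioning_scale=controlnet_conditioning_scale,
        guidance_scale=guidance_scale,
        width=new_width,
        height=new_height,
    ).images[0]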
@@ -236,6 +285,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
             prompt = gr.Textbox(label="Prompt")
             style = gr.Dropdown(label="Style", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
             use_hed = gr.Checkbox(label="use HED detector", value=False, info="check this box if you upload an image and want to turn it to a sketch")
+            use_canny = gr.Checkbox(label="use Canny", value=False, info="check this to use ControlNet canny instead of scribble")
             run_button = gr.Button("Run")
             with gr.Accordion("Advanced options", open=False):
                 negative_prompt = gr.Textbox(
@@ -287,6 +337,7 @@ with gr.Blocks(css="style.css", js=js_func) as demo:
         controlnet_conditioning_scale,
         seed,
         use_hed,
+        use_canny
     ]
     outputs = [image_slider]
     run_button.click(