youssefKadaouiAbbassi committed · verified
Commit e34a11d · 1 Parent(s): 8ada244

Update app.py

Files changed (1): app.py +45 -548
app.py CHANGED
@@ -21,124 +21,72 @@ from insightface.app import FaceAnalysis
  from style_template import styles
  from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps

- # from controlnet_aux import OpenposeDetector
-
- import gradio as gr
-
  from depth_anything.dpt import DepthAnything
  from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

  import torch.nn.functional as F
  from torchvision.transforms import Compose

- # global variable
  MAX_SEED = np.iinfo(np.int32).max
  device = "cuda" if torch.cuda.is_available() else "cpu"
- dtype = torch.float16 if str(device).__contains__("cuda") else torch.float32
  STYLE_NAMES = list(styles.keys())
  DEFAULT_STYLE_NAME = "Spring Festival"
- enable_lcm_arg = False
-
- # download checkpoints
- from huggingface_hub import hf_hub_download

  hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/config.json", local_dir="./checkpoints")
- hf_hub_download(
-     repo_id="InstantX/InstantID",
-     filename="ControlNetModel/diffusion_pytorch_model.safetensors",
-     local_dir="./checkpoints",
- )
  hf_hub_download(repo_id="InstantX/InstantID", filename="ip-adapter.bin", local_dir="./checkpoints")

  # Load face encoder
- app = FaceAnalysis(
-     name="antelopev2",
-     root="./",
-     providers=["CPUExecutionProvider"],
- )
  app.prepare(ctx_id=0, det_size=(640, 640))

- # openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
-
  depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
-
  transform = Compose([
-     Resize(
-         width=518,
-         height=518,
-         resize_target=False,
-         keep_aspect_ratio=True,
-         ensure_multiple_of=14,
-         resize_method='lower_bound',
-         image_interpolation_method=cv2.INTER_CUBIC,
-     ),
      NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
      PrepareForNet(),
  ])

- # Path to InstantID models
- face_adapter = f"./checkpoints/ip-adapter.bin"
- controlnet_path = f"./checkpoints/ControlNetModel"
-
- # Load pipeline face ControlNetModel
- controlnet_identitynet = ControlNetModel.from_pretrained(
-     controlnet_path, torch_dtype=dtype
- )
-
- # controlnet-pose/canny/depth
- # controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
- controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
- controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
-
- # controlnet_pose = ControlNetModel.from_pretrained(
- # controlnet_pose_model, torch_dtype=dtype
- # ).to(device)
- controlnet_canny = ControlNetModel.from_pretrained(
-     controlnet_canny_model, torch_dtype=dtype
- ).to(device)
- controlnet_depth = ControlNetModel.from_pretrained(
-     controlnet_depth_model, torch_dtype=dtype
- ).to(device)
-
  def get_depth_map(image):
-
      image = np.array(image) / 255.0
-
      h, w = image.shape[:2]
-
      image = transform({'image': image})['image']
-     image = torch.from_numpy(image).unsqueeze(0).to("cuda")
-
      with torch.no_grad():
          depth = depth_anything(image)
-
      depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
      depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
-
-     depth = depth.cpu().numpy().astype(np.uint8)
-
-     depth_image = Image.fromarray(depth)
-
-     return depth_image

  def get_canny_image(image, t1=100, t2=200):
      image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
      edges = cv2.Canny(image, t1, t2)
      return Image.fromarray(edges, "L")

  controlnet_map = {
-     #"pose": controlnet_pose,
      "canny": controlnet_canny,
      "depth": controlnet_depth,
  }
  controlnet_map_fn = {
-     #"pose": openpose,
      "canny": get_canny_image,
      "depth": get_depth_map,
  }

  pretrained_model_name_or_path = "wangqixun/YamerMIX_v8"
-
  pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
      pretrained_model_name_or_path,
      controlnet=[controlnet_identitynet],
@@ -147,505 +95,54 @@ pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
      feature_extractor=None,
  ).to(device)

- pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(
-     pipe.scheduler.config
- )
-
- # load and disable LCM
  pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
  pipe.disable_lora()
-
  pipe.cuda()
- pipe.load_ip_adapter_instantid(face_adapter)
- pipe.image_proj_model.to("cuda")
- pipe.unet.to("cuda")
-
- def toggle_lcm_ui(value):
-     if value:
-         return (
-             gr.update(minimum=0, maximum=100, step=1, value=5),
-             gr.update(minimum=0.1, maximum=20.0, step=0.1, value=1.5),
-         )
-     else:
-         return (
-             gr.update(minimum=5, maximum=100, step=1, value=30),
-             gr.update(minimum=0.1, maximum=20.0, step=0.1, value=5),
-         )
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
- def remove_tips():
-     return gr.update(visible=False)
-
- def get_example():
-     case = [
-         [
-             "./examples/yann-lecun_resize.jpg",
-             None,
-             "a man",
-             "Spring Festival",
-             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
-         ],
-         [
-             "./examples/musk_resize.jpeg",
-             "./examples/poses/pose2.jpg",
-             "a man flying in the sky in Mars",
-             "Mars",
-             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
-         ],
-         [
-             "./examples/sam_resize.png",
-             "./examples/poses/pose4.jpg",
-             "a man doing a silly pose wearing a suite",
-             "Jungle",
-             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, gree",
-         ],
-         [
-             "./examples/schmidhuber_resize.png",
-             "./examples/poses/pose3.jpg",
-             "a man sit on a chair",
-             "Neon",
-             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
-         ],
-         [
-             "./examples/kaifu_resize.png",
-             "./examples/poses/pose.jpg",
-             "a man",
-             "Vibrant Color",
-             "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
-         ],
-     ]
-     return case
-
- def run_for_examples(face_file, pose_file, prompt, style, negative_prompt):
-     return generate_image(
-         face_file,
-         pose_file,
-         prompt,
-         negative_prompt,
-         style,
-         20, # num_steps
-         0.8, # identitynet_strength_ratio
-         0.8, # adapter_strength_ratio
-         #0.4, # pose_strength
-         0.3, # canny_strength
-         0.5, # depth_strength
-         ["depth", "canny"], # controlnet_selection
-         5.0, # guidance_scale
-         42, # seed
-         "EulerDiscreteScheduler", # scheduler
-         False, # enable_LCM
-         True, # enable_Face_Region
-     )
-
- def convert_from_cv2_to_image(img: np.ndarray) -> Image:
-     return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
-
- def convert_from_image_to_cv2(img: Image) -> np.ndarray:
-     return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
-
- def resize_img(
-     input_image,
-     max_side=1280,
-     min_side=1024,
-     size=None,
-     pad_to_max_side=False,
-     mode=PIL.Image.BILINEAR,
-     base_pixel_number=64,
- ):
-     w, h = input_image.size
-     if size is not None:
-         w_resize_new, h_resize_new = size
-     else:
-         ratio = min_side / min(h, w)
-         w, h = round(ratio * w), round(ratio * h)
-         ratio = max_side / max(h, w)
-         input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
-         w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
-         h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
-     input_image = input_image.resize([w_resize_new, h_resize_new], mode)
-
-     if pad_to_max_side:
-         res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
-         offset_x = (max_side - w_resize_new) // 2
-         offset_y = (max_side - h_resize_new) // 2
-         res[
-             offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new
-         ] = np.array(input_image)
-         input_image = Image.fromarray(res)
-     return input_image
-
- def apply_style(
-     style_name: str, positive: str, negative: str = ""
- ) -> Tuple[str, str]:
-     p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
-     return p.replace("{prompt}", positive), n + " " + negative

  @spaces.GPU
  def generate_image(
-     face_image_path,
-     pose_image_path,
-     prompt,
-     negative_prompt,
-     style_name,
-     num_steps,
-     identitynet_strength_ratio,
-     adapter_strength_ratio,
-     #pose_strength,
-     canny_strength,
-     depth_strength,
-     controlnet_selection,
-     guidance_scale,
-     seed,
-     scheduler,
-     enable_LCM,
-     enhance_face_region,
-     progress=gr.Progress(track_tqdm=True),
  ):
-
      if enable_LCM:
          pipe.scheduler = diffusers.LCMScheduler.from_config(pipe.scheduler.config)
          pipe.enable_lora()
      else:
          pipe.disable_lora()
      scheduler_class_name = scheduler.split("-")[0]
-
-     add_kwargs = {}
-     if len(scheduler.split("-")) > 1:
-         add_kwargs["use_karras_sigmas"] = True
-     if len(scheduler.split("-")) > 2:
-         add_kwargs["algorithm_type"] = "sde-dpmsolver++"
      scheduler = getattr(diffusers, scheduler_class_name)
-     pipe.scheduler = scheduler.from_config(pipe.scheduler.config, **add_kwargs)
-
-     if face_image_path is None:
-         raise gr.Error(
-             f"Cannot find any input face image! Please upload the face image"
-         )
-
-     if prompt is None:
-         prompt = "a person"
-
-     # apply the style template
-     prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)

      face_image = load_image(face_image_path)
      face_image = resize_img(face_image, max_side=1024)
-     face_image_cv2 = convert_from_image_to_cv2(face_image)
-     height, width, _ = face_image_cv2.shape
-
-     # Extract face features
      face_info = app.get(face_image_cv2)

-     if len(face_info) == 0:
-         raise gr.Error(
-             f"Unable to detect a face in the image. Please upload a different photo with a clear face."
-         )
-
-     face_info = sorted(
-         face_info,
-         key=lambda x: (x["bbox"][2] - x["bbox"][0]) * x["bbox"][3] - x["bbox"][1],
-     )[
-         -1
-     ] # only use the maximum face
-     face_emb = face_info["embedding"]
-     face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])
-     img_controlnet = face_image
-     if pose_image_path is not None:
-         pose_image = load_image(pose_image_path)
-         pose_image = resize_img(pose_image, max_side=1024)
-         img_controlnet = pose_image
-         pose_image_cv2 = convert_from_image_to_cv2(pose_image)

-         face_info = app.get(pose_image_cv2)

-         if len(face_info) == 0:
-             raise gr.Error(
-                 f"Cannot find any face in the reference image! Please upload another person image"
-             )

-         face_info = face_info[-1]
-         face_kps = draw_kps(pose_image, face_info["kps"])
-
-         width, height = face_kps.size
-
-     if enhance_face_region:
-         control_mask = np.zeros([height, width, 3])
-         x1, y1, x2, y2 = face_info["bbox"]
-         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-         control_mask[y1:y2, x1:x2] = 255
-         control_mask = Image.fromarray(control_mask.astype(np.uint8))
-     else:
-         control_mask = None
-
-     if len(controlnet_selection) > 0:
-         controlnet_scales = {
-             #"pose": pose_strength,
-             "canny": canny_strength,
-             "depth": depth_strength,
-         }
-         pipe.controlnet = MultiControlNetModel(
-             [controlnet_identitynet]
-             + [controlnet_map[s] for s in controlnet_selection]
-         )
-         control_scales = [float(identitynet_strength_ratio)] + [
-             controlnet_scales[s] for s in controlnet_selection
-         ]
-         control_images = [face_kps] + [
-             controlnet_map_fn[s](img_controlnet).resize((width, height))
-             for s in controlnet_selection
-         ]
-     else:
-         pipe.controlnet = controlnet_identitynet
-         control_scales = float(identitynet_strength_ratio)
-         control_images = face_kps

      generator = torch.Generator(device=device).manual_seed(seed)
-
-     print("Start inference...")
-     print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
-
-     pipe.set_ip_adapter_scale(adapter_strength_ratio)
-     images = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         image_embeds=face_emb,
-         image=control_images,
-         control_mask=control_mask,
-         controlnet_conditioning_scale=control_scales,
-         num_inference_steps=num_steps,
-         guidance_scale=guidance_scale,
-         height=height,
-         width=width,
-         generator=generator,
-     ).images
-
-     return images[0], gr.update(visible=True)
-
- # Description
- title = r"""
- <h1 align="center">InstantID: Zero-shot Identity-Preserving Generation in Seconds</h1>
- """
-
- description = r"""
- <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/InstantID/InstantID' target='_blank'><b>InstantID: Zero-shot Identity-Preserving Generation in Seconds</b></a>.<br>
- We are organizing a Spring Festival event with HuggingFace from 2.7 to 2.25, and you can now generate pictures of Spring Festival costumes. Happy Dragon Year 🐲 ! Share the joy with your family.<br>
- How to use:<br>
- 1. Upload an image with a face. For images with multiple faces, we will only detect the largest face. Ensure the face is not too small and is clearly visible without significant obstructions or blurring.
- 2. (Optional) You can upload another image as a reference for the face pose. If you don't, we will use the first detected face image to extract facial landmarks. If you use a cropped face at step 1, it is recommended to upload it to define a new face pose.
- 3. (Optional) You can select multiple ControlNet models to control the generation process. The default is to use the IdentityNet only. The ControlNet models include pose skeleton, canny, and depth. You can adjust the strength of each ControlNet model to control the generation process.
- 4. Enter a text prompt, as done in normal text-to-image models.
- 5. Click the <b>Submit</b> button to begin customization.
- 6. Share your customized photo with your friends and enjoy! 😊"""
-
- article = r"""
- ---
- 📝 **Citation**
- <br>
- If our work is helpful for your research or applications, please cite us via:
- ```bibtex
- @article{wang2024instantid,
- title={InstantID: Zero-shot Identity-Preserving Generation in Seconds},
- author={Wang, Qixun and Bai, Xu and Wang, Haofan and Qin, Zekui and Chen, Anthony},
- journal={arXiv preprint arXiv:2401.07519},
- year={2024}
- }
- ```
- 📧 **Contact**
- <br>
- If you have any questions, please feel free to open an issue or directly reach us out at <b>[email protected]</b>.
- """
-
- tips = r"""
- ### Usage tips of InstantID
- 1. If you're not satisfied with the similarity, try increasing the weight of "IdentityNet Strength" and "Adapter Strength."
- 2. If you feel that the saturation is too high, first decrease the Adapter strength. If it remains too high, then decrease the IdentityNet strength.
- 3. If you find that text control is not as expected, decrease Adapter strength.
- 4. If you find that realistic style is not good enough, go for our Github repo and use a more realistic base model.
- """
-
- css = """
- .gradio-container {width: 85% !important}
- """
- with gr.Blocks(css=css) as demo:
-     # description
-     gr.Markdown(title)
-     gr.Markdown(description)
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Row(equal_height=True):
-                 # upload face image
-                 face_file = gr.Image(
-                     label="Upload a photo of your face", type="filepath"
-                 )
-                 # optional: upload a reference pose image
-                 pose_file = gr.Image(
-                     label="Upload a reference pose image (Optional)",
-                     type="filepath",
-                 )
-
-             # prompt
-             prompt = gr.Textbox(
-                 label="Prompt",
-                 info="Give simple prompt is enough to achieve good face fidelity",
-                 placeholder="A photo of a person",
-                 value="",
-             )
-
-             submit = gr.Button("Submit", variant="primary")
-             enable_LCM = gr.Checkbox(
-                 label="Enable Fast Inference with LCM", value=enable_lcm_arg,
-                 info="LCM speeds up the inference step, the trade-off is the quality of the generated image. It performs better with portrait face images rather than distant faces",
-             )
-             style = gr.Dropdown(
-                 label="Style template",
-                 choices=STYLE_NAMES,
-                 value=DEFAULT_STYLE_NAME,
-             )
-
-             # strength
-             identitynet_strength_ratio = gr.Slider(
-                 label="IdentityNet strength (for fidelity)",
-                 minimum=0,
-                 maximum=1.5,
-                 step=0.05,
-                 value=0.80,
-             )
-             adapter_strength_ratio = gr.Slider(
-                 label="Image adapter strength (for detail)",
-                 minimum=0,
-                 maximum=1.5,
-                 step=0.05,
-                 value=0.80,
-             )
-             with gr.Accordion("Controlnet"):
-                 controlnet_selection = gr.CheckboxGroup(
-                     ["canny", "depth"], label="Controlnet", value=["depth"],
-                     info="Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process"
-                 )
-                 # pose_strength = gr.Slider(
-                 #     label="Pose strength",
-                 #     minimum=0,
-                 #     maximum=1.5,
-                 #     step=0.05,
-                 #     value=0.40,
-                 # )
-                 canny_strength = gr.Slider(
-                     label="Canny strength",
-                     minimum=0,
-                     maximum=1.5,
-                     step=0.05,
-                     value=0.40,
-                 )
-                 depth_strength = gr.Slider(
-                     label="Depth strength",
-                     minimum=0,
-                     maximum=1.5,
-                     step=0.05,
-                     value=0.40,
-                 )
-             with gr.Accordion(open=False, label="Advanced Options"):
-                 negative_prompt = gr.Textbox(
-                     label="Negative Prompt",
-                     placeholder="low quality",
-                     value="(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
-                 )
-                 num_steps = gr.Slider(
-                     label="Number of sample steps",
-                     minimum=1,
-                     maximum=100,
-                     step=1,
-                     value=5 if enable_lcm_arg else 30,
-                 )
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.1,
-                     maximum=20.0,
-                     step=0.1,
-                     value=0.0 if enable_lcm_arg else 5.0,
-                 )
-                 seed = gr.Slider(
-                     label="Seed",
-                     minimum=0,
-                     maximum=MAX_SEED,
-                     step=1,
-                     value=42,
-                 )
-                 schedulers = [
-                     "DEISMultistepScheduler",
-                     "HeunDiscreteScheduler",
-                     "EulerDiscreteScheduler",
-                     "DPMSolverMultistepScheduler",
-                     "DPMSolverMultistepScheduler-Karras",
-                     "DPMSolverMultistepScheduler-Karras-SDE",
-                 ]
-                 scheduler = gr.Dropdown(
-                     label="Schedulers",
-                     choices=schedulers,
-                     value="EulerDiscreteScheduler",
-                 )
-                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-                 enhance_face_region = gr.Checkbox(label="Enhance non-face region", value=True)
-
-         with gr.Column(scale=1):
-             gallery = gr.Image(label="Generated Images")
-             usage_tips = gr.Markdown(
-                 label="InstantID Usage Tips", value=tips, visible=False
-             )
-
-     submit.click(
-         fn=remove_tips,
-         outputs=usage_tips,
-     ).then(
-         fn=randomize_seed_fn,
-         inputs=[seed, randomize_seed],
-         outputs=seed,
-         queue=False,
-         api_name=False,
-     ).then(
-         fn=generate_image,
-         inputs=[
-             face_file,
-             pose_file,
-             prompt,
-             negative_prompt,
-             style,
-             num_steps,
-             identitynet_strength_ratio,
-             adapter_strength_ratio,
-             #pose_strength,
-             canny_strength,
-             depth_strength,
-             controlnet_selection,
-             guidance_scale,
-             seed,
-             scheduler,
-             enable_LCM,
-             enhance_face_region,
-         ],
-         outputs=[gallery, usage_tips],
-     )
-
-     enable_LCM.input(
-         fn=toggle_lcm_ui,
-         inputs=[enable_LCM],
-         outputs=[num_steps, guidance_scale],
-         queue=False,
-     )
-
-     gr.Examples(
-         examples=get_example(),
-         inputs=[face_file, pose_file, prompt, style, negative_prompt],
-         fn=run_for_examples,
-         outputs=[gallery, usage_tips],
-         cache_examples=True,
      )
-
-     gr.Markdown(article)
-
- demo.queue(api_open=False)
- demo.launch()

  from style_template import styles
  from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps

  from depth_anything.dpt import DepthAnything
  from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet

  import torch.nn.functional as F
  from torchvision.transforms import Compose

+ # Global variables
  MAX_SEED = np.iinfo(np.int32).max
  device = "cuda" if torch.cuda.is_available() else "cpu"
+ dtype = torch.float16 if torch.cuda.is_available() else torch.float32
  STYLE_NAMES = list(styles.keys())
  DEFAULT_STYLE_NAME = "Spring Festival"

+ # Download checkpoints
  hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/config.json", local_dir="./checkpoints")
+ hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/diffusion_pytorch_model.safetensors", local_dir="./checkpoints")
  hf_hub_download(repo_id="InstantX/InstantID", filename="ip-adapter.bin", local_dir="./checkpoints")

  # Load face encoder
+ app = FaceAnalysis(name="antelopev2", root="./", providers=["CPUExecutionProvider"])
  app.prepare(ctx_id=0, det_size=(640, 640))

+ # Depth map and transforms
  depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
  transform = Compose([
+     Resize(width=518, height=518, resize_target=False, keep_aspect_ratio=True, ensure_multiple_of=14, resize_method='lower_bound', image_interpolation_method=cv2.INTER_CUBIC),
      NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
      PrepareForNet(),
  ])

  def get_depth_map(image):
      image = np.array(image) / 255.0
      h, w = image.shape[:2]
      image = transform({'image': image})['image']
+     image = torch.from_numpy(image).unsqueeze(0).to(device)
      with torch.no_grad():
          depth = depth_anything(image)
      depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
      depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
+     return Image.fromarray(depth.cpu().numpy().astype(np.uint8))

  def get_canny_image(image, t1=100, t2=200):
      image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
      edges = cv2.Canny(image, t1, t2)
      return Image.fromarray(edges, "L")

+ # ControlNet paths and mapping
+ controlnet_path = "./checkpoints/ControlNetModel"
+ controlnet_identitynet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=dtype)
+
+ controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
+ controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
+ controlnet_canny = ControlNetModel.from_pretrained(controlnet_canny_model, torch_dtype=dtype).to(device)
+ controlnet_depth = ControlNetModel.from_pretrained(controlnet_depth_model, torch_dtype=dtype).to(device)
+
  controlnet_map = {
      "canny": controlnet_canny,
      "depth": controlnet_depth,
  }
  controlnet_map_fn = {
      "canny": get_canny_image,
      "depth": get_depth_map,
  }

+ # Stable Diffusion XL pipeline
  pretrained_model_name_or_path = "wangqixun/YamerMIX_v8"
  pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
      pretrained_model_name_or_path,
      controlnet=[controlnet_identitynet],

      feature_extractor=None,
  ).to(device)

+ pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(pipe.scheduler.config)
  pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
  pipe.disable_lora()
  pipe.cuda()
+ pipe.load_ip_adapter_instantid("./checkpoints/ip-adapter.bin")

  @spaces.GPU
  def generate_image(
+     face_image_path, pose_image_path, prompt, negative_prompt, style_name, num_steps,
+     identitynet_strength_ratio, adapter_strength_ratio, canny_strength, depth_strength,
+     controlnet_selection, guidance_scale, seed, scheduler, enable_LCM, enhance_face_region,
  ):
      if enable_LCM:
          pipe.scheduler = diffusers.LCMScheduler.from_config(pipe.scheduler.config)
          pipe.enable_lora()
      else:
          pipe.disable_lora()
      scheduler_class_name = scheduler.split("-")[0]
      scheduler = getattr(diffusers, scheduler_class_name)
+     pipe.scheduler = scheduler.from_config(pipe.scheduler.config)

+     # Image preprocessing
      face_image = load_image(face_image_path)
      face_image = resize_img(face_image, max_side=1024)
+     face_image_cv2 = cv2.cvtColor(np.array(face_image), cv2.COLOR_RGB2BGR)
      face_info = app.get(face_image_cv2)

+     if not face_info:
+         raise ValueError("No face detected in the input image.")

+     face_emb = face_info[0]["embedding"]
+     face_kps = draw_kps(face_image, face_info[0]["kps"])
+     control_images = [face_kps]
+     control_scales = [identitynet_strength_ratio]

+     # MultiControlNet
+     if controlnet_selection:
+         selected_models = [controlnet_map[s] for s in controlnet_selection]
+         selected_conditions = [controlnet_map_fn[s](face_image) for s in controlnet_selection]
+         control_images += selected_conditions
+         control_scales += [canny_strength, depth_strength]

+         pipe.controlnet = MultiControlNetModel([controlnet_identitynet] + selected_models)

      generator = torch.Generator(device=device).manual_seed(seed)
+     output = pipe(
+         prompt=prompt, negative_prompt=negative_prompt, image_embeds=face_emb,
+         image=control_images, controlnet_conditioning_scale=control_scales,
+         num_inference_steps=num_steps, guidance_scale=guidance_scale, generator=generator,
      )
+     return output.images[0]