OmPrakashSingh1704 committed on
Commit 54e055a · 1 Parent(s): 892614d
options/Banner_Model/Image2Image_2.py CHANGED
@@ -1,27 +1,34 @@
-import spaces
-import torch
-from controlnet_aux import LineartDetector
-from diffusers import ControlNetModel,UniPCMultistepScheduler,StableDiffusionControlNetPipeline
-from PIL import Image
-
-device= "cuda" if torch.cuda.is_available() else "cpu"
-print("Using device for I2I_2:", device)
-
-@spaces.GPU(duration=100)
-def I2I_2(image, prompt,size,num_inference_steps,guidance_scale):
-    processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
-
-    checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
-    controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
-    pipe = StableDiffusionControlNetPipeline.from_pretrained(
-        "radames/stable-diffusion-v1-5-img2img", controlnet=controlnet, torch_dtype=torch.float16
-    ).to(device)
-    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
-    pipe.enable_model_cpu_offload()
-    if not isinstance(image, Image.Image):
-        image = Image.fromarray(image)
-    image.resize((size,size))
-    image=processor(image)
-    generator = torch.Generator(device=device).manual_seed(0)
-    image = pipe(prompt+"best quality, extremely detailed", num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",guidance_scale=guidance_scale).images[0]
-    return image
+# import spaces
+# import torch
+# from controlnet_aux import LineartDetector
+# from diffusers import ControlNetModel,UniPCMultistepScheduler,StableDiffusionControlNetPipeline
+# from PIL import Image
+
+# device= "cuda" if torch.cuda.is_available() else "cpu"
+# print("Using device for I2I_2:", device)
+
+# @spaces.GPU(duration=100)
+# def I2I_2(image, prompt,size,num_inference_steps,guidance_scale):
+#     processor = LineartDetector.from_pretrained("lllyasviel/Annotators")
+
+#     checkpoint = "ControlNet-1-1-preview/control_v11p_sd15_lineart"
+#     controlnet = ControlNetModel.from_pretrained(checkpoint, torch_dtype=torch.float16).to(device)
+#     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+#         "radames/stable-diffusion-v1-5-img2img", controlnet=controlnet, torch_dtype=torch.float16
+#     ).to(device)
+#     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+#     pipe.enable_model_cpu_offload()
+#     if not isinstance(image, Image.Image):
+#         image = Image.fromarray(image)
+#     image.resize((size,size))
+#     image=processor(image)
+#     generator = torch.Generator(device=device).manual_seed(0)
+#     image = pipe(prompt+"best quality, extremely detailed", num_inference_steps=num_inference_steps, generator=generator, image=image,negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",guidance_scale=guidance_scale).images[0]
+#     return image
+
+from gradio_client import Client
+def I2I_2(image, prompt,size,num_inference_steps,guidance_scale):
+    client = Client("https://hysts-controlnet-v1-1.hf.space/")
+    res=client.predict(image=image,prompt=prompt,additional_prompt="best quality, extremely detailed",negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",num_images=1,image_resolution=size,preprocess_resolution=size,num_steps=num_inference_steps,guidance_scale=guidance_scale,seed=0,preprocessor_name="Lineart")
    print(res)
+    return res
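
For context, a minimal sketch of how the rewritten, remote-backed I2I_2 might be smoke-tested. The Space URL and the predict() keyword names are taken from the diff above; handle_file, the local filename, and the sample argument values are assumptions (handle_file exists in gradio_client >= 1.0 for passing local files to a Space):

# Hypothetical smoke test for the gradio_client-based I2I_2; not part of the commit.
from gradio_client import Client, handle_file  # handle_file: assumed gradio_client >= 1.0

client = Client("https://hysts-controlnet-v1-1.hf.space/")
res = client.predict(
    image=handle_file("banner_input.png"),  # hypothetical local input image
    prompt="a mountain lake at sunrise",    # hypothetical prompt
    additional_prompt="best quality, extremely detailed",
    negative_prompt="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
    num_images=1,
    image_resolution=512,
    preprocess_resolution=512,
    num_steps=20,
    guidance_scale=9.0,
    seed=0,
    preprocessor_name="Lineart",
)
print(res)  # raw endpoint payload (often file paths), not a PIL.Image

Note the behavioral change recorded by this commit: the old implementation returned a PIL.Image, while the new one returns client.predict's raw result, so callers that expect an image object may need to adapt.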
options/__pycache__/Banner.cpython-310.pyc CHANGED
Binary files a/options/__pycache__/Banner.cpython-310.pyc and b/options/__pycache__/Banner.cpython-310.pyc differ