Update app.py
app.py CHANGED
@@ -12,18 +12,11 @@ import gradio as gr
 import numpy as np
 from PIL import Image
 import torch
-from diffusers import AutoencoderKL, StableDiffusionXLPipeline
+from diffusers import AutoencoderKL, StableDiffusionXLPipeline
 from diffusers import EulerAncestralDiscreteScheduler
-#from diffusers import DPMSolverMultistepScheduler
-#from diffusers import AsymmetricAutoencoderKL
 from typing import Tuple
 import paramiko
-import gc
-import time
 import datetime
-#from diffusers.schedulers import AysSchedules
-from gradio import themes
-import gc

 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -84,91 +77,31 @@ DEFAULT_STYLE_NAME = "Style Zero"
 STYLE_NAMES = list(styles.keys())
 HF_TOKEN = os.getenv("HF_TOKEN")

-#sampling_schedule = AysSchedules["StableDiffusionXLTimesteps"]
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

-def load_and_prepare_model(
+def load_and_prepare_model():
     model_dtypes = {"ford442/RealVisXL_V5.0_BF16": torch.bfloat16,}
     dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to float32 if not found
-    #vaeX = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
     vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(device=device, dtype=torch.bfloat16)
-    #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
-    #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
-    #vaeX = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",use_safetensors=True)
-    #vaeX = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae', safety_checker=None, use_safetensors=False) # ,use_safetensors=True FAILS
-    #vaeX = AutoencoderKL.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
-    #unetX = UNet2DConditionModel.from_pretrained('SG161222/RealVisXL_V5.0',subfolder='unet') # ,use_safetensors=True FAILS
-    # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
-    #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear", steps_offset=1,timestep_spacing="trailing"))
-    #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler', steps_offset=1,timestep_spacing="trailing")
-    #sched = EulerAncestralDiscreteScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
     sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
-    #pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0").to(torch.bfloat16)
-    #pipeX = StableDiffusionXLPipeline.from_pretrained("ford442/Juggernaut-XI-v11-fp32",use_safetensors=True)
-
     pipe = StableDiffusionXLPipeline.from_pretrained(
         'ford442/RealVisXL_V5.0_BF16',
-        #'John6666/pornworks-sexy-beauty-v04-sdxl',
-        #'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
-        #'ford442/Juggernaut-XI-v11-fp32',
-        # 'SG161222/RealVisXL_V5.0',
         #torch_dtype=torch.bfloat16,
         add_watermarker=False,
-        # custom_pipeline="lpw_stable_diffusion_xl",
-        #use_safetensors=True,
-        # use_auth_token=HF_TOKEN,
-        # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
-        # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
-        # vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",repo_type='model',safety_checker=None),
-        #vae=vaeX.to(torch.bfloat16),
-        #unet=pipeX.unet,
-        #scheduler = sched,
-        # scheduler = EulerAncestralDiscreteScheduler.from_config(pipeX.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
-        #scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset =1)
     )
-
-    #pipe.unet=UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='unet').to(torch.bfloat16)
-    #pipe.unet=UNet2DConditionModel.from_pretrained('SG161222/RealVisXL_V5.0',subfolder='unet').to(torch.bfloat16)
-    #pipe.vae = AsymmetricAutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2').to(torch.bfloat16) # ,use_safetensors=True FAILS
-    #pipe.vae = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae') # ,use_safetensors=True FAILS
-    #pipe.vae.to(torch.bfloat16)
-    #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear",use_karras_sigmas=True, algorithm_type="dpmsolver++")
-    #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
-    #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
     pipe.vae = vaeXL #.to(torch.bfloat16)
-    #pipe.unet = unetX.to(torch.bfloat16)
     pipe.scheduler = sched
     pipe.vae.do_resize=False
-    #pipe.vae.do_rescale=False
-    #pipe.vae.do_convert_rgb=True
     pipe.vae.vae_scale_factor=8
-    #pipe.scheduler = sched
-    #pipe.vae=vae.to(torch.bfloat16)
-    #pipe.unet=pipeX.unet
-    #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
-    #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
-
     pipe.to(device=device, dtype=torch.bfloat16)
-    #pipe.to(torch.bfloat16)
-
-    #apply_hidiffusion(pipe)
-
-    #pipe.unet.set_default_attn_processor()
     pipe.vae.set_default_attn_processor()
-
-    print(f'Pipeline: ')
-    #print(f'_optional_components: {pipe._optional_components}')
-    #print(f'watermark: {pipe.watermark}')
-    print(f'image_processor: {pipe.image_processor}')
-    #print(f'feature_extractor: {pipe.feature_extractor}')
     print(f'init noise scale: {pipe.scheduler.init_noise_sigma}')
-    #print(f'UNET: {pipe.unet}')
     pipe.watermark=None
     pipe.safety_checker=None
     return pipe

 # Preload and compile both models
-
+pipe =load_and_prepare_model()

 MAX_SEED = np.iinfo(np.int32).max

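For reference, the loader that remains after this hunk can be read as the condensed sketch below. It is not part of the diff: imports and indentation are assumed, and the unused model_dtypes/dtype lookup is omitted here because model_id is no longer a parameter of load_and_prepare_model() after the signature change.

# Condensed sketch, not part of the commit; names taken from the hunk above,
# surrounding imports and indentation assumed.
import torch
from diffusers import AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def load_and_prepare_model():
    # BF16 SDXL VAE and an Euler-Ancestral scheduler with Karras sigmas
    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", use_safetensors=False).to(device=device, dtype=torch.bfloat16)
    sched = EulerAncestralDiscreteScheduler.from_pretrained(
        'ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',
        beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
        steps_offset=1, use_karras_sigmas=True)
    # Single checkpoint, watermarker disabled, then swap in the VAE and scheduler
    pipe = StableDiffusionXLPipeline.from_pretrained('ford442/RealVisXL_V5.0_BF16', add_watermarker=False)
    pipe.vae = vae
    pipe.scheduler = sched
    pipe.vae.do_resize = False
    pipe.vae.vae_scale_factor = 8
    pipe.to(device=device, dtype=torch.bfloat16)
    pipe.vae.set_default_attn_processor()
    pipe.watermark = None
    pipe.safety_checker = None
    return pipe

pipe = load_and_prepare_model()
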
@@ -227,49 +160,33 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):

 @spaces.GPU(duration=30)
 def generate_30(
-    model_choice: str,
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     style_selection: str = "",
-    seed: int = 1,
     width: int = 768,
     height: int = 768,
     guidance_scale: float = 4,
     num_inference_steps: int = 125,
-    randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    juggernaut: bool = False,
-    denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    #torch.backends.cudnn.benchmark = False
-    #torch.cuda.empty_cache()
-    #gc.collect()
-    global models
-    pipe = models[model_choice]
-    #if juggernaut == True:
-    # pipe.vae=vaeX
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
-        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
         "generator": generator,
-        # "timesteps": sampling_schedule,
         "output_type": "pil",
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
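With model_choice, seed, randomize_seed, juggernaut and denoise dropped from the signature, the options dict that generate_30 builds is a plain text-to-image request. A minimal sketch of that request follows; it assumes the module-level pipe and neg_prompt_2 from app.py, and a pipe(**options) call site that lies outside the lines shown in this hunk.

# Minimal sketch, not part of the commit; `pipe`, `neg_prompt_2`, and the final
# pipe(**options) call are assumptions based on context outside this hunk.
import torch

generator = torch.Generator(device='cuda').manual_seed(12345)
options = {
    "prompt": ["a photorealistic portrait"],
    "negative_prompt": ["deformed, distorted"],
    "negative_prompt_2": [neg_prompt_2],
    "width": 768,
    "height": 768,
    "guidance_scale": 4,
    "num_inference_steps": 125,
    "generator": generator,
    "output_type": "pil",
}
# "strength" is no longer passed, so this is a full text-to-image run rather than
# a partial-denoise (img2img-style) run.
rv_image = pipe(**options).images[0]
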
@@ -277,58 +194,39 @@ def generate_30(
     sd_image_path = f"rv50_B_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
-    #image_paths = save_image(rv_image)
-    #torch.cuda.empty_cache()
-    #gc.collect()
     unique_name = str(uuid.uuid4()) + ".png"
     os.symlink(sd_image_path, unique_name)
-    return [unique_name]
+    return [unique_name], seed

 @spaces.GPU(duration=60)
 def generate_60(
-    model_choice: str,
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     style_selection: str = "",
-    seed: int = 1,
     width: int = 768,
     height: int = 768,
     guidance_scale: float = 4,
-    num_inference_steps: int =
+    num_inference_steps: int = 125,
-    randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    juggernaut: bool = False,
-    denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    #torch.backends.cudnn.benchmark = True
-    #torch.cuda.empty_cache()
-    #gc.collect()
-    global models
-    pipe = models[model_choice]
-    #if juggernaut == True:
-    # pipe.vae=vaeX
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
-        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
         "generator": generator,
-        # "timesteps": sampling_schedule,
         "output_type": "pil",
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
@@ -336,58 +234,39 @@ def generate_60(
     sd_image_path = f"rv50_B_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
-    #image_paths = save_image(rv_image)
-    #torch.cuda.empty_cache()
-    #gc.collect()
     unique_name = str(uuid.uuid4()) + ".png"
     os.symlink(sd_image_path, unique_name)
-    return [unique_name]
+    return [unique_name], seed

 @spaces.GPU(duration=90)
 def generate_90(
-    model_choice: str,
     prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     style_selection: str = "",
-    seed: int = 1,
     width: int = 768,
     height: int = 768,
     guidance_scale: float = 4,
-    num_inference_steps: int =
+    num_inference_steps: int = 125,
-    randomize_seed: bool = False,
     use_resolution_binning: bool = True,
-    juggernaut: bool = False,
-    denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    #torch.backends.cudnn.benchmark = True
-    #torch.cuda.empty_cache()
-    #gc.collect()
-    global models
-    pipe = models[model_choice]
-    #if juggernaut == True:
-    # pipe.vae=vaeX
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
-    #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
         "negative_prompt_2": [neg_prompt_2],
-        "strength": denoise,
         "width": width,
         "height": height,
         "guidance_scale": guidance_scale,
         "num_inference_steps": num_inference_steps,
         "generator": generator,
-        # "timesteps": sampling_schedule,
         "output_type": "pil",
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    pipe.scheduler.set_timesteps(num_inference_steps,device)
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
@@ -395,12 +274,9 @@ def generate_90(
     sd_image_path = f"rv50_B_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
-    #image_paths = save_image(rv_image)
-    #torch.cuda.empty_cache()
-    #gc.collect()
     unique_name = str(uuid.uuid4()) + ".png"
     os.symlink(sd_image_path, unique_name)
-    return [unique_name]
+    return [unique_name], seed

 def load_predefined_images1():
     predefined_images1 = [
@@ -446,11 +322,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         result = gr.Gallery(label="Result", columns=1, show_label=False)

         with gr.Row():
-
-            label="Model Selection🔻",
-            choices=list(MODEL_OPTIONS.keys()),
-            value="REALVISXL V5.0 BF16"
-            )
+

         style_selection = gr.Radio(
             show_label=True,
@@ -471,22 +343,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
             visible=True,
         )
-        seed = gr.Slider(
-            label="Seed",
-            minimum=0,
-            maximum=MAX_SEED,
-            step=1,
-            value=0,
-        )
-        denoise = gr.Slider(
-            label="Denoising Strength",
-            minimum=0.0,
-            maximum=1.0,
-            step=0.01,
-            value=0.3,
-        )
-        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-        juggernaut = gr.Checkbox(label="Use Juggernaut VAE", value=False)
         with gr.Row():
             width = gr.Slider(
                 label="Width",
@@ -538,19 +394,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         # api_name="generate", # Add this line
         fn=generate_30,
         inputs=[
-            model_choice,
             prompt,
             negative_prompt,
             use_negative_prompt,
             style_selection,
-            seed,
             width,
             height,
             guidance_scale,
             num_inference_steps,
-            randomize_seed,
-            juggernaut,
-            denoise
         ],
         outputs=[result, seed],
     )
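The same trimming applies to the UI wiring: the handler for generate_30 now receives only the prompt, style, size, guidance, and step inputs. A sketch of the resulting binding, assuming a run_button trigger (the trigger component itself is outside this hunk):

# Sketch, not part of the commit; `run_button` is an assumed trigger name.
run_button.click(
    fn=generate_30,
    inputs=[
        prompt,
        negative_prompt,
        use_negative_prompt,
        style_selection,
        width,
        height,
        guidance_scale,
        num_inference_steps,
    ],
    # outputs still lists the seed component, whose slider definition was
    # removed in an earlier hunk of this same commit.
    outputs=[result, seed],
)
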
@@ -562,19 +413,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         # api_name="generate", # Add this line
         fn=generate_60,
         inputs=[
-            model_choice,
             prompt,
             negative_prompt,
             use_negative_prompt,
             style_selection,
-            seed,
             width,
             height,
             guidance_scale,
             num_inference_steps,
-            randomize_seed,
-            juggernaut,
-            denoise
         ],
         outputs=[result, seed],
     )
@@ -586,19 +432,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
         # api_name="generate", # Add this line
         fn=generate_90,
         inputs=[
-            model_choice,
             prompt,
             negative_prompt,
             use_negative_prompt,
             style_selection,
-            seed,
             width,
             height,
             guidance_scale,
             num_inference_steps,
-            randomize_seed,
-            juggernaut,
-            denoise
         ],
         outputs=[result, seed],
     )