Update app.py

app.py CHANGED
@@ -15,8 +15,6 @@ import torch
 from diffusers import AutoencoderKL, StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 from typing import Tuple
 import paramiko
-import gc
-import time
 import datetime
 from gradio import themes
 from image_gen_aux import UpscaleWithModel
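The gc and time imports are dropped here, so any leftover manual-cleanup calls would now raise NameError. For reference, a minimal sketch of the cleanup helper these imports usually back in ZeroGPU Spaces (an assumption; no such helper is visible in this diff):

import gc
import torch

def free_gpu_memory():
    # Drop unreachable Python objects, then return PyTorch's cached CUDA blocks.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()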
@@ -41,7 +39,7 @@ FTP_PASS = "GoogleBez12!"
 FTP_DIR = "1ink.us/stable_diff/" # Remote directory on FTP server
 
 DESCRIPTIONXX = """
-## ⚡⚡⚡⚡ REALVISXL V5.0 BF16
+## ⚡⚡⚡⚡ REALVISXL V5.0 BF16 IP Adapter ⚡⚡⚡⚡
 """
 
 examples = [
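The new heading advertises an IP-Adapter build, but the adapter itself is not loaded anywhere in this diff. For context, a minimal sketch of how an IP-Adapter is usually attached to an SDXL pipeline in diffusers; the h94/IP-Adapter weights and the 0.6 scale are assumptions, not taken from this commit:

import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "ford442/RealVisXL_V5.0_BF16", torch_dtype=torch.bfloat16
)
# Attach image-prompt conditioning; generation then accepts ip_adapter_image=...
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
pipe.set_ip_adapter_scale(0.6)  # weight of the image prompt relative to the text prompt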
@@ -50,13 +48,8 @@ examples = [
     "A profile photo of a dog, brown background, shot on Leica M6 --ar 128:85 --v 6.0 --style raw",
 ]
 
-MODEL_OPTIONS = {
-    "REALVISXL V5.0 BF16": "ford442/RealVisXL_V5.0_BF16",
-}
-
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
-
-ENABLE_CPU_OFFLOAD = 0
+
 BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
 
 device = torch.device("cuda:0")
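With the MODEL_OPTIONS dict and ENABLE_CPU_OFFLOAD flag gone, the Space is pinned to a single checkpoint and always keeps it on the GPU. A sketch of the resulting load path, assuming the usual diffusers entry point (load_and_prepare_model() itself is only partially visible in this diff):

import torch
from diffusers import StableDiffusionXLPipeline

MODEL_REPO = "ford442/RealVisXL_V5.0_BF16"  # the one entry the removed dict contained
device = torch.device("cuda:0")

def load_pipeline() -> StableDiffusionXLPipeline:
    pipe = StableDiffusionXLPipeline.from_pretrained(MODEL_REPO, torch_dtype=torch.bfloat16)
    # If CPU offload were still wanted, pipe.enable_model_cpu_offload() would replace this move.
    return pipe.to(device)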
@@ -152,8 +145,6 @@ def load_and_prepare_model():
     #pipe.vae = AutoencoderKL.from_pretrained('stabilityai/sdxl-vae-bf16',subfolder='vae')
     #pipe.vae = AutoencoderKL.from_pretrained('stabilityai/sdxl-vae',subfolder='vae',force_upcast=False,scaling_factor= 0.182158767676)
     #pipe.vae.to(torch.bfloat16)
-
-
 
     '''
     scaling_factor (`float`, *optional*, defaults to 0.18215):
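The commented-out lines record earlier VAE experiments, and the docstring excerpt explains scaling_factor (0.18215 by default for SD-family VAEs). If the bf16 VAE were re-enabled, the pattern would look roughly like the sketch below; the repo id and subfolder are copied from the comment above and are otherwise unverified:

import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

def attach_bf16_vae(pipe: StableDiffusionXLPipeline) -> StableDiffusionXLPipeline:
    vae = AutoencoderKL.from_pretrained(
        "stabilityai/sdxl-vae-bf16", subfolder="vae", torch_dtype=torch.bfloat16
    )
    # scaling_factor normally comes from the VAE config; override it only if the
    # checkpoint's value is known to be wrong (as the second commented line did).
    pipe.vae = vae
    return pipe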
@@ -187,8 +178,8 @@ def load_and_prepare_model():
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
 
-    pipe.to(device
-
+    pipe.to(device) #=device, dtype=torch.bfloat16)
+    pipe.to(torch.bfloat16)
     #apply_hidiffusion(pipe)
 
     #pipe.unet.set_default_attn_processor()
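The new version keeps two separate moves: pipe.to(device) for placement and pipe.to(torch.bfloat16) for the cast. A sketch of the equivalent combined form, plus the scheduler swap from the commented-out line above (a sketch, not the Space's exact code):

import torch
from diffusers import EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

def prepare(pipe: StableDiffusionXLPipeline) -> StableDiffusionXLPipeline:
    # Rebuild the scheduler from the pipeline's own config, as the commented line does.
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    # diffusers accepts device and dtype in one call; the diff does it in two.
    return pipe.to("cuda:0", torch.bfloat16)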
@@ -239,9 +230,9 @@ def save_image(img):
     return unique_name
 
 def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
-    filename= f'
+    filename= f'IP_{timestamp}.txt'
     with open(filename, "w") as f:
-        f.write(f"Realvis 5.0
+        f.write(f"Realvis 5.0 IP Adapter \n")
         f.write(f"Date/time: {timestamp} \n")
         f.write(f"Prompt: {prompt} \n")
         f.write(f"Steps: {num_inference_steps} \n")
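Both the note filename and its first line now carry the IP tag and key off timestamp, which the callers pass in. A minimal sketch of how such a timestamp is typically produced with the datetime import above (the exact format string is an assumption):

import datetime

# Filesystem-safe timestamp, e.g. "2025-01-31_14-32-08"; the real format is not shown in this diff.
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
filename = f"IP_{timestamp}.txt"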
@@ -250,10 +241,7 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
         f.write(f"Use Model Dtype: no \n")
         f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
         f.write(f"Model VAE: sdxl-vae to bfloat safetensor=false before cuda then attn_proc / scale factor 8 \n")
-        f.write(f"Model UNET:
-        f.write(f"Model HiDiffusion OFF \n")
-        f.write(f"Model do_resize ON \n")
-        f.write(f"added torch to prereq and changed accellerate \n")
+        f.write(f"Model UNET: ford442/RealVisXL_V5.0_BF16 \n")
     upload_to_ftp(filename)
 
 @spaces.GPU(duration=40)
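upload_to_ftp() is called here and in the generate_* functions but is not part of this diff. Given the paramiko import and the FTP_* constants, it is presumably an SFTP upload; a hedged sketch of such a helper (the function body and the FTP_HOST/FTP_USER names and port are assumptions, not the Space's actual code):

import os
import paramiko

def upload_to_ftp(filename: str) -> None:
    # Hypothetical reconstruction; FTP_HOST and FTP_USER are assumed to be defined
    # alongside the FTP_PASS and FTP_DIR constants near the top of app.py.
    transport = paramiko.Transport((FTP_HOST, 22))
    try:
        transport.connect(username=FTP_USER, password=FTP_PASS)
        sftp = paramiko.SFTPClient.from_transport(transport)
        sftp.put(filename, FTP_DIR + os.path.basename(filename))
        sftp.close()
    finally:
        transport.close()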
@@ -302,7 +290,7 @@ def generate_30(
     with torch.no_grad():
         upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
     downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"
+    downscale_path = f"rvIP_upscale_{timestamp}.png"
     downscale1.save(downscale_path,optimize=False,compress_level=0)
     upload_to_ftp(downscale_path)
     image_paths = [save_image(downscale1)]
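All three generate_* paths finish the same way: tile-upscale the SDXL output with UpscaleWithModel, LANCZOS-downscale by 4x, save a lossless PNG, and upload it. A sketch of that post-processing step on its own; the upscaler checkpoint named here is a placeholder, the Space loads its own elsewhere in app.py:

import torch
from PIL import Image
from image_gen_aux import UpscaleWithModel

# Placeholder upscaler checkpoint; not necessarily the one this Space actually uses.
upscaler = UpscaleWithModel.from_pretrained("Kim2091/UltraSharp").to("cuda")

def refine_and_save(sd_image: Image.Image, timestamp: str) -> Image.Image:
    with torch.no_grad():
        upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
    # 4x downscale with LANCZOS smooths the upscaler output before upload.
    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
    downscale1.save(f"rvIP_upscale_{timestamp}.png", optimize=False, compress_level=0)
    return downscale1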
@@ -356,7 +344,7 @@ def generate_60(
     with torch.no_grad():
         upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
     downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"
+    downscale_path = f"rvIP_upscale_{timestamp}.png"
     downscale1.save(downscale_path,optimize=False,compress_level=0)
     upload_to_ftp(downscale_path)
     image_paths = [save_image(downscale1)]
@@ -410,7 +398,7 @@ def generate_90(
     with torch.no_grad():
         upscale = upscaler(sd_image, tiling=True, tile_width=256, tile_height=256)
     downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
-    downscale_path = f"
+    downscale_path = f"rvIP_upscale_{timestamp}.png"
     downscale1.save(downscale_path,optimize=False,compress_level=0)
     upload_to_ftp(downscale_path)
     image_paths = [save_image(downscale1)]
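Each of the three variants also feeds the downscaled image through save_image(), whose tail (return unique_name) appears in an earlier hunk. A hedged sketch of what that helper typically looks like in these Spaces; the uuid-based naming is an assumption:

import uuid
from PIL import Image

def save_image(img: Image.Image) -> str:
    # Hypothetical reconstruction; only "return unique_name" is visible in this diff.
    unique_name = f"{uuid.uuid4()}.png"
    img.save(unique_name)
    return unique_name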