Commit
·
0a2783f
1
Parent(s):
ed3a50c
Performance suggestions
Browse files
app.py
CHANGED
@@ -10,6 +10,7 @@ import numpy as np
|
|
10 |
import PIL.Image
|
11 |
import torch
|
12 |
from diffusers import AutoencoderKL, StableDiffusionXLPipeline
|
|
|
13 |
|
14 |
DESCRIPTION = "# Segmind Stable Diffusion: SSD-1B"
|
15 |
if not torch.cuda.is_available():
|
@@ -57,13 +58,16 @@ if torch.cuda.is_available():
|
|
57 |
refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
|
58 |
print("Model Compiled!")
|
59 |
|
|
|
|
|
|
|
|
|
60 |
|
61 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed in [0, MAX_SEED] when *randomize_seed* is set, else *seed*."""
    return random.randint(0, MAX_SEED) if randomize_seed else seed
|
65 |
|
66 |
-
|
67 |
def generate(
|
68 |
prompt: str,
|
69 |
negative_prompt: str = "",
|
@@ -80,7 +84,10 @@ def generate(
|
|
80 |
num_inference_steps_base: int = 25,
|
81 |
num_inference_steps_refiner: int = 25,
|
82 |
apply_refiner: bool = False,
|
|
|
|
|
83 |
) -> PIL.Image.Image:
|
|
|
84 |
generator = torch.Generator().manual_seed(seed)
|
85 |
|
86 |
if not use_negative_prompt:
|
@@ -91,7 +98,7 @@ def generate(
|
|
91 |
negative_prompt_2 = None # type: ignore
|
92 |
|
93 |
if not apply_refiner:
|
94 |
-
|
95 |
prompt=prompt,
|
96 |
negative_prompt=negative_prompt,
|
97 |
prompt_2=prompt_2,
|
@@ -126,7 +133,9 @@ def generate(
|
|
126 |
image=latents,
|
127 |
generator=generator,
|
128 |
).images[0]
|
129 |
-
|
|
|
|
|
130 |
|
131 |
|
132 |
examples = ['3d digital art of an adorable ghost, glowing within, holding a heart shaped pumpkin, Halloween, super cute, spooky haunted house background', 'beautiful lady, freckles, big smile, blue eyes, short ginger hair, dark makeup, wearing a floral blue vest top, soft light, dark grey background', 'professional portrait photo of an anthropomorphic cat wearing fancy gentleman hat and jacket walking in autumn forest.', 'an astronaut sitting in a diner, eating fries, cinematic, analog film', 'Albert Einstein in a surrealist Cyberpunk 2077 world, hyperrealistic', 'cinematic film still of Futuristic hero with golden dark armour with machine gun, muscular body']
|
@@ -172,7 +181,6 @@ with gr.Blocks(css="style.css") as demo:
|
|
172 |
placeholder="Enter a negative prompt",
|
173 |
visible=False,
|
174 |
)
|
175 |
-
|
176 |
seed = gr.Slider(
|
177 |
label="Seed",
|
178 |
minimum=0,
|
@@ -273,12 +281,6 @@ with gr.Blocks(css="style.css") as demo:
|
|
273 |
negative_prompt_2.submit,
|
274 |
run_button.click,
|
275 |
],
|
276 |
-
fn=randomize_seed_fn,
|
277 |
-
inputs=[seed, randomize_seed],
|
278 |
-
outputs=seed,
|
279 |
-
queue=False,
|
280 |
-
api_name=False,
|
281 |
-
).then(
|
282 |
fn=generate,
|
283 |
inputs=[
|
284 |
prompt,
|
@@ -296,6 +298,7 @@ with gr.Blocks(css="style.css") as demo:
|
|
296 |
num_inference_steps_base,
|
297 |
num_inference_steps_refiner,
|
298 |
apply_refiner,
|
|
|
299 |
],
|
300 |
outputs=result,
|
301 |
api_name="run",
|
|
|
10 |
import PIL.Image
|
11 |
import torch
|
12 |
from diffusers import AutoencoderKL, StableDiffusionXLPipeline
|
13 |
+
import uuid
|
14 |
|
15 |
DESCRIPTION = "# Segmind Stable Diffusion: SSD-1B"
|
16 |
if not torch.cuda.is_available():
|
|
|
58 |
refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
|
59 |
print("Model Compiled!")
|
60 |
|
61 |
+
def save_image(img):
    """Persist *img* as a uniquely named PNG in the working directory.

    Returns the generated file name so callers (e.g. the Gradio handler)
    can hand the path back to the UI.
    """
    file_name = f"{uuid.uuid4()}.png"
    img.save(file_name)
    return file_name
|
65 |
|
66 |
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Pick a new random seed when requested; otherwise pass *seed* through unchanged."""
    if not randomize_seed:
        return seed
    return random.randint(0, MAX_SEED)
|
70 |
|
|
|
71 |
def generate(
|
72 |
prompt: str,
|
73 |
negative_prompt: str = "",
|
|
|
84 |
num_inference_steps_base: int = 25,
|
85 |
num_inference_steps_refiner: int = 25,
|
86 |
apply_refiner: bool = False,
|
87 |
+
randomize_seed: bool = False,
|
88 |
+
progress = gr.Progress(track_tqdm=True)
|
89 |
) -> PIL.Image.Image:
|
90 |
+
seed = randomize_seed_fn(seed, randomize_seed)
|
91 |
generator = torch.Generator().manual_seed(seed)
|
92 |
|
93 |
if not use_negative_prompt:
|
|
|
98 |
negative_prompt_2 = None # type: ignore
|
99 |
|
100 |
if not apply_refiner:
|
101 |
+
image = pipe(
|
102 |
prompt=prompt,
|
103 |
negative_prompt=negative_prompt,
|
104 |
prompt_2=prompt_2,
|
|
|
133 |
image=latents,
|
134 |
generator=generator,
|
135 |
).images[0]
|
136 |
+
|
137 |
+
image_path = save_image(image)
|
138 |
+
return image_path
|
139 |
|
140 |
|
141 |
examples = ['3d digital art of an adorable ghost, glowing within, holding a heart shaped pumpkin, Halloween, super cute, spooky haunted house background', 'beautiful lady, freckles, big smile, blue eyes, short ginger hair, dark makeup, wearing a floral blue vest top, soft light, dark grey background', 'professional portrait photo of an anthropomorphic cat wearing fancy gentleman hat and jacket walking in autumn forest.', 'an astronaut sitting in a diner, eating fries, cinematic, analog film', 'Albert Einstein in a surrealist Cyberpunk 2077 world, hyperrealistic', 'cinematic film still of Futuristic hero with golden dark armour with machine gun, muscular body']
|
|
|
181 |
placeholder="Enter a negative prompt",
|
182 |
visible=False,
|
183 |
)
|
|
|
184 |
seed = gr.Slider(
|
185 |
label="Seed",
|
186 |
minimum=0,
|
|
|
281 |
negative_prompt_2.submit,
|
282 |
run_button.click,
|
283 |
],
|
|
|
|
|
|
|
|
|
|
|
|
|
284 |
fn=generate,
|
285 |
inputs=[
|
286 |
prompt,
|
|
|
298 |
num_inference_steps_base,
|
299 |
num_inference_steps_refiner,
|
300 |
apply_refiner,
|
301 |
+
randomize_seed
|
302 |
],
|
303 |
outputs=result,
|
304 |
api_name="run",
|