Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -214,6 +214,7 @@ def load_and_prepare_model():
 
 # Preload and compile both models
 pipe = load_and_prepare_model()
+ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
 
 MAX_SEED = np.iinfo(np.int32).max
 
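This first hunk hoists the IP-Adapter construction to module scope: the adapter is now built once, next to the pipeline, instead of once per request inside each handler (the later hunks delete the per-call construction). A minimal sketch of the pattern, assuming the tencent-ailab IP-Adapter package and the Space's existing `local_folder` and `ip_ckpt` values:

```python
import torch
from ip_adapter import IPAdapterXL  # tencent-ailab/IP-Adapter package (assumed)

device = "cuda"  # the handlers later seed torch.Generator(device='cuda')

# Load the SDXL pipeline and wrap it with the IP-Adapter exactly once,
# at import time; every generate_* handler then reuses the same objects
# instead of re-reading the image encoder and checkpoint per request.
pipe = load_and_prepare_model()                              # defined earlier in app.py
ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)  # shared by generate_30/60/90
```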
@@ -270,7 +271,19 @@ def generate_30(
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
+    prompt: str = "",
+    negative_prompt: str = "",
+    use_negative_prompt: bool = False,
+    style_selection: str = "",
+    width: int = 768,
+    height: int = 768,
+    guidance_scale: float = 4,
+    num_inference_steps: int = 125,
+    latent_file = gr.File(), # Add latents file input
+    latent_file_2 = gr.File(), # Add latents file input
+    samples=1,
+    progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
+):
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
@@ -288,10 +301,12 @@ def generate_30(
         pil_image=sd_image_a,
         pil_image_2=sd_image_b,
         prompt=prompt,
+        negative_prompt=negative_prompt,
+        scale=1.0,
         num_samples=samples,
+        seed=seed,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
-        seed=seed
     )
     sd_image[0].save(filename,optimize=False,compress_level=0)
     upload_to_ftp(filename)
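Inside the call itself, `negative_prompt` and an explicit IP-Adapter `scale` are now forwarded, and `seed` moves up into the keyword list (gaining a trailing comma) instead of dangling as the final argument. A sketch of the resulting call; note that `pil_image_2` appears to be a Space-specific extension, since upstream `IPAdapterXL.generate` takes a single `pil_image`:

```python
sd_image = ip_model.generate(
    pil_image=sd_image_a,            # primary image prompt
    pil_image_2=sd_image_b,          # second image prompt (Space-specific extension)
    prompt=prompt,
    negative_prompt=negative_prompt, # newly forwarded from the UI
    scale=1.0,                       # IP-Adapter image-conditioning strength
    num_samples=samples,
    seed=seed,                       # now explicit in the keyword list
    num_inference_steps=num_inference_steps,
    guidance_scale=guidance_scale,   # passed through to the SDXL pipeline
)
```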
@@ -323,7 +338,6 @@ def generate_60(
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
@@ -341,10 +355,12 @@ def generate_60(
         pil_image=sd_image_a,
         pil_image_2=sd_image_b,
         prompt=prompt,
+        negative_prompt=negative_prompt,
+        scale=1.0,
         num_samples=samples,
+        seed=seed,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
-        seed=seed
     )
     sd_image[0].save(filename,optimize=False,compress_level=0)
     upload_to_ftp(filename)
@@ -376,7 +392,6 @@ def generate_90(
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
     if latent_file is not None: # Check if a latent file is provided
@@ -394,10 +409,12 @@ def generate_90(
         pil_image=sd_image_a,
         pil_image_2=sd_image_b,
         prompt=prompt,
+        negative_prompt=negative_prompt,
+        scale=1.0,
         num_samples=samples,
+        seed=seed,
         num_inference_steps=num_inference_steps,
         guidance_scale=guidance_scale,
-        seed=seed
     )
     sd_image[0].save(filename,optimize=False,compress_level=0)
     upload_to_ftp(filename)
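The `generate_60` and `generate_90` hunks mirror `generate_30`: drop the per-call `IPAdapterXL` construction and forward the new keywords. All three handlers also keep the same seeding preamble; a hypothetical helper (`fresh_generator` is not in the Space's code) showing the pattern they repeat inline:

```python
import random

import numpy as np
import torch

MAX_SEED = np.iinfo(np.int32).max  # 2**31 - 1, the largest int32 value

def fresh_generator():
    # Each request draws a fresh seed, then pins a CUDA generator to it,
    # so re-running with the same seed reproduces the same image.
    seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device='cuda').manual_seed(seed)
    return seed, generator
```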