Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -257,6 +257,14 @@ def generate_30(
     num_inference_steps: int = 125,
     latent_file = gr.File(), # Add latents file input
     latent_file_2 = gr.File(), # Add latents file input
+    latent_file_3 = gr.File(), # Add latents file input
+    latent_file_4 = gr.File(), # Add latents file input
+    latent_file_5 = gr.File(), # Add latents file input
+    latent_file_1_scale: float = 3.8,
+    latent_file_2_scale: float = 3.8,
+    latent_file_3_scale: float = 3.8,
+    latent_file_4_scale: float = 3.8,
+    latent_file_5_scale: float = 3.8,
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
@@ -268,6 +276,18 @@ def generate_30(
         sd_image_b = Image.open(latent_file_2.name)
     else:
         sd_image_b = None
+    if latent_file_3 is not None: # Check if a latent file is provided
+        sd_image_c = Image.open(latent_file_3.name)
+    else:
+        sd_image_c = None
+    if latent_file_4 is not None: # Check if a latent file is provided
+        sd_image_d = Image.open(latent_file_4.name)
+    else:
+        sd_image_d = None
+    if latent_file_5 is not None: # Check if a latent file is provided
+        sd_image_e = Image.open(latent_file_5.name)
+    else:
+        sd_image_e = None
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     filename= f'rv_IP_{timestamp}.png'
     print("-- using image file --")
@@ -276,9 +296,16 @@ def generate_30(
     sd_image = ip_model.generate(
         pil_image=sd_image_a,
         pil_image_2=sd_image_b,
+        pil_image_3=sd_image_c,
+        pil_image_4=sd_image_d,
+        pil_image_5=sd_image_e,
         prompt=prompt,
         negative_prompt=negative_prompt,
-
+        scale_1=latent_file_1_scale,
+        scale_2=latent_file_2_scale,
+        scale_3=latent_file_3_scale,
+        scale_4=latent_file_4_scale,
+        scale_5=latent_file_5_scale,
         num_samples=samples,
         seed=seed,
         num_inference_steps=num_inference_steps,
@@ -299,7 +326,7 @@ def generate_30(
         print('-- IMAGE REQUIRED --')
     return image_paths
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=70)
 def generate_60(
     prompt: str = "",
     negative_prompt: str = "",
@@ -311,6 +338,14 @@ def generate_60(
     num_inference_steps: int = 125,
     latent_file = gr.File(), # Add latents file input
     latent_file_2 = gr.File(), # Add latents file input
+    latent_file_3 = gr.File(), # Add latents file input
+    latent_file_4 = gr.File(), # Add latents file input
+    latent_file_5 = gr.File(), # Add latents file input
+    latent_file_1_scale: float = 3.8,
+    latent_file_2_scale: float = 3.8,
+    latent_file_3_scale: float = 3.8,
+    latent_file_4_scale: float = 3.8,
+    latent_file_5_scale: float = 3.8,
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
@@ -322,6 +357,18 @@ def generate_60(
         sd_image_b = Image.open(latent_file_2.name)
     else:
         sd_image_b = None
+    if latent_file_3 is not None: # Check if a latent file is provided
+        sd_image_c = Image.open(latent_file_3.name)
+    else:
+        sd_image_c = None
+    if latent_file_4 is not None: # Check if a latent file is provided
+        sd_image_d = Image.open(latent_file_4.name)
+    else:
+        sd_image_d = None
+    if latent_file_5 is not None: # Check if a latent file is provided
+        sd_image_e = Image.open(latent_file_5.name)
+    else:
+        sd_image_e = None
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     filename= f'rv_IP_{timestamp}.png'
     print("-- using image file --")
@@ -330,9 +377,16 @@ def generate_60(
     sd_image = ip_model.generate(
         pil_image=sd_image_a,
         pil_image_2=sd_image_b,
+        pil_image_3=sd_image_c,
+        pil_image_4=sd_image_d,
+        pil_image_5=sd_image_e,
         prompt=prompt,
         negative_prompt=negative_prompt,
-
+        scale_1=latent_file_1_scale,
+        scale_2=latent_file_2_scale,
+        scale_3=latent_file_3_scale,
+        scale_4=latent_file_4_scale,
+        scale_5=latent_file_5_scale,
         num_samples=samples,
         seed=seed,
         num_inference_steps=num_inference_steps,
@@ -353,7 +407,7 @@ def generate_60(
         print('-- IMAGE REQUIRED --')
     return image_paths
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=100)
 def generate_90(
     prompt: str = "",
     negative_prompt: str = "",
@@ -365,6 +419,14 @@ def generate_90(
     num_inference_steps: int = 125,
     latent_file = gr.File(), # Add latents file input
     latent_file_2 = gr.File(), # Add latents file input
+    latent_file_3 = gr.File(), # Add latents file input
+    latent_file_4 = gr.File(), # Add latents file input
+    latent_file_5 = gr.File(), # Add latents file input
+    latent_file_1_scale: float = 3.8,
+    latent_file_2_scale: float = 3.8,
+    latent_file_3_scale: float = 3.8,
+    latent_file_4_scale: float = 3.8,
+    latent_file_5_scale: float = 3.8,
     samples=1,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
@@ -376,6 +438,18 @@ def generate_90(
         sd_image_b = Image.open(latent_file_2.name)
     else:
         sd_image_b = None
+    if latent_file_3 is not None: # Check if a latent file is provided
+        sd_image_c = Image.open(latent_file_3.name)
+    else:
+        sd_image_c = None
+    if latent_file_4 is not None: # Check if a latent file is provided
+        sd_image_d = Image.open(latent_file_4.name)
+    else:
+        sd_image_d = None
+    if latent_file_5 is not None: # Check if a latent file is provided
+        sd_image_e = Image.open(latent_file_5.name)
+    else:
+        sd_image_e = None
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     filename= f'rv_IP_{timestamp}.png'
     print("-- using image file --")
@@ -384,9 +458,16 @@ def generate_90(
     sd_image = ip_model.generate(
         pil_image=sd_image_a,
         pil_image_2=sd_image_b,
+        pil_image_3=sd_image_c,
+        pil_image_4=sd_image_d,
+        pil_image_5=sd_image_e,
         prompt=prompt,
         negative_prompt=negative_prompt,
-
+        scale_1=latent_file_1_scale,
+        scale_2=latent_file_2_scale,
+        scale_3=latent_file_3_scale,
+        scale_4=latent_file_4_scale,
+        scale_5=latent_file_5_scale,
         num_samples=samples,
         seed=seed,
         num_inference_steps=num_inference_steps,
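Each of generate_30, generate_60, and generate_90 now repeats the same open-if-provided block five times (latent_file through latent_file_5). Below is a minimal sketch of a helper that would collapse that repetition; it is hypothetical, not part of this commit, and it assumes the uploaded gr.File object exposes a .name path exactly as the existing code already relies on.

from PIL import Image

def open_image_prompt(latent_file):
    # Return a PIL image for an uploaded gr.File slot, or None when the slot is empty.
    if latent_file is None:
        return None
    return Image.open(latent_file.name)

# Illustrative use inside the generate_* functions (not committed code):
# sd_image_a, sd_image_b, sd_image_c, sd_image_d, sd_image_e = (
#     open_image_prompt(f)
#     for f in (latent_file, latent_file_2, latent_file_3, latent_file_4, latent_file_5)
# )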
@@ -452,7 +533,45 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
 
         with gr.Row():
             latent_file = gr.File(label="Image Prompt (Required)")
+            file_1_strength = gr.Slider(
+                label="Img 1 Str",
+                minimum=0.0,
+                maximum=16.0,
+                step=0.01,
+                value=3.8,
+            )
             latent_file_2 = gr.File(label="Image Prompt 2 (Optional)")
+            file_2_strength = gr.Slider(
+                label="Img 2 Str",
+                minimum=0.0,
+                maximum=16.0,
+                step=0.01,
+                value=3.8,
+            )
+            latent_file_3 = gr.File(label="Image Prompt 3 (Optional)")
+            file_3_strength = gr.Slider(
+                label="Img 3 Str",
+                minimum=0.0,
+                maximum=16.0,
+                step=0.01,
+                value=3.8,
+            )
+            latent_file_4 = gr.File(label="Image Prompt 4 (Optional)")
+            file_4_strength = gr.Slider(
+                label="Img 4 Str",
+                minimum=0.0,
+                maximum=16.0,
+                step=0.01,
+                value=3.8,
+            )
+            latent_file_5 = gr.File(label="Image Prompt 5 (Optional)")
+            file_5_strength = gr.Slider(
+                label="Img 5 Str",
+                minimum=0.0,
+                maximum=16.0,
+                step=0.01,
+                value=3.8,
+            )
         style_selection = gr.Radio(
             show_label=True,
             container=True,
@@ -541,6 +660,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             latent_file,
             latent_file_2,
+            latent_file_3,
+            latent_file_4,
+            latent_file_5,
+            file_1_strength,
+            file_2_strength,
+            file_3_strength,
+            file_4_strength,
+            file_5_strength,
             samples,
         ],
         outputs=[result],
@@ -563,6 +690,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             latent_file,
             latent_file_2,
+            latent_file_3,
+            latent_file_4,
+            latent_file_5,
+            file_1_strength,
+            file_2_strength,
+            file_3_strength,
+            file_4_strength,
+            file_5_strength,
             samples,
         ],
         outputs=[result],
@@ -585,6 +720,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             num_inference_steps,
             latent_file,
             latent_file_2,
+            latent_file_3,
+            latent_file_4,
+            latent_file_5,
+            file_1_strength,
+            file_2_strength,
+            file_3_strength,
+            file_4_strength,
+            file_5_strength,
             samples,
         ],
         outputs=[result],
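The five new sliders reach the generate_* functions positionally: each inputs list adds the extra files and strengths in the same order as the new keyword parameters, which is how Gradio maps event inputs to arguments. How ip_model.generate applies scale_1 through scale_5 is outside this diff; as a rough illustration only, per-image scales of this kind are commonly used to weight each reference image's embedding. A hypothetical sketch under that assumption, not the repo's implementation:

import torch

def combine_image_embeds(embeds, scales):
    # Weighted average of per-image embeddings; empty slots (None) are skipped.
    # Hypothetical: the actual ip_model may apply scale_1..scale_5 differently.
    used = [(e, s) for e, s in zip(embeds, scales) if e is not None]
    if not used:
        raise ValueError("at least one image prompt is required")
    total = sum(s for _, s in used)
    return sum(e * s for e, s in used) / total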