app.py
ADDED
import gradio as gr
import torch
import streamlit as st
from PIL import Image
import numpy as np
from io import BytesIO
from diffusers import StableDiffusionImg2ImgPipeline

device = "cuda"

# Load the Stable Diffusion img2img pipeline; the Hugging Face token is read from the Space's secrets.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=st.secrets['USER_TOKEN'])
pipe = pipe.to(device)

def resize(value, img):
    # Open the uploaded file and resize it to a value x value square.
    img = Image.open(img)
    img = img.resize((value, value), Image.Resampling.LANCZOS)
    return img

def infer(source_img, prompt, guide, steps, seed, Strength):
    # Seeded generator, passed to the pipeline so the Seed slider actually influences the result.
    generator = torch.Generator(device).manual_seed(int(seed))
    source_image = resize(512, source_img)
    source_image.save('source.png')
    # init_image and the dict-style output below follow the early diffusers API this script was written against.
    image_list = pipe([prompt], init_image=source_image, strength=Strength,
                      guidance_scale=guide, num_inference_steps=steps, generator=generator)
    images = []
    safe_image = Image.open(r"unsafe.png")
    # Swap in the placeholder image for any output flagged by the safety checker.
    for i, image in enumerate(image_list["sample"]):
        if image_list["nsfw_content_detected"][i]:
            images.append(safe_image)
        else:
            images.append(image)
    # Return the first (and only) generated image, after the safety substitution above.
    return images[0]

gr.Interface(fn=infer,
             inputs=[gr.Image(source="upload", type="filepath", label="Raw Image"),
                     gr.Textbox(label='Prompt Input Text'),
                     gr.Slider(2, 15, value=7, label='Guidance Scale'),
                     gr.Slider(10, 50, value=25, step=1, label='Number of Iterations'),
                     gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True),
                     gr.Slider(label='Strength', minimum=0, maximum=1, step=.05, value=.5)],
             outputs='image').queue(max_size=10).launch()
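For a quick local check outside the Gradio UI, infer can be called directly once the pipeline has loaded. This is a hypothetical sketch, not part of app.py: the input path "example.jpg" and the prompt are assumed placeholders, and unsafe.png must exist next to the script.

# Hypothetical smoke test: run one img2img generation without launching the interface.
out = infer("example.jpg", "a watercolor painting of a lighthouse", guide=7, steps=25, seed=42, Strength=0.5)
out.save("output.png")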