wip
- app.py +21 -3
- requirements.txt +5 -0
app.py CHANGED
@@ -1,7 +1,25 @@
 import gradio as gr
+from diffusers import DiffusionPipeline
+import torch
 
-def …
-…
+def get_device():
+    if torch.cuda.is_available():
+        return "cuda"
+    else:
+        return "cpu"
 
-…
+def generate_image(prompt):
+    pipe_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
+    pipe = DiffusionPipeline.from_pretrained(pipe_id, torch_dtype=torch.float16).to("cuda")
+    pipe.load_lora_weights("timdpaep/t1m")
+    prompt = "professional photo, closeup photo of t1mLora, wearing black sweater, nature, gloomy, cloudy weather, bokeh <lora:t1m01:1>"
+
+    lora_scale= 0.9
+    image = pipe(
+        prompt, num_inference_steps=10, cross_attention_kwargs={"scale": lora_scale}, generator=torch.manual_seed(0)
+    ).to(get_device()).images[0]
+    return image
+
+
+iface = gr.Interface(fn=generate_image, inputs="textbox", outputs="image")
 iface.launch()
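As committed, app.py has a few issues that will show up at runtime: pipe(...) returns a pipeline output object that has no .to() method, so chaining .to(get_device()) onto its result raises an error; the pipeline is rebuilt inside generate_image on every Gradio call; the pipeline is moved to "cuda" unconditionally even though get_device() exists, which fails on a CPU-only Space; and the hard-coded prompt (including the Automatic1111-style <lora:t1m01:1> tag, which diffusers treats as plain prompt text) overwrites whatever the user types into the textbox. A minimal corrected sketch, keeping the same model and LoRA repos and the commit's defaults, might look like this:

import gradio as gr
import torch
from diffusers import DiffusionPipeline


def get_device():
    # Prefer the GPU when one is available, otherwise fall back to CPU.
    return "cuda" if torch.cuda.is_available() else "cpu"


device = get_device()
# Request half precision only on CUDA; fp16 on CPU is unsupported or very slow.
dtype = torch.float16 if device == "cuda" else torch.float32

# Build the pipeline once at startup; constructing it inside generate_image
# would reload the weights on every request.
pipe = DiffusionPipeline.from_pretrained(
    "SG161222/Realistic_Vision_V6.0_B1_noVAE", torch_dtype=dtype
).to(device)
pipe.load_lora_weights("timdpaep/t1m")


def generate_image(prompt):
    # LoRA influence, passed through cross_attention_kwargs as in the commit.
    lora_scale = 0.9
    result = pipe(
        prompt,  # use the textbox input instead of a hard-coded prompt
        num_inference_steps=10,
        cross_attention_kwargs={"scale": lora_scale},
        generator=torch.manual_seed(0),
    )
    return result.images[0]


iface = gr.Interface(fn=generate_image, inputs="textbox", outputs="image")
iface.launch()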
requirements.txt ADDED
@@ -0,0 +1,5 @@
+gradio
+diffusers
+accelerate
+transformers
+torch
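One hedged note on the new requirements.txt: the packages are unpinned, so the Space installs whatever versions resolve at build time, and recent diffusers releases may require the peft package for load_lora_weights. If the build fails around LoRA loading, an adjusted list (with peft added as an assumption, not taken from the commit) would be:

gradio
diffusers
accelerate
transformers
torch
peft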