Update app.py

This commit adds Hugging Face token handling, a Korean-to-English prompt translator (Helsinki-NLP/opus-mt-ko-en), and Korean labels, placeholder text, and example prompts for the Gradio UI.

app.py CHANGED
@@ -4,8 +4,18 @@ import random
 import spaces
 import torch
 import time
+import os
 from diffusers import DiffusionPipeline
 from custom_pipeline import FLUXPipelineWithIntermediateOutputs
+from transformers import pipeline
+
+# Get the Hugging Face token
+hf_token = os.getenv("HF_TOKEN")
+if not hf_token:
+    raise ValueError("HF_TOKEN environment variable is not set. Please set it to your Hugging Face token.")
+
+# Load the translation model (with token authentication)
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", use_auth_token=hf_token)
 
 # Constants
 MAX_SEED = np.iinfo(np.int32).max
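A note on the new translator setup: Helsinki-NLP/opus-mt-ko-en is a public model, so the token only matters for gated or private assets, and recent transformers releases prefer token= over the deprecated use_auth_token=. A minimal load-and-call sketch under those assumptions (the token= spelling depends on the installed transformers version):

import os
from transformers import pipeline

# Load the Korean-to-English translator once at startup.
# token= replaces the deprecated use_auth_token= in recent transformers;
# for this public checkpoint, passing None also works.
hf_token = os.getenv("HF_TOKEN")
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", token=hf_token)

# A translation pipeline returns a list of dicts keyed by "translation_text".
print(translator("이미지 향상")[0]["translation_text"])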
@@ -17,14 +27,35 @@ DEFAULT_INFERENCE_STEPS = 1
 # Device and model setup
 dtype = torch.float16
 pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
-    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
+    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype, use_auth_token=hf_token
 ).to("cuda")
 torch.cuda.empty_cache()
 
+# Korean menu-name dictionary
+korean_labels = {
+    "Generated Image": "생성된 이미지",
+    "Prompt": "프롬프트",
+    "Enhance Image": "이미지 향상",
+    "Advanced Options": "고급 옵션",
+    "Seed": "시드",
+    "Randomize Seed": "시드 무작위화",
+    "Width": "너비",
+    "Height": "높이",
+    "Inference Steps": "추론 단계",
+    "Inspiration Gallery": "영감 갤러리"
+}
+
+def translate_if_korean(text):
+    if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
+        return translator(text, use_auth_token=hf_token)[0]['translation_text']
+    return text
+
 # Inference function
 @spaces.GPU(duration=25)
 def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, randomize_seed=False, num_inference_steps=DEFAULT_INFERENCE_STEPS):
 
+    prompt = translate_if_korean(prompt)
+
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
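translate_if_korean detects Korean via two Unicode ranges: U+3131–U+3163 (Hangul compatibility jamo) and U+AC00–U+D7A3 (precomposed Hangul syllables). Below is a self-contained sketch of that check (the name contains_hangul is illustrative, not from the commit). Note also that passing use_auth_token to the pipeline call itself, rather than at construction time, is probably redundant, since authentication is only needed when the model is downloaded.

def contains_hangul(text: str) -> bool:
    # U+3131..U+3163: Hangul compatibility jamo (ㄱ ... ㅣ)
    # U+AC00..U+D7A3: precomposed Hangul syllables (가 ... 힣)
    return any(
        '\u3131' <= ch <= '\u3163' or '\uac00' <= ch <= '\ud7a3'
        for ch in text
    )

assert contains_hangul("시드 무작위화")       # Korean label -> True
assert not contains_hangul("Randomize Seed")  # ASCII only -> False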
@@ -40,18 +71,17 @@ def generate_image(prompt, seed=42, width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT,
         height=height,
         generator=generator
     ):
-        latency = f"…"
+        latency = f"처리 시간: {(time.time()-start_time):.2f} 초"
         yield img, seed, latency
 
-
 # Example prompts
 examples = [
-    "…",
-    "…",
-    "…",
-    "…",
-    "…",
-    "…",
+    "달에서 알에서 부화하는 작은 우주 비행사",
+    "안녕하세요 세상이라고 쓰인 표지판을 들고 있는 고양이",
+    "비너 슈니첼의 애니메이션 일러스트레이션",
+    "하늘을 나는 자동차와 네온 불빛이 있는 미래적인 도시 풍경",
+    "긴 갈색 웨이브 머리를 올려 묶고 안경을 쓴 젊은 여성의 사진. 그녀는 흰 피부에 눈과 입술을 강조한 은은한 화장을 했습니다. 그녀는 검은색 상의를 입었습니다. 배경은 도시 건물 외관으로 보이며, 햇빛이 그녀의 얼굴에 따뜻한 빛을 비추고 있습니다.",
+    "스티브 잡스를 스타워즈 영화 캐릭터로 상상해보세요"
 ]
 
 css = """
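For readers who don't read Korean: the new latency string reads roughly "Processing time: X.XX s", and the replacement example prompts translate to, approximately, "a tiny astronaut hatching from an egg on the moon", "a cat holding a sign that says hello world", "an anime illustration of a Wiener schnitzel", "a futuristic cityscape with flying cars and neon lights", "a photo of a young woman with long wavy brown hair tied up and glasses, fair skin, subtle makeup emphasizing her eyes and lips, wearing a black top, against a city building facade with warm sunlight on her face", and "imagine Steve Jobs as a Star Wars movie character".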
@@ -60,37 +90,36 @@ footer {
 }
 """
 
-
 # --- Gradio UI ---
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
     with gr.Column(elem_id="app-container"):
 
         with gr.Row():
             with gr.Column(scale=3):
-                result = gr.Image(label="Generated Image", show_label=False, interactive=False)
+                result = gr.Image(label=korean_labels["Generated Image"], show_label=False, interactive=False)
             with gr.Column(scale=1):
                 prompt = gr.Text(
-                    label="Prompt",
-                    placeholder="…",
+                    label=korean_labels["Prompt"],
+                    placeholder="생성하고 싶은 이미지를 설명하세요...",
                     lines=3,
                     show_label=False,
                     container=False,
                 )
-                enhanceBtn = gr.Button("🚀 Enhance Image")
+                enhanceBtn = gr.Button(f"🚀 {korean_labels['Enhance Image']}")
 
-                with gr.Column("Advanced Options"):
+                with gr.Column(korean_labels["Advanced Options"]):
                     with gr.Row():
                         latency = gr.Text(show_label=False)
                     with gr.Row():
-                        seed = gr.Number(label="Seed", value=42, precision=0)
-                        randomize_seed = gr.Checkbox(label="Randomize Seed", value=False)
+                        seed = gr.Number(label=korean_labels["Seed"], value=42, precision=0)
+                        randomize_seed = gr.Checkbox(label=korean_labels["Randomize Seed"], value=False)
                     with gr.Row():
-                        width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
-                        height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
-                        num_inference_steps = gr.Slider(label="Inference Steps", minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)
+                        width = gr.Slider(label=korean_labels["Width"], minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_WIDTH)
+                        height = gr.Slider(label=korean_labels["Height"], minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=DEFAULT_HEIGHT)
+                        num_inference_steps = gr.Slider(label=korean_labels["Inference Steps"], minimum=1, maximum=4, step=1, value=DEFAULT_INFERENCE_STEPS)
 
         with gr.Row():
-            gr.Markdown("### 🌟 Inspiration Gallery")
+            gr.Markdown(f"### 🌟 {korean_labels['Inspiration Gallery']}")
         with gr.Row():
             gr.Examples(
                 examples=examples,
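Two small observations on this hunk, offered as review notes rather than as part of the commit: result and prompt keep show_label=False, so the korean_labels lookups on those two components are never rendered; and gr.Column does not take a title string, so the "고급 옵션" (Advanced Options) text here likely has no visible effect. If a titled, collapsible panel is intended, gr.Accordion is the usual Gradio component; a hedged alternative sketch:

# Alternative sketch (not what the commit does): a collapsible, titled
# "Advanced Options" panel. gr.Accordion takes a visible label; gr.Column does not.
with gr.Accordion(korean_labels["Advanced Options"], open=False):
    with gr.Row():
        latency = gr.Text(show_label=False)
    with gr.Row():
        seed = gr.Number(label=korean_labels["Seed"], value=42, precision=0)
        randomize_seed = gr.Checkbox(label=korean_labels["Randomize Seed"], value=False)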
@@ -122,4 +151,4 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
             )
 
 # Launch the app
-demo.launch()
+demo.launch()