Spaces: Running on Zero
seawolf2357 committed: Update app.py
app.py CHANGED
@@ -51,21 +51,28 @@ def load_gallery_images():
 
     return [(os.path.join("gallery", row[2]), f"{row[0]}: {row[1]}") for row in rows]
 
-# Initialize the translation model
-
-
-
-
-
-
-
-
+# Initialize the translation model on CPU
+@spaces.CPU
+def init_translator():
+    return pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")
+
+# Run translation on CPU
+@spaces.CPU
+def translate_text(translator, text):
+    if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
+        translated = translator(text)[0]['translation_text']
+        return text, translated
+    return text, text
+
+# Prompt processing function
+def process_prompt(translator, prompt):
+    return translate_text(translator, prompt)
 
 KEY_JSON = os.getenv("KEY_JSON")
 with open(KEY_JSON, 'r') as f:
     loras = json.load(f)
 
-#
+# Initialize the base model
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
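Note on the gate in translate_text above: the ko→en model is only invoked when the prompt actually contains Korean, checked against the Hangul compatibility jamo block (U+3131–U+3163) and the precomposed syllable block (U+AC00–U+D7A3). A standalone sketch of the same logic, assuming the standard transformers pipeline API (the sample prompt is hypothetical):

from transformers import pipeline

def contains_hangul(text: str) -> bool:
    # Compatibility jamo (U+3131-U+3163) or precomposed syllables (U+AC00-U+D7A3).
    return any('\u3131' <= ch <= '\u3163' or '\uac00' <= ch <= '\ud7a3' for ch in text)

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device="cpu")

prompt = "노을 지는 바다"  # hypothetical Korean prompt
if contains_hangul(prompt):
    prompt_en = translator(prompt)[0]["translation_text"]
else:
    prompt_en = prompt  # non-Korean input passes through untouched

Because non-Korean prompts skip the model entirely, English input incurs no translation cost.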
@@ -136,11 +143,12 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
     ):
         yield img
 
-def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
+@spaces.GPU(duration=70)
+def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, translator, progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("You must select a LoRA before proceeding.")
 
-    original_prompt, english_prompt = process_prompt(prompt)
+    original_prompt, english_prompt = process_prompt(translator, prompt)
 
     selected_lora = loras[selected_index]
     lora_path = selected_lora["repo"]
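On ZeroGPU Spaces, @spaces.GPU attaches a GPU to the process only for the duration of the decorated call, and duration=70 raises the per-call time budget above the default. A minimal sketch of the documented pattern (the pipeline setup here is illustrative, not this app's code):

import spaces
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.to("cuda")  # on ZeroGPU this is allowed at startup; the device is attached lazily

@spaces.GPU(duration=70)  # hold a ZeroGPU slot for up to 70 seconds per call
def infer(prompt: str):
    # CUDA is only guaranteed to be usable inside the decorated call.
    return pipe(prompt).images[0]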
@@ -159,21 +167,21 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     with calculateDuration("LoRA unload"):
         pipe.unload_lora_weights()
 
-    #
+    # Load the LoRA weights
     with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"):
         if "weights" in selected_lora:
             pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
         else:
             pipe.load_lora_weights(lora_path)
 
-    #
+    # Set the seed for reproducibility
    with calculateDuration("Seed randomization"):
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
     image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
 
-    #
+    # Consume the generator to obtain the final image
     final_image = None
     step_counter = 0
     for image in image_generator:
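load_lora_weights resolves a lone .safetensors file automatically but needs an explicit weight_name when a repository ships several, which is why the code above branches on selected_lora["weights"]. A short sketch against the diffusers LoRA API (repo id and file name are hypothetical):

import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)

# Single-file repo: diffusers locates the weights itself.
pipe.load_lora_weights("some-user/flux-style-lora")

# Drop the previous adapter before loading the next, as run_lora does.
pipe.unload_lora_weights()

# Multi-file repo: pin the exact file.
pipe.load_lora_weights("some-user/flux-style-lora", weight_name="style_v2.safetensors")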
@@ -187,32 +195,31 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
 
     yield final_image, seed, gr.update(value=progress_bar, visible=False), original_prompt, english_prompt
 
-
 def get_huggingface_safetensors(link):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    split_link = link.split("/")
+    if(len(split_link) == 2):
+        model_card = ModelCard.load(link)
+        base_model = model_card.data.get("base_model")
+        print(base_model)
+        if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
+            raise Exception("Not a FLUX LoRA!")
+        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+        trigger_word = model_card.data.get("instance_prompt", "")
+        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+        fs = HfFileSystem()
+        try:
+            list_of_files = fs.ls(link, detail=False)
+            for file in list_of_files:
+                if(file.endswith(".safetensors")):
+                    safetensors_name = file.split("/")[-1]
+                if(not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
+                    image_elements = file.split("/")
+                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+        except Exception as e:
+            print(e)
+            gr.Warning("You didn't include a link nor a valid Hugging Face repository with a *.safetensors LoRA")
+            raise Exception("You didn't include a link nor a valid Hugging Face repository with a *.safetensors LoRA")
+        return split_link[1], link, safetensors_name, trigger_word, image_url
 
 def check_custom_model(link):
     if(link.startswith("https://")):
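The repo validation above leans on two huggingface_hub primitives: ModelCard.load, which parses the model card metadata (base_model, instance_prompt, widget), and HfFileSystem.ls, which lists repository files without downloading them. A condensed sketch of the same checks (the repo id is hypothetical):

from huggingface_hub import HfFileSystem, ModelCard

repo = "some-user/flux-style-lora"  # hypothetical "user/repo" id

# Reject anything whose card does not declare a FLUX base model.
card = ModelCard.load(repo)
if card.data.get("base_model") not in ("black-forest-labs/FLUX.1-dev", "black-forest-labs/FLUX.1-schnell"):
    raise Exception("Not a FLUX LoRA!")

# List files remotely and keep only the safetensors names.
fs = HfFileSystem()
weight_files = [path.split("/")[-1] for path in fs.ls(repo, detail=False) if path.endswith(".safetensors")]
print(weight_files)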
@@ -263,7 +270,6 @@ def add_custom_lora(custom_lora):
 def remove_custom_lora():
     return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
 
-
 run_lora.zerogpu = True
 
 css = """
@@ -279,8 +285,12 @@ if not os.path.exists('gallery'):
 # Initialize the database
 init_db()
 
-with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
+# Initialize the translator
+translator = init_translator()
 
+with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
+    # Add the translator as Gradio state
+    translator_state = gr.State(translator)
     selected_index = gr.State(None)
 
     with gr.Tabs():
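gr.State keeps a per-session Python object on the server and passes it into event handlers alongside ordinary component values, which is how the translator reaches run_lora without a global. A minimal sketch of the pattern, independent of this app:

import gradio as gr

def greet(prefix, name):
    # prefix arrives as the raw Python object stored in gr.State
    return prefix + name

with gr.Blocks() as demo:
    prefix_state = gr.State("Hello, ")  # any Python object can be stored
    name = gr.Textbox(label="name")
    out = gr.Textbox(label="greeting")
    name.submit(greet, inputs=[prefix_state, name], outputs=out)

One caveat, per Gradio's State docs: the initial value is deep-copied for each new session, so holding a large model object in State can be costly; a module-level global is a common alternative.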
@@ -342,11 +352,13 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
         inputs=[width, height],
         outputs=[prompt, selected_info, selected_index, width, height]
     )
+
     custom_lora.input(
         add_custom_lora,
         inputs=[custom_lora],
         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
     )
+
     custom_lora_button.click(
         remove_custom_lora,
         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
@@ -355,7 +367,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed,
+                seed, width, height, lora_scale, translator_state],
         outputs=[result, seed, progress_bar, original_prompt_display, english_prompt_display]
     )
 
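gr.on binds one callback to several triggers, so the Generate button and Enter in the prompt box share a single wiring; the new inputs list must match run_lora's signature positionally, with translator_state supplying the translator argument. A minimal sketch:

import gradio as gr

def run(text):
    return text.upper()

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="prompt")
    generate_button = gr.Button("Generate")
    result = gr.Textbox(label="result")
    # One handler serves both the button click and the textbox submit.
    gr.on(triggers=[generate_button.click, prompt.submit], fn=run, inputs=[prompt], outputs=[result])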