Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -9,13 +9,8 @@ from diffusers import DiffusionPipeline
|
|
9 |
from custom_pipeline import FLUXPipelineWithIntermediateOutputs
|
10 |
from transformers import pipeline
|
11 |
|
12 |
-
#
|
13 |
-
|
14 |
-
if not hf_token:
|
15 |
-
raise ValueError("HF_TOKEN environment variable is not set. Please set it to your Hugging Face token.")
|
16 |
-
|
17 |
-
# 번역 모델 로드 (토큰 인증 추가)
|
18 |
-
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", use_auth_token=hf_token)
|
19 |
|
20 |
# Constants
|
21 |
MAX_SEED = np.iinfo(np.int32).max
|
@@ -27,7 +22,7 @@ DEFAULT_INFERENCE_STEPS = 1
|
|
27 |
# Device and model setup
|
28 |
dtype = torch.float16
|
29 |
pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
|
30 |
-
"black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
|
31 |
).to("cuda")
|
32 |
torch.cuda.empty_cache()
|
33 |
|
@@ -47,7 +42,7 @@ korean_labels = {
|
|
47 |
|
48 |
def translate_if_korean(text):
|
49 |
if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
|
50 |
-
return translator(text
|
51 |
return text
|
52 |
|
53 |
# Inference function
|
|
|
9 |
from custom_pipeline import FLUXPipelineWithIntermediateOutputs
|
10 |
from transformers import pipeline
|
11 |
|
12 |
+
# 번역 모델 로드
|
13 |
+
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
# Constants
|
16 |
MAX_SEED = np.iinfo(np.int32).max
|
|
|
22 |
# Device and model setup
|
23 |
dtype = torch.float16
|
24 |
pipe = FLUXPipelineWithIntermediateOutputs.from_pretrained(
|
25 |
+
"black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
|
26 |
).to("cuda")
|
27 |
torch.cuda.empty_cache()
|
28 |
|
|
|
42 |
|
43 |
def translate_if_korean(text):
|
44 |
if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
|
45 |
+
return translator(text)[0]['translation_text']
|
46 |
return text
|
47 |
|
48 |
# Inference function
|