[update] add sent_tokenize
main.py CHANGED
@@ -12,6 +12,7 @@ os.environ["HUGGINGFACE_HUB_CACHE"] = hf_hub_cache
 import gradio as gr
 import nltk
 from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
+from transformers.generation.streamers import TextIteratorStreamer
 
 
 def main():
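`TextIteratorStreamer` is imported here but not referenced in any visible hunk, so it is presumably groundwork for a streaming translation path. For context, a minimal sketch of how the streamer is usually wired into `generate()` — the name `fn_stream` and the wiring are assumptions, not part of this commit:

```python
from threading import Thread

from transformers.generation.streamers import TextIteratorStreamer

# Hypothetical streaming counterpart to fn_non_stream (not in this commit):
# generate() runs in a background thread while the streamer yields decoded
# text chunks as they are produced.
def fn_stream(src_text, tgt_lang, tokenizer, model):
    encoded = tokenizer(src_text, return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True)
    Thread(
        target=model.generate,
        kwargs=dict(
            **encoded,
            streamer=streamer,
            # M2M100 needs the target language as the forced BOS token
            forced_bos_token_id=tokenizer.get_lang_id(tgt_lang),
        ),
    ).start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # a generator handler lets Gradio render partial output
```

A generator handler like this needs the request queue, which the launch change at the bottom of this diff enables.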
@@ -22,11 +23,11 @@ def main():
         }
     }
 
-    def …
-            …
-            …
-            …
-            …
+    def fn_non_stream(src_text: str,
+                      src_lang: str,
+                      tgt_lang: str,
+                      model_name: str,
+                      ):
         model_group = model_dict.get(model_name)
         if model_group is None:
             for k in list(model_dict.keys()):
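Only the new signature is visible in this hunk; per the commit title, the body presumably splits the input with nltk's `sent_tokenize` and translates one sentence at a time, which matches the `result += text_decoded[0]` context line in the next hunk. A sketch under that assumption — `translate_by_sentence` and everything not visible in the diff are inferred, not code from this commit:

```python
import nltk

nltk.download("punkt", quiet=True)  # sent_tokenize needs the punkt data

# Presumed shape of fn_non_stream's body: translate sentence by sentence
# with M2M100 and accumulate the decoded output.
def translate_by_sentence(src_text, src_lang, tgt_lang, tokenizer, model):
    tokenizer.src_lang = src_lang
    result = ""
    for sentence in nltk.tokenize.sent_tokenize(src_text):
        encoded = tokenizer(sentence, return_tensors="pt")
        generated_tokens = model.generate(
            **encoded,
            forced_bos_token_id=tokenizer.get_lang_id(tgt_lang),
        )
        text_decoded = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
        result += text_decoded[0] + " "
    return result.strip()
```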
@@ -54,6 +55,8 @@ def main():
             text_decoded = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
             result += text_decoded[0]
 
+        output.value = result
+
         return result
 
     title = "Multilingual Machine Translation"
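Worth noting: the new `output.value = result` line mutates the `gr.Textbox` component object inside the handler. Gradio populates outputs from the function's return value (the existing `return result`), not from component attribute writes, so this assignment most likely never reaches the UI.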
@@ -88,16 +91,16 @@ It was introduced in this [paper](https://arxiv.org/abs/2010.11125) and first re
 
     output = gr.Textbox(lines=4, label="Output Text")
 
-    …
-        fn=…
+    demo = gr.Interface(
+        fn=fn_non_stream,
         inputs=inputs,
         outputs=output,
         examples=examples,
         title=title,
         description=description,
-        cache_examples=…
+        cache_examples=False
     )
-    …
+    demo.queue().launch(debug=True, enable_queue=True)
 
     return
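`.queue()` and `enable_queue=True` both switch on Gradio's request queue: the launch flag is the older spelling, deprecated during the 3.x series and dropped in later releases, so passing both is redundant at best. A minimal equivalent on a recent Gradio (assuming nothing else relies on the flag):

```python
# .queue() already enables queuing; the legacy enable_queue= launch flag
# is redundant and not accepted by newer Gradio versions.
demo.queue().launch(debug=True)
```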