Create app.py
app.py
ADDED
@@ -0,0 +1,56 @@
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, StoppingCriteriaList
import os
import torch

theme = "darkgrass"
title = "Polyglot(Korean) Demo"
model_name = "EleutherAI/polyglot-ko-1.3b"

# Strings the model is banned from generating: ellipses, "(중략)" ("(omitted)"), and URLs.
bad_words = [
    '...',
    '....',
    '(중략)',
    'http'
]

# "A demo page showcasing the polyglot (1.3B-parameter) Korean model."
description = "polyglot (1.3B 파라미터 사이즈) 한국어 모델을 시연하는 데모페이지 입니다."
article = "<p style='text-align: center'><a href='https://github.com/EleutherAI/polyglot' target='_blank'>Polyglot: Large Language Models of Well-balanced Competence in Multi-languages</a></p>"
examples = [
    ["CPU와 GPU의 차이는,"],  # "The difference between a CPU and a GPU is,"
    ["질문: 우크라이나 전쟁이 세계3차대전으로 확전이 될까요? \n답변:"],  # "Question: Will the war in Ukraine escalate into World War III? \nAnswer:"
    ["2040년 미국은, "]  # "In 2040, the United States, "
]
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name
)
model.eval()

# Alternative generation path, used by the commented-out return in predict() below.
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, eos_token_id=tokenizer.eos_token_id)

def predict(text):
    with torch.no_grad():
        tokens = tokenizer(text, return_tensors="pt").input_ids
        # Sample until <|endoftext|> is generated or max_new_tokens is reached.
        gen_tokens = model.generate(
            tokens, do_sample=True, temperature=0.8, max_new_tokens=64, top_k=50, top_p=0.8,
            no_repeat_ngram_size=3, repetition_penalty=1.2,
            bad_words_ids=[
                tokenizer.encode(bad_word) for bad_word in bad_words
            ],
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id
        )
        generated = tokenizer.batch_decode(gen_tokens)[0]
        return generated
        # return pipe(text)[0]['generated_text']

iface = gr.Interface(
    fn=predict,
    inputs='text',
    outputs='text',
    examples=examples,
    title=title,
    description=description,
    article=article,
    theme=theme
)

iface.launch()
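
A caveat about the bad_words_ids argument above: BPE tokenizers such as polyglot's assign different token ids to a word depending on whether it carries a leading space, so encoding the bare strings only blocks occurrences that are not preceded by a space. A minimal sketch of a stricter ban, added here for illustration and not part of the committed file:

# Sketch: ban both the bare and the leading-space tokenizations of each bad word,
# since e.g. tokenizer.encode("http") and tokenizer.encode(" http") differ under BPE.
bad_words_ids = [tokenizer.encode(w) for w in bad_words] + \
                [tokenizer.encode(" " + w) for w in bad_words]
# ...then pass bad_words_ids=bad_words_ids to model.generate().

Running python app.py serves the interface locally; calling iface.launch(share=True) instead would additionally expose a temporary public URL.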