nicholasKluge committed on
Commit f4edcad · 1 Parent(s): 2310c8c

Create app.py

Files changed (1)
  1. app.py +41 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ from transformers import BloomForCausalLM, BloomTokenizerFast
+ import gradio as gr
+
+ tokenizer = BloomTokenizerFast.from_pretrained('nicholasKluge/Aira-Instruct-PT-560M',
+                                                use_auth_token="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")
+ model = BloomForCausalLM.from_pretrained('nicholasKluge/Aira-Instruct-PT-560M',
+                                          use_auth_token="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")
+
+ import gradio as gr
+
+ title = "AIRA Demo 🤓"
+
+
+ with gr.Blocks() as demo:
+     chatbot = gr.Chatbot()
+     msg = gr.Textbox()
+     clear = gr.Button("Clear Conversation")
+
+     def respond(message, chat_history):
+         inputs = tokenizer(tokenizer.bos_token + message + tokenizer.eos_token, return_tensors="pt")
+
+         response = model.generate(**inputs,
+                                   bos_token_id=tokenizer.bos_token_id,
+                                   pad_token_id=tokenizer.pad_token_id,
+                                   eos_token_id=tokenizer.eos_token_id,
+                                   do_sample=True,
+                                   early_stopping=True,
+                                   top_k=50,
+                                   max_length=200,
+                                   top_p=0.95,
+                                   temperature=0.7,
+                                   num_return_sequences=1)
+
+         chat_history.append((f"👤 {message}", f"""🤖 {tokenizer.decode(response[0], skip_special_tokens=True).replace(message, "")}"""))
+
+         return "", chat_history
+
+     msg.submit(respond, [msg, chatbot], [msg, chatbot])
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+ demo.launch()
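
The from_pretrained calls in app.py pass a Hugging Face user access token as a string literal committed to the repository. Below is a minimal sketch of the same model and tokenizer loading with the token read from an environment variable instead, assuming a Space secret named HF_TOKEN; the variable name and the os.environ lookup are assumptions, not part of this commit.

import os

from transformers import BloomForCausalLM, BloomTokenizerFast

# Assumed setup: the access token is stored as a Space secret / environment
# variable named HF_TOKEN rather than hardcoded in the source file.
# If HF_TOKEN is unset, use_auth_token=None falls back to any cached login.
hf_token = os.environ.get("HF_TOKEN")

tokenizer = BloomTokenizerFast.from_pretrained("nicholasKluge/Aira-Instruct-PT-560M",
                                               use_auth_token=hf_token)
model = BloomForCausalLM.from_pretrained("nicholasKluge/Aira-Instruct-PT-560M",
                                         use_auth_token=hf_token)

With this setup the Gradio app behaves the same; only the source of the token changes.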