Spaces: Runtime error
Connoriginal committed
Commit cd46058 · 1 Parent(s): af2e3f3
initial

Files changed:
- app.py +60 -50
- chatbot.py +82 -0
- requirements.txt +4 -1
app.py
CHANGED
@@ -1,63 +1,73 @@
+import time
+import yaml
 import gradio as gr
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-            messages.append({"role": "assistant", "content": val[1]})
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
-        gr.
-        gr.
+import os
+import argparse
+import random
+from chatbot import ChatBot
+
+# MARKDOWN
+MARKDOWN = """
+# Coffee-Gym Feedback Model
+Welcome to the COFFEE-GYM demo page! This page will guide you through using our comprehensive RL environment for training models that provide feedback on code editing.
+
+## Prompt template
+To use the COFFEE-GYM feedback model, you can follow the prompt template below:
+
+~~~json
+Problem Description:
+{problem}
+
+Incorrect Code:
+```python
+{code}
+```
+
+Please generate feedback for the wrong code.
+~~~
+
+## Response
+The chatbot will provide feedback on the incorrect code by analyzing the problem description, input, output, and the buggy solution provided in the prompt. You can interact with the chatbot to get immediate feedback, regenerate responses, and clear the chat history.
+
+Feel free to explore and make the most out of COFFEE-GYM!
+"""
+
+
+def main():
+    chatbot = ChatBot()
+
+    with gr.Blocks() as app:
+        ##### Playground #####
+        gr.Markdown(MARKDOWN)
+
+        chat_history = gr.Chatbot(show_copy_button=True)
+        input_text = gr.Textbox(label="Enter your prompt and click 'Send'", placeholder="Type your message...")
+        send_button = gr.Button("Send", elem_id="send-btn")
+
+        with gr.Row():
+            regenerate_button = gr.Button("Regenerate")
+            clear_button = gr.Button("Clear")
+
+        send_button.click(
+            fn=chatbot.chat,
+            inputs=[chat_history, input_text],
+            outputs=chat_history
+        )
+        regenerate_button.click(
+            fn=chatbot.regenerate,
+            inputs=[chat_history, input_text],
+            outputs=chat_history
+        )
+
+        clear_button.click(
+            fn=chatbot.clear_chat,
+            inputs=[],
+            outputs=chat_history
+        )
+
+    app.launch(share=True)
+
+
 if __name__ == "__main__":
+    main()
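For reference, here is a minimal sketch of how the prompt template above could be filled in and sent straight to the feedback model with a transformers text-generation pipeline, bypassing the Gradio UI. The model ID `Team-Coffee-Gym/CoffeeGym` is the one loaded in chatbot.py below; the sample problem, buggy code, and `max_new_tokens` value are illustrative assumptions, and the output handling assumes the chat-style `generated_text` message list returned by recent transformers versions.

```python
# Illustrative only: fill the COFFEE-GYM prompt template and query the model
# directly, outside the Gradio app. The sample problem and code are made up.
from transformers import pipeline

PROMPT_TEMPLATE = (
    "Problem Description:\n{problem}\n\n"
    "Incorrect Code:\n```python\n{code}\n```\n\n"
    "Please generate feedback for the wrong code."
)

problem = "Given a list of integers, return the sum of the even numbers."
buggy_code = "def sum_even(nums):\n    return sum(n for n in nums if n % 2)"

pipe = pipeline("text-generation", model="Team-Coffee-Gym/CoffeeGym")
messages = [{"role": "user", "content": PROMPT_TEMPLATE.format(problem=problem, code=buggy_code)}]
output = pipe(messages, max_new_tokens=512)

# With chat-style (list-of-messages) input, the pipeline returns the full
# conversation under "generated_text"; the last entry is the model's feedback.
print(output[0]["generated_text"][-1]["content"])
```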
chatbot.py
ADDED
@@ -0,0 +1,82 @@
import time
import json
import yaml
from typing import Union
import os

from transformers import pipeline


class ChatBot(object):
    def __init__(self):
        # Feedback model served through a standard text-generation pipeline.
        self.model = pipeline("text-generation", model="Team-Coffee-Gym/CoffeeGym")
        self.chat_history = []
        self.history = self._set_initial_history()

    def _set_initial_history(self):
        return ["You are an exceptionally intelligent coding assistant developed by DLI lab that consistently delivers accurate and reliable responses to user instructions. If somebody asks you who are you, answer as 'AI programming assistant based on DLI Lab'.\n\n"]

    def set_model_input(self, input_text=None):
        # Convert the flat history (system prompt followed by alternating
        # user/assistant turns) into the chat format the pipeline expects.
        model_input = []

        if input_text is not None:
            self.history.append(input_text)

        model_input.append({
            "role": "system",
            "content": self.history[0]
        })

        chat_history = self.history[1:]

        for i in range(len(chat_history)):
            if i % 2 == 0:
                model_input.append({
                    "role": "user",
                    "content": chat_history[i]
                })
            else:
                model_input.append({
                    "role": "assistant",
                    "content": chat_history[i]
                })
        return model_input

    def chat(self, chat_history, input_text):
        self.chat_history = chat_history

        model_input = self.set_model_input(input_text)
        # With chat-style input, the pipeline returns the whole conversation
        # under "generated_text"; keep only the new assistant reply.
        response = self.model(model_input)[0]["generated_text"][-1]["content"]

        if response is not None:
            self.history.append(response)
            self.chat_history = self.chat_history + [(input_text, response)]

        self.log_chat()
        return self.chat_history

    def regenerate(self, chat_history, input_text):
        # Drop the last exchange from the UI history and the last model reply
        # from the prompt history, then query the model again.
        self.chat_history = chat_history[:-1]
        self.history = self.history[:-1]

        model_input = self.set_model_input(None)
        response = self.model(model_input)[0]["generated_text"][-1]["content"]

        if response is not None:
            self.history.append(response)
            self.chat_history = self.chat_history + [(input_text, response)]

        return self.chat_history

    def log_chat(self):
        # Placeholder: chat() calls log_chat(), but no logging is implemented
        # in this commit.
        pass

    def clear_chat(self):
        self.chat_history = []
        self.history = self._set_initial_history()
        return self.chat_history
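To make the wiring in app.py concrete, a hypothetical driver for ChatBot outside of Gradio is sketched below. It only illustrates the data shapes — `chat` takes the current `gr.Chatbot` history (a list of `(user, assistant)` tuples) plus the textbox text and returns the updated history, `regenerate` retries the last turn, and `clear_chat` resets both histories — and it assumes the CoffeeGym weights can be downloaded and loaded locally.

```python
# Hypothetical usage sketch for ChatBot (mirrors what the app.py callbacks do).
from chatbot import ChatBot

bot = ChatBot()
prompt = "Please generate feedback for this wrong code: def mx(a, b): return a if a < b else b"

history = []                                # same shape as gr.Chatbot history
history = bot.chat(history, prompt)         # -> [(prompt, feedback)]
print(history[-1][1])                       # the model's feedback for the last turn

history = bot.regenerate(history, prompt)   # drop the last reply and ask again
history = bot.clear_chat()                  # back to an empty history
```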
requirements.txt
CHANGED
@@ -1 +1,4 @@
-huggingface_hub==0.22.2
+huggingface_hub==0.22.2
+transformers==4.41.2
+gradio==4.37.2
+gradio-client==1.0.2