Connoriginal committed on
Commit cd46058 · 1 Parent(s): af2e3f3
Files changed (3)
  1. app.py +60 -50
  2. chatbot.py +82 -0
  3. requirements.txt +4 -1
app.py CHANGED
@@ -1,63 +1,73 @@
  import gradio as gr
- from huggingface_hub import InferenceClient
-
- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- def respond(
-     message,
-     history: list[tuple[str, str]],
-     system_message,
-     max_tokens,
-     temperature,
-     top_p,
- ):
-     messages = [{"role": "system", "content": system_message}]
-
-     for val in history:
-         if val[0]:
-             messages.append({"role": "user", "content": val[0]})
-         if val[1]:
-             messages.append({"role": "assistant", "content": val[1]})
-
-     messages.append({"role": "user", "content": message})
-
-     response = ""
-
-     for message in client.chat_completion(
-         messages,
-         max_tokens=max_tokens,
-         stream=True,
-         temperature=temperature,
-         top_p=top_p,
-     ):
-         token = message.choices[0].delta.content
-
-         response += token
-         yield response
-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
- demo = gr.ChatInterface(
-     respond,
-     additional_inputs=[
-         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-         gr.Slider(
-             minimum=0.1,
-             maximum=1.0,
-             value=0.95,
-             step=0.05,
-             label="Top-p (nucleus sampling)",
-         ),
-     ],
- )
-
-
  if __name__ == "__main__":
-     demo.launch()
+ import time
+ import yaml
  import gradio as gr
+ import os
+ import argparse
+ import random
+ from chatbot import ChatBot
+
+ # MARKDOWN
+ MARKDOWN = """
+ # Coffee-Gym Feedback Model
+ Welcome to the COFFEE-GYM demo page! This page will guide you through using our comprehensive RL environment for training models that provide feedback on code editing.
+
+ ## Prompt template
+ To use the COFFEE-GYM feedback model, you can follow the prompt template below:
+
+ ~~~json
+ Problem Description:
+ {problem}
+
+ Incorrect Code:
+ ```python
+ {code}
+ ```
+
+ Please generate feedback for the wrong code.
+ ~~~
+
+ ## Response
+ The chatbot will provide feedback on the incorrect code by analyzing the problem description, input, output, and the buggy solution provided in the prompt. You can interact with the chatbot to get immediate feedback, regenerate responses, and clear the chat history.
+
+ Feel free to explore and make the most out of COFFEE-GYM!
+ """
+
+
+ def main(args=None):
+
+     chatbot = ChatBot()
+
+     with gr.Blocks() as app:
+         ##### Playground #####
+         gr.Markdown(MARKDOWN)
+
+         chat_history = gr.Chatbot(show_copy_button=True)
+         input_text = gr.Textbox(label="Enter your prompt and click 'Send'", placeholder="Type your message...")
+         send_button = gr.Button("Send", elem_id="send-btn")
+
+         with gr.Row():
+             regenerate_button = gr.Button("Regenerate")
+             clear_button = gr.Button("Clear")
+
+         send_button.click(
+             fn=chatbot.chat,
+             inputs=[chat_history, input_text],
+             outputs=chat_history
+         )
+         regenerate_button.click(
+             fn=chatbot.regenerate,
+             inputs=[chat_history, input_text],
+             outputs=chat_history
+         )
+
+         clear_button.click(
+             fn=chatbot.clear_chat,
+             inputs=[],
+             outputs=chat_history
+         )
+
+     app.launch(share=True)
+
+
  if __name__ == "__main__":
+     main()
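The MARKDOWN section above documents the prompt template for the feedback model. As a rough illustration, the sketch below fills that template and queries the model directly with the same transformers pipeline that chatbot.py uses, skipping the Gradio UI. This is not part of the commit: the example problem, buggy code, and max_new_tokens value are made-up placeholders, the system prompt is a shortened form of the one in chatbot.py, and the final indexing assumes the chat-style output format of recent transformers text-generation pipelines.

~~~python
# Sketch (not part of the commit): query the feedback model directly using the
# prompt template from the MARKDOWN section above.
from transformers import pipeline

# Model ID taken from chatbot.py; the system prompt is shortened from the one
# defined there.
pipe = pipeline("text-generation", model="Team-Coffee-Gym/CoffeeGym")

SYSTEM_PROMPT = (
    "You are an exceptionally intelligent coding assistant developed by DLI lab "
    "that consistently delivers accurate and reliable responses to user instructions."
)

PROMPT_TEMPLATE = """Problem Description:
{problem}

Incorrect Code:
```python
{code}
```

Please generate feedback for the wrong code."""

# Made-up placeholder problem and buggy solution.
user_prompt = PROMPT_TEMPLATE.format(
    problem="Return the sum of all integers in a list.",
    code="def sum_list(xs):\n    total = 0\n    for x in xs:\n        total = x\n    return total",
)

messages = [
    {"role": "system", "content": SYSTEM_PROMPT},
    {"role": "user", "content": user_prompt},
]

# With chat-style input, recent transformers versions return the whole
# conversation under "generated_text"; the last message is the model's reply.
output = pipe(messages, max_new_tokens=512)
print(output[0]["generated_text"][-1]["content"])
~~~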
chatbot.py ADDED
@@ -0,0 +1,82 @@
+ import time
+ import json
+ import yaml
+ from typing import Union
+ import os
+
+
+ from transformers import pipeline
+
+
+ class ChatBot(object):
+     def __init__(self):
+         self.model = pipeline("text-generation", model="Team-Coffee-Gym/CoffeeGym")
+         self.chat_history = []
+         self.history = self._set_initial_history()
+
+
+     def _set_initial_history(self):
+         return ["You are an exceptionally intelligent coding assistant developed by DLI lab that consistently delivers accurate and reliable responses to user instructions. If somebody asks you who are you, answer as 'AI programming assistant based on DLI Lab'.\n\n"]
+
+     def set_model_input(self, input_text=None):
+         model_input = []
+
+         if input_text is not None:
+             self.history.append(input_text)
+
+
+         model_input.append({
+             "role": "system",
+             "content": self.history[0]
+         })
+
+         chat_history = self.history[1:]
+
+         for i in range(len(chat_history)):
+             if i % 2 == 0:
+                 model_input.append({
+                     "role": "user",
+                     "content": chat_history[i]
+                 })
+             else:
+                 model_input.append({
+                     "role": "assistant",
+                     "content": chat_history[i]
+                 })
+         return model_input
+
+
+     def chat(self, chat_history, input_text):
+
+         self.chat_history = chat_history
+
+         model_input = self.set_model_input(input_text)
+         response = self.model(model_input)
+
+         if response is not None:
+             self.history.append(response)
+             self.chat_history = self.chat_history + [(input_text, response)]
+
+
+         return self.chat_history
+
+
+     def regenerate(self, chat_history, input_text):
+
+         self.chat_history = chat_history[:-1]
+         self.history = self.history[:-2]
+
+         model_input = self.set_model_input(None)
+         response = self.model(model_input)
+
+         if response is not None:
+             self.history.append(response)
+             self.chat_history = self.chat_history + [(input_text, response)]
+
+
+         return self.chat_history
+
+     def clear_chat(self):
+         self.chat_history = []
+         self.history = self._set_initial_history()
+         return self.chat_history
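For reference, here is a minimal sketch of driving ChatBot outside the Gradio app. It only uses the signatures defined above: chat() takes the Gradio-style history, a list of (user, assistant) tuples, plus the new message and returns the updated history; regenerate() retries the last turn; clear_chat() resets both histories. The prompt text is a made-up placeholder, and the note about unwrapping the reply is an assumption about the transformers pipeline output format, not something chatbot.py itself guarantees.

```python
# Sketch (not part of the commit): exercise ChatBot outside the Gradio UI.
from chatbot import ChatBot

bot = ChatBot()

history = []  # Gradio-style chat history: list of (user, assistant) tuples
prompt = "Please generate feedback for this code: def add(a, b): return a - b"

# chat() builds the message list via set_model_input(), calls the pipeline,
# and returns the updated history.
history = bot.chat(history, prompt)
user_turn, assistant_turn = history[-1]

# chat() stores the raw pipeline output as the assistant turn; with chat-style
# input the reply text typically sits at
# assistant_turn[0]["generated_text"][-1]["content"] (an assumption about the
# transformers output format, not something chatbot.py unwraps itself).
print(assistant_turn)

# Retry the last turn, then reset both the Gradio history and the internal one.
history = bot.regenerate(history, prompt)
history = bot.clear_chat()
```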
requirements.txt CHANGED
@@ -1 +1,4 @@
- huggingface_hub==0.22.2
+ huggingface_hub==0.22.2
+ transformers==4.41.2
+ gradio==4.37.2
+ gradio-client==1.0.2
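Since the commit pins exact versions, a small environment check can catch mismatches before app.py is launched. The sketch below is not part of the commit; it only reads installed package metadata, and the underscored name gradio_client corresponds to the gradio-client pin.

```python
# Sketch (not part of the commit): check that installed packages match the
# pins in requirements.txt before running app.py.
from importlib.metadata import PackageNotFoundError, version

PINS = {
    "huggingface_hub": "0.22.2",
    "transformers": "4.41.2",
    "gradio": "4.37.2",
    "gradio_client": "1.0.2",  # distribution name for the gradio-client pin
}

for name, pinned in PINS.items():
    try:
        installed = version(name)
    except PackageNotFoundError:
        print(f"{name}: not installed (pinned {pinned})")
        continue
    status = "ok" if installed == pinned else f"mismatch, pinned {pinned}"
    print(f"{name}: {installed} ({status})")
```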