Update app.py
app.py CHANGED
@@ -2,223 +2,85 @@
 
 # OpenAI Chat completion
 import os
-import openai
 from openai import AsyncOpenAI # importing openai for API usage
 import chainlit as cl # importing chainlit for our app
-
-from chainlit import message
+from chainlit.prompt import Prompt, PromptMessage # importing prompt tools
 from chainlit.playground.providers import ChatOpenAI # importing ChatOpenAI tools
 from dotenv import load_dotenv
 
 
-from chainlit.playground.providers import ChatOpenAI
-import chainlit as cl
-
-
 openai_api_key = os.getenv("OPENAI_API_KEY")
 
-
 load_dotenv()
 
 # ChatOpenAI Templates
-
-
-
-# user_template = """{input}
-# Think through your response step by step.
-# """
-
-
-# @cl.on_chat_start # marks a function that will be executed at the start of a user session
-# async def start_chat():
-#     settings = {
-#         "model": "gpt-3.5-turbo",
-#         "temperature": 0,
-#         "max_tokens": 500,
-#         "top_p": 1,
-#         "frequency_penalty": 0,
-#         "presence_penalty": 0,
-#     }
-
-#     cl.user_session.set("settings", settings)
-
-
-# @cl.on_message # marks a function that should be run each time the chatbot receives a message from a user
-# async def main(message: cl.Message):
-#     settings = cl.user_session.get("settings")
-
-#     client = AsyncOpenAI()
-
-#     print(message.content)
+system_template = """You are a helpful assistant who always speaks in a pleasant tone!
+"""
 
-
-
-
-#             PromptMessage(
-#                 role="system",
-#                 template=system_template,
-#                 formatted=system_template,
-#             ),
-#             PromptMessage(
-#                 role="user",
-#                 template=user_template,
-#                 formatted=user_template.format(input=message.content),
-#             ),
-#         ],
-#         inputs={"input": message.content},
-#         settings=settings,
-#     )
-
-#     print([m.to_openai() for m in prompt.messages])
-
-#     msg = cl.Message(content="")
-
-#     # Call OpenAI
-#     async for stream_resp in await client.chat.completions.create(
-#         messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
-#     ):
-#         token = stream_resp.choices[0].delta.content
-#         if not token:
-#             token = ""
-#         await msg.stream_token(token)
-
-#     # Update the prompt object with the completion
-#     prompt.completion = msg.content
-#     msg.prompt = prompt
-
-#     # Send and close the message stream
-#     await msg.send()
-
-
-# template = "Hello, {name}!"
-# variables = {"name": "John"}
-
-# settings = {
-#     "model": "gpt-3.5-turbo",
-#     "temperature": 0,
-#     # ... more settings
-# }
-
-#-------------------------------------------------------------
-
-# @cl.step(type="llm")
-# async def call_llm():
-#     generation = cl.ChatGeneration(
-#         provider=ChatOpenAI.id,
-#         variables=variables,
-#         settings=settings,
-#         messages=[
-#             {
-#                 "content": template.format(**variables),
-#                 "role":"user"
-#             },
-#         ],
-#     )
-
-#     # Make the call to OpenAI
-#     response = await client.chat.completions.create(
-#         messages=generation.messages, **settings
-#     )
-
-#     generation.message_completion = {
-#         "content": response.choices[0].message.content,
-#         "role": "assistant"
-#     }
-
-#     # Add the generation to the current step
-#     cl.context.current_step.generation = generation
-
-#     return generation.message_completion["content"]
-
-
-# @cl.on_chat_start
-# async def start():
-#     await call_llm()
-
-#-------------------------------------------------------------
-#******
-
-# @cl.on_message
-# async def on_message(message: cl.Message):
-#     msg = cl.Message(content="")
-#     await msg.send()
-
-#     # do some work
-#     await cl.sleep(2)
-
-#     msg.content = f"Processed message {message.content}"
-
-#     await msg.update()
-
-
-#------------------------------------
-#**************************
-
-client = AsyncOpenAI(api_key="sk-proj-[REDACTED]")
-import chainlit as cl
-
-settings = {
-    "model": "gpt-3.5-turbo",
-    "temperature": 0,
-    "max_tokens": 500,
-    "top_p": 1,
-    "frequency_penalty": 0,
-    "presence_penalty": 0,
-}
+user_template = """{input}
+Think through your response step by step.
+"""
 
 
-@cl.on_chat_start
-def start_chat():
-
-    "
-
-
-
-
-
-
-    cl.user_session.set(
-        "settings",
-        settings
-    )
-
+@cl.on_chat_start # marks a function that will be executed at the start of a user session
+async def start_chat():
+    settings = {
+        "model": "gpt-3.5-turbo",
+        "temperature": 0,
+        "max_tokens": 500,
+        "top_p": 1,
+        "frequency_penalty": 0,
+        "presence_penalty": 0,
+    }
 
-
-async def main(message: cl.Message):
-    message_history = cl.user_session.get("message_history")
-    message_history.append({"role": "user", "content": message.content})
+    cl.user_session.set("settings", settings)
 
-    msg = cl.Message(content="")
-    await msg.send()
 
-
-
+@cl.on_message # marks a function that should be run each time the chatbot receives a message from a user
+async def main(message: cl.Message):
+    settings = cl.user_session.get("settings")
+
+    client = AsyncOpenAI(api_key=openai_api_key)
+
+    print(message.content)
+
+    prompt = Prompt(
+        provider=ChatOpenAI.id,
+        messages=[
+            PromptMessage(
+                role="system",
+                template=system_template,
+                formatted=system_template,
+            ),
+            PromptMessage(
+                role="user",
+                template=user_template,
+                formatted=user_template.format(input=message.content),
+            ),
+        ],
+        inputs={"input": message.content},
+        settings=settings,
     )
 
-
-        if token := part.choices[0].delta.content or "":
-            await msg.stream_token(token)
-
-    message_history.append({"role": "assistant", "content": msg.content})
-    await msg.update()
+    print([m.to_openai() for m in prompt.messages])
 
+    msg = cl.Message(content="")
 
-#
-
-
-
-
-
-
-
+    # Call OpenAI
+    async for stream_resp in await client.chat.completions.create(
+        messages=[m.to_openai() for m in prompt.messages], stream=True, **settings
+    ):
+        token = stream_resp.choices[0].delta.content
+        if not token:
+            token = ""
+        await msg.stream_token(token)
 
-#
-
+    # Update the prompt object with the completion
+    prompt.completion = msg.content
+    msg.prompt = prompt
 
-#
-
-#     pass
+    # Send and close the message stream
+    await msg.send()
 
-#     await cl.Message(content=f"Received {len(images)} image(s)").send()
 
 
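One detail of the updated file worth flagging: openai_api_key = os.getenv("OPENAI_API_KEY") runs before load_dotenv(), so a key that lives only in a .env file is read too late and openai_api_key stays None unless the variable is already exported in the environment (as it is on a configured Space). A minimal sketch of the more usual ordering, assuming python-dotenv is installed:

import os
from dotenv import load_dotenv

load_dotenv()  # copy .env entries into os.environ first
openai_api_key = os.getenv("OPENAI_API_KEY")  # then read the key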
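To try the updated app locally, the standard Chainlit workflow should apply: install the chainlit and openai packages, put OPENAI_API_KEY=<your key> in a .env file next to app.py, and start the server with chainlit run app.py -w (the -w flag reloads the app on file changes).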