Spaces:
Sleeping
Sleeping
lingyit1108
committed on
Commit
·
d026604
1
Parent(s):
c6352d6
tidy up single/double quotes
Browse files- streamlit_app.py +23 -18
streamlit_app.py
CHANGED
@@ -9,27 +9,31 @@ st.set_page_config(page_title="💬 Open AI Chatbot")
|
|
9 |
|
10 |
# Replicate Credentials
|
11 |
with st.sidebar:
|
12 |
-
st.title(
|
13 |
-
st.write(
|
14 |
-
if
|
15 |
-
st.success(
|
16 |
-
openai_api = st.secrets[
|
17 |
else:
|
18 |
-
openai_api = st.text_input(
|
19 |
-
if not (openai_api.startswith(
|
20 |
-
st.warning(
|
21 |
else:
|
22 |
-
st.success(
|
23 |
-
os.environ[
|
24 |
|
25 |
-
st.subheader(
|
26 |
-
selected_model = st.sidebar.selectbox(
|
27 |
-
|
28 |
-
|
|
|
|
|
|
|
29 |
|
30 |
# Store LLM generated responses
|
31 |
if "messages" not in st.session_state.keys():
|
32 |
-
st.session_state.messages = [{"role": "assistant",
|
|
|
33 |
|
34 |
# Display or clear chat messages
|
35 |
for message in st.session_state.messages:
|
@@ -37,8 +41,9 @@ for message in st.session_state.messages:
|
|
37 |
st.write(message["content"])
|
38 |
|
39 |
def clear_chat_history():
|
40 |
-
st.session_state.messages = [{"role": "assistant",
|
41 |
-
|
|
|
42 |
|
43 |
def generate_llm_response(client, prompt_input):
|
44 |
system_content = ("You are a helpful assistant. "
|
@@ -69,7 +74,7 @@ if st.session_state.messages[-1]["role"] != "assistant":
|
|
69 |
with st.spinner("Thinking..."):
|
70 |
response = generate_llm_response(client, prompt)
|
71 |
placeholder = st.empty()
|
72 |
-
full_response =
|
73 |
for chunk in response:
|
74 |
if chunk.choices[0].delta.content is not None:
|
75 |
full_response += chunk.choices[0].delta.content
|
|
|
9 |
|
10 |
# Replicate Credentials
|
11 |
with st.sidebar:
|
12 |
+
st.title("💬 Open AI Chatbot")
|
13 |
+
st.write("This chatbot is created using the GPT model from Open AI.")
|
14 |
+
if "OPENAI_API_KEY" in st.secrets:
|
15 |
+
st.success("API key already provided!", icon="✅")
|
16 |
+
openai_api = st.secrets["OPENAI_API_KEY"]
|
17 |
else:
|
18 |
+
openai_api = st.text_input("Enter OpenAI API token:", type="password")
|
19 |
+
if not (openai_api.startswith("sk-") and len(openai_api)==51):
|
20 |
+
st.warning("Please enter your credentials!", icon="⚠️")
|
21 |
else:
|
22 |
+
st.success("Proceed to entering your prompt message!", icon="👉")
|
23 |
+
os.environ["OPENAI_API_KEY"] = openai_api
|
24 |
|
25 |
+
st.subheader("Models and parameters")
|
26 |
+
selected_model = st.sidebar.selectbox("Choose an OpenAI model",
|
27 |
+
["gpt-3.5-turbo-1106", "gpt-4-1106-preview"],
|
28 |
+
key="selected_model")
|
29 |
+
temperature = st.sidebar.slider("temperature", min_value=0.01, max_value=2.0,
|
30 |
+
value=0.1, step=0.01)
|
31 |
+
st.markdown("📖 Reach out to SakiMilo to learn how to create this app!")
|
32 |
|
33 |
# Store LLM generated responses
|
34 |
if "messages" not in st.session_state.keys():
|
35 |
+
st.session_state.messages = [{"role": "assistant",
|
36 |
+
"content": "How may I assist you today?"}]
|
37 |
|
38 |
# Display or clear chat messages
|
39 |
for message in st.session_state.messages:
|
|
|
41 |
st.write(message["content"])
|
42 |
|
43 |
def clear_chat_history():
|
44 |
+
st.session_state.messages = [{"role": "assistant",
|
45 |
+
"content": "How may I assist you today?"}]
|
46 |
+
st.sidebar.button("Clear Chat History", on_click=clear_chat_history)
|
47 |
|
48 |
def generate_llm_response(client, prompt_input):
|
49 |
system_content = ("You are a helpful assistant. "
|
|
|
74 |
with st.spinner("Thinking..."):
|
75 |
response = generate_llm_response(client, prompt)
|
76 |
placeholder = st.empty()
|
77 |
+
full_response = ""
|
78 |
for chunk in response:
|
79 |
if chunk.choices[0].delta.content is not None:
|
80 |
full_response += chunk.choices[0].delta.content
|