eagle0504 committed on
Commit 5bb7259 · verified · 1 Parent(s): 7c5a247

Update app.py

Files changed (1)
  1. app.py +61 -3
app.py CHANGED
@@ -1,6 +1,59 @@
 import streamlit as st
 
-st.title("Echo Bot")
+import os
+
+import google.generativeai as genai
+
+genai.configure(api_key=os.environ["GEMINI_API_KEY"])
+
+
+# Create the model
+# See https://ai.google.dev/api/python/google/generativeai/GenerativeModel
+generation_config = {
+    "temperature": 1,
+    "top_p": 0.95,
+    "top_k": 64,
+    "max_output_tokens": 8192,
+    "response_mime_type": "text/plain",
+}
+
+safety_settings = [
+    {
+        "category": "HARM_CATEGORY_HARASSMENT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+    },
+    {
+        "category": "HARM_CATEGORY_HATE_SPEECH",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+    },
+    {
+        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+    },
+    {
+        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+        "threshold": "BLOCK_MEDIUM_AND_ABOVE",
+    },
+]
+
+model = genai.GenerativeModel(
+    model_name="gemini-1.5-flash-latest",
+    safety_settings=safety_settings,
+    generation_config=generation_config,
+)
+
+chat_session = model.start_chat(
+    history=[]
+)
+
+# response = chat_session.send_message("INSERT_INPUT_HERE")
+
+# print(response.text)
+# print(chat_session.history)
+
+
+
+st.title("Gemini 1.5")
 
 # Initialize chat history
 if "messages" not in st.session_state:
@@ -13,14 +66,19 @@ for message in st.session_state.messages:
 
 # React to user input
 if prompt := st.chat_input("What is up?"):
+
     # Display user message in chat message container
     st.chat_message("user").markdown(prompt)
+
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    response = f"Echo: {prompt}"
+    # Response
+    response = chat_session.send_message(prompt)
+
     # Display assistant response in chat message container
     with st.chat_message("assistant"):
-        st.markdown(response)
+        st.markdown(response.text)
+
     # Add assistant response to chat history
     st.session_state.messages.append({"role": "assistant", "content": response})
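
For reference, a minimal sketch of the chat round-trip this commit builds on, assuming the google-generativeai package is installed and GEMINI_API_KEY is set in the environment. Note that send_message() returns a response object rather than a plain string, so the displayable reply lives on response.text; storing that string (rather than the response object) in session state keeps the saved history renderable on later reruns.

import os

import google.generativeai as genai

# Configure the SDK with the API key from the environment
genai.configure(api_key=os.environ["GEMINI_API_KEY"])

# Build the model and open a chat session with an empty history
model = genai.GenerativeModel(model_name="gemini-1.5-flash-latest")
chat_session = model.start_chat(history=[])

# Each send_message() call returns a response object; .text holds the reply string
response = chat_session.send_message("Hello, what can you do?")
print(response.text)

# The session accumulates turns, so a follow-up message keeps the prior context
follow_up = chat_session.send_message("Summarize that in one sentence.")
print(follow_up.text)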