ivnban27-ctl committed · Commit 97926e6 · verified · 1 Parent(s): d2c9c50
models/ta_models/config.py CHANGED
@@ -28,9 +28,9 @@ BP_LAB2STR = {
 }
 
 QUESTION2PHASE = {
-    "question_1": ["0_ActiveEngagement","1_Explore"],
+    "question_1": ["0_ActiveEngagement"],
     "question_4": ["1_Explore"],
-    "question_5": ["0_ActiveEngagement", "1_Explore"],
+    "question_5": ["1_Explore"],
     # "question_7": ["1_Explore"],
     # "question_9": ["4_SP&NS"],
     "question_10": ["4_SP&NS"],
pages/convosim.py CHANGED
@@ -2,7 +2,7 @@ import os
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from utils.mongo_utils import get_db_client
+from utils.mongo_utils import get_db_client, update_convo
 from utils.app_utils import create_memory_add_initial_message, get_random_name, DEFAULT_NAMES_DF, are_models_alive
 from utils.memory_utils import clear_memory, push_convo2db
 from utils.chain_utils import get_chain, custom_chain_predict
@@ -99,6 +99,8 @@ def sent_request_llm(llm_chain, prompt):
     responses = custom_chain_predict(llm_chain, prompt, stopper)
     for response in responses:
         st.chat_message("assistant").write(response)
+    transcript = memoryA.load_memory_variables({})[memoryA.memory_key]
+    update_convo(st.session_state["db_client"], st.session_state["convo_id"], transcript)
 
 # @st.dialog("Bad Practice Detected")
 # def confirm_bp(bp_prediction, prompt):
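The two added lines persist the conversation so far: memoryA's buffer is read back as a single string and written to MongoDB via the new update_convo helper. A standalone sketch of that transcript read, assuming memoryA is a LangChain ConversationBufferMemory that returns a string buffer (the app's actual memory setup in utils/memory_utils.py may differ):

from langchain.memory import ConversationBufferMemory

# Assumed stand-in for the app's memoryA; memory_key and speaker prefixes are LangChain defaults here.
memoryA = ConversationBufferMemory(memory_key="history")
memoryA.save_context({"input": "Hi, I need to talk."}, {"output": "I'm here to listen."})

# Same expression as the line added in sent_request_llm above.
transcript = memoryA.load_memory_variables({})[memoryA.memory_key]
print(transcript)
# Human: Hi, I need to talk.
# AI: I'm here to listen.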
utils/mongo_utils.py CHANGED
@@ -41,6 +41,17 @@ def new_convo(client, issue, language, username, is_comparison, model_one, model
     logger.debug(f"DBUTILS: new convo id is {convo_id}")
     st.session_state['convo_id'] = convo_id
 
+def update_convo(client, convo_id, transcript=""):
+    from bson.objectid import ObjectId
+    db = client[DB_SCHEMA]
+    convos = db[DB_CONVOS]
+    myquery = { "_id": ObjectId(convo_id) }
+    newvalues = { "$set": { "transcript": transcript } }
+    result = convos.update_one(myquery, newvalues)
+    if result.matched_count == 1:
+        logger.debug(f"Updated conversation {convo_id}")
+
+
 def new_comparison(client, prompt_timestamp, completion_timestamp,
                    chat_history, prompt, completionA, completionB,
                    source="webapp", subset=None
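For context, update_convo is a thin wrapper around pymongo's update_one: it matches the conversation document by its ObjectId and overwrites (or creates, per MongoDB's $set semantics) the transcript field. A usage sketch under the assumption that get_db_client() takes no arguments and that DB_SCHEMA / DB_CONVOS are module-level constants already defined in utils/mongo_utils.py; the id and transcript strings below are placeholders:

from utils.mongo_utils import get_db_client, update_convo

client = get_db_client()                  # assumed to take no arguments, as imported in pages/convosim.py
convo_id = "665f1c2ab7e4a3d1f0a12345"     # hypothetical 24-character hex ObjectId string
update_convo(client, convo_id, transcript="texter: hi\nhelper: hello")

Note that pymongo's UpdateResult.matched_count counts documents matched by the filter even when nothing changed, so the debug log fires on every call for an existing conversation; modified_count would distinguish actual writes.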