import pandas as pd
import streamlit as st
from streamlit.logger import get_logger
import os
import requests

from app_config import ENDPOINT_NAMES
from utils.memory_utils import change_memories
from models.model_seeds import seeds

logger = get_logger(__name__)

# TODO: Include more varied and representative names
DEFAULT_NAMES = ["Olivia", "Kit", "Abby", "Tom", "Carolyne", "Jessiny"]
DEFAULT_NAMES_DF = pd.read_csv("./utils/names.csv")
HEADERS = {
    "Authorization": f"Bearer {os.environ['DATABRICKS_TOKEN']}",
    "Content-Type": "application/json",
}

def get_random_name(gender="Neutral", ethnical_group="Neutral", names_df=None):
    """Sample a random name from names_df, optionally filtering by gender and
    ethnical_group; fall back to the unfiltered pool if no rows match."""
    if names_df is None:
        names_df = pd.DataFrame(DEFAULT_NAMES, columns=["name"])
        names_df["gender"] = "Neutral"
        names_df["ethnical_group"] = "Neutral"

    dfi = names_df

    if gender != "Neutral":
        dfi = dfi.query(f"gender == '{gender}'")
    if ethnical_group != "Neutral":
        dfi = dfi.query(f"ethnical_group == '{ethnical_group}'")
    # If the filters left nothing, sample from the full pool instead.
    if dfi.empty:
        dfi = names_df
    return dfi.sample(1)["name"].values[0]
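
# Usage sketch (illustrative values; assumes names.csv carries the "name",
# "gender" and "ethnical_group" columns referenced above):
#   texter_name = get_random_name(gender="Female", names_df=DEFAULT_NAMES_DF)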

def divide_messages(str_memory, str_ai_prefix="texter", str_human_prefix="helper", include_colon=True):
    """Split a memory string into individual messages keyed on the AI and
    human speaker prefixes."""
    message_delimiter = "$%$"
    colon = ":" if include_colon else ""
    # Insert a delimiter before every speaker prefix, then split on it.
    str_memory = f"{message_delimiter}{str_ai_prefix}{colon}".join(str_memory.split(f"{str_ai_prefix}{colon}"))
    str_memory = f"{message_delimiter}{str_human_prefix}{colon}".join(str_memory.split(f"{str_human_prefix}{colon}"))
    return str_memory.split(message_delimiter)
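
# Worked example (hypothetical memory string, default prefixes):
#   divide_messages("texter: hi\nhelper: hello")
#   -> ["", "texter: hi\n", "helper: hello"]
# add_initial_message below discards the leading empty chunk.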

def add_initial_message(issue, language, memory, str_ai_prefix="texter", str_human_prefix="helper", include_colon=True,
                        texter_name="", counselor_name=""):
    """Seed the given memory with the scripted opening messages for an issue,
    defaulting to the "GCT" seed when the issue is unknown."""
    seed = seeds.get(issue, seeds["GCT"])
    initial_mem_str = seed["memory"].format(counselor_name=counselor_name, texter_name=texter_name)
    message_list = divide_messages(initial_mem_str, str_ai_prefix, str_human_prefix, include_colon)
    colon = ":" if include_colon else ""
    for message in message_list:
        message = message.strip()
        if not message:
            continue
        # str.removeprefix (Python 3.9+) drops the prefix exactly; lstrip would
        # treat it as a character set and could eat the start of the message.
        if message.startswith(str_human_prefix):
            memory.chat_memory.add_user_message(message.removeprefix(f"{str_human_prefix}{colon}").strip())
        elif message.startswith(str_ai_prefix):
            memory.chat_memory.add_ai_message(message.removeprefix(f"{str_ai_prefix}{colon}").strip())

def create_memory_add_initial_message(memories, issue, language, changed_source=False, texter_name="", counselor_name=""):
    """Reset the memories for the given language and seed any still-empty
    memory with the initial messages for the issue."""
    change_memories(memories, language, changed_source=changed_source)

    for memory in memories:
        if len(st.session_state[memory].buffer_as_messages) < 1:
            add_initial_message(issue, language, st.session_state[memory], texter_name=texter_name, counselor_name=counselor_name)
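
# Usage sketch (memory key and seed issue are hypothetical; assumes
# change_memories registers each key in st.session_state):
#   create_memory_add_initial_message({"memory": None}, "GCT", "en",
#                                     texter_name="Kit", counselor_name="Olivia")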

def is_model_alive(name, timeout=2, model_type="classificator"):
    """Ping a model endpoint with a minimal request and return the HTTP status
    code as a string; network errors and timeouts are reported as "404"."""
    if model_type != "openai":
        endpoint_url = os.environ["DATABRICKS_URL"].format(endpoint_name=name)
        if model_type == "classificator":
            body_request = {"inputs": [""]}
        elif model_type == "text-completion":
            body_request = {
                "prompt": "",
                "temperature": 0,
                "max_tokens": 1,
            }
        elif model_type == "text-generation":
            body_request = {
                "messages": [{"role": "user", "content": ""}],
                "max_tokens": 1,
                "temperature": 0,
            }
        else:
            raise ValueError(f"Model Type {model_type} not supported")
        try:
            response = requests.post(url=endpoint_url, headers=HEADERS, json=body_request, timeout=timeout)
            return str(response.status_code)
        except requests.RequestException:
            return "404"
    else:
        endpoint_url = "https://api.openai.com/v1/models"
        headers = {"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"}
        try:
            response = requests.get(url=endpoint_url, headers=headers, timeout=1)
            return str(response.status_code)
        except requests.RequestException:
            return "404"

@st.cache_data(ttl=300, show_spinner=False)
def are_models_alive():
    """Return True only when every configured endpoint (plus OpenAI) answers
    with HTTP 200. Cached for five minutes to avoid hammering the endpoints."""
    models_alive = [is_model_alive(**config) for config in ENDPOINT_NAMES.values()]
    models_alive.append(is_model_alive("openai", model_type="openai"))
    return all(x == "200" for x in models_alive)
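
# Typical gate at the top of a Streamlit page (sketch; message text is
# illustrative):
#   if not are_models_alive():
#       st.error("Some models are down, please try again later.")
#       st.stop()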