# convosim-ui-dev / app_config.py
from models.model_seeds import seeds, seed2str
# ISSUES = ['Anxiety','Suicide']
ISSUES = list(seeds.keys())  # issue/seed identifiers available to the simulator
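# Completion sources available in the app; commented-out entries are disabled.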
SOURCES = [
    # "CTL_llama2",
    "CTL_llama3",
    # "CTL_mistral",
    "OA_rolemodel",
    # "OA_finetuned",
]
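# Human-readable labels for each source (see source2label below).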
SOURCES_LAB = {
    "OA_rolemodel": "OpenAI GPT4o",
    "OA_finetuned": "Finetuned OpenAI",
    # "CTL_llama2": "Llama 2",
    "CTL_llama3": "Llama 3",
    "CTL_mistral": "Mistral",
}
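# Model-serving endpoints keyed by component; each entry gives the served endpoint
# name and its model type (presumably used by the client code to pick the right
# request/response handling).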
ENDPOINT_NAMES = {
    # "CTL_llama2": "texter_simulator",
    "CTL_llama3": {
        "name": "texter_simulator_llm",
        "model_type": "text-generation"
    },
    # "CTL_llama3": {
    #     "name": "databricks-meta-llama-3-1-70b-instruct",
    #     "model_type": "text-generation"
    # },
    # "CTL_llama2": "llama2_convo_sim",
    # "CTL_mistral": "convo_sim_mistral",
    "CPC": {
        "name": "phase_classifier",
        "model_type": "classificator"
    },
    "BadPractices": {
        "name": "training_adherence_bp",
        "model_type": "classificator"
    },
    "training_adherence": {
        "name": "training_adherence",
        "model_type": "text-completion"
    },
}

def source2label(source):
    """Return the display label for a source key."""
    return SOURCES_LAB[source]

def issue2label(issue):
    """Return the display string for an issue/seed, defaulting to 'GCT'."""
    return seed2str.get(issue, "GCT")
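
# Example lookups (illustrative):
#   source2label("CTL_llama3")        -> "Llama 3"
#   issue2label("some_unknown_seed")  -> "GCT" (fallback when the seed has no entry in seed2str)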

# Environment toggle: "prod" selects the production schema, anything else the test schema.
ENVIRON = "prod"
DB_SCHEMA = "prod_db" if ENVIRON == "prod" else "test_db"
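# Table names within DB_SCHEMA for conversations, completions, battles, errors, and the
# comparison/scoring tables (presumably qualified with DB_SCHEMA elsewhere, e.g. "prod_db.conversations").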
DB_CONVOS = "conversations"
DB_COMPLETIONS = "comparison_completions"
DB_BATTLES = "battles"
DB_ERRORS = "completion_errors"
DB_CPC = "cpc_comparison"
DB_BP = "bad_practices_comparison"
DB_TA = "convo_scoring_comparison"
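# Conversation length limits: hard cap on messages and a warning threshold at 80% of the cap (48 of 60).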
MAX_MSG_COUNT = 60
WARN_MSG_COUT = int(MAX_MSG_COUNT*0.8)