import logging
from models.custom_parsers import CustomStringOutputParser
from langchain.chains import ConversationChain
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from models.business_logic_utils.input_processing import initialize_conversation

# Prompt scaffold: the persona template is prepended to the running conversation
# history, and the model completes the next "texter:" turn.
OPENAI_TEMPLATE = """{template}
{{history}}
helper: {{input}}
texter:"""

def get_template_role_models(issue: str, language: str, texter_name: str = "") -> str:
    """Build the role-play persona prompt for a texter with the given issue,
    language, and optional name, by extracting the system message produced by
    initialize_conversation."""
    model_input = {
        "issue": issue,
        "language": language,
        "texter_name": texter_name,
        "messages": [],
    }

    # Initialize the conversation (adds the system message)
    model_input = initialize_conversation(model_input, "")
    return model_input["messages"][0]["content"]

def get_role_chain(template, memory, temperature=0.8):
    """Wrap the persona template in a GPT-4o-backed ConversationChain and
    return it along with the "helper:" prefix used for incoming turns."""
    template = OPENAI_TEMPLATE.format(template=template)
    PROMPT = PromptTemplate(
        input_variables=["history", "input"],
        template=template,
    )
    llm = ChatOpenAI(
        model="gpt-4o",
        temperature=temperature,
        max_tokens=256,
    )
    llm_chain = ConversationChain(
        llm=llm,
        prompt=PROMPT,
        memory=memory,
        output_parser=CustomStringOutputParser(),
        verbose=True,
    )
    logging.debug("Loaded GPT-4o role model chain")
    return llm_chain, "helper:"
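

if __name__ == "__main__":
    # Minimal local smoke test, not part of the production flow: a sketch of how
    # the two helpers above can be wired together. The memory prefixes must match
    # the "helper:"/"texter:" labels in OPENAI_TEMPLATE. The issue, language, and
    # texter_name values below are illustrative assumptions; valid values depend
    # on what initialize_conversation supports. Requires OPENAI_API_KEY.
    from langchain.memory import ConversationBufferMemory

    logging.basicConfig(level=logging.DEBUG)

    template = get_template_role_models(issue="anxiety", language="en", texter_name="Alex")
    memory = ConversationBufferMemory(human_prefix="helper", ai_prefix="texter")
    chain, human_prefix = get_role_chain(template, memory)

    reply = chain.predict(input="Hi, I'm here to listen. How are you doing today?")
    print("texter:", reply)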