import os
import re

from dotenv import load_dotenv

load_dotenv()

import gradio as gr

from langchain.agents.openai_assistant import OpenAIAssistantRunnable

api_key = os.getenv('OPENAI_API_KEY')
extractor_agent = os.getenv('ASSISTANT_ID_SOLUTION_SPECIFIER_A')
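
# Wrap an existing OpenAI Assistant. With as_agent=True, invoke() returns an
# agent-style response whose text is in response.return_values["output"].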
extractor_llm = OpenAIAssistantRunnable(
    assistant_id=extractor_agent,
    api_key=api_key,
    as_agent=True,
)
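
# Module-level thread id so consecutive calls in the same chat session reuse
# one Assistants thread and keep conversational context.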
THREAD_ID = None


def remove_citation(text):
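    # Strip citation markers such as 【12†source】 that the Assistants API
    # adds when the assistant quotes from retrieved files.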
    pattern = r"【\d+†\w+】"
    return re.sub(pattern, "", text)


def predict(message, history):
    """
    Receives the new user message plus the entire conversation history
    from Gradio. If no thread_id is set, we create a new thread.
    Otherwise we pass the existing thread_id.
    """
    global THREAD_ID

    print("current history:", history)
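
    # An empty history means the chat was just started (or cleared),
    # so begin a fresh thread.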
    if not history:
        THREAD_ID = None

    if THREAD_ID is None:
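        # First turn: let the assistant create a new thread, then remember its id.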
        response = extractor_llm.invoke({"content": message})
        THREAD_ID = response.thread_id
    else:
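        # Later turns: pass the stored thread_id so the assistant sees the history.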
        response = extractor_llm.invoke({"content": message, "thread_id": THREAD_ID})

    output = response.return_values["output"]
    non_cited_output = remove_citation(output)

    return non_cited_output


chat = gr.ChatInterface(
    fn=predict,
    title="Solution Specifier A",
)
chat.launch(share=True)