|
|
|
import gradio as gr |
|
from openai import OpenAI |
|
from langchain_chroma import Chroma |
|
from langchain_huggingface import HuggingFaceEmbeddings |
|
|
|
|
|
|
|
|
|
# OpenAI client; reads the API key from the OPENAI_API_KEY environment variable.
client=OpenAI()

# Sentence-embedding model used to vectorize queries and documents.
# NOTE(review): 'radlab/polish-sts-v2' suggests Polish-language STS embeddings —
# downloaded from the HuggingFace hub on first run.
embeddings = HuggingFaceEmbeddings(model_name='radlab/polish-sts-v2')

# Persistent Chroma vector store backing the knowledge base.
vector_store = Chroma(
    collection_name='baza',
    embedding_function=embeddings,
    persist_directory='baza'   # local directory holding the persisted collection
)
|
|
|
|
|
|
|
|
|
def szukaj(query, konwersacja):
    """Retrieve knowledge-base context relevant to *query*.

    The conversation text is appended to the query so retrieval also
    takes prior turns into account.

    Args:
        query: The user's current question.
        konwersacja: Flattened conversation history (plain text).

    Returns:
        The ``page_content`` of the top-3 most similar documents,
        each terminated by a newline, concatenated into one string.
    """
    query += konwersacja
    context_objects = vector_store.similarity_search(query=query, k=3)
    # join instead of += in a loop — idiomatic and linear-time
    return "".join(doc.page_content + "\n" for doc in context_objects)
|
|
|
|
|
|
|
|
|
|
|
def formatuj_historie_dla_promptu(history):
    """Flatten a chat history into newline-terminated message contents.

    Args:
        history: Sequence of message dicts with a ``"content"`` key
            (Gradio ``type='messages'`` format). Roles are ignored.

    Returns:
        Every message's content followed by a newline, concatenated;
        an empty string for an empty history.
    """
    # Original assigned message["role"] to an unused local; dropped.
    return "".join(f"{message['content']}\n" for message in history)
|
|
|
|
|
|
|
|
|
def odp(message, history):
    """Gradio submit handler: answer *message* using retrieved context.

    Builds a prompt from the conversation history and the knowledge-base
    passages returned by ``szukaj``, asks the model, and appends the new
    user/assistant turns to *history* (mutated in place).

    Args:
        message: The user's question from the textbox.
        history: Chat history in Gradio ``type='messages'`` format.

    Returns:
        A pair ``('', history)`` matching the ``[msg, chatbot]`` outputs:
        the empty string clears the textbox.
    """
    # Prior turns flattened to plain text for retrieval and the prompt.
    kontekst_konwersacji = formatuj_historie_dla_promptu(history)
    # snake_case local (was PascalCase `Kontekst`).
    kontekst = szukaj(message, kontekst_konwersacji)
    prompt = f"Konwersacja:\n{kontekst_konwersacji}\nKontekst z bazy wiedzy:\n{kontekst}\nPytanie u偶ytkownika: {message}"
    response = client.chat.completions.create(
        model='gpt-4o-mini',
        temperature=0.2,  # low temperature keeps answers close to the supplied context
        messages=[
            {'role': 'system',
             'content': 'Jeste艣 ekspertem dost臋pno艣ci cyfrowej i masz na imi臋 Jacek. Odpowiadaj kr贸tko na pytania korzystaj膮c z kontekstu i historii konwersacji.'},
            {'role': 'user',
             'content': prompt}
        ]
    )
    # Append the exchange only after the call, so the prompt built above
    # does not duplicate the current message inside the history section.
    history.append({'role': 'user', 'content': message})
    history.append({'role': 'assistant', 'content': response.choices[0].message.content})
    return '', history
|
|
|
|
|
|
|
# Build the UI: a chat display plus an auto-focused question box whose
# submit event feeds odp and routes its outputs back to both widgets.
with gr.Blocks(title='Jacek AI') as demo:
    chat_window = gr.Chatbot(type='messages', label='Jacek AI')
    question_box = gr.Textbox(autofocus=True, label='Pytaj', show_label=False)
    question_box.submit(odp, [question_box, chat_window], [question_box, chat_window])

demo.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|