Poprawki w plikach konfiguracyjnych
Browse files
README.md
CHANGED
@@ -1,12 +1,14 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
emoji: 👁
|
4 |
colorFrom: indigo
|
5 |
colorTo: yellow
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
8 |
app_file: app.py
|
9 |
pinned: true
|
|
|
|
|
10 |
short_description: Chatbot znający się na cyfrowej dostępności i WCAG po polsku
|
11 |
---
|
12 |
|
|
|
1 |
---
|
2 |
+
title: 'Jacek AI'
|
3 |
emoji: 👁
|
4 |
colorFrom: indigo
|
5 |
colorTo: yellow
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 5.9.1
|
8 |
app_file: app.py
|
9 |
pinned: true
|
10 |
+
preload_from_hub:
|
11 |
+
- radlab/polish-sts-v2
|
12 |
short_description: Chatbot znający się na cyfrowej dostępności i WCAG po polsku
|
13 |
---
|
14 |
|
app.py
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
# %%
import gradio as gr
from openai import OpenAI
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings


# %%
# OpenAI API client (API key presumably read from the environment — TODO confirm).
client=OpenAI()


# %%
# Polish sentence-similarity embedding model, used both to index the
# knowledge base and to embed incoming queries.
embeddings = HuggingFaceEmbeddings(model_name='radlab/polish-sts-v2')

# %%
# Chroma vector store persisted on disk in the 'baza' directory
# (collection 'baza'); queried via similarity_search in szukaj().
vector_store = Chroma(
    collection_name='baza',
    embedding_function=embeddings,
    persist_directory='baza'
)
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
# Retrieve the most relevant knowledge-base fragments for a query.
def szukaj(query, konwersacja):
    """Return the page contents of the top-3 most similar fragments.

    The conversation text is appended to the query so retrieval takes
    earlier turns into account.  The hits' page contents are
    concatenated, each followed by a newline.
    """
    query += konwersacja
    context_objects = vector_store.similarity_search(query=query, k=3)
    # Single join instead of repeated string += (quadratic in the worst case).
    return "".join(obj.page_content + "\n" for obj in context_objects)
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
# Extract the plain text of the chat history for use as prompt context.
def formatuj_historie_dla_promptu(history):
    """Concatenate the 'content' of every message, one per line.

    Message roles are ignored — only the text feeds the retrieval query
    and the prompt.  Returns '' for an empty history.
    """
    # Original kept an unused `role` local and built the string with
    # quadratic +=; one join is equivalent and cleaner.
    return "".join(f"{message['content']}\n" for message in history)
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
# Main chat handler, wired to the Gradio textbox submit event.
def odp(message, history):
    """Answer `message` using retrieved context plus the conversation.

    Returns ('', updated_history) so Gradio clears the textbox and
    refreshes the chatbot widget.
    """
    kontekst_konwersacji = formatuj_historie_dla_promptu(history)
    # Knowledge-base fragments relevant to the question + prior turns.
    kontekst = szukaj(message, kontekst_konwersacji)
    # NOTE: the Polish literals below were mojibake-corrupted in the source
    # (UTF-8 decoded as a legacy codepage); restored to proper UTF-8.
    prompt = (
        f"Konwersacja:\n{kontekst_konwersacji}\n"
        f"Kontekst z bazy wiedzy:\n{kontekst}\n"
        f"Pytanie użytkownika: {message}"
    )
    response = client.chat.completions.create(
        model='gpt-4o-mini',
        temperature=0.2,  # low temperature: keep answers close to the context
        messages=[
            {'role': 'system',
             'content': 'Jesteś ekspertem dostępności cyfrowej i masz na imię Jacek. '
                        'Odpowiadaj krótko na pytania korzystając z kontekstu i historii konwersacji.'},
            {'role': 'user',
             'content': prompt}
        ]
    )
    history.append({'role': 'user', 'content': message})
    history.append({'role': 'assistant', 'content': response.choices[0].message.content})
    return '', history
|
66 |
-
|
67 |
-
|
68 |
-
## Graphical interface (Gradio Blocks app).
with gr.Blocks(title='Jacek AI') as demo:
    # Chat transcript in OpenAI-style 'messages' format.
    chatbot = gr.Chatbot(type='messages', label='Jacek AI')
    # Single input box; submitting calls odp(text, history) and writes
    # back (cleared text, updated history) into the two widgets.
    msg = gr.Textbox(autofocus=True, label='Pytaj', show_label=False)
    msg.submit(odp, [msg, chatbot], [msg, chatbot])
# NOTE(review): indentation was lost in the scrape; launch() assumed
# top-level (outside the `with` block) — confirm against the original.
demo.launch()
|
74 |
-
|
75 |
-
|
76 |
-
# %%
|
77 |
-
|
78 |
-
|
79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|