Update app.py
Browse files
app.py
CHANGED
@@ -3,8 +3,8 @@ import gradio as gr
|
|
3 |
from langchain.vectorstores import Chroma
|
4 |
from transformers import RagTokenizer, RagSequenceForGeneration
|
5 |
from sentence_transformers import SentenceTransformer
|
6 |
-
from langchain
|
7 |
-
from langchain.llms import
|
8 |
|
9 |
#Konstanten
|
10 |
ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
|
@@ -22,9 +22,8 @@ model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use
|
|
22 |
# Verbindung zur Chroma DB und Laden der Dokumente
|
23 |
chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)
|
24 |
|
25 |
-
# Erstellung eines
|
26 |
-
llm =
|
27 |
-
|
28 |
|
29 |
|
30 |
# Erstellen eines eigenen Retrievers mit Chroma DB und Embeddings
|
@@ -59,7 +58,9 @@ def get_rag_response(question):
|
|
59 |
links = [doc.get('url', 'No URL available') for doc in docs]
|
60 |
|
61 |
# Generieren der Antwort
|
62 |
-
|
|
|
|
|
63 |
|
64 |
# Zusammenstellen der Ausgabe
|
65 |
response = {
|
@@ -71,16 +72,18 @@ def get_rag_response(question):
|
|
71 |
|
72 |
|
73 |
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
|
|
|
|
84 |
|
85 |
|
86 |
#############################
|
|
|
3 |
from langchain.vectorstores import Chroma
|
4 |
from transformers import RagTokenizer, RagSequenceForGeneration
|
5 |
from sentence_transformers import SentenceTransformer
|
6 |
+
from langchain import LLMChain, PromptTemplate
|
7 |
+
from langchain.llms import HuggingFacePipeline
|
8 |
|
9 |
#Konstanten
|
10 |
ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
|
|
|
22 |
# Verbindung zur Chroma DB und Laden der Dokumente
|
23 |
chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)
|
24 |
|
25 |
+
# Erstellung eines HuggingFacePipeline LLM Modells
|
26 |
+
llm = HuggingFacePipeline(pipeline=model)
|
|
|
27 |
|
28 |
|
29 |
# Erstellen eines eigenen Retrievers mit Chroma DB und Embeddings
|
|
|
58 |
links = [doc.get('url', 'No URL available') for doc in docs]
|
59 |
|
60 |
# Generieren der Antwort
|
61 |
+
prompt_template = PromptTemplate(input_variables=["context", "question"], template="{context}\n\n{question}")
|
62 |
+
prompt = prompt_template(context=" ".join(passages), question=question)
|
63 |
+
answer = llm(prompt)
|
64 |
|
65 |
# Zusammenstellen der Ausgabe
|
66 |
response = {
|
|
|
72 |
|
73 |
|
74 |
|
75 |
+
# Function used by the chatbot UI
def chatbot_response(user_input, chat_history=None):
    """Generate a chatbot reply for *user_input* and append it to the history.

    Parameters:
        user_input: the user's question, forwarded to get_rag_response().
        chat_history: optional list of (user, bot) message pairs; a fresh
            list is created when omitted.

    Returns:
        A (chat_history, chat_history) pair — presumably the Gradio
        (chatbot, state) output convention; TODO confirm against the UI wiring.
    """
    # Bug fix: the original used `chat_history=[]`, a mutable default that is
    # shared across calls, so one conversation's history leaked into every
    # later no-argument call. Use the None-sentinel idiom instead.
    if chat_history is None:
        chat_history = []

    response = get_rag_response(user_input)
    answer = response['answer']
    documents = response['documents']

    # One "Link/Passage" paragraph per retrieved document.
    doc_links = "\n\n".join(
        f"Link: {doc['link']}\nPassage: {doc['passage']}" for doc in documents
    )

    bot_response = f"{answer}\n\nRelevant Documents:\n{doc_links}"

    chat_history.append((user_input, bot_response))
    return chat_history, chat_history
|
87 |
|
88 |
|
89 |
#############################
|