alexkueck committed on
Commit
ca1744a
·
verified ·
1 Parent(s): 146e52e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -16
app.py CHANGED
@@ -3,8 +3,8 @@ import gradio as gr
3
  from langchain.vectorstores import Chroma
4
  from transformers import RagTokenizer, RagSequenceForGeneration
5
  from sentence_transformers import SentenceTransformer
6
- from langchain.chains.question_answering import load_qa_chain
7
- from langchain.llms import HuggingFaceLLM
8
 
9
  #Konstanten
10
  ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
@@ -22,9 +22,8 @@ model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use
22
  # Verbindung zur Chroma DB und Laden der Dokumente
23
  chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)
24
 
25
- # Erstellung eines HuggingFaceLLM Modells
26
- llm = HuggingFaceLLM(model=model, tokenizer=tokenizer)
27
-
28
 
29
 
30
  # Erstellen eines eigenen Retrievers mit Chroma DB und Embeddings
@@ -59,7 +58,9 @@ def get_rag_response(question):
59
  links = [doc.get('url', 'No URL available') for doc in docs]
60
 
61
  # Generieren der Antwort
62
- answer = llm(question, docs)
 
 
63
 
64
  # Zusammenstellen der Ausgabe
65
  response = {
@@ -71,16 +72,18 @@ def get_rag_response(question):
71
 
72
 
73
 
74
def chatbot_response(user_input, chat_history=None):
    """Generate a chatbot reply for *user_input* and extend the chat history.

    Args:
        user_input: The user's question, passed to the RAG pipeline.
        chat_history: Optional list of (user, bot) message tuples. A fresh
            list is created when omitted — using ``[]`` as the default would
            share one list across all calls (mutable-default pitfall).

    Returns:
        A ``(chat_history, chat_history)`` tuple (the duplicated value is the
        shape Gradio chat components expect for state + display).
    """
    if chat_history is None:
        chat_history = []

    response = get_rag_response(user_input)
    answer = response['answer']
    documents = response['documents']
    doc_links = "\n\n".join([f"Link: {doc['link']} \nAuszüge der Dokumente: {doc['passage']}" for doc in documents])
    bot_response = f"{answer} \n\nRelevante Dokumente: \n{doc_links}"

    # Fixed typo: original appended `user_inptu`, which raised NameError.
    chat_history.append((user_input, bot_response))

    return chat_history, chat_history
 
 
84
 
85
 
86
  #############################
 
3
  from langchain.vectorstores import Chroma
4
  from transformers import RagTokenizer, RagSequenceForGeneration
5
  from sentence_transformers import SentenceTransformer
6
+ from langchain import LLMChain, PromptTemplate
7
+ from langchain.llms import HuggingFacePipeline
8
 
9
  #Konstanten
10
  ANTI_BOT_PW = os.getenv("CORRECT_VALIDATE")
 
22
  # Verbindung zur Chroma DB und Laden der Dokumente
23
  chroma_db = Chroma(embedding_model=embedding_model, persist_directory = PATH_WORK + CHROMA_DIR)
24
 
25
+ # Erstellung eines HuggingFacePipeline LLM Modells
26
+ llm = HuggingFacePipeline(pipeline=model)
 
27
 
28
 
29
  # Erstellen eines eigenen Retrievers mit Chroma DB und Embeddings
 
58
  links = [doc.get('url', 'No URL available') for doc in docs]
59
 
60
  # Generieren der Antwort
61
+ prompt_template = PromptTemplate(input_variables=["context", "question"], template="{context}\n\n{question}")
62
+ prompt = prompt_template(context=" ".join(passages), question=question)
63
+ answer = llm(prompt)
64
 
65
  # Zusammenstellen der Ausgabe
66
  response = {
 
72
 
73
 
74
 
75
# Function used as the chatbot callback
def chatbot_response(user_input, chat_history=None):
    """Produce the bot answer for *user_input* and append the turn to history.

    Args:
        user_input: The user's question, forwarded to ``get_rag_response``.
        chat_history: Optional list of (user, bot) tuples. Defaults to a new
            list per call — a literal ``[]`` default would be shared between
            all invocations (mutable-default pitfall).

    Returns:
        ``(chat_history, chat_history)`` — the duplicated value matches the
        (state, display) pair Gradio chat interfaces expect.
    """
    if chat_history is None:
        chat_history = []

    response = get_rag_response(user_input)
    answer = response['answer']
    documents = response['documents']

    # One entry per retrieved document: its link plus the matched passage.
    doc_links = "\n\n".join([f"Link: {doc['link']}\nPassage: {doc['passage']}" for doc in documents])

    bot_response = f"{answer}\n\nRelevant Documents:\n{doc_links}"

    chat_history.append((user_input, bot_response))
    return chat_history, chat_history
87
 
88
 
89
  #############################