Update app.py
app.py CHANGED
@@ -18,6 +18,7 @@ from langchain_community.document_loaders.blob_loaders.youtube_audio import Yout
 from langchain.schema import AIMessage, HumanMessage
 from langchain_community.llms import HuggingFaceHub
 from langchain_huggingface import HuggingFaceEndpoint
+from langchain_community.llms import HuggingFaceEndPoints
 from langchain_huggingface import HuggingFaceEmbeddings
 from langchain_community.llms import HuggingFaceTextGenInference
 #from langchain_community.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
@@ -212,7 +213,7 @@ def generate_text (prompt, chatbot, history, vektordatenbank, retriever, top_p=0
 #pipe = pipeline("text-generation", model=MODEL_NAME_HF, config={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty})

 # Create a HuggingFaceEndPoints instance with the corresponding endpoint parameters
-llm =
+llm = HuggingFaceEndPoint(
 endpoint_url=f"https://api-inference.huggingface.co/models/{MODEL_NAME_HF}",
 api_key=hf_token,
 model_kwargs=model_kwargs
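As committed, the second hunk does not line up with the imports: the new import pulls HuggingFaceEndPoints from langchain_community.llms, the constructor called is HuggingFaceEndPoint(, and the class actually imported a few lines earlier is HuggingFaceEndpoint from langchain_huggingface. That class also takes the token as huggingfacehub_api_token rather than api_key. Below is a minimal sketch of what the new block appears to intend, assuming the langchain_huggingface class is the one meant; MODEL_NAME_HF, hf_token and the example prompt are placeholders standing in for the Space's own values, not part of the commit.

    import os

    from langchain_huggingface import HuggingFaceEndpoint

    # Placeholders for values defined elsewhere in app.py.
    MODEL_NAME_HF = "mistralai/Mistral-7B-Instruct-v0.2"
    hf_token = os.environ["HF_TOKEN"]

    # Build the LLM against the serverless Inference API URL, mirroring the
    # f-string in the diff; the token is passed as huggingfacehub_api_token,
    # not api_key as written in the commit.
    llm = HuggingFaceEndpoint(
        endpoint_url=f"https://api-inference.huggingface.co/models/{MODEL_NAME_HF}",
        huggingfacehub_api_token=hf_token,
        task="text-generation",
        temperature=0.5,
        max_new_tokens=512,
    )

    # Quick smoke test of the endpoint.
    print(llm.invoke("Summarize what a retriever does in LangChain."))

Passing repo_id=MODEL_NAME_HF instead of the full endpoint_url should target the same serverless endpoint without hard-coding the URL, and the generation parameters from the commented-out pipeline call (top_k, top_p, repetition_penalty) can be passed to HuggingFaceEndpoint directly rather than through model_kwargs.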