alexkueck committed on
Commit
e744c61
·
verified ·
1 Parent(s): 3b3354b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -16,7 +16,8 @@ from langchain_community.document_loaders import PyPDFLoader, UnstructuredWordD
16
  from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
17
  #from langchain.document_loaders import GenericLoader
18
  from langchain.schema import AIMessage, HumanMessage
19
- from langchain_community.llms import HuggingFaceHub
 
20
  from langchain_huggingface import HuggingFaceEmbeddings
21
  from langchain_community.llms import HuggingFaceTextGenInference
22
  #from langchain_community.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
@@ -24,7 +25,7 @@ from langchain.prompts import PromptTemplate
24
  from langchain.text_splitter import RecursiveCharacterTextSplitter
25
  from langchain_community.vectorstores import Chroma
26
  from chromadb.errors import InvalidDimensionException
27
- from langchain import HuggingFacePipeline
28
  from transformers import pipeline
29
  from utils import *
30
  from beschreibungen import *
@@ -205,7 +206,7 @@ def generate_text (prompt, chatbot, history, vektordatenbank, retriever, top_p=0
205
  #oder an Hugging Face --------------------------
206
  print("HF Anfrage.......................")
207
  model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
208
- llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
209
  #llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
210
  # Erstelle eine Pipeline mit den gewünschten Parametern
211
  #pipe = pipeline("text-generation", model=MODEL_NAME_HF , model_kwargs=model_kwargs)
 
16
  from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
17
  #from langchain.document_loaders import GenericLoader
18
  from langchain.schema import AIMessage, HumanMessage
19
+ #from langchain_community.llms import HuggingFaceHub
20
+ from langchain_huggingface import HuggingFaceEndpoint
21
  from langchain_huggingface import HuggingFaceEmbeddings
22
  from langchain_community.llms import HuggingFaceTextGenInference
23
  #from langchain_community.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
 
25
  from langchain.text_splitter import RecursiveCharacterTextSplitter
26
  from langchain_community.vectorstores import Chroma
27
  from chromadb.errors import InvalidDimensionException
28
+ from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
29
  from transformers import pipeline
30
  from utils import *
31
  from beschreibungen import *
 
206
  #oder an Hugging Face --------------------------
207
  print("HF Anfrage.......................")
208
  model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
209
+ llm = HuggingFaceEndpoint(repo_id=repo_id, model_kwargs=model_kwargs)
210
  #llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
211
  # Erstelle eine Pipeline mit den gewünschten Parametern
212
  #pipe = pipeline("text-generation", model=MODEL_NAME_HF , model_kwargs=model_kwargs)