alexkueck committed on
Commit
a3ed49b
·
verified ·
1 Parent(s): b222373

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -26,7 +26,7 @@ from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLo
26
  from langchain.document_loaders.generic import GenericLoader
27
  from langchain.document_loaders.parsers import OpenAIWhisperParser
28
  from langchain.schema import AIMessage, HumanMessage
29
- from langchain.llms import HuggingFaceHub, HuggingFaceChain
30
  from langchain.llms import HuggingFaceTextGenInference
31
  from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
32
  from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
@@ -535,7 +535,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
535
  print("HF Anfrage.......................")
536
  model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
537
  #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
538
- llm = HuggingFaceChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
539
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
540
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
541
  #llm via HuggingChat
 
26
  from langchain.document_loaders.generic import GenericLoader
27
  from langchain.document_loaders.parsers import OpenAIWhisperParser
28
  from langchain.schema import AIMessage, HumanMessage
29
+ from langchain.llms import HuggingFaceHub, HuggingFaceTransformerChain
30
  from langchain.llms import HuggingFaceTextGenInference
31
  from langchain.embeddings import HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings, HuggingFaceInferenceAPIEmbeddings
32
  from langchain.retrievers.tavily_search_api import TavilySearchAPIRetriever
 
535
  print("HF Anfrage.......................")
536
  model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
537
  #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
538
+ llm = HuggingFaceTransformerChain(model=MODEL_NAME_HF, model_kwargs={"temperature": 0.5, "max_length": 128})
539
  #llm = HuggingFaceHub(url_??? = "https://wdgsjd6zf201mufn.us-east-1.aws.endpoints.huggingface.cloud", model_kwargs={"temperature": 0.5, "max_length": 64})
540
  #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens,top_k=10,top_p=top_p,typical_p=0.95,temperature=temperature,repetition_penalty=repetition_penalty,)
541
  #llm via HuggingChat