Update app.py
app.py CHANGED
@@ -206,12 +206,17 @@ def generate_text (prompt, chatbot, history, vektordatenbank, retriever, top_p=0
         #or to Hugging Face --------------------------
         print("HF Anfrage.......................")
         model_kwargs={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty}
-        llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
-
+        #llm = HuggingFaceHub(repo_id=repo_id, model_kwargs=model_kwargs)
+
         # Create a pipeline with the desired parameters
-        #pipe = pipeline("text-generation", model=MODEL_NAME_HF ,
-
-        #
+        #pipe = pipeline("text-generation", model=MODEL_NAME_HF, config={"temperature": 0.5, "max_length": 512, "num_return_sequences": 1, "top_k": top_k, "top_p": top_p, "repetition_penalty": repetition_penalty})
+
+        # Create a HuggingFaceEndPoints instance with the corresponding endpoint parameters
+        llm = HuggingFaceEndPoints(
+            endpoint_url=f"https://api-inference.huggingface.co/models/{MODEL_NAME_HF}",
+            api_key=hf_token,
+            model_kwargs=model_kwargs
+        )
 
         #Append the prompt to the history and turn it into a single text
         history_text_und_prompt = generate_prompt_with_history(prompt, history)
@@ -222,6 +227,7 @@ def generate_text (prompt, chatbot, history, vektordatenbank, retriever, top_p=0
         result = rag_chain(llm, history_text_und_prompt, retriever)
         print("result regchain.....................")
         print(result)
+        print("Ene result............................")
 
     except Exception as e:
         raise gr.Error(e)
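A side note on the added hunk: langchain does not export a class named HuggingFaceEndPoints, and its endpoint wrapper takes the token as huggingfacehub_api_token rather than api_key, so the committed code would raise a NameError when this branch runs. Below is a minimal sketch of what was presumably intended, assuming langchain_community's HuggingFaceEndpoint class and reusing MODEL_NAME_HF, hf_token, top_k, top_p, and repetition_penalty from elsewhere in app.py; max_length from the original model_kwargs is mapped to max_new_tokens, the field the endpoint wrapper actually accepts.

from langchain_community.llms import HuggingFaceEndpoint

# Sketch only: HuggingFaceEndpoint (not HuggingFaceEndPoints) is the class
# langchain_community exports, and the token field is huggingfacehub_api_token,
# not api_key. Generation parameters are passed as explicit fields because
# recent releases validate them individually instead of reading them from
# model_kwargs.
llm = HuggingFaceEndpoint(
    endpoint_url=f"https://api-inference.huggingface.co/models/{MODEL_NAME_HF}",
    huggingfacehub_api_token=hf_token,
    task="text-generation",
    temperature=0.5,
    max_new_tokens=512,   # assumed intent of max_length=512 in model_kwargs
    top_k=top_k,
    top_p=top_p,
    repetition_penalty=repetition_penalty,
)

The resulting llm should drop into rag_chain(llm, history_text_und_prompt, retriever) unchanged, since HuggingFaceEndpoint implements the same LLM interface as the HuggingFaceHub class it replaces.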