Update app.py
app.py
CHANGED
@@ -522,23 +522,31 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
         else:
             #splittet = False
             print("Calling the LLM without RAG: ...........")
-            resulti = llm_chain(llm, history_text_und_prompt)
-            result = resulti.strip()
-            """
+            #resulti = llm_chain(llm, history_text_und_prompt)
+            #result = resulti.strip()
             #Alternatively via API_URL - but the model needs 93 B of space!!!
-
-
-
-
-
-
-
-
-
-
-            print(
-
-
+            data = {
+                "inputs": prompt,
+                "parameters": {"temperature": 0.2, "max_length": 64},
+                "options": {"max_new_tokens": max_new_tokens}
+            }
+            response = requests.post(API_URL_TEXT, headers=HEADERS, json=data)
+            # Check the status of the request and process the response
+            if response.status_code == 200:
+                print("Success:", response.json())
+            else:
+                print("Error:", response.text)
+            result = response.json()
+
+            """
+            chatbot_response = result[0]['generated_text']
+            print("total number of tokens in the answer:------------------")
+            print (len(chatbot_response.split()))
+            chatbot_message = chatbot_response[len(prompt):].strip()
+            print("history/chatbot_response:--------------------------------")
+            print(history)
+            print(chatbot_message)
+            result = chatbot_message
             """
     else: # web search is on
         print("Searching the web: ...........")
|