Update app.py
app.py
CHANGED
@@ -281,9 +281,9 @@ def generate_auswahl(prompt, file, chatbot, history, rag_option, model_option, o
     #only works via the special OpenAI interface...
     ext = analyze_file(file)
     if (ext == "png" or ext == "PNG" or ext == "jpg" or ext == "jpeg" or ext == "JPG" or ext == "JPEG"):
-        result = generate_text_zu_bild(file, prompt, k, rag_option, chatbot, db)
+        result = generate_text_zu_bild(file, prompt, k, rag_option, chatbot, history, db)
     else:
-        result = generate_text_zu_doc(file, prompt, k, rag_option, chatbot, db)
+        result = generate_text_zu_doc(file, prompt, k, rag_option, chatbot, history, db)

     history = history + [[(file,), None], [prompt, result]]
     print("history ....................")
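The commit threads the Gradio `history` state through to both generation helpers instead of passing only the `chatbot` component. A minimal sketch of the history structure this implies, assuming the usual Gradio convention of [user, assistant] pairs (names and values below are illustrative, not from the diff):

# Sketch: how history grows, per "history = history + [[(file,), None], [prompt, result]]".
# A file upload is stored as a one-element tuple with no assistant reply yet;
# the prompt/result pair then follows as a regular chat turn.
history = []
file, prompt, result = "report.pdf", "Summarize the document.", "The document covers ..."
history = history + [[(file,), None], [prompt, result]]
# -> [[("report.pdf",), None], ["Summarize the document.", "The document covers ..."]]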
@@ -329,7 +329,7 @@ def generate_bild(prompt, chatbot, model_option_zeichnen='HuggingFace', temperat

 ##################################################
 #generate an analysis for an image plus a text prompt
-def generate_text_zu_bild(file, prompt, k, rag_option, chatbot, db):
+def generate_text_zu_bild(file, prompt, k, rag_option, chatbot, history, db):
     global splittet
     print("Text mit Bild ..............................")
     print(file)
@@ -353,7 +353,7 @@ def generate_text_zu_bild(file, prompt, k, rag_option, chatbot, db):

 ##################################################
 #generate an analysis for a document plus a text prompt
-def generate_text_zu_doc(file, prompt, k, rag_option, chatbot, db):
+def generate_text_zu_doc(file, prompt, k, rag_option, chatbot, history, db):
     global splittet
     print("text mit doc ..............................")

@@ -362,9 +362,9 @@ def generate_text_zu_doc(file, prompt, k, rag_option, chatbot, db):
         print("Doc mit RAG..............................")
         neu_text_mit_chunks = rag_chain2(prompt, db, k)
         #for a chat LLM:
-        #
+        #prompt_neu = generate_prompt_with_history_openai(neu_text_mit_chunks, history)
         #as a plain prompt:
-        prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, chatbot)
+        prompt_neu = generate_prompt_with_history(neu_text_mit_chunks, history) #chatbot)

     result = create_assistant_file(prompt_neu, file)
     return result
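In the RAG branch, `rag_chain2` returns the user prompt enriched with retrieved document splits before the history is folded in. A hedged sketch of what such a helper plausibly does, assuming a LangChain-style vector store (Chroma or MongoDB, per the comments); the body below is an assumption, the real implementation lives elsewhere in app.py:

# Sketch only: retrieve the k most similar splits and stitch them into the prompt.
def rag_chain2_sketch(prompt, db, k):
    docs = db.similarity_search(prompt, k=k)              # top-k document splits
    context = "\n\n".join(d.page_content for d in docs)
    return f"Answer the question using the context below:\n{context}\n\nQuestion: {prompt}"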
@@ -403,9 +403,9 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
         llm = ChatOpenAI(model_name=MODEL_NAME, openai_api_key=openai_api_key, temperature=temperature)  #, top_p=top_p)
         #append the prompt to the history and turn it into a single text
         if (rag_option == "An"):
-            history_text_und_prompt = generate_prompt_with_history(prompt, chatbot)
+            history_text_und_prompt = generate_prompt_with_history(prompt, history) #chatbot)
         else:
-            history_text_und_prompt = generate_prompt_with_history_openai(prompt, chatbot)
+            history_text_und_prompt = generate_prompt_with_history_openai(prompt, history) #chatbot)
     else:
         #or to Hugging Face --------------------------
         print("HF Anfrage.......................")
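This is the core of the commit: the prompt builders now read past turns from the `history` state instead of the `chatbot` component. A minimal sketch of what a flattener like `generate_prompt_with_history` could look like, assuming [user, assistant] pairs; the actual implementation is outside this diff:

def generate_prompt_with_history_sketch(prompt, history):
    # Flatten past turns into one text block, then append the new prompt.
    lines = []
    for user_msg, bot_msg in history:
        if isinstance(user_msg, str):     # skip (file,) upload entries
            lines.append(f"User: {user_msg}")
        if bot_msg is not None:
            lines.append(f"Assistant: {bot_msg}")
    lines.append(f"User: {prompt}")
    return "\n".join(lines)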
@@ -415,7 +415,7 @@ def generate_text (prompt, chatbot, history, rag_option, model_option, openai_ap
         #llm = HuggingFaceTextGenInference( inference_server_url="http://localhost:8010/", max_new_tokens=max_new_tokens, top_k=10, top_p=top_p, typical_p=0.95, temperature=temperature, repetition_penalty=repetition_penalty,)
         print("HF")
         #append the prompt to the history and turn it into a single text
-        history_text_und_prompt = generate_prompt_with_history(prompt, chatbot)
+        history_text_und_prompt = generate_prompt_with_history(prompt, history) #chatbot)

     #add additional document splits from the DB to the prompt (from the vector DB - Chroma or MongoDB)
     if (rag_option == "An"):
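Both branches end with a plain-text `history_text_und_prompt`. With the LangChain 0.0.x API used above, the subsequent generation step is plausibly a string-in/string-out call like the following (an assumption; the actual call sits below this hunk):

result = llm.predict(history_text_und_prompt)  # LangChain 0.0.x: returns the model's text reply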