hassanelmghari committed: Update app.py
app.py CHANGED
@@ -29,44 +29,39 @@ def bot_streaming(message, history, together_api_key, max_new_tokens=250, temper
     try:
         initialize_client(together_api_key)
     except Exception as e:
-        yield
+        yield [("Error initializing client", str(e))]
         return
 
     prompt = "You are a helpful AI assistant. Analyze the image provided (if any) and respond to the user's query or comment."
-
+
     messages = [{"role": "system", "content": prompt}]
-
-    #
+
+    # Build the conversation history
    for user_msg, assistant_msg in history:
-
-            text = user_msg[1] if len(user_msg) > 1 else ""
-            messages.append({"role": "user", "content": [
-                {"type": "text", "text": text},
-                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{encode_image(user_msg[0])}"}}
-            ]})
-        else:
-            messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
+        messages.append({"role": "user", "content": [{"type": "text", "text": user_msg}]})
         messages.append({"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]})
-
+
     # Prepare the current message
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    content = []
+    user_text = ""
+    if isinstance(message, dict):
+        if 'text' in message:
+            user_text = message['text']
+            content.append({"type": "text", "text": user_text})
+        if 'files' in message and len(message['files']) > 0:
+            image_path = message['files'][0]['name'] if isinstance(message['files'][0], dict) else message['files'][0]
+            image_base64 = encode_image(image_path)
+            content.append({"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}})
+            user_text += "\n[User uploaded an image]"
+    else:
+        user_text = message
+        content.append({"type": "text", "text": user_text})
+
+    messages.append({"role": "user", "content": content})
+
+    # Update the history with the new user message (with empty assistant response)
+    history = history + [[user_text, ""]]
+    yield history
 
     try:
         stream = client.chat.completions.create(
@@ -80,16 +75,20 @@ def bot_streaming(message, history, together_api_key, max_new_tokens=250, temper
         response = ""
         for chunk in stream:
             response += chunk.choices[0].delta.content or ""
-
+            history[-1][1] = response
+            yield history
 
         if not response:
-
+            history[-1][1] = "No response generated. Please try again."
+            yield history
 
     except Exception as e:
         if "Request Entity Too Large" in str(e):
-
+            history[-1][1] = "The image is too large. Please try with a smaller image or compress the existing one."
+            yield history
         else:
-
+            history[-1][1] = f"An error occurred: {str(e)}"
+            yield history
 
 with gr.Blocks() as demo:
     gr.Markdown("# Meta Llama-3.2-11B-Vision-Instruct (FREE)")
@@ -122,8 +121,12 @@ with gr.Blocks() as demo:
     msg = gr.MultimodalTextbox(label="Enter text or upload an image")
     clear = gr.Button("Clear")
 
-    msg.submit(
-
+    msg.submit(
+        bot_streaming,
+        inputs=[msg, chatbot, together_api_key, max_new_tokens, temperature],
+        outputs=chatbot
+    )
+    clear.click(lambda: [], None, chatbot, queue=False)
 
 if __name__ == "__main__":
-    demo.launch(debug=True)
+    demo.launch(debug=True)
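
For context, encode_image is defined earlier in app.py and is not shown in this diff. A minimal sketch of what such a helper typically looks like, matching the call sites above (the body here is an assumption, not the Space's actual code):

import base64

def encode_image(image_path):
    # Assumed implementation: read the image file and return its contents
    # base64-encoded as a string, ready for a data:image/png;base64,... URL.
    with open(image_path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")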
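The diff also truncates the arguments of client.chat.completions.create(. Given the Together Python SDK and the parameters bot_streaming receives, the call plausibly looks like the sketch below; the model id is inferred from the Space's title and is an assumption, not something visible in the diff:

# Sketch only: argument names follow the Together SDK; the model id is assumed.
stream = client.chat.completions.create(
    model="meta-llama/Llama-Vision-Free",  # assumption; not shown in the diff
    messages=messages,
    max_tokens=max_new_tokens,
    temperature=temperature,
    stream=True,  # produces the chunks iterated in the for-chunk loop above
)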
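The core of the change is the yield history pattern: when the handler passed to msg.submit is a generator, Gradio re-renders the Chatbot output on every yield, which is what produces incremental streaming in the UI. A self-contained sketch of that mechanism, reduced to its essentials (echo_stream is illustrative, not from app.py):

import gradio as gr

def echo_stream(message, history):
    # Append the new user turn with an empty assistant slot, then fill the
    # slot incrementally; each yield pushes the updated history to the UI.
    history = history + [[message, ""]]
    for ch in f"You said: {message}":
        history[-1][1] += ch
        yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    msg.submit(echo_stream, inputs=[msg, chatbot], outputs=chatbot)

if __name__ == "__main__":
    demo.launch()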