Update app.py

app.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 from PIL import Image
-import requests
 import os
 from together import Together
 import base64
@@ -9,6 +8,7 @@ import io
 # Initialize Together client
 client = None

+
 def initialize_client(api_key=None):
     global client
     if api_key:
@@ -18,174 +18,81 @@ def initialize_client(api_key=None):
     else:
         raise ValueError("Please provide a Together API Key")

-def encode_image(image_path):
-    try:
-        with Image.open(image_path) as img:
-            buffered = io.BytesIO()
-            img.save(buffered, format="PNG")
-            return base64.b64encode(buffered.getvalue()).decode('utf-8')
-    except Exception as e:
-        print(f"Error encoding image: {e}")
-        raise e
-
-def old_bot_streaming(message, history, max_new_tokens=250, api_key=None, max_history=5):
-    if client is None:
-        initialize_client(api_key)
-
-    txt = message["text"]
-    messages = []
-    images = []
-
-    for i, msg in enumerate(history[-max_history:]):
-        if isinstance(msg[0], tuple):
-            messages.append({"role": "user", "content": [{"type": "text", "text": history[i+1][0]}, {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(msg[0][0])}"}}]})
-            messages.append({"role": "assistant", "content": [{"type": "text", "text": history[i+1][1]}]})
-        elif isinstance(history[i-1], tuple) and isinstance(msg[0], str):
-            pass
-        elif isinstance(history[i-1][0], str) and isinstance(msg[0], str):
-            messages.append({"role": "user", "content": [{"type": "text", "text": msg[0]}]})
-            messages.append({"role": "assistant", "content": [{"type": "text", "text": msg[1]}]})
-
-            messages.append({"role": "user", "content": [{"type": "text", "text": txt}, {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(image_path)}"}}]})
-    else:
-        messages.append({"role": "user", "content": [{"type": "text", "text": txt}]})
-
-    try:
-        stream = client.chat.completions.create(
-            model="meta-llama/Llama-Vision-Free",
-            messages=messages,
-            max_tokens=max_new_tokens,
-            stream=True,
-        )
-
-        buffer = ""
-        for chunk in stream:
-            if chunk.choices[0].delta.content is not None:
-                buffer += chunk.choices[0].delta.content
-                time.sleep(0.01)
-                yield buffer
-
-    else:
-        yield f"An error occurred: {str(e)}"
-
-def bot_streaming(message, history, together_api_key, max_new_tokens=250, temperature=0.7):
-    # Initialize history if it's None
-    if history is None:
-        history = []
-
-    # Initialize the Together client if not already done
+
+def encode_image(image_path):
+    with Image.open(image_path) as img:
+        buffered = io.BytesIO()
+        img.save(buffered, format="PNG")
+        return base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+
+def bot_streaming(
+    message, history, together_api_key, max_new_tokens=250, temperature=0.7
+):
     if client is None:
         try:
             initialize_client(together_api_key)
         except Exception as e:
-            history.append(["Error initializing client", str(e)])
+            history.append((message, f"Error initializing client: {str(e)}"))
             yield history
             return

     prompt = "You are a helpful AI assistant. Analyze the image provided (if any) and respond to the user's query or comment."
-
     messages = [{"role": "system", "content": prompt}]

-    #
+    # Add history to messages
+    for user_msg, assistant_msg in history:
+        if isinstance(user_msg, str):  # Text message
+            messages.append(
+                {"role": "user", "content": [{"type": "text", "text": user_msg}]}
+            )
+        elif isinstance(user_msg, dict):  # Image message
+            image_base64 = encode_image(user_msg["image_path"])
+            messages.append(
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": user_msg.get("text", "")},
+                        {
+                            "type": "image_url",
+                            "image_url": {
+                                "url": f"data:image/png;base64,{image_base64}"
+                            },
+                        },
+                    ],
+                }
+            )
+        messages.append(
+            {"role": "assistant", "content": [{"type": "text", "text": assistant_msg}]}
+        )

     # Prepare the current message
-            image_path = file_info['path']
-        elif isinstance(file_info, str):
-            image_path = file_info
-        else:
-            raise ValueError("Invalid file information provided.")
-
-        content.append({
-            "role": "user",
-            "content": [
-                {"type": "text", "text": message["text"]},
+    user_message_content = []
+    if isinstance(message, dict):
+        if message.get("text"):
+            user_message_content.append({"type": "text", "text": message["text"]})
+        if message.get("files") and len(message["files"]) > 0:
+            image_path = message["files"][0]
+            image_base64 = encode_image(image_path)
+            user_message_content.append(
                 {
                     "type": "image_url",
-                    "image_url": {
-        })
-
-    # if isinstance(message, dict):
-    #     # Handle text input
-    #     if 'text' in message and message['text']:
-    #         user_text = message['text']
-    #         content.append({"type": "text", "text": user_text})
-
-    #     # Handle image input
-    #     if 'files' in message and len(message['files']) > 0:
-    #         file_info = message['files'][0]
-    #         if isinstance(file_info, dict) and 'path' in file_info:
-    #             image_path = file_info['path']
-    #         elif isinstance(file_info, str):
-    #             image_path = file_info
-    #         else:
-    #             raise ValueError("Invalid file information provided.")
-
-    #     # Encode the image to base64
-    #     image_base64 = encode_image(image_path)
-    #     content.append({
-    #         "type": "image_url",
-    #         "image_url": {"url": f"data:image/png;base64,{image_base64}"}
-    #     })
-    #     user_text += "\n[User uploaded an image]"
-    # else:
-    #     # If message is a string
-    #     user_text = message
-    #     content.append({"type": "text", "text": user_text})
-    except Exception as e:
-        # If there's an error processing the input, append it to history and yield
-        error_message = f"An error occurred while processing your input: {str(e)}"
-        print(error_message)  # Debug statement
-        history.append([user_text or "[Invalid input]", error_message])
-        yield history
-        return
+                    "image_url": {"url": f"data:image/png;base64,{image_base64}"},
+                }
+            )
+    elif isinstance(message, str):
+        user_message_content.append({"type": "text", "text": message})

-
-
-    yield history  # Yield the updated history to show the user's message immediately
+    current_message = {"role": "user", "content": user_message_content}
+    messages.append(current_message)

-    #
-
+    # Add the user's message to the history
+    user_display_message = message["text"] if isinstance(message, dict) else message
+    history = history + [(user_display_message, "")]

     try:
-        # Call the Together AI API with streaming
         stream = client.chat.completions.create(
-            model="meta-llama/Llama-Vision-Free",
+            model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
             messages=messages,
             max_tokens=max_new_tokens,
             temperature=temperature,
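
For reference between hunks: after this change, bot_streaming assembles a messages list of the following shape for Together's chat completions endpoint. This is a hypothetical illustration (the turn texts and the truncated base64 string are invented); the structure itself comes directly from the code above.

    # Hypothetical payload built by the new bot_streaming: one prior
    # text-only exchange plus a fresh user message with an uploaded image.
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant. ..."},
        {"role": "user", "content": [{"type": "text", "text": "Hello!"}]},
        {"role": "assistant", "content": [{"type": "text", "text": "Hi! How can I help?"}]},
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this photo?"},
                # encode_image() supplies the base64 string (elided here)
                {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0KG..."}},
            ],
        },
    ]
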
@@ -194,52 +101,47 @@ def bot_streaming(message, history, together_api_key, max_new_tokens=250, temper

         response = ""
         for chunk in stream:
-            # yield history
+            if (
+                chunk.choices
+                and chunk.choices[0].delta
+                and chunk.choices[0].delta.content is not None
+            ):
+                response += chunk.choices[0].delta.content
+                # Update the assistant's response in the history
+                history[-1] = (user_display_message, response)
+                yield history
+
+        if not response:
+            history[-1] = (
+                user_display_message,
+                "No response generated. Please try again.",
+            )
+            yield history

     except Exception as e:
-        print(error_message)  # Debug statement
-
-        if history:
-            history[-1][1] = error_message
-        else:
-            history.append(["", error_message])
-
+        error_message = (
+            "The image is too large. Please try with a smaller image or compress the existing one."
+            if "Request Entity Too Large" in str(e)
+            else f"An error occurred: {str(e)}"
+        )
+        history[-1] = (user_display_message, error_message)
         yield history

+
+# The rest of your Gradio interface code remains the same
 with gr.Blocks() as demo:
     gr.Markdown("# Meta Llama-3.2-11B-Vision-Instruct (FREE)")
-    gr.Markdown(
+    gr.Markdown(
+        "Try the new Llama 3.2 11B Vision API by Meta for free through Together AI. Upload an image, and start chatting about it. Just paste in your Together AI API key and get started!"
+    )
+
     with gr.Row():
         together_api_key = gr.Textbox(
             label="Together API Key",
             placeholder="Enter your TOGETHER_API_KEY here",
-            type="password"
+            type="password",
         )
-
+
     with gr.Row():
         max_new_tokens = gr.Slider(
             minimum=10,
@@ -249,23 +151,19 @@ with gr.Blocks() as demo:
             label="Maximum number of new tokens",
         )
         temperature = gr.Number(
-            value=0.7,
-            minimum=0,
-            maximum=1,
-            step=0.1,
-            label="Temperature"
+            value=0.7, minimum=0, maximum=1, step=0.1, label="Temperature"
         )
-
+
     chatbot = gr.Chatbot()
     msg = gr.MultimodalTextbox(label="Enter text or upload an image")
     clear = gr.Button("Clear")

     msg.submit(
-        bot_streaming,
+        bot_streaming,
+        [msg, chatbot, together_api_key, max_new_tokens, temperature],
+        chatbot,
     )
-    clear.click(lambda:
+    clear.click(lambda: None, None, chatbot, queue=False)

 if __name__ == "__main__":
     demo.launch(debug=True)
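
As a quick sanity check, the new bot_streaming generator can be driven outside Gradio. A minimal sketch, assuming a valid key in the TOGETHER_API_KEY environment variable and the dict shape that gr.MultimodalTextbox submits ({"text": ..., "files": [...]}):

    import os

    # Minimal sketch: stream one text-only turn through bot_streaming.
    message = {"text": "Describe the Llama 3.2 vision models in one sentence.", "files": []}
    for history in bot_streaming(message, [], os.environ["TOGETHER_API_KEY"]):
        pass  # each yield is the full chat history with the partial response so far

    print(history[-1][1])  # final assistant reply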