mrbeliever committed
Commit 3f1e688 · verified · 1 Parent(s): d10fabd

Update app.py

Files changed (1)
  1. app.py +20 -20
app.py CHANGED
@@ -4,30 +4,30 @@ from openai import OpenAI
 
 # Initialize the Nebius OpenAI client
 client = OpenAI(
-    base_url="https://api.studio.nebius.ai/v1/chat/completions",
-    api_key=os.getenv("OPENAI_API_KEY"),
+    base_url="https://api.studio.nebius.ai/v1/",
+    api_key=os.getenv("OPENAI_API_KEY"),  # Use the environment variable
 )
 
 # Function to process input and return the response
 def analyze_image(image_url):
-    response = client.chat.completions.create(
-        model="Qwen/Qwen2-VL-72B-Instruct",
-        messages=[
-            {
-                "role": "user",
-                "content": [
-                    {"type": "text", "text": "What’s in this image?"},
-                    {
-                        "type": "image_url",
-                        "image_url": {"url": image_url},
-                    },
-                ],
-            }
-        ],
-        max_tokens=300,
-    )
-    # Extract and return the AI's response
-    return response.choices[0].get("message", {}).get("content", "No response found.")
+    # Construct the prompt with the image URL
+    prompt = f"What’s in this image? [image_url: {image_url}]"
+
+    try:
+        # Call the Nebius OpenAI API
+        response = client.chat.completions.create(
+            model="Qwen/Qwen2-VL-72B-Instruct",
+            messages=[
+                {"role": "user", "content": prompt}  # Simplified messages format
+            ],
+            max_tokens=300,
+        )
+
+        # Extract and return the AI's response
+        return response.choices[0].get("message", {}).get("content", "No response found.")
+
+    except Exception as e:
+        return f"Error: {str(e)}"
 
 # Create the Gradio interface
 with gr.Blocks() as app:
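
Note on the return path kept by this commit: with the v1 openai Python client created by OpenAI(...), response.choices[0] is a typed object rather than a dict, so the .get("message", {}) call would typically raise an AttributeError, which the new try/except then surfaces as an "Error: ..." string. A minimal sketch of the attribute-based access the v1 client normally expects, reusing the client and model name from the diff above (the fallback text is carried over from the original code, not a library default):

    def analyze_image(image_url):
        prompt = f"What’s in this image? [image_url: {image_url}]"
        response = client.chat.completions.create(
            model="Qwen/Qwen2-VL-72B-Instruct",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=300,
        )
        # choices[0].message.content is the usual access path on v1 response objects;
        # it can be None, so keep the original fallback string.
        content = response.choices[0].message.content
        return content or "No response found."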