mrbeliever committed on
Commit c2c2086 · verified · 1 Parent(s): a44a876

Update app.py

Files changed (1):
  app.py +36 -24
app.py CHANGED
@@ -1,29 +1,44 @@
 import os
+import base64
 import gradio as gr
 from openai import OpenAI
 
 # Initialize the Nebius OpenAI client
 client = OpenAI(
     base_url="https://api.studio.nebius.ai/v1/",
-    api_key=os.getenv("NEBIUS_API_KEY"),  # Use the environment variable
+    api_key=os.environ.get("NEBIUS_API_KEY"),  # Replace with actual API key if not using environment variables
 )
 
-# Function to process input and return the response
-def analyze_image(image_url):
-    # Construct the prompt with the image URL
-    prompt = f"What’s in this image? [image_url: {image_url}]"
-
+# Function to encode the image in Base64
+def encode_image_to_base64(image_path):
+    with open(image_path, "rb") as image_file:
+        return base64.b64encode(image_file.read()).decode("utf-8")
+
+# Function to interact with the Nebius OpenAI API
+def analyze_image(image):
     try:
-        # Call the Nebius OpenAI API
+        # Convert the image to Base64
+        image_base64 = encode_image_to_base64(image)
+
+        # API request
         response = client.chat.completions.create(
-            model="Qwen/Qwen2-VL-72B-Instruct",
+            model="Qwen/Qwen2-VL-72B-Instruct",  # Ensure this model name is correct
             messages=[
-                {"role": "user", "content": prompt}  # Simplified messages format
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "What’s in this image?"},
+                        {
+                            "type": "image_base64",
+                            "image_base64": image_base64,
+                        },
+                    ],
+                }
             ],
             max_tokens=300,
         )
-
-        # Extract the AI response
+
+        # Extract the AI's response
         if response.choices and "message" in response.choices[0]:
             return response.choices[0]["message"]["content"]
         else:
@@ -32,20 +47,17 @@ def analyze_image(image_url):
     except Exception as e:
         return f"Error: {str(e)}"
 
-# Create the Gradio interface
+
+# Gradio interface for uploading an image
 with gr.Blocks() as app:
     gr.Markdown("# Image Analysis with Nebius OpenAI")
-    image_url_input = gr.Textbox(
-        label="Image URL",
-        placeholder="Enter an image URL for analysis",
-    )
-    output = gr.Textbox(
-        label="AI Response",
-        placeholder="The description of the image will appear here.",
-    )
-    submit_button = gr.Button("Analyze Image")
-    submit_button.click(analyze_image, inputs=image_url_input, outputs=output)
-
-# Launch the app
+    with gr.Row():
+        image_input = gr.Image(type="filepath", label="Upload an Image")
+        output_text = gr.Textbox(label="AI Response")
+
+    analyze_button = gr.Button("Analyze Image")
+    analyze_button.click(analyze_image, inputs=image_input, outputs=output_text)
+
+# Launch the Gradio app
 if __name__ == "__main__":
     app.launch()
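
One note on the new payload: the {"type": "image_base64", ...} content part is not one of the content types in the standard OpenAI chat-completions schema. OpenAI-compatible vision endpoints generally expect inline images as an "image_url" part carrying a data: URI. A minimal sketch under that assumption follows; the build_vision_message helper name and the image/jpeg MIME type are illustrative, not part of the commit, and if the Nebius endpoint accepts image_base64 as written, the committed payload can stay as-is.

# Sketch: standard OpenAI-style vision payload using a data: URI.
# Helper name, prompt text, and MIME type are illustrative assumptions.
def build_vision_message(image_base64, mime_type="image/jpeg"):
    return {
        "role": "user",
        "content": [
            {"type": "text", "text": "What’s in this image?"},
            {
                "type": "image_url",
                "image_url": {"url": f"data:{mime_type};base64,{image_base64}"},
            },
        ],
    }

With that helper, the call would use messages=[build_vision_message(image_base64)].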
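
Separately, with the openai>=1.0 client, response.choices[0] is a typed Choice object rather than a dict, so the "message" in ... check and the dict-style indexing that follow it will not return the model's answer. A sketch of the usual attribute-style access; the extract_reply name and the fallback string are illustrative, since the commit's own else branch is not shown in the diff.

# Sketch: attribute-style access for the openai>=1.0 client, where
# choices[0] is a typed Choice object rather than a dict.
def extract_reply(response):
    if response.choices:
        return response.choices[0].message.content
    return "No response received from the model."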