import os

import gradio as gr
from openai import OpenAI

# Initialize the OpenAI client pointed at the Nebius API
client = OpenAI(
    base_url="https://api.studio.nebius.ai/v1/",
    api_key=os.getenv("OPENAI_API_KEY"),  # Use the environment variable
)
# Function to process the image URL and return the model's description
def analyze_image(image_url):
    try:
        # Call the Nebius chat completions API using the OpenAI-style
        # multimodal message format, so the model actually receives the image
        response = client.chat.completions.create(
            model="Qwen/Qwen2-VL-72B-Instruct",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "What's in this image?"},
                        {"type": "image_url", "image_url": {"url": image_url}},
                    ],
                }
            ],
            max_tokens=300,
        )
        # Extract and return the AI's response
        return response.choices[0].message.content or "No response found."
    except Exception as e:
        return f"Error: {str(e)}"
# Create the Gradio interface
with gr.Blocks() as app:
    gr.Markdown("# Image Analysis with Nebius OpenAI")
    image_url_input = gr.Textbox(
        label="Image URL",
        placeholder="Enter an image URL for analysis",
    )
    output = gr.Textbox(
        label="AI Response",
        placeholder="The description of the image will appear here.",
    )
    submit_button = gr.Button("Analyze Image")
    submit_button.click(analyze_image, inputs=image_url_input, outputs=output)

# Launch the app
if __name__ == "__main__":
    app.launch()
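
# A minimal sketch of how to exercise analyze_image without the Gradio UI,
# e.g. from a Python shell, assuming OPENAI_API_KEY is exported and this file
# is saved as app.py. The image URL below is a placeholder, not a real image:
#
#     >>> from app import analyze_image
#     >>> print(analyze_image("https://example.com/photo.jpg"))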