from fastapi import FastAPI
from pydantic import BaseModel
from huggingface_hub import InferenceClient
import uvicorn
from fastapi.responses import StreamingResponse
import io

app = FastAPI()

# Remote inference client pointed at the Stable Diffusion 2.1 model on the
# Hugging Face Inference API. Created once at import time and reused per request.
client = InferenceClient("stabilityai/stable-diffusion-2-1")


class Item(BaseModel):
    """Request payload for image generation.

    Defaults mirror common Stable Diffusion settings; only `prompt` is required.
    """
    prompt: str
    negative_prompt: str = ""
    num_inference_steps: int = 50
    guidance_scale: float = 7.5
    width: int = 512
    height: int = 512


def generate_image(item: Item):
    """Call the remote text-to-image endpoint and return the generated image.

    This is a blocking network call; the returned object is a PIL-style image
    (it exposes `.save(...)`) as produced by `InferenceClient.text_to_image`.
    """
    return client.text_to_image(
        prompt=item.prompt,
        negative_prompt=item.negative_prompt,
        num_inference_steps=item.num_inference_steps,
        guidance_scale=item.guidance_scale,
        width=item.width,
        height=item.height,
    )


@app.post("/generate_image/")
def generate_image_api(item: Item):
    """Generate an image from `item` and stream it back as a PNG response.

    NOTE: declared as a plain `def` (not `async def`) on purpose — the remote
    inference call is blocking, and FastAPI runs sync endpoints in its
    threadpool, so the event loop is not stalled while the image is generated.
    """
    image = generate_image(item)

    # Serialize the image to an in-memory PNG buffer and rewind it so the
    # streaming response reads from the start.
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format='PNG')
    img_byte_arr.seek(0)

    return StreamingResponse(img_byte_arr, media_type="image/png")


if __name__ == "__main__":
    # Bind on all interfaces; 7860 is the conventional Hugging Face Spaces port.
    uvicorn.run(app, host="0.0.0.0", port=7860)