Spaces:
Running
Running
jeremyLE-Ekimetrics
committed on
Commit
·
f1eb272
1
Parent(s):
0c5ecd6
init
Browse files- README.md +1 -1
- main.py +68 -0
- requirements.txt +4 -0
README.md
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
---
|
2 |
title: Guess The Image
|
3 |
-
emoji:
|
4 |
colorFrom: pink
|
5 |
colorTo: gray
|
6 |
sdk: streamlit
|
|
|
1 |
---
|
2 |
title: Guess The Image
|
3 |
+
emoji: 🤗
|
4 |
colorFrom: pink
|
5 |
colorTo: gray
|
6 |
sdk: streamlit
|
main.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from diffusers import AutoPipelineForText2Image
|
2 |
+
import torch
|
3 |
+
import gradio as gr
|
4 |
+
import numpy as np
|
5 |
+
from openai import OpenAI
|
6 |
+
import os
|
7 |
+
client = OpenAI()
|
8 |
+
|
9 |
+
import streamlit as st
|
10 |
+
from PIL import Image
|
11 |
+
|
12 |
+
@st.cache_data(ttl=3600)
def get_prompt_to_guess():
    """Fetch a short French image-generation prompt from the OpenAI chat API.

    Cached for one hour (ttl=3600) so every visitor within that window
    plays against the same prompt. Relies on the module-level OpenAI
    ``client``.
    """
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant to generate one simple prompt in order to generate an image. Your given prompt won't go over 10 words. You only return the prompt. You will also answer in french.",
            },
            {
                "role": "user",
                "content": "Donne moi un prompt pour generer une image",
            },
        ],
    )
    return completion.choices[0].message.content
22 |
+
|
23 |
+
@st.cache_resource
def get_model():
    """Load the SDXL-Turbo text-to-image pipeline, cached for the app's lifetime.

    The app runs on CPU (see the disclosure text in the UI), so weights are
    kept in float32. The original code also requested ``variant="fp16"`` —
    the half-precision weight files — while asking for ``torch.float32``,
    a dtype/variant mismatch; we load the default (fp32) variant instead,
    which matches the requested dtype.
    """
    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float32,
    )
    return pipe
|
27 |
+
|
28 |
+
@st.cache_data
def generate_image(_pipe, prompt):
    """Generate one image for ``prompt`` with the SDXL-Turbo pipeline.

    ``_pipe`` is underscore-prefixed so ``st.cache_data`` skips hashing it;
    the cache key is effectively the prompt alone. SDXL-Turbo needs only a
    single inference step and no classifier-free guidance
    (``guidance_scale=0.0``).

    The original passed ``seed=1``, which is not a parameter of diffusers
    pipelines' ``__call__``; determinism is obtained by passing a seeded
    ``torch.Generator`` via the supported ``generator=`` argument instead.
    """
    generator = torch.Generator("cpu").manual_seed(1)
    return _pipe(
        prompt=prompt,
        num_inference_steps=1,
        guidance_scale=0.0,
        generator=generator,
    ).images[0]
|
31 |
+
|
32 |
+
# Seed the per-session flag tracking whether the "guess" button was
# pressed; only set on the first script run of a session.
st.session_state.setdefault("submit_guess", False)
|
34 |
+
|
35 |
+
def check_prompt(prompt, prompt_to_guess):
    """Return True when the guess matches the target prompt exactly.

    Only leading/trailing whitespace is ignored; case, accents and
    punctuation must match character-for-character.
    """
    guessed = prompt.strip()
    target = prompt_to_guess.strip()
    return guessed == target
|
37 |
+
|
38 |
+
pipe = get_model()
prompt = get_prompt_to_guess()
im_to_guess = generate_image(pipe, prompt)
# PIL's Image.size is (width, height); the original unpacked it as
# (h, w), which transposed the black placeholder for non-square images.
w, h = im_to_guess.size

st.title("Guess the prompt by Ekimetrics")
st.text("Rules : guess the prompt (in French) to generate the left image with the sdxl turbo model")
st.text("Hint : use right side to help you guess the prompt by testing some")
st.text("Disclosure : this runs on CPU so generation are quite slow (even with sdxl turbo)")

col_1, col_2 = st.columns([0.5, 0.5])

with col_1:
    # Left column: show the target image and collect the player's guess.
    st.header("GUESS THE PROMPT")
    st.image(im_to_guess)
    guessed_prompt = st.text_area("Input your guess prompt")
    st.session_state["submit_guess"] = st.button("guess the prompt")
    if st.session_state["submit_guess"]:
        if check_prompt(guessed_prompt, prompt):
            st.text("Good prompt ! test again in 24h !")
        else:
            st.text("wrong prompt !")

with col_2:
    # Right column: free-form prompt testing against the same model.
    st.header("TEST THE PROMPT")
    testing_prompt = st.text_area("Input your testing prompt")
    st.session_state["testing"] = st.button("test the prompt")
    if st.session_state["testing"]:
        im = generate_image(pipe, testing_prompt)
        st.session_state["testing"] = False
    else:
        # Black placeholder sized like the target image
        # (numpy image arrays are rows=height, cols=width).
        im = np.zeros([h, w, 3])
    st.image(im)
|
requirements.txt
ADDED
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
1 |
+
diffusers
|
2 |
+
transformers
|
3 |
+
accelerate
|
4 |
+
streamlit
|