Anustup committed
Commit 8bbd0a7 · verified · 1 Parent(s): 898a076

Upload 7 files

Files changed (7)
  1. app.py +414 -0
  2. constants.py +131 -0
  3. gpt.py +198 -0
  4. prompts.py +72 -0
  5. requirements.txt +9 -0
  6. summarizer.py +36 -0
  7. theme.py +67 -0
app.py ADDED
@@ -0,0 +1,414 @@
+ import streamlit as st
+ import os
+ from prompts import prompts
+ from constants import JSON_SCHEMA_FOR_GPT, UPDATED_MODEL_ONLY_SCHEMA, JSON_SCHEMA_FOR_LOC_ONLY
+ from gpt import runAssistant, checkRunStatus, retrieveThread, createAssistant, saveFileOpenAI, startAssistantThread, \
+     create_chat_completion_request_open_ai_for_summary, addMessageToThread, create_image_completion_request_gpt
+ from summarizer import create_brand_html, create_langchain_openai_query
+ from theme import flux_generated_image, flux_generated_image_seed
+ import time
+ from PIL import Image
+ import io
+
+
+ def process_run(st, thread_id, assistant_id):
+     run_id = runAssistant(thread_id, assistant_id)
+     status = 'running'
+     while status != 'completed':
+         with st.spinner('. . .'):
+             time.sleep(20)
+             status = checkRunStatus(thread_id, run_id)
+     thread_messages = retrieveThread(thread_id)
+     for message in thread_messages:
+         if message['role'] != 'user':
+             return message["content"]
+
+
+ def page1():
+     st.title("Upload Product")
+     st.markdown("<h2 style='color:#FF5733; font-weight:bold;'>Add a Product</h2>", unsafe_allow_html=True)
+     st.markdown("<p style='color:#444;'>Upload your product images; the more images you upload, the better the AI learns.</p>",
+                 unsafe_allow_html=True)
+     uploaded_files = st.file_uploader("Upload Images", accept_multiple_files=True, key="uploaded_files_key")
+     product_description = st.text_area("Describe the product", value=st.session_state.get("product_description", ""))
+     col1, col2 = st.columns([1, 2])
+     with col1:
+         if st.button("Save"):
+             st.session_state['uploaded_files'] = uploaded_files
+             st.session_state['product_description'] = product_description
+             st.success("Product information saved!")
+     with col2:
+         if st.button("Add product and move to next page"):
+             if not uploaded_files:
+                 st.warning("Please upload at least one image.")
+             elif not product_description:
+                 st.warning("Please provide a description for the product.")
+             else:
+                 st.session_state['uploaded_files'] = uploaded_files
+                 st.session_state['product_description'] = product_description
+                 st.session_state['page'] = "Page 2"
+
+
+ def page2():
+     st.title("Tell us about your shoot preference")
+     st.markdown("<h3 style='color:#444;'>What are you shooting today?</h3>", unsafe_allow_html=True)
+     shoot_type = st.radio("Select your shoot type:", ["Editorial", "Catalogue"], index=0)
+     st.session_state['shoot_type'] = shoot_type
+     brand_link = st.text_input("Add your brand link:", value=st.session_state.get("brand_link", ""))
+     st.session_state['brand_link'] = brand_link
+     if st.button("Get Brand Summary"):
+         if brand_link:
+             brand_summary_html = create_brand_html(brand_link)
+             brand_summary = create_langchain_openai_query(brand_summary_html)
+             st.session_state['brand_summary'] = brand_summary
+             st.success("Brand summary fetched!")
+         else:
+             st.warning("Please add a brand link.")
+     brand_summary_value = st.session_state.get('brand_summary', "")
+     editable_summary = st.text_area("Brand Summary:", value=brand_summary_value, height=100)
+     st.session_state['brand_summary'] = editable_summary
+     product_info = st.text_area("Tell us something about your product:", value=st.session_state.get("product_info", ""))
+     st.session_state['product_info'] = product_info
+     reference_images = st.file_uploader("Upload Reference Images", accept_multiple_files=True,
+                                         key="reference_images_key")
+     st.session_state['reference_images'] = reference_images
+     if st.button("Give Me Ideas"):
+         st.session_state['page'] = "Page 3"
+
+
+ def page3():
+     st.title("Scene Suggestions")
+     st.write("Based on your uploaded product and references!")
+     feedback = st.chat_input("Provide feedback:")
+     if not st.session_state.get("assistant_initialized", False):
+         assistant_id = createAssistant("You are a helpful assistant who is an expert in Fashion Shoots.")
+         updated_prompt = prompts["IDEA_GENERATION_PROMPT"].format(
+             brand_details=st.session_state["brand_summary"],
+             product_details=st.session_state["product_info"],
+             type_of_shoot=st.session_state["shoot_type"],
+             json_schema=JSON_SCHEMA_FOR_GPT,
+             product_name=st.session_state["product_description"]
+         )
+         file_locations = []
+         for uploaded_file in st.session_state['uploaded_files']:
+             bytes_data = uploaded_file.getvalue()
+             image = Image.open(io.BytesIO(bytes_data))
+             image.verify()
+             location = f"temp_image_{uploaded_file.name}"
+             with open(location, "wb") as f:
+                 f.write(bytes_data)
+             file_locations.append(location)
+             image.close()
+         for uploaded_file in st.session_state['reference_images']:
+             bytes_data = uploaded_file.getvalue()
+             image = Image.open(io.BytesIO(bytes_data))
+             image.verify()
+             location = f"temp2_image_{uploaded_file.name}"
+             with open(location, "wb") as f:
+                 f.write(bytes_data)
+             file_locations.append(location)
+             image.close()
+         file_ids = [saveFileOpenAI(location) for location in file_locations]
+         thread_id = startAssistantThread(file_ids, updated_prompt, "yes", "yes")
+         st.session_state.assistant_id = assistant_id
+         st.session_state.thread_id = thread_id
+         st.session_state.assistant_initialized = True
+         regenerate_images(thread_id, assistant_id)
+     if feedback:
+         if 'images' in st.session_state and 'descriptions' in st.session_state:
+             for image_path in st.session_state['images']:
+                 os.remove(image_path)
+             del st.session_state['images']
+             del st.session_state['descriptions']
+             del st.session_state["json_descriptions"]
+         addMessageToThread(st.session_state.thread_id, feedback)
+         regenerate_images(st.session_state.thread_id, st.session_state.assistant_id)
+     selected_image_index = None
+     cols = st.columns(1)
+     for i in range(len(st.session_state["images"])):
+         with cols[i]:
+             st.image(st.session_state.images[i], caption=st.session_state.descriptions[i], use_column_width=True)
+             if st.radio(f"Select {i + 1}", [f"Select Image {i + 1}"], key=f"radio_{i}"):
+                 selected_image_index = i
+     if selected_image_index is not None and st.button("Refine"):
+         st.session_state.selected_image_index = selected_image_index
+         st.session_state.selected_image = st.session_state.images[selected_image_index]
+         st.session_state.selected_text = st.session_state.descriptions[selected_image_index]
+         st.session_state['page'] = "Page 4"
+     if st.button("Go Back!"):
+         st.session_state.page = "Page 2"
+
+
+ def regenerate_images(thread_id, assistant_id):
+     """Helper function to generate images and descriptions."""
+     response_from_process_list = []
+     for _ in range(1):  # Assuming you generate 1 set of image/description
+         response_from_process = process_run(st, thread_id, assistant_id)
+         response_from_process_list.append(response_from_process)
+
+     summary_list = []
+     for final_response in response_from_process_list:
+         prompt_for_idea_summary = prompts["IDEA_SUMMARY_PROMPT"].format(
+             json_schema=str(final_response)
+         )
+         summary = create_chat_completion_request_open_ai_for_summary(prompt_for_idea_summary, "No")
+         summary_list.append(summary)
+
+     # Generate images based on the summaries
+     flux_generated_theme_image = []
+     for summary in summary_list:
+         theme_image = flux_generated_image(summary)
+         flux_generated_theme_image.append(theme_image["file_name"])
+
+     # Save the new images and descriptions in session state
+     st.session_state["images"] = flux_generated_theme_image
+     st.session_state["descriptions"] = summary_list
+     st.session_state["json_descriptions"] = response_from_process_list
+
+
+ def page4():
+     import json
+     selected_theme_text_by_user = st.session_state.json_descriptions[st.session_state.selected_image_index]
+     print(selected_theme_text_by_user)
+     schema_for_model_bg = {
+         "type": "object",
+         "properties": {
+             "Model": {
+                 "type": "string",
+                 "description": "The model name or identifier."
+             },
+             "Background": {
+                 "type": "string",
+                 "description": "Description or type of the background."
+             }
+         },
+         "required": ["Model", "Background"],
+         "additionalProperties": False
+     }
+     prompt_to_get_details = (f"You are provided with a brief of a Fashion Shoot: "
+                              f"{st.session_state['json_descriptions']}.\n Now provide me a JSON which will "
+                              f"have two keys ```Model``` and ```Background```. Provide all details "
+                              f"present about the model and background in the brief provided by you. Just provide a "
+                              f"natural language description. I will use it as the description of the model and "
+                              f"background needed by the brand. Output JSON following the schema.")
+     response_from_open_ai = create_chat_completion_request_open_ai_for_summary(prompt_to_get_details,
+                                                                                schema_name="model_bg",
+                                                                                json_schema=schema_for_model_bg,
+                                                                                json_mode="yes")
+     json_response_from_open_ai = json.loads(response_from_open_ai)
+     with st.sidebar:
+         st.title(st.session_state["product_info"])
+         st.write("Product Image")
+         st.image(st.session_state['uploaded_files'])
+         st.text("Scene Suggestion:")
+         st.image(st.session_state.selected_image)
+     dimensions = st.text_input("Enter Dimensions e.g. 3:4, 1:2", key="Dimensions")
+     seed = st.selectbox(
+         "Seed Preference",
+         ("Fixed", "Random"),
+     )
+     if seed == "Fixed":
+         seed_number = st.number_input("Enter an integer:", min_value=1, max_value=100000, value=10, step=1)
+     else:
+         seed_number = 0
+         st.text("Thanks, we will take care of it.")
+     model_preference = st.selectbox(
+         "Model Preference",
+         ("Create Own/Edit Pre-filled", "Ideas", "Upload Reference"),
+     )
+     if model_preference == "Create Own/Edit Pre-filled":
+         pre_filled_model_details = st.text_area("Model Idea", value=json_response_from_open_ai["Model"],
+                                                 key="Model Idea")
+     elif model_preference == "Ideas":
+         prompt_to_generate_idea = ("Your task is to create model ideas for the shoot of a product of a brand. "
+                                    "The details about the brand: ```{brand_details}```.\n The product: {product_name}, "
+                                    "which is: ```{product_details}```.\n Reference images for the product and the "
+                                    "brand's shoot idea are already provided to you. Additionally, the brand wants to "
+                                    "have a ```{type_of_shoot}``` of the model. Now, based on all provided details, "
+                                    "think step by step and provide your ideas about what type of model the brand "
+                                    "should need, based on the mentioned JSON format. Also provide a combined prompt "
+                                    "which the brand will use to create a shoot image. While creating the "
+                                    "combined prompt as mentioned in the JSON schema, do not miss any details you "
+                                    "mentioned in the JSON.")
+         updated_model_idea_gen_prompt = prompt_to_generate_idea.format(
+             brand_details=st.session_state["brand_summary"],
+             product_details=st.session_state["product_info"],
+             type_of_shoot=st.session_state["shoot_type"],
+             product_name=st.session_state["product_description"]
+         )
+         response_for_only_model = create_chat_completion_request_open_ai_for_summary(updated_model_idea_gen_prompt,
+                                                                                      schema_name="model_only",
+                                                                                      json_schema=UPDATED_MODEL_ONLY_SCHEMA,
+                                                                                      json_mode="yes")
+         pre_filled_model_details = st.text_area("Model Idea", value=response_for_only_model,
+                                                 key="Model Idea")
+     else:
+         uploaded_files = st.file_uploader("Upload one Model Reference Image here",
+                                           accept_multiple_files=False, key="uploader")
+         bytes_data = uploaded_files.getvalue()
+         image = Image.open(io.BytesIO(bytes_data))
+         image.verify()
+         location = f"temp_image_{uploaded_files.name}"
+         with open(location, "wb") as f:
+             f.write(bytes_data)
+         image.close()
+         prompt_to_generate_idea = ("Follow this JSON Schema: {json_schema_model_only}. "
+                                    "Your task is to create model ideas for the shoot of a product of a brand. "
+                                    "The details about the brand: ```{brand_details}```.\n The product: {product_name}, "
+                                    "which is: ```{product_details}```.\n Reference images for the product and the "
+                                    "brand's shoot idea are already provided to you. Additionally, the brand wants to "
+                                    "have a ```{type_of_shoot}``` of the model. Now, based on all provided details, "
+                                    "think step by step and provide your ideas about what type of model the brand "
+                                    "should need, based on the mentioned JSON format. Also provide a combined prompt "
+                                    "which the brand will use to create a shoot image. While creating the "
+                                    "combined prompt as mentioned in the JSON schema, do not miss any details you "
+                                    "mentioned in the JSON.")
+         updated_model_idea_gen_prompt = prompt_to_generate_idea.format(
+             json_schema_model_only=UPDATED_MODEL_ONLY_SCHEMA,
+             brand_details=st.session_state["brand_summary"],
+             product_details=st.session_state["product_info"],
+             type_of_shoot=st.session_state["shoot_type"],
+             product_name=st.session_state["product_description"]
+         )
+         json_response = create_image_completion_request_gpt(location, updated_model_idea_gen_prompt)
+         pre_filled_model_details = st.text_area("Model Idea", value=json_response,
+                                                 key="Model Idea")
+     background_preference = st.selectbox(
+         "Background Preference",
+         ("Create Own/Edit Pre-filled", "Ideas", "Upload Reference"),
+     )
+     if background_preference == "Create Own/Edit Pre-filled":
+         pre_filled_background_details = st.text_area("Background Idea",
+                                                      value=json_response_from_open_ai["Background"],
+                                                      key="Background Idea")
+     elif background_preference == "Ideas":
+         prompt_to_generate_idea = ("Follow this JSON Schema: {json_schema_background_only}. "
+                                    "Your task is to create location/background ideas for the shoot of a "
+                                    "product of a brand. "
+                                    "The details about the brand: ```{brand_details}```.\n The product: {product_name}, "
+                                    "which is: ```{product_details}```.\n Reference images for the product and the "
+                                    "brand's shoot idea are already provided to you. Additionally, the brand wants to "
+                                    "have a ```{type_of_shoot}``` of the model. Now, based on all provided details, "
+                                    "think step by step and provide your ideas about what type of location the brand "
+                                    "should need, based on the mentioned JSON format. Also provide a combined prompt "
+                                    "which the brand will use to create a shoot image. While creating the "
+                                    "combined prompt as mentioned in the JSON schema, do not miss any details you "
+                                    "mentioned in the JSON.")
+         updated_bg_idea_gen_prompt = prompt_to_generate_idea.format(
+             json_schema_background_only=JSON_SCHEMA_FOR_LOC_ONLY,
+             brand_details=st.session_state["brand_summary"],
+             product_details=st.session_state["product_info"],
+             type_of_shoot=st.session_state["shoot_type"],
+             product_name=st.session_state["product_description"]
+         )
+         response_for_only_bg = create_chat_completion_request_open_ai_for_summary(updated_bg_idea_gen_prompt,
+                                                                                   schema_name="bg_o",
+                                                                                   json_schema=JSON_SCHEMA_FOR_LOC_ONLY,
+                                                                                   json_mode="yes")
+         pre_filled_background_details = st.text_area("Background Idea", value=response_for_only_bg,
+                                                      key="Background Idea")
+     else:
+         uploaded_files = st.file_uploader("Upload one Background Reference Image here",
+                                           accept_multiple_files=False, key="uploader")
+         bytes_data = uploaded_files.getvalue()
+         image = Image.open(io.BytesIO(bytes_data))
+         image.verify()
+         location = f"temp2_image_{uploaded_files.name}"
+         with open(location, "wb") as f:
+             f.write(bytes_data)
+         image.close()
+         prompt_to_generate_idea = ("Follow this JSON Schema: {json_schema_bg_only}. "
+                                    "Your task is to create Background/Location ideas for the shoot of a "
+                                    "product of a brand. "
+                                    "The details about the brand: ```{brand_details}```.\n The product: {product_name}, "
+                                    "which is: ```{product_details}```.\n Reference images for the product and the "
+                                    "brand's shoot idea are already provided to you. Additionally, the brand wants to "
+                                    "have a ```{type_of_shoot}``` of the model. Now, based on all provided details, "
+                                    "think step by step and provide your ideas about what type of location the brand "
+                                    "should need, based on the mentioned JSON format. Also provide a combined prompt "
+                                    "which the brand will use to create a shoot image. While creating the "
+                                    "combined prompt as mentioned in the JSON schema, do not miss any details you "
+                                    "mentioned in the JSON.")
+         updated_bg_idea_gen_prompt = prompt_to_generate_idea.format(
+             json_schema_bg_only=JSON_SCHEMA_FOR_LOC_ONLY,
+             brand_details=st.session_state["brand_summary"],
+             product_details=st.session_state["product_info"],
+             type_of_shoot=st.session_state["shoot_type"],
+             product_name=st.session_state["product_description"]
+         )
+         json_response = create_image_completion_request_gpt(location, updated_bg_idea_gen_prompt)
+         pre_filled_background_details = st.text_area("Background Idea", value=json_response,
+                                                      key="Background Idea")
+     start_chat = st.button("Start Chat")
+     if "mood_chat_messages" not in st.session_state:
+         st.session_state["mood_chat_messages"] = []
+     if seed and dimensions and model_preference and background_preference:
+         if start_chat:
+             final_mood_board_image_prompt = prompts["FINAL_PROMPT_GENERATION"].format(
+                 brand_details=st.session_state["brand_summary"],
+                 product_details=st.session_state["product_info"],
+                 type_of_shoot=st.session_state["shoot_type"],
+                 product_name=st.session_state["product_description"],
+                 model_details=pre_filled_model_details,
+                 location_details=pre_filled_background_details,
+                 theme_details=str(selected_theme_text_by_user),
+                 chat_history=str(st.session_state["mood_chat_messages"])
+             )
+             prompt_for_flux_mood_board = create_chat_completion_request_open_ai_for_summary(
+                 final_mood_board_image_prompt, "No", system_message=prompts["SYSTEM_PROMPT_FOR_MOOD_BOARD"])
+             if seed == "Fixed":
+                 generated_flux_image = flux_generated_image_seed(prompt_for_flux_mood_board, seed_number, dimensions)
+             else:
+                 generated_flux_image = flux_generated_image(prompt_for_flux_mood_board)
+             st.session_state["mood_chat_messages"].append({
+                 "role": "AI",
+                 "message": prompt_for_flux_mood_board,
+                 "image": generated_flux_image["file_name"]
+             })
+         # for message in st.session_state["mood_chat_messages"]:
+         #     if message["role"] == "AI":
+         #         st.write(f"Caimera AI: {message['message']}")
+         #         st.image(message['image'])
+         #     else:
+         #         st.write(f"**You**: {message['message']}")
+         user_input = st.chat_input("Type your message here...")
+         if user_input:
+             st.session_state["mood_chat_messages"].append({"role": "User", "message": user_input})
+             prompt_for_flux_mood_board_n = create_chat_completion_request_open_ai_for_summary(
+                 user_input, "No", system_message=prompts["SYSTEM_PROMPT_FOR_MOOD_BOARD"])
+             if seed == "Fixed":
+                 generated_flux_image_n = flux_generated_image_seed(prompt_for_flux_mood_board_n, seed_number,
+                                                                    dimensions)
+             else:
+                 generated_flux_image_n = flux_generated_image(prompt_for_flux_mood_board_n)
+             st.session_state["mood_chat_messages"].append({
+                 "role": "AI",
+                 "message": prompt_for_flux_mood_board_n,
+                 "image": generated_flux_image_n["file_name"]
+             })
+         for message in st.session_state["mood_chat_messages"]:
+             if message["role"] == "AI":
+                 st.write(f"**AI**: {message['message']}")
+                 st.image(message['image'])
+             else:
+                 st.write(f"**You**: {message['message']}")
+         print(seed_number)
+
+
+ if 'page' not in st.session_state:
+     st.session_state.page = "Page 1"
+
+ # Routing between pages
+ if st.session_state.page == "Page 1":
+     page1()
+ elif st.session_state.page == "Page 2":
+     page2()
+ elif st.session_state.page == "Page 3":
+     page3()
+ elif st.session_state.page == "Page 4":
+     page4()
constants.py ADDED
@@ -0,0 +1,131 @@
+ JSON_SCHEMA_FOR_GPT = """
+ {
+     "title": "Fashion Campaign Ideas",
+     "type": "array",
+     "items": {
+         "type": "object",
+         "properties": {
+             "model": {
+                 "type": "object",
+                 "properties": {
+                     "ethnicity": {
+                         "type": "string",
+                         "description": "The ethnicity of the model"
+                     },
+                     "age": {
+                         "type": "integer",
+                         "description": "The age of the model"
+                     },
+                     "gender": {
+                         "type": "string",
+                         "enum": ["male", "female", "non-binary"],
+                         "description": "The gender of the model"
+                     }
+                 },
+                 "required": ["ethnicity", "age", "gender"]
+             },
+             "location": {
+                 "type": "string",
+                 "description": "The location or setting for the shoot"
+             },
+             "mood": {
+                 "type": "string",
+                 "description": "The overall mood or atmosphere for the shoot"
+             },
+             "emotion": {
+                 "type": "string",
+                 "description": "The primary emotion to be conveyed in the shoot"
+             },
+             "accessories": {
+                 "type": "array",
+                 "items": {
+                     "type": "string",
+                     "description": "Accessories included in the fashion shoot (e.g., sunglasses, handbags, jewelry)"
+                 }
+             },
+             "pose": {
+                 "type": "string",
+                 "description": "The type of pose the model will be holding during the shoot"
+             },
+             "target_market": {
+                 "type": "string",
+                 "description": "The target audience for the fashion piece"
+             },
+             "reasoning": {
+                 "type": "string",
+                 "description": "Explanation of why this particular campaign is suggested for the product"
+             },
+             "final_prompt": {
+                 "type": "string",
+                 "description": "The combined fashion shoot prompt, summarizing all details",
+                 "readonly": true
+             }
+         },
+         "required": ["model", "location", "mood", "emotion", "accessories", "pose", "target_market", "reasoning"],
+         "definitions": {
+             "generateFinalPrompt": {
+                 "description": "Generate the final prompt by combining all the inputs into a cohesive sentence.",
+                 "template": "{mood} photoshoot in {location} featuring a {model.ethnicity}, {model.age}-year-old {model.gender} model wearing accessories like {accessories}. The model holds a {pose} pose, conveying a sense of {emotion}, aimed at the {target_market} market. Reasoning: {reasoning}"
+             }
+         }
+     }
+ }"""
+
+ UPDATED_MODEL_ONLY_SCHEMA = {
+     "type": "object",
+     "properties": {
+         "model": {
+             "type": "object",
+             "properties": {
+                 "ethnicity": {
+                     "type": "string",
+                     "description": "The ethnicity of the model"
+                 },
+                 "age": {
+                     "type": "integer",
+                     "description": "The age of the model"
+                 },
+                 "gender": {
+                     "type": "string",
+                     "enum": ["male", "female", "non-binary"],
+                     "description": "The gender of the model"
+                 },
+                 "model_prompt": {
+                     "type": "string",
+                     "description": "The prompt used to generate the model details"
+                 },
+                 "reasoning": {
+                     "type": "string",
+                     "description": "The reasoning behind the model selection"
+                 },
+                 "background": {
+                     "type": "string",
+                     "description": "The background information about the model"
+                 }
+             },
+             "required": ["ethnicity", "age", "gender", "model_prompt", "reasoning", "background"],
+             "additionalProperties": False
+         }
+     },
+     "required": ["model"],
+     "additionalProperties": False
+ }
+
+ JSON_SCHEMA_FOR_LOC_ONLY = {
+     "type": "object",
+     "properties": {
+         "location": {
+             "type": "string",
+             "description": "The location or setting for the shoot"
+         },
+         "location_prompt": {
+             "type": "string",
+             "description": "The prompt or instruction related to the location"
+         },
+         "reasoning": {
+             "type": "string",
+             "description": "The reasoning or rationale behind selecting this location"
+         }
+     },
+     "required": ["location", "location_prompt", "reasoning"]
+ }
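
For orientation, these schema constants are consumed by gpt.py's structured-output helper; the sketch below mirrors how app.py passes them. The prompt string is only an illustrative placeholder, and OPENAI_API_KEY must be set in the environment:

    from constants import UPDATED_MODEL_ONLY_SCHEMA
    from gpt import create_chat_completion_request_open_ai_for_summary

    # Request a model idea constrained to the schema; "model_only" is the schema
    # name used for the API's response_format, as in app.py's page4.
    model_idea_json = create_chat_completion_request_open_ai_for_summary(
        "Suggest a model for an editorial shoot of a linen summer dress.",  # illustrative prompt
        json_mode="yes",
        schema_name="model_only",
        json_schema=UPDATED_MODEL_ONLY_SCHEMA,
    )
    print(model_idea_json)  # a JSON string conforming to UPDATED_MODEL_ONLY_SCHEMA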
gpt.py ADDED
@@ -0,0 +1,198 @@
+ from openai import OpenAI
+ import os
+ import base64
+ import requests
+ from prompts import prompts
+ from constants import JSON_SCHEMA_FOR_GPT
+
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+ client = OpenAI(api_key=OPENAI_API_KEY)
+ model = "gpt-4o"
+ title = "Caimera Mood board Expert"
+
+
+ def createAssistant(instruction_prompt):
+     instructions = instruction_prompt
+     assistant = client.beta.assistants.create(
+         name=title,
+         instructions=instructions,
+         model=model
+     )
+     return assistant.id
+
+
+ def saveFileOpenAI(location):
+     with open(location, "rb") as f:
+         file = client.files.create(file=f, purpose="vision")
+     os.remove(location)
+     return file.id
+
+
+ def startAssistantThread(file_id_enum, prompt_n, image_needed, json_mode_needed_or_not):
+     if json_mode_needed_or_not == "yes":
+         if image_needed == "yes":
+             messages = [
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": prompt_n
+                         }
+                     ],
+                 }
+             ]
+             for file_id in file_id_enum:
+                 messages[0]["content"].append({
+                     "type": "image_file",
+                     "image_file": {"file_id": file_id}
+                 })
+         else:
+             messages = [
+                 {
+                     "role": "user",
+                     "content": prompt_n
+                 }
+             ]
+         thread = client.beta.threads.create(messages=messages)
+     else:
+         if image_needed == "yes":
+             messages = [
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": prompt_n
+                         }
+                     ],
+                 }
+             ]
+             for file_id in file_id_enum:
+                 messages[0]["content"].append({
+                     "type": "image_file",
+                     "image_file": {"file_id": file_id}
+                 })
+         else:
+             messages = [
+                 {
+                     "role": "user",
+                     "content": prompt_n
+                 }
+             ]
+         thread = client.beta.threads.create(messages=messages)
+     return thread.id
+
+
+ def runAssistant(thread_id, assistant_id):
+     run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
+     return run.id
+
+
+ def checkRunStatus(thread_id, run_id):
+     run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
+     return run.status
+
+
+ def retrieveThread(thread_id):
+     thread_messages = client.beta.threads.messages.list(thread_id)
+     list_messages = thread_messages.data
+     thread_messages = []
+     for message in list_messages:
+         obj = {}
+         obj['content'] = message.content[0].text.value
+         obj['role'] = message.role
+         thread_messages.append(obj)
+     return thread_messages[::-1]
+
+
+ def addMessageToThread(thread_id, prompt_n):
+     thread_message = client.beta.threads.messages.create(thread_id, role="user", content=prompt_n)
+
+
+ def create_chat_completion_request_open_ai_for_summary(prompt, json_mode, schema_name="",
+                                                        json_schema="",
+                                                        system_message="You are an expert in Fashion Shoots"):
+     if json_mode == "No":
+         url = "https://api.openai.com/v1/chat/completions"
+         headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {OPENAI_API_KEY}"
+         }
+         data = {
+             "model": "gpt-4o",
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": system_message
+                 },
+                 {
+                     "role": "user",
+                     "content": prompt
+                 }
+             ]
+         }
+         response = requests.post(url, headers=headers, json=data)
+         json_response = response.json()
+     else:
+         url = "https://api.openai.com/v1/chat/completions"
+         headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {OPENAI_API_KEY}"
+         }
+         data = {
+             "model": "gpt-4o",
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": "You are an expert in creating prompts for Fashion Shoots."
+                 },
+                 {
+                     "role": "user",
+                     "content": prompt
+                 }
+             ],
+             "response_format": {"type": "json_schema",
+                                 "json_schema": {"name": schema_name, "strict": True, "schema": json_schema}}
+         }
+         response = requests.post(url, headers=headers, json=data)
+         json_response = response.json()
+         print(json_response)
+     return json_response["choices"][0]["message"]["content"]
+
+
+ def encode_image(image_path):
+     with open(image_path, "rb") as image_file:
+         return base64.b64encode(image_file.read()).decode('utf-8')
+
+
+ def create_image_completion_request_gpt(image_path, prompt):
+     base64_image = encode_image(image_path)
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {OPENAI_API_KEY}"
+     }
+     payload = {
+         "model": "gpt-4o",
+         "messages": [
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": prompt
+                     },
+                     {
+                         "type": "image_url",
+                         "image_url": {
+                             "url": f"data:image/jpeg;base64,{base64_image}"
+                         }
+                     }
+                 ]
+             }
+         ],
+         "max_tokens": 300
+     }
+     response = requests.post("https://api.openai.com/v1/chat/completions",
+                              headers=headers, json=payload)
+     json_resp = response.json()
+     return json_resp["choices"][0]["message"]["content"]
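
For context, app.py drives these helpers in a create → upload → thread → run → poll → read sequence (see process_run and page3). A minimal standalone sketch of that flow, with a placeholder image path:

    import time
    from gpt import (createAssistant, saveFileOpenAI, startAssistantThread,
                     runAssistant, checkRunStatus, retrieveThread)

    assistant_id = createAssistant("You are a helpful assistant who is an expert in Fashion Shoots.")
    file_ids = [saveFileOpenAI("product.jpg")]  # hypothetical local file; the helper deletes it after upload
    thread_id = startAssistantThread(file_ids, "Suggest shoot ideas for this product.", "yes", "yes")
    run_id = runAssistant(thread_id, assistant_id)
    while checkRunStatus(thread_id, run_id) != "completed":
        time.sleep(5)  # poll until the run finishes
    for message in retrieveThread(thread_id):
        if message["role"] != "user":
            print(message["content"])  # the assistant's reply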
prompts.py ADDED
@@ -0,0 +1,72 @@
+ prompts = {
+     "SYSTEM_PROMPT_FOR_MOOD_BOARD": """Act as a fashion stylist to generate detailed prompts for creating AI images
+     tailored for a fashion brand. Consider brand aesthetics, current fashion trends, color theory, brand themes,
+     and styles. \n
+     # Steps: \n
+     1. **Understand the Brand Aesthetics**: Analyze the brand's core values, style, and image.
+     Consider what makes their fashion unique and who their target audience is.\n
+     2. **Incorporate Fashion Trends**: Identify current fashion trends that align with the brand's aesthetics.
+     Consider elements like cuts, patterns, and colors that are relevant and fresh.\n
+     3. **Apply Color Theory**: Use color theory to choose a palette that complements the brand's existing themes
+     and stands out in AI-generated images.\n
+     4. **Focus on Brand Theme**: Consider how each element in your prompt sustains the brand's overall theme or
+     narrative. This could involve mood, setting, or cultural references.\n
+     5. **Detail the Image Composition**: Specify the composition elements such as the setting, pose, and styling
+     details. Consider using adjectives that communicate the desired mood and visual appeal.\n
+     # Output Format:\n
+     Each prompt should be a descriptive paragraph. The paragraph should include:\n
+     - Brand aesthetics and themes\n
+     - Relevant fashion trends and items\n
+     - Color palette and implications from color theory\n
+     - Desired mood and setting for the image\n
+     - Any specific styling or composition notes\n
+     # Examples\n
+     **Example 1 Input:**\n
+     - Brand: [Elegance Couture]\n
+     - Current Fashion Trends: [Minimalistic cut dresses, soft hues]\n
+     - Brand Theme: [Classic Elegance]\n
+     **Example 1 Output:**\n
+     fashion photography featuring a minimalistic cut dress that embodies the classic elegance of Elegance Couture.
+     The scene should be a soft, diffused setting with an ivory and blush color palette, reflecting a
+     serene yet sophisticated atmosphere. The model should pose with graceful ease, against an opulent
+     vintage backdrop, enhancing the brand's luxurious appeal.\n
+     **Example 2 Input:**\n
+     - Brand: [Urban Vogue]\n
+     - Current Fashion Trends: [Bold prints, oversized jackets]\n
+     - Brand Theme: [Edgy and Vibrant]\n
+     **Example 2 Output:**\n
+     realistic edgy photo that showcases Urban Vogue’s edgy and vibrant style.
+     The outfit should include an oversized jacket with a bold zebra print,
+     set against a dynamic urban landscape. The color scheme should be striking,
+     with electric blues and deep reds. The model should exhibit a confident pose,
+     capturing the bold energy and spirit of the city.\n
+     # Notes\n
+     - Ensure each prompt aligns with the specific preferences and branding guidelines of the fashion label.\n
+     - Think creatively about settings and poses to invent memorable and distinctive images.\n
+     - Consider lighting and texture elements that could enhance the visual output of the AI image model.""",
+     "IDEA_GENERATION_PROMPT": "Provide output in JSON schema, ```JSON SCHEMA ```: ```{json_schema}```.\n"
+                               "Your task is to create shoot ideas for a product of a brand. "
+                               "The details about the brand: ```{brand_details}```.\n The product: {product_name}, "
+                               "which is: ```{product_details}```.\n Reference images for the product and the "
+                               "brand's shoot idea are already provided to you. Additionally, the brand wants to "
+                               "have a ```{type_of_shoot}```. Now, based on all provided details, think step by "
+                               "step and provide your ideas in the mentioned JSON format. Also provide a combined "
+                               "prompt which the brand will use to create a shoot image. While creating the "
+                               "combined prompt as mentioned in the JSON schema, do not miss any details you "
+                               "mentioned in other parts of the JSON; for example, for idea 1 take location, model, "
+                               "and product_reasoning into account for the combined prompt.",
+     "IDEA_SUMMARY_PROMPT": """{json_schema}\nThis contains details about a brand and a final prompt.
+     Now understand and summarize it, and create a master prompt which will be used by a Fashion brand to create shoots.
+     Your final prompt should be to the point; it should be in the same style as the final prompt key inside the
+     JSON, and do not omit any details from location, model, final prompt, etc.
+     IN OUTPUT JUST GIVE ME THE FINAL PROMPT, NOTHING ELSE. NO OTHER EXTRA TEXT NEEDED""",
+     "FINAL_PROMPT_GENERATION": "You have to generate a prompt to create fashion images as mentioned in the system "
+                                "message, based on the provided context. Details about the brand: ```{brand_details}```.\n"
+                                "The product: {product_name}, which is: ```{product_details}```. The brand is looking "
+                                "for a specific model/style: {model_details}, location: {location_details}. The brand "
+                                "is looking for this specific theme: {theme_details}. The brand is shooting "
+                                "{type_of_shoot} photography. Also take the chat history into account (if present/"
+                                "needed): {chat_history} to iterate on the prompt. Now, based on the provided "
+                                "information, create a high-quality prompt for a Fashion image for the brand."
+ }
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ openai
+ replicate
+ requests
+ pillow
+ langchain
+ unstructured
+ tiktoken
+ langchain-community
+ nltk
summarizer.py ADDED
@@ -0,0 +1,36 @@
+ import os
+ import base64
+ from langchain.docstore.document import Document
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.llms.openai import OpenAI
+ from langchain.chains.summarize import load_summarize_chain
+ from langchain.document_loaders import UnstructuredURLLoader
+ import nltk
+ import openai
+
+ nltk.download('punkt')
+ # Read the key from the environment instead of hard-coding a secret in the source.
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+
+ def create_brand_html(brand_link):
+     urls = [brand_link]
+     loader = UnstructuredURLLoader(urls=urls)
+     data = loader.load()
+     chunk_size = 3000
+     chunk_overlap = 200
+     text_splitter = CharacterTextSplitter(
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap,
+         length_function=len,
+     )
+     texts = text_splitter.split_text(data[0].page_content)
+     docs = [Document(page_content=t) for t in texts[:]]
+     return docs
+
+
+ def create_langchain_openai_query(docs):
+     openai.api_key = OPENAI_API_KEY
+     llm = OpenAI(temperature=0, openai_api_key=openai.api_key)
+     map_reduce_chain = load_summarize_chain(llm, chain_type="map_reduce")
+     output = map_reduce_chain.run(docs)
+     return output
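
page2 in app.py chains these two helpers to turn a brand URL into an editable summary; a minimal sketch (the URL is a placeholder and OPENAI_API_KEY must be set in the environment):

    from summarizer import create_brand_html, create_langchain_openai_query

    docs = create_brand_html("https://example-brand.com")  # load the page and split it into Documents
    brand_summary = create_langchain_openai_query(docs)    # map-reduce summarization via OpenAI
    print(brand_summary)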
theme.py ADDED
@@ -0,0 +1,67 @@
+ import random
+ import replicate
+ import base64
+ import os
+
+ os.environ["REPLICATE_API_TOKEN"] = os.getenv("REPLICATE_API_TOKEN")
+
+
+ def create_flux_request(prompt_for_image_generation):
+     payload = {
+         "prompt": prompt_for_image_generation,
+         "guidance": 3.5,
+         "num_outputs": 1,
+         "aspect_ratio": "3:4"
+     }
+     output = replicate.run(
+         "black-forest-labs/flux-dev",
+         input=payload
+     )
+     return output
+
+
+ def flux_generated_image(prompt_for_image_generation):
+     try:
+         flux_response_object = create_flux_request(prompt_for_image_generation)
+         data_uri = flux_response_object[0].url
+         header, encoded = data_uri.split(',', 1)
+         file_data = base64.b64decode(encoded)
+         random_int_for_file_prefix = random.randint(1, 1000000)
+         output_image_file_name = f"{random_int_for_file_prefix}_ide_theme_image.png"
+         with open(output_image_file_name, "wb") as f:
+             f.write(file_data)
+         return {"success": True, "file_name": output_image_file_name}
+     except Exception as e:
+         return {"success": False, "error": e}
+
+
+ def create_flux_request_seed(prompt_for_image_generation, seed, aspect_ratio):
+     print(f"Using seed: {seed}")
+     payload = {
+         "prompt": prompt_for_image_generation,
+         "guidance": 3.5,
+         "num_outputs": 1,
+         "aspect_ratio": str(aspect_ratio),
+         "seed": int(seed)
+     }
+     output = replicate.run(
+         "black-forest-labs/flux-dev",
+         input=payload
+     )
+     return output
+
+
+ def flux_generated_image_seed(prompt_for_image_generation, seed, aspect_ratio):
+     try:
+         flux_response_object = create_flux_request_seed(prompt_for_image_generation, seed, aspect_ratio)
+         data_uri = flux_response_object[0].url
+         header, encoded = data_uri.split(',', 1)
+         file_data = base64.b64decode(encoded)
+         random_int_for_file_prefix = random.randint(1, 1000000)
+         output_image_file_name = f"{random_int_for_file_prefix}_ide_theme_image.png"
+         with open(output_image_file_name, "wb") as f:
+             f.write(file_data)
+         return {"success": True, "file_name": output_image_file_name}
+     except Exception as e:
+         return {"success": False, "error": e}
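
Both helpers return a result dict instead of raising, so callers can check the success flag before using file_name (app.py indexes file_name directly); a small usage sketch with an illustrative prompt, assuming REPLICATE_API_TOKEN is set:

    from theme import flux_generated_image, flux_generated_image_seed

    result = flux_generated_image("editorial shoot, soft ivory palette, minimalist linen dress")
    if result["success"]:
        print("saved:", result["file_name"])  # local PNG written by the helper
    else:
        print("generation failed:", result["error"])

    # Reproducible variant with a fixed seed and explicit aspect ratio
    seeded = flux_generated_image_seed("same illustrative prompt", seed=42, aspect_ratio="3:4")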