Anustup committed
Commit e7b1cac · verified · 1 Parent(s): 42398d8

Update gpt.py

Files changed (1)
  1. gpt.py +197 -198
gpt.py CHANGED
@@ -1,198 +1,197 @@
- from openai import OpenAI
- import os
- import base64
- import requests
- from prompts import prompts
- from constants import JSON_SCHEMA_FOR_GPT
-
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-
- client = OpenAI(api_key=OPENAI_API_KEY)
- model = "gpt-4o"
- title = "Caimera Mood board Expert"
-
-
- def createAssistant(instruction_prompt):
-     instructions = instruction_prompt
-     assistant = client.beta.assistants.create(
-         name=title,
-         instructions=instructions,
-         model=model
-     )
-     return assistant.id
-
-
- def saveFileOpenAI(location):
-     with open(location, "rb") as f:
-         file = client.files.create(file=f, purpose="vision")
-     os.remove(location)
-     return file.id
-
-
- def startAssistantThread(file_id_enum, prompt_n, image_needed, json_mode_needed_or_not):
-     if json_mode_needed_or_not == "yes":
-         if image_needed == "yes":
-             messages = [
-                 {
-                     "role": "user",
-                     "content": [
-                         {
-                             "type": "text",
-                             "text": prompt_n
-                         }
-                     ],
-                 }
-             ]
-             for file_id in file_id_enum:
-                 messages[0]["content"].append({
-                     "type": "image_file",
-                     "image_file": {"file_id": file_id}
-                 })
-         else:
-             messages = [
-                 {
-                     "role": "user",
-                     "content": prompt_n}]
-         thread = client.beta.threads.create(messages=messages)
-     else:
-         if image_needed == "yes":
-             messages = [
-                 {
-                     "role": "user",
-                     "content": [
-                         {
-                             "type": "text",
-                             "text": prompt_n
-                         }
-                     ],
-                 }
-             ]
-             for file_id in file_id_enum:
-                 messages[0]["content"].append({
-                     "type": "image_file",
-                     "image_file": {"file_id": file_id}
-                 })
-         else:
-             messages = [
-                 {
-                     "role": "user",
-                     "content": prompt_n}]
-         thread = client.beta.threads.create(messages=messages)
-     return thread.id
-
-
- def runAssistant(thread_id, assistant_id):
-     run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
-     return run.id
-
-
- def checkRunStatus(thread_id, run_id):
-     run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
-     return run.status
-
-
- def retrieveThread(thread_id):
-     thread_messages = client.beta.threads.messages.list(thread_id)
-     list_messages = thread_messages.data
-     thread_messages = []
-     for message in list_messages:
-         obj = {}
-         obj['content'] = message.content[0].text.value
-         obj['role'] = message.role
-         thread_messages.append(obj)
-     return thread_messages[::-1]
-
-
- def addMessageToThread(thread_id, prompt_n):
-     thread_message = client.beta.threads.messages.create(thread_id, role="user", content=prompt_n)
-
-
- def create_chat_completion_request_open_ai_for_summary(prompt, json_mode, schema_name="",
-                                                        json_schema="",
-                                                        system_message="You are expert in Fashion "
-                                                                       "Shoots"):
-     import requests
-     if json_mode == "No":
-         url = "https://api.openai.com/v1/chat/completions"
-         headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {OPENAI_API_KEY}"
-         }
-         data = {
-             "model": "gpt-4o",
-             "messages": [
-                 {
-                     "role": "system",
-                     "content": system_message
-                 },
-                 {
-                     "role": "user",
-                     "content": prompt
-                 }
-             ]
-         }
-         response = requests.post(url, headers=headers, json=data)
-         json_response = response.json()
-     else:
-         url = "https://api.openai.com/v1/chat/completions"
-         headers = {
-             "Content-Type": "application/json",
-             "Authorization": f"Bearer {OPENAI_API_KEY}"
-         }
-         data = {
-             "model": "gpt-4o",
-             "messages": [
-                 {
-                     "role": "system",
-                     "content": "You are expert in creating prompts for Fashion Shoots."
-                 },
-                 {
-                     "role": "user",
-                     "content": prompt
-                 }
-             ],
-             "response_format": {"type": "json_schema", "json_schema": {"name": schema_name, "strict": True, "schema":
-                 json_schema}}
-         }
-         response = requests.post(url, headers=headers, json=data)
-         json_response = response.json()
-         print(json_response)
-     return json_response["choices"][0]["message"]["content"]
-
-
- def encode_image(image_path):
-     with open(image_path, "rb") as image_file:
-         return base64.b64encode(image_file.read()).decode('utf-8')
-
-
- def create_image_completion_request_gpt(image_path, prompt):
-     base64_image = encode_image(image_path)
-     headers = {
-         "Content-Type": "application/json",
-         "Authorization": f"Bearer {OPENAI_API_KEY}"
-     }
-     payload = {
-         "model": "gpt-4o",
-         "messages": [
-             {
-                 "role": "user",
-                 "content": [
-                     {
-                         "type": "text",
-                         "text": prompt
-                     },
-                     {
-                         "type": "image_url",
-                         "image_url": {
-                             "url": f"data:image/jpeg;base64,{base64_image}"
-                         }
-                     }
-                 ]
-             }
-         ],
-         "max_tokens": 300
-     }
-     response = requests.post("https://api.openai.com/v1/chat/completions",
-                              headers=headers, json=payload)
-     json_resp = response.json()
-     return json_resp["choices"][0]["message"]["content"]
 
+ from openai import OpenAI
+ import os
+ import base64
+ import requests
+ from prompts import prompts
+ from constants import JSON_SCHEMA_FOR_GPT
+
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+ client = OpenAI(api_key=OPENAI_API_KEY)
+ model = "gpt-4o"
+ title = "Caimera Mood board Expert"
+
+
+ def createAssistant(instruction_prompt):
+     instructions = instruction_prompt
+     assistant = client.beta.assistants.create(
+         name=title,
+         instructions=instructions,
+         model=model
+     )
+     return assistant.id
+
+
+ def saveFileOpenAI(location):
+     with open(location, "rb") as f:
+         file = client.files.create(file=f, purpose="vision")
+     os.remove(location)
+     return file.id
+
+
+ def startAssistantThread(file_id_enum, prompt_n, image_needed, json_mode_needed_or_not):
+     if json_mode_needed_or_not == "yes":
+         if image_needed == "yes":
+             messages = [
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": prompt_n
+                         }
+                     ],
+                 }
+             ]
+             for file_id in file_id_enum:
+                 messages[0]["content"].append({
+                     "type": "image_file",
+                     "image_file": {"file_id": file_id}
+                 })
+         else:
+             messages = [
+                 {
+                     "role": "user",
+                     "content": prompt_n}]
+         thread = client.beta.threads.create(messages=messages)
+     else:
+         if image_needed == "yes":
+             messages = [
+                 {
+                     "role": "user",
+                     "content": [
+                         {
+                             "type": "text",
+                             "text": prompt_n
+                         }
+                     ],
+                 }
+             ]
+             for file_id in file_id_enum:
+                 messages[0]["content"].append({
+                     "type": "image_file",
+                     "image_file": {"file_id": file_id}
+                 })
+         else:
+             messages = [
+                 {
+                     "role": "user",
+                     "content": prompt_n}]
+         thread = client.beta.threads.create(messages=messages)
+     return thread.id
+
+
+ def runAssistant(thread_id, assistant_id):
+     run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=assistant_id)
+     return run.id
+
+
+ def checkRunStatus(thread_id, run_id):
+     run = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
+     return run.status
+
+
+ def retrieveThread(thread_id):
+     thread_messages = client.beta.threads.messages.list(thread_id)
+     list_messages = thread_messages.data
+     thread_messages = []
+     for message in list_messages:
+         obj = {}
+         obj['content'] = message.content[0].text.value
+         obj['role'] = message.role
+         thread_messages.append(obj)
+     return thread_messages[::-1]
+
+
+ def addMessageToThread(thread_id, prompt_n):
+     thread_message = client.beta.threads.messages.create(thread_id, role="user", content=prompt_n)
+
+
+ def create_chat_completion_request_open_ai_for_summary(prompt, json_mode, schema_name="",
+                                                        json_schema="",
+                                                        system_message="You are expert in Fashion "
+                                                                       "Shoots"):
+     import requests
+     if json_mode == "No":
+         url = "https://api.openai.com/v1/chat/completions"
+         headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {OPENAI_API_KEY}"
+         }
+         data = {
+             "model": "gpt-4o",
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": system_message
+                 },
+                 {
+                     "role": "user",
+                     "content": prompt
+                 }
+             ]
+         }
+         response = requests.post(url, headers=headers, json=data)
+         json_response = response.json()
+     else:
+         url = "https://api.openai.com/v1/chat/completions"
+         headers = {
+             "Content-Type": "application/json",
+             "Authorization": f"Bearer {OPENAI_API_KEY}"
+         }
+         data = {
+             "model": "gpt-4o",
+             "messages": [
+                 {
+                     "role": "system",
+                     "content": "You are expert in creating prompts for Fashion Shoots."
+                 },
+                 {
+                     "role": "user",
+                     "content": prompt
+                 }
+             ],
+             "response_format": {"type": "json_schema", "json_schema": {"name": schema_name, "strict": True, "schema":
+                 json_schema}}
+         }
+         response = requests.post(url, headers=headers, json=data)
+         json_response = response.json()
+         print(json_response)
+     return json_response["choices"][0]["message"]["content"]
+
+
+ def encode_image(image_path):
+     with open(image_path, "rb") as image_file:
+         return base64.b64encode(image_file.read()).decode('utf-8')
+
+
+ def create_image_completion_request_gpt(image_path, prompt):
+     base64_image = encode_image(image_path)
+     headers = {
+         "Content-Type": "application/json",
+         "Authorization": f"Bearer {OPENAI_API_KEY}"
+     }
+     payload = {
+         "model": "gpt-4o",
+         "messages": [
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "text",
+                         "text": prompt
+                     },
+                     {
+                         "type": "image_url",
+                         "image_url": {
+                             "url": f"data:image/jpeg;base64,{base64_image}"
+                         }
+                     }
+                 ]
+             }
+         ],
+     }
+     response = requests.post("https://api.openai.com/v1/chat/completions",
+                              headers=headers, json=payload)
+     json_resp = response.json()
+     return json_resp["choices"][0]["message"]["content"]
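
For context, a minimal sketch of how the assistant helpers in gpt.py might be driven end to end. It assumes OPENAI_API_KEY is exported and that prompts.py and constants.py are importable; the instruction text, image path, and polling loop below are illustrative placeholders, not part of this commit.

import time

from gpt import (
    createAssistant,
    saveFileOpenAI,
    startAssistantThread,
    runAssistant,
    checkRunStatus,
    retrieveThread,
)

# Hypothetical inputs -- not part of the repository.
instruction_prompt = "You help plan fashion shoots from mood boards."
image_path = "moodboard.jpg"  # placeholder path; note saveFileOpenAI deletes the file after upload

assistant_id = createAssistant(instruction_prompt)
file_id = saveFileOpenAI(image_path)

# Start a thread with one attached image; the flags are the literal "yes"/"no" strings gpt.py expects.
thread_id = startAssistantThread([file_id], "Summarise this mood board.", "yes", "no")
run_id = runAssistant(thread_id, assistant_id)

# Poll until the run leaves the queued/in_progress states.
while checkRunStatus(thread_id, run_id) in ("queued", "in_progress"):
    time.sleep(2)

for message in retrieveThread(thread_id):
    print(f"{message['role']}: {message['content']}")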