Create openai_requests.py
Browse files- openai_requests.py +96 -0
openai_requests.py
ADDED
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import requests
|
3 |
+
import json
|
4 |
+
import math
|
5 |
+
|
6 |
+
def get_completion_from_openai(prompt, max_tokens=None):
    """Send *prompt* as a single user message to the OpenAI chat-completions endpoint.

    The endpoint URL, model name, and bearer token are read from the
    OPENAI_COMPLETION_URL, OPENAI_MODEL, and OPENAI_TOKEN environment
    variables respectively.

    Args:
        prompt: Text sent as the single user message.
        max_tokens: Optional cap on the completion length. When None the
            field is omitted from the payload instead of being sent as null.

    Returns:
        The assistant's reply text, or the fixed fallback string
        "Произошла ошибка" when the request or response parsing fails.
    """
    url = os.getenv('OPENAI_COMPLETION_URL')

    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + os.environ.get("OPENAI_TOKEN"),
    }

    payload = {
        "model": os.getenv('OPENAI_MODEL'),
        "messages": [
            {
                "role": "user",
                "content": prompt,
            }
        ],
    }
    # Only send max_tokens when the caller actually set it; the original
    # code sent an explicit null, which at best is redundant.
    if max_tokens is not None:
        payload["max_tokens"] = max_tokens

    try:
        # The post is inside the try so network failures also hit the
        # fallback instead of propagating; the timeout prevents the call
        # from hanging forever on a dead endpoint.
        response = requests.post(url, json=payload, headers=headers, timeout=120)
        return response.json()['choices'][0]['message']['content']
    except (requests.RequestException, ValueError, KeyError, IndexError, TypeError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the user-facing message is unchanged.
        return "Произошла ошибка"
|
32 |
+
|
33 |
+
def process_transcribation_with_assistant(prompt, transcript):
    """Run an OpenAI Assistant over *transcript* and collect the streamed output.

    A long transcript may need more output than a single run produces, so
    after the initial run the assistant is nudged with follow-up
    "keep going" messages. The number of runs is estimated from the
    transcript length (~4 characters per token, 4096-token chunks) —
    heuristic; TODO confirm against the actual model limits.

    Reads OPENAI_BASE_URL, OPENAI_TOKEN, and OPENAI_ASSISTANT_ID from the
    environment.

    Args:
        prompt: Instruction text prepended to the transcript.
        transcript: Raw transcription text to process.

    Returns:
        The concatenated text produced by all assistant runs.
    """
    base_url = os.getenv('OPENAI_BASE_URL')

    headers = {
        "Content-Type": "application/json",
        "Authorization": "Bearer " + os.environ.get("OPENAI_TOKEN"),
        # The Assistants v2 API requires this beta opt-in header.
        "OpenAI-Beta": "assistants=v2",
    }

    # Rough size heuristic: ~4 chars per token, 4096 tokens per run.
    number_of_runs = math.ceil(len(transcript) / (4 * 4096))

    # Create a fresh thread for this job; timeout avoids hanging forever.
    thread_response = requests.post(base_url + '/threads', json={},
                                    headers=headers, timeout=60)
    thread_id = thread_response.json()['id']

    _post_user_message(base_url, thread_id, headers, prompt + transcript)
    output_text = _stream_run_text(base_url, thread_id, headers)

    # One follow-up run per estimated remaining chunk.
    for _ in range(number_of_runs - 1):
        _post_user_message(base_url, thread_id, headers,
                           "Продолжай работать на текущей задачей")
        output_text += _stream_run_text(base_url, thread_id, headers)

    return output_text


def _post_user_message(base_url, thread_id, headers, content):
    """Append a user message with *content* to the given thread."""
    requests.post(base_url + '/threads/' + thread_id + '/messages',
                  json={"role": "user", "content": content},
                  headers=headers,
                  timeout=60)


def _stream_run_text(base_url, thread_id, headers):
    """Start a streamed run on the thread and return its concatenated text.

    Parses the SSE stream: accumulates the text deltas of
    'thread.message.delta' events and stops at the 'done' event.
    """
    run_response = requests.post(base_url + '/threads/' + thread_id + '/runs',
                                 json={
                                     "assistant_id": os.environ.get("OPENAI_ASSISTANT_ID"),
                                     "stream": True,
                                 },
                                 headers=headers,
                                 stream=True)

    text = ''
    event_name = ""
    for line in run_response.iter_lines(decode_unicode=True):
        if not line:
            continue  # SSE events are separated by blank lines
        if line.startswith("event:"):
            event_name = line.split(":")[1].strip()
            if event_name == 'done':
                break
        elif line.startswith("data:") and event_name == 'thread.message.delta':
            event_data = json.loads(line.split(":", 1)[1].strip())
            text += event_data['delta']['content'][0]['text']['value']
    return text
|