-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathGPT4o.py
73 lines (57 loc) · 1.94 KB
/
GPT4o.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import base64
import mimetypes

from openai import OpenAI
from tenacity import (
    retry,
    stop_after_attempt,
    wait_random_exponential,
)  # for exponential backoff
# Generate solutions from multiple images plus a text prompt via the OpenAI API.
# NOTE(review): hard-coded placeholder API key — load from an environment
# variable or secret store before deploying.
generation_key = "xxxxx" # GPT key
client = OpenAI(
    api_key=generation_key,
)
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def completion_with_backoff(**kwargs):
    """Call the chat-completions endpoint with automatic retries.

    tenacity re-invokes the call on any exception, waiting a random
    exponential backoff of 1-60 s between tries, giving up after 6 attempts.
    ``kwargs`` are passed straight through to
    ``client.chat.completions.create`` (model, messages, temperature, ...).
    """
    return client.chat.completions.create(**kwargs)
def gpt_infer(system, text, image_list, model="gpt-4-vision-preview", max_tokens=600, response_format=None):
    """Send a multimodal (images + text) chat request and return the reply.

    Args:
        system: System-prompt string for the ``system`` role message.
        text: User prompt text, appended after all image parts.
        image_list: Iterable of image file paths. ``None`` entries are
            skipped but still consume an index, so the ``Image {i}:`` labels
            keep the original positions.
        model: Chat model name to query.
        max_tokens: Cap on completion tokens.
        response_format: Optional response-format dict (e.g. JSON mode);
            omitted from the request entirely when falsy, matching the
            original call shape.

    Returns:
        Tuple ``(answer, tokens)``: the text of the first choice and the
        usage object reported by the API.
    """
    user_content = []
    for i, image in enumerate(image_list):
        if image is None:
            continue
        # Label each image so the model can reference it by index.
        user_content.append({"type": "text", "text": f"Image {i}:"})
        with open(image, "rb") as image_file:
            image_base64 = base64.b64encode(image_file.read()).decode("utf-8")
        # Fix: derive the MIME type from the file name instead of always
        # claiming JPEG; fall back to image/jpeg when it cannot be guessed,
        # which preserves the original behavior for actual JPEG inputs.
        mime_type, _ = mimetypes.guess_type(image)
        user_content.append(
            {
                "type": "image_url",
                "image_url": {
                    "url": f"data:{mime_type or 'image/jpeg'};base64,{image_base64}",
                    "detail": "low",  # low detail keeps token cost down
                },
            }
        )
    user_content.append({"type": "text", "text": text})
    messages = [
        {"role": "system", "content": system},
        {"role": "user", "content": user_content},
    ]
    # Build kwargs once; only include response_format when requested, since
    # the API rejects an explicit None for some models.
    request_kwargs = dict(
        model=model, messages=messages, temperature=0, max_tokens=max_tokens
    )
    if response_format:
        request_kwargs["response_format"] = response_format
    chat_message = completion_with_backoff(**request_kwargs)
    answer = chat_message.choices[0].message.content
    tokens = chat_message.usage
    return answer, tokens