-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathtest.py
51 lines (46 loc) · 1.8 KB
/
test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
from time import sleep

# Make sure you do this before importing any packages that use openai under the hood.
import openai_wrapi as openai


def _create_chat_completion(**kwargs):
    """Create a chat completion, supporting both openai v1 and v0 client APIs.

    Tries the v1 interface (`openai.chat.completions.create`) first and falls
    back to the v0 interface (`openai.ChatCompletion.create`) when the v1
    attribute path does not exist (AttributeError). All keyword arguments are
    forwarded unchanged.
    """
    try:  # openai v1
        return openai.chat.completions.create(**kwargs)
    except AttributeError:  # openai v0
        return openai.ChatCompletion.create(**kwargs)


# User must have IAM permissions to invoke openai-admin-dev.
openai.set_limit(staging="dev", project="hello", limit=10)
openai.flush_cache(staging="dev")
# Limits must have been set for the project.
openai.set_project("hello")
# openai.set_caching(False)

# Second call returns cached result (provided there is a short delay between calls).
for _ in range(2):
    # User must have IAM permissions to invoke openai-proxy-dev
    # and have set OPENAI_API_KEY to be sk-XXX, where XXX corresponds to the URL
    # https://XXX.lambda-url.region.on.aws/ of your Lambda function.
    completion = _create_chat_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello world"}],
    )
    print(completion)
    sleep(1)

# Test streaming.
for _ in range(2):
    response = _create_chat_completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Tell me a story in 10 words."}],
        temperature=0,
        stream=True,
    )
    for chunk in response:
        # Also guard on delta.content: in the v1 API the first streamed chunk
        # carries only the role (content is None) while finish_reason is still
        # None, and printing it would emit the literal string "None".
        content = chunk.choices[0].delta.content
        if chunk.choices[0].finish_reason is None and content is not None:
            print(content, end="")
    print()
    sleep(1)