Skip to content

Commit

Permalink
Update gpt-3.5-turbo model
Browse files Browse the repository at this point in the history
  • Loading branch information
TheExplainthis committed Mar 3, 2023
1 parent 38c5fa0 commit 5eda54d
Show file tree
Hide file tree
Showing 7 changed files with 44 additions and 29 deletions.
4 changes: 2 additions & 2 deletions .env.example
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
OPENAI_API =
OPENAI_MODEL_ENGINE = 'gpt-3.5-turbo'
SYSTEM_MESSAGE = 'You are a helpful assistant.'
LINE_CHANNEL_SECRET =
LINE_CHANNEL_ACCESS_TOKEN =
5 changes: 5 additions & 0 deletions README.en.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@

[![license](https://img.shields.io/pypi/l/ansicolortags.svg)](LICENSE) [![Release](https://img.shields.io/github/v/release/TheExplainthis/ChatGPT-Line-Bot)](https://github.com/TheExplainthis/ChatGPT-Line-Bot/releases/)


## Update
- 2023/03/03 Model changed to chat completion: `gpt-3.5-turbo`


## Introduction
Import the ChatGPT bot to Line and start interacting with it by simply typing text in the input box. In addition to ChatGPT, the model for DALL·E 2 is also integrated. Enter `/imagine + text` to return the corresponding image, as shown in the figure below:

Expand Down
5 changes: 5 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,11 @@

[![license](https://img.shields.io/pypi/l/ansicolortags.svg)](LICENSE) [![Release](https://img.shields.io/github/v/release/TheExplainthis/ChatGPT-Line-Bot)](https://github.com/TheExplainthis/ChatGPT-Line-Bot/releases/)


## 更新
- 2023/03/03 模型換成 chat completion: `gpt-3.5-turbo`


## 介紹
在 Line 中去導入 ChatGPT Bot,只要在輸入框直接輸入文字,即可與 ChatGPT 開始互動,除了 ChatGPT 以外,也直接串上了 DALL·E 2 的模型,輸入 `/imagine + 文字`,就會回傳相對應的圖片,如下圖所示:

Expand Down
4 changes: 2 additions & 2 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,9 @@
# LINE bot credentials are read from the environment.
line_bot_api = LineBotApi(os.getenv('LINE_CHANNEL_ACCESS_TOKEN'))
handler = WebhookHandler(os.getenv('LINE_CHANNEL_SECRET'))

# Chat-completion backend (model engine, e.g. 'gpt-3.5-turbo') shared by
# both the chat and image features.
models = OpenAIModel(api_key=os.getenv('OPENAI_API'), model_engine=os.getenv('OPENAI_MODEL_ENGINE'))

# Per-user conversation history, seeded with the configured system prompt.
memory = Memory(system_message=os.getenv('SYSTEM_MESSAGE'))
chatgpt = ChatGPT(models, memory)
dalle = DALLE(models)

Expand Down
14 changes: 7 additions & 7 deletions src/chatgpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,17 +3,17 @@


class ChatGPT:
    """Conversational wrapper: routes user text through a chat-completion
    model while persisting per-user message history in a memory backend."""

    def __init__(self, model: ModelInterface, memory: MemoryInterface):
        self.model = model    # chat-completion backend
        self.memory = memory  # per-user conversation history store

    def get_response(self, user_id: str, text: str) -> str:
        """Record the user's message, query the model with the full stored
        history, store the assistant's reply, and return the reply text."""
        self.memory.append(user_id, {'role': 'user', 'content': text})
        response = self.model.chat_completion(self.memory.get(user_id))
        # Chat-completion responses carry the reply under choices[0]['message'].
        role = response['choices'][0]['message']['role']
        content = response['choices'][0]['message']['content']
        self.memory.append(user_id, {'role': role, 'content': content})
        return content

    def clean_history(self, user_id: str) -> None:
        """Discard the stored conversation history for *user_id*."""
        self.memory.remove(user_id)
Expand Down
21 changes: 15 additions & 6 deletions src/memory.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
from collections import defaultdict
from typing import Dict, List


class MemoryInterface:
    """Abstract storage contract for per-user chat message history."""

    def append(self, user_id: str, message: Dict) -> None:
        """Store one chat message (a role/content dict) for *user_id*."""
        pass

    def get(self, user_id: str) -> List[Dict]:
        """Return the stored message history for *user_id*."""
        pass

    def remove(self, user_id: str) -> None:
        """Discard all stored messages for *user_id*."""
        pass


class Memory(MemoryInterface):
    """In-memory, per-user chat history seeded with a system message."""

    def __init__(self, system_message):
        # user_id -> list of {'role': ..., 'content': ...} message dicts
        self.storage = defaultdict(list)
        self.system_message = system_message

    def initialize(self, user_id: str) -> None:
        """Seed a user's history with the configured system prompt."""
        self.storage[user_id] = [{
            'role': 'system', 'content': self.system_message
        }]

    def append(self, user_id: str, message: Dict) -> None:
        """Append one chat message, lazily initializing new users so the
        system message always comes first."""
        # (removed stray debug print of user_id)
        if not self.storage[user_id]:
            self.initialize(user_id)
        self.storage[user_id].append(message)

    def get(self, user_id: str) -> List[Dict]:
        """Return the full message history for *user_id* (may be empty).

        Annotation fixed: this returns the list of message dicts, not a str.
        """
        return self.storage[user_id]

    def remove(self, user_id: str) -> None:
        """Clear the history for *user_id*."""
        self.storage[user_id] = []
20 changes: 8 additions & 12 deletions src/models.py
Original file line number Diff line number Diff line change
@@ -1,31 +1,27 @@
from typing import List, Dict
import openai


class ModelInterface:
    """Abstract contract for chat-completion and image-generation backends."""

    def chat_completion(self, messages: List[Dict]) -> Dict:
        """Return the raw chat-completion API response for *messages*.

        Annotation fixed: the OpenAI implementation returns the whole
        response object, not a plain string.
        """
        pass

    def image_generation(self, prompt: str) -> str:
        """Generate an image from *prompt*; return value is defined by the
        implementation (presumably an image URL — implementation folded)."""
        pass


class OpenAIModel(ModelInterface):
def __init__(self, api_key: str, model_engine: str, max_tokens: int = 128, image_size: str = '512x512'):
def __init__(self, api_key: str, model_engine: str, image_size: str = '512x512'):
openai.api_key = api_key
self.model_engine = model_engine
self.max_tokens = max_tokens
self.image_size = image_size

def text_completion(self, prompt: str) -> str:
response = openai.Completion.create(
engine=self.model_engine,
prompt=prompt,
max_tokens=self.max_tokens,
stop=None,
temperature=0.5,
def chat_completion(self, messages) -> str:
response = openai.ChatCompletion.create(
model=self.model_engine,
messages=messages
)
text = response.choices[0].text.strip()
return text
return response

def image_generation(self, prompt: str) -> str:
response = openai.Image.create(
Expand Down

0 comments on commit 5eda54d

Please sign in to comment.