Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Feature glm #92

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
*.pyd
*.pyc

TTS/models
SentimentEngine/models

ASR/resources/models
*.log
Client
.vscode
102 changes: 102 additions & 0 deletions GPT/GLMService.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
import logging
import os
import time

import GPT.machine_id
import GPT.tune as tune
import zhipuai

class GLMService():
    def __init__(self, args):
        r'''Chat service backed by ZhipuAI's ChatGLM API.

        args.APIKey    -- ZhipuAI API key used to authenticate requests.
        args.character -- character name used to select the prompt "tune".
        args.model     -- passed to tune lookup; the chat model itself is
                          currently hard-coded (see TODO below).
        args.brainwash -- stored for periodic tune re-injection (not yet
                          used in this class).

        TODO: honor args.model instead of the hard-coded model name.
        TODO: keep a conversation history so the bot has memory.
        '''
        # TODO: take the model name from args.model.
        self.model = "chatglm_pro"
        logging.info('Initializing ChatGLM Service...')

        # Character "tune" (system prompt) prepended to every request.
        self.tune = tune.get_tune(args.character, args.model)

        self.brainwash = args.brainwash

        # Number of streaming requests served so far.
        self.counter = 0
        # Authenticate the zhipuai client.
        zhipuai.api_key = args.APIKey
        logging.info('API ChatGLM initialized.')

    def ask(self, text):
        """Blocking request: return the complete model reply for ``text``."""
        stime = time.time()
        response = zhipuai.model_api.invoke(
            model=self.model,
            prompt=[
                {"role": "user", "content": self.tune + text},
            ]
        )
        answer = response['data']['choices'][0]['content']
        logging.info('ChatGLM Response: %s, time used %.2f' % (answer, time.time() - stime))
        return answer

    def ask_stream(self, text):
        """Streaming request: yield the reply one sentence at a time.

        Incoming SSE chunks are accumulated until a sentence terminator
        is seen, then the accumulated sentence is yielded; any trailing
        partial sentence is flushed after the stream ends.
        """
        stime = time.time()
        asktext = self.tune + '\n' + text
        response = zhipuai.model_api.sse_invoke(
            model=self.model,
            prompt=[
                {"role": "user", "content": asktext},
            ]
        )

        complete_text = ""
        self.counter += 1
        for event in response.events():
            if event.event == "add":
                message = event.data
                print(message, end='')
            elif event.event == "finish":
                # Append a newline so the sentence splitter below flushes
                # the final sentence.  (Bug fix: an unconditional
                # ``message = event.data`` after this branch used to
                # overwrite the newline, so the flush never happened.)
                message = event.data + '\n'
                print(event.data)
            else:
                # "error", "interrupted", or any unknown event type.
                message = event.data
                print(event.data)
            # Yield once a sentence terminator arrives and the buffer is
            # long enough to be a meaningful sentence.
            if ("。" in message or "!" in message or "?" in message or "\n" in message) and len(complete_text) > 3:
                complete_text += message
                logging.info('chatGLM Stream Response: %s, @Time %.2f' % (complete_text, time.time() - stime))
                yield complete_text.strip()
                complete_text = ""
            else:
                complete_text += message
        # Flush any trailing partial sentence left in the buffer.
        if complete_text.strip():
            logging.info('chatGLM Stream Response: %s, @Time %.2f' % (complete_text, time.time() - stime))
            yield complete_text.strip()

    @staticmethod
    def sentence_fix():
        # Sentence re-assembly helper -- not implemented yet.
        # (Declared @staticmethod so it is callable on an instance too;
        # the original lacked ``self`` and would fail if called that way.)
        pass
4 changes: 2 additions & 2 deletions GPT/GPTService.py → GPT/GPTService-old.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,9 @@

import GPT.machine_id
import GPT.tune as tune
import zhipuai


class GPTService():
class GLMService():
def __init__(self, args):
logging.info('Initializing ChatGPT Service...')
self.chatVer = args.chatVer
Expand Down
13 changes: 10 additions & 3 deletions SocketServer.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
import GPT.tune
from utils.FlushingFileHandler import FlushingFileHandler
from ASR import ASRService
from GPT import GPTService
from GPT import GLMService
from TTS import TTService
from SentimentEngine import SentimentEngine

Expand Down Expand Up @@ -82,7 +82,7 @@ def __init__(self, args):
self.paraformer = ASRService.ASRService('./ASR/resources/config.yaml')

# CHAT GPT
self.chat_gpt = GPTService.GPTService(args)
self.chat_gpt = GLMService.GLMService(args)

# TTS
self.tts = TTService.TTService(*self.char_name[args.character])
Expand Down Expand Up @@ -191,8 +191,15 @@ def process_voice(self):

return text


def test():
    """Manual smoke test: stream a ChatGLM reply to a greeting."""
    cli_args = parse_args()
    service = GLMService.GLMService(cli_args)
    # Non-streaming variant: print(service.ask('你好'))
    for sentence in service.ask_stream('你好'):
        print(sentence, end='')
if __name__ == '__main__':
# test()
try:
args = parse_args()
s = Server(args)
Expand Down
11 changes: 7 additions & 4 deletions SocketServer.spec
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import sys ; sys.setrecursionlimit(sys.getrecursionlimit() * 5)
from PyInstaller.utils.hooks import collect_all
import inspect
import torch
import os


def collect_all_and_add_to_list(package_name, datas, binaries, hiddenimports):
Expand Down Expand Up @@ -32,15 +33,17 @@ def collect_source_files(modules):
source_files = collect_source_files([torch])
source_files_toc = TOC((name, path, 'DATA') for path, name in source_files)


datas.append(('venv\lib\site-packages\librosa', 'librosa'))
datas.append(('venv\lib\site-packages\cn2an', 'cn2an'))
#for conda-env in win
# datas += collect_data_files(os.path.join(os.environ['STDLIB_DIR'], 'site-packages', 'librosa'))
datas.append(('C:\\ProgramData\\Anaconda3\\envs\\DL\\Lib\\site-packages\librosa', 'librosa'))
datas.append(('C:\\ProgramData\\Anaconda3\\envs\\DL\\Lib\\site-packages\cn2an', 'cn2an'))
datas.append(('TTS\models', 'TTS\models'))
datas.append(('venv\lib\site-packages\jieba','jieba'))
datas.append(('C:\\ProgramData\\Anaconda3\\envs\\DL\lib\site-packages\jieba','jieba'))
datas.append(('ASR', 'ASR'))
datas.append(('GPT\prompts_default', 'GPT\prompts_default'))
datas.append(('tmp', 'tmp'))
datas.append(('SentimentEngine\models\paimon_sentiment.onnx', 'SentimentEngine\models'))
datas.append(('C:\\ProgramData\\Anaconda3\\envs\\DL\\Lib\\site-packages\proces', 'proces'))
hiddenimports.extend(['tiktoken_ext.openai_public','tiktoken_ext'])


Expand Down
Binary file modified requirements.txt
Binary file not shown.
Binary file added tmp/server_processed.wav
Binary file not shown.