Skip to content

Commit

Permalink
sourcery
Browse files Browse the repository at this point in the history
  • Loading branch information
noes14155 committed Nov 15, 2023
1 parent dc7679f commit b3ce9e9
Show file tree
Hide file tree
Showing 6 changed files with 32 additions and 36 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ Add your userid from telegram. If empty DM enable or disable option will be disa
To use GPT4free GPT_KEY = ''
Key from the provider (including OpenAI). Use the key issued by whichever API base you choose.
Naga AI, Novo, Genius AI, Mandrill AI tested, Use correct API_base and GPT_KEY according to the provider
For Naga AI, Get this by messaging run the /key get command in th bot channel in [Naga AI Discord](https://discord.gg/JxRBXBhabu) , [Naga AI Telegram](https://t.me/naga_ai)
Get this by running the /key get command in the bot channel in [Naga AI Discord](https://discord.gg/JxRBXBhabu), [MandrillaiAI](https://discord.com/invite/vVzbwAXt)

- `API_BASE`
To use GPT4free API_BASE = 'http://localhost:1337'
Expand Down
14 changes: 6 additions & 8 deletions bot/chat_gpt.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def __init__(self, api_key: str, api_base: str, default_model: str):
openai.api_key = api_key
openai.api_base = api_base

self.fetch_models_url = f'{api_base}/models'
self.fetch_models_url = 'http://localhost:1337/models'
self.default_model = default_model
self.models = []
self.headers = {
Expand Down Expand Up @@ -73,22 +73,20 @@ def generate_response(self, instruction: str, plugin_result: str, history: List[
*history
]
try:
response_stream = openai.ChatCompletion.create(
return openai.ChatCompletion.create(
model=model,
messages=messages,
functions=function,
function_call='auto',
stream=True
stream=True,
)
return response_stream
except Exception as e:
text = f'model not available ```{e}```'
if "rate limit" in text.lower():
retries += 1
if retries >= 3:
return text
else:
print(f"Rate limit on {model}. Retrying after 5 seconds")
time.sleep(5)
continue
print(f"Rate limit on {model}. Retrying after 5 seconds")
time.sleep(5)
continue
return text
4 changes: 2 additions & 2 deletions bot/image_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,8 +75,8 @@ async def generate_image(self, prompt: str) -> str:
str: The generated image as text.
"""
client = Client("http://127.0.0.1:7860/")
text = client.predict(prompt, api_name="/predict")
return text
return client.predict(prompt, api_name="/predict")


async def dalle_generate(self, prompt: str, size: int) -> str:
"""
Expand Down
44 changes: 21 additions & 23 deletions bot_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,23 +32,23 @@ def __init__(self):
else:
print(Fore.RED,'Invalid bot token')
exit
except:
except Exception:
print(Fore.RED,'please add your telegram bot token in the env file')
exit
try:
self.GPT_KEY = os.getenv("GPT_KEY")
except:
except Exception:
print(Fore.RED,'Please add your gpt apikey in your env file')
exit
try:
self.BOT_OWNER_ID = os.getenv("BOT_OWNER_ID")
except:
except Exception:
self.BOT_OWNER_ID = ''
print(Fore.WHITE,'Owner Id couldn\'t be determined. ToggleDM function will be disabled. To enable it add bot owner id to your environment variable')
self.HG_TOKEN = os.getenv("HG_TOKEN", '')
self.HG_IMG2TEXT = os.environ.get("HG_IMG2TEXT", 'https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large')
self.HG_TEXT2IMAGE = os.environ.get("HG_TEXT2IMAGE", "stabilityai/stable-diffusion-2-1")
self.DEFAULT_LANGUAGE = os.environ.get("DEFAULT_LANGUAGE", "en")
self.DEFAULT_LANGUAGE = os.environ.get("DEFAULT_LANGUAGE", "en")
self.PLUGINS = os.environ.get('PLUGINS', 'true').lower() == 'true'
self.MAX_HISTORY = int(os.environ.get("MAX_HISTORY", 15))
self.API_BASE = os.environ.get("API_BASE", 'https://api.naga.ac/v1')
Expand All @@ -73,7 +73,7 @@ def __init__(self):
self.gpt.fetch_chat_models()
self.personas = {}
self.valid_sizes = ['256x256','512x512','1024x1024']

self.last_msg_ids = {}
self.last_call ={}
self.cancel_flag = False
Expand Down Expand Up @@ -185,13 +185,14 @@ async def select_size(self,user_id, user_message, state):


async def chat(self, call, waiting_id, bot, process_prompt = ''):
# sourcery skip: use-contextlib-suppress
full_text = sent_text = ''
chunk = 0
user_id = call.from_user.id
markup = self.generate_keyboard('text_func')
try:
await bot.edit_message_reply_markup(chat_id=call.chat.id,message_id=self.last_msg_ids[user_id],reply_markup=None) if user_id in self.last_msg_ids else None
except:
except Exception:
pass
self.last_call[user_id] = call
self.last_msg_ids[user_id] = waiting_id
Expand All @@ -200,16 +201,16 @@ async def chat(self, call, waiting_id, bot, process_prompt = ''):
self.cancel_flag = False
try:
await bot.edit_message_reply_markup(chat_id=call.chat.id,message_id=self.last_msg_ids[user_id],reply_markup=None) if user_id in self.last_msg_ids else None
except:
except Exception:
pass
self.last_call[user_id] = call
self.last_msg_ids[user_id] = waiting_id
response_stream = self.__common_generate(call=call, process_prompt=process_prompt)
async for response in response_stream:
if self.cancel_flag:
break
if self.cancel_flag:
break

if isinstance(response, str):
if isinstance(response, str):
full_text += response
if full_text == '': continue
chunk += 1
Expand All @@ -220,7 +221,7 @@ async def chat(self, call, waiting_id, bot, process_prompt = ''):
try:
await bot.edit_message_text(chat_id=call.chat.id, message_id=waiting_id, text=full_text, reply_markup=markup)
sent_text = full_text
except:
except Exception:
continue

if full_text not in ['', sent_text]:
Expand Down Expand Up @@ -290,12 +291,12 @@ async def document(self, call, waiting_id, bot):

def escape_markdown(self,text):
escape_chars = ['_', '-', '!', '*', '[', ']', '(', ')', '~', '>', '#', '+', '=', '{','}','|','.']
regex = r"([%s])" % "|".join(map(re.escape, escape_chars))
regex = f'([{"|".join(map(re.escape, escape_chars))}])'
return re.sub(regex, r"\\\1", text)

def generate_keyboard(self,key):
if not isinstance(key, str):
raise ValueError("key must be a string")
raise ValueError("key must be a string")
builder = ReplyKeyboardBuilder()
if key == 'persona':
for persona in self.personas.keys():
Expand All @@ -314,8 +315,7 @@ def generate_keyboard(self,key):
builder = InlineKeyboardBuilder()
builder.button(text="🔄Regenerate", callback_data="regenerate")
builder.button(text="❌Cancel", callback_data="cancel")
markup = builder.as_markup()
return markup
return builder.as_markup()

async def __common_generate(self, call, process_prompt = ''):
user_id = call.from_user.id
Expand All @@ -329,23 +329,21 @@ async def __common_generate(self, call, process_prompt = ''):
)
lm = self.lm.available_lang["languages"][lang]
history = []
if user_message == "/start":
prompt = bot_messages["help"] + f"{lm}."
elif user_message == "/help":
if user_message in ["/start", "/help"]:
prompt = bot_messages["help"] + f"{lm}."
elif process_prompt != '':
prompt = process_prompt
else:
prompt = user_message


web_text = await self.ws.extract_text_from_website(prompt)
if web_text is not None:
prompt = web_text
yt_transcript = await self.yt.get_yt_transcript(user_message, lang)
if yt_transcript is not None:
prompt = yt_transcript
EXTRA_PROMPT = bot_messages["EXTRA_PROMPT"]
EXTRA_PROMPT = bot_messages["EXTRA_PROMPT"]
if user.first_name is not None:
bot_messages["bot_prompt"] += f"You should address the user as '{user.first_name}'"
bot_messages["bot_prompt"] += f'{bot_messages["translator_prompt"]} {lm}'
Expand Down Expand Up @@ -386,12 +384,12 @@ async def __common_generate(self, call, process_prompt = ''):
if should_exit:
self.db.insert_history(user_id=user_id, role="assistant", content=text)
return

print("Using function ",fn_name, "with arguments ", arguments)
result = await self.plugin.call_function(fn_name,arguments)
should_exit = False
history.append({"role": "function", "name":fn_name, "content": result})
for i in range(3):
for _ in range(3):
response_stream = self.gpt.generate_response(
bot_messages["bot_prompt"], result, history, function, model=model
)
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ pydub
pycryptodome
GitPython
flask
openai
openai==0.28
gtts
pytube
whois
Expand Down
2 changes: 1 addition & 1 deletion updater.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ def get_current_version(self) -> str:
str: The current version of the repository.
"""
# Return current version somehow
return "0.6"
return "0.7"

def get_latest_tag_from_github(self, repo_url: str) -> str:
"""
Expand Down

0 comments on commit b3ce9e9

Please sign in to comment.