diff --git a/README.md b/README.md
index 335cfc3..d7cd77e 100644
--- a/README.md
+++ b/README.md
@@ -161,19 +161,19 @@ For Naga AI, get this by running the /key get command in the bot channel
 
 To use GPT4free, set API_BASE = 'http://g4f_server:1337' (only works in Docker).
 
 You can use any provider. I have included the Naga AI API base; use the matching key with it.
 
--`MAX_HISTORY`
+- `MAX_HISTORY`
 
 Number of conversation-history messages sent with each request. Default value is 10; don't set it higher than 20.
 
--`ENABLED_PLUGINS`
+- `ENABLED_PLUGINS`
 
 List of enabled plugins. Only wolfram is disabled by default. Add a Wolfram Alpha API key before adding wolfram to this list.
 
--`WOLFRAM_APP_ID`
+- `WOLFRAM_APP_ID`
 
 Wolfram Alpha API key.
 
--`DUCKDUCKGO_SAFESEARCH`
+- `DUCKDUCKGO_SAFESEARCH`
 
 Valid values are 'safe', 'moderate', 'off'.
 
--`WORLDTIME_DEFAULT_TIMEZONE`
+- `WORLDTIME_DEFAULT_TIMEZONE`
 
 The timezone should be an IANA name such as Asia/Dubai or Europe/Rome.
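The README hunk above documents the bot's environment variables. As a quick reference, here is a minimal sketch of reading those settings in Python; the variable names and the stated defaults come from the README, while the loading mechanism and any unstated fallbacks are assumptions:

```python
import os

# Hypothetical loader for the settings documented in the README.
MAX_HISTORY = int(os.getenv("MAX_HISTORY", "10"))  # README default: 10, keep it <= 20
ENABLED_PLUGINS = [p for p in os.getenv("ENABLED_PLUGINS", "").split(",") if p]
WOLFRAM_APP_ID = os.getenv("WOLFRAM_APP_ID")  # required before enabling wolfram
DUCKDUCKGO_SAFESEARCH = os.getenv("DUCKDUCKGO_SAFESEARCH", "moderate")  # 'safe' | 'moderate' | 'off'
WORLDTIME_DEFAULT_TIMEZONE = os.getenv("WORLDTIME_DEFAULT_TIMEZONE", "Asia/Dubai")
API_BASE = os.getenv("API_BASE", "http://g4f_server:1337")  # GPT4free server, Docker only
```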
diff --git a/interference/app.py b/interference/app.py
index 494f968..3e42997 100644
--- a/interference/app.py
+++ b/interference/app.py
@@ -72,15 +72,15 @@ def chat_completions():
         #AItianhu,
         #Acytoo,
         #Aichat,
-        #Ails,
-        Aivvm,
+        Ails,
+        #Aivvm,
         #Bard,
         #Bing,
         ChatBase,
-        #ChatgptAi,
-        ChatgptLogin,
-        CodeLinkAva,
-        #DeepAi,
+        ChatgptAi,
+        #ChatgptLogin,
+        #CodeLinkAva,
+        DeepAi,
         #H2o,
         #HuggingChat,
         #Opchatgpts,
@@ -91,9 +91,10 @@ def chat_completions():
         #Vercel,
         Vitalentum,
         Wewordle,
-        Ylokh,
+        #Ylokh,
         #You,
-        Yqcloud,]
+        Yqcloud
+    ]
 
     for provider in providers:
         try:
diff --git a/interference/g4f/Provider/AItianhu.py b/interference/g4f/Provider/AItianhu.py
index 2e12989..0f01e53 100644
--- a/interference/g4f/Provider/AItianhu.py
+++ b/interference/g4f/Provider/AItianhu.py
@@ -1,61 +1,38 @@
 from __future__ import annotations
 
 import json
-from aiohttp import ClientSession, http
+from curl_cffi.requests import AsyncSession
 
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from .base_provider import AsyncProvider, format_prompt
 
 
-class AItianhu(AsyncGeneratorProvider):
+class AItianhu(AsyncProvider):
     url = "https://www.aitianhu.com"
     working = True
     supports_gpt_35_turbo = True
 
     @classmethod
-    async def create_async_generator(
+    async def create_async(
         cls,
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Referer": cls.url + "/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
+    ) -> str:
+        data = {
+            "prompt": format_prompt(messages),
+            "options": {},
+            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+            "temperature": 0.8,
+            "top_p": 1,
+            **kwargs
         }
-        async with ClientSession(
-            headers=headers,
-            version=http.HttpVersion10
-        ) as session:
-            data = {
-                "prompt": format_prompt(messages),
-                "options": {},
-                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-                "temperature": 0.8,
-                "top_p": 1,
-                **kwargs
-            }
-            async with session.post(
-                cls.url + "/api/chat-process",
-                proxy=proxy,
-                json=data,
-                ssl=False,
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    line = json.loads(line.decode('utf-8'))
-                    token = line["detail"]["choices"][0]["delta"].get("content")
-                    if token:
-                        yield token
+        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
+            response = await session.post(cls.url + "/api/chat-process", json=data)
+            response.raise_for_status()
+            line = response.text.splitlines()[-1]
+            line = json.loads(line)
+            return line["text"]
 
     @classmethod
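The AItianhu rewrite above drops the aiohttp streaming client in favor of curl_cffi, which presents a real browser's TLS fingerprint and reads the whole line-delimited JSON response at once, keeping only the final line. A minimal sketch of that request pattern; the helper name is hypothetical, while the endpoint and payload shape are taken from the diff:

```python
import asyncio
import json

from curl_cffi.requests import AsyncSession  # pip install curl_cffi


async def last_line_text(url: str, payload: dict) -> str:
    # Impersonate Chrome's TLS fingerprint; verify=False mirrors the
    # provider's choice to skip certificate verification.
    async with AsyncSession(impersonate="chrome107", verify=False) as session:
        response = await session.post(url, json=payload)
        response.raise_for_status()
        # The endpoint streams JSON objects line by line; the last line
        # carries the full accumulated answer under "text".
        return json.loads(response.text.splitlines()[-1])["text"]


# Hypothetical call mirroring the provider above:
# asyncio.run(last_line_text(
#     "https://www.aitianhu.com/api/chat-process",
#     {"prompt": "Hello", "options": {}, "temperature": 0.8, "top_p": 1},
# ))
```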
"http://" + proxy - proxies = {"http": proxy, "https": proxy} - headers = { - "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.{rand1}.{rand2} Safari/537.36".format( - rand1=random.randint(0,9999), - rand2=random.randint(0,9999) - ), - "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8", - "Accept-Encoding": "gzip, deflate, br", - "Accept-Language": "en-US,en;q=0.5", - "TE": "trailers", - } - async with AsyncSession(headers=headers, proxies=proxies, impersonate="chrome107") as session: - response = await session.get(cls.url + "/openai.jpeg") - response.raise_for_status() - custom_encoding = _get_custom_encoding(response.text) - headers = { - "Content-Type": "application/json", - "Custom-Encoding": custom_encoding, - } - data = _create_payload(model, messages) - response = await session.post(cls.url + "/api/generate", json=data, headers=headers) - response.raise_for_status() - return response.text - - -def _create_payload(model: str, messages: list[dict[str, str]]) -> dict[str, Any]: - if model not in model_info: - raise ValueError(f'Model are not supported: {model}') - default_params = model_info[model]["default_params"] - return { - "messages": messages, - "playgroundId": str(uuid.uuid4()), - "chatIndex": 0, - "model": model - } | default_params - -# based on https://github.com/ading2210/vercel-llm-api -def _get_custom_encoding(text: str) -> str: - data = json.loads(base64.b64decode(text, validate=True)) - script = """ - String.prototype.fontcolor = function() {{ - return `${{this}}` - }} - var globalThis = {{marker: "mark"}}; - ({script})({key}) - """.format( - script=data["c"], key=data["a"] - ) - context = quickjs.Context() # type: ignore - token_data = json.loads(context.eval(script).json()) # type: ignore - token_data[2] = "mark" - token = {"r": token_data, "t": data["t"]} - token_str = json.dumps(token, separators=(",", ":")).encode("utf-16le") - return base64.b64encode(token_str).decode() - + return class ModelInfo(TypedDict): id: str default_params: dict[str, Any] - model_info: dict[str, ModelInfo] = { "anthropic:claude-instant-v1": { "id": "anthropic:claude-instant-v1", diff --git a/interference/g4f/Provider/Ylokh.py b/interference/g4f/Provider/Ylokh.py index 1986b6d..c7b9208 100644 --- a/interference/g4f/Provider/Ylokh.py +++ b/interference/g4f/Provider/Ylokh.py @@ -51,7 +51,9 @@ async def create_async_generator( if stream: async for line in response.content: line = line.decode() - if line.startswith("data: ") and not line.startswith("data: [DONE]"): + if line.startswith("data: "): + if line.startswith("data: [DONE]"): + break line = json.loads(line[6:-1]) content = line["choices"][0]["delta"].get("content") if content: @@ -71,6 +73,7 @@ def params(cls): ("stream", "bool"), ("proxy", "str"), ("temperature", "float"), + ("top_p", "float"), ] param = ", ".join([": ".join(p) for p in params]) return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/__init__.py b/interference/g4f/Provider/__init__.py index 2a3d820..c36782b 100644 --- a/interference/g4f/Provider/__init__.py +++ b/interference/g4f/Provider/__init__.py @@ -16,6 +16,7 @@ from .EasyChat import EasyChat from .Forefront import Forefront from .GetGpt import GetGpt +from .GptGo import GptGo from .H2o import H2o from .HuggingChat import HuggingChat from .Liaobots import Liaobots @@ -57,6 +58,7 @@ 'EasyChat', 'Forefront', 'GetGpt', + 'GptGo', 'H2o', 'HuggingChat', 
diff --git a/interference/g4f/Provider/Ylokh.py b/interference/g4f/Provider/Ylokh.py
index 1986b6d..c7b9208 100644
--- a/interference/g4f/Provider/Ylokh.py
+++ b/interference/g4f/Provider/Ylokh.py
@@ -51,7 +51,9 @@ async def create_async_generator(
             if stream:
                 async for line in response.content:
                     line = line.decode()
-                    if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
                         line = json.loads(line[6:-1])
                         content = line["choices"][0]["delta"].get("content")
                         if content:
@@ -71,6 +73,7 @@ def params(cls):
             ("stream", "bool"),
             ("proxy", "str"),
             ("temperature", "float"),
+            ("top_p", "float"),
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"
\ No newline at end of file
diff --git a/interference/g4f/Provider/__init__.py b/interference/g4f/Provider/__init__.py
index 2a3d820..c36782b 100644
--- a/interference/g4f/Provider/__init__.py
+++ b/interference/g4f/Provider/__init__.py
@@ -16,6 +16,7 @@
 from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
+from .GptGo import GptGo
 from .H2o import H2o
 from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
@@ -57,6 +58,7 @@
     'EasyChat',
     'Forefront',
     'GetGpt',
+    'GptGo',
     'H2o',
     'HuggingChat',
     'Liaobots',
diff --git a/interference/g4f/Provider/base_provider.py b/interference/g4f/Provider/base_provider.py
index 0f499c8..79f8f61 100644
--- a/interference/g4f/Provider/base_provider.py
+++ b/interference/g4f/Provider/base_provider.py
@@ -35,30 +35,6 @@ def params(cls):
         ]
         param = ", ".join([": ".join(p) for p in params])
         return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
-_cookies = {}
-
-def get_cookies(cookie_domain: str) -> dict:
-    if cookie_domain not in _cookies:
-        _cookies[cookie_domain] = {}
-        try:
-            for cookie in browser_cookie3.load(cookie_domain):
-                _cookies[cookie_domain][cookie.name] = cookie.value
-        except:
-            pass
-    return _cookies[cookie_domain]
-
-
-def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
-    if add_special_tokens or len(messages) > 1:
-        formatted = "\n".join(
-            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
-        )
-        return f"{formatted}\nAssistant:"
-    else:
-        return messages.pop()["content"]
-
 
 class AsyncProvider(BaseProvider):
@@ -67,8 +43,9 @@ def create_completion(
         cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool = False, **kwargs: Any) -> CreateResult:
-
+        stream: bool = False,
+        **kwargs
+    ) -> CreateResult:
         yield asyncio.run(cls.create_async(model, messages, **kwargs))
 
     @staticmethod
@@ -90,7 +67,20 @@ def create_completion(
         stream: bool = True,
         **kwargs
     ) -> CreateResult:
-        yield from run_generator(cls.create_async_generator(model, messages, stream=stream, **kwargs))
+        loop = asyncio.new_event_loop()
+        try:
+            asyncio.set_event_loop(loop)
+            generator = cls.create_async_generator(model, messages, stream=stream, **kwargs)
+            gen = generator.__aiter__()
+            while True:
+                try:
+                    yield loop.run_until_complete(gen.__anext__())
+                except StopAsyncIteration:
+                    break
+        finally:
+            asyncio.set_event_loop(None)
+            loop.close()
+
 
     @classmethod
     async def create_async(
@@ -99,27 +89,36 @@ async def create_async(
         messages: list[dict[str, str]],
         **kwargs
     ) -> str:
-        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)]
-        if chunks:
-            return "".join(chunks)
+        return "".join([chunk async for chunk in cls.create_async_generator(model, messages, stream=False, **kwargs)])
 
     @staticmethod
     @abstractmethod
     def create_async_generator(
-            model: str,
-            messages: list[dict[str, str]],
-            **kwargs
-    ) -> AsyncGenerator:
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs
+    ) -> AsyncGenerator:
         raise NotImplementedError()
 
 
-def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
-    loop = asyncio.new_event_loop()
-    gen = generator.__aiter__()
+_cookies = {}
 
-    while True:
+def get_cookies(cookie_domain: str) -> dict:
+    if cookie_domain not in _cookies:
+        _cookies[cookie_domain] = {}
         try:
-            yield loop.run_until_complete(gen.__anext__())
+            for cookie in browser_cookie3.load(cookie_domain):
+                _cookies[cookie_domain][cookie.name] = cookie.value
+        except:
+            pass
+    return _cookies[cookie_domain]
 
-        except StopAsyncIteration:
-            break
+
+def format_prompt(messages: list[dict[str, str]], add_special_tokens=False):
+    if add_special_tokens or len(messages) > 1:
+        formatted = "\n".join(
+            ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+        )
+        return f"{formatted}\nAssistant:"
+    else:
+        return messages[0]["content"]
\ No newline at end of file
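The base_provider change above inlines what the old module-level `run_generator` did: a synchronous generator drives an async generator by pumping `__anext__` through a private event loop, now wrapped in `try`/`finally` so the loop is always detached and closed even if the provider raises mid-stream. A standalone sketch of the same bridge (the helper and demo names are hypothetical):

```python
import asyncio
from typing import AsyncGenerator


async def _demo() -> AsyncGenerator[str, None]:
    # Stand-in for a provider's create_async_generator.
    for token in ("a", "b", "c"):
        yield token


def run_async_generator(agen):
    """Synchronously yield items from an async generator."""
    loop = asyncio.new_event_loop()
    try:
        asyncio.set_event_loop(loop)
        gen = agen.__aiter__()
        while True:
            try:
                # Drive one step of the async generator per sync yield.
                yield loop.run_until_complete(gen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        # Always detach and close the temporary loop.
        asyncio.set_event_loop(None)
        loop.close()


print(list(run_async_generator(_demo())))  # ['a', 'b', 'c']
```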
diff --git a/interference/g4f/__init__.py b/interference/g4f/__init__.py
index 90b05c8..a49e60e 100644
--- a/interference/g4f/__init__.py
+++ b/interference/g4f/__init__.py
@@ -1,5 +1,5 @@
 from __future__ import annotations
-from . import models
+from g4f import models
 from .Provider import BaseProvider
 from .typing import Any, CreateResult, Union
 import random
@@ -16,15 +16,17 @@ def create(
         auth     : Union[str, None] = None,
         **kwargs : Any) -> Union[CreateResult, str]:
         if isinstance(model, str):
-            try:
+            if model in models.ModelUtils.convert:
                 model = models.ModelUtils.convert[model]
-            except KeyError:
+            else:
                 raise Exception(f'The model: {model} does not exist')
-
         if not provider:
-            if isinstance(model.best_provider, tuple):
-                provider = random.choice(model.best_provider)
+            if isinstance(model.best_provider, list):
+                if stream:
+                    provider = random.choice([p for p in model.best_provider if p.supports_stream])
+                else:
+                    provider = random.choice(model.best_provider)
             else:
                 provider = model.best_provider
diff --git a/interference/g4f/models.py b/interference/g4f/models.py
index e095ce7..9b01fa3 100644
--- a/interference/g4f/models.py
+++ b/interference/g4f/models.py
@@ -1,9 +1,18 @@
-from __future__ import annotations
+from __future__ import annotations
 from dataclasses import dataclass
-from .Provider import BaseProvider, Bard, H2o, Vercel
-from .Provider import Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin, CodeLinkAva
-from .Provider import DeepAi, Vercel, Vitalentum, Ylokh, You, Yqcloud
-from .typing import Union
+from .typing import Union
+from .Provider import BaseProvider
+from .Provider import (
+    ChatgptLogin,
+    CodeLinkAva,
+    ChatgptAi,
+    ChatBase,
+    Vercel,
+    DeepAi,
+    Aivvm,
+    Bard,
+    H2o
+)
 
 @dataclass
 class Model:
@@ -14,24 +23,21 @@ class Model:
 # Config for HuggingChat, OpenAssistant
 # Works for Liaobots, H2o, OpenaiChat, Yqcloud, You
 default = Model(
-    name="",
-    base_provider="huggingface"
-)
+    name = "",
+    base_provider = "huggingface")
 
 # GPT-3.5 / GPT-4
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider = (
-        Vercel, Aichat, Aivvm, ChatBase, ChatgptAi, ChatgptLogin,
-        CodeLinkAva, DeepAi, Vitalentum, Ylokh, You, Yqcloud
-    )
+    best_provider = [
+        DeepAi, CodeLinkAva, ChatgptLogin, ChatgptAi, ChatBase, Aivvm
+    ]
 )
 
 gpt_4 = Model(
     name = 'gpt-4',
-    base_provider = 'openai',
-)
+    base_provider = 'openai')
 
 # Bard
 palm = Model(
diff --git a/interference/g4ftest.py b/interference/g4ftest.py
index bc3d6d3..8d158a5 100644
--- a/interference/g4ftest.py
+++ b/interference/g4ftest.py
@@ -29,19 +29,18 @@
 )
 
 providers = [
-AItianhu,
-    AItianhu,
+    #AItianhu,
     #Acytoo,
     #Aichat,
-    #Ails,
-    Aivvm,
+    Ails,
+    #Aivvm,
     #Bard,
     #Bing,
     ChatBase,
-    #ChatgptAi,
-    ChatgptLogin,
-    CodeLinkAva,
-    #DeepAi,
+    ChatgptAi,
+    #ChatgptLogin,
+    #CodeLinkAva,
+    DeepAi,
     #H2o,
     #HuggingChat,
     #Opchatgpts,
@@ -52,11 +51,12 @@
     #Vercel,
     Vitalentum,
     Wewordle,
-    Ylokh,
+    #Ylokh,
    #You,
-    Yqcloud,
+    Yqcloud
 ]
 
 for provider in providers:
+    print('Trying', provider)
     start_time = time.time()
     try:
         response = g4f.ChatCompletion.create(messages=[{"role": "user", "content": "Hello"}], model = 'gpt-4', provider=provider)
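Taken together, these changes amount to cycling through whichever providers currently work, exactly as g4ftest.py does. A minimal usage sketch; the particular provider order here is an assumption, and any of the enabled set above would do:

```python
import g4f
from g4f.Provider import ChatBase, ChatgptAi, DeepAi

# Try providers in turn and keep the first one that answers.
for provider in (DeepAi, ChatgptAi, ChatBase):
    try:
        response = g4f.ChatCompletion.create(
            messages=[{"role": "user", "content": "Hello"}],
            model="gpt-3.5-turbo",
            provider=provider,
        )
        print(provider.__name__, "->", response)
        break
    except Exception as error:
        print(provider.__name__, "failed:", error)
```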