diff --git a/README.md b/README.md index 4f825e2..111fc6c 100644 --- a/README.md +++ b/README.md @@ -153,12 +153,12 @@ Enable or disable plugins. Default value True. Add your Telegram user ID. If empty, the DM enable or disable option will be disabled. - `GPT_KEY` -To use GPT4free GPT_KEY = '' (Only working in docker) +To use GPT4free, set GPT_KEY = '' Key from the provider (including OpenAI). Use the key that matches whichever API base you choose. For Naga AI, get a key by running the /key get command in the bot channel of the [Naga AI Discord](https://discord.gg/JxRBXBhabu) or [Naga AI Telegram](https://t.me/chimer_ai) - `API_BASE` -To use GPT4free API_BASE = 'http://g4f_server:1337' (Only working in docker) +To use GPT4free, set API_BASE = 'http://localhost:1337' You can use any provider. The Naga AI API base is included as an example; use the matching key. - `MAX_HISTORY` diff --git a/docker-compose.yml b/docker-compose.yml index 8e7ea1e..7fd3d78 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,15 +9,3 @@ services: restart: always # volumes: # - ./home/personas:/app/personas -# depends_on: -# - g4f_server -# g4f_server: -# container_name: g4f_server -# ports: -# - '1337:1337' -# environment: -# - PYTHONUNBUFFERED=1 -# build: -# context: ./interference -# dockerfile: Dockerfile -# restart: always \ No newline at end of file
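The README now points `API_BASE` at a locally running interference server instead of the removed `g4f_server` container. As a quick illustration of how these two values are consumed — a minimal sketch assuming the bot talks to the API through the pre-1.0 `openai` Python package (the bot's actual client code is not part of this diff):

```python
# Sketch only: assumes the pre-1.0 openai package and a server already running.
import openai

openai.api_key = ""                        # GPT_KEY: empty when using GPT4free
openai.api_base = "http://localhost:1337"  # API_BASE from the README above

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response["choices"][0]["message"]["content"])
```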
diff --git a/interference/Dockerfile b/interference/Dockerfile deleted file mode 100644 index 0dc4846..0000000 --- a/interference/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -# Use the official lightweight Python image. -# https://hub.docker.com/_/python -FROM python:3.10-slim - -# Ensure Python outputs everything immediately (useful for real-time logging in Docker). -ENV PYTHONUNBUFFERED 1 - -# Set the working directory in the container. -WORKDIR /app - -# Update the system packages and install system-level dependencies required for compilation. -# gcc: Compiler required for some Python packages. -# build-essential: Contains necessary tools and libraries for building software. -RUN apt-get update && apt-get install -y --no-install-recommends \ - gcc \ - build-essential \ - && rm -rf /var/lib/apt/lists/* - -# Copy the project's requirements file into the container. -COPY requirements.txt /app/ - -# Upgrade pip for the latest features and install the project's Python dependencies. -RUN pip install --upgrade pip && pip install -r requirements.txt - -# Copy the entire project into the container. -# This may include all code, assets, and configuration files required to run the application. -COPY . . - - -# Define the default command to run the app using Python's module mode. -CMD ["/bin/bash", "/app/startup.sh"] \ No newline at end of file
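With the `g4f_server` Compose service and its Dockerfile gone, nothing starts the interference API automatically anymore. A minimal sketch of serving it by hand, mirroring the `WsgiToAsgi`/`uvicorn` pattern visible in `interference/app.py` (host and port are assumptions taken from the README's `API_BASE`):

```python
# Sketch only: run from the interference/ directory with requirements.txt
# installed; mirrors what app.py's main() does.
from asgiref.wsgi import WsgiToAsgi
import uvicorn

from app import app  # the Flask app defined in interference/app.py

asgi_app = WsgiToAsgi(app)
uvicorn.run(asgi_app, host="0.0.0.0", port=1337)  # port assumed from API_BASE
```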
diff --git a/interference/app.py b/interference/app.py index 39f3ec0..16dffa2 100644 --- a/interference/app.py +++ b/interference/app.py @@ -7,7 +7,7 @@ from typing import Any from flask import Flask, jsonify, request from flask_cors import CORS -from transformers import AutoTokenizer +#from transformers import AutoTokenizer import uvicorn from g4f import ChatCompletion @@ -15,6 +15,9 @@ app = Flask(__name__) CORS(app) +@app.route('/') +def index(): + return 'interference api, url: http://127.0.0.1:1337' @app.route('/models') def get_models(): @@ -119,71 +122,72 @@ def streaming(): # Get the embedding from huggingface -def get_embedding(input_text, token): - huggingface_token = token - embedding_model = 'sentence-transformers/all-mpnet-base-v2' - max_token_length = 500 - - # Load the tokenizer for the 'all-mpnet-base-v2' model - tokenizer = AutoTokenizer.from_pretrained(embedding_model) - # Tokenize the text and split the tokens into chunks of 500 tokens each - tokens = tokenizer.tokenize(input_text) - token_chunks = [tokens[i:i + max_token_length] - for i in range(0, len(tokens), max_token_length)] - - # Initialize an empty list - embeddings = [] - - # Create embeddings for each chunk - for chunk in token_chunks: - # Convert the chunk tokens back to text - chunk_text = tokenizer.convert_tokens_to_string(chunk) - - # Use the Hugging Face API to get embeddings for the chunk - api_url = f'https://api-inference.huggingface.co/pipeline/feature-extraction/{embedding_model}' - headers = {'Authorization': f'Bearer {huggingface_token}'} - chunk_text = chunk_text.replace('\n', ' ') - - # Make a POST request to get the chunk's embedding - response = requests.post(api_url, headers=headers, json={ - 'inputs': chunk_text, 'options': {'wait_for_model': True}}) - - # Parse the response and extract the embedding - chunk_embedding = response.json() - # Append the embedding to the list - embeddings.append(chunk_embedding) - - # averaging all the embeddings - # this isn't very effective - # someone a better idea? - num_embeddings = len(embeddings) - average_embedding = [sum(x) / num_embeddings for x in zip(*embeddings)] - embedding = average_embedding - return embedding - - -@app.route('/embeddings', methods=['POST']) -def embeddings(): - input_text_list = request.get_json().get('input') - input_text = ' '.join(map(str, input_text_list)) - token = request.headers.get('Authorization').replace('Bearer ', '') - embedding = get_embedding(input_text, token) - - return { - 'data': [ - { - 'embedding': embedding, - 'index': 0, - 'object': 'embedding' - } - ], - 'model': 'text-embedding-ada-002', - 'object': 'list', - 'usage': { - 'prompt_tokens': None, - 'total_tokens': None - } - } +#def get_embedding(input_text, token): +# huggingface_token = token +# embedding_model = 'sentence-transformers/all-mpnet-base-v2' +# max_token_length = 500 +# +# # Load the tokenizer for the 'all-mpnet-base-v2' model +# tokenizer = AutoTokenizer.from_pretrained(embedding_model) +# # Tokenize the text and split the tokens into chunks of 500 tokens each +# tokens = tokenizer.tokenize(input_text) +# token_chunks = [tokens[i:i + max_token_length] +# for i in range(0, len(tokens), max_token_length)] +# +# # Initialize an empty list +# embeddings = [] +# +# # Create embeddings for each chunk +# for chunk in token_chunks: +# # Convert the chunk tokens back to text +# chunk_text = tokenizer.convert_tokens_to_string(chunk) +# +# # Use the Hugging Face API to get embeddings for the chunk +# api_url = f'https://api-inference.huggingface.co/pipeline/feature-extraction/{embedding_model}' +# headers = {'Authorization': f'Bearer {huggingface_token}'} +# chunk_text = chunk_text.replace('\n', ' ') +# +# # Make a POST request to get the chunk's embedding +# response = requests.post(api_url, headers=headers, json={ +# 'inputs': chunk_text, 'options': {'wait_for_model': True}}) +# +# # Parse the response and extract the embedding +# chunk_embedding = response.json() +# # Append the embedding to the list +# embeddings.append(chunk_embedding) +# +# # averaging all the embeddings +# # this isn't very effective +# # someone a better idea? +# num_embeddings = len(embeddings) +# average_embedding = [sum(x) / num_embeddings for x in zip(*embeddings)] +# embedding = average_embedding +# return embedding +# +# +#@app.route('/embeddings', methods=['POST']) +#def embeddings(): +# input_text_list = request.get_json().get('input') +# input_text = ' '.join(map(str, input_text_list)) +# token = request.headers.get('Authorization').replace('Bearer ', '') +# embedding = get_embedding(input_text, token) +# +# return { +# 'data': [ +# { +# 'embedding': embedding, +# 'index': 0, +# 'object': 'embedding' +# } +# ], +# 'model': 'text-embedding-ada-002', +# 'object': 'list', +# 'usage': { +# 'prompt_tokens': None, +# 'total_tokens': None +# } +# } +# def main(): asgi_app = WsgiToAsgi(app)
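After these changes the embeddings endpoint is gone, so a quick smoke test only needs the routes that remain visible in the diff — the new index route and `/models` (whether `/models` returns JSON is an assumption; its handler body is not shown here):

```python
# Sketch only: verifies the pared-down interference API is reachable.
import requests

base = "http://localhost:1337"
print(requests.get(f"{base}/").text)          # index route added above
print(requests.get(f"{base}/models").json())  # model list; JSON assumed
```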
diff --git a/interference/g4f/Provider/AItianhu.py b/interference/g4f/Provider/AItianhu.py deleted file mode 100644 index 42631d7..0000000 --- a/interference/g4f/Provider/AItianhu.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import json - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class AItianhu(AsyncGeneratorProvider): - url = "https://www.aitianhu.com" - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - data = { - "prompt": format_prompt(messages), - "options": {}, - "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.", - "temperature": 0.8, - "top_p": 1, - **kwargs - } - headers = { - "Authority": cls.url, - "Accept": "application/json, text/plain, */*", - "Origin": cls.url, - "Referer": f"{cls.url}/" - } - async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: - async with session.post(f"{cls.url}/api/chat-process", json=data) as response: - response.raise_for_status() - async for line in response.iter_lines(): - if b"platform's risk control" in line: - raise RuntimeError("Platform's Risk Control") - line = json.loads(line) - if "detail" in line: - content = line["detail"]["choices"][0]["delta"].get("content") - if content: - yield content - else: - raise RuntimeError(f"Response: {line}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ("temperature", "float"), - ("top_p", "int"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/AItianhuSpace.py b/interference/g4f/Provider/AItianhuSpace.py deleted file mode 100644 index a6bf9a5..0000000 --- a/interference/g4f/Provider/AItianhuSpace.py +++ /dev/null @@ -1,79 +0,0 @@ -from __future__ import annotations - -import random, json - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider, format_prompt - -domains = { - "gpt-3.5-turbo": ".aitianhu.space", - "gpt-4": ".aitianhu.website", -} - -class AItianhuSpace(AsyncGeneratorProvider): - url = "https://chat3.aiyunos.top/" - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool = True, - **kwargs - ) -> AsyncGenerator: - if not model: - model = 
"gpt-3.5-turbo" - elif not model in domains: - raise ValueError(f"Model are not supported: {model}") - - chars = 'abcdefghijklmnopqrstuvwxyz0123456789' - rand = ''.join(random.choice(chars) for _ in range(6)) - domain = domains[model] - url = f'https://{rand}{domain}' - - async with StreamSession(impersonate="chrome110", verify=False) as session: - data = { - "prompt": format_prompt(messages), - "options": {}, - "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.", - "temperature": 0.8, - "top_p": 1, - **kwargs - } - headers = { - "Authority": url, - "Accept": "application/json, text/plain, */*", - "Origin": url, - "Referer": f"{url}/" - } - async with session.post(f"{url}/api/chat-process", json=data, headers=headers) as response: - response.raise_for_status() - async for line in response.iter_lines(): - if b"platform's risk control" in line: - raise RuntimeError("Platform's Risk Control") - line = json.loads(line) - if "detail" in line: - content = line["detail"]["choices"][0]["delta"].get("content") - if content: - yield content - elif "message" in line and "AI-4接口非常昂贵" in line["message"]: - raise RuntimeError("Rate limit for GPT 4 reached") - else: - raise RuntimeError(f"Response: {line}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("top_p", "int"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Acytoo.py b/interference/g4f/Provider/Acytoo.py deleted file mode 100644 index d36ca6d..0000000 --- a/interference/g4f/Provider/Acytoo.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class Acytoo(AsyncGeneratorProvider): - url = 'https://chat.acytoo.com' - working = True - supports_gpt_35_turbo = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - - async with ClientSession( - headers=_create_header() - ) as session: - async with session.post( - cls.url + '/api/completions', - proxy=proxy, - json=_create_payload(messages, **kwargs) - ) as response: - response.raise_for_status() - async for stream in response.content.iter_any(): - if stream: - yield stream.decode() - - -def _create_header(): - return { - 'accept': '*/*', - 'content-type': 'application/json', - } - - -def _create_payload(messages: list[dict[str, str]], temperature: float = 0.5, **kwargs): - return { - 'key' : '', - 'model' : 'gpt-3.5-turbo', - 'messages' : messages, - 'temperature' : temperature, - 'password' : '' - } \ No newline at end of file diff --git a/interference/g4f/Provider/AiService.py b/interference/g4f/Provider/AiService.py deleted file mode 100644 index 2b5a6e7..0000000 --- a/interference/g4f/Provider/AiService.py +++ /dev/null @@ -1,36 +0,0 @@ -from __future__ import annotations - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class AiService(BaseProvider): - url = "https://aiservice.vercel.app/" - working = False - supports_gpt_35_turbo = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs: Any, - ) -> CreateResult: - 
base = "\n".join(f"{message['role']}: {message['content']}" for message in messages) - base += "\nassistant: " - - headers = { - "accept": "*/*", - "content-type": "text/plain;charset=UTF-8", - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "Referer": "https://aiservice.vercel.app/chat", - } - data = {"input": base} - url = "https://aiservice.vercel.app/api/chat/answer" - response = requests.post(url, headers=headers, json=data) - response.raise_for_status() - yield response.json()["data"] diff --git a/interference/g4f/Provider/Aibn.py b/interference/g4f/Provider/Aibn.py deleted file mode 100644 index df56189..0000000 --- a/interference/g4f/Provider/Aibn.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import annotations - -import time -import hashlib - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider - - -class Aibn(AsyncGeneratorProvider): - url = "https://aibn.cc" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - async with StreamSession(impersonate="chrome107") as session: - timestamp = int(time.time()) - data = { - "messages": messages, - "pass": None, - "sign": generate_signature(timestamp, messages[-1]["content"]), - "time": timestamp - } - async with session.post(f"{cls.url}/api/generate", json=data) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - yield chunk.decode() - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - - -def generate_signature(timestamp: int, message: str, secret: str = "undefined"): - data = f"{timestamp}:{message}:{secret}" - return hashlib.sha256(data.encode()).hexdigest() \ No newline at end of file diff --git a/interference/g4f/Provider/Aichat.py b/interference/g4f/Provider/Aichat.py deleted file mode 100644 index 8edd17e..0000000 --- a/interference/g4f/Provider/Aichat.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from .base_provider import AsyncProvider, format_prompt - - -class Aichat(AsyncProvider): - url = "https://chat-gpt.org/chat" - working = True - supports_gpt_35_turbo = True - - @staticmethod - async def create_async( - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> str: - headers = { - "authority": "chat-gpt.org", - "accept": "*/*", - "cache-control": "no-cache", - "content-type": "application/json", - "origin": "https://chat-gpt.org", - "pragma": "no-cache", - "referer": "https://chat-gpt.org/chat", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"macOS"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "same-origin", - "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36", - } - async with ClientSession( - headers=headers - ) as session: - json_data = { - "message": format_prompt(messages), - "temperature": kwargs.get('temperature', 0.5), - "presence_penalty": 0, - "top_p": kwargs.get('top_p', 1), - "frequency_penalty": 0, - } - async with session.post( - "https://chat-gpt.org/api/text", - 
proxy=proxy, - json=json_data - ) as response: - response.raise_for_status() - result = await response.json() - if not result['response']: - raise Exception(f"Error Response: {result}") - return result["message"] diff --git a/interference/g4f/Provider/Ails.py b/interference/g4f/Provider/Ails.py deleted file mode 100644 index d533ae2..0000000 --- a/interference/g4f/Provider/Ails.py +++ /dev/null @@ -1,106 +0,0 @@ -from __future__ import annotations - -import hashlib -import time -import uuid -import json -from datetime import datetime -from aiohttp import ClientSession - -from ..typing import SHA256, AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class Ails(AsyncGeneratorProvider): - url: str = "https://ai.ls" - working = True - supports_gpt_35_turbo = True - - @staticmethod - async def create_async_generator( - model: str, - messages: list[dict[str, str]], - stream: bool, - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - headers = { - "authority": "api.caipacity.com", - "accept": "*/*", - "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "authorization": "Bearer free", - "client-id": str(uuid.uuid4()), - "client-v": "0.1.278", - "content-type": "application/json", - "origin": "https://ai.ls", - "referer": "https://ai.ls/", - "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": '"Windows"', - "sec-fetch-dest": "empty", - "sec-fetch-mode": "cors", - "sec-fetch-site": "cross-site", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", - "from-url": "https://ai.ls/?chat=1" - } - async with ClientSession( - headers=headers - ) as session: - timestamp = _format_timestamp(int(time.time() * 1000)) - json_data = { - "model": "gpt-3.5-turbo", - "temperature": kwargs.get("temperature", 0.6), - "stream": True, - "messages": messages, - "d": datetime.now().strftime("%Y-%m-%d"), - "t": timestamp, - "s": _hash({"t": timestamp, "m": messages[-1]["content"]}), - } - async with session.post( - "https://api.caipacity.com/v1/chat/completions", - proxy=proxy, - json=json_data - ) as response: - response.raise_for_status() - start = "data: " - async for line in response.content: - line = line.decode('utf-8') - if line.startswith(start) and line != "data: [DONE]": - line = line[len(start):-1] - line = json.loads(line) - token = line["choices"][0]["delta"].get("content") - if token: - if "ai.ls" in token or "ai.ci" in token: - raise Exception("Response Error: " + token) - yield token - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - - -def _hash(json_data: dict[str, str]) -> SHA256: - base_string: str = "%s:%s:%s:%s" % ( - json_data["t"], - json_data["m"], - "WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf", - len(json_data["m"]), - ) - - return SHA256(hashlib.sha256(base_string.encode()).hexdigest()) - - -def _format_timestamp(timestamp: int) -> str: - e = timestamp - n = e % 10 - r = n + 1 if n % 2 == 0 else n - return str(e - n + r) \ No newline at end of file diff --git a/interference/g4f/Provider/Aivvm.py b/interference/g4f/Provider/Aivvm.py deleted file mode 100644 index c4ec677..0000000 --- a/interference/g4f/Provider/Aivvm.py +++ /dev/null @@ -1,62 
+0,0 @@ -from __future__ import annotations - -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider -from ..typing import AsyncGenerator - -# to recreate this easily, send a post request to https://chat.aivvm.com/api/models -models = { - 'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'}, - 'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'}, - 'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'}, - 'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'}, - 'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'}, - 'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'}, - 'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'}, - 'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'}, -} - -class Aivvm(AsyncGeneratorProvider): - url = 'https://chat.aivvm.com' - supports_stream = True - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs - ) -> AsyncGenerator: - if not model: - model = "gpt-3.5-turbo" - elif model not in models: - raise ValueError(f"Model is not supported: {model}") - - json_data = { - "model" : models[model], - "messages" : messages, - "key" : "", - "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."), - "temperature" : kwargs.get("temperature", 0.7) - } - async with StreamSession(impersonate="chrome107") as session: - async with session.post(f"{cls.url}/api/chat", json=json_data) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - yield chunk.decode() - - @classmethod - @property - def params(cls): - params = [ - ('model', 'str'), - ('messages', 'list[dict[str, str]]'), - ('stream', 'bool'), - ('temperature', 'float'), - ] - param = ', '.join([': '.join(p) for p in params]) - return f'g4f.provider.{cls.__name__} supports: ({param})' \ No newline at end of file diff --git a/interference/g4f/Provider/Bard.py b/interference/g4f/Provider/Bard.py deleted file mode 100644 index 4e07637..0000000 --- a/interference/g4f/Provider/Bard.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import annotations - -import json -import random -import re - -from aiohttp import ClientSession - -from .base_provider import AsyncProvider, format_prompt, get_cookies - - -class Bard(AsyncProvider): - url = "https://bard.google.com" - needs_auth = True - working = True - _snlm0e = None - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - cookies: dict = None, - **kwargs - ) -> str: - prompt = format_prompt(messages) - if proxy and "://" not in proxy: - proxy = f"http://{proxy}" - if not cookies: - cookies = get_cookies(".google.com") - - headers = { - 'authority': 'bard.google.com', - 'origin': 'https://bard.google.com', - 'referer': 'https://bard.google.com/', - 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', - 'x-same-domain': '1', - } - - async with ClientSession( - cookies=cookies, - headers=headers - ) as session: - if not cls._snlm0e: - async with session.get(cls.url, proxy=proxy) as response: - text = await response.text() - - match = re.search(r'SNlM0e\":\"(.*?)\"', text) - if not match: - raise RuntimeError("No snlm0e 
value.") - cls._snlm0e = match.group(1) - - params = { - 'bl': 'boq_assistant-bard-web-server_20230326.21_p0', - '_reqid': random.randint(1111, 9999), - 'rt': 'c' - } - - data = { - 'at': cls._snlm0e, - 'f.req': json.dumps([None, json.dumps([[prompt]])]) - } - - intents = '.'.join([ - 'assistant', - 'lamda', - 'BardFrontendService' - ]) - - async with session.post( - f'{cls.url}/_/BardChatUi/data/{intents}/StreamGenerate', - data=data, - params=params, - proxy=proxy - ) as response: - response = await response.text() - response = json.loads(response.splitlines()[3])[0][2] - response = json.loads(response)[4][0][1][0] - return response - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Bing.py b/interference/g4f/Provider/Bing.py deleted file mode 100644 index e4e5651..0000000 --- a/interference/g4f/Provider/Bing.py +++ /dev/null @@ -1,287 +0,0 @@ -from __future__ import annotations - -import random -import json -import os -import urllib.parse -from aiohttp import ClientSession, ClientTimeout -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, get_cookies - - -class Bing(AsyncGeneratorProvider): - url = "https://bing.com/chat" - working = True - supports_gpt_4 = True - - @staticmethod - def create_async_generator( - model: str, - messages: list[dict[str, str]], - cookies: dict = None, **kwargs) -> AsyncGenerator: - - if not cookies: - cookies = get_cookies(".bing.com") - if len(messages) < 2: - prompt = messages[0]["content"] - context = None - else: - prompt = messages[-1]["content"] - context = create_context(messages[:-1]) - - if not cookies or "SRCHD" not in cookies: - cookies = { - 'SRCHD' : 'AF=NOFORM', - 'PPLState' : '1', - 'KievRPSSecAuth': '', - 'SUID' : '', - 'SRCHUSR' : '', - 'SRCHHPGUSR' : '', - } - return stream_generate(prompt, context, cookies) - -def create_context(messages: list[dict[str, str]]): - context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages) - - return context - -class Conversation(): - def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None: - self.conversationId = conversationId - self.clientId = clientId - self.conversationSignature = conversationSignature - -async def create_conversation(session: ClientSession) -> Conversation: - url = 'https://www.bing.com/turing/conversation/create' - async with await session.get(url) as response: - response = await response.json() - conversationId = response.get('conversationId') - clientId = response.get('clientId') - conversationSignature = response.get('conversationSignature') - - if not conversationId or not clientId or not conversationSignature: - raise Exception('Failed to create conversation.') - - return Conversation(conversationId, clientId, conversationSignature) - -async def list_conversations(session: ClientSession) -> list: - url = "https://www.bing.com/turing/conversation/chats" - async with session.get(url) as response: - response = await response.json() - return response["chats"] - -async def delete_conversation(session: ClientSession, conversation: Conversation) -> list: - url = "https://sydney.bing.com/sydney/DeleteSingleConversation" - json = { - "conversationId": conversation.conversationId, - "conversationSignature": 
conversation.conversationSignature, - "participant": {"id": conversation.clientId}, - "source": "cib", - "optionsSets": ["autosave"] - } - async with session.post(url, json=json) as response: - response = await response.json() - return response["result"]["value"] == "Success" - -class Defaults: - delimiter = "\x1e" - ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}" - - allowedMessageTypes = [ - "Chat", - "Disengaged", - "AdsQuery", - "SemanticSerp", - "GenerateContentQuery", - "SearchQuery", - "ActionRequest", - "Context", - "Progress", - "AdsQuery", - "SemanticSerp", - ] - - sliceIds = [ - "winmuid3tf", - "osbsdusgreccf", - "ttstmout", - "crchatrev", - "winlongmsgtf", - "ctrlworkpay", - "norespwtf", - "tempcacheread", - "temptacache", - "505scss0", - "508jbcars0", - "515enbotdets0", - "5082tsports", - "515vaoprvs", - "424dagslnv1s0", - "kcimgattcf", - "427startpms0", - ] - - location = { - "locale": "en-US", - "market": "en-US", - "region": "US", - "locationHints": [ - { - "country": "United States", - "state": "California", - "city": "Los Angeles", - "timezoneoffset": 8, - "countryConfidence": 8, - "Center": {"Latitude": 34.0536909, "Longitude": -118.242766}, - "RegionType": 2, - "SourceType": 1, - } - ], - } - - headers = { - 'accept': '*/*', - 'accept-language': 'en-US,en;q=0.9', - 'cache-control': 'max-age=0', - 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"', - 'sec-ch-ua-arch': '"x86"', - 'sec-ch-ua-bitness': '"64"', - 'sec-ch-ua-full-version': '"110.0.1587.69"', - 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-model': '""', - 'sec-ch-ua-platform': '"Windows"', - 'sec-ch-ua-platform-version': '"15.0.0"', - 'sec-fetch-dest': 'document', - 'sec-fetch-mode': 'navigate', - 'sec-fetch-site': 'none', - 'sec-fetch-user': '?1', - 'upgrade-insecure-requests': '1', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69', - 'x-edge-shopping-flag': '1', - 'x-forwarded-for': ip_address, - } - - optionsSets = { - "optionsSets": [ - 'saharasugg', - 'enablenewsfc', - 'clgalileo', - 'gencontentv3', - "nlu_direct_response_filter", - "deepleo", - "disable_emoji_spoken_text", - "responsible_ai_policy_235", - "enablemm", - "h3precise" - "dtappid", - "cricinfo", - "cricinfov2", - "dv3sugg", - "nojbfedge" - ] - } - -def format_message(msg: dict) -> str: - return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter - -def create_message(conversation: Conversation, prompt: str, context: str=None) -> str: - struct = { - 'arguments': [ - { - **Defaults.optionsSets, - 'source': 'cib', - 'allowedMessageTypes': Defaults.allowedMessageTypes, - 'sliceIds': Defaults.sliceIds, - 'traceId': os.urandom(16).hex(), - 'isStartOfSession': True, - 'message': Defaults.location | { - 'author': 'user', - 'inputMethod': 'Keyboard', - 'text': prompt, - 'messageType': 'Chat' - }, - 'conversationSignature': conversation.conversationSignature, - 'participant': { - 'id': conversation.clientId - }, - 'conversationId': conversation.conversationId - } - ], - 'invocationId': '0', - 'target': 'chat', - 'type': 4 - } - - if context: - struct['arguments'][0]['previousMessages'] = [{ - "author": "user", - "description": context, - "contextType": "WebPage", - "messageType": "Context", - "messageId": "discover-web--page-ping-mriduna-----" - }] - 
return format_message(struct) - -async def stream_generate( - prompt: str, - context: str=None, - cookies: dict=None - ): - async with ClientSession( - timeout=ClientTimeout(total=900), - cookies=cookies, - headers=Defaults.headers, - ) as session: - conversation = await create_conversation(session) - try: - async with session.ws_connect( - 'wss://sydney.bing.com/sydney/ChatHub', - autoping=False, - ) as wss: - - await wss.send_str(format_message({'protocol': 'json', 'version': 1})) - msg = await wss.receive(timeout=900) - - await wss.send_str(create_message(conversation, prompt, context)) - - response_txt = '' - returned_text = '' - final = False - - while not final: - msg = await wss.receive(timeout=900) - objects = msg.data.split(Defaults.delimiter) - for obj in objects: - if obj is None or not obj: - continue - - response = json.loads(obj) - if response.get('type') == 1 and response['arguments'][0].get('messages'): - message = response['arguments'][0]['messages'][0] - if (message['contentOrigin'] != 'Apology'): - if 'adaptiveCards' in message: - card = message['adaptiveCards'][0]['body'][0] - if "text" in card: - response_txt = card.get('text') - if message.get('messageType'): - inline_txt = card['inlines'][0].get('text') - response_txt += inline_txt + '\n' - elif message.get('contentType') == "IMAGE": - query = urllib.parse.quote(message.get('text')) - url = f"\nhttps://www.bing.com/images/create?q={query}" - response_txt += url - final = True - if response_txt.startswith(returned_text): - new = response_txt[len(returned_text):] - if new != "\n": - yield new - returned_text = response_txt - elif response.get('type') == 2: - result = response['item']['result'] - if result.get('error'): - raise Exception(f"{result['value']}: {result['message']}") - final = True - break - finally: - await delete_conversation(session, conversation) \ No newline at end of file diff --git a/interference/g4f/Provider/ChatBase.py b/interference/g4f/Provider/ChatBase.py deleted file mode 100644 index b98fe56..0000000 --- a/interference/g4f/Provider/ChatBase.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class ChatBase(AsyncGeneratorProvider): - url = "https://www.chatbase.co" - supports_gpt_35_turbo = True - supports_gpt_4 = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - if model == "gpt-4": - chat_id = "quran---tafseer-saadi-pdf-wbgknt7zn" - elif model == "gpt-3.5-turbo" or not model: - chat_id = "chatbase--1--pdf-p680fxvnm" - else: - raise ValueError(f"Model are not supported: {model}") - headers = { - "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", - "Accept" : "*/*", - "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "Origin" : cls.url, - "Referer" : cls.url + "/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", - } - async with ClientSession( - headers=headers - ) as session: - data = { - "messages": messages, - "captchaCode": "hadsa", - "chatId": chat_id, - "conversationId": f"kcXpqEnqUie3dnJlsRi_O-{chat_id}" - } - async with session.post("https://www.chatbase.co/api/fe/chat", json=data) as response: - response.raise_for_status() - async for stream in 
response.content.iter_any(): - yield stream.decode() - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/ChatForAi.py b/interference/g4f/Provider/ChatForAi.py deleted file mode 100644 index 779799c..0000000 --- a/interference/g4f/Provider/ChatForAi.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import annotations - -import time, hashlib - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider - - -class ChatForAi(AsyncGeneratorProvider): - url = "https://chatforai.com" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - async with StreamSession(impersonate="chrome107") as session: - conversation_id = f"id_{int(time.time())}" - prompt = messages[-1]["content"] - timestamp = int(time.time()) - data = { - "conversationId": conversation_id, - "conversationType": "chat_continuous", - "botId": "chat_continuous", - "globalSettings":{ - "baseUrl": "https://api.openai.com", - "model": model if model else "gpt-3.5-turbo", - "messageHistorySize": 5, - "temperature": 0.7, - "top_p": 1, - **kwargs - }, - "botSettings": {}, - "prompt": prompt, - "messages": messages, - "sign": generate_signature(timestamp, conversation_id, prompt), - "timestamp": timestamp - } - async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - yield chunk.decode() - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - -def generate_signature(timestamp, id, prompt): - data = f"{timestamp}:{id}:{prompt}:6B46K4pt" - return hashlib.sha256(data.encode()).hexdigest() diff --git a/interference/g4f/Provider/ChatgptAi.py b/interference/g4f/Provider/ChatgptAi.py deleted file mode 100644 index e6416cc..0000000 --- a/interference/g4f/Provider/ChatgptAi.py +++ /dev/null @@ -1,75 +0,0 @@ -from __future__ import annotations - -import re -import html -import json -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class ChatgptAi(AsyncGeneratorProvider): - url: str = "https://chatgpt.ai/" - working = True - supports_gpt_35_turbo = True - _system_data = None - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - headers = { - "authority" : "chatgpt.ai", - "accept" : "*/*", - "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "cache-control" : "no-cache", - "origin" : "https://chatgpt.ai", - "pragma" : "no-cache", - "referer" : cls.url, - "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - "sec-ch-ua-mobile" : "?0", - "sec-ch-ua-platform" : '"Windows"', - "sec-fetch-dest" : "empty", - "sec-fetch-mode" : "cors", - "sec-fetch-site" : "same-origin", - "user-agent" : "Mozilla/5.0 (Windows NT 
10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", - } - async with ClientSession( - headers=headers - ) as session: - if not cls._system_data: - async with session.get(cls.url, proxy=proxy) as response: - response.raise_for_status() - match = re.findall(r"data-system='([^']+)'", await response.text()) - if not match: - raise RuntimeError("No system data") - cls._system_data = json.loads(html.unescape(match[0])) - - data = { - "botId": cls._system_data["botId"], - "clientId": "", - "contextId": cls._system_data["contextId"], - "id": cls._system_data["id"], - "messages": messages[:-1], - "newMessage": messages[-1]["content"], - "session": cls._system_data["sessionId"], - "stream": True - } - async with session.post( - "https://chatgpt.ai/wp-json/mwai-ui/v1/chats/submit", - proxy=proxy, - json=data - ) as response: - response.raise_for_status() - start = "data: " - async for line in response.content: - line = line.decode('utf-8') - if line.startswith(start): - line = json.loads(line[len(start):-1]) - if line["type"] == "live": - yield line["data"] \ No newline at end of file diff --git a/interference/g4f/Provider/ChatgptDuo.py b/interference/g4f/Provider/ChatgptDuo.py deleted file mode 100644 index abed8a3..0000000 --- a/interference/g4f/Provider/ChatgptDuo.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import annotations - -from curl_cffi.requests import AsyncSession -from .base_provider import AsyncProvider, format_prompt - - -class ChatgptDuo(AsyncProvider): - url = "https://chatgptduo.com" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> str: - async with AsyncSession(impersonate="chrome107") as session: - prompt = format_prompt(messages), - data = { - "prompt": prompt, - "search": prompt, - "purpose": "ask", - } - response = await session.post(f"{cls.url}/", data=data) - response.raise_for_status() - data = response.json() - - cls._sources = [{ - "title": source["title"], - "url": source["link"], - "snippet": source["snippet"] - } for source in data["results"]] - - return data["answer"] - - @classmethod - def get_sources(cls): - return cls._sources - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/ChatgptLogin.py b/interference/g4f/Provider/ChatgptLogin.py deleted file mode 100644 index 3eb55a6..0000000 --- a/interference/g4f/Provider/ChatgptLogin.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -import os, re -from aiohttp import ClientSession - -from .base_provider import AsyncProvider, format_prompt - - -class ChatgptLogin(AsyncProvider): - url = "https://opchatgpts.net" - supports_gpt_35_turbo = True - working = True - _nonce = None - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> str: - headers = { - "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", - "Accept" : "*/*", - "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "Origin" : "https://opchatgpts.net", - "Alt-Used" : "opchatgpts.net", - "Referer" : 
"https://opchatgpts.net/chatgpt-free-use/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", - } - async with ClientSession( - headers=headers - ) as session: - if not cls._nonce: - async with session.get( - "https://opchatgpts.net/chatgpt-free-use/", - params={"id": os.urandom(6).hex()}, - ) as response: - result = re.search(r'data-nonce="(.*?)"', await response.text()) - if not result: - raise RuntimeError("No nonce value") - cls._nonce = result.group(1) - data = { - "_wpnonce": cls._nonce, - "post_id": 28, - "url": "https://opchatgpts.net/chatgpt-free-use", - "action": "wpaicg_chat_shortcode_message", - "message": format_prompt(messages), - "bot_id": 0 - } - async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response: - response.raise_for_status() - data = await response.json() - if "data" in data: - return data["data"] - elif "msg" in data: - raise RuntimeError(data["msg"]) - else: - raise RuntimeError(f"Response: {data}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/CodeLinkAva.py b/interference/g4f/Provider/CodeLinkAva.py deleted file mode 100644 index e3b3eb3..0000000 --- a/interference/g4f/Provider/CodeLinkAva.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -import json - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class CodeLinkAva(AsyncGeneratorProvider): - url = "https://ava-ai-ef611.web.app" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - headers = { - "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", - "Accept" : "*/*", - "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "Origin" : cls.url, - "Referer" : cls.url + "/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", - } - async with ClientSession( - headers=headers - ) as session: - data = { - "messages": messages, - "temperature": 0.6, - "stream": True, - **kwargs - } - async with session.post("https://ava-alpha-api.codelink.io/api/chat", json=data) as response: - response.raise_for_status() - async for line in response.content: - line = line.decode() - if line.startswith("data: "): - if line.startswith("data: [DONE]"): - break - line = json.loads(line[6:-1]) - content = line["choices"][0]["delta"].get("content") - if content: - yield content - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/DeepAi.py b/interference/g4f/Provider/DeepAi.py deleted file mode 100644 index f08f17b..0000000 --- a/interference/g4f/Provider/DeepAi.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import json -import 
js2py -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - - -class DeepAi(AsyncGeneratorProvider): - url: str = "https://deepai.org" - working = True - supports_gpt_35_turbo = True - - @staticmethod - async def create_async_generator( - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - - token_js = """ -var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36' -var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y; -h = Math.round(1E11 * Math.random()) + ""; -f = function () { - for (var p = [], q = 0; 64 > q;) p[q] = 0 | 4294967296 * Math.sin(++q % Math.PI); - - return function (t) { - var v, y, H, ea = [v = 1732584193, y = 4023233417, ~v, ~y], - Z = [], - A = unescape(encodeURI(t)) + "\u0080", - z = A.length; - t = --z / 4 + 2 | 15; - for (Z[--t] = 8 * z; ~z;) Z[z >> 2] |= A.charCodeAt(z) << 8 * z--; - for (q = A = 0; q < t; q += 16) { - for (z = ea; 64 > A; z = [H = z[3], v + ((H = z[0] + [v & y | ~v & H, H & v | ~H & y, v ^ y ^ H, y ^ (v | ~H)][z = A >> 4] + p[A] + ~~Z[q | [A, 5 * A + 1, 3 * A + 5, 7 * A][z] & 15]) << (z = [7, 12, 17, 22, 5, 9, 14, 20, 4, 11, 16, 23, 6, 10, 15, 21][4 * z + A++ % 4]) | H >>> -z), v, y]) v = z[1] | 0, y = z[2]; - for (A = 4; A;) ea[--A] += z[A] - } - for (t = ""; 32 > A;) t += (ea[A >> 3] >> 4 * (1 ^ A++) & 15).toString(16); - return t.split("").reverse().join("") - } -}(); - -"tryit-" + h + "-" + f(agent + f(agent + f(agent + h + "x"))); -""" - - payload = {"chat_style": "chat", "chatHistory": json.dumps(messages)} - api_key = js2py.eval_js(token_js) - headers = { - "api-key": api_key, - "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36", - **kwargs.get("headers", {}) - } - async with ClientSession( - headers=headers - ) as session: - async with session.post("https://api.deepai.org/make_me_a_sandwich", proxy=proxy, data=payload) as response: - response.raise_for_status() - async for stream in response.content.iter_any(): - if stream: - yield stream.decode() diff --git a/interference/g4f/Provider/DfeHub.py b/interference/g4f/Provider/DfeHub.py deleted file mode 100644 index d40e038..0000000 --- a/interference/g4f/Provider/DfeHub.py +++ /dev/null @@ -1,77 +0,0 @@ -from __future__ import annotations - -import json -import re -import time - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class DfeHub(BaseProvider): - url = "https://chat.dfehub.com/" - supports_stream = True - supports_gpt_35_turbo = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - headers = { - "authority" : "chat.dfehub.com", - "accept" : "*/*", - "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "content-type" : "application/json", - "origin" : "https://chat.dfehub.com", - "referer" : "https://chat.dfehub.com/", - "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - "sec-ch-ua-mobile" : "?0", - "sec-ch-ua-platform": '"macOS"', - "sec-fetch-dest" : "empty", - "sec-fetch-mode" : "cors", - "sec-fetch-site" : "same-origin", - "user-agent" : "Mozilla/5.0 (Macintosh; 
Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36", - "x-requested-with" : "XMLHttpRequest", - } - - json_data = { - "messages" : messages, - "model" : "gpt-3.5-turbo", - "temperature" : kwargs.get("temperature", 0.5), - "presence_penalty" : kwargs.get("presence_penalty", 0), - "frequency_penalty" : kwargs.get("frequency_penalty", 0), - "top_p" : kwargs.get("top_p", 1), - "stream" : True - } - - response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions", - headers=headers, json=json_data, timeout=3) - - for chunk in response.iter_lines(): - if b"detail" in chunk: - delay = re.findall(r"\d+\.\d+", chunk.decode()) - delay = float(delay[-1]) - time.sleep(delay) - yield from DfeHub.create_completion(model, messages, stream, **kwargs) - if b"content" in chunk: - data = json.loads(chunk.decode().split("data: ")[1]) - yield (data["choices"][0]["delta"]["content"]) - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("presence_penalty", "int"), - ("frequency_penalty", "int"), - ("top_p", "int"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/EasyChat.py b/interference/g4f/Provider/EasyChat.py deleted file mode 100644 index dae5196..0000000 --- a/interference/g4f/Provider/EasyChat.py +++ /dev/null @@ -1,111 +0,0 @@ -from __future__ import annotations - -import json -import random - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class EasyChat(BaseProvider): - url: str = "https://free.easychat.work" - supports_stream = True - supports_gpt_35_turbo = True - working = False - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - active_servers = [ - "https://chat10.fastgpt.me", - "https://chat9.fastgpt.me", - "https://chat1.fastgpt.me", - "https://chat2.fastgpt.me", - "https://chat3.fastgpt.me", - "https://chat4.fastgpt.me", - "https://gxos1h1ddt.fastgpt.me" - ] - - server = active_servers[kwargs.get("active_server", random.randint(0, 5))] - headers = { - "authority" : f"{server}".replace("https://", ""), - "accept" : "text/event-stream", - "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2", - "content-type" : "application/json", - "origin" : f"{server}", - "referer" : f"{server}/", - "x-requested-with" : "XMLHttpRequest", - 'plugins' : '0', - 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', - 'usesearch' : 'false', - 'x-requested-with' : 'XMLHttpRequest' - } - - json_data = { - "messages" : messages, - "stream" : stream, - "model" : model, - "temperature" : kwargs.get("temperature", 0.5), - "presence_penalty" : kwargs.get("presence_penalty", 0), - "frequency_penalty" : kwargs.get("frequency_penalty", 0), - "top_p" : kwargs.get("top_p", 1) - } - - session = requests.Session() - # init cookies from server - session.get(f"{server}/") - - response = 
session.post(f"{server}/api/openai/v1/chat/completions", - headers=headers, json=json_data, stream=stream) - - if response.status_code == 200: - - if stream == False: - json_data = response.json() - - if "choices" in json_data: - yield json_data["choices"][0]["message"]["content"] - else: - raise Exception("No response from server") - - else: - - for chunk in response.iter_lines(): - - if b"content" in chunk: - splitData = chunk.decode().split("data:") - - if len(splitData) > 1: - yield json.loads(splitData[1])["choices"][0]["delta"]["content"] - else: - continue - else: - raise Exception(f"Error {response.status_code} from server : {response.reason}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("presence_penalty", "int"), - ("frequency_penalty", "int"), - ("top_p", "int"), - ("active_server", "int"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Equing.py b/interference/g4f/Provider/Equing.py deleted file mode 100644 index 261c53c..0000000 --- a/interference/g4f/Provider/Equing.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import annotations - -import json -from abc import ABC, abstractmethod - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class Equing(BaseProvider): - url: str = 'https://next.eqing.tech/' - working = False - supports_stream = True - supports_gpt_35_turbo = True - supports_gpt_4 = False - - @staticmethod - @abstractmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - headers = { - 'authority' : 'next.eqing.tech', - 'accept' : 'text/event-stream', - 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control' : 'no-cache', - 'content-type' : 'application/json', - 'origin' : 'https://next.eqing.tech', - 'plugins' : '0', - 'pragma' : 'no-cache', - 'referer' : 'https://next.eqing.tech/', - 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', - 'usesearch' : 'false', - 'x-requested-with' : 'XMLHttpRequest' - } - - json_data = { - 'messages' : messages, - 'stream' : stream, - 'model' : model, - 'temperature' : kwargs.get('temperature', 0.5), - 'presence_penalty' : kwargs.get('presence_penalty', 0), - 'frequency_penalty' : kwargs.get('frequency_penalty', 0), - 'top_p' : kwargs.get('top_p', 1), - } - - response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions', - headers=headers, json=json_data, stream=stream) - - if not stream: - yield response.json()["choices"][0]["message"]["content"] - return - - for line in response.iter_content(chunk_size=1024): - if line: - if b'content' in line: - line_json = json.loads(line.decode('utf-8').split('data: ')[1]) - token = line_json['choices'][0]['delta'].get('content') - if token: - yield token - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": 
".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/FastGpt.py b/interference/g4f/Provider/FastGpt.py deleted file mode 100644 index ef47f75..0000000 --- a/interference/g4f/Provider/FastGpt.py +++ /dev/null @@ -1,86 +0,0 @@ -from __future__ import annotations - -import json -import random -from abc import ABC, abstractmethod - -import requests - -from ..typing import Any, CreateResult - - -class FastGpt(ABC): - url: str = 'https://chat9.fastgpt.me/' - working = False - needs_auth = False - supports_stream = True - supports_gpt_35_turbo = True - supports_gpt_4 = False - - @staticmethod - @abstractmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - headers = { - 'authority' : 'chat9.fastgpt.me', - 'accept' : 'text/event-stream', - 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control' : 'no-cache', - 'content-type' : 'application/json', - 'origin' : 'https://chat9.fastgpt.me', - 'plugins' : '0', - 'pragma' : 'no-cache', - 'referer' : 'https://chat9.fastgpt.me/', - 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', - 'usesearch' : 'false', - 'x-requested-with' : 'XMLHttpRequest', - } - - json_data = { - 'messages' : messages, - 'stream' : stream, - 'model' : model, - 'temperature' : kwargs.get('temperature', 0.5), - 'presence_penalty' : kwargs.get('presence_penalty', 0), - 'frequency_penalty' : kwargs.get('frequency_penalty', 0), - 'top_p' : kwargs.get('top_p', 1), - } - - subdomain = random.choice([ - 'jdaen979ew', - 'chat9' - ]) - - response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions', - headers=headers, json=json_data, stream=stream) - - for line in response.iter_lines(): - if line: - try: - if b'content' in line: - line_json = json.loads(line.decode('utf-8').split('data: ')[1]) - token = line_json['choices'][0]['delta'].get('content') - if token: - yield token - except: - continue - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/Forefront.py b/interference/g4f/Provider/Forefront.py deleted file mode 100644 index 8f51fb5..0000000 --- a/interference/g4f/Provider/Forefront.py +++ /dev/null @@ -1,40 +0,0 @@ -from __future__ import annotations - -import json - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class Forefront(BaseProvider): - url = "https://forefront.com" - supports_stream = True - supports_gpt_35_turbo = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - json_data = { - "text" : messages[-1]["content"], - "action" : "noauth", - "id" : "", - "parentId" : "", - "workspaceId" : "", - "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0", - "model" : "gpt-4", 
- "messages" : messages[:-1] if len(messages) > 1 else [], - "internetMode" : "auto", - } - - response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat", - json=json_data, stream=True) - - response.raise_for_status() - for token in response.iter_lines(): - if b"delta" in token: - yield json.loads(token.decode().split("data: ")[1])["delta"] diff --git a/interference/g4f/Provider/FreeGpt.py b/interference/g4f/Provider/FreeGpt.py deleted file mode 100644 index 092e1bb..0000000 --- a/interference/g4f/Provider/FreeGpt.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import annotations - -import time, hashlib, random - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider - -domains = [ - 'https://k.aifree.site', - 'https://p.aifree.site' -] - -class FreeGpt(AsyncGeneratorProvider): - url = "https://freegpts1.aifree.site/" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - async with StreamSession(impersonate="chrome107") as session: - prompt = messages[-1]["content"] - timestamp = int(time.time()) - data = { - "messages": messages, - "time": timestamp, - "pass": None, - "sign": generate_signature(timestamp, prompt) - } - url = random.choice(domains) - async with session.post(f"{url}/api/generate", json=data) as response: - response.raise_for_status() - async for chunk in response.iter_content(): - yield chunk.decode() - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - -def generate_signature(timestamp: int, message: str, secret: str = ""): - data = f"{timestamp}:{message}:{secret}" - return hashlib.sha256(data.encode()).hexdigest() \ No newline at end of file diff --git a/interference/g4f/Provider/GetGpt.py b/interference/g4f/Provider/GetGpt.py deleted file mode 100644 index b96efaa..0000000 --- a/interference/g4f/Provider/GetGpt.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -import json -import os -import uuid - -import requests -from Crypto.Cipher import AES - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class GetGpt(BaseProvider): - url = 'https://chat.getgpt.world/' - supports_stream = True - working = False - supports_gpt_35_turbo = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - headers = { - 'Content-Type' : 'application/json', - 'Referer' : 'https://chat.getgpt.world/', - 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36', - } - - data = json.dumps( - { - 'messages' : messages, - 'frequency_penalty' : kwargs.get('frequency_penalty', 0), - 'max_tokens' : kwargs.get('max_tokens', 4000), - 'model' : 'gpt-3.5-turbo', - 'presence_penalty' : kwargs.get('presence_penalty', 0), - 'temperature' : kwargs.get('temperature', 1), - 'top_p' : kwargs.get('top_p', 1), - 'stream' : True, - 'uuid' : str(uuid.uuid4()) - } - ) - - res = requests.post('https://chat.getgpt.world/api/chat/stream', - headers=headers, json={'signature': _encrypt(data)}, stream=True) - - res.raise_for_status() - for line in 
res.iter_lines(): - if b'content' in line: - line_json = json.loads(line.decode('utf-8').split('data: ')[1]) - yield (line_json['choices'][0]['delta']['content']) - - @classmethod - @property - def params(cls): - params = [ - ('model', 'str'), - ('messages', 'list[dict[str, str]]'), - ('stream', 'bool'), - ('temperature', 'float'), - ('presence_penalty', 'int'), - ('frequency_penalty', 'int'), - ('top_p', 'int'), - ('max_tokens', 'int'), - ] - param = ', '.join([': '.join(p) for p in params]) - return f'g4f.provider.{cls.__name__} supports: ({param})' - - -def _encrypt(e: str): - t = os.urandom(8).hex().encode('utf-8') - n = os.urandom(8).hex().encode('utf-8') - r = e.encode('utf-8') - - cipher = AES.new(t, AES.MODE_CBC, n) - ciphertext = cipher.encrypt(_pad_data(r)) - - return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8') - - -def _pad_data(data: bytes) -> bytes: - block_size = AES.block_size - padding_size = block_size - len(data) % block_size - padding = bytes([padding_size] * padding_size) - - return data + padding diff --git a/interference/g4f/Provider/GptGo.py b/interference/g4f/Provider/GptGo.py deleted file mode 100644 index 7db8fb0..0000000 --- a/interference/g4f/Provider/GptGo.py +++ /dev/null @@ -1,78 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession -import json - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class GptGo(AsyncGeneratorProvider): - url = "https://gptgo.ai" - supports_gpt_35_turbo = True - working = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - headers = { - "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", - "Accept" : "*/*", - "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3", - "Origin" : cls.url, - "Referer" : cls.url + "/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", - } - async with ClientSession( - headers=headers - ) as session: - async with session.get( - "https://gptgo.ai/action_get_token.php", - params={ - "q": format_prompt(messages), - "hlgpt": "default", - "hl": "en" - }, - proxy=proxy - ) as response: - response.raise_for_status() - token = (await response.json(content_type=None))["token"] - - async with session.get( - "https://gptgo.ai/action_ai_gpt.php", - params={ - "token": token, - }, - proxy=proxy - ) as response: - response.raise_for_status() - start = "data: " - async for line in response.content: - line = line.decode() - if line.startswith("data: "): - if line.startswith("data: [DONE]"): - break - line = json.loads(line[len(start):-1]) - content = line["choices"][0]["delta"].get("content") - if content: - yield content - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/H2o.py b/interference/g4f/Provider/H2o.py deleted file mode 100644 index d92bd6d..0000000 --- a/interference/g4f/Provider/H2o.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import annotations - -import json -import uuid - -from aiohttp import ClientSession - -from ..typing 
import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class H2o(AsyncGeneratorProvider): - url = "https://gpt-gm.h2o.ai" - working = True - model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1" - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - model = model if model else cls.model - headers = {"Referer": cls.url + "/"} - - async with ClientSession( - headers=headers - ) as session: - data = { - "ethicsModalAccepted": "true", - "shareConversationsWithModelAuthors": "true", - "ethicsModalAcceptedAt": "", - "activeModel": model, - "searchEnabled": "true", - } - async with session.post( - f"{cls.url}/settings", - proxy=proxy, - data=data - ) as response: - response.raise_for_status() - - async with session.post( - f"{cls.url}/conversation", - proxy=proxy, - json={"model": model}, - ) as response: - response.raise_for_status() - conversationId = (await response.json())["conversationId"] - - data = { - "inputs": format_prompt(messages), - "parameters": { - "temperature": 0.4, - "truncate": 2048, - "max_new_tokens": 1024, - "do_sample": True, - "repetition_penalty": 1.2, - "return_full_text": False, - **kwargs - }, - "stream": True, - "options": { - "id": str(uuid.uuid4()), - "response_id": str(uuid.uuid4()), - "is_retry": False, - "use_cache": False, - "web_search_id": "", - }, - } - async with session.post( - f"{cls.url}/conversation/{conversationId}", - proxy=proxy, - json=data - ) as response: - start = "data:" - async for line in response.content: - line = line.decode("utf-8") - if line and line.startswith(start): - line = json.loads(line[len(start):-1]) - if not line["token"]["special"]: - yield line["token"]["text"] - - async with session.delete( - f"{cls.url}/conversation/{conversationId}", - proxy=proxy, - json=data - ) as response: - response.raise_for_status() - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("truncate", "int"), - ("max_new_tokens", "int"), - ("do_sample", "bool"), - ("repetition_penalty", "float"), - ("return_full_text", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/HuggingChat.py b/interference/g4f/Provider/HuggingChat.py deleted file mode 100644 index f1cec77..0000000 --- a/interference/g4f/Provider/HuggingChat.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -import json, uuid - -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies - - -class HuggingChat(AsyncGeneratorProvider): - url = "https://huggingface.co/chat" - needs_auth = True - working = True - model = "meta-llama/Llama-2-70b-chat-hf" - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool = True, - proxy: str = None, - cookies: dict = None, - **kwargs - ) -> AsyncGenerator: - model = model if model else cls.model - if proxy and "://" not in proxy: - proxy = f"http://{proxy}" - if not cookies: - cookies = get_cookies(".huggingface.co") - - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', - } - async with ClientSession( - 
cookies=cookies, - headers=headers - ) as session: - async with session.post(f"{cls.url}/conversation", json={"model": model}, proxy=proxy) as response: - conversation_id = (await response.json())["conversationId"] - - send = { - "id": str(uuid.uuid4()), - "inputs": format_prompt(messages), - "is_retry": False, - "response_id": str(uuid.uuid4()), - "web_search": False - } - async with session.post(f"{cls.url}/conversation/{conversation_id}", json=send, proxy=proxy) as response: - async for line in response.content: - line = json.loads(line[:-1]) - if "type" not in line: - raise RuntimeError(f"Response: {line}") - elif line["type"] == "stream": - yield line["token"] - elif line["type"] == "finalAnswer": - break - - async with session.delete(f"{cls.url}/conversation/{conversation_id}", proxy=proxy) as response: - response.raise_for_status() - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Liaobots.py b/interference/g4f/Provider/Liaobots.py deleted file mode 100644 index ea3e0d4..0000000 --- a/interference/g4f/Provider/Liaobots.py +++ /dev/null @@ -1,89 +0,0 @@ -from __future__ import annotations - -import json -import uuid - -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider - -models = { - "gpt-4": { - "id": "gpt-4", - "name": "GPT-4", - "maxLength": 24000, - "tokenLimit": 8000, - }, - "gpt-3.5-turbo": { - "id": "gpt-3.5-turbo", - "name": "GPT-3.5", - "maxLength": 12000, - "tokenLimit": 4000, - }, - "gpt-3.5-turbo-16k": { - "id": "gpt-3.5-turbo-16k", - "name": "GPT-3.5-16k", - "maxLength": 48000, - "tokenLimit": 16000, - }, -} - -class Liaobots(AsyncGeneratorProvider): - url = "https://liaobots.com" - working = False - supports_gpt_35_turbo = True - supports_gpt_4 = True - _auth_code = None - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - auth: str = None, - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - model = model if model in models else "gpt-3.5-turbo" - headers = { - "authority": "liaobots.com", - "content-type": "application/json", - "origin": cls.url, - "referer": cls.url + "/", - "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", - } - async with ClientSession( - headers=headers - ) as session: - auth_code = auth if isinstance(auth, str) else cls._auth_code - if not auth_code: - async with session.post(cls.url + "/api/user", proxy=proxy, json={"authcode": ""}) as response: - response.raise_for_status() - auth_code = cls._auth_code = json.loads(await response.text())["authCode"] - data = { - "conversationId": str(uuid.uuid4()), - "model": models[model], - "messages": messages, - "key": "", - "prompt": "You are ChatGPT, a large language model trained by OpenAI. 
Follow the user's instructions carefully.", - } - async with session.post(cls.url + "/api/chat", proxy=proxy, json=data, headers={"x-auth-code": auth_code}) as response: - response.raise_for_status() - async for stream in response.content.iter_any(): - if stream: - yield stream.decode() - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ("auth", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Lockchat.py b/interference/g4f/Provider/Lockchat.py deleted file mode 100644 index c15eec8..0000000 --- a/interference/g4f/Provider/Lockchat.py +++ /dev/null @@ -1,64 +0,0 @@ -from __future__ import annotations - -import json - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class Lockchat(BaseProvider): - url: str = "http://supertest.lockchat.app" - supports_stream = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - temperature = float(kwargs.get("temperature", 0.7)) - payload = { - "temperature": temperature, - "messages" : messages, - "model" : model, - "stream" : True, - } - - headers = { - "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0", - } - response = requests.post("http://supertest.lockchat.app/v1/chat/completions", - json=payload, headers=headers, stream=True) - - response.raise_for_status() - for token in response.iter_lines(): - if b"The model: `gpt-4` does not exist" in token: - print("error, retrying...") - Lockchat.create_completion( - model = model, - messages = messages, - stream = stream, - temperature = temperature, - **kwargs) - - if b"content" in token: - token = json.loads(token.decode("utf-8").split("data: ")[1]) - token = token["choices"][0]["delta"].get("content") - if token: - yield (token) - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Myshell.py b/interference/g4f/Provider/Myshell.py deleted file mode 100644 index 0ddd302..0000000 --- a/interference/g4f/Provider/Myshell.py +++ /dev/null @@ -1,172 +0,0 @@ -from __future__ import annotations - -import json, uuid, hashlib, time, random - -from aiohttp import ClientSession -from aiohttp.http import WSMsgType -import asyncio - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt - - -models = { - "samantha": "1e3be7fe89e94a809408b1154a2ee3e1", - "gpt-3.5-turbo": "8077335db7cd47e29f7de486612cc7fd", - "gpt-4": "01c8de4fbfc548df903712b0922a4e01", -} - - -class Myshell(AsyncGeneratorProvider): - url = "https://app.myshell.ai/chat" - working = True - supports_gpt_35_turbo = True - supports_gpt_4 = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - if not model: - bot_id = models["samantha"] - elif model in models: - bot_id = models[model] - else: - raise ValueError(f"Model are not supported: {model}") - - user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) 
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36' - visitor_id = generate_visitor_id(user_agent) - - async with ClientSession( - headers={'User-Agent': user_agent} - ) as session: - async with session.ws_connect( - "wss://api.myshell.ai/ws/?EIO=4&transport=websocket", - autoping=False, - timeout=90 - ) as wss: - # Send and receive hello message - await wss.receive_str() - message = json.dumps({"token": None, "visitorId": visitor_id}) - await wss.send_str(f"40/chat,{message}") - await wss.receive_str() - - # Fix "need_verify_captcha" issue - await asyncio.sleep(5) - - # Create chat message - text = format_prompt(messages) - chat_data = json.dumps(["text_chat",{ - "reqId": str(uuid.uuid4()), - "botUid": bot_id, - "sourceFrom": "myshellWebsite", - "text": text, - **generate_signature(text) - }]) - - # Send chat message - chat_start = "42/chat," - chat_message = f"{chat_start}{chat_data}" - await wss.send_str(chat_message) - - # Receive messages - async for message in wss: - if message.type != WSMsgType.TEXT: - continue - # Ping back - if message.data == "2": - await wss.send_str("3") - continue - # Is not chat message - if not message.data.startswith(chat_start): - continue - data_type, data = json.loads(message.data[len(chat_start):]) - if data_type == "text_stream": - if data["data"]["text"]: - yield data["data"]["text"] - elif data["data"]["isFinal"]: - break - elif data_type in ("message_replied", "need_verify_captcha"): - raise RuntimeError(f"Received unexpected message: {data_type}") - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - - -def generate_timestamp() -> str: - return str( - int( - str(int(time.time() * 1000))[:-1] - + str( - sum( - 2 * int(digit) - if idx % 2 == 0 - else 3 * int(digit) - for idx, digit in enumerate(str(int(time.time() * 1000))[:-1]) - ) - % 10 - ) - ) - ) - -def generate_signature(text: str): - timestamp = generate_timestamp() - version = 'v1.0.0' - secret = '8@VXGK3kKHr!u2gA' - data = f"{version}#{text}#{timestamp}#{secret}" - signature = hashlib.md5(data.encode()).hexdigest() - signature = signature[::-1] - return { - "signature": signature, - "timestamp": timestamp, - "version": version - } - -def xor_hash(B: str): - r = [] - i = 0 - - def o(e, t): - o_val = 0 - for i in range(len(t)): - o_val |= r[i] << (8 * i) - return e ^ o_val - - for e in range(len(B)): - t = ord(B[e]) - r.insert(0, 255 & t) - - if len(r) >= 4: - i = o(i, r) - r = [] - - if len(r) > 0: - i = o(i, r) - - return hex(i)[2:] - -def performance() -> str: - t = int(time.time() * 1000) - e = 0 - while t == int(time.time() * 1000): - e += 1 - return hex(t)[2:] + hex(e)[2:] - -def generate_visitor_id(user_agent: str) -> str: - f = performance() - r = hex(int(random.random() * (16**16)))[2:-2] - d = xor_hash(user_agent) - e = hex(1080 * 1920)[2:] - return f"{f}-{r}-{d}-{e}-{f}" \ No newline at end of file diff --git a/interference/g4f/Provider/Opchatgpts.py b/interference/g4f/Provider/Opchatgpts.py deleted file mode 100644 index 166323b..0000000 --- a/interference/g4f/Provider/Opchatgpts.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import annotations - -from .ChatgptLogin import ChatgptLogin - - -class Opchatgpts(ChatgptLogin): - url = "https://opchatgpts.net" - working = True \ No newline at end of file diff --git a/interference/g4f/Provider/OpenAssistant.py 
b/interference/g4f/Provider/OpenAssistant.py deleted file mode 100644 index 1e9a066..0000000 --- a/interference/g4f/Provider/OpenAssistant.py +++ /dev/null @@ -1,100 +0,0 @@ -from __future__ import annotations - -import json - -from aiohttp import ClientSession - -from ..typing import Any, AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies - - -class OpenAssistant(AsyncGeneratorProvider): - url = "https://open-assistant.io/chat" - needs_auth = True - working = True - model = "OA_SFT_Llama_30B_6" - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - cookies: dict = None, - **kwargs: Any - ) -> AsyncGenerator: - if not cookies: - cookies = get_cookies("open-assistant.io") - - headers = { - 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36', - } - async with ClientSession( - cookies=cookies, - headers=headers - ) as session: - async with session.post("https://open-assistant.io/api/chat", proxy=proxy) as response: - chat_id = (await response.json())["id"] - - data = { - "chat_id": chat_id, - "content": f"[INST]\n{format_prompt(messages)}\n[/INST]", - "parent_id": None - } - async with session.post("https://open-assistant.io/api/chat/prompter_message", proxy=proxy, json=data) as response: - parent_id = (await response.json())["id"] - - data = { - "chat_id": chat_id, - "parent_id": parent_id, - "model_config_name": model if model else cls.model, - "sampling_parameters":{ - "top_k": 50, - "top_p": None, - "typical_p": None, - "temperature": 0.35, - "repetition_penalty": 1.1111111111111112, - "max_new_tokens": 1024, - **kwargs - }, - "plugins":[] - } - async with session.post("https://open-assistant.io/api/chat/assistant_message", proxy=proxy, json=data) as response: - data = await response.json() - if "id" in data: - message_id = data["id"] - elif "message" in data: - raise RuntimeError(data["message"]) - else: - response.raise_for_status() - - params = { - 'chat_id': chat_id, - 'message_id': message_id, - } - async with session.post("https://open-assistant.io/api/chat/events", proxy=proxy, params=params) as response: - start = "data: " - async for line in response.content: - line = line.decode("utf-8") - if line and line.startswith(start): - line = json.loads(line[len(start):]) - if line["event_type"] == "token": - yield line["text"] - - params = { - 'chat_id': chat_id, - } - async with session.delete("https://open-assistant.io/api/chat", proxy=proxy, params=params) as response: - response.raise_for_status() - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/OpenaiChat.py b/interference/g4f/Provider/OpenaiChat.py deleted file mode 100644 index f7dc829..0000000 --- a/interference/g4f/Provider/OpenaiChat.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import annotations - -from curl_cffi.requests import AsyncSession -import uuid -import json - -from .base_provider import AsyncProvider, get_cookies, format_prompt -from ..typing import AsyncGenerator - - -class OpenaiChat(AsyncProvider): - url = "https://chat.openai.com" - needs_auth = True - working = True - supports_gpt_35_turbo = True - _access_token = None - - @classmethod - async 
def create_async( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - access_token: str = None, - cookies: dict = None, - **kwargs: dict - ) -> AsyncGenerator: - proxies = {"https": proxy} - if not access_token: - access_token = await cls.get_access_token(cookies, proxies) - headers = { - "Accept": "text/event-stream", - "Authorization": f"Bearer {access_token}", - } - async with AsyncSession(proxies=proxies, headers=headers, impersonate="chrome107") as session: - messages = [ - { - "id": str(uuid.uuid4()), - "author": {"role": "user"}, - "content": {"content_type": "text", "parts": [format_prompt(messages)]}, - }, - ] - data = { - "action": "next", - "messages": messages, - "conversation_id": None, - "parent_message_id": str(uuid.uuid4()), - "model": "text-davinci-002-render-sha", - "history_and_training_disabled": True, - } - response = await session.post("https://chat.openai.com/backend-api/conversation", json=data) - response.raise_for_status() - last_message = None - for line in response.content.decode().splitlines(): - if line.startswith("data: "): - line = line[6:] - if line == "[DONE]": - break - line = json.loads(line) - if "message" in line: - last_message = line["message"]["content"]["parts"][0] - return last_message - - - @classmethod - async def get_access_token(cls, cookies: dict = None, proxies: dict = None) -> str: - if not cls._access_token: - cookies = cookies if cookies else get_cookies("chat.openai.com") - async with AsyncSession(proxies=proxies, cookies=cookies, impersonate="chrome107") as session: - response = await session.get("https://chat.openai.com/api/auth/session") - response.raise_for_status() - cls._access_token = response.json()["accessToken"] - return cls._access_token - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ("access_token", "str"), - ("cookies", "dict[str, str]") - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/PerplexityAi.py b/interference/g4f/Provider/PerplexityAi.py deleted file mode 100644 index c7e58ad..0000000 --- a/interference/g4f/Provider/PerplexityAi.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations - -import json -import time -import base64 -from curl_cffi.requests import AsyncSession - -from .base_provider import AsyncProvider, format_prompt, get_cookies - - -class PerplexityAi(AsyncProvider): - url = "https://www.perplexity.ai" - working = False - supports_gpt_35_turbo = True - _sources = [] - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> str: - url = cls.url + "/socket.io/?EIO=4&transport=polling" - headers = { - "Referer": f"{cls.url}/" - } - async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session: - url_session = "https://www.perplexity.ai/api/auth/session" - response = await session.get(url_session) - response.raise_for_status() - - url_session = "https://www.perplexity.ai/api/auth/session" - response = await session.get(url_session) - response.raise_for_status() - - response = await session.get(url, params={"t": timestamp()}) - response.raise_for_status() - sid = json.loads(response.text[1:])["sid"] - - response = await session.get(url, params={"t": timestamp(), "sid": sid}) - 
response.raise_for_status() - - data = '40{"jwt":"anonymous-ask-user"}' - response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data) - response.raise_for_status() - - response = await session.get(url, params={"t": timestamp(), "sid": sid}) - response.raise_for_status() - - data = "424" + json.dumps([ - "perplexity_ask", - format_prompt(messages), - { - "version":"2.1", - "source":"default", - "language":"en", - "timezone": time.tzname[0], - "search_focus":"internet", - "mode":"concise" - } - ]) - response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data) - response.raise_for_status() - - while True: - response = await session.get(url, params={"t": timestamp(), "sid": sid}) - response.raise_for_status() - for line in response.text.splitlines(): - if line.startswith("434"): - result = json.loads(json.loads(line[3:])[0]["text"]) - - cls._sources = [{ - "title": source["name"], - "url": source["url"], - "snippet": source["snippet"] - } for source in result["web_results"]] - - return result["answer"] - - @classmethod - def get_sources(cls): - return cls._sources - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - - -def timestamp() -> str: - return base64.urlsafe_b64encode(int(time.time()-1407782612).to_bytes(4, 'big')).decode() \ No newline at end of file diff --git a/interference/g4f/Provider/Phind.py b/interference/g4f/Provider/Phind.py deleted file mode 100644 index 0db4e3c..0000000 --- a/interference/g4f/Provider/Phind.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import annotations - -import random -from datetime import datetime - -from ..typing import AsyncGenerator -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class Phind(AsyncGeneratorProvider): - url = "https://www.phind.com" - working = True - supports_gpt_4 = True - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - chars = 'abcdefghijklmnopqrstuvwxyz0123456789' - user_id = ''.join(random.choice(chars) for _ in range(24)) - data = { - "question": format_prompt(messages), - "webResults": [], - "options": { - "date": datetime.now().strftime("%d.%m.%Y"), - "language": "en", - "detailed": True, - "anonUserId": user_id, - "answerModel": "GPT-4", - "creativeMode": False, - "customLinks": [] - }, - "context":"" - } - headers = { - "Authority": cls.url, - "Accept": "application/json, text/plain, */*", - "Origin": cls.url, - "Referer": f"{cls.url}/" - } - async with StreamSession(headers=headers, timeout=(5, 180), proxies={"https": proxy}, impersonate="chrome107") as session: - async with session.post(f"{cls.url}/api/infer/answer", json=data) as response: - response.raise_for_status() - new_lines = 0 - async for line in response.iter_lines(): - if not line: - continue - if line.startswith(b"data: "): - line = line[6:] - if line.startswith(b""): - continue - if line: - if new_lines: - yield "".join(["\n" for _ in range(int(new_lines / 2))]) - new_lines = 0 - yield line.decode() - else: - new_lines += 1 - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ] - param = ", 
".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Raycast.py b/interference/g4f/Provider/Raycast.py deleted file mode 100644 index 7ddc8ac..0000000 --- a/interference/g4f/Provider/Raycast.py +++ /dev/null @@ -1,72 +0,0 @@ -from __future__ import annotations - -import json - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class Raycast(BaseProvider): - url = "https://raycast.com" - supports_gpt_35_turbo = True - supports_gpt_4 = True - supports_stream = True - needs_auth = True - working = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs: Any, - ) -> CreateResult: - auth = kwargs.get('auth') - headers = { - 'Accept': 'application/json', - 'Accept-Language': 'en-US,en;q=0.9', - 'Authorization': f'Bearer {auth}', - 'Content-Type': 'application/json', - 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0', - } - parsed_messages = [] - for message in messages: - parsed_messages.append({ - 'author': message['role'], - 'content': {'text': message['content']} - }) - data = { - "debug": False, - "locale": "en-CN", - "messages": parsed_messages, - "model": model, - "provider": "openai", - "source": "ai_chat", - "system_instruction": "markdown", - "temperature": 0.5 - } - response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True) - for token in response.iter_lines(): - if b'data: ' not in token: - continue - completion_chunk = json.loads(token.decode().replace('data: ', '')) - token = completion_chunk['text'] - if token != None: - yield token - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("top_p", "int"), - ("model", "str"), - ("auth", "str"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" diff --git a/interference/g4f/Provider/Theb.py b/interference/g4f/Provider/Theb.py deleted file mode 100644 index 72fce3a..0000000 --- a/interference/g4f/Provider/Theb.py +++ /dev/null @@ -1,97 +0,0 @@ -from __future__ import annotations - -import json -import random - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class Theb(BaseProvider): - url = "https://theb.ai" - working = True - supports_stream = True - supports_gpt_35_turbo = True - needs_auth = True - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) - conversation += "\nassistant: " - - auth = kwargs.get("auth", { - "bearer_token":"free", - "org_id":"theb", - }) - - bearer_token = auth["bearer_token"] - org_id = auth["org_id"] - - headers = { - 'authority' : 'beta.theb.ai', - 'accept' : 'text/event-stream', - 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', - 'authorization' : 'Bearer '+bearer_token, - 'content-type' : 'application/json', - 'origin' : 'https://beta.theb.ai', - 'referer' : 'https://beta.theb.ai/home', - 'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 
'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', - 'x-ai-model' : 'ee8d4f29cb7047f78cbe84313ed6ace8', - } - - req_rand = random.randint(100000000, 9999999999) - - json_data: dict[str, Any] = { - "text" : conversation, - "category" : "04f58f64a4aa4191a957b47290fee864", - "model" : "ee8d4f29cb7047f78cbe84313ed6ace8", - "model_params": { - "system_prompt" : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}", - "temperature" : kwargs.get("temperature", 1), - "top_p" : kwargs.get("top_p", 1), - "frequency_penalty" : kwargs.get("frequency_penalty", 0), - "presence_penalty" : kwargs.get("presence_penalty", 0), - "long_term_memory" : "auto" - } - } - - response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}", - headers=headers, json=json_data, stream=True) - - response.raise_for_status() - content = "" - next_content = "" - for chunk in response.iter_lines(): - if b"content" in chunk: - next_content = content - data = json.loads(chunk.decode().split("data: ")[1]) - content = data["content"] - yield data["content"].replace(next_content, "") - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("auth", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("presence_penalty", "int"), - ("frequency_penalty", "int"), - ("top_p", "int") - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/V50.py b/interference/g4f/Provider/V50.py deleted file mode 100644 index 81a95ba..0000000 --- a/interference/g4f/Provider/V50.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations - -import uuid - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class V50(BaseProvider): - url = 'https://p5.v50.ltd' - supports_gpt_35_turbo = True - supports_stream = False - needs_auth = False - working = False - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - conversation = "\n".join(f"{message['role']}: {message['content']}" for message in messages) - conversation += "\nassistant: " - - payload = { - "prompt" : conversation, - "options" : {}, - "systemMessage" : ".", - "temperature" : kwargs.get("temperature", 0.4), - "top_p" : kwargs.get("top_p", 0.4), - "model" : model, - "user" : str(uuid.uuid4()) - } - - headers = { - 'authority' : 'p5.v50.ltd', - 'accept' : 'application/json, text/plain, */*', - 'accept-language' : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', - 'content-type' : 'application/json', - 'origin' : 'https://p5.v50.ltd', - 'referer' : 'https://p5.v50.ltd/', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36' - } - response = requests.post("https://p5.v50.ltd/api/chat-process", - json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) - - if "https://fk1.v50.ltd" not in response.text: - yield response.text - - @classmethod - @property - def 
params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ("top_p", "int"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/Vercel.py b/interference/g4f/Provider/Vercel.py deleted file mode 100644 index 2d20ca6..0000000 --- a/interference/g4f/Provider/Vercel.py +++ /dev/null @@ -1,377 +0,0 @@ -from __future__ import annotations - -import json, base64, requests, execjs, random, uuid - -from ..typing import Any, TypedDict, CreateResult -from .base_provider import BaseProvider -from abc import abstractmethod - - -class Vercel(BaseProvider): - url = 'https://sdk.vercel.ai' - working = True - supports_gpt_35_turbo = True - supports_stream = True - - @staticmethod - @abstractmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs - ) -> CreateResult: - if not model: - model = "gpt-3.5-turbo" - elif model not in model_info: - raise ValueError(f"Model are not supported: {model}") - - headers = { - 'authority' : 'sdk.vercel.ai', - 'accept' : '*/*', - 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control' : 'no-cache', - 'content-type' : 'application/json', - 'custom-encoding' : get_anti_bot_token(), - 'origin' : 'https://sdk.vercel.ai', - 'pragma' : 'no-cache', - 'referer' : 'https://sdk.vercel.ai/', - 'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % ( - random.randint(99, 999), - random.randint(99, 999) - ) - } - - json_data = { - 'model' : model_info[model]['id'], - 'messages' : messages, - 'playgroundId': str(uuid.uuid4()), - 'chatIndex' : 0} | model_info[model]['default_params'] - - max_retries = kwargs.get('max_retries', 20) - for i in range(max_retries): - response = requests.post('https://sdk.vercel.ai/api/generate', - headers=headers, json=json_data, stream=True) - try: - response.raise_for_status() - except: - continue - for token in response.iter_content(chunk_size=None): - yield token.decode() - break - - -def get_anti_bot_token() -> str: - headers = { - 'authority' : 'sdk.vercel.ai', - 'accept' : '*/*', - 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control' : 'no-cache', - 'pragma' : 'no-cache', - 'referer' : 'https://sdk.vercel.ai/', - 'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % ( - random.randint(99, 999), - random.randint(99, 999) - ) - } - - response = requests.get('https://sdk.vercel.ai/openai.jpeg', - headers=headers).text - - raw_data = json.loads(base64.b64decode(response, - validate=True)) - - js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `${this}`}; - return (%s)(%s)''' % (raw_data['c'], 
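        # raw_data is the base64-decoded JSON served by /openai.jpeg:
        # 'c' carries JavaScript source, 'a' its argument, and 't' a
        # server-issued token; executing c(a) locally via execjs and
        # base64-encoding {'r': result, 't': token} as UTF-16LE yields
        # the value sent back in the 'custom-encoding' request header.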
raw_data['a']) - - raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']}, - separators = (",", ":")) - - return base64.b64encode(raw_token.encode('utf-16le')).decode() - -class ModelInfo(TypedDict): - id: str - default_params: dict[str, Any] - -model_info: dict[str, ModelInfo] = { - 'claude-instant-v1': { - 'id': 'anthropic:claude-instant-v1', - 'default_params': { - 'temperature': 1, - 'maximumLength': 1024, - 'topP': 1, - 'topK': 1, - 'presencePenalty': 1, - 'frequencyPenalty': 1, - 'stopSequences': ['\n\nHuman:'], - }, - }, - 'claude-v1': { - 'id': 'anthropic:claude-v1', - 'default_params': { - 'temperature': 1, - 'maximumLength': 1024, - 'topP': 1, - 'topK': 1, - 'presencePenalty': 1, - 'frequencyPenalty': 1, - 'stopSequences': ['\n\nHuman:'], - }, - }, - 'claude-v2': { - 'id': 'anthropic:claude-v2', - 'default_params': { - 'temperature': 1, - 'maximumLength': 1024, - 'topP': 1, - 'topK': 1, - 'presencePenalty': 1, - 'frequencyPenalty': 1, - 'stopSequences': ['\n\nHuman:'], - }, - }, - 'a16z-infra/llama7b-v2-chat': { - 'id': 'replicate:a16z-infra/llama7b-v2-chat', - 'default_params': { - 'temperature': 0.75, - 'maximumLength': 3000, - 'topP': 1, - 'repetitionPenalty': 1, - }, - }, - 'a16z-infra/llama13b-v2-chat': { - 'id': 'replicate:a16z-infra/llama13b-v2-chat', - 'default_params': { - 'temperature': 0.75, - 'maximumLength': 3000, - 'topP': 1, - 'repetitionPenalty': 1, - }, - }, - 'replicate/llama-2-70b-chat': { - 'id': 'replicate:replicate/llama-2-70b-chat', - 'default_params': { - 'temperature': 0.75, - 'maximumLength': 3000, - 'topP': 1, - 'repetitionPenalty': 1, - }, - }, - 'bigscience/bloom': { - 'id': 'huggingface:bigscience/bloom', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 0.95, - 'topK': 4, - 'repetitionPenalty': 1.03, - }, - }, - 'google/flan-t5-xxl': { - 'id': 'huggingface:google/flan-t5-xxl', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 0.95, - 'topK': 4, - 'repetitionPenalty': 1.03, - }, - }, - 'EleutherAI/gpt-neox-20b': { - 'id': 'huggingface:EleutherAI/gpt-neox-20b', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 0.95, - 'topK': 4, - 'repetitionPenalty': 1.03, - 'stopSequences': [], - }, - }, - 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': { - 'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', - 'default_params': { - 'maximumLength': 1024, - 'typicalP': 0.2, - 'repetitionPenalty': 1, - }, - }, - 'OpenAssistant/oasst-sft-1-pythia-12b': { - 'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b', - 'default_params': { - 'maximumLength': 1024, - 'typicalP': 0.2, - 'repetitionPenalty': 1, - }, - }, - 'bigcode/santacoder': { - 'id': 'huggingface:bigcode/santacoder', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 0.95, - 'topK': 4, - 'repetitionPenalty': 1.03, - }, - }, - 'command-light-nightly': { - 'id': 'cohere:command-light-nightly', - 'default_params': { - 'temperature': 0.9, - 'maximumLength': 1024, - 'topP': 1, - 'topK': 0, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'command-nightly': { - 'id': 'cohere:command-nightly', - 'default_params': { - 'temperature': 0.9, - 'maximumLength': 1024, - 'topP': 1, - 'topK': 0, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'gpt-4': { - 'id': 'openai:gpt-4', - 'default_params': { - 'temperature': 0.7, - 'maximumLength': 8192, - 'topP': 1, - 'presencePenalty': 0, - 
'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'gpt-4-0613': { - 'id': 'openai:gpt-4-0613', - 'default_params': { - 'temperature': 0.7, - 'maximumLength': 8192, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'code-davinci-002': { - 'id': 'openai:code-davinci-002', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'gpt-3.5-turbo': { - 'id': 'openai:gpt-3.5-turbo', - 'default_params': { - 'temperature': 0.7, - 'maximumLength': 4096, - 'topP': 1, - 'topK': 1, - 'presencePenalty': 1, - 'frequencyPenalty': 1, - 'stopSequences': [], - }, - }, - 'gpt-3.5-turbo-16k': { - 'id': 'openai:gpt-3.5-turbo-16k', - 'default_params': { - 'temperature': 0.7, - 'maximumLength': 16280, - 'topP': 1, - 'topK': 1, - 'presencePenalty': 1, - 'frequencyPenalty': 1, - 'stopSequences': [], - }, - }, - 'gpt-3.5-turbo-16k-0613': { - 'id': 'openai:gpt-3.5-turbo-16k-0613', - 'default_params': { - 'temperature': 0.7, - 'maximumLength': 16280, - 'topP': 1, - 'topK': 1, - 'presencePenalty': 1, - 'frequencyPenalty': 1, - 'stopSequences': [], - }, - }, - 'text-ada-001': { - 'id': 'openai:text-ada-001', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'text-babbage-001': { - 'id': 'openai:text-babbage-001', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'text-curie-001': { - 'id': 'openai:text-curie-001', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'text-davinci-002': { - 'id': 'openai:text-davinci-002', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 1024, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, - 'text-davinci-003': { - 'id': 'openai:text-davinci-003', - 'default_params': { - 'temperature': 0.5, - 'maximumLength': 4097, - 'topP': 1, - 'presencePenalty': 0, - 'frequencyPenalty': 0, - 'stopSequences': [], - }, - }, -} \ No newline at end of file diff --git a/interference/g4f/Provider/Vitalentum.py b/interference/g4f/Provider/Vitalentum.py deleted file mode 100644 index d526542..0000000 --- a/interference/g4f/Provider/Vitalentum.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import annotations - -import json -from aiohttp import ClientSession - -from .base_provider import AsyncGeneratorProvider -from ..typing import AsyncGenerator - -class Vitalentum(AsyncGeneratorProvider): - url = "https://app.vitalentum.io" - working = True - supports_gpt_35_turbo = True - - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - headers = { - "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36", - "Accept" : "text/event-stream", - "Accept-language" : "de,en-US;q=0.7,en;q=0.3", - "Origin" : cls.url, - "Referer" : cls.url + "/", - "Sec-Fetch-Dest" : "empty", - "Sec-Fetch-Mode" : "cors", - "Sec-Fetch-Site" : "same-origin", - } - conversation = json.dumps({"history": [{ - "speaker": "human" if message["role"] == "user" else "bot", - "text": message["content"], - } for 
message in messages]}) - data = { - "conversation": conversation, - "temperature": 0.7, - **kwargs - } - async with ClientSession( - headers=headers - ) as session: - async with session.post(cls.url + "/api/converse-edge", json=data, proxy=proxy) as response: - response.raise_for_status() - async for line in response.content: - line = line.decode() - if line.startswith("data: "): - if line.startswith("data: [DONE]"): - break - line = json.loads(line[6:-1]) - content = line["choices"][0]["delta"].get("content") - if content: - yield content - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("temperature", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/Wewordle.py b/interference/g4f/Provider/Wewordle.py deleted file mode 100644 index 26d040c..0000000 --- a/interference/g4f/Provider/Wewordle.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import annotations - -import random, string, time -from aiohttp import ClientSession - -from .base_provider import AsyncProvider - - -class Wewordle(AsyncProvider): - url = "https://wewordle.org" - working = False - supports_gpt_35_turbo = True - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs - ) -> str: - - headers = { - "accept" : "*/*", - "pragma" : "no-cache", - "Content-Type" : "application/json", - "Connection" : "keep-alive" - } - - _user_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)) - _app_id = "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)) - _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) - data = { - "user" : _user_id, - "messages" : messages, - "subscriber": { - "originalPurchaseDate" : None, - "originalApplicationVersion" : None, - "allPurchaseDatesMillis" : {}, - "entitlements" : {"active": {}, "all": {}}, - "allPurchaseDates" : {}, - "allExpirationDatesMillis" : {}, - "allExpirationDates" : {}, - "originalAppUserId" : f"$RCAnonymousID:{_app_id}", - "latestExpirationDate" : None, - "requestDate" : _request_date, - "latestExpirationDateMillis" : None, - "nonSubscriptionTransactions" : [], - "originalPurchaseDateMillis" : None, - "managementURL" : None, - "allPurchasedProductIdentifiers": [], - "firstSeen" : _request_date, - "activeSubscriptions" : [], - } - } - - - async with ClientSession( - headers=headers - ) as session: - async with session.post(f"{cls.url}/gptapi/v1/android/turbo", proxy=proxy, json=data) as response: - response.raise_for_status() - content = (await response.json())["message"]["content"] - if content: - return content \ No newline at end of file diff --git a/interference/g4f/Provider/Wuguokai.py b/interference/g4f/Provider/Wuguokai.py deleted file mode 100644 index 0a46f6e..0000000 --- a/interference/g4f/Provider/Wuguokai.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import annotations - -import random - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider, format_prompt - - -class Wuguokai(BaseProvider): - url = 'https://chat.wuguokai.xyz' - supports_gpt_35_turbo = True - working = False - - @staticmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs: Any, - ) -> CreateResult: - headers = { - 'authority': 
'ai-api.wuguokai.xyz', - 'accept': 'application/json, text/plain, */*', - 'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', - 'content-type': 'application/json', - 'origin': 'https://chat.wuguokai.xyz', - 'referer': 'https://chat.wuguokai.xyz/', - 'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"', - 'sec-ch-ua-mobile': '?0', - 'sec-ch-ua-platform': '"Windows"', - 'sec-fetch-dest': 'empty', - 'sec-fetch-mode': 'cors', - 'sec-fetch-site': 'same-site', - 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36' - } - data ={ - "prompt": format_prompt(messages), - "options": {}, - "userId": f"#/chat/{random.randint(1,99999999)}", - "usingContext": True - } - response = requests.post("https://ai-api20.wuguokai.xyz/api/chat-process", headers=headers, timeout=3, json=data, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) - _split = response.text.split("> 若回答失败请重试或多刷新几次界面后重试") - if response.status_code == 200: - if len(_split) > 1: - yield _split[1].strip() - else: - yield _split[0].strip() - else: - raise Exception(f"Error: {response.status_code} {response.reason}") - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool") - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/Ylokh.py b/interference/g4f/Provider/Ylokh.py deleted file mode 100644 index 3c8b32d..0000000 --- a/interference/g4f/Provider/Ylokh.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import annotations - -import json - -from ..requests import StreamSession -from .base_provider import AsyncGeneratorProvider -from ..typing import AsyncGenerator - -class Ylokh(AsyncGeneratorProvider): - url = "https://chat.ylokh.xyz" - working = True - supports_gpt_35_turbo = True - - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool = True, - proxy: str = None, - **kwargs - ) -> AsyncGenerator: - model = model if model else "gpt-3.5-turbo" - headers = { - "Origin" : cls.url, - "Referer" : cls.url + "/", - } - data = { - "messages": messages, - "model": model, - "temperature": 1, - "presence_penalty": 0, - "top_p": 1, - "frequency_penalty": 0, - "allow_fallback": True, - "stream": stream, - **kwargs - } - async with StreamSession( - headers=headers, - proxies={"https": proxy} - ) as session: - async with session.post("https://chatapi.ylokh.xyz/v1/chat/completions", json=data) as response: - response.raise_for_status() - if stream: - async for line in response.iter_lines(): - line = line.decode() - if line.startswith("data: "): - if line.startswith("data: [DONE]"): - break - line = json.loads(line[6:]) - content = line["choices"][0]["delta"].get("content") - if content: - yield content - else: - chat = await response.json() - yield chat["choices"][0]["message"].get("content") - - - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ("proxy", "str"), - ("temperature", "float"), - ("top_p", "float"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/interference/g4f/Provider/You.py b/interference/g4f/Provider/You.py deleted file mode 100644 index 4f49f15..0000000 
--- a/interference/g4f/Provider/You.py +++ /dev/null @@ -1,40 +0,0 @@ -from __future__ import annotations - -import json - -from curl_cffi.requests import AsyncSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class You(AsyncGeneratorProvider): - url = "https://you.com" - working = True - supports_gpt_35_turbo = True - supports_stream = False - - - @classmethod - async def create_async_generator( - cls, - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs, - ) -> AsyncGenerator: - async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107") as session: - headers = { - "Accept": "text/event-stream", - "Referer": "https://you.com/search?fromSearchBar=true&tbm=youchat", - } - response = await session.get( - "https://you.com/api/streamingSearch", - params={"q": format_prompt(messages), "domain": "youchat", "chat": ""}, - headers=headers - ) - response.raise_for_status() - start = 'data: {"youChatToken": ' - for line in response.text.splitlines(): - if line.startswith(start): - yield json.loads(line[len(start): -1]) \ No newline at end of file diff --git a/interference/g4f/Provider/Yqcloud.py b/interference/g4f/Provider/Yqcloud.py deleted file mode 100644 index ac93315..0000000 --- a/interference/g4f/Provider/Yqcloud.py +++ /dev/null @@ -1,48 +0,0 @@ -from __future__ import annotations - -from aiohttp import ClientSession - -from ..typing import AsyncGenerator -from .base_provider import AsyncGeneratorProvider, format_prompt - - -class Yqcloud(AsyncGeneratorProvider): - url = "https://chat9.yqcloud.top/" - working = True - supports_gpt_35_turbo = True - - @staticmethod - async def create_async_generator( - model: str, - messages: list[dict[str, str]], - proxy: str = None, - **kwargs, - ) -> AsyncGenerator: - async with ClientSession( - headers=_create_header() - ) as session: - payload = _create_payload(messages) - async with session.post("https://api.aichatos.cloud/api/generateStream", proxy=proxy, json=payload) as response: - response.raise_for_status() - async for stream in response.content.iter_any(): - if stream: - yield stream.decode() - - -def _create_header(): - return { - "accept" : "application/json, text/plain, */*", - "content-type" : "application/json", - "origin" : "https://chat9.yqcloud.top", - } - - -def _create_payload(messages: list[dict[str, str]]): - return { - "prompt": format_prompt(messages), - "network": True, - "system": "", - "withoutContext": False, - "stream": True, - "userId": "#/chat/1693025544336" - } diff --git a/interference/g4f/Provider/__init__.py b/interference/g4f/Provider/__init__.py deleted file mode 100644 index 2ac4191..0000000 --- a/interference/g4f/Provider/__init__.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations -from .Acytoo import Acytoo -from .Aibn import Aibn -from .Aichat import Aichat -from .Ails import Ails -from .AiService import AiService -from .AItianhu import AItianhu -from .AItianhuSpace import AItianhuSpace -from .Aivvm import Aivvm -from .Bard import Bard -from .Bing import Bing -from .ChatBase import ChatBase -from .ChatForAi import ChatForAi -from .ChatgptAi import ChatgptAi -from .ChatgptDuo import ChatgptDuo -from .ChatgptLogin import ChatgptLogin -from .CodeLinkAva import CodeLinkAva -from .DeepAi import DeepAi -from .DfeHub import DfeHub -from .EasyChat import EasyChat -from .Forefront import Forefront -from .FreeGpt import FreeGpt -from .GetGpt import GetGpt -from .GptGo import GptGo -from .H2o import 
H2o -from .HuggingChat import HuggingChat -from .Liaobots import Liaobots -from .Lockchat import Lockchat -from .Myshell import Myshell -from .Opchatgpts import Opchatgpts -from .OpenaiChat import OpenaiChat -from .OpenAssistant import OpenAssistant -from .PerplexityAi import PerplexityAi -from .Phind import Phind -from .Raycast import Raycast -from .Theb import Theb -from .Vercel import Vercel -from .Vitalentum import Vitalentum -from .Wewordle import Wewordle -from .Ylokh import Ylokh -from .You import You -from .Yqcloud import Yqcloud -from .Equing import Equing -from .FastGpt import FastGpt -from .V50 import V50 -from .Wuguokai import Wuguokai - -from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider -from .retry_provider import RetryProvider - -__all__ = [ - 'BaseProvider', - 'AsyncProvider', - 'AsyncGeneratorProvider', - 'RetryProvider', - 'Acytoo', - 'Aibn', - 'Aichat', - 'Ails', - 'AiService', - 'AItianhu', - 'AItianhuSpace', - 'Aivvm', - 'Bard', - 'Bing', - 'ChatBase', - 'ChatForAi', - 'ChatgptAi', - 'ChatgptDuo', - 'ChatgptLogin', - 'CodeLinkAva', - 'DeepAi', - 'DfeHub', - 'EasyChat', - 'Forefront', - 'FreeGpt', - 'GetGpt', - 'GptGo', - 'H2o', - 'HuggingChat', - 'Liaobots', - 'Lockchat', - 'Myshell', - 'Opchatgpts', - 'Raycast', - 'OpenaiChat', - 'OpenAssistant', - 'PerplexityAi', - 'Phind', - 'Theb', - 'Vercel', - 'Vitalentum', - 'Wewordle', - 'Ylokh', - 'You', - 'Yqcloud', - 'Equing', - 'FastGpt', - 'Wuguokai', - 'V50' -] diff --git a/interference/g4f/Provider/base_provider.py b/interference/g4f/Provider/base_provider.py deleted file mode 100644 index a21dc87..0000000 --- a/interference/g4f/Provider/base_provider.py +++ /dev/null @@ -1,136 +0,0 @@ -from __future__ import annotations - -from asyncio import AbstractEventLoop -from concurrent.futures import ThreadPoolExecutor -from abc import ABC, abstractmethod - -from .helper import get_event_loop, get_cookies, format_prompt -from ..typing import AsyncGenerator, CreateResult - - -class BaseProvider(ABC): - url: str - working = False - needs_auth = False - supports_stream = False - supports_gpt_35_turbo = False - supports_gpt_4 = False - - @staticmethod - @abstractmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, - **kwargs - ) -> CreateResult: - raise NotImplementedError() - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - *, - loop: AbstractEventLoop = None, - executor: ThreadPoolExecutor = None, - **kwargs - ) -> str: - if not loop: - loop = get_event_loop() - def create_func(): - return "".join(cls.create_completion( - model, - messages, - False, - **kwargs - )) - return await loop.run_in_executor( - executor, - create_func - ) - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" - - -class AsyncProvider(BaseProvider): - @classmethod - def create_completion( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool = False, - **kwargs - ) -> CreateResult: - loop = get_event_loop() - coro = cls.create_async(model, messages, **kwargs) - yield loop.run_until_complete(coro) - - @staticmethod - @abstractmethod - async def create_async( - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> str: - raise NotImplementedError() - - -class AsyncGeneratorProvider(AsyncProvider): - 
supports_stream = True - - @classmethod - def create_completion( - cls, - model: str, - messages: list[dict[str, str]], - stream: bool = True, - **kwargs - ) -> CreateResult: - loop = get_event_loop() - generator = cls.create_async_generator( - model, - messages, - stream=stream, - **kwargs - ) - gen = generator.__aiter__() - while True: - try: - yield loop.run_until_complete(gen.__anext__()) - except StopAsyncIteration: - break - - @classmethod - async def create_async( - cls, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> str: - return "".join([ - chunk async for chunk in cls.create_async_generator( - model, - messages, - stream=False, - **kwargs - ) - ]) - - @staticmethod - @abstractmethod - def create_async_generator( - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> AsyncGenerator: - raise NotImplementedError() \ No newline at end of file diff --git a/interference/g4f/Provider/helper.py b/interference/g4f/Provider/helper.py deleted file mode 100644 index 234cdaa..0000000 --- a/interference/g4f/Provider/helper.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import annotations - -import asyncio, sys -from asyncio import AbstractEventLoop - -import browser_cookie3 - -# Change event loop policy on windows -if sys.platform == 'win32': - if isinstance( - asyncio.get_event_loop_policy(), asyncio.WindowsProactorEventLoopPolicy - ): - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) - -# Local Cookie Storage -_cookies: dict[str, dict[str, str]] = {} - -# If event loop is already running, handle nested event loops -# If "nest_asyncio" is installed, patch the event loop. -def get_event_loop() -> AbstractEventLoop: - try: - asyncio.get_running_loop() - except RuntimeError: - try: - return asyncio.get_event_loop() - except RuntimeError: - asyncio.set_event_loop(asyncio.new_event_loop()) - return asyncio.get_event_loop() - try: - event_loop = asyncio.get_event_loop() - if not hasattr(event_loop.__class__, "_nest_patched"): - import nest_asyncio - nest_asyncio.apply(event_loop) - return event_loop - except ImportError: - raise RuntimeError( - 'Use "create_async" instead of "create" function in a running event loop. Or install the "nest_asyncio" package.') - -# Load cookies for a domain from all supported browser. 
-# Cache the results in the "_cookies" variable -def get_cookies(cookie_domain: str) -> dict: - if cookie_domain not in _cookies: - _cookies[cookie_domain] = {} - try: - for cookie in browser_cookie3.load(cookie_domain): - _cookies[cookie_domain][cookie.name] = cookie.value - except: - pass - return _cookies[cookie_domain] - - -def format_prompt(messages: list[dict[str, str]], add_special_tokens=False): - if add_special_tokens or len(messages) > 1: - formatted = "\n".join( - ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages] - ) - return f"{formatted}\nAssistant:" - else: - return messages[0]["content"] \ No newline at end of file diff --git a/interference/g4f/Provider/retry_provider.py b/interference/g4f/Provider/retry_provider.py deleted file mode 100644 index e1a9cd1..0000000 --- a/interference/g4f/Provider/retry_provider.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import annotations - -import random - -from ..typing import CreateResult -from .base_provider import BaseProvider, AsyncProvider - - -class RetryProvider(AsyncProvider): - __name__ = "RetryProvider" - working = True - needs_auth = False - supports_stream = True - supports_gpt_35_turbo = False - supports_gpt_4 = False - - def __init__( - self, - providers: list[type[BaseProvider]], - shuffle: bool = True - ) -> None: - self.providers = providers - self.shuffle = shuffle - - - def create_completion( - self, - model: str, - messages: list[dict[str, str]], - stream: bool = False, - **kwargs - ) -> CreateResult: - if stream: - providers = [provider for provider in self.providers if provider.supports_stream] - else: - providers = self.providers - if self.shuffle: - random.shuffle(providers) - - self.exceptions = {} - started = False - for provider in providers: - try: - for token in provider.create_completion(model, messages, stream, **kwargs): - yield token - started = True - if started: - return - except Exception as e: - self.exceptions[provider.__name__] = e - if started: - break - - self.raise_exceptions() - - async def create_async( - self, - model: str, - messages: list[dict[str, str]], - **kwargs - ) -> str: - providers = [provider for provider in self.providers if issubclass(provider, AsyncProvider)] - if self.shuffle: - random.shuffle(providers) - - self.exceptions = {} - for provider in providers: - try: - return await provider.create_async(model, messages, **kwargs) - except Exception as e: - self.exceptions[provider.__name__] = e - - self.raise_exceptions() - - def raise_exceptions(self): - if self.exceptions: - raise RuntimeError("\n".join(["All providers failed:"] + [ - f"{p}: {self.exceptions[p].__class__.__name__}: {self.exceptions[p]}" for p in self.exceptions - ])) - - raise RuntimeError("No provider found") \ No newline at end of file diff --git a/interference/g4f/__init__.py b/interference/g4f/__init__.py deleted file mode 100644 index bfbf884..0000000 --- a/interference/g4f/__init__.py +++ /dev/null @@ -1,107 +0,0 @@ -from __future__ import annotations -from g4f import models -from .Provider import BaseProvider, AsyncProvider -from .typing import Any, CreateResult, Union -from requests import get - -logging = False -version = '0.1.4.5' - -def check_pypi_version(): - try: - response = get(f"https://pypi.org/pypi/g4f/json").json() - latest_version = response["info"]["version"] - - if version != latest_version: - print(f'New pypi version: {latest_version} (current: {version}) | pip install -U g4f') - - except Exception as e: - print(f'Failed to check g4f pypi version: 
{e}') - -check_pypi_version() - -def get_model_and_provider(model: Union[models.Model, str], provider: type[BaseProvider], stream: bool): - if isinstance(model, str): - if model in models.ModelUtils.convert: - model = models.ModelUtils.convert[model] - else: - raise Exception(f'The model: {model} does not exist') - - if not provider: - provider = model.best_provider - - if not provider: - raise Exception(f'No provider found for model: {model}') - - if not provider.working: - raise Exception(f'{provider.__name__} is not working') - - if not provider.supports_stream and stream: - raise Exception( - f'ValueError: {provider.__name__} does not support "stream" argument') - - if logging: - print(f'Using {provider.__name__} provider') - - return model, provider - -class ChatCompletion: - @staticmethod - def create( - model : Union[models.Model, str], - messages : list[dict[str, str]], - provider : Union[type[BaseProvider], None] = None, - stream : bool = False, - auth : Union[str, None] = None, - **kwargs - ) -> Union[CreateResult, str]: - - model, provider = get_model_and_provider(model, provider, stream) - - if provider.needs_auth and not auth: - raise Exception( - f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)') - - if provider.needs_auth: - kwargs['auth'] = auth - - result = provider.create_completion(model.name, messages, stream, **kwargs) - return result if stream else ''.join(result) - - @staticmethod - async def create_async( - model : Union[models.Model, str], - messages : list[dict[str, str]], - provider : Union[type[BaseProvider], None] = None, - **kwargs - ) -> str: - model, provider = get_model_and_provider(model, provider, False) - - return await provider.create_async(model.name, messages, **kwargs) - -class Completion: - @staticmethod - def create( - model : Union[models.Model, str], - prompt : str, - provider : Union[type[BaseProvider], None] = None, - stream : bool = False, **kwargs) -> Union[CreateResult, str]: - - allowed_models = [ - 'code-davinci-002', - 'text-ada-001', - 'text-babbage-001', - 'text-curie-001', - 'text-davinci-002', - 'text-davinci-003' - ] - - if model not in allowed_models: - raise Exception(f'ValueError: Can\'t use {model} with Completion.create()') - - model, provider = get_model_and_provider(model, provider, stream) - - result = provider.create_completion(model.name, - [{"role": "user", "content": prompt}], stream, **kwargs) - - return result if stream else ''.join(result) \ No newline at end of file diff --git a/interference/g4f/models.py b/interference/g4f/models.py deleted file mode 100644 index 6b27645..0000000 --- a/interference/g4f/models.py +++ /dev/null @@ -1,267 +0,0 @@ -from __future__ import annotations -from dataclasses import dataclass -from .typing import Union -from .Provider import BaseProvider, RetryProvider -from .Provider import ( - AItianhuSpace, - ChatgptLogin, - PerplexityAi, - ChatgptDuo, - ChatgptAi, - ChatForAi, - ChatBase, - AItianhu, - Wewordle, - Yqcloud, - Myshell, - FreeGpt, - Vercel, - DeepAi, - Aichat, - Aivvm, - GptGo, - Ylokh, - Bard, - Aibn, - Bing, - H2o, -) - -@dataclass(unsafe_hash=True) -class Model: - name: str - base_provider: str - best_provider: Union[type[BaseProvider], RetryProvider] = None - -# Config for HuggingChat, OpenAssistant -# Works for Liaobots, H2o, OpenaiChat, Yqcloud, You -default = Model( - name = "", - base_provider = "", - best_provider = RetryProvider([ - Bing, # Not fully GPT 3 or 4 - PerplexityAi, # Adds references to sources - 
Wewordle, # Responds with markdown - Yqcloud, # Answers short questions in chinese - ChatBase, # Don't want to answer creatively - ChatgptDuo, # Include search results - DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, AItianhuSpace, Aichat, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh, - ]) -) - -# GPT-3.5 / GPT-4 -gpt_35_turbo = Model( - name = 'gpt-3.5-turbo', - base_provider = 'openai', - best_provider = RetryProvider([ - DeepAi, ChatgptLogin, ChatgptAi, Aivvm, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, ChatForAi, FreeGpt, Ylokh, - ]) -) - -gpt_4 = Model( - name = 'gpt-4', - base_provider = 'openai', - best_provider = RetryProvider([ - Myshell, Ylokh, - ]) -) - -# Bard -palm = Model( - name = 'palm', - base_provider = 'google', - best_provider = Bard) - -# H2o -falcon_7b = Model( - name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', - base_provider = 'huggingface', - best_provider = H2o) - -falcon_40b = Model( - name = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', - base_provider = 'huggingface', - best_provider = H2o) - -llama_13b = Model( - name = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', - base_provider = 'huggingface', - best_provider = H2o) - -# Vercel -claude_instant_v1 = Model( - name = 'claude-instant-v1', - base_provider = 'anthropic', - best_provider = Vercel) - -claude_v1 = Model( - name = 'claude-v1', - base_provider = 'anthropic', - best_provider = Vercel) - -claude_v2 = Model( - name = 'claude-v2', - base_provider = 'anthropic', - best_provider = Vercel) - -command_light_nightly = Model( - name = 'command-light-nightly', - base_provider = 'cohere', - best_provider = Vercel) - -command_nightly = Model( - name = 'command-nightly', - base_provider = 'cohere', - best_provider = Vercel) - -gpt_neox_20b = Model( - name = 'EleutherAI/gpt-neox-20b', - base_provider = 'huggingface', - best_provider = Vercel) - -oasst_sft_1_pythia_12b = Model( - name = 'OpenAssistant/oasst-sft-1-pythia-12b', - base_provider = 'huggingface', - best_provider = Vercel) - -oasst_sft_4_pythia_12b_epoch_35 = Model( - name = 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', - base_provider = 'huggingface', - best_provider = Vercel) - -santacoder = Model( - name = 'bigcode/santacoder', - base_provider = 'huggingface', - best_provider = Vercel) - -bloom = Model( - name = 'bigscience/bloom', - base_provider = 'huggingface', - best_provider = Vercel) - -flan_t5_xxl = Model( - name = 'google/flan-t5-xxl', - base_provider = 'huggingface', - best_provider = Vercel) - -code_davinci_002 = Model( - name = 'code-davinci-002', - base_provider = 'openai', - best_provider = Vercel) - -gpt_35_turbo_16k = Model( - name = 'gpt-3.5-turbo-16k', - base_provider = 'openai', - best_provider = Vercel) - -gpt_35_turbo_16k_0613 = Model( - name = 'gpt-3.5-turbo-16k-0613', - base_provider = 'openai') - -gpt_35_turbo_0613 = Model( - name = 'gpt-3.5-turbo-0613', - base_provider = 'openai', - best_provider = RetryProvider([ - Aivvm, ChatgptLogin - ]) -) - -gpt_4_0613 = Model( - name = 'gpt-4-0613', - base_provider = 'openai', - best_provider = Aivvm) - -gpt_4_32k = Model( - name = 'gpt-4-32k', - base_provider = 'openai', - best_provider = Aivvm) - -gpt_4_32k_0613 = Model( - name = 'gpt-4-32k-0613', - base_provider = 'openai', - best_provider = Aivvm) - -text_ada_001 = Model( - name = 'text-ada-001', - base_provider = 'openai', - best_provider = Vercel) - -text_babbage_001 = Model( - name = 'text-babbage-001', - base_provider = 'openai', - best_provider = Vercel) - -text_curie_001 = Model( - name = 
'text-curie-001', - base_provider = 'openai', - best_provider = Vercel) - -text_davinci_002 = Model( - name = 'text-davinci-002', - base_provider = 'openai', - best_provider = Vercel) - -text_davinci_003 = Model( - name = 'text-davinci-003', - base_provider = 'openai', - best_provider = Vercel) - -llama13b_v2_chat = Model( - name = 'replicate:a16z-infra/llama13b-v2-chat', - base_provider = 'replicate', - best_provider = Vercel) - -llama7b_v2_chat = Model( - name = 'replicate:a16z-infra/llama7b-v2-chat', - base_provider = 'replicate', - best_provider = Vercel) - - -class ModelUtils: - convert: dict[str, Model] = { - # gpt-3.5 - 'gpt-3.5-turbo' : gpt_35_turbo, - 'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, - 'gpt-3.5-turbo-16k-0613' : gpt_35_turbo_16k_0613, - - # gpt-4 - 'gpt-4' : gpt_4, - 'gpt-4-0613' : gpt_4_0613, - 'gpt-4-32k' : gpt_4_32k, - 'gpt-4-32k-0613' : gpt_4_32k_0613, - - # Bard - 'palm2' : palm, - 'palm' : palm, - 'google' : palm, - 'google-bard' : palm, - 'google-palm' : palm, - 'bard' : palm, - - # H2o - 'falcon-40b' : falcon_40b, - 'falcon-7b' : falcon_7b, - 'llama-13b' : llama_13b, - - # Vercel - 'claude-instant-v1' : claude_instant_v1, - 'claude-v1' : claude_v1, - 'claude-v2' : claude_v2, - 'command-nightly' : command_nightly, - 'gpt-neox-20b' : gpt_neox_20b, - 'santacoder' : santacoder, - 'bloom' : bloom, - 'flan-t5-xxl' : flan_t5_xxl, - 'code-davinci-002' : code_davinci_002, - 'text-ada-001' : text_ada_001, - 'text-babbage-001' : text_babbage_001, - 'text-curie-001' : text_curie_001, - 'text-davinci-002' : text_davinci_002, - 'text-davinci-003' : text_davinci_003, - 'llama13b-v2-chat' : llama13b_v2_chat, - 'llama7b-v2-chat' : llama7b_v2_chat, - - 'oasst-sft-1-pythia-12b' : oasst_sft_1_pythia_12b, - 'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35, - 'command-light-nightly' : command_light_nightly, - } diff --git a/interference/g4f/requests.py b/interference/g4f/requests.py deleted file mode 100644 index c51d980..0000000 --- a/interference/g4f/requests.py +++ /dev/null @@ -1,179 +0,0 @@ -from __future__ import annotations - -import warnings, json, asyncio - -from functools import partialmethod -from asyncio import Future, Queue -from typing import AsyncGenerator - -from curl_cffi.requests import AsyncSession, Response - -import curl_cffi - -is_newer_0_5_8 = hasattr(AsyncSession, "_set_cookies") or hasattr(curl_cffi.requests.Cookies, "get_cookies_for_curl") -is_newer_0_5_9 = hasattr(curl_cffi.AsyncCurl, "remove_handle") -is_newer_0_5_10 = hasattr(AsyncSession, "release_curl") - -class StreamResponse: - def __init__(self, inner: Response, queue: Queue): - self.inner = inner - self.queue = queue - self.request = inner.request - self.status_code = inner.status_code - self.reason = inner.reason - self.ok = inner.ok - self.headers = inner.headers - self.cookies = inner.cookies - - async def text(self) -> str: - content = await self.read() - return content.decode() - - def raise_for_status(self): - if not self.ok: - raise RuntimeError(f"HTTP Error {self.status_code}: {self.reason}") - - async def json(self, **kwargs): - return json.loads(await self.read(), **kwargs) - - async def iter_lines(self, chunk_size=None, decode_unicode=False, delimiter=None) -> AsyncGenerator[bytes]: - """ - Copied from: https://requests.readthedocs.io/en/latest/_modules/requests/models/ - which is under the License: Apache 2.0 - """ - pending = None - - async for chunk in self.iter_content( - chunk_size=chunk_size, decode_unicode=decode_unicode - ): - if pending is not None: - chunk = 
pending + chunk - if delimiter: - lines = chunk.split(delimiter) - else: - lines = chunk.splitlines() - if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: - pending = lines.pop() - else: - pending = None - - for line in lines: - yield line - - if pending is not None: - yield pending - - async def iter_content(self, chunk_size=None, decode_unicode=False) -> As: - if chunk_size: - warnings.warn("chunk_size is ignored, there is no way to tell curl that.") - if decode_unicode: - raise NotImplementedError() - while True: - chunk = await self.queue.get() - if chunk is None: - return - yield chunk - - async def read(self) -> bytes: - return b"".join([chunk async for chunk in self.iter_content()]) - -class StreamRequest: - def __init__(self, session: AsyncSession, method: str, url: str, **kwargs): - self.session = session - self.loop = session.loop if session.loop else asyncio.get_running_loop() - self.queue = Queue() - self.method = method - self.url = url - self.options = kwargs - self.handle = None - - def _on_content(self, data): - if not self.enter.done(): - self.enter.set_result(None) - self.queue.put_nowait(data) - - def _on_done(self, task: Future): - if not self.enter.done(): - self.enter.set_result(None) - self.queue.put_nowait(None) - - self.loop.call_soon(self.release_curl) - - async def fetch(self) -> StreamResponse: - if self.handle: - raise RuntimeError("Request already started") - self.curl = await self.session.pop_curl() - self.enter = self.loop.create_future() - if is_newer_0_5_10: - request, _, header_buffer, _, _ = self.session._set_curl_options( - self.curl, - self.method, - self.url, - content_callback=self._on_content, - **self.options - ) - else: - request, _, header_buffer = self.session._set_curl_options( - self.curl, - self.method, - self.url, - content_callback=self._on_content, - **self.options - ) - if is_newer_0_5_9: - self.handle = self.session.acurl.add_handle(self.curl) - else: - await self.session.acurl.add_handle(self.curl, False) - self.handle = self.session.acurl._curl2future[self.curl] - self.handle.add_done_callback(self._on_done) - # Wait for headers - await self.enter - # Raise exceptions - if self.handle.done(): - self.handle.result() - if is_newer_0_5_8: - response = self.session._parse_response(self.curl, _, header_buffer) - response.request = request - else: - response = self.session._parse_response(self.curl, request, _, header_buffer) - return StreamResponse( - response, - self.queue - ) - - async def __aenter__(self) -> StreamResponse: - return await self.fetch() - - async def __aexit__(self, *args): - self.release_curl() - - def release_curl(self): - if is_newer_0_5_10: - self.session.release_curl(self.curl) - return - if not self.curl: - return - self.curl.clean_after_perform() - if is_newer_0_5_9: - self.session.acurl.remove_handle(self.curl) - elif not self.handle.done() and not self.handle.cancelled(): - self.session.acurl.set_result(self.curl) - self.curl.reset() - self.session.push_curl(self.curl) - self.curl = None - -class StreamSession(AsyncSession): - def request( - self, - method: str, - url: str, - **kwargs - ) -> StreamRequest: - return StreamRequest(self, method, url, **kwargs) - - head = partialmethod(request, "HEAD") - get = partialmethod(request, "GET") - post = partialmethod(request, "POST") - put = partialmethod(request, "PUT") - patch = partialmethod(request, "PATCH") - delete = partialmethod(request, "DELETE") \ No newline at end of file diff --git a/interference/g4f/typing.py b/interference/g4f/typing.py deleted 
file mode 100644 index 5f63c22..0000000 --- a/interference/g4f/typing.py +++ /dev/null @@ -1,20 +0,0 @@ -import sys -from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union - -if sys.version_info >= (3, 8): - from typing import TypedDict -else: - from typing_extensions import TypedDict - -SHA256 = NewType('sha_256_hash', str) -CreateResult = Generator[str, None, None] - -__all__ = [ - 'Any', - 'AsyncGenerator', - 'Generator', - 'Tuple', - 'TypedDict', - 'SHA256', - 'CreateResult', -] diff --git a/interference/requirements.txt b/interference/requirements.txt deleted file mode 100644 index 3d5246c..0000000 --- a/interference/requirements.txt +++ /dev/null @@ -1,8 +0,0 @@ -flask_cors -watchdog~=3.0.0 -uvicorn -g4f -transformers -tensorflow -torch -asgiref \ No newline at end of file diff --git a/interference/startup.sh b/interference/startup.sh deleted file mode 100644 index a2d4492..0000000 --- a/interference/startup.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -# Update the g4f package every time the container starts -pip install -U g4f - -# Run your application -python -m app \ No newline at end of file diff --git a/main.py b/main.py index c8815be..3996dbf 100644 --- a/main.py +++ b/main.py @@ -2,7 +2,7 @@ import logging import os import random - +import sys import requests from updater import SelfUpdating from aiogram import Bot, Dispatcher, types, F @@ -19,7 +19,9 @@ import bot_service from replit_detector import ReplitFlaskApp - +import subprocess +command = [sys.executable, 'interference/app.py'] +process = subprocess.Popen(command, env=dict(os.environ)) service = bot_service.BotService() updater = SelfUpdating('noes14155/Telegrambot-with-GPT4free') storage = MemoryStorage() @@ -38,10 +40,6 @@ class MyStates(StatesGroup): SELECT_MODEL = State() SELECT_SIZE = State() -#class MyCallback(CallbackData, prefix="action"): -# : str -# bar: int - def owner_only(func): @wraps(func) async def wrapped(update, context, *args, **kwargs): diff --git a/requirements.txt b/requirements.txt index 164e90e..3a81cef 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,7 @@ openai gtts pytube whois -spotipy +asgiref +uvicorn +flask_cors +g4f
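With the standalone interference container removed, `main.py` now spawns `interference/app.py` in the same environment via `subprocess.Popen`. Because `Popen` returns immediately, the Flask server may still be booting when the bot issues its first request. Below is a minimal sketch of a readiness wait that could wrap the launch; the 30-attempt budget and the `/` health check are assumptions for illustration, not part of this patch.

```python
import os
import subprocess
import sys
import time

import requests

# Spawn the bundled interference server exactly as main.py does.
command = [sys.executable, 'interference/app.py']
process = subprocess.Popen(command, env=dict(os.environ))

# Poll the root route until the Flask app answers (up to ~30 s).
for _ in range(30):
    try:
        if requests.get('http://localhost:1337/', timeout=1).ok:
            break
    except requests.RequestException:
        pass
    time.sleep(1)
```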
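Once the server is up, the bot's OpenAI-compatible calls only need the API base pointed at it, which is what the added `g4f`, `flask_cors`, `uvicorn`, and `asgiref` requirements support. A hedged usage sketch, assuming the repo's default port 1337 and a pre-1.0 `openai` client; the model name and prompt are illustrative:

```python
import openai

# GPT_KEY may stay empty when using gpt4free; the base URL targets the
# locally spawned interference server (port 1337 assumed as the default).
openai.api_key = ''
openai.api_base = 'http://localhost:1337'

response = openai.ChatCompletion.create(
    model='gpt-3.5-turbo',
    messages=[{'role': 'user', 'content': 'Say hello'}],
)
print(response['choices'][0]['message']['content'])
```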