-
Notifications
You must be signed in to change notification settings - Fork 30
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
noes14155
committed
Oct 2, 2023
1 parent
b264c8a
commit 3baa5b4
Showing
26 changed files
with
1,335 additions
and
453 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,73 @@ | ||
from __future__ import annotations | ||
|
||
import random, json | ||
|
||
from g4f.requests import AsyncSession, StreamRequest | ||
from .base_provider import AsyncGeneratorProvider, format_prompt | ||
|
||
# Maps each supported model to the DNS suffix of its API host; a random
# 6-character subdomain is prepended per request (see create_async_generator).
domains = {
    "gpt-3.5-turbo": ".aitianhu.space",
    "gpt-4": ".aitianhu.website",
}
|
||
class AItianhuSpace(AsyncGeneratorProvider):
    """Async streaming provider that targets randomly generated *.aitianhu.* hosts."""
    url = "https://chat3.aiyunos.top/"
    working = True
    supports_gpt_35_turbo = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs
    ) -> str:
        """Yield response text chunks for *messages* from the remote API.

        Raises:
            ValueError: if *model* is given but not present in ``domains``.
            RuntimeError: on a GPT-4 rate limit or an unrecognized response line.
        """
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in domains:
            raise ValueError(f"Model is not supported: {model}")

        # Each request hits a fresh random subdomain, e.g. ab12cd.aitianhu.space.
        chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
        rand = ''.join(random.choice(chars) for _ in range(6))
        domain = domains[model]
        url = f'https://{rand}{domain}/api/chat-process'

        headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
        }
        # verify=False: the throwaway random subdomains presumably lack valid
        # TLS certificates — NOTE(review): confirm this is intentional.
        async with AsyncSession(headers=headers, impersonate="chrome107", verify=False) as session:
            data = {
                "prompt": format_prompt(messages),
                "options": {},
                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
                "temperature": 0.8,
                "top_p": 1,
                **kwargs
            }
            async with StreamRequest(session, "POST", url, json=data) as response:
                response.raise_for_status()
                async for line in response.content:
                    line = json.loads(line.rstrip())
                    if "detail" in line:
                        content = line["detail"]["choices"][0]["delta"].get("content")
                        if content:
                            yield content
                    elif "message" in line and "AI-4接口非常昂贵" in line["message"]:
                        raise RuntimeError("Rate limit for GPT 4 reached")
                    else:
                        # Bug fix: was a plain string "Response: {line}" (missing
                        # f-prefix), so the offending payload was never shown.
                        raise RuntimeError(f"Response: {line}")

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the keyword arguments this provider accepts."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
            ("top_p", "int"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,51 @@ | ||
from __future__ import annotations | ||
|
||
import time | ||
import hashlib | ||
|
||
from ..typing import AsyncGenerator | ||
from g4f.requests import AsyncSession | ||
from .base_provider import AsyncGeneratorProvider | ||
|
||
|
||
class Aibn(AsyncGeneratorProvider):
    """Async streaming provider for aibn.cc."""
    url = "https://aibn.cc"
    supports_gpt_35_turbo = True
    working = True

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        """Stream decoded response chunks for *messages* from the API."""
        async with AsyncSession(impersonate="chrome107") as session:
            now = int(time.time())
            # The endpoint authenticates each request with a signature over the
            # timestamp and the most recent message content.
            payload = {
                "messages": messages,
                "pass": None,
                "sign": generate_signature(now, messages[-1]["content"]),
                "time": now,
            }
            async with session.post(f"{cls.url}/api/generate", json=payload) as response:
                response.raise_for_status()
                async for chunk in response.content.iter_any():
                    yield chunk.decode()

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the keyword arguments this provider accepts."""
        supported = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
            ("temperature", "float"),
        ]
        rendered = ", ".join(f"{name}: {kind}" for name, kind in supported)
        return f"g4f.provider.{cls.__name__} supports: ({rendered})"
|
||
|
||
def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
    """Return the SHA-256 hex digest of ``"timestamp:message:secret"``."""
    payload = ":".join((str(timestamp), message, secret))
    return hashlib.sha256(payload.encode()).hexdigest()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,78 +1,77 @@ | ||
from __future__ import annotations | ||
import requests | ||
|
||
from aiohttp import ClientSession | ||
|
||
from .base_provider import AsyncGeneratorProvider | ||
from ..typing import AsyncGenerator | ||
from .base_provider import BaseProvider | ||
from ..typing import CreateResult | ||
|
||
# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
models = {
    'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
    'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
    'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
    'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
    'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
    'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
    'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
}

class Aivvm(BaseProvider):
    """Synchronous streaming provider for chat.aivvm.com."""
    url = 'https://chat.aivvm.com'
    supports_stream = True
    working = True
    supports_gpt_35_turbo = True
    supports_gpt_4 = True

    @classmethod
    def create_completion(cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs
    ) -> CreateResult:
        """Yield decoded response chunks for *messages*.

        Raises:
            ValueError: if *model* is given but not present in ``models``.
        """
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in models:
            raise ValueError(f"Model is not supported: {model}")

        headers = {
            "accept"            : "*/*",
            "accept-language"   : "hu-HU,hu;q=0.9,en-US;q=0.8,en;q=0.7",
            "content-type"      : "application/json",
            "sec-ch-ua"         : "\"Kuki\";v=\"116\", \"Not)A;Brand\";v=\"24\", \"Pici Pocoro\";v=\"102\"",
            "sec-ch-ua-mobile"  : "?0",
            "sec-ch-ua-platform": "\"Bandóz\"",
            "sec-fetch-dest"    : "empty",
            "sec-fetch-mode"    : "cors",
            "sec-fetch-site"    : "same-origin",
            "Referer"           : "https://chat.aivvm.com/",
            "Referrer-Policy"   : "same-origin",
        }

        json_data = {
            "model"       : models[model],
            "messages"    : messages,
            "key"         : "",
            "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
            "temperature" : kwargs.get("temperature", 0.7)
        }

        response = requests.post(
            "https://chat.aivvm.com/api/chat", headers=headers, json=json_data, stream=True)
        response.raise_for_status()

        # chunk_size=None yields data as it arrives from the server.
        for chunk in response.iter_content(chunk_size=None):
            yield chunk.decode('utf-8')

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the keyword arguments this provider accepts."""
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
        ]
        param = ', '.join([': '.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,51 @@ | ||
from __future__ import annotations | ||
|
||
from g4f.requests import AsyncSession | ||
from .base_provider import AsyncProvider, format_prompt | ||
|
||
|
||
class ChatgptDuo(AsyncProvider):
    """Provider for chatgptduo.com; also records the web-search sources of the last answer."""
    url = "https://chatgptduo.com"
    supports_gpt_35_turbo = True
    working = True
    # Sources captured by the most recent request; empty until create_async runs.
    # Class-level state: shared across all callers, overwritten per request.
    _sources = []

    @classmethod
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> str:
        """Return the answer string for *messages*, storing search sources on the class."""
        async with AsyncSession(impersonate="chrome107") as session:
            # Bug fix: a trailing comma made `prompt` a 1-tuple, so the tuple
            # (not the formatted prompt string) was sent as both form fields.
            prompt = format_prompt(messages)
            data = {
                "prompt": prompt,
                "search": prompt,
                "purpose": "ask",
            }
            async with session.post(f"{cls.url}/", data=data) as response:
                response.raise_for_status()
                data = await response.json()

                cls._sources = [{
                    "title": source["title"],
                    "url": source["link"],
                    "snippet": source["snippet"]
                } for source in data["results"]]

                return data["answer"]

    @classmethod
    def get_sources(cls):
        """Return the sources captured by the most recent create_async call."""
        return cls._sources

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the keyword arguments this provider accepts."""
        params = [
            ("model", "str"),
            ("messages", "list[dict[str, str]]"),
            ("stream", "bool"),
        ]
        param = ", ".join([": ".join(p) for p in params])
        return f"g4f.provider.{cls.__name__} supports: ({param})"
Oops, something went wrong.