Commit: Update g4f

noes14155 committed Sep 20, 2023
1 parent 85aacc9 commit 8b4aeab
Showing 11 changed files with 196 additions and 190 deletions.
10 changes: 5 additions & 5 deletions README.md
@@ -161,19 +161,19 @@ For Naga AI, get this by running the /key get command in the bot channel in
 To use GPT4free, set API_BASE = 'http://g4f_server:1337' (only works in Docker).
 You can use any provider. The Naga AI API base is included; use the matching key for it.

-`MAX_HISTORY`
+- `MAX_HISTORY`
 Number of conversation history messages sent with each request. Default is 10; don't set it higher than 20.
 
-`ENABLED_PLUGINS`
+- `ENABLED_PLUGINS`
 List of enabled plugins. Only wolfram is disabled by default. Add a WolframAlpha API key before adding wolfram to this list.
 
-`WOLFRAM_APP_ID`
+- `WOLFRAM_APP_ID`
 WolframAlpha API key.
 
-`DUCKDUCKGO_SAFESEARCH`
+- `DUCKDUCKGO_SAFESEARCH`
 Valid values are 'safe', 'moderate', 'off'.
 
-`WORLDTIME_DEFAULT_TIMEZONE`
+- `WORLDTIME_DEFAULT_TIMEZONE`
 Timezone in the format Asia/Dubai or Europe/Rome.
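
For reference, a minimal sketch of how a bot might read these variables at startup. The `os.getenv` wiring and the defaults below are assumptions for illustration, not code from this repository:

```python
import os

# Hypothetical startup wiring for the variables documented above.
MAX_HISTORY = int(os.getenv("MAX_HISTORY", "10"))  # keep this at 20 or below
ENABLED_PLUGINS = [p for p in os.getenv("ENABLED_PLUGINS", "").split(",") if p]
WOLFRAM_APP_ID = os.getenv("WOLFRAM_APP_ID", "")   # required before enabling wolfram
DUCKDUCKGO_SAFESEARCH = os.getenv("DUCKDUCKGO_SAFESEARCH", "moderate")  # 'safe' | 'moderate' | 'off'
WORLDTIME_DEFAULT_TIMEZONE = os.getenv("WORLDTIME_DEFAULT_TIMEZONE", "Asia/Dubai")
```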


17 changes: 9 additions & 8 deletions interference/app.py
@@ -72,15 +72,15 @@ def chat_completions():
         #AItianhu,
         #Acytoo,
         #Aichat,
-        #Ails,
-        Aivvm,
+        Ails,
+        #Aivvm,
         #Bard,
         #Bing,
         ChatBase,
-        #ChatgptAi,
-        ChatgptLogin,
-        CodeLinkAva,
-        #DeepAi,
+        ChatgptAi,
+        #ChatgptLogin,
+        #CodeLinkAva,
+        DeepAi,
         #H2o,
         #HuggingChat,
         #Opchatgpts,
@@ -91,9 +91,10 @@ def chat_completions():
         #Vercel,
         Vitalentum,
         Wewordle,
-        Ylokh,
+        #Ylokh,
         #You,
-        Yqcloud,]
+        Yqcloud
+    ]
 
     for provider in providers:
         try:
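The truncated loop above is the interference server's fallback strategy: try each enabled provider in order and move on when one raises. A self-contained sketch of that pattern, where `create_completion` is a hypothetical stand-in for the real provider call:

```python
def first_working_completion(providers, model: str, messages: list[dict[str, str]]):
    """Try each provider in order; return the first successful result."""
    last_error = None
    for provider in providers:
        try:
            return provider.create_completion(model=model, messages=messages)
        except Exception as error:  # a broken provider shouldn't kill the request
            last_error = error
    raise RuntimeError("all providers failed") from last_error
```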
59 changes: 18 additions & 41 deletions interference/g4f/Provider/AItianhu.py
@@ -1,61 +1,38 @@
 from __future__ import annotations
 
 import json
-from aiohttp import ClientSession, http
+from curl_cffi.requests import AsyncSession
 
-from ..typing import AsyncGenerator
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from .base_provider import AsyncProvider, format_prompt
 
 
-class AItianhu(AsyncGeneratorProvider):
+class AItianhu(AsyncProvider):
     url = "https://www.aitianhu.com"
     working = True
     supports_gpt_35_turbo = True
 
     @classmethod
-    async def create_async_generator(
+    async def create_async(
         cls,
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
         **kwargs
-    ) -> AsyncGenerator:
-        headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/116.0",
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Connection": "keep-alive",
-            "Referer": cls.url + "/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
+    ) -> str:
+        data = {
+            "prompt": format_prompt(messages),
+            "options": {},
+            "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
+            "temperature": 0.8,
+            "top_p": 1,
+            **kwargs
         }
-        async with ClientSession(
-            headers=headers,
-            version=http.HttpVersion10
-        ) as session:
-            data = {
-                "prompt": format_prompt(messages),
-                "options": {},
-                "systemMessage": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-                "temperature": 0.8,
-                "top_p": 1,
-                **kwargs
-            }
-            async with session.post(
-                cls.url + "/api/chat-process",
-                proxy=proxy,
-                json=data,
-                ssl=False,
-            ) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    line = json.loads(line.decode('utf-8'))
-                    token = line["detail"]["choices"][0]["delta"].get("content")
-                    if token:
-                        yield token
+        async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session:
+            response = await session.post(cls.url + "/api/chat-process", json=data)
+            response.raise_for_status()
+            line = response.text.splitlines()[-1]
+            line = json.loads(line)
+            return line["text"]


     @classmethod
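The rewrite above drops the aiohttp streaming generator in favor of a single curl_cffi request that impersonates Chrome 107 and reads only the final JSON line of the chat-process response. A hedged usage sketch, assuming the package layout shown in this commit:

```python
import asyncio

from g4f.Provider import AItianhu

async def main():
    # create_async now returns the full reply as one string (no streaming).
    reply = await AItianhu.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    )
    print(reply)

asyncio.run(main())
```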
78 changes: 78 additions & 0 deletions interference/g4f/Provider/GptGo.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+import json
+
+from ..typing import AsyncGenerator
+from .base_provider import AsyncGeneratorProvider, format_prompt
+
+
+class GptGo(AsyncGeneratorProvider):
+    url = "https://gptgo.ai"
+    supports_gpt_35_turbo = True
+    working = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncGenerator:
+        headers = {
+            "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "Accept" : "*/*",
+            "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "Origin" : cls.url,
+            "Referer" : cls.url + "/",
+            "Sec-Fetch-Dest" : "empty",
+            "Sec-Fetch-Mode" : "cors",
+            "Sec-Fetch-Site" : "same-origin",
+        }
+        async with ClientSession(
+            headers=headers
+        ) as session:
+            async with session.get(
+                "https://gptgo.ai/action_get_token.php",
+                params={
+                    "q": format_prompt(messages),
+                    "hlgpt": "default",
+                    "hl": "en"
+                },
+                proxy=proxy
+            ) as response:
+                response.raise_for_status()
+                token = (await response.json(content_type=None))["token"]
+
+            async with session.get(
+                "https://gptgo.ai/action_ai_gpt.php",
+                params={
+                    "token": token,
+                },
+                proxy=proxy
+            ) as response:
+                response.raise_for_status()
+                start = "data: "
+                async for line in response.content:
+                    line = line.decode()
+                    if line.startswith("data: "):
+                        if line.startswith("data: [DONE]"):
+                            break
+                        line = json.loads(line[len(start):-1])
+                        content = line["choices"][0]["delta"].get("content")
+                        if content:
+                            yield content
+
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("temperature", "float"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
66 changes: 2 additions & 64 deletions interference/g4f/Provider/Vercel.py
@@ -9,7 +9,7 @@

 class Vercel(AsyncProvider):
     url = "https://sdk.vercel.ai"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     model = "replicate:replicate/llama-2-70b-chat"

@@ -21,74 +21,12 @@ async def create_async(
         proxy: str = None,
         **kwargs
     ) -> str:
-        if model in ["gpt-3.5-turbo", "gpt-4"]:
-            model = "openai:" + model
-        model = model if model else cls.model
-        proxies = None
-        if proxy:
-            if "://" not in proxy:
-                proxy = "http://" + proxy
-            proxies = {"http": proxy, "https": proxy}
-        headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.{rand1}.{rand2} Safari/537.36".format(
-                rand1=random.randint(0,9999),
-                rand2=random.randint(0,9999)
-            ),
-            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Accept-Language": "en-US,en;q=0.5",
-            "TE": "trailers",
-        }
-        async with AsyncSession(headers=headers, proxies=proxies, impersonate="chrome107") as session:
-            response = await session.get(cls.url + "/openai.jpeg")
-            response.raise_for_status()
-            custom_encoding = _get_custom_encoding(response.text)
-            headers = {
-                "Content-Type": "application/json",
-                "Custom-Encoding": custom_encoding,
-            }
-            data = _create_payload(model, messages)
-            response = await session.post(cls.url + "/api/generate", json=data, headers=headers)
-            response.raise_for_status()
-            return response.text
-
-
-def _create_payload(model: str, messages: list[dict[str, str]]) -> dict[str, Any]:
-    if model not in model_info:
-        raise ValueError(f'Model are not supported: {model}')
-    default_params = model_info[model]["default_params"]
-    return {
-        "messages": messages,
-        "playgroundId": str(uuid.uuid4()),
-        "chatIndex": 0,
-        "model": model
-    } | default_params
-
-# based on https://github.com/ading2210/vercel-llm-api
-def _get_custom_encoding(text: str) -> str:
-    data = json.loads(base64.b64decode(text, validate=True))
-    script = """
-    String.prototype.fontcolor = function() {{
-        return `<font>${{this}}</font>`
-    }}
-    var globalThis = {{marker: "mark"}};
-    ({script})({key})
-    """.format(
-        script=data["c"], key=data["a"]
-    )
-    context = quickjs.Context()  # type: ignore
-    token_data = json.loads(context.eval(script).json())  # type: ignore
-    token_data[2] = "mark"
-    token = {"r": token_data, "t": data["t"]}
-    token_str = json.dumps(token, separators=(",", ":")).encode("utf-16le")
-    return base64.b64encode(token_str).decode()
+        return
 
 
 class ModelInfo(TypedDict):
     id: str
     default_params: dict[str, Any]
 
 
 model_info: dict[str, ModelInfo] = {
     "anthropic:claude-instant-v1": {
         "id": "anthropic:claude-instant-v1",
5 changes: 4 additions & 1 deletion interference/g4f/Provider/Ylokh.py
@@ -51,7 +51,9 @@ async def create_async_generator
         if stream:
             async for line in response.content:
                 line = line.decode()
-                if line.startswith("data: ") and not line.startswith("data: [DONE]"):
+                if line.startswith("data: "):
+                    if line.startswith("data: [DONE]"):
+                        break
                     line = json.loads(line[6:-1])
                     content = line["choices"][0]["delta"].get("content")
                     if content:
@@ -71,6 +73,7 @@ def params(cls):
("stream", "bool"),
("proxy", "str"),
("temperature", "float"),
("top_p", "float"),
]
param = ", ".join([": ".join(p) for p in params])
return f"g4f.provider.{cls.__name__} supports: ({param})"
2 changes: 2 additions & 0 deletions interference/g4f/Provider/__init__.py
@@ -16,6 +16,7 @@
 from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
+from .GptGo import GptGo
 from .H2o import H2o
 from .HuggingChat import HuggingChat
 from .Liaobots import Liaobots
@@ -57,6 +58,7 @@
     'EasyChat',
     'Forefront',
     'GetGpt',
+    'GptGo',
     'H2o',
     'HuggingChat',
     'Liaobots',
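With the import and `__all__` entry registered, the new provider can be selected explicitly. A usage sketch, assuming the g4f API of this vintage:

```python
import g4f
from g4f.Provider import GptGo

# Force the GptGo provider rather than letting g4f pick one.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
    provider=GptGo,
)
print(response)
```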