Feature: Add Bitdeer AI model API provider #9362

Open · wants to merge 6 commits into base: main
73 changes: 73 additions & 0 deletions docs/my-website/docs/providers/bitdeerai.md
@@ -0,0 +1,73 @@
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';

# Bitdeer AI

:::info
**We support ALL Bitdeer AI models. Just set `bitdeerai/` as a prefix when sending completion requests.**
:::

## API Keys

```python
import os
os.environ["BITDEERAI_API_KEY"] = "your-api-key"
```

## Sample Usage

### Chat
```python
import os
from litellm import completion

os.environ["BITDEERAI_API_KEY"] = "your-api-key"

messages = [
    {
        "role": "system",
        "content": "You are a knowledgeable assistant. Provide concise and clear explanations to scientific questions."
    },
    {
        "role": "user",
        "content": "Can you explain the theory of evolution in simple terms?"
    }
]

response = completion(model="bitdeerai/OpenGVLab/InternVL2_5-78B-MPO", messages=messages)
print(response)
```
### Embedding
```python
import os
from litellm import embedding

os.environ["BITDEERAI_API_KEY"] = "your-api-key"

response = embedding(
    model="bitdeerai/BAAI/bge-m3",
    input=["The cat danced gracefully under the moonlight, its shadow twirling like a silent partner."],
)
print(response)
```
## Bitdeer AI Models

liteLLM supports `non-streaming` and `streaming` requests to all models on https://www.bitdeer.ai.

Example Bitdeer AI usage below. Note: liteLLM supports all models deployed on Bitdeer AI.
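
Since streaming is supported, the standard liteLLM `stream=True` flag applies. A minimal sketch, with a model name taken from the table below:

```python
import os
from litellm import completion

os.environ["BITDEERAI_API_KEY"] = "your-api-key"

# Streamed responses arrive as OpenAI-style chunks with delta content.
response = completion(
    model="bitdeerai/deepseek-ai/DeepSeek-R1",
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    stream=True,
)
for chunk in response:
    print(chunk.choices[0].delta.content or "", end="")
```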


### Chat models
| Model Name | Function Call |
|-------------------------------------------|---------------------------------------------------------------------|
| bitdeerai/deepseek-ai/DeepSeek-R1 | `completion('bitdeerai/deepseek-ai/DeepSeek-R1', messages)` |
| bitdeerai/deepseek-ai/DeepSeek-V3 | `completion('bitdeerai/deepseek-ai/DeepSeek-V3', messages)` |
| bitdeerai/Qwen/QwQ-32B | `completion('bitdeerai/Qwen/QwQ-32B', messages)` |
| bitdeerai/Qwen/Qwen2.5-VL-72B-Instruct | `completion('bitdeerai/Qwen/Qwen2.5-VL-72B-Instruct', messages)` |
| bitdeerai/Qwen/Qwen2.5-Coder-32B-Instruct | `completion('bitdeerai/Qwen/Qwen2.5-Coder-32B-Instruct', messages)` |
| bitdeerai/meta-llama/Llama-3.3-70B-Instruct | `completion('bitdeerai/meta-llama/Llama-3.3-70B-Instruct', messages)` |
| bitdeerai/OpenGVLab/InternVL2_5-78B-MPO | `completion('bitdeerai/OpenGVLab/InternVL2_5-78B-MPO', messages)` |

### Embedding models
| Model Name | Function Call |
|-----------------------------------------------------|-------------------------------------------------------------------|
| bitdeerai/Alibaba-NLP/gte-Qwen2-7B-instruct | `embedding('bitdeerai/Alibaba-NLP/gte-Qwen2-7B-instruct', input)` |
| bitdeerai/BAAI/bge-m3 | `embedding('bitdeerai/BAAI/bge-m3', input)` |
| bitdeerai/BAAI/bge-large-en-v1.5 | `embedding('bitdeerai/BAAI/bge-large-en-v1.5', input)` |
| bitdeerai/intfloat/multilingual-e5-large-instruct | `embedding('bitdeerai/intfloat/multilingual-e5-large-instruct', input)` |
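
The PR does not document async usage, but liteLLM's `aembedding` is the standard async counterpart and should route through the same OpenAI-compatible path; a minimal sketch under that assumption:

```python
import asyncio
import os

from litellm import aembedding

os.environ["BITDEERAI_API_KEY"] = "your-api-key"

async def main():
    # Same "bitdeerai/" prefix convention as the synchronous call.
    response = await aembedding(
        model="bitdeerai/BAAI/bge-m3",
        input=["The quick brown fox jumps over the lazy dog."],
    )
    print(response)

asyncio.run(main())
```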


3 changes: 2 additions & 1 deletion docs/my-website/sidebars.js
@@ -233,7 +233,8 @@ const sidebars = {
"providers/sambanova",
"providers/custom_llm_server",
"providers/petals",
"providers/snowflake"
"providers/snowflake",
"providers/bitdeerai"
],
},
{
12 changes: 12 additions & 0 deletions litellm/__init__.py
@@ -48,6 +48,7 @@
    huggingface_models,
    empower_models,
    together_ai_models,
    bitdeerai_models,
    baseten_models,
    REPEATED_STREAMING_CHUNK_LIMIT,
    request_timeout,
@@ -183,6 +184,7 @@
aleph_alpha_key: Optional[str] = None
nlp_cloud_key: Optional[str] = None
snowflake_key: Optional[str] = None
bitdeerai_api_key: Optional[str] = None
common_cloud_provider_auth_params: dict = {
"params": ["project", "region_name", "token"],
"providers": ["vertex_ai", "bedrock", "watsonx", "azure", "vertex_ai_beta"],
@@ -418,6 +420,7 @@ def identify(event_details):
sambanova_models: List = []
assemblyai_models: List = []
snowflake_models: List = []
bitdeerai_models: List = []


def is_bedrock_pricing_only_model(key: str) -> bool:
@@ -648,6 +651,7 @@ def add_known_models():
        + assemblyai_models
        + jina_ai_models
        + snowflake_models
        + bitdeerai_models
    )

    model_list_set = set(model_list)
@@ -704,6 +708,7 @@ def add_known_models():
"assemblyai": assemblyai_models,
"jina_ai": jina_ai_models,
"snowflake": snowflake_models,
"bitdeerai":bitdeerai_models,
}

# mapping for those models which have larger equivalents
@@ -849,6 +854,13 @@ def add_known_models():
    GoogleAIStudioGeminiConfig as GeminiConfig,  # aliased to maintain backwards compatibility
)

from .llms.bitdeerai.chat.transformation import BitdeerAIChatConfig
from .llms.bitdeerai.embed.transformation import BitdeerAIEmbeddingConfig


from .llms.vertex_ai.vertex_embeddings.transformation import (
    VertexAITextEmbeddingConfig,
4 changes: 4 additions & 0 deletions litellm/constants.py
@@ -80,6 +80,7 @@
"hosted_vllm",
"lm_studio",
"galadriel",
"bitdeerai",
]


@@ -140,6 +141,7 @@
"api.sambanova.ai/v1",
"api.x.ai/v1",
"api.galadriel.ai/v1",
"https://api-inference.bitdeer.ai/v1",
]


@@ -169,6 +171,7 @@
"hosted_vllm",
"lm_studio",
"galadriel",
"bitdeerai",
]
openai_text_completion_compatible_providers: List = (
[ # providers that support `/v1/completions`
@@ -314,6 +317,7 @@
"WizardLM/WizardLM-70B-V1.0",
] # supports all together ai models, just pass in the model id e.g. completion(model="together_computer/replit_code_3b",...)

bitdeerai_models: List = []

baseten_models: List = [
"qvv0xeq",
11 changes: 11 additions & 0 deletions litellm/litellm_core_utils/get_llm_provider_logic.py
@@ -564,6 +564,17 @@ def _get_openai_compatible_provider_info( # noqa: PLR0915
or get_secret_str("FRIENDLIAI_API_KEY")
or get_secret_str("FRIENDLI_TOKEN")
)
elif custom_llm_provider == "bitdeerai":
api_base = (
api_base
or get_secret("BITDEERAI_API_BASE")
or "https://api-inference.bitdeer.ai/v1"
) # type: ignore
dynamic_api_key = (
api_key
or get_secret_str("BITDEERAI_API_KEY")
or get_secret_str("BITDEERAI_TOKEN")
)
elif custom_llm_provider == "galadriel":
api_base = (
api_base
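
Reviewer note: with this branch in place, provider resolution should behave as sketched below, assuming the public `litellm.get_llm_provider` helper (which returns a `(model, provider, dynamic_api_key, api_base)` tuple):

```python
from litellm import get_llm_provider

model, provider, dynamic_api_key, api_base = get_llm_provider(
    model="bitdeerai/Qwen/QwQ-32B"
)
print(provider)  # "bitdeerai"
print(api_base)  # "https://api-inference.bitdeer.ai/v1" unless BITDEERAI_API_BASE is set
```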
5 changes: 5 additions & 0 deletions litellm/litellm_core_utils/get_supported_openai_params.py
@@ -205,6 +205,11 @@ def get_supported_openai_params( # noqa: PLR0915
                model=model
            )
        )
    elif custom_llm_provider == "bitdeerai":
        if request_type == "chat_completion":
            return litellm.BitdeerAIChatConfig().get_supported_openai_params(model=model)
        elif request_type == "embeddings":
            return litellm.BitdeerAIEmbeddingConfig().get_supported_openai_params()
    elif custom_llm_provider in litellm._custom_providers:
        if request_type == "chat_completion":
            provider_config = litellm.ProviderConfigManager.get_provider_chat_config(
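
Reviewer note: this wires Bitdeer AI into the standard introspection helper; a hedged sketch of the lookup:

```python
import litellm

# Chat params come from BitdeerAIChatConfig (which inherits OpenAIGPTConfig);
# embedding params come from BitdeerAIEmbeddingConfig, which returns [].
chat_params = litellm.get_supported_openai_params(
    model="bitdeerai/deepseek-ai/DeepSeek-R1",
    custom_llm_provider="bitdeerai",
    request_type="chat_completion",
)
print(chat_params)
```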
8 changes: 8 additions & 0 deletions litellm/llms/bitdeerai/chat/transformation.py
@@ -0,0 +1,8 @@
"""
Translates from OpenAI's `/v1/chat/completions` to BitdeerAI's `/v1/chat/completions`
"""
from ...openai.chat.gpt_transformation import OpenAIGPTConfig


class BitdeerAIChatConfig(OpenAIGPTConfig):
pass
19 changes: 19 additions & 0 deletions litellm/llms/bitdeerai/embed/transformation.py
@@ -0,0 +1,19 @@
"""
Translates from OpenAI's `/v1/embeddings` to BitdeerAI's `/v1/embeddings`
"""


class BitdeerAIEmbeddingConfig:
@classmethod
def get_supported_openai_params(cls) -> list:
return []

@classmethod
def map_openai_params(
cls,
non_default_params: dict,
optional_params: dict,
model: str,
drop_params: dict = None,
) -> dict:
return optional_params
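
Reviewer note: a tiny sketch of the pass-through behaviour above; the `dimensions` argument is a hypothetical input, not something this config declares support for:

```python
from litellm.llms.bitdeerai.embed.transformation import BitdeerAIEmbeddingConfig

# Nothing is declared as supported, so optional_params passes through
# unchanged and non-default params are simply not mapped.
out = BitdeerAIEmbeddingConfig.map_openai_params(
    non_default_params={"dimensions": 512},  # hypothetical input
    optional_params={},
    model="bitdeerai/BAAI/bge-m3",
)
assert out == {}
```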
1 change: 1 addition & 0 deletions litellm/main.py
@@ -3464,6 +3464,7 @@ def embedding( # noqa: PLR0915
or custom_llm_provider == "together_ai"
or custom_llm_provider == "nvidia_nim"
or custom_llm_provider == "litellm_proxy"
or custom_llm_provider == "bitdeerai"
):
api_base = (
api_base
1 change: 1 addition & 0 deletions litellm/types/utils.py
@@ -1968,6 +1968,7 @@ class LlmProviders(str, Enum):
TOPAZ = "topaz"
ASSEMBLYAI = "assemblyai"
SNOWFLAKE = "snowflake"
BITDEERAI = "bitdeerai"


# Create a set of all provider values for quick lookup
47 changes: 47 additions & 0 deletions litellm/utils.py
@@ -2646,6 +2646,21 @@ def _check_valid_arg(supported_params: Optional[list]):
        )
        final_params = {**optional_params, **kwargs}
        return final_params
    elif custom_llm_provider == "bitdeerai":
        supported_params = get_supported_openai_params(
            model=model,
            custom_llm_provider="bitdeerai",
            request_type="embeddings",
        )
        _check_valid_arg(supported_params=supported_params)
        optional_params = litellm.BitdeerAIEmbeddingConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params={},
            model=model,
            drop_params=drop_params if drop_params is not None else False,
        )
        final_params = {**optional_params, **kwargs}
        return final_params
    elif custom_llm_provider == "voyage":
        supported_params = get_supported_openai_params(
            model=model,
@@ -2927,6 +2942,7 @@ def get_optional_params( # noqa: PLR0915
and custom_llm_provider != "bedrock"
and custom_llm_provider != "ollama_chat"
and custom_llm_provider != "openrouter"
and custom_llm_provider != "bitdeerai"
and custom_llm_provider not in litellm.openai_compatible_providers
):
if custom_llm_provider == "ollama":
@@ -3168,6 +3184,17 @@ def _check_valid_arg(supported_params: List[str]):
                else False
            ),
        )
    elif custom_llm_provider == "bitdeerai":
        optional_params = litellm.BitdeerAIChatConfig().map_openai_params(
            non_default_params=non_default_params,
            optional_params=optional_params,
            model=model,
            drop_params=(
                drop_params
                if drop_params is not None and isinstance(drop_params, bool)
                else False
            ),
        )
    elif custom_llm_provider == "vertex_ai" and (
        model in litellm.vertex_chat_models
        or model in litellm.vertex_code_chat_models
@@ -4008,6 +4035,13 @@ def get_api_key(llm_provider: str, dynamic_api_key: Optional[str]):
or get_secret("TOGETHERAI_API_KEY")
or get_secret("TOGETHER_AI_TOKEN")
)
elif llm_provider == "bitdeerai":
api_key = (
api_key
or litellm.bitdeerai_api_key
or get_secret("BITDEERAI_API_KEY")
or get_secret("BITDEERAI_TOKEN")
)
return api_key


@@ -4995,6 +5029,11 @@ def validate_environment( # noqa: PLR0915
            keys_in_environment = True
        else:
            missing_keys.append("VOYAGE_API_KEY")
    elif custom_llm_provider == "bitdeerai":
        if "BITDEERAI_API_KEY" in os.environ:
            keys_in_environment = True
        else:
            missing_keys.append("BITDEERAI_API_KEY")
    elif custom_llm_provider == "fireworks_ai":
        if (
            "FIREWORKS_AI_API_KEY" in os.environ
@@ -5096,6 +5135,12 @@ def validate_environment( # noqa: PLR0915
            keys_in_environment = True
        else:
            missing_keys.append("NLP_CLOUD_API_KEY")
    ## bitdeerai
    elif model in litellm.bitdeerai_models:
        if "BITDEERAI_API_KEY" in os.environ:
            keys_in_environment = True
        else:
            missing_keys.append("BITDEERAI_API_KEY")

    if api_key is not None:
        new_missing_keys = []
@@ -6222,6 +6267,8 @@ def get_provider_chat_config( # noqa: PLR0915
        return litellm.TritonConfig()
    elif litellm.LlmProviders.PETALS == provider:
        return litellm.PetalsConfig()
    elif litellm.LlmProviders.BITDEERAI == provider:
        return litellm.BitdeerAIChatConfig()
    elif litellm.LlmProviders.BEDROCK == provider:
        bedrock_route = BedrockModelInfo.get_bedrock_route(model)
        bedrock_invoke_provider = litellm.BedrockLLM.get_bedrock_invoke_provider(
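
Reviewer note: taken together, the `validate_environment` branches above should make the standard environment check flag the Bitdeer key; a hedged sketch:

```python
import litellm

# With BITDEERAI_API_KEY unset, the provider's key is reported as missing;
# once it is exported, keys_in_environment flips to True.
report = litellm.validate_environment(model="bitdeerai/deepseek-ai/DeepSeek-R1")
print(report["keys_in_environment"], report["missing_keys"])
```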
24 changes: 24 additions & 0 deletions tests/litellm/llms/bitdeerai/chat/test_transformation.py
@@ -0,0 +1,24 @@
import os
import sys
from typing import Dict, List, Optional
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

sys.path.insert(
    0, os.path.abspath("../../../../..")
)  # Adds the parent directory to the system path

from litellm.llms.bitdeerai.chat.transformation import BitdeerAIChatConfig


class TestBitdeerAIChatConfig:
    def test_inheritance(self):
        """Test proper inheritance from OpenAIGPTConfig"""
        config = BitdeerAIChatConfig()

        from litellm.llms.openai.chat.gpt_transformation import OpenAIGPTConfig

        assert isinstance(config, OpenAIGPTConfig)
        assert hasattr(config, "get_supported_openai_params")