Commit

Update github.ts
LovelyGuYiMeng authored Oct 13, 2024
1 parent 9fa3acc commit b0d8b21
Showing 1 changed file with 112 additions and 102 deletions.
214 changes: 112 additions & 102 deletions src/config/modelProviders/github.ts
@@ -4,14 +4,70 @@ import { ModelProviderCard } from '@/types/llm';
 // https://github.com/marketplace/models
 const Github: ModelProviderCard = {
   chatModels: [
+    {
+      description: 'Smaller, faster, and 80% cheaper than o1-preview, performs well at code generation and small context operations.',
+      displayName: 'OpenAI o1-mini',
+      enabled: true,
+      functionCall: false,
+      id: 'o1-mini',
+      maxOutput: 65_536,
+      tokens: 128_000,
+      vision: true,
+    },
+    {
+      description: 'Focused on advanced reasoning and solving complex problems, including math and science tasks. Ideal for applications that require deep contextual understanding and agentic workflows.',
+      displayName: 'OpenAI o1-preview',
+      enabled: true,
+      functionCall: false,
+      id: 'o1-preview',
+      maxOutput: 32_768,
+      tokens: 128_000,
+      vision: true,
+    },
+    {
+      description: 'An affordable, efficient AI solution for diverse text and image tasks.',
+      displayName: 'OpenAI GPT-4o mini',
+      enabled: true,
+      functionCall: true,
+      id: 'gpt-4o-mini',
+      maxOutput: 4096,
+      tokens: 128_000,
+      vision: true,
+    },
     {
       description:
-        'A 398B parameters (94B active) multilingual model, offering a 256K long context window, function calling, structured output, and grounded generation.',
-      displayName: 'AI21 Jamba 1.5 Large',
+        "OpenAI's most advanced multimodal model in the GPT-4 family. Can handle both text and image inputs.",
+      displayName: 'OpenAI GPT-4o',
+      enabled: true,
       functionCall: true,
-      id: 'ai21-jamba-1.5-large',
+      id: 'gpt-4o',
       maxOutput: 4096,
-      tokens: 262_144,
+      tokens: 128_000,
+      vision: true,
     },
+    {
+      description:
+        'Mistral Nemo is a cutting-edge Language Model (LLM) boasting state-of-the-art reasoning, world knowledge, and coding capabilities within its size category.',
+      displayName: 'Mistral Nemo',
+      id: 'mistral-nemo',
+      maxOutput: 4096,
+      tokens: 131_072,
+    },
+    {
+      description:
+        'Mistral Small can be used on any language-based task that requires high efficiency and low latency.',
+      displayName: 'Mistral Small',
+      id: 'mistral-small',
+      maxOutput: 4096,
+      tokens: 33_000,
+    },
+    {
+      description:
+        "Mistral's flagship model that's ideal for complex tasks that require large reasoning capabilities or are highly specialized (Synthetic Text Generation, Code Generation, RAG, or Agents).",
+      displayName: 'Mistral Large',
+      id: 'mistral-large',
+      maxOutput: 4096,
+      tokens: 33_000,
+    },
     {
       description:
@@ -24,11 +80,12 @@ const Github: ModelProviderCard = {
     },
     {
       description:
-        'A production-grade Mamba-based LLM model to achieve best-in-class performance, quality, and cost efficiency.',
-      displayName: 'AI21-Jamba-Instruct',
-      id: 'ai21-jamba-instruct',
+        'A 398B parameters (94B active) multilingual model, offering a 256K long context window, function calling, structured output, and grounded generation.',
+      displayName: 'AI21 Jamba 1.5 Large',
+      functionCall: true,
+      id: 'ai21-jamba-1.5-large',
       maxOutput: 4096,
-      tokens: 72_000,
+      tokens: 262_144,
     },
     {
       description:
@@ -48,169 +105,122 @@ const Github: ModelProviderCard = {
     },
     {
       description:
-        'A powerful 70-billion parameter model excelling in reasoning, coding, and broad language applications.',
-      displayName: 'Meta-Llama-3-70B-Instruct',
-      id: 'meta-llama-3-70b-instruct',
+        'Excels in image reasoning capabilities on high-res images for visual understanding apps.',
+      displayName: 'Llama 3.2 11B Vision',
+      id: 'llama-3.2-11b-vision-instruct',
       maxOutput: 4096,
-      tokens: 8192,
+      tokens: 131_072,
+      vision: true,
     },
     {
       description:
-        'A versatile 8-billion parameter model optimized for dialogue and text generation tasks.',
-      displayName: 'Meta-Llama-3-8B-Instruct',
-      id: 'meta-llama-3-8b-instruct',
+        'Advanced image reasoning capabilities for visual understanding agentic apps.',
+      displayName: 'Llama 3.2 90B Vision',
+      id: 'llama-3.2-90b-vision-instruct',
       maxOutput: 4096,
-      tokens: 8192,
+      tokens: 131_072,
+      vision: true,
     },
     {
       description:
         'The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
-      displayName: 'Meta-Llama-3.1-405B-Instruct',
-      id: 'meta-llama-3.1-405b-instruct',
+      displayName: 'Meta Llama 3.1 8B',
+      id: 'meta-llama-3.1-8b-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
     {
       description:
         'The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
-      displayName: 'Meta-Llama-3.1-70B-Instruct',
+      displayName: 'Meta Llama 3.1 70B',
       id: 'meta-llama-3.1-70b-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
     {
       description:
         'The Llama 3.1 instruction tuned text only models are optimized for multilingual dialogue use cases and outperform many of the available open source and closed chat models on common industry benchmarks.',
-      displayName: 'Meta-Llama-3.1-8B-Instruct',
-      id: 'meta-llama-3.1-8b-instruct',
+      displayName: 'Meta Llama 3.1 405B',
+      id: 'meta-llama-3.1-405b-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
     {
       description:
-        "Mistral's flagship model that's ideal for complex tasks that require large reasoning capabilities or are highly specialized (Synthetic Text Generation, Code Generation, RAG, or Agents).",
-      displayName: 'Mistral Large',
-      id: 'mistral-large',
+        'A versatile 8-billion parameter model optimized for dialogue and text generation tasks.',
+      displayName: 'Meta Llama 3 8B',
+      id: 'meta-llama-3-8b-instruct',
       maxOutput: 4096,
-      tokens: 33_000,
+      tokens: 8192,
     },
     {
       description:
-        'Mistral Large (2407) is an advanced Large Language Model (LLM) with state-of-the-art reasoning, knowledge and coding capabilities.',
-      displayName: 'Mistral Large (2407)',
-      id: 'mistral-large-2407',
+        'A powerful 70-billion parameter model excelling in reasoning, coding, and broad language applications.',
+      displayName: 'Meta Llama 3 70B',
+      id: 'meta-llama-3-70b-instruct',
       maxOutput: 4096,
-      tokens: 131_072,
+      tokens: 8192,
     },
     {
-      description:
-        'Mistral Nemo is a cutting-edge Language Model (LLM) boasting state-of-the-art reasoning, world knowledge, and coding capabilities within its size category.',
-      displayName: 'Mistral Nemo',
-      id: 'mistral-nemo',
+      description: 'Refresh of Phi-3-mini model.',
+      displayName: 'Phi-3.5-mini 128K',
+      id: 'Phi-3-5-mini-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
     {
-      description:
-        'Mistral Small can be used on any language-based task that requires high efficiency and low latency.',
-      displayName: 'Mistral Small',
-      id: 'mistral-small',
-      maxOutput: 4096,
-      tokens: 33_000,
-    },
-    {
-      description:
-        "OpenAI's most advanced multimodal model in the GPT-4 family. Can handle both text and image inputs.",
-      displayName: 'OpenAI GPT-4o',
-      enabled: true,
-      functionCall: true,
-      id: 'gpt-4o',
-      maxOutput: 4096,
-      tokens: 128_000,
-      vision: true,
-    },
-    {
-      description: 'An affordable, efficient AI solution for diverse text and image tasks.',
-      displayName: 'OpenAI GPT-4o mini',
-      enabled: true,
-      functionCall: true,
-      id: 'gpt-4o-mini',
-      maxOutput: 4096,
-      tokens: 128_000,
-      vision: true,
-    },
-    {
-      description: 'Focused on advanced reasoning and solving complex problems, including math and science tasks. Ideal for applications that require deep contextual understanding and agentic workflows.',
-      displayName: 'OpenAI o1-preview',
-      enabled: true,
-      functionCall: false,
-      id: 'o1-preview',
-      maxOutput: 32_768,
-      tokens: 128_000,
-      vision: true,
-    },
-    {
-      description: 'Smaller, faster, and 80% cheaper than o1-preview, performs well at code generation and small context operations.',
-      displayName: 'OpenAI o1-mini',
-      enabled: true,
-      functionCall: false,
-      id: 'o1-mini',
-      maxOutput: 65_536,
-      tokens: 128_000,
-      vision: true,
-    },
-    {
-      description:
-        'Same Phi-3-medium model, but with a larger context size for RAG or few shot prompting.',
-      displayName: 'Phi-3-medium instruct (128k)',
-      id: 'Phi-3-medium-128k-instruct',
+      description: 'Refresh of Phi-3-vision model.',
+      displayName: 'Phi-3.5-vision 128K',
+      id: 'Phi-3.5-vision-instrust',
       maxOutput: 4096,
       tokens: 131_072,
+      vision: true,
     },
     {
       description:
-        'A 14B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data.',
-      displayName: 'Phi-3-medium instruct (4k)',
-      id: 'Phi-3-medium-4k-instruct',
+        'Tiniest member of the Phi-3 family. Optimized for both quality and low latency.',
+      displayName: 'Phi-3-mini 4K',
+      id: 'Phi-3-mini-4k-instruct',
       maxOutput: 4096,
       tokens: 4096,
     },
     {
       description:
         'Same Phi-3-mini model, but with a larger context size for RAG or few shot prompting.',
-      displayName: 'Phi-3-mini instruct (128k)',
+      displayName: 'Phi-3-mini 128K',
       id: 'Phi-3-mini-128k-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
     {
       description:
-        'Tiniest member of the Phi-3 family. Optimized for both quality and low latency.',
-      displayName: 'Phi-3-mini instruct (4k)',
-      id: 'Phi-3-mini-4k-instruct',
+        'A 7B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data.',
+      displayName: 'Phi-3-small 8K',
+      id: 'Phi-3-small-8k-instruct',
       maxOutput: 4096,
-      tokens: 4096,
+      tokens: 8192,
     },
     {
       description:
         'Same Phi-3-small model, but with a larger context size for RAG or few shot prompting.',
-      displayName: 'Phi-3-small instruct (128k)',
+      displayName: 'Phi-3-small 128K',
       id: 'Phi-3-small-128k-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
     {
       description:
-        'A 7B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data.',
-      displayName: 'Phi-3-small instruct (8k)',
-      id: 'Phi-3-small-8k-instruct',
+        'A 14B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data.',
+      displayName: 'Phi-3-medium 4K',
+      id: 'Phi-3-medium-4k-instruct',
       maxOutput: 4096,
-      tokens: 131_072,
+      tokens: 4096,
     },
     {
-      description: 'Refresh of Phi-3-mini model.',
-      displayName: 'Phi-3.5-mini instruct (128k)',
-      id: 'Phi-3-5-mini-instruct',
+      description:
+        'Same Phi-3-medium model, but with a larger context size for RAG or few shot prompting.',
+      displayName: 'Phi-3-medium 128K',
+      id: 'Phi-3-medium-128k-instruct',
       maxOutput: 4096,
       tokens: 131_072,
     },
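For quick reference, here is a minimal sketch of the entry shape this commit edits. The field names come straight from the keys visible in the diff; the interface name, the helper function, and the field comments below are assumptions for illustration only, not the actual ModelProviderCard type exported from '@/types/llm'.

// Sketch only: an assumed shape for the chat model entries edited above.
// Field names mirror the keys visible in the diff; the real type lives in '@/types/llm'.
interface ChatModelCardSketch {
  description?: string;
  displayName?: string;
  enabled?: boolean; // assumed: whether the model is listed by default
  functionCall?: boolean; // assumed: supports tool / function calling
  id: string; // model identifier used by the provider
  maxOutput?: number; // assumed: maximum completion tokens
  tokens?: number; // assumed: context window size
  vision?: boolean; // assumed: accepts image input
}

// Hypothetical helper: look up one card by id from a provider's chatModels array.
const findChatModel = (
  chatModels: ChatModelCardSketch[],
  id: string,
): ChatModelCardSketch | undefined => chatModels.find((model) => model.id === id);

// Example using values taken from the updated file:
const o1Mini: ChatModelCardSketch = {
  description:
    'Smaller, faster, and 80% cheaper than o1-preview, performs well at code generation and small context operations.',
  displayName: 'OpenAI o1-mini',
  enabled: true,
  functionCall: false,
  id: 'o1-mini',
  maxOutput: 65_536,
  tokens: 128_000,
  vision: true,
};

console.log(findChatModel([o1Mini], 'o1-mini')?.displayName); // 'OpenAI o1-mini'

If the assumed field semantics hold, note that tokens and maxOutput differ in kind: the o1 entries keep a 128_000 context window while capping completions at 65_536 and 32_768 respectively, whereas the GPT-4o entries cap completions at 4096.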
