From eda25b105083303e8be335fdea74791ba0be6042 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:53:44 -0800 Subject: [PATCH 01/38] add provider selector component --- .../PropertiesWindow/LLMProviderSelect.tsx | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx diff --git a/packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx b/packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx new file mode 100644 index 0000000000..4ec099f6f0 --- /dev/null +++ b/packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx @@ -0,0 +1,30 @@ +import React, { useState } from "react"; +import { ConfigurationComponentProps } from "./PropertiesWindow"; +import { LLMProviders } from "plugins/core/src/lib/services/coreLLMService/types"; + +export const LLMProviderSelect = (props: ConfigurationComponentProps) => { + // TODO: We should make google default when available + const [selectedProvider, setSelectedProvider] = useState(LLMProviders.OpenAI); + + const onChange = (provider: LLMProviders) => { + setSelectedProvider(provider); + props.updateConfigKey("modelProvider", provider); + } + + return ( +
+    <div>
+      <h3>LLM Provider</h3>
+      <select
+        value={selectedProvider}
+        onChange={(e) => onChange(e.target.value as LLMProviders)}
+      >
+        {Object.values(LLMProviders).map((provider) => (
+          <option key={provider} value={provider}>
+            {provider}
+          </option>
+        ))}
+      </select>
+    </div>
+ ); +} From 832435a2ed211b02f652de2117212488f069a96b Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:53:57 -0800 Subject: [PATCH 02/38] add provider selector to properties window --- .../src/components/PropertiesWindow/PropertiesWindow.tsx | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx b/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx index af5517c57a..ae2b04f9ac 100644 --- a/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx +++ b/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx @@ -14,6 +14,7 @@ import { SpellInterface } from 'server/schemas'; import { VariableNames } from './variableNames'; import { ValueType } from './ValueType'; import { DefaultConfig } from './DefaultConfig'; +import { LLMProviderSelect } from './LLMProviderSelect'; type Props = { tab: Tab @@ -38,7 +39,8 @@ const ConfigurationComponents = { eventStateProperties: EventStateProperties, variableNames: VariableNames, valueType: ValueType, - default: DefaultConfig + default: DefaultConfig, + modelProviders: LLMProviderSelect, } export const PropertiesWindow = (props: Props) => { @@ -47,6 +49,7 @@ export const PropertiesWindow = (props: Props) => { const selectedNode = useSelector(selectActiveNode(props.tab.id)) const handleChange = useChangeNodeData(selectedNode?.id); + if (!selectedNode) return null const spec = nodeSpecs.find(spec => spec.type === selectedNode.type) @@ -109,4 +112,4 @@ export const PropertiesWindow = (props: Props) => { }) as any} ) -} \ No newline at end of file +} From 00dec1040d0a189a44883fdf4356c3283a5897b4 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:56:03 -0800 Subject: [PATCH 03/38] update type name --- plugins/core/src/lib/services/types.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/plugins/core/src/lib/services/types.ts b/plugins/core/src/lib/services/types.ts index 95c9d31a3c..28f8215fb5 100644 --- a/plugins/core/src/lib/services/types.ts +++ b/plugins/core/src/lib/services/types.ts @@ -2,7 +2,7 @@ import { Chunk, CompletionRequest, CompletionResponse, - LLMModels, + CompletionModels, Message, } from './coreLLMService/types' @@ -23,7 +23,7 @@ export interface IBudgetManagerService { // Returns the current cost of a user get_current_cost(user: string): Promise // Returns the model cost of a user - get_model_cost(user: string): Promise> + get_model_cost(user: string): Promise> // Checks if a user is valid is_valid_user(user: string): Promise // Returns a list of all users @@ -69,7 +69,7 @@ export interface ICoreBudgetManagerService { completionObj: CompletionResponse ): Promise getCurrentCost(projectId: string): Promise - getModelCost(projectId: string): Promise> + getModelCost(projectId: string): Promise> isValidUser(projectId: string): Promise getUsers(): Promise resetCost(projectId: string): Promise From 50269984452dfaf619a957d699dd0d06700a1e47 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:56:16 -0800 Subject: [PATCH 04/38] update type name --- .../coreBudgetMangerService.ts | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/plugins/core/src/lib/services/coreBudgetManagerService/coreBudgetMangerService.ts b/plugins/core/src/lib/services/coreBudgetManagerService/coreBudgetMangerService.ts index dbd961d17c..11f0d797db 100644 --- a/plugins/core/src/lib/services/coreBudgetManagerService/coreBudgetMangerService.ts 
+++ b/plugins/core/src/lib/services/coreBudgetManagerService/coreBudgetMangerService.ts @@ -1,6 +1,10 @@ import { python } from 'pythonia' -import { CompletionResponse, LLMModels, Message } from '../coreLLMService/types' +import { + CompletionResponse, + CompletionModels, + Message, +} from '../coreLLMService/types' import { UserService } from '../userService/userService' import { BudgetDuration, @@ -151,7 +155,9 @@ export class CoreBudgetManagerService implements ICoreBudgetManagerService { * @param projectId - Project ID * @returns Promise */ - async getModelCost(projectId: string): Promise> { + async getModelCost( + projectId: string + ): Promise> { const modelCost = await this.liteLLMBudgetManager?.get_model_cost(projectId) if (modelCost === null || modelCost === undefined) { throw new Error('Error getting model cost') From 5ea48b7d332f59e5b7b3ef5c59cfc020c2e41010 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:57:10 -0800 Subject: [PATCH 05/38] extract contants into smaller files --- .../coreEmbeddingService/constants.ts | 63 ---- .../lib/services/coreLLMService/constants.ts | 316 ------------------ .../constants/allCompletionModels.ts | 56 ++++ .../constants/allEmbeddingModels.ts | 18 + .../constants/completionModelArrays.ts | 61 ++++ .../constants/embeddingModelArrays.ts | 17 + 6 files changed, 152 insertions(+), 379 deletions(-) delete mode 100644 plugins/core/src/lib/services/coreEmbeddingService/constants.ts delete mode 100644 plugins/core/src/lib/services/coreLLMService/constants.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/constants/allCompletionModels.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/constants/allEmbeddingModels.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/constants/embeddingModelArrays.ts diff --git a/plugins/core/src/lib/services/coreEmbeddingService/constants.ts b/plugins/core/src/lib/services/coreEmbeddingService/constants.ts deleted file mode 100644 index 8ee8764cbc..0000000000 --- a/plugins/core/src/lib/services/coreEmbeddingService/constants.ts +++ /dev/null @@ -1,63 +0,0 @@ -import { LLMProviders } from '../coreLLMService/types' -import { - BedrockEmbeddingModels, - CohereEmbeddingModels, - EmbeddingModels, - HuggingFaceEmbeddingModels, - MistralEmbeddingModels, - OpenAIEmbeddingModels, - VoyageEmbeddingModels, -} from './types' - -export const embeddingProviderMap: Record = { - // Bedrock models - [BedrockEmbeddingModels.AmazonTitanEmbedTextV1]: LLMProviders.Bedrock, - [BedrockEmbeddingModels.CohereEmbedEnglishV3]: LLMProviders.Bedrock, - [BedrockEmbeddingModels.CohereEmbedMultilingualV3]: LLMProviders.Bedrock, - - // Cohere models - [CohereEmbeddingModels.EmbedEnglishLightV20]: LLMProviders.Cohere, - [CohereEmbeddingModels.EmbedEnglishLightV30]: LLMProviders.Cohere, - [CohereEmbeddingModels.EmbedEnglishV20]: LLMProviders.Cohere, - [CohereEmbeddingModels.EmbedEnglishV30]: LLMProviders.Cohere, - [CohereEmbeddingModels.EmbedMultilingualLightV30]: LLMProviders.Cohere, - [CohereEmbeddingModels.EmbedMultilingualV20]: LLMProviders.Cohere, - [CohereEmbeddingModels.EmbedMultilingualV30]: LLMProviders.Cohere, - - // Mistral models - [MistralEmbeddingModels.MistralEmbed]: LLMProviders.Mistral, - - // Voyage models - [VoyageEmbeddingModels.Voyage02]: LLMProviders.VoyageAI, - [VoyageEmbeddingModels.VoyageCode02]: LLMProviders.VoyageAI, - [VoyageEmbeddingModels.VoyageLite01Instruct]: 
LLMProviders.VoyageAI, - - // OpenAI models - [OpenAIEmbeddingModels.TextEmbeddingAda002]: LLMProviders.OpenAI, - - // HuggingFace models - [HuggingFaceEmbeddingModels.HuggingFaceMicrosoftCodebertBase]: - LLMProviders.HuggingFace, - [HuggingFaceEmbeddingModels.HuggingFaceBAAIBgeLargeZh]: - LLMProviders.HuggingFace, - [HuggingFaceEmbeddingModels.HuggingFaceAnyHfEmbeddingModel]: - LLMProviders.HuggingFace, -} - -const bedrockEmbeddingModelsArray = Object.values(BedrockEmbeddingModels) -const cohereEmbeddingModelsArray = Object.values(CohereEmbeddingModels) -const mistralEmbeddingModelsArray = Object.values(MistralEmbeddingModels) -const voyageEmbeddingModelsArray = Object.values(VoyageEmbeddingModels) -const openAIEmbeddingModelsArray = Object.values(OpenAIEmbeddingModels) -const huggingFaceEmbeddingModelsArray = Object.values( - HuggingFaceEmbeddingModels -) - -export const allEmbeddingModels: EmbeddingModels[] = [ - ...bedrockEmbeddingModelsArray, - ...cohereEmbeddingModelsArray, - ...mistralEmbeddingModelsArray, - ...voyageEmbeddingModelsArray, - ...openAIEmbeddingModelsArray, - ...huggingFaceEmbeddingModelsArray, -] diff --git a/plugins/core/src/lib/services/coreLLMService/constants.ts b/plugins/core/src/lib/services/coreLLMService/constants.ts deleted file mode 100644 index e846277c77..0000000000 --- a/plugins/core/src/lib/services/coreLLMService/constants.ts +++ /dev/null @@ -1,316 +0,0 @@ -import { allEmbeddingModels } from '../coreEmbeddingService/constants' -import { EmbeddingModels } from '../coreEmbeddingService/types' -import { - AI21Models, - AlephAlphaModels, - AnthropicModels, - BaseTenModels, - BedrockModels, - CloudflareWorkersAIModels, - DeepInfraChatModels, - GoogleAIStudioModels, - HuggingFaceModelsWithPromptFormatting, - LLMModels, - LLMProviderKeys, - MistralAIModels, - NLPCloudModels, - OllamaModels, - OllamaVisionModels, - OpenAIChatCompletionModels, - OpenAITextCompletionInstructModels, - OpenAIVisionModels, - PalmModels, - PerplexityAIModels, - PetalsModels, - SageMakerModels, - TogetherAIModels, - VLLMModels, - VertexAIGoogleModels, - VoyageAIModels, - XinferenceModels, -} from './types' - -export const modelProviderMap: Record = { - /// OpenAI Chat Completion Models - [OpenAIChatCompletionModels.GPT35Turbo1106Preview]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT35Turbo]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT35Turbo1106]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT35Turbo0301]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT35Turbo0613]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT35Turbo16k]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT35Turbo16k0613]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT4]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT40314]: LLMProviderKeys.OpenAI, - [OpenAIChatCompletionModels.GPT40613]: LLMProviderKeys.OpenAI, - - // OpenAI Text Completion / Instruct Models - [OpenAITextCompletionInstructModels.Ada001]: LLMProviderKeys.OpenAI, - [OpenAITextCompletionInstructModels.Babbage001]: LLMProviderKeys.OpenAI, - [OpenAITextCompletionInstructModels.Babbage002]: LLMProviderKeys.OpenAI, - [OpenAITextCompletionInstructModels.Curie001]: LLMProviderKeys.OpenAI, - [OpenAITextCompletionInstructModels.Davinci002]: LLMProviderKeys.OpenAI, - [OpenAITextCompletionInstructModels.GPT35TurboInstruct]: - LLMProviderKeys.OpenAI, - [OpenAITextCompletionInstructModels.TextDavinci003]: LLMProviderKeys.OpenAI, - - // OpenAI Vision Models - 
[OpenAIVisionModels.GPT4VisionPreview]: LLMProviderKeys.OpenAI, - - // HuggingFace Models With Prompt Formatting - [HuggingFaceModelsWithPromptFormatting.HuggingFaceCodellamaCodeLlama34bInstructHf]: - LLMProviderKeys.HuggingFace, - [HuggingFaceModelsWithPromptFormatting.HuggingFaceMetaLlamaLlama27bChat]: - LLMProviderKeys.HuggingFace, - [HuggingFaceModelsWithPromptFormatting.HuggingFaceMistral7BInstructV01]: - LLMProviderKeys.HuggingFace, - [HuggingFaceModelsWithPromptFormatting.HuggingFaceMosaicmlMpt7bChat]: - LLMProviderKeys.HuggingFace, - [HuggingFaceModelsWithPromptFormatting.HuggingFacePhindPhindCodeLlama34Bv2]: - LLMProviderKeys.HuggingFace, - [HuggingFaceModelsWithPromptFormatting.HuggingFaceTiiuaeFalcon7bInstruct]: - LLMProviderKeys.HuggingFace, - [HuggingFaceModelsWithPromptFormatting.HuggingFaceWizardLMWizardCoderPython34BV10]: - LLMProviderKeys.HuggingFace, - - // Ollama Models - [OllamaModels.OlamaMistral]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaLlama27B]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaLlama213B]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaLlama270B]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaLlama2Uncensored]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaCodeLlama]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaNousHermes]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaOrcaMini]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaVicuna]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaNousHermes13B]: LLMProviderKeys.Ollama, - [OllamaModels.OlamaWizardVicunaUncensored]: LLMProviderKeys.Ollama, - - // Ollama Vision Models - [OllamaVisionModels.LLAVA]: LLMProviderKeys.Ollama, - - // VertexAI - [VertexAIGoogleModels.GeminiPro]: LLMProviderKeys.VertexAI, - [VertexAIGoogleModels.GeminiProVision]: LLMProviderKeys.VertexAI, - - //Palm - [PalmModels.ChatBison]: LLMProviderKeys.Palm, - - // Google AI Studio - [GoogleAIStudioModels.GeminiGeminiPro]: LLMProviderKeys.GoogleAIStudio, - [GoogleAIStudioModels.GeminiGeminiProVision]: LLMProviderKeys.GoogleAIStudio, - - // Mistral AI Models - [MistralAIModels.MistralTiny]: LLMProviderKeys.Mistral, - [MistralAIModels.MistralSmall]: LLMProviderKeys.Mistral, - [MistralAIModels.MistralMedium]: LLMProviderKeys.Mistral, - - // Anthropic Models - [AnthropicModels.Claude21]: LLMProviderKeys.Anthropic, - [AnthropicModels.Claude2]: LLMProviderKeys.Anthropic, - [AnthropicModels.ClaudeInstant1]: LLMProviderKeys.Anthropic, - [AnthropicModels.ClaudeInstant12]: LLMProviderKeys.Anthropic, - - // SageMaker Models - [SageMakerModels.MetaLlama213B]: LLMProviderKeys.Sagemaker, - [SageMakerModels.MetaLlama213BChatFineTuned]: LLMProviderKeys.Sagemaker, - [SageMakerModels.MetaLlama270B]: LLMProviderKeys.Sagemaker, - [SageMakerModels.MetaLlama270BChatFineTuned]: LLMProviderKeys.Sagemaker, - [SageMakerModels.MetaLlama27B]: LLMProviderKeys.Sagemaker, - [SageMakerModels.MetaLlama27BChatFineTuned]: LLMProviderKeys.Sagemaker, - - // Bedrock Models - [BedrockModels.BedrockAI21J2Mid]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAI21J2Ultra]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAmazonTitanExpress]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAnthropicClaudeInstantV1]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAnthropicClaudeV1]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAnthropicClaudeV2]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAnthropicClaudeV21]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockCohereCommand]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockMetaLlama2Chat13b]: LLMProviderKeys.Bedrock, - 
[BedrockModels.BedrockMetaLlama2Chat70b]: LLMProviderKeys.Bedrock, - [BedrockModels.BedrockAmazonTitanLite]: LLMProviderKeys.Bedrock, - - // Perplexity AI Models - [PerplexityAIModels.Pplx70bChat]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.Pplx70bChatAlpha]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.Pplx70bOnline]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.Pplx7bOnline]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.PplxCodeLlama34bInstruct]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.PplxLlama213bChat]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.PplxLlama270bChat]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.PplxMistral7bInstruct]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.PplxOpenhermes25Mistral7b]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.PplxOpenhermes2Mistral7b]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.Pplx7bChat]: LLMProviderKeys.PerplexityAI, - [PerplexityAIModels.Pplx7bChatAlpha]: LLMProviderKeys.PerplexityAI, - - // VLLM Models - [VLLMModels.CodellamaCodeLlama34bInstructHf]: LLMProviderKeys.VLLM, - [VLLMModels.MetaLlamaLlama27bChat]: LLMProviderKeys.VLLM, - [VLLMModels.MosaicmlMpt7bChat]: LLMProviderKeys.VLLM, - [VLLMModels.PhindPhindCodeLlama34Bv2]: LLMProviderKeys.VLLM, - [VLLMModels.TiiuaeFalcon7bInstruct]: LLMProviderKeys.VLLM, - [VLLMModels.WizardLMWizardCoderPython34BV10]: LLMProviderKeys.VLLM, - - //Xinference Models - [XinferenceModels.BgeBaseEn]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeBaseEnV15]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeBaseZh]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeLargeEn]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeLargeEnV15]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeLargeZh]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeLargeZhNoinstruct]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeLargeZhV15]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeSmallEnV15]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeSmallZh]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeSmallZhV15]: LLMProviderKeys.Xinference, - [XinferenceModels.E5LargeV2]: LLMProviderKeys.Xinference, - [XinferenceModels.GteBase]: LLMProviderKeys.Xinference, - [XinferenceModels.GteLarge]: LLMProviderKeys.Xinference, - [XinferenceModels.JinaEmbeddingsV2BaseEn]: LLMProviderKeys.Xinference, - [XinferenceModels.JinaEmbeddingsV2SmallEn]: LLMProviderKeys.Xinference, - [XinferenceModels.MultilingualE5Large]: LLMProviderKeys.Xinference, - [XinferenceModels.BgeBaseZhV15]: LLMProviderKeys.Xinference, - - // Cloudflare Workers AI Models - [CloudflareWorkersAIModels.MetaLlama27bChatFp16]: - LLMProviderKeys.CloudflareWorkersAI, - [CloudflareWorkersAIModels.MetaLlama27bChatInt8]: - LLMProviderKeys.CloudflareWorkersAI, - [CloudflareWorkersAIModels.Mistral7bInstructV01]: - LLMProviderKeys.CloudflareWorkersAI, - [CloudflareWorkersAIModels.TheBlokeCodellama7bInstructAwq]: - LLMProviderKeys.CloudflareWorkersAI, - - // AI21 Models - [AI21Models.J2Light]: LLMProviderKeys.AI21, - [AI21Models.J2Mid]: LLMProviderKeys.AI21, - [AI21Models.J2Ultra]: LLMProviderKeys.AI21, - - //NLPCloud Models - [NLPCloudModels.ChatDolphin]: LLMProviderKeys.NLPCloud, - [NLPCloudModels.Dolphin]: LLMProviderKeys.NLPCloud, - - // Deep Infra Models - [DeepInfraChatModels.CodellamaCodeLlama34bInstructHf]: - LLMProviderKeys.DeepInfra, - [DeepInfraChatModels.JondurbinAiroborosL270bGpt4141]: - LLMProviderKeys.DeepInfra, - [DeepInfraChatModels.MetaLlamaLlama213bChatHf]: 
LLMProviderKeys.DeepInfra, - [DeepInfraChatModels.MetaLlamaLlama270bChatHf]: LLMProviderKeys.DeepInfra, - [DeepInfraChatModels.MetaLlamaLlama27bChatHf]: LLMProviderKeys.DeepInfra, - [DeepInfraChatModels.MistralaiMistral7BInstructV01]: - LLMProviderKeys.DeepInfra, - - // Together AI Models - [TogetherAIModels.TogetherLlama270bChat]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherLlama270b]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherLlama27B32K]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherLlama27B32KInstruct]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherLlama27b]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherFalcon40bInstruct]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherFalcon7bInstruct]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherAlpaca7b]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherStarchatAlpha]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherCodeLlama34b]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherCodeLlama34bInstruct]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherCodeLlama34bPython]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherSqlCoder]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherNSQLLlama27B]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherWizardCoder15BV10]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherWizardCoderPython34BV10]: - LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherNousHermesLlama213b]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherChronosHermes13b]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherSolar070b16bit]: LLMProviderKeys.TogetherAI, - [TogetherAIModels.TogetherWizardLM70BV10]: LLMProviderKeys.TogetherAI, - - // Base Ten Models - [BaseTenModels.Falcon7B]: LLMProviderKeys.Baseten, - [BaseTenModels.MPT7BBase]: LLMProviderKeys.Baseten, - [BaseTenModels.WizardLM]: LLMProviderKeys.Baseten, - - // Petals Models - [PetalsModels.HuggyLlamaLlama65]: LLMProviderKeys.Petals, - [PetalsModels.PetalsTeamStableBeluga]: LLMProviderKeys.Petals, - - // Voyage AI Models - [VoyageAIModels.Voyage01]: LLMProviderKeys.VoyageAI, - [VoyageAIModels.VoyageLite01]: LLMProviderKeys.VoyageAI, - [VoyageAIModels.VoyageLite01Instruct]: LLMProviderKeys.VoyageAI, - - // Aleph Alpha Models - [AlephAlphaModels.LuminousBase]: LLMProviderKeys.AlephAlpha, - [AlephAlphaModels.LuminousBaseControl]: LLMProviderKeys.AlephAlpha, - [AlephAlphaModels.LuminousExtended]: LLMProviderKeys.AlephAlpha, - [AlephAlphaModels.LuminousExtendedControl]: LLMProviderKeys.AlephAlpha, - [AlephAlphaModels.LuminousSupreme]: LLMProviderKeys.AlephAlpha, - [AlephAlphaModels.LuminousSupremeControl]: LLMProviderKeys.AlephAlpha, -} - -const openAIChatCompletionModelsArray = Object.values( - OpenAIChatCompletionModels -) -const openAITextCompletionInstructModelsArray = Object.values( - OpenAITextCompletionInstructModels -) -const openAIVisionModelsArray = Object.values(OpenAIVisionModels) -const huggingFaceModelsWithPromptFormattingArray = Object.values( - HuggingFaceModelsWithPromptFormatting -) -const ollamaModelsArray = Object.values(OllamaModels) -const ollamaVisionModelsArray = Object.values(OllamaVisionModels) -const vertexAIGoogleModelsArray = Object.values(VertexAIGoogleModels) -const palmModelsArray = Object.values(PalmModels) -const googleAIStudioModelsArray = Object.values(GoogleAIStudioModels) -const mistralAIModelsArray = Object.values(MistralAIModels) -const anthropicModelsArray = Object.values(AnthropicModels) -const 
sageMakerModelsArray = Object.values(SageMakerModels) -const bedrockModelsArray = Object.values(BedrockModels) -const perplexityAIModelsArray = Object.values(PerplexityAIModels) -const vllmModelsArray = Object.values(VLLMModels) -const xinferenceModelsArray = Object.values(XinferenceModels) -const cloudflareWorkersAIModelsArray = Object.values(CloudflareWorkersAIModels) -const ai21ModelsArray = Object.values(AI21Models) -const nlpCloudModelsArray = Object.values(NLPCloudModels) -const deepInfraChatModelsArray = Object.values(DeepInfraChatModels) -const togetherAIModelsArray = Object.values(TogetherAIModels) -const baseTenModelsArray = Object.values(BaseTenModels) -const petalsModelsArray = Object.values(PetalsModels) -const voyageAIModelsArray = Object.values(VoyageAIModels) -const alephAlphaModelsArray = Object.values(AlephAlphaModels) - -export const allTextCompletionModels: LLMModels[] = [ - ...openAIChatCompletionModelsArray, - ...openAITextCompletionInstructModelsArray, - ...openAIVisionModelsArray, - ...huggingFaceModelsWithPromptFormattingArray, - ...ollamaModelsArray, - ...ollamaVisionModelsArray, - ...vertexAIGoogleModelsArray, - ...palmModelsArray, - ...googleAIStudioModelsArray, - ...mistralAIModelsArray, - ...anthropicModelsArray, - ...sageMakerModelsArray, - ...bedrockModelsArray, - ...perplexityAIModelsArray, - ...vllmModelsArray, - ...xinferenceModelsArray, - ...cloudflareWorkersAIModelsArray, - ...ai21ModelsArray, - ...nlpCloudModelsArray, - ...deepInfraChatModelsArray, - ...togetherAIModelsArray, - ...baseTenModelsArray, - ...petalsModelsArray, - ...voyageAIModelsArray, - ...alephAlphaModelsArray, -] - -// TODO: Filter these out by provided environment variables so we only show what we have enabled. -export const allModels: (LLMModels | EmbeddingModels)[] = [ - ...allTextCompletionModels, - ...allEmbeddingModels, -] diff --git a/plugins/core/src/lib/services/coreLLMService/constants/allCompletionModels.ts b/plugins/core/src/lib/services/coreLLMService/constants/allCompletionModels.ts new file mode 100644 index 0000000000..05b2f3e92d --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/constants/allCompletionModels.ts @@ -0,0 +1,56 @@ +import { CompletionModels } from '../types' +import { + openAIChatCompletionModelsArray, + openAITextCompletionInstructModelsArray, + openAIVisionModelsArray, + huggingFaceModelsWithPromptFormattingArray, + ollamaModelsArray, + ollamaVisionModelsArray, + vertexAIGoogleModelsArray, + palmModelsArray, + googleAIStudioModelsArray, + mistralAIModelsArray, + anthropicModelsArray, + sageMakerModelsArray, + bedrockModelsArray, + perplexityAIModelsArray, + vllmModelsArray, + xinferenceModelsArray, + cloudflareWorkersAIModelsArray, + ai21ModelsArray, + nlpCloudModelsArray, + deepInfraChatModelsArray, + togetherAIModelsArray, + baseTenModelsArray, + petalsModelsArray, + voyageAIModelsArray, + alephAlphaModelsArray, +} from './completionModelArrays' + +export const allCompletionModels: CompletionModels[] = [ + ...openAIChatCompletionModelsArray, + ...openAITextCompletionInstructModelsArray, + ...openAIVisionModelsArray, + ...huggingFaceModelsWithPromptFormattingArray, + ...ollamaModelsArray, + ...ollamaVisionModelsArray, + ...vertexAIGoogleModelsArray, + ...palmModelsArray, + ...googleAIStudioModelsArray, + ...mistralAIModelsArray, + ...anthropicModelsArray, + ...sageMakerModelsArray, + ...bedrockModelsArray, + ...perplexityAIModelsArray, + ...vllmModelsArray, + ...xinferenceModelsArray, + ...cloudflareWorkersAIModelsArray, + 
...ai21ModelsArray, + ...nlpCloudModelsArray, + ...deepInfraChatModelsArray, + ...togetherAIModelsArray, + ...baseTenModelsArray, + ...petalsModelsArray, + ...voyageAIModelsArray, + ...alephAlphaModelsArray, +] diff --git a/plugins/core/src/lib/services/coreLLMService/constants/allEmbeddingModels.ts b/plugins/core/src/lib/services/coreLLMService/constants/allEmbeddingModels.ts new file mode 100644 index 0000000000..df7dbdfd80 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/constants/allEmbeddingModels.ts @@ -0,0 +1,18 @@ +import { EmbeddingModels } from '../../coreEmbeddingService/types' +import { + bedrockEmbeddingModelsArray, + cohereEmbeddingModelsArray, + huggingFaceEmbeddingModelsArray, + mistralEmbeddingModelsArray, + openAIEmbeddingModelsArray, + voyageEmbeddingModelsArray, +} from './embeddingModelArrays' + +export const allEmbeddingModels: EmbeddingModels[] = [ + ...bedrockEmbeddingModelsArray, + ...cohereEmbeddingModelsArray, + ...mistralEmbeddingModelsArray, + ...voyageEmbeddingModelsArray, + ...openAIEmbeddingModelsArray, + ...huggingFaceEmbeddingModelsArray, +] diff --git a/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts b/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts new file mode 100644 index 0000000000..dfea13be2a --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts @@ -0,0 +1,61 @@ +import { + OpenAIChatCompletionModels, + OpenAITextCompletionInstructModels, + OpenAIVisionModels, + HuggingFaceModelsWithPromptFormatting, + OllamaModels, + OllamaVisionModels, + VertexAIGoogleModels, + PalmModels, + GoogleAIStudioModels, + MistralAIModels, + AnthropicModels, + SageMakerModels, + BedrockModels, + PerplexityAIModels, + VLLMModels, + XinferenceModels, + CloudflareWorkersAIModels, + AI21Models, + NLPCloudModels, + DeepInfraChatModels, + TogetherAIModels, + BaseTenModels, + PetalsModels, + VoyageAIModels, + AlephAlphaModels, +} from '../types' + +export const openAIChatCompletionModelsArray = Object.values( + OpenAIChatCompletionModels +) +export const openAITextCompletionInstructModelsArray = Object.values( + OpenAITextCompletionInstructModels +) +export const openAIVisionModelsArray = Object.values(OpenAIVisionModels) +export const huggingFaceModelsWithPromptFormattingArray = Object.values( + HuggingFaceModelsWithPromptFormatting +) +export const ollamaModelsArray = Object.values(OllamaModels) +export const ollamaVisionModelsArray = Object.values(OllamaVisionModels) +export const vertexAIGoogleModelsArray = Object.values(VertexAIGoogleModels) +export const palmModelsArray = Object.values(PalmModels) +export const googleAIStudioModelsArray = Object.values(GoogleAIStudioModels) +export const mistralAIModelsArray = Object.values(MistralAIModels) +export const anthropicModelsArray = Object.values(AnthropicModels) +export const sageMakerModelsArray = Object.values(SageMakerModels) +export const bedrockModelsArray = Object.values(BedrockModels) +export const perplexityAIModelsArray = Object.values(PerplexityAIModels) +export const vllmModelsArray = Object.values(VLLMModels) +export const xinferenceModelsArray = Object.values(XinferenceModels) +export const cloudflareWorkersAIModelsArray = Object.values( + CloudflareWorkersAIModels +) +export const ai21ModelsArray = Object.values(AI21Models) +export const nlpCloudModelsArray = Object.values(NLPCloudModels) +export const deepInfraChatModelsArray = Object.values(DeepInfraChatModels) +export const 
togetherAIModelsArray = Object.values(TogetherAIModels) +export const baseTenModelsArray = Object.values(BaseTenModels) +export const petalsModelsArray = Object.values(PetalsModels) +export const voyageAIModelsArray = Object.values(VoyageAIModels) +export const alephAlphaModelsArray = Object.values(AlephAlphaModels) diff --git a/plugins/core/src/lib/services/coreLLMService/constants/embeddingModelArrays.ts b/plugins/core/src/lib/services/coreLLMService/constants/embeddingModelArrays.ts new file mode 100644 index 0000000000..f7eeafb95e --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/constants/embeddingModelArrays.ts @@ -0,0 +1,17 @@ +import { + BedrockEmbeddingModels, + CohereEmbeddingModels, + MistralEmbeddingModels, + VoyageEmbeddingModels, + OpenAIEmbeddingModels, + HuggingFaceEmbeddingModels, +} from '../../coreEmbeddingService/types' + +export const bedrockEmbeddingModelsArray = Object.values(BedrockEmbeddingModels) +export const cohereEmbeddingModelsArray = Object.values(CohereEmbeddingModels) +export const mistralEmbeddingModelsArray = Object.values(MistralEmbeddingModels) +export const voyageEmbeddingModelsArray = Object.values(VoyageEmbeddingModels) +export const openAIEmbeddingModelsArray = Object.values(OpenAIEmbeddingModels) +export const huggingFaceEmbeddingModelsArray = Object.values( + HuggingFaceEmbeddingModels +) From b72d39836fef902ac7e30b8eeb6948c4143d9e42 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:57:38 -0800 Subject: [PATCH 06/38] add model provider map --- .../constants/modelProviderMap.ts | 776 ++++++++++++++++++ 1 file changed, 776 insertions(+) create mode 100644 plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts diff --git a/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts b/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts new file mode 100644 index 0000000000..262876d248 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts @@ -0,0 +1,776 @@ +import { + BedrockEmbeddingModels, + CohereEmbeddingModels, + MistralEmbeddingModels, + VoyageEmbeddingModels, + OpenAIEmbeddingModels, + HuggingFaceEmbeddingModels, +} from '../../coreEmbeddingService/types' +import { + LLMProviders, + OpenAIChatCompletionModels, + OpenAITextCompletionInstructModels, + OpenAIVisionModels, + HuggingFaceModelsWithPromptFormatting, + OllamaModels, + OllamaVisionModels, + VertexAIGoogleModels, + PalmModels, + GoogleAIStudioModels, + MistralAIModels, + AnthropicModels, + SageMakerModels, + BedrockModels, + PerplexityAIModels, + VLLMModels, + XinferenceModels, + CloudflareWorkersAIModels, + AI21Models, + NLPCloudModels, + DeepInfraChatModels, + TogetherAIModels, + BaseTenModels, + PetalsModels, + VoyageAIModels, + AlephAlphaModels, + ModelProviderMapping, + LLMProviderKeys, + AllModels, +} from '../types' + +export const modelProviderMap: Record = { + /// OpenAI Chat Completion Models + [OpenAIChatCompletionModels.GPT35Turbo1106Preview]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT35Turbo]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT35Turbo1106]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT35Turbo0301]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT35Turbo0613]: { + provider: LLMProviders.OpenAI, + 
apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT35Turbo16k]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT35Turbo16k0613]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT4]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT40314]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAIChatCompletionModels.GPT40613]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + + // OpenAI Text Completion / Instruct Models + [OpenAITextCompletionInstructModels.Ada001]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAITextCompletionInstructModels.Babbage001]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAITextCompletionInstructModels.Babbage002]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAITextCompletionInstructModels.Curie001]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAITextCompletionInstructModels.Davinci002]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAITextCompletionInstructModels.GPT35TurboInstruct]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + [OpenAITextCompletionInstructModels.TextDavinci003]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + + // OpenAI Vision Models + [OpenAIVisionModels.GPT4VisionPreview]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + + // HuggingFace Models With Prompt Formatting + [HuggingFaceModelsWithPromptFormatting.HuggingFaceCodellamaCodeLlama34bInstructHf]: + { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceModelsWithPromptFormatting.HuggingFaceMetaLlamaLlama27bChat]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceModelsWithPromptFormatting.HuggingFaceMistral7BInstructV01]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceModelsWithPromptFormatting.HuggingFaceMosaicmlMpt7bChat]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceModelsWithPromptFormatting.HuggingFacePhindPhindCodeLlama34Bv2]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceModelsWithPromptFormatting.HuggingFaceTiiuaeFalcon7bInstruct]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceModelsWithPromptFormatting.HuggingFaceWizardLMWizardCoderPython34BV10]: + { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + + // Ollama Models + [OllamaModels.OlamaMistral]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaLlama27B]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaLlama213B]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaLlama270B]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaLlama2Uncensored]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaCodeLlama]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, 
+ [OllamaModels.OlamaNousHermes]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaOrcaMini]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaVicuna]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaNousHermes13B]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + [OllamaModels.OlamaWizardVicunaUncensored]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + + // Ollama Vision Models + [OllamaVisionModels.LLAVA]: { + provider: LLMProviders.Ollama, + apiKey: LLMProviderKeys.Ollama, + }, + + // VertexAI + [VertexAIGoogleModels.GeminiPro]: { + provider: LLMProviders.VertexAI, + apiKey: LLMProviderKeys.VertexAI, + }, + [VertexAIGoogleModels.GeminiProVision]: { + provider: LLMProviders.VertexAI, + apiKey: LLMProviderKeys.VertexAI, + }, + + //Palm + [PalmModels.ChatBison]: { + provider: LLMProviders.Palm, + apiKey: LLMProviderKeys.Palm, + }, + + // Google AI Studio + [GoogleAIStudioModels.GeminiGeminiPro]: { + provider: LLMProviders.GoogleAIStudio, + apiKey: LLMProviderKeys.GoogleAIStudio, + }, + [GoogleAIStudioModels.GeminiGeminiProVision]: { + provider: LLMProviders.GoogleAIStudio, + apiKey: LLMProviderKeys.GoogleAIStudio, + }, + + // Mistral AI Models + [MistralAIModels.MistralTiny]: { + provider: LLMProviders.Mistral, + apiKey: LLMProviderKeys.Mistral, + }, + [MistralAIModels.MistralSmall]: { + provider: LLMProviders.Mistral, + apiKey: LLMProviderKeys.Mistral, + }, + [MistralAIModels.MistralMedium]: { + provider: LLMProviders.Mistral, + apiKey: LLMProviderKeys.Mistral, + }, + + // Anthropic Models + [AnthropicModels.Claude21]: { + provider: LLMProviders.Anthropic, + apiKey: LLMProviderKeys.Anthropic, + }, + [AnthropicModels.Claude2]: { + provider: LLMProviders.Anthropic, + apiKey: LLMProviderKeys.Anthropic, + }, + [AnthropicModels.ClaudeInstant1]: { + provider: LLMProviders.Anthropic, + apiKey: LLMProviderKeys.Anthropic, + }, + [AnthropicModels.ClaudeInstant12]: { + provider: LLMProviders.Anthropic, + apiKey: LLMProviderKeys.Anthropic, + }, + + // SageMaker Models + [SageMakerModels.MetaLlama213B]: { + provider: LLMProviders.Sagemaker, + apiKey: LLMProviderKeys.Sagemaker, + }, + [SageMakerModels.MetaLlama213BChatFineTuned]: { + provider: LLMProviders.Sagemaker, + apiKey: LLMProviderKeys.Sagemaker, + }, + [SageMakerModels.MetaLlama270B]: { + provider: LLMProviders.Sagemaker, + apiKey: LLMProviderKeys.Sagemaker, + }, + [SageMakerModels.MetaLlama270BChatFineTuned]: { + provider: LLMProviders.Sagemaker, + apiKey: LLMProviderKeys.Sagemaker, + }, + [SageMakerModels.MetaLlama27B]: { + provider: LLMProviders.Sagemaker, + apiKey: LLMProviderKeys.Sagemaker, + }, + [SageMakerModels.MetaLlama27BChatFineTuned]: { + provider: LLMProviders.Sagemaker, + apiKey: LLMProviderKeys.Sagemaker, + }, + + // Bedrock Models + [BedrockModels.BedrockAI21J2Mid]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockAI21J2Ultra]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockAmazonTitanExpress]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockAnthropicClaudeInstantV1]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockAnthropicClaudeV1]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + 
[BedrockModels.BedrockAnthropicClaudeV2]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockAnthropicClaudeV21]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockCohereCommand]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockMetaLlama2Chat13b]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockMetaLlama2Chat70b]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockModels.BedrockAmazonTitanLite]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + + // Perplexity AI Models + [PerplexityAIModels.Pplx70bChat]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.Pplx70bChatAlpha]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.Pplx70bOnline]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.Pplx7bOnline]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.PplxCodeLlama34bInstruct]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.PplxLlama213bChat]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.PplxLlama270bChat]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.PplxMistral7bInstruct]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.PplxOpenhermes25Mistral7b]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.PplxOpenhermes2Mistral7b]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.Pplx7bChat]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + [PerplexityAIModels.Pplx7bChatAlpha]: { + provider: LLMProviders.PerplexityAI, + apiKey: LLMProviderKeys.PerplexityAI, + }, + + // VLLM Models + [VLLMModels.CodellamaCodeLlama34bInstructHf]: { + provider: LLMProviders.VLLM, + apiKey: LLMProviderKeys.VLLM, + }, + [VLLMModels.MetaLlamaLlama27bChat]: { + provider: LLMProviders.VLLM, + apiKey: LLMProviderKeys.VLLM, + }, + [VLLMModels.MosaicmlMpt7bChat]: { + provider: LLMProviders.VLLM, + apiKey: LLMProviderKeys.VLLM, + }, + [VLLMModels.PhindPhindCodeLlama34Bv2]: { + provider: LLMProviders.VLLM, + apiKey: LLMProviderKeys.VLLM, + }, + [VLLMModels.TiiuaeFalcon7bInstruct]: { + provider: LLMProviders.VLLM, + apiKey: LLMProviderKeys.VLLM, + }, + [VLLMModels.WizardLMWizardCoderPython34BV10]: { + provider: LLMProviders.VLLM, + apiKey: LLMProviderKeys.VLLM, + }, + + //Xinference Models + [XinferenceModels.BgeBaseEn]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeBaseEnV15]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeBaseZh]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeLargeEn]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeLargeEnV15]: { + provider: LLMProviders.Xinference, + apiKey: 
LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeLargeZh]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeLargeZhNoinstruct]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeLargeZhV15]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeSmallEnV15]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeSmallZh]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeSmallZhV15]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.E5LargeV2]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.GteBase]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.GteLarge]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.JinaEmbeddingsV2BaseEn]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.JinaEmbeddingsV2SmallEn]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.MultilingualE5Large]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + [XinferenceModels.BgeBaseZhV15]: { + provider: LLMProviders.Xinference, + apiKey: LLMProviderKeys.Xinference, + }, + + // Cloudflare Workers AI Models + [CloudflareWorkersAIModels.MetaLlama27bChatFp16]: { + provider: LLMProviders.CloudflareWorkersAI, + apiKey: LLMProviderKeys.CloudflareWorkersAI, + }, + [CloudflareWorkersAIModels.MetaLlama27bChatInt8]: { + provider: LLMProviders.CloudflareWorkersAI, + apiKey: LLMProviderKeys.CloudflareWorkersAI, + }, + [CloudflareWorkersAIModels.Mistral7bInstructV01]: { + provider: LLMProviders.CloudflareWorkersAI, + apiKey: LLMProviderKeys.CloudflareWorkersAI, + }, + [CloudflareWorkersAIModels.TheBlokeCodellama7bInstructAwq]: { + provider: LLMProviders.CloudflareWorkersAI, + apiKey: LLMProviderKeys.CloudflareWorkersAI, + }, + + // AI21 Models + [AI21Models.J2Light]: { + provider: LLMProviders.AI21, + apiKey: LLMProviderKeys.AI21, + }, + [AI21Models.J2Mid]: { + provider: LLMProviders.AI21, + apiKey: LLMProviderKeys.AI21, + }, + [AI21Models.J2Ultra]: { + provider: LLMProviders.AI21, + apiKey: LLMProviderKeys.AI21, + }, + + //NLPCloud Models + [NLPCloudModels.ChatDolphin]: { + provider: LLMProviders.NLPCloud, + apiKey: LLMProviderKeys.NLPCloud, + }, + [NLPCloudModels.Dolphin]: { + provider: LLMProviders.NLPCloud, + apiKey: LLMProviderKeys.NLPCloud, + }, + + // Deep Infra Models + [DeepInfraChatModels.CodellamaCodeLlama34bInstructHf]: { + provider: LLMProviders.DeepInfra, + apiKey: LLMProviderKeys.DeepInfra, + }, + [DeepInfraChatModels.JondurbinAiroborosL270bGpt4141]: { + provider: LLMProviders.DeepInfra, + apiKey: LLMProviderKeys.DeepInfra, + }, + [DeepInfraChatModels.MetaLlamaLlama213bChatHf]: { + provider: LLMProviders.DeepInfra, + apiKey: LLMProviderKeys.DeepInfra, + }, + [DeepInfraChatModels.MetaLlamaLlama270bChatHf]: { + provider: LLMProviders.DeepInfra, + apiKey: LLMProviderKeys.DeepInfra, + }, + [DeepInfraChatModels.MetaLlamaLlama27bChatHf]: { + provider: LLMProviders.DeepInfra, + apiKey: LLMProviderKeys.DeepInfra, + }, + [DeepInfraChatModels.MistralaiMistral7BInstructV01]: { + provider: LLMProviders.DeepInfra, + 
apiKey: LLMProviderKeys.DeepInfra, + }, + + // Together AI Models + [TogetherAIModels.TogetherLlama270bChat]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherLlama270b]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherLlama27B32K]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherLlama27B32KInstruct]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherLlama27b]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherFalcon40bInstruct]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherFalcon7bInstruct]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherAlpaca7b]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherStarchatAlpha]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherCodeLlama34b]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherCodeLlama34bInstruct]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherCodeLlama34bPython]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherSqlCoder]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherNSQLLlama27B]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherWizardCoder15BV10]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherWizardCoderPython34BV10]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherNousHermesLlama213b]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherChronosHermes13b]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherSolar070b16bit]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + [TogetherAIModels.TogetherWizardLM70BV10]: { + provider: LLMProviders.TogetherAI, + apiKey: LLMProviderKeys.TogetherAI, + }, + + // Base Ten Models + [BaseTenModels.Falcon7B]: { + provider: LLMProviders.Baseten, + apiKey: LLMProviderKeys.Baseten, + }, + [BaseTenModels.MPT7BBase]: { + provider: LLMProviders.Baseten, + apiKey: LLMProviderKeys.Baseten, + }, + [BaseTenModels.WizardLM]: { + provider: LLMProviders.Baseten, + apiKey: LLMProviderKeys.Baseten, + }, + + // Petals Models + [PetalsModels.HuggyLlamaLlama65]: { + provider: LLMProviders.Petals, + apiKey: LLMProviderKeys.Petals, + }, + [PetalsModels.PetalsTeamStableBeluga]: { + provider: LLMProviders.Petals, + apiKey: LLMProviderKeys.Petals, + }, + + // Voyage AI Models + [VoyageAIModels.Voyage01]: { + provider: LLMProviders.VoyageAI, + apiKey: LLMProviderKeys.VoyageAI, + }, + [VoyageAIModels.VoyageLite01]: { + provider: LLMProviders.VoyageAI, + apiKey: LLMProviderKeys.VoyageAI, + }, + [VoyageAIModels.VoyageLite01Instruct]: { + provider: LLMProviders.VoyageAI, + apiKey: LLMProviderKeys.VoyageAI, + 
}, + + // Aleph Alpha Models + [AlephAlphaModels.LuminousBase]: { + provider: LLMProviders.AlephAlpha, + apiKey: LLMProviderKeys.AlephAlpha, + }, + [AlephAlphaModels.LuminousBaseControl]: { + provider: LLMProviders.AlephAlpha, + apiKey: LLMProviderKeys.AlephAlpha, + }, + [AlephAlphaModels.LuminousExtended]: { + provider: LLMProviders.AlephAlpha, + apiKey: LLMProviderKeys.AlephAlpha, + }, + [AlephAlphaModels.LuminousExtendedControl]: { + provider: LLMProviders.AlephAlpha, + apiKey: LLMProviderKeys.AlephAlpha, + }, + [AlephAlphaModels.LuminousSupreme]: { + provider: LLMProviders.AlephAlpha, + apiKey: LLMProviderKeys.AlephAlpha, + }, + [AlephAlphaModels.LuminousSupremeControl]: { + provider: LLMProviders.AlephAlpha, + apiKey: LLMProviderKeys.AlephAlpha, + }, + + // Embedding Models + [BedrockEmbeddingModels.AmazonTitanEmbedTextV1]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockEmbeddingModels.CohereEmbedEnglishV3]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + [BedrockEmbeddingModels.CohereEmbedMultilingualV3]: { + provider: LLMProviders.Bedrock, + apiKey: LLMProviderKeys.Bedrock, + }, + + [CohereEmbeddingModels.EmbedEnglishLightV20]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + [CohereEmbeddingModels.EmbedEnglishLightV30]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + [CohereEmbeddingModels.EmbedEnglishV20]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + [CohereEmbeddingModels.EmbedEnglishV30]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + [CohereEmbeddingModels.EmbedMultilingualLightV30]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + [CohereEmbeddingModels.EmbedMultilingualV20]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + [CohereEmbeddingModels.EmbedMultilingualV30]: { + provider: LLMProviders.Cohere, + apiKey: LLMProviderKeys.Cohere, + }, + + [MistralEmbeddingModels.MistralEmbed]: { + provider: LLMProviders.Mistral, + apiKey: LLMProviderKeys.Mistral, + }, + + [VoyageEmbeddingModels.Voyage02]: { + provider: LLMProviders.VoyageAI, + apiKey: LLMProviderKeys.VoyageAI, + }, + [VoyageEmbeddingModels.VoyageCode02]: { + provider: LLMProviders.VoyageAI, + apiKey: LLMProviderKeys.VoyageAI, + }, + [VoyageEmbeddingModels.VoyageLite01Instruct]: { + provider: LLMProviders.VoyageAI, + apiKey: LLMProviderKeys.VoyageAI, + }, + + [OpenAIEmbeddingModels.TextEmbeddingAda002]: { + provider: LLMProviders.OpenAI, + apiKey: LLMProviderKeys.OpenAI, + }, + + [HuggingFaceEmbeddingModels.HuggingFaceAnyHfEmbeddingModel]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceEmbeddingModels.HuggingFaceBAAIBgeLargeZh]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, + [HuggingFaceEmbeddingModels.HuggingFaceMicrosoftCodebertBase]: { + provider: LLMProviders.HuggingFace, + apiKey: LLMProviderKeys.HuggingFace, + }, +} From da30237aa383a99697dc2ac8ee268b0de8380301 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 13:58:07 -0800 Subject: [PATCH 07/38] update llm types --- .../core/src/lib/services/coreLLMService/types.ts | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/plugins/core/src/lib/services/coreLLMService/types.ts b/plugins/core/src/lib/services/coreLLMService/types.ts index 0234f0d5a2..a9820004ba 100644 --- 
a/plugins/core/src/lib/services/coreLLMService/types.ts +++ b/plugins/core/src/lib/services/coreLLMService/types.ts @@ -16,6 +16,10 @@ export type Message = { function: FunctionType } } +export interface ModelProviderMapping { + provider: LLMProviders + apiKey: LLMProviderKeys +} export type LiteLLMOptions = { api_base?: string @@ -56,7 +60,7 @@ export type CompletionOptions = { } & LiteLLMOptions export type CompletionRequest = { - model: LLMModels + model: CompletionModels messages: Message[] options?: CompletionOptions api_key?: string @@ -168,13 +172,13 @@ export enum LLMProviders { VertexAI = 'VertexAI', } -export type Models = EmbeddingModels | LLMModels +export type Models = EmbeddingModels | CompletionModels export type LLMCredential = PluginCredential & { value: string } -export type LLMModel = [LLMModels, LLMProviderKeys] +export type LLMModel = [CompletionModels, LLMProviderKeys] export enum OpenAIChatCompletionModels { GPT35Turbo1106Preview = 'gpt-4-1106-preview', @@ -423,7 +427,7 @@ export enum PetalsModels { HuggyLlamaLlama65 = 'petals/huggyllama/llama-65b', } -export type LLMModels = +export type CompletionModels = | OpenAIChatCompletionModels | OpenAITextCompletionInstructModels | OpenAIVisionModels @@ -449,3 +453,5 @@ export type LLMModels = | AlephAlphaModels | BaseTenModels | PetalsModels + +export type AllModels = CompletionModels | EmbeddingModels From 168dc210deddfd558ca827e39a051924496554df Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 15:46:32 -0800 Subject: [PATCH 08/38] add findProvider helpers --- .../src/lib/services/coreLLMService/findProvider.ts | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 plugins/core/src/lib/services/coreLLMService/findProvider.ts diff --git a/plugins/core/src/lib/services/coreLLMService/findProvider.ts b/plugins/core/src/lib/services/coreLLMService/findProvider.ts new file mode 100644 index 0000000000..6dce263155 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/findProvider.ts @@ -0,0 +1,12 @@ +import { modelProviderMap } from './constants/modelProviderMap' +import { AllModels, LLMProviderKeys } from './types' + +export function findProviderKey(model: AllModels): LLMProviderKeys | undefined { + return LLMProviderKeys[modelProviderMap[model].apiKey] +} + +export function findProviderName( + model: AllModels +): LLMProviderKeys | undefined { + return LLMProviderKeys[modelProviderMap[model].provider] +} From 6da7fe6e8c1c897d3f90395858d26b48768b8350 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 15:47:13 -0800 Subject: [PATCH 09/38] add allOpenAICICompletionModelsArray --- .../coreLLMService/constants/completionModelArrays.ts | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts b/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts index dfea13be2a..50e2e73a2c 100644 --- a/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts +++ b/plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays.ts @@ -32,6 +32,12 @@ export const openAIChatCompletionModelsArray = Object.values( export const openAITextCompletionInstructModelsArray = Object.values( OpenAITextCompletionInstructModels ) + +export const allOpenAICompletionModelsArray = [ + ...openAIChatCompletionModelsArray, + ...openAITextCompletionInstructModelsArray, +] + export const openAIVisionModelsArray = Object.values(OpenAIVisionModels) export const 
huggingFaceModelsWithPromptFormattingArray = Object.values( HuggingFaceModelsWithPromptFormatting From 2a0175f882b71394863d1e333684bc922ed41a49 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 15:58:38 -0800 Subject: [PATCH 10/38] change component to CompletionProviderOptions --- .../CompletionProviderOptions.tsx | 87 +++++++++++++++++++ .../PropertiesWindow/LLMProviderSelect.tsx | 30 ------- 2 files changed, 87 insertions(+), 30 deletions(-) create mode 100644 packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx delete mode 100644 packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx diff --git a/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx b/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx new file mode 100644 index 0000000000..ccf271e427 --- /dev/null +++ b/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx @@ -0,0 +1,87 @@ +import React, { useState, useEffect } from "react"; +import { ConfigurationComponentProps } from "./PropertiesWindow"; +import { LLMProviders, CompletionModels } from "plugins/core/src/lib/services/coreLLMService/types"; +import { ai21ModelsArray, anthropicModelsArray } from "plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays"; + + +export const CompletionProviderOptions = (props: ConfigurationComponentProps) => { + const [selectedProvider, setSelectedProvider] = useState(LLMProviders.OpenAI); + const [selectedModel, setSelectedModel] = useState(null); + const [filteredModels, setFilteredModels] = useState([]); + + useEffect(() => { + + // get all completion models for the selected provider` + switch (selectedProvider) { + case LLMProviders.AI21: + setFilteredModels(ai21ModelsArray); + break; + case LLMProviders.Anthropic: + setFilteredModels(anthropicModelsArray); + break; + case LLMProviders.AlephAlpha: + setFilteredModels([]); + break; + case LLMProviders.Anyscale: + setFilteredModels([]); + break; + case LLMProviders.Azure: + setFilteredModels([]); + break; + case LLMProviders.Baseten: + setFilteredModels([]); + break; + case LLMProviders.Bedrock: + setFilteredModels([]); + break; + case LLMProviders.CloudflareWorkersAI: + setFilteredModels([]); + break; + // case LLMProviders.OpenAI: + // setFilteredModels(allOpenAICompletionModelsArray); + // break; + + + } + }, [selectedProvider]); + + const onProviderChange = (provider: LLMProviders) => { + setSelectedProvider(provider); + props.updateConfigKey("modelProvider", provider); + }; + + const onModelChange = (model: CompletionModels) => { + setSelectedModel(model); + props.updateConfigKey("model", model); + }; + + return ( +
+    <div>
+      <h3>Model Provider</h3>
+      <select
+        value={selectedProvider}
+        onChange={(e) => onProviderChange(e.target.value as LLMProviders)}
+      >
+        {Object.values(LLMProviders).map((provider) => (
+          <option key={provider} value={provider}>
+            {provider}
+          </option>
+        ))}
+      </select>
+      <h3>Model</h3>
+      <select
+        value={selectedModel ?? ''}
+        onChange={(e) => onModelChange(e.target.value as CompletionModels)}
+      >
+        {filteredModels.map((model) => (
+          <option key={model} value={model}>
+            {model}
+          </option>
+        ))}
+      </select>
+    </div>
+ ); +} diff --git a/packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx b/packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx deleted file mode 100644 index 4ec099f6f0..0000000000 --- a/packages/client/editor/src/components/PropertiesWindow/LLMProviderSelect.tsx +++ /dev/null @@ -1,30 +0,0 @@ -import React, { useState } from "react"; -import { ConfigurationComponentProps } from "./PropertiesWindow"; -import { LLMProviders } from "plugins/core/src/lib/services/coreLLMService/types"; - -export const LLMProviderSelect = (props: ConfigurationComponentProps) => { - // TODO: We should make google default when available - const [selectedProvider, setSelectedProvider] = useState(LLMProviders.OpenAI); - - const onChange = (provider: LLMProviders) => { - setSelectedProvider(provider); - props.updateConfigKey("modelProvider", provider); - } - - return ( -
-    <div>
-      <h3>LLM Provider</h3>
-      <select
-        value={selectedProvider}
-        onChange={(e) => onChange(e.target.value as LLMProviders)}
-      >
-        {Object.values(LLMProviders).map((provider) => (
-          <option key={provider} value={provider}>
-            {provider}
-          </option>
-        ))}
-      </select>
-    </div>
-  );
-}

From ca13c53e319394ea6b81b4f7ce575379668a3501 Mon Sep 17 00:00:00 2001
From: Jakob
Date: Wed, 24 Jan 2024 15:58:46 -0800
Subject: [PATCH 11/38] change component to CompletionProviderOptions

---
 .../src/components/PropertiesWindow/PropertiesWindow.tsx | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx b/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx
index ae2b04f9ac..5f3fb15add 100644
--- a/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx
+++ b/packages/client/editor/src/components/PropertiesWindow/PropertiesWindow.tsx
@@ -14,7 +14,8 @@ import { SpellInterface } from 'server/schemas';
 import { VariableNames } from './variableNames';
 import { ValueType } from './ValueType';
 import { DefaultConfig } from './DefaultConfig';
-import { LLMProviderSelect } from './LLMProviderSelect';
+import { CompletionProviderOptions } from './CompletionProviderOptions';
+
 
 type Props = {
   tab: Tab
@@ -40,7 +41,7 @@ const ConfigurationComponents = {
   variableNames: VariableNames,
   valueType: ValueType,
   default: DefaultConfig,
-  modelProviders: LLMProviderSelect,
+  modelProviders: CompletionProviderOptions,
 }
 
 export const PropertiesWindow = (props: Props) => {

From 73d2266e5ef70f262986480e591d4d6dcf65457d Mon Sep 17 00:00:00 2001
From: Jakob
Date: Wed, 24 Jan 2024 15:58:58 -0800
Subject: [PATCH 12/38] update core memory service types

---
 .../coreMemoryService/coreMemoryService.ts | 34 +++++++------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts b/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts
index 33851bbd02..f5ef977bdc 100644
--- a/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts
+++ b/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts
@@ -1,19 +1,21 @@
 import { python } from 'pythonia'
 import { PRODUCTION } from 'shared/config'
 import {
+  CompletionModels,
   LLMCredential,
-  LLMModels,
-  LLMProviderKeys,
   Models,
   OpenAIChatCompletionModels,
 } from '../coreLLMService/types'
-import { modelProviderMap } from '../coreLLMService/constants'
+
 import { DataType } from './coreMemoryTypes'
 import {
   EmbeddingModels,
   OpenAIEmbeddingModels,
 } from '../coreEmbeddingService/types'
-import { embeddingProviderMap } from '../coreEmbeddingService/constants'
+import {
+  findProviderKey,
+  findProviderName,
+} from '../coreLLMService/findProvider'
 
 export interface ICoreMemoryService {
   initialize(agentId: string): Promise
@@ -93,12 +95,12 @@ class CoreMemoryService {
     }
   }
 
-  setModel(model: LLMModels) {
+  setModel(model: CompletionModels) {
     this.setLLM(model)
   }
 
-  private setLLM(model: LLMModels) {
-    const providerName = this.findProviderName(model)
+  private setLLM(model: CompletionModels) {
+    const providerName = findProviderName(model)
     const credential = this.getCredential(model)
     const params = this.changeLLMParams()
 
@@ -113,7 +115,7 @@
   }
 
   private setEmbedder(model: EmbeddingModels) {
-    const providerName = this.findEmbeddingProviderName(model)
+    const providerName = findProviderName(model)
     const credential = this.getCredential(model)
 
     this.baseConfig.embedder = {
@@ -136,18 +138,6 @@
     return newParams
   }
 
-  private findProvider = (model: Models): LLMProviderKeys => {
-    return modelProviderMap[model]
-  }
-
-  private findProviderName = (model: Models) => {
-    return 
modelProviderMap[model] - } - - private findEmbeddingProviderName = (model: EmbeddingModels) => { - return embeddingProviderMap[model] - } - addCredential(credential: LLMCredential): void { const existingCredentialIndex = this.credentials.findIndex( c => c.serviceType === credential.serviceType @@ -165,12 +155,12 @@ class CoreMemoryService { } private getCredential(model: Models): string { - const provider = this.findProvider(model) + const provider = findProviderKey(model) let credential = this.credentials.find( c => c.serviceType === provider )?.value - if (!credential && !PRODUCTION) { + if (!credential && !PRODUCTION && provider) { credential = process.env[provider] } From a454e5ba9894935572d72a3333c6709f3ce2edcd Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 24 Jan 2024 15:59:23 -0800 Subject: [PATCH 13/38] add find provider key to coreLLMService --- .../services/coreLLMService/coreLLMService.ts | 32 ++++++------------- 1 file changed, 10 insertions(+), 22 deletions(-) diff --git a/plugins/core/src/lib/services/coreLLMService/coreLLMService.ts b/plugins/core/src/lib/services/coreLLMService/coreLLMService.ts index bd26c293c2..e8aabd3092 100644 --- a/plugins/core/src/lib/services/coreLLMService/coreLLMService.ts +++ b/plugins/core/src/lib/services/coreLLMService/coreLLMService.ts @@ -6,17 +6,13 @@ import { VERTEXAI_PROJECT, } from 'shared/config' -import { - LLMCredential, - LLMProviderKeys, - LLMModels, - CompletionResponse, -} from './types' -import { modelProviderMap } from './constants' +import { LLMCredential, CompletionResponse, AllModels } from './types' + import { ICoreBudgetManagerService, ICoreLLMService } from '../types' import { CoreBudgetManagerService } from '../coreBudgetManagerService/coreBudgetMangerService' import { UserService } from '../userService/userService' import { saveRequest } from 'shared/core' +import { findProviderKey, findProviderName } from './findProvider' const sleep = ms => new Promise(resolve => setTimeout(resolve, ms)) @@ -139,7 +135,7 @@ export class CoreLLMService implements ICoreLLMService { status: '', statusCode: 200, parameters: JSON.stringify(request.options), - provider: this.findProvider(request.model), + provider: findProviderName(request.model), type: 'completion', hidden: false, processed: false, @@ -177,25 +173,17 @@ export class CoreLLMService implements ICoreLLMService { } } - private findProvider = (model: LLMModels): LLMProviderKeys => { - return modelProviderMap[model] - } - - private getCredential = (model: LLMModels): string => { - const provider = this.findProvider(model) - - let credential = this.credentials.find(c => c.name === provider)?.value + private getCredential = (model: AllModels): string => { + const providerKey = findProviderKey(model) - if (!credential && PRODUCTION) { - credential = process.env[provider] - } + let credential = this.credentials.find(c => c.name === providerKey)?.value - if (!credential && NODE_ENV === 'development' && !PRODUCTION) { - credential = process.env[provider] + if (!credential && !PRODUCTION && providerKey) { + credential = process.env[providerKey] } if (!credential) { - throw new Error(`No credential found for ${provider}`) + throw new Error(`No credential found for ${providerKey}`) } return credential } From 56cdeda40048e693f8ee03e0fae35c2b1864356c Mon Sep 17 00:00:00 2001 From: Jakob Date: Thu, 25 Jan 2024 14:31:16 -0800 Subject: [PATCH 14/38] break types into pieces --- plugins/core/src/lib/corePlugin.ts | 2 +- .../coreEmbeddingService.ts | 2 +- .../constants/modelProviderMap.ts | 75 
++++- .../coreLLMService/constants/providers.ts | 8 + .../coreLLMService/types/completionModels.ts | 293 ++++++++++++++++++ .../coreLLMService/types/completionTypes.ts | 26 ++ .../coreLLMService/types/liteLLMTypes.ts | 42 +++ .../coreLLMService/types/messageTypes.ts | 35 +++ .../types/modelProviderMappings.ts | 4 + .../coreLLMService/types/providerTypes.ts | 82 +++++ .../coreMemoryService/coreMemoryService.ts | 18 +- plugins/core/src/lib/services/types.ts | 9 +- 12 files changed, 580 insertions(+), 16 deletions(-) create mode 100644 plugins/core/src/lib/services/coreLLMService/constants/providers.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/types/completionModels.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/types/completionTypes.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/types/liteLLMTypes.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/types/messageTypes.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/types/modelProviderMappings.ts create mode 100644 plugins/core/src/lib/services/coreLLMService/types/providerTypes.ts diff --git a/plugins/core/src/lib/corePlugin.ts b/plugins/core/src/lib/corePlugin.ts index df871947b1..b47c326484 100644 --- a/plugins/core/src/lib/corePlugin.ts +++ b/plugins/core/src/lib/corePlugin.ts @@ -16,7 +16,7 @@ import { sendMessage } from './nodes/actions/sendMessage' import { textTemplate } from './nodes/functions/textTemplate' import { registerStructProfile } from './registerStructProfile' import { streamMessage } from './nodes/actions/streamMessage' -import { LLMProviderKeys } from './services/coreLLMService/types' +import { LLMProviderKeys } from './services/coreLLMService/types/providerTypes' import { variableGet } from './nodes/query/variableGet' import { VariableService } from './services/variableService' import { variableSet } from './nodes/query/variableSet' diff --git a/plugins/core/src/lib/services/coreEmbeddingService/coreEmbeddingService.ts b/plugins/core/src/lib/services/coreEmbeddingService/coreEmbeddingService.ts index 4fd7b70858..f17a3671a0 100644 --- a/plugins/core/src/lib/services/coreEmbeddingService/coreEmbeddingService.ts +++ b/plugins/core/src/lib/services/coreEmbeddingService/coreEmbeddingService.ts @@ -12,7 +12,7 @@ import { OpenAIEmbeddingOptions, VoyageEmbeddingOptions, } from './types' -import { LLMProviders } from '../coreLLMService/types' +import { LLMProviders } from '../coreLLMService/types/providerTypes' export class EmbeddingService implements IEmbeddingService { protected liteLLM: any diff --git a/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts b/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts index 262876d248..2411a487c7 100644 --- a/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts +++ b/plugins/core/src/lib/services/coreLLMService/constants/modelProviderMap.ts @@ -7,7 +7,6 @@ import { HuggingFaceEmbeddingModels, } from '../../coreEmbeddingService/types' import { - LLMProviders, OpenAIChatCompletionModels, OpenAITextCompletionInstructModels, OpenAIVisionModels, @@ -33,10 +32,15 @@ import { PetalsModels, VoyageAIModels, AlephAlphaModels, + AnyscaleModels, + OpenRouterModels, +} from '../types/completionModels' +import { AllModels } from '../types/models' +import { ModelProviderMapping, + LLMProviders, LLMProviderKeys, - AllModels, -} from '../types' +} from '../types/providerTypes' export const modelProviderMap: Record = { /// OpenAI Chat Completion 
Models @@ -773,4 +777,69 @@ export const modelProviderMap: Record = { provider: LLMProviders.HuggingFace, apiKey: LLMProviderKeys.HuggingFace, }, + [AnyscaleModels.AnyscaleCodellamaCodeLlama34bInstructHf]: { + provider: LLMProviders.Anyscale, + apiKey: LLMProviderKeys.Anyscale, + }, + + [AnyscaleModels.AnyscaleMetaLlamaLlama213bChatHf]: { + provider: LLMProviders.Anyscale, + apiKey: LLMProviderKeys.Anyscale, + }, + [AnyscaleModels.AnyscaleMetaLlamaLlama270bChatHf]: { + provider: LLMProviders.Anyscale, + apiKey: LLMProviderKeys.Anyscale, + }, + [AnyscaleModels.AnyscaleMetaLlamaLlama27bChatHf]: { + provider: LLMProviders.Anyscale, + apiKey: LLMProviderKeys.Anyscale, + }, + [AnyscaleModels.AnyscaleZephyr7BBeta]: { + provider: LLMProviders.Anyscale, + apiKey: LLMProviderKeys.Anyscale, + }, + [AnyscaleModels.AnyscaleMistral7BInstructV01]: { + provider: LLMProviders.Anyscale, + apiKey: LLMProviderKeys.Anyscale, + }, + [OpenRouterModels.OpenRouterAnthropicClaud2]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterAnthropicClaudInstantV1]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterMetaLlamaLlama213bChat]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterMetaLlamaLlama270bChat]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterOpenAIGpt35turbo]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterOpenAIGpt35turbo16k]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterOpenAIGpt4]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterOpenAIGpt432k]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterPalm2ChatBison]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, + [OpenRouterModels.OpenRouterPalm2CodeChatBison]: { + provider: LLMProviders.OpenRouter, + apiKey: LLMProviderKeys.OpenRouter, + }, } diff --git a/plugins/core/src/lib/services/coreLLMService/constants/providers.ts b/plugins/core/src/lib/services/coreLLMService/constants/providers.ts new file mode 100644 index 0000000000..40510d3036 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/constants/providers.ts @@ -0,0 +1,8 @@ +import { ActiveProviders, LLMProviders } from '../types/providerTypes' + +export const activeProviders: ActiveProviders[] = [ + LLMProviders.OpenAI, + LLMProviders.TogetherAI, + LLMProviders.GoogleAIStudio, + LLMProviders.VertexAI, +] diff --git a/plugins/core/src/lib/services/coreLLMService/types/completionModels.ts b/plugins/core/src/lib/services/coreLLMService/types/completionModels.ts new file mode 100644 index 0000000000..8606947d24 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/types/completionModels.ts @@ -0,0 +1,293 @@ +export enum OpenAIChatCompletionModels { + GPT35Turbo1106Preview = 'gpt-4-1106-preview', + GPT35Turbo = 'gpt-3.5-turbo', + GPT35Turbo1106 = 'gpt-3.5-turbo-1106', + GPT35Turbo0301 = 'gpt-3.5-turbo-0301', + GPT35Turbo0613 = 'gpt-3.5-turbo-0613', + GPT35Turbo16k = 'gpt-3.5-turbo-16k', + GPT35Turbo16k0613 = 'gpt-3.5-turbo-16k-0613', + GPT4 = 'gpt-4', + GPT40314 = 'gpt-4-0314', + GPT40613 = 'gpt-4-0613', +} + +export enum OpenAIVisionModels { + 
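+  // Vision-capable chat models that accept image inputs alongside text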
GPT4VisionPreview = 'gpt-4-vision-preview', +} + +export enum OpenAITextCompletionInstructModels { + GPT35TurboInstruct = 'gpt-3.5-turbo-instruct', + TextDavinci003 = 'text-davinci-003', + Ada001 = 'ada-001', + Curie001 = 'curie-001', + Babbage001 = 'babbage-001', + Babbage002 = 'babbage-002', + Davinci002 = 'davinci-002', +} + +export type AzureSlug = `azure/${string}` +export type CustomOpenAISlug = `openai/${string}` +export type CustomReplicateSlug = `replicate/${string}` +export type ReplicateDeploymentModelSlug = `replicate/deployments/${string}` + +export enum HuggingFaceModelsWithPromptFormatting { + HuggingFaceMistral7BInstructV01 = 'huggingface/mistralai/Mistral-7B-Instruct-v0.1', + HuggingFaceMetaLlamaLlama27bChat = 'huggingface/meta-llama/Llama-2-7b-chat', + HuggingFaceTiiuaeFalcon7bInstruct = 'huggingface/tiiuae/falcon-7b-instruct', + HuggingFaceMosaicmlMpt7bChat = 'huggingface/mosaicml/mpt-7b-chat', + HuggingFaceCodellamaCodeLlama34bInstructHf = 'huggingface/codellama/CodeLlama-34b-Instruct-hf', + HuggingFaceWizardLMWizardCoderPython34BV10 = 'huggingface/WizardLM/WizardCoder-Python-34B-V1.0', + HuggingFacePhindPhindCodeLlama34Bv2 = 'huggingface/Phind/Phind-CodeLlama-34B-v2', +} + +export enum OllamaVisionModels { + LLAVA = 'ollama/llama-va', +} + +export enum OllamaModels { + OlamaMistral = 'ollama/mistral', + OlamaLlama27B = 'ollama/llama2', + OlamaLlama213B = 'ollama/llama2:13b', + OlamaLlama270B = 'ollama/llama2:70b', + OlamaLlama2Uncensored = 'ollama/llama2-uncensored', + OlamaCodeLlama = 'ollama/codellama', + OlamaOrcaMini = 'ollama/orca-mini', + OlamaVicuna = 'ollama/vicuna', + OlamaNousHermes = 'ollama/nous-hermes', + OlamaNousHermes13B = 'ollama/nous-hermes:13b', + OlamaWizardVicunaUncensored = 'ollama/wizard-vicuna', +} + +export enum VertexAIGoogleModels { + GeminiPro = 'gemini-pro', + GeminiProVision = 'gemini-pro-vision', +} + +export enum PalmModels { + ChatBison = 'chat-bison', +} + +export type GoogleAIStudioSlug = `gemini/${GoogleAIStudioModels}` +export enum GoogleAIStudioModels { + GeminiPro = 'gemini-pro', + GeminiProVision = 'gemini-pro-vision', +} + +export enum MistralAIModels { + MistralTiny = 'mistral/mistral-tiny', + MistralSmall = 'mistral/mistral-small', + MistralMedium = 'mistral/mistral-medium', +} + +export enum AnthropicModels { + Claude21 = 'claude-2.1', + Claude2 = 'claude-2', + ClaudeInstant1 = 'claude-instant-1', + ClaudeInstant12 = 'claude-instant-1.2', +} + +export enum SageMakerModels { + MetaLlama27B = 'sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b', + MetaLlama27BChatFineTuned = 'sagemaker/jumpstart-dft-meta-textgeneration-llama-2-7b-f', + MetaLlama213B = 'sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b', + MetaLlama213BChatFineTuned = 'sagemaker/jumpstart-dft-meta-textgeneration-llama-2-13b-f', + MetaLlama270B = 'sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b', + MetaLlama270BChatFineTuned = 'sagemaker/jumpstart-dft-meta-textgeneration-llama-2-70b-b-f', +} + +export type BedrockSlug = `bedrock/${BedrockModels}` +export enum BedrockModels { + BedrockAnthropicClaudeV21 = 'anthropic.claude-v2:1', + BedrockAnthropicClaudeV2 = 'anthropic.claude-v2', + BedrockAnthropicClaudeInstantV1 = 'anthropic.claude-instant-v1', + BedrockAnthropicClaudeV1 = 'anthropic.claude-v1', + BedrockAmazonTitanLite = 'amazon.titan-text-lite-v1', + BedrockAmazonTitanExpress = 'amazon.titan-text-express-v1', + BedrockCohereCommand = 'cohere.command-text-v14', + BedrockAI21J2Mid = 'ai21.j2-mid-v1', + BedrockAI21J2Ultra = 'ai21.j2-ultra-v1', + 
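+  // Meta Llama 2 chat models served through Amazon Bedrock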
BedrockMetaLlama2Chat13b = 'meta.llama2-13b-chat-v1', + BedrockMetaLlama2Chat70b = 'meta.llama2-70b-chat-v1', +} + +export type PerplexityAISlug = `perplexity/${PerplexityAIModels}` +export enum PerplexityAIModels { + Pplx7bChat = 'pplx-7b-chat', + Pplx70bChat = 'pplx-70b-chat', + Pplx7bOnline = 'pplx-7b-online', + Pplx70bOnline = 'pplx-70b-online', + PplxCodeLlama34bInstruct = 'codellama-34b-instruct', + PplxLlama213bChat = 'llama-2-13b-chat', + PplxLlama270bChat = 'llama-2-70b-chat', + PplxMistral7bInstruct = 'mistral-7b-instruct', + PplxOpenhermes2Mistral7b = 'openhermes-2-mistral-7b', + PplxOpenhermes25Mistral7b = 'openhermes-2.5-mistral-7b', + Pplx7bChatAlpha = 'pplx-7b-chat-alpha', + Pplx70bChatAlpha = 'pplx-70b-chat-alpha', +} + +export enum VLLMModels { + MetaLlamaLlama27bChat = 'vllm/meta-llama/Llama-2-7b', + TiiuaeFalcon7bInstruct = 'vllm/tiiuae/falcon-7b-instruct', + MosaicmlMpt7bChat = 'vllm/mosaicml/mpt-7b-chat', + CodellamaCodeLlama34bInstructHf = 'vllm/codellama/CodeLlama-34b-Instruct-hf', + WizardLMWizardCoderPython34BV10 = 'vllm/WizardLM/WizardCoder-Python-34B-V1.0', + PhindPhindCodeLlama34Bv2 = 'vllm/Phind/Phind-CodeLlama-34B-v2', +} + +export enum XinferenceModels { + BgeBaseEn = 'xinference/bge-base-en', + BgeBaseEnV15 = 'xinference/bge-base-en-v1.5', + BgeBaseZh = 'xinference/bge-base-zh', + BgeBaseZhV15 = 'xinference/bge-base-zh-v1.5', + BgeLargeEn = 'xinference/bge-large-en', + BgeLargeEnV15 = 'xinference/bge-large-en-v1.5', + BgeLargeZh = 'xinference/bge-large-zh', + BgeLargeZhNoinstruct = 'xinference/bge-large-zh-noinstruct', + BgeLargeZhV15 = 'xinference/bge-large-zh-v1.5', + BgeSmallEnV15 = 'xinference/bge-small-en-v1.5', + BgeSmallZh = 'xinference/bge-small-zh', + BgeSmallZhV15 = 'xinference/bge-small-zh-v1.5', + E5LargeV2 = 'xinference/e5-large-v2', + GteBase = 'xinference/gte-base', + GteLarge = 'xinference/gte-large', + JinaEmbeddingsV2BaseEn = 'xinference/jina-embeddings-v2-base-en', + JinaEmbeddingsV2SmallEn = 'xinference/jina-embeddings-v2-small-en', + MultilingualE5Large = 'xinference/multilingual-e5-large', +} + +export enum CloudflareWorkersAIModels { + MetaLlama27bChatFp16 = '@cf/meta/llama-2-7b-chat-fp16', + MetaLlama27bChatInt8 = '@cf/meta/llama-2-7b-chat-int8', + Mistral7bInstructV01 = '@cf/mistral/mistral-7b-instruct-v0.1', + TheBlokeCodellama7bInstructAwq = '@hf/thebloke/codellama-7b-instruct-awq', +} + +export enum AI21Models { + J2Light = 'j2-light', + J2Mid = 'j2-mid', + J2Ultra = 'j2-ultra', +} + +export enum NLPCloudModels { + Dolphin = 'dolphin', + ChatDolphin = 'chatDolphin', +} + +export enum DeepInfraChatModels { + MetaLlamaLlama270bChatHf = 'deepinfra/meta-llama/Llama-2-70b-chat-hf', + MetaLlamaLlama27bChatHf = 'deepinfra/meta-llama/Llama-2-7b-chat-hf', + MetaLlamaLlama213bChatHf = 'deepinfra/meta-llama/Llama-2-13b-chat-hf', + CodellamaCodeLlama34bInstructHf = 'deepinfra/codellama/CodeLlama-34b-Instruct-hf', + MistralaiMistral7BInstructV01 = 'deepinfra/mistralai/Mistral-7B-Instruct-v0.1', + JondurbinAiroborosL270bGpt4141 = 'deepinfra/jondurbin/airoboros-l2-70b-gpt4-1.4.1', +} + +export enum VoyageAIModels { + Voyage01 = 'voyage-ai/voyage-01', + VoyageLite01 = 'voyage-ai/voyage-lite-01', + VoyageLite01Instruct = 'voyage-ai/voyage-lite-01-instruct', +} + +export type TogetherAISlug = `together_ai/${TogetherAIModels}` + +export enum TogetherAIModels { + TogetherLlama270bChat = 'togethercomputer/llama-2-70b-chat', + TogetherLlama270b = 'togethercomputer/llama-2-70b', + TogetherLlama27B32K = 'togethercomputer/LLaMA-2-7B-32K', + 
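+  // Instruction-tuned variant of the 32K-context Llama 2 model above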
TogetherLlama27B32KInstruct = 'togethercomputer/Llama-2-7B-32K-Instruct', + TogetherLlama27b = 'togethercomputer/llama-2-7b', + TogetherFalcon40bInstruct = 'togethercomputer/falcon-40b-instruct', + TogetherFalcon7bInstruct = 'togethercomputer/falcon-7b-instruct', + TogetherAlpaca7b = 'togethercomputer/alpaca-7b', + TogetherStarchatAlpha = 'HuggingFaceH4/starchat-alpha', + TogetherCodeLlama34b = 'togethercomputer/CodeLlama-34b', + TogetherCodeLlama34bInstruct = 'togethercomputer/CodeLlama-34b-Instruct', + TogetherCodeLlama34bPython = 'togethercomputer/CodeLlama-34b-Python', + TogetherSqlCoder = 'defog/sqlcoder', + TogetherNSQLLlama27B = 'NumbersStation/nsql-llama-2-7B', + TogetherWizardCoder15BV10 = 'WizardLM/WizardCoder-15B-V1.0', + TogetherWizardCoderPython34BV10 = 'WizardLM/WizardCoder-Python-34B-V1.0', + TogetherNousHermesLlama213b = 'NousResearch/Nous-Hermes-Llama2-13b', + TogetherChronosHermes13b = 'Austism/chronos-hermes-13b', + TogetherSolar070b16bit = 'upstage/SOLAR-0-70b-16bit', + TogetherWizardLM70BV10 = 'WizardLM/WizardLM-70B-V1.0', +} + +export enum AlephAlphaModels { + LuminousBase = 'luminous-base', + LuminousBaseControl = 'luminous-base-control', + LuminousExtended = 'luminous-extended', + LuminousExtendedControl = 'luminous-extended-control', + LuminousSupreme = 'luminous-supreme', + LuminousSupremeControl = 'luminous-supreme-control', +} + +export enum BaseTenModels { + Falcon7B = 'baseten/qvv0xeq', + WizardLM = 'baseten/q841o8w', + MPT7BBase = 'baseten/31dxrj3', +} + +export enum PetalsModels { + PetalsTeamStableBeluga = 'petals/petals-team/StableBeluga2', + HuggyLlamaLlama65 = 'petals/huggyllama/llama-65b', +} + +export enum AnyscaleModels { + AnyscaleMistral7BInstructV01 = 'anyscale/mistralai/Mistral-7B-Instruct-v0.1', + AnyscaleZephyr7BBeta = 'anyscale/HuggingFaceH4/zephyr-7b-beta', + AnyscaleMetaLlamaLlama27bChatHf = 'anyscale/meta-llama/Llama-2-7b-chat-hf', + AnyscaleMetaLlamaLlama213bChatHf = 'anyscale/meta-llama/Llama-2-13b-chat-hf', + AnyscaleMetaLlamaLlama270bChatHf = 'anyscale/meta-llama/Llama-2-70b-chat-hf', + AnyscaleCodellamaCodeLlama34bInstructHf = 'anyscale/codellama/CodeLlama-34b-Instruct-hf', +} + +export enum OpenRouterModels { + OpenRouterOpenAIGpt35turbo = 'openrouter/openai/gpt-3.5-turbo', + OpenRouterOpenAIGpt35turbo16k = 'openrouter/openai/gpt-3.5-turbo-16k', + OpenRouterOpenAIGpt4 = 'openrouter/openai/gpt-4', + OpenRouterOpenAIGpt432k = 'openrouter/openai/gpt-4-32k', + OpenRouterAnthropicClaud2 = 'openrouter/anthropic/claude-2', + OpenRouterAnthropicClaudInstantV1 = 'openrouter/anthropic/claude-instant-v1', + OpenRouterPalm2ChatBison = 'openrouter/google/palm-2-chat-bison', + OpenRouterPalm2CodeChatBison = 'openrouter/google/palm-2-codechat-bison', + OpenRouterMetaLlamaLlama213bChat = 'openrouter/meta-llama/llama-2-13b-chat', + OpenRouterMetaLlamaLlama270bChat = 'openrouter/meta-llama/llama-2-70b-chat', +} + +export enum ReplicateModels { + ReplicateLlama270bChat = 'replicate/llama-2-70b-chat', + ReplicateLlama213bChat = 'replicate/a16z-infra/llama-2-13b-chat', + ReplicateVicuna13b = 'replicate/vicuna-13b', + ReplicateFlanT5Large = 'replicate/daanelson/flan-t5-large', +} + +export type CompletionModels = + | OpenAIChatCompletionModels + | OpenAITextCompletionInstructModels + | OpenAIVisionModels + | HuggingFaceModelsWithPromptFormatting + | OllamaModels + | OllamaVisionModels + | VertexAIGoogleModels + | PalmModels + | MistralAIModels + | AnthropicModels + | SageMakerModels + | BedrockModels + | PerplexityAIModels + | VLLMModels + | 
XinferenceModels + | CloudflareWorkersAIModels + | AI21Models + | NLPCloudModels + | DeepInfraChatModels + | VoyageAIModels + | AlephAlphaModels + | BaseTenModels + | PetalsModels + | AnyscaleModels + | OpenRouterModels + | GoogleAIStudioModels + | TogetherAIModels diff --git a/plugins/core/src/lib/services/coreLLMService/types/completionTypes.ts b/plugins/core/src/lib/services/coreLLMService/types/completionTypes.ts new file mode 100644 index 0000000000..086dab1f06 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/types/completionTypes.ts @@ -0,0 +1,26 @@ +import { CompletionModels } from './completionModels' +import { CompletionOptions } from './liteLLMTypes' + +import { Message, Choice } from './messageTypes' + +export type CompletionRequest = { + model: CompletionModels + messages: Message[] + options?: CompletionOptions + api_key?: string +} + +export type CompletionResponse = { + id: string + choices: Choice[] + created: string + model: string + object: string + system_fingerprint: any | null + usage: { + prompt_tokens: number + completion_tokens: number + total_tokens: number + } + _python_object: any +} diff --git a/plugins/core/src/lib/services/coreLLMService/types/liteLLMTypes.ts b/plugins/core/src/lib/services/coreLLMService/types/liteLLMTypes.ts new file mode 100644 index 0000000000..afb1ffacbb --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/types/liteLLMTypes.ts @@ -0,0 +1,42 @@ +export type LiteLLMOptions = { + api_base?: string + api_version?: string + api_key?: string + num_retries?: number + context_window_fallback_dict?: Record + fallbacks?: Array> + metadata?: Record +} + +export type FunctionType = { + name: string + description?: string + parameters: Record +} + +export type ToolType = { + type: string + function: FunctionType +} +export type CompletionOptions = { + temperature?: number + top_p?: number + n?: number + stream?: boolean + stop?: string | string[] + max_tokens?: number + presence_penalty?: number + frequency_penalty?: number + logit_bias?: Record + user?: string + deployment_id?: string + request_timeout?: number + response_format?: { + type: string + } + seed?: number + tools?: ToolType[] + tool_choice?: string | ToolType + functions?: FunctionType[] + function_call?: string +} & LiteLLMOptions diff --git a/plugins/core/src/lib/services/coreLLMService/types/messageTypes.ts b/plugins/core/src/lib/services/coreLLMService/types/messageTypes.ts new file mode 100644 index 0000000000..13d4392f17 --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/types/messageTypes.ts @@ -0,0 +1,35 @@ +import { FunctionType } from './liteLLMTypes' + +export type Message = { + role: string + content: string | null + name?: string + function_call?: { + type: string + function: FunctionType + } +} + +export type Chunk = { + choices: ChunkChoice[] +} + +export type ChunkChoice = { + finish_reason: string + index: number + delta: { + function_call: string + tool_calls: string + content: string + role: string + } +} + +export type Choice = { + finish_reason: string + index: number + message: { + role: string + content: string + } +} diff --git a/plugins/core/src/lib/services/coreLLMService/types/modelProviderMappings.ts b/plugins/core/src/lib/services/coreLLMService/types/modelProviderMappings.ts new file mode 100644 index 0000000000..de354a77ac --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/types/modelProviderMappings.ts @@ -0,0 +1,4 @@ +import { CompletionModels } from './completionModels' +import { LLMProviderKeys } 
from './providerTypes' + +export type LLMModel = [CompletionModels, LLMProviderKeys] diff --git a/plugins/core/src/lib/services/coreLLMService/types/providerTypes.ts b/plugins/core/src/lib/services/coreLLMService/types/providerTypes.ts new file mode 100644 index 0000000000..9fe45cce4d --- /dev/null +++ b/plugins/core/src/lib/services/coreLLMService/types/providerTypes.ts @@ -0,0 +1,82 @@ +import { PluginCredential } from 'server/credentials' + +export enum LLMProviders { + OpenAI = 'openai', + Azure = 'azure_openai', + Anthropic = 'anthropic', + Sagemaker = 'sagemaker', + Bedrock = 'aws_bedrock', + Anyscale = 'anyscale', + PerplexityAI = 'perplexity', + VLLM = 'vllm', + DeepInfra = 'deepinfra', + Cohere = 'cohere', + TogetherAI = 'together', + AlephAlpha = 'alephalpha', + Baseten = 'baseten', + OpenRouter = 'openrouter', + CustomAPI = 'customapi', + CustomOpenAI = 'custom_openai', + Petals = 'petals', + Ollama = 'ollama', + GoogleAIStudio = 'google', + Palm = 'palm', + HuggingFace = 'Huggingface', + Xinference = 'xinference', + CloudflareWorkersAI = 'cloudflareworkersai', + AI21 = 'ai21', + NLPCloud = 'nlpcloud', + VoyageAI = 'voyageai', + Replicate = 'replicate', + Meta = 'meta', + Mistral = 'mistralai', + VertexAI = 'vertexai', +} + +export enum LLMProviderKeys { + OpenAI = 'OPENAI_API_KEY', + Azure = 'AZURE_API_KEY', + Anthropic = 'ANTHROPIC_API_KEY', + Sagemaker = 'SAGEMAKER_API_KEY', + Bedrock = 'BEDROCK_API_KEY', + Anyscale = 'ANYSCALE_API_KEY', + PerplexityAI = 'PERPLEXITY_API_KEY', + VLLM = 'VLLM_API_KEY', + DeepInfra = 'DEEPINFRA_API_KEY', + Cohere = 'COHERE_API_KEY', + TogetherAI = 'TOGETHER_AI_API_KEY', + AlephAlpha = 'ALEPH_ALPHA_API_KEY', + Baseten = 'BASETEN_API_KEY', + OpenRouter = 'OPENROUTER_API_KEY', + CustomAPI = 'CUSTOM_API_KEY', + CustomOpenAI = 'CUSTOM_OPEN_API_KEY', + Petals = 'PETALS_API_KEY', + Ollama = 'OLLAMA_API_KEY', + GoogleAIStudio = 'GOOGLEAISTUDIO_API_KEY', + Palm = 'PALM_API_KEY', + HuggingFace = 'HUGGINGFACE_API_KEY', + Xinference = 'XINFERENCE_API_KEY', + CloudflareWorkersAI = 'CLOUDFLAREWORKERSAI_API_KEY', + AI21 = 'AI21_API_KEY', + NLPCloud = 'NLPCLOUD_API_KEY', + VoyageAI = 'VOYAGEAI_API_KEY', + Replicate = 'REPLICATE_API_KEY', + Meta = 'META_API_KEY', + Mistral = 'MISTRAL_API_KEY', + VertexAI = 'VERTEXAI_API_KEY', + Unknown = 'unknown', +} + +export type ActiveProviders = + | LLMProviders.OpenAI + | LLMProviders.GoogleAIStudio + | LLMProviders.TogetherAI + | LLMProviders.VertexAI + +export interface ModelProviderMapping { + provider: LLMProviders + apiKey: LLMProviderKeys +} +export type LLMCredential = PluginCredential & { + value: string +} diff --git a/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts b/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts index f5ef977bdc..2c5decb15b 100644 --- a/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts +++ b/plugins/core/src/lib/services/coreMemoryService/coreMemoryService.ts @@ -1,11 +1,5 @@ import { python } from 'pythonia' import { PRODUCTION } from 'shared/config' -import { - CompletionModels, - LLMCredential, - Models, - OpenAIChatCompletionModels, -} from '../coreLLMService/types' import { DataType } from './coreMemoryTypes' import { @@ -16,6 +10,12 @@ import { findProviderKey, findProviderName, } from '../coreLLMService/findProvider' +import { + OpenAIChatCompletionModels, + CompletionModels, +} from '../coreLLMService/types/completionModels' +import { Models } from '../coreLLMService/types/models' +import { LLMCredential } from 
'../coreLLMService/types/providerTypes' export interface ICoreMemoryService { initialize(agentId: string): Promise @@ -78,6 +78,11 @@ class CoreMemoryService { // Use Pythonia to create an instance of the Embedchain App this.embedchain = await python('embedchain') + console.log( + '#########Pythonia Embedchain:', + OpenAIChatCompletionModels.GPT35Turbo, + OpenAIEmbeddingModels.TextEmbeddingAda002 + ) // Ste initial LLM and Embedder models this.setLLM(OpenAIChatCompletionModels.GPT35Turbo) this.setEmbedder(OpenAIEmbeddingModels.TextEmbeddingAda002) @@ -156,6 +161,7 @@ class CoreMemoryService { private getCredential(model: Models): string { const provider = findProviderKey(model) + console.log('PROVIDER', provider) let credential = this.credentials.find( c => c.serviceType === provider )?.value diff --git a/plugins/core/src/lib/services/types.ts b/plugins/core/src/lib/services/types.ts index 28f8215fb5..57c15ea070 100644 --- a/plugins/core/src/lib/services/types.ts +++ b/plugins/core/src/lib/services/types.ts @@ -1,10 +1,9 @@ +import { CompletionModels } from './coreLLMService/types/completionModels' import { - Chunk, - CompletionRequest, CompletionResponse, - CompletionModels, - Message, -} from './coreLLMService/types' + CompletionRequest, +} from './coreLLMService/types/completionTypes' +import { Chunk, Message } from './coreLLMService/types/messageTypes' export interface IBudgetManagerService { // Creates a budget for a user From c435ab213c5f76ab24b7496da7d66edd1362dff7 Mon Sep 17 00:00:00 2001 From: Jakob Date: Thu, 25 Jan 2024 14:32:41 -0800 Subject: [PATCH 15/38] add in corresponding model arrays to the switch --- .../CompletionProviderOptions.tsx | 140 ++++++++++++++---- 1 file changed, 108 insertions(+), 32 deletions(-) diff --git a/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx b/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx index ccf271e427..41f50bf028 100644 --- a/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx +++ b/packages/client/editor/src/components/PropertiesWindow/CompletionProviderOptions.tsx @@ -1,51 +1,126 @@ import React, { useState, useEffect } from "react"; import { ConfigurationComponentProps } from "./PropertiesWindow"; -import { LLMProviders, CompletionModels } from "plugins/core/src/lib/services/coreLLMService/types"; -import { ai21ModelsArray, anthropicModelsArray } from "plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays"; +import { + allOpenAICompletionModelsArray, + googleAIStudioModelsArray, + togetherAIModelsArray, + // ai21ModelsArray, + // alephAlphaModelsArray, + // anthropicModelsArray, + // anyscaleModelsArray, + // baseTenModelsArray, + // bedrockModelsArray, + // cloudflareWorkersAIModelsArray, + // deepInfraChatModelsArray, + // huggingFaceModelsWithPromptFormattingArray, + // mistralAIModelsArray, + // nlpCloudModelsArray, + // ollamaModelsArray, + // openRouterModelsArray, + // palmModelsArray, + // perplexityAIModelsArray, + // petalsModelsArray, + // replicateModelsArray, + // sageMakerModelsArray, + // vertexAIGoogleModelsArray, + // vllmModelsArray, + // voyageAIModelsArray, + // xinferenceModelsArray +} from "plugins/core/src/lib/services/coreLLMService/constants/completionModelArrays"; +import { CompletionModels } from "plugins/core/src/lib/services/coreLLMService/types/completionModels"; +import { ActiveProviders, LLMProviders } from 'plugins/core/src/lib/services/coreLLMService/types/providerTypes'; 
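+// activeProviders limits this selector to the providers that are wired up end to end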
+import { activeProviders } from "plugins/core/src/lib/services/coreLLMService/constants/providers"; export const CompletionProviderOptions = (props: ConfigurationComponentProps) => { - const [selectedProvider, setSelectedProvider] = useState(LLMProviders.OpenAI); + console.log('!!!PROPS', props) + const [selectedProvider, setSelectedProvider] = useState(LLMProviders.OpenAI); const [selectedModel, setSelectedModel] = useState(null); const [filteredModels, setFilteredModels] = useState([]); useEffect(() => { - // get all completion models for the selected provider` switch (selectedProvider) { - case LLMProviders.AI21: - setFilteredModels(ai21ModelsArray); - break; - case LLMProviders.Anthropic: - setFilteredModels(anthropicModelsArray); - break; - case LLMProviders.AlephAlpha: - setFilteredModels([]); - break; - case LLMProviders.Anyscale: - setFilteredModels([]); + case LLMProviders.OpenAI: + setFilteredModels(allOpenAICompletionModelsArray); break; - case LLMProviders.Azure: - setFilteredModels([]); + case LLMProviders.GoogleAIStudio: + setFilteredModels(googleAIStudioModelsArray); break; - case LLMProviders.Baseten: - setFilteredModels([]); + case LLMProviders.TogetherAI: + setFilteredModels(togetherAIModelsArray); break; - case LLMProviders.Bedrock: - setFilteredModels([]); - break; - case LLMProviders.CloudflareWorkersAI: - setFilteredModels([]); - break; - // case LLMProviders.OpenAI: - // setFilteredModels(allOpenAICompletionModelsArray); + // case LLMProviders.VertexAI: + // setFilteredModels(vertexAIGoogleModelsArray); // break; - - + // case LLMProviders.AI21: + // setFilteredModels(ai21ModelsArray); + // break; + // case LLMProviders.Anthropic: + // setFilteredModels(anthropicModelsArray); + // break; + // case LLMProviders.AlephAlpha: + // setFilteredModels(alephAlphaModelsArray); + // break; + // case LLMProviders.Anyscale: + // setFilteredModels(anyscaleModelsArray); + // break; + // case LLMProviders.Baseten: + // setFilteredModels(baseTenModelsArray); + // break; + // case LLMProviders.Bedrock: + // setFilteredModels(bedrockModelsArray); + // break; + // case LLMProviders.CloudflareWorkersAI: + // setFilteredModels(cloudflareWorkersAIModelsArray); + // break; + // case LLMProviders.DeepInfra: + // setFilteredModels(deepInfraChatModelsArray); + // break; + // case LLMProviders.HuggingFace: + // setFilteredModels(huggingFaceModelsWithPromptFormattingArray); + // break; + // case LLMProviders.Mistral: + // setFilteredModels(mistralAIModelsArray); + // break; + // case LLMProviders.NLPCloud: + // setFilteredModels(nlpCloudModelsArray); + // break; + // case LLMProviders.Ollama: + // setFilteredModels(ollamaModelsArray); + // break; + // case LLMProviders.OpenRouter: + // setFilteredModels(openRouterModelsArray); + // break; + // case LLMProviders.Palm: + // setFilteredModels(palmModelsArray); + // break; + // case LLMProviders.PerplexityAI: + // setFilteredModels(perplexityAIModelsArray); + // break; + // case LLMProviders.Petals: + // setFilteredModels(petalsModelsArray); + // break; + // case LLMProviders.Replicate: + // setFilteredModels(replicateModelsArray); + // break; + // case LLMProviders.Sagemaker: + // setFilteredModels(sageMakerModelsArray); + // break; + // case LLMProviders.VLLM: + // setFilteredModels(vllmModelsArray); + // break; + // case LLMProviders.VoyageAI: + // setFilteredModels(voyageAIModelsArray); + // break; + // case LLMProviders.Xinference: + // setFilteredModels(xinferenceModelsArray); + // break; + default: setFilteredModels([]); } }, 
[selectedProvider]); - const onProviderChange = (provider: LLMProviders) => { + const onProviderChange = (provider: ActiveProviders) => { setSelectedProvider(provider); props.updateConfigKey("modelProvider", provider); }; @@ -62,13 +137,14 @@ export const CompletionProviderOptions = (props: ConfigurationComponentProps) => +

Model