diff --git a/cmd/root.go b/cmd/root.go
index 74ace36..ea3c1fe 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -8,6 +8,7 @@ import (
 	"time"

 	tea "github.com/charmbracelet/bubbletea"
+	zone "github.com/lrstanley/bubblezone"
 	"github.com/opencode-ai/opencode/internal/app"
 	"github.com/opencode-ai/opencode/internal/config"
 	"github.com/opencode-ai/opencode/internal/db"
@@ -16,7 +17,6 @@ import (
 	"github.com/opencode-ai/opencode/internal/pubsub"
 	"github.com/opencode-ai/opencode/internal/tui"
 	"github.com/opencode-ai/opencode/internal/version"
-	zone "github.com/lrstanley/bubblezone"
 	"github.com/spf13/cobra"
 )
@@ -80,6 +80,7 @@ to assist developers in writing, debugging, and understanding code directly from
 		tui.New(app),
 		tea.WithAltScreen(),
 		tea.WithMouseCellMotion(),
+		tea.WithReportFocus(),
 	)

 	// Initialize MCP tools in the background
diff --git a/internal/config/config.go b/internal/config/config.go
index 1da1f6c..c076f05 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -33,6 +33,12 @@ type MCPServer struct {

 type AgentName string

+type ProviderType string
+
+const (
+	ProviderTypeOpenAI ProviderType = "openai"
+)
+
 const (
 	AgentCoder AgentName = "coder"
 	AgentTask  AgentName = "task"
@@ -41,15 +47,20 @@ const (

 // Agent defines configuration for different LLM models and their token limits.
 type Agent struct {
-	Model           models.ModelID `json:"model"`
-	MaxTokens       int64          `json:"maxTokens"`
-	ReasoningEffort string         `json:"reasoningEffort"` // For openai models low,medium,heigh
+	Model           models.ModelID       `json:"model"`
+	Provider        models.ModelProvider `json:"provider"`
+	MaxTokens       int64                `json:"maxTokens"`
+	ReasoningEffort string               `json:"reasoningEffort"` // For OpenAI models: low, medium, high
 }

 // Provider defines configuration for an LLM provider.
 type Provider struct {
-	APIKey   string `json:"apiKey"`
-	Disabled bool   `json:"disabled"`
+	APIKey                string                          `json:"apiKey"`
+	Disabled              bool                            `json:"disabled"`
+	Type                  ProviderType                    `json:"type"` // selects the parent client, e.g. "openai" for OpenAI-compatible APIs
+	Models                map[models.ModelID]models.Model `json:"models"`
+	BaseURL               string                          `json:"baseUrl"`
+	EnableReasoningEffort bool                            `json:"enableReasoningEffort"`
 }

 // Data defines storage configuration.
@@ -332,8 +343,8 @@ func Validate() error {
 	// Validate agent models
 	for name, agent := range cfg.Agents {
 		// Check if model exists
-		model, modelExists := models.SupportedModels[agent.Model]
-		if !modelExists {
+		model, err := GetModel(agent.Model, agent.Provider)
+		if err != nil {
 			logging.Warn("unsupported model configured, reverting to default",
 				"agent", name,
 				"configured_model", agent.Model)
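How the new fields compose, as a sketch in Go: the `local` provider, its model ID, and the `Config` struct usage are illustrative assumptions inferred from this diff, not part of it.

```go
package main

import (
	"github.com/opencode-ai/opencode/internal/config"
	"github.com/opencode-ai/opencode/internal/llm/models"
)

// configureLocalProvider shows the shape a user's JSON config decodes
// into. The "local" provider and its model ID are invented; Config,
// Providers, and Agents are inferred from their use elsewhere in this diff.
func configureLocalProvider(cfg *config.Config) {
	cfg.Providers["local"] = config.Provider{
		APIKey:  "sk-local",
		Type:    config.ProviderTypeOpenAI, // routed through the OpenAI client
		BaseURL: "http://localhost:8080/v1",
		Models: map[models.ModelID]models.Model{
			// Deliberately sparse: GetModel fills in ID, APIModel, Name,
			// DefaultMaxTokens, and ContextWindow defaults.
			"local.llama-3-8b": {APIModel: "llama-3-8b"},
		},
	}
	cfg.Agents[config.AgentCoder] = config.Agent{
		Model:    "local.llama-3-8b",
		Provider: "local", // lets GetModel resolve a model missing from SupportedModels
	}
}
```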
diff --git a/internal/config/model.go b/internal/config/model.go
new file mode 100644
index 0000000..33ae35e
--- /dev/null
+++ b/internal/config/model.go
@@ -0,0 +1,83 @@
+package config
+
+import (
+	"fmt"
+
+	"github.com/opencode-ai/opencode/internal/llm/models"
+)
+
+func GetModel(model models.ModelID, provider models.ModelProvider) (models.Model, error) {
+	if model == "" {
+		return models.Model{}, fmt.Errorf("model id is empty")
+	}
+
+	m, foundModel := models.SupportedModels[model]
+	if foundModel {
+		return m, nil
+	}
+
+	providerName := provider
+	if providerName == "" {
+		return models.Model{}, fmt.Errorf("model %s not found", model)
+	}
+	providerCfg, foundProvider := cfg.Providers[providerName]
+	if !foundProvider {
+		return models.Model{}, fmt.Errorf("provider %s not supported", providerName)
+	}
+	if providerCfg.Disabled {
+		return models.Model{}, fmt.Errorf("provider %s is not enabled", providerName)
+	}
+
+	// Try to find the model in the provider config.
+	m, foundModel = providerCfg.Models[model]
+	if foundModel {
+		// Fill in sensible defaults for anything left unset.
+		m.Provider = providerName
+		if m.ID == "" {
+			m.ID = model
+		}
+		if m.APIModel == "" {
+			m.APIModel = string(model)
+		}
+		if m.Name == "" {
+			m.Name = fmt.Sprintf("%s: %s", providerName, model)
+		}
+		if m.Ref != "" {
+			existingModel, foundExisting := models.SupportedModels[models.ModelID(m.Ref)]
+			if foundExisting {
+				m.CostPer1MIn = existingModel.CostPer1MIn
+				m.CostPer1MInCached = existingModel.CostPer1MInCached
+				m.CostPer1MOut = existingModel.CostPer1MOut
+				m.CostPer1MOutCached = existingModel.CostPer1MOutCached
+				m.ContextWindow = existingModel.ContextWindow
+				m.DefaultMaxTokens = existingModel.DefaultMaxTokens
+			}
+		}
+		if m.DefaultMaxTokens == 0 {
+			m.DefaultMaxTokens = 4096
+		}
+		if m.ContextWindow == 0 {
+			m.ContextWindow = 50_000
+		}
+		return m, nil
+	}
+
+	// Not found anywhere: create a simple model based only on the model ID.
+	m = models.Model{
+		ID:               model,
+		APIModel:         string(model),
+		Provider:         providerName,
+		Name:             fmt.Sprintf("%s: %s", providerName, model),
+		DefaultMaxTokens: 4096,
+		ContextWindow:    50_000,
+	}
+	return m, nil
+}
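A usage sketch of the resolution order (built-in table, then the provider's `models` map, then a synthesized entry); it assumes the hypothetical `local` provider from the previous sketch is configured:

```go
package main

import (
	"fmt"

	"github.com/opencode-ai/opencode/internal/config"
	"github.com/opencode-ai/opencode/internal/llm/models"
)

func main() {
	// Built-in IDs resolve straight from models.SupportedModels.
	m, _ := config.GetModel(models.BedrockClaude37Sonnet, "")
	fmt.Println(m.Provider) // "bedrock"

	// Unknown ID plus an explicit provider: taken from that provider's
	// Models map if present (with defaults filled in), otherwise
	// synthesized from the ID with DefaultMaxTokens = 4096.
	m, _ = config.GetModel("local.llama-3-8b", "local")
	fmt.Println(m.Name) // "local: local.llama-3-8b" unless the config names it

	// No built-in entry and no provider to fall back on: an error.
	_, err := config.GetModel("does-not-exist", "")
	fmt.Println(err)
}
```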
diff --git a/internal/db/messages.sql.go b/internal/db/messages.sql.go
index 0555b43..82a9970 100644
--- a/internal/db/messages.sql.go
+++ b/internal/db/messages.sql.go
@@ -17,12 +17,13 @@ INSERT INTO messages (
     role,
     parts,
     model,
+    provider,
     created_at,
     updated_at
 ) VALUES (
-    ?, ?, ?, ?, ?, strftime('%s', 'now'), strftime('%s', 'now')
+    ?, ?, ?, ?, ?, ?, strftime('%s', 'now'), strftime('%s', 'now')
 )
-RETURNING id, session_id, role, parts, model, created_at, updated_at, finished_at
+RETURNING id, session_id, role, parts, model, created_at, updated_at, finished_at, provider
 `

 type CreateMessageParams struct {
@@ -31,6 +32,7 @@ type CreateMessageParams struct {
 	Role     string         `json:"role"`
 	Parts    string         `json:"parts"`
 	Model    sql.NullString `json:"model"`
+	Provider sql.NullString `json:"provider"`
 }

 func (q *Queries) CreateMessage(ctx context.Context, arg CreateMessageParams) (Message, error) {
@@ -40,6 +42,7 @@ func (q *Queries) CreateMessage(ctx context.Context, arg CreateMessageParams) (M
 		arg.Role,
 		arg.Parts,
 		arg.Model,
+		arg.Provider,
 	)
 	var i Message
 	err := row.Scan(
@@ -51,6 +54,7 @@ func (q *Queries) CreateMessage(ctx context.Context, arg CreateMessageParams) (M
 		&i.CreatedAt,
 		&i.UpdatedAt,
 		&i.FinishedAt,
+		&i.Provider,
 	)
 	return i, err
 }
@@ -76,7 +80,7 @@ func (q *Queries) DeleteSessionMessages(ctx context.Context, sessionID string) e
 }

 const getMessage = `-- name: GetMessage :one
-SELECT id, session_id, role, parts, model, created_at, updated_at, finished_at
+SELECT id, session_id, role, parts, model, created_at, updated_at, finished_at, provider
 FROM messages
 WHERE id = ? LIMIT 1
 `
@@ -93,12 +97,13 @@ func (q *Queries) GetMessage(ctx context.Context, id string) (Message, error) {
 		&i.CreatedAt,
 		&i.UpdatedAt,
 		&i.FinishedAt,
+		&i.Provider,
 	)
 	return i, err
 }

 const listMessagesBySession = `-- name: ListMessagesBySession :many
-SELECT id, session_id, role, parts, model, created_at, updated_at, finished_at
+SELECT id, session_id, role, parts, model, created_at, updated_at, finished_at, provider
 FROM messages
 WHERE session_id = ?
 ORDER BY created_at ASC
@@ -122,6 +127,7 @@ func (q *Queries) ListMessagesBySession(ctx context.Context, sessionID string) (
 			&i.CreatedAt,
 			&i.UpdatedAt,
 			&i.FinishedAt,
+			&i.Provider,
 		); err != nil {
 			return nil, err
 		}
diff --git a/internal/db/migrations/20250427135639_add_provider.sql b/internal/db/migrations/20250427135639_add_provider.sql
new file mode 100644
index 0000000..dfe4c29
--- /dev/null
+++ b/internal/db/migrations/20250427135639_add_provider.sql
@@ -0,0 +1,9 @@
+-- +goose Up
+-- +goose StatementBegin
+ALTER TABLE messages ADD COLUMN provider TEXT DEFAULT '';
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+ALTER TABLE messages DROP COLUMN provider;
+-- +goose StatementEnd
diff --git a/internal/db/models.go b/internal/db/models.go
index f00cb6a..d0124c3 100644
--- a/internal/db/models.go
+++ b/internal/db/models.go
@@ -27,6 +27,7 @@ type Message struct {
 	CreatedAt  int64          `json:"created_at"`
 	UpdatedAt  int64          `json:"updated_at"`
 	FinishedAt sql.NullInt64  `json:"finished_at"`
+	Provider   sql.NullString `json:"provider"`
 }

 type Session struct {
diff --git a/internal/db/sql/messages.sql b/internal/db/sql/messages.sql
index a59cebe..ea94617 100644
--- a/internal/db/sql/messages.sql
+++ b/internal/db/sql/messages.sql
@@ -16,10 +16,11 @@ INSERT INTO messages (
     role,
     parts,
     model,
+    provider,
    created_at,
     updated_at
 ) VALUES (
-    ?, ?, ?, ?, ?, strftime('%s', 'now'), strftime('%s', 'now')
+    ?, ?, ?, ?, ?, ?, strftime('%s', 'now'), strftime('%s', 'now')
 )
 RETURNING *;
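Since the column defaults to `''` and is nullable, rows written before this migration surface with an empty provider; a small sketch of the mapping (mirroring `fromDBItem` later in this diff), where the helper name is hypothetical:

```go
package main

import (
	"github.com/opencode-ai/opencode/internal/db"
	"github.com/opencode-ai/opencode/internal/llm/models"
)

// providerOf converts the nullable column to the domain type. For
// pre-migration rows String is "" (or Valid is false), and an empty
// ModelProvider simply means "unknown" downstream.
func providerOf(item db.Message) models.ModelProvider {
	return models.ModelProvider(item.Provider.String)
}
```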
diff --git a/internal/llm/agent/agent.go b/internal/llm/agent/agent.go
index c5f0240..8b5796e 100644
--- a/internal/llm/agent/agent.go
+++ b/internal/llm/agent/agent.go
@@ -252,9 +252,10 @@ func (a *agent) streamAndHandleEvents(ctx context.Context, sessionID string, msg
 	eventChan := a.provider.StreamResponse(ctx, msgHistory, a.tools)

 	assistantMsg, err := a.messages.Create(ctx, sessionID, message.CreateMessageParams{
-		Role:  message.Assistant,
-		Parts: []message.ContentPart{},
-		Model: a.provider.Model().ID,
+		Role:     message.Assistant,
+		Parts:    []message.ContentPart{},
+		Model:    a.provider.Model().ID,
+		Provider: a.provider.Model().Provider,
 	})
 	if err != nil {
 		return assistantMsg, nil, fmt.Errorf("failed to create assistant message: %w", err)
@@ -438,22 +439,37 @@ func (a *agent) TrackUsage(ctx context.Context, sessionID string, model models.M
 func createAgentProvider(agentName config.AgentName) (provider.Provider, error) {
 	cfg := config.Get()
-	agentConfig, ok := cfg.Agents[agentName]
-	if !ok {
+	agentConfig, found := cfg.Agents[agentName]
+	if !found {
 		return nil, fmt.Errorf("agent %s not found", agentName)
 	}
-	model, ok := models.SupportedModels[agentConfig.Model]
-	if !ok {
-		return nil, fmt.Errorf("model %s not supported", agentConfig.Model)
+
+	if agentConfig.Model == "" {
+		return nil, fmt.Errorf("agent %s has no model configured", agentName)
 	}
-	providerCfg, ok := cfg.Providers[model.Provider]
-	if !ok {
+	// GetModel already falls back to the provider's model list, or
+	// synthesizes a minimal model from the ID, so no extra lookup is
+	// needed here.
+	model, err := config.GetModel(agentConfig.Model, agentConfig.Provider)
+	if err != nil {
+		return nil, fmt.Errorf("model %s not supported: %w", agentConfig.Model, err)
+	}
+	providerCfg, foundProvider := cfg.Providers[model.Provider]
+	if !foundProvider {
 		return nil, fmt.Errorf("provider %s not supported", model.Provider)
 	}
 	if providerCfg.Disabled {
 		return nil, fmt.Errorf("provider %s is not enabled", model.Provider)
 	}
+
 	maxTokens := model.DefaultMaxTokens
 	if agentConfig.MaxTokens > 0 {
 		maxTokens = agentConfig.MaxTokens
diff --git a/internal/llm/models/README.md b/internal/llm/models/README.md
new file mode 100644
index 0000000..03f830a
--- /dev/null
+++ b/internal/llm/models/README.md
@@ -0,0 +1,7 @@
+# Models
+
+This package holds preconfigured models that users can use without adding manual configuration.
+
+### Naming convention
+
+To keep model IDs from clashing, prefix them with the provider (e.g. `bedrock.*`) whenever a model is exposed through a third-party API rather than its primary one.
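Applying that convention to a hypothetical third-party host (the provider name and ID below are invented; `bedrock.go` that follows is a real instance of the same pattern):

```go
package models

const (
	ProviderTogether ModelProvider = "together"

	// Prefix the ID with the provider so it cannot clash with the
	// canonical ID the model has on its primary API.
	TogetherLlama4Scout ModelID = "together.llama-4-scout-17b-16e-instruct"
)
```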
diff --git a/internal/llm/models/bedrock.go b/internal/llm/models/bedrock.go
new file mode 100644
index 0000000..4a76be1
--- /dev/null
+++ b/internal/llm/models/bedrock.go
@@ -0,0 +1,20 @@
+package models
+
+const (
+	ProviderBedrock ModelProvider = "bedrock"
+
+	BedrockClaude37Sonnet ModelID = "bedrock.claude-3.7-sonnet"
+)
+
+var BedrockModels = map[ModelID]Model{
+	BedrockClaude37Sonnet: {
+		ID:                 BedrockClaude37Sonnet,
+		Name:               "Bedrock: Claude 3.7 Sonnet",
+		Provider:           ProviderBedrock,
+		APIModel:           "anthropic.claude-3-7-sonnet-20250219-v1:0",
+		CostPer1MIn:        3.0,
+		CostPer1MInCached:  3.75,
+		CostPer1MOutCached: 0.30,
+		CostPer1MOut:       15.0,
+	},
+}
diff --git a/internal/llm/models/groq.go b/internal/llm/models/groq.go
index 749895b..18c66d6 100644
--- a/internal/llm/models/groq.go
+++ b/internal/llm/models/groq.go
@@ -4,13 +4,13 @@ const (
 	ProviderGROQ ModelProvider = "groq"

 	// GROQ
-	QWENQwq ModelID = "qwen-qwq"
+	QWENQwq ModelID = "groq.qwen-qwq"

 	// GROQ preview models
-	Llama4Scout               ModelID = "meta-llama/llama-4-scout-17b-16e-instruct"
-	Llama4Maverick            ModelID = "meta-llama/llama-4-maverick-17b-128e-instruct"
-	Llama3_3_70BVersatile     ModelID = "llama-3.3-70b-versatile"
-	DeepseekR1DistillLlama70b ModelID = "deepseek-r1-distill-llama-70b"
+	Llama4Scout               ModelID = "groq.llama-4-scout-17b-16e-instruct"
+	Llama4Maverick            ModelID = "groq.llama-4-maverick-17b-128e-instruct"
+	Llama3_3_70BVersatile     ModelID = "groq.llama-3.3-70b-versatile"
+	DeepseekR1DistillLlama70b ModelID = "groq.deepseek-r1-distill-llama-70b"
 )

 var GroqModels = map[ModelID]Model{
@@ -33,7 +33,7 @@ var GroqModels = map[ModelID]Model{

 	Llama4Scout: {
 		ID:       Llama4Scout,
-		Name:     "Llama4Scout",
+		Name:     "Llama 4 Scout",
 		Provider: ProviderGROQ,
 		APIModel: "meta-llama/llama-4-scout-17b-16e-instruct",
 		CostPer1MIn: 0.11,
@@ -45,7 +45,7 @@ var GroqModels = map[ModelID]Model{

 	Llama4Maverick: {
 		ID:       Llama4Maverick,
-		Name:     "Llama4Maverick",
+		Name:     "Llama 4 Maverick",
 		Provider: ProviderGROQ,
 		APIModel: "meta-llama/llama-4-maverick-17b-128e-instruct",
 		CostPer1MIn: 0.20,
@@ -57,7 +57,7 @@ var GroqModels = map[ModelID]Model{

 	Llama3_3_70BVersatile: {
 		ID:       Llama3_3_70BVersatile,
-		Name:     "Llama3_3_70BVersatile",
+		Name:     "Llama 3.3 70B Versatile",
 		Provider: ProviderGROQ,
 		APIModel: "llama-3.3-70b-versatile",
 		CostPer1MIn: 0.59,
@@ -69,7 +69,7 @@ var GroqModels = map[ModelID]Model{

 	DeepseekR1DistillLlama70b: {
 		ID:       DeepseekR1DistillLlama70b,
-		Name:     "DeepseekR1DistillLlama70b",
+		Name:     "Deepseek R1 Distill Llama 70B",
 		Provider: ProviderGROQ,
 		APIModel: "deepseek-r1-distill-llama-70b",
 		CostPer1MIn: 0.75,
diff --git a/internal/llm/models/models.go b/internal/llm/models/models.go
index 1bc02c4..3936128 100644
--- a/internal/llm/models/models.go
+++ b/internal/llm/models/models.go
@@ -11,69 +11,29 @@ type Model struct {
 	ID       ModelID       `json:"id"`
 	Name     string        `json:"name"`
 	Provider ModelProvider `json:"provider"`
-	APIModel           string  `json:"api_model"`
-	CostPer1MIn        float64 `json:"cost_per_1m_in"`
-	CostPer1MOut       float64 `json:"cost_per_1m_out"`
-	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
-	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`
-	ContextWindow      int64   `json:"context_window"`
-	DefaultMaxTokens   int64   `json:"default_max_tokens"`
-	CanReason          bool    `json:"can_reason"`
+	APIModel           string  `json:"apiModel"`
+	CostPer1MIn        float64 `json:"costPer1mIn"`
+	CostPer1MOut       float64 `json:"costPer1mOut"`
+	CostPer1MInCached  float64 `json:"costPer1mInCached"`
+	CostPer1MOutCached float64 `json:"costPer1mOutCached"`
+	ContextWindow      int64   `json:"contextWindow"`
+	DefaultMaxTokens   int64   `json:"defaultMaxTokens"`
+	CanReason          bool    `json:"canReason"`
+	ImageInput         bool    `json:"imageInput"`
+	Ref                string  `json:"ref"` // ID of a built-in model to inherit costs and limits from
 }

-// Model IDs
-const ( // GEMINI
-	// Bedrock
-	BedrockClaude37Sonnet ModelID = "bedrock.claude-3.7-sonnet"
-)
-
 const (
-	ProviderBedrock ModelProvider = "bedrock"
 	// ForTests
 	ProviderMock ModelProvider = "__mock"
 )

-var SupportedModels = map[ModelID]Model{
-	//
-	// // GEMINI
-	// GEMINI25: {
-	// 	ID:                 GEMINI25,
-	// 	Name:               "Gemini 2.5 Pro",
-	// 	Provider:           ProviderGemini,
-	// 	APIModel:           "gemini-2.5-pro-exp-03-25",
-	// 	CostPer1MIn:        0,
-	// 	CostPer1MInCached:  0,
-	// 	CostPer1MOutCached: 0,
-	// 	CostPer1MOut:       0,
-	// },
-	//
-	// GRMINI20Flash: {
-	// 	ID:                 GRMINI20Flash,
-	// 	Name:               "Gemini 2.0 Flash",
-	// 	Provider:           ProviderGemini,
-	// 	APIModel:           "gemini-2.0-flash",
-	// 	CostPer1MIn:        0.1,
-	// 	CostPer1MInCached:  0,
-	// 	CostPer1MOutCached: 0.025,
-	// 	CostPer1MOut:       0.4,
-	// },
-	//
-	// // Bedrock
-	BedrockClaude37Sonnet: {
-		ID:                 BedrockClaude37Sonnet,
-		Name:               "Bedrock: Claude 3.7 Sonnet",
-		Provider:           ProviderBedrock,
-		APIModel:           "anthropic.claude-3-7-sonnet-20250219-v1:0",
-		CostPer1MIn:        3.0,
-		CostPer1MInCached:  3.75,
-		CostPer1MOutCached: 0.30,
-		CostPer1MOut:       15.0,
-	},
-}
+var SupportedModels = map[ModelID]Model{}

 func init() {
 	maps.Copy(SupportedModels, AnthropicModels)
 	maps.Copy(SupportedModels, OpenAIModels)
 	maps.Copy(SupportedModels, GeminiModels)
+	maps.Copy(SupportedModels, BedrockModels)
 	maps.Copy(SupportedModels, GroqModels)
 }
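The new `Ref` field pairs with `GetModel` above: a user-defined model can reference a built-in entry and inherit its costs and limits. A minimal sketch, assuming a hypothetical proxy that serves Claude 3.7 Sonnet under its own name:

```go
package main

import "github.com/opencode-ai/opencode/internal/llm/models"

// Hypothetical custom model: only the API name and a Ref are given;
// GetModel copies pricing, context window, and default max tokens
// from the referenced built-in entry.
var proxyClaude = models.Model{
	APIModel: "claude-3-7-sonnet",
	Ref:      string(models.BedrockClaude37Sonnet),
}
```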
diff --git a/internal/llm/provider/openai.go b/internal/llm/provider/openai.go
index 4d45aeb..aaa6200 100644
--- a/internal/llm/provider/openai.go
+++ b/internal/llm/provider/openai.go
@@ -18,9 +18,10 @@ import (
 )

 type openaiOptions struct {
-	baseURL         string
-	disableCache    bool
-	reasoningEffort string
+	baseURL                string
+	disableCache           bool
+	reasoningEffort        string
+	disableReasoningEffort bool
 }

 type OpenAIOption func(*openaiOptions)
@@ -150,15 +151,17 @@ func (o *openaiClient) preparedParams(messages []openai.ChatCompletionMessagePar
 	if o.providerOptions.model.CanReason == true {
 		params.MaxCompletionTokens = openai.Int(o.providerOptions.maxTokens)
-		switch o.options.reasoningEffort {
-		case "low":
-			params.ReasoningEffort = shared.ReasoningEffortLow
-		case "medium":
-			params.ReasoningEffort = shared.ReasoningEffortMedium
-		case "high":
-			params.ReasoningEffort = shared.ReasoningEffortHigh
-		default:
-			params.ReasoningEffort = shared.ReasoningEffortMedium
+		if !o.options.disableReasoningEffort {
+			switch o.options.reasoningEffort {
+			case "low":
+				params.ReasoningEffort = shared.ReasoningEffortLow
+			case "medium":
+				params.ReasoningEffort = shared.ReasoningEffortMedium
+			case "high":
+				params.ReasoningEffort = shared.ReasoningEffortHigh
+			default:
+				params.ReasoningEffort = shared.ReasoningEffortMedium
+			}
 		}
 	} else {
 		params.MaxTokens = openai.Int(o.providerOptions.maxTokens)
@@ -393,3 +396,9 @@ func WithReasoningEffort(effort string) OpenAIOption {
 		options.reasoningEffort = defaultReasoningEffort
 	}
 }
+
+func WithOpenAIDisableReasoningEffort() OpenAIOption {
+	return func(options *openaiOptions) {
+		options.disableReasoningEffort = true
+	}
+}
diff --git a/internal/llm/provider/provider.go b/internal/llm/provider/provider.go
index 00b7b29..dbc01a3 100644
--- a/internal/llm/provider/provider.go
+++ b/internal/llm/provider/provider.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"

+	"github.com/opencode-ai/opencode/internal/config"
 	"github.com/opencode-ai/opencode/internal/llm/models"
 	"github.com/opencode-ai/opencode/internal/llm/tools"
 	"github.com/opencode-ai/opencode/internal/message"
@@ -82,6 +83,7 @@ type baseProvider[C ProviderClient] struct {
 }

 func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption) (Provider, error) {
+	cfg := config.Get()
 	clientOptions := providerClientOptions{}
 	for _, o := range opts {
 		o(&clientOptions)
@@ -110,6 +112,7 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 	case models.ProviderGROQ:
 		clientOptions.openaiOptions = append(clientOptions.openaiOptions,
 			WithOpenAIBaseURL("https://api.groq.com/openai/v1"),
+			WithOpenAIDisableReasoningEffort(),
 		)
 		return &baseProvider[OpenAIClient]{
 			options: clientOptions,
@@ -119,6 +122,24 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption
 		// TODO: implement mock client for test
 		panic("not implemented")
 	}
+	// Check whether the config defines this as a custom provider.
+	if customProvider, ok := cfg.Providers[providerName]; ok {
+		switch customProvider.Type {
+		case config.ProviderTypeOpenAI:
+			clientOptions.openaiOptions = append(clientOptions.openaiOptions,
+				WithOpenAIBaseURL(customProvider.BaseURL),
+			)
+			if !customProvider.EnableReasoningEffort {
+				clientOptions.openaiOptions = append(clientOptions.openaiOptions,
+					WithOpenAIDisableReasoningEffort(),
+				)
+			}
+			return &baseProvider[OpenAIClient]{
+				options: clientOptions,
+				client:  newOpenAIClient(clientOptions),
+			}, nil
+		}
+	}
 	return nil, fmt.Errorf("provider not supported: %s", providerName)
 }
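One behavior worth noting: the `switch` above claims the built-in provider names and returns before this lookup runs, so a config entry can add new providers but cannot override a built-in one such as `groq`. A sketch of the custom path (the `local` name is hypothetical):

```go
package main

import (
	"github.com/opencode-ai/opencode/internal/llm/models"
	"github.com/opencode-ai/opencode/internal/llm/provider"
)

// "local" is not a built-in provider, so NewProvider falls through the
// switch, finds it in cfg.Providers, and (for Type "openai") builds an
// OpenAI client against its BaseURL, disabling reasoning effort unless
// enableReasoningEffort is set in the config.
func newLocalProvider(opts ...provider.ProviderClientOption) (provider.Provider, error) {
	return provider.NewProvider(models.ModelProvider("local"), opts...)
}
```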
"github.com/opencode-ai/opencode/internal/llm/models" "github.com/opencode-ai/opencode/internal/llm/tools" "github.com/opencode-ai/opencode/internal/message" @@ -82,6 +83,7 @@ type baseProvider[C ProviderClient] struct { } func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption) (Provider, error) { + cfg := config.Get() clientOptions := providerClientOptions{} for _, o := range opts { o(&clientOptions) @@ -110,6 +112,7 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption case models.ProviderGROQ: clientOptions.openaiOptions = append(clientOptions.openaiOptions, WithOpenAIBaseURL("https://api.groq.com/openai/v1"), + WithOpenAIDisableReasoningEffort(), ) return &baseProvider[OpenAIClient]{ options: clientOptions, @@ -119,6 +122,24 @@ func NewProvider(providerName models.ModelProvider, opts ...ProviderClientOption // TODO: implement mock client for test panic("not implemented") } + // check if we can find the customProvider in the config + if customProvider, ok := cfg.Providers[providerName]; ok { + switch customProvider.Type { + case config.ProviderTypeOpenAI: + clientOptions.openaiOptions = append(clientOptions.openaiOptions, + WithOpenAIBaseURL(customProvider.BaseURL), + ) + if !customProvider.EnableReasoningEffort { + clientOptions.openaiOptions = append(clientOptions.openaiOptions, + WithOpenAIDisableReasoningEffort(), + ) + } + return &baseProvider[OpenAIClient]{ + options: clientOptions, + client: newOpenAIClient(clientOptions), + }, nil + } + } return nil, fmt.Errorf("provider not supported: %s", providerName) } diff --git a/internal/message/content.go b/internal/message/content.go index 1ea2bcc..0f3e55c 100644 --- a/internal/message/content.go +++ b/internal/message/content.go @@ -110,6 +110,7 @@ type Message struct { SessionID string Parts []ContentPart Model models.ModelID + Provider models.ModelProvider CreatedAt int64 UpdatedAt int64 diff --git a/internal/message/message.go b/internal/message/message.go index b26af92..d9b38b8 100644 --- a/internal/message/message.go +++ b/internal/message/message.go @@ -14,9 +14,10 @@ import ( ) type CreateMessageParams struct { - Role MessageRole - Parts []ContentPart - Model models.ModelID + Role MessageRole + Parts []ContentPart + Model models.ModelID + Provider models.ModelProvider } type Service interface { @@ -71,6 +72,7 @@ func (s *service) Create(ctx context.Context, sessionID string, params CreateMes Role: string(params.Role), Parts: string(partsJSON), Model: sql.NullString{String: string(params.Model), Valid: true}, + Provider: sql.NullString{String: string(params.Provider), Valid: true}, }) if err != nil { return Message{}, err @@ -156,6 +158,7 @@ func (s *service) fromDBItem(item db.Message) (Message, error) { Role: MessageRole(item.Role), Parts: parts, Model: models.ModelID(item.Model.String), + Provider: models.ModelProvider(item.Provider.String), CreatedAt: item.CreatedAt, UpdatedAt: item.UpdatedAt, }, nil diff --git a/internal/tui/components/chat/editor.go b/internal/tui/components/chat/editor.go index 88ac3e7..0181590 100644 --- a/internal/tui/components/chat/editor.go +++ b/internal/tui/components/chat/editor.go @@ -105,6 +105,17 @@ func (m *editorCmp) Update(msg tea.Msg) (tea.Model, tea.Cmd) { m.session = msg } return m, nil + case tea.BlurMsg: + m.textarea.Blur() + return m, nil + case tea.MouseMsg: + if !m.textarea.Focused() { + m.textarea.Focus() + return m, textarea.Blink + } + case tea.FocusMsg: + m.textarea.Focus() + return m, textarea.Blink case tea.KeyMsg: if 
diff --git a/internal/tui/components/chat/message.go b/internal/tui/components/chat/message.go
index 53ec7ea..10844b4 100644
--- a/internal/tui/components/chat/message.go
+++ b/internal/tui/components/chat/message.go
@@ -15,7 +15,6 @@ import (
 	"github.com/opencode-ai/opencode/internal/config"
 	"github.com/opencode-ai/opencode/internal/diff"
 	"github.com/opencode-ai/opencode/internal/llm/agent"
-	"github.com/opencode-ai/opencode/internal/llm/models"
 	"github.com/opencode-ai/opencode/internal/llm/tools"
 	"github.com/opencode-ai/opencode/internal/message"
 	"github.com/opencode-ai/opencode/internal/tui/styles"
@@ -121,25 +120,29 @@ func renderAssistantMessage(
 	finishData := msg.FinishPart()
 	info := []string{}

+	model, _ := config.GetModel(msg.Model, msg.Provider)
+	if model.ID == "" {
+		model.Name = "Unknown"
+	}
 	// Add finish info if available
 	if finished {
 		switch finishData.Reason {
 		case message.FinishReasonEndTurn:
 			took := formatTimeDifference(msg.CreatedAt, finishData.Time)
 			info = append(info, styles.BaseStyle.Width(width-1).Foreground(styles.ForgroundDim).Render(
-				fmt.Sprintf(" %s (%s)", models.SupportedModels[msg.Model].Name, took),
+				fmt.Sprintf(" %s (%s)", model.Name, took),
 			))
 		case message.FinishReasonCanceled:
 			info = append(info, styles.BaseStyle.Width(width-1).Foreground(styles.ForgroundDim).Render(
-				fmt.Sprintf(" %s (%s)", models.SupportedModels[msg.Model].Name, "canceled"),
+				fmt.Sprintf(" %s (%s)", model.Name, "canceled"),
 			))
 		case message.FinishReasonError:
 			info = append(info, styles.BaseStyle.Width(width-1).Foreground(styles.ForgroundDim).Render(
-				fmt.Sprintf(" %s (%s)", models.SupportedModels[msg.Model].Name, "error"),
+				fmt.Sprintf(" %s (%s)", model.Name, "error"),
 			))
 		case message.FinishReasonPermissionDenied:
 			info = append(info, styles.BaseStyle.Width(width-1).Foreground(styles.ForgroundDim).Render(
-				fmt.Sprintf(" %s (%s)", models.SupportedModels[msg.Model].Name, "permission denied"),
+				fmt.Sprintf(" %s (%s)", model.Name, "permission denied"),
 			))
 		}
 	}
diff --git a/internal/tui/components/core/status.go b/internal/tui/components/core/status.go
index 9fefdba..0b8ad5d 100644
--- a/internal/tui/components/core/status.go
+++ b/internal/tui/components/core/status.go
@@ -8,7 +8,6 @@ import (
 	tea "github.com/charmbracelet/bubbletea"
 	"github.com/charmbracelet/lipgloss"
 	"github.com/opencode-ai/opencode/internal/config"
-	"github.com/opencode-ai/opencode/internal/llm/models"
 	"github.com/opencode-ai/opencode/internal/lsp"
 	"github.com/opencode-ai/opencode/internal/lsp/protocol"
 	"github.com/opencode-ai/opencode/internal/pubsub"
@@ -236,7 +235,10 @@ func (m statusCmp) model() string {
 	if !ok {
 		return "Unknown"
 	}
-	model := models.SupportedModels[coder.Model]
+	model, _ := config.GetModel(coder.Model, coder.Provider)
+	if model.ID == "" {
+		model.Name = "Unknown"
+	}
 	return styles.Padded.Background(styles.Grey).Foreground(styles.Text).Render(model.Name)
 }