diff --git a/openhands/core/config/llm_config.py b/openhands/core/config/llm_config.py
index c28390d47bda..9beb6d6f5f09 100644
--- a/openhands/core/config/llm_config.py
+++ b/openhands/core/config/llm_config.py
@@ -1,8 +1,8 @@
 from __future__ import annotations
 
 import os
-
 from typing import Any
+
 from pydantic import BaseModel, Field, SecretStr
 
 from openhands.core.logger import LOG_DIR
@@ -39,12 +39,12 @@ class LLMConfig(BaseModel):
         drop_params: Drop any unmapped (unsupported) params without causing an exception.
         modify_params: Modify params allows litellm to do transformations like adding a default message, when a message is empty.
         disable_vision: If model is vision capable, this option allows to disable image processing (useful for cost reduction).
-        reasoning_effort: The effort to put into reasoning. This is a string that can be one of 'low', 'medium', 'high', or 'none'. Exclusive for o1 models.
         caching_prompt: Use the prompt caching feature if provided by the LLM and supported by the provider.
         log_completions: Whether to log LLM completions to the state.
         log_completions_folder: The folder to log LLM completions to. Required if log_completions is True.
         custom_tokenizer: A custom tokenizer to use for token counting.
         native_tool_calling: Whether to use native tool calling if supported by the model. Can be True, False, or not set.
+        reasoning_effort: The effort to put into reasoning. This is a string that can be one of 'low', 'medium', 'high', or 'none'. Exclusive for o1 models.
     """
 
     model: str = Field(default='claude-3-5-sonnet-20241022')
@@ -85,7 +85,8 @@ class LLMConfig(BaseModel):
     log_completions_folder: str = Field(default=os.path.join(LOG_DIR, 'completions'))
     custom_tokenizer: str | None = Field(default=None)
    native_tool_calling: bool | None = Field(default=None)
-
+    reasoning_effort: str | None = Field(default=None)
+
     model_config = {'extra': 'forbid'}
 
     def model_post_init(self, __context: Any):
diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index 8f9ac12b7063..98bcf7cb173d 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -152,6 +152,12 @@ def __init__(
             temperature=self.config.temperature,
             top_p=self.config.top_p,
             drop_params=self.config.drop_params,
+            # add reasoning_effort, only if the model is supported
+            **(
+                {'reasoning_effort': self.config.reasoning_effort}
+                if self.config.model.lower() in REASONING_EFFORT_SUPPORTED_MODELS
+                else {}
+            ),
         )
 
         self._completion_unwrapped = self._completion
@@ -217,10 +223,6 @@ def wrapper(*args, **kwargs):
                     'anthropic-beta': 'prompt-caching-2024-07-31',
                 }
 
-            # Set reasoning effort for models that support it
-            if self.config.model.lower() in REASONING_EFFORT_SUPPORTED_MODELS:
-                kwargs['reasoning_effort'] = self.config.reasoning_effort
-
             # set litellm modify_params to the configured value
             # True by default to allow litellm to do transformations like adding a default message, when a message is empty
             # NOTE: this setting is global; unlike drop_params, it cannot be overridden in the litellm completion partial
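The behavioral change in `llm.py` above is that `reasoning_effort` is now bound once into the `litellm` completion `partial` at construction time, rather than injected into `kwargs` on every call inside the retry wrapper. A minimal standalone sketch of that conditional-kwarg pattern follows; the `REASONING_EFFORT_SUPPORTED_MODELS` contents, the `build_completion` helper, and the model names are illustrative assumptions here, not the repo's actual values:

```python
from functools import partial

import litellm

# Hypothetical stand-in for the set defined in openhands/llm/llm.py.
REASONING_EFFORT_SUPPORTED_MODELS = {'o1', 'o1-2024-12-17'}


def build_completion(model: str, reasoning_effort: str | None):
    """Return a completion callable with reasoning_effort pre-bound
    only when the model supports it, mirroring the diff above."""
    return partial(
        litellm.completion,
        model=model,
        # Unpacking an empty dict passes nothing, so unsupported models
        # never see an unexpected reasoning_effort kwarg.
        **(
            {'reasoning_effort': reasoning_effort}
            if model.lower() in REASONING_EFFORT_SUPPORTED_MODELS
            else {}
        ),
    )


completion = build_completion('o1', 'high')
# response = completion(messages=[{'role': 'user', 'content': 'hello'}])
```

Deciding the kwarg once at construction keeps the per-call wrapper simpler and ensures every invocation of the partial sees the same setting.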