diff --git a/skyvern/config.py b/skyvern/config.py
index c5a835c2..7e79466b 100644
--- a/skyvern/config.py
+++ b/skyvern/config.py
@@ -64,6 +64,7 @@ class Settings(BaseSettings):
     # ACTIVE LLM PROVIDER
     LLM_KEY: str = "OPENAI_GPT4O"
     # COMMON
+    LLM_CONFIG_TIMEOUT: int = 300
     LLM_CONFIG_MAX_TOKENS: int = 4096
     LLM_CONFIG_TEMPERATURE: float = 0
     # LLM PROVIDER SPECIFIC
diff --git a/skyvern/forge/sdk/api/llm/api_handler_factory.py b/skyvern/forge/sdk/api/llm/api_handler_factory.py
index 0585988d..0c5f68b1 100644
--- a/skyvern/forge/sdk/api/llm/api_handler_factory.py
+++ b/skyvern/forge/sdk/api/llm/api_handler_factory.py
@@ -190,8 +190,10 @@ class LLMAPIHandlerFactory:
                 response = await litellm.acompletion(
                     model=llm_config.model_name,
                     messages=messages,
+                    timeout=SettingsManager.get_settings().LLM_CONFIG_TIMEOUT,
                     **active_parameters,
                 )
+                LOG.info("LLM API call successful", llm_key=llm_key, model=llm_config.model_name)
             except openai.OpenAIError as e:
                 raise LLMProviderError(llm_key) from e
             except Exception as e: