Sync config defaults and proxy docs
Some checks are pending
CI / checks (push) Waiting to run

This commit is contained in:
Alishahryar1 2026-04-22 17:34:00 -07:00
parent 4fdf7e8b7e
commit 55131019e1
3 changed files with 12 additions and 3 deletions

View file

@@ -487,6 +487,10 @@ Configure via `WHISPER_DEVICE` (`cpu` | `cuda` | `nvidia_nim`) and `WHISPER_MODE
| `DEEPSEEK_API_KEY` | DeepSeek API key | required for DeepSeek |
| `LM_STUDIO_BASE_URL` | LM Studio server URL | `http://localhost:1234/v1` |
| `LLAMACPP_BASE_URL` | llama.cpp server URL | `http://localhost:8080/v1` |
| `NVIDIA_NIM_PROXY` | Optional proxy URL for NVIDIA NIM requests (`http://...` or `socks5://...`) | `""` |
| `OPENROUTER_PROXY` | Optional proxy URL for OpenRouter requests (`http://...` or `socks5://...`) | `""` |
| `LMSTUDIO_PROXY` | Optional proxy URL for LM Studio requests (`http://...` or `socks5://...`) | `""` |
| `LLAMACPP_PROXY` | Optional proxy URL for llama.cpp requests (`http://...` or `socks5://...`) | `""` |
### Rate Limiting & Timeouts

View file

@@ -108,7 +108,7 @@ class Settings(BaseSettings):
# ==================== Model ====================
# All Claude model requests are mapped to this single model (fallback)
# Format: provider_type/model/name
model: str = "nvidia_nim/meta/llama3-70b-instruct"
model: str = "nvidia_nim/stepfun-ai/step-3.5-flash"
# Per-model overrides (optional, falls back to MODEL)
# Each can use a different provider
@@ -134,7 +134,7 @@ class Settings(BaseSettings):
# ==================== HTTP Client Timeouts ====================
http_read_timeout: float = Field(
default=300.0, validation_alias="HTTP_READ_TIMEOUT"
default=120.0, validation_alias="HTTP_READ_TIMEOUT"
)
http_write_timeout: float = Field(
default=10.0, validation_alias="HTTP_WRITE_TIMEOUT"

View file

@@ -16,16 +16,21 @@ class TestSettings:
settings = Settings()
assert settings is not None
def test_default_values(self):
def test_default_values(self, monkeypatch):
"""Test default values are set and have correct types."""
from config.settings import Settings
monkeypatch.delenv("MODEL", raising=False)
monkeypatch.delenv("HTTP_READ_TIMEOUT", raising=False)
monkeypatch.setitem(Settings.model_config, "env_file", ())
settings = Settings()
assert settings.model == "nvidia_nim/stepfun-ai/step-3.5-flash"
assert isinstance(settings.provider_rate_limit, int)
assert isinstance(settings.provider_rate_window, int)
assert isinstance(settings.nim.temperature, float)
assert isinstance(settings.fast_prefix_detection, bool)
assert isinstance(settings.enable_thinking, bool)
assert settings.http_read_timeout == 120.0
def test_get_settings_cached(self):
"""Test get_settings returns cached instance."""