free-claude-code/config/settings.py
arssing 2fe15bd2cd
feat: add proxy support for httpx clients (#125)
Add proxy support for providers, following the httpx
[proxy docs](https://www.python-httpx.org/advanced/proxies/):

- Add per-provider proxy support (HTTP and SOCKS5) for all four providers:
  nvidia_nim, open_router, lmstudio, llamacpp
- Each provider gets its own env var (NVIDIA_NIM_PROXY, OPENROUTER_PROXY,
  LMSTUDIO_PROXY, LLAMACPP_PROXY) for independent proxy configuration

---------

Co-authored-by: Alishahryar1 <alishahryar2@gmail.com>
2026-04-22 17:06:16 -07:00


"""Centralized configuration using Pydantic Settings."""
import os
from collections.abc import Mapping
from functools import lru_cache
from pathlib import Path
from typing import Any
from pydantic import Field, field_validator, model_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
from .nim import NimSettings


def _env_files() -> tuple[Path, ...]:
    """Return env file paths in priority order (later overrides earlier)."""
    files: list[Path] = [
        Path.home() / ".config" / "free-claude-code" / ".env",
        Path(".env"),
    ]
    if explicit := os.environ.get("FCC_ENV_FILE"):
        files.append(Path(explicit))
    return tuple(files)
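
# Illustrative precedence (a comment sketch, not part of the module's logic):
# pydantic-settings reads the files above in order, so a key in ./.env
# overrides the same key in ~/.config/free-claude-code/.env, and the file
# named by FCC_ENV_FILE overrides both. Real process environment variables
# still win over every env file, e.g.
#
#   FCC_ENV_FILE=/tmp/override.env PORT=9000 python -m server  # hypothetical entrypoint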


def _configured_env_files(model_config: Mapping[str, Any]) -> tuple[Path, ...]:
    """Return the currently configured env files for Settings."""
    configured = model_config.get("env_file")
    if configured is None:
        return ()
    if isinstance(configured, (str, Path)):
        return (Path(configured),)
    return tuple(Path(item) for item in configured)


def _env_file_contains_key(path: Path, key: str) -> bool:
    """Check whether a dotenv-style file defines the given key."""
    if not path.is_file():
        return False
    try:
        for line in path.read_text(encoding="utf-8").splitlines():
            stripped = line.strip()
            if not stripped or stripped.startswith("#"):
                continue
            if stripped.startswith("export "):
                stripped = stripped[7:].lstrip()
            name, sep, _value = stripped.partition("=")
            if sep and name.strip() == key:
                return True
    except OSError:
        return False
    return False
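
# Illustrative dotenv lines (a hypothetical file, for comparison only):
# _env_file_contains_key(path, "NIM_ENABLE_THINKING") matches both
#
#   NIM_ENABLE_THINKING=true
#   export NIM_ENABLE_THINKING=true
#
# while blank lines, comments ("# NIM_ENABLE_THINKING=true"), and lines
# without an "=" are skipped.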


def _removed_env_var_message(model_config: Mapping[str, Any]) -> str | None:
    """Return a migration error for removed env vars, if present."""
    removed_key = "NIM_ENABLE_THINKING"
    replacement = "ENABLE_THINKING"
    if removed_key in os.environ:
        return (
            f"{removed_key} has been removed in this release. "
            f"Rename it to {replacement}."
        )
    for env_file in _configured_env_files(model_config):
        if _env_file_contains_key(env_file, removed_key):
            return (
                f"{removed_key} has been removed in this release. "
                f"Rename it to {replacement}. Found in {env_file}."
            )
    return None


class Settings(BaseSettings):
    """Application settings loaded from environment variables."""

    # ==================== OpenRouter Config ====================
    open_router_api_key: str = Field(default="", validation_alias="OPENROUTER_API_KEY")

    # ==================== DeepSeek Config ====================
    deepseek_api_key: str = Field(default="", validation_alias="DEEPSEEK_API_KEY")

    # ==================== Messaging Platform Selection ====================
    # Valid: "telegram" | "discord"
    messaging_platform: str = Field(
        default="discord", validation_alias="MESSAGING_PLATFORM"
    )

    # ==================== NVIDIA NIM Config ====================
    nvidia_nim_api_key: str = ""

    # ==================== LM Studio Config ====================
    lm_studio_base_url: str = Field(
        default="http://localhost:1234/v1",
        validation_alias="LM_STUDIO_BASE_URL",
    )

    # ==================== Llama.cpp Config ====================
    llamacpp_base_url: str = Field(
        default="http://localhost:8080/v1",
        validation_alias="LLAMACPP_BASE_URL",
    )

    # ==================== Model ====================
    # All Claude model requests are mapped to this single model (fallback).
    # Format: provider_type/model/name
    model: str = "nvidia_nim/meta/llama3-70b-instruct"

    # Per-model overrides (optional; each falls back to MODEL).
    # Each can use a different provider.
    model_opus: str | None = Field(default=None, validation_alias="MODEL_OPUS")
    model_sonnet: str | None = Field(default=None, validation_alias="MODEL_SONNET")
    model_haiku: str | None = Field(default=None, validation_alias="MODEL_HAIKU")

    # ==================== Per-Provider Proxy ====================
    nvidia_nim_proxy: str = Field(default="", validation_alias="NVIDIA_NIM_PROXY")
    open_router_proxy: str = Field(default="", validation_alias="OPENROUTER_PROXY")
    lmstudio_proxy: str = Field(default="", validation_alias="LMSTUDIO_PROXY")
    llamacpp_proxy: str = Field(default="", validation_alias="LLAMACPP_PROXY")
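
    # Illustrative proxy values (see https://www.python-httpx.org/advanced/proxies/):
    #
    #   OPENROUTER_PROXY=http://localhost:8030
    #   NVIDIA_NIM_PROXY=socks5://user:pass@localhost:1080   # SOCKS needs httpx[socks]
    #
    # A provider would then pass its value through to the client, e.g.
    # httpx.AsyncClient(proxy=settings.open_router_proxy or None); the actual
    # client construction lives in the provider modules, not in this file.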

    # ==================== Provider Rate Limiting ====================
    provider_rate_limit: int = Field(default=40, validation_alias="PROVIDER_RATE_LIMIT")
    provider_rate_window: int = Field(
        default=60, validation_alias="PROVIDER_RATE_WINDOW"
    )
    provider_max_concurrency: int = Field(
        default=5, validation_alias="PROVIDER_MAX_CONCURRENCY"
    )
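
    # Reading of the defaults above (illustration only): at most 40 requests
    # per rolling 60-second window per provider, with no more than 5 requests
    # in flight at once. A sliding-window counter plus asyncio.Semaphore(5)
    # would satisfy both constraints; the enforcing code lives with the
    # providers, not here.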

    enable_thinking: bool = Field(default=True, validation_alias="ENABLE_THINKING")

    # ==================== HTTP Client Timeouts ====================
    http_read_timeout: float = Field(
        default=300.0, validation_alias="HTTP_READ_TIMEOUT"
    )
    http_write_timeout: float = Field(
        default=10.0, validation_alias="HTTP_WRITE_TIMEOUT"
    )
    http_connect_timeout: float = Field(
        default=2.0, validation_alias="HTTP_CONNECT_TIMEOUT"
    )
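
    # Sketch of how a provider might map these fields onto httpx (an
    # assumption about the call site, not this module's code):
    #
    #   timeout = httpx.Timeout(
    #       connect=settings.http_connect_timeout,  # fail fast on dead hosts
    #       read=settings.http_read_timeout,        # generous for long generations
    #       write=settings.http_write_timeout,
    #       pool=settings.http_connect_timeout,
    #   )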

    # ==================== Fast Prefix Detection ====================
    fast_prefix_detection: bool = True

    # ==================== Optimizations ====================
    enable_network_probe_mock: bool = True
    enable_title_generation_skip: bool = True
    enable_suggestion_mode_skip: bool = True
    enable_filepath_extraction_mock: bool = True

    # ==================== NIM Settings ====================
    nim: NimSettings = Field(default_factory=NimSettings)

    # ==================== Voice Note Transcription ====================
    voice_note_enabled: bool = Field(
        default=True, validation_alias="VOICE_NOTE_ENABLED"
    )
    # Device: "cpu" | "cuda" | "nvidia_nim"
    # - "cpu"/"cuda": local Whisper (requires the voice_local extra: uv sync --extra voice_local)
    # - "nvidia_nim": NVIDIA NIM Whisper API (requires the voice extra: uv sync --extra voice)
    whisper_device: str = Field(default="cpu", validation_alias="WHISPER_DEVICE")
    # Whisper model ID or short name (local Whisper) or NVIDIA NIM model (nvidia_nim)
    # Local Whisper: "tiny", "base", "small", "medium", "large-v2", "large-v3", "large-v3-turbo"
    # NVIDIA NIM: "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3", etc.
    whisper_model: str = Field(default="base", validation_alias="WHISPER_MODEL")
    # Hugging Face token for faster model downloads (optional, local Whisper only)
    hf_token: str = Field(default="", validation_alias="HF_TOKEN")

    # ==================== Bot Wrapper Config ====================
    telegram_bot_token: str | None = None
    allowed_telegram_user_id: str | None = None
    discord_bot_token: str | None = Field(
        default=None, validation_alias="DISCORD_BOT_TOKEN"
    )
    allowed_discord_channels: str | None = Field(
        default=None, validation_alias="ALLOWED_DISCORD_CHANNELS"
    )
    claude_workspace: str = "./agent_workspace"
    allowed_dir: str = ""

    # ==================== Server ====================
    host: str = "0.0.0.0"
    port: int = 8082
    log_file: str = "server.log"
    # Optional server API key to protect endpoints (Anthropic-style).
    # Set via env `ANTHROPIC_AUTH_TOKEN`. When empty, no auth is required.
    anthropic_auth_token: str = Field(
        default="", validation_alias="ANTHROPIC_AUTH_TOKEN"
    )

    @model_validator(mode="before")
    @classmethod
    def reject_removed_env_vars(cls, data: Any) -> Any:
        """Fail fast when removed environment variables are still configured."""
        if message := _removed_env_var_message(cls.model_config):
            raise ValueError(message)
        return data

    # Handle empty strings for optional string fields
    @field_validator(
        "telegram_bot_token",
        "allowed_telegram_user_id",
        "discord_bot_token",
        "allowed_discord_channels",
        mode="before",
    )
    @classmethod
    def parse_optional_str(cls, v: Any) -> Any:
        if v == "":
            return None
        return v

    @field_validator("whisper_device")
    @classmethod
    def validate_whisper_device(cls, v: str) -> str:
        if v not in ("cpu", "cuda", "nvidia_nim"):
            raise ValueError(
                f"whisper_device must be 'cpu', 'cuda', or 'nvidia_nim', got {v!r}"
            )
        return v
@field_validator("model", "model_opus", "model_sonnet", "model_haiku")
@classmethod
def validate_model_format(cls, v: str | None) -> str | None:
if v is None:
return None
valid_providers = (
"nvidia_nim",
"open_router",
"deepseek",
"lmstudio",
"llamacpp",
)
if "/" not in v:
raise ValueError(
f"Model must be prefixed with provider type. "
f"Valid providers: {', '.join(valid_providers)}. "
f"Format: provider_type/model/name"
)
provider = v.split("/", 1)[0]
if provider not in valid_providers:
raise ValueError(
f"Invalid provider: '{provider}'. "
f"Supported: 'nvidia_nim', 'open_router', 'deepseek', 'lmstudio', 'llamacpp'"
)
return v
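
    # Illustrative inputs for the validator above (examples only):
    #
    #   "nvidia_nim/meta/llama3-70b-instruct"  -> accepted (the model name may
    #                                             itself contain "/")
    #   "llama3-70b-instruct"                  -> ValueError: no provider prefix
    #   "foo/some-model"                       -> ValueError: unknown provider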

    @model_validator(mode="after")
    def check_nvidia_nim_api_key(self) -> Settings:
        if (
            self.voice_note_enabled
            and self.whisper_device == "nvidia_nim"
            and not self.nvidia_nim_api_key.strip()
        ):
            raise ValueError(
                "NVIDIA_NIM_API_KEY is required when WHISPER_DEVICE is 'nvidia_nim'. "
                "Set it in your .env file."
            )
        return self

    @property
    def provider_type(self) -> str:
        """Extract provider type from the default model string."""
        return self.model.split("/", 1)[0]

    @property
    def model_name(self) -> str:
        """Extract the actual model name from the default model string."""
        return self.model.split("/", 1)[1]

    def resolve_model(self, claude_model_name: str) -> str:
        """Resolve a Claude model name to the configured provider/model string.

        Classifies the incoming Claude model (opus/sonnet/haiku) and returns
        the model-specific override if configured, otherwise the fallback MODEL.
        """
        name_lower = claude_model_name.lower()
        if "opus" in name_lower and self.model_opus is not None:
            return self.model_opus
        if "haiku" in name_lower and self.model_haiku is not None:
            return self.model_haiku
        if "sonnet" in name_lower and self.model_sonnet is not None:
            return self.model_sonnet
        return self.model
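
    # Illustrative resolution, assuming only MODEL_HAIKU is set, e.g. to
    # "lmstudio/qwen2.5-7b-instruct" (a hypothetical value):
    #
    #   settings.resolve_model("claude-3-5-haiku-20241022")
    #   # -> "lmstudio/qwen2.5-7b-instruct"  (haiku override applies)
    #   settings.resolve_model("claude-sonnet-4-20250514")
    #   # -> settings.model                  (no sonnet override; fallback MODEL)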

    @staticmethod
    def parse_provider_type(model_string: str) -> str:
        """Extract provider type from any 'provider/model' string."""
        return model_string.split("/", 1)[0]

    @staticmethod
    def parse_model_name(model_string: str) -> str:
        """Extract model name from any 'provider/model' string."""
        return model_string.split("/", 1)[1]

    model_config = SettingsConfigDict(
        env_file=_env_files(),
        env_file_encoding="utf-8",
        extra="ignore",
    )


@lru_cache
def get_settings() -> Settings:
    """Get cached settings instance."""
    return Settings()
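
# Usage sketch: import the accessor rather than instantiating Settings
# directly, so every caller shares one validated, cached instance (the
# package path below is an assumption based on the file location):
#
#   from config.settings import get_settings
#
#   settings = get_settings()
#   print(settings.provider_type, settings.model_name)
#
# In tests, clear the lru_cache after monkeypatching the environment:
#
#   get_settings.cache_clear()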