Mirror of https://github.com/Alishahryar1/free-claude-code.git (synced 2026-04-28 03:20:01 +00:00)
## Summary

* add native DeepSeek provider support via the shared OpenAI-compatible provider base
* allow `deepseek/...` model prefixes in config validation
* add `DEEPSEEK_API_KEY` and `DEEPSEEK_BASE_URL` settings
* add DeepSeek entries to `.env.example` and `config/env.example`
* implement `DeepSeekProvider` and register it in provider dependencies
* add a DeepSeek request builder with DeepSeek-specific thinking payload handling
* preserve Anthropic thinking blocks as `reasoning_content` for DeepSeek-compatible continuation flows
* update `claude-pick` to discover DeepSeek models from the DeepSeek API
* document DeepSeek usage in `README.md`
* add tests for config validation, provider dependency wiring, request building, and streaming behavior

## Motivation

DeepSeek exposes an OpenAI-compatible API and can be used directly without routing through OpenRouter. This lets users spend their existing DeepSeek balance through the proxy while keeping the same Claude Code workflow and per-model provider mapping.

## Example

```dotenv
DEEPSEEK_API_KEY="sk-..."
DEEPSEEK_BASE_URL="https://api.deepseek.com"
MODEL_OPUS="deepseek/deepseek-reasoner"
MODEL_SONNET="deepseek/deepseek-chat"
MODEL_HAIKU="deepseek/deepseek-chat"
MODEL="deepseek/deepseek-chat"
```

---------

Co-authored-by: Alishahryar1 <alishahryar2@gmail.com>
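For orientation before the test file below: the `MODEL*` values above follow a `provider/model-name` convention, and the config layer splits each one into a provider type and a provider-side model name before resolving an incoming Claude model name to the configured override. The sketch below illustrates that split for the prefixes exercised in the tests that follow; `split_model_string` and `KNOWN_PROVIDERS` are hypothetical stand-ins, not the actual `Settings.parse_provider_type` / `parse_model_name` implementation.

```python
# Hypothetical helper illustrating the provider-prefix convention the tests rely on;
# the real parsing lives in config.settings.Settings and may differ in detail.
KNOWN_PROVIDERS = {"nvidia_nim", "open_router", "deepseek", "lmstudio", "llamacpp"}


def split_model_string(value: str) -> tuple[str, str]:
    """Split 'provider/model-name' into (provider_type, model_name)."""
    provider, _, model_name = value.partition("/")
    if not model_name or provider not in KNOWN_PROVIDERS:
        raise ValueError(f"Invalid provider prefix in {value!r}")
    return provider, model_name


# The model name may itself contain slashes; only the first segment is the provider.
assert split_model_string("deepseek/deepseek-chat") == ("deepseek", "deepseek-chat")
assert split_model_string("nvidia_nim/meta/llama") == ("nvidia_nim", "meta/llama")
```

The tests in `TestPerModelMapping` below assert this same behavior against the real `Settings` class.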
"""Tests for config/settings.py and config/nim.py"""
|
|
|
|
import pytest
|
|
from pydantic import ValidationError
|
|
|
|
from config.nim import NimSettings
|
|
|
|
|
|
class TestSettings:
|
|
"""Test Settings configuration."""
|
|
|
|
def test_settings_loads(self):
|
|
"""Ensure Settings can be instantiated."""
|
|
from config.settings import Settings
|
|
|
|
settings = Settings()
|
|
assert settings is not None
|
|
|
|
def test_default_values(self):
|
|
"""Test default values are set and have correct types."""
|
|
from config.settings import Settings
|
|
|
|
settings = Settings()
|
|
assert isinstance(settings.provider_rate_limit, int)
|
|
assert isinstance(settings.provider_rate_window, int)
|
|
assert isinstance(settings.nim.temperature, float)
|
|
assert isinstance(settings.fast_prefix_detection, bool)
|
|
assert isinstance(settings.enable_thinking, bool)
|
|
|
|
def test_get_settings_cached(self):
|
|
"""Test get_settings returns cached instance."""
|
|
from config.settings import get_settings
|
|
|
|
s1 = get_settings()
|
|
s2 = get_settings()
|
|
assert s1 is s2 # Same object (cached)
|
|
|
|
def test_empty_string_to_none_for_optional_int(self):
|
|
"""Test that empty string converts to None for optional int fields."""
|
|
from config.settings import Settings
|
|
|
|
# Settings should handle NVIDIA_NIM_SEED="" gracefully
|
|
settings = Settings()
|
|
assert settings.nim.seed is None or isinstance(settings.nim.seed, int)
|
|
|
|
def test_model_setting(self):
|
|
"""Test model setting exists and is a string."""
|
|
from config.settings import Settings
|
|
|
|
settings = Settings()
|
|
assert isinstance(settings.model, str)
|
|
assert len(settings.model) > 0
|
|
|
|
def test_base_url_constant(self):
|
|
"""Test NVIDIA_NIM_BASE_URL is a constant."""
|
|
from providers.nvidia_nim import NVIDIA_NIM_BASE_URL
|
|
|
|
assert NVIDIA_NIM_BASE_URL == "https://integrate.api.nvidia.com/v1"
|
|
|
|
def test_lm_studio_base_url_from_env(self, monkeypatch):
|
|
"""LM_STUDIO_BASE_URL env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("LM_STUDIO_BASE_URL", "http://custom:5678/v1")
|
|
settings = Settings()
|
|
assert settings.lm_studio_base_url == "http://custom:5678/v1"
|
|
|
|
def test_provider_rate_limit_from_env(self, monkeypatch):
|
|
"""PROVIDER_RATE_LIMIT env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("PROVIDER_RATE_LIMIT", "20")
|
|
settings = Settings()
|
|
assert settings.provider_rate_limit == 20
|
|
|
|
def test_provider_rate_window_from_env(self, monkeypatch):
|
|
"""PROVIDER_RATE_WINDOW env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("PROVIDER_RATE_WINDOW", "30")
|
|
settings = Settings()
|
|
assert settings.provider_rate_window == 30
|
|
|
|
def test_http_read_timeout_from_env(self, monkeypatch):
|
|
"""HTTP_READ_TIMEOUT env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("HTTP_READ_TIMEOUT", "600")
|
|
settings = Settings()
|
|
assert settings.http_read_timeout == 600.0
|
|
|
|
def test_http_write_timeout_from_env(self, monkeypatch):
|
|
"""HTTP_WRITE_TIMEOUT env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("HTTP_WRITE_TIMEOUT", "20")
|
|
settings = Settings()
|
|
assert settings.http_write_timeout == 20.0
|
|
|
|
def test_http_connect_timeout_from_env(self, monkeypatch):
|
|
"""HTTP_CONNECT_TIMEOUT env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("HTTP_CONNECT_TIMEOUT", "5")
|
|
settings = Settings()
|
|
assert settings.http_connect_timeout == 5.0
|
|
|
|
def test_enable_thinking_from_env(self, monkeypatch):
|
|
"""ENABLE_THINKING env var is loaded into settings."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("ENABLE_THINKING", "false")
|
|
settings = Settings()
|
|
assert settings.enable_thinking is False
|
|
|
|
def test_removed_nim_enable_thinking_raises(self, monkeypatch):
|
|
"""NIM_ENABLE_THINKING now fails fast with a migration message."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("NIM_ENABLE_THINKING", "false")
|
|
with pytest.raises(ValidationError, match="Rename it to ENABLE_THINKING"):
|
|
Settings()
|
|
|
|
|
|
# --- NimSettings Validation Tests ---
|
|
class TestNimSettingsValidBounds:
|
|
"""Test that valid values within bounds are accepted."""
|
|
|
|
@pytest.mark.parametrize("top_k", [-1, 0, 1, 100])
|
|
def test_top_k_valid(self, top_k):
|
|
"""top_k >= -1 should be accepted."""
|
|
s = NimSettings(top_k=top_k)
|
|
assert s.top_k == top_k
|
|
|
|
@pytest.mark.parametrize("temp", [0.0, 0.5, 1.0, 2.0])
|
|
def test_temperature_valid(self, temp):
|
|
s = NimSettings(temperature=temp)
|
|
assert s.temperature == temp
|
|
|
|
@pytest.mark.parametrize("top_p", [0.0, 0.5, 1.0])
|
|
def test_top_p_valid(self, top_p):
|
|
s = NimSettings(top_p=top_p)
|
|
assert s.top_p == top_p
|
|
|
|
def test_max_tokens_valid(self):
|
|
s = NimSettings(max_tokens=1)
|
|
assert s.max_tokens == 1
|
|
|
|
def test_min_tokens_valid(self):
|
|
s = NimSettings(min_tokens=0)
|
|
assert s.min_tokens == 0
|
|
|
|
@pytest.mark.parametrize("penalty", [-2.0, 0.0, 2.0])
|
|
def test_presence_penalty_valid(self, penalty):
|
|
s = NimSettings(presence_penalty=penalty)
|
|
assert s.presence_penalty == penalty
|
|
|
|
@pytest.mark.parametrize("penalty", [-2.0, 0.0, 2.0])
|
|
def test_frequency_penalty_valid(self, penalty):
|
|
s = NimSettings(frequency_penalty=penalty)
|
|
assert s.frequency_penalty == penalty
|
|
|
|
@pytest.mark.parametrize("min_p", [0.0, 0.5, 1.0])
|
|
def test_min_p_valid(self, min_p):
|
|
s = NimSettings(min_p=min_p)
|
|
assert s.min_p == min_p
|
|
|
|
|
|
class TestNimSettingsInvalidBounds:
|
|
"""Test that out-of-range values raise ValidationError."""
|
|
|
|
@pytest.mark.parametrize("top_k", [-2, -100])
|
|
def test_top_k_below_lower_bound(self, top_k):
|
|
with pytest.raises((ValidationError, ValueError)):
|
|
NimSettings(top_k=top_k)
|
|
|
|
def test_temperature_negative(self):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(temperature=-0.1)
|
|
|
|
@pytest.mark.parametrize("top_p", [-0.1, 1.1])
|
|
def test_top_p_out_of_range(self, top_p):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(top_p=top_p)
|
|
|
|
@pytest.mark.parametrize("penalty", [-2.1, 2.1])
|
|
def test_presence_penalty_out_of_range(self, penalty):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(presence_penalty=penalty)
|
|
|
|
@pytest.mark.parametrize("penalty", [-2.1, 2.1])
|
|
def test_frequency_penalty_out_of_range(self, penalty):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(frequency_penalty=penalty)
|
|
|
|
@pytest.mark.parametrize("min_p", [-0.1, 1.1])
|
|
def test_min_p_out_of_range(self, min_p):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(min_p=min_p)
|
|
|
|
@pytest.mark.parametrize("max_tokens", [0, -1])
|
|
def test_max_tokens_too_low(self, max_tokens):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(max_tokens=max_tokens)
|
|
|
|
def test_min_tokens_negative(self):
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(min_tokens=-1)
|
|
|
|
|
|
class TestNimSettingsValidators:
|
|
"""Test custom field validators in NimSettings."""
|
|
|
|
@pytest.mark.parametrize(
|
|
"seed_val,expected",
|
|
[("", None), (None, None), ("42", 42), (42, 42)],
|
|
ids=["empty_str", "none", "str_42", "int_42"],
|
|
)
|
|
def test_parse_optional_int(self, seed_val, expected):
|
|
s = NimSettings(seed=seed_val)
|
|
assert s.seed == expected
|
|
|
|
@pytest.mark.parametrize(
|
|
"stop_val,expected",
|
|
[("", None), ("STOP", "STOP"), (None, None)],
|
|
ids=["empty_str", "valid", "none"],
|
|
)
|
|
def test_parse_optional_str_stop(self, stop_val, expected):
|
|
s = NimSettings(stop=stop_val)
|
|
assert s.stop == expected
|
|
|
|
@pytest.mark.parametrize(
|
|
"chat_template_val,expected",
|
|
[("", None), ("template", "template")],
|
|
ids=["empty_str", "valid"],
|
|
)
|
|
def test_parse_optional_str_chat_template(self, chat_template_val, expected):
|
|
s = NimSettings(chat_template=chat_template_val)
|
|
assert s.chat_template == expected
|
|
|
|
def test_extra_forbid_rejects_unknown_field(self):
|
|
"""NimSettings with extra='forbid' rejects unknown fields."""
|
|
from typing import Any, cast
|
|
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(**cast(Any, {"unknown_field": "value"}))
|
|
|
|
def test_enable_thinking_field_removed(self):
|
|
"""NimSettings no longer accepts the removed thinking toggle."""
|
|
from typing import Any, cast
|
|
|
|
with pytest.raises(ValidationError):
|
|
NimSettings(**cast(Any, {"enable_thinking": True}))
|
|
|
|
|
|
class TestSettingsOptionalStr:
|
|
"""Test Settings parse_optional_str validator."""
|
|
|
|
def test_empty_telegram_token_to_none(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "")
|
|
s = Settings()
|
|
assert s.telegram_bot_token is None
|
|
|
|
def test_valid_telegram_token_preserved(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "abc123")
|
|
s = Settings()
|
|
assert s.telegram_bot_token == "abc123"
|
|
|
|
def test_empty_allowed_user_id_to_none(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("ALLOWED_TELEGRAM_USER_ID", "")
|
|
s = Settings()
|
|
assert s.allowed_telegram_user_id is None
|
|
|
|
def test_discord_bot_token_from_env(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("DISCORD_BOT_TOKEN", "discord_token_123")
|
|
s = Settings()
|
|
assert s.discord_bot_token == "discord_token_123"
|
|
|
|
def test_empty_discord_bot_token_to_none(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("DISCORD_BOT_TOKEN", "")
|
|
s = Settings()
|
|
assert s.discord_bot_token is None
|
|
|
|
def test_allowed_discord_channels_from_env(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("ALLOWED_DISCORD_CHANNELS", "111,222,333")
|
|
s = Settings()
|
|
assert s.allowed_discord_channels == "111,222,333"
|
|
|
|
def test_messaging_platform_from_env(self, monkeypatch):
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MESSAGING_PLATFORM", "discord")
|
|
s = Settings()
|
|
assert s.messaging_platform == "discord"
|
|
|
|
def test_whisper_device_auto_rejected(self, monkeypatch):
|
|
"""WHISPER_DEVICE=auto raises ValidationError (auto removed)."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("WHISPER_DEVICE", "auto")
|
|
with pytest.raises(ValidationError, match="whisper_device"):
|
|
Settings()
|
|
|
|
@pytest.mark.parametrize("device", ["cpu", "cuda"])
|
|
def test_whisper_device_valid(self, monkeypatch, device):
|
|
"""Valid whisper_device values are accepted."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("WHISPER_DEVICE", device)
|
|
s = Settings()
|
|
assert s.whisper_device == device
|
|
|
|
|
|
class TestPerModelMapping:
|
|
"""Test per-model fields and resolve_model()."""
|
|
|
|
def test_model_fields_default_none(self):
|
|
"""Per-model fields default to None."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
assert s.model_opus is None
|
|
assert s.model_sonnet is None
|
|
assert s.model_haiku is None
|
|
|
|
def test_model_opus_from_env(self, monkeypatch):
|
|
"""MODEL_OPUS env var is loaded."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MODEL_OPUS", "open_router/deepseek/deepseek-r1")
|
|
s = Settings()
|
|
assert s.model_opus == "open_router/deepseek/deepseek-r1"
|
|
|
|
@pytest.mark.parametrize(
|
|
"env_vars,expected_model,expected_haiku",
|
|
[
|
|
(
|
|
{"MODEL": "nvidia_nim/meta/llama3-70b-instruct"},
|
|
"nvidia_nim/meta/llama3-70b-instruct",
|
|
None,
|
|
),
|
|
(
|
|
{
|
|
"MODEL": "open_router/anthropic/claude-3-opus",
|
|
"MODEL_HAIKU": "open_router/anthropic/claude-3-haiku",
|
|
},
|
|
"open_router/anthropic/claude-3-opus",
|
|
"open_router/anthropic/claude-3-haiku",
|
|
),
|
|
({"MODEL": "deepseek/deepseek-chat"}, "deepseek/deepseek-chat", None),
|
|
({"MODEL": "lmstudio/qwen2.5-7b"}, "lmstudio/qwen2.5-7b", None),
|
|
({"MODEL": "llamacpp/local-model"}, "llamacpp/local-model", None),
|
|
],
|
|
)
|
|
def test_settings_models_from_env(
|
|
self, env_vars, expected_model, expected_haiku, monkeypatch
|
|
):
|
|
"""Test environment variables override model defaults."""
|
|
from config.settings import Settings
|
|
|
|
for k, v in env_vars.items():
|
|
monkeypatch.setenv(k, v)
|
|
|
|
s = Settings()
|
|
assert s.model == expected_model
|
|
assert s.model_haiku == expected_haiku
|
|
|
|
def test_model_sonnet_from_env(self, monkeypatch):
|
|
"""MODEL_SONNET env var is loaded."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MODEL_SONNET", "nvidia_nim/meta/llama-3.3-70b-instruct")
|
|
s = Settings()
|
|
assert s.model_sonnet == "nvidia_nim/meta/llama-3.3-70b-instruct"
|
|
|
|
def test_model_haiku_from_env(self, monkeypatch):
|
|
"""MODEL_HAIKU env var is loaded."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MODEL_HAIKU", "lmstudio/qwen2.5-7b")
|
|
s = Settings()
|
|
assert s.model_haiku == "lmstudio/qwen2.5-7b"
|
|
|
|
def test_model_opus_invalid_provider_raises(self, monkeypatch):
|
|
"""MODEL_OPUS with invalid provider prefix raises ValidationError."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MODEL_OPUS", "bad_provider/some-model")
|
|
with pytest.raises(ValidationError, match="Invalid provider"):
|
|
Settings()
|
|
|
|
def test_model_opus_no_slash_raises(self, monkeypatch):
|
|
"""MODEL_OPUS without provider prefix raises ValidationError."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MODEL_OPUS", "noprefix")
|
|
with pytest.raises(ValidationError, match="provider type"):
|
|
Settings()
|
|
|
|
def test_model_haiku_invalid_provider_raises(self, monkeypatch):
|
|
"""MODEL_HAIKU with invalid provider prefix raises ValidationError."""
|
|
from config.settings import Settings
|
|
|
|
monkeypatch.setenv("MODEL_HAIKU", "invalid/model")
|
|
with pytest.raises(ValidationError, match="Invalid provider"):
|
|
Settings()
|
|
|
|
def test_resolve_model_opus_override(self):
|
|
"""resolve_model returns model_opus for opus model names."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
s.model_opus = "open_router/deepseek/deepseek-r1"
|
|
assert (
|
|
s.resolve_model("claude-opus-4-20250514")
|
|
== "open_router/deepseek/deepseek-r1"
|
|
)
|
|
assert s.resolve_model("claude-3-opus") == "open_router/deepseek/deepseek-r1"
|
|
assert (
|
|
s.resolve_model("claude-3-opus-20240229")
|
|
== "open_router/deepseek/deepseek-r1"
|
|
)
|
|
|
|
def test_resolve_model_sonnet_override(self):
|
|
"""resolve_model returns model_sonnet for sonnet model names."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
s.model_sonnet = "nvidia_nim/meta/llama-3.3-70b-instruct"
|
|
assert (
|
|
s.resolve_model("claude-sonnet-4-20250514")
|
|
== "nvidia_nim/meta/llama-3.3-70b-instruct"
|
|
)
|
|
assert (
|
|
s.resolve_model("claude-3-5-sonnet-20241022")
|
|
== "nvidia_nim/meta/llama-3.3-70b-instruct"
|
|
)
|
|
|
|
def test_resolve_model_haiku_override(self):
|
|
"""resolve_model returns model_haiku for haiku model names."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
s.model_haiku = "lmstudio/qwen2.5-7b"
|
|
assert s.resolve_model("claude-3-haiku-20240307") == "lmstudio/qwen2.5-7b"
|
|
assert s.resolve_model("claude-3-5-haiku-20241022") == "lmstudio/qwen2.5-7b"
|
|
assert s.resolve_model("claude-haiku-4-20250514") == "lmstudio/qwen2.5-7b"
|
|
|
|
def test_resolve_model_fallback_when_override_not_set(self):
|
|
"""resolve_model falls back to MODEL when model override is None."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
s.model = "nvidia_nim/fallback-model"
|
|
# No model overrides set
|
|
assert s.resolve_model("claude-opus-4-20250514") == "nvidia_nim/fallback-model"
|
|
assert (
|
|
s.resolve_model("claude-sonnet-4-20250514") == "nvidia_nim/fallback-model"
|
|
)
|
|
assert s.resolve_model("claude-3-haiku-20240307") == "nvidia_nim/fallback-model"
|
|
|
|
def test_resolve_model_unknown_model_falls_back(self):
|
|
"""resolve_model falls back to MODEL for unrecognized model names."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
s.model = "nvidia_nim/fallback-model"
|
|
s.model_opus = "open_router/opus-model"
|
|
assert s.resolve_model("claude-2.1") == "nvidia_nim/fallback-model"
|
|
assert s.resolve_model("some-unknown-model") == "nvidia_nim/fallback-model"
|
|
|
|
def test_resolve_model_case_insensitive(self):
|
|
"""Model classification is case-insensitive."""
|
|
from config.settings import Settings
|
|
|
|
s = Settings()
|
|
s.model_opus = "open_router/opus-model"
|
|
assert s.resolve_model("Claude-OPUS-4") == "open_router/opus-model"
|
|
|
|
def test_parse_provider_type(self):
|
|
"""parse_provider_type extracts provider from model string."""
|
|
from config.settings import Settings
|
|
|
|
assert Settings.parse_provider_type("nvidia_nim/meta/llama") == "nvidia_nim"
|
|
assert Settings.parse_provider_type("open_router/deepseek/r1") == "open_router"
|
|
assert Settings.parse_provider_type("deepseek/deepseek-chat") == "deepseek"
|
|
assert Settings.parse_provider_type("lmstudio/qwen") == "lmstudio"
|
|
assert Settings.parse_provider_type("llamacpp/model") == "llamacpp"
|
|
|
|
def test_parse_model_name(self):
|
|
"""parse_model_name extracts model name from model string."""
|
|
from config.settings import Settings
|
|
|
|
assert Settings.parse_model_name("nvidia_nim/meta/llama") == "meta/llama"
|
|
assert Settings.parse_model_name("deepseek/deepseek-chat") == "deepseek-chat"
|
|
assert Settings.parse_model_name("lmstudio/qwen") == "qwen"
|
|
assert Settings.parse_model_name("llamacpp/model") == "model"
|