mirror of
https://github.com/Alishahryar1/free-claude-code.git
synced 2026-04-28 03:20:01 +00:00
## Summary

* add native DeepSeek provider support via the shared OpenAI-compatible provider base
* allow `deepseek/...` model prefixes in config validation
* add `DEEPSEEK_API_KEY` and `DEEPSEEK_BASE_URL` settings
* add DeepSeek entries to `.env.example` and `config/env.example`
* implement `DeepSeekProvider` and register it in provider dependencies
* add a DeepSeek request builder with DeepSeek-specific thinking payload handling
* preserve Anthropic thinking blocks as `reasoning_content` for DeepSeek-compatible continuation flows
* update `claude-pick` to discover DeepSeek models from the DeepSeek API
* document DeepSeek usage in `README.md`
* add tests for config validation, provider dependency wiring, request building, and streaming behavior

## Motivation

DeepSeek exposes an OpenAI-compatible API and can be used directly without routing through OpenRouter. This lets users spend their existing DeepSeek balance through the proxy while keeping the same Claude Code workflow and per-model provider mapping.

## Example

```dotenv
DEEPSEEK_API_KEY="sk-..."
DEEPSEEK_BASE_URL="https://api.deepseek.com"

MODEL_OPUS="deepseek/deepseek-reasoner"
MODEL_SONNET="deepseek/deepseek-chat"
MODEL_HAIKU="deepseek/deepseek-chat"
MODEL="deepseek/deepseek-chat"
```

---------

Co-authored-by: Alishahryar1 <alishahryar2@gmail.com>
75 lines
2 KiB
Text
75 lines
2 KiB
Text
# NVIDIA NIM Config
|
|
NVIDIA_NIM_API_KEY=""
|
|
|
|
|
|
# OpenRouter Config
|
|
OPENROUTER_API_KEY=""
|
|
|
|
|
|
# DeepSeek Config
|
|
DEEPSEEK_API_KEY=""
|
|
|
|
|
|
# LM Studio Config (local provider, no API key required)
|
|
LM_STUDIO_BASE_URL="http://localhost:1234/v1"
|
|
|
|
|
|
# All Claude model requests are mapped to these models, plain model is fallback
|
|
# Format: provider_type/model_name (the model name itself may contain "/", e.g. "open_router/arcee-ai/trinity-large-preview:free")
|
|
# Valid providers: "nvidia_nim" | "open_router" | "deepseek" | "lmstudio" | "llamacpp"
|
|
MODEL_OPUS="nvidia_nim/z-ai/glm4.7"
|
|
MODEL_SONNET="open_router/arcee-ai/trinity-large-preview:free"
|
|
MODEL_HAIKU="open_router/stepfun/step-3.5-flash:free"
|
|
MODEL="nvidia_nim/z-ai/glm4.7"
|
|
|
|
|
|
# Provider config
|
|
PROVIDER_RATE_LIMIT=40
|
|
PROVIDER_RATE_WINDOW=60
|
|
PROVIDER_MAX_CONCURRENCY=5
|
|
|
|
|
|
# HTTP client timeouts (seconds) for provider API requests
|
|
HTTP_READ_TIMEOUT=120
|
|
HTTP_WRITE_TIMEOUT=10
|
|
HTTP_CONNECT_TIMEOUT=2
|
|
|
|
|
|
# Messaging Platform: "telegram" | "discord"
|
|
MESSAGING_PLATFORM="discord"
|
|
MESSAGING_RATE_LIMIT=1
|
|
MESSAGING_RATE_WINDOW=1
|
|
|
|
|
|
# Voice Note Transcription
|
|
VOICE_NOTE_ENABLED=false
|
|
# WHISPER_DEVICE: "cpu" | "cuda" | "nvidia_nim"
|
|
# - "cpu"/"cuda": Hugging Face transformers Whisper (offline, free; install with: uv sync --extra voice_local)
|
|
# - "nvidia_nim": NVIDIA NIM Whisper via Riva gRPC (requires NVIDIA_NIM_API_KEY; install with: uv sync --extra voice)
|
|
WHISPER_DEVICE="nvidia_nim"
|
|
# WHISPER_MODEL:
|
|
# - For cpu/cuda: Hugging Face ID or short name (tiny, base, small, medium, large-v2, large-v3, large-v3-turbo)
|
|
# - For nvidia_nim: NVIDIA NIM model (e.g., "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3")
|
|
# - For nvidia_nim, "openai/whisper-large-v3" is the recommended default for best performance
|
|
WHISPER_MODEL="openai/whisper-large-v3"
|
|
HF_TOKEN=""
|
|
|
|
|
|
# Telegram Config
|
|
TELEGRAM_BOT_TOKEN=""
|
|
ALLOWED_TELEGRAM_USER_ID=""
|
|
|
|
|
|
# Discord Config
|
|
DISCORD_BOT_TOKEN=""
|
|
ALLOWED_DISCORD_CHANNELS=""
|
|
|
|
|
|
# Agent Config
|
|
CLAUDE_WORKSPACE="./agent_workspace"
|
|
ALLOWED_DIR=""
|
|
FAST_PREFIX_DETECTION=true
|
|
ENABLE_NETWORK_PROBE_MOCK=true
|
|
ENABLE_TITLE_GENERATION_SKIP=true
|
|
ENABLE_SUGGESTION_MODE_SKIP=true
|
|
ENABLE_FILEPATH_EXTRACTION_MOCK=true
|