# Mirror of https://github.com/Alishahryar1/free-claude-code.git
# Synced 2026-04-28 03:20:01 +00:00

# NVIDIA NIM Config
NVIDIA_NIM_API_KEY=""

# OpenRouter Config
OPENROUTER_API_KEY=""

# DeepSeek Config
DEEPSEEK_API_KEY=""

# LM Studio Config (local provider, no API key required)
LM_STUDIO_BASE_URL="http://localhost:1234/v1"

# Llama.cpp Config (local provider, no API key required)
LLAMACPP_BASE_URL="http://localhost:8080/v1"

# All Claude model requests are mapped to these models; the plain MODEL is the fallback
# Format: provider_type/model/name
# Valid providers: "nvidia_nim" | "open_router" | "deepseek" | "lmstudio" | "llamacpp"
MODEL_OPUS=
MODEL_SONNET=
MODEL_HAIKU=
MODEL="nvidia_nim/z-ai/glm4.7"

# Thinking output
# Global switch for provider reasoning requests and Claude thinking blocks.
# Set false to suppress thinking across NIM, OpenRouter, LM Studio, and llama.cpp.
ENABLE_THINKING=true

# Provider config
# Per-provider proxy support: http and socks5, example: "http://username:password@host:port"
NVIDIA_NIM_PROXY=""
OPENROUTER_PROXY=""
LMSTUDIO_PROXY=""
LLAMACPP_PROXY=""

PROVIDER_RATE_LIMIT=40
PROVIDER_RATE_WINDOW=60
PROVIDER_MAX_CONCURRENCY=5

# HTTP client timeouts (seconds) for provider API requests
HTTP_READ_TIMEOUT=120
HTTP_WRITE_TIMEOUT=10
HTTP_CONNECT_TIMEOUT=2

# Optional server API key (Anthropic-style)
ANTHROPIC_AUTH_TOKEN=

# Messaging Platform: "telegram" | "discord"
MESSAGING_PLATFORM="discord"
MESSAGING_RATE_LIMIT=1
MESSAGING_RATE_WINDOW=1

# Voice Note Transcription
VOICE_NOTE_ENABLED=false
# WHISPER_DEVICE: "cpu" | "cuda" | "nvidia_nim"
# - "cpu"/"cuda": Hugging Face transformers Whisper (offline, free; install with: uv sync --extra voice_local)
# - "nvidia_nim": NVIDIA NIM Whisper via Riva gRPC (requires NVIDIA_NIM_API_KEY; install with: uv sync --extra voice)
WHISPER_DEVICE="nvidia_nim"
# WHISPER_MODEL:
# - For cpu/cuda: Hugging Face ID or short name (tiny, base, small, medium, large-v2, large-v3, large-v3-turbo)
# - For nvidia_nim: NVIDIA NIM model (e.g., "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3")
# - For nvidia_nim, defaults to "openai/whisper-large-v3" for best performance
WHISPER_MODEL="openai/whisper-large-v3"
HF_TOKEN=""

# Telegram Config
TELEGRAM_BOT_TOKEN=""
ALLOWED_TELEGRAM_USER_ID=""

# Discord Config
DISCORD_BOT_TOKEN=""
ALLOWED_DISCORD_CHANNELS=""

# Agent Config
CLAUDE_WORKSPACE="./agent_workspace"
ALLOWED_DIR=""
FAST_PREFIX_DETECTION=true
ENABLE_NETWORK_PROBE_MOCK=true
ENABLE_TITLE_GENERATION_SKIP=true
ENABLE_SUGGESTION_MODE_SKIP=true
ENABLE_FILEPATH_EXTRACTION_MOCK=true