mirror of
https://github.com/Alishahryar1/free-claude-code.git
synced 2026-05-01 21:00:44 +00:00
Add option for an installable package (#75)
This commit is contained in:
parent
ebc8ac461f
commit
c5341ecbbe
5 changed files with 161 additions and 2 deletions
71
config/env.example
Normal file
71
config/env.example
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
# NVIDIA NIM Config
|
||||
NVIDIA_NIM_API_KEY=""
|
||||
|
||||
|
||||
# OpenRouter Config
|
||||
OPENROUTER_API_KEY=""
|
||||
|
||||
|
||||
# LM Studio Config (local provider, no API key required)
|
||||
LM_STUDIO_BASE_URL="http://localhost:1234/v1"
|
||||
|
||||
|
||||
# All Claude model requests are mapped to these models; the plain MODEL is the fallback
|
||||
# Format: provider_type/model_name (the model name may itself contain slashes)
|
||||
# Valid providers: "nvidia_nim" | "open_router" | "lmstudio"
|
||||
MODEL_OPUS="nvidia_nim/z-ai/glm4.7"
|
||||
MODEL_SONNET="open_router/arcee-ai/trinity-large-preview:free"
|
||||
MODEL_HAIKU="open_router/stepfun/step-3.5-flash:free"
|
||||
MODEL="nvidia_nim/z-ai/glm4.7"
|
||||
|
||||
|
||||
# Provider config
|
||||
PROVIDER_RATE_LIMIT=40
|
||||
PROVIDER_RATE_WINDOW=60
|
||||
PROVIDER_MAX_CONCURRENCY=5
|
||||
|
||||
|
||||
# HTTP client timeouts (seconds) for provider API requests
|
||||
HTTP_READ_TIMEOUT=120
|
||||
HTTP_WRITE_TIMEOUT=10
|
||||
HTTP_CONNECT_TIMEOUT=2
|
||||
|
||||
|
||||
# Messaging Platform: "telegram" | "discord"
|
||||
MESSAGING_PLATFORM="discord"
|
||||
MESSAGING_RATE_LIMIT=1
|
||||
MESSAGING_RATE_WINDOW=1
|
||||
|
||||
|
||||
# Voice Note Transcription
|
||||
VOICE_NOTE_ENABLED=false
|
||||
# WHISPER_DEVICE: "cpu" | "cuda" | "nvidia_nim"
|
||||
# - "cpu"/"cuda": Hugging Face transformers Whisper (offline, free; install with: uv sync --extra voice_local)
|
||||
# - "nvidia_nim": NVIDIA NIM Whisper via Riva gRPC (requires NVIDIA_NIM_API_KEY; install with: uv sync --extra voice)
|
||||
WHISPER_DEVICE="nvidia_nim"
|
||||
# WHISPER_MODEL:
|
||||
# - For cpu/cuda: Hugging Face ID or short name (tiny, base, small, medium, large-v2, large-v3, large-v3-turbo)
|
||||
# - For nvidia_nim: NVIDIA NIM model (e.g., "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3")
|
||||
# - For nvidia_nim, defaults to "openai/whisper-large-v3" for best performance
|
||||
WHISPER_MODEL="openai/whisper-large-v3"
|
||||
HF_TOKEN=""
|
||||
|
||||
|
||||
# Telegram Config
|
||||
TELEGRAM_BOT_TOKEN=""
|
||||
ALLOWED_TELEGRAM_USER_ID=""
|
||||
|
||||
|
||||
# Discord Config
|
||||
DISCORD_BOT_TOKEN=""
|
||||
ALLOWED_DISCORD_CHANNELS=""
|
||||
|
||||
|
||||
# Agent Config
|
||||
CLAUDE_WORKSPACE="./agent_workspace"
|
||||
ALLOWED_DIR=""
|
||||
FAST_PREFIX_DETECTION=true
|
||||
ENABLE_NETWORK_PROBE_MOCK=true
|
||||
ENABLE_TITLE_GENERATION_SKIP=true
|
||||
ENABLE_SUGGESTION_MODE_SKIP=true
|
||||
ENABLE_FILEPATH_EXTRACTION_MOCK=true
|
||||
Loading…
Add table
Add a link
Reference in a new issue