free-claude-code/pyproject.toml
Mauro Druwel de70700dde
feat: Use NVIDIA NIM ASR for audio transcription (#53)
## Summary
Added NVIDIA NIM as a second transcription option (alongside local
Whisper). This lets you transcribe voice notes using NVIDIA's cloud API
instead of running Whisper locally.

## What changed

- **Transcription**: Now supports two backends:

  - Local Whisper: Free, runs on your GPU/CPU (existing)
  - NVIDIA NIM: Cloud API via Riva gRPC (new)

- **Supported models**: 8 NVIDIA NIM models added (Parakeet variants for
different languages, Whisper Large V3)

---------

Co-authored-by: Alishahryar1 <alishahryar2@gmail.com>
2026-02-28 08:48:59 -08:00

90 lines
2.4 KiB
TOML

[project]
name = "free-claude-code"
version = "2.0.0"
description = "Middleware between Claude Code CLI (Anthropic API) and NVIDIA NIM"
readme = "README.md"
requires-python = ">=3.14.2"
# Core runtime dependencies as PEP 508 specifiers, sorted alphabetically.
dependencies = [
    "discord.py>=2.0.0",
    "fastapi[standard]>=0.115.11",
    "httpx>=0.25.0",
    "loguru>=0.7.0",
    "markdown-it-py>=3.0.0",
    "openai>=2.16.0",
    "pydantic>=2.0.0",
    "pydantic-settings>=2.12.0",
    "python-dotenv>=1.0.0",
    "python-telegram-bot>=21.0",
    "tiktoken>=0.7.0",
    "uvicorn>=0.34.0",
]

[project.optional-dependencies]
# Cloud transcription via NVIDIA NIM (Riva gRPC ASR).
voice = [
    "grpcio>=1.78.0",
    "grpcio-tools>=1.78.0",
    "nvidia-riva-client>=2.15.0",
]
# Local Whisper transcription on GPU/CPU. Note: extra names are normalized
# per PEP 685, so installers treat `voice-local` and `voice_local` identically.
voice-local = [
    "accelerate>=0.30.0",
    "librosa>=0.10.0",
    "torch>=2.0.0",
    "transformers>=4.45.0",
]

# Resolve torch from the PyTorch CUDA 13.0 wheel index instead of PyPI.
[tool.uv.sources]
torch = { index = "pytorch-cu130" }

# `explicit = true`: this index is only consulted for packages that opt in
# via [tool.uv.sources]; everything else still resolves from the default index.
[[tool.uv.index]]
name = "pytorch-cu130"
url = "https://download.pytorch.org/whl/cu130"
explicit = true

# PEP 735 dependency groups: development-only tooling, not shipped with the
# package. Entries sorted alphabetically.
[dependency-groups]
dev = [
    "pytest>=9.0.2",
    "pytest-asyncio>=1.3.0",
    "pytest-cov>=7.0.0",
    "ruff>=0.9.0",
    "ty>=0.0.1",
]

[tool.ruff]
target-version = "py314"
line-length = 88

[tool.ruff.lint]
select = [
    "E",    # pycodestyle errors
    "W",    # pycodestyle warnings
    "F",    # Pyflakes (undefined names, unused imports)
    "I",    # isort (import ordering)
    "UP",   # pyupgrade (modernise syntax for target Python version)
    "B",    # flake8-bugbear (common bugs and anti-patterns)
    "C4",   # flake8-comprehensions (idiomatic comprehensions)
    "SIM",  # flake8-simplify (simplifiable code patterns)
    "PERF", # Perflint (performance anti-patterns)
    "RUF",  # Ruff-specific rules
]
ignore = [
    "E501",   # line too long — enforced by the formatter instead
    "B008",   # FastAPI Depends() in argument defaults is intentional
    "RUF006", # fire-and-forget tasks intentionally not awaited
]

[tool.ruff.lint.isort]
known-first-party = ["api", "cli", "config", "messaging", "providers", "utils"]

[tool.ruff.format]
quote-style = "double"
indent-style = "space"
line-ending = "auto"
skip-magic-trailing-comma = false

[tool.ty.environment]
python-version = "3.14"

[tool.ty.analysis]
# Imports provided only by optional extras; don't report them as unresolved.
#   voice_local extra: torch, transformers, librosa (local Whisper transcription)
#   voice extra: nvidia-riva-client (`riva.client`) for the NVIDIA NIM provider
allowed-unresolved-imports = ["torch", "transformers", "librosa", "riva.client"]