Added local Speech-to-Text (STT) support using Faster-Whisper

Nabhan 2025-10-11 23:56:12 +05:00
parent 402039f02f
commit dad79674c8
8 changed files with 396 additions and 7 deletions

.env.example

@@ -31,12 +31,15 @@ TTS_SERVICE_API_KEY=
 # OPTIONAL: TTS Provider API Base
 TTS_SERVICE_API_BASE=
-# LiteLLM STT Provider: https://docs.litellm.ai/docs/audio_transcription#supported-providers
-STT_SERVICE=openai/whisper-1
-# Respective STT Service API Key
-STT_SERVICE_API_KEY=""
-# OPTIONAL: STT Provider API Base
-STT_SERVICE_API_BASE=
+# STT Service Configuration
+# Use 'local' for offline Faster-Whisper, or a LiteLLM provider
+STT_SERVICE=local
+# For local STT: Whisper model size (tiny, base, small, medium, large-v3)
+LOCAL_STT_MODEL=base
+# For a LiteLLM STT provider: https://docs.litellm.ai/docs/audio_transcription#supported-providers
+# STT_SERVICE=openai/whisper-1
+# STT_SERVICE_API_KEY=""
+# STT_SERVICE_API_BASE=
 FIRECRAWL_API_KEY=fcr-01J0000000000000000000000

app/config.py

@@ -102,10 +102,13 @@ class Config:
     TTS_SERVICE_API_BASE = os.getenv("TTS_SERVICE_API_BASE")
     TTS_SERVICE_API_KEY = os.getenv("TTS_SERVICE_API_KEY")
-    # Litellm STT Configuration
+    # STT Configuration
     STT_SERVICE = os.getenv("STT_SERVICE")
     STT_SERVICE_API_BASE = os.getenv("STT_SERVICE_API_BASE")
     STT_SERVICE_API_KEY = os.getenv("STT_SERVICE_API_KEY")
+    # Local STT Configuration
+    LOCAL_STT_MODEL = os.getenv("LOCAL_STT_MODEL", "base")
     # Validation Checks
     # Check embedding dimension
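
Not part of this commit: a hypothetical sketch of how a caller might branch on STT_SERVICE between the offline Faster-Whisper path and a hosted LiteLLM provider. The transcribe() helper is illustrative; the litellm.transcription usage follows the LiteLLM audio-transcription docs linked in the env file, and the real call sites in this codebase may differ.

from app.config import config


def transcribe(audio_path: str) -> str:
    if config.STT_SERVICE == "local":
        # Offline path: Faster-Whisper via the new STTService below
        from app.services.stt_service import stt_service
        return stt_service.transcribe_file(audio_path)["text"]
    # Hosted path: any provider LiteLLM supports, e.g. openai/whisper-1
    import litellm
    with open(audio_path, "rb") as f:
        response = litellm.transcription(
            model=config.STT_SERVICE,
            file=f,
            api_key=config.STT_SERVICE_API_KEY or None,
            api_base=config.STT_SERVICE_API_BASE or None,
        )
    return response.text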

app/routes/__init__.py

@@ -17,6 +17,7 @@ from .luma_add_connector_route import router as luma_add_connector_router
 from .podcasts_routes import router as podcasts_router
 from .search_source_connectors_routes import router as search_source_connectors_router
 from .search_spaces_routes import router as search_spaces_router
+from .stt_routes import router as stt_router

 router = APIRouter()
@@ -31,3 +32,4 @@ router.include_router(airtable_add_connector_router)
 router.include_router(luma_add_connector_router)
 router.include_router(llm_config_router)
 router.include_router(logs_router)
+router.include_router(stt_router)

app/routes/stt_routes.py

@@ -0,0 +1,96 @@
"""Speech-to-Text API routes."""
from fastapi import APIRouter, File, Form, HTTPException, UploadFile
from fastapi.responses import JSONResponse
from app.services.stt_service import stt_service
router = APIRouter(prefix="/stt", tags=["Speech-to-Text"])
@router.post("/transcribe")
async def transcribe_audio(
audio: UploadFile = File(..., description="Audio file to transcribe"),
language: str = Form(None, description="Optional language code (e.g., 'en', 'es')"),
):
"""Transcribe uploaded audio file to text."""
# Validate file type
if not audio.content_type or not audio.content_type.startswith("audio/"):
raise HTTPException(
status_code=400,
detail="File must be an audio file"
)
try:
# Read audio bytes
audio_bytes = await audio.read()
# Transcribe
result = stt_service.transcribe_bytes(
audio_bytes,
filename=audio.filename or "audio.wav",
language=language if language else None
)
return JSONResponse(content={
"success": True,
"transcription": result["text"],
"metadata": {
"detected_language": result["language"],
"language_probability": result["language_probability"],
"duration_seconds": result["duration"],
"model_size": stt_service.model_size,
}
})
except Exception as e:
raise HTTPException(
status_code=500,
detail=f"Transcription failed: {str(e)}"
)


@router.get("/models")
async def get_available_models():
    """Get list of available Whisper models."""
    return JSONResponse(content={
        "models": [
            {"name": "tiny", "size": "~39 MB", "speed": "fastest", "accuracy": "lowest"},
            {"name": "base", "size": "~74 MB", "speed": "fast", "accuracy": "good"},
            {"name": "small", "size": "~244 MB", "speed": "medium", "accuracy": "better"},
            {"name": "medium", "size": "~769 MB", "speed": "slow", "accuracy": "high"},
            {"name": "large-v3", "size": "~1550 MB", "speed": "slowest", "accuracy": "highest"},
        ],
        "current_model": stt_service.model_size,
        "note": "Models are downloaded automatically on first use"
    })


@router.post("/change-model")
async def change_model(model_size: str = Form(...)):
    """Change the active Whisper model."""
    valid_models = ["tiny", "base", "small", "medium", "large-v3"]
    if model_size not in valid_models:
        raise HTTPException(
            status_code=400,
            detail=f"Invalid model. Choose from: {valid_models}"
        )

    try:
        # Swap the model on the shared service instance. Rebinding the imported
        # name with `global` would only rebind it in this module, leaving other
        # importers on the old instance; mutating the instance keeps them in sync.
        stt_service.model_size = model_size
        stt_service._model = None  # drop cached model; it reloads lazily on next use

        return JSONResponse(content={
            "success": True,
            "message": f"Model changed to {model_size}",
            "note": "Model will be downloaded on next transcription if not cached"
        })
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Failed to change model: {str(e)}"
        )
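
Not part of this commit: a minimal client sketch for these routes using requests, assuming the router is mounted under /api/v1 (the base path used by the frontend component and test script below); sample.wav is a placeholder file.

import requests

BASE = "http://localhost:8000/api/v1/stt"

# List available models and show the active one
print(requests.get(f"{BASE}/models").json()["current_model"])

# Transcribe a local file; "language" is optional (auto-detected if omitted)
with open("sample.wav", "rb") as f:
    resp = requests.post(
        f"{BASE}/transcribe",
        files={"audio": ("sample.wav", f, "audio/wav")},
        data={"language": "en"},
    )
resp.raise_for_status()
print(resp.json()["transcription"])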

app/services/stt_service.py

@@ -0,0 +1,95 @@
"""Local Speech-to-Text service using Faster-Whisper."""
import os
import tempfile
from pathlib import Path
from typing import Optional
from faster_whisper import WhisperModel
from app.config import config
class STTService:
"""Local Speech-to-Text service using Faster-Whisper."""
def __init__(self, model_size: Optional[str] = None):
"""Initialize STT service with specified model size.
Args:
model_size: Whisper model size ("tiny", "base", "small", "medium", "large-v3")
"""
self.model_size = model_size or config.LOCAL_STT_MODEL
self._model: Optional[WhisperModel] = None

    def _get_model(self) -> WhisperModel:
        """Lazy load the Whisper model."""
        if self._model is None:
            # Use CPU with optimizations for better performance
            self._model = WhisperModel(
                self.model_size,
                device="cpu",
                compute_type="int8",  # Quantization for faster CPU inference
                num_workers=1,  # Single worker for stability
            )
        return self._model

    def transcribe_file(self, audio_path: str, language: Optional[str] = None) -> dict:
        """Transcribe audio file to text.

        Args:
            audio_path: Path to audio file
            language: Optional language code (e.g., "en", "es")

        Returns:
            Dict with transcription text and metadata
        """
        model = self._get_model()

        # Transcribe with optimized settings
        segments, info = model.transcribe(
            audio_path,
            language=language,
            beam_size=1,  # Faster inference
            best_of=1,  # Single pass
            temperature=0,  # Deterministic output
            vad_filter=True,  # Voice activity detection
            vad_parameters=dict(min_silence_duration_ms=500),
        )

        # Combine all segments (segments is a lazy generator; decoding happens here)
        text = " ".join(segment.text.strip() for segment in segments)

        return {
            "text": text,
            "language": info.language,
            "language_probability": info.language_probability,
            "duration": info.duration,
        }

    def transcribe_bytes(
        self,
        audio_bytes: bytes,
        filename: str = "audio.wav",
        language: Optional[str] = None,
    ) -> dict:
        """Transcribe audio from bytes.

        Args:
            audio_bytes: Audio file bytes
            filename: Original filename for format detection
            language: Optional language code

        Returns:
            Dict with transcription text and metadata
        """
        # Save bytes to temporary file
        suffix = Path(filename).suffix or ".wav"
        with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp_file:
            tmp_file.write(audio_bytes)
            tmp_path = tmp_file.name

        try:
            return self.transcribe_file(tmp_path, language)
        finally:
            # Clean up temp file
            os.unlink(tmp_path)


# Global STT service instance
stt_service = STTService()
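
Not part of this commit: a direct-usage sketch for backend callers. The first call lazily downloads and loads the configured model; sample.wav is a placeholder path.

from app.services.stt_service import stt_service

result = stt_service.transcribe_file("sample.wav", language="en")
print(result["text"])
print(f"{result['language']} (p={result['language_probability']:.2f}), {result['duration']:.1f}s")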

pyproject.toml

@@ -43,6 +43,7 @@ dependencies = [
"youtube-transcript-api>=1.0.3",
"litellm>=1.77.5",
"langchain-litellm>=0.2.3",
"faster-whisper>=1.1.0",
]
[dependency-groups]

AudioRecorder.tsx

@@ -0,0 +1,109 @@
"use client";
import { useState, useRef } from "react";
import { Button } from "@/components/ui/button";
import { Mic, Square, Upload } from "lucide-react";
interface AudioRecorderProps {
onTranscription: (text: string) => void;
apiUrl?: string;
}
export function AudioRecorder({ onTranscription, apiUrl = "/api/v1/stt" }: AudioRecorderProps) {
const [isRecording, setIsRecording] = useState(false);
const [isTranscribing, setIsTranscribing] = useState(false);
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const chunksRef = useRef<Blob[]>([]);

  const startRecording = async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];

      mediaRecorder.ondataavailable = (event) => {
        chunksRef.current.push(event.data);
      };

      mediaRecorder.onstop = async () => {
        // MediaRecorder typically emits audio/webm (not WAV), so label the blob
        // with the recorder's actual MIME type for correct format detection.
        const mimeType = mediaRecorder.mimeType || "audio/webm";
        const audioBlob = new Blob(chunksRef.current, { type: mimeType });
        await transcribeAudio(audioBlob, mimeType.includes("webm") ? "recording.webm" : "recording.wav");
        stream.getTracks().forEach(track => track.stop());
      };

      mediaRecorder.start();
      setIsRecording(true);
    } catch (error) {
      console.error("Error starting recording:", error);
    }
  };

  const stopRecording = () => {
    if (mediaRecorderRef.current && isRecording) {
      mediaRecorderRef.current.stop();
      setIsRecording(false);
    }
  };

  const transcribeAudio = async (audioBlob: Blob, filename = "recording.webm") => {
    setIsTranscribing(true);
    const formData = new FormData();
    formData.append("audio", audioBlob, filename);

    try {
      const response = await fetch(`${apiUrl}/transcribe`, {
        method: "POST",
        body: formData,
      });
      if (!response.ok) throw new Error("Transcription failed");
      const result = await response.json();
      onTranscription(result.transcription);
    } catch (error) {
      console.error("Transcription error:", error);
    } finally {
      setIsTranscribing(false);
    }
  };

  const handleFileUpload = async (event: React.ChangeEvent<HTMLInputElement>) => {
    const file = event.target.files?.[0];
    if (!file) return;
    // Preserve the original name so the backend can detect the format by suffix
    await transcribeAudio(file, file.name);
  };

  return (
    <div className="flex gap-2 items-center">
      <Button
        onClick={isRecording ? stopRecording : startRecording}
        disabled={isTranscribing}
        variant={isRecording ? "destructive" : "default"}
        size="sm"
      >
        {isRecording ? <Square className="w-4 h-4" /> : <Mic className="w-4 h-4" />}
        {isRecording ? "Stop" : "Record"}
      </Button>
      <label>
        <Button variant="outline" size="sm" disabled={isTranscribing} asChild>
          <span>
            <Upload className="w-4 h-4" />
            Upload
          </span>
        </Button>
        <input
          type="file"
          accept="audio/*"
          onChange={handleFileUpload}
          className="hidden"
        />
      </label>
      {isTranscribing && <span className="text-sm text-muted-foreground">Transcribing...</span>}
    </div>
  );
}

test_stt.py

@@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""Test script for local STT functionality."""

import os
import tempfile
import wave

import numpy as np
import requests


def create_test_audio():
    """Create a simple test audio file."""
    # Generate 3 seconds of sine wave at 440 Hz (the note A4)
    sample_rate = 16000
    duration = 3
    frequency = 440
    t = np.linspace(0, duration, int(sample_rate * duration), False)
    audio_data = np.sin(2 * np.pi * frequency * t) * 0.3

    # Convert to 16-bit PCM
    audio_data = (audio_data * 32767).astype(np.int16)

    # Save as WAV file
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as f:
        with wave.open(f.name, 'wb') as wav_file:
            wav_file.setnchannels(1)  # Mono
            wav_file.setsampwidth(2)  # 16-bit
            wav_file.setframerate(sample_rate)
            wav_file.writeframes(audio_data.tobytes())
    return f.name


def test_stt_api():
    """Test the STT API endpoint."""
    base_url = "http://localhost:8000/api/v1/stt"

    # Test 1: Get available models
    print("Testing /models endpoint...")
    response = requests.get(f"{base_url}/models")
    if response.status_code == 200:
        print("✓ Models endpoint working")
        print(f"Current model: {response.json()['current_model']}")
    else:
        print(f"✗ Models endpoint failed: {response.status_code}")
        return

    # Test 2: Create test audio and transcribe
    # Note: a pure sine wave contains no speech, so the transcription is
    # expected to be empty or near-empty; this only exercises the pipeline.
    print("\nTesting transcription...")
    audio_file = create_test_audio()
    try:
        with open(audio_file, 'rb') as f:
            files = {'audio': ('test.wav', f, 'audio/wav')}
            response = requests.post(f"{base_url}/transcribe", files=files)
        if response.status_code == 200:
            result = response.json()
            print("✓ Transcription successful")
            print(f"Text: {result['transcription']}")
            print(f"Language: {result['metadata']['detected_language']}")
            print(f"Duration: {result['metadata']['duration_seconds']:.2f}s")
        else:
            print(f"✗ Transcription failed: {response.status_code}")
            print(response.text)
    finally:
        os.unlink(audio_file)


if __name__ == "__main__":
    print("SurfSense STT Test")
    print("==================")
    print("Make sure the backend is running on localhost:8000")
    print()
    try:
        test_stt_api()
    except requests.exceptions.ConnectionError:
        print("✗ Cannot connect to backend. Is it running?")
    except Exception as e:
        print(f"✗ Test failed: {e}")