add async content processing

This commit is contained in:
LUIS NOVO 2024-11-11 17:32:35 -03:00
parent ac2ea9e554
commit 00f070a644
10 changed files with 541 additions and 395 deletions

View file

@ -1,4 +1,6 @@
import asyncio
import os
from functools import partial
from math import ceil
from loguru import logger
@ -11,90 +13,102 @@ from open_notebook.graphs.content_processing.state import ContentState
async def split_audio(input_file, segment_length_minutes=15, output_prefix=None):
    """
    Split an audio file into fixed-length MP3 segments asynchronously.

    The CPU-bound pydub work runs in the default thread-pool executor so the
    event loop stays responsive while the file is decoded and re-encoded.

    Args:
        input_file (str): Path to the input audio file.
        segment_length_minutes (int): Length of each segment in minutes.
        output_prefix (str): Prefix for output files (defaults to the input
            file's basename without extension).

    Returns:
        list: Paths of the created segment files, in playback order.
    """

    def _split(input_file, segment_length_minutes, output_prefix):
        # Work with an absolute path so segments land next to the source file.
        input_file_abs = os.path.abspath(input_file)
        output_dir = os.path.dirname(input_file_abs)
        os.makedirs(output_dir, exist_ok=True)

        # Default prefix: source filename without its extension.
        if output_prefix is None:
            output_prefix = os.path.splitext(os.path.basename(input_file_abs))[0]

        # Load the audio file (blocking; that is why we are in a worker thread).
        audio = AudioSegment.from_file(input_file_abs)

        # Segment length in milliseconds (pydub indexes audio by ms).
        segment_length_ms = segment_length_minutes * 60 * 1000
        total_segments = ceil(len(audio) / segment_length_ms)
        logger.debug(f"Splitting file: {input_file_abs} into {total_segments} segments")

        output_files = []
        for i in range(total_segments):
            start_time = i * segment_length_ms
            # The final segment may be shorter than segment_length_ms.
            end_time = min((i + 1) * segment_length_ms, len(audio))
            segment = audio[start_time:end_time]

            # Zero-padded index keeps lexicographic order == playback order.
            output_filename = f"{output_prefix}_{str(i+1).zfill(3)}.mp3"
            output_path = os.path.join(output_dir, output_filename)
            segment.export(output_path, format="mp3")
            output_files.append(output_path)
            logger.debug(f"Exported segment {i+1}/{total_segments}: {output_filename}")

        return output_files

    # Run CPU-bound audio processing in the default thread pool.
    # get_running_loop() is the supported way to reach the loop from inside a
    # coroutine; asyncio.get_event_loop() here is deprecated since Python 3.10.
    return await asyncio.get_running_loop().run_in_executor(
        None, partial(_split, input_file, segment_length_minutes, output_prefix)
    )
async def transcribe_audio_segment(audio_file, model):
    """
    Transcribe a single audio segment without blocking the event loop.

    Args:
        audio_file (str): Path to the audio segment file.
        model: Speech-to-text model exposing a blocking ``transcribe(path)``
            method.

    Returns:
        Whatever ``model.transcribe(audio_file)`` returns (typically str).
    """

    def _transcribe(audio_file, model):
        return model.transcribe(audio_file)

    # Run the blocking transcription call in the default thread pool.
    # get_running_loop() replaces asyncio.get_event_loop(), which is
    # deprecated inside coroutines since Python 3.10.
    return await asyncio.get_running_loop().run_in_executor(
        None, partial(_transcribe, audio_file, model)
    )
async def extract_audio(data: ContentState):
    """
    Transcribe the audio file referenced by ``data["file_path"]``.

    The file is split into segments, all segments are transcribed
    concurrently, and the temporary segment files are removed afterwards
    regardless of success or failure.

    Args:
        data (ContentState): Graph state containing a ``file_path`` key.

    Returns:
        dict: ``{"content": <space-joined transcription of all segments>}``.

    Raises:
        Exception: Re-raised after logging if splitting or transcription fails.
    """
    SPEECH_TO_TEXT_MODEL = model_manager.speech_to_text
    input_audio_path = data.get("file_path")
    audio_files = []
    try:
        # Split audio into segments (runs in a worker thread).
        audio_files = await split_audio(input_audio_path)

        # Transcribe all segments concurrently; gather preserves input order,
        # so the join below keeps segments in playback order.
        transcribe_tasks = [
            transcribe_audio_segment(audio_file, SPEECH_TO_TEXT_MODEL)
            for audio_file in audio_files
        ]
        transcriptions = await asyncio.gather(*transcribe_tasks)

        return {"content": " ".join(transcriptions)}
    except Exception as e:
        logger.error(f"Error transcribing audio: {str(e)}")
        logger.exception(e)
        raise
    finally:
        # Best-effort cleanup of temporary segment files, off the event loop.
        def _cleanup(files):
            for file in files:
                try:
                    os.remove(file)
                except OSError as e:
                    logger.error(f"Error removing temporary file {file}: {str(e)}")

        # get_running_loop() replaces the deprecated get_event_loop() call.
        await asyncio.get_running_loop().run_in_executor(
            None, partial(_cleanup, audio_files)
        )