Mirror of https://github.com/MODSetter/SurfSense.git, synced 2025-09-10 06:14:37 +00:00
fix: Resolve merge conflict in documents_routes.py
- Integrated Docling ETL service with new task logging system
- Maintained consistent logging pattern across all ETL services
- Added progress and success/failure logging for Docling processing
Commit f117d94ef7
34 changed files with 4160 additions and 520 deletions
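Every ETL task touched by this commit follows the same logging pattern: construct a TaskLoggingService bound to the session and search space, open an entry with log_task_start, report each stage through log_task_progress, and close it with log_task_success or log_task_failure. The sketch below is a minimal stand-in for that interface, inferred only from the call sites visible in the hunks; the real service persists entries to the database, so the entry fields, return type, and in-memory storage here are assumptions for illustration.

import logging
from datetime import datetime, timezone
from typing import Any, Optional


class TaskLogEntry:
    """In-memory stand-in for a persisted task log row (hypothetical)."""

    def __init__(self, task_name: str, source: str, message: str, metadata: dict[str, Any]):
        self.task_name = task_name
        self.source = source
        self.message = message
        self.metadata = metadata
        self.status = "in_progress"
        self.started_at = datetime.now(timezone.utc)


class TaskLoggingService:
    """Minimal sketch of the interface used in this diff (not the actual implementation)."""

    def __init__(self, session: Any, search_space_id: int):
        self.session = session  # AsyncSession in the real service
        self.search_space_id = search_space_id

    async def log_task_start(self, task_name: str, source: str, message: str,
                             metadata: Optional[dict[str, Any]] = None) -> TaskLogEntry:
        # Create and return the entry that later progress/success/failure calls update.
        entry = TaskLogEntry(task_name, source, message, metadata or {})
        logging.info("[%s] %s", task_name, message)
        return entry

    async def log_task_progress(self, entry: TaskLogEntry, message: str,
                                metadata: Optional[dict[str, Any]] = None) -> None:
        entry.metadata.update(metadata or {})
        logging.info("[%s] %s", entry.task_name, message)

    async def log_task_success(self, entry: TaskLogEntry, message: str,
                               metadata: Optional[dict[str, Any]] = None) -> None:
        entry.status = "success"
        entry.metadata.update(metadata or {})
        logging.info("[%s] SUCCESS: %s", entry.task_name, message)

    async def log_task_failure(self, entry: TaskLogEntry, message: str, error: str,
                               metadata: Optional[dict[str, Any]] = None) -> None:
        entry.status = "failure"
        entry.metadata.update(metadata or {})
        logging.error("[%s] FAILURE: %s (%s)", entry.task_name, message, error)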
@@ -8,6 +8,7 @@ from app.config import config
from app.prompts import SUMMARY_PROMPT_TEMPLATE
from app.utils.document_converters import convert_document_to_markdown, generate_content_hash
from app.services.llm_service import get_user_long_context_llm
from app.services.task_logging_service import TaskLoggingService
from langchain_core.documents import Document as LangChainDocument
from langchain_community.document_loaders import FireCrawlLoader, AsyncChromiumLoader
from langchain_community.document_transformers import MarkdownifyTransformer
@@ -22,10 +23,34 @@ md = MarkdownifyTransformer()
async def add_crawled_url_document(
    session: AsyncSession, url: str, search_space_id: int, user_id: str
) -> Optional[Document]:
    task_logger = TaskLoggingService(session, search_space_id)

    # Log task start
    log_entry = await task_logger.log_task_start(
        task_name="crawl_url_document",
        source="background_task",
        message=f"Starting URL crawling process for: {url}",
        metadata={"url": url, "user_id": str(user_id)}
    )

    try:
        # URL validation step
        await task_logger.log_task_progress(
            log_entry,
            f"Validating URL: {url}",
            {"stage": "validation"}
        )

        if not validators.url(url):
            raise ValueError(f"Url {url} is not a valid URL address")

        # Set up crawler
        await task_logger.log_task_progress(
            log_entry,
            f"Setting up crawler for URL: {url}",
            {"stage": "crawler_setup", "firecrawl_available": bool(config.FIRECRAWL_API_KEY)}
        )

        if config.FIRECRAWL_API_KEY:
            crawl_loader = FireCrawlLoader(
                url=url,
@@ -39,6 +64,13 @@ async def add_crawled_url_document(
        else:
            crawl_loader = AsyncChromiumLoader(urls=[url], headless=True)

        # Perform crawling
        await task_logger.log_task_progress(
            log_entry,
            f"Crawling URL content: {url}",
            {"stage": "crawling", "crawler_type": type(crawl_loader).__name__}
        )

        url_crawled = await crawl_loader.aload()

        if type(crawl_loader) == FireCrawlLoader:
@@ -46,6 +78,13 @@ async def add_crawled_url_document(
        elif type(crawl_loader) == AsyncChromiumLoader:
            content_in_markdown = md.transform_documents(url_crawled)[0].page_content

        # Format document
        await task_logger.log_task_progress(
            log_entry,
            f"Processing crawled content from: {url}",
            {"stage": "content_processing", "content_length": len(content_in_markdown)}
        )

        # Format document metadata in a more maintainable way
        metadata_sections = [
            (
@@ -74,6 +113,13 @@ async def add_crawled_url_document(
        combined_document_string = "\n".join(document_parts)
        content_hash = generate_content_hash(combined_document_string, search_space_id)

        # Check for duplicates
        await task_logger.log_task_progress(
            log_entry,
            f"Checking for duplicate content: {url}",
            {"stage": "duplicate_check", "content_hash": content_hash}
        )

        # Check if document with this content hash already exists
        existing_doc_result = await session.execute(
            select(Document).where(Document.content_hash == content_hash)
@@ -81,15 +127,33 @@ async def add_crawled_url_document(
        existing_document = existing_doc_result.scalars().first()

        if existing_document:
            await task_logger.log_task_success(
                log_entry,
                f"Document already exists for URL: {url}",
                {"duplicate_detected": True, "existing_document_id": existing_document.id}
            )
            logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
            return existing_document

        # Get LLM for summary generation
        await task_logger.log_task_progress(
            log_entry,
            f"Preparing for summary generation: {url}",
            {"stage": "llm_setup"}
        )

        # Get user's long context LLM
        user_llm = await get_user_long_context_llm(session, user_id)
        if not user_llm:
            raise RuntimeError(f"No long context LLM configured for user {user_id}")

        # Generate summary
        await task_logger.log_task_progress(
            log_entry,
            f"Generating summary for URL content: {url}",
            {"stage": "summary_generation"}
        )

        summary_chain = SUMMARY_PROMPT_TEMPLATE | user_llm
        summary_result = await summary_chain.ainvoke(
            {"document": combined_document_string}
@@ -98,6 +162,12 @@ async def add_crawled_url_document(
        summary_embedding = config.embedding_model_instance.embed(summary_content)

        # Process chunks
        await task_logger.log_task_progress(
            log_entry,
            f"Processing content chunks for URL: {url}",
            {"stage": "chunk_processing"}
        )

        chunks = [
            Chunk(
                content=chunk.text,
@@ -107,6 +177,12 @@ async def add_crawled_url_document(
        ]

        # Create and store document
        await task_logger.log_task_progress(
            log_entry,
            f"Creating document in database for URL: {url}",
            {"stage": "document_creation", "chunks_count": len(chunks)}
        )

        document = Document(
            search_space_id=search_space_id,
            title=url_crawled[0].metadata["title"]
@@ -124,13 +200,38 @@ async def add_crawled_url_document(
        await session.commit()
        await session.refresh(document)

        # Log success
        await task_logger.log_task_success(
            log_entry,
            f"Successfully crawled and processed URL: {url}",
            {
                "document_id": document.id,
                "title": document.title,
                "content_hash": content_hash,
                "chunks_count": len(chunks),
                "summary_length": len(summary_content)
            }
        )

        return document

    except SQLAlchemyError as db_error:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Database error while processing URL: {url}",
            str(db_error),
            {"error_type": "SQLAlchemyError"}
        )
        raise db_error
    except Exception as e:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Failed to crawl URL: {url}",
            str(e),
            {"error_type": type(e).__name__}
        )
        raise RuntimeError(f"Failed to crawl URL: {str(e)}")
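For reference, add_crawled_url_document above is designed to run as a background task: every stage reports progress on the shared log entry, and both except branches roll back the session and record the failure before re-raising. The minimal standalone driver below shows how it could be exercised; the engine DSN and session-factory setup are assumptions, and in SurfSense the call is normally scheduled from a FastAPI route rather than run directly.

import asyncio

from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

# Hypothetical connection string; replace with the app's real database settings.
engine = create_async_engine("postgresql+asyncpg://user:pass@localhost:5432/surfsense")
async_session_maker = async_sessionmaker(engine, expire_on_commit=False)


async def main() -> None:
    # add_crawled_url_document is the function shown in the hunks above,
    # assumed importable from the module changed in this commit.
    async with async_session_maker() as session:
        document = await add_crawled_url_document(
            session=session,
            url="https://example.com/article",
            search_space_id=1,
            user_id="00000000-0000-0000-0000-000000000000",
        )
        if document is not None:
            print(document.id, document.title)


if __name__ == "__main__":
    asyncio.run(main())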
@@ -148,6 +249,20 @@ async def add_extension_received_document(
    Returns:
        Document object if successful, None if failed
    """
    task_logger = TaskLoggingService(session, search_space_id)

    # Log task start
    log_entry = await task_logger.log_task_start(
        task_name="extension_document",
        source="background_task",
        message=f"Processing extension document: {content.metadata.VisitedWebPageTitle}",
        metadata={
            "url": content.metadata.VisitedWebPageURL,
            "title": content.metadata.VisitedWebPageTitle,
            "user_id": str(user_id)
        }
    )

    try:
        # Format document metadata in a more maintainable way
        metadata_sections = [
@@ -188,6 +303,11 @@ async def add_extension_received_document(
        existing_document = existing_doc_result.scalars().first()

        if existing_document:
            await task_logger.log_task_success(
                log_entry,
                f"Extension document already exists: {content.metadata.VisitedWebPageTitle}",
                {"duplicate_detected": True, "existing_document_id": existing_document.id}
            )
            logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
            return existing_document
@@ -229,19 +349,52 @@ async def add_extension_received_document(
        await session.commit()
        await session.refresh(document)

        # Log success
        await task_logger.log_task_success(
            log_entry,
            f"Successfully processed extension document: {content.metadata.VisitedWebPageTitle}",
            {
                "document_id": document.id,
                "content_hash": content_hash,
                "url": content.metadata.VisitedWebPageURL
            }
        )

        return document

    except SQLAlchemyError as db_error:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Database error processing extension document: {content.metadata.VisitedWebPageTitle}",
            str(db_error),
            {"error_type": "SQLAlchemyError"}
        )
        raise db_error
    except Exception as e:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Failed to process extension document: {content.metadata.VisitedWebPageTitle}",
            str(e),
            {"error_type": type(e).__name__}
        )
        raise RuntimeError(f"Failed to process extension document: {str(e)}")


async def add_received_markdown_file_document(
    session: AsyncSession, file_name: str, file_in_markdown: str, search_space_id: int, user_id: str
) -> Optional[Document]:
    task_logger = TaskLoggingService(session, search_space_id)

    # Log task start
    log_entry = await task_logger.log_task_start(
        task_name="markdown_file_document",
        source="background_task",
        message=f"Processing markdown file: {file_name}",
        metadata={"filename": file_name, "user_id": str(user_id), "content_length": len(file_in_markdown)}
    )

    try:
        content_hash = generate_content_hash(file_in_markdown, search_space_id)
@@ -252,6 +405,11 @@ async def add_received_markdown_file_document(
        existing_document = existing_doc_result.scalars().first()

        if existing_document:
            await task_logger.log_task_success(
                log_entry,
                f"Markdown file document already exists: {file_name}",
                {"duplicate_detected": True, "existing_document_id": existing_document.id}
            )
            logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
            return existing_document
@@ -293,12 +451,36 @@ async def add_received_markdown_file_document(
        await session.commit()
        await session.refresh(document)

        # Log success
        await task_logger.log_task_success(
            log_entry,
            f"Successfully processed markdown file: {file_name}",
            {
                "document_id": document.id,
                "content_hash": content_hash,
                "chunks_count": len(chunks),
                "summary_length": len(summary_content)
            }
        )

        return document
    except SQLAlchemyError as db_error:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Database error processing markdown file: {file_name}",
            str(db_error),
            {"error_type": "SQLAlchemyError"}
        )
        raise db_error
    except Exception as e:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Failed to process markdown file: {file_name}",
            str(e),
            {"error_type": type(e).__name__}
        )
        raise RuntimeError(f"Failed to process file document: {str(e)}")
@@ -566,8 +748,24 @@ async def add_youtube_video_document(
        SQLAlchemyError: If there's a database error
        RuntimeError: If the video processing fails
    """
    task_logger = TaskLoggingService(session, search_space_id)

    # Log task start
    log_entry = await task_logger.log_task_start(
        task_name="youtube_video_document",
        source="background_task",
        message=f"Starting YouTube video processing for: {url}",
        metadata={"url": url, "user_id": str(user_id)}
    )

    try:
        # Extract video ID from URL
        await task_logger.log_task_progress(
            log_entry,
            f"Extracting video ID from URL: {url}",
            {"stage": "video_id_extraction"}
        )

        def get_youtube_video_id(url: str):
            parsed_url = urlparse(url)
            hostname = parsed_url.hostname
@@ -589,7 +787,19 @@ async def add_youtube_video_document(
        if not video_id:
            raise ValueError(f"Could not extract video ID from URL: {url}")

        # Get video metadata using async HTTP client
        await task_logger.log_task_progress(
            log_entry,
            f"Video ID extracted: {video_id}",
            {"stage": "video_id_extracted", "video_id": video_id}
        )

        # Get video metadata
        await task_logger.log_task_progress(
            log_entry,
            f"Fetching video metadata for: {video_id}",
            {"stage": "metadata_fetch"}
        )

        params = {
            "format": "json",
            "url": f"https://www.youtube.com/watch?v={video_id}",
@@ -600,7 +810,19 @@ async def add_youtube_video_document(
            async with http_session.get(oembed_url, params=params) as response:
                video_data = await response.json()

        await task_logger.log_task_progress(
            log_entry,
            f"Video metadata fetched: {video_data.get('title', 'Unknown')}",
            {"stage": "metadata_fetched", "title": video_data.get('title'), "author": video_data.get('author_name')}
        )

        # Get video transcript
        await task_logger.log_task_progress(
            log_entry,
            f"Fetching transcript for video: {video_id}",
            {"stage": "transcript_fetch"}
        )

        try:
            captions = YouTubeTranscriptApi.get_transcript(video_id)
            # Include complete caption information with timestamps
@@ -612,8 +834,26 @@ async def add_youtube_video_document(
                timestamp = f"[{start_time:.2f}s-{start_time + duration:.2f}s]"
                transcript_segments.append(f"{timestamp} {text}")
            transcript_text = "\n".join(transcript_segments)

            await task_logger.log_task_progress(
                log_entry,
                f"Transcript fetched successfully: {len(captions)} segments",
                {"stage": "transcript_fetched", "segments_count": len(captions), "transcript_length": len(transcript_text)}
            )
        except Exception as e:
            transcript_text = f"No captions available for this video. Error: {str(e)}"
            await task_logger.log_task_progress(
                log_entry,
                f"No transcript available for video: {video_id}",
                {"stage": "transcript_unavailable", "error": str(e)}
            )

        # Format document
        await task_logger.log_task_progress(
            log_entry,
            f"Processing video content: {video_data.get('title', 'YouTube Video')}",
            {"stage": "content_processing"}
        )

        # Format document metadata in a more maintainable way
        metadata_sections = [
@@ -646,6 +886,13 @@ async def add_youtube_video_document(
        combined_document_string = "\n".join(document_parts)
        content_hash = generate_content_hash(combined_document_string, search_space_id)

        # Check for duplicates
        await task_logger.log_task_progress(
            log_entry,
            f"Checking for duplicate video content: {video_id}",
            {"stage": "duplicate_check", "content_hash": content_hash}
        )

        # Check if document with this content hash already exists
        existing_doc_result = await session.execute(
            select(Document).where(Document.content_hash == content_hash)
@@ -653,15 +900,33 @@ async def add_youtube_video_document(
        existing_document = existing_doc_result.scalars().first()

        if existing_document:
            await task_logger.log_task_success(
                log_entry,
                f"YouTube video document already exists: {video_data.get('title', 'YouTube Video')}",
                {"duplicate_detected": True, "existing_document_id": existing_document.id, "video_id": video_id}
            )
            logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
            return existing_document

        # Get LLM for summary generation
        await task_logger.log_task_progress(
            log_entry,
            f"Preparing for summary generation: {video_data.get('title', 'YouTube Video')}",
            {"stage": "llm_setup"}
        )

        # Get user's long context LLM
        user_llm = await get_user_long_context_llm(session, user_id)
        if not user_llm:
            raise RuntimeError(f"No long context LLM configured for user {user_id}")

        # Generate summary
        await task_logger.log_task_progress(
            log_entry,
            f"Generating summary for video: {video_data.get('title', 'YouTube Video')}",
            {"stage": "summary_generation"}
        )

        summary_chain = SUMMARY_PROMPT_TEMPLATE | user_llm
        summary_result = await summary_chain.ainvoke(
            {"document": combined_document_string}
@@ -670,6 +935,12 @@ async def add_youtube_video_document(
        summary_embedding = config.embedding_model_instance.embed(summary_content)

        # Process chunks
        await task_logger.log_task_progress(
            log_entry,
            f"Processing content chunks for video: {video_data.get('title', 'YouTube Video')}",
            {"stage": "chunk_processing"}
        )

        chunks = [
            Chunk(
                content=chunk.text,
@@ -679,6 +950,11 @@ async def add_youtube_video_document(
        ]

        # Create document
        await task_logger.log_task_progress(
            log_entry,
            f"Creating YouTube video document in database: {video_data.get('title', 'YouTube Video')}",
            {"stage": "document_creation", "chunks_count": len(chunks)}
        )

        document = Document(
            title=video_data.get("title", "YouTube Video"),
@@ -701,11 +977,38 @@ async def add_youtube_video_document(
        await session.commit()
        await session.refresh(document)

        # Log success
        await task_logger.log_task_success(
            log_entry,
            f"Successfully processed YouTube video: {video_data.get('title', 'YouTube Video')}",
            {
                "document_id": document.id,
                "video_id": video_id,
                "title": document.title,
                "content_hash": content_hash,
                "chunks_count": len(chunks),
                "summary_length": len(summary_content),
                "has_transcript": "No captions available" not in transcript_text
            }
        )

        return document
    except SQLAlchemyError as db_error:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Database error while processing YouTube video: {url}",
            str(db_error),
            {"error_type": "SQLAlchemyError", "video_id": video_id if 'video_id' in locals() else None}
        )
        raise db_error
    except Exception as e:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Failed to process YouTube video: {url}",
            str(e),
            {"error_type": type(e).__name__, "video_id": video_id if 'video_id' in locals() else None}
        )
        logging.error(f"Failed to process YouTube video: {str(e)}")
        raise
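The first bullet of the commit message refers to the Docling ETL service, whose changes are not among the hunks shown here. The fragment below is a hypothetical sketch of how such a task would plug into the same logging scheme: only the TaskLoggingService calls mirror what the diff actually shows, while the function name, task name, stage label, and convert_with_docling helper are invented placeholders for illustration.

async def convert_with_docling(file_path: str) -> str:
    # Placeholder for the actual Docling conversion step (not shown in this diff).
    raise NotImplementedError


async def add_received_file_document_using_docling(
    session, file_name: str, file_path: str, search_space_id: int, user_id: str
):
    task_logger = TaskLoggingService(session, search_space_id)

    log_entry = await task_logger.log_task_start(
        task_name="docling_file_document",  # hypothetical task name
        source="background_task",
        message=f"Processing file with Docling: {file_name}",
        metadata={"filename": file_name, "user_id": str(user_id)},
    )

    try:
        await task_logger.log_task_progress(
            log_entry,
            f"Converting file with Docling: {file_name}",
            {"stage": "docling_conversion"},  # hypothetical stage label
        )

        file_in_markdown = await convert_with_docling(file_path)

        await task_logger.log_task_success(
            log_entry,
            f"Successfully processed file with Docling: {file_name}",
            {"content_length": len(file_in_markdown)},
        )
        return file_in_markdown
    except Exception as e:
        await session.rollback()
        await task_logger.log_task_failure(
            log_entry,
            f"Failed to process file with Docling: {file_name}",
            str(e),
            {"error_type": type(e).__name__},
        )
        raise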