Fixed all ruff lint and formatting errors

Utkarsh-Patel-13 2025-07-24 14:43:48 -07:00
parent 0a03c42cc5
commit d359a59f6d
85 changed files with 5520 additions and 3870 deletions


@@ -1,46 +1,49 @@
from typing import Optional, List
from sqlalchemy.ext.asyncio import AsyncSession
import logging
from urllib.parse import parse_qs, urlparse
import aiohttp
import validators
from langchain_community.document_loaders import AsyncChromiumLoader, FireCrawlLoader
from langchain_community.document_transformers import MarkdownifyTransformer
from langchain_core.documents import Document as LangChainDocument
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from app.db import Document, DocumentType, Chunk
from app.schemas import ExtensionDocumentContent
from youtube_transcript_api import YouTubeTranscriptApi
from app.config import config
from app.db import Chunk, Document, DocumentType
from app.prompts import SUMMARY_PROMPT_TEMPLATE
from app.utils.document_converters import convert_document_to_markdown, generate_content_hash
from app.schemas import ExtensionDocumentContent
from app.services.llm_service import get_user_long_context_llm
from app.services.task_logging_service import TaskLoggingService
from langchain_core.documents import Document as LangChainDocument
from langchain_community.document_loaders import FireCrawlLoader, AsyncChromiumLoader
from langchain_community.document_transformers import MarkdownifyTransformer
import validators
from youtube_transcript_api import YouTubeTranscriptApi
from urllib.parse import urlparse, parse_qs
import aiohttp
import logging
from app.utils.document_converters import (
convert_document_to_markdown,
generate_content_hash,
)
md = MarkdownifyTransformer()
async def add_crawled_url_document(
session: AsyncSession, url: str, search_space_id: int, user_id: str
) -> Optional[Document]:
) -> Document | None:
task_logger = TaskLoggingService(session, search_space_id)
# Log task start
log_entry = await task_logger.log_task_start(
task_name="crawl_url_document",
source="background_task",
message=f"Starting URL crawling process for: {url}",
metadata={"url": url, "user_id": str(user_id)}
metadata={"url": url, "user_id": str(user_id)},
)
try:
# URL validation step
await task_logger.log_task_progress(
log_entry,
f"Validating URL: {url}",
{"stage": "validation"}
log_entry, f"Validating URL: {url}", {"stage": "validation"}
)
if not validators.url(url):
raise ValueError(f"Url {url} is not a valid URL address")
@@ -48,7 +51,10 @@ async def add_crawled_url_document(
await task_logger.log_task_progress(
log_entry,
f"Setting up crawler for URL: {url}",
{"stage": "crawler_setup", "firecrawl_available": bool(config.FIRECRAWL_API_KEY)}
{
"stage": "crawler_setup",
"firecrawl_available": bool(config.FIRECRAWL_API_KEY),
},
)
if config.FIRECRAWL_API_KEY:
@@ -68,21 +74,21 @@ async def add_crawled_url_document(
await task_logger.log_task_progress(
log_entry,
f"Crawling URL content: {url}",
{"stage": "crawling", "crawler_type": type(crawl_loader).__name__}
{"stage": "crawling", "crawler_type": type(crawl_loader).__name__},
)
url_crawled = await crawl_loader.aload()
if type(crawl_loader) == FireCrawlLoader:
if isinstance(crawl_loader, FireCrawlLoader):
content_in_markdown = url_crawled[0].page_content
elif type(crawl_loader) == AsyncChromiumLoader:
elif isinstance(crawl_loader, AsyncChromiumLoader):
content_in_markdown = md.transform_documents(url_crawled)[0].page_content
# Format document
await task_logger.log_task_progress(
log_entry,
f"Processing crawled content from: {url}",
{"stage": "content_processing", "content_length": len(content_in_markdown)}
{"stage": "content_processing", "content_length": len(content_in_markdown)},
)
# Format document metadata in a more maintainable way
@@ -117,7 +123,7 @@ async def add_crawled_url_document(
await task_logger.log_task_progress(
log_entry,
f"Checking for duplicate content: {url}",
{"stage": "duplicate_check", "content_hash": content_hash}
{"stage": "duplicate_check", "content_hash": content_hash},
)
# Check if document with this content hash already exists
@@ -125,21 +131,26 @@ async def add_crawled_url_document(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
await task_logger.log_task_success(
log_entry,
f"Document already exists for URL: {url}",
{"duplicate_detected": True, "existing_document_id": existing_document.id}
{
"duplicate_detected": True,
"existing_document_id": existing_document.id,
},
)
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
return existing_document
# Get LLM for summary generation
await task_logger.log_task_progress(
log_entry,
f"Preparing for summary generation: {url}",
{"stage": "llm_setup"}
{"stage": "llm_setup"},
)
# Get user's long context LLM
@@ -151,7 +162,7 @@ async def add_crawled_url_document(
await task_logger.log_task_progress(
log_entry,
f"Generating summary for URL content: {url}",
{"stage": "summary_generation"}
{"stage": "summary_generation"},
)
summary_chain = SUMMARY_PROMPT_TEMPLATE | user_llm
@@ -165,7 +176,7 @@ async def add_crawled_url_document(
await task_logger.log_task_progress(
log_entry,
f"Processing content chunks for URL: {url}",
{"stage": "chunk_processing"}
{"stage": "chunk_processing"},
)
chunks = [
@@ -180,13 +191,13 @@ async def add_crawled_url_document(
await task_logger.log_task_progress(
log_entry,
f"Creating document in database for URL: {url}",
{"stage": "document_creation", "chunks_count": len(chunks)}
{"stage": "document_creation", "chunks_count": len(chunks)},
)
document = Document(
search_space_id=search_space_id,
title=url_crawled[0].metadata["title"]
if type(crawl_loader) == FireCrawlLoader
if isinstance(crawl_loader, FireCrawlLoader)
else url_crawled[0].metadata["source"],
document_type=DocumentType.CRAWLED_URL,
document_metadata=url_crawled[0].metadata,
@@ -209,8 +220,8 @@ async def add_crawled_url_document(
"title": document.title,
"content_hash": content_hash,
"chunks_count": len(chunks),
"summary_length": len(summary_content)
}
"summary_length": len(summary_content),
},
)
return document
@@ -221,7 +232,7 @@ async def add_crawled_url_document(
log_entry,
f"Database error while processing URL: {url}",
str(db_error),
{"error_type": "SQLAlchemyError"}
{"error_type": "SQLAlchemyError"},
)
raise db_error
except Exception as e:
@@ -230,14 +241,17 @@ async def add_crawled_url_document(
log_entry,
f"Failed to crawl URL: {url}",
str(e),
{"error_type": type(e).__name__}
{"error_type": type(e).__name__},
)
raise RuntimeError(f"Failed to crawl URL: {str(e)}")
raise RuntimeError(f"Failed to crawl URL: {e!s}") from e
async def add_extension_received_document(
session: AsyncSession, content: ExtensionDocumentContent, search_space_id: int, user_id: str
) -> Optional[Document]:
session: AsyncSession,
content: ExtensionDocumentContent,
search_space_id: int,
user_id: str,
) -> Document | None:
"""
Process and store document content received from the SurfSense Extension.
@@ -250,7 +264,7 @@ async def add_extension_received_document(
Document object if successful, None if failed
"""
task_logger = TaskLoggingService(session, search_space_id)
# Log task start
log_entry = await task_logger.log_task_start(
task_name="extension_document",
@@ -259,10 +273,10 @@ async def add_extension_received_document(
metadata={
"url": content.metadata.VisitedWebPageURL,
"title": content.metadata.VisitedWebPageTitle,
"user_id": str(user_id)
}
"user_id": str(user_id),
},
)
try:
# Format document metadata in a more maintainable way
metadata_sections = [
@@ -301,14 +315,19 @@ async def add_extension_received_document(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
await task_logger.log_task_success(
log_entry,
f"Extension document already exists: {content.metadata.VisitedWebPageTitle}",
{"duplicate_detected": True, "existing_document_id": existing_document.id}
{
"duplicate_detected": True,
"existing_document_id": existing_document.id,
},
)
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
return existing_document
# Get user's long context LLM
@@ -356,8 +375,8 @@ async def add_extension_received_document(
{
"document_id": document.id,
"content_hash": content_hash,
"url": content.metadata.VisitedWebPageURL
}
"url": content.metadata.VisitedWebPageURL,
},
)
return document
@@ -368,7 +387,7 @@ async def add_extension_received_document(
log_entry,
f"Database error processing extension document: {content.metadata.VisitedWebPageTitle}",
str(db_error),
{"error_type": "SQLAlchemyError"}
{"error_type": "SQLAlchemyError"},
)
raise db_error
except Exception as e:
@@ -377,24 +396,32 @@ async def add_extension_received_document(
log_entry,
f"Failed to process extension document: {content.metadata.VisitedWebPageTitle}",
str(e),
{"error_type": type(e).__name__}
{"error_type": type(e).__name__},
)
raise RuntimeError(f"Failed to process extension document: {str(e)}")
raise RuntimeError(f"Failed to process extension document: {e!s}") from e
async def add_received_markdown_file_document(
session: AsyncSession, file_name: str, file_in_markdown: str, search_space_id: int, user_id: str
) -> Optional[Document]:
session: AsyncSession,
file_name: str,
file_in_markdown: str,
search_space_id: int,
user_id: str,
) -> Document | None:
task_logger = TaskLoggingService(session, search_space_id)
# Log task start
log_entry = await task_logger.log_task_start(
task_name="markdown_file_document",
source="background_task",
message=f"Processing markdown file: {file_name}",
metadata={"filename": file_name, "user_id": str(user_id), "content_length": len(file_in_markdown)}
metadata={
"filename": file_name,
"user_id": str(user_id),
"content_length": len(file_in_markdown),
},
)
try:
content_hash = generate_content_hash(file_in_markdown, search_space_id)
@@ -403,14 +430,19 @@ async def add_received_markdown_file_document(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
await task_logger.log_task_success(
log_entry,
f"Markdown file document already exists: {file_name}",
{"duplicate_detected": True, "existing_document_id": existing_document.id}
{
"duplicate_detected": True,
"existing_document_id": existing_document.id,
},
)
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
return existing_document
# Get user's long context LLM
@@ -459,8 +491,8 @@ async def add_received_markdown_file_document(
"document_id": document.id,
"content_hash": content_hash,
"chunks_count": len(chunks),
"summary_length": len(summary_content)
}
"summary_length": len(summary_content),
},
)
return document
@@ -470,7 +502,7 @@ async def add_received_markdown_file_document(
log_entry,
f"Database error processing markdown file: {file_name}",
str(db_error),
{"error_type": "SQLAlchemyError"}
{"error_type": "SQLAlchemyError"},
)
raise db_error
except Exception as e:
@@ -479,18 +511,18 @@ async def add_received_markdown_file_document(
log_entry,
f"Failed to process markdown file: {file_name}",
str(e),
{"error_type": type(e).__name__}
{"error_type": type(e).__name__},
)
raise RuntimeError(f"Failed to process file document: {str(e)}")
raise RuntimeError(f"Failed to process file document: {e!s}") from e
async def add_received_file_document_using_unstructured(
session: AsyncSession,
file_name: str,
unstructured_processed_elements: List[LangChainDocument],
unstructured_processed_elements: list[LangChainDocument],
search_space_id: int,
user_id: str,
) -> Optional[Document]:
) -> Document | None:
try:
file_in_markdown = await convert_document_to_markdown(
unstructured_processed_elements
@@ -503,9 +535,11 @@ async def add_received_file_document_using_unstructured(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
return existing_document
# TODO: Check if file_markdown exceeds token limit of embedding model
@@ -555,7 +589,7 @@ async def add_received_file_document_using_unstructured(
raise db_error
except Exception as e:
await session.rollback()
raise RuntimeError(f"Failed to process file document: {str(e)}")
raise RuntimeError(f"Failed to process file document: {e!s}") from e
async def add_received_file_document_using_llamacloud(
@@ -564,7 +598,7 @@ async def add_received_file_document_using_llamacloud(
llamacloud_markdown_document: str,
search_space_id: int,
user_id: str,
) -> Optional[Document]:
) -> Document | None:
"""
Process and store document content parsed by LlamaCloud.
@@ -588,9 +622,11 @@ async def add_received_file_document_using_llamacloud(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
return existing_document
# Get user's long context LLM
@@ -638,7 +674,9 @@ async def add_received_file_document_using_llamacloud(
raise db_error
except Exception as e:
await session.rollback()
raise RuntimeError(f"Failed to process file document using LlamaCloud: {str(e)}")
raise RuntimeError(
f"Failed to process file document using LlamaCloud: {e!s}"
) from e
async def add_received_file_document_using_docling(
@@ -647,7 +685,7 @@ async def add_received_file_document_using_docling(
docling_markdown_document: str,
search_space_id: int,
user_id: str,
) -> Optional[Document]:
) -> Document | None:
"""
Process and store document content parsed by Docling.
@@ -671,9 +709,11 @@ async def add_received_file_document_using_docling(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
return existing_document
# Get user's long context LLM
@@ -683,12 +723,11 @@ async def add_received_file_document_using_docling(
# Generate summary using chunked processing for large documents
from app.services.docling_service import create_docling_service
docling_service = create_docling_service()
summary_content = await docling_service.process_large_document_summary(
content=file_in_markdown,
llm=user_llm,
document_title=file_name
content=file_in_markdown, llm=user_llm, document_title=file_name
)
summary_embedding = config.embedding_model_instance.embed(summary_content)
@@ -726,7 +765,9 @@ async def add_received_file_document_using_docling(
raise db_error
except Exception as e:
await session.rollback()
raise RuntimeError(f"Failed to process file document using Docling: {str(e)}")
raise RuntimeError(
f"Failed to process file document using Docling: {e!s}"
) from e
async def add_youtube_video_document(
@@ -749,23 +790,23 @@ async def add_youtube_video_document(
RuntimeError: If the video processing fails
"""
task_logger = TaskLoggingService(session, search_space_id)
# Log task start
log_entry = await task_logger.log_task_start(
task_name="youtube_video_document",
source="background_task",
message=f"Starting YouTube video processing for: {url}",
metadata={"url": url, "user_id": str(user_id)}
metadata={"url": url, "user_id": str(user_id)},
)
try:
# Extract video ID from URL
await task_logger.log_task_progress(
log_entry,
f"Extracting video ID from URL: {url}",
{"stage": "video_id_extraction"}
{"stage": "video_id_extraction"},
)
def get_youtube_video_id(url: str):
parsed_url = urlparse(url)
hostname = parsed_url.hostname
@@ -790,14 +831,14 @@ async def add_youtube_video_document(
await task_logger.log_task_progress(
log_entry,
f"Video ID extracted: {video_id}",
{"stage": "video_id_extracted", "video_id": video_id}
{"stage": "video_id_extracted", "video_id": video_id},
)
# Get video metadata
await task_logger.log_task_progress(
log_entry,
f"Fetching video metadata for: {video_id}",
{"stage": "metadata_fetch"}
{"stage": "metadata_fetch"},
)
params = {
@@ -806,21 +847,27 @@ async def add_youtube_video_document(
}
oembed_url = "https://www.youtube.com/oembed"
async with aiohttp.ClientSession() as http_session:
async with http_session.get(oembed_url, params=params) as response:
video_data = await response.json()
async with (
aiohttp.ClientSession() as http_session,
http_session.get(oembed_url, params=params) as response,
):
video_data = await response.json()
await task_logger.log_task_progress(
log_entry,
f"Video metadata fetched: {video_data.get('title', 'Unknown')}",
{"stage": "metadata_fetched", "title": video_data.get('title'), "author": video_data.get('author_name')}
{
"stage": "metadata_fetched",
"title": video_data.get("title"),
"author": video_data.get("author_name"),
},
)
# Get video transcript
await task_logger.log_task_progress(
log_entry,
f"Fetching transcript for video: {video_id}",
{"stage": "transcript_fetch"}
{"stage": "transcript_fetch"},
)
try:
@@ -834,25 +881,29 @@ async def add_youtube_video_document(
timestamp = f"[{start_time:.2f}s-{start_time + duration:.2f}s]"
transcript_segments.append(f"{timestamp} {text}")
transcript_text = "\n".join(transcript_segments)
await task_logger.log_task_progress(
log_entry,
f"Transcript fetched successfully: {len(captions)} segments",
{"stage": "transcript_fetched", "segments_count": len(captions), "transcript_length": len(transcript_text)}
{
"stage": "transcript_fetched",
"segments_count": len(captions),
"transcript_length": len(transcript_text),
},
)
except Exception as e:
transcript_text = f"No captions available for this video. Error: {str(e)}"
transcript_text = f"No captions available for this video. Error: {e!s}"
await task_logger.log_task_progress(
log_entry,
f"No transcript available for video: {video_id}",
{"stage": "transcript_unavailable", "error": str(e)}
{"stage": "transcript_unavailable", "error": str(e)},
)
# Format document
await task_logger.log_task_progress(
log_entry,
f"Processing video content: {video_data.get('title', 'YouTube Video')}",
{"stage": "content_processing"}
{"stage": "content_processing"},
)
# Format document metadata in a more maintainable way
@@ -890,7 +941,7 @@ async def add_youtube_video_document(
await task_logger.log_task_progress(
log_entry,
f"Checking for duplicate video content: {video_id}",
{"stage": "duplicate_check", "content_hash": content_hash}
{"stage": "duplicate_check", "content_hash": content_hash},
)
# Check if document with this content hash already exists
@@ -898,21 +949,27 @@ async def add_youtube_video_document(
select(Document).where(Document.content_hash == content_hash)
)
existing_document = existing_doc_result.scalars().first()
if existing_document:
await task_logger.log_task_success(
log_entry,
f"YouTube video document already exists: {video_data.get('title', 'YouTube Video')}",
{"duplicate_detected": True, "existing_document_id": existing_document.id, "video_id": video_id}
{
"duplicate_detected": True,
"existing_document_id": existing_document.id,
"video_id": video_id,
},
)
logging.info(
f"Document with content hash {content_hash} already exists. Skipping processing."
)
logging.info(f"Document with content hash {content_hash} already exists. Skipping processing.")
return existing_document
# Get LLM for summary generation
await task_logger.log_task_progress(
log_entry,
f"Preparing for summary generation: {video_data.get('title', 'YouTube Video')}",
{"stage": "llm_setup"}
{"stage": "llm_setup"},
)
# Get user's long context LLM
@@ -924,7 +981,7 @@ async def add_youtube_video_document(
await task_logger.log_task_progress(
log_entry,
f"Generating summary for video: {video_data.get('title', 'YouTube Video')}",
{"stage": "summary_generation"}
{"stage": "summary_generation"},
)
summary_chain = SUMMARY_PROMPT_TEMPLATE | user_llm
@@ -938,7 +995,7 @@ async def add_youtube_video_document(
await task_logger.log_task_progress(
log_entry,
f"Processing content chunks for video: {video_data.get('title', 'YouTube Video')}",
{"stage": "chunk_processing"}
{"stage": "chunk_processing"},
)
chunks = [
@@ -953,7 +1010,7 @@ async def add_youtube_video_document(
await task_logger.log_task_progress(
log_entry,
f"Creating YouTube video document in database: {video_data.get('title', 'YouTube Video')}",
{"stage": "document_creation", "chunks_count": len(chunks)}
{"stage": "document_creation", "chunks_count": len(chunks)},
)
document = Document(
@@ -988,8 +1045,8 @@ async def add_youtube_video_document(
"content_hash": content_hash,
"chunks_count": len(chunks),
"summary_length": len(summary_content),
"has_transcript": "No captions available" not in transcript_text
}
"has_transcript": "No captions available" not in transcript_text,
},
)
return document
@@ -999,7 +1056,10 @@ async def add_youtube_video_document(
log_entry,
f"Database error while processing YouTube video: {url}",
str(db_error),
{"error_type": "SQLAlchemyError", "video_id": video_id if 'video_id' in locals() else None}
{
"error_type": "SQLAlchemyError",
"video_id": video_id if "video_id" in locals() else None,
},
)
raise db_error
except Exception as e:
@@ -1008,7 +1068,10 @@ async def add_youtube_video_document(
log_entry,
f"Failed to process YouTube video: {url}",
str(e),
{"error_type": type(e).__name__, "video_id": video_id if 'video_id' in locals() else None}
{
"error_type": type(e).__name__,
"video_id": video_id if "video_id" in locals() else None,
},
)
logging.error(f"Failed to process YouTube video: {str(e)}")
logging.error(f"Failed to process YouTube video: {e!s}")
raise