feat: Improved sub section writer agent & Chat UI

This commit is contained in:
DESKTOP-RTLN3BA\$punk 2025-05-09 22:14:22 -07:00
parent 1b9d7a0d96
commit 2cee5acaa3
4 changed files with 304 additions and 240 deletions

View file

@ -14,6 +14,8 @@ from .configuration import Configuration
from .prompts import get_answer_outline_system_prompt from .prompts import get_answer_outline_system_prompt
from .state import State from .state import State
from .sub_section_writer.graph import graph as sub_section_writer_graph from .sub_section_writer.graph import graph as sub_section_writer_graph
from .sub_section_writer.configuration import SubSectionType
from langgraph.types import StreamWriter from langgraph.types import StreamWriter
@ -41,14 +43,14 @@ async def write_answer_outline(state: State, config: RunnableConfig, writer: Str
""" """
streaming_service = state.streaming_service streaming_service = state.streaming_service
streaming_service.only_update_terminal("Generating answer outline...") streaming_service.only_update_terminal("🔍 Generating answer outline...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Get configuration from runnable config # Get configuration from runnable config
configuration = Configuration.from_runnable_config(config) configuration = Configuration.from_runnable_config(config)
user_query = configuration.user_query user_query = configuration.user_query
num_sections = configuration.num_sections num_sections = configuration.num_sections
streaming_service.only_update_terminal(f"Planning research approach for query: {user_query[:100]}...") streaming_service.only_update_terminal(f"🤔 Planning research approach for: \"{user_query[:100]}...\"")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Initialize LLM # Initialize LLM
@ -78,7 +80,7 @@ async def write_answer_outline(state: State, config: RunnableConfig, writer: Str
Your output MUST be valid JSON in exactly this format. Do not include any other text or explanation. Your output MUST be valid JSON in exactly this format. Do not include any other text or explanation.
""" """
streaming_service.only_update_terminal("Designing structured outline with AI...") streaming_service.only_update_terminal("📝 Designing structured outline with AI...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Create messages for the LLM # Create messages for the LLM
@ -88,7 +90,7 @@ async def write_answer_outline(state: State, config: RunnableConfig, writer: Str
] ]
# Call the LLM directly without using structured output # Call the LLM directly without using structured output
streaming_service.only_update_terminal("Processing answer structure...") streaming_service.only_update_terminal("⚙️ Processing answer structure...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
response = await llm.ainvoke(messages) response = await llm.ainvoke(messages)
@ -111,7 +113,7 @@ async def write_answer_outline(state: State, config: RunnableConfig, writer: Str
answer_outline = AnswerOutline(**parsed_data) answer_outline = AnswerOutline(**parsed_data)
total_questions = sum(len(section.questions) for section in answer_outline.answer_outline) total_questions = sum(len(section.questions) for section in answer_outline.answer_outline)
streaming_service.only_update_terminal(f"Successfully generated outline with {len(answer_outline.answer_outline)} sections and {total_questions} research questions") streaming_service.only_update_terminal(f"Successfully generated outline with {len(answer_outline.answer_outline)} sections and {total_questions} research questions!")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
print(f"Successfully generated answer outline with {len(answer_outline.answer_outline)} sections") print(f"Successfully generated answer outline with {len(answer_outline.answer_outline)} sections")
@ -121,14 +123,14 @@ async def write_answer_outline(state: State, config: RunnableConfig, writer: Str
else: else:
# If JSON structure not found, raise a clear error # If JSON structure not found, raise a clear error
error_message = f"Could not find valid JSON in LLM response. Raw response: {content}" error_message = f"Could not find valid JSON in LLM response. Raw response: {content}"
streaming_service.only_update_terminal(error_message, "error") streaming_service.only_update_terminal(f"{error_message}", "error")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
raise ValueError(error_message) raise ValueError(error_message)
except (json.JSONDecodeError, ValueError) as e: except (json.JSONDecodeError, ValueError) as e:
# Log the error and re-raise it # Log the error and re-raise it
error_message = f"Error parsing LLM response: {str(e)}" error_message = f"Error parsing LLM response: {str(e)}"
streaming_service.only_update_terminal(error_message, "error") streaming_service.only_update_terminal(f"{error_message}", "error")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
print(f"Error parsing LLM response: {str(e)}") print(f"Error parsing LLM response: {str(e)}")
@ -149,6 +151,11 @@ async def fetch_relevant_documents(
""" """
Fetch relevant documents for research questions using the provided connectors. Fetch relevant documents for research questions using the provided connectors.
This function searches across multiple data sources for information related to the
research questions. It provides user-friendly feedback during the search process by
displaying connector names (like "Web Search" instead of "TAVILY_API") and adding
relevant emojis to indicate the type of source being searched.
Args: Args:
research_questions: List of research questions to find documents for research_questions: List of research questions to find documents for
user_id: The user ID user_id: The user ID
@ -158,6 +165,7 @@ async def fetch_relevant_documents(
writer: StreamWriter for sending progress updates writer: StreamWriter for sending progress updates
state: The current state containing the streaming service state: The current state containing the streaming service
top_k: Number of top results to retrieve per connector per question top_k: Number of top results to retrieve per connector per question
connector_service: An initialized connector service to use for searching
Returns: Returns:
List of relevant documents List of relevant documents
@ -170,7 +178,9 @@ async def fetch_relevant_documents(
# Stream initial status update # Stream initial status update
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Starting research on {len(research_questions)} questions using {len(connectors_to_search)} connectors...") connector_names = [get_connector_friendly_name(connector) for connector in connectors_to_search]
connector_names_str = ", ".join(connector_names)
streaming_service.only_update_terminal(f"🔎 Starting research on {len(research_questions)} questions using {connector_names_str} data sources")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
all_raw_documents = [] # Store all raw documents all_raw_documents = [] # Store all raw documents
@ -179,7 +189,7 @@ async def fetch_relevant_documents(
for i, user_query in enumerate(research_questions): for i, user_query in enumerate(research_questions):
# Stream question being researched # Stream question being researched
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Researching question {i+1}/{len(research_questions)}: {user_query[:100]}...") streaming_service.only_update_terminal(f"🧠 Researching question {i+1}/{len(research_questions)}: \"{user_query[:100]}...\"")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Use original research question as the query # Use original research question as the query
@ -189,7 +199,9 @@ async def fetch_relevant_documents(
for connector in connectors_to_search: for connector in connectors_to_search:
# Stream connector being searched # Stream connector being searched
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Searching {connector} for relevant information...") connector_emoji = get_connector_emoji(connector)
friendly_name = get_connector_friendly_name(connector)
streaming_service.only_update_terminal(f"{connector_emoji} Searching {friendly_name} for relevant information...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
try: try:
@ -208,7 +220,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(youtube_chunks)} YouTube chunks relevant to the query") streaming_service.only_update_terminal(f"📹 Found {len(youtube_chunks)} YouTube chunks related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "EXTENSION": elif connector == "EXTENSION":
@ -226,7 +238,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(extension_chunks)} extension chunks relevant to the query") streaming_service.only_update_terminal(f"🧩 Found {len(extension_chunks)} Browser Extension chunks related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "CRAWLED_URL": elif connector == "CRAWLED_URL":
@ -244,7 +256,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(crawled_urls_chunks)} crawled URL chunks relevant to the query") streaming_service.only_update_terminal(f"🌐 Found {len(crawled_urls_chunks)} Web Pages chunks related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "FILE": elif connector == "FILE":
@ -262,7 +274,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(files_chunks)} file chunks relevant to the query") streaming_service.only_update_terminal(f"📄 Found {len(files_chunks)} Files chunks related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
@ -281,7 +293,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(slack_chunks)} Slack messages relevant to the query") streaming_service.only_update_terminal(f"💬 Found {len(slack_chunks)} Slack messages related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "NOTION_CONNECTOR": elif connector == "NOTION_CONNECTOR":
@ -299,7 +311,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(notion_chunks)} Notion pages/blocks relevant to the query") streaming_service.only_update_terminal(f"📘 Found {len(notion_chunks)} Notion pages/blocks related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "GITHUB_CONNECTOR": elif connector == "GITHUB_CONNECTOR":
@ -317,7 +329,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(github_chunks)} GitHub files/issues relevant to the query") streaming_service.only_update_terminal(f"🐙 Found {len(github_chunks)} GitHub files/issues related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "LINEAR_CONNECTOR": elif connector == "LINEAR_CONNECTOR":
@ -335,7 +347,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(linear_chunks)} Linear issues relevant to the query") streaming_service.only_update_terminal(f"📊 Found {len(linear_chunks)} Linear issues related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "TAVILY_API": elif connector == "TAVILY_API":
@ -352,7 +364,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(tavily_chunks)} web search results relevant to the query") streaming_service.only_update_terminal(f"🔍 Found {len(tavily_chunks)} Web Search results related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
elif connector == "LINKUP_API": elif connector == "LINKUP_API":
@ -374,7 +386,7 @@ async def fetch_relevant_documents(
# Stream found document count # Stream found document count
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(linkup_chunks)} Linkup chunks relevant to the query") streaming_service.only_update_terminal(f"🔗 Found {len(linkup_chunks)} Linkup results related to your query")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
@ -384,7 +396,8 @@ async def fetch_relevant_documents(
# Stream error message # Stream error message
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(error_message, "error") friendly_name = get_connector_friendly_name(connector)
streaming_service.only_update_terminal(f"⚠️ Error searching {friendly_name}: {str(e)}", "error")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Continue with other connectors on error # Continue with other connectors on error
@ -411,7 +424,7 @@ async def fetch_relevant_documents(
# Stream info about deduplicated sources # Stream info about deduplicated sources
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Collected {len(deduplicated_sources)} unique sources across all connectors") streaming_service.only_update_terminal(f"📚 Collected {len(deduplicated_sources)} unique sources across all connectors")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# After all sources are collected and deduplicated, stream them # After all sources are collected and deduplicated, stream them
@ -441,12 +454,44 @@ async def fetch_relevant_documents(
# Stream info about deduplicated documents # Stream info about deduplicated documents
if streaming_service and writer: if streaming_service and writer:
streaming_service.only_update_terminal(f"Found {len(deduplicated_docs)} unique document chunks after deduplication") streaming_service.only_update_terminal(f"🧹 Found {len(deduplicated_docs)} unique document chunks after removing duplicates")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Return deduplicated documents # Return deduplicated documents
return deduplicated_docs return deduplicated_docs
# Emoji used in terminal/status messages for each known connector type.
# Kept at module level so the table is built once, not on every call.
_CONNECTOR_EMOJIS: dict[str, str] = {
    "YOUTUBE_VIDEO": "📹",
    "EXTENSION": "🧩",
    "CRAWLED_URL": "🌐",
    "FILE": "📄",
    "SLACK_CONNECTOR": "💬",
    "NOTION_CONNECTOR": "📘",
    "GITHUB_CONNECTOR": "🐙",
    "LINEAR_CONNECTOR": "📊",
    "TAVILY_API": "🔍",
    "LINKUP_API": "🔗",
}


def get_connector_emoji(connector_name: str) -> str:
    """Return the emoji associated with *connector_name*.

    Args:
        connector_name: Technical connector identifier (e.g. ``"SLACK_CONNECTOR"``).

    Returns:
        The matching emoji, or the generic magnifier "🔎" for unknown connectors.
    """
    return _CONNECTOR_EMOJIS.get(connector_name, "🔎")
# Human-readable labels for each technical connector identifier, used when
# streaming progress messages to the user (e.g. "Tavily Search" not "TAVILY_API").
_CONNECTOR_FRIENDLY_NAMES: dict[str, str] = {
    "YOUTUBE_VIDEO": "YouTube",
    "EXTENSION": "Browser Extension",
    "CRAWLED_URL": "Web Pages",
    "FILE": "Files",
    "SLACK_CONNECTOR": "Slack",
    "NOTION_CONNECTOR": "Notion",
    "GITHUB_CONNECTOR": "GitHub",
    "LINEAR_CONNECTOR": "Linear",
    "TAVILY_API": "Tavily Search",
    "LINKUP_API": "Linkup Search",
}


def get_connector_friendly_name(connector_name: str) -> str:
    """Return a user-friendly display name for *connector_name*.

    Args:
        connector_name: Technical connector identifier (e.g. ``"GITHUB_CONNECTOR"``).

    Returns:
        The friendly label if the connector is known; otherwise the
        identifier itself is returned unchanged as a safe fallback.
    """
    return _CONNECTOR_FRIENDLY_NAMES.get(connector_name, connector_name)
async def process_sections(state: State, config: RunnableConfig, writer: StreamWriter) -> Dict[str, Any]: async def process_sections(state: State, config: RunnableConfig, writer: StreamWriter) -> Dict[str, Any]:
""" """
Process all sections in parallel and combine the results. Process all sections in parallel and combine the results.
@ -463,13 +508,13 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
answer_outline = state.answer_outline answer_outline = state.answer_outline
streaming_service = state.streaming_service streaming_service = state.streaming_service
streaming_service.only_update_terminal(f"Starting to process research sections...") streaming_service.only_update_terminal(f"🚀 Starting to process research sections...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
print(f"Processing sections from outline: {answer_outline is not None}") print(f"Processing sections from outline: {answer_outline is not None}")
if not answer_outline: if not answer_outline:
streaming_service.only_update_terminal("Error: No answer outline was provided. Cannot generate report.", "error") streaming_service.only_update_terminal("Error: No answer outline was provided. Cannot generate report.", "error")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
return { return {
"final_written_report": "No answer outline was provided. Cannot generate final report." "final_written_report": "No answer outline was provided. Cannot generate final report."
@ -481,11 +526,11 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
all_questions.extend(section.questions) all_questions.extend(section.questions)
print(f"Collected {len(all_questions)} questions from all sections") print(f"Collected {len(all_questions)} questions from all sections")
streaming_service.only_update_terminal(f"Found {len(all_questions)} research questions across {len(answer_outline.answer_outline)} sections") streaming_service.only_update_terminal(f"🧩 Found {len(all_questions)} research questions across {len(answer_outline.answer_outline)} sections")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Fetch relevant documents once for all questions # Fetch relevant documents once for all questions
streaming_service.only_update_terminal("Searching for relevant information across all connectors...") streaming_service.only_update_terminal("🔍 Searching for relevant information across all connectors...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
if configuration.num_sections == 1: if configuration.num_sections == 1:
@ -515,7 +560,7 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
except Exception as e: except Exception as e:
error_message = f"Error fetching relevant documents: {str(e)}" error_message = f"Error fetching relevant documents: {str(e)}"
print(error_message) print(error_message)
streaming_service.only_update_terminal(error_message, "error") streaming_service.only_update_terminal(f"{error_message}", "error")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Log the error and continue with an empty list of documents # Log the error and continue with an empty list of documents
# This allows the process to continue, but the report might lack information # This allows the process to continue, but the report might lack information
@ -523,15 +568,22 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
# Consider adding more robust error handling or reporting if needed # Consider adding more robust error handling or reporting if needed
print(f"Fetched {len(relevant_documents)} relevant documents for all sections") print(f"Fetched {len(relevant_documents)} relevant documents for all sections")
streaming_service.only_update_terminal(f"Starting to draft {len(answer_outline.answer_outline)} sections using {len(relevant_documents)} relevant document chunks") streaming_service.only_update_terminal(f"Starting to draft {len(answer_outline.answer_outline)} sections using {len(relevant_documents)} relevant document chunks")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
# Create tasks to process each section in parallel with the same document set # Create tasks to process each section in parallel with the same document set
section_tasks = [] section_tasks = []
streaming_service.only_update_terminal("Creating processing tasks for each section...") streaming_service.only_update_terminal("⚙️ Creating processing tasks for each section...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
for section in answer_outline.answer_outline: for i, section in enumerate(answer_outline.answer_outline):
if i == 0:
sub_section_type = SubSectionType.START
elif i == len(answer_outline.answer_outline) - 1:
sub_section_type = SubSectionType.END
else:
sub_section_type = SubSectionType.MIDDLE
section_tasks.append( section_tasks.append(
process_section_with_documents( process_section_with_documents(
section_title=section.section_title, section_title=section.section_title,
@ -541,19 +593,20 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
search_space_id=configuration.search_space_id, search_space_id=configuration.search_space_id,
relevant_documents=relevant_documents, relevant_documents=relevant_documents,
state=state, state=state,
writer=writer writer=writer,
sub_section_type=sub_section_type
) )
) )
# Run all section processing tasks in parallel # Run all section processing tasks in parallel
print(f"Running {len(section_tasks)} section processing tasks in parallel") print(f"Running {len(section_tasks)} section processing tasks in parallel")
streaming_service.only_update_terminal(f"Processing {len(section_tasks)} sections simultaneously...") streaming_service.only_update_terminal(f"Processing {len(section_tasks)} sections simultaneously...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
section_results = await asyncio.gather(*section_tasks, return_exceptions=True) section_results = await asyncio.gather(*section_tasks, return_exceptions=True)
# Handle any exceptions in the results # Handle any exceptions in the results
streaming_service.only_update_terminal("Combining section results into final report...") streaming_service.only_update_terminal("🧵 Combining section results into final report...")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
processed_results = [] processed_results = []
@ -562,7 +615,7 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
section_title = answer_outline.answer_outline[i].section_title section_title = answer_outline.answer_outline[i].section_title
error_message = f"Error processing section '{section_title}': {str(result)}" error_message = f"Error processing section '{section_title}': {str(result)}"
print(error_message) print(error_message)
streaming_service.only_update_terminal(error_message, "error") streaming_service.only_update_terminal(f"⚠️ {error_message}", "error")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
processed_results.append(error_message) processed_results.append(error_message)
else: else:
@ -580,7 +633,7 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamW
final_written_report = "\n".join(final_report) final_written_report = "\n".join(final_report)
print(f"Generated final report with {len(final_report)} parts") print(f"Generated final report with {len(final_report)} parts")
streaming_service.only_update_terminal("Final research report generated successfully!") streaming_service.only_update_terminal("🎉 Final research report generated successfully!")
writer({"yeild_value": streaming_service._format_annotations()}) writer({"yeild_value": streaming_service._format_annotations()})
if hasattr(state, 'streaming_service') and state.streaming_service: if hasattr(state, 'streaming_service') and state.streaming_service:
@ -612,7 +665,8 @@ async def process_section_with_documents(
relevant_documents: List[Dict[str, Any]], relevant_documents: List[Dict[str, Any]],
user_query: str, user_query: str,
state: State = None, state: State = None,
writer: StreamWriter = None writer: StreamWriter = None,
sub_section_type: SubSectionType = SubSectionType.MIDDLE
) -> str: ) -> str:
""" """
Process a single section using pre-fetched documents. Process a single section using pre-fetched documents.
@ -635,14 +689,14 @@ async def process_section_with_documents(
# Send status update via streaming if available # Send status update via streaming if available
if state and state.streaming_service and writer: if state and state.streaming_service and writer:
state.streaming_service.only_update_terminal(f"Writing section: {section_title} with {len(section_questions)} research questions") state.streaming_service.only_update_terminal(f"📝 Writing section: \"{section_title}\" with {len(section_questions)} research questions")
writer({"yeild_value": state.streaming_service._format_annotations()}) writer({"yeild_value": state.streaming_service._format_annotations()})
# Fallback if no documents found # Fallback if no documents found
if not documents_to_use: if not documents_to_use:
print(f"No relevant documents found for section: {section_title}") print(f"No relevant documents found for section: {section_title}")
if state and state.streaming_service and writer: if state and state.streaming_service and writer:
state.streaming_service.only_update_terminal(f"Warning: No relevant documents found for section: {section_title}", "warning") state.streaming_service.only_update_terminal(f"⚠️ Warning: No relevant documents found for section: \"{section_title}\"", "warning")
writer({"yeild_value": state.streaming_service._format_annotations()}) writer({"yeild_value": state.streaming_service._format_annotations()})
documents_to_use = [ documents_to_use = [
@ -657,6 +711,7 @@ async def process_section_with_documents(
"configurable": { "configurable": {
"sub_section_title": section_title, "sub_section_title": section_title,
"sub_section_questions": section_questions, "sub_section_questions": section_questions,
"sub_section_type": sub_section_type,
"user_query": user_query, "user_query": user_query,
"relevant_documents": documents_to_use, "relevant_documents": documents_to_use,
"user_id": user_id, "user_id": user_id,
@ -670,7 +725,7 @@ async def process_section_with_documents(
# Invoke the sub-section writer graph # Invoke the sub-section writer graph
print(f"Invoking sub_section_writer for: {section_title}") print(f"Invoking sub_section_writer for: {section_title}")
if state and state.streaming_service and writer: if state and state.streaming_service and writer:
state.streaming_service.only_update_terminal(f"Analyzing information and drafting content for section: {section_title}") state.streaming_service.only_update_terminal(f"🧠 Analyzing information and drafting content for section: \"{section_title}\"")
writer({"yeild_value": state.streaming_service._format_annotations()}) writer({"yeild_value": state.streaming_service._format_annotations()})
result = await sub_section_writer_graph.ainvoke(sub_state, config) result = await sub_section_writer_graph.ainvoke(sub_state, config)
@ -680,7 +735,7 @@ async def process_section_with_documents(
# Send section content update via streaming if available # Send section content update via streaming if available
if state and state.streaming_service and writer: if state and state.streaming_service and writer:
state.streaming_service.only_update_terminal(f"Completed writing section: {section_title}") state.streaming_service.only_update_terminal(f"Completed writing section: \"{section_title}\"")
writer({"yeild_value": state.streaming_service._format_annotations()}) writer({"yeild_value": state.streaming_service._format_annotations()})
return final_answer return final_answer
@ -689,7 +744,7 @@ async def process_section_with_documents(
# Send error update via streaming if available # Send error update via streaming if available
if state and state.streaming_service and writer: if state and state.streaming_service and writer:
state.streaming_service.only_update_terminal(f"Error processing section '{section_title}': {str(e)}", "error") state.streaming_service.only_update_terminal(f"❌ Error processing section \"{section_title}\": {str(e)}", "error")
writer({"yeild_value": state.streaming_service._format_annotations()}) writer({"yeild_value": state.streaming_service._format_annotations()})
return f"Error processing section: {section_title}. Details: {str(e)}" return f"Error processing section: {section_title}. Details: {str(e)}"

View file

@ -3,11 +3,19 @@
from __future__ import annotations from __future__ import annotations
from dataclasses import dataclass, fields from dataclasses import dataclass, fields
from enum import Enum
from typing import Optional, List, Any from typing import Optional, List, Any
from langchain_core.runnables import RunnableConfig from langchain_core.runnables import RunnableConfig
class SubSectionType(Enum):
    """Enum defining the type of sub-section.

    Indicates where a sub-section falls within the overall answer outline so
    the writer can adapt its tone (e.g. introduction vs. conclusion). Each
    member's value mirrors its name for predictable serialization.
    """
    # First section of the outline (introduction-style content).
    START = "START"
    # Any interior section between the first and the last.
    MIDDLE = "MIDDLE"
    # Final section of the outline (conclusion-style content).
    END = "END"
@dataclass(kw_only=True) @dataclass(kw_only=True)
class Configuration: class Configuration:
"""The configuration for the agent.""" """The configuration for the agent."""
@ -15,6 +23,7 @@ class Configuration:
# Input parameters provided at invocation # Input parameters provided at invocation
sub_section_title: str sub_section_title: str
sub_section_questions: List[str] sub_section_questions: List[str]
sub_section_type: SubSectionType
user_query: str user_query: str
relevant_documents: List[Any] # Documents provided directly to the agent relevant_documents: List[Any] # Documents provided directly to the agent
user_id: str user_id: str

View file

@ -5,6 +5,7 @@ from typing import Any, Dict
from app.config import config as app_config from app.config import config as app_config
from .prompts import get_citation_system_prompt from .prompts import get_citation_system_prompt
from langchain_core.messages import HumanMessage, SystemMessage from langchain_core.messages import HumanMessage, SystemMessage
from .configuration import SubSectionType
async def rerank_documents(state: State, config: RunnableConfig) -> Dict[str, Any]: async def rerank_documents(state: State, config: RunnableConfig) -> Dict[str, Any]:
""" """
@ -122,10 +123,20 @@ async def write_sub_section(state: State, config: RunnableConfig) -> Dict[str, A
sub_section_questions = configuration.sub_section_questions sub_section_questions = configuration.sub_section_questions
user_query = configuration.user_query # Get the original user query user_query = configuration.user_query # Get the original user query
documents_text = "\n".join(formatted_documents) documents_text = "\n".join(formatted_documents)
sub_section_type = configuration.sub_section_type
# Format the questions as bullet points for clarity # Format the questions as bullet points for clarity
questions_text = "\n".join([f"- {question}" for question in sub_section_questions]) questions_text = "\n".join([f"- {question}" for question in sub_section_questions])
# Provide more context based on the subsection type
section_position_context = ""
if sub_section_type == SubSectionType.START:
section_position_context = "This is the INTRODUCTION section. Focus on providing an overview of the topic, setting the context, and introducing key concepts that will be discussed in later sections. Do not provide any conclusions in this section, as conclusions should only appear in the final section."
elif sub_section_type == SubSectionType.MIDDLE:
section_position_context = "This is a MIDDLE section. Ensure this content flows naturally from previous sections and into subsequent ones. This could be any middle section in the document, so maintain coherence with the overall structure while addressing the specific topic of this section. Do not provide any conclusions in this section, as conclusions should only appear in the final section."
elif sub_section_type == SubSectionType.END:
section_position_context = "This is the CONCLUSION section. Focus on summarizing key points, providing closure, and possibly suggesting implications or future directions related to the topic."
# Construct a clear, structured query for the LLM # Construct a clear, structured query for the LLM
human_message_content = f""" human_message_content = f"""
Now user's query is: Now user's query is:
@ -138,6 +149,14 @@ async def write_sub_section(state: State, config: RunnableConfig) -> Dict[str, A
{section_title} {section_title}
</sub_section_title> </sub_section_title>
<section_position>
{section_position_context}
</section_position>
<guiding_questions>
{questions_text}
</guiding_questions>
Use the provided documents as your source material and cite them properly using the IEEE citation format [X] where X is the source_id. Use the provided documents as your source material and cite them properly using the IEEE citation format [X] where X is the source_id.
<documents> <documents>
{documents_text} {documents_text}

View file

@ -239,7 +239,6 @@ const SourcesDialogContent = ({
const ChatPage = () => { const ChatPage = () => {
const [token, setToken] = React.useState<string | null>(null); const [token, setToken] = React.useState<string | null>(null);
const [activeTab, setActiveTab] = useState("");
const [dialogOpenId, setDialogOpenId] = useState<number | null>(null); const [dialogOpenId, setDialogOpenId] = useState<number | null>(null);
const [sourcesPage, setSourcesPage] = useState(1); const [sourcesPage, setSourcesPage] = useState(1);
const [expandedSources, setExpandedSources] = useState(false); const [expandedSources, setExpandedSources] = useState(false);
@ -252,7 +251,6 @@ const ChatPage = () => {
const [researchMode, setResearchMode] = useState<ResearchMode>("GENERAL"); const [researchMode, setResearchMode] = useState<ResearchMode>("GENERAL");
const [currentTime, setCurrentTime] = useState<string>(''); const [currentTime, setCurrentTime] = useState<string>('');
const [currentDate, setCurrentDate] = useState<string>(''); const [currentDate, setCurrentDate] = useState<string>('');
const [connectorSources, setConnectorSources] = useState<any[]>([]);
const terminalMessagesRef = useRef<HTMLDivElement>(null); const terminalMessagesRef = useRef<HTMLDivElement>(null);
const { connectorSourceItems, isLoading: isLoadingConnectors } = useSearchSourceConnectors(); const { connectorSourceItems, isLoading: isLoadingConnectors } = useSearchSourceConnectors();
@ -476,43 +474,10 @@ const ChatPage = () => {
updateChat(); updateChat();
}, [messages, status, chat_id, researchMode, selectedConnectors, search_space_id]); }, [messages, status, chat_id, researchMode, selectedConnectors, search_space_id]);
// Memoize connector sources to prevent excessive re-renders
const processedConnectorSources = React.useMemo(() => {
if (messages.length === 0) return connectorSources;
// Only process when we have a complete message (not streaming)
if (status !== 'ready') return connectorSources;
// Find the latest assistant message
const assistantMessages = messages.filter(msg => msg.role === 'assistant');
if (assistantMessages.length === 0) return connectorSources;
const latestAssistantMessage = assistantMessages[assistantMessages.length - 1];
if (!latestAssistantMessage?.annotations) return connectorSources;
// Find the latest SOURCES annotation
const annotations = latestAssistantMessage.annotations as any[];
const sourcesAnnotations = annotations.filter(a => a.type === 'SOURCES');
if (sourcesAnnotations.length === 0) return connectorSources;
const latestSourcesAnnotation = sourcesAnnotations[sourcesAnnotations.length - 1];
if (!latestSourcesAnnotation.content) return connectorSources;
// Use this content if it differs from current
return latestSourcesAnnotation.content;
}, [messages, status, connectorSources]);
// Update connector sources when processed value changes
useEffect(() => {
if (processedConnectorSources !== connectorSources) {
setConnectorSources(processedConnectorSources);
}
}, [processedConnectorSources, connectorSources]);
// Check and scroll terminal when terminal info is available // Check and scroll terminal when terminal info is available
useEffect(() => { useEffect(() => {
if (messages.length === 0 || status !== 'ready') return; // Modified to trigger during streaming as well (removed status check)
if (messages.length === 0) return;
// Find the latest assistant message // Find the latest assistant message
const assistantMessages = messages.filter(msg => msg.role === 'assistant'); const assistantMessages = messages.filter(msg => msg.role === 'assistant');
@ -526,10 +491,27 @@ const ChatPage = () => {
const terminalInfoAnnotations = annotations.filter(a => a.type === 'TERMINAL_INFO'); const terminalInfoAnnotations = annotations.filter(a => a.type === 'TERMINAL_INFO');
if (terminalInfoAnnotations.length > 0) { if (terminalInfoAnnotations.length > 0) {
// Schedule scrolling after the DOM has been updated // Always scroll to bottom when terminal info is updated, even during streaming
setTimeout(scrollTerminalToBottom, 100); scrollTerminalToBottom();
} }
}, [messages, status]); }, [messages]); // Removed status from dependencies to ensure it triggers during streaming
// Pure function to get connector sources for a specific message
const getMessageConnectorSources = (message: any): any[] => {
if (!message || message.role !== 'assistant' || !message.annotations) return [];
// Find all SOURCES annotations
const annotations = message.annotations as any[];
const sourcesAnnotations = annotations.filter(a => a.type === 'SOURCES');
// Get the latest SOURCES annotation
if (sourcesAnnotations.length === 0) return [];
const latestSourcesAnnotation = sourcesAnnotations[sourcesAnnotations.length - 1];
if (!latestSourcesAnnotation.content) return [];
return latestSourcesAnnotation.content;
};
// Custom handleSubmit function to include selected connectors and answer type // Custom handleSubmit function to include selected connectors and answer type
const handleSubmit = (e: React.FormEvent) => { const handleSubmit = (e: React.FormEvent) => {
@ -561,17 +543,12 @@ const ChatPage = () => {
scrollToBottom(); scrollToBottom();
}, [messages]); }, [messages]);
// Set activeTab when connectorSources change using a memoized value // Reset sources page when new messages arrive
const activeTabValue = React.useMemo(() => {
return connectorSources.length > 0 ? connectorSources[0].type : "";
}, [connectorSources]);
// Update activeTab when the memoized value changes
useEffect(() => { useEffect(() => {
if (activeTabValue && activeTabValue !== activeTab) { // Reset pagination when we get new messages
setActiveTab(activeTabValue); setSourcesPage(1);
} setExpandedSources(false);
}, [activeTabValue, activeTab]); }, [messages]);
// Scroll terminal to bottom when expanded // Scroll terminal to bottom when expanded
useEffect(() => { useEffect(() => {
@ -582,7 +559,7 @@ const ChatPage = () => {
// Get total sources count for a connector type // Get total sources count for a connector type
const getSourcesCount = (connectorType: string) => { const getSourcesCount = (connectorType: string) => {
return getSourcesCountUtil(connectorSources, connectorType); return getSourcesCountUtil(getMessageConnectorSources(messages[messages.length - 1]), connectorType);
}; };
// Function to check scroll position and update indicators // Function to check scroll position and update indicators
@ -638,23 +615,14 @@ const ChatPage = () => {
if (assistantMessages.length === 0) return null; if (assistantMessages.length === 0) return null;
const latestAssistantMessage = assistantMessages[assistantMessages.length - 1]; const latestAssistantMessage = assistantMessages[assistantMessages.length - 1];
if (!latestAssistantMessage?.annotations) return null;
// Find all SOURCES annotations // Use our helper function to get sources
const annotations = latestAssistantMessage.annotations as any[]; const sources = getMessageConnectorSources(latestAssistantMessage);
const sourcesAnnotations = annotations.filter( if (sources.length === 0) return null;
(annotation) => annotation.type === 'SOURCES'
);
// Get the latest SOURCES annotation
if (sourcesAnnotations.length === 0) return null;
const latestSourcesAnnotation = sourcesAnnotations[sourcesAnnotations.length - 1];
if (!latestSourcesAnnotation.content) return null;
// Flatten all sources from all connectors // Flatten all sources from all connectors
const allSources: Source[] = []; const allSources: Source[] = [];
latestSourcesAnnotation.content.forEach((connector: ConnectorSource) => { sources.forEach((connector: ConnectorSource) => {
if (connector.sources && Array.isArray(connector.sources)) { if (connector.sources && Array.isArray(connector.sources)) {
connector.sources.forEach((source: SourceItem) => { connector.sources.forEach((source: SourceItem) => {
allSources.push({ allSources.push({
@ -675,23 +643,14 @@ const ChatPage = () => {
} else { } else {
// Use the specific message by index // Use the specific message by index
const message = messages[messageIndex]; const message = messages[messageIndex];
if (!message || message.role !== 'assistant' || !message.annotations) return null;
// Find all SOURCES annotations // Use our helper function to get sources
const annotations = message.annotations as any[]; const sources = getMessageConnectorSources(message);
const sourcesAnnotations = annotations.filter( if (sources.length === 0) return null;
(annotation) => annotation.type === 'SOURCES'
);
// Get the latest SOURCES annotation
if (sourcesAnnotations.length === 0) return null;
const latestSourcesAnnotation = sourcesAnnotations[sourcesAnnotations.length - 1];
if (!latestSourcesAnnotation.content) return null;
// Flatten all sources from all connectors // Flatten all sources from all connectors
const allSources: Source[] = []; const allSources: Source[] = [];
latestSourcesAnnotation.content.forEach((connector: ConnectorSource) => { sources.forEach((connector: ConnectorSource) => {
if (connector.sources && Array.isArray(connector.sources)) { if (connector.sources && Array.isArray(connector.sources)) {
connector.sources.forEach((source: SourceItem) => { connector.sources.forEach((source: SourceItem) => {
allSources.push({ allSources.push({
@ -712,6 +671,34 @@ const ChatPage = () => {
} }
}, [messages]); }, [messages]);
// Pure function for rendering terminal content - no hooks allowed here
const renderTerminalContent = (message: any) => {
if (!message.annotations) return null;
// Get all TERMINAL_INFO annotations
const terminalInfoAnnotations = (message.annotations as any[])
.filter(a => a.type === 'TERMINAL_INFO');
// Get the latest TERMINAL_INFO annotation
const latestTerminalInfo = terminalInfoAnnotations.length > 0
? terminalInfoAnnotations[terminalInfoAnnotations.length - 1]
: null;
// Render the content of the latest TERMINAL_INFO annotation
return latestTerminalInfo?.content.map((item: any, idx: number) => (
<div key={idx} className="py-0.5 flex items-start text-gray-300">
<span className="text-gray-500 text-xs mr-2 w-10 flex-shrink-0">[{String(idx).padStart(2, '0')}:{String(Math.floor(idx * 2)).padStart(2, '0')}]</span>
<span className="mr-2 opacity-70">{'>'}</span>
<span className={`
${item.type === 'info' ? 'text-blue-300' : ''}
${item.type === 'success' ? 'text-green-300' : ''}
${item.type === 'error' ? 'text-red-300' : ''}
${item.type === 'warning' ? 'text-yellow-300' : ''}
`}>{item.text}</span>
</div>
));
};
return ( return (
<> <>
<div className="flex flex-col min-h-[calc(100vh-4rem)] min-w-4xl max-w-4xl mx-auto px-4 py-8 overflow-x-hidden justify-center gap-4"> <div className="flex flex-col min-h-[calc(100vh-4rem)] min-w-4xl max-w-4xl mx-auto px-4 py-8 overflow-x-hidden justify-center gap-4">
@ -781,30 +768,9 @@ const ChatPage = () => {
<span className="mr-1">$</span> <span className="mr-1">$</span>
<span>surfsense-researcher</span> <span>surfsense-researcher</span>
</div> </div>
{message.annotations && (() => {
// Get all TERMINAL_INFO annotations
const terminalInfoAnnotations = (message.annotations as any[])
.filter(a => a.type === 'TERMINAL_INFO');
// Get the latest TERMINAL_INFO annotation {renderTerminalContent(message)}
const latestTerminalInfo = terminalInfoAnnotations.length > 0
? terminalInfoAnnotations[terminalInfoAnnotations.length - 1]
: null;
// Render the content of the latest TERMINAL_INFO annotation
return latestTerminalInfo?.content.map((item: any, idx: number) => (
<div key={idx} className="py-0.5 flex items-start text-gray-300">
<span className="text-gray-500 text-xs mr-2 w-10 flex-shrink-0">[{String(idx).padStart(2, '0')}:{String(Math.floor(idx * 2)).padStart(2, '0')}]</span>
<span className="mr-2 opacity-70">{'>'}</span>
<span className={`
${item.type === 'info' ? 'text-blue-300' : ''}
${item.type === 'success' ? 'text-green-300' : ''}
${item.type === 'error' ? 'text-red-300' : ''}
${item.type === 'warning' ? 'text-yellow-300' : ''}
`}>{item.text}</span>
</div>
));
})()}
<div className="mt-2 flex items-center"> <div className="mt-2 flex items-center">
<span className="text-gray-500 text-xs mr-2 w-10 flex-shrink-0">[00:13]</span> <span className="text-gray-500 text-xs mr-2 w-10 flex-shrink-0">[00:13]</span>
<span className="text-green-400 mr-1">researcher@surfsense</span> <span className="text-green-400 mr-1">researcher@surfsense</span>
@ -836,105 +802,120 @@ const ChatPage = () => {
<span className="font-medium">Sources</span> <span className="font-medium">Sources</span>
</div> </div>
<Tabs {(() => {
defaultValue={connectorSources.length > 0 ? connectorSources[0].type : "CRAWLED_URL"} // Get sources for this specific message
className="w-full" const messageConnectorSources = getMessageConnectorSources(message);
onValueChange={setActiveTab}
>
<div className="mb-4">
<div className="flex items-center">
<Button
variant="ghost"
size="icon"
onClick={scrollTabsLeft}
className="flex-shrink-0 mr-2 z-10"
disabled={!canScrollLeft}
>
<ChevronLeft className="h-4 w-4" />
</Button>
<div className="flex-1 overflow-hidden"> if (messageConnectorSources.length === 0) {
<div className="flex overflow-x-auto hide-scrollbar" ref={tabsListRef} onScroll={updateScrollIndicators}> return (
<TabsList className="flex-1 bg-transparent border-0 p-0 custom-tabs-list"> <div className="text-center py-8 text-gray-500 dark:text-gray-400 border border-dashed rounded-md">
{connectorSources.map((connector) => ( <Database className="h-8 w-8 mx-auto mb-2 opacity-50" />
<TabsTrigger </div>
key={connector.id} );
value={connector.type} }
className="flex items-center gap-1 mx-1 data-[state=active]:bg-gray-100 dark:data-[state=active]:bg-gray-800 rounded-md"
> // Use these message-specific sources for the Tabs component
{getConnectorIcon(connector.type)} return (
<span className="hidden sm:inline ml-1">{connector.name.split(' ')[0]}</span> <Tabs
<span className="bg-gray-200 dark:bg-gray-700 px-1.5 py-0.5 rounded text-xs"> defaultValue={messageConnectorSources.length > 0 ? messageConnectorSources[0].type : "CRAWLED_URL"}
{getSourcesCount(connector.type)} className="w-full"
</span> >
</TabsTrigger> <div className="mb-4">
))} <div className="flex items-center">
</TabsList> <Button
variant="ghost"
size="icon"
onClick={scrollTabsLeft}
className="flex-shrink-0 mr-2 z-10"
disabled={!canScrollLeft}
>
<ChevronLeft className="h-4 w-4" />
</Button>
<div className="flex-1 overflow-hidden">
<div className="flex overflow-x-auto hide-scrollbar" ref={tabsListRef} onScroll={updateScrollIndicators}>
<TabsList className="flex-1 bg-transparent border-0 p-0 custom-tabs-list">
{messageConnectorSources.map((connector) => (
<TabsTrigger
key={connector.id}
value={connector.type}
className="flex items-center gap-1 mx-1 data-[state=active]:bg-gray-100 dark:data-[state=active]:bg-gray-800 rounded-md"
>
{getConnectorIcon(connector.type)}
<span className="hidden sm:inline ml-1">{connector.name.split(' ')[0]}</span>
<span className="bg-gray-200 dark:bg-gray-700 px-1.5 py-0.5 rounded text-xs">
{connector.sources?.length || 0}
</span>
</TabsTrigger>
))}
</TabsList>
</div>
</div>
<Button
variant="ghost"
size="icon"
onClick={scrollTabsRight}
className="flex-shrink-0 ml-2 z-10"
disabled={!canScrollRight}
>
<ChevronRight className="h-4 w-4" />
</Button>
</div> </div>
</div> </div>
<Button {messageConnectorSources.map(connector => (
variant="ghost" <TabsContent key={connector.id} value={connector.type} className="mt-0">
size="icon" <div className="space-y-3">
onClick={scrollTabsRight} {connector.sources?.slice(0, INITIAL_SOURCES_DISPLAY)?.map((source: any) => (
className="flex-shrink-0 ml-2 z-10" <Card key={source.id} className="p-3 hover:bg-gray-50 dark:hover:bg-gray-800 cursor-pointer">
disabled={!canScrollRight} <div className="flex items-start gap-3">
> <div className="flex-shrink-0 w-6 h-6 flex items-center justify-center">
<ChevronRight className="h-4 w-4" /> {getConnectorIcon(connector.type)}
</Button> </div>
</div> <div className="flex-1">
</div> <h3 className="font-medium text-sm">{source.title}</h3>
<p className="text-sm text-gray-500 dark:text-gray-400">{source.description}</p>
</div>
<Button
variant="ghost"
size="icon"
className="h-6 w-6"
onClick={() => window.open(source.url, '_blank')}
>
<ExternalLink className="h-4 w-4" />
</Button>
</div>
</Card>
))}
{connectorSources.map(connector => ( {connector.sources?.length > INITIAL_SOURCES_DISPLAY && (
<TabsContent key={connector.id} value={connector.type} className="mt-0"> <Dialog open={dialogOpenId === connector.id} onOpenChange={(open) => setDialogOpenId(open ? connector.id : null)}>
<div className="space-y-3"> <DialogTrigger asChild>
{getMainViewSources(connector)?.map((source: any) => ( <Button variant="ghost" className="w-full text-sm text-gray-500 dark:text-gray-400">
<Card key={source.id} className="p-3 hover:bg-gray-50 dark:hover:bg-gray-800 cursor-pointer"> Show {connector.sources.length - INITIAL_SOURCES_DISPLAY} More Sources
<div className="flex items-start gap-3"> </Button>
<div className="flex-shrink-0 w-6 h-6 flex items-center justify-center"> </DialogTrigger>
{getConnectorIcon(connector.type)} <DialogContent className="sm:max-w-[600px] max-h-[80vh] overflow-y-auto dark:border-gray-700">
</div> <SourcesDialogContent
<div className="flex-1"> connector={connector}
<h3 className="font-medium text-sm">{source.title}</h3> sourceFilter={sourceFilter}
<p className="text-sm text-gray-500 dark:text-gray-400">{source.description}</p> expandedSources={expandedSources}
</div> sourcesPage={sourcesPage}
<Button setSourcesPage={setSourcesPage}
variant="ghost" setSourceFilter={setSourceFilter}
size="icon" setExpandedSources={setExpandedSources}
className="h-6 w-6" isLoadingMore={false}
onClick={() => window.open(source.url, '_blank')} />
> </DialogContent>
<ExternalLink className="h-4 w-4" /> </Dialog>
</Button> )}
</div> </div>
</Card> </TabsContent>
))} ))}
</Tabs>
{connector.sources.length > INITIAL_SOURCES_DISPLAY && ( );
<Dialog open={dialogOpenId === connector.id} onOpenChange={(open) => setDialogOpenId(open ? connector.id : null)}> })()}
<DialogTrigger asChild>
<Button variant="ghost" className="w-full text-sm text-gray-500 dark:text-gray-400">
Show {connector.sources.length - INITIAL_SOURCES_DISPLAY} More Sources
</Button>
</DialogTrigger>
<DialogContent className="sm:max-w-[600px] max-h-[80vh] overflow-y-auto dark:border-gray-700">
<SourcesDialogContent
connector={connector}
sourceFilter={sourceFilter}
expandedSources={expandedSources}
sourcesPage={sourcesPage}
setSourcesPage={setSourcesPage}
setSourceFilter={setSourceFilter}
setExpandedSources={setExpandedSources}
isLoadingMore={false}
/>
</DialogContent>
</Dialog>
)}
</div>
</TabsContent>
))}
</Tabs>
</div> </div>
{/* Answer Section */} {/* Answer Section */}