Mirror of https://github.com/MODSetter/SurfSense.git
Synced 2025-09-02 10:39:13 +00:00

Merge pull request #184 from MODSetter/dev
feat: Added Follow Up Qns Logic

Commit aac7f8c755
7 changed files with 500 additions and 44 deletions
@@ -1,6 +1,6 @@
 from langgraph.graph import StateGraph
 from .state import State
-from .nodes import reformulate_user_query, write_answer_outline, process_sections, handle_qna_workflow
+from .nodes import reformulate_user_query, write_answer_outline, process_sections, handle_qna_workflow, generate_further_questions
 from .configuration import Configuration, ResearchMode
 from typing import TypedDict, List, Dict, Any, Optional
 
@@ -17,7 +17,8 @@ def build_graph():
 
     This function constructs the researcher agent graph with conditional routing
     based on research_mode - QNA mode uses a direct Q&A workflow while other modes
-    use the full report generation pipeline.
+    use the full report generation pipeline. Both paths generate follow-up questions
+    at the end using the reranked documents from the sub-agents.
 
     Returns:
         A compiled LangGraph workflow
@@ -30,6 +31,7 @@ def build_graph():
     workflow.add_node("handle_qna_workflow", handle_qna_workflow)
     workflow.add_node("write_answer_outline", write_answer_outline)
     workflow.add_node("process_sections", process_sections)
+    workflow.add_node("generate_further_questions", generate_further_questions)
 
     # Define the edges
     workflow.add_edge("__start__", "reformulate_user_query")
@@ -53,12 +55,15 @@ def build_graph():
         }
     )
 
-    # QNA workflow path
-    workflow.add_edge("handle_qna_workflow", "__end__")
+    # QNA workflow path: handle_qna_workflow -> generate_further_questions -> __end__
+    workflow.add_edge("handle_qna_workflow", "generate_further_questions")
 
-    # Report generation workflow path
+    # Report generation workflow path: write_answer_outline -> process_sections -> generate_further_questions -> __end__
     workflow.add_edge("write_answer_outline", "process_sections")
-    workflow.add_edge("process_sections", "__end__")
+    workflow.add_edge("process_sections", "generate_further_questions")
+
+    # Both paths end after generating further questions
+    workflow.add_edge("generate_further_questions", "__end__")
 
     # Compile the workflow into an executable graph
     graph = workflow.compile()
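Reviewer note: a minimal sketch of the topology this hunk produces, useful for eyeballing that both branches now funnel through `generate_further_questions` before `__end__`. The node names come from the diff; the dict itself is illustrative, not the real compiled graph object.

```python
# Edge map as wired above. The reformulate_user_query entry lists both
# conditional targets (the actual routing happens in add_conditional_edges).
edges = {
    "__start__": ["reformulate_user_query"],
    "reformulate_user_query": ["handle_qna_workflow", "write_answer_outline"],
    "handle_qna_workflow": ["generate_further_questions"],
    "write_answer_outline": ["process_sections"],
    "process_sections": ["generate_further_questions"],
    "generate_further_questions": ["__end__"],
}

# Every predecessor of __end__ should now be generate_further_questions.
preds_of_end = [src for src, dsts in edges.items() if "__end__" in dsts]
assert preds_of_end == ["generate_further_questions"]
```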
@@ -9,7 +9,7 @@ from langchain_core.runnables import RunnableConfig
 from sqlalchemy.ext.asyncio import AsyncSession
 
 from .configuration import Configuration, SearchMode
-from .prompts import get_answer_outline_system_prompt
+from .prompts import get_answer_outline_system_prompt, get_further_questions_system_prompt
 from .state import State
 from .sub_section_writer.graph import graph as sub_section_writer_graph
 from .sub_section_writer.configuration import SubSectionType
@@ -924,8 +924,11 @@ async def process_sections(state: State, config: RunnableConfig, writer: StreamWriter
     # Skip the final update since we've been streaming incremental updates
     # The final answer from each section is already shown in the UI
 
+    # Use the shared documents for further question generation
+    # Since all sections used the same document pool, we can use it directly
     return {
-        "final_written_report": final_written_report
+        "final_written_report": final_written_report,
+        "reranked_documents": all_documents
     }
 
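Reviewer note: returning the extra key works because LangGraph merges a node's returned dict into the matching state fields, so `reranked_documents` becomes visible to the next node. A dependency-free sketch of that merge semantics (toy state, not LangGraph itself):

```python
from dataclasses import dataclass, replace
from typing import Any, List, Optional

@dataclass
class ToyState:
    final_written_report: Optional[str] = None
    reranked_documents: Optional[List[Any]] = None

def apply_node_update(state: ToyState, update: dict) -> ToyState:
    # LangGraph-style shallow merge: only the returned keys change.
    return replace(state, **update)

state = apply_node_update(ToyState(), {
    "final_written_report": "...",
    "reranked_documents": [{"content": "..."}],
})
assert state.reranked_documents == [{"content": "..."}]
```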
@@ -1194,6 +1197,7 @@ async def handle_qna_workflow(state: State, config: RunnableConfig, writer: StreamWriter
 
         # Track streaming content for real-time updates
         complete_content = ""
+        captured_reranked_documents = []
 
         # Call the QNA agent with streaming
         async for _chunk_type, chunk in qna_agent_graph.astream(qna_state, qna_config, stream_mode=["values"]):
@@ -1215,6 +1219,10 @@ async def handle_qna_workflow(state: State, config: RunnableConfig, writer: StreamWriter
                     streaming_service.only_update_answer(answer_lines)
                     writer({"yeild_value": streaming_service._format_annotations()})
 
+            # Capture reranked documents from QNA agent for further question generation
+            if "reranked_documents" in chunk:
+                captured_reranked_documents = chunk["reranked_documents"]
+
         # Set default if no content was received
         if not complete_content:
             complete_content = "I couldn't find relevant information in your knowledge base to answer this question."
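Reviewer note: because `astream(..., stream_mode=["values"])` emits successive state snapshots, the `in chunk` check above simply keeps the most recent value of that key. A toy illustration of the capture pattern:

```python
# Toy snapshots standing in for the streamed QNA agent state values.
snapshots = [
    {"final_answer": "partial"},
    {"final_answer": "full", "reranked_documents": [{"content": "..."}]},
]

captured_reranked_documents = []
for chunk in snapshots:
    if "reranked_documents" in chunk:
        captured_reranked_documents = chunk["reranked_documents"]

assert captured_reranked_documents == [{"content": "..."}]
```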
@@ -1222,9 +1230,10 @@ async def handle_qna_workflow(state: State, config: RunnableConfig, writer: StreamWriter
         streaming_service.only_update_terminal("🎉 Q&A answer generated successfully!")
         writer({"yeild_value": streaming_service._format_annotations()})
 
-        # Return the final answer in the expected state field
+        # Return the final answer and captured reranked documents for further question generation
         return {
-            "final_written_report": complete_content
+            "final_written_report": complete_content,
+            "reranked_documents": captured_reranked_documents
         }
 
     except Exception as e:
@@ -1238,3 +1247,166 @@ async def handle_qna_workflow(state: State, config: RunnableConfig, writer: StreamWriter
         }
+
+
+async def generate_further_questions(state: State, config: RunnableConfig, writer: StreamWriter) -> Dict[str, Any]:
+    """
+    Generate contextually relevant follow-up questions based on chat history and available documents.
+
+    This node takes the chat history and reranked documents from sub-agents (qna_agent or sub_section_writer)
+    and uses an LLM to generate follow-up questions that would naturally extend the conversation
+    and provide additional value to the user.
+
+    Returns:
+        Dict containing the further questions in the "further_questions" key for state update.
+    """
+    from app.services.llm_service import get_user_fast_llm
+
+    # Get configuration and state data
+    configuration = Configuration.from_runnable_config(config)
+    chat_history = state.chat_history
+    user_id = configuration.user_id
+    streaming_service = state.streaming_service
+
+    # Get reranked documents from the state (will be populated by sub-agents)
+    reranked_documents = getattr(state, 'reranked_documents', None) or []
+
+    streaming_service.only_update_terminal("🤔 Generating follow-up questions...")
+    writer({"yeild_value": streaming_service._format_annotations()})
+
+    # Get user's fast LLM
+    llm = await get_user_fast_llm(state.db_session, user_id)
+    if not llm:
+        error_message = f"No fast LLM configured for user {user_id}"
+        print(error_message)
+        streaming_service.only_update_terminal(f"❌ {error_message}", "error")
+
+        # Stream empty further questions to UI
+        streaming_service.only_update_further_questions([])
+        writer({"yeild_value": streaming_service._format_annotations()})
+        return {"further_questions": []}
+
+    # Format chat history for the prompt
+    chat_history_xml = "<chat_history>\n"
+    for message in chat_history:
+        if hasattr(message, 'type'):
+            if message.type == "human":
+                chat_history_xml += f"<user>{message.content}</user>\n"
+            elif message.type == "ai":
+                chat_history_xml += f"<assistant>{message.content}</assistant>\n"
+        else:
+            # Handle other message types if needed
+            chat_history_xml += f"<message>{str(message)}</message>\n"
+    chat_history_xml += "</chat_history>"
+
+    # Format available documents for the prompt
+    documents_xml = "<documents>\n"
+    for i, doc in enumerate(reranked_documents):
+        document_info = doc.get("document", {})
+        source_id = document_info.get("id", f"doc_{i}")
+        source_type = document_info.get("document_type", "UNKNOWN")
+        content = doc.get("content", "")
+
+        documents_xml += f"<document>\n"
+        documents_xml += f"<metadata>\n"
+        documents_xml += f"<source_id>{source_id}</source_id>\n"
+        documents_xml += f"<source_type>{source_type}</source_type>\n"
+        documents_xml += f"</metadata>\n"
+        documents_xml += f"<content>\n{content}</content>\n"
+        documents_xml += f"</document>\n"
+    documents_xml += "</documents>"
+
+    # Create the human message content
+    human_message_content = f"""
+    {chat_history_xml}
+
+    {documents_xml}
+
+    Based on the chat history and available documents above, generate 3-5 contextually relevant follow-up questions that would naturally extend the conversation and provide additional value to the user. Make sure the questions can be reasonably answered using the available documents or knowledge base.
+
+    Your response MUST be valid JSON in exactly this format:
+    {{
+        "further_questions": [
+            {{
+                "id": 0,
+                "question": "further qn 1"
+            }},
+            {{
+                "id": 1,
+                "question": "further qn 2"
+            }}
+        ]
+    }}
+
+    Do not include any other text or explanation. Only return the JSON.
+    """
+
+    streaming_service.only_update_terminal("🧠 Analyzing conversation context to suggest relevant questions...")
+    writer({"yeild_value": streaming_service._format_annotations()})
+
+    # Create messages for the LLM
+    messages = [
+        SystemMessage(content=get_further_questions_system_prompt()),
+        HumanMessage(content=human_message_content)
+    ]
+
+    try:
+        # Call the LLM
+        response = await llm.ainvoke(messages)
+
+        # Parse the JSON response
+        content = response.content
+
+        # Find the JSON in the content
+        json_start = content.find('{')
+        json_end = content.rfind('}') + 1
+        if json_start >= 0 and json_end > json_start:
+            json_str = content[json_start:json_end]
+
+            # Parse the JSON string
+            parsed_data = json.loads(json_str)
+
+            # Extract the further_questions array
+            further_questions = parsed_data.get("further_questions", [])
+
+            streaming_service.only_update_terminal(f"✅ Generated {len(further_questions)} contextual follow-up questions!")
+
+            # Stream the further questions to the UI
+            streaming_service.only_update_further_questions(further_questions)
+            writer({"yeild_value": streaming_service._format_annotations()})
+
+            print(f"Successfully generated {len(further_questions)} further questions")
+
+            return {"further_questions": further_questions}
+        else:
+            # If JSON structure not found, return empty list
+            error_message = "Could not find valid JSON in LLM response for further questions"
+            print(error_message)
+            streaming_service.only_update_terminal(f"⚠️ {error_message}", "warning")
+
+            # Stream empty further questions to UI
+            streaming_service.only_update_further_questions([])
+            writer({"yeild_value": streaming_service._format_annotations()})
+            return {"further_questions": []}
+
+    except (json.JSONDecodeError, ValueError) as e:
+        # Log the error and return empty list
+        error_message = f"Error parsing further questions response: {str(e)}"
+        print(error_message)
+        streaming_service.only_update_terminal(f"⚠️ {error_message}", "warning")
+
+        # Stream empty further questions to UI
+        streaming_service.only_update_further_questions([])
+        writer({"yeild_value": streaming_service._format_annotations()})
+        return {"further_questions": []}
+
+    except Exception as e:
+        # Handle any other errors
+        error_message = f"Error generating further questions: {str(e)}"
+        print(error_message)
+        streaming_service.only_update_terminal(f"⚠️ {error_message}", "warning")
+
+        # Stream empty further questions to UI
+        streaming_service.only_update_further_questions([])
+        writer({"yeild_value": streaming_service._format_annotations()})
+        return {"further_questions": []}
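Reviewer note: the brace-scanning extraction above (first `{` to last `}`, then `json.loads`, else empty) can be exercised standalone. A minimal sketch of the same parse-or-empty behavior; the function name is hypothetical, not part of the PR:

```python
import json
from typing import Any, Dict, List

def extract_further_questions(content: str) -> List[Dict[str, Any]]:
    """Mirror of the node's parse logic: first '{' to last '}', else empty."""
    json_start = content.find('{')
    json_end = content.rfind('}') + 1
    if json_start < 0 or json_end <= json_start:
        return []
    try:
        return json.loads(content[json_start:json_end]).get("further_questions", [])
    except (json.JSONDecodeError, ValueError):
        return []

assert extract_further_questions(
    'noise {"further_questions": [{"id": 0, "question": "q"}]} noise'
) == [{"id": 0, "question": "q"}]
assert extract_further_questions("no json here") == []
```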
@@ -90,3 +90,135 @@ Number of Sections: 3
 </examples>
 </answer_outline_system>
 """
+
+
+def get_further_questions_system_prompt():
+    return f"""
+Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")}
+<further_questions_system>
+You are an expert research assistant specializing in generating contextually relevant follow-up questions. Your task is to analyze the chat history and available documents to suggest further questions that would naturally extend the conversation and provide additional value to the user.
+
+<input>
+- chat_history: Provided in XML format within <chat_history> tags, containing <user> and <assistant> message pairs that show the chronological conversation flow. This provides context about what has already been discussed.
+- available_documents: Provided in XML format within <documents> tags, containing individual <document> elements with <metadata> (source_id, source_type) and <content> sections. This helps understand what information is accessible for answering potential follow-up questions.
+</input>
+
+<output_format>
+A JSON object with the following structure:
+{{
+    "further_questions": [
+        {{
+            "id": 0,
+            "question": "further qn 1"
+        }},
+        {{
+            "id": 1,
+            "question": "further qn 2"
+        }}
+    ]
+}}
+</output_format>
+
+<instructions>
+1. **Analyze Chat History:** Review the entire conversation flow to understand:
+   * The main topics and themes discussed
+   * The user's interests and areas of focus
+   * Questions that have been asked and answered
+   * Any gaps or areas that could be explored further
+   * The depth level of the current discussion
+
+2. **Evaluate Available Documents:** Consider the documents in context to identify:
+   * Additional information that hasn't been explored yet
+   * Related topics that could be of interest
+   * Specific details or data points that could warrant deeper investigation
+   * Cross-references or connections between different documents
+
+3. **Generate Relevant Follow-up Questions:** Create 3-5 further questions that:
+   * Are directly related to the ongoing conversation but explore new angles
+   * Can be reasonably answered using the available documents or knowledge base
+   * Progress the conversation forward rather than repeating previous topics
+   * Match the user's apparent level of interest and expertise
+   * Are specific and actionable rather than overly broad
+   * Consider practical applications, comparisons, deeper analysis, or related concepts
+
+4. **Ensure Question Quality:** Each question should:
+   * Be clear and well-formulated
+   * Provide genuine value to the user
+   * Be distinct from other suggested questions
+   * Be answerable within the current context
+   * Encourage meaningful exploration of the topic
+
+5. **Prioritize and Order:** Arrange questions by relevance and natural progression:
+   * Most directly related to the current discussion first
+   * Questions that build upon previous answers
+   * Questions that explore practical applications or implications
+   * Questions that introduce related but new concepts
+
+6. **Adhere Strictly to Output Format:** Ensure the final output is a valid JSON object with:
+   * Correct field names (`further_questions`, `id`, `question`)
+   * Sequential numbering starting from 0
+   * Proper data types and JSON formatting
+</instructions>
+
+<examples>
+Input:
+<chat_history>
+<user>What are the best machine learning algorithms for text classification?</user>
+<assistant>For text classification, several algorithms work well depending on your specific needs:
+
+**Traditional Methods:**
+- **Support Vector Machines (SVM)** - Excellent for high-dimensional text data
+- **Naive Bayes** - Simple, fast, and works well with small datasets
+- **Logistic Regression** - Good baseline with interpretable results
+
+**Modern Deep Learning:**
+- **Neural Networks** - More complex but can capture intricate patterns
+- **Transformer models** - State-of-the-art for most text classification tasks
+
+The choice depends on your dataset size, computational resources, and accuracy requirements.</assistant>
+</chat_history>
+
+<documents>
+<document>
+<metadata>
+<source_id>101</source_id>
+<source_type>FILE</source_type>
+</metadata>
+<content>
+# Machine Learning for Text Classification: A Comprehensive Guide
+
+## Performance Comparison
+Recent studies show that transformer-based models achieve 95%+ accuracy on most text classification benchmarks, while traditional methods like SVM typically achieve 85-90% accuracy.
+
+## Dataset Considerations
+- Small datasets (< 1000 samples): Naive Bayes, SVM
+- Large datasets (> 10,000 samples): Neural networks, transformers
+- Imbalanced datasets: Require special handling with techniques like SMOTE
+</content>
+</document>
+</documents>
+
+Output:
+{{
+    "further_questions": [
+        {{
+            "id": 0,
+            "question": "What are the key differences in performance between traditional algorithms like SVM and modern deep learning approaches for text classification?"
+        }},
+        {{
+            "id": 1,
+            "question": "How do you handle imbalanced datasets when training text classification models?"
+        }},
+        {{
+            "id": 2,
+            "question": "What preprocessing techniques are most effective for improving text classification accuracy?"
+        }},
+        {{
+            "id": 3,
+            "question": "Are there specific domains or use cases where certain classification algorithms perform better than others?"
+        }}
+    ]
+}}
+</examples>
+</further_questions_system>
+"""
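Reviewer note: the doubled braces in `get_further_questions_system_prompt` are f-string escapes, so the model sees single braces in the JSON template while `{datetime...}` still interpolates. A two-line check of that rendering:

```python
import datetime

# Doubled braces render literally; the strftime placeholder interpolates.
prompt = f"""Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")}
{{ "further_questions": [] }}"""
assert '{ "further_questions": [] }' in prompt
```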
@@ -26,6 +26,10 @@ class State:
     reformulated_query: Optional[str] = field(default=None)
     # Using field to explicitly mark as part of state
     answer_outline: Optional[Any] = field(default=None)
+    further_questions: Optional[Any] = field(default=None)
+
+    # Temporary field to hold reranked documents from sub-agents for further question generation
+    reranked_documents: Optional[List[Any]] = field(default=None)
 
     # OUTPUT: Populated by agent nodes
     # Using field to explicitly mark as part of state
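Reviewer note: a minimal sketch of the state shape these fields extend, so the hand-off is visible in one place. Field names come from the diff; the full class has more members than shown here:

```python
from dataclasses import dataclass, field
from typing import Any, List, Optional

@dataclass
class State:
    # Subset of the researcher state relevant to this PR (sketch, not the full class).
    reformulated_query: Optional[str] = field(default=None)
    answer_outline: Optional[Any] = field(default=None)
    further_questions: Optional[Any] = field(default=None)
    # Temporary hand-off from sub-agents to generate_further_questions.
    reranked_documents: Optional[List[Any]] = field(default=None)
```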
@@ -54,10 +54,14 @@ async def handle_chat_data(
         if message['role'] == "user":
             langchain_chat_history.append(HumanMessage(content=message['content']))
         elif message['role'] == "assistant":
-            # Last annotation type will always be "ANSWER" here
-            answer_annotation = message['annotations'][-1]
-            answer_text = ""
-            if answer_annotation['type'] == "ANSWER":
+            # Find the last "ANSWER" annotation specifically
+            answer_annotation = None
+            for annotation in reversed(message['annotations']):
+                if annotation['type'] == "ANSWER":
+                    answer_annotation = annotation
+                    break
+
+            if answer_annotation:
                 answer_text = answer_annotation['content']
                 # If content is a list, join it into a single string
                 if isinstance(answer_text, list):
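Reviewer note: this fix matters because the new FURTHER_QUESTIONS annotation is appended after ANSWER, so the old `annotations[-1]` shortcut now grabs the wrong entry. A sketch of the failure and the replacement scan (the non-ANSWER types here are assumed placeholders, not confirmed by the diff):

```python
annotations = [
    {"type": "TERMINAL_INFO", "content": []},      # assumed placeholder type
    {"type": "SOURCES", "content": []},            # assumed placeholder type
    {"type": "ANSWER", "content": "The answer."},
    {"type": "FURTHER_QUESTIONS", "content": []},  # new last entry in this PR
]

assert annotations[-1]["type"] != "ANSWER"  # the old shortcut's assumption breaks

# The reversed scan finds the last ANSWER regardless of position:
answer = next(a for a in reversed(annotations) if a["type"] == "ANSWER")
assert answer["content"] == "The answer."
```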
@@ -17,6 +17,10 @@ class StreamingService:
         {
             "type": "ANSWER",
             "content": []
+        },
+        {
+            "type": "FURTHER_QUESTIONS",
+            "content": []
         }
     ]
     # It is used to send annotations to the frontend
@@ -69,4 +73,17 @@ class StreamingService:
         self.message_annotations[2]["content"] = answer
         return self.message_annotations
 
+    def only_update_further_questions(self, further_questions: List[Dict[str, Any]]) -> str:
+        """
+        Update the further questions annotation
+
+        Args:
+            further_questions: List of further question objects with id and question fields
+
+        Returns:
+            str: The updated annotations
+        """
+        self.message_annotations[3]["content"] = further_questions
+        return self.message_annotations
+
@@ -589,6 +589,33 @@ const ChatPage = () => {
         -webkit-box-orient: vertical;
         overflow: hidden;
       }
+      /* Hide scrollbar by default, show on hover */
+      .scrollbar-hover {
+        -ms-overflow-style: none; /* IE and Edge */
+        scrollbar-width: none; /* Firefox */
+      }
+      .scrollbar-hover::-webkit-scrollbar {
+        display: none; /* Chrome, Safari and Opera */
+      }
+      .scrollbar-hover:hover {
+        -ms-overflow-style: auto; /* IE and Edge */
+        scrollbar-width: thin; /* Firefox */
+      }
+      .scrollbar-hover:hover::-webkit-scrollbar {
+        display: block; /* Chrome, Safari and Opera */
+        height: 6px;
+      }
+      .scrollbar-hover:hover::-webkit-scrollbar-track {
+        background: hsl(var(--muted));
+        border-radius: 3px;
+      }
+      .scrollbar-hover:hover::-webkit-scrollbar-thumb {
+        background: hsl(var(--muted-foreground) / 0.3);
+        border-radius: 3px;
+      }
+      .scrollbar-hover:hover::-webkit-scrollbar-thumb:hover {
+        background: hsl(var(--muted-foreground) / 0.5);
+      }
   `;
   document.head.appendChild(style);
 
@@ -1303,36 +1330,131 @@ const ChatPage = () => {
               );
             }
 
             // Fallback to the message content if no ANSWER annotation is available
-            return (
-              <MarkdownViewer
-                content={message.content}
-                getCitationSource={(id) =>
-                  getCitationSource(id, index)
-                }
-                type="ai"
-              />
-            );
+            return <MarkdownViewer
+              content={message.content}
+              getCitationSource={(id) => getCitationSource(id, index)}
+              type="ai"
+            />;
           })()}
         </div>
       }
     </div>
+
+    {/* Further Questions Section */}
+    {message.annotations && (() => {
+      // Get all FURTHER_QUESTIONS annotations
+      const furtherQuestionsAnnotations = (message.annotations as any[])
+        .filter(a => a.type === 'FURTHER_QUESTIONS');
+
+      // Get the latest FURTHER_QUESTIONS annotation
+      const latestFurtherQuestions = furtherQuestionsAnnotations.length > 0
+        ? furtherQuestionsAnnotations[furtherQuestionsAnnotations.length - 1]
+        : null;
+
+      // Only render if we have questions
+      if (!latestFurtherQuestions?.content || latestFurtherQuestions.content.length === 0) {
+        return null;
+      }
+
+      const furtherQuestions = latestFurtherQuestions.content;
+
+      return (
+        <div className="relative mb-6">
+          {/* Main container with improved styling */}
+          <div className="bg-muted/30 border border-border/60 rounded-lg overflow-hidden shadow-sm">
+            {/* Header with better visual separation */}
+            <div className="bg-muted/50 border-b border-border/40 px-4 py-2.5">
+              <div className="flex items-center justify-between">
+                <h3 className="text-sm font-medium text-muted-foreground flex items-center gap-2">
+                  <svg className="h-4 w-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
+                    <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M8.228 9c.549-1.165 2.03-2 3.772-2 2.21 0 4 1.343 4 3 0 1.4-1.278 2.575-3.006 2.907-.542.104-.994.54-.994 1.093m0 3h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
+                  </svg>
+                  Follow-up Questions
+                </h3>
+                <span className="text-xs text-muted-foreground bg-background/60 px-2 py-1 rounded-full border border-border/40">
+                  {furtherQuestions.length} suggestion{furtherQuestions.length !== 1 ? 's' : ''}
+                </span>
+              </div>
+            </div>
+
+            {/* Questions container with enhanced scrolling */}
+            <div className="p-3">
+              <div className="relative">
+                {/* Left fade gradient */}
+                <div className="absolute left-0 top-0 bottom-0 w-6 bg-gradient-to-r from-muted/30 to-transparent z-10 pointer-events-none" />
+
+                {/* Right fade gradient */}
+                <div className="absolute right-0 top-0 bottom-0 w-6 bg-gradient-to-l from-muted/30 to-transparent z-10 pointer-events-none" />
+
+                {/* Scrollable container */}
+                <div className="overflow-x-auto scrollbar-hover">
+                  <div className="flex gap-2 py-1 px-6">
+                    {furtherQuestions.map((question: any, qIndex: number) => (
+                      <Button
+                        key={question.id || qIndex}
+                        variant="outline"
+                        size="sm"
+                        className="h-8 px-4 text-sm font-normal whitespace-nowrap rounded-full border-border/60 bg-background hover:bg-background/80 hover:border-primary/50 hover:shadow-md transition-all duration-200 flex items-center gap-2 select-none shrink-0 group"
+                        onClick={() => {
+                          // Set the input value and submit
+                          handleInputChange({
+                            target: { value: question.question }
+                          } as React.ChangeEvent<HTMLInputElement>);
+
+                          // Small delay to ensure input is updated, then submit
+                          setTimeout(() => {
+                            const form = document.querySelector('form') as HTMLFormElement;
+                            if (form && status === 'ready') {
+                              form.requestSubmit();
+                            }
+                          }, 50);
+                        }}
+                        disabled={status !== 'ready'}
+                      >
+                        <span className="text-foreground group-hover:text-primary transition-colors">
+                          {question.question}
+                        </span>
+                        <svg
+                          className="text-muted-foreground group-hover:text-primary transition-colors"
+                          width="14"
+                          height="14"
+                          viewBox="0 0 16 16"
+                          fill="none"
+                        >
+                          <path
+                            fillRule="evenodd"
+                            clipRule="evenodd"
+                            d="M6.75011 4H6.00011V5.5H6.75011H9.43945L5.46978 9.46967L4.93945 10L6.00011 11.0607L6.53044 10.5303L10.499 6.56182V9.25V10H11.999V9.25V5C11.999 4.44772 11.5512 4 10.999 4H6.75011Z"
+                            fill="currentColor"
+                          />
+                        </svg>
+                      </Button>
+                    ))}
+                  </div>
+                </div>
+              </div>
+            </div>
+          </div>
+        </div>
+      );
+    })()}
 
     {/* Scroll to bottom button */}
     <div className="fixed bottom-8 right-8">
       <Button
         onClick={scrollToBottom}
         className="h-8 w-8 rounded-full bg-gray-200 dark:bg-gray-700 hover:bg-gray-300 dark:hover:bg-gray-600"
         variant="ghost"
         size="icon"
       >
         <ArrowDown className="h-4 w-4" />
       </Button>
     </div>
   </CardContent>
 </Card>
 </div>
 );
 }
 
 return null;
 })}
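Reviewer note: the new section keys off the latest FURTHER_QUESTIONS annotation produced by the backend. Its expected payload, shown as Python-side data for reference (question text illustrative):

```python
further_questions_annotation = {
    "type": "FURTHER_QUESTIONS",
    "content": [
        {"id": 0, "question": "How do you handle imbalanced datasets?"},
        {"id": 1, "question": "Which preprocessing techniques help most?"},
    ],
}
# The UI renders one pill-style button per item; clicking fills the chat
# input with `question` and submits the form once `status` is 'ready'.
```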