Mirror of https://github.com/lfnovo/open-notebook.git, synced 2026-04-28 11:30:00 +00:00
Resolve conflicts in ask.py and chat.py by merging the try/except error handling from main with the extract_text_content helper from the PR. Also apply the same fix to source_chat.py and transformation.py, which had the same vulnerable isinstance/str() pattern for structured LLM response content (e.g. Gemini's envelope format).
75 lines
2.9 KiB
Python
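The commit message references an extract_text_content helper from open_notebook.utils.text_utils without showing it. As a rough sketch, assuming LangChain's convention that message content is either a plain string or a list of parts (Gemini's envelope is a list of dicts with a "text" key), such a helper might look like the following; the project's actual implementation may differ:

def extract_text_content(content) -> str:
    # Plain string content passes through untouched.
    if isinstance(content, str):
        return content
    # Structured envelopes arrive as a list of parts. Naively calling
    # str() here is the vulnerable pattern the commit removes: it leaks
    # "[{'type': 'text', 'text': ...}]" into the output.
    if isinstance(content, list):
        parts = []
        for part in content:
            if isinstance(part, str):
                parts.append(part)
            elif isinstance(part, dict) and part.get("type") == "text":
                parts.append(part.get("text", ""))
        return "".join(parts)
    # Fall back for any other content shape.
    return str(content)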
from ai_prompter import Prompter
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables import RunnableConfig
from langgraph.graph import END, START, StateGraph
from typing_extensions import TypedDict

from open_notebook.ai.provision import provision_langchain_model
from open_notebook.domain.notebook import Source
from open_notebook.domain.transformation import DefaultPrompts, Transformation
from open_notebook.exceptions import OpenNotebookError
from open_notebook.utils import clean_thinking_content
from open_notebook.utils.error_classifier import classify_error
from open_notebook.utils.text_utils import extract_text_content


class TransformationState(TypedDict):
    input_text: str
    source: Source
    transformation: Transformation
    output: str


async def run_transformation(state: dict, config: RunnableConfig) -> dict:
    # Accept either a Source object or raw input_text; at least one is required.
    source_obj = state.get("source")
    source: Source = source_obj if isinstance(source_obj, Source) else None  # type: ignore[assignment]
    content = state.get("input_text")
    assert source or content, "No content to transform"
    transformation: Transformation = state["transformation"]

    try:
        # Fall back to the source's full text when no explicit input was given.
        if not content:
            content = source.full_text
        transformation_template_text = transformation.prompt
        default_prompts: DefaultPrompts = DefaultPrompts(transformation_instructions=None)
        # Optionally prefix configured default instructions to the transformation prompt.
        if default_prompts.transformation_instructions:
            transformation_template_text = f"{default_prompts.transformation_instructions}\n\n{transformation_template_text}"

        transformation_template_text = f"{transformation_template_text}\n\n# INPUT"

        system_prompt = Prompter(template_text=transformation_template_text).render(
            data=state
        )
        content_str = str(content) if content else ""
        payload = [SystemMessage(content=system_prompt), HumanMessage(content=content_str)]
        chain = await provision_langchain_model(
            str(payload),
            config.get("configurable", {}).get("model_id"),
            "transformation",
            max_tokens=8192,
        )

        response = await chain.ainvoke(payload)

        # Normalize possibly structured response content (e.g. Gemini's
        # list-of-parts envelope) to plain text, then strip thinking content.
        response_content = extract_text_content(response.content)
        cleaned_content = clean_thinking_content(response_content)

        if source:
            await source.add_insight(transformation.title, cleaned_content)

        return {
            "output": cleaned_content,
        }
    except OpenNotebookError:
        # Domain errors already carry user-facing messages; re-raise untouched.
        raise
    except Exception as e:
        # Classify unexpected failures into a user-facing error type.
        error_class, user_message = classify_error(e)
        raise error_class(user_message) from e


agent_state = StateGraph(TransformationState)
agent_state.add_node("agent", run_transformation)  # type: ignore[type-var]
agent_state.add_edge(START, "agent")
agent_state.add_edge("agent", END)
graph = agent_state.compile()
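For reference, the compiled graph defined above could be invoked like this; an illustrative sketch, not code from the repository. The state keys follow TransformationState, "model_id" matches the configurable key run_transformation reads, and transformation is assumed to be a Transformation instance obtained elsewhere:

import asyncio

# "source" may be omitted when input_text is supplied directly;
# the model id below is a hypothetical placeholder.
result = asyncio.run(
    graph.ainvoke(
        {"input_text": "Text to transform.", "transformation": transformation},
        config={"configurable": {"model_id": "my-model-id"}},
    )
)
print(result["output"])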