mirror of
https://github.com/lfnovo/open-notebook.git
synced 2026-04-30 04:20:02 +00:00
refactor transformation, add graph and admin
This commit is contained in:
parent
e3fa445fcc
commit
4a5d47d934
26 changed files with 326 additions and 384 deletions
|
|
@@ -1,59 +0,0 @@
|
|||
import operator
|
||||
from typing import List, Literal, Sequence
|
||||
|
||||
from langchain_core.runnables import (
|
||||
RunnableConfig,
|
||||
)
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
from typing_extensions import Annotated, TypedDict
|
||||
|
||||
from open_notebook.graphs.utils import run_pattern
|
||||
|
||||
|
||||
class PatternChainState(TypedDict):
    """Shared state for the multi-pattern transformation chain graph."""

    # Accumulating stack of texts: the operator.add reducer makes langgraph
    # append each node's returned list instead of replacing the value.
    content_stack: Annotated[Sequence[str], operator.add]
    # Queue of pattern names still to apply; one is consumed per "agent" step.
    patterns: List[str]
    # Result of the most recently applied pattern.
    output: str
def call_model(state: dict, config: RunnableConfig) -> dict:
    """Apply the next queued pattern to the top of the content stack.

    Takes the first entry of ``state["patterns"]`` without mutating the
    incoming state (the original ``pop(0)`` mutated the shared state list in
    place and then returned that same alias).  Names starting with
    ``"patterns/"`` are treated as prompt templates; anything else is passed
    as a free-form command to the generic ``patterns/default/command``
    pattern.

    Returns a partial state update: the pattern result is appended to
    ``content_stack`` (via its ``operator.add`` reducer), ``output`` holds the
    result text, and ``patterns`` is the remaining queue.
    """
    patterns = state["patterns"]
    current_transformation = patterns[0]  # IndexError on empty, as before
    remaining = patterns[1:]

    if current_transformation.startswith("patterns/"):
        input_args = {"input_text": state["content_stack"][-1]}
    else:
        # Free-form command: route it through the default command pattern.
        input_args = {
            "input_text": state["content_stack"][-1],
            "command": current_transformation,
        }
        current_transformation = "patterns/default/command"

    transformation_result = run_pattern(
        pattern_name=current_transformation,
        config=config,
        state=input_args,
    )
    return {
        "content_stack": [transformation_result.content],
        "output": transformation_result.content,
        "patterns": remaining,  # fresh list; incoming state left untouched
    }
def transform_condition(state: PatternChainState) -> Literal["agent", END]:  # type: ignore
    """
    Route back to the agent while patterns remain; otherwise finish the graph.
    """
    return "agent" if state["patterns"] else END
# Wire the chain: START -> "agent", then loop "agent" -> "agent" via
# transform_condition until the pattern queue is empty (END).
agent_state = StateGraph(PatternChainState)
agent_state.add_node("agent", call_model)
agent_state.add_edge(START, "agent")
agent_state.add_conditional_edges(
    "agent",
    transform_condition,
)
graph = agent_state.compile()
46
open_notebook/graphs/prompt.py
Normal file
46
open_notebook/graphs/prompt.py
Normal file
|
|
@@ -0,0 +1,46 @@
|
|||
from typing import Any, Optional
|
||||
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
from langchain_core.runnables import (
|
||||
RunnableConfig,
|
||||
)
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
from loguru import logger
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from open_notebook.graphs.utils import provision_langchain_model
|
||||
from open_notebook.prompter import Prompter
|
||||
|
||||
|
||||
class PatternChainState(TypedDict):
    """State for the single-shot prompt graph."""

    # Prompt text rendered by Prompter, with the whole state as template data.
    prompt: str
    # Optional output parser handed to Prompter; may be None.
    parser: Optional[Any]
    # Text sent to the model as the human message.
    input_text: str
    # Model response content.
    output: str
def call_model(state: dict, config: RunnableConfig) -> dict:
    """Render the prompt, invoke the configured model once, return its output.

    The system message is ``state["prompt"]`` rendered by Prompter (with the
    optional parser's format instructions); the human message is
    ``state["input_text"]``.  Returns ``{"output": <response text>}``.
    """
    content = state["input_text"]
    system_prompt = Prompter(
        prompt_text=state["prompt"], parser=state.get("parser")
    ).render(data=state)
    # Demoted from logger.warning: dumping the full input text is diagnostic
    # tracing, not a warning condition.
    logger.debug(content)
    payload = [SystemMessage(content=system_prompt)] + [HumanMessage(content=content)]
    chain = provision_langchain_model(
        str(payload),
        config.get("configurable", {}).get("model_id"),
        "transformation",
        max_tokens=5000,
    )

    response = chain.invoke(payload)

    return {"output": response.content}
# Single-node graph: START -> "agent" -> END.
agent_state = StateGraph(PatternChainState)
agent_state.add_node("agent", call_model)
agent_state.add_edge(START, "agent")
agent_state.add_edge("agent", END)

graph = agent_state.compile()
|
@@ -1,5 +1,5 @@
|
|||
import operator
|
||||
from typing import List
|
||||
from typing import List, Optional
|
||||
|
||||
from langchain_core.runnables import (
|
||||
RunnableConfig,
|
||||
|
|
@@ -13,22 +13,22 @@ from open_notebook.domain.notebook import Asset, Source
|
|||
from open_notebook.domain.transformation import Transformation
|
||||
from open_notebook.graphs.content_processing import ContentState
|
||||
from open_notebook.graphs.content_processing import graph as content_graph
|
||||
from open_notebook.graphs.multipattern import graph as transform_graph
|
||||
from open_notebook.graphs.transformation import graph as transform_graph
|
||||
from open_notebook.utils import surreal_clean
|
||||
|
||||
|
||||
class SourceState(TypedDict):
|
||||
content_state: ContentState
|
||||
transformations: List[str]
|
||||
apply_transformations: List[Transformation]
|
||||
notebook_id: str
|
||||
source: Source
|
||||
transformations: Annotated[list, operator.add]
|
||||
transformation: Annotated[list, operator.add]
|
||||
embed: bool
|
||||
|
||||
|
||||
class TransformationState(TypedDict):
|
||||
source: Source
|
||||
transformation: dict
|
||||
transformation: Transformation
|
||||
|
||||
|
||||
async def content_process(state: SourceState) -> dict:
|
||||
|
|
@@ -38,13 +38,6 @@ async def content_process(state: SourceState) -> dict:
|
|||
return {"content_state": processed_state}
|
||||
|
||||
|
||||
async def run_patterns(input_text: str, patterns: List[dict]) -> str:
    """Run *patterns* over *input_text* via the transformation graph and
    return the final output text."""
    initial_state = {"content_stack": [input_text], "patterns": patterns}
    result = await transform_graph.ainvoke(initial_state)
    return result["output"]
def save_source(state: SourceState) -> dict:
|
||||
content_state = state["content_state"]
|
||||
|
||||
|
|
@@ -69,15 +62,10 @@
|
|||
|
||||
|
||||
def trigger_transformations(state: SourceState, config: RunnableConfig) -> List[Send]:
|
||||
if len(state["transformations"]) == 0:
|
||||
if len(state["apply_transformations"]) == 0:
|
||||
return []
|
||||
|
||||
transformations = Transformation.get_all()
|
||||
to_apply = [
|
||||
t
|
||||
for t in transformations["source_insights"]
|
||||
if t["name"] in state["transformations"]
|
||||
]
|
||||
to_apply = state["apply_transformations"]
|
||||
logger.debug(f"Applying transformations {to_apply}")
|
||||
|
||||
return [
|
||||
|
|
@@ -92,19 +80,26 @@ def trigger_transformations(state: SourceState, config: RunnableConfig) -> List[
|
|||
]
|
||||
|
||||
|
||||
async def transform_content(state: TransformationState) -> dict:
|
||||
async def transform_content(state: TransformationState) -> Optional[dict]:
|
||||
source = state["source"]
|
||||
content = source.full_text
|
||||
if not content:
|
||||
return None
|
||||
transformation = state["transformation"]
|
||||
transformation: Transformation = state["transformation"]
|
||||
|
||||
logger.debug(f"Applying transformation {transformation['name']}")
|
||||
result = await run_patterns(content, patterns=transformation["patterns"])
|
||||
|
||||
source.add_insight(transformation["name"], surreal_clean(result))
|
||||
|
||||
return {"transformations": [{"name": transformation["name"], "content": result}]}
|
||||
logger.debug(f"Applying transformation {transformation.name}")
|
||||
result = await transform_graph.ainvoke(
|
||||
dict(input_text=content, transformation=transformation)
|
||||
)
|
||||
source.add_insight(transformation.title, surreal_clean(result["output"]))
|
||||
return {
|
||||
"transformation": [
|
||||
{
|
||||
"output": result["output"],
|
||||
"transformation_name": transformation.name,
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
# Create and compile the workflow
|
||||
|
|
|
|||
57
open_notebook/graphs/transformation.py
Normal file
57
open_notebook/graphs/transformation.py
Normal file
|
|
@@ -0,0 +1,57 @@
|
|||
from executing import Source
|
||||
from langchain_core.messages import HumanMessage, SystemMessage
|
||||
from langchain_core.runnables import (
|
||||
RunnableConfig,
|
||||
)
|
||||
from langgraph.graph import END, START, StateGraph
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from open_notebook.domain.transformation import DefaultPrompts, Transformation
|
||||
from open_notebook.graphs.utils import provision_langchain_model
|
||||
from open_notebook.prompter import Prompter
|
||||
|
||||
|
||||
class TransformationState(TypedDict):
    """State for the single-transformation graph."""

    # Raw text to transform; optional when a source is supplied instead.
    input_text: str
    # NOTE(review): `Source` is imported from the third-party `executing`
    # package at the top of this file — almost certainly meant to be
    # open_notebook.domain.notebook.Source (the object with full_text /
    # add_insight used below). Confirm and fix the import.
    source: Source
    # The transformation whose prompt drives the model call.
    transformation: Transformation
    # Model response content.
    output: str
def run_transformation(state: dict, config: RunnableConfig) -> dict:
    """Apply a single Transformation to explicit text or to a source's text.

    Uses ``state["input_text"]`` when present, falling back to
    ``source.full_text``.  The transformation's prompt — optionally prefixed
    by the admin-configured default transformation instructions — becomes the
    system message; the content is the human message.  When a source is
    given, the result is also persisted on it as an insight.

    Returns ``{"output": <response text>}``.

    Raises:
        ValueError: if neither ``input_text`` nor ``source`` is provided.
    """
    source: Source = state.get("source")
    content = state.get("input_text")
    # Explicit raise instead of `assert`: asserts are stripped under -O, so
    # validation must not rely on them.
    if not (source or content):
        raise ValueError("No content to transform")
    transformation: Transformation = state["transformation"]
    if not content:
        content = source.full_text
    transformation_prompt_text = transformation.prompt
    # Prepend the globally configured instructions, when set.
    default_prompts: DefaultPrompts = DefaultPrompts().load()
    if default_prompts.transformation_instructions:
        transformation_prompt_text = f"{default_prompts.transformation_instructions}\n\n{transformation_prompt_text}"

    # Marks where the user content begins for the model.
    transformation_prompt_text = f"{transformation_prompt_text}\n\n# INPUT"

    system_prompt = Prompter(prompt_text=transformation_prompt_text).render(data=state)
    payload = [SystemMessage(content=system_prompt)] + [HumanMessage(content=content)]
    chain = provision_langchain_model(
        str(payload),
        config.get("configurable", {}).get("model_id"),
        "transformation",
        max_tokens=5000,
    )

    response = chain.invoke(payload)
    if source:
        source.add_insight(transformation.title, response.content)

    return {
        "output": response.content,
    }
# Single-node graph: START -> "agent" -> END.
agent_state = StateGraph(TransformationState)
agent_state.add_node("agent", run_transformation)
agent_state.add_edge(START, "agent")
agent_state.add_edge("agent", END)
graph = agent_state.compile()
|
|
@@ -1,10 +1,8 @@
|
|||
from langchain_core.language_models.chat_models import BaseChatModel
|
||||
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
|
||||
from loguru import logger
|
||||
|
||||
from open_notebook.domain.models import model_manager
|
||||
from open_notebook.models.llms import LanguageModel
|
||||
from open_notebook.prompter import Prompter
|
||||
from open_notebook.utils import token_count
|
||||
|
||||
|
||||
|
|
@@ -31,24 +29,3 @@ def provision_langchain_model(
|
|||
|
||||
assert isinstance(model, LanguageModel), f"Model is not a LanguageModel: {model}"
|
||||
return model.to_langchain()
|
||||
|
||||
|
||||
# todo: turn into a graph
def run_pattern(
    pattern_name: str,
    config,
    state: dict = None,  # None sentinel: a `{}` default is shared across calls
    parser=None,
) -> BaseMessage:
    """Render *pattern_name* as the system prompt over *state* and invoke the
    configured model with ``state["input_text"]`` as the human message.

    ``state`` defaults to an empty dict (note: callers must still supply
    ``input_text`` — an empty state raises KeyError below, as before).
    Returns the raw model response message.
    """
    if state is None:
        state = {}
    system_prompt = Prompter(prompt_template=pattern_name, parser=parser).render(
        data=state
    )
    payload = [SystemMessage(content=system_prompt)] + [
        HumanMessage(content=state["input_text"])
    ]
    chain = provision_langchain_model(
        str(payload), config.get("configurable", {}).get("model_id"), "transformation"
    )

    response = chain.invoke(payload)
    return response
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue