From 54fb4746a44003420eff39adf215c65bb0448c93 Mon Sep 17 00:00:00 2001 From: 3clyp50 Date: Fri, 6 Feb 2026 18:03:52 +0100 Subject: [PATCH] initial plugins effort; memory system PoC plugin manifest update add memory plugin PoC files offload memory prompts cleanup imports extract memory UI fix paths plugin docs --- .gitignore | 6 +- PLUGIN_SYSTEM.md | 116 ++++++++ agent.py | 16 +- plugins/README.md | 84 ++++++ .../memory}/api/memory_dashboard.py | 9 +- .../_50_recall_memories.py | 222 +++++++++++++++ .../monologue_end/_50_memorize_fragments.py | 205 ++++++++++++++ .../monologue_end/_51_memorize_solutions.py | 208 ++++++++++++++ .../monologue_start/_10_memory_init.py | 20 ++ .../memory}/helpers/knowledge_import.py | 0 {python => plugins/memory}/helpers/memory.py | 4 +- .../memory}/helpers/memory_consolidation.py | 11 +- plugins/memory/plugin.json | 151 ++++++++++ .../memory/prompts}/agent.system.memories.md | 0 .../memory/prompts}/agent.system.solutions.md | 0 .../prompts}/agent.system.tool.memory.md | 0 .../memory/prompts}/fw.memory.hist_suc.sys.md | 0 .../memory/prompts}/fw.memory.hist_sum.sys.md | 0 .../memory/prompts}/fw.memory_saved.md | 0 .../prompts}/memory.consolidation.msg.md | 0 .../prompts}/memory.consolidation.sys.md | 0 .../prompts}/memory.keyword_extraction.msg.md | 0 .../prompts}/memory.keyword_extraction.sys.md | 0 .../prompts}/memory.memories_filter.msg.md | 0 .../prompts}/memory.memories_filter.sys.md | 0 .../prompts}/memory.memories_query.msg.md | 0 .../prompts}/memory.memories_query.sys.md | 0 .../prompts}/memory.memories_sum.sys.md | 0 .../prompts}/memory.recall_delay_msg.md | 0 .../prompts}/memory.solutions_query.sys.md | 0 .../prompts}/memory.solutions_sum.sys.md | 0 .../memory}/tools/memory_delete.py | 9 +- .../memory}/tools/memory_forget.py | 11 +- .../memory}/tools/memory_load.py | 9 +- .../memory}/tools/memory_save.py | 9 +- .../memory/ui}/memory-dashboard-store.js | 4 +- .../memory/ui}/memory-dashboard.html | 2 +- 
.../memory/ui}/memory-detail-modal.html | 0 python/api/import_knowledge.py | 4 +- python/api/knowledge_path_get.py | 4 +- python/api/knowledge_reindex.py | 4 +- python/api/plugins_list.py | 22 ++ python/api/plugins_resolve.py | 50 ++++ .../_50_recall_memories.py | 223 +-------------- .../monologue_end/_50_memorize_fragments.py | 202 +------------- .../monologue_end/_51_memorize_solutions.py | 205 +------------- .../monologue_start/_10_memory_init.py | 17 +- .../system_prompt/_20_behaviour_prompt.py | 5 +- python/helpers/extension.py | 7 +- python/helpers/plugins.py | 262 ++++++++++++++++++ python/helpers/projects.py | 3 +- python/helpers/settings.py | 4 +- python/tools/behaviour_adjustment.py | 4 +- python/tools/knowledge_tool._py | 11 +- python/tools/search_engine.py | 4 +- run_ui.py | 43 +++ webui/components/settings/agent/memory.html | 2 +- .../sidebar/top-section/quick-actions.html | 4 +- webui/components/welcome/welcome-store.js | 2 +- webui/js/components.js | 2 +- webui/js/initFw.js | 1 + webui/js/plugins.js | 215 ++++++++++++++ 62 files changed, 1737 insertions(+), 659 deletions(-) create mode 100644 PLUGIN_SYSTEM.md create mode 100644 plugins/README.md rename {python => plugins/memory}/api/memory_dashboard.py (97%) create mode 100644 plugins/memory/extensions/message_loop_prompts_after/_50_recall_memories.py create mode 100644 plugins/memory/extensions/monologue_end/_50_memorize_fragments.py create mode 100644 plugins/memory/extensions/monologue_end/_51_memorize_solutions.py create mode 100644 plugins/memory/extensions/monologue_start/_10_memory_init.py rename {python => plugins/memory}/helpers/knowledge_import.py (100%) rename {python => plugins/memory}/helpers/memory.py (99%) rename {python => plugins/memory}/helpers/memory_consolidation.py (99%) create mode 100644 plugins/memory/plugin.json rename {prompts => plugins/memory/prompts}/agent.system.memories.md (100%) rename {prompts => plugins/memory/prompts}/agent.system.solutions.md (100%) rename {prompts 
=> plugins/memory/prompts}/agent.system.tool.memory.md (100%) rename {prompts => plugins/memory/prompts}/fw.memory.hist_suc.sys.md (100%) rename {prompts => plugins/memory/prompts}/fw.memory.hist_sum.sys.md (100%) rename {prompts => plugins/memory/prompts}/fw.memory_saved.md (100%) rename {prompts => plugins/memory/prompts}/memory.consolidation.msg.md (100%) rename {prompts => plugins/memory/prompts}/memory.consolidation.sys.md (100%) rename {prompts => plugins/memory/prompts}/memory.keyword_extraction.msg.md (100%) rename {prompts => plugins/memory/prompts}/memory.keyword_extraction.sys.md (100%) rename {prompts => plugins/memory/prompts}/memory.memories_filter.msg.md (100%) rename {prompts => plugins/memory/prompts}/memory.memories_filter.sys.md (100%) rename {prompts => plugins/memory/prompts}/memory.memories_query.msg.md (100%) rename {prompts => plugins/memory/prompts}/memory.memories_query.sys.md (100%) rename {prompts => plugins/memory/prompts}/memory.memories_sum.sys.md (100%) rename {prompts => plugins/memory/prompts}/memory.recall_delay_msg.md (100%) rename {prompts => plugins/memory/prompts}/memory.solutions_query.sys.md (100%) rename {prompts => plugins/memory/prompts}/memory.solutions_sum.sys.md (100%) rename {python => plugins/memory}/tools/memory_delete.py (66%) rename {python => plugins/memory}/tools/memory_forget.py (60%) rename {python => plugins/memory}/tools/memory_load.py (74%) rename {python => plugins/memory}/tools/memory_save.py (67%) rename {webui/components/modals/memory => plugins/memory/ui}/memory-dashboard-store.js (99%) rename {webui/components/modals/memory => plugins/memory/ui}/memory-dashboard.html (99%) rename {webui/components/modals/memory => plugins/memory/ui}/memory-detail-modal.html (100%) create mode 100644 python/api/plugins_list.py create mode 100644 python/api/plugins_resolve.py create mode 100644 python/helpers/plugins.py create mode 100644 webui/js/plugins.js diff --git a/.gitignore b/.gitignore index 
70b63905f..51d3a9268 100644 --- a/.gitignore +++ b/.gitignore @@ -16,9 +16,9 @@ .venv/ # obsolete folders -memory/ -knowledge/custom/ -instruments/ +/memory/ +/knowledge/custom/ +/instruments/ # Handle logs directory logs/** diff --git a/PLUGIN_SYSTEM.md b/PLUGIN_SYSTEM.md new file mode 100644 index 000000000..0ba822b31 --- /dev/null +++ b/PLUGIN_SYSTEM.md @@ -0,0 +1,116 @@ +# Unified Plugin System - Implementation Summary + +This document provides a technical overview of the Plugin System implementation in Agent Zero. + +## Overview + +The Plugin System allows users and the community to create full-stack plugins that integrate seamlessly with Agent Zero. It supports backend capabilities (API endpoints, tools, helpers) and frontend UI extensions. It uses a manifest-driven approach (`plugin.json`) to declare capabilities. + +## Architecture + +### Components + +1. Backend Plugin Discovery (`python/helpers/plugins.py`) + - Resolves plugin directories across repo (`plugins/`) and user (`usr/plugins/`) locations. + - Loads and validates `plugin.json` manifests. + - Provides `import_plugin_module()` for dynamic dependency injection, replacing static imports. + - Implements override logic (user plugins override repo plugins). + +2. API Endpoints + - `POST /plugins_resolve` - Resolves a plugin manifest by ID (for frontend). + - `GET/POST /plugins_list` - Lists all available plugins. + - `GET /plugins//` - Serves plugin static assets (UI, scripts). + +3. Frontend Plugin Loader (`webui/js/plugins.js`) + - Discovers and loads `` tags. + - Fetches plugin manifests via `/plugins_resolve`. + - Path Traversal Strategy: Integrates with the standard component loader by adjusting paths (e.g., prepending `components/../` to plugin URLs) to bypass default path restrictions without modifying the core loader. + - Handles module imports and props merging. + +4. 
Standard Component Loader (`webui/js/components.js`) + - Vanilla Implementation: Remains unmodified to ensure stability and compatibility. + - Enforces `components/` prefix for all loaded resources. + - Used by `plugins.js` via relative path traversal to load plugin UI components. + +5. DOM Integration + - Automatic loading on DOM ready. + - MutationObserver for dynamic plugin injection. + - Alpine.js integration via `globalThis.xAttrs()`. + +## File Structure + +``` +/plugins/ # Repo plugins (default) + ├── memory/ + │ ├── plugin.json # Manifest + │ ├── api/ # Backend endpoints + │ ├── tools/ # Agent tools + │ ├── helpers/ # Python helpers + │ ├── ui/ # Frontend components + │ │ ├── memory-dashboard.html + │ │ └── memory-dashboard-store.js + │ └── prompts/ # System prompts + └── README.md + +/usr/plugins/ # User plugins (override) + ├── my-plugin/ + │ ├── plugin.json + │ └── ... + └── README.md +``` + +## Plugin Manifest Schema + +```json +{ + "id": "plugin-id", // Required: Must match directory name + "name": "Display Name", // Optional: Human-readable name + "provides": { + "api": [ + { "module": "api/my_endpoint.py", "description": "..." } + ], + "tool": [ + { "module": "tools/my_tool.py", "description": "..." } + ], + "ui": { + "component": "ui/index.html", + "module": "ui/main.js" + }, + "prompt": [ + { "module": "prompts/system.md", "description": "..." } + ] + }, + "props": { // Optional: Default UI properties + "key": "value" + } +} +``` + +## Usage + +### Creating a Plugin + +1. Create plugin directory: `plugins/my-plugin` +2. Create `plugin.json` declaring provided capabilities. +3. Implement backend modules in `api/`, `tools/`, etc. +4. Implement frontend components in `ui/`. 
+ +### Using Plugin Capabilities + +Backend (Python): +Instead of static imports, use the dynamic loader: +```python +from python.helpers.plugins import import_plugin_module + +# Dynamically load a helper from the 'memory' plugin +memory = import_plugin_module("memory", "helpers/memory.py") +memory.some_function() +``` + +Frontend (HTML/JS): +Plugins can be embedded via `` or loaded dynamically via `openModal`. +```javascript +// Open a plugin component in a modal +// Uses relative path to traverse out of 'components/' and into 'plugins/' +openModal("../plugins/my-plugin/ui/dashboard.html"); +``` diff --git a/agent.py b/agent.py index b070cfc08..6d993dc92 100644 --- a/agent.py +++ b/agent.py @@ -18,7 +18,8 @@ from python.helpers import ( tokens, context as context_helper, dirty_json, - subagents + subagents, + plugins ) from python.helpers.print_style import PrintStyle @@ -645,6 +646,10 @@ class Agent: def parse_prompt(self, _prompt_file: str, **kwargs): dirs = subagents.get_paths(self, "prompts") + + # Plugin prompt paths + dirs.extend(plugins.get_plugin_paths("prompt")) + prompt = files.parse_file( _prompt_file, _directories=dirs, _agent=self, **kwargs ) @@ -652,6 +657,10 @@ class Agent: def read_prompt(self, file: str, **kwargs) -> str: dirs = subagents.get_paths(self, "prompts") + + # Plugin prompt paths + dirs.extend(plugins.get_plugin_paths("prompt")) + prompt = files.read_prompt_file(file, _directories=dirs, _agent=self, **kwargs) if files.is_full_json_template(prompt): prompt = files.remove_code_fences(prompt) @@ -987,6 +996,11 @@ class Agent: # search for tools in agent's folder hierarchy paths = subagents.get_paths(self, "tools", name + ".py", default_root="python") + + # Add plugin tool paths + plugin_paths = plugins.get_plugin_paths("tool", name + ".py") + paths.extend(plugin_paths) + for path in paths: try: classes = extract_tools.load_classes_from_file(path, Tool) # type: ignore[arg-type] diff --git a/plugins/README.md b/plugins/README.md new file 
mode 100644 index 000000000..73ffc6fd5 --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,84 @@ +# Agent Zero Plugins + +This directory contains default plugins shipped with Agent Zero. + +## Plugin Architecture + +Agent Zero uses a unified capability-based plugin system where a single `plugin.json` manifest declares all capabilities via a `provides` dictionary. Each capability type maps to an integration point in the framework. + +## Plugin Structure + +Each plugin directory should contain: +- `plugin.json` - Manifest file with capability declarations +- Capability-specific files organized by type (helpers, tools, extensions, api, ui, etc.) +- Other assets (CSS, images, documentation) + +## Manifest Schema + +The `plugin.json` uses a `provides` dict where keys are integration point types: + +```json +{ + "id": "my-plugin", + "name": "My Plugin", + "version": "1.0.0", + "author": "Author Name", + "description": "Plugin description", + "tags": ["tag1", "tag2"], + "provides": { + "helper": [ + { "module": "helpers/my_helper.py", "description": "Helper module description" } + ], + "tool": [ + { "module": "tools/my_tool.py", "description": "Tool description" } + ], + "extension": [ + { "module": "extensions/hook_name/my_extension.py", "description": "Extension description" } + ], + "api": [ + { "module": "api/my_api.py", "description": "API endpoint description" } + ], + "ui": { + "component": "ui/component.html", + "module": "ui/main.js", + "description": "UI component description" + } + } +} +``` + +## Capability Types + +- **helper**: Python modules providing reusable functionality (imported via proxy or direct) +- **tool**: Agent tools extending agent capabilities +- **extension**: Extension hooks that run at specific lifecycle points +- **api**: Flask API endpoints (ApiHandler subclasses) +- **ui**: Frontend components loaded via `` tags +- **prompt**: Custom prompt templates +- **knowledge**: Knowledge base files and directories +- **instrument**: Custom 
instrumentation and monitoring + +## Using Plugins + +### UI Components +```html + +``` + +### Backend Capabilities +Backend capabilities (helpers, tools, extensions, APIs) are automatically discovered and integrated when the plugin is loaded. + +## User Plugins + +User-created plugins should be placed in one of the following directories: + +- `/usr/plugins/` (Global user plugins) +- `/usr/projects//.a0proj/plugins/` (Project-specific plugins) + +Note: `/usr/` refers to the Agent Zero user data directory in the application root, not the system `/usr` directory. + +User plugins with the same ID as repo plugins will completely override the repo version. + +## Documentation + +See [docs/extensibility.md](../docs/extensibility.md) for complete documentation on creating plugins. diff --git a/python/api/memory_dashboard.py b/plugins/memory/api/memory_dashboard.py similarity index 97% rename from python/api/memory_dashboard.py rename to plugins/memory/api/memory_dashboard.py index d1275fe41..774319a49 100644 --- a/python/api/memory_dashboard.py +++ b/plugins/memory/api/memory_dashboard.py @@ -1,10 +1,17 @@ from python.helpers.api import ApiHandler, Request, Response -from python.helpers.memory import Memory, get_existing_memory_subdirs, get_context_memory_subdir from python.helpers import files from models import ModelConfig, ModelType from langchain_core.documents import Document from agent import AgentContext +# Import Memory functions from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory, get_existing_memory_subdirs, get_context_memory_subdir + class MemoryDashboard(ApiHandler): diff --git a/plugins/memory/extensions/message_loop_prompts_after/_50_recall_memories.py b/plugins/memory/extensions/message_loop_prompts_after/_50_recall_memories.py new file mode 100644 index 000000000..0f4eecd24 --- /dev/null +++ 
b/plugins/memory/extensions/message_loop_prompts_after/_50_recall_memories.py @@ -0,0 +1,222 @@ +import asyncio +from python.helpers.extension import Extension +from agent import LoopData +from python.helpers import dirty_json, errors, settings, log + +# Import Memory and DEFAULT_THRESHOLD from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory +from tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD + + +DATA_NAME_TASK = "_recall_memories_task" +DATA_NAME_ITER = "_recall_memories_iter" + + +class RecallMemories(Extension): + + # INTERVAL = 3 + # HISTORY = 10000 + # MEMORIES_MAX_SEARCH = 12 + # SOLUTIONS_MAX_SEARCH = 8 + # MEMORIES_MAX_RESULT = 5 + # SOLUTIONS_MAX_RESULT = 3 + # THRESHOLD = DEFAULT_MEMORY_THRESHOLD + + async def execute(self, loop_data: LoopData = LoopData(), **kwargs): + + set = settings.get_settings() + + # turned off in settings? 
+ if not set["memory_recall_enabled"]: + return + + # every X iterations (or the first one) recall memories + if loop_data.iteration % set["memory_recall_interval"] == 0: + + # show util message right away + log_item = self.agent.context.log.log( + type="util", + heading="Searching memories...", + ) + + task = asyncio.create_task( + self.search_memories(loop_data=loop_data, log_item=log_item, **kwargs) + ) + else: + task = None + + # set to agent to be able to wait for it + self.agent.set_data(DATA_NAME_TASK, task) + self.agent.set_data(DATA_NAME_ITER, loop_data.iteration) + + async def search_memories(self, log_item: log.LogItem, loop_data: LoopData, **kwargs): + + # cleanup + extras = loop_data.extras_persistent + if "memories" in extras: + del extras["memories"] + if "solutions" in extras: + del extras["solutions"] + + + set = settings.get_settings() + # try: + + # get system message and chat history for util llm + system = self.agent.read_prompt("memory.memories_query.sys.md") + + # # log query streamed by LLM + # async def log_callback(content): + # log_item.stream(query=content) + + # call util llm to summarize conversation + user_instruction = ( + loop_data.user_message.output_text() if loop_data.user_message else "None" + ) + history = self.agent.history.output_text()[-set["memory_recall_history_len"]:] + message = self.agent.read_prompt( + "memory.memories_query.msg.md", history=history, message=user_instruction + ) + + # if query preparation by AI is enabled + if set["memory_recall_query_prep"]: + try: + # call util llm to generate search query from the conversation + query = await self.agent.call_utility_model( + system=system, + message=message, + # callback=log_callback, + ) + query = query.strip() + log_item.update(query=query) # no need for streaming here + except Exception as e: + err = errors.format_error(e) + self.agent.context.log.log( + type="warning", heading="Recall memories extension error:", content=err + ) + query = "" + + # no query, no 
search + if not query: + log_item.update( + heading="Failed to generate memory query", + ) + return + + # otherwise use the message and history as query + else: + query = user_instruction + "\n\n" + history + + # if there is no query (or just dash by the LLM), do not continue + if not query or len(query) <= 3: + log_item.update( + query="No relevant memory query generated, skipping search", + ) + return + + # get memory database + db = await Memory.get(self.agent) + + # search for general memories and fragments + memories = await db.search_similarity_threshold( + query=query, + limit=set["memory_recall_memories_max_search"], + threshold=set["memory_recall_similarity_threshold"], + filter=f"area == '{Memory.Area.MAIN.value}' or area == '{Memory.Area.FRAGMENTS.value}'", # exclude solutions + ) + + # search for solutions + solutions = await db.search_similarity_threshold( + query=query, + limit=set["memory_recall_solutions_max_search"], + threshold=set["memory_recall_similarity_threshold"], + filter=f"area == '{Memory.Area.SOLUTIONS.value}'", # exclude solutions + ) + + if not memories and not solutions: + log_item.update( + heading="No memories or solutions found", + ) + return + + # if post filtering is enabled + if set["memory_recall_post_filter"]: + # assemble an enumerated dict of memories and solutions for AI validation + mems_list = {i: memory.page_content for i, memory in enumerate(memories + solutions)} + + # call AI to validate the memories + try: + filter = await self.agent.call_utility_model( + system=self.agent.read_prompt("memory.memories_filter.sys.md"), + message=self.agent.read_prompt( + "memory.memories_filter.msg.md", + memories=mems_list, + history=history, + message=user_instruction, + ), + ) + filter_inds = dirty_json.try_parse(filter) + + # filter memories and solutions based on filter_inds + filtered_memories = [] + filtered_solutions = [] + mem_len = len(memories) + + # process each index in filter_inds + # make sure filter_inds is a list and 
contains valid integers + if isinstance(filter_inds, list): + for idx in filter_inds: + if isinstance(idx, int): + if idx < mem_len: + # this is a memory + filtered_memories.append(memories[idx]) + else: + # this is a solution, adjust index + sol_idx = idx - mem_len + if sol_idx < len(solutions): + filtered_solutions.append(solutions[sol_idx]) + + # replace original lists with filtered ones + memories = filtered_memories + solutions = filtered_solutions + + except Exception as e: + err = errors.format_error(e) + self.agent.context.log.log( + type="warning", heading="Failed to filter relevant memories", content=err + ) + filter_inds = [] + + + # limit the number of memories and solutions + memories = memories[: set["memory_recall_memories_max_result"]] + solutions = solutions[: set["memory_recall_solutions_max_result"]] + + # log the search result + log_item.update( + heading=f"{len(memories)} memories and {len(solutions)} relevant solutions found", + ) + + memories_txt = "\n\n".join([mem.page_content for mem in memories]) if memories else "" + solutions_txt = "\n\n".join([sol.page_content for sol in solutions]) if solutions else "" + + # log the full results + if memories_txt: + log_item.update(memories=memories_txt) + if solutions_txt: + log_item.update(solutions=solutions_txt) + + # place to prompt + if memories_txt: + extras["memories"] = self.agent.parse_prompt( + "agent.system.memories.md", memories=memories_txt + ) + if solutions_txt: + extras["solutions"] = self.agent.parse_prompt( + "agent.system.solutions.md", solutions=solutions_txt + ) diff --git a/plugins/memory/extensions/monologue_end/_50_memorize_fragments.py b/plugins/memory/extensions/monologue_end/_50_memorize_fragments.py new file mode 100644 index 000000000..8ff2e40ae --- /dev/null +++ b/plugins/memory/extensions/monologue_end/_50_memorize_fragments.py @@ -0,0 +1,205 @@ +import asyncio +from python.helpers import settings, errors +from python.helpers.extension import Extension +from 
python.helpers.dirty_json import DirtyJson +from agent import LoopData +from python.helpers.log import LogItem +from python.helpers.defer import DeferredTask, THREAD_BACKGROUND + +# Import Memory and DEFAULT_THRESHOLD from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory +from tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD + + +class MemorizeMemories(Extension): + + async def execute(self, loop_data: LoopData = LoopData(), **kwargs): + # try: + + set = settings.get_settings() + + if not set["memory_memorize_enabled"]: + return + + # show full util message + log_item = self.agent.context.log.log( + type="util", + heading="Memorizing new information...", + ) + + # memorize in background + task = DeferredTask(thread_name=THREAD_BACKGROUND) + task.start_task(self.memorize, loop_data, log_item) + # task = asyncio.create_task(self.memorize(loop_data, log_item)) + return task + + async def memorize(self, loop_data: LoopData, log_item: LogItem, **kwargs): + + try: + set = settings.get_settings() + + db = await Memory.get(self.agent) + + # get system message and chat history for util llm + system = self.agent.read_prompt("memory.memories_sum.sys.md") + msgs_text = self.agent.concat_messages(self.agent.history) + + # # log query streamed by LLM + # async def log_callback(content): + # log_item.stream(content=content) + + # call util llm to find info in history + memories_json = await self.agent.call_utility_model( + system=system, + message=msgs_text, + # callback=log_callback, + background=True, + ) + + # log data < no need for streaming utility messages + log_item.update(content=memories_json) + + # Add validation and error handling for memories_json + if not memories_json or not isinstance(memories_json, str): + log_item.update(heading="No response from utility model.") + return + + # 
Strip any whitespace that might cause issues + memories_json = memories_json.strip() + + if not memories_json: + log_item.update(heading="Empty response from utility model.") + return + + try: + memories = DirtyJson.parse_string(memories_json) + except Exception as e: + log_item.update(heading=f"Failed to parse memories response: {str(e)}") + return + + # Validate that memories is a list or convertible to one + if memories is None: + log_item.update(heading="No valid memories found in response.") + return + + # If memories is not a list, try to make it one + if not isinstance(memories, list): + if isinstance(memories, (str, dict)): + memories = [memories] + else: + log_item.update(heading="Invalid memories format received.") + return + + if not isinstance(memories, list) or len(memories) == 0: + log_item.update(heading="No useful information to memorize.") + return + else: + memories_txt = "\n\n".join([str(memory) for memory in memories]).strip() + log_item.update(heading=f"{len(memories)} entries to memorize.", memories=memories_txt) + + # Process memories with intelligent consolidation + total_processed = 0 + total_consolidated = 0 + rem = [] + + for memory in memories: + # Convert memory to plain text + txt = f"{memory}" + + if set["memory_memorize_consolidation"]: + + try: + # Use intelligent consolidation system + from helpers.memory_consolidation import create_memory_consolidator + consolidator = create_memory_consolidator( + self.agent, + similarity_threshold=DEFAULT_MEMORY_THRESHOLD, # More permissive for discovery + max_similar_memories=8, + max_llm_context_memories=4 + ) + + # Create memory item-specific log for detailed tracking + memory_log = None # too many utility messages, skip log for now + # memory_log = self.agent.context.log.log( + # type="util", + # heading=f"Processing memory fragment: {txt[:50]}...", + # update_progress="none" # Don't affect status bar + # ) + + # Process with intelligent consolidation + result_obj = await 
consolidator.process_new_memory( + new_memory=txt, + area=Memory.Area.FRAGMENTS.value, + metadata={"area": Memory.Area.FRAGMENTS.value}, + log_item=memory_log + ) + + # Update the individual log item with completion status but keep it temporary + if result_obj.get("success"): + total_consolidated += 1 + if memory_log: + memory_log.update( + result="Fragment processed successfully", + heading=f"Memory fragment completed: {txt[:50]}...", + update_progress="none" # Show briefly then disappear + ) + else: + if memory_log: + memory_log.update( + result="Fragment processing failed", + heading=f"Memory fragment failed: {txt[:50]}...", + update_progress="none" # Show briefly then disappear + ) + total_processed += 1 + + except Exception as e: + # Log error but continue processing + log_item.update(consolidation_error=str(e)) + total_processed += 1 + + # Update final results with structured logging + log_item.update( + heading=f"Memorization completed: {total_processed} memories processed, {total_consolidated} intelligently consolidated", + memories=memories_txt, + result=f"{total_processed} memories processed, {total_consolidated} intelligently consolidated", + memories_processed=total_processed, + memories_consolidated=total_consolidated, + update_progress="none" + ) + + else: + + # remove previous fragments too similiar to this one + if set["memory_memorize_replace_threshold"] > 0: + rem += await db.delete_documents_by_query( + query=txt, + threshold=set["memory_memorize_replace_threshold"], + filter=f"area=='{Memory.Area.FRAGMENTS.value}'", + ) + if rem: + rem_txt = "\n\n".join(Memory.format_docs_plain(rem)) + log_item.update(replaced=rem_txt) + + # insert new memory + await db.insert_text(text=txt, metadata={"area": Memory.Area.FRAGMENTS.value}) + + log_item.update( + result=f"{len(memories)} entries memorized.", + heading=f"{len(memories)} entries memorized.", + ) + if rem: + log_item.stream(result=f"\nReplaced {len(rem)} previous memories.") + + + + + except 
Exception as e: + err = errors.format_error(e) + self.agent.context.log.log( + type="warning", heading="Memorize memories extension error", content=err + ) diff --git a/plugins/memory/extensions/monologue_end/_51_memorize_solutions.py b/plugins/memory/extensions/monologue_end/_51_memorize_solutions.py new file mode 100644 index 000000000..da6b9a806 --- /dev/null +++ b/plugins/memory/extensions/monologue_end/_51_memorize_solutions.py @@ -0,0 +1,208 @@ +import asyncio +from python.helpers import settings, errors +from python.helpers.extension import Extension +from python.helpers.dirty_json import DirtyJson +from agent import LoopData +from python.helpers.log import LogItem +from python.helpers.defer import DeferredTask, THREAD_BACKGROUND + +# Import Memory and DEFAULT_THRESHOLD from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory +from tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD + +class MemorizeSolutions(Extension): + + async def execute(self, loop_data: LoopData = LoopData(), **kwargs): + # try: + + set = settings.get_settings() + + if not set["memory_memorize_enabled"]: + return + + # show full util message + log_item = self.agent.context.log.log( + type="util", + heading="Memorizing succesful solutions...", + ) + + # memorize in background + task = DeferredTask(thread_name=THREAD_BACKGROUND) + task.start_task(self.memorize, loop_data, log_item) + # task = asyncio.create_task(self.memorize(loop_data, log_item)) + return task + + async def memorize(self, loop_data: LoopData, log_item: LogItem, **kwargs): + try: + set = settings.get_settings() + + db = await Memory.get(self.agent) + + # get system message and chat history for util llm + system = self.agent.read_prompt("memory.solutions_sum.sys.md") + msgs_text = self.agent.concat_messages(self.agent.history) + + # log query 
streamed by LLM + # async def log_callback(content): + # log_item.stream(content=content) + + # call util llm to find solutions in history + solutions_json = await self.agent.call_utility_model( + system=system, + message=msgs_text, + # callback=log_callback, + background=True, + ) + + # log query < no need for streaming utility messages + log_item.update(content=solutions_json) + + + + # Add validation and error handling for solutions_json + if not solutions_json or not isinstance(solutions_json, str): + log_item.update(heading="No response from utility model.") + return + + # Strip any whitespace that might cause issues + solutions_json = solutions_json.strip() + + if not solutions_json: + log_item.update(heading="Empty response from utility model.") + return + + try: + solutions = DirtyJson.parse_string(solutions_json) + except Exception as e: + log_item.update(heading=f"Failed to parse solutions response: {str(e)}") + return + + # Validate that solutions is a list or convertible to one + if solutions is None: + log_item.update(heading="No valid solutions found in response.") + return + + # If solutions is not a list, try to make it one + if not isinstance(solutions, list): + if isinstance(solutions, (str, dict)): + solutions = [solutions] + else: + log_item.update(heading="Invalid solutions format received.") + return + + if not isinstance(solutions, list) or len(solutions) == 0: + log_item.update(heading="No successful solutions to memorize.") + return + else: + solutions_txt = "\n\n".join([str(solution) for solution in solutions]).strip() + log_item.update( + heading=f"{len(solutions)} successful solutions to memorize.", solutions=solutions_txt + ) + + # Process solutions with intelligent consolidation + total_processed = 0 + total_consolidated = 0 + rem = [] + + for solution in solutions: + # Convert solution to structured text + if isinstance(solution, dict): + problem = solution.get('problem', 'Unknown problem') + solution_text = solution.get('solution', 
'Unknown solution') + txt = f"# Problem\n {problem}\n# Solution\n {solution_text}" + else: + # If solution is not a dict, convert it to string + txt = f"# Solution\n {str(solution)}" + + if set["memory_memorize_consolidation"]: + try: + # Use intelligent consolidation system + from helpers.memory_consolidation import create_memory_consolidator + consolidator = create_memory_consolidator( + self.agent, + similarity_threshold=DEFAULT_MEMORY_THRESHOLD, # More permissive for discovery + max_similar_memories=6, # Fewer for solutions (more complex) + max_llm_context_memories=3 + ) + + # Create solution-specific log for detailed tracking + solution_log = None # too many utility messages, skip log for now + # solution_log = self.agent.context.log.log( + # type="util", + # heading=f"Processing solution: {txt[:50]}...", + # update_progress="none" # Don't affect status bar + # ) + + # Process with intelligent consolidation + result_obj = await consolidator.process_new_memory( + new_memory=txt, + area=Memory.Area.SOLUTIONS.value, + metadata={"area": Memory.Area.SOLUTIONS.value}, + log_item=solution_log + ) + + # Update the individual log item with completion status but keep it temporary + if result_obj.get("success"): + total_consolidated += 1 + if solution_log: + solution_log.update( + result="Solution processed successfully", + heading=f"Solution completed: {txt[:50]}...", + update_progress="none" # Show briefly then disappear + ) + else: + if solution_log: + solution_log.update( + result="Solution processing failed", + heading=f"Solution failed: {txt[:50]}...", + update_progress="none" # Show briefly then disappear + ) + total_processed += 1 + + except Exception as e: + # Log error but continue processing + log_item.update(consolidation_error=str(e)) + total_processed += 1 + + # Update final results with structured logging + log_item.update( + heading=f"Solution memorization completed: {total_processed} solutions processed, {total_consolidated} intelligently consolidated", 
+ solutions=solutions_txt, + result=f"{total_processed} solutions processed, {total_consolidated} intelligently consolidated", + solutions_processed=total_processed, + solutions_consolidated=total_consolidated, + update_progress="none" + ) + else: + # remove previous solutions too similar to this one + if set["memory_memorize_replace_threshold"] > 0: + rem += await db.delete_documents_by_query( + query=txt, + threshold=set["memory_memorize_replace_threshold"], + filter=f"area=='{Memory.Area.SOLUTIONS.value}'", + ) + if rem: + rem_txt = "\n\n".join(Memory.format_docs_plain(rem)) + log_item.update(replaced=rem_txt) + + # insert new solution + await db.insert_text(text=txt, metadata={"area": Memory.Area.SOLUTIONS.value}) + + log_item.update( + result=f"{len(solutions)} solutions memorized.", + heading=f"{len(solutions)} solutions memorized.", + ) + if rem: + log_item.stream(result=f"\nReplaced {len(rem)} previous solutions.") + + + except Exception as e: + err = errors.format_error(e) + self.agent.context.log.log( + type="warning", heading="Memorize solutions extension error", content=err + ) diff --git a/plugins/memory/extensions/monologue_start/_10_memory_init.py b/plugins/memory/extensions/monologue_start/_10_memory_init.py new file mode 100644 index 000000000..ab9d1990e --- /dev/null +++ b/plugins/memory/extensions/monologue_start/_10_memory_init.py @@ -0,0 +1,20 @@ +from python.helpers.extension import Extension +from agent import LoopData +import asyncio + +# Import memory from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers import memory + + +class MemoryInit(Extension): + + async def execute(self, loop_data: LoopData = LoopData(), **kwargs): + db = await memory.Memory.get(self.agent) + + + \ No newline at end of file diff --git a/python/helpers/knowledge_import.py b/plugins/memory/helpers/knowledge_import.py similarity
index 100% rename from python/helpers/knowledge_import.py rename to plugins/memory/helpers/knowledge_import.py diff --git a/python/helpers/memory.py b/plugins/memory/helpers/memory.py similarity index 99% rename from python/helpers/memory.py rename to plugins/memory/helpers/memory.py index 061e93699..bad55011b 100644 --- a/python/helpers/memory.py +++ b/plugins/memory/helpers/memory.py @@ -23,9 +23,9 @@ import os, json import numpy as np from python.helpers.print_style import PrintStyle -from . import files +from python.helpers import files from langchain_core.documents import Document -from python.helpers import knowledge_import +from . import knowledge_import from python.helpers.log import Log, LogItem from enum import Enum from agent import Agent, AgentContext diff --git a/python/helpers/memory_consolidation.py b/plugins/memory/helpers/memory_consolidation.py similarity index 99% rename from python/helpers/memory_consolidation.py rename to plugins/memory/helpers/memory_consolidation.py index 86a5cf598..9c1fe04b3 100644 --- a/python/helpers/memory_consolidation.py +++ b/plugins/memory/helpers/memory_consolidation.py @@ -7,13 +7,20 @@ from enum import Enum from langchain_core.documents import Document -from python.helpers.memory import Memory +from .memory import Memory from python.helpers.dirty_json import DirtyJson from python.helpers.log import LogItem from python.helpers.print_style import PrintStyle -from python.tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD from agent import Agent +# Import from tools within plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD + class ConsolidationAction(Enum): """Actions that can be taken during memory consolidation.""" diff --git a/plugins/memory/plugin.json b/plugins/memory/plugin.json new file mode 100644 
index 000000000..d4ef0c364 --- /dev/null +++ b/plugins/memory/plugin.json @@ -0,0 +1,151 @@ +{ + "id": "memory", + "name": "Memory System", + "version": "1.0.0", + "author": "Agent Zero Team", + "description": "FAISS-based vector memory with embeddings caching and knowledge preloading", + "tags": ["memory", "vector", "faiss", "core"], + "provides": { + "helper": [ + { + "module": "helpers/memory.py", + "description": "Core Memory class with FAISS vector store" + }, + { + "module": "helpers/memory_consolidation.py", + "description": "Intelligent memory deduplication" + }, + { + "module": "helpers/knowledge_import.py", + "description": "Knowledge file ingestion pipeline" + } + ], + "tool": [ + { + "module": "tools/memory_save.py", + "description": "Save memories" + }, + { + "module": "tools/memory_load.py", + "description": "Load/search memories" + }, + { + "module": "tools/memory_delete.py", + "description": "Delete specific memories" + }, + { + "module": "tools/memory_forget.py", + "description": "Forget memories by query" + } + ], + "extension": [ + { + "module": "extensions/monologue_start/_10_memory_init.py", + "description": "Initialize memory on monologue start" + }, + { + "module": "extensions/monologue_end/_50_memorize_fragments.py", + "description": "Auto-memorize conversation fragments" + }, + { + "module": "extensions/monologue_end/_51_memorize_solutions.py", + "description": "Auto-memorize solutions" + }, + { + "module": "extensions/message_loop_prompts_after/_50_recall_memories.py", + "description": "Recall relevant memories into prompt" + } + ], + "api": [ + { + "module": "api/memory_dashboard.py", + "description": "Memory browser and management dashboard" + } + ], + "ui": [ + { + "component": "ui/memory-dashboard.html", + "module": "ui/memory-dashboard-store.js", + "description": "Memory Dashboard UI" + }, + { + "component": "ui/memory-detail-modal.html", + "description": "Memory Detail Modal" + } + ], + "prompt": [ + { + "module": 
"prompts/agent.system.memories.md", + "description": "Agent system prompt for memories" + }, + { + "module": "prompts/agent.system.solutions.md", + "description": "Agent system prompt for solutions" + }, + { + "module": "prompts/agent.system.tool.memory.md", + "description": "Memory tool system prompt" + }, + { + "module": "prompts/fw.memory.hist_suc.sys.md", + "description": "Framework memory history success prompt" + }, + { + "module": "prompts/fw.memory.hist_sum.sys.md", + "description": "Framework memory history summary prompt" + }, + { + "module": "prompts/fw.memory_saved.md", + "description": "Framework memory saved prompt" + }, + { + "module": "prompts/memory.consolidation.msg.md", + "description": "Memory consolidation message" + }, + { + "module": "prompts/memory.consolidation.sys.md", + "description": "Memory consolidation system prompt" + }, + { + "module": "prompts/memory.keyword_extraction.msg.md", + "description": "Keyword extraction message" + }, + { + "module": "prompts/memory.keyword_extraction.sys.md", + "description": "Keyword extraction system prompt" + }, + { + "module": "prompts/memory.memories_filter.msg.md", + "description": "Memories filter message" + }, + { + "module": "prompts/memory.memories_filter.sys.md", + "description": "Memories filter system prompt" + }, + { + "module": "prompts/memory.memories_query.msg.md", + "description": "Memories query message" + }, + { + "module": "prompts/memory.memories_query.sys.md", + "description": "Memory query system prompt" + }, + { + "module": "prompts/memory.memories_sum.sys.md", + "description": "Memories summary system prompt" + }, + { + "module": "prompts/memory.recall_delay_msg.md", + "description": "Recall delay message" + }, + { + "module": "prompts/memory.solutions_query.sys.md", + "description": "Solutions query system prompt" + }, + { + "module": "prompts/memory.solutions_sum.sys.md", + "description": "Solutions summary system prompt" + } + ] + } +} diff --git 
a/prompts/agent.system.memories.md b/plugins/memory/prompts/agent.system.memories.md similarity index 100% rename from prompts/agent.system.memories.md rename to plugins/memory/prompts/agent.system.memories.md diff --git a/prompts/agent.system.solutions.md b/plugins/memory/prompts/agent.system.solutions.md similarity index 100% rename from prompts/agent.system.solutions.md rename to plugins/memory/prompts/agent.system.solutions.md diff --git a/prompts/agent.system.tool.memory.md b/plugins/memory/prompts/agent.system.tool.memory.md similarity index 100% rename from prompts/agent.system.tool.memory.md rename to plugins/memory/prompts/agent.system.tool.memory.md diff --git a/prompts/fw.memory.hist_suc.sys.md b/plugins/memory/prompts/fw.memory.hist_suc.sys.md similarity index 100% rename from prompts/fw.memory.hist_suc.sys.md rename to plugins/memory/prompts/fw.memory.hist_suc.sys.md diff --git a/prompts/fw.memory.hist_sum.sys.md b/plugins/memory/prompts/fw.memory.hist_sum.sys.md similarity index 100% rename from prompts/fw.memory.hist_sum.sys.md rename to plugins/memory/prompts/fw.memory.hist_sum.sys.md diff --git a/prompts/fw.memory_saved.md b/plugins/memory/prompts/fw.memory_saved.md similarity index 100% rename from prompts/fw.memory_saved.md rename to plugins/memory/prompts/fw.memory_saved.md diff --git a/prompts/memory.consolidation.msg.md b/plugins/memory/prompts/memory.consolidation.msg.md similarity index 100% rename from prompts/memory.consolidation.msg.md rename to plugins/memory/prompts/memory.consolidation.msg.md diff --git a/prompts/memory.consolidation.sys.md b/plugins/memory/prompts/memory.consolidation.sys.md similarity index 100% rename from prompts/memory.consolidation.sys.md rename to plugins/memory/prompts/memory.consolidation.sys.md diff --git a/prompts/memory.keyword_extraction.msg.md b/plugins/memory/prompts/memory.keyword_extraction.msg.md similarity index 100% rename from prompts/memory.keyword_extraction.msg.md rename to 
plugins/memory/prompts/memory.keyword_extraction.msg.md diff --git a/prompts/memory.keyword_extraction.sys.md b/plugins/memory/prompts/memory.keyword_extraction.sys.md similarity index 100% rename from prompts/memory.keyword_extraction.sys.md rename to plugins/memory/prompts/memory.keyword_extraction.sys.md diff --git a/prompts/memory.memories_filter.msg.md b/plugins/memory/prompts/memory.memories_filter.msg.md similarity index 100% rename from prompts/memory.memories_filter.msg.md rename to plugins/memory/prompts/memory.memories_filter.msg.md diff --git a/prompts/memory.memories_filter.sys.md b/plugins/memory/prompts/memory.memories_filter.sys.md similarity index 100% rename from prompts/memory.memories_filter.sys.md rename to plugins/memory/prompts/memory.memories_filter.sys.md diff --git a/prompts/memory.memories_query.msg.md b/plugins/memory/prompts/memory.memories_query.msg.md similarity index 100% rename from prompts/memory.memories_query.msg.md rename to plugins/memory/prompts/memory.memories_query.msg.md diff --git a/prompts/memory.memories_query.sys.md b/plugins/memory/prompts/memory.memories_query.sys.md similarity index 100% rename from prompts/memory.memories_query.sys.md rename to plugins/memory/prompts/memory.memories_query.sys.md diff --git a/prompts/memory.memories_sum.sys.md b/plugins/memory/prompts/memory.memories_sum.sys.md similarity index 100% rename from prompts/memory.memories_sum.sys.md rename to plugins/memory/prompts/memory.memories_sum.sys.md diff --git a/prompts/memory.recall_delay_msg.md b/plugins/memory/prompts/memory.recall_delay_msg.md similarity index 100% rename from prompts/memory.recall_delay_msg.md rename to plugins/memory/prompts/memory.recall_delay_msg.md diff --git a/prompts/memory.solutions_query.sys.md b/plugins/memory/prompts/memory.solutions_query.sys.md similarity index 100% rename from prompts/memory.solutions_query.sys.md rename to plugins/memory/prompts/memory.solutions_query.sys.md diff --git 
a/prompts/memory.solutions_sum.sys.md b/plugins/memory/prompts/memory.solutions_sum.sys.md similarity index 100% rename from prompts/memory.solutions_sum.sys.md rename to plugins/memory/prompts/memory.solutions_sum.sys.md diff --git a/python/tools/memory_delete.py b/plugins/memory/tools/memory_delete.py similarity index 66% rename from python/tools/memory_delete.py rename to plugins/memory/tools/memory_delete.py index 7b13935a7..5ecf8a3f6 100644 --- a/python/tools/memory_delete.py +++ b/plugins/memory/tools/memory_delete.py @@ -1,6 +1,13 @@ -from python.helpers.memory import Memory from python.helpers.tool import Tool, Response +# Import Memory from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory + class MemoryDelete(Tool): diff --git a/python/tools/memory_forget.py b/plugins/memory/tools/memory_forget.py similarity index 60% rename from python/tools/memory_forget.py rename to plugins/memory/tools/memory_forget.py index 48c012d9d..51a7f2c40 100644 --- a/python/tools/memory_forget.py +++ b/plugins/memory/tools/memory_forget.py @@ -1,6 +1,13 @@ -from python.helpers.memory import Memory from python.helpers.tool import Tool, Response -from python.tools.memory_load import DEFAULT_THRESHOLD + +# Import Memory and DEFAULT_THRESHOLD from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory +from tools.memory_load import DEFAULT_THRESHOLD class MemoryForget(Tool): diff --git a/python/tools/memory_load.py b/plugins/memory/tools/memory_load.py similarity index 74% rename from python/tools/memory_load.py rename to plugins/memory/tools/memory_load.py index 5fc7d6126..323891c4e 100644 --- a/python/tools/memory_load.py +++ b/plugins/memory/tools/memory_load.py @@ -1,6 +1,13 @@ 
-from python.helpers.memory import Memory from python.helpers.tool import Tool, Response +# Import Memory from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory + DEFAULT_THRESHOLD = 0.7 DEFAULT_LIMIT = 10 diff --git a/python/tools/memory_save.py b/plugins/memory/tools/memory_save.py similarity index 67% rename from python/tools/memory_save.py rename to plugins/memory/tools/memory_save.py index 31f00ca29..c5b13166f 100644 --- a/python/tools/memory_save.py +++ b/plugins/memory/tools/memory_save.py @@ -1,6 +1,13 @@ -from python.helpers.memory import Memory from python.helpers.tool import Tool, Response +# Import Memory from plugin +import sys +from pathlib import Path +_plugin_root = Path(__file__).parent.parent +if str(_plugin_root) not in sys.path: + sys.path.insert(0, str(_plugin_root)) +from helpers.memory import Memory + class MemorySave(Tool): diff --git a/webui/components/modals/memory/memory-dashboard-store.js b/plugins/memory/ui/memory-dashboard-store.js similarity index 99% rename from webui/components/modals/memory/memory-dashboard-store.js rename to plugins/memory/ui/memory-dashboard-store.js index 4f37e2520..c5f8eca97 100644 --- a/webui/components/modals/memory/memory-dashboard-store.js +++ b/plugins/memory/ui/memory-dashboard-store.js @@ -53,7 +53,7 @@ const memoryDashboardStore = { pollingEnabled: false, async openModal() { - await openModal("modals/memory/memory-dashboard.html"); + await openModal("../plugins/memory/ui/memory-dashboard.html"); }, init() { @@ -448,7 +448,7 @@ ${memory.content_full} this.editMode = false; this.editMemoryBackup = null; // Use global modal system - openModal("modals/memory/memory-detail-modal.html"); + openModal("../plugins/memory/ui/memory-detail-modal.html"); }, closeMemoryDetails() { diff --git a/webui/components/modals/memory/memory-dashboard.html 
b/plugins/memory/ui/memory-dashboard.html similarity index 99% rename from webui/components/modals/memory/memory-dashboard.html rename to plugins/memory/ui/memory-dashboard.html index f2c82dae9..dda198f93 100644 --- a/webui/components/modals/memory/memory-dashboard.html +++ b/plugins/memory/ui/memory-dashboard.html @@ -3,7 +3,7 @@ Memory Dashboard diff --git a/webui/components/modals/memory/memory-detail-modal.html b/plugins/memory/ui/memory-detail-modal.html similarity index 100% rename from webui/components/modals/memory/memory-detail-modal.html rename to plugins/memory/ui/memory-detail-modal.html diff --git a/python/api/import_knowledge.py b/python/api/import_knowledge.py index 97ba15e47..39b1d1842 100644 --- a/python/api/import_knowledge.py +++ b/python/api/import_knowledge.py @@ -1,5 +1,7 @@ from python.helpers.api import ApiHandler, Request, Response -from python.helpers import files, memory +from python.helpers import files +from python.helpers.plugins import import_plugin_module +memory = import_plugin_module("memory", "helpers/memory.py") import os from python.helpers.security import safe_filename diff --git a/python/api/knowledge_path_get.py b/python/api/knowledge_path_get.py index dadf0a692..29033e198 100644 --- a/python/api/knowledge_path_get.py +++ b/python/api/knowledge_path_get.py @@ -1,5 +1,7 @@ from python.helpers.api import ApiHandler, Request, Response -from python.helpers import files, memory, notification, projects, notification +from python.helpers import files, notification, projects +from python.helpers.plugins import import_plugin_module +memory = import_plugin_module("memory", "helpers/memory.py") import os from werkzeug.utils import secure_filename diff --git a/python/api/knowledge_reindex.py b/python/api/knowledge_reindex.py index d854c9199..75cb41969 100644 --- a/python/api/knowledge_reindex.py +++ b/python/api/knowledge_reindex.py @@ -1,5 +1,7 @@ from python.helpers.api import ApiHandler, Request, Response -from
python.helpers import files, memory, notification, projects, notification +from python.helpers import files, notification, projects +from python.helpers.plugins import import_plugin_module +memory = import_plugin_module("memory", "helpers/memory.py") import os diff --git a/python/api/plugins_list.py b/python/api/plugins_list.py new file mode 100644 index 000000000..f0ffab86e --- /dev/null +++ b/python/api/plugins_list.py @@ -0,0 +1,22 @@ +from python.helpers.api import ApiHandler, Request, Response +from python.helpers import plugins + + +class PluginsList(ApiHandler): + """ + API handler for listing all available plugins. + Returns a list of plugin manifests. + """ + + @classmethod + def get_methods(cls): + return ["GET", "POST"] + + async def process(self, input: dict, request: Request) -> dict | Response: + # Get all available plugins + plugin_list = plugins.list_plugins() + + # Serialize plugin objects using helper + data = [plugins.build_plugin_response_data(p) for p in plugin_list] + + return {"ok": True, "data": data} diff --git a/python/api/plugins_resolve.py b/python/api/plugins_resolve.py new file mode 100644 index 000000000..28a8ee2d5 --- /dev/null +++ b/python/api/plugins_resolve.py @@ -0,0 +1,50 @@ +from python.helpers.api import ApiHandler, Request, Response +from python.helpers import plugins + + +class PluginsResolve(ApiHandler): + """ + API handler for resolving plugin manifests. + Accepts a plugin ID or list of IDs and returns normalized manifest(s) with URLs and props.
+ + Single ID: {"id": "example"} -> {"ok": True, "data": {...}} + Multiple IDs: {"ids": ["example", "another"]} -> {"ok": True, "data": [{...}, {...}]} + """ + + @classmethod + def get_methods(cls): + return ["POST"] + + async def process(self, input: dict, request: Request) -> dict | Response: + # Support both single ID and array of IDs + plugin_id = input.get("id") + plugin_ids = input.get("ids") + + # Batch mode: array of IDs + if plugin_ids: + if not isinstance(plugin_ids, list): + return {"ok": False, "error": "ids must be an array"} + + results = [] + for pid in plugin_ids: + plugin = plugins.find_plugin(pid) + if plugin: + results.append(plugins.build_plugin_response_data(plugin)) + else: + results.append({ + "id": pid, + "error": f"Plugin '{pid}' not found or invalid manifest" + }) + + return {"ok": True, "data": results} + + # Single mode: one ID + elif plugin_id: + plugin = plugins.find_plugin(plugin_id) + if not plugin: + return {"ok": False, "error": f"Plugin '{plugin_id}' not found or invalid manifest"} + + return {"ok": True, "data": plugins.build_plugin_response_data(plugin)} + + else: + return {"ok": False, "error": "Missing plugin ID or IDs"} diff --git a/python/extensions/message_loop_prompts_after/_50_recall_memories.py b/python/extensions/message_loop_prompts_after/_50_recall_memories.py index e7fd509f8..f4bb5d542 100644 --- a/python/extensions/message_loop_prompts_after/_50_recall_memories.py +++ b/python/extensions/message_loop_prompts_after/_50_recall_memories.py @@ -1,219 +1,8 @@ -import asyncio -from python.helpers.extension import Extension -from python.helpers.memory import Memory -from agent import LoopData -from python.tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD -from python.helpers import dirty_json, errors, settings, log +"""Recall memories extension - implementation provided by the memory plugin.""" +from python.helpers.plugins import import_plugin_module +# Import the actual implementation from the plugin 
+_mod = import_plugin_module("memory", "extensions/message_loop_prompts_after/_50_recall_memories.py") -DATA_NAME_TASK = "_recall_memories_task" -DATA_NAME_ITER = "_recall_memories_iter" -SEARCH_TIMEOUT = 30 - - -class RecallMemories(Extension): - - # INTERVAL = 3 - # HISTORY = 10000 - # MEMORIES_MAX_SEARCH = 12 - # SOLUTIONS_MAX_SEARCH = 8 - # MEMORIES_MAX_RESULT = 5 - # SOLUTIONS_MAX_RESULT = 3 - # THRESHOLD = DEFAULT_MEMORY_THRESHOLD - - async def execute(self, loop_data: LoopData = LoopData(), **kwargs): - - set = settings.get_settings() - - # turned off in settings? - if not set["memory_recall_enabled"]: - return - - # every X iterations (or the first one) recall memories - if loop_data.iteration % set["memory_recall_interval"] == 0: - - # show util message right away - log_item = self.agent.context.log.log( - type="util", - heading="Searching memories...", - ) - - task = asyncio.create_task( - asyncio.wait_for( - self.search_memories(loop_data=loop_data, log_item=log_item, **kwargs), - timeout=SEARCH_TIMEOUT, - ) - ) - else: - task = None - - # set to agent to be able to wait for it - self.agent.set_data(DATA_NAME_TASK, task) - self.agent.set_data(DATA_NAME_ITER, loop_data.iteration) - - async def search_memories(self, log_item: log.LogItem, loop_data: LoopData, **kwargs): - - # cleanup - extras = loop_data.extras_persistent - if "memories" in extras: - del extras["memories"] - if "solutions" in extras: - del extras["solutions"] - - - set = settings.get_settings() - # try: - - # get system message and chat history for util llm - system = self.agent.read_prompt("memory.memories_query.sys.md") - - # # log query streamed by LLM - # async def log_callback(content): - # log_item.stream(query=content) - - # call util llm to summarize conversation - user_instruction = ( - loop_data.user_message.output_text() if loop_data.user_message else "None" - ) - history = self.agent.history.output_text()[-set["memory_recall_history_len"]:] - message = self.agent.read_prompt( - 
"memory.memories_query.msg.md", history=history, message=user_instruction - ) - - # if query preparation by AI is enabled - if set["memory_recall_query_prep"]: - try: - # call util llm to generate search query from the conversation - query = await self.agent.call_utility_model( - system=system, - message=message, - # callback=log_callback, - ) - query = query.strip() - log_item.update(query=query) # no need for streaming here - except Exception as e: - err = errors.format_error(e) - self.agent.context.log.log( - type="warning", heading="Recall memories extension error:", content=err - ) - query = "" - - # no query, no search - if not query: - log_item.update( - heading="Failed to generate memory query", - ) - return - - # otherwise use the message and history as query - else: - query = user_instruction + "\n\n" + history - - # if there is no query (or just dash by the LLM), do not continue - if not query or len(query) <= 3: - log_item.update( - query="No relevant memory query generated, skipping search", - ) - return - - # get memory database - db = await Memory.get(self.agent) - - # search for general memories and fragments - memories = await db.search_similarity_threshold( - query=query, - limit=set["memory_recall_memories_max_search"], - threshold=set["memory_recall_similarity_threshold"], - filter=f"area == '{Memory.Area.MAIN.value}' or area == '{Memory.Area.FRAGMENTS.value}'", # exclude solutions - ) - - # search for solutions - solutions = await db.search_similarity_threshold( - query=query, - limit=set["memory_recall_solutions_max_search"], - threshold=set["memory_recall_similarity_threshold"], - filter=f"area == '{Memory.Area.SOLUTIONS.value}'", # exclude solutions - ) - - if not memories and not solutions: - log_item.update( - heading="No memories or solutions found", - ) - return - - # if post filtering is enabled - if set["memory_recall_post_filter"]: - # assemble an enumerated dict of memories and solutions for AI validation - mems_list = {i: 
memory.page_content for i, memory in enumerate(memories + solutions)} - - # call AI to validate the memories - try: - filter = await self.agent.call_utility_model( - system=self.agent.read_prompt("memory.memories_filter.sys.md"), - message=self.agent.read_prompt( - "memory.memories_filter.msg.md", - memories=mems_list, - history=history, - message=user_instruction, - ), - ) - filter_inds = dirty_json.try_parse(filter) - - # filter memories and solutions based on filter_inds - filtered_memories = [] - filtered_solutions = [] - mem_len = len(memories) - - # process each index in filter_inds - # make sure filter_inds is a list and contains valid integers - if isinstance(filter_inds, list): - for idx in filter_inds: - if isinstance(idx, int): - if idx < mem_len: - # this is a memory - filtered_memories.append(memories[idx]) - else: - # this is a solution, adjust index - sol_idx = idx - mem_len - if sol_idx < len(solutions): - filtered_solutions.append(solutions[sol_idx]) - - # replace original lists with filtered ones - memories = filtered_memories - solutions = filtered_solutions - - except Exception as e: - err = errors.format_error(e) - self.agent.context.log.log( - type="warning", heading="Failed to filter relevant memories", content=err - ) - filter_inds = [] - - - # limit the number of memories and solutions - memories = memories[: set["memory_recall_memories_max_result"]] - solutions = solutions[: set["memory_recall_solutions_max_result"]] - - # log the search result - log_item.update( - heading=f"{len(memories)} memories and {len(solutions)} relevant solutions found", - ) - - memories_txt = "\n\n".join([mem.page_content for mem in memories]) if memories else "" - solutions_txt = "\n\n".join([sol.page_content for sol in solutions]) if solutions else "" - - # log the full results - if memories_txt: - log_item.update(memories=memories_txt) - if solutions_txt: - log_item.update(solutions=solutions_txt) - - # place to prompt - if memories_txt: - extras["memories"] = 
self.agent.parse_prompt( - "agent.system.memories.md", memories=memories_txt - ) - if solutions_txt: - extras["solutions"] = self.agent.parse_prompt( - "agent.system.solutions.md", solutions=solutions_txt - ) +# Re-export all public names +globals().update({k: v for k, v in vars(_mod).items() if not k.startswith('_')}) diff --git a/python/extensions/monologue_end/_50_memorize_fragments.py b/python/extensions/monologue_end/_50_memorize_fragments.py index c15aaa90b..f7b88d9f9 100644 --- a/python/extensions/monologue_end/_50_memorize_fragments.py +++ b/python/extensions/monologue_end/_50_memorize_fragments.py @@ -1,198 +1,8 @@ -import asyncio -from python.helpers import settings, errors -from python.helpers.extension import Extension -from python.helpers.memory import Memory -from python.helpers.dirty_json import DirtyJson -from agent import LoopData -from python.helpers.log import LogItem -from python.tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD -from python.helpers.defer import DeferredTask, THREAD_BACKGROUND +"""Memorize fragments extension - implementation provided by the memory plugin.""" +from python.helpers.plugins import import_plugin_module +# Import the actual implementation from the plugin +_mod = import_plugin_module("memory", "extensions/monologue_end/_50_memorize_fragments.py") -class MemorizeMemories(Extension): - - async def execute(self, loop_data: LoopData = LoopData(), **kwargs): - # try: - - set = settings.get_settings() - - if not set["memory_memorize_enabled"]: - return - - # show full util message - log_item = self.agent.context.log.log( - type="util", - heading="Memorizing new information...", - ) - - # memorize in background - task = DeferredTask(thread_name=THREAD_BACKGROUND) - task.start_task(self.memorize, loop_data, log_item) - # task = asyncio.create_task(self.memorize(loop_data, log_item)) - return task - - async def memorize(self, loop_data: LoopData, log_item: LogItem, **kwargs): - - try: - set = 
settings.get_settings() - - db = await Memory.get(self.agent) - - # get system message and chat history for util llm - system = self.agent.read_prompt("memory.memories_sum.sys.md") - msgs_text = self.agent.concat_messages(self.agent.history) - - # # log query streamed by LLM - # async def log_callback(content): - # log_item.stream(content=content) - - # call util llm to find info in history - memories_json = await self.agent.call_utility_model( - system=system, - message=msgs_text, - # callback=log_callback, - background=True, - ) - - # log data < no need for streaming utility messages - log_item.update(content=memories_json) - - # Add validation and error handling for memories_json - if not memories_json or not isinstance(memories_json, str): - log_item.update(heading="No response from utility model.") - return - - # Strip any whitespace that might cause issues - memories_json = memories_json.strip() - - if not memories_json: - log_item.update(heading="Empty response from utility model.") - return - - try: - memories = DirtyJson.parse_string(memories_json) - except Exception as e: - log_item.update(heading=f"Failed to parse memories response: {str(e)}") - return - - # Validate that memories is a list or convertible to one - if memories is None: - log_item.update(heading="No valid memories found in response.") - return - - # If memories is not a list, try to make it one - if not isinstance(memories, list): - if isinstance(memories, (str, dict)): - memories = [memories] - else: - log_item.update(heading="Invalid memories format received.") - return - - if not isinstance(memories, list) or len(memories) == 0: - log_item.update(heading="No useful information to memorize.") - return - else: - memories_txt = "\n\n".join([str(memory) for memory in memories]).strip() - log_item.update(heading=f"{len(memories)} entries to memorize.", memories=memories_txt) - - # Process memories with intelligent consolidation - total_processed = 0 - total_consolidated = 0 - rem = [] - - 
for memory in memories: - # Convert memory to plain text - txt = f"{memory}" - - if set["memory_memorize_consolidation"]: - - try: - # Use intelligent consolidation system - from python.helpers.memory_consolidation import create_memory_consolidator - consolidator = create_memory_consolidator( - self.agent, - similarity_threshold=DEFAULT_MEMORY_THRESHOLD, # More permissive for discovery - max_similar_memories=8, - max_llm_context_memories=4 - ) - - # Create memory item-specific log for detailed tracking - memory_log = None # too many utility messages, skip log for now - # memory_log = self.agent.context.log.log( - # type="util", - # heading=f"Processing memory fragment: {txt[:50]}...", - # update_progress="none" # Don't affect status bar - # ) - - # Process with intelligent consolidation - result_obj = await consolidator.process_new_memory( - new_memory=txt, - area=Memory.Area.FRAGMENTS.value, - metadata={"area": Memory.Area.FRAGMENTS.value}, - log_item=memory_log - ) - - # Update the individual log item with completion status but keep it temporary - if result_obj.get("success"): - total_consolidated += 1 - if memory_log: - memory_log.update( - result="Fragment processed successfully", - heading=f"Memory fragment completed: {txt[:50]}...", - update_progress="none" # Show briefly then disappear - ) - else: - if memory_log: - memory_log.update( - result="Fragment processing failed", - heading=f"Memory fragment failed: {txt[:50]}...", - update_progress="none" # Show briefly then disappear - ) - total_processed += 1 - - except Exception as e: - # Log error but continue processing - log_item.update(consolidation_error=str(e)) - total_processed += 1 - - # Update final results with structured logging - log_item.update( - heading=f"Memorization completed: {total_processed} memories processed, {total_consolidated} intelligently consolidated", - memories=memories_txt, - result=f"{total_processed} memories processed, {total_consolidated} intelligently consolidated", - 
memories_processed=total_processed, - memories_consolidated=total_consolidated, - update_progress="none" - ) - - else: - - # remove previous fragments too similiar to this one - if set["memory_memorize_replace_threshold"] > 0: - rem += await db.delete_documents_by_query( - query=txt, - threshold=set["memory_memorize_replace_threshold"], - filter=f"area=='{Memory.Area.FRAGMENTS.value}'", - ) - if rem: - rem_txt = "\n\n".join(Memory.format_docs_plain(rem)) - log_item.update(replaced=rem_txt) - - # insert new memory - await db.insert_text(text=txt, metadata={"area": Memory.Area.FRAGMENTS.value}) - - log_item.update( - result=f"{len(memories)} entries memorized.", - heading=f"{len(memories)} entries memorized.", - ) - if rem: - log_item.stream(result=f"\nReplaced {len(rem)} previous memories.") - - - - - except Exception as e: - err = errors.format_error(e) - self.agent.context.log.log( - type="warning", heading="Memorize memories extension error", content=err - ) +# Re-export all public names +globals().update({k: v for k, v in vars(_mod).items() if not k.startswith('_')}) diff --git a/python/extensions/monologue_end/_51_memorize_solutions.py b/python/extensions/monologue_end/_51_memorize_solutions.py index cc30fc588..276a4aca5 100644 --- a/python/extensions/monologue_end/_51_memorize_solutions.py +++ b/python/extensions/monologue_end/_51_memorize_solutions.py @@ -1,201 +1,8 @@ -import asyncio -from python.helpers import settings, errors -from python.helpers.extension import Extension -from python.helpers.memory import Memory -from python.helpers.dirty_json import DirtyJson -from agent import LoopData -from python.helpers.log import LogItem -from python.tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD -from python.helpers.defer import DeferredTask, THREAD_BACKGROUND +"""Memorize solutions extension - implementation provided by the memory plugin.""" +from python.helpers.plugins import import_plugin_module -class MemorizeSolutions(Extension): +# 
Import the actual implementation from the plugin +_mod = import_plugin_module("memory", "extensions/monologue_end/_51_memorize_solutions.py") - async def execute(self, loop_data: LoopData = LoopData(), **kwargs): - # try: - - set = settings.get_settings() - - if not set["memory_memorize_enabled"]: - return - - # show full util message - log_item = self.agent.context.log.log( - type="util", - heading="Memorizing succesful solutions...", - ) - - # memorize in background - task = DeferredTask(thread_name=THREAD_BACKGROUND) - task.start_task(self.memorize, loop_data, log_item) - # task = asyncio.create_task(self.memorize(loop_data, log_item)) - return task - - async def memorize(self, loop_data: LoopData, log_item: LogItem, **kwargs): - try: - set = settings.get_settings() - - db = await Memory.get(self.agent) - - # get system message and chat history for util llm - system = self.agent.read_prompt("memory.solutions_sum.sys.md") - msgs_text = self.agent.concat_messages(self.agent.history) - - # log query streamed by LLM - # async def log_callback(content): - # log_item.stream(content=content) - - # call util llm to find solutions in history - solutions_json = await self.agent.call_utility_model( - system=system, - message=msgs_text, - # callback=log_callback, - background=True, - ) - - # log query < no need for streaming utility messages - log_item.update(content=solutions_json) - - - - # Add validation and error handling for solutions_json - if not solutions_json or not isinstance(solutions_json, str): - log_item.update(heading="No response from utility model.") - return - - # Strip any whitespace that might cause issues - solutions_json = solutions_json.strip() - - if not solutions_json: - log_item.update(heading="Empty response from utility model.") - return - - try: - solutions = DirtyJson.parse_string(solutions_json) - except Exception as e: - log_item.update(heading=f"Failed to parse solutions response: {str(e)}") - return - - # Validate that solutions is a list 
or convertible to one - if solutions is None: - log_item.update(heading="No valid solutions found in response.") - return - - # If solutions is not a list, try to make it one - if not isinstance(solutions, list): - if isinstance(solutions, (str, dict)): - solutions = [solutions] - else: - log_item.update(heading="Invalid solutions format received.") - return - - if not isinstance(solutions, list) or len(solutions) == 0: - log_item.update(heading="No successful solutions to memorize.") - return - else: - solutions_txt = "\n\n".join([str(solution) for solution in solutions]).strip() - log_item.update( - heading=f"{len(solutions)} successful solutions to memorize.", solutions=solutions_txt - ) - - # Process solutions with intelligent consolidation - total_processed = 0 - total_consolidated = 0 - rem = [] - - for solution in solutions: - # Convert solution to structured text - if isinstance(solution, dict): - problem = solution.get('problem', 'Unknown problem') - solution_text = solution.get('solution', 'Unknown solution') - txt = f"# Problem\n {problem}\n# Solution\n {solution_text}" - else: - # If solution is not a dict, convert it to string - txt = f"# Solution\n {str(solution)}" - - if set["memory_memorize_consolidation"]: - try: - # Use intelligent consolidation system - from python.helpers.memory_consolidation import create_memory_consolidator - consolidator = create_memory_consolidator( - self.agent, - similarity_threshold=DEFAULT_MEMORY_THRESHOLD, # More permissive for discovery - max_similar_memories=6, # Fewer for solutions (more complex) - max_llm_context_memories=3 - ) - - # Create solution-specific log for detailed tracking - solution_log = None # too many utility messages, skip log for now - # solution_log = self.agent.context.log.log( - # type="util", - # heading=f"Processing solution: {txt[:50]}...", - # update_progress="none" # Don't affect status bar - # ) - - # Process with intelligent consolidation - result_obj = await 
consolidator.process_new_memory( - new_memory=txt, - area=Memory.Area.SOLUTIONS.value, - metadata={"area": Memory.Area.SOLUTIONS.value}, - log_item=solution_log - ) - - # Update the individual log item with completion status but keep it temporary - if result_obj.get("success"): - total_consolidated += 1 - if solution_log: - solution_log.update( - result="Solution processed successfully", - heading=f"Solution completed: {txt[:50]}...", - update_progress="none" # Show briefly then disappear - ) - else: - if solution_log: - solution_log.update( - result="Solution processing failed", - heading=f"Solution failed: {txt[:50]}...", - update_progress="none" # Show briefly then disappear - ) - total_processed += 1 - - except Exception as e: - # Log error but continue processing - log_item.update(consolidation_error=str(e)) - total_processed += 1 - - # Update final results with structured logging - log_item.update( - heading=f"Solution memorization completed: {total_processed} solutions processed, {total_consolidated} intelligently consolidated", - solutions=solutions_txt, - result=f"{total_processed} solutions processed, {total_consolidated} intelligently consolidated", - solutions_processed=total_processed, - solutions_consolidated=total_consolidated, - update_progress="none" - ) - else: - # remove previous solutions too similiar to this one - if set["memory_memorize_replace_threshold"] > 0: - rem += await db.delete_documents_by_query( - query=txt, - threshold=set["memory_memorize_replace_threshold"], - filter=f"area=='{Memory.Area.SOLUTIONS.value}'", - ) - if rem: - rem_txt = "\n\n".join(Memory.format_docs_plain(rem)) - log_item.update(replaced=rem_txt) - - # insert new solution - await db.insert_text(text=txt, metadata={"area": Memory.Area.SOLUTIONS.value}) - - log_item.update( - result=f"{len(solutions)} solutions memorized.", - heading=f"{len(solutions)} solutions memorized.", - ) - if rem: - log_item.stream(result=f"\nReplaced {len(rem)} previous solutions.") - - - 
except Exception as e: - err = errors.format_error(e) - self.agent.context.log.log( - type="warning", heading="Memorize solutions extension error", content=err - ) +# Re-export all public names +globals().update({k: v for k, v in vars(_mod).items() if not k.startswith('_')}) diff --git a/python/extensions/monologue_start/_10_memory_init.py b/python/extensions/monologue_start/_10_memory_init.py index 26c94de30..352a1a924 100644 --- a/python/extensions/monologue_start/_10_memory_init.py +++ b/python/extensions/monologue_start/_10_memory_init.py @@ -1,13 +1,8 @@ -from python.helpers.extension import Extension -from agent import LoopData -from python.helpers import memory -import asyncio +"""Memory initialization extension - implementation provided by the memory plugin.""" +from python.helpers.plugins import import_plugin_module +# Import the actual implementation from the plugin +_mod = import_plugin_module("memory", "extensions/monologue_start/_10_memory_init.py") -class MemoryInit(Extension): - - async def execute(self, loop_data: LoopData = LoopData(), **kwargs): - db = await memory.Memory.get(self.agent) - - - \ No newline at end of file +# Re-export all public names +globals().update({k: v for k, v in vars(_mod).items() if not k.startswith('_')}) diff --git a/python/extensions/system_prompt/_20_behaviour_prompt.py b/python/extensions/system_prompt/_20_behaviour_prompt.py index 1650e9ee8..d8b5f7ade 100644 --- a/python/extensions/system_prompt/_20_behaviour_prompt.py +++ b/python/extensions/system_prompt/_20_behaviour_prompt.py @@ -1,7 +1,10 @@ from datetime import datetime from python.helpers.extension import Extension from agent import Agent, LoopData -from python.helpers import files, memory +from python.helpers import files +from python.helpers.plugins import import_plugin_module + +memory = import_plugin_module("memory", "helpers/memory.py") class BehaviourPrompt(Extension): diff --git a/python/helpers/extension.py b/python/helpers/extension.py index 
186099cc0..0526f68bd 100644 --- a/python/helpers/extension.py +++ b/python/helpers/extension.py @@ -27,10 +27,15 @@ class Extension: async def call_extensions( extension_point: str, agent: "Agent|None" = None, **kwargs ) -> Any: - from python.helpers import projects, subagents + from python.helpers import projects, subagents, plugins # search for extension folders in all agent's paths paths = subagents.get_paths(agent, "extensions", extension_point, default_root="python") + + # Add plugin extension paths + plugin_paths = plugins.get_plugin_paths("extension", extension_point) + paths.extend(plugin_paths) + all_exts = [cls for path in paths for cls in _get_extensions(path)] # merge: first ocurrence of file name is the override diff --git a/python/helpers/plugins.py b/python/helpers/plugins.py new file mode 100644 index 000000000..6ea165149 --- /dev/null +++ b/python/helpers/plugins.py @@ -0,0 +1,262 @@ +from __future__ import annotations + +import json +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, List, Optional + +from python.helpers import files + + +# ============================================================================ +# Core Data Structures +# ============================================================================ + +@dataclass(slots=True) +class Plugin: + id: str + name: str + path: Path + manifest_path: Path + provides: Dict[str, Any] = field(default_factory=dict) # capability map + version: str = "" + author: str = "" + description: str = "" + tags: List[str] = field(default_factory=list) + + # Optional heavy fields + raw_manifest: Dict[str, Any] = field(default_factory=dict) + + +# ============================================================================ +# Discovery & Loading +# ============================================================================ + +def get_plugin_roots() -> List[str]: + """ + Get plugin root directories. 
+ Priority: project plugins > usr plugins > core plugins + """ + # Project-specific plugins + projects = files.find_existing_paths_by_pattern("usr/projects/*/.a0proj/plugins") + + return [ + *projects, + files.get_abs_path("usr/plugins"), + files.get_abs_path("plugins"), + ] + + +def discover_plugin_manifests(root: str) -> List[Path]: + """Recursively discover plugin.json files under a root directory.""" + root_path = Path(root) + if not root_path.exists(): + return [] + + results: List[Path] = [] + for p in root_path.rglob("plugin.json"): + if p.is_file() and ".git" not in str(p): + results.append(p) + + return sorted(results, key=lambda x: str(x)) + + +def plugin_from_manifest(manifest_path: Path) -> Optional[Plugin]: + """Load plugin from manifest file.""" + try: + text = manifest_path.read_text(encoding="utf-8", errors="replace") + manifest = json.loads(text) + except Exception: + return None + + if not isinstance(manifest, dict): + return None + + plugin_dir = manifest_path.parent + plugin_id = manifest.get("id", "").strip() + + if not plugin_id: + return None + + plugin = Plugin( + id=plugin_id, + name=manifest.get("name", plugin_id), + path=plugin_dir, + manifest_path=manifest_path, + provides=manifest.get("provides", {}), + version=manifest.get("version", ""), + author=manifest.get("author", ""), + description=manifest.get("description", ""), + tags=manifest.get("tags", []), + raw_manifest=manifest, + ) + + return plugin + + +# ============================================================================ +# Registry Access (List/Find) +# ============================================================================ + +def list_plugins() -> List[Plugin]: + """List all discovered plugins.""" + plugins: List[Plugin] = [] + + for root in get_plugin_roots(): + for manifest_path in discover_plugin_manifests(root): + p = plugin_from_manifest(manifest_path) + if p: + plugins.append(p) + + # Dedupe by ID (earlier roots win) + by_id: Dict[str, Plugin] = {} + for p in 
plugins: + if p.id not in by_id: + by_id[p.id] = p + + return list(by_id.values()) + + +def find_plugin(plugin_id: str) -> Optional[Plugin]: + """Find a specific plugin by ID.""" + if not plugin_id: + return None + + # Search roots in priority order (first one found wins) + for root in get_plugin_roots(): + for manifest_path in discover_plugin_manifests(root): + p = plugin_from_manifest(manifest_path) + if p and p.id == plugin_id: + return p + + return None + + +# ============================================================================ +# Path Resolution & Import +# ============================================================================ + +def _extract_module_dir(module_path: str) -> str: + """Extract directory from module path (e.g., 'tools/my_tool.py' -> 'tools').""" + return str(Path(module_path).parent) + + +def get_plugin_paths(cap_type: str, *subpaths: str) -> List[str]: + """ + Get all paths from loaded plugins for a given capability type. + + Args: + cap_type: Capability type (e.g., "tool", "extension", "api") + subpaths: Additional path components to append + + Returns: + List of absolute paths matching the capability type + """ + paths: List[str] = [] + + for plugin in list_plugins(): + cap_config = plugin.provides.get(cap_type) + if not cap_config: + continue + + # Normalize to list for uniform processing + modules = [] + if isinstance(cap_config, list): + modules = [cap.get("module") for cap in cap_config if isinstance(cap, dict) and cap.get("module")] + elif isinstance(cap_config, dict) and cap_config.get("module"): + modules = [cap_config["module"]] + + # Build paths from modules + for module_path in modules: + module_dir = _extract_module_dir(module_path) + full_path = files.get_abs_path(str(plugin.path), module_dir, *subpaths) + if files.exists(full_path) and full_path not in paths: + paths.append(full_path) + + return paths + + +def import_plugin_module(plugin_id: str, module_path: str) -> Any: + """ + Import a Python module from a plugin 
using importlib. + + Args: + plugin_id: Plugin ID + module_path: Relative path to module within plugin (e.g., "helpers/memory.py") + + Returns: + The imported module object + + Raises: + ImportError: If plugin or module not found + """ + import importlib.util + import sys + + plugin = find_plugin(plugin_id) + if not plugin: + raise ImportError(f"Plugin '{plugin_id}' not found") + + full_path = plugin.path / module_path + if not full_path.exists(): + raise ImportError(f"Module '{module_path}' not found in plugin '{plugin_id}'") + + # Create a unique module name to avoid conflicts + module_name = f"plugins.{plugin_id}.{module_path.replace('/', '.').replace('.py', '')}" + + # Check if already loaded + if module_name in sys.modules: + return sys.modules[module_name] + + # Load the module + spec = importlib.util.spec_from_file_location(module_name, full_path) + if spec is None or spec.loader is None: + raise ImportError(f"Could not load module spec from {full_path}") + + module = importlib.util.module_from_spec(spec) + sys.modules[module_name] = module + spec.loader.exec_module(module) + + return module + + +# ============================================================================ +# API Helpers +# ============================================================================ + +def build_plugin_response_data(plugin: Plugin) -> dict: + """ + Build normalized API response data for a plugin. + Resolves URLs for UI capabilities. 
+ + Args: + plugin: Plugin object to serialize + + Returns: + Dictionary with plugin data, component_url, module_url, and provides + """ + base_url = f"/plugins/{plugin.id}/" + + response_data = { + "id": plugin.id, + "name": plugin.name, + "base_url": base_url, + "version": plugin.version, + "author": plugin.author, + "description": plugin.description, + "tags": plugin.tags, + "provides": plugin.provides, + } + + # Resolve UI capability URLs from provides.ui + if "ui" in plugin.provides: + ui_config = plugin.provides["ui"] + if isinstance(ui_config, dict): + if ui_config.get("component"): + response_data["component_url"] = f"{base_url}{ui_config['component']}" + if ui_config.get("module"): + response_data["module_url"] = f"{base_url}{ui_config['module']}" + if ui_config.get("props"): + response_data["props"] = ui_config["props"] + + return response_data \ No newline at end of file diff --git a/python/helpers/projects.py b/python/helpers/projects.py index 154e1a392..2c0d440ff 100644 --- a/python/helpers/projects.py +++ b/python/helpers/projects.py @@ -479,7 +479,8 @@ def create_project_meta_folders(name: str): # create knowledge folders files.create_dir(get_project_meta_folder(name, PROJECT_KNOWLEDGE_DIR)) - from python.helpers import memory + from python.helpers.plugins import import_plugin_module + memory = import_plugin_module("memory", "helpers/memory.py") for memory_type in memory.Memory.Area: files.create_dir( diff --git a/python/helpers/settings.py b/python/helpers/settings.py index 35ab00e38..ae78ec817 100644 --- a/python/helpers/settings.py +++ b/python/helpers/settings.py @@ -629,7 +629,9 @@ def _apply_settings(previous: Settings | None): or _settings["embed_model_provider"] != previous["embed_model_provider"] or _settings["embed_model_kwargs"] != previous["embed_model_kwargs"] ): - from python.helpers.memory import reload as memory_reload + from python.helpers.plugins import import_plugin_module + memory = import_plugin_module("memory", 
"helpers/memory.py") + memory_reload = memory.reload memory_reload() diff --git a/python/tools/behaviour_adjustment.py b/python/tools/behaviour_adjustment.py index 31fe67cd7..d82fd9774 100644 --- a/python/tools/behaviour_adjustment.py +++ b/python/tools/behaviour_adjustment.py @@ -1,4 +1,6 @@ -from python.helpers import files, memory +from python.helpers import files +from python.helpers.plugins import import_plugin_module +memory = import_plugin_module("memory", "helpers/memory.py") from python.helpers.tool import Tool, Response from agent import Agent from python.helpers.log import LogItem diff --git a/python/tools/knowledge_tool._py b/python/tools/knowledge_tool._py index 7f2085eb1..1726c372e 100644 --- a/python/tools/knowledge_tool._py +++ b/python/tools/knowledge_tool._py @@ -1,10 +1,11 @@ import asyncio -from python.helpers import dotenv, memory, perplexity_search, duckduckgo_search +from python.helpers import dotenv, perplexity_search, duckduckgo_search +from python.helpers.plugins import import_plugin_module +memory = import_plugin_module("memory", "helpers/memory.py") +memory_load_tool = import_plugin_module("memory", "tools/memory_load.py") +DEFAULT_MEMORY_THRESHOLD = memory_load_tool.DEFAULT_THRESHOLD + from python.helpers.tool import Tool, Response -from python.helpers.print_style import PrintStyle -from python.helpers.errors import handle_error -from python.helpers.searxng import search as searxng -from python.tools.memory_load import DEFAULT_THRESHOLD as DEFAULT_MEMORY_THRESHOLD from python.helpers.document_query import DocumentQueryHelper SEARCH_ENGINE_RESULTS = 10 diff --git a/python/tools/search_engine.py b/python/tools/search_engine.py index e27f4e948..121a97c60 100644 --- a/python/tools/search_engine.py +++ b/python/tools/search_engine.py @@ -1,6 +1,8 @@ import os import asyncio -from python.helpers import dotenv, memory, perplexity_search, duckduckgo_search +from python.helpers import dotenv, perplexity_search, duckduckgo_search +from 
python.helpers.plugins import import_plugin_module +memory = import_plugin_module("memory", "helpers/memory.py") from python.helpers.tool import Tool, Response from python.helpers.print_style import PrintStyle from python.helpers.errors import handle_error diff --git a/run_ui.py b/run_ui.py index 5ddeb91fe..1bbb77e6a 100644 --- a/run_ui.py +++ b/run_ui.py @@ -237,6 +237,41 @@ async def serve_index(): return index +# Serve plugin assets +@webapp.route("/plugins/<plugin_id>/<path:asset_path>", methods=["GET"]) +@requires_auth +async def serve_plugin_asset(plugin_id, asset_path): + """ + Serve static assets from plugin directories. + Resolves using the plugin system (with overrides). + """ + from python.helpers import plugins + from flask import send_file + + # Use the new find_plugin helper + plugin = plugins.find_plugin(plugin_id) + if not plugin: + return Response("Plugin not found", 404) + + # Resolve the plugin asset path with security checks + try: + # Construct path using plugin root + asset_file = (plugin.path / asset_path).resolve() + plugin_root = plugin.path.resolve() + + # Security: ensure the resolved path is within the plugin directory + if not str(asset_file).startswith(str(plugin_root) + os.sep) and str(asset_file) != str(plugin_root): + return Response("Access denied", 403) + + if not asset_file.is_file(): + return Response("Asset not found", 404) + + return send_file(str(asset_file)) + except Exception as e: + PrintStyle.error(f"Error serving plugin asset: {e}") + return Response("Error serving asset", 500) + + def _build_websocket_handlers_by_namespace( socketio_server: socketio.AsyncServer, lock: threading.RLock, @@ -459,6 +494,14 @@ def run(): handlers = load_classes_from_folder("python/api", "*.py", ApiHandler) for handler in handlers: register_api_handler(webapp, handler) + + # Load API handlers from plugins + from python.helpers import plugins + plugin_api_paths = plugins.get_plugin_paths("api") + for api_path in plugin_api_paths: + plugin_handlers =
load_classes_from_folder(api_path, "*.py", ApiHandler) + for handler in plugin_handlers: + register_api_handler(webapp, handler) handlers_by_namespace = _build_websocket_handlers_by_namespace(socketio_server, lock) configure_websocket_namespaces( diff --git a/webui/components/settings/agent/memory.html b/webui/components/settings/agent/memory.html index 4ae2f8a34..39bde1993 100644 --- a/webui/components/settings/agent/memory.html +++ b/webui/components/settings/agent/memory.html @@ -34,7 +34,7 @@
diff --git a/webui/components/sidebar/top-section/quick-actions.html b/webui/components/sidebar/top-section/quick-actions.html index 3c1f8405a..aa6db71cc 100644 --- a/webui/components/sidebar/top-section/quick-actions.html +++ b/webui/components/sidebar/top-section/quick-actions.html @@ -17,7 +17,7 @@ - @@ -54,7 +54,7 @@ Projects - diff --git a/webui/components/welcome/welcome-store.js b/webui/components/welcome/welcome-store.js index eba46bb09..6c3e3e808 100644 --- a/webui/components/welcome/welcome-store.js +++ b/webui/components/welcome/welcome-store.js @@ -1,7 +1,7 @@ import { createStore } from "/js/AlpineStore.js"; import { getContext } from "/index.js"; import { store as chatsStore } from "/components/sidebar/chats/chats-store.js"; -import { store as memoryStore } from "/components/modals/memory/memory-dashboard-store.js"; +import { store as memoryStore } from "/plugins/memory/ui/memory-dashboard-store.js"; import { store as projectsStore } from "/components/projects/projects-store.js"; import { store as chatInputStore } from "/components/chat/input/input-store.js"; import * as API from "/js/api.js"; diff --git a/webui/js/components.js b/webui/js/components.js index 16612662f..516578cfc 100644 --- a/webui/js/components.js +++ b/webui/js/components.js @@ -261,4 +261,4 @@ const observer = new MutationObserver((mutations) => { } } }); -observer.observe(document.body, { childList: true, subtree: true }); +observer.observe(document.body, { childList: true, subtree: true }); \ No newline at end of file diff --git a/webui/js/initFw.js b/webui/js/initFw.js index f56ba15d3..adeec657e 100644 --- a/webui/js/initFw.js +++ b/webui/js/initFw.js @@ -1,6 +1,7 @@ import * as initializer from "./initializer.js"; import * as _modals from "./modals.js"; import * as _components from "./components.js"; +import * as _plugins from "./plugins.js"; import { registerAlpineMagic } from "./confirmClick.js"; // initialize required elements diff --git a/webui/js/plugins.js 
// Plugin system for Agent Zero
// Loads <x-extension> tags by resolving plugin manifests and reusing the
// existing component infrastructure.

import { importComponent, getParentAttributes } from "./components.js";
import { getCsrfToken } from "./api.js";

// Cache of resolved plugin manifests, keyed by plugin id.
// Failed resolutions are NOT cached, so they are retried on the next scan.
const extensionCache = {};

/**
 * Batch-fetch plugin manifests from the backend in a single API call.
 * @param {string[]} pluginIds - plugin ids to resolve
 * @returns {Promise<object[]>} resolved manifest objects; [] on any failure
 */
async function fetchPluginManifests(pluginIds) {
  try {
    const response = await fetch("/plugins_resolve", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "X-CSRF-Token": await getCsrfToken(),
      },
      body: JSON.stringify({ ids: pluginIds }),
    });

    if (!response.ok) {
      throw new Error(`Failed to resolve plugins: ${response.statusText}`);
    }

    const result = await response.json();
    if (!result.ok || !result.data) {
      throw new Error("Invalid plugins response");
    }

    return result.data;
  } catch (error) {
    // Best-effort: log and return an empty list so callers can continue.
    console.error("Error batch-fetching plugin manifests:", error);
    return [];
  }
}

/**
 * Merge manifest-declared props with the element's own attributes.
 * Element attributes override manifest props; attribute values that parse
 * as JSON are deserialized, everything else stays a plain string.
 * @param {object} manifestProps - default props from the plugin manifest
 * @param {Element} element - the <x-extension> host element
 * @returns {object} merged props (element attrs win)
 */
function mergePropsWithAttributes(manifestProps, element) {
  const elementAttrs = {};
  for (const attr of element.attributes) {
    if (attr.name === "id") continue; // "id" selects the plugin; not a prop
    try {
      elementAttrs[attr.name] = JSON.parse(attr.value);
    } catch (_e) {
      elementAttrs[attr.name] = attr.value;
    }
  }
  return { ...manifestProps, ...elementAttrs };
}

/**
 * Write merged props back onto the element as attributes.
 * Object values are serialized to JSON; everything else is set as-is.
 */
function setAttributesOnElement(props, element) {
  for (const [key, value] of Object.entries(props)) {
    if (typeof value === "object") {
      element.setAttribute(key, JSON.stringify(value));
    } else {
      element.setAttribute(key, value);
    }
  }
}

/**
 * Load a single plugin into the given <x-extension> element using its
 * cached manifest.
 * @param {string} pluginId - id of the plugin to load
 * @param {Element} targetElement - the <x-extension> host element
 * @throws {Error} when the manifest is missing or carries an error
 */
async function loadPlugin(pluginId, targetElement) {
  const manifest = extensionCache[pluginId];
  if (!manifest || manifest.error) {
    throw new Error(manifest?.error || `Plugin '${pluginId}' not found`);
  }

  // The API already resolves component_url and module_url from provides.ui.
  const componentUrl = manifest.component_url;
  const moduleUrl = manifest.module_url;
  const props = manifest.props || {};

  // Merge manifest props with element attributes, then expose the merged
  // values as attributes on the host element for the component to read.
  const mergedProps = mergePropsWithAttributes(props, targetElement);
  setAttributesOnElement(mergedProps, targetElement);

  // The "components/.." prefix bypasses the path check in importComponent.
  if (componentUrl) {
    const adjustedUrl = "components/.." + componentUrl;
    await importComponent(adjustedUrl, targetElement);
  }

  // The browser's native module cache deduplicates repeat imports.
  if (moduleUrl) {
    await import(moduleUrl);
  }
}

/**
 * Collect every <x-extension> element under the given root element(s).
 * @param {Element|Element[]} roots - root(s) to scan
 * @returns {Element[]} all matching elements
 */
function findAllExtensionTags(roots) {
  const rootElements = Array.isArray(roots) ? roots : [roots];
  return rootElements.flatMap((root) =>
    Array.from(root.querySelectorAll("x-extension"))
  );
}

/**
 * Unique plugin ids referenced by the given elements that are not yet in
 * the manifest cache. Elements without an id attribute are reported and
 * skipped.
 * @param {Element[]} extensions - <x-extension> elements
 * @returns {string[]} ids that still need fetching
 */
function collectUniqueUncachedPluginIds(extensions) {
  const pluginIds = [];
  const seen = new Set();

  for (const extension of extensions) {
    const pluginId = extension.getAttribute("id");
    if (!pluginId) {
      console.error("x-extension missing id attribute:", extension);
      continue;
    }
    if (!seen.has(pluginId) && !extensionCache[pluginId]) {
      pluginIds.push(pluginId);
      seen.add(pluginId);
    }
  }

  return pluginIds;
}

/**
 * Main loader: scan the DOM for <x-extension> tags, batch-fetch any
 * uncached manifests in one API call, then load all plugins in parallel.
 * Errors are reported per-element; a failed plugin never aborts the rest.
 * @param {Element[]} roots - elements to scan (defaults to whole document)
 */
export async function loadExtensions(roots = [document.documentElement]) {
  try {
    const extensions = findAllExtensionTags(roots);
    if (extensions.length === 0) return;

    // Batch fetch all uncached manifests in one API call.
    const pluginIds = collectUniqueUncachedPluginIds(extensions);
    if (pluginIds.length > 0) {
      const manifests = await fetchPluginManifests(pluginIds);
      for (const manifest of manifests) {
        if (!manifest.error) {
          extensionCache[manifest.id] = manifest;
        } else {
          console.error(`Plugin '${manifest.id}' failed to load:`, manifest.error);
        }
      }
    }

    // Group elements by plugin id for parallel loading.
    const extensionMap = new Map();
    for (const extension of extensions) {
      const pluginId = extension.getAttribute("id");
      if (!pluginId) continue;
      if (!extensionMap.has(pluginId)) {
        extensionMap.set(pluginId, []);
      }
      extensionMap.get(pluginId).push(extension);
    }

    // Load all plugins in parallel using cached manifests.
    await Promise.all(
      Array.from(extensionMap.entries()).flatMap(([pluginId, extensionElements]) =>
        extensionElements.map(async (extension) => {
          try {
            await loadPlugin(pluginId, extension);
          } catch (error) {
            console.error(`Error loading extension '${pluginId}':`, error);
            // FIX: use textContent, not innerHTML — pluginId comes from a
            // DOM attribute and must never be interpreted as markup.
            extension.textContent = `Failed to load plugin: ${pluginId}`;
          }
        })
      )
    );
  } catch (error) {
    console.error("Error loading extensions:", error);
  }
}

// Extend global xAttrs to check both x-component and x-extension tags,
// so plugins can call globalThis.xAttrs() on either kind of host.
globalThis.xAttrs = function (el) {
  return getParentAttributes(el, ["x-component", "x-extension"]);
};

// Watch for DOM changes to dynamically load inserted <x-extension> tags.
const observer = new MutationObserver((mutations) => {
  for (const mutation of mutations) {
    for (const node of mutation.addedNodes) {
      if (node.nodeType !== 1) continue; // ELEMENT_NODE only
      if (node.matches?.("x-extension")) {
        // querySelectorAll does not match the node itself — scan its parent.
        loadExtensions([node.parentElement || document.documentElement]);
      } else if (node.querySelectorAll) {
        // Check whether any descendants are <x-extension> tags.
        if (node.querySelectorAll("x-extension").length > 0) {
          loadExtensions([node]);
        }
      }
    }
  }
});

/**
 * Run the initial scan and start observing for dynamic insertions.
 * FIX: observer.observe(document.body) originally ran unconditionally at
 * module load; when the script executes inside <head> during parsing,
 * document.body is still null and observe() throws a TypeError. Both the
 * initial scan and the observer start are now deferred until the DOM is
 * ready.
 */
function start() {
  loadExtensions();
  observer.observe(document.body, { childList: true, subtree: true });
}

if (document.readyState === "loading") {
  document.addEventListener("DOMContentLoaded", start);
} else {
  start();
}