// Pulse/internal/ai/tools/tools_query.go
// Chunk: execution-intent classification and non-interactive guardrails
// for pulse_read command gating.
package tools
import (
"context"
"fmt"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/rcourtman/pulse-go-rewrite/internal/unifiedresources"
"github.com/rs/zerolog/log"
)
// routingMismatchLogLimiter provides rate limiting for routing mismatch debug logs.
// This prevents log spam while still providing visibility into routing issues.
// Access to lastLog must be guarded by mu.
var routingMismatchLogLimiter = struct {
	mu       sync.Mutex    // protects lastLog
	lastLog  time.Time     // when the most recent routing-mismatch log line was emitted
	interval time.Duration // minimum gap between successive log lines
}{
	interval: 10 * time.Second, // Log at most once per 10 seconds
}
// ErrStrictResolution is returned when a write operation is attempted on an
// undiscovered resource while PULSE_STRICT_RESOLUTION is enabled.
// Use errors.As() to check for this error type.
type ErrStrictResolution struct {
	ResourceID string // The resource identifier that wasn't found
	Action     string // The action that was attempted (e.g. "stop", "exec")
	Message    string // Human-readable message; returned verbatim by Error()
}
// Error implements the error interface by returning the human-readable message.
func (e *ErrStrictResolution) Error() string {
	return e.Message
}
// Code returns the stable error code used in structured tool responses.
func (e *ErrStrictResolution) Code() string {
	return ErrCodeStrictResolution
}
// ToToolResponse converts the error into the standard blocked-operation
// ToolResponse envelope. The details flag the failure as auto-recoverable so
// the agentic loop can discover the resource and retry the operation.
func (e *ErrStrictResolution) ToToolResponse() ToolResponse {
	details := map[string]interface{}{
		"resource_id":      e.ResourceID,
		"action":           e.Action,
		"recovery_hint":    "Use pulse_query action=search to discover the resource first",
		"auto_recoverable": true, // Signal to agentic loop that auto-discovery can help
	}
	return NewToolBlockedError(ErrCodeStrictResolution, e.Message, details)
}
// ToStructuredError returns a structured error payload for tool responses.
// Deprecated: Use ToToolResponse() for consistent envelope
func (e *ErrStrictResolution) ToStructuredError() map[string]interface{} {
	payload := make(map[string]interface{}, 4)
	payload["error_code"] = e.Code()
	payload["message"] = e.Message
	payload["resource_id"] = e.ResourceID
	payload["action"] = e.Action
	return payload
}
// ErrRoutingMismatch is returned when a tool targets a parent host (e.g., a hypervisor node)
// but the session has discovered more specific child resources (system containers/VMs) on that host.
// This prevents accidentally operating on the host filesystem when the user intended
// to target a guest resource.
type ErrRoutingMismatch struct {
	TargetHost            string   // The host that was targeted
	MoreSpecificResources []string // Child resource names that exist on this host
	MoreSpecificIDs       []string // Canonical resource IDs (kind:scope:id) for future ID-based targeting
	ChildKinds            []string // Resource kinds of children (for telemetry/routing: "system-container", "vm", etc.)
	Message               string   // Human-readable message; returned verbatim by Error()
}
// Error implements the error interface by returning the human-readable message.
func (e *ErrRoutingMismatch) Error() string {
	return e.Message
}
// Code returns the stable error code used in structured tool responses.
func (e *ErrRoutingMismatch) Code() string {
	return "ROUTING_MISMATCH"
}
// ToToolResponse returns a consistent ToolResponse for routing mismatches.
// A recovery_hint is always attached; all indexing into the suggestion slices
// is length-guarded so a sparsely populated error cannot panic (the previous
// version indexed MoreSpecificResources[0] unconditionally).
func (e *ErrRoutingMismatch) ToToolResponse() ToolResponse {
	details := map[string]interface{}{
		"target_host":             e.TargetHost,
		"more_specific_resources": e.MoreSpecificResources,
		"auto_recoverable":        true,
	}
	switch {
	case len(e.MoreSpecificIDs) > 0:
		// Include canonical IDs and prefer ID-based targeting in the hint.
		details["more_specific_resource_ids"] = e.MoreSpecificIDs
		details["target_resource_id"] = e.MoreSpecificIDs[0] // Primary suggestion
		if len(e.MoreSpecificResources) > 0 {
			// Prefer ID-based targeting; keep name-based targeting as a fallback.
			details["recovery_hint"] = fmt.Sprintf(
				"Retry with target_resource_id='%s' (preferred) or target_host='%s'",
				e.MoreSpecificIDs[0], e.MoreSpecificResources[0])
		} else {
			// No child names were recorded; suggest the ID alone.
			details["recovery_hint"] = fmt.Sprintf(
				"Retry with target_resource_id='%s'", e.MoreSpecificIDs[0])
		}
	case len(e.MoreSpecificResources) > 0:
		// Fallback if no IDs available.
		details["recovery_hint"] = fmt.Sprintf(
			"Use target_host='%s' to target the specific resource, not the parent host node",
			e.MoreSpecificResources[0])
	default:
		// Defensive: both suggestion slices are empty.
		details["recovery_hint"] = "Use pulse_query action=search to discover the child resources on this host"
	}
	return NewToolBlockedError(
		"ROUTING_MISMATCH",
		e.Message,
		details,
	)
}
// isStrictResolutionEnabled reports whether hard validation of write targets
// is turned on. Operators enable it by setting PULSE_STRICT_RESOLUTION to
// "true", "1", or "yes"; any other value (or unset) leaves it disabled.
func isStrictResolutionEnabled() bool {
	switch os.Getenv("PULSE_STRICT_RESOLUTION") {
	case "true", "1", "yes":
		return true
	default:
		return false
	}
}
// isWriteAction reports whether the named action mutates state.
// Note: "exec" counts as a write because it can run arbitrary commands; use
// classifyCommandRisk() to distinguish read-only exec commands when needed.
func isWriteAction(action string) bool {
	switch action {
	case "start", "stop", "restart", "delete",
		"shutdown", "exec", "write", "append":
		return true
	}
	return false
}
// CommandRisk represents the risk level of a shell command.
// Higher values indicate greater potential for destructive side effects.
type CommandRisk int

const (
	CommandRiskReadOnly    CommandRisk = 0 // Safe read-only commands
	CommandRiskLowWrite    CommandRisk = 1 // Low-risk writes (touch, mkdir temp)
	CommandRiskMediumWrite CommandRisk = 2 // Medium-risk writes (config changes)
	CommandRiskHighWrite   CommandRisk = 3 // High-risk writes (rm, systemctl, package managers)
)
// ExecutionIntent represents whether a command can be proven non-mutating.
// This is the primary abstraction for pulse_read gating decisions.
//
// Invariant: pulse_read may execute commands that are provably non-mutating
// either by construction (known read-only commands) or by bounded inspection
// (self-contained input + no shell composition + no write patterns). Any command
// that depends on external input, shell composition, or ambiguous semantics is
// treated as write-capable and blocked from pulse_read.
type ExecutionIntent int

const (
	// IntentReadOnlyCertain - command is non-mutating by construction.
	// Examples: ls, cat, grep, docker logs, ffprobe, kubectl get
	// These cannot mutate regardless of arguments.
	IntentReadOnlyCertain ExecutionIntent = iota
	// IntentReadOnlyConditional - command appears read-only by bounded inspection.
	// The command is self-contained (no shell composition) and content inspection
	// found no write patterns. Examples: sqlite3 "SELECT ...", psql -c "SELECT ..."
	// Guardrails: no redirects, no pipes, no subshells, no chaining, inline input only.
	IntentReadOnlyConditional
	// IntentWriteOrUnknown - command may mutate or cannot be proven safe.
	// Either it matches known write patterns, has shell composition that prevents
	// analysis, or is unknown and we fail closed. This is the default verdict.
	IntentWriteOrUnknown
)
// IntentResult contains the execution intent classification and the reason for it.
type IntentResult struct {
	Intent              ExecutionIntent            // the classification verdict
	Reason              string                     // Human-readable reason for classification
	NonInteractiveBlock *NonInteractiveBlockResult // Non-nil if blocked by NonInteractiveOnly guardrail
}
// ContentInspector examines command content to determine if it's read-only.
// Different inspectors handle different tool families (SQL, Redis, kubectl, etc.)
// All methods receive the already-lowercased command string.
type ContentInspector interface {
	// Applies returns true if this inspector handles the given command
	Applies(cmdLower string) bool
	// IsReadOnly returns (true, "") if content is read-only, or (false, reason) if not
	IsReadOnly(cmdLower string) (bool, string)
}
// sqlContentInspector handles SQL CLI tools (sqlite3, mysql, psql, etc.)
type sqlContentInspector struct{}

// Applies reports whether the command invokes a known SQL CLI, either bare or
// with arguments. Matching requires a word boundary after the binary name:
// the previous bare-prefix match misclassified tools that merely share a
// prefix (mysqldump, mysqladmin, psqlodbc, ...) as interactive SQL shells.
func (s *sqlContentInspector) Applies(cmdLower string) bool {
	sqlCLIs := []string{"sqlite3", "mysql", "mariadb", "psql", "mycli", "pgcli", "litecli"}
	for _, cli := range sqlCLIs {
		// "<cli> ..." anywhere covers absolute paths like "/usr/bin/mysql ...";
		// exact equality covers a bare invocation with no arguments.
		if strings.Contains(cmdLower, cli+" ") || cmdLower == cli {
			return true
		}
	}
	return false
}

// IsReadOnly inspects inline SQL for mutating statements. It returns
// (false, reason) when any write/control keyword appears, or when no inline
// SQL can be found at all (input may be piped/interactive and therefore
// cannot be inspected). Only commands with visible inline SQL and no write
// keywords are accepted as read-only.
func (s *sqlContentInspector) IsReadOnly(cmdLower string) (bool, string) {
	// SQL statements that mutate data or schema.
	// Conservative: includes DDL, DML writes, transaction control, and admin commands.
	sqlWriteKeywords := []string{
		// DML writes
		"insert ", "update ", "delete ", "replace ",
		// DDL
		"create ", "drop ", "alter ", "truncate ",
		"merge ", "upsert ",
		// Transaction control (expands attack surface)
		"begin", "commit", "rollback", "savepoint", "release ",
		// Database management
		"attach ", "detach ",
		"vacuum", "reindex",
		"grant ", "revoke ",
		"pragma ",
	}
	for _, kw := range sqlWriteKeywords {
		if strings.Contains(cmdLower, kw) {
			return false, fmt.Sprintf("SQL contains write/control keyword: %s", strings.TrimSpace(kw))
		}
	}
	// Conservative: if we can't find inline SQL content, assume external input.
	hasInlineSQL := strings.Contains(cmdLower, `"`) ||
		strings.Contains(cmdLower, `'`) ||
		strings.Contains(cmdLower, " .") || // dot commands like .tables, .schema
		strings.Contains(cmdLower, " -e ") || // mysql -e
		strings.Contains(cmdLower, " -c ") // psql -c
	if !hasInlineSQL {
		return false, "no inline SQL found; input may be external (piped/interactive)"
	}
	return true, ""
}
// registeredInspectors is the list of content inspectors to try, in order;
// the first inspector whose Applies() matches decides the outcome.
// Add new inspectors here for redis-cli, kubectl, etc.
var registeredInspectors = []ContentInspector{
	&sqlContentInspector{},
	// Future: &redisContentInspector{},
	// Future: &kubectlContentInspector{},
}
// ClassifyExecutionIntent determines whether a command can be proven
// non-mutating. This is the main entry point for pulse_read gating decisions.
//
// Commands containing shell chaining (;, &&, ||) outside quotes are split
// into sub-commands and classified independently: the chain is read-only only
// if ALL parts are; a single write/unknown part blocks the whole chain.
func ClassifyExecutionIntent(command string) IntentResult {
	if !hasShellChainingOutsideQuotes(command) {
		return classifySingleCommand(command)
	}
	return classifyChainedCommand(command)
}
// classifySingleCommand classifies a single (non-chained) command.
//
// Phases run in a fixed order and earlier phases take precedence: a command
// that trips a mutation-capability guard is blocked even if its binary would
// otherwise be on the read-only allowlist. Anything not proven safe falls
// through to IntentWriteOrUnknown (fail closed).
func classifySingleCommand(command string) IntentResult {
	cmdLower := strings.ToLower(command)
	// === PHASE 1: Mutation-capability guards ===
	// These make ANY command potentially dangerous regardless of the binary.
	// Includes: sudo, redirects, tee, subshells, pipes
	if reason := checkMutationCapabilityGuards(command, cmdLower); reason != "" {
		return IntentResult{Intent: IntentWriteOrUnknown, Reason: reason}
	}
	// === PHASE 1.5: NonInteractiveOnly guardrails ===
	// MUST be checked before Phase 3 (read-only by construction) because even
	// read-only commands like `tail -f` and `journalctl -f` can hang indefinitely.
	// pulse_read requires commands that terminate deterministically.
	if niBlock := checkNonInteractiveGuardrails(command, cmdLower); niBlock != nil {
		return IntentResult{
			Intent:              IntentWriteOrUnknown,
			Reason:              niBlock.FormatMessage(),
			NonInteractiveBlock: niBlock,
		}
	}
	// === PHASE 2: Known write patterns ===
	// Check BEFORE read-only patterns to catch write variants like "sed -i"
	// before generic patterns like "sed " match.
	if reason := matchesWritePatterns(cmdLower); reason != "" {
		return IntentResult{Intent: IntentWriteOrUnknown, Reason: reason}
	}
	// === PHASE 3: Known read-only by construction ===
	// Commands that cannot mutate regardless of arguments.
	// Only reached if Phase 2 didn't match any write patterns.
	if isReadOnlyByConstruction(cmdLower) {
		return IntentResult{Intent: IntentReadOnlyCertain, Reason: "known read-only command"}
	}
	// === PHASE 4: Self-contained read candidate check ===
	// Additional guardrails before content inspection.
	if reason := checkSelfContainedGuardrails(command, cmdLower); reason != "" {
		return IntentResult{Intent: IntentWriteOrUnknown, Reason: reason}
	}
	// === PHASE 5: Content inspection via registered inspectors ===
	// The first inspector whose Applies() matches decides the outcome.
	for _, inspector := range registeredInspectors {
		if inspector.Applies(cmdLower) {
			if isReadOnly, reason := inspector.IsReadOnly(cmdLower); isReadOnly {
				return IntentResult{Intent: IntentReadOnlyConditional, Reason: "content inspection: read-only"}
			} else {
				return IntentResult{Intent: IntentWriteOrUnknown, Reason: "content inspection: " + reason}
			}
		}
	}
	// === PHASE 6: Fail closed ===
	// Unknown commands may wrap interpreters, scripts, or side-effecting tools.
	// pulse_read must only execute commands that are known read-only by construction
	// or proven read-only by an explicit content inspector.
	return IntentResult{Intent: IntentWriteOrUnknown, Reason: "unknown command is not on the read-only allowlist"}
}
// checkMutationCapabilityGuards checks for shell patterns that enable mutation
// regardless of the underlying binary: privilege escalation, redirects, tee,
// command substitution, and pipes into dual-use tools. Returns the reason for
// the first guard that fires, or "" if the command passes all guards.
//
// Shell chaining (;, &&, ||) is handled at the top level of
// ClassifyExecutionIntent via classifyChainedCommand, not here.
func checkMutationCapabilityGuards(command, cmdLower string) string {
	switch {
	case strings.HasPrefix(cmdLower, "sudo") || strings.Contains(cmdLower, "sudo "):
		// sudo escalates any command it wraps.
		return "sudo escalates command privileges"
	case hasStdoutRedirect(command):
		return "output redirection can overwrite files"
	case strings.Contains(cmdLower, " tee ") || strings.Contains(cmdLower, "|tee "):
		return "tee can write to files"
	case strings.Contains(command, "$(") || strings.Contains(command, "`"):
		// Subshell/command substitution can execute arbitrary commands.
		return "command substitution can execute arbitrary commands"
	case hasInputRedirect(command):
		// Catches < (redirect), << (heredoc), <<< (here-string) — but NOT
		// harmless fd merges like 2>&1. With redirected input we cannot
		// inspect what the tool will actually execute, e.g.:
		//   sqlite3 db < script.sql
		//   psql <<EOF ... EOF
		//   sqlite3 db <<< "SELECT ..."
		return "input redirection prevents content inspection"
	case strings.Contains(command, "|") && !strings.Contains(command, "||") && pipedToDualUseTool(cmdLower):
		// Pipes are only blocked when feeding a dual-use tool that interprets
		// its input (SQL CLIs, shells); piping into read-only filters
		// (grep, head, ...) is fine.
		return "piped input to dual-use tool prevents content inspection"
	}
	return ""
}
// NonInteractiveBlockResult contains structured information about a NonInteractiveOnly block.
type NonInteractiveBlockResult struct {
	Category        string // telemetry category: tty_flag, pager, unbounded_stream, interactive_repl
	Message         string // human-readable reason
	SuggestedCmd    string // drop-in rewrite suggestion (empty if none available)
	AutoRecoverable bool   // true if the suggested rewrite is safe for auto-recovery
}
// checkNonInteractiveGuardrails enforces the exit-boundedness invariant:
// pulse_read must only execute commands that terminate deterministically.
//
// Guardrails are evaluated in priority order; the first hit wins. Categories
// (used as telemetry labels):
//   - tty_flag: interactive TTY flags (-it, --tty)
//   - pager: pager/editor tools (less, vim, nano)
//   - unbounded_stream: live monitors and follow mode without bounds
//   - interactive_repl: commands that open a REPL/shell without non-interactive flags
//
// Returns nil if the command passes every guardrail.
func checkNonInteractiveGuardrails(command, cmdLower string) *NonInteractiveBlockResult {
	type guard struct {
		hit      bool
		category string
		message  string
		suggest  func() string
		auto     bool
	}
	for _, g := range []guard{
		{
			hit:      hasInteractiveTTYFlags(cmdLower),
			category: "tty_flag",
			message:  "[tty_flag] interactive/TTY flags require terminal; use non-interactive form",
			suggest:  func() string { return suggestNonInteractiveTTY(command) },
			auto:     true,
		},
		{
			hit:      isPagerOrEditorTool(cmdLower),
			category: "pager",
			message:  "[pager] pager/editor tools require terminal; use cat, head, or tail instead",
			suggest:  func() string { return suggestPagerReplacement(command, cmdLower) },
			auto:     true,
		},
		{
			hit:      isLiveMonitoringTool(cmdLower),
			category: "unbounded_stream",
			message:  "[unbounded_stream] live monitoring tools run indefinitely; use bounded alternatives",
			suggest:  func() string { return suggestLiveMonitoringReplacement(command, cmdLower) },
			auto:     true,
		},
		{
			hit:      isUnboundedStreaming(cmdLower),
			category: "unbounded_stream",
			message:  "[unbounded_stream] follow mode without bound; add --tail/--since or wrap with timeout",
			suggest:  func() string { return suggestBoundedStreaming(command, cmdLower) },
			auto:     true,
		},
		{
			hit:      isInteractiveREPL(cmdLower),
			category: "interactive_repl",
			message:  "[interactive_repl] command opens interactive session; add -c/--execute flag or inline command",
			suggest:  func() string { return suggestNonInteractiveREPL(command, cmdLower) },
			auto:     false, // REPL rewrites need human judgment (what query to run?)
		},
	} {
		if g.hit {
			return &NonInteractiveBlockResult{
				Category:        g.category,
				Message:         g.message,
				SuggestedCmd:    g.suggest(),
				AutoRecoverable: g.auto,
			}
		}
	}
	return nil
}
// FormatMessage formats the block result for a tool response: the reason
// message, followed by the suggested rewrite (when one is available).
func (r *NonInteractiveBlockResult) FormatMessage() string {
	if r.SuggestedCmd != "" {
		return fmt.Sprintf("%s\n\nSuggested rewrite:\n  %s", r.Message, r.SuggestedCmd)
	}
	return r.Message
}
// suggestNonInteractiveTTY suggests a drop-in rewrite of docker/kubectl
// commands with interactive/TTY flags (-it, -i -t, --interactive, --tty)
// removed. Returns "" when removing flags did not change the command.
func suggestNonInteractiveTTY(command string) string {
	result := command
	replacements := []struct{ old, new string }{
		{" -it ", " "},
		{" -i -t ", " "},
		{" -ti ", " "},
		{" --interactive --tty ", " "},
		{" --tty --interactive ", " "},
		{" --interactive ", " "},
		{" --tty ", " "},
	}
	for _, r := range replacements {
		result = strings.ReplaceAll(result, r.old, r.new)
	}
	// Collapse runs of spaces left behind by the removals. The previous
	// version replaced a single space with a single space — a no-op — while
	// looping as long as any single space remained, so it never terminated
	// for any command containing a space.
	for strings.Contains(result, "  ") {
		result = strings.ReplaceAll(result, "  ", " ")
	}
	if result != command {
		return strings.TrimSpace(result)
	}
	return ""
}
// suggestPagerReplacement rewrites pager/editor invocations into
// non-interactive equivalents: pagers become cat piped through head, editors
// become plain cat. Returns "" when no rewrite applies.
func suggestPagerReplacement(command, cmdLower string) string {
	parts := strings.Fields(command)
	if len(parts) < 2 {
		return ""
	}
	// Everything after the pager/editor binary is the file argument(s).
	target := strings.Join(parts[1:], " ")
	switch strings.ToLower(parts[0]) {
	case "less", "more":
		return "cat " + target + " | head -200"
	case "vim", "vi", "nano", "emacs", "pico", "ed":
		return "cat " + target
	default:
		return ""
	}
}
// suggestLiveMonitoringReplacement maps never-terminating monitoring tools
// onto one-shot equivalents. For `watch CMD` the wrapped command itself is
// suggested; for the others a bounded snapshot alternative is returned.
// Returns "" for unrecognized tools.
func suggestLiveMonitoringReplacement(command, cmdLower string) string {
	lowerFields := strings.Fields(cmdLower)
	if len(lowerFields) == 0 {
		return ""
	}
	switch lowerFields[0] {
	case "top", "htop", "atop":
		return "ps aux --sort=-%cpu | head -20"
	case "iotop":
		return "iotop -b -n 1" // batch mode, 1 iteration
	case "iftop", "nload":
		return "ss -s" // socket statistics summary
	case "watch":
		return extractWatchedCommand(command, lowerFields)
	}
	return ""
}

// extractWatchedCommand returns the command that `watch` was wrapping,
// skipping watch's own flags (including "-n <interval>"). Returns "" when
// nothing but flags follows watch.
func extractWatchedCommand(command string, lowerFields []string) string {
	if len(lowerFields) < 2 {
		return ""
	}
	cmdStart := 1
	for i := 1; i < len(lowerFields); i++ {
		if !strings.HasPrefix(lowerFields[i], "-") {
			break
		}
		cmdStart = i + 1
		if lowerFields[i] == "-n" && i+1 < len(lowerFields) {
			cmdStart = i + 2 // skip the interval value too
		}
	}
	if cmdStart >= len(lowerFields) {
		return ""
	}
	// Join from the original-case command so the suggestion preserves casing.
	watched := strings.Join(strings.Fields(command)[cmdStart:], " ")
	return strings.Trim(watched, "'\"")
}
// suggestBoundedStreaming rewrites follow-mode log commands into bounded
// forms (line counts and/or time windows) that terminate deterministically.
// Returns "" for tools it does not recognize.
func suggestBoundedStreaming(command, cmdLower string) string {
	fields := strings.Fields(command)
	if len(fields) == 0 {
		return ""
	}
	// Apply an ordered list of replacements; order matters so that the
	// space-padded forms are consumed before their bare variants.
	applyAll := func(s string, pairs [][2]string) string {
		for _, p := range pairs {
			s = strings.ReplaceAll(s, p[0], p[1])
		}
		return s
	}
	tool := strings.ToLower(fields[0])
	switch {
	case tool == "tail":
		// tail -f /var/log/app.log → tail -n 200 /var/log/app.log
		return applyAll(command, [][2]string{
			{" -f", " -n 200"},
			{" --follow", " -n 200"},
		})
	case tool == "journalctl":
		// journalctl -f → journalctl -n 200 --since "10 min ago"
		bound := ` -n 200 --since "10 min ago"`
		result := command
		if strings.Contains(cmdLower, " -f") {
			result = strings.ReplaceAll(result, " -f", bound)
		}
		if strings.Contains(cmdLower, " --follow") {
			result = strings.ReplaceAll(result, " --follow", bound)
		}
		return result
	case strings.HasPrefix(tool, "docker") && strings.Contains(cmdLower, "logs"):
		// docker logs -f container → docker logs --tail=200 container
		return applyAll(command, [][2]string{
			{" -f ", " --tail=200 "},
			{" -f", " --tail=200"},
			{" --follow ", " --tail=200 "},
			{" --follow", " --tail=200"},
		})
	case strings.HasPrefix(tool, "kubectl") && strings.Contains(cmdLower, "logs"):
		// kubectl logs -f pod → kubectl logs --tail=200 --since=10m pod
		return applyAll(command, [][2]string{
			{" -f ", " --tail=200 --since=10m "},
			{" -f", " --tail=200 --since=10m"},
			{" --follow ", " --tail=200 --since=10m "},
			{" --follow", " --tail=200 --since=10m"},
		})
	case tool == "dmesg":
		// dmesg -w → dmesg | tail -200
		return applyAll(command, [][2]string{
			{" -w", ""},
			{" --follow", ""},
		}) + " | tail -200"
	}
	return ""
}
// suggestNonInteractiveREPL shows the non-interactive invocation pattern for
// known REPL commands. The concrete query stays a placeholder ("SELECT ...")
// because we cannot guess what the user wants to run; returns "" for commands
// where no pattern is known.
func suggestNonInteractiveREPL(command, cmdLower string) string {
	fields := strings.Fields(command)
	if len(fields) == 0 {
		return ""
	}
	switch strings.ToLower(fields[0]) {
	case "mysql", "mariadb":
		return command + ` -e "SELECT ..."`
	case "psql":
		return command + ` -c "SELECT ..."`
	case "sqlite3":
		return command + ` "SELECT ..."`
	case "redis-cli":
		return command + " PING"
	case "ssh":
		// ssh host → ssh host "command", so the session exits after running it.
		return command + ` "ls -la"`
	}
	return ""
}
// hasInteractiveTTYFlags detects docker/kubectl invocations that request an
// interactive terminal: the -it/-ti shorthands, the --tty/--interactive long
// flags, or standalone -i/-t flags following an exec/run subcommand. Other
// binaries are ignored because -i/-t mean different things for them.
func hasInteractiveTTYFlags(cmdLower string) bool {
	if !strings.HasPrefix(cmdLower, "docker ") && !strings.HasPrefix(cmdLower, "kubectl ") {
		return false
	}
	// Combined shorthand forms (-it / -ti), mid-command or trailing.
	for _, needle := range []string{" -it ", " -it\t", " -ti ", " -ti\t"} {
		if strings.Contains(cmdLower, needle) {
			return true
		}
	}
	if strings.HasSuffix(cmdLower, " -it") || strings.HasSuffix(cmdLower, " -ti") {
		return true
	}
	// Explicit long flags.
	if strings.Contains(cmdLower, " --tty") || strings.Contains(cmdLower, " --interactive") {
		return true
	}
	// Standalone -t / -i only count as interactive in exec/run context; this
	// avoids false positives like "-t tablename" for other subcommands.
	sawExecOrRun := false
	for _, tok := range strings.Fields(cmdLower) {
		if tok == "exec" || tok == "run" {
			sawExecOrRun = true
			continue
		}
		if sawExecOrRun && (tok == "-t" || tok == "-i") {
			return true
		}
	}
	return false
}
// isPagerOrEditorTool reports whether the command's binary is a pager or a
// text editor, both of which require an attached terminal to operate.
func isPagerOrEditorTool(cmdLower string) bool {
	first := cmdLower
	if i := strings.IndexByte(cmdLower, ' '); i > 0 {
		first = cmdLower[:i]
	}
	switch first {
	case "less", "more", "vim", "vi", "nano", "emacs", "pico", "ed":
		return true
	}
	return false
}
// isLiveMonitoringTool reports whether the command's binary is a live monitor
// (top, watch, ...) that keeps running until interrupted.
func isLiveMonitoringTool(cmdLower string) bool {
	first := cmdLower
	if i := strings.IndexByte(cmdLower, ' '); i > 0 {
		first = cmdLower[:i]
	}
	switch first {
	case "top", "htop", "atop", "iotop", "iftop", "nload", "watch":
		return true
	}
	return false
}
// isUnboundedStreaming detects follow-mode commands without an exit bound.
// Exit-bounded = terminates deterministically (line count, time window, or
// timeout wrapper).
//
// Allowed (exit-bounded):
//   - journalctl -n 100, tail -n 50, tail -100 -f, kubectl logs --tail=100
//   - journalctl --since "10 min ago", kubectl logs --since=10m
//   - timeout 5s tail -f
//
// Blocked (runs indefinitely):
//   - journalctl -f, tail -f, kubectl logs -f, dmesg -w
func isUnboundedStreaming(cmdLower string) bool {
	// -f means "follow" only for known streaming tools; on other commands it
	// means something else entirely (e.g. "hostname -f" = fully qualified).
	supported := false
	for _, prefix := range []string{"tail ", "journalctl ", "docker logs ", "kubectl logs ", "dmesg "} {
		if strings.HasPrefix(cmdLower, prefix) {
			supported = true
			break
		}
	}
	if !supported {
		return false
	}
	// Any follow flag present? (-w is dmesg's follow spelling)
	follow := strings.Contains(cmdLower, " -f") ||
		strings.Contains(cmdLower, " --follow") ||
		strings.Contains(cmdLower, " -w")
	if !follow {
		return false
	}
	// Wrapped in timeout → exit-bounded.
	if strings.HasPrefix(cmdLower, "timeout ") {
		return false
	}
	// Explicit bounds: line counts (-n/--lines/--tail) or time windows
	// (--since/--until) make the stream exit-bounded.
	for _, bound := range []string{" -n ", " -n=", " --lines", " --tail=", " --tail ", " --since", " --until"} {
		if strings.Contains(cmdLower, bound) {
			return false
		}
	}
	// tail's historic -N shorthand also counts as a bound.
	return !hasTailShorthandBound(cmdLower)
}

// hasTailShorthandBound reports whether a tail command uses the historic "-N"
// shorthand (e.g. `tail -100 -f`), which bounds output like "-n N".
func hasTailShorthandBound(cmdLower string) bool {
	if !strings.HasPrefix(cmdLower, "tail ") {
		return false
	}
	for _, tok := range strings.Fields(cmdLower) {
		if len(tok) < 2 || tok[0] != '-' {
			continue
		}
		digitsOnly := true
		for i := 1; i < len(tok); i++ {
			if tok[i] < '0' || tok[i] > '9' {
				digitsOnly = false
				break
			}
		}
		if digitsOnly {
			return true
		}
	}
	return false
}
// isInteractiveREPL detects commands that open an interactive REPL/shell
// unless given explicit non-interactive flags (-c, --execute, inline command).
//
// Blocked (opens REPL):
//   - ssh host (no command)
//   - mysql, psql, sqlite3 db (no -c/-e/inline SQL)
//   - redis-cli (no command args)
//   - python, node, irb
//   - openssl s_client
//
// Allowed (non-interactive):
//   - ssh host "command"
//   - mysql -e "SELECT 1"
//   - sqlite3 db "SELECT 1"
func isInteractiveREPL(cmdLower string) bool {
	firstWord := cmdLower
	if spaceIdx := strings.Index(cmdLower, " "); spaceIdx > 0 {
		firstWord = cmdLower[:spaceIdx]
	}
	// SSH: interactive unless a command is provided after host
	//	ssh host          -> interactive
	//	ssh host "ls -la" -> non-interactive
	//	ssh -t host       -> interactive (explicit TTY)
	if firstWord == "ssh" {
		// If has -t flag, it's explicitly requesting TTY.
		// NOTE(review): the second Contains(" -t") subsumes the first and also
		// matches any argument starting with "-t" — conservative, verify intent.
		if strings.Contains(cmdLower, " -t ") || strings.Contains(cmdLower, " -t") {
			return true
		}
		// Count non-flag arguments after ssh: ssh [options] host [command]
		parts := strings.Fields(cmdLower)
		nonFlagArgs := 0
		skipNext := false
		for i, part := range parts[1:] { // skip "ssh"
			if skipNext {
				skipNext = false
				continue
			}
			// Skip flags that take arguments
			if part == "-i" || part == "-l" || part == "-p" || part == "-o" || part == "-F" {
				skipNext = true
				continue
			}
			// Skip other flags
			if strings.HasPrefix(part, "-") {
				continue
			}
			nonFlagArgs++
			// If we have more than just the host, there's a command
			if nonFlagArgs > 1 || (nonFlagArgs == 1 && i < len(parts)-2) {
				return false // has command, not interactive
			}
		}
		// Only host, no command = interactive
		return nonFlagArgs <= 1
	}
	// SQL CLIs: handled by sqlContentInspector, but catch bare invocations:
	// mysql/psql without -c/-e, sqlite3 without inline SQL.
	if firstWord == "mysql" || firstWord == "mariadb" {
		// Non-interactive if has -e or --execute
		if strings.Contains(cmdLower, " -e ") || strings.Contains(cmdLower, " -e\"") ||
			strings.Contains(cmdLower, " --execute") {
			return false
		}
		// Non-interactive if has piped input (handled elsewhere, but check)
		if strings.Contains(cmdLower, " < ") || strings.Contains(cmdLower, " <<") {
			return false
		}
		return true // bare mysql = interactive
	}
	if firstWord == "psql" {
		// Non-interactive if has -c or --command
		if strings.Contains(cmdLower, " -c ") || strings.Contains(cmdLower, " -c\"") ||
			strings.Contains(cmdLower, " --command") {
			return false
		}
		if strings.Contains(cmdLower, " < ") || strings.Contains(cmdLower, " <<") {
			return false
		}
		return true
	}
	// redis-cli: interactive without command arguments
	if firstWord == "redis-cli" {
		// Look for a Redis command after the connection flags.
		parts := strings.Fields(cmdLower)
		hasCommand := false
		skipNext := false
		for _, part := range parts[1:] {
			if skipNext {
				skipNext = false
				continue
			}
			// Connection flags that take arguments
			if part == "-h" || part == "-p" || part == "-a" || part == "-n" || part == "--user" {
				skipNext = true
				continue
			}
			if strings.HasPrefix(part, "-") {
				continue
			}
			// Non-flag argument = Redis command
			hasCommand = true
			break
		}
		return !hasCommand
	}
	// Scripting language runtimes are dual-use. Bare invocations are also
	// interactive; non-interactive forms are blocked by the fail-closed
	// read-only allowlist boundary.
	if firstWord == "python" || firstWord == "python3" || firstWord == "python2" {
		// Non-interactive if has -c or script file
		if strings.Contains(cmdLower, " -c ") || strings.Contains(cmdLower, " -c\"") {
			return false
		}
		// Check for script file (non-flag argument)
		parts := strings.Fields(cmdLower)
		for _, part := range parts[1:] {
			if !strings.HasPrefix(part, "-") && !strings.HasPrefix(part, "\"") {
				return false // has script file
			}
		}
		return true // bare python = REPL
	}
	if firstWord == "node" || firstWord == "nodejs" {
		// Non-interactive if has -e or script file
		if strings.Contains(cmdLower, " -e ") || strings.Contains(cmdLower, " -e\"") ||
			strings.Contains(cmdLower, " --eval") {
			return false
		}
		parts := strings.Fields(cmdLower)
		for _, part := range parts[1:] {
			if !strings.HasPrefix(part, "-") {
				return false // has script file
			}
		}
		return true
	}
	if firstWord == "irb" || firstWord == "pry" {
		// Ruby REPLs - almost always interactive; non-interactive only with -e.
		if strings.Contains(cmdLower, " -e ") {
			return false
		}
		return true
	}
	// openssl s_client/s_server always wait for input on the terminal.
	if strings.HasPrefix(cmdLower, "openssl s_client") || strings.HasPrefix(cmdLower, "openssl s_server") {
		return true
	}
	return false
}
// hasInputRedirect reports whether the command redirects stdin from an
// external source — < (redirect), << (heredoc), <<< (here-string) — which
// would prevent content inspection. Harmless fd-merge patterns that merely
// contain the '<' or '>' characters (2>&1, 2<&1, 1>&2, &>) are ignored.
func hasInputRedirect(command string) bool {
	if !strings.Contains(command, "<") {
		return false
	}
	// Strip the safe fd-merge patterns before looking for a real '<'.
	stripped := command
	for _, safe := range []string{"2>&1", "2<&1", "1>&2", "&>"} {
		stripped = strings.ReplaceAll(stripped, safe, "")
	}
	return strings.Contains(stripped, "<")
}
// hasStdoutRedirect checks for dangerous output redirects (>, >>) while
// allowing safe stderr redirects ("2>/dev/null", "2>&1").
func hasStdoutRedirect(command string) bool {
	// Strip the benign stderr spellings in order, then see whether any
	// '>' survives — if so, stdout is being redirected somewhere.
	stripped := command
	for _, safe := range []string{"2>/dev/null", "2>&1"} {
		stripped = strings.ReplaceAll(stripped, safe, "")
	}
	return strings.Contains(stripped, ">")
}
// pipedToDualUseTool checks if a piped command sends input to a dual-use tool
// that could interpret piped input dangerously (like SQL CLIs or shells).
// Piping to read-only filters (grep, head, tail, etc.) is safe.
func pipedToDualUseTool(cmdLower string) bool {
	// Find the last single '|' (a pipe). "||" is logical OR, not a pipe,
	// so both of its characters are skipped.
	pipeIdx := -1
	for i := 0; i < len(cmdLower); i++ {
		if cmdLower[i] != '|' {
			continue
		}
		if i+1 < len(cmdLower) && cmdLower[i+1] == '|' {
			i++ // skip the second '|' of "||"
			continue
		}
		pipeIdx = i
	}
	if pipeIdx == -1 {
		return false
	}
	// Isolate the first token of the command after the last pipe.
	afterPipe := strings.TrimSpace(cmdLower[pipeIdx+1:])
	firstTok := afterPipe
	if idx := strings.IndexAny(firstTok, " \t"); idx >= 0 {
		firstTok = firstTok[:idx]
	}
	// Shells match on the exact token so that both "| sh" (the classic
	// "curl ... | sh" case) and "| sh -c ..." are caught.
	switch firstTok {
	case "sh", "bash", "zsh":
		return true
	}
	// Other dual-use tools match by prefix so version/variant suffixes are
	// covered too (e.g. "python3", "mongosh", "mysqldump").
	dualUseTools := []string{
		"sqlite3", "mysql", "mariadb", "psql", "mycli", "pgcli", "litecli",
		"redis-cli", "mongo", "mongosh",
		"python", "perl", "ruby", "node",
		"xargs",
	}
	for _, tool := range dualUseTools {
		if strings.HasPrefix(firstTok, tool) {
			return true
		}
	}
	return false
}
// checkSelfContainedGuardrails verifies the command is a single execution unit.
// Returns a reason string if any guardrail fails; an empty string means pass.
//
// Note: Most guardrails have been moved to checkMutationCapabilityGuards (Phase 1)
// to ensure they run before read-only-by-construction checks. This phase is kept
// as an extension point for future guardrails that must run after write pattern
// matching; it currently always passes.
func checkSelfContainedGuardrails(command, cmdLower string) string {
	return ""
}
// isReadOnlyByConstruction returns true for commands that cannot mutate by design.
// Only matches patterns at the START of the command to avoid false positives
// (e.g., "date " inside "UPDATE" SQL statements).
//
// Matching rules: single-word entries must equal the command's first word
// exactly; multi-word entries must be a prefix of the whole (lowercased)
// command string.
func isReadOnlyByConstruction(cmdLower string) bool {
	// Note: Pager tools (less, more) and live monitors (top, htop) are excluded here
	// because they're blocked by NonInteractiveOnly guardrails in Phase 1.
	readOnlyCommands := []string{
		"cat", "head", "tail",
		"ls", "ll", "dir",
		"ps", "free", "df", "du", "iostat", "vmstat", "mpstat", "sar",
		"grep", "awk", "sed", "find", "locate", "which", "whereis",
		"journalctl", "dmesg",
		"uname", "hostname", "whoami", "id", "groups",
		"echo", "printf",
		"date", "uptime", "env", "printenv", "locale",
		"netstat", "ss", "ifconfig", "route", "arp",
		"ping", "traceroute", "tracepath", "nslookup", "dig", "host",
		"file", "stat", "wc", "sort", "uniq", "cut", "tr",
		"lsof", "fuser",
		"getent", "nproc", "lscpu", "lsmem", "lsblk", "blkid",
		"lxc-ls", "lxc-info",
		"zcat", "zgrep", "bzcat", "xzcat",
		"md5sum", "sha256sum", "sha1sum",
		"test",
		// Process inspection
		"pgrep", "pidof", "pstree",
		// Login/session info
		"last", "lastlog", "who", "w",
		// Hardware inspection
		"lspci", "lsusb", "dmidecode", "hwinfo", "inxi",
		"sensors", "hddtemp", "smartctl", "nvme",
		// Media inspection tools
		"ffprobe", "mediainfo", "exiftool",
		// Proxmox version
		"pveversion",
	}
	// Multi-word patterns that must appear at the start
	multiWordPatterns := []string{
		// Curl read-only variants (various flag combinations)
		"curl -s", "curl --silent", "curl -i", "curl --head",
		"curl -k", "curl --insecure",
		"curl -sk", "curl -ks", "curl -ki", "curl -ik",
		"curl http", "curl https",
		"wget -q", "wget --spider",
		// Docker read-only
		"docker ps", "docker logs", "docker inspect", "docker stats",
		"docker images", "docker info", "docker version",
		"docker top", "docker port",
		"docker network ls", "docker network inspect",
		"docker volume ls", "docker volume inspect",
		"docker-compose ps", "docker compose ps",
		// Systemd read-only
		"systemctl status", "systemctl is-active", "systemctl is-enabled",
		"systemctl list", "systemctl show",
		"service status", "service --status-all",
		// Proxmox read-only
		"pct list", "pct status", "pct config", "pct df",
		"qm list", "qm status", "qm config", "qm guest cmd",
		"pvesh get",
		"pvecm status", "pvecm nodes",
		"pvesm status", "pvesm list",
		// ZFS/ZPool read-only
		"zpool status", "zpool list", "zpool get",
		"zfs list", "zfs get",
		// RAID inspection
		"mdadm --detail", "mdadm -D",
		// Network: ip with optional protocol flags (-4, -6)
		"ip addr", "ip route", "ip link", "ip neigh", "ip neighbor",
		"ip -4 addr", "ip -4 route", "ip -4 link", "ip -4 neigh", "ip -4 neighbor",
		"ip -6 addr", "ip -6 route", "ip -6 link", "ip -6 neigh", "ip -6 neighbor",
		"ip a", "ip r", "ip n",
		// Package info (read-only)
		"apt list", "apt show", "apt-cache",
		"dpkg -l", "dpkg --list", "dpkg -s",
		"rpm -q", "rpm -qa",
		"yum list", "dnf list",
		// Kubectl read-only commands
		"kubectl get", "kubectl describe", "kubectl logs", "kubectl top", "kubectl cluster-info",
		"kubectl api-resources", "kubectl api-versions", "kubectl version", "kubectl config view",
		// Network connectivity checks (zero-I/O port scan, read-only)
		"nc -z", "nc -vz", "nc -zv", "nc -zw", "nc -zvw",
		"netcat -z", "netcat -zv",
		// Timeout wrapper (makes any command bounded)
		// NOTE(review): a "timeout <cmd>" that reaches this phase is treated as
		// read-only. Per the phase comments elsewhere in this file, write
		// patterns are screened in an earlier phase — confirm that ordering
		// still holds if the phases are ever reordered.
		"timeout ",
	}
	// Extract first word of command (split on the first space only; the
	// command is assumed to be trimmed and space-separated at this point).
	firstWord := cmdLower
	if spaceIdx := strings.Index(cmdLower, " "); spaceIdx > 0 {
		firstWord = cmdLower[:spaceIdx]
	}
	// Check single-word commands
	for _, cmd := range readOnlyCommands {
		if firstWord == cmd {
			return true
		}
	}
	// Check multi-word patterns at start
	for _, pattern := range multiWordPatterns {
		if strings.HasPrefix(cmdLower, pattern) {
			return true
		}
	}
	// Special case: nc/netcat with -z flag anywhere (zero-I/O mode is read-only regardless of flag order)
	if (firstWord == "nc" || firstWord == "netcat") && containsFlag(cmdLower, "-z") {
		return true
	}
	// Special case: [ (test shorthand)
	if strings.HasPrefix(cmdLower, "[ ") {
		return true
	}
	return false
}
// containsFlag checks whether a flag (e.g. "-z") appears as a standalone
// token or embedded in a combined short-flag group (e.g. "-zv", "-zvw") in cmd.
func containsFlag(cmd, flag string) bool {
	// flag is expected to look like "-z"; the bare letter is what we search
	// for inside combined short-flag groups.
	letter := strings.TrimPrefix(flag, "-")
	for _, tok := range strings.Fields(cmd) {
		switch {
		case tok == flag:
			return true
		case strings.HasPrefix(tok, "--"):
			// Long options never encode combined short flags.
			continue
		case strings.HasPrefix(tok, "-") && strings.Contains(tok[1:], letter):
			// Combined groups like "-zv", "-zvw", "-wzv".
			return true
		}
	}
	return false
}
// matchesWritePatterns checks for known write-capable command patterns.
// Returns the reason string for the first pattern found, or "" if no write
// pattern matches.
//
// Most patterns are matched with strings.Contains, which is deliberately
// fail-safe: it can over-match inside arguments but will not miss a write
// verb. Patterns known to collide with common argument text ("service",
// "passwd") are matched at the command start instead.
func matchesWritePatterns(cmdLower string) string {
	// High-risk patterns
	// NOTE(review): ranging over a map means that when several patterns match
	// the same command, WHICH reason is returned is nondeterministic (Go
	// randomizes map iteration order). The match/no-match outcome itself is
	// unaffected.
	highRiskPatterns := map[string]string{
		"rm ": "file deletion", "rm\t": "file deletion", "rmdir": "directory deletion",
		"shutdown": "system shutdown", "reboot": "system reboot", "poweroff": "system poweroff", "halt": "system halt",
		"systemctl restart": "service restart", "systemctl stop": "service stop", "systemctl start": "service start",
		"systemctl enable": "service enable", "systemctl disable": "service disable",
		"init ": "init control",
		"apt ":  "package management", "apt-get ": "package management", "yum ": "package management",
		"dnf ": "package management", "pacman ": "package management", "apk ": "package management", "brew ": "package management",
		"pip install": "package install", "pip uninstall": "package uninstall",
		"npm install": "package install", "npm uninstall": "package uninstall", "cargo install": "package install",
		"docker rm": "container removal", "docker stop": "container stop", "docker kill": "container kill",
		"docker restart": "container restart", "docker exec": "container exec", "kubectl exec": "container exec",
		"kill ": "process termination", "killall ": "process termination", "pkill ": "process termination",
		"dd ": "disk write", "mkfs": "filesystem creation", "fdisk": "disk partition", "parted": "disk partition", "mkswap": "swap creation",
		"iptables": "firewall modification", "firewall-cmd": "firewall modification", "ufw ": "firewall modification",
		"truncate": "file truncation",
		"chmod ":   "permission change", "chown ": "ownership change", "chgrp ": "group change",
		"useradd": "user creation", "userdel": "user deletion", "usermod": "user modification",
		"chpasswd": "password change",
		"crontab -e": "cron edit", "crontab -r": "cron removal", "crontab -": "cron modification",
		"visudo": "sudoers edit", "vipw": "passwd edit",
		"mount ": "filesystem mount", "umount ": "filesystem unmount",
		"modprobe": "kernel module", "rmmod": "kernel module removal", "insmod": "kernel module insertion",
		"sysctl -w": "kernel parameter change",
		"nc -l":     "network listener",
	}
	for pattern, reason := range highRiskPatterns {
		if strings.Contains(cmdLower, pattern) {
			return reason
		}
	}
	// Command-start-only patterns: these must be the first word to avoid matching
	// substrings in arguments (e.g., "pve-daily-utils.service" contains "service",
	// "grep /etc/passwd" contains "passwd").
	if strings.HasPrefix(cmdLower, "service ") {
		return "service control"
	}
	if strings.HasPrefix(cmdLower, "passwd") {
		return "password change"
	}
	// Medium-risk patterns
	mediumRiskPatterns := map[string]string{
		"mv ": "file move", "cp ": "file copy",
		"sed -i": "in-place edit", "awk -i": "in-place edit",
		"touch ": "file creation", "mkdir ": "directory creation",
		// echo/printf: output redirection is already caught by Phase 1 (hasStdoutRedirect).
		// Without a redirect, echo/printf are read-only (just print to stdout).
		// They are now in the read-only allowlist in Phase 3.
		"wget -O": "file download", "wget --output": "file download",
		"tar -x": "archive extraction", "tar x": "archive extraction", "unzip ": "archive extraction", "gunzip ": "archive extraction",
	}
	for pattern, reason := range mediumRiskPatterns {
		if strings.Contains(cmdLower, pattern) {
			return reason
		}
	}
	// "ln"/"link" are matched as standalone tokens (or path-suffixed, e.g.
	// "/bin/ln") to avoid flagging words that merely contain "ln".
	if containsCommandToken(cmdLower, "ln") || containsCommandToken(cmdLower, "link") {
		return "link creation"
	}
	// Curl with mutation verbs
	if strings.Contains(cmdLower, "curl") {
		if strings.Contains(cmdLower, "-d ") || strings.Contains(cmdLower, "--data") ||
			strings.Contains(cmdLower, "--upload") ||
			strings.Contains(cmdLower, "-x post") || strings.Contains(cmdLower, "-x put") ||
			strings.Contains(cmdLower, "-x delete") || strings.Contains(cmdLower, "-x patch") {
			return "HTTP mutation request"
		}
	}
	return ""
}
// containsCommandToken reports whether token appears in cmdLower as a
// standalone word, or as the final path component of a path-like word
// (e.g. "/bin/ln", "./ln", "../ln", "~/ln").
func containsCommandToken(cmdLower, token string) bool {
	pathSuffix := "/" + token
	for _, word := range strings.Fields(cmdLower) {
		if word == token {
			return true
		}
		pathLike := strings.HasPrefix(word, "/") || strings.HasPrefix(word, "./") ||
			strings.HasPrefix(word, "../") || strings.HasPrefix(word, "~/")
		if pathLike && strings.HasSuffix(word, pathSuffix) {
			return true
		}
	}
	return false
}
// hasShellChainingOutsideQuotes checks if a command contains shell chaining operators
// (;, &&, ||) outside of quoted strings. This allows SQL statements like "SELECT 1;"
// while still catching shell command chaining like "ls; rm -rf /".
//
// Handles escaped quotes (\' and \") by skipping the escaped character.
// Fails closed: if quote state becomes ambiguous (unclosed quotes), returns true.
func hasShellChainingOutsideQuotes(cmd string) bool {
	var inSingle, inDouble bool
	for i := 0; i < len(cmd); i++ {
		c := cmd[i]
		// A backslash escapes the following byte, so that byte can never
		// toggle quote state or act as an operator.
		if c == '\\' && i+1 < len(cmd) {
			i++
			continue
		}
		switch c {
		case '\'':
			if !inDouble {
				inSingle = !inSingle
			}
		case '"':
			if !inSingle {
				inDouble = !inDouble
			}
		case ';':
			if !inSingle && !inDouble {
				return true
			}
		case '&', '|':
			// Only doubled operators (&& / ||) are chaining; a single '|'
			// is a pipe, which is allowed for read operations.
			if !inSingle && !inDouble && i+1 < len(cmd) && cmd[i+1] == c {
				return true
			}
		}
	}
	// Unclosed quotes make the scan ambiguous — fail closed.
	return inSingle || inDouble
}
// splitChainedCommand splits a shell command on ;, &&, and || operators
// that appear outside of quoted strings. Returns the individual sub-commands
// with leading/trailing whitespace trimmed.
func splitChainedCommand(cmd string) []string {
	var (
		segments         []string
		inSingle, inDouble bool
		segStart         int
	)
	// cut closes the current segment at end and starts the next one at next.
	cut := func(end, next int) {
		segments = append(segments, strings.TrimSpace(cmd[segStart:end]))
		segStart = next
	}
	for i := 0; i < len(cmd); i++ {
		switch c := cmd[i]; {
		case c == '\\' && i+1 < len(cmd):
			// Escaped byte: never an operator or quote toggle.
			i++
		case c == '\'' && !inDouble:
			inSingle = !inSingle
		case c == '"' && !inSingle:
			inDouble = !inDouble
		case inSingle || inDouble:
			// Inside quotes, operators are literal text.
		case c == ';':
			cut(i, i+1)
		case (c == '&' || c == '|') && i+1 < len(cmd) && cmd[i+1] == c:
			cut(i, i+2)
			i++ // skip the second operator byte
		}
	}
	// Append the trailing segment, if any non-blank text remains.
	if segStart < len(cmd) {
		if tail := strings.TrimSpace(cmd[segStart:]); tail != "" {
			segments = append(segments, tail)
		}
	}
	return segments
}
// classifyChainedCommand handles commands with shell chaining operators.
// It splits the command into sub-commands and classifies each individually.
// If ALL sub-commands are read-only, the chain is allowed.
// If ANY sub-command is write/unknown, the chain is blocked.
func classifyChainedCommand(command string) IntentResult {
	subCommands := splitChainedCommand(command)
	switch len(subCommands) {
	case 0:
		return IntentResult{Intent: IntentWriteOrUnknown, Reason: "empty chained command"}
	case 1:
		// Defensive: callers check hasShellChainingOutsideQuotes first, so a
		// single segment should not occur — classify it directly anyway.
		return classifySingleCommand(subCommands[0])
	}
	for _, sub := range subCommands {
		if sub == "" {
			continue
		}
		verdict := classifySingleCommand(sub)
		readOnly := verdict.Intent == IntentReadOnlyCertain || verdict.Intent == IntentReadOnlyConditional
		if !readOnly {
			return IntentResult{
				Intent: IntentWriteOrUnknown,
				Reason: fmt.Sprintf("chained sub-command not read-only: %s (reason: %s)", sub, verdict.Reason),
			}
		}
	}
	return IntentResult{Intent: IntentReadOnlyCertain, Reason: "all chained sub-commands are read-only"}
}
// classifyCommandRisk provides backward-compatible risk classification.
// It delegates to ClassifyExecutionIntent and maps the result to CommandRisk,
// preserving the High/Medium write distinction for existing callers.
//
// Deprecated: Use ClassifyExecutionIntent for new code.
func classifyCommandRisk(command string) CommandRisk {
	verdict := ClassifyExecutionIntent(command)
	if verdict.Intent == IntentReadOnlyCertain || verdict.Intent == IntentReadOnlyConditional {
		return CommandRiskReadOnly
	}
	// Not read-only: fall back to the legacy High/Medium split, which reuses
	// the same pattern checks as matchesWritePatterns.
	return classifyWriteRiskLevel(command, verdict.Reason)
}
// classifyWriteRiskLevel determines whether a write command is high or medium risk.
// Used by classifyCommandRisk for backward compatibility.
//
// The reason parameter is currently unused; it is kept so callers can pass the
// classifier's reason through without a signature change.
// Matching is substring-based (fail-safe: may over-match inside arguments).
func classifyWriteRiskLevel(command, reason string) CommandRisk {
	cmdLower := strings.ToLower(command)
	// High-risk: destructive system operations
	highRiskPatterns := []string{
		// Shell mutation capabilities (these dominate everything)
		"> ", ">>", "| tee ",
		// Destructive file operations
		"rm ", "rm\t", "rmdir",
		// System control
		"shutdown", "reboot", "poweroff", "halt",
		// Service control (except status)
		"systemctl restart", "systemctl stop", "systemctl start",
		"systemctl enable", "systemctl disable",
		"service ", "init ",
		// Package managers
		"apt ", "apt-get ", "yum ", "dnf ", "pacman ", "apk ", "brew ",
		"pip install", "pip uninstall", "npm install", "npm uninstall", "cargo install",
		// Container destruction
		"docker rm", "docker stop", "docker kill", "docker restart",
		// Process termination
		"kill ", "killall ", "pkill ",
		// Disk operations
		"dd ", "mkfs", "fdisk", "parted", "mkswap",
		// Firewall
		"iptables", "firewall-cmd", "ufw ",
		// File truncation
		"truncate",
		// Permissions/ownership
		"chmod ", "chown ", "chgrp ",
		// User management
		"useradd", "userdel", "usermod", "passwd", "chpasswd",
		// Cron/sudoers
		"crontab -", "visudo", "vipw",
		// Mounts and kernel
		"mount ", "umount ", "modprobe", "rmmod", "insmod", "sysctl -w",
		// sudo escalation
		"sudo",
	}
	for _, pattern := range highRiskPatterns {
		if strings.Contains(cmdLower, pattern) {
			return CommandRiskHighWrite
		}
	}
	// Everything else is medium-risk
	return CommandRiskMediumWrite
}
// GetReadOnlyViolationHint returns a hint for why a command was blocked from pulse_read.
// Uses the IntentResult reason plus context-aware suggestions.
func GetReadOnlyViolationHint(command string, result IntentResult) string {
	hint := result.Reason
	// Structural (Phase 1) guardrail failures: the offending construct must be removed.
	if isPhase1GuardrailFailure(result.Reason) {
		return getPhase1Hint(result.Reason, hint)
	}
	// SQL CLI content-inspection failures get query-shaping guidance.
	lower := strings.ToLower(command)
	for _, cli := range []string{"sqlite3", "mysql", "mariadb", "psql"} {
		if strings.Contains(lower, cli) {
			return getSQLHint(result.Reason, hint)
		}
	}
	// Unknown / uninspectable commands: suggest a self-contained form.
	if strings.Contains(result.Reason, "unknown") || strings.Contains(result.Reason, "no inspector") {
		return hint + ". Try a self-contained form: no pipes, no redirects, single statement. If this is a read-only operation, consider using a known read-only command instead."
	}
	return hint
}
// isPhase1GuardrailFailure returns true if the reason indicates a Phase 1
// structural issue (case-sensitive keyword match against the reason text).
func isPhase1GuardrailFailure(reason string) bool {
	keywords := [...]string{
		"sudo", "redirect", "tee", "substitution", "chaining", "piped input",
		// NonInteractiveOnly guardrails
		"TTY", "terminal", "pager", "editor", "indefinitely", "unbounded", "streaming",
	}
	for _, keyword := range keywords {
		if strings.Contains(reason, keyword) {
			return true
		}
	}
	return false
}
// getPhase1Hint returns actionable hints for Phase 1 guardrail failures.
// The first matching keyword wins, mirroring the order guardrails fire in.
func getPhase1Hint(reason, baseHint string) string {
	has := func(kw string) bool { return strings.Contains(reason, kw) }
	if has("sudo") {
		return baseHint + ". Remove sudo to use pulse_read; use pulse_control for privileged operations."
	}
	if has("redirect") {
		return baseHint + ". Remove redirects (>, >>, <, <<, <<<) to use pulse_read."
	}
	if has("tee") {
		return baseHint + ". Remove tee to use pulse_read; tee writes to files."
	}
	if has("substitution") {
		return baseHint + ". Remove $() or backticks to use pulse_read."
	}
	if has("chaining") {
		return baseHint + ". Run commands separately instead of chaining with ; && ||."
	}
	if has("piped input") {
		return baseHint + ". For dual-use tools, include content directly instead of piping. Example: sqlite3 db.db \"SELECT ...\" instead of cat file | sqlite3 db.db"
	}
	// NonInteractiveOnly hints
	if has("TTY") || has("terminal") {
		return baseHint + ". Remove -it/--tty/--interactive flags. Use non-interactive form: docker exec container cmd (not docker exec -it)."
	}
	if has("pager") || has("editor") {
		return baseHint + ". Use cat, head -n, or tail -n instead of interactive tools."
	}
	if has("indefinitely") {
		return baseHint + ". Use bounded alternatives: ps aux (not top), journalctl -n 100 (not watch)."
	}
	if has("unbounded") || has("streaming") {
		return baseHint + ". Add line limit: journalctl -n 100 -f or tail -n 50 -f, or wrap with timeout."
	}
	return baseHint + ". Remove redirects, chaining, sudo, or subshells to use pulse_read."
}
// getSQLHint returns actionable hints for SQL CLI content inspection failures,
// keyed off keywords in the classifier's reason text.
func getSQLHint(reason, baseHint string) string {
	if strings.Contains(reason, "external") || strings.Contains(reason, "no inline") {
		return baseHint + ". Include SQL directly in quotes: sqlite3 db.db \"SELECT ...\""
	}
	if strings.Contains(reason, "write") || strings.Contains(reason, "control") {
		return baseHint + ". Use only SELECT statements. Avoid: INSERT, UPDATE, DELETE, DROP, CREATE, PRAGMA, BEGIN, COMMIT, ROLLBACK."
	}
	return baseHint + ". For read-only queries, use self-contained SELECT statements without transaction control."
}
// Default truncation limits for topology and list responses. These caps keep
// tool output compact; callers can page or filter to retrieve more entries.
const (
	defaultMaxTopologyNodes = 5
	defaultMaxTopologyVMsPerNode = 5
	defaultMaxTopologyContainersPerNode = 5
	defaultMaxTopologyDockerHosts = 3
	defaultMaxTopologyDockerContainersPerHost = 5
	defaultMaxTopologyK8sClusters = 3
	defaultMaxTopologyK8sNodesPerCluster = 5
	defaultMaxTopologyK8sDeploymentsPerCluster = 5
	defaultMaxTopologyK8sPodsPerCluster = 10
	defaultMaxListDockerContainersPerHost = 10
)
// registerResolvedResource adds a discovered resource to the resolved context if available.
// This is called by query tools when they find resources, enabling action tools to validate
// that commands are targeting legitimate, discovered resources.
//
// NOTE: This does NOT mark the resource as "recently accessed" for routing validation.
// Use registerResolvedResourceWithExplicitAccess() for single-resource operations where
// user intent is clear.
func (e *PulseToolExecutor) registerResolvedResource(reg ResourceRegistration) {
	if e.resolvedContext != nil {
		e.resolvedContext.AddResolvedResource(reg)
	}
}
// registerResolvedResourceWithExplicitAccess adds a resource AND marks it as recently accessed.
// Use this for single-resource operations (pulse_query get, explicit select) where user
// intent to target this specific resource is clear.
//
// DO NOT use this for bulk operations (list, search) that return many resources,
// as it would poison routing validation and cause false ROUTING_MISMATCH blocks.
func (e *PulseToolExecutor) registerResolvedResourceWithExplicitAccess(reg ResourceRegistration) {
	if e.resolvedContext == nil {
		return
	}
	e.resolvedContext.AddResolvedResource(reg)
	// Reconstruct the ID used by AddResolvedResource so MarkExplicitAccess
	// tags the same entry:
	//   {kind}:{host}:{uid} for host-scoped resources
	//   {kind}:{uid}        for global resources
	// The host scope prefers HostUID over HostName; the identifier prefers
	// ProviderUID over Name.
	hostScope := reg.HostUID
	if hostScope == "" {
		hostScope = reg.HostName
	}
	ident := reg.ProviderUID
	if ident == "" {
		ident = reg.Name
	}
	resourceID := reg.Kind + ":" + ident
	if hostScope != "" {
		resourceID = reg.Kind + ":" + hostScope + ":" + ident
	}
	e.resolvedContext.MarkExplicitAccess(resourceID)
}
// ValidationResult holds the result of resource validation.
// Check StrictError first using errors.As() for typed error handling.
type ValidationResult struct {
	Resource ResolvedResourceInfo // resolved resource, when discovery lookup succeeded
	ErrorMsg string // Human-readable error (backwards compat)
	StrictError *ErrStrictResolution // Typed error for strict mode violations
}
// IsBlocked returns true if the validation blocked the operation
// (i.e. a strict-mode violation was recorded).
func (v *ValidationResult) IsBlocked() bool {
	return v.StrictError != nil
}
// validateResolvedResource checks if a resource has been previously discovered via query/discovery tools.
// Returns a ValidationResult containing:
//   - Resource: the resolved resource info if found
//   - ErrorMsg: human-readable error message (empty if valid)
//   - StrictError: typed error for strict mode violations (nil if not blocked)
//
// Setting skipIfNoContext=true makes validation optional (for backwards compatibility).
//
// When PULSE_STRICT_RESOLUTION=true is set, write operations (start, stop, restart, delete, exec)
// will be blocked if the resource wasn't discovered first. This prevents the AI from operating
// on fabricated or hallucinated resource IDs.
func (e *PulseToolExecutor) validateResolvedResource(resourceName, action string, skipIfNoContext bool) ValidationResult {
	// Hard validation applies only to write actions under strict mode.
	strictMode := isStrictResolutionEnabled()
	isWrite := isWriteAction(action)
	requireHardValidation := strictMode && isWrite

	// strictBlock records telemetry and builds the typed strict-mode error.
	strictBlock := func(message string) ValidationResult {
		if e.telemetryCallback != nil {
			e.telemetryCallback.RecordStrictResolutionBlock("validateResolvedResource", action)
		}
		err := &ErrStrictResolution{
			ResourceID: resourceName,
			Action:     action,
			Message:    message,
		}
		return ValidationResult{
			ErrorMsg:    err.Message,
			StrictError: err,
		}
	}

	// checkAllowedActions validates the requested action against the
	// resource's allowlist. An empty allowlist permits every action.
	checkAllowedActions := func(res ResolvedResourceInfo) ValidationResult {
		allowedActions := res.GetAllowedActions()
		if len(allowedActions) > 0 {
			actionAllowed := false
			for _, allowed := range allowedActions {
				if allowed == action || allowed == "*" {
					actionAllowed = true
					break
				}
			}
			if !actionAllowed {
				return ValidationResult{
					Resource: res,
					ErrorMsg: fmt.Sprintf("Action '%s' is not permitted for resource '%s'. Allowed actions: %v", action, resourceName, allowedActions),
				}
			}
		}
		return ValidationResult{Resource: res}
	}

	if e.resolvedContext == nil {
		if requireHardValidation {
			return strictBlock(fmt.Sprintf("Resource '%s' has not been discovered. Use pulse_query to find resources before performing '%s' action.", resourceName, action))
		}
		if skipIfNoContext {
			return ValidationResult{}
		}
		return ValidationResult{
			ErrorMsg: fmt.Sprintf("Resource '%s' has not been discovered. Use pulse_query to find resources first.", resourceName),
		}
	}

	// Alias lookup first (most common case - user refers to resources by name),
	// then direct canonical-ID lookup.
	if res, found := e.resolvedContext.GetResolvedResourceByAlias(resourceName); found {
		return checkAllowedActions(res)
	}
	if res, found := e.resolvedContext.GetResolvedResourceByID(resourceName); found {
		return checkAllowedActions(res)
	}

	// Resource not found in the session context.
	if requireHardValidation {
		return strictBlock(fmt.Sprintf("Resource '%s' has not been discovered in this session. Use pulse_query action=search to find it before performing '%s' action.", resourceName, action))
	}
	// Allow operation if skipIfNoContext (backwards compat for soft validation)
	if skipIfNoContext {
		return ValidationResult{}
	}
	return ValidationResult{
		ErrorMsg: fmt.Sprintf("Resource '%s' has not been discovered in this session. Use pulse_query action=search to find it first.", resourceName),
	}
}
// validateResolvedResourceForExec validates a resource for command execution.
// It uses command risk classification to determine if strict validation applies.
//
// Behavior in strict mode (PULSE_STRICT_RESOLUTION=true):
//   - Read-only commands are allowed IF the session has ANY resolved context
//     (prevents arbitrary host guessing while allowing diagnostic commands)
//   - Write commands require the specific resource to be discovered first
//
// Behavior in normal mode:
//   - All commands are allowed with soft validation (warning logs)
func (e *PulseToolExecutor) validateResolvedResourceForExec(resourceName, command string, skipIfNoContext bool) ValidationResult {
	risk := classifyCommandRisk(command)

	// Write-capable commands use the "exec" action, which triggers strict validation.
	if risk != CommandRiskReadOnly {
		return e.validateResolvedResource(resourceName, "exec", skipIfNoContext)
	}

	// Read-only command in non-strict mode: soft validation only.
	if !isStrictResolutionEnabled() {
		return e.validateResolvedResource(resourceName, "query", skipIfNoContext)
	}

	// Read-only command in strict mode: allowed when the session has ANY
	// resolved context, which prevents arbitrary host guessing while still
	// permitting diagnostics on an "active" session.
	if e.resolvedContext != nil {
		// If the specific resource was discovered, use it directly.
		result := e.validateResolvedResource(resourceName, "query", true)
		if result.Resource != nil {
			return result
		}
		// Scoped bypass: the user has done some discovery already, so allow
		// the read-only command with a warning.
		if e.hasAnyResolvedHost() {
			return ValidationResult{
				ErrorMsg: fmt.Sprintf("Resource '%s' not explicitly discovered, but allowing read-only command due to existing session context", resourceName),
			}
		}
	}

	// No context at all: require discovery even for read-only in strict mode.
	if e.telemetryCallback != nil {
		e.telemetryCallback.RecordStrictResolutionBlock("validateResolvedResourceForExec", "exec (read-only)")
	}
	return ValidationResult{
		ErrorMsg: "No resources discovered in this session. Use pulse_query to discover resources first.",
		StrictError: &ErrStrictResolution{
			ResourceID: resourceName,
			Action:     "exec (read-only)",
			Message:    fmt.Sprintf("Resource '%s' cannot be accessed. No resources have been discovered in this session. Use pulse_query action=search to discover available resources.", resourceName),
		},
	}
}
// hasAnyResolvedHost checks if there's at least one discovered resource in the session.
// This is used to scope read-only exec bypass - if the user has discovered ANY resource,
// we allow read-only commands to other resources (with warnings).
func (e *PulseToolExecutor) hasAnyResolvedHost() bool {
	return e.resolvedContext != nil && e.resolvedContext.HasAnyResources()
}
// RoutingValidationResult holds the result of routing context validation.
type RoutingValidationResult struct {
	RoutingError *ErrRoutingMismatch // Non-nil if routing mismatch detected
}
// IsBlocked returns true if routing validation blocked the operation
// (a routing mismatch was detected).
func (r *RoutingValidationResult) IsBlocked() bool {
	return r.RoutingError != nil
}
// validateRoutingContext checks whether a target_host should instead be a more
// specific child resource (system container/VM) running on that host.
//
// The check is deliberately scoped to RECENTLY ACCESSED resources to avoid
// false positives:
//
//   - target_host resolving directly to a resource in ResolvedContext → OK
//   - target_host being a Proxmox node where the user RECENTLY referenced
//     child resources (within RecentAccessWindow) → blocked as ROUTING_MISMATCH
//
// Legitimate host-level operations ("apt update on @pve-node") stay allowed,
// while the "user said @homepage-docker but model targets pve-node" case is
// caught: a child resource mentioned in this turn/exchange implies the user
// intends to target that child, not its parent host.
func (e *PulseToolExecutor) validateRoutingContext(targetHost string) RoutingValidationResult {
	// Without a state provider or a resolved context there is nothing to validate.
	if !e.hasReadState() || e.resolvedContext == nil {
		return RoutingValidationResult{}
	}
	// A direct hit in ResolvedContext means the caller is already targeting
	// the intended resource — no mismatch possible.
	if _, ok := e.resolvedContext.GetResolvedResourceByAlias(targetHost); ok {
		return RoutingValidationResult{}
	}
	// Mismatch detection only applies when targetHost is a Proxmox node (host type).
	loc := e.resolveResourceLocation(targetHost)
	if !loc.Found || loc.ResourceType != "node" {
		return RoutingValidationResult{}
	}
	// Key refinement: only block when the user recently referenced a child
	// guest on this node, implying they meant the child rather than the host.
	recentChildren := e.findRecentlyReferencedChildrenOnNode(loc.Node)
	if len(recentChildren) == 0 {
		return RoutingValidationResult{}
	}
	// Collect names, canonical IDs, and kinds for the error payload.
	childNames := make([]string, 0, len(recentChildren))
	childIDs := make([]string, 0, len(recentChildren))
	childKinds := make([]string, 0, len(recentChildren))
	for _, child := range recentChildren {
		childNames = append(childNames, child.Name)
		childIDs = append(childIDs, child.ResourceID)
		childKinds = append(childKinds, child.Kind)
	}
	// Telemetry uses the first child kind as a small-enum label to keep
	// metric cardinality low.
	if e.telemetryCallback != nil && len(childKinds) > 0 {
		e.telemetryCallback.RecordRoutingMismatchBlock("routing_validation", "node", childKinds[0])
	}
	// Rate-limited, low-cardinality debug log for support/debugging.
	logRoutingMismatchDebug(targetHost, childKinds, childIDs)
	// Recently referenced children exist — block with ROUTING_MISMATCH.
	return RoutingValidationResult{
		RoutingError: &ErrRoutingMismatch{
			TargetHost:            targetHost,
			MoreSpecificResources: childNames,
			MoreSpecificIDs:       childIDs,
			ChildKinds:            childKinds,
			Message: fmt.Sprintf(
				"target_host '%s' is a hypervisor node, but you recently referenced more specific resources on it: %v. "+
					"Did you mean to target one of these instead? File operations on a host node do NOT affect files inside guest VMs/containers.",
				targetHost, childNames),
		},
	}
}
// recentChildInfo holds both the name and canonical ID of a recently referenced child resource.
// Produced by findRecentlyReferencedChildrenOnNode and consumed by
// validateRoutingContext when building ROUTING_MISMATCH error payloads.
type recentChildInfo struct {
	Name       string // Human-readable name
	ResourceID string // Canonical ID (kind:scope:id)
	Kind       string // Resource kind (system-container, vm, app-container) for telemetry
}
// findRecentlyReferencedChildrenOnNode returns the names and canonical IDs of
// guest resources (system containers/VMs) on the given hypervisor node that
// were RECENTLY ACCESSED (within RecentAccessWindow).
//
// Used by validateRoutingContext to detect that the user referenced a child
// resource in the current turn/exchange and probably intended to target it.
func (e *PulseToolExecutor) findRecentlyReferencedChildrenOnNode(nodeName string) []recentChildInfo {
	if e.resolvedContext == nil {
		return nil
	}
	rs, err := e.readStateForControl()
	if err != nil {
		return nil
	}
	var children []recentChildInfo
	// collect appends the named guest when it resolves in the session context
	// to the expected kind AND was accessed within the recent-access window.
	collect := func(name, kind string) {
		res, found := e.resolvedContext.GetResolvedResourceByAlias(name)
		if !found || res.GetKind() != kind {
			return
		}
		resourceID := res.GetResourceID()
		if !e.resolvedContext.WasRecentlyAccessed(resourceID, RecentAccessWindow) {
			return
		}
		children = append(children, recentChildInfo{
			Name:       name,
			ResourceID: resourceID,
			Kind:       kind,
		})
	}
	// LXC containers on this node.
	for _, ct := range rs.Containers() {
		if ct.Node() == nodeName {
			collect(ct.Name(), "system-container")
		}
	}
	// VMs on this node.
	for _, vm := range rs.VMs() {
		if vm.Node() == nodeName {
			collect(vm.Name(), "vm")
		}
	}
	return children
}
// logRoutingMismatchDebug emits a rate-limited debug log describing a routing
// mismatch block (at most one entry per limiter interval). Only safe,
// low-cardinality fields are logged: target kind, first child kind, first
// suggested resource ID, and the affected-children count.
func logRoutingMismatchDebug(targetHost string, childKinds, childIDs []string) {
	routingMismatchLogLimiter.mu.Lock()
	defer routingMismatchLogLimiter.mu.Unlock()
	if time.Since(routingMismatchLogLimiter.lastLog) < routingMismatchLogLimiter.interval {
		// Suppressed: another mismatch was already logged within the interval.
		return
	}
	routingMismatchLogLimiter.lastLog = time.Now()
	// Use the first child kind/ID as representative values (low cardinality).
	childKind, suggestedID := "unknown", "none"
	if len(childKinds) > 0 {
		childKind = childKinds[0]
	}
	if len(childIDs) > 0 {
		suggestedID = childIDs[0]
	}
	log.Debug().
		Str("event", "routing_mismatch_block").
		Str("target_kind", "node").
		Str("child_kind", childKind).
		Str("suggested_resource_id", suggestedID).
		Int("affected_children", len(childIDs)).
		Msg("[RoutingValidation] Blocked operation targeting parent node when child recently referenced")
}
// registerQueryTools registers the pulse_query tool
//
// pulse_query is the read-only discovery entry point: it exposes search, get,
// config, topology, list, and health actions over canonical infrastructure
// resources. Governance marks it as a read action requiring no approval.
func (e *PulseToolExecutor) registerQueryTools() {
	e.registry.Register(RegisteredTool{
		Definition: Tool{
			Name:        "pulse_query",
			Description: `Query and search canonical infrastructure resources. Start here to discover systems, workloads, storage, and disks by name. Actions: search, get, config, topology, list, health.`,
			InputSchema: InputSchema{
				Type: "object",
				Properties: map[string]PropertySchema{
					// "action" is the only required property; all others scope
					// or filter the chosen action.
					"action": {
						Type:        "string",
						Description: "Query action to perform",
						Enum:        []string{"search", "get", "config", "topology", "list", "health"},
					},
					"query": {
						Type:        "string",
						Description: "Search query (for action: search)",
					},
					// Canonical types plus compatibility aliases ("system",
					// "storage-pool") — see canonicalQueryResourceType.
					"resource_type": {
						Type:        "string",
						Description: "Resource type. For get/search, prefer canonical values: 'agent', 'vm', 'system-container', 'app-container', 'storage', 'physical-disk', 'node', and 'docker-host'. Compatibility aliases 'system' and 'storage-pool' are still accepted. For config: 'vm', 'system-container', or supported API-backed 'app-container'.",
						Enum:        []string{"agent", "system", "vm", "system-container", "app-container", "node", "docker-host", "storage", "storage-pool", "physical-disk"},
					},
					"resource_id": {
						Type:        "string",
						Description: "Resource identifier (VMID or name) (for action: get, config)",
					},
					// "type" accepts plural families for list and singular
					// canonical kinds for search — see canonicalQueryListType
					// and canonicalQuerySearchType.
					"type": {
						Type:        "string",
						Description: "Filter by type. For action=list, use plural families such as 'agents', 'systems', 'nodes', 'vms', 'system-containers', 'app-containers', 'docker-hosts', 'storages', 'storage-pools', and 'physical-disks'. For action=search, singular canonical kinds such as 'agent' and 'storage' are also accepted.",
						Enum:        []string{"agent", "agents", "systems", "nodes", "vms", "system-containers", "app-containers", "docker-hosts", "storage", "storages", "storage-pools", "physical-disks", "kubernetes", "k8s-clusters", "k8s-nodes", "k8s-pods", "k8s-deployments"},
					},
					"status": {
						Type:        "string",
						Description: "Filter by status (for action: search, list)",
					},
					"cluster_name": {
						Type:        "string",
						Description: "Filter Kubernetes list responses by cluster name (for action: list)",
					},
					"namespace": {
						Type:        "string",
						Description: "Filter Kubernetes pods/deployments by namespace (for action: list)",
					},
					"include": {
						Type:        "string",
						Description: "Include filter for topology: 'all', 'proxmox', 'app-containers', 'kubernetes' (for action: topology)",
						Enum:        []string{"all", "proxmox", "app-containers", "kubernetes"},
					},
					"summary_only": {
						Type:        "boolean",
						Description: "Return only summary counts for topology (for action: topology)",
					},
					// max_* knobs bound response sizes for topology/list output.
					"max_proxmox_nodes": {
						Type:        "integer",
						Description: "Max Proxmox nodes to include (for action: topology)",
					},
					"max_vms_per_node": {
						Type:        "integer",
						Description: "Max VMs per node (for action: topology)",
					},
					"max_containers_per_node": {
						Type:        "integer",
						Description: "Max containers per node (for action: topology)",
					},
					"max_docker_hosts": {
						Type:        "integer",
						Description: "Max Docker hosts (for action: topology)",
					},
					"max_docker_containers_per_host": {
						Type:        "integer",
						Description: "Max Docker containers per host (for action: topology, list)",
					},
					"max_k8s_clusters": {
						Type:        "integer",
						Description: "Max Kubernetes clusters to include (for action: topology)",
					},
					"max_k8s_nodes_per_cluster": {
						Type:        "integer",
						Description: "Max Kubernetes nodes per cluster (for action: topology)",
					},
					"max_k8s_deployments_per_cluster": {
						Type:        "integer",
						Description: "Max Kubernetes deployments per cluster (for action: topology)",
					},
					"max_k8s_pods_per_cluster": {
						Type:        "integer",
						Description: "Max Kubernetes pods per cluster (for action: topology)",
					},
					// Pagination for search/list results.
					"limit": {
						Type:        "integer",
						Description: "Maximum number of results (default: 100)",
					},
					"offset": {
						Type:        "integer",
						Description: "Number of results to skip",
					},
				},
				Required: []string{"action"},
			},
		},
		Handler: func(ctx context.Context, exec *PulseToolExecutor, args map[string]interface{}) (CallToolResult, error) {
			return exec.executeQuery(ctx, args)
		},
		Governance: ToolGovernance{
			ActionMode:     ToolActionRead,
			ApprovalPolicy: "no approval required",
			Summary:        "Resolves canonical infrastructure identity, topology, config, and health without changing state.",
		},
	})
}
// canonicalQueryResourceType maps compatibility aliases ("system",
// "storage-pool") onto their canonical resource types. All other values are
// returned lower-cased and trimmed.
func canonicalQueryResourceType(resourceType string) string {
	normalized := strings.ToLower(strings.TrimSpace(resourceType))
	switch normalized {
	case "system":
		return "agent"
	case "storage-pool":
		return "storage"
	}
	return normalized
}
// canonicalQueryListType maps singular kinds and compatibility aliases onto
// the plural list families used by action=list. Unrecognized values are
// returned lower-cased and trimmed.
func canonicalQueryListType(filterType string) string {
	normalized := strings.ToLower(strings.TrimSpace(filterType))
	switch normalized {
	case "system", "agent", "agents":
		return "systems"
	case "node":
		return "nodes"
	case "vm":
		return "vms"
	case "system-container":
		return "system-containers"
	case "app-container":
		return "app-containers"
	case "docker-host":
		return "docker-hosts"
	case "storage", "storages", "storage-pool":
		return "storage-pools"
	case "physical-disk":
		return "physical-disks"
	case "k8s-cluster", "k8s-clusters":
		return "k8s-clusters"
	case "k8s-node", "k8s-nodes":
		return "k8s-nodes"
	case "k8s-pod", "k8s-pods":
		return "k8s-pods"
	case "k8s-deployment", "k8s-deployments":
		return "k8s-deployments"
	}
	return normalized
}
// canonicalQueryTopologyInclude normalizes the topology include filter,
// mapping the singular "app-container" alias onto "app-containers". All
// other values are returned lower-cased and trimmed.
func canonicalQueryTopologyInclude(include string) string {
	normalized := strings.ToLower(strings.TrimSpace(include))
	if normalized == "app-container" {
		return "app-containers"
	}
	return normalized
}
// canonicalQuerySearchType maps plural families and compatibility aliases
// onto the singular canonical kinds used by action=search. Unrecognized
// values are returned lower-cased and trimmed.
func canonicalQuerySearchType(typeFilter string) string {
	normalized := strings.ToLower(strings.TrimSpace(typeFilter))
	switch normalized {
	case "agent", "agents", "system", "systems":
		return "agent"
	case "system-containers":
		return "system-container"
	case "app-containers":
		return "app-container"
	case "docker-host", "docker-hosts":
		return "docker-host"
	case "storage", "storages", "storage-pool", "storage-pools":
		return "storage"
	case "physical-disks":
		return "physical-disk"
	}
	return normalized
}
// governedQueryMetadataResolver resolves governance metadata (policy and
// AI-safe summary) for query responses, memoizing lookups so repeated
// candidate identifiers are resolved at most once.
type governedQueryMetadataResolver struct {
	// rs is the unified read state used to resolve candidate identifiers.
	rs unifiedresources.ReadState
	// cache memoizes results keyed by the trimmed candidate string.
	cache map[string]GovernedResourceMetadata
}
// newGovernedQueryMetadataResolver builds a resolver backed by rs with an
// empty memoization cache.
func newGovernedQueryMetadataResolver(rs unifiedresources.ReadState) *governedQueryMetadataResolver {
	resolver := &governedQueryMetadataResolver{rs: rs}
	resolver.cache = make(map[string]GovernedResourceMetadata)
	return resolver
}
// Resolve returns governance metadata for the first candidate that yields a
// non-empty result (a policy or an AI-safe summary). Blank candidates are
// skipped, lookups are memoized per trimmed key, and the zero value is
// returned when nothing resolves (including on a nil resolver/state).
func (r *governedQueryMetadataResolver) Resolve(candidates ...string) GovernedResourceMetadata {
	if r == nil || r.rs == nil {
		return GovernedResourceMetadata{}
	}
	for _, candidate := range candidates {
		key := strings.TrimSpace(candidate)
		if key == "" {
			continue
		}
		metadata, cached := r.cache[key]
		if !cached {
			resolved := unifiedresources.ResolveResourceContext(r.rs, key)
			metadata = governedQueryMetadataFromResolvedResource(resolved.Resource)
			r.cache[key] = metadata
		}
		// Empty metadata (cached or fresh) means "keep trying candidates".
		if metadata.Policy != nil || metadata.AISafeSummary != "" {
			return metadata
		}
	}
	return GovernedResourceMetadata{}
}
// governedQueryMetadataFromResolvedResource extracts the canonical governance
// policy and AI-safe summary from a resolved resource into metadata form.
func governedQueryMetadataFromResolvedResource(resource *unifiedresources.Resource) GovernedResourceMetadata {
	policy, summary := unifiedresources.CanonicalGovernanceMetadata(resource)
	var metadata GovernedResourceMetadata
	metadata.Policy = policy
	metadata.AISafeSummary = summary
	return metadata
}
// metricPercent returns the metric's percent value, or 0 when the metric is absent.
func metricPercent(value *unifiedresources.MetricValue) float64 {
	if value != nil {
		return value.Percent
	}
	return 0
}
// metricUsedGB converts the metric's used-bytes figure to GiB, returning 0
// when the metric or its Used field is absent.
func metricUsedGB(value *unifiedresources.MetricValue) float64 {
	if value == nil || value.Used == nil {
		return 0
	}
	const bytesPerGiB = 1 << 30
	return float64(*value.Used) / bytesPerGiB
}
// metricTotalGB converts the metric's total-bytes figure to GiB, returning 0
// when the metric or its Total field is absent.
func metricTotalGB(value *unifiedresources.MetricValue) float64 {
	if value == nil || value.Total == nil {
		return 0
	}
	const bytesPerGiB = 1 << 30
	return float64(*value.Total) / bytesPerGiB
}
// firstNonEmptyString returns the first value that is non-empty after
// trimming surrounding whitespace (the trimmed form is returned), or ""
// when none qualifies.
func firstNonEmptyString(values ...string) string {
	for _, candidate := range values {
		if trimmed := strings.TrimSpace(candidate); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// canonicalResourcePlatform derives the platform label for a resource from
// its source-specific payloads, in priority order: TrueNAS (payload or tag),
// VMware, Proxmox, Docker, then the agent-reported platform (lower-cased).
// Returns "" when no platform can be determined.
func canonicalResourcePlatform(resource unifiedresources.Resource) string {
	if resource.TrueNAS != nil || resourceHasTag(resource, "truenas") {
		return "truenas"
	}
	if resource.VMware != nil {
		return "vmware-vsphere"
	}
	if resource.Proxmox != nil {
		return "proxmox"
	}
	if resource.Docker != nil {
		return "docker"
	}
	if resource.Agent != nil {
		if platform := strings.TrimSpace(resource.Agent.Platform); platform != "" {
			return strings.ToLower(platform)
		}
	}
	return ""
}
// resourceHasTag reports whether the resource carries the given tag,
// comparing case-insensitively after trimming whitespace.
func resourceHasTag(resource unifiedresources.Resource, needle string) bool {
	for _, candidate := range resource.Tags {
		if !strings.EqualFold(strings.TrimSpace(candidate), needle) {
			continue
		}
		return true
	}
	return false
}
// resourceDisplayName returns the canonical display name for a resource,
// falling back to the trimmed raw Name when none is available.
func resourceDisplayName(resource unifiedresources.Resource) string {
	name := unifiedresources.ResourceDisplayName(resource)
	if name == "" {
		name = strings.TrimSpace(resource.Name)
	}
	return name
}
// resourceHostCandidates gathers every hostname-like identifier for a
// resource: display name, per-source hostnames (agent, TrueNAS, Docker),
// Proxmox node name, and identity hostnames. Entries may be empty; callers
// are expected to filter blanks.
func resourceHostCandidates(resource unifiedresources.Resource) []string {
	candidates := make([]string, 0, 5+len(resource.Identity.Hostnames))
	candidates = append(candidates, resourceDisplayName(resource))
	if agent := resource.Agent; agent != nil {
		candidates = append(candidates, strings.TrimSpace(agent.Hostname))
	}
	if truenas := resource.TrueNAS; truenas != nil {
		candidates = append(candidates, strings.TrimSpace(truenas.Hostname))
	}
	if docker := resource.Docker; docker != nil {
		candidates = append(candidates, strings.TrimSpace(docker.Hostname))
	}
	if proxmox := resource.Proxmox; proxmox != nil {
		candidates = append(candidates, strings.TrimSpace(proxmox.NodeName))
	}
	return append(candidates, resource.Identity.Hostnames...)
}
// resourceAgentConnected reports whether any non-blank host candidate for the
// resource appears as connected in the provided lookup map.
func resourceAgentConnected(resource unifiedresources.Resource, connected map[string]bool) bool {
	for _, candidate := range resourceHostCandidates(resource) {
		key := strings.TrimSpace(candidate)
		if key != "" && connected[key] {
			return true
		}
	}
	return false
}
// canonicalAppContainerID returns the Docker container ID when available,
// otherwise the trimmed resource ID.
func canonicalAppContainerID(resource unifiedresources.Resource) string {
	if docker := resource.Docker; docker != nil {
		if id := strings.TrimSpace(docker.ContainerID); id != "" {
			return id
		}
	}
	return strings.TrimSpace(resource.ID)
}
// canonicalAppContainerState returns the Docker container state when
// available, otherwise the resource's generic status string.
func canonicalAppContainerState(resource unifiedresources.Resource) string {
	if docker := resource.Docker; docker != nil {
		if state := strings.TrimSpace(docker.ContainerState); state != "" {
			return state
		}
	}
	return string(resource.Status)
}
// canonicalAppContainerHost resolves the host an app container runs on: the
// parent name when set, otherwise the first non-blank identity hostname.
func canonicalAppContainerHost(resource unifiedresources.Resource) string {
	if host := strings.TrimSpace(resource.ParentName); host != "" {
		return host
	}
	for _, hostname := range resource.Identity.Hostnames {
		if trimmed := strings.TrimSpace(hostname); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
// matchCanonicalAppContainerResource checks whether resourceID refers to this
// app container — by container ID, display name, raw resource ID
// (case-insensitive), or a case-insensitive container-ID prefix. Returns the
// canonical container ID on a match.
func matchCanonicalAppContainerResource(resource unifiedresources.Resource, resourceID string) (string, bool) {
	query := strings.TrimSpace(resourceID)
	if query == "" {
		return "", false
	}
	containerID := canonicalAppContainerID(resource)
	switch {
	case strings.EqualFold(containerID, query),
		strings.EqualFold(resourceDisplayName(resource), query),
		strings.EqualFold(strings.TrimSpace(resource.ID), query),
		strings.HasPrefix(strings.ToLower(containerID), strings.ToLower(query)):
		return containerID, true
	}
	return "", false
}
// findCanonicalAppContainerResource scans all app-container resources for the
// first one matching resourceID, returning the resource and its canonical
// container ID. ok is false for a nil provider or no match.
func findCanonicalAppContainerResource(provider UnifiedResourceProvider, resourceID string) (unifiedresources.Resource, string, bool) {
	if provider == nil {
		return unifiedresources.Resource{}, "", false
	}
	for _, candidate := range provider.GetByType(unifiedresources.ResourceTypeAppContainer) {
		if containerID, ok := matchCanonicalAppContainerResource(candidate, resourceID); ok {
			return candidate, containerID, true
		}
	}
	return unifiedresources.Resource{}, "", false
}
// guestExecutorActions lists every action a guest (VM/system container)
// executor supports, including lifecycle operations.
func guestExecutorActions() []string {
	return []string{
		"query", "get", "logs", "console", "exec",
		"start", "stop", "shutdown", "restart", "delete",
	}
}
// readOnlyResourceActions lists the actions allowed on resources that only
// support inspection (no lifecycle or exec operations).
func readOnlyResourceActions() []string {
	return []string{"query", "get"}
}
// commandCapableAgentActions returns the exec-capable action set when the
// agent has command execution enabled, otherwise the read-only action set.
func commandCapableAgentActions(resource unifiedresources.Resource) []string {
	agent := resource.Agent
	if agent == nil || !agent.CommandsEnabled {
		return readOnlyResourceActions()
	}
	return []string{"query", "get", "exec"}
}
// canonicalResolvedProviderUID strips the "<kind>:" prefix from the resource
// ID when present. Otherwise it returns the first non-blank fallback, and
// finally the trimmed raw ID.
func canonicalResolvedProviderUID(kind string, resource unifiedresources.Resource, fallbacks ...string) string {
	id := strings.TrimSpace(resource.ID)
	prefix := strings.TrimSpace(kind) + ":"
	if strings.HasPrefix(id, prefix) {
		return strings.TrimSpace(id[len(prefix):])
	}
	for _, fallback := range fallbacks {
		if trimmed := strings.TrimSpace(fallback); trimmed != "" {
			return trimmed
		}
	}
	return id
}
// canonicalAgentHost picks the best hostname for an agent resource: the agent
// hostname, then the TrueNAS hostname, then the first non-blank identity
// hostname, finally the trimmed resource name.
func canonicalAgentHost(resource unifiedresources.Resource) string {
	if agent := resource.Agent; agent != nil {
		if host := strings.TrimSpace(agent.Hostname); host != "" {
			return host
		}
	}
	if truenas := resource.TrueNAS; truenas != nil {
		if host := strings.TrimSpace(truenas.Hostname); host != "" {
			return host
		}
	}
	for _, hostname := range resource.Identity.Hostnames {
		if trimmed := strings.TrimSpace(hostname); trimmed != "" {
			return trimmed
		}
	}
	return strings.TrimSpace(resource.Name)
}
// canonicalAgentManagedID builds the managed-object identifier for VMware
// host entities: "<connection>:host:<moid>" when both parts are present,
// otherwise just the moid. Non-VMware-host resources yield "".
func canonicalAgentManagedID(resource unifiedresources.Resource) string {
	vmware := resource.VMware
	if vmware == nil || !strings.EqualFold(strings.TrimSpace(vmware.EntityType), "host") {
		return ""
	}
	connectionID := strings.TrimSpace(vmware.ConnectionID)
	managedObjectID := strings.TrimSpace(vmware.ManagedObjectID)
	if connectionID == "" || managedObjectID == "" {
		return managedObjectID
	}
	return connectionID + ":host:" + managedObjectID
}
// canonicalAgentSearchCandidates collects the unique identifiers an agent can
// be searched by: display name, raw ID, VMware managed ID, platform, host
// candidates, agent/machine IDs, and VMware connection name/host UUID.
func canonicalAgentSearchCandidates(resource unifiedresources.Resource) []string {
	candidates := []string{
		resourceDisplayName(resource),
		strings.TrimSpace(resource.ID),
		canonicalAgentManagedID(resource),
		canonicalResourcePlatform(resource),
	}
	candidates = append(candidates, resourceHostCandidates(resource)...)
	if agent := resource.Agent; agent != nil {
		candidates = append(candidates, strings.TrimSpace(agent.AgentID), strings.TrimSpace(agent.MachineID))
	}
	if vmware := resource.VMware; vmware != nil {
		candidates = append(candidates, strings.TrimSpace(vmware.ConnectionName), strings.TrimSpace(vmware.HostUUID))
	}
	return appendUniqueStrings(nil, candidates...)
}
// canonicalAgentRegistration converts an agent resource into a
// ResourceRegistration. ok is false when no usable name or provider UID can
// be derived. Agents with command execution enabled get the "direct" adapter;
// otherwise the platform label is used.
func canonicalAgentRegistration(resource unifiedresources.Resource) (ResourceRegistration, bool) {
	name := strings.TrimSpace(resourceDisplayName(resource))
	providerUID := canonicalResolvedProviderUID("agent", resource, canonicalAgentManagedID(resource), canonicalAgentHost(resource))
	if name == "" || providerUID == "" {
		return ResourceRegistration{}, false
	}
	adapter := canonicalResourcePlatform(resource)
	if resource.Agent != nil && resource.Agent.CommandsEnabled {
		adapter = "direct"
	}
	executor := ExecutorRegistration{
		ExecutorID: firstNonEmptyString(canonicalAgentHost(resource), name, providerUID),
		Adapter:    adapter,
		Actions:    commandCapableAgentActions(resource),
		Priority:   10,
	}
	registration := ResourceRegistration{
		Kind:        "agent",
		ProviderUID: providerUID,
		Name:        name,
		Aliases:     canonicalAgentSearchCandidates(resource),
		Executors:   []ExecutorRegistration{executor},
	}
	return registration, true
}
// canonicalStorageManagedID builds the managed-object identifier for VMware
// datastore entities: "<connection>:datastore:<moid>" when both parts are
// present, otherwise just the moid. Non-datastore resources yield "".
func canonicalStorageManagedID(resource unifiedresources.Resource) string {
	vmware := resource.VMware
	if vmware == nil || !strings.EqualFold(strings.TrimSpace(vmware.EntityType), "datastore") {
		return ""
	}
	connectionID := strings.TrimSpace(vmware.ConnectionID)
	managedObjectID := strings.TrimSpace(vmware.ManagedObjectID)
	if connectionID == "" || managedObjectID == "" {
		return managedObjectID
	}
	return connectionID + ":datastore:" + managedObjectID
}
// canonicalStorageHost resolves the host for a storage resource: the parent
// name when set, otherwise the node the storage is attached to — but only
// when exactly one node is listed.
func canonicalStorageHost(resource unifiedresources.Resource) string {
	if parent := strings.TrimSpace(resource.ParentName); parent != "" {
		return parent
	}
	storage := resource.Storage
	if storage == nil || len(storage.Nodes) != 1 {
		return ""
	}
	return strings.TrimSpace(storage.Nodes[0])
}
// canonicalStorageSearchCandidates collects the identifiers a storage
// resource can be searched by: display name, raw ID, VMware managed ID,
// platform, host, storage type/content/path and node list, VMware
// connection/datacenter names, and tags — de-duplicated.
func canonicalStorageSearchCandidates(resource unifiedresources.Resource) []string {
	candidates := []string{
		resourceDisplayName(resource),
		strings.TrimSpace(resource.ID),
		canonicalStorageManagedID(resource),
		canonicalResourcePlatform(resource),
		canonicalStorageHost(resource),
	}
	if storage := resource.Storage; storage != nil {
		candidates = append(candidates, storage.Type, storage.Content, storage.Path)
		candidates = append(candidates, storage.Nodes...)
	}
	if vmware := resource.VMware; vmware != nil {
		candidates = append(candidates, strings.TrimSpace(vmware.ConnectionName), strings.TrimSpace(vmware.DatacenterName))
	}
	candidates = append(candidates, resource.Tags...)
	return appendUniqueStrings(nil, candidates...)
}
// canonicalStorageRegistration converts a storage resource into a read-only
// ResourceRegistration. ok is false when no usable name or provider UID can
// be derived.
func canonicalStorageRegistration(resource unifiedresources.Resource) (ResourceRegistration, bool) {
	name := strings.TrimSpace(resourceDisplayName(resource))
	providerUID := canonicalResolvedProviderUID("storage", resource, canonicalStorageManagedID(resource), name)
	if name == "" || providerUID == "" {
		return ResourceRegistration{}, false
	}
	executor := ExecutorRegistration{
		ExecutorID: firstNonEmptyString(canonicalStorageHost(resource), name, providerUID),
		Adapter:    canonicalResourcePlatform(resource),
		Actions:    readOnlyResourceActions(),
		Priority:   10,
	}
	return ResourceRegistration{
		Kind:        "storage",
		ProviderUID: providerUID,
		Name:        name,
		Aliases:     canonicalStorageSearchCandidates(resource),
		Executors:   []ExecutorRegistration{executor},
	}, true
}
// findCanonicalResourceByID scans resources for a case-insensitive match on
// the trimmed resource ID. ok is false for a blank query or no match.
func findCanonicalResourceByID(resources []unifiedresources.Resource, resourceID string) (unifiedresources.Resource, bool) {
	query := strings.TrimSpace(resourceID)
	if query == "" {
		return unifiedresources.Resource{}, false
	}
	// Index to avoid copying each (potentially large) resource per iteration.
	for i := range resources {
		if strings.EqualFold(strings.TrimSpace(resources[i].ID), query) {
			return resources[i], true
		}
	}
	return unifiedresources.Resource{}, false
}
// matchesCanonicalResourceReference reports whether resourceID matches the
// resource's canonical ID (via matchesCanonicalResourceID) or any of the
// additional candidate strings (trimmed, case-insensitive).
func matchesCanonicalResourceReference(resource unifiedresources.Resource, resourceID string, additionalCandidates ...string) bool {
	if matchesCanonicalResourceID(resource, resourceID) {
		return true
	}
	query := strings.TrimSpace(resourceID)
	if query == "" {
		return false
	}
	for _, candidate := range additionalCandidates {
		if strings.EqualFold(strings.TrimSpace(candidate), query) {
			return true
		}
	}
	return false
}
// canonicalGuestResourceType maps a guest kind string onto its unified
// resource type; ok is false for anything other than "vm" or
// "system-container".
func canonicalGuestResourceType(kind string) (unifiedresources.ResourceType, bool) {
	switch strings.TrimSpace(kind) {
	case "vm":
		return unifiedresources.ResourceTypeVM, true
	case "system-container":
		return unifiedresources.ResourceTypeSystemContainer, true
	}
	return "", false
}
// canonicalGuestManagedID returns the platform-managed identifier for a
// guest: the Proxmox VMID (as a decimal string) or the VMware managed-object
// ID. Returns "" when neither is available.
func canonicalGuestManagedID(resource unifiedresources.Resource) string {
	if proxmox := resource.Proxmox; proxmox != nil && proxmox.VMID > 0 {
		return strconv.Itoa(proxmox.VMID)
	}
	if vmware := resource.VMware; vmware != nil {
		return strings.TrimSpace(vmware.ManagedObjectID)
	}
	return ""
}
// canonicalGuestTarget resolves the host/node a guest runs on: the Proxmox
// node name, the best available VMware runtime host identifier, or the
// trimmed parent name.
func canonicalGuestTarget(resource unifiedresources.Resource) string {
	if proxmox := resource.Proxmox; proxmox != nil {
		return strings.TrimSpace(proxmox.NodeName)
	}
	if vmware := resource.VMware; vmware != nil {
		return firstNonEmptyString(
			vmware.RuntimeHostName,
			vmware.RuntimeHostID,
			resource.ParentName,
			vmware.ConnectionName,
		)
	}
	return strings.TrimSpace(resource.ParentName)
}
// canonicalGuestSearchCandidates collects the identifiers a guest can be
// searched by: display name, raw ID, managed ID, host/node, IP addresses,
// tags, and (for Proxmox guests) a "<kind><vmid>" convenience alias such as
// "vm101" or "system-container204".
func canonicalGuestSearchCandidates(kind string, resource unifiedresources.Resource) []string {
	candidates := []string{
		resourceDisplayName(resource),
		strings.TrimSpace(resource.ID),
	}
	if managedID := canonicalGuestManagedID(resource); managedID != "" {
		candidates = append(candidates, managedID)
	}
	if host := canonicalGuestTarget(resource); host != "" {
		candidates = append(candidates, host)
	}
	candidates = append(candidates, resource.Identity.IPAddresses...)
	candidates = append(candidates, resource.Tags...)
	if proxmox := resource.Proxmox; proxmox != nil && proxmox.VMID > 0 {
		vmid := strconv.Itoa(proxmox.VMID)
		switch strings.TrimSpace(kind) {
		case "vm":
			candidates = append(candidates, "vm"+vmid)
		case "system-container":
			candidates = append(candidates, "system-container"+vmid)
		}
	}
	return candidates
}
// canonicalGuestMatchesResourceID reports whether resourceID (trimmed,
// case-insensitive) matches any search candidate for the guest. A blank
// query never matches.
func canonicalGuestMatchesResourceID(kind, resourceID string, resource unifiedresources.Resource) bool {
	query := strings.TrimSpace(resourceID)
	if query == "" {
		return false
	}
	for _, candidate := range canonicalGuestSearchCandidates(kind, resource) {
		if !strings.EqualFold(strings.TrimSpace(candidate), query) {
			continue
		}
		return true
	}
	return false
}
// findCanonicalGuestResource locates the first guest of the given kind whose
// search candidates match resourceID. ok is false for a nil provider, an
// unknown kind, or no match.
func findCanonicalGuestResource(provider UnifiedResourceProvider, kind, resourceID string) (unifiedresources.Resource, bool) {
	if provider == nil {
		return unifiedresources.Resource{}, false
	}
	resourceType, ok := canonicalGuestResourceType(kind)
	if !ok {
		return unifiedresources.Resource{}, false
	}
	for _, candidate := range provider.GetByType(resourceType) {
		if canonicalGuestMatchesResourceID(kind, resourceID, candidate) {
			return candidate, true
		}
	}
	return unifiedresources.Resource{}, false
}
// canonicalGuestRegistration converts a guest resource (VM or system
// container) into a ResourceRegistration for the resolver.
//
// Returns false when the kind, name, or provider UID cannot be derived, or
// when a Proxmox guest lacks its payload/node (both required to route qm/pct
// commands).
func canonicalGuestRegistration(kind string, resource unifiedresources.Resource) (ResourceRegistration, bool) {
	kind = strings.TrimSpace(kind)
	name := strings.TrimSpace(resourceDisplayName(resource))
	// Prefer the platform-managed ID (Proxmox VMID / VMware moid); fall back
	// to the raw resource ID.
	providerUID := firstNonEmptyString(canonicalGuestManagedID(resource), resource.ID)
	if kind == "" || name == "" || providerUID == "" {
		return ResourceRegistration{}, false
	}
	host := canonicalGuestTarget(resource)
	aliases := appendUniqueStrings(nil, canonicalGuestSearchCandidates(kind, resource)...)
	reg := ResourceRegistration{
		Kind:        kind,
		ProviderUID: providerUID,
		Name:        name,
		Aliases:     aliases,
		HostUID:     host,
		HostName:    host,
		Node:        host,
	}
	// The location chain expresses parent→child placement for the resolver.
	switch kind {
	case "vm":
		reg.LocationChain = []string{"agent:" + firstNonEmptyString(host, name), "vm:" + name}
	case "system-container":
		reg.LocationChain = []string{"agent:" + firstNonEmptyString(host, name), "system-container:" + name}
	}
	switch canonicalResourcePlatform(resource) {
	case "proxmox":
		// Proxmox guests need the payload and a node; the node-derived
		// fields below overwrite the generic host values set above.
		if resource.Proxmox == nil || host == "" {
			return ResourceRegistration{}, false
		}
		reg.VMID = resource.Proxmox.VMID
		reg.HostUID = strings.TrimSpace(resource.Proxmox.NodeName)
		reg.HostName = strings.TrimSpace(resource.Proxmox.NodeName)
		reg.Node = strings.TrimSpace(resource.Proxmox.NodeName)
		// VMs are driven via qm, system containers via pct.
		adapter := "qm"
		if kind == "system-container" {
			adapter = "pct"
		}
		reg.Executors = []ExecutorRegistration{{
			ExecutorID: reg.Node,
			Adapter:    adapter,
			Actions:    guestExecutorActions(),
			Priority:   10,
		}}
	default:
		// Non-Proxmox guests get a read-only executor on their platform adapter.
		executorID := firstNonEmptyString(host, name)
		reg.Executors = []ExecutorRegistration{{
			ExecutorID: executorID,
			Adapter:    canonicalResourcePlatform(resource),
			Actions:    readOnlyResourceActions(),
			Priority:   10,
		}}
	}
	return reg, true
}
// canonicalGuestResponse renders a guest resource (VM/system container) as a
// ResourceResponse, resolving governance metadata from every known alias and
// layering platform-specific overrides (Proxmox, then VMware) on top of the
// generic metric-derived fields.
func canonicalGuestResponse(kind string, resource unifiedresources.Resource, governance *governedQueryMetadataResolver) ResourceResponse {
	candidates := append([]string{resourceDisplayName(resource), resource.ID}, canonicalGuestSearchCandidates(kind, resource)...)
	response := EmptyResourceResponse()
	response.GovernedResourceMetadata = governance.Resolve(candidates...)
	response.Type = kind
	response.ID = firstNonEmptyString(resource.ID, canonicalGuestManagedID(resource))
	response.Name = resourceDisplayName(resource)
	response.Status = string(resource.Status)
	response.Platform = canonicalResourcePlatform(resource)
	response.Node = canonicalGuestTarget(resource)
	response.Host = canonicalGuestTarget(resource)
	response.CPU = ResourceCPU{
		Percent: metricPercent(resourceMetric(resource, "cpu")),
	}
	response.Memory = ResourceMemory{
		Percent: metricPercent(resourceMetric(resource, "memory")),
		UsedGB:  metricUsedGB(resourceMetric(resource, "memory")),
		TotalGB: metricTotalGB(resourceMetric(resource, "memory")),
	}
	// Disk is optional: only populated when a disk metric exists.
	if diskMetric := resourceMetric(resource, "disk"); diskMetric != nil {
		response.Disk = &ResourceDisk{
			Percent: metricPercent(diskMetric),
			UsedGB:  metricUsedGB(diskMetric),
			TotalGB: metricTotalGB(diskMetric),
		}
	}
	response.Tags = append([]string{}, resource.Tags...)
	// Proxmox payload overrides: node, core count, OS, last backup.
	if resource.Proxmox != nil {
		response.ID = firstNonEmptyString(resource.ID, resource.Proxmox.SourceID)
		response.Node = strings.TrimSpace(resource.Proxmox.NodeName)
		response.CPU.Cores = resource.Proxmox.CPUs
		response.OS = strings.TrimSpace(resource.Proxmox.OSName)
		if !resource.Proxmox.LastBackup.IsZero() {
			// Copy before taking the address so the response doesn't alias
			// the resource's field.
			t := resource.Proxmox.LastBackup
			response.LastBackup = &t
		}
	}
	// VMware payload fills in only what is still missing (zero/empty fields).
	if resource.VMware != nil {
		response.ID = firstNonEmptyString(resource.ID, resource.VMware.ManagedObjectID)
		response.Node = firstNonEmptyString(response.Node, resource.VMware.RuntimeHostName, resource.ParentName)
		response.Host = firstNonEmptyString(response.Host, resource.VMware.RuntimeHostName, resource.ParentName)
		if response.CPU.Cores == 0 {
			response.CPU.Cores = resource.VMware.CPUCount
		}
		if response.Memory.TotalGB == 0 && resource.VMware.MemorySizeMiB > 0 {
			response.Memory.TotalGB = float64(resource.VMware.MemorySizeMiB) / 1024
		}
		if response.OS == "" {
			response.OS = strings.TrimSpace(resource.VMware.GuestOSFamily)
		}
	}
	return response.NormalizeCollections()
}
// canonicalAgentResponse renders an agent resource as a ResourceResponse,
// resolving governance metadata from the agent's search candidates and
// filling metrics, OS, and host details.
func canonicalAgentResponse(resource unifiedresources.Resource, governance *governedQueryMetadataResolver) ResourceResponse {
	candidates := canonicalAgentSearchCandidates(resource)
	response := EmptyResourceResponse()
	response.GovernedResourceMetadata = governance.Resolve(candidates...)
	response.Type = "agent"
	response.ID = strings.TrimSpace(resource.ID)
	response.Name = resourceDisplayName(resource)
	response.Status = string(resource.Status)
	response.Platform = canonicalResourcePlatform(resource)
	response.Host = canonicalAgentHost(resource)
	response.CPU = ResourceCPU{
		Percent: metricPercent(resourceMetric(resource, "cpu")),
	}
	response.Memory = ResourceMemory{
		Percent: metricPercent(resourceMetric(resource, "memory")),
		UsedGB:  metricUsedGB(resourceMetric(resource, "memory")),
		TotalGB: metricTotalGB(resourceMetric(resource, "memory")),
	}
	// Disk is optional: only populated when a disk metric exists.
	if diskMetric := resourceMetric(resource, "disk"); diskMetric != nil {
		response.Disk = &ResourceDisk{
			Percent: metricPercent(diskMetric),
			UsedGB:  metricUsedGB(diskMetric),
			TotalGB: metricTotalGB(diskMetric),
		}
	}
	response.Tags = append([]string{}, resource.Tags...)
	if resource.Agent != nil {
		response.OS = strings.TrimSpace(resource.Agent.OSName)
		response.CPU.Cores = resource.Agent.CPUCount
	}
	// NOTE(review): canonicalAgentHost already falls back to the TrueNAS
	// hostname and other candidates, so these two fallbacks appear redundant
	// (Host was set from it above) — confirm before removing.
	if response.Host == "" && resource.TrueNAS != nil {
		response.Host = strings.TrimSpace(resource.TrueNAS.Hostname)
	}
	if response.Host == "" {
		response.Host = canonicalAgentHost(resource)
	}
	return response.NormalizeCollections()
}
// canonicalStorageResponse renders a storage resource as a ResourceResponse,
// resolving governance metadata from the storage search candidates and
// filling host/node, the optional disk metric, and tags.
func canonicalStorageResponse(resource unifiedresources.Resource, governance *governedQueryMetadataResolver) ResourceResponse {
	response := EmptyResourceResponse()
	response.GovernedResourceMetadata = governance.Resolve(canonicalStorageSearchCandidates(resource)...)
	response.Type = "storage"
	response.ID = strings.TrimSpace(resource.ID)
	response.Name = resourceDisplayName(resource)
	response.Status = string(resource.Status)
	response.Platform = canonicalResourcePlatform(resource)
	host := canonicalStorageHost(resource)
	response.Host = host
	if host != "" {
		// Storage is node-scoped when a host is known.
		response.Node = host
	}
	if diskMetric := resourceMetric(resource, "disk"); diskMetric != nil {
		response.Disk = &ResourceDisk{
			Percent: metricPercent(diskMetric),
			UsedGB:  metricUsedGB(diskMetric),
			TotalGB: metricTotalGB(diskMetric),
		}
	}
	response.Tags = append([]string{}, resource.Tags...)
	return response.NormalizeCollections()
}
// canonicalAppContainerAdapter maps an app-container resource to its adapter
// identifier: "docker" and "truenas" pass through unchanged; anything else is
// returned as the trimmed platform string.
func canonicalAppContainerAdapter(resource unifiedresources.Resource) string {
	platform := canonicalResourcePlatform(resource)
	switch platform {
	case "docker", "truenas":
		return platform
	default:
		return strings.TrimSpace(platform)
	}
}
// canonicalCapabilityActions returns the lowercase, trimmed capability names
// of the resource, de-duplicated and with empty names dropped, preserving the
// order in which capabilities appear.
func canonicalCapabilityActions(resource unifiedresources.Resource) []string {
	dedup := make(map[string]struct{})
	names := make([]string, 0, len(resource.Capabilities))
	for _, capability := range resource.Capabilities {
		action := strings.ToLower(strings.TrimSpace(capability.Name))
		if action == "" {
			continue
		}
		if _, duplicate := dedup[action]; duplicate {
			continue
		}
		dedup[action] = struct{}{}
		names = append(names, action)
	}
	return names
}
// appendUniqueStrings appends the trimmed, non-empty entries of values to
// base, skipping any value whose trimmed form already appears (trimmed) in
// base or earlier in values. Existing base entries are left untouched, and
// appended entries are stored in their trimmed form.
func appendUniqueStrings(base []string, values ...string) []string {
	existing := make(map[string]struct{}, len(base))
	for _, entry := range base {
		if trimmed := strings.TrimSpace(entry); trimmed != "" {
			existing[trimmed] = struct{}{}
		}
	}
	for _, candidate := range values {
		trimmed := strings.TrimSpace(candidate)
		if trimmed == "" {
			continue
		}
		if _, duplicate := existing[trimmed]; duplicate {
			continue
		}
		existing[trimmed] = struct{}{}
		base = append(base, trimmed)
	}
	return base
}
// canonicalAppContainerActions returns the action names supported by an
// app-container resource: the baseline query/get actions, platform-specific
// control actions, and any additional capability-declared actions (deduped).
func canonicalAppContainerActions(resource unifiedresources.Resource) []string {
	base := []string{"query", "get"}
	platform := canonicalResourcePlatform(resource)
	switch platform {
	case "docker":
		base = append(base, "logs", "exec", "restart", "stop", "start")
	case "truenas":
		base = append(base, "restart", "stop", "start")
	}
	return appendUniqueStrings(base, canonicalCapabilityActions(resource)...)
}
// resolvedAppContainerRegistration converts an app-container resource into a
// ResourceRegistration for the resolver. It reports false when any required
// identity field (container ID, display name, host, adapter) is empty.
func resolvedAppContainerRegistration(resource unifiedresources.Resource) (ResourceRegistration, bool) {
	containerID := strings.TrimSpace(canonicalAppContainerID(resource))
	name := strings.TrimSpace(resourceDisplayName(resource))
	host := strings.TrimSpace(canonicalAppContainerHost(resource))
	adapter := strings.TrimSpace(canonicalAppContainerAdapter(resource))
	if containerID == "" || name == "" || host == "" || adapter == "" {
		return ResourceRegistration{}, false
	}
	aliases := appendUniqueStrings(nil, name, containerID, strings.TrimSpace(resource.ID))
	// Docker containers are commonly referenced by the 12-character short ID.
	if adapter == "docker" && len(containerID) > 12 {
		aliases = appendUniqueStrings(aliases, containerID[:12])
	}
	var locationChain []string
	switch adapter {
	case "docker":
		locationChain = []string{"docker-host:" + host, "docker:" + name}
	case "truenas":
		locationChain = []string{"system:" + host, "app:" + name}
	default:
		locationChain = []string{"app-container:" + name}
	}
	return ResourceRegistration{
		Kind:          "app-container",
		ProviderUID:   containerID,
		Name:          name,
		Aliases:       aliases,
		HostUID:       host,
		HostName:      host,
		LocationChain: locationChain,
		Executors: []ExecutorRegistration{{
			ExecutorID: host,
			Adapter:    adapter,
			Actions:    canonicalAppContainerActions(resource),
			Priority:   10,
		}},
	}, true
}
// canonicalSystemSummaryFromResource builds the list-view summary of a system
// resource; connected maps agent hostnames to their connectivity status.
func canonicalSystemSummaryFromResource(resource unifiedresources.Resource, connected map[string]bool) SystemSummary {
	summary := SystemSummary{
		ID:             strings.TrimSpace(resource.ID),
		Name:           resourceDisplayName(resource),
		Status:         string(resource.Status),
		Platform:       canonicalResourcePlatform(resource),
		ChildCount:     resource.ChildCount,
		AgentConnected: resourceAgentConnected(resource, connected),
	}
	summary.CPU = metricPercent(resourceMetric(resource, "cpu"))
	summary.Memory = metricPercent(resourceMetric(resource, "memory"))
	summary.Disk = metricPercent(resourceMetric(resource, "disk"))
	return summary
}
// canonicalAppContainerSummaryFromResource builds the list-view summary of an
// app-container resource. Docker-specific health and image details are only
// populated when the resource carries Docker metadata.
func canonicalAppContainerSummaryFromResource(resource unifiedresources.Resource) AppContainerSummary {
	var health, image string
	if docker := resource.Docker; docker != nil {
		health = strings.TrimSpace(docker.Health)
		image = strings.TrimSpace(docker.Image)
	}
	return AppContainerSummary{
		ID:       canonicalAppContainerID(resource),
		Name:     resourceDisplayName(resource),
		Status:   canonicalAppContainerState(resource),
		Host:     canonicalAppContainerHost(resource),
		Platform: canonicalResourcePlatform(resource),
		Image:    image,
		Health:   health,
		CPU:      metricPercent(resourceMetric(resource, "cpu")),
		Memory:   metricPercent(resourceMetric(resource, "memory")),
		Disk:     metricPercent(resourceMetric(resource, "disk")),
	}
}
// canonicalStoragePoolResources returns the provider's storage resources,
// sorted by display name, with dataset-topology entries filtered out so only
// pool-level storage remains. A nil provider yields an empty slice.
func canonicalStoragePoolResources(provider UnifiedResourceProvider) []unifiedresources.Resource {
	if provider == nil {
		return []unifiedresources.Resource{}
	}
	pools := make([]unifiedresources.Resource, 0)
	for _, candidate := range sortedResourcesByName(provider.GetByType(unifiedresources.ResourceTypeStorage)) {
		if candidate.Storage != nil && strings.EqualFold(strings.TrimSpace(candidate.Storage.Topology), "dataset") {
			continue
		}
		pools = append(pools, candidate)
	}
	return pools
}
// canonicalPhysicalDiskHost picks the host name for a physical-disk resource:
// the first non-empty identity hostname, falling back to the parent name.
func canonicalPhysicalDiskHost(resource unifiedresources.Resource) string {
	for _, hostname := range resource.Identity.Hostnames {
		if trimmed := strings.TrimSpace(hostname); trimmed != "" {
			return trimmed
		}
	}
	return strings.TrimSpace(resource.ParentName)
}
// physicalDiskSummaryFromResource builds the list-view summary of a physical
// disk. When the resource carries no PhysicalDisk detail, only the ID, node,
// and last-seen timestamp are populated.
func physicalDiskSummaryFromResource(resource unifiedresources.Resource) PhysicalDiskSummary {
	summary := PhysicalDiskSummary{
		ID:          resource.ID,
		Node:        canonicalPhysicalDiskHost(resource),
		LastChecked: resource.LastSeen,
	}
	disk := resource.PhysicalDisk
	if disk == nil {
		return summary
	}
	summary.DevPath = disk.DevPath
	summary.Model = disk.Model
	summary.Serial = disk.Serial
	summary.WWN = disk.WWN
	summary.Type = disk.DiskType
	summary.SizeBytes = disk.SizeBytes
	summary.Health = disk.Health
	summary.Used = disk.Used
	// Optional readings are exposed as pointers so absent values (negative
	// wearout, zero temperature/RPM) serialize as missing rather than zero.
	if disk.Wearout >= 0 {
		wearout := disk.Wearout
		summary.Wearout = &wearout
	}
	if disk.Temperature > 0 {
		temperature := disk.Temperature
		summary.Temperature = &temperature
	}
	if disk.RPM > 0 {
		rpm := disk.RPM
		summary.RPM = &rpm
	}
	return summary
}
// resourceMetric returns the named metric ("cpu", "memory", or "disk") from
// the resource, or nil when metrics are absent or the kind is unknown.
func resourceMetric(resource unifiedresources.Resource, kind string) *unifiedresources.MetricValue {
	metrics := resource.Metrics
	if metrics == nil {
		return nil
	}
	switch kind {
	case "cpu":
		return metrics.CPU
	case "memory":
		return metrics.Memory
	case "disk":
		return metrics.Disk
	}
	return nil
}
// sortedResourcesByName returns a copy of resources ordered by
// case-insensitive display name; the input slice is left unmodified.
func sortedResourcesByName(resources []unifiedresources.Resource) []unifiedresources.Resource {
	ordered := append([]unifiedresources.Resource(nil), resources...)
	sort.Slice(ordered, func(a, b int) bool {
		left := strings.ToLower(resourceDisplayName(ordered[a]))
		right := strings.ToLower(resourceDisplayName(ordered[b]))
		return left < right
	})
	return ordered
}
// matchesCanonicalResourceID reports whether resourceID (trimmed) matches the
// resource's ID or display name, case-insensitively. An empty identifier
// never matches.
func matchesCanonicalResourceID(resource unifiedresources.Resource, resourceID string) bool {
	target := strings.TrimSpace(resourceID)
	if target == "" {
		return false
	}
	return strings.EqualFold(strings.TrimSpace(resource.ID), target) ||
		strings.EqualFold(resourceDisplayName(resource), target)
}
// executeQuery routes to the appropriate query handler based on action
func (e *PulseToolExecutor) executeQuery(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
	action, _ := args["action"].(string)
	var handler func(context.Context, map[string]interface{}) (CallToolResult, error)
	switch action {
	case "search":
		handler = e.executeSearchResources
	case "get":
		handler = e.executeGetResource
	case "config":
		handler = e.executeGetResourceConfig
	case "topology":
		handler = e.executeGetTopology
	case "list":
		handler = e.executeListInfrastructure
	case "health":
		handler = e.executeGetConnectionHealth
	default:
		return NewErrorResult(fmt.Errorf("unknown action: %s. Use: search, get, config, topology, list, health", action)), nil
	}
	return handler(ctx, args)
}
// executeListInfrastructure handles the "list" query action. It assembles an
// InfrastructureResponse covering every resource category (systems, Proxmox
// nodes/VMs/LXC containers, app containers, Docker hosts, storage pools,
// physical disks, and Kubernetes clusters/nodes/pods/deployments), applying
// optional type/status/cluster/namespace filters and limit/offset pagination
// within each category. Pagination totals are only reported when a single
// type filter is in effect. The context parameter is unused.
func (e *PulseToolExecutor) executeListInfrastructure(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
	rs, err := e.readStateForControl()
	if err != nil {
		return NewErrorResult(err), nil
	}
	governance := newGovernedQueryMetadataResolver(rs)
	filterType, _ := args["type"].(string)
	filterType = canonicalQueryListType(filterType)
	filterStatus, _ := args["status"].(string)
	clusterNameFilter, _ := args["cluster_name"].(string)
	namespaceFilter, _ := args["namespace"].(string)
	limit := intArg(args, "limit", 100)
	offset := intArg(args, "offset", 0)
	maxDockerContainersPerHost := intArg(args, "max_docker_containers_per_host", 0)
	// Apply the default per-host cap only when the argument was omitted
	// entirely; an explicit 0 from the caller means "uncapped".
	if _, ok := args["max_docker_containers_per_host"]; !ok {
		maxDockerContainersPerHost = defaultMaxListDockerContainersPerHost
	}
	if limit <= 0 {
		limit = 100
	}
	if offset < 0 {
		offset = 0
	}
	clusterNameFilter = strings.TrimSpace(clusterNameFilter)
	namespaceFilter = strings.TrimSpace(namespaceFilter)
	// Case-insensitive cluster/namespace matchers; an empty filter matches
	// everything.
	matchesCluster := func(clusterName string) bool {
		if clusterNameFilter == "" {
			return true
		}
		return strings.EqualFold(strings.TrimSpace(clusterName), clusterNameFilter)
	}
	matchesNamespace := func(namespace string) bool {
		if namespaceFilter == "" {
			return true
		}
		return strings.EqualFold(strings.TrimSpace(namespace), namespaceFilter)
	}
	normalizeClusterKey := func(clusterName string) string {
		return strings.ToLower(strings.TrimSpace(clusterName))
	}
	// Build a set of connected agent hostnames for quick lookup
	connectedAgentHostnames := make(map[string]bool)
	if e.agentServer != nil {
		for _, agent := range e.agentServer.GetConnectedAgents() {
			connectedAgentHostnames[agent.Hostname] = true
		}
	}
	// Unified-resource views; left empty when no provider is configured so
	// the legacy state fallbacks below take over.
	systemResources := []unifiedresources.Resource{}
	appContainerResources := []unifiedresources.Resource{}
	storagePoolResources := []unifiedresources.Resource{}
	physicalDiskResources := []unifiedresources.Resource{}
	if e.unifiedResourceProvider != nil {
		systemResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeAgent))
		appContainerResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeAppContainer))
		physicalDiskResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypePhysicalDisk))
		storagePoolResources = canonicalStoragePoolResources(e.unifiedResourceProvider)
	}
	response := EmptyInfrastructureResponse()
	response.Total = TotalCounts{
		Systems:        len(systemResources),
		Nodes:          len(rs.Nodes()),
		VMs:            len(rs.VMs()),
		Containers:     len(rs.Containers()),
		AppContainers:  len(appContainerResources),
		DockerHosts:    len(rs.DockerHosts()),
		StoragePools:   len(storagePoolResources),
		PhysicalDisks:  len(physicalDiskResources),
		K8sClusters:    len(rs.K8sClusters()),
		K8sNodes:       len(rs.K8sNodes()),
		K8sPods:        len(rs.Pods()),
		K8sDeployments: len(rs.K8sDeployments()),
	}
	// When the unified provider reported nothing, fall back to counts from
	// the legacy state views so totals are still meaningful.
	if response.Total.Systems == 0 {
		response.Total.Systems = len(rs.Nodes()) + len(rs.Hosts()) + len(rs.DockerHosts())
	}
	if response.Total.AppContainers == 0 {
		response.Total.AppContainers = len(rs.DockerContainers())
	}
	if response.Total.StoragePools == 0 {
		response.Total.StoragePools = len(rs.StoragePools())
	}
	if response.Total.PhysicalDisks == 0 {
		response.Total.PhysicalDisks = len(rs.PhysicalDisks())
	}
	totalMatches := 0
	// Pre-aggregate Kubernetes child counts per (normalized) cluster name so
	// cluster summaries can report them without re-scanning.
	nodeCountByCluster := make(map[string]int)
	deploymentCountByCluster := make(map[string]int)
	podCountByCluster := make(map[string]int)
	for _, node := range rs.K8sNodes() {
		if node == nil {
			continue
		}
		nodeCountByCluster[normalizeClusterKey(node.ClusterName())]++
	}
	for _, deployment := range rs.K8sDeployments() {
		if deployment == nil {
			continue
		}
		deploymentCountByCluster[normalizeClusterKey(deployment.ClusterName())]++
	}
	for _, pod := range rs.Pods() {
		if pod == nil {
			continue
		}
		podCountByCluster[normalizeClusterKey(pod.ClusterName())]++
	}
	dockerHostsByID := make(map[string]*unifiedresources.DockerHostView)
	for _, host := range rs.DockerHosts() {
		if host == nil {
			continue
		}
		dockerHostsByID[host.ID()] = host
	}
	// Systems
	// Pagination pattern used by every section below: count advances for
	// every status-matched entry (so totalMatches stays accurate), while
	// entries before offset or beyond limit are counted but not emitted.
	if filterType == "" || filterType == "systems" {
		count := 0
		for _, resource := range systemResources {
			if !statusMatchesFilter(string(resource.Status), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.Systems) >= limit {
				count++
				continue
			}
			summary := canonicalSystemSummaryFromResource(resource, connectedAgentHostnames)
			summary.GovernedResourceMetadata = governance.Resolve(resourceDisplayName(resource), resource.ID)
			response.Systems = append(response.Systems, summary)
			count++
		}
		if filterType == "systems" {
			totalMatches = count
		}
	}
	// Nodes
	if filterType == "" || filterType == "nodes" {
		count := 0
		for _, node := range rs.Nodes() {
			if !statusMatchesFilter(string(node.Status()), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.Nodes) >= limit {
				count++
				continue
			}
			response.Nodes = append(response.Nodes, NodeSummary{
				GovernedResourceMetadata: governance.Resolve(node.Name(), node.ID()),
				Name:                     node.Name(),
				Status:                   string(node.Status()),
				AgentConnected:           connectedAgentHostnames[node.Name()],
			})
			count++
		}
		if filterType == "nodes" {
			totalMatches = count
		}
	}
	// VMs
	if filterType == "" || filterType == "vms" {
		count := 0
		for _, vm := range rs.VMs() {
			if !statusMatchesFilter(string(vm.Status()), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.VMs) >= limit {
				count++
				continue
			}
			response.VMs = append(response.VMs, VMSummary{
				GovernedResourceMetadata: governance.Resolve(vm.Name(), vm.ID(), fmt.Sprintf("%d", vm.VMID())),
				VMID:                     vm.VMID(),
				Name:                     vm.Name(),
				Status:                   string(vm.Status()),
				Node:                     vm.Node(),
				CPU:                      vm.CPUPercent(),
				Memory:                   vm.MemoryPercent(),
			})
			count++
		}
		if filterType == "vms" {
			totalMatches = count
		}
	}
	// Containers (LXC)
	if filterType == "" || filterType == "system-containers" {
		count := 0
		for _, ct := range rs.Containers() {
			if !statusMatchesFilter(string(ct.Status()), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.Containers) >= limit {
				count++
				continue
			}
			response.Containers = append(response.Containers, ContainerSummary{
				GovernedResourceMetadata: governance.Resolve(ct.Name(), ct.ID(), fmt.Sprintf("%d", ct.VMID())),
				VMID:                     ct.VMID(),
				Name:                     ct.Name(),
				Status:                   string(ct.Status()),
				Node:                     ct.Node(),
				CPU:                      ct.CPUPercent(),
				Memory:                   ct.MemoryPercent(),
			})
			count++
		}
		if filterType == "system-containers" {
			totalMatches = count
		}
	}
	// App containers
	if filterType == "" || filterType == "app-containers" {
		count := 0
		for _, resource := range appContainerResources {
			status := canonicalAppContainerState(resource)
			if !statusMatchesFilter(status, filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.AppContainers) >= limit {
				count++
				continue
			}
			summary := canonicalAppContainerSummaryFromResource(resource)
			summary.GovernedResourceMetadata = governance.Resolve(resourceDisplayName(resource), resource.ID, canonicalAppContainerID(resource))
			response.AppContainers = append(response.AppContainers, summary)
			count++
		}
		// Legacy fallback: when the unified provider has no app containers,
		// list Docker containers from state directly instead.
		if len(appContainerResources) == 0 {
			for _, container := range rs.DockerContainers() {
				if container == nil {
					continue
				}
				state := strings.TrimSpace(container.ContainerState())
				if state == "" {
					state = string(container.Status())
				}
				if !statusMatchesFilter(state, filterStatus) {
					continue
				}
				if count < offset {
					count++
					continue
				}
				if len(response.AppContainers) >= limit {
					count++
					continue
				}
				hostName := ""
				if host := dockerHostsByID[strings.TrimSpace(container.ParentID())]; host != nil {
					hostName = strings.TrimSpace(host.Hostname())
					if hostName == "" {
						hostName = strings.TrimSpace(host.Name())
					}
				}
				containerID := strings.TrimSpace(container.ContainerID())
				if containerID == "" {
					containerID = strings.TrimSpace(container.ID())
				}
				response.AppContainers = append(response.AppContainers, AppContainerSummary{
					GovernedResourceMetadata: governance.Resolve(container.Name(), container.ID(), containerID),
					ID:                       containerID,
					Name:                     container.Name(),
					Status:                   state,
					Host:                     hostName,
					Platform:                 "docker",
					Image:                    container.Image(),
					Health:                   container.Health(),
					CPU:                      container.CPUPercent(),
					Memory:                   container.MemoryPercent(),
				})
				count++
			}
		}
		if filterType == "app-containers" {
			totalMatches = count
		}
	}
	// Docker hosts
	if filterType == "" || filterType == "docker-hosts" {
		containersByHost := make(map[string][]*unifiedresources.DockerContainerView)
		for _, container := range rs.DockerContainers() {
			if container == nil {
				continue
			}
			parentID := strings.TrimSpace(container.ParentID())
			if parentID == "" {
				continue
			}
			containersByHost[parentID] = append(containersByHost[parentID], container)
		}
		count := 0
		for _, host := range rs.DockerHosts() {
			if host == nil {
				continue
			}
			hostContainers := containersByHost[host.ID()]
			if count < offset {
				count++
				continue
			}
			if len(response.DockerHosts) >= limit {
				count++
				continue
			}
			hostname := strings.TrimSpace(host.Hostname())
			displayName := strings.TrimSpace(host.Name())
			if hostname == "" {
				hostname = displayName
			}
			dockerHost := DockerHostSummary{
				GovernedResourceMetadata: governance.Resolve(host.Hostname(), host.Name(), host.HostSourceID(), host.ID()),
				ID:                       host.ID(),
				Hostname:                 hostname,
				DisplayName:              displayName,
				ContainerCount:           len(hostContainers),
				AgentConnected:           connectedAgentHostnames[hostname] || connectedAgentHostnames[displayName],
			}
			// The status filter applies to the host's containers, not the
			// host itself; per-host container output is optionally capped.
			for _, container := range hostContainers {
				state := strings.TrimSpace(container.ContainerState())
				if state == "" {
					state = string(container.Status())
				}
				if !statusMatchesFilter(state, filterStatus) {
					continue
				}
				if maxDockerContainersPerHost > 0 && len(dockerHost.Containers) >= maxDockerContainersPerHost {
					continue
				}
				dockerHost.Containers = append(dockerHost.Containers, DockerContainerSummary{
					GovernedResourceMetadata: governance.Resolve(container.Name(), container.ID(), container.ContainerID()),
					ID:                       container.ID(),
					Name:                     container.Name(),
					State:                    state,
					Image:                    container.Image(),
					Health:                   container.Health(),
				})
			}
			// Drop hosts with no matching containers when a concrete status
			// filter is active (these are skipped without advancing count).
			if filterStatus != "" && filterStatus != "all" && len(dockerHost.Containers) == 0 {
				continue
			}
			response.DockerHosts = append(response.DockerHosts, dockerHost)
			count++
		}
		if filterType == "docker-hosts" {
			totalMatches = count
		}
	}
	// Storage pools
	if filterType == "" || filterType == "storage-pools" {
		count := 0
		for _, resource := range storagePoolResources {
			if !statusMatchesFilter(string(resource.Status), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.StoragePools) >= limit {
				count++
				continue
			}
			response.StoragePools = append(response.StoragePools, storagePoolSummaryFromResource(resource))
			count++
		}
		if filterType == "storage-pools" {
			totalMatches = count
		}
	}
	// Physical disks
	if filterType == "" || filterType == "physical-disks" {
		count := 0
		for _, resource := range physicalDiskResources {
			if !statusMatchesFilter(string(resource.Status), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.PhysicalDisks) >= limit {
				count++
				continue
			}
			response.PhysicalDisks = append(response.PhysicalDisks, physicalDiskSummaryFromResource(resource))
			count++
		}
		if filterType == "physical-disks" {
			totalMatches = count
		}
	}
	// Kubernetes clusters
	if filterType == "" || filterType == "kubernetes" || filterType == "k8s-clusters" {
		count := 0
		for _, cluster := range rs.K8sClusters() {
			if cluster == nil {
				continue
			}
			if !matchesCluster(cluster.Name()) {
				continue
			}
			if !statusMatchesFilter(string(cluster.Status()), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.K8sClusters) >= limit {
				count++
				continue
			}
			key := normalizeClusterKey(cluster.Name())
			response.K8sClusters = append(response.K8sClusters, K8sClusterSummary{
				GovernedResourceMetadata: governance.Resolve(cluster.Name(), cluster.ID(), cluster.ClusterID()),
				ID:                       cluster.ID(),
				Name:                     cluster.Name(),
				Status:                   string(cluster.Status()),
				NodeCount:                nodeCountByCluster[key],
				DeploymentCount:          deploymentCountByCluster[key],
				PodCount:                 podCountByCluster[key],
			})
			count++
		}
		if filterType == "kubernetes" || filterType == "k8s-clusters" {
			totalMatches = count
		}
	}
	// Kubernetes nodes
	if filterType == "" || filterType == "k8s-nodes" {
		count := 0
		for _, node := range rs.K8sNodes() {
			if node == nil {
				continue
			}
			if !matchesCluster(node.ClusterName()) {
				continue
			}
			if !statusMatchesFilter(string(node.Status()), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.K8sNodes) >= limit {
				count++
				continue
			}
			response.K8sNodes = append(response.K8sNodes, K8sNodeSummary{
				GovernedResourceMetadata: governance.Resolve(node.Name()),
				Name:                     node.Name(),
				Cluster:                  node.ClusterName(),
				Status:                   string(node.Status()),
				Ready:                    node.Ready(),
				Roles:                    node.Roles(),
			})
			count++
		}
		if filterType == "k8s-nodes" {
			totalMatches = count
		}
	}
	// Kubernetes pods
	if filterType == "" || filterType == "k8s-pods" {
		count := 0
		for _, pod := range rs.Pods() {
			if pod == nil {
				continue
			}
			if !matchesCluster(pod.ClusterName()) || !matchesNamespace(pod.Namespace()) {
				continue
			}
			// A pod matches when either its status or its phase matches.
			if !statusMatchesFilter(string(pod.Status()), filterStatus) && !statusMatchesFilter(pod.PodPhase(), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.K8sPods) >= limit {
				count++
				continue
			}
			response.K8sPods = append(response.K8sPods, K8sPodSummary{
				GovernedResourceMetadata: governance.Resolve(pod.Name()),
				Name:                     pod.Name(),
				Cluster:                  pod.ClusterName(),
				Namespace:                pod.Namespace(),
				Status:                   string(pod.Status()),
				Restarts:                 pod.Restarts(),
				OwnerKind:                pod.OwnerKind(),
				OwnerName:                pod.OwnerName(),
			})
			count++
		}
		if filterType == "k8s-pods" {
			totalMatches = count
		}
	}
	// Kubernetes deployments
	if filterType == "" || filterType == "k8s-deployments" {
		count := 0
		for _, deployment := range rs.K8sDeployments() {
			if deployment == nil {
				continue
			}
			if !matchesCluster(deployment.ClusterName()) || !matchesNamespace(deployment.Namespace()) {
				continue
			}
			if !statusMatchesFilter(string(deployment.Status()), filterStatus) {
				continue
			}
			if count < offset {
				count++
				continue
			}
			if len(response.K8sDeployments) >= limit {
				count++
				continue
			}
			response.K8sDeployments = append(response.K8sDeployments, K8sDeploymentSummary{
				GovernedResourceMetadata: governance.Resolve(deployment.Name()),
				Name:                     deployment.Name(),
				Cluster:                  deployment.ClusterName(),
				Namespace:                deployment.Namespace(),
				Status:                   string(deployment.Status()),
				DesiredReplicas:          deployment.DesiredReplicas(),
				ReadyReplicas:            deployment.ReadyReplicas(),
			})
			count++
		}
		if filterType == "k8s-deployments" {
			totalMatches = count
		}
	}
	// Pagination metadata is only attached for single-type queries where the
	// caller paged past the start or more matches exist than were returned.
	if filterType != "" && (offset > 0 || totalMatches > limit) {
		response.Pagination = &PaginationInfo{
			Total:  totalMatches,
			Limit:  limit,
			Offset: offset,
		}
	}
	return NewJSONResult(response.NormalizeCollections()), nil
}
func (e *PulseToolExecutor) executeGetTopology(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
include, _ := args["include"].(string)
if include == "" {
include = "all"
}
include = canonicalQueryTopologyInclude(include)
switch include {
case "all", "proxmox", "app-containers", "kubernetes":
default:
return NewErrorResult(fmt.Errorf("invalid include: %s. Use all, proxmox, app-containers, or kubernetes", include)), nil
}
summaryOnly, summaryProvided := args["summary_only"].(bool)
maxProxmoxNodes := intArg(args, "max_proxmox_nodes", 0)
maxVMsPerNode := intArg(args, "max_vms_per_node", 0)
maxContainersPerNode := intArg(args, "max_containers_per_node", 0)
maxDockerHosts := intArg(args, "max_docker_hosts", 0)
maxDockerContainersPerHost := intArg(args, "max_docker_containers_per_host", 0)
maxK8sClusters := intArg(args, "max_k8s_clusters", 0)
maxK8sNodesPerCluster := intArg(args, "max_k8s_nodes_per_cluster", 0)
maxK8sDeploymentsPerCluster := intArg(args, "max_k8s_deployments_per_cluster", 0)
maxK8sPodsPerCluster := intArg(args, "max_k8s_pods_per_cluster", 0)
_, maxProxmoxNodesProvided := args["max_proxmox_nodes"]
_, maxVMsProvided := args["max_vms_per_node"]
_, maxContainersProvided := args["max_containers_per_node"]
_, maxDockerHostsProvided := args["max_docker_hosts"]
_, maxDockerContainersProvided := args["max_docker_containers_per_host"]
_, maxK8sClustersProvided := args["max_k8s_clusters"]
_, maxK8sNodesProvided := args["max_k8s_nodes_per_cluster"]
_, maxK8sDeploymentsProvided := args["max_k8s_deployments_per_cluster"]
_, maxK8sPodsProvided := args["max_k8s_pods_per_cluster"]
if !summaryProvided {
summaryOnly = false
}
if !summaryOnly {
if !maxProxmoxNodesProvided {
maxProxmoxNodes = defaultMaxTopologyNodes
}
if !maxVMsProvided {
maxVMsPerNode = defaultMaxTopologyVMsPerNode
}
if !maxContainersProvided {
maxContainersPerNode = defaultMaxTopologyContainersPerNode
}
if !maxDockerHostsProvided {
maxDockerHosts = defaultMaxTopologyDockerHosts
}
if !maxDockerContainersProvided {
maxDockerContainersPerHost = defaultMaxTopologyDockerContainersPerHost
}
if !maxK8sClustersProvided {
maxK8sClusters = defaultMaxTopologyK8sClusters
}
if !maxK8sNodesProvided {
maxK8sNodesPerCluster = defaultMaxTopologyK8sNodesPerCluster
}
if !maxK8sDeploymentsProvided {
maxK8sDeploymentsPerCluster = defaultMaxTopologyK8sDeploymentsPerCluster
}
if !maxK8sPodsProvided {
maxK8sPodsPerCluster = defaultMaxTopologyK8sPodsPerCluster
}
}
rs, err := e.readStateForControl()
if err != nil {
return NewErrorResult(err), nil
}
governance := newGovernedQueryMetadataResolver(rs)
// Build a set of connected agent hostnames for quick lookup
connectedAgentHostnames := make(map[string]bool)
if e.agentServer != nil {
for _, agent := range e.agentServer.GetConnectedAgents() {
connectedAgentHostnames[agent.Hostname] = true
}
}
// Check if control is enabled
controlEnabled := e.controlLevel != ControlLevelReadOnly && e.controlLevel != ""
includeProxmox := include == "all" || include == "proxmox"
includeDocker := include == "all" || include == "app-containers"
includeKubernetes := include == "all" || include == "kubernetes"
// Summary counters
summary := TopologySummary{
TotalNodes: len(rs.Nodes()),
TotalVMs: len(rs.VMs()),
TotalSystemContainers: len(rs.Containers()),
TotalDockerHosts: len(rs.DockerHosts()),
TotalDockerContainers: len(rs.DockerContainers()),
TotalK8sClusters: len(rs.K8sClusters()),
TotalK8sNodes: len(rs.K8sNodes()),
TotalK8sDeployments: len(rs.K8sDeployments()),
TotalK8sPods: len(rs.Pods()),
}
for _, node := range rs.Nodes() {
if connectedAgentHostnames[node.Name()] {
summary.NodesWithAgents++
}
}
for _, host := range rs.DockerHosts() {
if host == nil {
continue
}
hostname := strings.TrimSpace(host.Hostname())
displayName := strings.TrimSpace(host.Name())
if connectedAgentHostnames[hostname] || connectedAgentHostnames[displayName] {
summary.DockerHostsWithAgents++
}
}
for _, pod := range rs.Pods() {
if pod == nil {
continue
}
if statusMatchesFilter(string(pod.Status()), "running") || statusMatchesFilter(pod.PodPhase(), "running") {
summary.RunningK8sPods++
}
}
// Build node topology - group VMs and containers by hypervisor node
nodeMap := make(map[string]*ProxmoxNodeTopology)
if includeProxmox && !summaryOnly {
for _, node := range rs.Nodes() {
if maxProxmoxNodes > 0 && len(nodeMap) >= maxProxmoxNodes {
break
}
name := node.Name()
hasAgent := connectedAgentHostnames[name]
nodeMap[name] = &ProxmoxNodeTopology{
GovernedResourceMetadata: governance.Resolve(node.Name(), node.ID()),
Name: name,
Status: string(node.Status()),
AgentConnected: hasAgent,
CanExecute: hasAgent && controlEnabled,
VMs: []TopologyVM{},
Containers: []TopologyContainer{},
}
}
}
ensureNode := func(name, status string) *ProxmoxNodeTopology {
if !includeProxmox || summaryOnly {
return nil
}
if node, exists := nodeMap[name]; exists {
return node
}
if maxProxmoxNodes > 0 && len(nodeMap) >= maxProxmoxNodes {
return nil
}
hasAgent := connectedAgentHostnames[name]
nodeMap[name] = &ProxmoxNodeTopology{
GovernedResourceMetadata: governance.Resolve(name),
Name: name,
Status: status,
AgentConnected: hasAgent,
CanExecute: hasAgent && controlEnabled,
VMs: []TopologyVM{},
Containers: []TopologyContainer{},
}
return nodeMap[name]
}
// Add VMs to their nodes
for _, vm := range rs.VMs() {
status := string(vm.Status())
if status == "running" || status == string(unifiedresources.StatusOnline) {
summary.RunningVMs++
}
nodeTopology := ensureNode(vm.Node(), "unknown")
if nodeTopology == nil {
continue
}
nodeTopology.VMCount++
if maxVMsPerNode <= 0 || len(nodeTopology.VMs) < maxVMsPerNode {
nodeTopology.VMs = append(nodeTopology.VMs, TopologyVM{
GovernedResourceMetadata: governance.Resolve(vm.Name(), vm.ID(), fmt.Sprintf("%d", vm.VMID())),
VMID: vm.VMID(),
Name: vm.Name(),
Status: status,
CPU: vm.CPUPercent(),
Memory: vm.MemoryPercent(),
Tags: vm.Tags(),
})
}
}
// Add containers to their nodes
for _, ct := range rs.Containers() {
status := string(ct.Status())
if status == "running" || status == string(unifiedresources.StatusOnline) {
summary.RunningContainers++
}
nodeTopology := ensureNode(ct.Node(), "unknown")
if nodeTopology == nil {
continue
}
nodeTopology.ContainerCount++
if maxContainersPerNode <= 0 || len(nodeTopology.Containers) < maxContainersPerNode {
nodeTopology.Containers = append(nodeTopology.Containers, TopologyContainer{
GovernedResourceMetadata: governance.Resolve(ct.Name(), ct.ID(), fmt.Sprintf("%d", ct.VMID())),
VMID: ct.VMID(),
Name: ct.Name(),
Status: status,
CPU: ct.CPUPercent(),
Memory: ct.MemoryPercent(),
Tags: ct.Tags(),
})
}
}
// Convert node map to slice
proxmoxNodes := []ProxmoxNodeTopology{}
if includeProxmox && !summaryOnly {
for _, node := range nodeMap {
proxmoxNodes = append(proxmoxNodes, *node)
}
sort.Slice(proxmoxNodes, func(i, j int) bool {
return strings.ToLower(proxmoxNodes[i].Name) < strings.ToLower(proxmoxNodes[j].Name)
})
for i := range proxmoxNodes {
sort.Slice(proxmoxNodes[i].VMs, func(a, b int) bool {
return strings.ToLower(proxmoxNodes[i].VMs[a].Name) < strings.ToLower(proxmoxNodes[i].VMs[b].Name)
})
sort.Slice(proxmoxNodes[i].Containers, func(a, b int) bool {
return strings.ToLower(proxmoxNodes[i].Containers[a].Name) < strings.ToLower(proxmoxNodes[i].Containers[b].Name)
})
}
}
// Build Docker topology
containersByHost := make(map[string][]*unifiedresources.DockerContainerView)
for _, container := range rs.DockerContainers() {
if container == nil {
continue
}
parentID := strings.TrimSpace(container.ParentID())
if parentID == "" {
continue
}
containersByHost[parentID] = append(containersByHost[parentID], container)
if statusMatchesFilter(container.ContainerState(), "running") || statusMatchesFilter(string(container.Status()), "running") {
summary.RunningDocker++
}
}
dockerHosts := []DockerHostTopology{}
for _, host := range rs.DockerHosts() {
if host == nil {
continue
}
hostname := strings.TrimSpace(host.Hostname())
displayName := strings.TrimSpace(host.Name())
hasAgent := connectedAgentHostnames[hostname] || connectedAgentHostnames[displayName]
hostContainers := containersByHost[host.ID()]
runningCount := 0
var containers []DockerContainerSummary
for _, container := range hostContainers {
state := strings.TrimSpace(container.ContainerState())
if state == "" {
state = string(container.Status())
}
if statusMatchesFilter(state, "running") {
runningCount++
}
if includeDocker && !summaryOnly {
if maxDockerContainersPerHost <= 0 || len(containers) < maxDockerContainersPerHost {
containers = append(containers, DockerContainerSummary{
GovernedResourceMetadata: governance.Resolve(container.Name(), container.ID(), container.ContainerID()),
ID: container.ID(),
Name: container.Name(),
State: state,
Image: container.Image(),
Health: container.Health(),
})
}
}
}
if includeDocker && !summaryOnly {
if maxDockerHosts > 0 && len(dockerHosts) >= maxDockerHosts {
continue
}
if hostname == "" {
hostname = displayName
}
dockerHosts = append(dockerHosts, DockerHostTopology{
GovernedResourceMetadata: governance.Resolve(host.Hostname(), host.Name(), host.HostSourceID(), host.ID()),
Hostname: hostname,
DisplayName: displayName,
AgentConnected: hasAgent,
CanExecute: hasAgent && controlEnabled,
Containers: containers,
ContainerCount: len(hostContainers),
RunningCount: runningCount,
})
}
}
sort.Slice(dockerHosts, func(i, j int) bool {
return strings.ToLower(dockerHosts[i].Hostname) < strings.ToLower(dockerHosts[j].Hostname)
})
// Build Kubernetes topology
k8sClusters := []KubernetesClusterTopology{}
clusterMap := make(map[string]*KubernetesClusterTopology)
clusterKeyByID := make(map[string]string)
clusterKeyByName := make(map[string]string)
ensureCluster := func(name, id, status string) *KubernetesClusterTopology {
if !includeKubernetes || summaryOnly {
return nil
}
name = strings.TrimSpace(name)
id = strings.TrimSpace(id)
status = strings.TrimSpace(status)
if status == "" {
status = "unknown"
}
key := strings.ToLower(id)
if key == "" {
key = strings.ToLower(name)
}
if key == "" {
return nil
}
if existing, ok := clusterMap[key]; ok {
if existing.Name == "" && name != "" {
existing.Name = name
}
if existing.ID == "" && id != "" {
existing.ID = id
}
if existing.Status == "unknown" && status != "" {
existing.Status = status
}
return existing
}
if maxK8sClusters > 0 && len(clusterMap) >= maxK8sClusters {
return nil
}
cluster := &KubernetesClusterTopology{
GovernedResourceMetadata: governance.Resolve(name, id),
Name: name,
ID: id,
Status: status,
}
if cluster.Name == "" {
cluster.Name = cluster.ID
}
clusterMap[key] = cluster
if cluster.ID != "" {
clusterKeyByID[cluster.ID] = key
}
if cluster.Name != "" {
clusterKeyByName[strings.ToLower(cluster.Name)] = key
}
return cluster
}
resolveCluster := func(parentID, clusterName string) *KubernetesClusterTopology {
parentID = strings.TrimSpace(parentID)
clusterName = strings.TrimSpace(clusterName)
if parentID != "" {
if key, ok := clusterKeyByID[parentID]; ok {
return clusterMap[key]
}
}
if clusterName != "" {
if key, ok := clusterKeyByName[strings.ToLower(clusterName)]; ok {
return clusterMap[key]
}
}
return ensureCluster(clusterName, parentID, "unknown")
}
if includeKubernetes && !summaryOnly {
for _, cluster := range rs.K8sClusters() {
if cluster == nil {
continue
}
ensureCluster(cluster.Name(), cluster.ID(), string(cluster.Status()))
}
}
for _, node := range rs.K8sNodes() {
if node == nil {
continue
}
cluster := resolveCluster(node.ParentID(), node.ClusterName())
if cluster == nil {
continue
}
cluster.NodeCount++
if includeKubernetes && !summaryOnly && (maxK8sNodesPerCluster <= 0 || len(cluster.Nodes) < maxK8sNodesPerCluster) {
cluster.Nodes = append(cluster.Nodes, KubernetesNodeTopology{
GovernedResourceMetadata: governance.Resolve(node.Name()),
Name: node.Name(),
Status: string(node.Status()),
Ready: node.Ready(),
Roles: node.Roles(),
CPU: node.CPUPercent(),
Memory: node.MemoryPercent(),
})
}
}
for _, deployment := range rs.K8sDeployments() {
if deployment == nil {
continue
}
cluster := resolveCluster(deployment.ParentID(), deployment.ClusterName())
if cluster == nil {
continue
}
cluster.DeploymentCount++
if includeKubernetes && !summaryOnly && (maxK8sDeploymentsPerCluster <= 0 || len(cluster.Deployments) < maxK8sDeploymentsPerCluster) {
cluster.Deployments = append(cluster.Deployments, KubernetesDeploymentDetail{
GovernedResourceMetadata: governance.Resolve(deployment.Name()),
Name: deployment.Name(),
Namespace: deployment.Namespace(),
Status: string(deployment.Status()),
DesiredReplicas: deployment.DesiredReplicas(),
ReadyReplicas: deployment.ReadyReplicas(),
})
}
}
for _, pod := range rs.Pods() {
if pod == nil {
continue
}
cluster := resolveCluster(pod.ParentID(), pod.ClusterName())
if cluster == nil {
continue
}
cluster.PodCount++
if includeKubernetes && !summaryOnly && (maxK8sPodsPerCluster <= 0 || len(cluster.Pods) < maxK8sPodsPerCluster) {
cluster.Pods = append(cluster.Pods, KubernetesPodDetail{
GovernedResourceMetadata: governance.Resolve(pod.Name()),
Name: pod.Name(),
Namespace: pod.Namespace(),
Status: string(pod.Status()),
Restarts: pod.Restarts(),
OwnerKind: pod.OwnerKind(),
OwnerName: pod.OwnerName(),
})
}
}
if includeKubernetes && !summaryOnly {
for _, cluster := range clusterMap {
k8sClusters = append(k8sClusters, *cluster)
}
sort.Slice(k8sClusters, func(i, j int) bool {
return strings.ToLower(k8sClusters[i].Name) < strings.ToLower(k8sClusters[j].Name)
})
for i := range k8sClusters {
sort.Slice(k8sClusters[i].Nodes, func(a, b int) bool {
return strings.ToLower(k8sClusters[i].Nodes[a].Name) < strings.ToLower(k8sClusters[i].Nodes[b].Name)
})
sort.Slice(k8sClusters[i].Deployments, func(a, b int) bool {
return strings.ToLower(k8sClusters[i].Deployments[a].Name) < strings.ToLower(k8sClusters[i].Deployments[b].Name)
})
sort.Slice(k8sClusters[i].Pods, func(a, b int) bool {
return strings.ToLower(k8sClusters[i].Pods[a].Name) < strings.ToLower(k8sClusters[i].Pods[b].Name)
})
}
}
response := EmptyTopologyResponse()
response.Proxmox.Nodes = proxmoxNodes
response.Docker.Hosts = dockerHosts
response.Kubernetes.Clusters = k8sClusters
response.Summary = summary
return NewJSONResult(response.NormalizeCollections()), nil
}
// executeGetResource returns a detailed JSON snapshot for a single resource
// identified by resource_type + resource_id. Supported types (after
// canonicalization) are: agent, storage, vm, system-container, and
// app-container. Each successful lookup also registers the resource in the
// resolved-resource context with explicit access, so follow-up actions can
// target it without a separate discovery step.
//
// Lookup strategy per type: try the unified resource provider (canonical
// resources) first, then fall back to scanning the read state. A miss returns
// a JSON {"error": "not_found", ...} payload rather than a Go error.
func (e *PulseToolExecutor) executeGetResource(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
	resourceType, _ := args["resource_type"].(string)
	resourceID, _ := args["resource_id"].(string)
	// Keep the raw (pre-canonicalization) type for the error message below.
	resourceTypeRaw := strings.TrimSpace(resourceType)
	resourceType = canonicalQueryResourceType(resourceType)
	if resourceType == "" {
		return NewErrorResult(fmt.Errorf("resource_type is required")), nil
	}
	if resourceID == "" {
		return NewErrorResult(fmt.Errorf("resource_id is required")), nil
	}
	rs, err := e.readStateForControl()
	if err != nil {
		return NewTextResult("State information not available."), nil
	}
	governance := newGovernedQueryMetadataResolver(rs)
	switch resourceType {
	case "agent":
		// Agents only exist in the unified resource provider; no read-state fallback.
		if e.unifiedResourceProvider != nil {
			for _, resource := range e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeAgent) {
				if !matchesCanonicalResourceReference(resource, resourceID, canonicalAgentSearchCandidates(resource)...) {
					continue
				}
				response := canonicalAgentResponse(resource, governance)
				if reg, ok := canonicalAgentRegistration(resource); ok {
					e.registerResolvedResourceWithExplicitAccess(reg)
				}
				return NewJSONResult(response), nil
			}
		}
		return NewJSONResult(map[string]interface{}{
			"error":       "not_found",
			"resource_id": resourceID,
			"type":        "agent",
		}), nil
	case "storage":
		// Storage pools only exist in the unified resource provider; no read-state fallback.
		if e.unifiedResourceProvider != nil {
			for _, resource := range canonicalStoragePoolResources(e.unifiedResourceProvider) {
				if !matchesCanonicalResourceReference(resource, resourceID, canonicalStorageSearchCandidates(resource)...) {
					continue
				}
				response := canonicalStorageResponse(resource, governance)
				if reg, ok := canonicalStorageRegistration(resource); ok {
					e.registerResolvedResourceWithExplicitAccess(reg)
				}
				return NewJSONResult(response), nil
			}
		}
		return NewJSONResult(map[string]interface{}{
			"error":       "not_found",
			"resource_id": resourceID,
			"type":        "storage",
		}), nil
	case "vm":
		// Preferred path: canonical guest resource from the unified provider.
		if resource, ok := findCanonicalGuestResource(e.unifiedResourceProvider, "vm", resourceID); ok {
			response := canonicalGuestResponse("vm", resource, governance)
			if reg, ok := canonicalGuestRegistration("vm", resource); ok {
				e.registerResolvedResourceWithExplicitAccess(reg)
			}
			return NewJSONResult(response), nil
		}
		// Fallback: scan read state, matching by VMID, name, or resource ID.
		for _, vm := range rs.VMs() {
			if fmt.Sprintf("%d", vm.VMID()) == resourceID || vm.Name() == resourceID || vm.ID() == resourceID {
				used := vm.MemoryUsed()
				total := vm.MemoryTotal()
				response := EmptyResourceResponse()
				response.GovernedResourceMetadata = governance.Resolve(vm.Name(), vm.ID(), fmt.Sprintf("%d", vm.VMID()))
				response.Type = "vm"
				response.ID = vm.ID()
				response.Name = vm.Name()
				response.Status = string(vm.Status())
				response.Node = vm.Node()
				response.CPU = ResourceCPU{
					Percent: vm.CPUPercent(),
					Cores:   vm.CPUs(),
				}
				// Memory counters are bytes; convert to GiB for the response.
				response.Memory = ResourceMemory{
					Percent: vm.MemoryPercent(),
					UsedGB:  float64(used) / (1024 * 1024 * 1024),
					TotalGB: float64(total) / (1024 * 1024 * 1024),
				}
				response.Tags = vm.Tags()
				if !vm.LastBackup().IsZero() {
					t := vm.LastBackup()
					response.LastBackup = &t
				}
				// Register in resolved context WITH explicit access (single-resource get operation)
				e.registerResolvedResourceWithExplicitAccess(ResourceRegistration{
					Kind:          "vm",
					ProviderUID:   fmt.Sprintf("%d", vm.VMID()), // VMID is the stable provider ID
					Name:          vm.Name(),
					Aliases:       []string{vm.Name(), fmt.Sprintf("%d", vm.VMID()), vm.ID()},
					HostUID:       vm.Node(),
					HostName:      vm.Node(),
					VMID:          vm.VMID(),
					Node:          vm.Node(),
					LocationChain: []string{"node:" + vm.Node(), "vm:" + vm.Name()},
					Executors: []ExecutorRegistration{{
						ExecutorID: vm.Node(),
						Adapter:    "qm",
						Actions:    guestExecutorActions(),
						Priority:   10,
					}},
				})
				return NewJSONResult(response.NormalizeCollections()), nil
			}
		}
		return NewJSONResult(map[string]interface{}{
			"error":       "not_found",
			"resource_id": resourceID,
			"type":        "vm",
		}), nil
	case "system-container":
		// Preferred path: canonical guest resource from the unified provider.
		if resource, ok := findCanonicalGuestResource(e.unifiedResourceProvider, "system-container", resourceID); ok {
			response := canonicalGuestResponse("system-container", resource, governance)
			if reg, ok := canonicalGuestRegistration("system-container", resource); ok {
				e.registerResolvedResourceWithExplicitAccess(reg)
			}
			return NewJSONResult(response), nil
		}
		// Fallback: scan read state, matching by VMID, name, or resource ID.
		for _, ct := range rs.Containers() {
			if fmt.Sprintf("%d", ct.VMID()) == resourceID || ct.Name() == resourceID || ct.ID() == resourceID {
				used := ct.MemoryUsed()
				total := ct.MemoryTotal()
				response := EmptyResourceResponse()
				response.GovernedResourceMetadata = governance.Resolve(ct.Name(), ct.ID(), fmt.Sprintf("%d", ct.VMID()))
				response.Type = "system-container"
				response.ID = ct.ID()
				response.Name = ct.Name()
				response.Status = string(ct.Status())
				response.Node = ct.Node()
				response.CPU = ResourceCPU{
					Percent: ct.CPUPercent(),
					Cores:   ct.CPUs(),
				}
				// Memory counters are bytes; convert to GiB for the response.
				response.Memory = ResourceMemory{
					Percent: ct.MemoryPercent(),
					UsedGB:  float64(used) / (1024 * 1024 * 1024),
					TotalGB: float64(total) / (1024 * 1024 * 1024),
				}
				response.Tags = ct.Tags()
				if !ct.LastBackup().IsZero() {
					t := ct.LastBackup()
					response.LastBackup = &t
				}
				// Register in resolved context WITH explicit access (single-resource get operation)
				e.registerResolvedResourceWithExplicitAccess(ResourceRegistration{
					Kind:          "system-container",
					ProviderUID:   fmt.Sprintf("%d", ct.VMID()), // VMID is the stable provider ID
					Name:          ct.Name(),
					Aliases:       []string{ct.Name(), fmt.Sprintf("%d", ct.VMID()), ct.ID()},
					HostUID:       ct.Node(),
					HostName:      ct.Node(),
					VMID:          ct.VMID(),
					Node:          ct.Node(),
					LocationChain: []string{"node:" + ct.Node(), "system-container:" + ct.Name()},
					Executors: []ExecutorRegistration{{
						ExecutorID: ct.Node(),
						Adapter:    "pct",
						Actions:    guestExecutorActions(),
						Priority:   10,
					}},
				})
				return NewJSONResult(response.NormalizeCollections()), nil
			}
		}
		return NewJSONResult(map[string]interface{}{
			"error":       "not_found",
			"resource_id": resourceID,
			"type":        "system-container",
		}), nil
	case "app-container":
		// Preferred path: canonical app-container resource from the unified provider.
		if resource, containerID, ok := findCanonicalAppContainerResource(e.unifiedResourceProvider, resourceID); ok {
			response := EmptyResourceResponse()
			response.GovernedResourceMetadata = governance.Resolve(resourceDisplayName(resource), resource.ID, containerID)
			response.Type = "app-container"
			response.ID = containerID
			response.Name = resourceDisplayName(resource)
			response.Status = canonicalAppContainerState(resource)
			response.Platform = canonicalResourcePlatform(resource)
			response.Host = canonicalAppContainerHost(resource)
			response.CPU = ResourceCPU{
				Percent: metricPercent(resourceMetric(resource, "cpu")),
			}
			response.Memory = ResourceMemory{
				Percent: metricPercent(resourceMetric(resource, "memory")),
				UsedGB:  metricUsedGB(resourceMetric(resource, "memory")),
				TotalGB: metricTotalGB(resourceMetric(resource, "memory")),
			}
			if diskMetric := resourceMetric(resource, "disk"); diskMetric != nil {
				response.Disk = &ResourceDisk{
					Percent: metricPercent(diskMetric),
					UsedGB:  metricUsedGB(diskMetric),
					TotalGB: metricTotalGB(diskMetric),
				}
			}
			response.Tags = append([]string{}, resource.Tags...)
			// Docker-specific details (image, ports, networks, mounts) are only
			// present when the canonical resource carries a Docker payload.
			if resource.Docker != nil {
				response.Image = strings.TrimSpace(resource.Docker.Image)
				response.Health = strings.TrimSpace(resource.Docker.Health)
				response.RestartCount = resource.Docker.RestartCount
				response.Labels = resource.Docker.Labels
				if update := resource.Docker.UpdateStatus; update != nil && update.UpdateAvailable {
					response.UpdateAvailable = true
				}
				for _, p := range resource.Docker.Ports {
					response.Ports = append(response.Ports, PortInfo{
						Private:  p.PrivatePort,
						Public:   p.PublicPort,
						Protocol: p.Protocol,
						IP:       p.IP,
					})
				}
				for _, n := range resource.Docker.Networks {
					addresses := make([]string, 0, 2)
					if n.IPv4 != "" {
						addresses = append(addresses, n.IPv4)
					}
					if n.IPv6 != "" {
						addresses = append(addresses, n.IPv6)
					}
					response.Networks = append(response.Networks, NetworkInfo{
						Name:      n.Name,
						Addresses: addresses,
					})
				}
				for _, m := range resource.Docker.Mounts {
					response.Mounts = append(response.Mounts, MountInfo{
						Source:      m.Source,
						Destination: m.Destination,
						// Mode "ro" (case-insensitive) means read-only; anything else is read-write.
						ReadWrite: !strings.EqualFold(strings.TrimSpace(m.Mode), "ro"),
					})
				}
			}
			if reg, ok := resolvedAppContainerRegistration(resource); ok {
				e.registerResolvedResourceWithExplicitAccess(reg)
			}
			return NewJSONResult(response.NormalizeCollections()), nil
		}
		// Fallback: scan read-state Docker containers. Index hosts by ID first
		// so each container's parent host can be resolved for naming/executors.
		dockerHostsByID := make(map[string]*unifiedresources.DockerHostView)
		for _, host := range rs.DockerHosts() {
			if host == nil {
				continue
			}
			dockerHostsByID[host.ID()] = host
		}
		for _, container := range rs.DockerContainers() {
			if container == nil {
				continue
			}
			containerID := strings.TrimSpace(container.ContainerID())
			if containerID == "" {
				containerID = strings.TrimSpace(container.ID())
			}
			// Match full ID, name, or an ID prefix (supports short Docker IDs).
			if containerID != resourceID && container.Name() != resourceID && !strings.HasPrefix(containerID, resourceID) {
				continue
			}
			state := strings.TrimSpace(container.ContainerState())
			if state == "" {
				state = string(container.Status())
			}
			hostName := ""
			hostUID := ""
			executorID := ""
			if host := dockerHostsByID[strings.TrimSpace(container.ParentID())]; host != nil {
				hostName = strings.TrimSpace(host.Hostname())
				hostUID = strings.TrimSpace(host.HostSourceID())
				if hostUID == "" {
					hostUID = strings.TrimSpace(host.ID())
				}
				if hostName == "" {
					hostName = strings.TrimSpace(host.Name())
				}
				executorID = hostName
				if executorID == "" {
					executorID = hostUID
				}
			}
			response := EmptyResourceResponse()
			response.GovernedResourceMetadata = governance.Resolve(container.Name(), container.ID(), containerID)
			response.Type = "app-container"
			response.ID = containerID
			response.Name = container.Name()
			response.Status = state
			response.Host = hostName
			response.Image = container.Image()
			response.Health = container.Health()
			response.CPU = ResourceCPU{
				Percent: container.CPUPercent(),
			}
			// Memory counters are bytes; convert to GiB for the response.
			response.Memory = ResourceMemory{
				Percent: container.MemoryPercent(),
				UsedGB:  float64(container.MemoryUsed()) / (1024 * 1024 * 1024),
				TotalGB: float64(container.MemoryTotal()) / (1024 * 1024 * 1024),
			}
			response.RestartCount = container.RestartCount()
			response.Labels = container.Labels()
			if update := container.UpdateStatus(); update != nil && update.UpdateAvailable {
				response.UpdateAvailable = true
			}
			for _, p := range container.Ports() {
				response.Ports = append(response.Ports, PortInfo{
					Private:  p.PrivatePort,
					Public:   p.PublicPort,
					Protocol: p.Protocol,
					IP:       p.IP,
				})
			}
			for _, n := range container.Networks() {
				addresses := make([]string, 0, 2)
				if n.IPv4 != "" {
					addresses = append(addresses, n.IPv4)
				}
				if n.IPv6 != "" {
					addresses = append(addresses, n.IPv6)
				}
				response.Networks = append(response.Networks, NetworkInfo{
					Name:      n.Name,
					Addresses: addresses,
				})
			}
			for _, m := range container.Mounts() {
				response.Mounts = append(response.Mounts, MountInfo{
					Source:      m.Source,
					Destination: m.Destination,
					ReadWrite:   m.RW,
				})
			}
			// Register in resolved context WITH explicit access (single-resource get operation)
			aliases := []string{container.Name(), containerID}
			if len(containerID) > 12 {
				aliases = append(aliases, containerID[:12]) // Add short ID for longer IDs
			}
			// Backfill host identifiers so the registration never carries empty
			// host/executor fields, even when the parent host was not found.
			locationHost := hostName
			if locationHost == "" {
				locationHost = hostUID
			}
			if locationHost == "" {
				locationHost = "unknown"
			}
			if hostName == "" {
				hostName = locationHost
			}
			if hostUID == "" {
				hostUID = locationHost
			}
			if executorID == "" {
				executorID = locationHost
			}
			response.Host = hostName
			e.registerResolvedResourceWithExplicitAccess(ResourceRegistration{
				Kind:          "app-container",
				ProviderUID:   containerID, // Docker container ID is the stable provider ID
				Name:          container.Name(),
				Aliases:       aliases,
				HostUID:       hostUID,
				HostName:      hostName,
				LocationChain: []string{"docker-host:" + locationHost, "docker:" + container.Name()},
				Executors: []ExecutorRegistration{{
					ExecutorID: executorID,
					Adapter:    "docker",
					Actions:    []string{"query", "get", "logs", "exec", "restart", "stop", "start"},
					Priority:   10,
				}},
			})
			return NewJSONResult(response.NormalizeCollections()), nil
		}
		return NewJSONResult(map[string]interface{}{
			"error":       "not_found",
			"resource_id": resourceID,
			"type":        "app-container",
		}), nil
	default:
		return NewErrorResult(fmt.Errorf("invalid resource_type: %s. Use 'agent', 'vm', 'system-container', 'app-container', or 'storage' (compatibility aliases 'system' and 'storage-pool' are also accepted)", resourceTypeRaw)), nil
	}
}
// executeGetResourceConfig returns the configuration of a single resource
// identified by resource_type + resource_id.
//
// app-container requests are delegated to the native app-container config
// path. vm/system-container requests resolve the guest from read state,
// fetch the raw Proxmox config via the guest config provider, and parse it
// into a structured guest-config response. Missing providers or state yield
// informational text results rather than errors.
func (e *PulseToolExecutor) executeGetResourceConfig(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
	resourceType, _ := args["resource_type"].(string)
	resourceID, _ := args["resource_id"].(string)
	resourceType = canonicalQueryResourceType(resourceType)
	if resourceType == "" {
		return NewErrorResult(fmt.Errorf("resource_type is required")), nil
	}
	if resourceID == "" {
		return NewErrorResult(fmt.Errorf("resource_id is required")), nil
	}
	if resourceType == "app-container" {
		return e.executeNativeAppContainerConfig(ctx, strings.TrimSpace(resourceID))
	}
	if e.guestConfigProvider == nil {
		return NewTextResult("Guest configuration not available."), nil
	}
	rs, err := e.readStateForControl()
	if err != nil {
		return NewTextResult("State information not available."), nil
	}
	governance := newGovernedQueryMetadataResolver(rs)
	guestType, vmID, name, node, instance, err := resolveGuestFromReadState(rs, resourceType, resourceID)
	if err != nil {
		return NewErrorResult(err), nil
	}
	// Normalize semantic type to provider-level type for guest config lookup.
	// GetGuestConfig expects "container" or "vm", not "system-container".
	configType := guestType
	if configType == "system-container" {
		configType = "container"
	}
	rawConfig, err := e.guestConfigProvider.GetGuestConfig(configType, instance, node, vmID)
	if err != nil {
		return NewErrorResult(err), nil
	}
	response := EmptyGuestConfigResponse()
	response.GovernedResourceMetadata = governance.Resolve(name, fmt.Sprintf("%d", vmID))
	response.GuestType = guestType
	response.VMID = vmID
	response.Name = name
	response.Node = node
	response.Instance = instance
	switch guestType {
	case "system-container":
		hostname, osType, onboot, rootfs, mounts := parseContainerConfig(rawConfig)
		response.Hostname = hostname
		response.OSType = osType
		response.Onboot = onboot
		response.RootFS = rootfs
		response.Mounts = mounts
	case "vm":
		osType, onboot, disks := parseVMConfig(rawConfig)
		response.OSType = osType
		response.Onboot = onboot
		response.Disks = disks
	default:
		return NewErrorResult(fmt.Errorf("unsupported guest type: %s", guestType)), nil
	}
	return NewJSONResult(response.NormalizeCollections()), nil
}
// executeNativeAppContainerConfig fetches the configuration of a natively
// managed app-container (TrueNAS apps only) identified by resourceRef.
//
// Flow: (1) try to match the ref against a canonical app-container resource,
// registering it in the resolved context if it wasn't already resolvable;
// (2) re-run resolved-resource validation so governance/access checks apply;
// (3) confirm the resolved resource is a TrueNAS app-container before calling
// the config provider; (4) merge provider results with resolved-resource
// fallbacks into the response.
func (e *PulseToolExecutor) executeNativeAppContainerConfig(ctx context.Context, resourceRef string) (CallToolResult, error) {
	if e.appContainerConfigProvider == nil {
		return NewTextResult("App-container configuration not available."), nil
	}
	rs, err := e.readStateForControl()
	if err != nil {
		return NewTextResult("State information not available."), nil
	}
	governance := newGovernedQueryMetadataResolver(rs)
	var resource unifiedresources.Resource
	var found bool
	// First pass: if the ref already validates against the resolved context,
	// just locate the matching canonical resource.
	if validation := e.validateResolvedResource(resourceRef, "query", true); validation.Resource != nil {
		if matched, _, ok := findCanonicalAppContainerResource(e.unifiedResourceProvider, resourceRef); ok {
			resource = matched
			found = true
		}
	}
	if !found {
		// Not yet resolved: discover the canonical resource directly and
		// register it so the validation below can succeed.
		var containerID string
		resource, containerID, found = findCanonicalAppContainerResource(e.unifiedResourceProvider, resourceRef)
		if !found {
			return NewJSONResult(map[string]interface{}{
				"error":       "not_found",
				"resource_id": resourceRef,
				"type":        "app-container",
			}), nil
		}
		if reg, ok := resolvedAppContainerRegistration(resource); ok {
			e.registerResolvedResourceWithExplicitAccess(reg)
		}
		_ = containerID // containerID is not needed here; registration carries the provider UID
	}
	// Second pass: validate again after possible registration; this is the
	// authoritative governance/access check.
	validation := e.validateResolvedResource(resourceRef, "query", true)
	if validation.Resource == nil {
		if validation.ErrorMsg != "" {
			return NewErrorResult(fmt.Errorf("%s", validation.ErrorMsg)), nil
		}
		return NewErrorResult(fmt.Errorf("app-container not found: %s", resourceRef)), nil
	}
	if validation.ErrorMsg != "" {
		return NewErrorResult(fmt.Errorf("%s", validation.ErrorMsg)), nil
	}
	resolved := validation.Resource
	if resolved.GetKind() != "app-container" {
		return NewErrorResult(fmt.Errorf("resource '%s' is %q, not app-container", resourceRef, resolved.GetKind())), nil
	}
	// Only the TrueNAS adapter supports native app-container config.
	if !strings.EqualFold(strings.TrimSpace(resolved.GetAdapter()), "truenas") {
		return NewTextResult("App-container configuration not available."), nil
	}
	result, err := e.appContainerConfigProvider.GetConfig(ctx, AppContainerConfigRequest{
		OrgID:       e.orgID,
		ResourceID:  strings.TrimSpace(resolved.GetResourceID()),
		ProviderUID: strings.TrimSpace(resolved.GetProviderUID()),
		Name:        resourceDisplayName(resource),
		Host:        strings.TrimSpace(resolved.GetTargetHost()),
		Platform:    "truenas",
	})
	if err != nil {
		return NewErrorResult(err), nil
	}
	response := EmptyAppContainerConfigResponse()
	if result != nil {
		response.GovernedResourceMetadata = governance.Resolve(result.Name, result.ResourceID, result.ProviderUID)
		response.Type = "app-container"
		response.ID = result.ProviderUID
		if response.ID == "" {
			response.ID = strings.TrimSpace(result.ResourceID)
		}
		response.Name = result.Name
		response.Host = result.Host
		response.Platform = result.Platform
		response.Status = result.Status
		response.Version = result.Version
		response.HumanVersion = result.HumanVersion
		response.Notes = result.Notes
		response.CustomApp = result.CustomApp
		response.UpgradeAvailable = result.UpgradeAvailable
		response.ImageUpdatesAvailable = result.ImageUpdatesAvailable
		response.ContainerCount = result.ContainerCount
		response.UsedHostIPs = append([]string{}, result.UsedHostIPs...)
		response.Images = append([]string{}, result.Images...)
		response.Ports = append([]PortInfo{}, result.Ports...)
		response.Networks = append([]NetworkInfo{}, result.Networks...)
		response.Mounts = append([]MountInfo{}, result.Mounts...)
		response.Containers = append([]AppContainerConfigContainer{}, result.Containers...)
	}
	// Backfill identity fields from the resolved resource when the provider
	// result left them blank (or was nil).
	if response.ID == "" {
		response.ID = strings.TrimSpace(resolved.GetProviderUID())
	}
	if response.Name == "" {
		response.Name = resolvedResourceDisplayName(resolved)
	}
	if response.Host == "" {
		response.Host = strings.TrimSpace(resolved.GetTargetHost())
	}
	if response.Platform == "" {
		response.Platform = strings.TrimSpace(resolved.GetAdapter())
	}
	if response.GovernedResourceMetadata.Policy == nil && response.AISafeSummary == "" {
		response.GovernedResourceMetadata = governance.Resolve(response.Name, strings.TrimSpace(resolved.GetResourceID()), response.ID)
	}
	return NewJSONResult(response.NormalizeCollections()), nil
}
// resolveGuestFromReadState locates a vm or system-container guest in the
// read state, matching resourceID against the guest's VMID, name, or
// resource ID, and returns the guest's type, VMID, name, node, and instance.
func resolveGuestFromReadState(rs unifiedresources.ReadState, resourceType, resourceID string) (guestType string, vmID int, name, node, instance string, err error) {
	resourceType = canonicalQueryResourceType(resourceType)
	resourceID = strings.TrimSpace(resourceID)
	if resourceType == "" || resourceID == "" {
		return "", 0, "", "", "", fmt.Errorf("resource_type and resource_id are required")
	}
	// refersTo reports whether resourceID identifies a guest by VMID or by
	// one of its string identifiers.
	refersTo := func(id int, candidates ...string) bool {
		if strconv.Itoa(id) == resourceID {
			return true
		}
		for _, candidate := range candidates {
			if candidate == resourceID {
				return true
			}
		}
		return false
	}
	switch resourceType {
	case "system-container":
		for _, ct := range rs.Containers() {
			if refersTo(ct.VMID(), ct.Name(), ct.ID()) {
				return "system-container", ct.VMID(), ct.Name(), ct.Node(), ct.Instance(), nil
			}
		}
		return "", 0, "", "", "", fmt.Errorf("system-container not found: %s", resourceID)
	case "vm":
		for _, vm := range rs.VMs() {
			if refersTo(vm.VMID(), vm.Name(), vm.ID()) {
				return "vm", vm.VMID(), vm.Name(), vm.Node(), vm.Instance(), nil
			}
		}
		return "", 0, "", "", "", fmt.Errorf("vm not found: %s", resourceID)
	default:
		return "", 0, "", "", "", fmt.Errorf("invalid resource_type: %s. Use vm or system-container", resourceType)
	}
}
// parseContainerConfig extracts the hostname, OS type, onboot flag, root
// filesystem source, and mount entries from a raw LXC config map. Mount
// entries come from the "rootfs" key and any "mp*" keys; they are sorted by
// key so the output is deterministic despite map iteration order.
func parseContainerConfig(config map[string]interface{}) (hostname, osType string, onboot *bool, rootfs string, mounts []GuestMountConfig) {
	if len(config) == 0 {
		return "", "", nil, "", nil
	}
	for rawKey, rawValue := range config {
		key := strings.ToLower(strings.TrimSpace(rawKey))
		switch key {
		case "hostname":
			hostname = strings.TrimSpace(fmt.Sprint(rawValue))
			continue
		case "ostype":
			osType = strings.TrimSpace(fmt.Sprint(rawValue))
			continue
		case "onboot":
			onboot = parseOnbootValue(rawValue)
			continue
		}
		isRootFS := key == "rootfs"
		if !isRootFS && !strings.HasPrefix(key, "mp") {
			continue
		}
		entry := strings.TrimSpace(fmt.Sprint(rawValue))
		if entry == "" {
			continue
		}
		source, mountpoint := parseMountValue(entry)
		if isRootFS {
			rootfs = source
			// The root filesystem implicitly mounts at "/" when unspecified.
			if mountpoint == "" {
				mountpoint = "/"
			}
		}
		mounts = append(mounts, GuestMountConfig{
			Key:        key,
			Source:     source,
			Mountpoint: mountpoint,
		})
	}
	if len(mounts) > 1 {
		sort.Slice(mounts, func(a, b int) bool {
			return mounts[a].Key < mounts[b].Key
		})
	}
	return hostname, osType, onboot, rootfs, mounts
}
// parseVMConfig extracts the OS type, onboot flag, and disk entries from a
// raw QEMU VM config map. Disk entries are any keys matching
// isVMConfigDiskKey, sorted by key so output is deterministic despite map
// iteration order.
func parseVMConfig(config map[string]interface{}) (osType string, onboot *bool, disks []GuestDiskConfig) {
	if len(config) == 0 {
		return "", nil, nil
	}
	for rawKey, rawValue := range config {
		key := strings.ToLower(strings.TrimSpace(rawKey))
		switch key {
		case "ostype":
			osType = strings.TrimSpace(fmt.Sprint(rawValue))
		case "onboot":
			onboot = parseOnbootValue(rawValue)
		}
		if !isVMConfigDiskKey(key) {
			continue
		}
		if entry := strings.TrimSpace(fmt.Sprint(rawValue)); entry != "" {
			disks = append(disks, GuestDiskConfig{
				Key:   key,
				Value: entry,
			})
		}
	}
	if len(disks) > 1 {
		sort.Slice(disks, func(a, b int) bool {
			return disks[a].Key < disks[b].Key
		})
	}
	return osType, onboot, disks
}
// isVMConfigDiskKey reports whether a (lowercased) VM config key names a
// disk-like device entry: scsi*, virtio*, sata*, ide*, unused*, efidisk*,
// or tpmstate*.
func isVMConfigDiskKey(key string) bool {
	diskPrefixes := []string{"scsi", "virtio", "sata", "ide", "unused", "efidisk", "tpmstate"}
	for _, prefix := range diskPrefixes {
		if strings.HasPrefix(key, prefix) {
			return true
		}
	}
	return false
}
// parseMountValue parses a Proxmox mount/rootfs config value of the form
// "source,opt=val,...", returning the source volume and the mountpoint from
// an "mp" or "mountpoint" option (empty when absent). All components are
// whitespace-trimmed; options without an '=' are ignored.
func parseMountValue(raw string) (source, mountpoint string) {
	parts := strings.Split(raw, ",")
	// strings.Split always returns at least one element, so parts[0] is safe
	// (the previous len(parts) > 0 guard was redundant).
	source = strings.TrimSpace(parts[0])
	for _, part := range parts[1:] {
		kv := strings.SplitN(strings.TrimSpace(part), "=", 2)
		if len(kv) != 2 {
			continue
		}
		switch strings.ToLower(strings.TrimSpace(kv[0])) {
		case "mp", "mountpoint":
			mountpoint = strings.TrimSpace(kv[1])
		}
	}
	return source, mountpoint
}
// parseOnbootValue interprets a config "onboot" value as a tri-state
// boolean: true for 1/yes/true, false for 0/no/false (case-insensitive),
// and nil when the value is empty or unrecognized.
func parseOnbootValue(value interface{}) *bool {
	text := strings.TrimSpace(fmt.Sprint(value))
	if text == "" {
		return nil
	}
	boolPtr := func(v bool) *bool { return &v }
	for _, truthy := range []string{"1", "yes", "true"} {
		if strings.EqualFold(text, truthy) {
			return boolPtr(true)
		}
	}
	for _, falsy := range []string{"0", "no", "false"} {
		if strings.EqualFold(text, falsy) {
			return boolPtr(false)
		}
	}
	return nil
}
func (e *PulseToolExecutor) executeSearchResources(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
if !e.hasReadState() {
return NewTextResult("State provider not available."), nil
}
rawQuery, _ := args["query"].(string)
query := strings.TrimSpace(rawQuery)
if query == "" {
return NewErrorResult(fmt.Errorf("query is required")), nil
}
typeFilter, _ := args["type"].(string)
// Map resource_type to type for search
if typeFilter == "" {
typeFilter, _ = args["resource_type"].(string)
}
typeFilter = canonicalQuerySearchType(typeFilter)
statusFilter, _ := args["status"].(string)
limit := intArg(args, "limit", 20)
offset := intArg(args, "offset", 0)
if limit <= 0 {
limit = 20
}
if offset < 0 {
offset = 0
}
allowedTypes := map[string]bool{
"": true,
"agent": true,
"node": true,
"vm": true,
"system-container": true,
"app-container": true,
"docker-host": true,
"storage": true,
"physical-disk": true,
}
if !allowedTypes[typeFilter] {
return NewErrorResult(fmt.Errorf("invalid type: %s. Use agent, node, vm, system-container, app-container, docker-host, storage, or physical-disk (compatibility aliases system and storage-pool are also accepted)", typeFilter)), nil
}
// normalizeForSearch replaces common separators with spaces and splits at
// alpha↔numeric boundaries for fuzzy matching. This allows queries like
// "LXC112", "VM100", "CT201" to match VMID candidates ("112", "100", "201").
normalizeForSearch := func(s string) string {
s = strings.ToLower(s)
s = strings.ReplaceAll(s, "-", " ")
s = strings.ReplaceAll(s, "_", " ")
s = strings.ReplaceAll(s, ".", " ")
// Split at alpha↔numeric boundaries: "lxc112" → "lxc 112"
var buf strings.Builder
buf.Grow(len(s) + 4)
for i, ch := range s {
if i > 0 {
prev := s[i-1]
cur := byte(ch)
if (prev >= 'a' && prev <= 'z' && cur >= '0' && cur <= '9') ||
(prev >= '0' && prev <= '9' && cur >= 'a' && cur <= 'z') {
buf.WriteByte(' ')
}
}
buf.WriteRune(ch)
}
return buf.String()
}
matchesQuery := func(query string, candidates ...string) bool {
queryNorm := normalizeForSearch(query)
queryWords := strings.Fields(queryNorm)
for _, candidate := range candidates {
if candidate == "" {
continue
}
candidateNorm := normalizeForSearch(candidate)
// Direct substring match (normalized)
if strings.Contains(candidateNorm, queryNorm) {
return true
}
// All query words must be present in candidate
if len(queryWords) > 0 {
allMatch := true
for _, word := range queryWords {
if !strings.Contains(candidateNorm, word) {
allMatch = false
break
}
}
if allMatch {
return true
}
}
}
return false
}
queryLower := strings.ToLower(query)
rs, err := e.readStateForControl()
if err != nil {
return NewErrorResult(err), nil
}
governance := newGovernedQueryMetadataResolver(rs)
dockerHostsByID := make(map[string]*unifiedresources.DockerHostView)
for _, host := range rs.DockerHosts() {
if host == nil {
continue
}
dockerHostsByID[host.ID()] = host
}
systemResources := []unifiedresources.Resource{}
vmResources := []unifiedresources.Resource{}
systemContainerResources := []unifiedresources.Resource{}
appContainerResources := []unifiedresources.Resource{}
storagePoolResources := []unifiedresources.Resource{}
physicalDiskResources := []unifiedresources.Resource{}
if e.unifiedResourceProvider != nil {
systemResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeAgent))
vmResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeVM))
systemContainerResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeSystemContainer))
appContainerResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeAppContainer))
physicalDiskResources = sortedResourcesByName(e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypePhysicalDisk))
storagePoolResources = canonicalStoragePoolResources(e.unifiedResourceProvider)
}
// Build a set of connected agent hostnames for quick lookup
connectedAgentHostnames := make(map[string]bool)
if e.agentServer != nil {
for _, agent := range e.agentServer.GetConnectedAgents() {
connectedAgentHostnames[agent.Hostname] = true
}
}
matches := make([]ResourceMatch, 0, limit)
total := 0
addMatch := func(match ResourceMatch) {
if total < offset {
total++
return
}
if len(matches) >= limit {
total++
return
}
matches = append(matches, match)
total++
}
if typeFilter == "" || typeFilter == "agent" {
for _, resource := range systemResources {
status := string(resource.Status)
if !statusMatchesFilter(status, statusFilter) {
continue
}
candidates := canonicalAgentSearchCandidates(resource)
if !matchesQuery(queryLower, candidates...) {
continue
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(candidates...),
Type: "agent",
ID: resource.ID,
Name: resourceDisplayName(resource),
Status: status,
Host: canonicalAgentHost(resource),
Platform: canonicalResourcePlatform(resource),
AgentConnected: resourceAgentConnected(resource, connectedAgentHostnames),
})
}
}
if typeFilter == "" || typeFilter == "node" {
for _, node := range rs.Nodes() {
status := string(node.Status())
if !statusMatchesFilter(status, statusFilter) {
continue
}
if !matchesQuery(queryLower, node.Name(), node.ID()) {
continue
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(node.Name(), node.ID()),
Type: "node",
Name: node.Name(),
Status: status,
AgentConnected: connectedAgentHostnames[node.Name()],
})
}
}
if (typeFilter == "" || typeFilter == "vm") && len(vmResources) > 0 {
for _, resource := range vmResources {
status := string(resource.Status)
if !statusMatchesFilter(status, statusFilter) {
continue
}
candidates := canonicalGuestSearchCandidates("vm", resource)
if !matchesQuery(queryLower, candidates...) {
continue
}
vmid := 0
if resource.Proxmox != nil && resource.Proxmox.VMID > 0 {
vmid = resource.Proxmox.VMID
}
node := canonicalGuestTarget(resource)
metadataCandidates := append([]string{resourceDisplayName(resource), resource.ID}, candidates...)
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(metadataCandidates...),
Type: "vm",
ID: resource.ID,
Name: resourceDisplayName(resource),
Status: status,
Node: node,
NodeHasAgent: connectedAgentHostnames[node],
Platform: canonicalResourcePlatform(resource),
VMID: vmid,
AgentConnected: resourceAgentConnected(resource, connectedAgentHostnames),
})
}
} else if typeFilter == "" || typeFilter == "vm" {
for _, vm := range rs.VMs() {
status := string(vm.Status())
if !statusMatchesFilter(status, statusFilter) {
continue
}
// Build searchable candidates: name, ID, VMID, canonical type-prefixed VMID, IPs, tags
vmidStr := fmt.Sprintf("%d", vm.VMID())
candidates := []string{vm.Name(), vm.ID(), vmidStr, "vm" + vmidStr}
candidates = append(candidates, vm.IPAddresses()...)
candidates = append(candidates, vm.Tags()...)
if !matchesQuery(queryLower, candidates...) {
continue
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(vm.Name(), vm.ID(), vmidStr),
Type: "vm",
ID: vm.ID(),
Name: vm.Name(),
Status: status,
Node: vm.Node(),
NodeHasAgent: connectedAgentHostnames[vm.Node()],
Platform: "proxmox",
VMID: vm.VMID(),
AgentConnected: connectedAgentHostnames[vm.Name()],
})
}
}
if (typeFilter == "" || typeFilter == "system-container") && len(systemContainerResources) > 0 {
for _, resource := range systemContainerResources {
status := string(resource.Status)
if !statusMatchesFilter(status, statusFilter) {
continue
}
candidates := canonicalGuestSearchCandidates("system-container", resource)
if !matchesQuery(queryLower, candidates...) {
continue
}
vmid := 0
if resource.Proxmox != nil && resource.Proxmox.VMID > 0 {
vmid = resource.Proxmox.VMID
}
node := canonicalGuestTarget(resource)
metadataCandidates := append([]string{resourceDisplayName(resource), resource.ID}, candidates...)
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(metadataCandidates...),
Type: "system-container",
ID: resource.ID,
Name: resourceDisplayName(resource),
Status: status,
Node: node,
NodeHasAgent: connectedAgentHostnames[node],
Platform: canonicalResourcePlatform(resource),
VMID: vmid,
AgentConnected: resourceAgentConnected(resource, connectedAgentHostnames),
})
}
} else if typeFilter == "" || typeFilter == "system-container" {
for _, ct := range rs.Containers() {
status := string(ct.Status())
if !statusMatchesFilter(status, statusFilter) {
continue
}
// Build searchable candidates: name, ID, VMID, canonical type-prefixed VMID, IPs, tags
vmidStr := fmt.Sprintf("%d", ct.VMID())
candidates := []string{ct.Name(), ct.ID(), vmidStr, "system-container" + vmidStr}
candidates = append(candidates, ct.IPAddresses()...)
candidates = append(candidates, ct.Tags()...)
if !matchesQuery(queryLower, candidates...) {
continue
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(ct.Name(), ct.ID(), vmidStr),
Type: "system-container",
ID: ct.ID(),
Name: ct.Name(),
Status: status,
Node: ct.Node(),
NodeHasAgent: connectedAgentHostnames[ct.Node()],
Platform: "proxmox",
VMID: ct.VMID(),
AgentConnected: connectedAgentHostnames[ct.Name()],
})
}
}
if typeFilter == "" || typeFilter == "docker-host" {
for _, host := range rs.DockerHosts() {
if host == nil {
continue
}
status := string(host.Status())
if statusFilter != "" && !strings.EqualFold(status, statusFilter) {
continue
}
if !matchesQuery(queryLower, host.ID(), host.HostSourceID(), host.Hostname(), host.Name()) {
continue
}
hostID := strings.TrimSpace(host.HostSourceID())
if hostID == "" {
hostID = strings.TrimSpace(host.ID())
}
displayName := strings.TrimSpace(host.Name())
if displayName == "" {
displayName = strings.TrimSpace(host.Hostname())
}
hostName := strings.TrimSpace(host.Hostname())
if hostName == "" {
hostName = displayName
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(host.Hostname(), host.Name(), host.HostSourceID(), host.ID()),
Type: "docker-host",
ID: hostID,
Name: displayName,
Status: status,
Host: hostName,
})
}
}
if typeFilter == "" || typeFilter == "app-container" {
for _, resource := range appContainerResources {
status := canonicalAppContainerState(resource)
if !statusMatchesFilter(status, statusFilter) {
continue
}
containerID := canonicalAppContainerID(resource)
candidates := []string{
resourceDisplayName(resource),
resource.ID,
containerID,
canonicalAppContainerHost(resource),
canonicalResourcePlatform(resource),
}
if resource.Docker != nil {
candidates = append(candidates, resource.Docker.Image)
for key, value := range resource.Docker.Labels {
candidates = append(candidates, key, value)
}
for _, network := range resource.Docker.Networks {
candidates = append(candidates, network.IPv4, network.IPv6, network.Name)
}
}
candidates = append(candidates, resource.Tags...)
if !matchesQuery(queryLower, candidates...) {
continue
}
summary := canonicalAppContainerSummaryFromResource(resource)
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(resourceDisplayName(resource), resource.ID, containerID),
Type: "app-container",
ID: summary.ID,
Name: summary.Name,
Status: summary.Status,
Host: summary.Host,
Platform: summary.Platform,
Image: summary.Image,
})
}
}
if (typeFilter == "" || typeFilter == "app-container") && len(appContainerResources) == 0 {
for _, container := range rs.DockerContainers() {
if container == nil {
continue
}
state := strings.TrimSpace(container.ContainerState())
if state == "" {
state = string(container.Status())
}
if statusFilter != "" && !strings.EqualFold(state, statusFilter) {
continue
}
containerID := strings.TrimSpace(container.ContainerID())
if containerID == "" {
containerID = strings.TrimSpace(container.ID())
}
// Build searchable candidates: name, ID, image, IPs
candidates := []string{container.Name(), containerID, container.Image()}
for _, network := range container.Networks() {
if network.IPv4 != "" {
candidates = append(candidates, network.IPv4)
}
if network.IPv6 != "" {
candidates = append(candidates, network.IPv6)
}
}
if !matchesQuery(queryLower, candidates...) {
continue
}
hostName := ""
if host := dockerHostsByID[strings.TrimSpace(container.ParentID())]; host != nil {
hostName = strings.TrimSpace(host.Hostname())
if hostName == "" {
hostName = strings.TrimSpace(host.Name())
}
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(container.Name(), container.ID(), containerID),
Type: "app-container",
ID: containerID,
Name: container.Name(),
Status: state,
Host: hostName,
Image: container.Image(),
})
}
}
if typeFilter == "" || typeFilter == "storage" {
for _, resource := range storagePoolResources {
status := string(resource.Status)
if !statusMatchesFilter(status, statusFilter) {
continue
}
candidates := canonicalStorageSearchCandidates(resource)
if !matchesQuery(queryLower, candidates...) {
continue
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(candidates...),
Type: "storage",
ID: resource.ID,
Name: resourceDisplayName(resource),
Status: status,
Host: canonicalStorageHost(resource),
Platform: canonicalResourcePlatform(resource),
})
}
}
if typeFilter == "" || typeFilter == "physical-disk" {
for _, resource := range physicalDiskResources {
status := string(resource.Status)
if !statusMatchesFilter(status, statusFilter) {
continue
}
summary := physicalDiskSummaryFromResource(resource)
candidates := []string{
resourceDisplayName(resource),
resource.ID,
summary.DevPath,
summary.Model,
summary.Serial,
summary.WWN,
summary.Type,
summary.Node,
strings.TrimSpace(resource.ParentName),
canonicalResourcePlatform(resource),
}
candidates = append(candidates, resource.Tags...)
if !matchesQuery(queryLower, candidates...) {
continue
}
addMatch(ResourceMatch{
GovernedResourceMetadata: governance.Resolve(resourceDisplayName(resource), resource.ID, summary.Serial, summary.DevPath),
Type: "physical-disk",
ID: resource.ID,
Name: resourceDisplayName(resource),
Status: status,
Host: summary.Node,
Platform: canonicalResourcePlatform(resource),
})
}
}
response := EmptyResourceSearchResponse()
response.Query = query
response.Matches = matches
response.Total = total
if offset > 0 || total > limit {
response.Pagination = &PaginationInfo{
Total: total,
Limit: limit,
Offset: offset,
}
}
// Register all found resources in the resolved context
// This enables action tools to validate that commands target legitimate resources
for _, match := range matches {
var reg ResourceRegistration
switch match.Type {
case "agent":
if resource, ok := findCanonicalResourceByID(systemResources, match.ID); ok {
reg, _ = canonicalAgentRegistration(resource)
} else {
reg = ResourceRegistration{
Kind: "agent",
ProviderUID: strings.TrimPrefix(match.ID, "agent:"),
Name: match.Name,
Aliases: []string{match.Name, match.ID, match.Host},
Executors: []ExecutorRegistration{{
ExecutorID: firstNonEmptyString(match.Host, match.Name, match.ID),
Adapter: match.Platform,
Actions: readOnlyResourceActions(),
Priority: 10,
}},
}
}
case "node":
reg = ResourceRegistration{
Kind: "node",
ProviderUID: match.Name, // Node name is the identifier used for routing
Name: match.Name,
Aliases: []string{match.Name},
HostName: match.Name,
LocationChain: []string{"node:" + match.Name},
Executors: []ExecutorRegistration{{
ExecutorID: match.Name,
Adapter: "direct",
Actions: []string{"query", "get", "exec"},
Priority: 10,
}},
}
case "vm":
if resource, ok := findCanonicalGuestResource(e.unifiedResourceProvider, "vm", match.ID); ok {
reg, _ = canonicalGuestRegistration("vm", resource)
} else if resource, ok := findCanonicalGuestResource(e.unifiedResourceProvider, "vm", match.Name); ok {
reg, _ = canonicalGuestRegistration("vm", resource)
} else {
reg = ResourceRegistration{
Kind: "vm",
ProviderUID: fmt.Sprintf("%d", match.VMID),
Name: match.Name,
Aliases: []string{match.Name, fmt.Sprintf("%d", match.VMID), match.ID},
HostUID: match.Node,
HostName: match.Node,
VMID: match.VMID,
Node: match.Node,
LocationChain: []string{"node:" + match.Node, "vm:" + match.Name},
Executors: []ExecutorRegistration{{
ExecutorID: match.Node,
Adapter: "qm",
Actions: guestExecutorActions(),
Priority: 10,
}},
}
}
case "system-container":
if resource, ok := findCanonicalGuestResource(e.unifiedResourceProvider, "system-container", match.ID); ok {
reg, _ = canonicalGuestRegistration("system-container", resource)
} else if resource, ok := findCanonicalGuestResource(e.unifiedResourceProvider, "system-container", match.Name); ok {
reg, _ = canonicalGuestRegistration("system-container", resource)
} else {
reg = ResourceRegistration{
Kind: "system-container",
ProviderUID: fmt.Sprintf("%d", match.VMID),
Name: match.Name,
Aliases: []string{match.Name, fmt.Sprintf("%d", match.VMID), match.ID},
HostUID: match.Node,
HostName: match.Node,
VMID: match.VMID,
Node: match.Node,
LocationChain: []string{"node:" + match.Node, "system-container:" + match.Name},
Executors: []ExecutorRegistration{{
ExecutorID: match.Node,
Adapter: "pct",
Actions: guestExecutorActions(),
Priority: 10,
}},
}
}
case "docker-host":
reg = ResourceRegistration{
Kind: "docker-host",
ProviderUID: match.ID,
Name: match.Name,
Aliases: []string{match.Name, match.ID, match.Host},
HostUID: match.Host,
HostName: match.Host,
LocationChain: []string{"docker-host:" + match.Host},
Executors: []ExecutorRegistration{{
ExecutorID: match.Host,
Adapter: "direct",
Actions: []string{"query", "get"},
Priority: 10,
}},
}
case "app-container":
reg = ResourceRegistration{
Kind: "app-container",
ProviderUID: match.ID, // Stable container/app provider ID
Name: match.Name,
Aliases: []string{match.Name, match.ID},
HostUID: match.Host,
HostName: match.Host,
}
if match.Platform == "docker" {
reg.LocationChain = []string{"docker-host:" + match.Host, "docker:" + match.Name}
reg.Executors = []ExecutorRegistration{{
ExecutorID: match.Host,
Adapter: "docker",
Actions: []string{"query", "get", "logs", "exec", "restart", "stop", "start"},
Priority: 10,
}}
} else if match.Platform == "truenas" {
reg.LocationChain = []string{"system:" + match.Host, "app:" + match.Name}
reg.Executors = []ExecutorRegistration{{
ExecutorID: match.Host,
Adapter: "truenas",
Actions: []string{"query", "get", "restart", "stop", "start"},
Priority: 10,
}}
}
case "storage":
if resource, ok := findCanonicalResourceByID(storagePoolResources, match.ID); ok {
reg, _ = canonicalStorageRegistration(resource)
} else {
reg = ResourceRegistration{
Kind: "storage",
ProviderUID: strings.TrimPrefix(match.ID, "storage:"),
Name: match.Name,
Aliases: []string{match.Name, match.ID},
Executors: []ExecutorRegistration{{
ExecutorID: firstNonEmptyString(match.Host, match.Name, match.ID),
Adapter: match.Platform,
Actions: readOnlyResourceActions(),
Priority: 10,
}},
}
}
case "physical-disk":
reg = ResourceRegistration{
Kind: "physical-disk",
ProviderUID: match.ID,
Name: match.Name,
Aliases: []string{match.Name, match.ID},
HostUID: match.Host,
HostName: match.Host,
}
default:
continue // Skip unknown types
}
e.registerResolvedResource(reg)
}
return NewJSONResult(response.NormalizeCollections()), nil
}
// executeGetConnectionHealth reports the connectivity of every monitored
// instance known to the connection health provider.
//
// Returns a JSON result with one ConnectionStatus per instance plus
// aggregate connected/disconnected counts, or a plain-text message when
// no provider or no data is available.
func (e *PulseToolExecutor) executeGetConnectionHealth(_ context.Context, _ map[string]interface{}) (CallToolResult, error) {
	if e.connectionHealth == nil {
		return NewTextResult("State provider not available."), nil
	}
	health := e.connectionHealth.GetConnectionHealth()
	if len(health) == 0 {
		return NewTextResult("No connection health data available."), nil
	}
	connections := make([]ConnectionStatus, 0, len(health))
	connected := 0
	disconnected := 0
	for instanceID, isConnected := range health {
		connections = append(connections, ConnectionStatus{
			InstanceID: instanceID,
			Connected:  isConnected,
		})
		if isConnected {
			connected++
		} else {
			disconnected++
		}
	}
	// Map iteration order is randomized; sort so consumers get a stable,
	// deterministic ordering across calls.
	sort.Slice(connections, func(i, j int) bool {
		return connections[i].InstanceID < connections[j].InstanceID
	})
	response := EmptyConnectionHealthResponse()
	response.Connections = connections
	response.Total = len(connections)
	response.Connected = connected
	response.Disconnected = disconnected
	return NewJSONResult(response.NormalizeCollections()), nil
}
// intArg extracts an integer argument from a tool-call argument map,
// returning defaultVal when the key is absent or the value cannot be
// interpreted as an integer.
//
// JSON decoding produces float64 for all numbers, so that is the common
// case; int/int64 cover programmatic callers, and numeric strings
// (e.g. "10") are parsed leniently since LLM tool calls often quote numbers.
func intArg(args map[string]interface{}, key string, defaultVal int) int {
	v, ok := args[key]
	if !ok {
		return defaultVal
	}
	switch val := v.(type) {
	case int:
		return val
	case float64:
		// Truncates any fractional part (standard Go conversion semantics).
		return int(val)
	case int64:
		return int(val)
	case string:
		if parsed, err := strconv.Atoi(strings.TrimSpace(val)); err == nil {
			return parsed
		}
	}
	return defaultVal
}
// statusMatchesFilter reports whether a resource status satisfies the
// requested status filter. Matching is case-insensitive and whitespace
// is ignored; an empty filter or "all" matches everything.
//
// The normalized lifecycle states ("online"/"offline") and the legacy
// Proxmox states ("running"/"stopped") are treated as interchangeable,
// since ReadState normalizes guest lifecycle while older Proxmox state
// still uses the running/stopped vocabulary.
func statusMatchesFilter(actual, filter string) bool {
	wantStatus := strings.ToLower(strings.TrimSpace(filter))
	if wantStatus == "" || wantStatus == "all" {
		return true
	}
	haveStatus := strings.ToLower(strings.TrimSpace(actual))
	switch {
	case haveStatus == wantStatus:
		return true
	case wantStatus == "running" && haveStatus == "online",
		wantStatus == "online" && haveStatus == "running",
		wantStatus == "stopped" && haveStatus == "offline",
		wantStatus == "offline" && haveStatus == "stopped":
		return true
	default:
		return false
	}
}