mirror of
https://github.com/rcourtman/Pulse.git
synced 2026-04-28 03:20:11 +00:00
1562 lines
52 KiB
Go
1562 lines
52 KiB
Go
package tools
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"errors"
|
|
"fmt"
|
|
"strconv"
|
|
"strings"
|
|
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/ai/approval"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/ai/safety"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/models"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/unifiedresources"
|
|
"github.com/rs/zerolog/log"
|
|
)
|
|
|
|
// registerControlTools registers the pulse_control tool
|
|
func (e *PulseToolExecutor) registerControlTools() {
|
|
e.registry.Register(RegisteredTool{
|
|
Definition: Tool{
|
|
Name: "pulse_control",
|
|
Description: `WRITE operations: control canonical resources that explicitly advertise shared Pulse actions (for example Proxmox guests and supported app-containers) or execute state-modifying commands. Some canonical resources are read-only and will reject pulse_control even when their type is vm or system-container. For read-only operations use pulse_read. For Docker-only workflows use pulse_docker.`,
|
|
InputSchema: InputSchema{
|
|
Type: "object",
|
|
Properties: map[string]PropertySchema{
|
|
"type": {
|
|
Type: "string",
|
|
Description: "Control type: guest, resource, or command",
|
|
Enum: []string{"guest", "resource", "command"},
|
|
},
|
|
"guest_id": {
|
|
Type: "string",
|
|
Description: "For guest: VMID or name",
|
|
},
|
|
"resource_id": {
|
|
Type: "string",
|
|
Description: "For resource: discovered resource name or canonical resource ID from pulse_query",
|
|
},
|
|
"action": {
|
|
Type: "string",
|
|
Description: "For guest/resource: start, stop, shutdown, restart, delete (availability depends on the resolved resource's shared action set)",
|
|
Enum: []string{"start", "stop", "shutdown", "restart", "delete"},
|
|
},
|
|
"command": {
|
|
Type: "string",
|
|
Description: "For command type: the shell command to execute",
|
|
},
|
|
"target_host": {
|
|
Type: "string",
|
|
Description: "For command type: hostname to run command on",
|
|
},
|
|
"run_on_host": {
|
|
Type: "boolean",
|
|
Description: "For command type: run on host (default true)",
|
|
},
|
|
"force": {
|
|
Type: "boolean",
|
|
Description: "For guest stop: force stop without graceful shutdown",
|
|
},
|
|
},
|
|
Required: []string{"type"},
|
|
},
|
|
},
|
|
Handler: func(ctx context.Context, exec *PulseToolExecutor, args map[string]interface{}) (CallToolResult, error) {
|
|
return exec.executeControl(ctx, args)
|
|
},
|
|
RequireControl: true,
|
|
Governance: ToolGovernance{
|
|
ActionMode: ToolActionWrite,
|
|
ApprovalPolicy: "hidden in read-only mode; approval required in controlled mode",
|
|
Summary: "Runs shared Pulse control actions and state-changing commands only against resources that advertise supported actions.",
|
|
},
|
|
})
|
|
}
|
|
|
|
// executeControl routes to the appropriate control handler based on type
|
|
func (e *PulseToolExecutor) executeControl(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
|
|
controlType, _ := args["type"].(string)
|
|
switch controlType {
|
|
case "guest":
|
|
return e.executeControlGuest(ctx, args)
|
|
case "resource":
|
|
return e.executeControlResource(ctx, args)
|
|
case "command":
|
|
return e.executeRunCommand(ctx, args)
|
|
default:
|
|
return NewErrorResult(fmt.Errorf("unknown type: %s. Use: guest, resource, command", controlType)), nil
|
|
}
|
|
}
|
|
|
|
// executeControlResource handles pulse_control type=resource: it looks up a
// previously discovered canonical resource and dispatches the requested
// action to the kind-specific control path (guest control for Proxmox
// vm/system-container, Docker or native TrueNAS control for app-containers).
func (e *PulseToolExecutor) executeControlResource(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
	resourceRef, _ := args["resource_id"].(string)
	resourceRef = strings.TrimSpace(resourceRef)
	action, _ := args["action"].(string)
	action = strings.TrimSpace(action)
	// _approval_id is injected by the agentic loop when re-executing after a
	// user approved the action; it is forwarded to the kind handlers.
	approvalID, _ := args["_approval_id"].(string)
	approvalID = strings.TrimSpace(approvalID)

	if resourceRef == "" {
		return NewErrorResult(fmt.Errorf("resource_id is required")), nil
	}
	if action == "" {
		return NewErrorResult(fmt.Errorf("action is required")), nil
	}

	// Require the resource to exist in this session's resolved context.
	validation := e.validateResolvedResource(resourceRef, action, true)
	if validation.IsBlocked() {
		// Hard (strict-mode) failure - return the structured error envelope.
		return NewToolResponseResult(validation.StrictError.ToToolResponse()), nil
	}
	if validation.Resource == nil {
		if validation.ErrorMsg != "" {
			return NewErrorResult(errors.New(validation.ErrorMsg)), nil
		}
		// Not an error from validation, just never discovered: point the model
		// at pulse_query instead of guessing.
		return NewErrorResult(fmt.Errorf("resource '%s' has not been discovered in this session. Use pulse_query to find it first", resourceRef)), nil
	}
	if validation.ErrorMsg != "" {
		// Resource resolved but validation still reported an error
		// (e.g. the action is not in the resource's advertised action set).
		return NewErrorResult(errors.New(validation.ErrorMsg)), nil
	}

	resolved := validation.Resource
	switch resolved.GetKind() {
	case "vm", "system-container":
		// Proxmox guests share the guest-control path; pass the most stable
		// identity (provider UID / alias / original ref) as guest_id.
		guestArgs := map[string]interface{}{
			"guest_id": resolvedResourceControlIdentity(resolved, resourceRef),
			"action":   action,
		}
		if force, ok := args["force"].(bool); ok {
			guestArgs["force"] = force
		}
		if approvalID != "" {
			guestArgs["_approval_id"] = approvalID
		}
		return e.executeControlGuest(ctx, guestArgs)

	case "app-container":
		// App-containers route by their control adapter.
		switch strings.ToLower(strings.TrimSpace(resolved.GetAdapter())) {
		case "docker":
			dockerArgs := map[string]interface{}{
				"container": resolvedResourceControlIdentity(resolved, resourceRef),
				"host":      strings.TrimSpace(resolved.GetTargetHost()),
				"operation": action,
			}
			if approvalID != "" {
				dockerArgs["_approval_id"] = approvalID
			}
			return e.executeDockerControl(ctx, dockerArgs)
		case "truenas":
			return e.executeNativeAppContainerControl(ctx, resolved, action, approvalID)
		default:
			return NewErrorResult(fmt.Errorf("resource '%s' uses unsupported control adapter %q", resourceRef, resolved.GetAdapter())), nil
		}
	}

	// Any other kind (nodes, storage, etc.) is read-only from this tool.
	return NewErrorResult(fmt.Errorf("resource '%s' of kind %q is not controllable through pulse_control type=resource", resourceRef, resolved.GetKind())), nil
}
|
|
|
|
// executeNativeAppContainerControl runs start/stop/restart on an app-container
// through the native provider (currently TrueNAS), applying the same policy,
// approval, and audit flow used for shell command execution.
//
// approvalID, when non-empty, refers to a previously created approval record
// that is validated and consumed before the approval gates are bypassed.
func (e *PulseToolExecutor) executeNativeAppContainerControl(ctx context.Context, resource ResolvedResourceInfo, action string, approvalID string) (CallToolResult, error) {
	if resource == nil {
		return NewErrorResult(fmt.Errorf("resolved resource is nil")), nil
	}
	if e.appContainerActionProvider == nil {
		return NewErrorResult(fmt.Errorf("native app-container control provider is not available")), nil
	}

	validActions := map[string]bool{"start": true, "stop": true, "restart": true}
	if !validActions[action] {
		return NewErrorResult(fmt.Errorf("invalid app-container action: %s. Use start, stop, or restart", action)), nil
	}
	if e.controlLevel == ControlLevelReadOnly {
		return NewTextResult("Resource control actions are not available in read-only mode."), nil
	}

	resourceName := resolvedResourceDisplayName(resource)
	resourceHost := strings.TrimSpace(resource.GetTargetHost())
	// Synthetic command string used for policy evaluation and approval binding;
	// there is no real shell command for native provider actions.
	command := fmt.Sprintf("truenas app %s %s", action, resourceName)
	if resourceHost != "" {
		command = fmt.Sprintf("%s on %s", command, resourceHost)
	}
	approvalTargetType := "app-container"
	approvalTargetID := strings.TrimSpace(resource.GetResourceID())
	if approvalTargetID == "" {
		// Fall back to the provider UID so approvals still bind to a stable ID.
		approvalTargetID = strings.TrimSpace(resource.GetProviderUID())
	}

	// Validate and consume a pre-existing approval (command-bound, single-use).
	preApproved := consumeApprovalWithValidation(map[string]interface{}{"_approval_id": approvalID}, e.orgID, command, approvalTargetType, approvalTargetID)
	decision := agentexec.PolicyAllow
	if e.policy != nil {
		decision = e.policy.Evaluate(command)
		if decision == agentexec.PolicyBlock {
			// Policy blocks can never be pre-approved.
			return NewTextResult(formatPolicyBlocked(command, "This action is blocked by security policy")), nil
		}
	}
	requiresApproval := !e.isAutonomous && (e.controlLevel == ControlLevelControlled || decision == agentexec.PolicyRequireApproval)

	// Policy-driven approval gate: record the request and ask the user.
	if !preApproved && decision == agentexec.PolicyRequireApproval && !e.isAutonomous {
		approvalID = createApprovalRecordForOrg(e.orgID, command, approvalTargetType, approvalTargetID, resourceName, fmt.Sprintf("%s app-container %s", action, resourceName))
		return NewTextResult(formatAppContainerApprovalNeeded(resourceName, resourceHost, action, command, approvalID)), nil
	}
	// Control-level gate: controlled mode requires approval for all writes.
	if !preApproved && e.controlLevel == ControlLevelControlled {
		approvalID = createApprovalRecordForOrg(e.orgID, command, approvalTargetType, approvalTargetID, resourceName, fmt.Sprintf("%s app-container %s", action, resourceName))
		return NewTextResult(formatAppContainerApprovalNeeded(resourceName, resourceHost, action, command, approvalID)), nil
	}

	// Execute through the provider, wrapped in audit logging.
	var actionResult *AppContainerActionResult
	executionResult, err := e.executeNativeActionWithAudit(
		ctx,
		"pulse_control",
		strings.TrimSpace(resource.GetResourceID()),
		approvalID,
		requiresApproval,
		map[string]any{
			"action":      action,
			"kind":        resource.GetKind(),
			"providerUid": resource.GetProviderUID(),
			"host":        resourceHost,
			"platform":    "truenas",
			"approvalId":  approvalID,
			"requestedBy": "pulse_control",
		},
		"pulse_control",
		fmt.Sprintf("%s app-container %s", action, resourceName),
		func(ctx context.Context) (*unifiedresources.ExecutionResult, error) {
			result, err := e.appContainerActionProvider.ExecuteAction(ctx, AppContainerActionRequest{
				OrgID:       e.orgID,
				ResourceID:  strings.TrimSpace(resource.GetResourceID()),
				ProviderUID: strings.TrimSpace(resource.GetProviderUID()),
				Name:        resourceName,
				Host:        resourceHost,
				Platform:    "truenas",
				Action:      action,
			})
			if err != nil {
				return nil, err
			}
			// Captured for the structured response built below.
			actionResult = result
			return &unifiedresources.ExecutionResult{
				Success: true,
				Output:  strings.TrimSpace(result.Output),
			}, nil
		},
	)
	if err != nil {
		return NewErrorResult(err), nil
	}
	// Defensive: the audit wrapper may report success without the callback
	// having populated actionResult; synthesize one from known fields.
	if actionResult == nil {
		actionResult = &AppContainerActionResult{
			ResourceID:  strings.TrimSpace(resource.GetResourceID()),
			ProviderUID: strings.TrimSpace(resource.GetProviderUID()),
			Name:        resourceName,
			Host:        resourceHost,
			Platform:    "truenas",
			Action:      action,
			Output:      strings.TrimSpace(executionResult.Output),
		}
	}
	if strings.TrimSpace(actionResult.Output) == "" {
		actionResult.Output = strings.TrimSpace(executionResult.Output)
	}

	// Structured JSON response; isError mirrors execution success.
	response := map[string]interface{}{
		"success":       executionResult.Success,
		"type":          "resource",
		"resource_id":   actionResult.ResourceID,
		"resource":      actionResult.Name,
		"resource_type": resource.GetKind(),
		"provider_uid":  actionResult.ProviderUID,
		"platform":      actionResult.Platform,
		"host":          actionResult.Host,
		"action":        actionResult.Action,
		"status":        actionResult.Status,
		"output":        actionResult.Output,
		"verification": map[string]interface{}{
			"ok":              executionResult.Success,
			"method":          "provider_refresh",
			"observed_status": actionResult.Status,
		},
	}
	return NewJSONResultWithIsError(response, !executionResult.Success), nil
}
|
|
|
|
func resolvedResourceControlIdentity(resource ResolvedResourceInfo, fallback string) string {
|
|
if resource == nil {
|
|
return strings.TrimSpace(fallback)
|
|
}
|
|
if providerUID := strings.TrimSpace(resource.GetProviderUID()); providerUID != "" {
|
|
return providerUID
|
|
}
|
|
for _, alias := range resource.GetAliases() {
|
|
alias = strings.TrimSpace(alias)
|
|
if alias != "" {
|
|
return alias
|
|
}
|
|
}
|
|
return strings.TrimSpace(fallback)
|
|
}
|
|
|
|
func resolvedResourceDisplayName(resource ResolvedResourceInfo) string {
|
|
if resource == nil {
|
|
return ""
|
|
}
|
|
aliases := resource.GetAliases()
|
|
if len(aliases) > 0 {
|
|
if name := strings.TrimSpace(aliases[0]); name != "" {
|
|
return name
|
|
}
|
|
}
|
|
if providerUID := strings.TrimSpace(resource.GetProviderUID()); providerUID != "" {
|
|
return providerUID
|
|
}
|
|
return strings.TrimSpace(resource.GetResourceID())
|
|
}
|
|
|
|
// executeRunCommand handles pulse_control type=command: it validates the
// target against the resolved context, applies security policy and approval
// gates, routes the command to the correct agent (directly, or via
// pct exec / qm guest exec for guests), executes it with audit logging, and
// returns a structured JSON result with execution provenance.
func (e *PulseToolExecutor) executeRunCommand(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
	command, _ := args["command"].(string)
	targetHost, _ := args["target_host"].(string)
	approvalID, _ := args["_approval_id"].(string)
	approvalID = strings.TrimSpace(approvalID)

	if command == "" {
		return NewErrorResult(fmt.Errorf("command is required")), nil
	}

	// Validate resource is in resolved context
	// Uses command risk classification: read-only commands bypass strict mode
	// With PULSE_STRICT_RESOLUTION=true, write commands are blocked on undiscovered resources
	if targetHost != "" {
		validation := e.validateResolvedResourceForExec(targetHost, command, true)
		if validation.IsBlocked() {
			// Hard validation failure - return consistent error envelope
			return NewToolResponseResult(validation.StrictError.ToToolResponse()), nil
		}
		if validation.ErrorMsg != "" {
			// Soft validation - log warning but allow operation
			log.Warn().
				Str("target", targetHost).
				Str("command", command).
				Str("validation_error", validation.ErrorMsg).
				Msg("[Control] Target resource not in resolved context - may indicate model hallucination")
		}

		// Validate routing context - block if targeting a host node when child resources exist
		// This prevents accidentally executing commands on the host when user meant to target a container/VM
		routingResult := e.validateRoutingContext(targetHost)
		if routingResult.IsBlocked() {
			return NewToolResponseResult(routingResult.RoutingError.ToToolResponse()), nil
		}
	}

	// Note: Control level read_only check is now centralized in registry.Execute()

	// Check security policy (skip block check - blocks cannot be pre-approved)
	decision := agentexec.PolicyAllow
	if e.policy != nil {
		decision = e.policy.Evaluate(command)
		if decision == agentexec.PolicyBlock {
			return NewTextResult(formatPolicyBlocked(command, "This command is blocked by security policy")), nil
		}
	}

	requiresApproval := !e.isAutonomous && (e.controlLevel == ControlLevelControlled || decision == agentexec.PolicyRequireApproval)

	// Ambiguous target: with multiple connected agents and no target_host,
	// ask for an explicit host rather than guessing.
	if targetHost == "" && e.agentServer != nil {
		agents := e.agentServer.GetConnectedAgents()
		if len(agents) > 1 {
			return NewTextResult(formatTargetHostRequired(agents)), nil
		}
	}

	// Execute via agent server
	if e.agentServer == nil {
		return NewErrorResult(fmt.Errorf("no agent server available")), nil
	}

	// Resolve target to the correct agent and routing info (with full provenance)
	// If targetHost is a container/VM name, this routes to the host node agent
	// with the correct TargetType and TargetID for pct exec / qm guest exec
	routing := e.resolveTargetForCommandFull(targetHost)
	if routing.AgentID == "" {
		if targetHost != "" {
			if routing.TargetType == "container" || routing.TargetType == "vm" {
				return NewErrorResult(fmt.Errorf("'%s' is a %s but no agent is available on its host node; install Pulse Unified Agent on the node", targetHost, routing.TargetType)), nil
			}
			return NewErrorResult(fmt.Errorf("no agent available for target '%s'. %s", targetHost, formatAvailableAgentHosts(e.agentServer.GetConnectedAgents()))), nil
		}
		return NewErrorResult(fmt.Errorf("no agent available for target")), nil
	}

	approvalTargetType, approvalTargetID, approvalTargetName := approvalTargetForCommand(targetHost, routing)

	// Check if this is a pre-approved execution with command hash validation.
	// This validates the approval matches this exact command+target and marks it as consumed.
	preApproved := consumeApprovalWithValidation(args, e.orgID, command, approvalTargetType, approvalTargetID)

	// Skip approval checks if pre-approved or in autonomous mode.
	if !preApproved && !e.isAutonomous && e.controlLevel == ControlLevelControlled {
		// Shadowed approvalID is fine here: we return immediately.
		approvalID := createApprovalRecordForOrg(e.orgID, command, approvalTargetType, approvalTargetID, approvalTargetName, "Control level requires approval")
		return NewTextResult(formatApprovalNeeded(command, "Control level requires approval", approvalID)), nil
	}
	if e.isAutonomous {
		log.Debug().
			Str("command", command).
			Bool("read_only", safety.IsReadOnlyCommand(command)).
			Msg("Auto-approving command for autonomous investigation")
	}
	if !preApproved && decision == agentexec.PolicyRequireApproval && !e.isAutonomous {
		approvalID := createApprovalRecordForOrg(e.orgID, command, approvalTargetType, approvalTargetID, approvalTargetName, "Security policy requires approval")
		return NewTextResult(formatApprovalNeeded(command, "Security policy requires approval", approvalID)), nil
	}

	log.Debug().
		Str("target_host", targetHost).
		Str("agent_id", routing.AgentID).
		Str("agent_host", routing.AgentHostname).
		Str("resolved_kind", routing.ResolvedKind).
		Str("resolved_node", routing.ResolvedNode).
		Str("transport", routing.Transport).
		Str("target_type", routing.TargetType).
		Str("target_id", routing.TargetID).
		Msg("[pulse_control] Routing command execution")

	result, err := e.executeCommandWithAudit(
		ctx,
		"pulse_control",
		// Audit target name: prefer the caller-supplied host, else the agent's.
		func() string {
			if targetHost != "" {
				return targetHost
			}
			return routing.AgentHostname
		}(),
		approvalID,
		requiresApproval,
		routing.AgentID,
		agentexec.ExecuteCommandPayload{
			Command:    command,
			TargetType: routing.TargetType,
			TargetID:   routing.TargetID,
		},
		"pulse_control",
		fmt.Sprintf("run command %q on %s", command, func() string {
			if strings.TrimSpace(targetHost) != "" {
				return strings.TrimSpace(targetHost)
			}
			return strings.TrimSpace(routing.AgentHostname)
		}()),
	)
	if err != nil {
		return NewErrorResult(err), nil
	}

	// Combine stdout/stderr, then redact detected secrets before returning
	// output to the model.
	output := result.Stdout
	if result.Stderr != "" {
		output += "\n" + result.Stderr
	}
	if redacted, n := safety.RedactSensitiveText(output); n > 0 {
		output = redacted + fmt.Sprintf("\n\n[redacted %d sensitive value(s)]", n)
	}

	success := result.ExitCode == 0
	response := map[string]interface{}{
		"success":      success,
		"type":         "command",
		"command":      command,
		"target_host":  targetHost,
		"exit_code":    result.ExitCode,
		"output":       output,
		"execution":    buildExecutionProvenance(targetHost, routing),
		"verification": map[string]interface{}{"ok": success, "method": "exit_code", "exit_code": result.ExitCode},
	}
	return NewJSONResultWithIsError(response, !success), nil
}
|
|
|
|
// approvalTargetForCommand derives stable approval binding fields from resolved routing.
|
|
// This ensures replay protection hashes match the actual execution target.
|
|
func approvalTargetForCommand(targetHost string, routing CommandRoutingResult) (targetType, targetID, targetName string) {
|
|
targetType = routing.TargetType
|
|
targetID = strings.TrimSpace(routing.TargetID)
|
|
|
|
if targetType == "" {
|
|
targetType = "agent"
|
|
}
|
|
|
|
if targetType == "agent" {
|
|
// Agent-level executions do not carry routing.TargetID in ExecuteCommand payload.
|
|
// Use agent ID to bind approvals to a specific connected agent.
|
|
targetID = strings.TrimSpace(routing.AgentID)
|
|
if targetID == "" {
|
|
targetID = strings.TrimSpace(targetHost)
|
|
}
|
|
} else if targetID == "" {
|
|
// Defensive fallback for unexpected missing IDs.
|
|
targetID = strings.TrimSpace(targetHost)
|
|
}
|
|
|
|
targetName = strings.TrimSpace(targetHost)
|
|
if targetName == "" {
|
|
targetName = strings.TrimSpace(routing.AgentHostname)
|
|
}
|
|
if targetName == "" {
|
|
targetName = strings.TrimSpace(routing.AgentID)
|
|
}
|
|
|
|
return targetType, targetID, targetName
|
|
}
|
|
|
|
// executeControlGuest handles pulse_control type=guest: lifecycle actions
// (start/stop/shutdown/restart/delete) on a Proxmox VM or LXC container.
// The action is translated into the matching qm/pct command, gated by
// protection lists, security policy, and approvals, executed on the agent
// running on the guest's node, and then verified by re-reading guest status.
func (e *PulseToolExecutor) executeControlGuest(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
	guestID, _ := args["guest_id"].(string)
	action, _ := args["action"].(string)
	force, _ := args["force"].(bool)
	approvalID, _ := args["_approval_id"].(string)
	approvalID = strings.TrimSpace(approvalID)

	if guestID == "" {
		return NewErrorResult(fmt.Errorf("guest_id is required")), nil
	}
	if action == "" {
		return NewErrorResult(fmt.Errorf("action is required")), nil
	}

	validActions := map[string]bool{"start": true, "stop": true, "shutdown": true, "restart": true, "delete": true}
	if !validActions[action] {
		return NewErrorResult(fmt.Errorf("invalid action: %s. Use start, stop, shutdown, restart, or delete", action)), nil
	}

	// Validate resource is in resolved context
	// With PULSE_STRICT_RESOLUTION=true, this blocks execution on undiscovered resources
	validation := e.validateResolvedResource(guestID, action, true)
	if validation.IsBlocked() {
		// Hard validation failure - return consistent error envelope
		return NewToolResponseResult(validation.StrictError.ToToolResponse()), nil
	}
	if validation.ErrorMsg != "" {
		if validation.Resource != nil {
			// The guest is known but validation failed (e.g. disallowed action).
			return NewErrorResult(errors.New(validation.ErrorMsg)), nil
		}
		// Soft validation - log warning but allow operation
		log.Warn().
			Str("guest_id", guestID).
			Str("action", action).
			Str("validation_error", validation.ErrorMsg).
			Msg("[ControlGuest] Guest not in resolved context - may indicate model hallucination")
	}

	// Note: Control level read_only check is now centralized in registry.Execute()

	guest, err := e.resolveGuest(guestID)
	if err != nil {
		return NewErrorResult(fmt.Errorf("could not find guest '%s': %v", guestID, err)), nil
	}

	// Check if guest is protected (matched by VMID or by name).
	vmidStr := fmt.Sprintf("%d", guest.VMID)
	for _, protected := range e.protectedGuests {
		if protected == vmidStr || protected == guest.Name {
			return NewErrorResult(fmt.Errorf("guest %s (VMID %d) is protected and cannot be controlled by Pulse Assistant", guest.Name, guest.VMID)), nil
		}
	}

	// Build the command: pct for LXC containers, qm for VMs.
	cmdTool := "pct"
	if guest.Type == "vm" {
		cmdTool = "qm"
	}

	// For delete action, verify guest is stopped first
	if action == "delete" && guest.Status != "stopped" {
		return NewErrorResult(fmt.Errorf("cannot delete %s (VMID %d) - it is currently %s; stop it first, then try deleting again", guest.Name, guest.VMID, guest.Status)), nil
	}

	var command string
	switch action {
	case "start":
		command = fmt.Sprintf("%s start %d", cmdTool, guest.VMID)
	case "stop":
		command = fmt.Sprintf("%s stop %d", cmdTool, guest.VMID)
	case "shutdown":
		command = fmt.Sprintf("%s shutdown %d", cmdTool, guest.VMID)
	case "restart":
		command = fmt.Sprintf("%s reboot %d", cmdTool, guest.VMID)
	case "delete":
		// Delete uses 'destroy' subcommand with --purge to also remove associated storage
		command = fmt.Sprintf("%s destroy %d --purge", cmdTool, guest.VMID)
	}

	// force only modifies stop; --skiplock bypasses the Proxmox lock check.
	if force && action == "stop" {
		command = fmt.Sprintf("%s stop %d --skiplock", cmdTool, guest.VMID)
	}

	// Approvals are bound to node:VMID so one cannot be replayed on another guest.
	approvalTargetID := fmt.Sprintf("%s:%d", guest.Node, guest.VMID)

	// Check if this is a pre-approved execution (agentic loop re-executing after user approval).
	// Use consumeApprovalWithValidation to enforce command-bound, single-use approvals.
	preApproved := consumeApprovalWithValidation(args, e.orgID, command, guest.Type, approvalTargetID)

	// Check security policy (skip if pre-approved)
	if !preApproved && e.policy != nil {
		decision := e.policy.Evaluate(command)
		if decision == agentexec.PolicyBlock {
			return NewTextResult(formatPolicyBlocked(command, "This command is blocked by security policy")), nil
		}
		if decision == agentexec.PolicyRequireApproval && !e.isAutonomous {
			// Use guest.Node (the Proxmox host) as targetName so approval execution can find the correct agent
			approvalID := createApprovalRecordForOrg(e.orgID, command, guest.Type, approvalTargetID, guest.Node, fmt.Sprintf("%s guest %s", action, guest.Name))
			return NewTextResult(formatControlApprovalNeeded(guest.Name, guest.VMID, action, command, approvalID)), nil
		}
	}

	// Check control level - this must be outside policy check since policy may be nil (skip if pre-approved)
	if !preApproved && e.controlLevel == ControlLevelControlled {
		// Use guest.Node (the Proxmox host) as targetName so approval execution can find the correct agent
		approvalID := createApprovalRecordForOrg(e.orgID, command, guest.Type, approvalTargetID, guest.Node, fmt.Sprintf("%s guest %s", action, guest.Name))
		return NewTextResult(formatControlApprovalNeeded(guest.Name, guest.VMID, action, command, approvalID)), nil
	}

	requiresApproval := !e.isAutonomous && e.controlLevel == ControlLevelControlled

	if e.agentServer == nil {
		return NewErrorResult(fmt.Errorf("no agent server available")), nil
	}

	agentID := e.findAgentForNode(guest.Node)
	if agentID == "" {
		return NewErrorResult(fmt.Errorf("no agent available on node '%s'; install Pulse Unified Agent on the node to enable control", guest.Node)), nil
	}

	result, err := e.executeCommandWithAudit(
		ctx,
		"pulse_control",
		fmt.Sprintf("%s:%d", guest.Node, guest.VMID),
		approvalID,
		requiresApproval,
		agentID,
		agentexec.ExecuteCommandPayload{
			Command:    command,
			TargetType: "agent",
			TargetID:   "",
		},
		"pulse_control",
		fmt.Sprintf("%s guest %s", action, guest.Name),
	)
	if err != nil {
		return NewErrorResult(err), nil
	}

	output := result.Stdout
	if result.Stderr != "" {
		output += "\n" + result.Stderr
	}

	// Detect idempotent success: the guest is already in the desired state.
	// Proxmox returns exit code 255 with "not running" for stop/shutdown on a stopped guest,
	// or "already running" for start on a running guest. These aren't failures — the desired
	// state is already achieved.
	outputLower := strings.ToLower(output)
	alreadyDone := false
	switch action {
	case "stop", "shutdown":
		alreadyDone = strings.Contains(outputLower, "not running")
	case "start":
		alreadyDone = strings.Contains(outputLower, "already running")
	}
	if alreadyDone {
		result.ExitCode = 0
		output = fmt.Sprintf("%s\n(idempotent: desired state already set)", output)
	}

	// Redact secrets before surfacing command output.
	if redacted, n := safety.RedactSensitiveText(output); n > 0 {
		output = redacted + fmt.Sprintf("\n\n[redacted %d sensitive value(s)]", n)
	}

	// Post-action verification via pct/qm status; "ok" reflects the control
	// command's exit code while "confirmed" reflects the observed state.
	verify := e.verifyGuestAction(ctx, agentID, cmdTool, guest.VMID, action)
	verify["ok"] = result.ExitCode == 0
	success := result.ExitCode == 0
	response := map[string]interface{}{
		"success":      success,
		"type":         "guest",
		"guest":        guest.Name,
		"guest_id":     fmt.Sprintf("%d", guest.VMID),
		"guest_type":   guest.Type,
		"node":         guest.Node,
		"action":       action,
		"command":      command,
		"exit_code":    result.ExitCode,
		"output":       output,
		"verification": verify,
	}
	return NewJSONResultWithIsError(response, !success), nil
}
|
|
|
|
func (e *PulseToolExecutor) verifyGuestAction(ctx context.Context, agentID, cmdTool string, vmID int, action string) map[string]interface{} {
|
|
expect := ""
|
|
switch action {
|
|
case "start", "restart":
|
|
expect = "running"
|
|
case "stop", "shutdown":
|
|
expect = "stopped"
|
|
case "delete":
|
|
expect = "deleted"
|
|
}
|
|
|
|
statusCmd := fmt.Sprintf("%s status %d", cmdTool, vmID)
|
|
res, err := e.agentServer.ExecuteCommand(ctx, agentID, agentexec.ExecuteCommandPayload{
|
|
Command: statusCmd,
|
|
TargetType: "agent",
|
|
})
|
|
if err != nil {
|
|
return map[string]interface{}{"confirmed": false, "method": "status", "command": statusCmd, "note": err.Error()}
|
|
}
|
|
out := strings.TrimSpace(res.Stdout + "\n" + res.Stderr)
|
|
outLower := strings.ToLower(out)
|
|
|
|
// Delete verification: status should fail with does-not-exist semantics.
|
|
if action == "delete" {
|
|
confirmed := res.ExitCode != 0 && (strings.Contains(outLower, "does not exist") || strings.Contains(outLower, "no such") || strings.Contains(outLower, "not found"))
|
|
return map[string]interface{}{"confirmed": confirmed, "method": "status", "command": statusCmd, "expected": expect, "raw": out}
|
|
}
|
|
|
|
observed := ""
|
|
if strings.Contains(outLower, "status: running") {
|
|
observed = "running"
|
|
} else if strings.Contains(outLower, "status: stopped") {
|
|
observed = "stopped"
|
|
}
|
|
confirmed := res.ExitCode == 0 && observed != "" && observed == expect
|
|
return map[string]interface{}{"confirmed": confirmed, "method": "status", "command": statusCmd, "expected": expect, "observed": observed, "raw": out}
|
|
}
|
|
|
|
// Helper methods for control tools

// CommandRoutingResult contains full routing information for command execution.
// This provides the provenance needed to verify where commands actually run.
// It is produced by resolveTargetForCommandFull and consumed by
// executeRunCommand / approvalTargetForCommand.
type CommandRoutingResult struct {
	// Routing info for agent
	AgentID    string // The agent that will execute the command
	TargetType string // "agent", "container", or "vm" (selects the ExecuteCommand payload semantics)
	TargetID   string // VMID for LXC/VM, empty for agent

	// Provenance info (reported back to the caller; not used for execution)
	AgentHostname string // Hostname of the agent
	ResolvedKind  string // Technology/transport kind: "node", "system-container", "vm", "app-container", "docker-host", "agent" (drives routing decisions)
	ResolvedNode  string // Hypervisor node name (if applicable)
	Transport     string // How command will be executed: "direct", "pct_exec", "qm_guest_exec"
}
|
|
|
|
// resolveTargetForCommandFull resolves a target_host to full routing info including provenance.
|
|
// Use this for write operations where you need to verify execution context.
|
|
//
|
|
// CRITICAL ORDERING: Topology resolution (state.ResolveResource) happens FIRST.
|
|
// Agent hostname matching is a FALLBACK only when the state doesn't know the resource.
|
|
// This prevents the "hostname collision" bug where an agent with hostname matching an LXC name
|
|
// causes commands to execute on the node instead of inside the LXC via pct exec.
|
|
func (e *PulseToolExecutor) resolveTargetForCommandFull(targetHost string) CommandRoutingResult {
|
|
result := CommandRoutingResult{
|
|
TargetType: "agent",
|
|
Transport: "direct",
|
|
}
|
|
|
|
if e.agentServer == nil {
|
|
return result
|
|
}
|
|
|
|
agents := e.agentServer.GetConnectedAgents()
|
|
if len(agents) == 0 {
|
|
return result
|
|
}
|
|
|
|
if targetHost == "" {
|
|
// No target_host specified - require exactly one agent or fail
|
|
if len(agents) > 1 {
|
|
return result
|
|
}
|
|
result.AgentID = agents[0].AgentID
|
|
result.AgentHostname = agents[0].Hostname
|
|
result.ResolvedKind = "agent"
|
|
return result
|
|
}
|
|
|
|
// STEP 1: Consult topology (state) FIRST — this is authoritative.
|
|
// If the state knows about this resource, use topology-based routing.
|
|
// This prevents hostname collisions from masquerading as host targets.
|
|
loc := e.resolveResourceLocation(targetHost)
|
|
|
|
if loc.Found {
|
|
// Route based on resource type
|
|
switch loc.ResourceType {
|
|
case "agent":
|
|
for _, agent := range agents {
|
|
if unifiedresources.HostnamesEquivalent(agent.Hostname, loc.TargetHost) || agent.AgentID == loc.TargetID {
|
|
result.AgentID = agent.AgentID
|
|
result.AgentHostname = agent.Hostname
|
|
result.ResolvedKind = "agent"
|
|
return result
|
|
}
|
|
}
|
|
|
|
case "node":
|
|
// Direct hypervisor node
|
|
nodeAgentID := e.findAgentForNode(loc.Node)
|
|
result.AgentID = nodeAgentID
|
|
result.ResolvedKind = "node"
|
|
result.ResolvedNode = loc.Node
|
|
for _, agent := range agents {
|
|
if agent.AgentID == nodeAgentID {
|
|
result.AgentHostname = agent.Hostname
|
|
break
|
|
}
|
|
}
|
|
return result
|
|
|
|
case "system-container":
|
|
// System container - route through node agent via pct exec
|
|
nodeAgentID := e.findAgentForNode(loc.Node)
|
|
result.ResolvedKind = "system-container"
|
|
result.ResolvedNode = loc.Node
|
|
result.TargetType = "container"
|
|
result.TargetID = fmt.Sprintf("%d", loc.VMID)
|
|
result.Transport = "pct_exec"
|
|
if nodeAgentID != "" {
|
|
result.AgentID = nodeAgentID
|
|
for _, agent := range agents {
|
|
if agent.AgentID == nodeAgentID {
|
|
result.AgentHostname = agent.Hostname
|
|
break
|
|
}
|
|
}
|
|
}
|
|
return result
|
|
|
|
case "vm":
|
|
// VM - route through node agent via qm guest exec
|
|
nodeAgentID := e.findAgentForNode(loc.Node)
|
|
result.ResolvedKind = "vm"
|
|
result.ResolvedNode = loc.Node
|
|
result.TargetType = "vm"
|
|
result.TargetID = fmt.Sprintf("%d", loc.VMID)
|
|
result.Transport = "qm_guest_exec"
|
|
if nodeAgentID != "" {
|
|
result.AgentID = nodeAgentID
|
|
for _, agent := range agents {
|
|
if agent.AgentID == nodeAgentID {
|
|
result.AgentHostname = agent.Hostname
|
|
break
|
|
}
|
|
}
|
|
}
|
|
return result
|
|
|
|
case "app-container", "docker-host":
|
|
// Docker container or Docker host
|
|
result.ResolvedKind = loc.ResourceType
|
|
result.ResolvedNode = loc.Node
|
|
|
|
if loc.DockerHostType == "system-container" {
|
|
nodeAgentID := e.findAgentForNode(loc.Node)
|
|
result.TargetType = "container"
|
|
result.TargetID = fmt.Sprintf("%d", loc.DockerHostVMID)
|
|
result.Transport = "pct_exec"
|
|
if nodeAgentID != "" {
|
|
result.AgentID = nodeAgentID
|
|
for _, agent := range agents {
|
|
if agent.AgentID == nodeAgentID {
|
|
result.AgentHostname = agent.Hostname
|
|
break
|
|
}
|
|
}
|
|
}
|
|
return result
|
|
}
|
|
if loc.DockerHostType == "vm" {
|
|
nodeAgentID := e.findAgentForNode(loc.Node)
|
|
result.TargetType = "vm"
|
|
result.TargetID = fmt.Sprintf("%d", loc.DockerHostVMID)
|
|
result.Transport = "qm_guest_exec"
|
|
if nodeAgentID != "" {
|
|
result.AgentID = nodeAgentID
|
|
for _, agent := range agents {
|
|
if agent.AgentID == nodeAgentID {
|
|
result.AgentHostname = agent.Hostname
|
|
break
|
|
}
|
|
}
|
|
}
|
|
return result
|
|
}
|
|
// Standalone Docker host - find agent directly
|
|
for _, agent := range agents {
|
|
if unifiedresources.HostnamesEquivalent(agent.Hostname, loc.TargetHost) || agent.AgentID == loc.TargetHost {
|
|
result.AgentID = agent.AgentID
|
|
result.AgentHostname = agent.Hostname
|
|
return result
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// STEP 2: FALLBACK — agent hostname match.
|
|
// Only used when the state doesn't know about this resource at all.
|
|
// This handles standalone hosts without Proxmox topology.
|
|
for _, agent := range agents {
|
|
if unifiedresources.HostnamesEquivalent(agent.Hostname, targetHost) || agent.AgentID == targetHost {
|
|
result.AgentID = agent.AgentID
|
|
result.AgentHostname = agent.Hostname
|
|
result.ResolvedKind = "agent"
|
|
return result
|
|
}
|
|
}
|
|
|
|
return result
|
|
}
|
|
|
|
// resolveTargetForCommand resolves a target_host to the correct agent and routing info.
|
|
// Uses the authoritative resolveResourceLocation function.
|
|
// Returns: agentID, targetType ("agent", "container", or "vm"), targetID (vmid for LXC/VM)
|
|
//
|
|
// CRITICAL ORDERING: Same as resolveTargetForCommandFull — topology first, agent fallback second.
|
|
func (e *PulseToolExecutor) resolveTargetForCommand(targetHost string) (agentID string, targetType string, targetID string) {
|
|
// Delegate to the full resolver and extract the triple
|
|
r := e.resolveTargetForCommandFull(targetHost)
|
|
return r.AgentID, r.TargetType, r.TargetID
|
|
}
|
|
|
|
func (e *PulseToolExecutor) findAgentForCommand(runOnHost bool, targetHost string) string {
|
|
agentID, _, _ := e.resolveTargetForCommand(targetHost)
|
|
return agentID
|
|
}
|
|
|
|
func (e *PulseToolExecutor) readStateForControl() (unifiedresources.ReadState, error) {
|
|
if rs := e.getReadState(); rs != nil {
|
|
return rs, nil
|
|
}
|
|
return nil, fmt.Errorf("read state not available")
|
|
}
|
|
|
|
func (e *PulseToolExecutor) resolveGuest(guestID string) (*GuestInfo, error) {
|
|
rs, err := e.readStateForControl()
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
vmID, convErr := strconv.Atoi(guestID)
|
|
for _, vm := range rs.VMs() {
|
|
if (convErr == nil && vm.VMID() == vmID) || vm.Name() == guestID || vm.ID() == guestID {
|
|
return &GuestInfo{
|
|
VMID: vm.VMID(),
|
|
Name: vm.Name(),
|
|
Node: vm.Node(),
|
|
Type: "vm",
|
|
Technology: "qemu",
|
|
Status: string(vm.Status()),
|
|
Instance: vm.Instance(),
|
|
}, nil
|
|
}
|
|
}
|
|
|
|
for _, ct := range rs.Containers() {
|
|
if (convErr == nil && ct.VMID() == vmID) || ct.Name() == guestID || ct.ID() == guestID {
|
|
return &GuestInfo{
|
|
VMID: ct.VMID(),
|
|
Name: ct.Name(),
|
|
Node: ct.Node(),
|
|
Type: "system-container",
|
|
Technology: "lxc",
|
|
Status: string(ct.Status()),
|
|
Instance: ct.Instance(),
|
|
}, nil
|
|
}
|
|
}
|
|
|
|
return nil, fmt.Errorf("no VM or container found with ID or name '%s'", guestID)
|
|
}
|
|
|
|
// resolveDockerContainer finds a Docker container by exact name, exact ID, or
// ID prefix, optionally restricted to a specific Docker host. It returns the
// matched container and its host as model structs, or an error when the
// container is missing or the name is ambiguous across hosts.
func (e *PulseToolExecutor) resolveDockerContainer(containerName, hostName string) (*models.DockerContainer, *models.DockerHost, error) {
	rs, err := e.readStateForControl()
	if err != nil {
		return nil, nil, fmt.Errorf("read state not available: %w", err)
	}

	// Group container views by their parent Docker host ID so each host's
	// containers can be scanned without re-walking the full container list.
	containersByHost := make(map[string][]*unifiedresources.DockerContainerView)
	for _, container := range rs.DockerContainers() {
		if container == nil {
			continue
		}
		parentID := strings.TrimSpace(container.ParentID())
		if parentID == "" {
			// Container view with no host association; cannot be routed.
			continue
		}
		containersByHost[parentID] = append(containersByHost[parentID], container)
	}

	// dockerMatch pairs a candidate container with the host it runs on.
	type dockerMatch struct {
		host      models.DockerHost
		container models.DockerContainer
	}
	matches := []dockerMatch{}

	for _, hostView := range rs.DockerHosts() {
		if hostView == nil {
			continue
		}
		// Apply the optional host filter before scanning its containers.
		if hostName != "" && !matchesDockerHostFilter(hostView, hostName) {
			continue
		}

		hostModel := dockerHostModelFromView(hostView)
		for _, containerView := range containersByHost[hostView.ID()] {
			container := dockerContainerModelFromView(containerView)
			// Accept exact name, exact ID, or an ID prefix match.
			if container.Name == containerName ||
				container.ID == containerName ||
				strings.HasPrefix(container.ID, containerName) {
				matches = append(matches, dockerMatch{host: hostModel, container: container})
			}
		}
	}

	if hostName != "" {
		// Host filter supplied: the first match on that host wins.
		// NOTE(review): if several containers on the same host share the
		// given ID prefix, matches[0] is returned silently — confirm intended.
		if len(matches) == 0 {
			return nil, nil, fmt.Errorf("container '%s' not found on host '%s'", containerName, hostName)
		}
		match := matches[0]
		return &match.container, &match.host, nil
	}

	if len(matches) == 0 {
		return nil, nil, fmt.Errorf("container '%s' not found on any Docker host", containerName)
	}
	if len(matches) > 1 {
		// Ambiguous across hosts: report the distinct host labels so the
		// caller can retry with an explicit host filter.
		hostNames := make([]string, 0, len(matches))
		seen := make(map[string]bool)
		for _, match := range matches {
			// Best available label: display name, then hostname, then ID.
			name := strings.TrimSpace(match.host.DisplayName)
			if name == "" {
				name = strings.TrimSpace(match.host.Hostname)
			}
			if name == "" {
				name = strings.TrimSpace(match.host.ID)
			}
			if name == "" || seen[name] {
				continue
			}
			hostNames = append(hostNames, name)
			seen[name] = true
		}
		if len(hostNames) == 0 {
			return nil, nil, fmt.Errorf("container '%s' exists on multiple Docker hosts; specify host", containerName)
		}
		return nil, nil, fmt.Errorf("container '%s' exists on multiple Docker hosts: %s. Specify host", containerName, strings.Join(hostNames, ", "))
	}

	match := matches[0]
	return &match.container, &match.host, nil
}
|
|
|
|
func matchesDockerHostFilter(host *unifiedresources.DockerHostView, filter string) bool {
|
|
if host == nil {
|
|
return false
|
|
}
|
|
if host.Hostname() == filter || host.Name() == filter {
|
|
return true
|
|
}
|
|
if host.ID() == filter || host.HostSourceID() == filter {
|
|
return true
|
|
}
|
|
return false
|
|
}
|
|
|
|
func dockerHostModelFromView(host *unifiedresources.DockerHostView) models.DockerHost {
|
|
if host == nil {
|
|
return models.DockerHost{}
|
|
}
|
|
|
|
hostID := strings.TrimSpace(host.HostSourceID())
|
|
if hostID == "" {
|
|
hostID = strings.TrimSpace(host.ID())
|
|
}
|
|
|
|
displayName := strings.TrimSpace(host.Name())
|
|
hostname := strings.TrimSpace(host.Hostname())
|
|
if hostname == "" {
|
|
hostname = displayName
|
|
}
|
|
|
|
return models.DockerHost{
|
|
ID: hostID,
|
|
AgentID: strings.TrimSpace(host.AgentID()),
|
|
Hostname: hostname,
|
|
DisplayName: displayName,
|
|
Status: string(host.Status()),
|
|
}
|
|
}
|
|
|
|
func dockerContainerModelFromView(container *unifiedresources.DockerContainerView) models.DockerContainer {
|
|
if container == nil {
|
|
return models.DockerContainer{}
|
|
}
|
|
|
|
containerID := strings.TrimSpace(container.ContainerID())
|
|
if containerID == "" {
|
|
containerID = strings.TrimSpace(container.ID())
|
|
}
|
|
|
|
state := strings.TrimSpace(container.ContainerState())
|
|
if state == "" {
|
|
state = string(container.Status())
|
|
}
|
|
|
|
ports := container.Ports()
|
|
portModels := make([]models.DockerContainerPort, 0, len(ports))
|
|
for _, p := range ports {
|
|
portModels = append(portModels, models.DockerContainerPort{
|
|
PrivatePort: p.PrivatePort,
|
|
PublicPort: p.PublicPort,
|
|
Protocol: p.Protocol,
|
|
IP: p.IP,
|
|
})
|
|
}
|
|
|
|
networks := container.Networks()
|
|
networkModels := make([]models.DockerContainerNetworkLink, 0, len(networks))
|
|
for _, n := range networks {
|
|
networkModels = append(networkModels, models.DockerContainerNetworkLink{
|
|
Name: n.Name,
|
|
IPv4: n.IPv4,
|
|
IPv6: n.IPv6,
|
|
})
|
|
}
|
|
|
|
mounts := container.Mounts()
|
|
mountModels := make([]models.DockerContainerMount, 0, len(mounts))
|
|
for _, m := range mounts {
|
|
mountModels = append(mountModels, models.DockerContainerMount{
|
|
Type: m.Type,
|
|
Source: m.Source,
|
|
Destination: m.Destination,
|
|
Mode: m.Mode,
|
|
RW: m.RW,
|
|
})
|
|
}
|
|
|
|
var updateStatus *models.DockerContainerUpdateStatus
|
|
if status := container.UpdateStatus(); status != nil {
|
|
updateStatus = &models.DockerContainerUpdateStatus{
|
|
UpdateAvailable: status.UpdateAvailable,
|
|
CurrentDigest: status.CurrentDigest,
|
|
LatestDigest: status.LatestDigest,
|
|
Error: status.Error,
|
|
}
|
|
}
|
|
|
|
return models.DockerContainer{
|
|
ID: containerID,
|
|
Name: container.Name(),
|
|
Image: container.Image(),
|
|
State: state,
|
|
Health: container.Health(),
|
|
CPUPercent: container.CPUPercent(),
|
|
MemoryUsage: container.MemoryUsed(),
|
|
MemoryLimit: container.MemoryTotal(),
|
|
MemoryPercent: container.MemoryPercent(),
|
|
UptimeSeconds: container.UptimeSeconds(),
|
|
RestartCount: container.RestartCount(),
|
|
ExitCode: container.ExitCode(),
|
|
Ports: portModels,
|
|
Labels: container.Labels(),
|
|
Networks: networkModels,
|
|
Mounts: mountModels,
|
|
UpdateStatus: updateStatus,
|
|
}
|
|
}
|
|
|
|
func (e *PulseToolExecutor) findAgentForNode(nodeName string) string {
|
|
if e.agentServer == nil {
|
|
return ""
|
|
}
|
|
|
|
agents := e.agentServer.GetConnectedAgents()
|
|
for _, agent := range agents {
|
|
if unifiedresources.HostnamesEquivalent(agent.Hostname, nodeName) {
|
|
return agent.AgentID
|
|
}
|
|
}
|
|
|
|
rs, err := e.readStateForControl()
|
|
if err != nil {
|
|
return ""
|
|
}
|
|
|
|
// Map linked node IDs to node names for quick lookup.
|
|
nodeNamesByID := make(map[string]string)
|
|
for _, node := range rs.Nodes() {
|
|
name := node.Name()
|
|
if name == "" {
|
|
name = node.NodeName()
|
|
}
|
|
if node.ID() != "" {
|
|
nodeNamesByID[node.ID()] = name
|
|
}
|
|
}
|
|
|
|
for _, host := range rs.Hosts() {
|
|
linked := host.LinkedNodeID()
|
|
if linked == "" {
|
|
continue
|
|
}
|
|
if !unifiedresources.HostnamesEquivalent(nodeNamesByID[linked], nodeName) {
|
|
continue
|
|
}
|
|
for _, agent := range agents {
|
|
if unifiedresources.HostnamesEquivalent(agent.Hostname, host.Hostname()) || agent.AgentID == host.ID() {
|
|
return agent.AgentID
|
|
}
|
|
}
|
|
}
|
|
|
|
return ""
|
|
}
|
|
|
|
func (e *PulseToolExecutor) findAgentForDockerHost(dockerHost *models.DockerHost) string {
|
|
if e.agentServer == nil {
|
|
return ""
|
|
}
|
|
|
|
agents := e.agentServer.GetConnectedAgents()
|
|
|
|
// First try to match by AgentID (most reliable)
|
|
if dockerHost.AgentID != "" {
|
|
for _, agent := range agents {
|
|
if agent.AgentID == dockerHost.AgentID {
|
|
return agent.AgentID
|
|
}
|
|
}
|
|
}
|
|
|
|
// Fall back to hostname match
|
|
for _, agent := range agents {
|
|
if unifiedresources.HostnamesEquivalent(agent.Hostname, dockerHost.Hostname) {
|
|
return agent.AgentID
|
|
}
|
|
}
|
|
|
|
return ""
|
|
}
|
|
|
|
// getAgentHostnameForDockerHost finds the agent hostname for a Docker host (for approval records)
|
|
func (e *PulseToolExecutor) getAgentHostnameForDockerHost(dockerHost *models.DockerHost) string {
|
|
if e.agentServer == nil {
|
|
return dockerHost.Hostname // fallback
|
|
}
|
|
|
|
agents := e.agentServer.GetConnectedAgents()
|
|
|
|
// Try to match by AgentID first
|
|
if dockerHost.AgentID != "" {
|
|
for _, agent := range agents {
|
|
if agent.AgentID == dockerHost.AgentID {
|
|
return agent.Hostname
|
|
}
|
|
}
|
|
}
|
|
|
|
// Fall back to the docker host's hostname
|
|
return dockerHost.Hostname
|
|
}
|
|
|
|
// resolveDockerHostRoutingFull resolves a Docker host to the correct agent and routing info
// with full provenance metadata. If the Docker host is actually a system container or VM,
// it routes through the node agent with the correct TargetType and TargetID so commands
// are executed inside the guest.
func (e *PulseToolExecutor) resolveDockerHostRoutingFull(dockerHost *models.DockerHost) CommandRoutingResult {
	// Default routing: talk to an agent directly with no guest-exec wrapping.
	result := CommandRoutingResult{
		TargetType: "agent",
		Transport:  "direct",
	}

	if e.agentServer == nil {
		// No agent transport available; return the empty default routing.
		return result
	}

	// STEP 1: Check topology — is the Docker host actually a system container or VM?
	// NOTE(review): matching is by guest name == Docker host hostname; a name
	// collision between an unrelated guest and the host would mis-route — confirm
	// guest names are unique in practice.
	if rs, err := e.readStateForControl(); err == nil {
		// Check system containers
		for _, ct := range rs.Containers() {
			if ct.Name() == dockerHost.Hostname {
				result.ResolvedKind = "system-container"
				result.ResolvedNode = ct.Node()
				result.TargetType = "container"
				result.TargetID = fmt.Sprintf("%d", ct.VMID())
				result.Transport = "pct_exec"
				nodeAgentID := e.findAgentForNode(ct.Node())
				if nodeAgentID != "" {
					result.AgentID = nodeAgentID
					// AgentHostname is set to the node name here, not the
					// agent's own hostname as elsewhere in this file.
					result.AgentHostname = ct.Node()
					log.Debug().
						Str("docker_host", dockerHost.Hostname).
						Str("node", ct.Node()).
						Int("vmid", ct.VMID()).
						Str("agent", nodeAgentID).
						Str("transport", result.Transport).
						Msg("Resolved Docker host as LXC, routing through Proxmox agent")
				}
				return result
			}
		}

		// Check VMs
		for _, vm := range rs.VMs() {
			if vm.Name() == dockerHost.Hostname {
				result.ResolvedKind = "vm"
				result.ResolvedNode = vm.Node()
				result.TargetType = "vm"
				result.TargetID = fmt.Sprintf("%d", vm.VMID())
				result.Transport = "qm_guest_exec"
				nodeAgentID := e.findAgentForNode(vm.Node())
				if nodeAgentID != "" {
					result.AgentID = nodeAgentID
					// Same node-name convention as the LXC branch above.
					result.AgentHostname = vm.Node()
					log.Debug().
						Str("docker_host", dockerHost.Hostname).
						Str("node", vm.Node()).
						Int("vmid", vm.VMID()).
						Str("agent", nodeAgentID).
						Str("transport", result.Transport).
						Msg("Resolved Docker host as VM, routing through Proxmox agent")
				}
				return result
			}
		}
	}

	// STEP 2: Docker host is not an LXC/VM — use direct agent routing
	agentID := e.findAgentForDockerHost(dockerHost)
	result.AgentID = agentID
	result.ResolvedKind = "docker-host"
	if agentID != "" {
		// Try to get agent hostname
		agents := e.agentServer.GetConnectedAgents()
		for _, a := range agents {
			if a.AgentID == agentID {
				result.AgentHostname = a.Hostname
				break
			}
		}
	}
	return result
}
|
|
|
|
// createApprovalRecord creates an approval record in the store and returns the approval ID.
|
|
// Returns empty string if store is not available (approvals will still work, just without persistence).
|
|
func createApprovalRecord(command, targetType, targetID, targetName, context string) string {
|
|
return createApprovalRecordForOrg("", command, targetType, targetID, targetName, context)
|
|
}
|
|
|
|
func createApprovalRecordForOrg(orgID, command, targetType, targetID, targetName, context string) string {
|
|
store := approval.GetStore()
|
|
if store == nil {
|
|
log.Debug().Msg("approval store not available, approval will not be persisted")
|
|
return ""
|
|
}
|
|
|
|
req := &approval.ApprovalRequest{
|
|
OrgID: strings.TrimSpace(orgID),
|
|
Command: command,
|
|
TargetType: targetType,
|
|
TargetID: targetID,
|
|
TargetName: targetName,
|
|
Context: context,
|
|
}
|
|
|
|
if err := store.CreateApproval(req); err != nil {
|
|
log.Warn().Err(err).Msg("failed to create approval record")
|
|
return ""
|
|
}
|
|
|
|
log.Debug().Str("approval_id", req.ID).Str("command", command).Msg("created approval record")
|
|
return req.ID
|
|
}
|
|
|
|
// isPreApproved checks if the args contain a valid, approved approval_id.
|
|
// This is used when the agentic loop re-executes a tool after user approval.
|
|
// DEPRECATED: Use consumeApprovalWithValidation instead for replay protection.
|
|
func isPreApproved(args map[string]interface{}) bool {
|
|
approvalID, ok := args["_approval_id"].(string)
|
|
if !ok || approvalID == "" {
|
|
return false
|
|
}
|
|
|
|
store := approval.GetStore()
|
|
if store == nil {
|
|
return false
|
|
}
|
|
|
|
req, found := store.GetApproval(approvalID)
|
|
if !found {
|
|
log.Debug().Str("approval_id", approvalID).Msg("pre-approval check: approval not found")
|
|
return false
|
|
}
|
|
|
|
if req.Status == approval.StatusApproved {
|
|
log.Debug().Str("approval_id", approvalID).Msg("pre-approval check: approved, skipping approval flow")
|
|
return true
|
|
}
|
|
|
|
log.Debug().Str("approval_id", approvalID).Str("status", string(req.Status)).Msg("pre-approval check: not approved")
|
|
return false
|
|
}
|
|
|
|
// consumeApprovalWithValidation validates and consumes an approval for a specific command.
|
|
// It verifies the command hash matches the approval and marks it as consumed (single-use).
|
|
// Returns true if the approval is valid and was consumed, false otherwise.
|
|
func consumeApprovalWithValidation(args map[string]interface{}, orgID, command, targetType, targetID string) bool {
|
|
approvalID, ok := args["_approval_id"].(string)
|
|
if !ok || approvalID == "" {
|
|
return false
|
|
}
|
|
|
|
store := approval.GetStore()
|
|
if store == nil {
|
|
return false
|
|
}
|
|
|
|
req, found := store.GetApproval(approvalID)
|
|
if !found {
|
|
log.Warn().Str("approval_id", approvalID).Msg("failed to find approval for pre-approved execution")
|
|
return false
|
|
}
|
|
|
|
if !approval.BelongsToOrg(req, orgID) {
|
|
log.Warn().
|
|
Str("approval_id", approvalID).
|
|
Str("request_org", approval.NormalizeOrgID(req.OrgID)).
|
|
Str("requested_org", approval.NormalizeOrgID(orgID)).
|
|
Msg("cross-org pre-approved execution rejected")
|
|
return false
|
|
}
|
|
|
|
_, err := store.ConsumeApproval(approvalID, command, targetType, targetID)
|
|
if err != nil {
|
|
log.Warn().Err(err).Str("approval_id", approvalID).Msg("failed to consume approval")
|
|
return false
|
|
}
|
|
|
|
return true
|
|
}
|
|
|
|
// Formatting helpers for control tools
|
|
|
|
// formatApprovalNeeded renders the APPROVAL_REQUIRED marker for a raw command
// that must be confirmed by the user before execution.
func formatApprovalNeeded(command, reason, approvalID string) string {
	payload := make(map[string]interface{}, 6)
	payload["type"] = "approval_required"
	payload["approval_id"] = approvalID
	payload["command"] = command
	payload["reason"] = reason
	payload["how_to_approve"] = "Click the approval button in the chat to execute this command."
	payload["do_not_retry"] = true
	encoded, _ := json.Marshal(payload)
	return "APPROVAL_REQUIRED: " + string(encoded)
}
|
|
|
|
// formatPolicyBlocked renders the POLICY_BLOCKED marker for a command that
// safety policy refused to run.
func formatPolicyBlocked(command, reason string) string {
	payload := make(map[string]interface{}, 4)
	payload["type"] = "policy_blocked"
	payload["command"] = command
	payload["reason"] = reason
	payload["do_not_retry"] = true
	encoded, _ := json.Marshal(payload)
	return "POLICY_BLOCKED: " + string(encoded)
}
|
|
|
|
func collectAgentHostnames(agents []agentexec.ConnectedAgent, max int) (all []string, truncated []string) {
|
|
all = make([]string, 0, len(agents))
|
|
for _, agent := range agents {
|
|
name := strings.TrimSpace(agent.Hostname)
|
|
if name == "" {
|
|
name = strings.TrimSpace(agent.AgentID)
|
|
}
|
|
if name != "" {
|
|
all = append(all, name)
|
|
}
|
|
}
|
|
|
|
truncated = all
|
|
if max >= 0 && len(all) > max {
|
|
truncated = all[:max]
|
|
}
|
|
|
|
return all, truncated
|
|
}
|
|
|
|
func formatTargetHostRequired(agents []agentexec.ConnectedAgent) string {
|
|
const maxItems = 6
|
|
hostnames, list := collectAgentHostnames(agents, maxItems)
|
|
if len(hostnames) == 0 {
|
|
return "Multiple agents are connected. Please specify target_host."
|
|
}
|
|
message := fmt.Sprintf("Multiple agents are connected. Please specify target_host. Available: %s", strings.Join(list, ", "))
|
|
if len(hostnames) > maxItems {
|
|
message = fmt.Sprintf("%s (+%d more)", message, len(hostnames)-maxItems)
|
|
}
|
|
return message
|
|
}
|
|
|
|
// formatAvailableAgentHosts returns a hint listing connected agent hostnames.
|
|
func formatAvailableAgentHosts(agents []agentexec.ConnectedAgent) string {
|
|
const maxItems = 6
|
|
hostnames, list := collectAgentHostnames(agents, maxItems)
|
|
if len(hostnames) == 0 {
|
|
return "No agents are currently connected."
|
|
}
|
|
msg := fmt.Sprintf("Available targets: %s", strings.Join(list, ", "))
|
|
if len(hostnames) > maxItems {
|
|
msg = fmt.Sprintf("%s (+%d more)", msg, len(hostnames)-maxItems)
|
|
}
|
|
return msg
|
|
}
|
|
|
|
// formatControlApprovalNeeded renders the APPROVAL_REQUIRED marker for a
// guest (VM/LXC) control action awaiting user confirmation.
func formatControlApprovalNeeded(name string, vmID int, action, command, approvalID string) string {
	payload := make(map[string]interface{}, 8)
	payload["type"] = "approval_required"
	payload["approval_id"] = approvalID
	payload["guest_name"] = name
	payload["guest_vmid"] = vmID
	payload["action"] = action
	payload["command"] = command
	payload["how_to_approve"] = "Click the approval button in the chat to execute this action."
	payload["do_not_retry"] = true
	encoded, _ := json.Marshal(payload)
	return "APPROVAL_REQUIRED: " + string(encoded)
}
|
|
|
|
// formatDockerApprovalNeeded renders the APPROVAL_REQUIRED marker for a
// Docker container action awaiting user confirmation.
func formatDockerApprovalNeeded(name, host, action, command, approvalID string) string {
	payload := make(map[string]interface{}, 8)
	payload["type"] = "approval_required"
	payload["approval_id"] = approvalID
	payload["container_name"] = name
	payload["docker_host"] = host
	payload["action"] = action
	payload["command"] = command
	payload["how_to_approve"] = "Click the approval button in the chat to execute this action."
	payload["do_not_retry"] = true
	encoded, _ := json.Marshal(payload)
	return "APPROVAL_REQUIRED: " + string(encoded)
}
|
|
|
|
// formatAppContainerApprovalNeeded renders the APPROVAL_REQUIRED marker for an
// app-container action awaiting user confirmation.
func formatAppContainerApprovalNeeded(name, host, action, command, approvalID string) string {
	payload := make(map[string]interface{}, 9)
	payload["type"] = "approval_required"
	payload["approval_id"] = approvalID
	payload["resource_name"] = name
	payload["resource_host"] = host
	payload["resource_type"] = "app-container"
	payload["action"] = action
	payload["command"] = command
	payload["how_to_approve"] = "Click the approval button in the chat to execute this action."
	payload["do_not_retry"] = true
	encoded, _ := json.Marshal(payload)
	return "APPROVAL_REQUIRED: " + string(encoded)
}
|