Pulse/internal/ai/tools/tools_storage.go

package tools
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
"github.com/rcourtman/pulse-go-rewrite/internal/recovery"
proxmoxrecoverymapper "github.com/rcourtman/pulse-go-rewrite/internal/recovery/mapper/proxmox"
"github.com/rcourtman/pulse-go-rewrite/internal/unifiedresources"
)
// registerStorageTools registers the pulse_storage tool
func (e *PulseToolExecutor) registerStorageTools() {
e.registry.Register(RegisteredTool{
Definition: Tool{
Name: "pulse_storage",
Description: `Query storage pools, backups, snapshots, Ceph, replication, RAID, and disk health. Use the "type" parameter to select what to query.`,
InputSchema: InputSchema{
Type: "object",
Properties: map[string]PropertySchema{
"type": {
Type: "string",
Description: "Storage type to query",
Enum: []string{"pools", "backups", "backup_tasks", "snapshots", "ceph", "ceph_details", "replication", "pbs_jobs", "raid", "disk_health", "resource_disks"},
},
"storage_id": {
Type: "string",
Description: "Filter by storage ID (for pools)",
},
"resource_id": {
Type: "string",
Description: "Filter by VM/container ID (for backups, snapshots, resource_disks)",
},
"guest_id": {
Type: "string",
Description: "Filter by guest ID (for snapshots, backup_tasks)",
},
"vm_id": {
Type: "string",
Description: "Filter by VM ID (for replication)",
},
"instance": {
Type: "string",
Description: "Filter by Proxmox/PBS instance",
},
"node": {
Type: "string",
Description: "Filter by node name",
},
"host": {
Type: "string",
Description: "Filter by host (for raid, ceph_details)",
},
"cluster": {
Type: "string",
Description: "Filter by Ceph cluster name",
},
"job_type": {
Type: "string",
Description: "Filter PBS jobs by type: backup, sync, verify, prune, garbage",
Enum: []string{"backup", "sync", "verify", "prune", "garbage"},
},
"state": {
Type: "string",
Description: "Filter RAID arrays by state: clean, degraded, rebuilding",
},
"status": {
Type: "string",
Description: "Filter backup tasks by status: ok, error",
},
"resource_type": {
Type: "string",
Description: "Filter by type: vm or system-container (for resource_disks)",
},
"min_usage": {
Type: "number",
Description: "Only show resources with disk usage above this percentage (for resource_disks)",
},
"limit": {
Type: "integer",
Description: "Maximum number of results (default: 100)",
},
"offset": {
Type: "integer",
Description: "Number of results to skip",
},
},
Required: []string{"type"},
},
},
Handler: func(ctx context.Context, exec *PulseToolExecutor, args map[string]interface{}) (CallToolResult, error) {
return exec.executeStorage(ctx, args)
},
})
}
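// Example pulse_storage calls (illustrative argument values only):
//
//	{"type": "pools", "storage_id": "local-lvm"}
//	{"type": "backups", "resource_id": "101", "limit": 20}
//	{"type": "pbs_jobs", "job_type": "sync"}
//	{"type": "resource_disks", "min_usage": 80}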
// executeStorage routes to the appropriate storage handler based on type
func (e *PulseToolExecutor) executeStorage(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
storageType, _ := args["type"].(string)
switch storageType {
case "pools":
return e.executeListStorage(ctx, args)
case "backups":
return e.executeListBackups(ctx, args)
case "backup_tasks":
return e.executeListBackupTasks(ctx, args)
case "snapshots":
return e.executeListSnapshots(ctx, args)
case "ceph":
return e.executeGetCephStatus(ctx, args)
case "ceph_details":
return e.executeGetHostCephDetails(ctx, args)
case "replication":
return e.executeGetReplication(ctx, args)
case "pbs_jobs":
return e.executeListPBSJobs(ctx, args)
case "raid":
return e.executeGetHostRAIDStatus(ctx, args)
case "disk_health":
return e.executeGetDiskHealth(ctx, args)
case "resource_disks":
return e.executeGetResourceDisks(ctx, args)
default:
return NewErrorResult(fmt.Errorf("unknown type: %s. Use: pools, backups, backup_tasks, snapshots, ceph, ceph_details, replication, pbs_jobs, raid, disk_health, resource_disks", storageType)), nil
}
}
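// executeListBackups returns PBS and PVE storage backups (optionally filtered by
// resource_id, with offset/limit applied per list), plus PBS server/datastore
// summaries and up to 20 recent PVE backup tasks.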
func (e *PulseToolExecutor) executeListBackups(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
resourceID, _ := args["resource_id"].(string)
limit := intArg(args, "limit", 100)
offset := intArg(args, "offset", 0)
if e.backupProvider == nil {
return NewTextResult("Backup information not available."), nil
}
backups := e.backupProvider.GetBackups()
pbsInstances := e.backupProvider.GetPBSInstances()
response := EmptyBackupsResponse()
// PBS Backups
count := 0
for _, b := range backups.PBS {
if resourceID != "" && b.VMID != resourceID {
continue
}
if count < offset {
count++
continue
}
if len(response.PBS) >= limit {
break
}
response.PBS = append(response.PBS, PBSBackupSummary{
VMID: b.VMID,
BackupType: b.BackupType,
BackupTime: b.BackupTime,
Instance: b.Instance,
Datastore: b.Datastore,
SizeGB: float64(b.Size) / (1024 * 1024 * 1024),
Verified: b.Verified,
Protected: b.Protected,
})
count++
}
// PVE Backups
count = 0
for _, b := range backups.PVE.StorageBackups {
if resourceID != "" && strconv.Itoa(b.VMID) != resourceID {
continue
}
if count < offset {
count++
continue
}
if len(response.PVE) >= limit {
break
}
response.PVE = append(response.PVE, PVEBackupSummary{
VMID: b.VMID,
BackupTime: b.Time,
SizeGB: float64(b.Size) / (1024 * 1024 * 1024),
Storage: b.Storage,
})
count++
}
// PBS Servers
for _, pbs := range pbsInstances {
server := PBSServerSummary{
Name: pbs.Name,
Host: pbs.Host,
Status: pbs.Status,
}
for _, ds := range pbs.Datastores {
server.Datastores = append(server.Datastores, DatastoreSummary{
Name: ds.Name,
UsagePercent: ds.Usage,
FreeGB: float64(ds.Free) / (1024 * 1024 * 1024),
})
}
response.PBSServers = append(response.PBSServers, server)
}
// Recent tasks
for _, t := range backups.PVE.BackupTasks {
if len(response.RecentTasks) >= 20 {
break
}
response.RecentTasks = append(response.RecentTasks, BackupTaskSummary{
VMID: t.VMID,
Node: t.Node,
Status: t.Status,
StartTime: t.StartTime,
})
}
return NewJSONResult(response.NormalizeCollections()), nil
}
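// executeListStorage lists storage pools from the unified resource provider
// (optionally filtered by storage_id, with offset/limit pagination) together
// with Ceph cluster capacity summaries.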
func (e *PulseToolExecutor) executeListStorage(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
storageID, _ := args["storage_id"].(string)
limit := intArg(args, "limit", 100)
offset := intArg(args, "offset", 0)
response := EmptyStorageResponse()
// Storage pools
count := 0
var storageResources []unifiedresources.Resource
if e.unifiedResourceProvider != nil {
storageResources = e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeStorage)
}
for _, r := range storageResources {
if r.Storage == nil {
continue
}
if storageID != "" && r.ID != storageID && r.Name != storageID {
continue
}
if count < offset {
count++
continue
}
if len(response.Pools) >= limit {
break
}
pool := storagePoolSummaryFromResource(r)
response.Pools = append(response.Pools, pool)
count++
}
// Ceph clusters from unified resources
if e.unifiedResourceProvider != nil {
for _, r := range e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeCeph) {
if r.Ceph == nil {
continue
}
c := r.Ceph
usedBytes, totalBytes := cephBytesFromResource(r)
usagePercent := 0.0
if totalBytes > 0 {
usagePercent = float64(usedBytes) / float64(totalBytes) * 100
}
response.CephClusters = append(response.CephClusters, CephClusterSummary{
Name: r.Name,
Health: c.HealthStatus,
HealthMessage: c.HealthMessage,
UsagePercent: usagePercent,
UsedTB: float64(usedBytes) / (1024 * 1024 * 1024 * 1024),
TotalTB: float64(totalBytes) / (1024 * 1024 * 1024 * 1024),
NumOSDs: c.NumOSDs,
NumOSDsUp: c.NumOSDsUp,
NumOSDsIn: c.NumOSDsIn,
NumMons: c.NumMons,
NumMgrs: c.NumMgrs,
})
}
}
return NewJSONResult(response.NormalizeCollections()), nil
}
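// storagePoolSummaryFromResource flattens a unified storage resource into a
// StoragePoolSummary, preferring Metrics.Disk for capacity figures and deriving
// the usage percentage when it is not reported directly.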
func storagePoolSummaryFromResource(r unifiedresources.Resource) StoragePoolSummary {
var (
poolType string
content string
shared bool
node string
instance string
usedBytes int64
totalBytes int64
usagePercent float64
)
if r.Storage != nil {
poolType = r.Storage.Type
content = r.Storage.Content
shared = r.Storage.Shared
if content == "" && len(r.Storage.ContentTypes) > 0 {
content = strings.Join(r.Storage.ContentTypes, ",")
}
}
if r.Proxmox != nil {
node = r.Proxmox.NodeName
instance = r.Proxmox.Instance
}
if r.Metrics != nil && r.Metrics.Disk != nil {
if r.Metrics.Disk.Used != nil {
usedBytes = *r.Metrics.Disk.Used
}
if r.Metrics.Disk.Total != nil {
totalBytes = *r.Metrics.Disk.Total
}
usagePercent = r.Metrics.Disk.Percent
}
if usagePercent == 0 && totalBytes > 0 {
usagePercent = float64(usedBytes) / float64(totalBytes) * 100
}
freeBytes := int64(0)
if totalBytes > usedBytes {
freeBytes = totalBytes - usedBytes
}
active := r.Status != unifiedresources.StatusOffline
enabled := r.Status != unifiedresources.StatusOffline
id := r.ID
if id == "" {
id = r.Name
}
name := r.Name
if name == "" {
name = id
}
return StoragePoolSummary{
ID: id,
Name: name,
Node: node,
Instance: instance,
Type: poolType,
Status: string(r.Status),
Enabled: enabled,
Active: active,
UsagePercent: usagePercent,
UsedGB: float64(usedBytes) / (1024 * 1024 * 1024),
TotalGB: float64(totalBytes) / (1024 * 1024 * 1024),
FreeGB: float64(freeBytes) / (1024 * 1024 * 1024),
Content: content,
Shared: shared,
}
}
// cephBytesFromResource extracts used/total bytes from a unified Ceph resource.
// It prefers the Metrics.Disk values (which carry absolute bytes), falling back
// to summing pool-level data from CephMeta.
func cephBytesFromResource(r unifiedresources.Resource) (usedBytes, totalBytes int64) {
if r.Metrics != nil && r.Metrics.Disk != nil && r.Metrics.Disk.Used != nil && r.Metrics.Disk.Total != nil {
return *r.Metrics.Disk.Used, *r.Metrics.Disk.Total
}
if r.Ceph != nil {
for _, p := range r.Ceph.Pools {
usedBytes += p.StoredBytes
totalBytes += p.StoredBytes + p.AvailableBytes
}
}
return
}
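// executeGetDiskHealth reports per-host SMART, RAID, and Ceph health collected
// by host agents, including only hosts that expose at least one of those data sets.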
func (e *PulseToolExecutor) executeGetDiskHealth(_ context.Context, _ map[string]interface{}) (CallToolResult, error) {
if e.diskHealthProvider == nil {
return NewTextResult("Disk health information not available."), nil
}
response := EmptyDiskHealthResponse()
// SMART and RAID data from host agents
hosts := e.diskHealthProvider.GetHosts()
for _, host := range hosts {
hostHealth := HostDiskHealth{
Hostname: toolHostLabel(host),
}
// SMART data
if sensors := host.Sensors(); sensors != nil {
for _, disk := range sensors.SMART {
hostHealth.SMART = append(hostHealth.SMART, SMARTDiskSummary{
Device: disk.Device,
Model: disk.Model,
Health: disk.Health,
Temperature: disk.Temperature,
})
}
}
// RAID arrays
for _, raid := range host.RAID() {
hostHealth.RAID = append(hostHealth.RAID, RAIDArraySummary{
Device: raid.Device,
Level: raid.Level,
State: raid.State,
ActiveDevices: raid.ActiveDevices,
WorkingDevices: raid.WorkingDevices,
FailedDevices: raid.FailedDevices,
SpareDevices: raid.SpareDevices,
RebuildPercent: raid.RebuildPercent,
})
}
// Ceph from agent
if ceph := host.Ceph(); ceph != nil {
hostHealth.Ceph = &CephStatusSummary{
Health: ceph.Health.Status,
NumOSDs: ceph.OSDMap.NumOSDs,
NumOSDsUp: ceph.OSDMap.NumUp,
NumOSDsIn: ceph.OSDMap.NumIn,
NumPGs: ceph.PGMap.NumPGs,
UsagePercent: ceph.PGMap.UsagePercent,
}
}
// Only add if there's data
if len(hostHealth.SMART) > 0 || len(hostHealth.RAID) > 0 || hostHealth.Ceph != nil {
response.Hosts = append(response.Hosts, hostHealth)
}
}
return NewJSONResult(response.NormalizeCollections()), nil
}
// executeGetCephStatus returns Ceph cluster status
func (e *PulseToolExecutor) executeGetCephStatus(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
clusterFilter, _ := args["cluster"].(string)
type CephSummary struct {
Name string `json:"name"`
Health string `json:"health"`
Details map[string]interface{} `json:"details,omitempty"`
}
var results []CephSummary
if e.unifiedResourceProvider != nil {
resources := e.unifiedResourceProvider.GetByType(unifiedresources.ResourceTypeCeph)
for _, r := range resources {
if r.Ceph == nil {
continue
}
if clusterFilter != "" && r.Name != clusterFilter {
continue
}
c := r.Ceph
summary := CephSummary{
Name: r.Name,
Health: c.HealthStatus,
Details: make(map[string]interface{}),
}
if c.HealthMessage != "" {
summary.Details["health_message"] = c.HealthMessage
}
if c.NumOSDs > 0 {
summary.Details["osd_count"] = c.NumOSDs
summary.Details["osds_up"] = c.NumOSDsUp
summary.Details["osds_in"] = c.NumOSDsIn
summary.Details["osds_down"] = c.NumOSDs - c.NumOSDsUp
}
if c.NumMons > 0 {
summary.Details["monitors"] = c.NumMons
}
usedBytes, totalBytes := cephBytesFromResource(r)
if totalBytes > 0 {
summary.Details["total_bytes"] = totalBytes
summary.Details["used_bytes"] = usedBytes
summary.Details["available_bytes"] = totalBytes - usedBytes
usagePercent := float64(usedBytes) / float64(totalBytes) * 100
summary.Details["usage_percent"] = usagePercent
}
if len(c.Pools) > 0 {
summary.Details["pools"] = c.Pools
}
results = append(results, summary)
}
}
if len(results) == 0 {
if clusterFilter != "" {
return NewTextResult(fmt.Sprintf("Ceph cluster '%s' not found.", clusterFilter)), nil
}
return NewTextResult("No Ceph clusters found. Ceph may not be configured or data is not yet available."), nil
}
output, _ := json.MarshalIndent(results, "", " ")
return NewTextResult(string(output)), nil
}
// executeGetReplication returns replication job status
func (e *PulseToolExecutor) executeGetReplication(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
vmFilter, _ := args["vm_id"].(string)
if e.replicationProvider == nil {
return NewTextResult("No replication jobs found. Replication may not be configured."), nil
}
jobs := e.replicationProvider.GetReplicationJobs()
if len(jobs) == 0 {
return NewTextResult("No replication jobs found. Replication may not be configured."), nil
}
type ReplicationSummary struct {
ID string `json:"id"`
GuestID int `json:"guest_id"`
GuestName string `json:"guest_name,omitempty"`
GuestType string `json:"guest_type,omitempty"`
SourceNode string `json:"source_node,omitempty"`
TargetNode string `json:"target_node"`
Schedule string `json:"schedule,omitempty"`
Status string `json:"status"`
LastSync string `json:"last_sync,omitempty"`
NextSync string `json:"next_sync,omitempty"`
LastDuration string `json:"last_duration,omitempty"`
Error string `json:"error,omitempty"`
}
var results []ReplicationSummary
for _, job := range jobs {
if vmFilter != "" && fmt.Sprintf("%d", job.GuestID) != vmFilter {
continue
}
summary := ReplicationSummary{
ID: job.ID,
GuestID: job.GuestID,
GuestName: job.GuestName,
GuestType: job.GuestType,
SourceNode: job.SourceNode,
TargetNode: job.TargetNode,
Schedule: job.Schedule,
Status: job.Status,
}
if job.LastSyncTime != nil {
summary.LastSync = job.LastSyncTime.Format("2006-01-02 15:04:05")
}
if job.NextSyncTime != nil {
summary.NextSync = job.NextSyncTime.Format("2006-01-02 15:04:05")
}
if job.LastSyncDurationHuman != "" {
summary.LastDuration = job.LastSyncDurationHuman
}
if job.Error != "" {
summary.Error = job.Error
}
results = append(results, summary)
}
if len(results) == 0 && vmFilter != "" {
return NewTextResult(fmt.Sprintf("No replication jobs found for VM %s.", vmFilter)), nil
}
output, _ := json.MarshalIndent(results, "", " ")
return NewTextResult(string(output)), nil
}
// containsAny checks if s contains any of the substrings (case-insensitive)
func containsAny(s string, substrs ...string) bool {
lower := strings.ToLower(s)
for _, sub := range substrs {
if strings.Contains(lower, strings.ToLower(sub)) {
return true
}
}
return false
}
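// legacyPVESnapshotPoints maps the backup provider's PVE guest snapshots into
// recovery points so the legacy path can reuse the recovery-point extraction helpers.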
func (e *PulseToolExecutor) legacyPVESnapshotPoints() ([]recovery.RecoveryPoint, int) {
if e.backupProvider != nil {
backups := e.backupProvider.GetBackups()
return proxmoxrecoverymapper.FromPVEGuestSnapshots(backups.PVE.GuestSnapshots, nil), len(backups.PVE.GuestSnapshots)
}
return nil, 0
}
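// legacyPVEBackupTaskPoints maps the backup provider's PVE backup tasks into
// recovery points for the legacy fallback path.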
func (e *PulseToolExecutor) legacyPVEBackupTaskPoints() ([]recovery.RecoveryPoint, int) {
if e.backupProvider != nil {
backups := e.backupProvider.GetBackups()
return proxmoxrecoverymapper.FromPVEBackupTasks(backups.PVE.BackupTasks, nil), len(backups.PVE.BackupTasks)
}
return nil, 0
}
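// recoveryPointDetailString returns the trimmed string stored under key in the
// point's Details map, or "" when the key is missing or not a string.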
func recoveryPointDetailString(p recovery.RecoveryPoint, key string) string {
if p.Details == nil {
return ""
}
v, ok := p.Details[key]
if !ok || v == nil {
return ""
}
if s, ok := v.(string); ok {
return strings.TrimSpace(s)
}
return ""
}
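// recoveryPointDetailInt returns the Details value under key as an int,
// accepting int, int64, float64, and json.Number encodings; anything else yields 0.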
func recoveryPointDetailInt(p recovery.RecoveryPoint, key string) int {
if p.Details == nil {
return 0
}
v, ok := p.Details[key]
if !ok || v == nil {
return 0
}
switch n := v.(type) {
case int:
return n
case int64:
return int(n)
case float64:
return int(n)
case json.Number:
i, _ := n.Int64()
return int(i)
default:
return 0
}
}
func recoveryPointDetailBool(p recovery.RecoveryPoint, key string) bool {
if p.Details == nil {
return false
}
v, ok := p.Details[key]
if !ok || v == nil {
return false
}
b, ok := v.(bool)
return ok && b
}
func recoveryPointDisplayString(p recovery.RecoveryPoint, value func(*recovery.RecoveryPointDisplay) string) string {
if p.Display == nil {
return ""
}
return strings.TrimSpace(value(p.Display))
}
func parseRecoveryPointVMID(value string) int {
vmid, err := strconv.Atoi(strings.TrimSpace(value))
if err != nil || vmid <= 0 {
return 0
}
return vmid
}
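// recoveryPointCanonicalGuestType resolves the guest type for a recovery point
// from display metadata, the subject reference, or details, normalizing to
// "vm" or "lxc" where possible.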
func recoveryPointCanonicalGuestType(p recovery.RecoveryPoint) string {
raw := recoveryPointDisplayString(p, func(display *recovery.RecoveryPointDisplay) string { return display.ItemType })
if raw == "" && p.SubjectRef != nil {
raw = strings.TrimSpace(p.SubjectRef.Type)
}
if raw == "" {
raw = recoveryPointDetailString(p, "type")
}
switch recovery.NormalizeRecoveryItemType(raw) {
case "vm":
return "vm"
case "system-container":
return "lxc"
default:
return strings.TrimSpace(raw)
}
}
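// recoveryPointCanonicalInstance resolves the owning Proxmox instance from
// display metadata, the subject reference namespace, or the "instance" detail.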
func recoveryPointCanonicalInstance(p recovery.RecoveryPoint) string {
if value := recoveryPointDisplayString(p, func(display *recovery.RecoveryPointDisplay) string { return display.ClusterLabel }); value != "" {
return value
}
if p.SubjectRef != nil {
if value := strings.TrimSpace(p.SubjectRef.Namespace); value != "" {
return value
}
}
return recoveryPointDetailString(p, "instance")
}
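// recoveryPointCanonicalNode resolves the node name from display metadata, the
// subject reference class, or the "node" detail.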
func recoveryPointCanonicalNode(p recovery.RecoveryPoint) string {
if value := recoveryPointDisplayString(p, func(display *recovery.RecoveryPointDisplay) string { return display.NodeHostLabel }); value != "" {
return value
}
if p.SubjectRef != nil {
if value := strings.TrimSpace(p.SubjectRef.Class); value != "" {
return value
}
}
return recoveryPointDetailString(p, "node")
}
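// recoveryPointCanonicalVMID resolves the numeric VMID from display metadata,
// the subject reference, or the "vmid" detail, returning 0 when none parse.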
func recoveryPointCanonicalVMID(p recovery.RecoveryPoint) int {
if value := parseRecoveryPointVMID(recoveryPointDisplayString(p, func(display *recovery.RecoveryPointDisplay) string { return display.EntityIDLabel })); value > 0 {
return value
}
if p.SubjectRef != nil {
if value := parseRecoveryPointVMID(p.SubjectRef.Name); value > 0 {
return value
}
if value := parseRecoveryPointVMID(p.SubjectRef.ID); value > 0 {
return value
}
}
return recoveryPointDetailInt(p, "vmid")
}
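// recoveryPointCanonicalSnapshotName resolves the snapshot name from details or
// display metadata, falling back to the point ID with its provider prefix trimmed.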
func recoveryPointCanonicalSnapshotName(p recovery.RecoveryPoint, trimPrefix func(string) string) string {
if value := recoveryPointDetailString(p, "snapshotName"); value != "" {
return value
}
if value := recoveryPointDisplayString(p, func(display *recovery.RecoveryPointDisplay) string { return display.DetailsSummary }); value != "" {
return value
}
return trimPrefix(p.ID)
}
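// recoveryPointCanonicalTaskStatus returns the task's status detail if present,
// otherwise derives OK/ERROR/WARNING/RUNNING/UNKNOWN from the point's outcome.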
func recoveryPointCanonicalTaskStatus(p recovery.RecoveryPoint) string {
if value := recoveryPointDetailString(p, "status"); value != "" {
return value
}
switch p.Outcome {
case recovery.OutcomeSuccess:
return "OK"
case recovery.OutcomeFailed:
return "ERROR"
case recovery.OutcomeWarning:
return "WARNING"
case recovery.OutcomeRunning:
return "RUNNING"
default:
return "UNKNOWN"
}
}
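// recoveryPointCanonicalTaskError returns the task's error detail, falling back
// to the display summary for failed or warning outcomes.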
func recoveryPointCanonicalTaskError(p recovery.RecoveryPoint) string {
if value := recoveryPointDetailString(p, "error"); value != "" {
return value
}
if p.Outcome == recovery.OutcomeFailed || p.Outcome == recovery.OutcomeWarning {
return recoveryPointDisplayString(p, func(display *recovery.RecoveryPointDisplay) string { return display.DetailsSummary })
}
return ""
}
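// executeListSnapshots lists PVE guest snapshots, preferring the recovery-points
// provider (paged, with a 5s timeout) and falling back to legacy backup data,
// with guest_id/instance filtering and offset/limit pagination.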
func (e *PulseToolExecutor) executeListSnapshots(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
if e.backupProvider == nil && e.recoveryPointsProvider == nil {
return NewTextResult("State provider not available."), nil
}
guestIDFilter, _ := args["guest_id"].(string)
instanceFilter, _ := args["instance"].(string)
limit := intArg(args, "limit", 100)
offset := intArg(args, "offset", 0)
// Build VM name map for enrichment
vmNames := make(map[int]string)
if rs, err := e.readStateForControl(); err == nil {
for _, w := range rs.Workloads() {
if w.VMID() > 0 && w.Name() != "" {
// Best-effort: VMID collisions across instances are possible; this matches legacy behavior.
if _, exists := vmNames[w.VMID()]; !exists {
vmNames[w.VMID()] = w.Name()
}
}
}
}
var snapshots []SnapshotSummary
filteredCount := 0
totalCount := 0
trimPrefix := func(id string) string {
id = strings.TrimSpace(id)
if strings.HasPrefix(id, "pve-snapshot:") {
return strings.TrimPrefix(id, "pve-snapshot:")
}
return id
}
if e.recoveryPointsProvider != nil {
const pageLimit = 200
const maxPages = 20
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
matchedIndex := 0
for page := 1; page <= maxPages; page++ {
points, total, err := e.recoveryPointsProvider.ListPoints(ctx, recovery.ListPointsOptions{
Provider: recovery.ProviderProxmoxPVE,
Kind: recovery.KindSnapshot,
Page: page,
Limit: pageLimit,
})
if err != nil {
return NewErrorResult(err), nil
}
if page == 1 {
totalCount = total
}
if len(points) == 0 {
break
}
for _, p := range points {
if !strings.HasPrefix(strings.TrimSpace(p.ID), "pve-snapshot:") {
continue
}
vmid := recoveryPointCanonicalVMID(p)
instance := recoveryPointCanonicalInstance(p)
if guestIDFilter != "" && strconv.Itoa(vmid) != guestIDFilter {
continue
}
if instanceFilter != "" && instance != instanceFilter {
continue
}
filteredCount++
if matchedIndex < offset {
matchedIndex++
continue
}
if len(snapshots) >= limit {
matchedIndex++
continue
}
node := recoveryPointCanonicalNode(p)
guestType := recoveryPointCanonicalGuestType(p)
snapshotName := recoveryPointCanonicalSnapshotName(p, trimPrefix)
description := recoveryPointDetailString(p, "description")
var ts time.Time
if p.CompletedAt != nil {
ts = p.CompletedAt.UTC()
} else if p.StartedAt != nil {
ts = p.StartedAt.UTC()
}
size := int64(0)
if p.SizeBytes != nil {
size = *p.SizeBytes
}
snapshots = append(snapshots, SnapshotSummary{
ID: trimPrefix(p.ID),
VMID: vmid,
VMName: vmNames[vmid],
Type: guestType,
Node: node,
Instance: instance,
SnapshotName: snapshotName,
Description: description,
Time: ts,
VMState: recoveryPointDetailBool(p, "vmState"),
SizeBytes: size,
})
matchedIndex++
}
if len(snapshots) >= limit && matchedIndex >= offset+limit {
break
}
}
} else {
// Legacy fallback: normalize snapshot data into recovery points so
// both paths share the same extraction behavior.
points, total := e.legacyPVESnapshotPoints()
totalCount = total
matchedIndex := 0
for _, p := range points {
if !strings.HasPrefix(strings.TrimSpace(p.ID), "pve-snapshot:") {
continue
}
vmid := recoveryPointCanonicalVMID(p)
instance := recoveryPointCanonicalInstance(p)
// Apply filters
if guestIDFilter != "" && strconv.Itoa(vmid) != guestIDFilter {
continue
}
if instanceFilter != "" && instance != instanceFilter {
continue
}
filteredCount++
// Apply pagination
if matchedIndex < offset {
matchedIndex++
continue
}
if len(snapshots) >= limit {
matchedIndex++
continue
}
node := recoveryPointCanonicalNode(p)
guestType := recoveryPointCanonicalGuestType(p)
snapshotName := recoveryPointCanonicalSnapshotName(p, trimPrefix)
description := recoveryPointDetailString(p, "description")
var ts time.Time
if p.CompletedAt != nil {
ts = p.CompletedAt.UTC()
} else if p.StartedAt != nil {
ts = p.StartedAt.UTC()
}
size := int64(0)
if p.SizeBytes != nil {
size = *p.SizeBytes
}
snapshots = append(snapshots, SnapshotSummary{
ID: trimPrefix(p.ID),
VMID: vmid,
VMName: vmNames[vmid],
Type: guestType,
Node: node,
Instance: instance,
SnapshotName: snapshotName,
Description: description,
Time: ts,
VMState: recoveryPointDetailBool(p, "vmState"),
SizeBytes: size,
})
matchedIndex++
}
}
if snapshots == nil {
snapshots = []SnapshotSummary{}
}
response := EmptySnapshotsResponse()
response.Snapshots = snapshots
response.Total = totalCount
response.Filtered = filteredCount
return NewJSONResult(response.NormalizeCollections()), nil
}
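// executeListPBSJobs lists backup, sync, verify, prune, and garbage-collection
// jobs across PBS instances, optionally filtered by instance and job_type.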
func (e *PulseToolExecutor) executeListPBSJobs(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
if e.backupProvider == nil {
return NewTextResult("Backup provider not available."), nil
}
instanceFilter, _ := args["instance"].(string)
jobTypeFilter, _ := args["job_type"].(string)
pbsInstances := e.backupProvider.GetPBSInstances()
if len(pbsInstances) == 0 {
return NewTextResult("No PBS instances found. PBS monitoring may not be configured."), nil
}
var jobs []PBSJobSummary
for _, pbs := range pbsInstances {
if instanceFilter != "" && pbs.ID != instanceFilter && pbs.Name != instanceFilter {
continue
}
// Backup jobs
if jobTypeFilter == "" || jobTypeFilter == "backup" {
for _, job := range pbs.BackupJobs {
jobs = append(jobs, PBSJobSummary{
ID: job.ID,
Type: "backup",
Store: job.Store,
Status: job.Status,
LastRun: job.LastBackup,
NextRun: job.NextRun,
Error: job.Error,
VMID: job.VMID,
})
}
}
// Sync jobs
if jobTypeFilter == "" || jobTypeFilter == "sync" {
for _, job := range pbs.SyncJobs {
jobs = append(jobs, PBSJobSummary{
ID: job.ID,
Type: "sync",
Store: job.Store,
Status: job.Status,
LastRun: job.LastSync,
NextRun: job.NextRun,
Error: job.Error,
Remote: job.Remote,
})
}
}
// Verify jobs
if jobTypeFilter == "" || jobTypeFilter == "verify" {
for _, job := range pbs.VerifyJobs {
jobs = append(jobs, PBSJobSummary{
ID: job.ID,
Type: "verify",
Store: job.Store,
Status: job.Status,
LastRun: job.LastVerify,
NextRun: job.NextRun,
Error: job.Error,
})
}
}
// Prune jobs
if jobTypeFilter == "" || jobTypeFilter == "prune" {
for _, job := range pbs.PruneJobs {
jobs = append(jobs, PBSJobSummary{
ID: job.ID,
Type: "prune",
Store: job.Store,
Status: job.Status,
LastRun: job.LastPrune,
NextRun: job.NextRun,
Error: job.Error,
})
}
}
// Garbage jobs
if jobTypeFilter == "" || jobTypeFilter == "garbage" {
for _, job := range pbs.GarbageJobs {
jobs = append(jobs, PBSJobSummary{
ID: job.ID,
Type: "garbage",
Store: job.Store,
Status: job.Status,
LastRun: job.LastGarbage,
NextRun: job.NextRun,
Error: job.Error,
RemovedBytes: job.RemovedBytes,
})
}
}
}
if jobs == nil {
jobs = []PBSJobSummary{}
}
response := EmptyPBSJobsResponse()
response.Instance = instanceFilter
response.Jobs = jobs
response.Total = len(jobs)
return NewJSONResult(response.NormalizeCollections()), nil
}
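// executeListBackupTasks lists PVE backup tasks, preferring the recovery-points
// provider and falling back to legacy backup data, with instance/guest_id/status
// filtering and a result limit (default 50).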
func (e *PulseToolExecutor) executeListBackupTasks(ctx context.Context, args map[string]interface{}) (CallToolResult, error) {
if e.backupProvider == nil && e.recoveryPointsProvider == nil {
return NewTextResult("State provider not available."), nil
}
instanceFilter, _ := args["instance"].(string)
guestIDFilter, _ := args["guest_id"].(string)
statusFilter, _ := args["status"].(string)
limit := intArg(args, "limit", 50)
// Build VM name map
vmNames := make(map[int]string)
if rs, err := e.readStateForControl(); err == nil {
for _, w := range rs.Workloads() {
if w.VMID() > 0 && w.Name() != "" {
if _, exists := vmNames[w.VMID()]; !exists {
vmNames[w.VMID()] = w.Name()
}
}
}
}
var tasks []BackupTaskDetail
filteredCount := 0
totalCount := 0
trimPrefix := func(id string) string {
id = strings.TrimSpace(id)
if strings.HasPrefix(id, "pve-task:") {
return strings.TrimPrefix(id, "pve-task:")
}
return id
}
if e.recoveryPointsProvider != nil {
const pageLimit = 200
const maxPages = 20
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
for page := 1; page <= maxPages; page++ {
points, _, err := e.recoveryPointsProvider.ListPoints(ctx, recovery.ListPointsOptions{
Provider: recovery.ProviderProxmoxPVE,
Kind: recovery.KindBackup,
Page: page,
Limit: pageLimit,
})
if err != nil {
return NewErrorResult(err), nil
}
if len(points) == 0 {
break
}
for _, p := range points {
if !strings.HasPrefix(strings.TrimSpace(p.ID), "pve-task:") {
continue
}
totalCount++
instance := recoveryPointCanonicalInstance(p)
node := recoveryPointCanonicalNode(p)
vmid := recoveryPointCanonicalVMID(p)
status := recoveryPointCanonicalTaskStatus(p)
taskType := recoveryPointCanonicalGuestType(p)
errText := recoveryPointCanonicalTaskError(p)
// Apply filters
if instanceFilter != "" && instance != instanceFilter {
continue
}
if guestIDFilter != "" && strconv.Itoa(vmid) != guestIDFilter {
continue
}
if statusFilter != "" && !strings.EqualFold(status, statusFilter) {
continue
}
filteredCount++
if len(tasks) >= limit {
continue
}
started := time.Time{}
if p.StartedAt != nil {
started = p.StartedAt.UTC()
}
ended := time.Time{}
if p.CompletedAt != nil {
ended = p.CompletedAt.UTC()
}
size := int64(0)
if p.SizeBytes != nil {
size = *p.SizeBytes
}
tasks = append(tasks, BackupTaskDetail{
ID: trimPrefix(p.ID),
VMID: vmid,
VMName: vmNames[vmid],
Node: node,
Instance: instance,
Type: taskType,
Status: status,
StartTime: started,
EndTime: ended,
SizeBytes: size,
Error: errText,
})
}
if len(tasks) >= limit {
break
}
}
} else {
// Legacy fallback: normalize backup-task data into recovery points so
// both paths share the same extraction behavior.
points, total := e.legacyPVEBackupTaskPoints()
totalCount = total
for _, p := range points {
if !strings.HasPrefix(strings.TrimSpace(p.ID), "pve-task:") {
continue
}
instance := recoveryPointCanonicalInstance(p)
node := recoveryPointCanonicalNode(p)
vmid := recoveryPointCanonicalVMID(p)
status := recoveryPointCanonicalTaskStatus(p)
taskType := recoveryPointCanonicalGuestType(p)
errText := recoveryPointCanonicalTaskError(p)
// Apply filters
if instanceFilter != "" && instance != instanceFilter {
continue
}
if guestIDFilter != "" && strconv.Itoa(vmid) != guestIDFilter {
continue
}
if statusFilter != "" && !strings.EqualFold(status, statusFilter) {
continue
}
filteredCount++
if len(tasks) >= limit {
continue
}
started := time.Time{}
if p.StartedAt != nil {
started = p.StartedAt.UTC()
}
ended := time.Time{}
if p.CompletedAt != nil {
ended = p.CompletedAt.UTC()
}
size := int64(0)
if p.SizeBytes != nil {
size = *p.SizeBytes
}
tasks = append(tasks, BackupTaskDetail{
ID: trimPrefix(p.ID),
VMID: vmid,
VMName: vmNames[vmid],
Node: node,
Instance: instance,
Type: taskType,
Status: status,
StartTime: started,
EndTime: ended,
SizeBytes: size,
Error: errText,
})
}
}
if tasks == nil {
tasks = []BackupTaskDetail{}
}
response := EmptyBackupTasksListResponse()
response.Tasks = tasks
response.Total = totalCount
response.Filtered = filteredCount
return NewJSONResult(response.NormalizeCollections()), nil
}
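// executeGetHostRAIDStatus reports RAID arrays from host agents, optionally
// filtered by host and array state.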
func (e *PulseToolExecutor) executeGetHostRAIDStatus(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
if e.diskHealthProvider == nil {
return NewTextResult("Disk health provider not available."), nil
}
hostFilter, _ := args["host"].(string)
stateFilter, _ := args["state"].(string)
hosts := e.diskHealthProvider.GetHosts()
var hostSummaries []HostRAIDSummary
for _, host := range hosts {
targetID := toolHostTargetID(host)
hostLabel := toolHostLabel(host)
// Apply host filter
if hostFilter != "" && targetID != hostFilter && host.ID() != hostFilter && toolHostName(host) != hostFilter && hostLabel != hostFilter {
continue
}
// Skip hosts without RAID arrays
raidArrays := host.RAID()
if len(raidArrays) == 0 {
continue
}
var arrays []HostRAIDArraySummary
for _, raid := range raidArrays {
// Apply state filter
if stateFilter != "" && !strings.EqualFold(raid.State, stateFilter) {
continue
}
var devices []HostRAIDDeviceSummary
for _, dev := range raid.Devices {
devices = append(devices, HostRAIDDeviceSummary{
Device: dev.Device,
State: dev.State,
Slot: dev.Slot,
})
}
if devices == nil {
devices = []HostRAIDDeviceSummary{}
}
arrays = append(arrays, HostRAIDArraySummary{
Device: raid.Device,
Name: raid.Name,
Level: raid.Level,
State: raid.State,
TotalDevices: raid.TotalDevices,
ActiveDevices: raid.ActiveDevices,
WorkingDevices: raid.WorkingDevices,
FailedDevices: raid.FailedDevices,
SpareDevices: raid.SpareDevices,
UUID: raid.UUID,
RebuildPercent: raid.RebuildPercent,
RebuildSpeed: raid.RebuildSpeed,
Devices: devices,
})
}
if len(arrays) > 0 {
hostSummaries = append(hostSummaries, HostRAIDSummary{
Hostname: hostLabel,
TargetID: targetID,
Arrays: arrays,
})
}
}
if hostSummaries == nil {
hostSummaries = []HostRAIDSummary{}
}
if len(hostSummaries) == 0 {
if hostFilter != "" {
return NewTextResult(fmt.Sprintf("No RAID arrays found for host '%s'.", hostFilter)), nil
}
return NewTextResult("No RAID arrays found across any hosts. RAID monitoring requires host agents to be configured."), nil
}
response := EmptyHostRAIDStatusResponse()
response.Hosts = hostSummaries
response.Total = len(hostSummaries)
return NewJSONResult(response.NormalizeCollections()), nil
}
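// executeGetHostCephDetails reports detailed Ceph status (health messages,
// MON/MGR/OSD/PG maps, and pools) collected by host agents, optionally filtered by host.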
func (e *PulseToolExecutor) executeGetHostCephDetails(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
if e.diskHealthProvider == nil {
return NewTextResult("Disk health provider not available."), nil
}
hostFilter, _ := args["host"].(string)
hosts := e.diskHealthProvider.GetHosts()
var hostSummaries []HostCephSummary
for _, host := range hosts {
targetID := toolHostTargetID(host)
hostLabel := toolHostLabel(host)
// Apply host filter
if hostFilter != "" && targetID != hostFilter && host.ID() != hostFilter && toolHostName(host) != hostFilter && hostLabel != hostFilter {
continue
}
// Skip hosts without Ceph data
ceph := host.Ceph()
if ceph == nil {
continue
}
// Build health messages from checks and summary
var healthMessages []HostCephHealthMessage
for checkName, check := range ceph.Health.Checks {
msg := check.Message
if msg == "" {
msg = checkName
}
healthMessages = append(healthMessages, HostCephHealthMessage{
Severity: check.Severity,
Message: msg,
})
}
for _, summary := range ceph.Health.Summary {
healthMessages = append(healthMessages, HostCephHealthMessage{
Severity: summary.Severity,
Message: summary.Message,
})
}
// Build monitor summary
var monSummary *HostCephMonSummary
if ceph.MonMap.NumMons > 0 {
var monitors []HostCephMonitorSummary
for _, mon := range ceph.MonMap.Monitors {
monitors = append(monitors, HostCephMonitorSummary{
Name: mon.Name,
Rank: mon.Rank,
Addr: mon.Addr,
Status: mon.Status,
})
}
monSummary = &HostCephMonSummary{
NumMons: ceph.MonMap.NumMons,
Monitors: monitors,
}
}
// Build manager summary
var mgrSummary *HostCephMgrSummary
if ceph.MgrMap.NumMgrs > 0 || ceph.MgrMap.Available {
mgrSummary = &HostCephMgrSummary{
Available: ceph.MgrMap.Available,
NumMgrs: ceph.MgrMap.NumMgrs,
ActiveMgr: ceph.MgrMap.ActiveMgr,
Standbys: ceph.MgrMap.Standbys,
}
}
// Build pool summaries
var pools []HostCephPoolSummary
for _, pool := range ceph.Pools {
pools = append(pools, HostCephPoolSummary{
ID: pool.ID,
Name: pool.Name,
BytesUsed: pool.BytesUsed,
BytesAvailable: pool.BytesAvailable,
Objects: pool.Objects,
PercentUsed: pool.PercentUsed,
})
}
if healthMessages == nil {
healthMessages = []HostCephHealthMessage{}
}
if pools == nil {
pools = []HostCephPoolSummary{}
}
hostSummaries = append(hostSummaries, HostCephSummary{
Hostname: hostLabel,
TargetID: targetID,
FSID: ceph.FSID,
Health: HostCephHealthSummary{
Status: ceph.Health.Status,
Messages: healthMessages,
},
MonMap: monSummary,
MgrMap: mgrSummary,
OSDMap: HostCephOSDSummary{
NumOSDs: ceph.OSDMap.NumOSDs,
NumUp: ceph.OSDMap.NumUp,
NumIn: ceph.OSDMap.NumIn,
NumDown: ceph.OSDMap.NumDown,
NumOut: ceph.OSDMap.NumOut,
},
PGMap: HostCephPGSummary{
NumPGs: ceph.PGMap.NumPGs,
BytesTotal: ceph.PGMap.BytesTotal,
BytesUsed: ceph.PGMap.BytesUsed,
BytesAvailable: ceph.PGMap.BytesAvailable,
UsagePercent: ceph.PGMap.UsagePercent,
DegradedRatio: ceph.PGMap.DegradedRatio,
MisplacedRatio: ceph.PGMap.MisplacedRatio,
ReadBytesPerSec: ceph.PGMap.ReadBytesPerSec,
WriteBytesPerSec: ceph.PGMap.WriteBytesPerSec,
ReadOpsPerSec: ceph.PGMap.ReadOpsPerSec,
WriteOpsPerSec: ceph.PGMap.WriteOpsPerSec,
},
Pools: pools,
CollectedAt: ceph.CollectedAt,
})
}
if hostSummaries == nil {
hostSummaries = []HostCephSummary{}
}
if len(hostSummaries) == 0 {
if hostFilter != "" {
return NewTextResult(fmt.Sprintf("No Ceph data found for host '%s'.", hostFilter)), nil
}
return NewTextResult("No Ceph data found from host agents. Ceph monitoring requires host agents to be configured on Ceph nodes."), nil
}
response := EmptyHostCephDetailsResponse()
response.Hosts = hostSummaries
response.Total = len(hostSummaries)
return NewJSONResult(response.NormalizeCollections()), nil
}
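// toolHostTargetID returns the identifier used to address a host agent,
// preferring the agent ID over the host ID.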
func toolHostTargetID(host *unifiedresources.HostView) string {
if host == nil {
return ""
}
if agentID := strings.TrimSpace(host.AgentID()); agentID != "" {
return agentID
}
return strings.TrimSpace(host.ID())
}
func toolHostName(host *unifiedresources.HostView) string {
if host == nil {
return ""
}
if hostname := strings.TrimSpace(host.Hostname()); hostname != "" {
return hostname
}
return strings.TrimSpace(host.Name())
}
func toolHostLabel(host *unifiedresources.HostView) string {
if name := toolHostName(host); name != "" {
return name
}
return toolHostTargetID(host)
}
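// executeGetResourceDisks reports per-disk usage for VMs and containers that
// expose guest-agent disk data, with resource_id, resource_type, instance, and
// min_usage filters.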
func (e *PulseToolExecutor) executeGetResourceDisks(_ context.Context, args map[string]interface{}) (CallToolResult, error) {
rs, err := e.readStateForControl()
if err != nil {
return NewTextResult("State provider not available."), nil
}
resourceFilter, _ := args["resource_id"].(string)
typeFilter, _ := args["resource_type"].(string)
instanceFilter, _ := args["instance"].(string)
minUsage, _ := args["min_usage"].(float64)
typeFilter = strings.ToLower(strings.TrimSpace(typeFilter))
if typeFilter != "" && typeFilter != "vm" && typeFilter != "system-container" {
return NewErrorResult(fmt.Errorf("unsupported type %q (allowed: vm, system-container)", typeFilter)), nil
}
var resources []ResourceDisksSummary
// Process VMs
if typeFilter == "" || typeFilter == "vm" {
for _, vm := range rs.VMs() {
if vm == nil {
continue
}
vmID := vm.VMID()
vmIDStr := strconv.Itoa(vmID)
vmSourceID := strings.TrimSpace(vm.SourceID())
if vmSourceID == "" {
vmSourceID = vm.ID()
}
// Apply filters
if resourceFilter != "" && vmSourceID != resourceFilter && vmIDStr != resourceFilter {
continue
}
if instanceFilter != "" && vm.Instance() != instanceFilter {
continue
}
vmDisks := vm.Disks()
// Skip VMs without disk data
if len(vmDisks) == 0 {
continue
}
var disks []ResourceDiskInfo
maxUsage := 0.0
for _, disk := range vmDisks {
usage := disk.Usage
if usage <= 0 && disk.Total > 0 {
usage = (float64(disk.Used) / float64(disk.Total)) * 100
}
if usage <= 0 && disk.Used > 0 {
usage = 100
}
if usage > maxUsage {
maxUsage = usage
}
disks = append(disks, ResourceDiskInfo{
Device: disk.Device,
Mountpoint: disk.Mountpoint,
Type: disk.Filesystem,
TotalBytes: disk.Total,
UsedBytes: disk.Used,
FreeBytes: disk.Free,
Usage: usage,
})
}
// Apply min_usage filter
if minUsage > 0 && maxUsage < minUsage {
continue
}
if disks == nil {
disks = []ResourceDiskInfo{}
}
resources = append(resources, ResourceDisksSummary{
ID: vmSourceID,
VMID: vmID,
Name: vm.Name(),
Type: "vm",
Node: vm.Node(),
Instance: vm.Instance(),
Disks: disks,
})
}
}
// Process containers
if typeFilter == "" || typeFilter == "system-container" {
for _, ct := range rs.Containers() {
if ct == nil {
continue
}
ctID := ct.VMID()
ctIDStr := strconv.Itoa(ctID)
ctSourceID := strings.TrimSpace(ct.SourceID())
if ctSourceID == "" {
ctSourceID = ct.ID()
}
// Apply filters
if resourceFilter != "" && ctSourceID != resourceFilter && ctIDStr != resourceFilter {
continue
}
if instanceFilter != "" && ct.Instance() != instanceFilter {
continue
}
ctDisks := ct.Disks()
// Skip containers without disk data
if len(ctDisks) == 0 {
continue
}
var disks []ResourceDiskInfo
maxUsage := 0.0
for _, disk := range ctDisks {
usage := disk.Usage
if usage <= 0 && disk.Total > 0 {
usage = (float64(disk.Used) / float64(disk.Total)) * 100
}
if usage <= 0 && disk.Used > 0 {
usage = 100
}
if usage > maxUsage {
maxUsage = usage
}
disks = append(disks, ResourceDiskInfo{
Device: disk.Device,
Mountpoint: disk.Mountpoint,
Type: disk.Filesystem,
TotalBytes: disk.Total,
UsedBytes: disk.Used,
FreeBytes: disk.Free,
Usage: usage,
})
}
// Apply min_usage filter
if minUsage > 0 && maxUsage < minUsage {
continue
}
if disks == nil {
disks = []ResourceDiskInfo{}
}
resources = append(resources, ResourceDisksSummary{
ID: ctSourceID,
VMID: ctID,
Name: ct.Name(),
Type: "system-container",
Node: ct.Node(),
Instance: ct.Instance(),
Disks: disks,
})
}
}
if resources == nil {
resources = []ResourceDisksSummary{}
}
if len(resources) == 0 {
if resourceFilter != "" {
return NewTextResult(fmt.Sprintf("No disk data found for resource '%s'. Guest agent may not be installed or disk info unavailable.", resourceFilter)), nil
}
return NewTextResult("No disk data available for any VMs or containers. Disk details require guest agents to be installed and running."), nil
}
response := EmptyResourceDisksResponse()
response.Resources = resources
response.Total = len(resources)
return NewJSONResult(response.NormalizeCollections()), nil
}