mirror of
https://github.com/rcourtman/Pulse.git
synced 2026-05-17 04:00:32 +00:00
fix: reduce API calls to prevent syslog spam on non-clustered nodes (#322)

- Cache nodes list in pollPVEInstance and pass it to sub-functions
- Prevents multiple GetNodes() calls per polling cycle
- Reduces API calls from ~5 per cycle to 1 per cycle
- Fixes syslog spam on standalone PVE nodes trying to find cluster certificates
- Fixes PBS "Transport endpoint not connected" errors from excessive polling

Previously we were calling GetNodes() in:
- pollPVEInstance (main)
- pollVMs
- pollContainers
- pollStorage
- pollStorageBackups

Now we call it once and pass the list to avoid duplicate API calls that trigger certificate checks on non-clustered nodes.
This commit is contained in:
parent
b316510a2b
commit
fcd7823709
1 changed file with 29 additions and 5 deletions
|
|
@ -798,11 +798,12 @@ func (m *Monitor) pollPVEInstance(ctx context.Context, instanceName string, clie
|
|||
// Try to use efficient cluster/resources endpoint
|
||||
if !m.pollVMsAndContainersEfficient(ctx, instanceName, client) {
|
||||
// Fall back to old method if cluster/resources fails
|
||||
// Use WithNodes versions to avoid duplicate GetNodes calls
|
||||
if instanceCfg.MonitorVMs {
|
||||
m.pollVMs(ctx, instanceName, client)
|
||||
m.pollVMsWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
if instanceCfg.MonitorContainers {
|
||||
m.pollContainers(ctx, instanceName, client)
|
||||
m.pollContainersWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -814,7 +815,7 @@ func (m *Monitor) pollPVEInstance(ctx context.Context, instanceName string, clie
|
|||
case <-ctx.Done():
|
||||
return
|
||||
default:
|
||||
m.pollStorage(ctx, instanceName, client)
|
||||
m.pollStorageWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -840,8 +841,8 @@ func (m *Monitor) pollPVEInstance(ctx context.Context, instanceName string, clie
|
|||
// Poll backup tasks
|
||||
m.pollBackupTasks(backupCtx, instanceName, client)
|
||||
|
||||
// Poll storage backups
|
||||
m.pollStorageBackups(backupCtx, instanceName, client)
|
||||
// Poll storage backups - pass nodes to avoid duplicate API calls
|
||||
m.pollStorageBackupsWithNodes(backupCtx, instanceName, client, nodes)
|
||||
|
||||
// Poll guest snapshots
|
||||
m.pollGuestSnapshots(backupCtx, instanceName, client)
|
||||
|
|
@ -997,6 +998,11 @@ func (m *Monitor) pollVMs(ctx context.Context, instanceName string, client PVECl
|
|||
return
|
||||
}
|
||||
|
||||
m.pollVMsWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
|
||||
// pollVMsWithNodes polls VMs using a provided nodes list to avoid duplicate GetNodes calls
|
||||
func (m *Monitor) pollVMsWithNodes(ctx context.Context, instanceName string, client PVEClientInterface, nodes []proxmox.Node) {
|
||||
var allVMs []models.VM
|
||||
for _, node := range nodes {
|
||||
vms, err := client.GetVMs(ctx, node.Node)
|
||||
|
|
@ -1133,6 +1139,12 @@ func (m *Monitor) pollContainers(ctx context.Context, instanceName string, clien
|
|||
return
|
||||
}
|
||||
|
||||
m.pollContainersWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
|
||||
// pollContainersWithNodes polls containers using a provided nodes list to avoid duplicate GetNodes calls
|
||||
func (m *Monitor) pollContainersWithNodes(ctx context.Context, instanceName string, client PVEClientInterface, nodes []proxmox.Node) {
|
||||
|
||||
var allContainers []models.Container
|
||||
for _, node := range nodes {
|
||||
containers, err := client.GetContainers(ctx, node.Node)
|
||||
|
|
@ -1248,6 +1260,12 @@ func (m *Monitor) pollStorage(ctx context.Context, instanceName string, client P
|
|||
return
|
||||
}
|
||||
|
||||
m.pollStorageWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
|
||||
// pollStorageWithNodes polls storage using a provided nodes list to avoid duplicate GetNodes calls
|
||||
func (m *Monitor) pollStorageWithNodes(ctx context.Context, instanceName string, client PVEClientInterface, nodes []proxmox.Node) {
|
||||
|
||||
// Get cluster storage configuration for shared/enabled status
|
||||
clusterStorages, err := client.GetAllStorage(ctx)
|
||||
if err != nil {
|
||||
|
|
@ -1695,6 +1713,12 @@ func (m *Monitor) pollStorageBackups(ctx context.Context, instanceName string, c
|
|||
return
|
||||
}
|
||||
|
||||
m.pollStorageBackupsWithNodes(ctx, instanceName, client, nodes)
|
||||
}
|
||||
|
||||
// pollStorageBackupsWithNodes polls backups using a provided nodes list to avoid duplicate GetNodes calls
|
||||
func (m *Monitor) pollStorageBackupsWithNodes(ctx context.Context, instanceName string, client PVEClientInterface, nodes []proxmox.Node) {
|
||||
|
||||
var allBackups []models.StorageBackup
|
||||
seenVolids := make(map[string]bool) // Track seen volume IDs to avoid duplicates
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue