Use guest meminfo available for VM memory usage

This commit is contained in:
rcourtman 2025-10-12 11:03:56 +00:00
parent 7ba89da12c
commit 2163d6f5a8
4 changed files with 171 additions and 51 deletions

View file

@ -39,18 +39,22 @@ type NodeMemorySnapshot struct {
// VMMemoryRaw captures both the listing and detailed status memory fields for a VM/CT.
type VMMemoryRaw struct {
	// Listing* values come from the guest listing response;
	// Status* values come from the per-guest detailed status call.
	ListingMem    uint64 `json:"listingMem"`
	ListingMaxMem uint64 `json:"listingMaxmem"`
	StatusMem     uint64 `json:"statusMem,omitempty"`
	StatusFreeMem uint64 `json:"statusFreemem,omitempty"`
	StatusMaxMem  uint64 `json:"statusMaxmem,omitempty"`
	// Balloon / BalloonMin mirror the ballooning values reported in status.
	Balloon    uint64 `json:"balloon,omitempty"`
	BalloonMin uint64 `json:"balloonMin,omitempty"`
	// MemInfo* mirror the guest-agent /proc/meminfo fields (bytes).
	// Available is only reported by newer agents, so the raw components
	// (Free/Buffers/Cached/Shared) are kept to reconstruct it when missing.
	MemInfoUsed      uint64 `json:"meminfoUsed,omitempty"`
	MemInfoFree      uint64 `json:"meminfoFree,omitempty"`
	MemInfoTotal     uint64 `json:"meminfoTotal,omitempty"`
	MemInfoAvailable uint64 `json:"meminfoAvailable,omitempty"`
	MemInfoBuffers   uint64 `json:"meminfoBuffers,omitempty"`
	MemInfoCached    uint64 `json:"meminfoCached,omitempty"`
	MemInfoShared    uint64 `json:"meminfoShared,omitempty"`
	// Agent is the guest-agent flag as reported by the status endpoint.
	Agent int `json:"agent,omitempty"`
	// DerivedFromBall marks that the effective memory total was taken from
	// the balloon value instead of maxmem.
	DerivedFromBall bool `json:"derivedFromBalloon,omitempty"`
}
// GuestMemorySnapshot records the memory calculation for a guest (VM/LXC).

View file

@ -2527,10 +2527,28 @@ func (m *Monitor) pollVMsAndContainersEfficient(ctx context.Context, instanceNam
guestRaw.Balloon = detailedStatus.Balloon
guestRaw.BalloonMin = detailedStatus.BalloonMin
guestRaw.Agent = detailedStatus.Agent
memAvailable := uint64(0)
if detailedStatus.MemInfo != nil {
guestRaw.MemInfoUsed = detailedStatus.MemInfo.Used
guestRaw.MemInfoFree = detailedStatus.MemInfo.Free
guestRaw.MemInfoTotal = detailedStatus.MemInfo.Total
guestRaw.MemInfoAvailable = detailedStatus.MemInfo.Available
guestRaw.MemInfoBuffers = detailedStatus.MemInfo.Buffers
guestRaw.MemInfoCached = detailedStatus.MemInfo.Cached
guestRaw.MemInfoShared = detailedStatus.MemInfo.Shared
switch {
case detailedStatus.MemInfo.Available > 0:
memAvailable = detailedStatus.MemInfo.Available
memorySource = "meminfo-available"
case detailedStatus.MemInfo.Free > 0 ||
detailedStatus.MemInfo.Buffers > 0 ||
detailedStatus.MemInfo.Cached > 0:
memAvailable = detailedStatus.MemInfo.Free +
detailedStatus.MemInfo.Buffers +
detailedStatus.MemInfo.Cached
memorySource = "meminfo-derived"
}
}
// Use actual disk I/O values from detailed status
@ -2547,10 +2565,16 @@ func (m *Monitor) pollVMsAndContainersEfficient(ctx context.Context, instanceNam
guestRaw.DerivedFromBall = false
}
if detailedStatus.FreeMem > 0 && memTotal >= detailedStatus.FreeMem {
switch {
case memAvailable > 0:
if memAvailable > memTotal {
memAvailable = memTotal
}
memUsed = memTotal - memAvailable
case detailedStatus.FreeMem > 0 && memTotal >= detailedStatus.FreeMem:
memUsed = memTotal - detailedStatus.FreeMem
memorySource = "status-freemem"
} else if detailedStatus.Mem > 0 {
case detailedStatus.Mem > 0:
memUsed = detailedStatus.Mem
memorySource = "status-mem"
}
@ -3180,10 +3204,28 @@ func (m *Monitor) pollVMsWithNodes(ctx context.Context, instanceName string, cli
guestRaw.Balloon = status.Balloon
guestRaw.BalloonMin = status.BalloonMin
guestRaw.Agent = status.Agent
memAvailable := uint64(0)
if status.MemInfo != nil {
guestRaw.MemInfoUsed = status.MemInfo.Used
guestRaw.MemInfoFree = status.MemInfo.Free
guestRaw.MemInfoTotal = status.MemInfo.Total
guestRaw.MemInfoAvailable = status.MemInfo.Available
guestRaw.MemInfoBuffers = status.MemInfo.Buffers
guestRaw.MemInfoCached = status.MemInfo.Cached
guestRaw.MemInfoShared = status.MemInfo.Shared
switch {
case status.MemInfo.Available > 0:
memAvailable = status.MemInfo.Available
memorySource = "meminfo-available"
case status.MemInfo.Free > 0 ||
status.MemInfo.Buffers > 0 ||
status.MemInfo.Cached > 0:
memAvailable = status.MemInfo.Free +
status.MemInfo.Buffers +
status.MemInfo.Cached
memorySource = "meminfo-derived"
}
}
// Use actual disk I/O values from detailed status
@ -3201,18 +3243,28 @@ func (m *Monitor) pollVMsWithNodes(ctx context.Context, instanceName string, cli
}
// If we have free memory from guest agent, calculate actual usage
if status.FreeMem > 0 {
switch {
case memAvailable > 0:
if memAvailable > memTotal {
memAvailable = memTotal
}
memUsed = memTotal - memAvailable
case status.FreeMem > 0:
// Guest agent reports free memory, so calculate used
memUsed = memTotal - status.FreeMem
memorySource = "status-freemem"
} else if status.Mem > 0 {
case status.Mem > 0:
// No guest agent free memory data, but we have actual memory usage
// Use the reported memory usage from Proxmox
memUsed = status.Mem
memorySource = "status-mem"
} else {
default:
// No memory data available at all - show 0% usage
memUsed = 0
memorySource = "status-unavailable"
}
if memUsed > memTotal {
memUsed = memTotal
}
guestIPs, guestIfaces, guestOSName, guestOSVersion := fetchGuestAgentMetadata(ctx, client, instanceName, node.Node, vm.Name, vm.VMID, status)
@ -3456,6 +3508,25 @@ func (m *Monitor) pollVMsWithNodes(ctx context.Context, instanceName string, cli
}
diskReadRate, diskWriteRate, netInRate, netOutRate := m.rateTracker.CalculateRates(guestID, currentMetrics)
memTotalBytes := clampToInt64(memTotal)
memUsedBytes := clampToInt64(memUsed)
if memTotalBytes > 0 && memUsedBytes > memTotalBytes {
memUsedBytes = memTotalBytes
}
memFreeBytes := memTotalBytes - memUsedBytes
if memFreeBytes < 0 {
memFreeBytes = 0
}
memory := models.Memory{
Total: memTotalBytes,
Used: memUsedBytes,
Free: memFreeBytes,
Usage: safePercentage(float64(memUsed), float64(memTotal)),
}
if guestRaw.Balloon > 0 {
memory.Balloon = clampToInt64(guestRaw.Balloon)
}
modelVM := models.VM{
ID: guestID,
VMID: vm.VMID,
@ -3466,12 +3537,7 @@ func (m *Monitor) pollVMsWithNodes(ctx context.Context, instanceName string, cli
Type: "qemu",
CPU: cpuUsage, // Already in percentage
CPUs: vm.CPUs,
Memory: models.Memory{
Total: int64(memTotal),
Used: int64(memUsed),
Free: int64(memTotal - memUsed),
Usage: safePercentage(float64(memUsed), float64(memTotal)),
},
Memory: memory,
Disk: models.Disk{
Total: int64(diskTotal),
Used: int64(diskUsed),

View file

@ -163,21 +163,51 @@ func (m *Monitor) pollVMsWithNodesOptimized(ctx context.Context, instanceName st
guestRaw.Balloon = status.Balloon
guestRaw.BalloonMin = status.BalloonMin
guestRaw.Agent = status.Agent
memAvailable := uint64(0)
if status.MemInfo != nil {
guestRaw.MemInfoUsed = status.MemInfo.Used
guestRaw.MemInfoFree = status.MemInfo.Free
guestRaw.MemInfoTotal = status.MemInfo.Total
guestRaw.MemInfoAvailable = status.MemInfo.Available
guestRaw.MemInfoBuffers = status.MemInfo.Buffers
guestRaw.MemInfoCached = status.MemInfo.Cached
guestRaw.MemInfoShared = status.MemInfo.Shared
switch {
case status.MemInfo.Available > 0:
memAvailable = status.MemInfo.Available
memorySource = "meminfo-available"
case status.MemInfo.Free > 0 ||
status.MemInfo.Buffers > 0 ||
status.MemInfo.Cached > 0:
memAvailable = status.MemInfo.Free +
status.MemInfo.Buffers +
status.MemInfo.Cached
memorySource = "meminfo-derived"
}
}
if vmStatus.Balloon > 0 && vmStatus.Balloon < vmStatus.MaxMem {
memTotal = vmStatus.Balloon
guestRaw.DerivedFromBall = true
}
if vmStatus.FreeMem > 0 {
switch {
case memAvailable > 0:
if memAvailable > memTotal {
memAvailable = memTotal
}
memUsed = memTotal - memAvailable
case vmStatus.FreeMem > 0:
memUsed = memTotal - vmStatus.FreeMem
memorySource = "status-freemem"
} else if vmStatus.Mem > 0 {
case vmStatus.Mem > 0:
memUsed = vmStatus.Mem
memorySource = "status-mem"
default:
memUsed = 0
memorySource = "status-unavailable"
}
if memUsed > memTotal {
memUsed = memTotal
}
// Use actual disk I/O values from detailed status
diskReadBytes = int64(vmStatus.DiskRead)
@ -461,14 +491,23 @@ func (m *Monitor) pollVMsWithNodesOptimized(ctx context.Context, instanceName st
diskStatusReason = "no-status"
}
memTotalBytes := clampToInt64(memTotal)
memUsedBytes := clampToInt64(memUsed)
if memTotalBytes > 0 && memUsedBytes > memTotalBytes {
memUsedBytes = memTotalBytes
}
memFreeBytes := memTotalBytes - memUsedBytes
if memFreeBytes < 0 {
memFreeBytes = 0
}
memory := models.Memory{
Total: int64(memTotal),
Used: int64(memUsed),
Free: int64(memTotal - memUsed),
Total: memTotalBytes,
Used: memUsedBytes,
Free: memFreeBytes,
Usage: safePercentage(float64(memUsed), float64(memTotal)),
}
if vmStatus != nil && vmStatus.Balloon > 0 {
memory.Balloon = int64(vmStatus.Balloon)
if guestRaw.Balloon > 0 {
memory.Balloon = clampToInt64(guestRaw.Balloon)
}
// Create VM model

View file

@ -1243,28 +1243,39 @@ type ZFSPoolDevice struct {
}
// VMStatus represents detailed VM status
// VMMemInfo describes memory statistics reported by the guest agent.
// Proxmox surfaces guest /proc/meminfo values (in bytes). The available
// field is only present on newer agent versions, so we keep the raw
// components to reconstruct it when missing.
type VMMemInfo struct {
// All values are in bytes, mirroring the guest's /proc/meminfo.
Total uint64 `json:"total,omitempty"`
Used uint64 `json:"used,omitempty"`
Free uint64 `json:"free,omitempty"`
// Available is only populated by newer guest-agent versions; when it is
// zero, callers reconstruct it from Free+Buffers+Cached.
Available uint64 `json:"available,omitempty"`
Buffers uint64 `json:"buffers,omitempty"`
Cached uint64 `json:"cached,omitempty"`
// Shared is recorded for completeness but is not used in the
// availability fallback calculation.
Shared uint64 `json:"shared,omitempty"`
}
// VMStatus represents detailed VM status returned by Proxmox.
type VMStatus struct {
Status string `json:"status"`
CPU float64 `json:"cpu"`
CPUs int `json:"cpus"`
Mem uint64 `json:"mem"`
MaxMem uint64 `json:"maxmem"`
Balloon uint64 `json:"balloon"`
BalloonMin uint64 `json:"balloon_min"`
FreeMem uint64 `json:"freemem"`
MemInfo *struct {
Used uint64 `json:"used"`
Free uint64 `json:"free"`
Total uint64 `json:"total"`
} `json:"meminfo,omitempty"`
Disk uint64 `json:"disk"`
MaxDisk uint64 `json:"maxdisk"`
DiskRead uint64 `json:"diskread"`
DiskWrite uint64 `json:"diskwrite"`
NetIn uint64 `json:"netin"`
NetOut uint64 `json:"netout"`
Uptime uint64 `json:"uptime"`
Agent int `json:"agent"`
Status string `json:"status"`
CPU float64 `json:"cpu"`
CPUs int `json:"cpus"`
Mem uint64 `json:"mem"`
MaxMem uint64 `json:"maxmem"`
Balloon uint64 `json:"balloon"`
BalloonMin uint64 `json:"balloon_min"`
FreeMem uint64 `json:"freemem"`
MemInfo *VMMemInfo `json:"meminfo,omitempty"`
Disk uint64 `json:"disk"`
MaxDisk uint64 `json:"maxdisk"`
DiskRead uint64 `json:"diskread"`
DiskWrite uint64 `json:"diskwrite"`
NetIn uint64 `json:"netin"`
NetOut uint64 `json:"netout"`
Uptime uint64 `json:"uptime"`
Agent int `json:"agent"`
}
// GetZFSPoolStatus gets the status of ZFS pools on a node