mirror of https://github.com/rcourtman/Pulse.git
synced 2026-04-28 03:20:11 +00:00
style: fix gofmt formatting inconsistencies
Run gofmt -w to fix tab/space inconsistencies across 33 files.
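For context, a commit like this is produced and verified with standard gofmt flags only; a minimal sketch of the workflow (the commit message confirms `gofmt -w` was the command actually run):

    # list files whose formatting differs from gofmt's output
    gofmt -l .

    # rewrite offending files in place (what this commit ran)
    gofmt -w .

    # CI-style guard: exit non-zero if any file is still unformatted
    test -z "$(gofmt -l .)"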
This commit is contained in:
parent e6adffb2ff
commit 01f7d81d38
33 changed files with 584 additions and 584 deletions
@@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package main
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package main
@@ -1,70 +1,70 @@
package main

import (
	"bufio"
	"encoding/json"
	"os"
	"testing"
)

type auditRecord map[string]interface{}

func TestAuditLogValidationFailure(t *testing.T) {
	tmp, err := os.CreateTemp("", "audit-test-*.log")
	if err != nil {
		t.Fatalf("temp file: %v", err)
	}
	path := tmp.Name()
	tmp.Close()
	defer os.Remove(path)

	logger := newAuditLogger(path)

	cred := &peerCredentials{uid: 1000, gid: 1000, pid: 4242}
	logger.LogValidationFailure("corr-123", cred, "remote", "get_temperature", []string{"node"}, "invalid_node")
	logger.Close()

	file, err := os.Open(path)
	if err != nil {
		t.Fatalf("open log: %v", err)
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	if !scanner.Scan() {
		t.Fatalf("expected at least one audit entry (file may be empty)")
	}

	line := scanner.Bytes()
	if len(line) == 0 {
		t.Fatalf("empty line in audit log")
	}

	t.Logf("Audit log line: %s", string(line))

	var record auditRecord
	if err := json.Unmarshal(line, &record); err != nil {
		t.Fatalf("unmarshal (line=%s): %v", string(line), err)
	}

	t.Logf("Parsed record: %+v", record)

	if record["event_type"] != "command.validation_failed" {
		t.Fatalf("unexpected event_type: %v (full record: %+v)", record["event_type"], record)
	}
	if record["correlation_id"] != "corr-123" {
		t.Fatalf("unexpected correlation id: %v", record["correlation_id"])
	}
	if record["command"] != "get_temperature" {
		t.Fatalf("unexpected command: %v", record["command"])
	}
	if record["reason"] != "invalid_node" {
		t.Fatalf("unexpected reason: %v", record["reason"])
	}
	if record["decision"] != "denied" {
		t.Fatalf("unexpected decision: %v", record["decision"])
	}
	if record["event_hash"] == "" {
		t.Fatalf("expected event_hash to be set")
	}
}
@@ -1,31 +1,31 @@
package main

import (
	"strings"
	"testing"
)

func FuzzValidateCommand(f *testing.F) {
	seeds := []string{
		"sensors -j",
		"ipmitool sdr",
		"sensors",
		"ipmitool lan print",
	}
	for _, seed := range seeds {
		f.Add(seed)
	}

	f.Fuzz(func(t *testing.T, input string) {
		fields := strings.Fields(input)
		if len(fields) == 0 {
			return
		}
		cmd := fields[0]
		args := []string{}
		if len(fields) > 1 {
			args = fields[1:]
		}
		validateCommand(cmd, args) // ensure no panics
	})
}
@@ -1,40 +1,39 @@
package main

import (
	"context"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rs/zerolog/log"
)

func startMetricsServer(ctx context.Context, addr string) {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())

	srv := &http.Server{
		Addr:         addr,
		Handler:      mux,
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 10 * time.Second,
		IdleTimeout:  30 * time.Second,
	}

	go func() {
		<-ctx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		if err := srv.Shutdown(shutdownCtx); err != nil && err != http.ErrServerClosed {
			log.Warn().Err(err).Msg("Failed to shut down metrics server cleanly")
		}
	}()

	go func() {
		log.Info().Str("addr", addr).Msg("Metrics endpoint listening")
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Warn().Err(err).Msg("Metrics server stopped unexpectedly")
		}
	}()
}
@@ -10,12 +10,12 @@ import (
// This test shows how to configure different alert delays for different metrics
func TestPerMetricDelayConfiguration(t *testing.T) {
	tests := []struct {
		name          string
		config        AlertConfig
		resourceType  string
		metricType    string
		expectedDelay int
		description   string
	}{
		{
			name: "CPU alert with longer delay than memory",

@@ -117,7 +117,7 @@ func TestPerMetricDelayConfiguration(t *testing.T) {
			},
			MetricTimeThresholds: map[string]map[string]int{
				"docker": {
					"restartcount": 10,  // Quick notification for container restarts
					"cpu":          120, // Longer for CPU
				},
			},
@@ -70,12 +70,12 @@ func TestGetTimeThresholdMetricOverrides(t *testing.T) {
		metricType string
		expected   int
	}{
		{"vm-resource", "VM", "cpu", 5},        // guest metric override
		{"vm-resource", "VM", "memory", 30},    // falls back to guest type delay
		{"node-1", "Node", "temperature", 120}, // node metric override
		{"node-1", "Node", "cpu", 60},          // node type delay
		{"storage-1", "storage", "usage", 90},  // storage type delay
		{"unknown", "unknown", "cpu", 20},      // global default metric override
		{"unknown", "unknown", "disk", 20},
	}
@@ -6,19 +6,19 @@ func TestNormalizeNodeHost(t *testing.T) {
	tests := []struct {
		name     string
		rawHost  string
		nodeType string
		want     string
	}{
		{
			name:     "adds default port to explicit https without port",
			rawHost:  "https://example.com",
			nodeType: "pve",
			want:     "https://example.com:8006",
		},
		{
			name:     "adds default port for bare pve host",
			rawHost:  "pve.lan",
			nodeType: "pve",
			want:     "https://pve.lan:8006",
		},
		{

@@ -39,19 +39,19 @@ func TestNormalizeNodeHost(t *testing.T) {
			nodeType: "pmg",
			want:     "https://[2001:db8::1]:8006",
		},
		{
			name:     "drops path segments",
			rawHost:  "https://example.com/api",
			nodeType: "pve",
			want:     "https://example.com:8006",
		},
		{
			name:     "adds default port to explicit http scheme",
			rawHost:  "http://example.com",
			nodeType: "pve",
			want:     "http://example.com:8006",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
@@ -25,11 +25,11 @@ type CSRFToken struct {
type CSRFTokenStore struct {
	tokens     map[string]*CSRFToken
	mu         sync.RWMutex
	saveMu     sync.Mutex // Serializes disk writes to prevent save corruption
	dataPath   string
	saveTicker *time.Ticker
	stopChan   chan bool
	stopOnce   sync.Once // Ensures Stop() can only close channel once
}

func csrfSessionKey(sessionID string) string {
@@ -159,4 +159,5 @@ func (h *DockerMetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *h

	w.WriteHeader(http.StatusNoContent)
}

// test comment
@@ -22,12 +22,12 @@ func TestHandleLookupByIDSuccess(t *testing.T) {
	lastSeen := time.Now().UTC()

	handler := newHostAgentHandlerForTests(t, models.Host{
		ID:          hostID,
		Hostname:    "host.local",
		DisplayName: "Host Local",
		Status:      "online",
		TokenID:     tokenID,
		LastSeen:    lastSeen,
	})

	req := httptest.NewRequest(http.MethodGet, "/api/agents/host/lookup?id="+hostID, nil)

@@ -42,7 +42,7 @@ func TestHandleLookupByIDSuccess(t *testing.T) {

	var resp struct {
		Success bool `json:"success"`
		Host    struct {
			ID       string `json:"id"`
			Hostname string `json:"hostname"`
			Status   string `json:"status"`
@@ -81,18 +81,18 @@ type ConfigResponse struct {

// NodeConfig represents a node configuration
type NodeConfig struct {
	ID                           string            `json:"id"`
	Name                         string            `json:"name"`
	Type                         string            `json:"type"`
	Address                      string            `json:"address"`
	Port                         int               `json:"port,omitempty"`
	Username                     string            `json:"username,omitempty"`
	HasPassword                  bool              `json:"hasPassword"`
	HasToken                     bool              `json:"hasToken"`
	SkipTLS                      bool              `json:"skipTLS,omitempty"`
	TemperatureMonitoringEnabled *bool             `json:"temperatureMonitoringEnabled,omitempty"`
	Tags                         []string          `json:"tags,omitempty"`
	Metadata                     map[string]string `json:"metadata,omitempty"`
}

// SettingsConfig represents application settings
@@ -28,13 +28,13 @@ const (

// Config controls logger initialization.
type Config struct {
	Format     string // "json", "console", or "auto"
	Level      string // "debug", "info", "warn", "error"
	Component  string // optional component name
	FilePath   string // optional log file path
	MaxSizeMB  int    // rotate after this size (MB)
	MaxAgeDays int    // keep rotated logs for this many days
	Compress   bool   // gzip rotated logs
}

// Option customizes logger construction.

@@ -271,16 +271,16 @@ func collectOptions(opts ...Option) options {
}

func parseLevel(level string) zerolog.Level {
	switch strings.ToLower(strings.TrimSpace(level)) {
	case "debug":
		return zerolog.DebugLevel
	case "warn":
		return zerolog.WarnLevel
	case "error":
		return zerolog.ErrorLevel
	default:
		return zerolog.InfoLevel
	}
}

func isValidLevel(level string) bool {

@@ -310,10 +310,10 @@ func selectWriter(format string) io.Writer {
}

func newConsoleWriter(out io.Writer) io.Writer {
	return zerolog.ConsoleWriter{
		Out:        out,
		TimeFormat: defaultTimeFmt,
	}
}

func isTerminal(file *os.File) bool {
@@ -222,14 +222,14 @@ func TestContextHelpersWithRequestID(t *testing.T) {
		t.Fatalf("expected stored request id %s, got %s", generated, got)
	}

	var buf bytes.Buffer
	logger := New("api", WithWriter(&buf))
	ctx = WithLogger(ctx, logger)

	info := FromContext(ctx)
	info.Info().Msg("ctx-log")

	event := readJSONLine(t, &buf)
	if event["request_id"] != generated {
		t.Fatalf("expected request_id %s, got %v", generated, event["request_id"])
	}

@@ -299,16 +299,16 @@ func TestFromContextWithoutRequestID(t *testing.T) {
		Level: "info",
	})

	var buf bytes.Buffer
	mu.Lock()
	baseLogger = zerolog.New(&buf).With().Timestamp().Logger()
	baseWriter = &buf
	baseComponent = ""
	log.Logger = baseLogger
	mu.Unlock()

	base := FromContext(context.Background())
	base.Info().Msg("no-request")

	event := readJSONLine(t, &buf)
	if _, ok := event["request_id"]; ok {
@@ -181,28 +181,28 @@ func TestVMToFrontend(t *testing.T) {
	lastBackup := now.Add(-24 * time.Hour)

	vm := VM{
		ID:          "vm-100",
		VMID:        100,
		Name:        "test-vm",
		Node:        "pve1",
		Instance:    "default",
		Status:      "running",
		Type:        "qemu",
		CPU:         0.15,
		CPUs:        4,
		Memory:      Memory{Total: 8000000000, Used: 4000000000},
		Disk:        Disk{Total: 100000000000, Used: 50000000000},
		NetworkIn:   1000000,
		NetworkOut:  500000,
		DiskRead:    100000,
		DiskWrite:   50000,
		Uptime:      3600,
		Tags:        []string{"production", "web"},
		LastSeen:    now,
		LastBackup:  lastBackup,
		IPAddresses: []string{"192.168.1.50", "10.0.0.50"},
		OSName:      "Ubuntu",
		OSVersion:   "22.04",
	}

	frontend := vm.ToFrontend()

@@ -268,21 +268,21 @@ func TestContainerToFrontend(t *testing.T) {
	now := time.Now()

	container := Container{
		ID:          "ct-101",
		VMID:        101,
		Name:        "test-ct",
		Node:        "pve1",
		Status:      "running",
		Type:        "lxc",
		CPU:         0.10,
		CPUs:        2,
		Memory:      Memory{Total: 4000000000, Used: 2000000000},
		Disk:        Disk{Total: 50000000000, Used: 25000000000},
		NetworkIn:   500000,
		NetworkOut:  250000,
		Uptime:      7200,
		Tags:        []string{"dev", "database"},
		LastSeen:    now,
		IPAddresses: []string{"192.168.1.51"},
	}
@@ -5,31 +5,31 @@ package models

// NodeFrontend represents a Node with frontend-friendly field names
type NodeFrontend struct {
	ID                           string       `json:"id"`
	Node                         string       `json:"node"` // Maps to Name
	Name                         string       `json:"name"`
	DisplayName                  string       `json:"displayName"`
	Instance                     string       `json:"instance"`
	Host                         string       `json:"host,omitempty"`
	Status                       string       `json:"status"`
	Type                         string       `json:"type"`
	CPU                          float64      `json:"cpu"`
	Memory                       *Memory      `json:"memory,omitempty"` // Full memory object with usage percentage
	Mem                          int64        `json:"mem"`              // Maps to Memory.Used (kept for backward compat)
	MaxMem                       int64        `json:"maxmem"`           // Maps to Memory.Total (kept for backward compat)
	Disk                         *Disk        `json:"disk,omitempty"`   // Full disk object with usage percentage
	MaxDisk                      int64        `json:"maxdisk"`          // Maps to Disk.Total (kept for backward compat)
	Uptime                       int64        `json:"uptime"`
	LoadAverage                  []float64    `json:"loadAverage"`
	KernelVersion                string       `json:"kernelVersion"`
	PVEVersion                   string       `json:"pveVersion"`
	CPUInfo                      CPUInfo      `json:"cpuInfo"`
	Temperature                  *Temperature `json:"temperature,omitempty"` // CPU/NVMe temperatures
	LastSeen                     int64        `json:"lastSeen"`              // Unix timestamp
	ConnectionHealth             string       `json:"connectionHealth"`
	IsClusterMember              bool         `json:"isClusterMember,omitempty"`
	ClusterName                  string       `json:"clusterName,omitempty"`
	TemperatureMonitoringEnabled *bool        `json:"temperatureMonitoringEnabled,omitempty"` // Per-node temperature monitoring override
}

// VMFrontend represents a VM with frontend-friendly field names

@@ -421,27 +421,27 @@ type ReplicationJobFrontend struct {

// StateFrontend represents the state with frontend-friendly field names
type StateFrontend struct {
	Nodes                        []NodeFrontend              `json:"nodes"`
	VMs                          []VMFrontend                `json:"vms"`
	Containers                   []ContainerFrontend         `json:"containers"`
	DockerHosts                  []DockerHostFrontend        `json:"dockerHosts"`
	RemovedDockerHosts           []RemovedDockerHostFrontend `json:"removedDockerHosts"`
	Hosts                        []HostFrontend              `json:"hosts"`
	Storage                      []StorageFrontend           `json:"storage"`
	CephClusters                 []CephClusterFrontend       `json:"cephClusters"`
	PhysicalDisks                []PhysicalDisk              `json:"physicalDisks"`
	PBS                          []PBSInstance               `json:"pbs"` // Keep as is
	PMG                          []PMGInstance               `json:"pmg"`
	PBSBackups                   []PBSBackup                 `json:"pbsBackups"`
	PMGBackups                   []PMGBackup                 `json:"pmgBackups"`
	Backups                      Backups                     `json:"backups"`
	ReplicationJobs              []ReplicationJobFrontend    `json:"replicationJobs"`
	ActiveAlerts                 []Alert                     `json:"activeAlerts"`     // Active alerts
	Metrics                      map[string]any              `json:"metrics"`          // Empty object for now
	PVEBackups                   PVEBackups                  `json:"pveBackups"`       // Keep as is
	Performance                  map[string]any              `json:"performance"`      // Empty object for now
	ConnectionHealth             map[string]bool             `json:"connectionHealth"` // Keep as is
	Stats                        map[string]any              `json:"stats"`            // Empty object for now
	LastUpdate                   int64                       `json:"lastUpdate"`       // Unix timestamp
	TemperatureMonitoringEnabled bool                        `json:"temperatureMonitoringEnabled"` // Global temperature monitoring setting
}
@ -4,25 +4,25 @@ import "time"
|
|||
|
||||
// StateSnapshot represents a snapshot of the state without mutex
|
||||
type StateSnapshot struct {
|
||||
Nodes []Node `json:"nodes"`
|
||||
VMs []VM `json:"vms"`
|
||||
Containers []Container `json:"containers"`
|
||||
DockerHosts []DockerHost `json:"dockerHosts"`
|
||||
RemovedDockerHosts []RemovedDockerHost `json:"removedDockerHosts"`
|
||||
Hosts []Host `json:"hosts"`
|
||||
Storage []Storage `json:"storage"`
|
||||
CephClusters []CephCluster `json:"cephClusters"`
|
||||
PhysicalDisks []PhysicalDisk `json:"physicalDisks"`
|
||||
PBSInstances []PBSInstance `json:"pbs"`
|
||||
PMGInstances []PMGInstance `json:"pmg"`
|
||||
PBSBackups []PBSBackup `json:"pbsBackups"`
|
||||
PMGBackups []PMGBackup `json:"pmgBackups"`
|
||||
Backups Backups `json:"backups"`
|
||||
ReplicationJobs []ReplicationJob `json:"replicationJobs"`
|
||||
Metrics []Metric `json:"metrics"`
|
||||
PVEBackups PVEBackups `json:"pveBackups"`
|
||||
Performance Performance `json:"performance"`
|
||||
ConnectionHealth map[string]bool `json:"connectionHealth"`
|
||||
Nodes []Node `json:"nodes"`
|
||||
VMs []VM `json:"vms"`
|
||||
Containers []Container `json:"containers"`
|
||||
DockerHosts []DockerHost `json:"dockerHosts"`
|
||||
RemovedDockerHosts []RemovedDockerHost `json:"removedDockerHosts"`
|
||||
Hosts []Host `json:"hosts"`
|
||||
Storage []Storage `json:"storage"`
|
||||
CephClusters []CephCluster `json:"cephClusters"`
|
||||
PhysicalDisks []PhysicalDisk `json:"physicalDisks"`
|
||||
PBSInstances []PBSInstance `json:"pbs"`
|
||||
PMGInstances []PMGInstance `json:"pmg"`
|
||||
PBSBackups []PBSBackup `json:"pbsBackups"`
|
||||
PMGBackups []PMGBackup `json:"pmgBackups"`
|
||||
Backups Backups `json:"backups"`
|
||||
ReplicationJobs []ReplicationJob `json:"replicationJobs"`
|
||||
Metrics []Metric `json:"metrics"`
|
||||
PVEBackups PVEBackups `json:"pveBackups"`
|
||||
Performance Performance `json:"performance"`
|
||||
ConnectionHealth map[string]bool `json:"connectionHealth"`
|
||||
Stats Stats `json:"stats"`
|
||||
ActiveAlerts []Alert `json:"activeAlerts"`
|
||||
RecentlyResolved []ResolvedAlert `json:"recentlyResolved"`
|
||||
|
|
@ -63,11 +63,11 @@ func (s *State) GetSnapshot() StateSnapshot {
|
|||
PBS: pbsBackups,
|
||||
PMG: pmgBackups,
|
||||
},
|
||||
ReplicationJobs: append([]ReplicationJob{}, s.ReplicationJobs...),
|
||||
Metrics: append([]Metric{}, s.Metrics...),
|
||||
PVEBackups: pveBackups,
|
||||
Performance: s.Performance,
|
||||
ConnectionHealth: make(map[string]bool),
|
||||
ReplicationJobs: append([]ReplicationJob{}, s.ReplicationJobs...),
|
||||
Metrics: append([]Metric{}, s.Metrics...),
|
||||
PVEBackups: pveBackups,
|
||||
Performance: s.Performance,
|
||||
ConnectionHealth: make(map[string]bool),
|
||||
Stats: s.Stats,
|
||||
ActiveAlerts: append([]Alert{}, s.ActiveAlerts...),
|
||||
RecentlyResolved: append([]ResolvedAlert{}, s.RecentlyResolved...),
|
||||
|
|
@ -136,25 +136,25 @@ func (s StateSnapshot) ToFrontend() StateFrontend {
|
|||
}
|
||||
|
||||
return StateFrontend{
|
||||
Nodes: nodes,
|
||||
VMs: vms,
|
||||
Containers: containers,
|
||||
DockerHosts: dockerHosts,
|
||||
RemovedDockerHosts: removedDockerHosts,
|
||||
Hosts: hosts,
|
||||
Storage: storage,
|
||||
CephClusters: cephClusters,
|
||||
PhysicalDisks: s.PhysicalDisks,
|
||||
PBS: s.PBSInstances,
|
||||
PMG: s.PMGInstances,
|
||||
PBSBackups: s.PBSBackups,
|
||||
PMGBackups: s.PMGBackups,
|
||||
Backups: s.Backups,
|
||||
ReplicationJobs: replicationJobs,
|
||||
ActiveAlerts: s.ActiveAlerts,
|
||||
Metrics: make(map[string]any),
|
||||
PVEBackups: s.PVEBackups,
|
||||
Performance: make(map[string]any),
|
||||
Nodes: nodes,
|
||||
VMs: vms,
|
||||
Containers: containers,
|
||||
DockerHosts: dockerHosts,
|
||||
RemovedDockerHosts: removedDockerHosts,
|
||||
Hosts: hosts,
|
||||
Storage: storage,
|
||||
CephClusters: cephClusters,
|
||||
PhysicalDisks: s.PhysicalDisks,
|
||||
PBS: s.PBSInstances,
|
||||
PMG: s.PMGInstances,
|
||||
PBSBackups: s.PBSBackups,
|
||||
PMGBackups: s.PMGBackups,
|
||||
Backups: s.Backups,
|
||||
ReplicationJobs: replicationJobs,
|
||||
ActiveAlerts: s.ActiveAlerts,
|
||||
Metrics: make(map[string]any),
|
||||
PVEBackups: s.PVEBackups,
|
||||
Performance: make(map[string]any),
|
||||
ConnectionHealth: s.ConnectionHealth,
|
||||
Stats: make(map[string]any),
|
||||
LastUpdate: s.LastUpdate.Unix() * 1000, // JavaScript timestamp
|
||||
|
|
|
|||
|
|
@@ -77,7 +77,7 @@ func TestBackoffConfig_NextDelay(t *testing.T) {
				Max: 5 * time.Minute,
			},
			attempt: 0,
			rng:     0.5,              // neutral jitter
			wantMin: 8 * time.Second,  // 10s * (1 - 0.2)
			wantMax: 12 * time.Second, // 10s * (1 + 0.2)
		},
@@ -23,8 +23,8 @@ type circuitBreaker struct {
	maxDelay       time.Duration
	openThreshold  int
	halfOpenWindow time.Duration
	stateSince     time.Time
	lastTransition time.Time
}

func newCircuitBreaker(openThreshold int, retryInterval, maxDelay, halfOpenWindow time.Duration) *circuitBreaker {

@@ -40,16 +40,16 @@ func newCircuitBreaker(openThreshold int, retryInterval, maxDelay, halfOpenWindo
	if halfOpenWindow <= 0 {
		halfOpenWindow = 30 * time.Second
	}
	now := time.Now()
	return &circuitBreaker{
		state:          breakerClosed,
		retryInterval:  retryInterval,
		maxDelay:       maxDelay,
		openThreshold:  openThreshold,
		halfOpenWindow: halfOpenWindow,
		stateSince:     now,
		lastTransition: now,
	}
}

func (b *circuitBreaker) allow(now time.Time) bool {
@@ -272,7 +272,7 @@ func TestCircuitBreaker_RetryIntervalBackoff(t *testing.T) {

	// All subsequent failures should keep it at maxDelay
	for i := 0; i < 5; i++ {
		cb.allow(now.Add(time.Duration(42+i*2) * time.Minute))
		cb.recordFailure(now.Add(time.Duration(42+i*2) * time.Minute))
	}
@@ -163,11 +163,11 @@ func TestAdaptiveSchedulerIntegration(t *testing.T) {
		t.Fatalf("expected transient instance to recover with successes, got 0")
	}

	dlqKeys := map[string]struct{}{}
	for _, task := range report.Health.DeadLetter.Tasks {
		dlqKeys[instanceKey(task.Type, task.Instance)] = struct{}{}
	}
	if len(report.Health.Breakers) > len(dlqKeys) {
		t.Fatalf("unexpected number of circuit breaker entries: got %d want <= %d", len(report.Health.Breakers), len(dlqKeys))
	}
	for _, breaker := range report.Health.Breakers {

@@ -218,12 +218,12 @@ func TestAdaptiveSchedulerSoak(t *testing.T) {
		t.Skip("skipping soak test (enable with -soak or HARNESS_SOAK_MINUTES)")
	}

	minutes := 15
	if minutesEnv != "" {
		if parsed, err := strconv.Atoi(minutesEnv); err == nil && parsed > 0 {
			minutes = parsed
		}
	}

	duration := time.Duration(minutes) * time.Minute
	warmup := 2 * time.Minute
@@ -52,14 +52,14 @@ type IntervalRequest struct {

// InstanceDescriptor describes a monitored endpoint for scheduling purposes.
type InstanceDescriptor struct {
	Name          string
	Type          InstanceType
	LastSuccess   time.Time
	LastFailure   time.Time
	LastScheduled time.Time
	LastInterval  time.Duration
	ErrorCount    int
	Metadata      map[string]any
}

// ScheduledTask represents a single polling opportunity planned by the scheduler.

@@ -90,13 +90,13 @@ func DefaultSchedulerConfig() SchedulerConfig {

// AdaptiveScheduler orchestrates poll execution plans using pluggable scoring strategies.
type AdaptiveScheduler struct {
	cfg       SchedulerConfig
	staleness StalenessSource
	interval  IntervalSelector
	enqueuer  TaskEnqueuer

	mu       sync.RWMutex
	lastPlan map[string]ScheduledTask
}

// NewAdaptiveScheduler constructs a scheduler with safe defaults.

@@ -110,15 +110,15 @@ func NewAdaptiveScheduler(cfg SchedulerConfig, staleness StalenessSource, interv
	if cfg.MaxInterval <= 0 || cfg.MaxInterval < cfg.MinInterval {
		cfg.MaxInterval = DefaultSchedulerConfig().MaxInterval
	}
	if staleness == nil {
		staleness = noopStalenessSource{}
	}
	if interval == nil {
		interval = newAdaptiveIntervalSelector(cfg)
	}
	if enqueuer == nil {
		enqueuer = noopTaskEnqueuer{}
	}

	return &AdaptiveScheduler{
		cfg: cfg,
@@ -1,204 +1,204 @@
package monitoring

import (
	"crypto/sha1"
	"encoding/hex"
	"sync"
	"time"
)

// FreshnessSnapshot captures the most recent freshness metadata available for a target instance.
type FreshnessSnapshot struct {
	InstanceType InstanceType
	Instance     string
	LastSuccess  time.Time
	LastError    time.Time
	LastMutated  time.Time
	ChangeHash   string
}

// StalenessTracker maintains freshness metadata and exposes normalized staleness scores.
type StalenessTracker struct {
	mu       sync.RWMutex
	entries  map[string]FreshnessSnapshot
	baseTTL  time.Duration
	maxStale time.Duration
	metrics  *PollMetrics
}

// NewStalenessTracker builds a tracker wired to poll metrics for last-success signal and using default parameters.
func NewStalenessTracker(metrics *PollMetrics) *StalenessTracker {
	return &StalenessTracker{
		entries:  make(map[string]FreshnessSnapshot),
		baseTTL:  10 * time.Second,
		maxStale: 5 * time.Minute,
		metrics:  metrics,
	}
}

// SetBounds allows overriding score decay windows.
func (t *StalenessTracker) SetBounds(baseTTL, maxStale time.Duration) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if baseTTL > 0 {
		t.baseTTL = baseTTL
	}
	if maxStale > 0 {
		t.maxStale = maxStale
	}
}

// UpdateSuccess records a successful poll along with a change hash derived from the payload.
func (t *StalenessTracker) UpdateSuccess(instanceType InstanceType, instance string, payload []byte) {
	if t == nil {
		return
	}

	now := time.Now()
	snapshot := FreshnessSnapshot{
		InstanceType: instanceType,
		Instance:     instance,
		LastSuccess:  now,
	}

	if len(payload) > 0 {
		sum := sha1.Sum(payload)
		snapshot.ChangeHash = hex.EncodeToString(sum[:])
		snapshot.LastMutated = now
	}

	t.setSnapshot(snapshot)
}

// UpdateError records the most recent error time for a target instance.
func (t *StalenessTracker) UpdateError(instanceType InstanceType, instance string) {
	if t == nil {
		return
	}

	snapshot := FreshnessSnapshot{
		InstanceType: instanceType,
		Instance:     instance,
		LastError:    time.Now(),
	}

	t.mergeSnapshot(snapshot)
}

// SetChangeHash updates the change fingerprint without affecting success timestamps.
func (t *StalenessTracker) SetChangeHash(instanceType InstanceType, instance string, payload []byte) {
	if t == nil || len(payload) == 0 {
		return
	}

	now := time.Now()
	sum := sha1.Sum(payload)
	hash := hex.EncodeToString(sum[:])

	t.mu.Lock()
	defer t.mu.Unlock()

	key := trackerKey(instanceType, instance)
	snap := t.entries[key]
	snap.InstanceType = instanceType
	snap.Instance = instance
	snap.ChangeHash = hash
	snap.LastMutated = now
	t.entries[key] = snap
}

// StalenessScore implements the StalenessSource interface and returns a normalized value in [0,1].
func (t *StalenessTracker) StalenessScore(instanceType InstanceType, instance string) (float64, bool) {
	if t == nil {
		return 0, false
	}

	snap, ok := t.snapshot(instanceType, instance)
	if !ok {
		return 0, false
	}

	if !snap.LastSuccess.IsZero() && t.metrics != nil {
		if ts, ok := t.metrics.lastSuccessFor(string(instanceType), instance); ok {
			snap.LastSuccess = ts
		}
	}

	if snap.LastSuccess.IsZero() {
		return 1, true
	}

	now := time.Now()
	age := now.Sub(snap.LastSuccess)
	if age <= 0 {
		return 0, true
	}

	max := t.maxStale
	if max <= 0 {
		max = 5 * time.Minute
	}
	score := age.Seconds() / max.Seconds()
	if score > 1 {
		score = 1
	}
	if score < 0 {
		score = 0
	}
	return score, true
}

func (t *StalenessTracker) setSnapshot(snapshot FreshnessSnapshot) {
	t.mu.Lock()
	defer t.mu.Unlock()
	key := trackerKey(snapshot.InstanceType, snapshot.Instance)
	t.entries[key] = snapshot
}

func (t *StalenessTracker) mergeSnapshot(snapshot FreshnessSnapshot) {
	t.mu.Lock()
	defer t.mu.Unlock()

	key := trackerKey(snapshot.InstanceType, snapshot.Instance)
	existing := t.entries[key]

	if snapshot.LastSuccess.After(existing.LastSuccess) {
		existing.LastSuccess = snapshot.LastSuccess
	}
	if snapshot.LastError.After(existing.LastError) {
		existing.LastError = snapshot.LastError
	}
	if snapshot.LastMutated.After(existing.LastMutated) {
		existing.LastMutated = snapshot.LastMutated
	}
	if snapshot.ChangeHash != "" {
		existing.ChangeHash = snapshot.ChangeHash
	}

	existing.InstanceType = snapshot.InstanceType
	existing.Instance = snapshot.Instance

	t.entries[key] = existing
}

func (t *StalenessTracker) snapshot(instanceType InstanceType, instance string) (FreshnessSnapshot, bool) {
	t.mu.RLock()
	defer t.mu.RUnlock()
	snap, ok := t.entries[trackerKey(instanceType, instance)]
	return snap, ok
}

func trackerKey(instanceType InstanceType, instance string) string {
	return string(instanceType) + "::" + instance
}

// StalenessSnapshot represents staleness data for a single instance.
type StalenessSnapshot struct {
	Instance    string    `json:"instance"`
	Type        string    `json:"type"`
	Score       float64   `json:"score"`
	LastSuccess time.Time `json:"lastSuccess"`
	LastError   time.Time `json:"lastError,omitempty"`
}

// Snapshot returns a copy of all staleness data for API exposure.
@@ -9,9 +9,9 @@ import (
	"sync"
	"time"

	_ "github.com/mattn/go-sqlite3"
	"github.com/rcourtman/pulse-go-rewrite/internal/alerts"
	"github.com/rcourtman/pulse-go-rewrite/internal/utils"
	"github.com/rs/zerolog/log"
)
@@ -54,9 +54,9 @@ type NotificationQueue struct {
	wg              sync.WaitGroup
	processorTicker *time.Ticker
	cleanupTicker   *time.Ticker
	notifyChan      chan struct{}                   // Signal when new notifications are added
	processor       func(*QueuedNotification) error // Notification processor function
	workerSem       chan struct{}                   // Semaphore for limiting concurrent workers
}

// NewNotificationQueue creates a new persistent notification queue
@@ -377,19 +377,19 @@ func TestExtractTempInput(t *testing.T) {
			expected: 45.0,
		},
		{
			name:  "no input field",
			input: map[string]interface{}{"temp1_max": 100.0},
			isNaN: true,
		},
		{
			name:  "empty map",
			input: map[string]interface{}{},
			isNaN: true,
		},
		{
			name:  "wrong suffix",
			input: map[string]interface{}{"temp1_max": 45.0},
			isNaN: true,
		},
	}
@@ -17,7 +17,7 @@ func TestIsHexString(t *testing.T) {
		{"xyz", false},
		{"123g", false},
		{"hello-world", false},
		{"12 34", false},  // space
		{"12\n34", false}, // newline
	}
@@ -12,12 +12,12 @@ import (
type JobState string

const (
	JobStateIdle      JobState = "idle"
	JobStateQueued    JobState = "queued"
	JobStateRunning   JobState = "running"
	JobStateCompleted JobState = "completed"
	JobStateFailed    JobState = "failed"
	JobStateCancelled JobState = "cancelled"
)

// UpdateJob represents a single update job

@@ -34,10 +34,10 @@ type UpdateJob struct {

// UpdateQueue manages the update job queue ensuring only one update runs at a time
type UpdateQueue struct {
	mu         sync.RWMutex
	currentJob *UpdateJob
	jobHistory []*UpdateJob
	maxHistory int
}

// NewUpdateQueue creates a new update queue
@@ -7,10 +7,10 @@ import (

// UpdateRequest represents a request to update
type UpdateRequest struct {
	Version string
	Channel string
	Force   bool
	DryRun  bool
}

// UpdatePlan contains information about how an update will be performed

@@ -26,11 +26,11 @@ type UpdatePlan struct {

// UpdateProgress represents progress updates during an update
type UpdateProgress struct {
	Stage      string `json:"stage"`
	Progress   int    `json:"progress"` // 0-100
	Message    string `json:"message"`
	IsComplete bool   `json:"isComplete"`
	Error      string `json:"error,omitempty"`
}

// ProgressCallback is called during update execution
@@ -66,9 +66,9 @@ func TestReport_JSONMarshal(t *testing.T) {
			IntervalSeconds: 30,
		},
		Host: HostInfo{
			Hostname:         "docker-host",
			DockerVersion:    "24.0.0",
			TotalCPU:         8,
			TotalMemoryBytes: 16000000000,
		},
		Containers: []Container{
@@ -89,24 +89,24 @@ type Sensors struct {

// RAIDArray represents an mdadm RAID array.
type RAIDArray struct {
	Device         string       `json:"device"`                 // e.g., /dev/md0
	Name           string       `json:"name,omitempty"`         // Array name if set
	Level          string       `json:"level"`                  // RAID level: raid0, raid1, raid5, raid6, raid10
	State          string       `json:"state"`                  // clean, active, degraded, recovering, resyncing, etc.
	TotalDevices   int          `json:"totalDevices"`           // Total number of devices in array
	ActiveDevices  int          `json:"activeDevices"`          // Number of active devices
	WorkingDevices int          `json:"workingDevices"`         // Number of working devices
	FailedDevices  int          `json:"failedDevices"`          // Number of failed devices
	SpareDevices   int          `json:"spareDevices"`           // Number of spare devices
	UUID           string       `json:"uuid,omitempty"`         // Array UUID
	Devices        []RAIDDevice `json:"devices"`                // Individual devices in array
	RebuildPercent float64      `json:"rebuildPercent"`         // Rebuild/resync progress (0-100)
	RebuildSpeed   string       `json:"rebuildSpeed,omitempty"` // Rebuild speed (e.g., "50000K/sec")
}

// RAIDDevice represents a single device in a RAID array.
type RAIDDevice struct {
	Device string `json:"device"` // e.g., /dev/sda1
	State  string `json:"state"`  // active, spare, faulty, removed
	Slot   int    `json:"slot"`   // Position in array (-1 if not applicable)
}
@@ -83,13 +83,13 @@ type SubnetPhase struct {

// EnvironmentProfile captures detection results and scanning plan.
type EnvironmentProfile struct {
	Type         Environment       // Detected environment.
	Phases       []SubnetPhase     // Subnet scanning phases.
	ExtraTargets []net.IP          // IPs to always probe.
	Policy       ScanPolicy        // Applied scan policy.
	Confidence   float64           // Overall confidence (0.0 - 1.0).
	Warnings     []string          // Non-fatal detection warnings.
	Metadata     map[string]string // Misc metadata (container type, gateway, etc.).
}

// DetectEnvironment performs environment detection and returns a profile.
@@ -1839,23 +1839,23 @@ func (a *VMAgentField) UnmarshalJSON(data []byte) error {

// VMStatus represents detailed VM status returned by Proxmox.
type VMStatus struct {
	Status     string       `json:"status"`
	CPU        float64      `json:"cpu"`
	CPUs       int          `json:"cpus"`
	Mem        uint64       `json:"mem"`
	MaxMem     uint64       `json:"maxmem"`
	Balloon    uint64       `json:"balloon"`
	BalloonMin uint64       `json:"balloon_min"`
	FreeMem    uint64       `json:"freemem"`
	MemInfo    *VMMemInfo   `json:"meminfo,omitempty"`
	Disk       uint64       `json:"disk"`
	MaxDisk    uint64       `json:"maxdisk"`
	DiskRead   uint64       `json:"diskread"`
	DiskWrite  uint64       `json:"diskwrite"`
	NetIn      uint64       `json:"netin"`
	NetOut     uint64       `json:"netout"`
	Uptime     uint64       `json:"uptime"`
	Agent      VMAgentField `json:"agent"`
}

// GetZFSPoolStatus gets the status of ZFS pools on a node
@@ -246,8 +246,6 @@ func TestMemoryStatusUnmarshalFlexibleValues(t *testing.T) {
	}
}

// TestMemoryStatusEffectiveAvailable_RegressionIssue435 tests the specific scenarios
// reported in GitHub issue #435 where memory calculations incorrectly included cache/buffers
func TestMemoryStatusEffectiveAvailable_RegressionIssue435(t *testing.T) {