diff --git a/internal/alerts/alerts.go b/internal/alerts/alerts.go
index ac68416da..d7a65a3ce 100644
--- a/internal/alerts/alerts.go
+++ b/internal/alerts/alerts.go
@@ -2851,16 +2851,27 @@ func hostResourceID(hostID string) string {
 }
 
 func hostDisplayName(host models.Host) string {
+	base := "Host"
 	if name := strings.TrimSpace(host.DisplayName); name != "" {
-		return name
+		base = name
+	} else if name := strings.TrimSpace(host.Hostname); name != "" {
+		base = name
+	} else if host.ID != "" {
+		base = host.ID
 	}
-	if name := strings.TrimSpace(host.Hostname); name != "" {
-		return name
+
+	if strings.TrimSpace(host.LinkedNodeID) != "" ||
+		strings.TrimSpace(host.LinkedVMID) != "" ||
+		strings.TrimSpace(host.LinkedContainerID) != "" {
+		if strings.EqualFold(base, "Host") {
+			return "Host Agent"
+		}
+		if !strings.Contains(strings.ToLower(base), "host agent") {
+			return fmt.Sprintf("%s (Host Agent)", base)
+		}
 	}
-	if host.ID != "" {
-		return host.ID
-	}
-	return "Host"
+
+	return base
 }
 
 func hostInstanceName(host models.Host) string {
@@ -3067,7 +3078,7 @@ func (m *Manager) CheckHost(host models.Host) {
 		if disk.Temperature > 0 && !disk.Standby {
 			// Use specific resource ID for the disk: hostID/disk-temp:device
 			tempResourceID := fmt.Sprintf("%s/disk_temp:%s", hostResourceID(host.ID), sanitizeHostComponent(disk.Device))
-			tempResourceName := fmt.Sprintf("%s (%s Temp)", host.DisplayName, disk.Device)
+			tempResourceName := fmt.Sprintf("%s (%s Temp)", hostDisplayName(host), disk.Device)
 
 			diskTempMetadata := cloneMetadata(baseMetadata)
 			diskTempMetadata["metric"] = "diskTemperature"
diff --git a/internal/alerts/alerts_test.go b/internal/alerts/alerts_test.go
index 5efa81205..4790a2188 100644
--- a/internal/alerts/alerts_test.go
+++ b/internal/alerts/alerts_test.go
@@ -15021,6 +15021,39 @@ func TestCheckHostComprehensive(t *testing.T) {
 			t.Error("expected tags in metadata")
 		}
 	})
+
+	t.Run("qualifies linked host agent alert resource names", func(t *testing.T) {
+		m := newTestManager(t)
+
+		m.mu.Lock()
+		m.config.TimeThreshold = 0
+		m.config.TimeThresholds = map[string]int{}
+		m.config.HostDefaults = ThresholdConfig{
+			CPU: &HysteresisThreshold{Trigger: 80.0, Clear: 70.0},
+		}
+		m.mu.Unlock()
+
+		host := models.Host{
+			ID:          "host1",
+			DisplayName: "Hamster",
+			Hostname:    "hamster.local",
+			LinkedVMID:  "Main:node3:101",
+			CPUUsage:    97.5,
+		}
+
+		m.CheckHost(host)
+
+		m.mu.RLock()
+		alert := m.activeAlerts["host:host1-cpu"]
+		m.mu.RUnlock()
+
+		if alert == nil {
+			t.Fatal("expected CPU alert")
+		}
+		if alert.ResourceName != "Hamster (Host Agent)" {
+			t.Fatalf("expected qualified host resource name, got %q", alert.ResourceName)
+		}
+	})
 }
 
 func TestCheckPBSComprehensive(t *testing.T) {
diff --git a/internal/alerts/utility_test.go b/internal/alerts/utility_test.go
index 627d323fe..e0c278d95 100644
--- a/internal/alerts/utility_test.go
+++ b/internal/alerts/utility_test.go
@@ -710,6 +710,33 @@ func TestHostDisplayName(t *testing.T) {
 			},
 			want: "Server Name",
 		},
+		{
+			name: "linked host agent display name is qualified",
+			host: models.Host{
+				ID:          "id-123",
+				DisplayName: "Hamster",
+				LinkedVMID:  "Main:node1:101",
+			},
+			want: "Hamster (Host Agent)",
+		},
+		{
+			name: "linked host agent hostname fallback is qualified",
+			host: models.Host{
+				ID:                "id-123",
+				Hostname:          "proxmoxn3",
+				LinkedContainerID: "Main:node1:102",
+			},
+			want: "proxmoxn3 (Host Agent)",
+		},
+		{
+			name: "existing host agent suffix is not duplicated",
+			host: models.Host{
+				ID:           "id-123",
+				DisplayName:  "Hamster (Host Agent)",
+				LinkedNodeID: "Main-node1",
+			},
+			want: "Hamster (Host Agent)",
+		},
 	}
 
 	for _, tc := range tests {