Mirror of https://github.com/rcourtman/Pulse.git, synced 2026-04-28 03:20:11 +00:00
test: Add tests for monitoring and notifications functions
- buildCephClusterModel: 0% → 100% (11 test cases)
- collectContainerRootUsage: 0% → 100% (18 test cases)
- NotificationManager getters/setters: 8 functions now tested

Overall coverage: 45.5% → 45.8%
This commit is contained in:
parent
74fe6b9223
commit
b444793897
3 changed files with 1126 additions and 0 deletions
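
The per-function and overall figures above come from Go's built-in coverage tooling; they can be reproduced locally with, for example (exact package selection depends on the repository layout):

    go test -cover ./...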
@@ -4,6 +4,7 @@ import (
    "encoding/json"
    "testing"

    "github.com/rcourtman/pulse-go-rewrite/internal/models"
    "github.com/rcourtman/pulse-go-rewrite/pkg/proxmox"
)

@@ -381,3 +382,375 @@ func TestSummarizeCephHealth(t *testing.T) {
        })
    }
}

func TestBuildCephClusterModel(t *testing.T) {
    t.Parallel()

    tests := []struct {
        name string
        instanceName string
        status *proxmox.CephStatus
        df *proxmox.CephDF
        check func(t *testing.T, cluster models.CephCluster)
    }{
        {
            name: "basic case with minimal status data",
            instanceName: "pve-cluster",
            status: &proxmox.CephStatus{
                FSID: "abc123",
                PGMap: proxmox.CephPGMap{
                    BytesTotal: 1000000,
                    BytesUsed: 400000,
                    BytesAvail: 600000,
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.ID != "pve-cluster-abc123" {
                    t.Errorf("ID = %q, want %q", cluster.ID, "pve-cluster-abc123")
                }
                if cluster.Instance != "pve-cluster" {
                    t.Errorf("Instance = %q, want %q", cluster.Instance, "pve-cluster")
                }
                if cluster.FSID != "abc123" {
                    t.Errorf("FSID = %q, want %q", cluster.FSID, "abc123")
                }
                if cluster.TotalBytes != 1000000 {
                    t.Errorf("TotalBytes = %d, want %d", cluster.TotalBytes, 1000000)
                }
                if cluster.UsedBytes != 400000 {
                    t.Errorf("UsedBytes = %d, want %d", cluster.UsedBytes, 400000)
                }
                if cluster.AvailableBytes != 600000 {
                    t.Errorf("AvailableBytes = %d, want %d", cluster.AvailableBytes, 600000)
                }
            },
        },
        {
            name: "DF data overrides PGMap data",
            instanceName: "test-instance",
            status: &proxmox.CephStatus{
                FSID: "fsid-456",
                PGMap: proxmox.CephPGMap{
                    BytesTotal: 1000,
                    BytesUsed: 500,
                    BytesAvail: 500,
                },
            },
            df: &proxmox.CephDF{
                Data: proxmox.CephDFData{
                    Stats: proxmox.CephDFStats{
                        TotalBytes: 2000000,
                        TotalUsedBytes: 800000,
                        TotalAvailBytes: 1200000,
                    },
                },
            },
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.TotalBytes != 2000000 {
                    t.Errorf("TotalBytes = %d, want %d (DF should override PGMap)", cluster.TotalBytes, 2000000)
                }
                if cluster.UsedBytes != 800000 {
                    t.Errorf("UsedBytes = %d, want %d (DF should override PGMap)", cluster.UsedBytes, 800000)
                }
                if cluster.AvailableBytes != 1200000 {
                    t.Errorf("AvailableBytes = %d, want %d (DF should override PGMap)", cluster.AvailableBytes, 1200000)
                }
            },
        },
        {
            name: "DF nil uses PGMap values",
            instanceName: "pgmap-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-789",
                PGMap: proxmox.CephPGMap{
                    BytesTotal: 5000000,
                    BytesUsed: 1500000,
                    BytesAvail: 3500000,
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.TotalBytes != 5000000 {
                    t.Errorf("TotalBytes = %d, want %d", cluster.TotalBytes, 5000000)
                }
                if cluster.UsedBytes != 1500000 {
                    t.Errorf("UsedBytes = %d, want %d", cluster.UsedBytes, 1500000)
                }
                if cluster.AvailableBytes != 3500000 {
                    t.Errorf("AvailableBytes = %d, want %d", cluster.AvailableBytes, 3500000)
                }
            },
        },
        {
            name: "pool parsing from DF",
            instanceName: "pool-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-pools",
            },
            df: &proxmox.CephDF{
                Data: proxmox.CephDFData{
                    Stats: proxmox.CephDFStats{
                        TotalBytes: 10000000,
                    },
                    Pools: []proxmox.CephDFPool{
                        {
                            ID: 1,
                            Name: "rbd-pool",
                            Stats: proxmox.CephDFPoolStat{
                                BytesUsed: 100000,
                                MaxAvail: 900000,
                                Objects: 50,
                                PercentUsed: 10.0,
                            },
                        },
                        {
                            ID: 2,
                            Name: "cephfs-data",
                            Stats: proxmox.CephDFPoolStat{
                                BytesUsed: 200000,
                                MaxAvail: 800000,
                                Objects: 100,
                                PercentUsed: 20.0,
                            },
                        },
                    },
                },
            },
            check: func(t *testing.T, cluster models.CephCluster) {
                if len(cluster.Pools) != 2 {
                    t.Fatalf("len(Pools) = %d, want 2", len(cluster.Pools))
                }
                pool1 := cluster.Pools[0]
                if pool1.ID != 1 || pool1.Name != "rbd-pool" {
                    t.Errorf("Pool[0] = {ID:%d, Name:%q}, want {ID:1, Name:rbd-pool}", pool1.ID, pool1.Name)
                }
                if pool1.StoredBytes != 100000 {
                    t.Errorf("Pool[0].StoredBytes = %d, want %d", pool1.StoredBytes, 100000)
                }
                if pool1.AvailableBytes != 900000 {
                    t.Errorf("Pool[0].AvailableBytes = %d, want %d", pool1.AvailableBytes, 900000)
                }
                if pool1.Objects != 50 {
                    t.Errorf("Pool[0].Objects = %d, want %d", pool1.Objects, 50)
                }
                if pool1.PercentUsed != 10.0 {
                    t.Errorf("Pool[0].PercentUsed = %f, want %f", pool1.PercentUsed, 10.0)
                }

                pool2 := cluster.Pools[1]
                if pool2.ID != 2 || pool2.Name != "cephfs-data" {
                    t.Errorf("Pool[1] = {ID:%d, Name:%q}, want {ID:2, Name:cephfs-data}", pool2.ID, pool2.Name)
                }
            },
        },
        {
            name: "service status parsing with running and stopped daemons",
            instanceName: "service-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-services",
                ServiceMap: proxmox.CephServiceMap{
                    Services: map[string]proxmox.CephServiceDefinition{
                        "mon": {
                            Daemons: map[string]proxmox.CephServiceDaemon{
                                "a": {Host: "node1", Status: "running"},
                                "b": {Host: "node2", Status: "running"},
                                "c": {Host: "node3", Status: "stopped"},
                            },
                        },
                        "mgr": {
                            Daemons: map[string]proxmox.CephServiceDaemon{
                                "node1": {Host: "node1", Status: "active"},
                                "node2": {Host: "node2", Status: "standby"},
                            },
                        },
                    },
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.NumMons != 3 {
                    t.Errorf("NumMons = %d, want 3", cluster.NumMons)
                }
                if cluster.NumMgrs != 2 {
                    t.Errorf("NumMgrs = %d, want 2", cluster.NumMgrs)
                }
                if len(cluster.Services) != 2 {
                    t.Fatalf("len(Services) = %d, want 2", len(cluster.Services))
                }
                // Find mon service
                var monService *models.CephServiceStatus
                for i := range cluster.Services {
                    if cluster.Services[i].Type == "mon" {
                        monService = &cluster.Services[i]
                        break
                    }
                }
                if monService == nil {
                    t.Fatal("mon service not found")
                }
                if monService.Running != 2 {
                    t.Errorf("mon.Running = %d, want 2", monService.Running)
                }
                if monService.Total != 3 {
                    t.Errorf("mon.Total = %d, want 3", monService.Total)
                }
                if monService.Message != "Offline: c@node3" {
                    t.Errorf("mon.Message = %q, want %q", monService.Message, "Offline: c@node3")
                }
            },
        },
        {
            name: "health message integration",
            instanceName: "health-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-health",
                Health: proxmox.CephHealth{
                    Status: "HEALTH_WARN",
                    Summary: []proxmox.CephHealthSummary{
                        {Message: "1 pool(s) have too few placement groups"},
                    },
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.Health != "HEALTH_WARN" {
                    t.Errorf("Health = %q, want %q", cluster.Health, "HEALTH_WARN")
                }
                if cluster.HealthMessage != "1 pool(s) have too few placement groups" {
                    t.Errorf("HealthMessage = %q, want %q", cluster.HealthMessage, "1 pool(s) have too few placement groups")
                }
            },
        },
        {
            name: "OSD map values",
            instanceName: "osd-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-osd",
                OSDMap: proxmox.CephOSDMap{
                    NumOSDs: 12,
                    NumUpOSDs: 10,
                    NumInOSDs: 11,
                },
                PGMap: proxmox.CephPGMap{
                    NumPGs: 256,
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.NumOSDs != 12 {
                    t.Errorf("NumOSDs = %d, want 12", cluster.NumOSDs)
                }
                if cluster.NumOSDsUp != 10 {
                    t.Errorf("NumOSDsUp = %d, want 10", cluster.NumOSDsUp)
                }
                if cluster.NumOSDsIn != 11 {
                    t.Errorf("NumOSDsIn = %d, want 11", cluster.NumOSDsIn)
                }
                if cluster.NumPGs != 256 {
                    t.Errorf("NumPGs = %d, want 256", cluster.NumPGs)
                }
            },
        },
        {
            name: "empty FSID fallback",
            instanceName: "no-fsid-instance",
            status: &proxmox.CephStatus{
                FSID: "",
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if cluster.ID != "no-fsid-instance" {
                    t.Errorf("ID = %q, want %q (should equal instanceName when FSID is empty)", cluster.ID, "no-fsid-instance")
                }
                if cluster.FSID != "" {
                    t.Errorf("FSID = %q, want empty", cluster.FSID)
                }
            },
        },
        {
            name: "usage percent calculation",
            instanceName: "usage-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-usage",
                PGMap: proxmox.CephPGMap{
                    BytesTotal: 1000,
                    BytesUsed: 250,
                    BytesAvail: 750,
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                // 250/1000 * 100 = 25%
                if cluster.UsagePercent != 25.0 {
                    t.Errorf("UsagePercent = %f, want 25.0", cluster.UsagePercent)
                }
            },
        },
        {
            name: "DF with zero TotalBytes uses PGMap",
            instanceName: "zero-df-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-zero",
                PGMap: proxmox.CephPGMap{
                    BytesTotal: 3000000,
                    BytesUsed: 1000000,
                    BytesAvail: 2000000,
                },
            },
            df: &proxmox.CephDF{
                Data: proxmox.CephDFData{
                    Stats: proxmox.CephDFStats{
                        TotalBytes: 0,
                        TotalUsedBytes: 0,
                        TotalAvailBytes: 0,
                    },
                },
            },
            check: func(t *testing.T, cluster models.CephCluster) {
                // When DF TotalBytes is 0, should use PGMap values
                if cluster.TotalBytes != 3000000 {
                    t.Errorf("TotalBytes = %d, want %d (PGMap value when DF TotalBytes=0)", cluster.TotalBytes, 3000000)
                }
                if cluster.UsedBytes != 1000000 {
                    t.Errorf("UsedBytes = %d, want %d", cluster.UsedBytes, 1000000)
                }
            },
        },
        {
            name: "service with daemon without host",
            instanceName: "no-host-test",
            status: &proxmox.CephStatus{
                FSID: "fsid-nohost",
                ServiceMap: proxmox.CephServiceMap{
                    Services: map[string]proxmox.CephServiceDefinition{
                        "osd": {
                            Daemons: map[string]proxmox.CephServiceDaemon{
                                "0": {Host: "", Status: "stopped"},
                            },
                        },
                    },
                },
            },
            df: nil,
            check: func(t *testing.T, cluster models.CephCluster) {
                if len(cluster.Services) != 1 {
                    t.Fatalf("len(Services) = %d, want 1", len(cluster.Services))
                }
                // When host is empty, message should just use daemon name
                if cluster.Services[0].Message != "Offline: 0" {
                    t.Errorf("Service.Message = %q, want %q", cluster.Services[0].Message, "Offline: 0")
                }
            },
        },
    }

    for _, tc := range tests {
        tc := tc
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()
            cluster := buildCephClusterModel(tc.instanceName, tc.status, tc.df)
            tc.check(t, cluster)
        })
    }
}
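
Taken together, these cases pin down the precedence rules the function applies: DF totals win over PGMap totals unless DF reports zero, the cluster ID falls back to the bare instance name when the FSID is empty, and usage percent is derived from used/total. A small self-contained sketch of that logic follows; the type and helper names are simplified stand-ins for illustration, not the package's actual definitions.

    // Illustrative sketch only; mirrors the expectations encoded in TestBuildCephClusterModel.
    package main

    import "fmt"

    type capacity struct{ Total, Used, Avail uint64 }

    // pickCapacity prefers DF-style totals when present and falls back to PGMap-style totals otherwise.
    func pickCapacity(pgmap, df capacity) capacity {
        if df.Total > 0 {
            return df
        }
        return pgmap
    }

    // clusterID falls back to the instance name when the FSID is empty.
    func clusterID(instance, fsid string) string {
        if fsid == "" {
            return instance
        }
        return instance + "-" + fsid
    }

    func main() {
        c := pickCapacity(capacity{Total: 1000, Used: 250, Avail: 750}, capacity{})
        usagePercent := float64(c.Used) / float64(c.Total) * 100
        fmt.Println(clusterID("pve-cluster", "abc123"), usagePercent) // pve-cluster-abc123 25
    }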
@@ -1,8 +1,12 @@
package monitoring

import (
    "context"
    "errors"
    "math"
    "testing"

    "github.com/rcourtman/pulse-go-rewrite/pkg/proxmox"
)

func TestClampToInt64(t *testing.T) {

@@ -296,3 +300,333 @@ func TestContainerDiskOverride_Fields(t *testing.T) {
        t.Errorf("Total = %d, want 10737418240", override.Total)
    }
}

// stubContainerDiskClient implements PVEClientInterface for testing collectContainerRootUsage
type stubContainerDiskClient struct {
    stubPVEClient
    storages []proxmox.Storage
    storagesErr error
    contentByStore map[string][]proxmox.StorageContent
    contentErr map[string]error
}

func (s *stubContainerDiskClient) GetStorage(ctx context.Context, node string) ([]proxmox.Storage, error) {
    if s.storagesErr != nil {
        return nil, s.storagesErr
    }
    return s.storages, nil
}

func (s *stubContainerDiskClient) GetStorageContent(ctx context.Context, node, storage string) ([]proxmox.StorageContent, error) {
    if s.contentErr != nil {
        if err, ok := s.contentErr[storage]; ok {
            return nil, err
        }
    }
    if s.contentByStore != nil {
        return s.contentByStore[storage], nil
    }
    return nil, nil
}

func TestCollectContainerRootUsage(t *testing.T) {
    mon := &Monitor{}

    t.Run("empty vmIDs list returns empty map", func(t *testing.T) {
        client := &stubContainerDiskClient{}
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{})
        if len(result) != 0 {
            t.Errorf("expected empty map, got %d entries", len(result))
        }
    })

    t.Run("nil vmIDs list returns empty map", func(t *testing.T) {
        client := &stubContainerDiskClient{}
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", nil)
        if len(result) != 0 {
            t.Errorf("expected empty map, got %d entries", len(result))
        }
    })

    t.Run("no storages returns empty map", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{},
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map, got %d entries", len(result))
        }
    })

    t.Run("storage does not support container volumes", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "backup-store", Content: "backup,iso", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "backup-store": {{Volid: "backup-store:subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (storage doesn't support container volumes), got %d entries", len(result))
        }
    })

    t.Run("GetStorageContent error is handled gracefully", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
                {Storage: "zfs", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "zfs": {{Volid: "zfs:subvol-100-disk-0", VMID: 100, Used: 2048, Size: 8192}},
            },
            contentErr: map[string]error{
                "local": errors.New("storage offline"),
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        // Should continue to next storage and find the container in zfs
        if len(result) != 1 {
            t.Errorf("expected 1 entry from zfs storage, got %d", len(result))
        }
        if override, ok := result[100]; !ok || override.Used != 2048 {
            t.Errorf("expected override for vmid 100 with Used=2048, got %+v", result)
        }
    })

    t.Run("GetStorage error returns empty map", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storagesErr: errors.New("API error"),
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map on GetStorage error, got %d entries", len(result))
        }
    })

    t.Run("matching container root volume found", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 1 {
            t.Fatalf("expected 1 entry, got %d", len(result))
        }
        override, ok := result[100]
        if !ok {
            t.Fatal("expected entry for vmid 100")
        }
        if override.Used != 1024 {
            t.Errorf("Used = %d, want 1024", override.Used)
        }
        if override.Total != 4096 {
            t.Errorf("Total = %d, want 4096", override.Total)
        }
    })

    t.Run("volume with Used=0 is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-100-disk-0", VMID: 100, Used: 0, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (Used=0), got %d entries", len(result))
        }
    })

    t.Run("volume with non-matching VMID is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-200-disk-0", VMID: 200, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (VMID not in list), got %d entries", len(result))
        }
    })

    t.Run("volume with non-root volid is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-100-disk-1", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (not root disk), got %d entries", len(result))
        }
    })

    t.Run("volume with VMID=0 is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-100-disk-0", VMID: 0, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (VMID=0), got %d entries", len(result))
        }
    })

    t.Run("multiple containers multiple storages", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 1},
                {Storage: "zfs", Content: "images,rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {
                    {Volid: "local:subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096},
                    {Volid: "local:subvol-101-disk-0", VMID: 101, Used: 2048, Size: 8192},
                },
                "zfs": {
                    {Volid: "zfs:subvol-102-disk-0", VMID: 102, Used: 3072, Size: 12288},
                    {Volid: "zfs:subvol-103-disk-0", VMID: 103, Used: 4096, Size: 16384},
                },
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100, 101, 102, 103})
        if len(result) != 4 {
            t.Fatalf("expected 4 entries, got %d", len(result))
        }
        expected := map[int]containerDiskOverride{
            100: {Used: 1024, Total: 4096},
            101: {Used: 2048, Total: 8192},
            102: {Used: 3072, Total: 12288},
            103: {Used: 4096, Total: 16384},
        }
        for vmid, want := range expected {
            got, ok := result[vmid]
            if !ok {
                t.Errorf("missing entry for vmid %d", vmid)
                continue
            }
            if got.Used != want.Used || got.Total != want.Total {
                t.Errorf("vmid %d: got %+v, want %+v", vmid, got, want)
            }
        }
    })

    t.Run("storage not enabled is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 0, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (storage not enabled), got %d entries", len(result))
        }
    })

    t.Run("storage not active is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "rootdir", Enabled: 1, Active: 0},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (storage not active), got %d entries", len(result))
        }
    })

    t.Run("storage with empty name is skipped", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "", Content: "rootdir", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "": {{Volid: ":subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 0 {
            t.Errorf("expected empty map (storage name empty), got %d entries", len(result))
        }
    })

    t.Run("vm-disk pattern also matches", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "local", Content: "images", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "local": {{Volid: "local:vm-100-disk-0", VMID: 100, Used: 5000, Size: 10000}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 1 {
            t.Fatalf("expected 1 entry (vm-disk pattern), got %d", len(result))
        }
        if result[100].Used != 5000 {
            t.Errorf("Used = %d, want 5000", result[100].Used)
        }
    })

    t.Run("subvol content type works", func(t *testing.T) {
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "zfs", Content: "subvol", Enabled: 1, Active: 1},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "zfs": {{Volid: "zfs:subvol-100-disk-0", VMID: 100, Used: 7777, Size: 9999}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        if len(result) != 1 {
            t.Fatalf("expected 1 entry (subvol content type), got %d", len(result))
        }
        if result[100].Used != 7777 {
            t.Errorf("Used = %d, want 7777", result[100].Used)
        }
    })

    t.Run("PBS storage with Active=0 is queryable for backup content", func(t *testing.T) {
        // PBS storages report Active=0 but should be queryable if they have backup content
        // However, collectContainerRootUsage skips storages that don't support container volumes
        // PBS with "backup" content won't match rootdir/images/subvol
        client := &stubContainerDiskClient{
            storages: []proxmox.Storage{
                {Storage: "pbs", Content: "backup", Type: "pbs", Enabled: 1, Active: 0},
            },
            contentByStore: map[string][]proxmox.StorageContent{
                "pbs": {{Volid: "pbs:subvol-100-disk-0", VMID: 100, Used: 1024, Size: 4096}},
            },
        }
        result := mon.collectContainerRootUsage(context.Background(), client, "node1", []int{100})
        // PBS with only "backup" content doesn't support container volumes
        if len(result) != 0 {
            t.Errorf("expected empty map (PBS backup-only storage), got %d entries", len(result))
        }
    })
}
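
The subtests above collectively describe the filter the collector applies before it trusts a volume's usage numbers: the storage must have a name, be enabled and active, and advertise content that can hold container volumes, and the volume must be the guest's first disk with a non-zero VMID and non-zero usage. A stand-alone sketch of that filter follows; the helper names are illustrative only and are not the monitor's actual functions.

    // Illustrative sketch only; encodes the skip rules exercised by TestCollectContainerRootUsage.
    package main

    import (
        "fmt"
        "strings"
    )

    // storageEligible mirrors the enabled/active/content checks: only storages that can hold
    // container volumes (rootdir, images or subvol content) are worth querying.
    func storageEligible(name, content string, enabled, active bool) bool {
        if name == "" || !enabled || !active {
            return false
        }
        for _, c := range strings.Split(content, ",") {
            switch strings.TrimSpace(c) {
            case "rootdir", "images", "subvol":
                return true
            }
        }
        return false
    }

    // isRootVolume accepts only the guest's first disk, e.g. subvol-100-disk-0 or vm-100-disk-0.
    func isRootVolume(volid string, vmid int) bool {
        name := volid[strings.Index(volid, ":")+1:]
        return name == fmt.Sprintf("subvol-%d-disk-0", vmid) || name == fmt.Sprintf("vm-%d-disk-0", vmid)
    }

    func main() {
        fmt.Println(storageEligible("backup-store", "backup,iso", true, true)) // false: no container-capable content
        fmt.Println(isRootVolume("local:subvol-100-disk-1", 100))              // false: not the root disk
        fmt.Println(isRootVolume("local:subvol-100-disk-0", 100))              // true
    }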
@@ -807,3 +807,422 @@ func TestSendTestNotificationAppriseHTTP(t *testing.T) {
        t.Fatalf("timeout waiting for Apprise HTTP request")
    }
}

func TestPublicURL(t *testing.T) {
    t.Run("set and get URL", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetPublicURL("https://pulse.example.com")

        got := nm.GetPublicURL()
        if got != "https://pulse.example.com" {
            t.Fatalf("expected https://pulse.example.com, got %q", got)
        }
    })

    t.Run("empty string is no-op", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetPublicURL("https://pulse.example.com")
        nm.SetPublicURL("")

        got := nm.GetPublicURL()
        if got != "https://pulse.example.com" {
            t.Fatalf("expected URL to remain unchanged, got %q", got)
        }
    })

    t.Run("trailing slash is trimmed", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetPublicURL("https://pulse.example.com/")

        got := nm.GetPublicURL()
        if got != "https://pulse.example.com" {
            t.Fatalf("expected trailing slash to be trimmed, got %q", got)
        }
    })

    t.Run("whitespace is trimmed", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetPublicURL(" https://pulse.example.com ")

        got := nm.GetPublicURL()
        if got != "https://pulse.example.com" {
            t.Fatalf("expected whitespace to be trimmed, got %q", got)
        }
    })

    t.Run("same URL twice is no-op", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetPublicURL("https://pulse.example.com")

        nm.mu.RLock()
        urlBefore := nm.publicURL
        nm.mu.RUnlock()

        nm.SetPublicURL("https://pulse.example.com")

        nm.mu.RLock()
        urlAfter := nm.publicURL
        nm.mu.RUnlock()

        if urlBefore != urlAfter {
            t.Fatalf("expected URL to remain unchanged")
        }
    })

    t.Run("whitespace-only is no-op", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetPublicURL("https://pulse.example.com")
        nm.SetPublicURL(" ")

        got := nm.GetPublicURL()
        if got != "https://pulse.example.com" {
            t.Fatalf("expected URL to remain unchanged after whitespace-only set, got %q", got)
        }
    })
}
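
Read together, these subtests describe a normalise-then-compare setter: the incoming URL is trimmed of surrounding whitespace and a trailing slash, and an empty or unchanged result leaves the stored value alone. A tiny illustrative version of that rule (not the manager's actual code) is:

    // Illustrative sketch only; the trimming and no-op rules checked by TestPublicURL.
    package main

    import (
        "fmt"
        "strings"
    )

    // normalizePublicURL returns the value that would be stored given the current value and a proposed one.
    func normalizePublicURL(current, proposed string) string {
        trimmed := strings.TrimSuffix(strings.TrimSpace(proposed), "/")
        if trimmed == "" || trimmed == current {
            return current // empty, whitespace-only and repeated values are no-ops
        }
        return trimmed
    }

    func main() {
        fmt.Println(normalizePublicURL("", " https://pulse.example.com/ ")) // https://pulse.example.com
        fmt.Println(normalizePublicURL("https://pulse.example.com", " "))   // https://pulse.example.com (unchanged)
    }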
func TestGetAppriseConfigReturnsCopy(t *testing.T) {
    nm := NewNotificationManager("")
    nm.SetAppriseConfig(AppriseConfig{
        Enabled: true,
        Targets: []string{"discord://token1", "slack://token2"},
        TimeoutSeconds: 30,
    })

    // Get a copy of the config
    configCopy := nm.GetAppriseConfig()

    // Modify the returned copy
    configCopy.Targets = append(configCopy.Targets, "telegram://token3")
    configCopy.Enabled = false
    configCopy.TimeoutSeconds = 60

    // Get another copy and verify the internal state wasn't affected
    configAfter := nm.GetAppriseConfig()

    if !configAfter.Enabled {
        t.Fatalf("modifying returned copy should not affect internal enabled state")
    }
    if configAfter.TimeoutSeconds != 30 {
        t.Fatalf("expected timeout 30, got %d", configAfter.TimeoutSeconds)
    }
    if len(configAfter.Targets) != 2 {
        t.Fatalf("expected 2 targets, got %d", len(configAfter.Targets))
    }
    if configAfter.Targets[0] != "discord://token1" || configAfter.Targets[1] != "slack://token2" {
        t.Fatalf("internal targets were modified: %v", configAfter.Targets)
    }
}
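
The returns-copy test above only passes if the getter clones the Targets slice before handing it out; returning the internal slice header could let callers mutate the manager's backing array. The general defensive-copy pattern it relies on looks roughly like this (illustrative types, not the package's own):

    // Illustrative sketch only; why appending to a returned copy cannot leak back into internal state.
    package main

    import "fmt"

    type appriseConfig struct {
        Enabled        bool
        Targets        []string
        TimeoutSeconds int
    }

    // getConfig returns a value copy and clones the slice so callers may mutate the result freely.
    func getConfig(internal appriseConfig) appriseConfig {
        out := internal
        out.Targets = append([]string(nil), internal.Targets...)
        return out
    }

    func main() {
        internal := appriseConfig{Enabled: true, Targets: []string{"discord://token1"}, TimeoutSeconds: 30}
        cp := getConfig(internal)
        cp.Targets = append(cp.Targets, "telegram://token3")
        cp.TimeoutSeconds = 60
        fmt.Println(len(internal.Targets), internal.TimeoutSeconds) // 1 30: internal state untouched
    }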
func TestNotifyOnResolve(t *testing.T) {
    t.Run("default value is true", func(t *testing.T) {
        nm := NewNotificationManager("")

        if !nm.GetNotifyOnResolve() {
            t.Fatalf("expected default notifyOnResolve to be true")
        }
    })

    t.Run("set true and get", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetNotifyOnResolve(true)

        if !nm.GetNotifyOnResolve() {
            t.Fatalf("expected notifyOnResolve to be true after setting")
        }
    })

    t.Run("set false and get", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetNotifyOnResolve(false)

        if nm.GetNotifyOnResolve() {
            t.Fatalf("expected notifyOnResolve to be false after setting")
        }
    })
}

func TestGroupingOptions(t *testing.T) {
    t.Run("byNode=true, byGuest=false", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetGroupingOptions(true, false)

        nm.mu.RLock()
        byNode := nm.groupByNode
        byGuest := nm.groupByGuest
        nm.mu.RUnlock()

        if !byNode {
            t.Fatalf("expected groupByNode to be true")
        }
        if byGuest {
            t.Fatalf("expected groupByGuest to be false")
        }
    })

    t.Run("byNode=false, byGuest=true", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetGroupingOptions(false, true)

        nm.mu.RLock()
        byNode := nm.groupByNode
        byGuest := nm.groupByGuest
        nm.mu.RUnlock()

        if byNode {
            t.Fatalf("expected groupByNode to be false")
        }
        if !byGuest {
            t.Fatalf("expected groupByGuest to be true")
        }
    })

    t.Run("both true", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetGroupingOptions(true, true)

        nm.mu.RLock()
        byNode := nm.groupByNode
        byGuest := nm.groupByGuest
        nm.mu.RUnlock()

        if !byNode {
            t.Fatalf("expected groupByNode to be true")
        }
        if !byGuest {
            t.Fatalf("expected groupByGuest to be true")
        }
    })

    t.Run("both false", func(t *testing.T) {
        nm := NewNotificationManager("")
        nm.SetGroupingOptions(false, false)

        nm.mu.RLock()
        byNode := nm.groupByNode
        byGuest := nm.groupByGuest
        nm.mu.RUnlock()

        if byNode {
            t.Fatalf("expected groupByNode to be false")
        }
        if byGuest {
            t.Fatalf("expected groupByGuest to be false")
        }
    })
}

func TestWebhookAddAndGet(t *testing.T) {
    t.Run("add webhook and retrieve", func(t *testing.T) {
        nm := NewNotificationManager("")

        webhook := WebhookConfig{
            ID: "webhook-1",
            Name: "Test Webhook",
            URL: "https://example.com/hook",
            Method: "POST",
            Enabled: true,
            Service: "generic",
        }
        nm.AddWebhook(webhook)

        webhooks := nm.GetWebhooks()
        if len(webhooks) != 1 {
            t.Fatalf("expected 1 webhook, got %d", len(webhooks))
        }
        if webhooks[0].ID != "webhook-1" {
            t.Fatalf("expected webhook ID 'webhook-1', got %q", webhooks[0].ID)
        }
        if webhooks[0].Name != "Test Webhook" {
            t.Fatalf("expected webhook name 'Test Webhook', got %q", webhooks[0].Name)
        }
    })

    t.Run("add multiple webhooks", func(t *testing.T) {
        nm := NewNotificationManager("")

        nm.AddWebhook(WebhookConfig{ID: "webhook-1", Name: "First", URL: "https://example.com/1"})
        nm.AddWebhook(WebhookConfig{ID: "webhook-2", Name: "Second", URL: "https://example.com/2"})
        nm.AddWebhook(WebhookConfig{ID: "webhook-3", Name: "Third", URL: "https://example.com/3"})

        webhooks := nm.GetWebhooks()
        if len(webhooks) != 3 {
            t.Fatalf("expected 3 webhooks, got %d", len(webhooks))
        }

        ids := make(map[string]bool)
        for _, wh := range webhooks {
            ids[wh.ID] = true
        }
        if !ids["webhook-1"] || !ids["webhook-2"] || !ids["webhook-3"] {
            t.Fatalf("missing expected webhook IDs: %v", ids)
        }
    })

    t.Run("get webhooks returns empty slice when none", func(t *testing.T) {
        nm := NewNotificationManager("")

        webhooks := nm.GetWebhooks()
        if webhooks == nil {
            t.Fatalf("expected empty slice, got nil")
        }
        if len(webhooks) != 0 {
            t.Fatalf("expected 0 webhooks, got %d", len(webhooks))
        }
    })
}

func TestWebhookUpdate(t *testing.T) {
    t.Run("update existing webhook", func(t *testing.T) {
        nm := NewNotificationManager("")

        nm.AddWebhook(WebhookConfig{
            ID: "webhook-1",
            Name: "Original Name",
            URL: "https://example.com/original",
            Enabled: true,
        })

        err := nm.UpdateWebhook("webhook-1", WebhookConfig{
            ID: "webhook-1",
            Name: "Updated Name",
            URL: "https://example.com/updated",
            Enabled: false,
        })
        if err != nil {
            t.Fatalf("expected no error updating webhook, got %v", err)
        }

        webhooks := nm.GetWebhooks()
        if len(webhooks) != 1 {
            t.Fatalf("expected 1 webhook, got %d", len(webhooks))
        }
        if webhooks[0].Name != "Updated Name" {
            t.Fatalf("expected name 'Updated Name', got %q", webhooks[0].Name)
        }
        if webhooks[0].URL != "https://example.com/updated" {
            t.Fatalf("expected URL 'https://example.com/updated', got %q", webhooks[0].URL)
        }
        if webhooks[0].Enabled {
            t.Fatalf("expected enabled to be false")
        }
    })

    t.Run("update non-existent webhook returns error", func(t *testing.T) {
        nm := NewNotificationManager("")

        err := nm.UpdateWebhook("non-existent", WebhookConfig{
            ID: "non-existent",
            Name: "Test",
        })
        if err == nil {
            t.Fatalf("expected error updating non-existent webhook, got nil")
        }
        if !strings.Contains(err.Error(), "webhook not found") {
            t.Fatalf("expected 'webhook not found' error, got: %v", err)
        }
    })
}

func TestWebhookDelete(t *testing.T) {
    t.Run("delete existing webhook", func(t *testing.T) {
        nm := NewNotificationManager("")

        nm.AddWebhook(WebhookConfig{ID: "webhook-1", Name: "First"})
        nm.AddWebhook(WebhookConfig{ID: "webhook-2", Name: "Second"})

        err := nm.DeleteWebhook("webhook-1")
        if err != nil {
            t.Fatalf("expected no error deleting webhook, got %v", err)
        }

        webhooks := nm.GetWebhooks()
        if len(webhooks) != 1 {
            t.Fatalf("expected 1 webhook after delete, got %d", len(webhooks))
        }
        if webhooks[0].ID != "webhook-2" {
            t.Fatalf("expected remaining webhook ID 'webhook-2', got %q", webhooks[0].ID)
        }
    })

    t.Run("delete non-existent webhook returns error", func(t *testing.T) {
        nm := NewNotificationManager("")

        err := nm.DeleteWebhook("non-existent")
        if err == nil {
            t.Fatalf("expected error deleting non-existent webhook, got nil")
        }
        if !strings.Contains(err.Error(), "webhook not found") {
            t.Fatalf("expected 'webhook not found' error, got: %v", err)
        }
    })

    t.Run("delete from middle of list", func(t *testing.T) {
        nm := NewNotificationManager("")

        nm.AddWebhook(WebhookConfig{ID: "webhook-1", Name: "First"})
        nm.AddWebhook(WebhookConfig{ID: "webhook-2", Name: "Second"})
        nm.AddWebhook(WebhookConfig{ID: "webhook-3", Name: "Third"})

        err := nm.DeleteWebhook("webhook-2")
        if err != nil {
            t.Fatalf("expected no error deleting middle webhook, got %v", err)
        }

        webhooks := nm.GetWebhooks()
        if len(webhooks) != 2 {
            t.Fatalf("expected 2 webhooks after delete, got %d", len(webhooks))
        }

        ids := make(map[string]bool)
        for _, wh := range webhooks {
            ids[wh.ID] = true
        }
        if !ids["webhook-1"] || !ids["webhook-3"] {
            t.Fatalf("expected webhook-1 and webhook-3 to remain, got: %v", ids)
        }
        if ids["webhook-2"] {
            t.Fatalf("webhook-2 should have been deleted")
        }
    })
}

func TestGetEmailConfig(t *testing.T) {
    nm := NewNotificationManager("")

    config := EmailConfig{
        Enabled: true,
        SMTPHost: "smtp.example.com",
        SMTPPort: 587,
        Username: "user@example.com",
        Password: "secret",
        From: "alerts@example.com",
        To: []string{"admin@example.com", "ops@example.com"},
        StartTLS: true,
    }
    nm.SetEmailConfig(config)

    got := nm.GetEmailConfig()

    if !got.Enabled {
        t.Fatalf("expected enabled to be true")
    }
    if got.SMTPHost != "smtp.example.com" {
        t.Fatalf("expected host 'smtp.example.com', got %q", got.SMTPHost)
    }
    if got.SMTPPort != 587 {
        t.Fatalf("expected port 587, got %d", got.SMTPPort)
    }
    if got.Username != "user@example.com" {
        t.Fatalf("expected username 'user@example.com', got %q", got.Username)
    }
    if got.From != "alerts@example.com" {
        t.Fatalf("expected from 'alerts@example.com', got %q", got.From)
    }
    if len(got.To) != 2 {
        t.Fatalf("expected 2 recipients, got %d", len(got.To))
    }
    if !got.StartTLS {
        t.Fatalf("expected startTLS to be true")
    }
}