// Mirror of https://github.com/rcourtman/Pulse.git (synced 2026-05-07).
package ai
|
|
|
|
import (
|
|
"strings"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/ai/baseline"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/ai/correlation"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/ai/memory"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/alerts"
|
|
"github.com/rcourtman/pulse-go-rewrite/internal/models"
|
|
unifiedresources "github.com/rcourtman/pulse-go-rewrite/internal/unifiedresources"
|
|
)
|
|
|
|
func TestPatrolService_BroadcastFullChannel(t *testing.T) {
|
|
ps := NewPatrolService(nil, nil)
|
|
|
|
// Subscribe with a small buffer
|
|
ch := make(chan PatrolStreamEvent, 1)
|
|
ps.streamMu.Lock()
|
|
ps.streamSubscribers[ch] = &streamSubscriber{ch: ch}
|
|
ps.streamMu.Unlock()
|
|
|
|
// Fill the channel
|
|
ch <- PatrolStreamEvent{Type: "full"}
|
|
|
|
// Broadcast enough times to trigger slow-subscriber eviction (fullCount >= 25).
|
|
for i := 0; i < 25; i++ {
|
|
ps.broadcast(PatrolStreamEvent{Type: "overflow"})
|
|
}
|
|
|
|
// Verify the channel was removed from subscribers
|
|
ps.streamMu.RLock()
|
|
_, exists := ps.streamSubscribers[ch]
|
|
ps.streamMu.RUnlock()
|
|
|
|
if exists {
|
|
t.Error("Expected channel to be removed from subscribers after full broadcast")
|
|
}
|
|
}
|
|
|
|
// baselineResult bundles the canned values mockBaselineStore returns for one
// resource/metric pair.
type baselineResult struct {
	severity baseline.AnomalySeverity // severity reported by CheckAnomaly
	zScore   float64                  // z-score reported by CheckAnomaly
	bl       *baseline.MetricBaseline // baseline record returned by both lookups
}
|
|
|
|
// mockBaselineStore is a test double for the baseline store: lookups are
// served from a fixed map keyed by "resourceID:metric", and all mutating /
// persistence methods are no-ops.
type mockBaselineStore struct {
	anomalies map[string]baselineResult // fixtures keyed by "resourceID:metric"
}
|
|
|
|
func (m *mockBaselineStore) CheckAnomaly(resourceID, metric string, value float64) (baseline.AnomalySeverity, float64, *baseline.MetricBaseline) {
|
|
res, ok := m.anomalies[resourceID+":"+metric]
|
|
if !ok {
|
|
return baseline.AnomalyNone, 0, &baseline.MetricBaseline{}
|
|
}
|
|
return res.severity, res.zScore, res.bl
|
|
}
|
|
|
|
func (m *mockBaselineStore) GetBaseline(resourceID, metric string) (*baseline.MetricBaseline, bool) {
|
|
res, ok := m.anomalies[resourceID+":"+metric]
|
|
if !ok {
|
|
return nil, false
|
|
}
|
|
return res.bl, true
|
|
}
|
|
|
|
// Update is a no-op; the mock's anomaly results are fixed at construction time.
func (m *mockBaselineStore) Update(resourceID, metric string, value float64) {}
|
|
// Save is a no-op persistence stub that always succeeds.
func (m *mockBaselineStore) Save() error { return nil }
|
|
// Load is a no-op persistence stub that always succeeds.
func (m *mockBaselineStore) Load() error { return nil }
|
|
|
|
// TestGenerateRemediationSummary table-tests the human-readable summaries
// produced for remediation shell commands. Each row pairs a raw command (and
// an optional context map carrying a resource "name") with the exact summary
// expected from generateRemediationSummary.
func TestGenerateRemediationSummary(t *testing.T) {
	tests := []struct {
		command  string                 // raw command as it would be executed
		context  map[string]interface{} // optional metadata; "name" feeds fallback summaries
		expected string                 // exact summary string expected
	}{
		// docker lifecycle and inspection commands
		{"docker restart my-container", nil, "Restarted my-container container"},
		{"docker start my-container", nil, "Restarted my-container container"},
		{"docker restart", nil, "Restarted container"},
		{"docker stop my-container", nil, "Stopped my-container container"},
		{"docker stop", nil, "Stopped container"},
		{"docker ps --filter name=web", nil, "Verified web container is running"},
		{"docker ps", nil, "Checked container status"},
		{"docker logs web", nil, "Retrieved web logs"},
		{"docker logs", nil, "Retrieved container logs"},
		// systemd service management
		{"systemctl restart nginx", nil, "Restarted nginx service"},
		{"systemctl restart", nil, "Restarted system service"},
		{"systemctl status nginx", nil, "Checked nginx service status"},
		{"systemctl status", nil, "Checked service status"},
		// storage analysis; well-known paths get friendly labels
		{"df -h /var/lib/frigate", nil, "Analyzed Frigate storage usage"},
		{"du -sh /var/lib/plex", nil, "Analyzed Plex storage usage"},
		{"df -h /mnt/recordings", nil, "Analyzed recordings storage"},
		{"df -h /data/mysql", nil, "Analyzed /data/mysql storage"},
		{"df -h", nil, "Analyzed disk usage"},
		// configuration inspection via grep
		{"grep -r \"config\" /etc/frigate", nil, "Inspected Frigate configuration"},
		{"grep \"server\" /etc/nginx/nginx.conf", nil, "Inspected /nginx/nginx.conf configuration"},
		{"grep \"test\" /sys/config/test.conf", nil, "Inspected /config/test.conf configuration"},
		{"grep \"test\" config", nil, "Inspected configuration"},
		// log review; context "name" personalizes the summary
		{"tail -f /var/log/syslog", map[string]interface{}{"name": "host1"}, "Reviewed host1 logs"},
		{"journalctl -u nginx", nil, "Reviewed system logs"},
		// Proxmox disk resizing (pct = LXC container, qm = VM)
		{"pct resize 100 rootfs +10G", nil, "Resized container 100 disk"},
		{"pct resize", nil, "Resized container disk"},
		{"qm resize 200 virtio0 +20G", nil, "Resized VM 200 disk"},
		{"qm resize", nil, "Resized VM disk"},
		// network, memory, process, cleanup, and permission commands
		{"ping -c 4 8.8.8.8", nil, "Tested network connectivity"},
		{"curl -I google.com", nil, "Tested network connectivity"},
		{"free -m", nil, "Checked memory usage"},
		{"top -n 1", nil, "Analyzed running processes"},
		{"rm -rf /tmp/test", nil, "Cleaned up files"},
		{"chmod 644 /etc/passwd", nil, "Fixed file permissions"},
		// unrecognized commands fall back to a generic diagnostics summary
		{"ls -la", map[string]interface{}{"name": "host1"}, "Ran diagnostics on host1"},
		{"ls -la", nil, "Ran system diagnostics"},
	}

	for _, tt := range tests {
		result := generateRemediationSummary(tt.command, "", tt.context)
		if result != tt.expected {
			t.Errorf("generateRemediationSummary(%s) = %s, want %s", tt.command, result, tt.expected)
		}
	}
}
|
|
|
|
// mockMetricsHistoryProvider implements MetricsHistoryProvider backed by a
// flat map so tests can inject exact metric series per resource.
type mockMetricsHistoryProvider struct {
	metrics map[string][]models.MetricPoint // keyed "resourceID:metricType" -> points
}
|
|
|
|
func (m *mockMetricsHistoryProvider) GetNodeMetrics(nodeID string, metricType string, duration time.Duration) []models.MetricPoint {
|
|
return m.metrics[nodeID+":"+metricType]
|
|
}
|
|
|
|
func (m *mockMetricsHistoryProvider) GetGuestMetrics(guestID string, metricType string, duration time.Duration) []models.MetricPoint {
|
|
return m.metrics[guestID+":"+metricType]
|
|
}
|
|
|
|
func (m *mockMetricsHistoryProvider) GetAllGuestMetrics(guestID string, duration time.Duration) map[string][]models.MetricPoint {
|
|
result := make(map[string][]models.MetricPoint)
|
|
for k, v := range m.metrics {
|
|
if strings.HasPrefix(k, guestID+":") {
|
|
parts := strings.Split(k, ":")
|
|
if len(parts) == 2 {
|
|
result[parts[1]] = v
|
|
}
|
|
}
|
|
}
|
|
return result
|
|
}
|
|
|
|
// GetAllStorageMetrics is unused by these tests and always reports no data.
func (m *mockMetricsHistoryProvider) GetAllStorageMetrics(storageID string, duration time.Duration) map[string][]models.MetricPoint {
	return nil
}
|
|
|
|
// mockAlertProvider implements AlertProvider
|
|
type mockAlertProvider struct {
|
|
active []AlertInfo
|
|
history []ResolvedAlertInfo
|
|
}
|
|
|
|
// GetActiveAlerts returns the fixed set of active alerts configured on the mock.
func (m *mockAlertProvider) GetActiveAlerts() []AlertInfo { return m.active }
|
|
// GetRecentlyResolved returns the canned resolved-alert history; the minutes
// window is ignored by this mock.
func (m *mockAlertProvider) GetRecentlyResolved(minutes int) []ResolvedAlertInfo { return m.history }
|
|
func (m *mockAlertProvider) GetAlertsByResource(resourceID string) []AlertInfo {
|
|
var res []AlertInfo
|
|
for _, a := range m.active {
|
|
if a.ResourceID == resourceID {
|
|
res = append(res, a)
|
|
}
|
|
}
|
|
return res
|
|
}
|
|
func (m *mockAlertProvider) GetAlertHistory(resourceID string, limit int) []ResolvedAlertInfo {
|
|
var res []ResolvedAlertInfo
|
|
for _, a := range m.history {
|
|
if a.ResourceID == resourceID {
|
|
res = append(res, a)
|
|
}
|
|
}
|
|
return res
|
|
}
|
|
|
|
// TestService_BuildEnrichedResourceContext drives buildEnrichedResourceContext
// through each optional data source in turn — baseline anomalies, metric
// trends, pattern predictions, alerts, the canonical resource timeline, change
// detection, and correlation — layering one provider onto the service per
// case. The cases are order-dependent: each one builds on the state installed
// by the previous ones.
func TestService_BuildEnrichedResourceContext(t *testing.T) {
	s := NewService(nil, nil)

	// Case 1: Patrol service is nil
	ctx := s.buildEnrichedResourceContext("res1", "", nil)
	if ctx != "" {
		t.Error("Expected empty context when patrol service is nil")
	}

	// Case 2: Patrol service exists but no baseline store
	ps := NewPatrolService(nil, nil)
	s.mu.Lock()
	s.patrolService = ps
	s.mu.Unlock()

	ctx = s.buildEnrichedResourceContext("res1", "", nil)
	// Should at least return empty or minimal if no baseline store
	t.Logf("Ctx (no baseline store): %q", ctx)

	// Case 3: With baseline store and data
	bs := baseline.NewStore(baseline.StoreConfig{MinSamples: 1})
	ps.mu.Lock()
	ps.baselineStore = bs
	ps.mu.Unlock()

	// Add baselines
	now := time.Now()
	// We need 10 samples for the "meaningful" baseline message in buildEnrichedResourceContext
	var cpuPoints, memPoints []baseline.MetricPoint
	for i := 0; i < 11; i++ {
		cpuPoints = append(cpuPoints, baseline.MetricPoint{Value: 10, Timestamp: now.Add(time.Duration(i) * time.Minute)})
		memPoints = append(memPoints, baseline.MetricPoint{Value: 20, Timestamp: now.Add(time.Duration(i) * time.Minute)})
	}
	_ = bs.Learn("res1", "node", "cpu", cpuPoints)
	_ = bs.Learn("res1", "node", "memory", memPoints)

	metrics := map[string]interface{}{
		"cpu_usage":    float64(50), // 5x baseline (anomaly)
		"memory_usage": float64(22), // normal-ish
	}

	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	if ctx == "" {
		t.Error("Expected non-empty context")
	}
	t.Logf("Enriched context: %s", ctx)

	if !strings.Contains(ctx, "ANOMALY") {
		t.Error("Expected ANOMALY in context for high CPU")
	}
	if !strings.Contains(ctx, "normal") {
		t.Error("Expected normal in context for memory")
	}

	// Case 4: With Metrics History (Trends)
	mockMH := &mockMetricsHistoryProvider{
		metrics: make(map[string][]models.MetricPoint),
	}
	// Add data showing increasing trend for CPU (>5% trend)
	// Last 24h: 10% -> 20% = +10% over 1 day (+10%/day > 5%)
	tsOld := now.Add(-24 * time.Hour)
	mockMH.metrics["res1:cpu"] = []models.MetricPoint{
		{Value: 10, Timestamp: tsOld},
		{Value: 15, Timestamp: tsOld.Add(12 * time.Hour)},
		{Value: 20, Timestamp: now},
	}
	// Add data showing flat trend for memory
	mockMH.metrics["res1:memory"] = []models.MetricPoint{
		{Value: 50, Timestamp: tsOld},
		{Value: 50, Timestamp: now},
	}

	ps.SetMetricsHistoryProvider(mockMH)

	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	t.Logf("Enriched context (trends): %s", ctx)

	if !strings.Contains(ctx, "CPU") || !strings.Contains(ctx, "increasing") {
		t.Error("Expected 'CPU' and 'increasing' trend in context")
	}

	// Case 5: With Pattern Detector (Predictions)
	pd := NewPatternDetector(DefaultPatternConfig())
	// Record 3 events to form a pattern (T-4h, T-2h, T-0h -> Interval 2h -> Next T+2h)
	pd.RecordEvent(HistoricalEvent{ResourceID: "res1", EventType: EventHighCPU, Timestamp: now.Add(-4 * time.Hour)})
	pd.RecordEvent(HistoricalEvent{ResourceID: "res1", EventType: EventHighCPU, Timestamp: now.Add(-2 * time.Hour)})
	pd.RecordEvent(HistoricalEvent{ResourceID: "res1", EventType: EventHighCPU, Timestamp: now})

	ps.SetPatternDetector(pd)

	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	t.Logf("Enriched context (patterns): %s", ctx)

	if !strings.Contains(ctx, "Predictions") {
		t.Error("Expected 'Predictions' section in context")
	}
	// Note: formatPatternBasis is showing just the event type currently
	if !strings.Contains(ctx, "high_cpu") {
		t.Error("Expected 'high_cpu' prediction in context")
	}

	// Case 6: With Active/Historical Alerts
	mockAP := &mockAlertProvider{
		active: []AlertInfo{
			{ResourceID: "res1", Type: "cpu", Level: "warning", Message: "High CPU detected", Value: 85, Duration: "5m"},
		},
		history: []ResolvedAlertInfo{
			{AlertInfo: AlertInfo{ResourceID: "res1", Type: "cpu", Level: "warning"}, ResolvedTime: now.Add(-1 * time.Hour)},
		},
	}
	s.SetAlertProvider(mockAP)

	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	t.Logf("Enriched context (alerts): %s", ctx)

	if !strings.Contains(ctx, "active alert") {
		t.Error("Expected 'active alert' count in context")
	}
	if !strings.Contains(ctx, "Past 30 days") {
		t.Error("Expected 'Past 30 days' history in context")
	}

	// Case 7: With canonical resource timeline history
	canonicalStore := unifiedresources.NewMemoryStore()
	if err := canonicalStore.RecordChange(unifiedresources.ResourceChange{
		ID:               "change-1",
		ObservedAt:       now.Add(-2 * time.Hour),
		ResourceID:       "res1",
		Kind:             unifiedresources.ChangeRestart,
		From:             "running",
		To:               "restarting",
		SourceType:       unifiedresources.SourcePlatformEvent,
		SourceAdapter:    unifiedresources.AdapterProxmox,
		Confidence:       unifiedresources.ConfidenceHigh,
		Actor:            "agent:oncall-helper",
		RelatedResources: []string{"node-1"},
		Reason:           "Routine restart requested",
	}); err != nil {
		t.Fatalf("record canonical resource change: %v", err)
	}
	s.mu.Lock()
	s.orgID = "org-1"
	s.resourceExportStore = canonicalStore
	s.resourceExportStoreOrgID = s.orgID
	s.mu.Unlock()

	// First exercise the dedicated recent-changes formatter directly.
	recentCtx := s.buildRecentResourceChangesContext("res1")
	if !strings.Contains(recentCtx, "Restart") {
		t.Fatalf("expected canonical recent change summary to include restart label, got %q", recentCtx)
	}
	if !strings.Contains(recentCtx, "platform_event/proxmox_adapter") {
		t.Fatalf("expected canonical recent change summary to include shared provenance, got %q", recentCtx)
	}
	if !strings.Contains(recentCtx, "Routine restart requested") {
		t.Fatalf("expected canonical recent change summary to include reason, got %q", recentCtx)
	}

	// Then verify the same change surfaces through the enriched context.
	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	t.Logf("Enriched context (canonical changes): %s", ctx)

	if !strings.Contains(ctx, "Recent Changes") {
		t.Error("Expected canonical recent changes section in context")
	}
	if !strings.Contains(ctx, "Restart") {
		t.Error("Expected canonical restart change in context")
	}
	if !strings.Contains(ctx, "proxmox_adapter") {
		t.Error("Expected canonical adapter provenance in context")
	}
	if !strings.Contains(ctx, "node-1") {
		t.Error("Expected canonical related resource in context")
	}

	// Detach the canonical store so later cases exercise the fallback paths.
	s.mu.Lock()
	s.resourceExportStore = nil
	s.resourceExportStoreOrgID = ""
	s.mu.Unlock()

	// Case 8: With Change Logic
	cd := NewChangeDetector(memory.ChangeDetectorConfig{})
	// Simulate a "created" change
	cd.DetectChanges([]memory.ResourceSnapshot{
		{
			ID:           "res1",
			Name:         "res1",
			Type:         "vm",
			Status:       "running",
			SnapshotTime: now,
		},
	})
	// Force persistence flush/processing if needed (DetectChanges does it async but returns changes immediately)

	ps.SetChangeDetector(cd)

	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	t.Logf("Enriched context (changes): %s", ctx)

	if !strings.Contains(ctx, "Changes") {
		t.Error("Expected 'Changes' section")
	}
	if !strings.Contains(ctx, "created") {
		t.Error("Expected creation event in context")
	}

	// Case 9: With Correlation Logic
	// We need correlation package or just use the alias
	cord := NewCorrelationDetector(DefaultCorrelationConfig())
	// Record an event that should contribute to correlation
	cord.RecordEvent(CorrelationEvent{
		ResourceID:   "res1",
		ResourceType: "vm",
		EventType:    CorrelationEventHighCPU,
		Timestamp:    now,
	})

	ps.SetCorrelationDetector(cord)

	ctx = s.buildEnrichedResourceContext("res1", "node", metrics)
	t.Logf("Enriched context (correlation): %s", ctx)

	// Correlation text might appear depending on implementation.
	// If no correlations found (single event might not be enough), it might be empty section.
	// But let's verify it doesn't crash and we exercised the path.
}
|
|
|
|
// TestService_BuildIncidentContext exercises buildIncidentContext across its
// input combinations: no store at all, an empty incident store queried by
// alert ID and by resource ID, the canonical resource-timeline fallback, and
// the degenerate case where both IDs are empty.
func TestService_BuildIncidentContext(t *testing.T) {
	s := NewService(nil, nil)

	// Case 1: store is nil
	ctx := s.buildIncidentContext("res1", "alert1")
	if ctx != "" {
		t.Logf("Note: ctx is %v", ctx)
	}

	// Mock store
	store := memory.NewIncidentStore(memory.IncidentStoreConfig{})
	s.mu.Lock()
	s.incidentStore = store
	s.mu.Unlock()

	// Case 2: alertID set
	ctx = s.buildIncidentContext("res1", "alert1")
	// Since alert1 doesn't exist, it returns empty
	if ctx != "" {
		t.Logf("Alert context: %s", ctx)
	}

	// Case 3: resourceID set
	ctx = s.buildIncidentContext("res1", "")
	if ctx != "" {
		t.Logf("Resource context: %s", ctx)
	}

	// Case 4: canonical resource timeline fallback
	canonicalStore := unifiedresources.NewMemoryStore()
	if err := canonicalStore.RecordChange(unifiedresources.ResourceChange{
		ID:               "change-incident-1",
		ObservedAt:       time.Now().Add(-time.Hour),
		ResourceID:       "res1",
		Kind:             unifiedresources.ChangeConfigUpdate,
		From:             "old",
		To:               "new",
		SourceType:       unifiedresources.SourcePulseDiff,
		SourceAdapter:    unifiedresources.AdapterOpsAgent,
		Confidence:       unifiedresources.ConfidenceMedium,
		RelatedResources: []string{"related-1"},
		Reason:           "Config refresh",
	}); err != nil {
		t.Fatalf("record canonical change: %v", err)
	}
	s.mu.Lock()
	s.orgID = "org-1"
	s.resourceExportStore = canonicalStore
	s.resourceExportStoreOrgID = s.orgID
	s.mu.Unlock()

	ctx = s.buildIncidentContext("res1", "")
	if !strings.Contains(ctx, "Recent Changes") {
		t.Fatalf("expected canonical recent changes in incident context, got %q", ctx)
	}
	if !strings.Contains(ctx, "Config update") {
		t.Fatalf("expected canonical change kind in incident context, got %q", ctx)
	}
	// NOTE(review): this asserts on the literal "agent:ops-helper" although no
	// Actor was set on the recorded change — presumably the formatter renders
	// AdapterOpsAgent as "agent:ops-helper"; verify against the formatter.
	if !strings.Contains(ctx, "agent:ops-helper") {
		t.Fatalf("expected canonical adapter provenance in incident context, got %q", ctx)
	}
	if !strings.Contains(ctx, "related-1") {
		t.Fatalf("expected canonical related resource in incident context, got %q", ctx)
	}
	// Detach the canonical store again before the final case.
	s.mu.Lock()
	s.resourceExportStore = nil
	s.resourceExportStoreOrgID = ""
	s.mu.Unlock()

	// Case 5: both empty
	ctx = s.buildIncidentContext("", "")
	if ctx != "" {
		t.Error("Expected empty context when both IDs are empty")
	}
}
|
|
|
|
// TestService_BuildIncidentContext_AlertIncludesCanonicalResourceChanges
// verifies that when an alert ID resolves through the incident store, the
// resulting context still appends canonical resource-timeline changes for the
// alert's resource alongside the incident-memory section.
func TestService_BuildIncidentContext_AlertIncludesCanonicalResourceChanges(t *testing.T) {
	s := NewService(nil, nil)

	// Seed an incident store with one fired alert so the alert-ID path matches.
	incidentStore := memory.NewIncidentStore(memory.IncidentStoreConfig{})
	alert := &alerts.Alert{
		ID:           "alert-canonical-1",
		Type:         "cpu_high",
		Level:        alerts.AlertLevelWarning,
		ResourceID:   "res-canonical-1",
		ResourceName: "vm-canonical-1",
		StartTime:    time.Now().Add(-30 * time.Minute),
	}
	incidentStore.RecordAlertFired(alert)

	// Record one canonical change against the same resource as the alert.
	canonicalStore := unifiedresources.NewMemoryStore()
	if err := canonicalStore.RecordChange(unifiedresources.ResourceChange{
		ID:            "change-canonical-1",
		ObservedAt:    time.Now().Add(-15 * time.Minute),
		ResourceID:    alert.ResourceID,
		Kind:          unifiedresources.ChangeConfigUpdate,
		From:          "old",
		To:            "new",
		SourceType:    unifiedresources.SourcePulseDiff,
		SourceAdapter: unifiedresources.AdapterOpsAgent,
		Confidence:    unifiedresources.ConfidenceHigh,
		Reason:        "Config drift corrected",
	}); err != nil {
		t.Fatalf("record canonical change: %v", err)
	}

	s.mu.Lock()
	s.incidentStore = incidentStore
	s.orgID = "org-1"
	s.resourceExportStore = canonicalStore
	s.resourceExportStoreOrgID = s.orgID
	s.mu.Unlock()

	ctx := s.buildIncidentContext(alert.ResourceID, alert.ID)
	if !strings.Contains(ctx, "Incident Memory") {
		t.Fatalf("expected alert incident memory in context, got %q", ctx)
	}
	if !strings.Contains(ctx, "Recent Changes") {
		t.Fatalf("expected canonical recent changes in alert context, got %q", ctx)
	}
	if !strings.Contains(ctx, "Config update") {
		t.Fatalf("expected canonical change summary in alert context, got %q", ctx)
	}
}
|
|
|
|
// TestService_BuildRelationshipContext_UsesCanonicalReadState verifies that
// relationship data from the canonical unified-resources read state flows into
// the relationship context, the enriched resource context (together with
// correlation summaries), and the incident context.
func TestService_BuildRelationshipContext_UsesCanonicalReadState(t *testing.T) {
	now := time.Now()
	s := NewService(nil, nil)
	ps := NewPatrolService(nil, nil)

	// One VM with a single active "runs on" relationship to a node, including
	// provenance (discoverer, timestamps) and arbitrary metadata.
	resourceID := "vm-rel-1"
	readState := &mockReadState{
		resources: map[string]*unifiedresources.Resource{
			resourceID: {
				ID:     resourceID,
				Type:   unifiedresources.ResourceTypeVM,
				Name:   resourceID,
				Status: unifiedresources.StatusOnline,
				Relationships: []unifiedresources.ResourceRelationship{
					{
						SourceID:   resourceID,
						TargetID:   "node-rel-1",
						Type:       unifiedresources.RelRunsOn,
						Confidence: 0.9,
						Active:     true,
						Discoverer: "proxmox_adapter",
						ObservedAt: now.Add(-2 * time.Hour),
						LastSeenAt: now.Add(-time.Hour),
						Metadata:   map[string]any{"role": "primary"},
					},
				},
			},
		},
	}

	s.mu.Lock()
	s.patrolService = ps
	s.readState = readState
	s.mu.Unlock()

	// Loosen correlation thresholds so two events inside one hour correlate.
	corrCfg := DefaultCorrelationConfig()
	corrCfg.MinOccurrences = 1
	corrCfg.CorrelationWindow = time.Hour
	corr := NewCorrelationDetector(corrCfg)
	corr.RecordEvent(correlation.Event{
		ResourceID:   resourceID,
		ResourceName: resourceID,
		ResourceType: "vm",
		EventType:    CorrelationEventHighCPU,
		Timestamp:    now.Add(-30 * time.Minute),
	})
	corr.RecordEvent(correlation.Event{
		ResourceID:   "vm-rel-2",
		ResourceName: "vm-rel-2",
		ResourceType: "vm",
		EventType:    CorrelationEventRestart,
		Timestamp:    now.Add(-25 * time.Minute),
	})
	ps.SetCorrelationDetector(corr)

	// The dedicated relationship formatter should surface heading, label,
	// and a marker that relationship metadata exists.
	relationshipContext := s.buildResourceRelationshipContext(resourceID)
	if !strings.Contains(relationshipContext, "### Resource Relationships") {
		t.Fatalf("expected canonical relationship context to include relationship heading, got %q", relationshipContext)
	}
	if !strings.Contains(relationshipContext, "Runs on") {
		t.Fatalf("expected canonical relationship context to include relationship label, got %q", relationshipContext)
	}
	if !strings.Contains(relationshipContext, "metadata present") {
		t.Fatalf("expected canonical relationship context to include shared metadata marker, got %q", relationshipContext)
	}

	// The enriched resource context should embed the same relationship data
	// plus the correlation section fed by the detector above.
	resourceCtx := s.buildEnrichedResourceContext(resourceID, "", nil)
	if !strings.Contains(resourceCtx, "Resource Relationships") {
		t.Fatalf("expected enriched resource context to include relationship section, got %q", resourceCtx)
	}
	if !strings.Contains(resourceCtx, "Runs on") {
		t.Fatalf("expected enriched resource context to include canonical relationship label, got %q", resourceCtx)
	}
	if !strings.Contains(resourceCtx, "discoverer proxmox_adapter") {
		t.Fatalf("expected enriched resource context to include provenance, got %q", resourceCtx)
	}
	if !strings.Contains(resourceCtx, "metadata present") {
		t.Fatalf("expected enriched resource context to include shared relationship metadata marker, got %q", resourceCtx)
	}
	if !strings.Contains(resourceCtx, "Resource Correlations") {
		t.Fatalf("expected enriched resource context to include correlation section, got %q", resourceCtx)
	}
	if !strings.Contains(resourceCtx, "seen 1x") {
		t.Fatalf("expected enriched resource context to include shared correlation summary, got %q", resourceCtx)
	}

	// The incident context should also carry the relationship section.
	incidentCtx := s.buildIncidentContext(resourceID, "")
	if !strings.Contains(incidentCtx, "Resource Relationships") {
		t.Fatalf("expected incident context to include relationship section, got %q", incidentCtx)
	}
	if !strings.Contains(incidentCtx, "Runs on") {
		t.Fatalf("expected incident context to include canonical relationship label, got %q", incidentCtx)
	}
}
|
|
|
|
// TestService_BuildRecentResourceChangesContext_FallsBackToMemoryFormatter
// verifies that without a canonical resource-export store, the service formats
// recent changes via the shared memory.FormatRecentChangesContext helper,
// producing byte-identical output.
func TestService_BuildRecentResourceChangesContext_FallsBackToMemoryFormatter(t *testing.T) {
	// A bare Service (no canonical store configured) forces the fallback path.
	s := &Service{}
	ps := NewPatrolService(nil, nil)
	cd := NewChangeDetector(ChangeDetectorConfig{MaxChanges: 10})
	// First snapshot of a never-seen resource registers as a "created" change.
	cd.DetectChanges([]ResourceSnapshot{
		{ID: "res-fallback", Name: "fallback-resource", Type: "vm", Status: "running", SnapshotTime: time.Now()},
	})
	ps.SetChangeDetector(cd)
	s.patrolService = ps

	got := s.buildRecentResourceChangesContext("res-fallback")
	// Expected output comes from the shared formatter with the same inputs
	// (top 5 changes, no header flag, "###" heading level).
	want := memory.FormatRecentChangesContext(cd.GetChangesForResource("res-fallback", 5), false, "###")
	if got != want {
		t.Fatalf("expected fallback recent-resource-changes context to use shared memory formatter:\nwant %q\n got %q", want, got)
	}
}
|
|
|
|
// mockIncidentStore is a stateless incident-store test double: each Format
// method echoes its identifying argument in a fixed marker string so callers
// can assert on wiring, and Record accepts everything without storing it.
type mockIncidentStore struct{}

// FormatForAlert returns a deterministic marker embedding the alert ID.
func (m *mockIncidentStore) FormatForAlert(alertID string, limit int) string {
	return "alert:" + alertID
}

// FormatForResource returns a deterministic marker embedding the resource ID.
func (m *mockIncidentStore) FormatForResource(resourceID string, limit int) string {
	return "res:" + resourceID
}

// FormatForPatrol returns a fixed marker regardless of limit.
func (m *mockIncidentStore) FormatForPatrol(limit int) string {
	return "patrol"
}

// Record discards the incident and always reports success.
func (m *mockIncidentStore) Record(resourceID, resourceType, alertID, analysis, remediation string) error {
	return nil
}
|