// Source: Pulse/internal/api/ai_handlers.go (Go, ~6905 lines / 224 KiB).
package api
import (
"context"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/rcourtman/pulse-go-rewrite/internal/agentexec"
"github.com/rcourtman/pulse-go-rewrite/internal/ai"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/approval"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/chat"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/circuit"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/cost"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/forecast"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/learning"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/memory"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/providers"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/proxmox"
"github.com/rcourtman/pulse-go-rewrite/internal/ai/unified"
"github.com/rcourtman/pulse-go-rewrite/internal/config"
"github.com/rcourtman/pulse-go-rewrite/internal/metrics"
"github.com/rcourtman/pulse-go-rewrite/internal/mockmode"
"github.com/rcourtman/pulse-go-rewrite/internal/monitoring"
"github.com/rcourtman/pulse-go-rewrite/internal/servicediscovery"
"github.com/rcourtman/pulse-go-rewrite/internal/unifiedresources"
"github.com/rcourtman/pulse-go-rewrite/internal/utils"
"github.com/rcourtman/pulse-go-rewrite/pkg/aicontracts"
"github.com/rcourtman/pulse-go-rewrite/pkg/extensions"
"github.com/rs/zerolog/log"
)
// AISettingsHandler handles AI settings endpoints.
//
// Locking model (three independent mutexes, never nested in reverse order):
//   - stateMu guards the core wiring fields (persistence/monitor refs,
//     default config/service, and the provider references below).
//   - aiServicesMu guards the per-org aiServices map.
//   - intelligenceMu guards the per-org intelligence maps (correlators,
//     learning stores, forecast services, etc.).
type AISettingsHandler struct {
	stateMu                 sync.RWMutex
	mtPersistence           *config.MultiTenantPersistence
	mtMonitor               *monitoring.MultiTenantMonitor
	defaultConfig           *config.Config
	defaultPersistence      *config.ConfigPersistence
	hostedMode              bool
	defaultAIService        *ai.Service
	aiServices              map[string]*ai.Service // per-org services, created lazily by GetAIService
	aiServicesMu            sync.RWMutex
	agentServer             *agentexec.Server
	onModelChange           func() // Called when model or other AI chat-affecting settings change
	onControlSettingsChange func() // Called when control level or protected guests change
	// Providers to be applied to new services
	stateProvider           ai.StateProvider
	readState               unifiedresources.ReadState
	unifiedResourceProvider ai.UnifiedResourceProvider
	metadataProvider        ai.MetadataProvider
	patrolThresholdProvider ai.ThresholdProvider
	metricsHistoryProvider  ai.MetricsHistoryProvider
	baselineStore           *ai.BaselineStore
	changeDetector          *ai.ChangeDetector
	remediationLog          *ai.RemediationLog
	incidentStore           *memory.IncidentStore
	patternDetector         *ai.PatternDetector
	correlationDetector     *ai.CorrelationDetector
	licenseHandlers         *LicenseHandlers
	// New AI intelligence services (Phase 6).
	// The singular fields below hold the default org's instance; the
	// per-org maps further down hold tenant instances keyed by orgID.
	circuitBreaker    *circuit.Breaker             // Circuit breaker for resilient patrol
	learningStore     *learning.LearningStore      // Feedback learning
	forecastService   *forecast.Service            // Trend forecasting
	proxmoxCorrelator *proxmox.EventCorrelator     // Proxmox event correlation
	remediationEngine aicontracts.RemediationEngine // AI-guided remediation
	unifiedStore      *unified.UnifiedStore        // Unified alert/finding store
	alertBridge       *unified.AlertBridge         // Bridge between alerts and unified store
	// Event-driven patrol (Phase 7)
	triggerManager      *ai.TriggerManager       // Event-driven patrol trigger manager
	incidentCoordinator *ai.IncidentCoordinator  // Incident recording coordinator
	incidentRecorder    *metrics.IncidentRecorder // High-frequency incident recorder
	intelligenceMu       sync.RWMutex
	proxmoxCorrelators   map[string]*proxmox.EventCorrelator
	learningStores       map[string]*learning.LearningStore
	forecastServices     map[string]*forecast.Service
	remediationEngines   map[string]aicontracts.RemediationEngine
	incidentStores       map[string]*memory.IncidentStore
	circuitBreakers      map[string]*circuit.Breaker
	discoveryStores      map[string]*servicediscovery.Store
	unifiedStores        map[string]*unified.UnifiedStore
	alertBridges         map[string]*unified.AlertBridge
	triggerManagers      map[string]*ai.TriggerManager
	incidentCoordinators map[string]*ai.IncidentCoordinator
	incidentRecorders    map[string]*metrics.IncidentRecorder
	// Investigation orchestration (Patrol Autonomy)
	chatHandler         *AIHandler                                // Chat service handler for investigations
	investigationStores map[string]aicontracts.InvestigationStore // Investigation stores per org
	investigationMu     sync.RWMutex
	// Extension endpoints for enterprise feature gating
	aiAutoFixEndpoints extensions.AIAutoFixEndpoints
	// Discovery store for deep infrastructure discovery
	discoveryStore *servicediscovery.Store
}
// SetAIAutoFixEndpoints stores the resolved AI auto-fix extension endpoints
// on the handler. It is invoked during route registration so that the
// approval handler can later delegate investigation fix approvals to the
// enterprise extension.
func (h *AISettingsHandler) SetAIAutoFixEndpoints(ep extensions.AIAutoFixEndpoints) {
	h.aiAutoFixEndpoints = ep
}
// stateRefs returns a consistent snapshot of the handler's core wiring
// references, all read under a single stateMu read-lock so callers never see
// a torn mix of old and new values.
func (h *AISettingsHandler) stateRefs() (
	*config.MultiTenantPersistence,
	*monitoring.MultiTenantMonitor,
	*config.Config,
	*config.ConfigPersistence,
	unifiedresources.ReadState,
	ai.UnifiedResourceProvider,
) {
	h.stateMu.RLock()
	defer h.stateMu.RUnlock()
	return h.mtPersistence, h.mtMonitor, h.defaultConfig, h.defaultPersistence, h.readState, h.unifiedResourceProvider
}
// aiSettingsProviderSnapshot is a point-in-time copy of the provider
// references held by AISettingsHandler, taken under stateMu by
// providerSnapshot so callers can use them without holding the lock.
type aiSettingsProviderSnapshot struct {
	defaultAIService        *ai.Service
	stateProvider           ai.StateProvider
	metadataProvider        ai.MetadataProvider
	patrolThresholdProvider ai.ThresholdProvider
	metricsHistoryProvider  ai.MetricsHistoryProvider
	baselineStore           *ai.BaselineStore
	changeDetector          *ai.ChangeDetector
	remediationLog          *ai.RemediationLog
	incidentStore           *memory.IncidentStore
	patternDetector         *ai.PatternDetector
	correlationDetector     *ai.CorrelationDetector
	discoveryStore          *servicediscovery.Store
	licenseHandlers         *LicenseHandlers
	chatHandler             *AIHandler
}
// failClosedLicenseChecker is a license checker that denies every feature.
// It is installed on tenant services whose real persistence/license state
// cannot be resolved, so such tenants fail closed.
type failClosedLicenseChecker struct{}

// HasFeature always reports false: no feature is licensed.
func (failClosedLicenseChecker) HasFeature(string) bool { return false }

// GetLicenseStateString reports the "none" license state and not-valid.
func (failClosedLicenseChecker) GetLicenseStateString() (string, bool) {
	return string(ai.LicenseStateNone), false
}
// providerSnapshot copies all provider references under a single stateMu
// read-lock, giving callers a consistent view they can use lock-free.
func (h *AISettingsHandler) providerSnapshot() aiSettingsProviderSnapshot {
	h.stateMu.RLock()
	defer h.stateMu.RUnlock()
	return aiSettingsProviderSnapshot{
		defaultAIService:        h.defaultAIService,
		stateProvider:           h.stateProvider,
		metadataProvider:        h.metadataProvider,
		patrolThresholdProvider: h.patrolThresholdProvider,
		metricsHistoryProvider:  h.metricsHistoryProvider,
		baselineStore:           h.baselineStore,
		changeDetector:          h.changeDetector,
		remediationLog:          h.remediationLog,
		incidentStore:           h.incidentStore,
		patternDetector:         h.patternDetector,
		correlationDetector:     h.correlationDetector,
		discoveryStore:          h.discoveryStore,
		licenseHandlers:         h.licenseHandlers,
		chatHandler:             h.chatHandler,
	}
}
// newFailClosedTenantService builds a minimal AI service for a tenant whose
// real persistence could not be resolved. The service is given a license
// checker that denies everything, so the tenant fails closed instead of
// inheriting default-org capabilities.
func (h *AISettingsHandler) newFailClosedTenantService(orgID string) *ai.Service {
	service := ai.NewService(nil, h.agentServer)
	service.SetOrgID(orgID)
	service.SetAlertAnalyzerFactory(getCreateAlertAnalyzer())
	service.SetLicenseChecker(failClosedLicenseChecker{})
	return service
}
// NewAISettingsHandler creates a new AI settings handler.
//
// It eagerly resolves the default org's config and persistence from the
// multi-tenant managers (when present), then constructs and configures the
// default AI service — wiring the quickstart credit manager before the first
// LoadConfig, since that ordering matters (see inline comment). Per-tenant
// services are created lazily by GetAIService.
func NewAISettingsHandler(mtp *config.MultiTenantPersistence, mtm *monitoring.MultiTenantMonitor, agentServer *agentexec.Server) *AISettingsHandler {
	var defaultConfig *config.Config
	var defaultPersistence *config.ConfigPersistence
	var defaultAIService *ai.Service
	hostedMode := hostedModeEnabledFromEnv()
	if mtm != nil {
		if m, err := mtm.GetMonitor("default"); err == nil && m != nil {
			defaultConfig = m.GetConfig()
		}
	}
	if mtp != nil {
		if p, err := mtp.GetPersistence("default"); err == nil {
			defaultPersistence = p
		}
	}
	handler := &AISettingsHandler{
		mtPersistence:        mtp,
		mtMonitor:            mtm,
		defaultConfig:        defaultConfig,
		defaultPersistence:   defaultPersistence,
		hostedMode:           hostedMode,
		aiServices:           make(map[string]*ai.Service),
		agentServer:          agentServer,
		proxmoxCorrelators:   make(map[string]*proxmox.EventCorrelator),
		learningStores:       make(map[string]*learning.LearningStore),
		forecastServices:     make(map[string]*forecast.Service),
		remediationEngines:   make(map[string]aicontracts.RemediationEngine),
		incidentStores:       make(map[string]*memory.IncidentStore),
		circuitBreakers:      make(map[string]*circuit.Breaker),
		discoveryStores:      make(map[string]*servicediscovery.Store),
		unifiedStores:        make(map[string]*unified.UnifiedStore),
		alertBridges:         make(map[string]*unified.AlertBridge),
		triggerManagers:      make(map[string]*ai.TriggerManager),
		incidentCoordinators: make(map[string]*ai.IncidentCoordinator),
		incidentRecorders:    make(map[string]*metrics.IncidentRecorder),
	}
	defaultAIService = ai.NewService(defaultPersistence, agentServer)
	defaultAIService.SetOrgID("default")
	defaultAIService.SetAlertAnalyzerFactory(getCreateAlertAnalyzer())
	// Wire quickstart credit manager before LoadConfig so the quickstart
	// provider path is available during initial configuration.
	if defaultPersistence != nil {
		qsMgr := ai.NewPersistentQuickstartCreditManager(
			defaultPersistence,
			"default",
			func() *config.AIConfig {
				// Re-read the persisted config on demand; the error is
				// deliberately dropped — a nil config is tolerated downstream.
				cfg, _ := handler.loadAIConfigForPersistence(context.Background(), "default", defaultPersistence)
				return cfg
			},
		)
		defaultAIService.SetQuickstartCredits(qsMgr)
		if _, err := handler.loadAIConfigForPersistence(context.Background(), "default", defaultPersistence); err != nil {
			log.Warn().Err(err).Msg("Failed to bootstrap Pulse Assistant config on startup")
		}
		if err := defaultAIService.LoadConfig(); err != nil {
			log.Warn().Err(err).Msg("Failed to load AI config on startup")
		}
	}
	handler.defaultAIService = defaultAIService
	return handler
}
// loadAIConfigForPersistence resolves the hosted-aware AI config for orgID
// using the supplied persistence. The billing base directory comes from the
// multi-tenant persistence manager when one is wired, otherwise from the
// persistence's own data directory. Safe on a nil receiver.
func (h *AISettingsHandler) loadAIConfigForPersistence(_ context.Context, orgID string, persistence *config.ConfigPersistence) (*config.AIConfig, error) {
	if persistence == nil {
		return nil, fmt.Errorf("Pulse Assistant config persistence unavailable")
	}
	baseDir := persistence.DataDir()
	hosted := false
	if h != nil {
		hosted = h.hostedMode
		if h.mtPersistence != nil {
			baseDir = h.mtPersistence.BaseDataDir()
		}
	}
	return loadHostedAwareAIConfig(hosted, baseDir, orgID, persistence)
}
// loadAIConfig loads the AI config for the org carried by ctx, defaulting to
// the "default" org when ctx names none.
func (h *AISettingsHandler) loadAIConfig(ctx context.Context) (*config.AIConfig, error) {
	org := strings.TrimSpace(GetOrgID(ctx))
	if org == "" {
		org = "default"
	}
	persistence := h.getPersistence(ctx)
	if persistence == nil {
		return nil, fmt.Errorf("Pulse Assistant config persistence unavailable")
	}
	return h.loadAIConfigForPersistence(ctx, org, persistence)
}
// GetAIService returns the AI service for the org carried by ctx, lazily
// creating and fully wiring a per-tenant service on first use. The default
// org always gets the eagerly-constructed default service. Tenants whose
// persistence cannot be resolved get a fail-closed service (every license
// feature denied) rather than falling back to default-org state.
func (h *AISettingsHandler) GetAIService(ctx context.Context) *ai.Service {
	mtPersistence, mtMonitor, _, _, _, _ := h.stateRefs()
	providers := h.providerSnapshot()
	defaultAIService := providers.defaultAIService
	orgID := GetOrgID(ctx)
	if orgID == "default" || orgID == "" {
		return defaultAIService
	}
	if mtPersistence == nil {
		if mtMonitor != nil {
			// Multi-tenant monitoring without multi-tenant persistence:
			// fail closed rather than leaking the default-org service.
			log.Warn().Str("orgID", orgID).Msg("Failed to get persistence manager for tenant AI service")
			return h.newFailClosedTenantService(orgID)
		}
		return defaultAIService
	}
	// Fast path: service already cached for this tenant.
	h.aiServicesMu.RLock()
	svc, exists := h.aiServices[orgID]
	h.aiServicesMu.RUnlock()
	if exists {
		return svc
	}
	// Slow path: create under the write lock. Note the lock is held for the
	// whole construction below, serializing first-use setup per process.
	h.aiServicesMu.Lock()
	defer h.aiServicesMu.Unlock()
	// Double check
	if svc, exists = h.aiServices[orgID]; exists {
		return svc
	}
	// Create new service for this tenant
	persistence, err := mtPersistence.GetPersistence(orgID)
	if err != nil {
		log.Warn().Str("orgID", orgID).Err(err).Msg("Failed to get persistence for AI service")
		return h.newFailClosedTenantService(orgID)
	}
	if persistence == nil {
		log.Warn().Str("orgID", orgID).Msg("Tenant persistence unavailable for AI service")
		return h.newFailClosedTenantService(orgID)
	}
	svc = ai.NewService(persistence, h.agentServer)
	svc.SetOrgID(orgID)
	svc.SetAlertAnalyzerFactory(getCreateAlertAnalyzer())
	// Wire quickstart credit manager before LoadConfig so the quickstart
	// provider path is available during initial configuration.
	qsMgr := ai.NewPersistentQuickstartCreditManager(
		persistence,
		orgID,
		func() *config.AIConfig {
			// Error deliberately dropped; nil config is tolerated downstream.
			cfg, _ := h.loadAIConfigForPersistence(context.Background(), orgID, persistence)
			return cfg
		},
	)
	svc.SetQuickstartCredits(qsMgr)
	if _, err := h.loadAIConfigForPersistence(context.Background(), orgID, persistence); err != nil {
		log.Warn().Str("orgID", orgID).Err(err).Msg("Failed to bootstrap Pulse Assistant config for tenant")
	}
	if err := svc.LoadConfig(); err != nil {
		log.Warn().Str("orgID", orgID).Err(err).Msg("Failed to load AI config for tenant")
	}
	// Set providers on new service
	svc.SetStateProvider(h.stateProviderForOrg(orgID, providers.stateProvider))
	if readState := h.readStateForOrg(orgID); readState != nil {
		svc.SetReadState(readState)
	}
	if provider := h.unifiedResourceProviderForOrg(orgID); provider != nil {
		svc.SetUnifiedResourceProvider(provider)
	}
	if providers.metadataProvider != nil {
		svc.SetMetadataProvider(providers.metadataProvider)
	}
	// NOTE(review): orgID can never be "default" here — the default org
	// returns early at the top of this function — so this branch and the
	// similar ones below look dead. Confirm before removing.
	if orgID == "default" {
		if providers.patrolThresholdProvider != nil {
			svc.SetPatrolThresholdProvider(providers.patrolThresholdProvider)
		}
		if providers.metricsHistoryProvider != nil {
			svc.SetMetricsHistoryProvider(providers.metricsHistoryProvider)
		}
		if providers.baselineStore != nil {
			svc.SetBaselineStore(providers.baselineStore)
		}
		if providers.changeDetector != nil {
			svc.SetChangeDetector(providers.changeDetector)
		}
		if providers.remediationLog != nil {
			svc.SetRemediationLog(providers.remediationLog)
		}
	}
	if incidentStore := h.GetIncidentStoreForOrg(orgID); incidentStore != nil {
		svc.SetIncidentStore(incidentStore)
	} else if orgID == "default" && providers.incidentStore != nil {
		svc.SetIncidentStore(providers.incidentStore)
	}
	if orgID == "default" {
		if providers.patternDetector != nil {
			svc.SetPatternDetector(providers.patternDetector)
		}
		if providers.correlationDetector != nil {
			svc.SetCorrelationDetector(providers.correlationDetector)
		}
	}
	if discoveryStore := h.GetDiscoveryStoreForOrg(orgID); discoveryStore != nil {
		svc.SetDiscoveryStore(discoveryStore)
	} else if orgID == "default" && providers.discoveryStore != nil {
		svc.SetDiscoveryStore(providers.discoveryStore)
	}
	// Set license checker if handler available
	if providers.licenseHandlers != nil {
		// Used context to resolve tenant license service
		if licSvc, _, err := providers.licenseHandlers.getTenantComponents(ctx); err == nil {
			svc.SetLicenseChecker(licSvc)
		}
	}
	// Set up investigation orchestrator if chat handler is available
	if providers.chatHandler != nil && isAIInvestigationEnabled() {
		h.setupInvestigationOrchestrator(orgID, svc)
	}
	h.aiServices[orgID] = svc
	return svc
}
// RemoveTenantService tears down all per-tenant AI state for orgID: the
// cached service (stopping its patrol), the intelligence components, and the
// investigation store. The default org is never removed.
func (h *AISettingsHandler) RemoveTenantService(orgID string) {
	orgID = strings.TrimSpace(orgID)
	if orgID == "" || orgID == "default" {
		return
	}
	h.aiServicesMu.Lock()
	svc, ok := h.aiServices[orgID]
	delete(h.aiServices, orgID)
	h.aiServicesMu.Unlock()
	if ok && svc != nil {
		svc.StopPatrol()
	}
	h.RemoveTenantIntelligence(orgID)
	h.investigationMu.Lock()
	delete(h.investigationStores, orgID)
	h.investigationMu.Unlock()
	log.Debug().Str("orgID", orgID).Msg("Removed AI settings service for tenant")
}
// getConfig resolves the monitoring config for the org in ctx. Non-default
// orgs are served only from their own monitor; they never fall back to the
// default config (security boundary).
func (h *AISettingsHandler) getConfig(ctx context.Context) *config.Config {
	_, mtMonitor, defaultConfig, _, _, _ := h.stateRefs()
	orgID := strings.TrimSpace(GetOrgID(ctx))
	if orgID == "" || orgID == "default" || mtMonitor == nil {
		return defaultConfig
	}
	m, err := mtMonitor.GetMonitor(orgID)
	if err != nil || m == nil {
		// Security: never fall back to default config for non-default orgs.
		return nil
	}
	return m.GetConfig()
}
// getPersistence resolves the config persistence for the org in ctx.
// Non-default orgs are served only from their own persistence; they never
// fall back to the default persistence (security boundary).
func (h *AISettingsHandler) getPersistence(ctx context.Context) *config.ConfigPersistence {
	mtPersistence, _, _, defaultPersistence, _, _ := h.stateRefs()
	orgID := strings.TrimSpace(GetOrgID(ctx))
	if orgID == "" || orgID == "default" || mtPersistence == nil {
		return defaultPersistence
	}
	p, err := mtPersistence.GetPersistence(orgID)
	if err != nil || p == nil {
		// Security: never fall back to default persistence for non-default orgs.
		return nil
	}
	return p
}
// SetMultiTenantPersistence swaps in a new multi-tenant persistence manager
// under the state lock.
func (h *AISettingsHandler) SetMultiTenantPersistence(mtp *config.MultiTenantPersistence) {
	h.stateMu.Lock()
	h.mtPersistence = mtp
	h.stateMu.Unlock()
}
// SetMultiTenantMonitor swaps in a new multi-tenant monitor manager under
// the state lock.
func (h *AISettingsHandler) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
	h.stateMu.Lock()
	h.mtMonitor = mtm
	h.stateMu.Unlock()
}
// SetConfig updates the default-org configuration reference used by the
// handler. A nil config is ignored so an accidental nil can never wipe the
// current reference.
func (h *AISettingsHandler) SetConfig(cfg *config.Config) {
	if cfg == nil {
		return
	}
	h.stateMu.Lock()
	h.defaultConfig = cfg
	h.stateMu.Unlock()
}
// setSSECORSHeaders emits CORS headers for SSE responses, but only when the
// request's Origin matches the configured AllowedOrigins. Requests without an
// Origin header, without a resolvable config, or with an empty allow-list get
// no CORS headers at all — this prevents arbitrary origin reflection.
func (h *AISettingsHandler) setSSECORSHeaders(w http.ResponseWriter, r *http.Request) {
	origin := r.Header.Get("Origin")
	if origin == "" {
		return
	}
	cfg := h.getConfig(r.Context())
	if cfg == nil || cfg.AllowedOrigins == "" {
		return
	}
	applyConfiguredCORSHeaders(w, origin, cfg.AllowedOrigins, "GET, POST, OPTIONS", "Content-Type, Accept, Cookie")
}
// SetStateProvider sets the state provider for infrastructure context and
// propagates it to the default service and every cached tenant service
// (tenants receive the org-scoped provider, which is nil for non-default
// orgs — see stateProviderForOrg).
func (h *AISettingsHandler) SetStateProvider(sp ai.StateProvider) {
	h.stateMu.Lock()
	h.stateProvider = sp
	defaultAIService := h.defaultAIService
	h.stateMu.Unlock()
	if defaultAIService != nil {
		defaultAIService.SetStateProvider(sp)
	}
	h.aiServicesMu.Lock()
	for orgID, svc := range h.aiServices {
		svc.SetStateProvider(h.stateProviderForOrg(orgID, sp))
	}
	h.aiServicesMu.Unlock()
	// Now that state provider is set, patrol service should be available.
	// Try to set up the investigation orchestrator if chat handler is ready.
	// Note: This usually fails because chat service isn't started yet.
	// The orchestrator will be wired via WireOrchestratorAfterChatStart() instead.
	if defaultAIService != nil && isAIInvestigationEnabled() {
		h.setupInvestigationOrchestrator("default", defaultAIService)
		h.aiServicesMu.RLock()
		for orgID, svc := range h.aiServices {
			h.setupInvestigationOrchestrator(orgID, svc)
		}
		h.aiServicesMu.RUnlock()
	}
}
// GetStateProvider returns the currently wired infrastructure state provider.
func (h *AISettingsHandler) GetStateProvider() ai.StateProvider {
	h.stateMu.RLock()
	sp := h.stateProvider
	h.stateMu.RUnlock()
	return sp
}
// stateProviderForOrg scopes a state provider to orgID. Only the default
// org may use the shared fallback provider; any other org receives nil so
// default-org context can never leak across the tenant boundary.
func (h *AISettingsHandler) stateProviderForOrg(orgID string, fallback ai.StateProvider) ai.StateProvider {
	switch strings.TrimSpace(orgID) {
	case "", "default":
		return fallback
	default:
		// Security: never fall back to default-org provider for non-default orgs.
		return nil
	}
}
// readStateForOrg resolves the unified read-state for orgID. A tenant's own
// monitor wins; when the multi-tenant monitor is wired, non-default orgs
// never fall back to the shared read-state. Safe on a nil receiver.
func (h *AISettingsHandler) readStateForOrg(orgID string) unifiedresources.ReadState {
	if h == nil {
		return nil
	}
	_, mtMonitor, _, _, fallback, _ := h.stateRefs()
	if orgID = strings.TrimSpace(orgID); orgID == "" {
		orgID = "default"
	}
	if mtMonitor == nil {
		return fallback
	}
	if monitor, err := mtMonitor.GetMonitor(orgID); err == nil && monitor != nil {
		if rs := monitor.GetUnifiedReadState(); rs != nil {
			return rs
		}
	}
	if orgID != "default" {
		// Security: never fall back to default-org read state for non-default orgs.
		return nil
	}
	return fallback
}
// unifiedResourceProviderForOrg resolves the unified resource provider for
// orgID by type-asserting the org's read-state. When the multi-tenant monitor
// is wired, non-default orgs never fall back to the shared provider. Safe on
// a nil receiver.
func (h *AISettingsHandler) unifiedResourceProviderForOrg(orgID string) ai.UnifiedResourceProvider {
	if h == nil {
		return nil
	}
	_, mtMonitor, _, _, _, fallback := h.stateRefs()
	if orgID = strings.TrimSpace(orgID); orgID == "" {
		orgID = "default"
	}
	if mtMonitor == nil {
		return fallback
	}
	if monitor, err := mtMonitor.GetMonitor(orgID); err == nil && monitor != nil {
		if rs := monitor.GetUnifiedReadState(); rs != nil {
			if provider, ok := rs.(ai.UnifiedResourceProvider); ok && provider != nil {
				return provider
			}
		}
	}
	if orgID != "default" {
		// Security: never fall back to default-org unified provider for non-default orgs.
		return nil
	}
	return fallback
}
// SetReadState injects unified read-state context into AI services (patrol
// path), re-resolving the org-scoped read-state for the default service and
// every cached tenant service.
func (h *AISettingsHandler) SetReadState(rs unifiedresources.ReadState) {
	if h == nil {
		return
	}
	h.stateMu.Lock()
	h.readState = rs
	defaultSvc := h.defaultAIService
	h.stateMu.Unlock()
	if defaultSvc != nil {
		defaultSvc.SetReadState(h.readStateForOrg("default"))
	}
	h.aiServicesMu.Lock()
	defer h.aiServicesMu.Unlock()
	for orgID, svc := range h.aiServices {
		if svc == nil {
			continue
		}
		svc.SetReadState(h.readStateForOrg(orgID))
	}
}
// SetUnifiedResourceProvider forwards unified-resource-native context to AI
// services, re-resolving the org-scoped provider for the default service and
// every cached tenant service.
func (h *AISettingsHandler) SetUnifiedResourceProvider(urp ai.UnifiedResourceProvider) {
	if h == nil {
		return
	}
	h.stateMu.Lock()
	h.unifiedResourceProvider = urp
	defaultSvc := h.defaultAIService
	h.stateMu.Unlock()
	if defaultSvc != nil {
		defaultSvc.SetUnifiedResourceProvider(h.unifiedResourceProviderForOrg("default"))
	}
	h.aiServicesMu.Lock()
	for orgID, svc := range h.aiServices {
		if svc == nil {
			continue
		}
		svc.SetUnifiedResourceProvider(h.unifiedResourceProviderForOrg(orgID))
	}
	h.aiServicesMu.Unlock()
}
// SetMetadataProvider sets the metadata provider used for AI URL discovery
// and pushes it to the default service and all cached tenant services.
func (h *AISettingsHandler) SetMetadataProvider(mp ai.MetadataProvider) {
	h.stateMu.Lock()
	h.metadataProvider = mp
	defaultSvc := h.defaultAIService
	h.stateMu.Unlock()
	if defaultSvc != nil {
		defaultSvc.SetMetadataProvider(mp)
	}
	h.aiServicesMu.Lock()
	for _, svc := range h.aiServices {
		svc.SetMetadataProvider(mp)
	}
	h.aiServicesMu.Unlock()
}
// StartPatrol starts the background AI patrol service for the org in ctx.
func (h *AISettingsHandler) StartPatrol(ctx context.Context) {
	svc := h.GetAIService(ctx)
	svc.StartPatrol(ctx)
}
// IsAIEnabled reports whether AI features are enabled for the org in ctx.
func (h *AISettingsHandler) IsAIEnabled(ctx context.Context) bool {
	svc := h.GetAIService(ctx)
	return svc.IsEnabled()
}
// SetPatrolThresholdProvider wires the alert-threshold provider into the
// default org's patrol service.
func (h *AISettingsHandler) SetPatrolThresholdProvider(provider ai.ThresholdProvider) {
	h.stateMu.Lock()
	h.patrolThresholdProvider = provider
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetPatrolThresholdProvider(provider)
	}
}
// SetPatrolFindingsPersistence enables findings persistence on the default
// org's patrol service and on every currently active tenant patrol service.
// It returns the first error encountered; later tenant failures are logged
// but do not mask it.
func (h *AISettingsHandler) SetPatrolFindingsPersistence(persistence ai.FindingsPersistence) error {
	var firstErr error
	// Snapshot the default service under the state lock, consistent with the
	// other accessors, and nil-check it instead of dereferencing blindly.
	h.stateMu.RLock()
	defaultSvc := h.defaultAIService
	h.stateMu.RUnlock()
	if defaultSvc != nil {
		if patrol := defaultSvc.GetPatrolService(); patrol != nil {
			if err := patrol.SetFindingsPersistence(persistence); err != nil {
				firstErr = err
			}
		}
	}
	// Also apply to active services
	h.aiServicesMu.RLock()
	defer h.aiServicesMu.RUnlock()
	for orgID, svc := range h.aiServices {
		if patrol := svc.GetPatrolService(); patrol != nil {
			if err := patrol.SetFindingsPersistence(persistence); err != nil {
				log.Warn().Str("orgID", orgID).Err(err).Msg("Failed to set findings persistence for tenant")
				if firstErr == nil {
					firstErr = err
				}
			}
		}
	}
	return firstErr
}
// SetPatrolRunHistoryPersistence enables patrol run history persistence on
// the default org's patrol service and on every currently active tenant
// patrol service. It returns the first error encountered; later tenant
// failures are logged but do not mask it.
func (h *AISettingsHandler) SetPatrolRunHistoryPersistence(persistence ai.PatrolHistoryPersistence) error {
	var firstErr error
	// Snapshot the default service under the state lock, consistent with the
	// other accessors, and nil-check it instead of dereferencing blindly.
	h.stateMu.RLock()
	defaultSvc := h.defaultAIService
	h.stateMu.RUnlock()
	if defaultSvc != nil {
		if patrol := defaultSvc.GetPatrolService(); patrol != nil {
			if err := patrol.SetRunHistoryPersistence(persistence); err != nil {
				firstErr = err
			}
		}
	}
	// Also apply to active services
	h.aiServicesMu.RLock()
	defer h.aiServicesMu.RUnlock()
	for orgID, svc := range h.aiServices {
		if patrol := svc.GetPatrolService(); patrol != nil {
			if err := patrol.SetRunHistoryPersistence(persistence); err != nil {
				log.Warn().Str("orgID", orgID).Err(err).Msg("Failed to set run history persistence for tenant")
				if firstErr == nil {
					firstErr = err
				}
			}
		}
	}
	return firstErr
}
// SetMetricsHistoryProvider wires the metrics history provider (enriched AI
// context) into the default org's service.
func (h *AISettingsHandler) SetMetricsHistoryProvider(provider ai.MetricsHistoryProvider) {
	h.stateMu.Lock()
	h.metricsHistoryProvider = provider
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetMetricsHistoryProvider(provider)
	}
}
// SetBaselineStore wires the anomaly-detection baseline store into the
// default org's service.
func (h *AISettingsHandler) SetBaselineStore(store *ai.BaselineStore) {
	h.stateMu.Lock()
	h.baselineStore = store
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetBaselineStore(store)
	}
}
// SetChangeDetector wires the operational-memory change detector into the
// default org's service.
func (h *AISettingsHandler) SetChangeDetector(detector *ai.ChangeDetector) {
	h.stateMu.Lock()
	h.changeDetector = detector
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetChangeDetector(detector)
	}
}
// SetRemediationLog wires the fix-attempt remediation log into the default
// org's service.
func (h *AISettingsHandler) SetRemediationLog(remLog *ai.RemediationLog) {
	h.stateMu.Lock()
	h.remediationLog = remLog
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetRemediationLog(remLog)
	}
}
// SetIncidentStore registers the incident store for the default org; it is a
// convenience wrapper around SetIncidentStoreForOrg.
func (h *AISettingsHandler) SetIncidentStore(store *memory.IncidentStore) {
	h.SetIncidentStoreForOrg("default", store)
}
// SetIncidentStoreForOrg sets the incident store for alert timelines for an
// org. A nil store removes the org's entry. For the default org the legacy
// stateMu-guarded field is updated too, and the store is pushed onto the
// org's cached AI service when one exists. Safe on a nil receiver.
func (h *AISettingsHandler) SetIncidentStoreForOrg(orgID string, store *memory.IncidentStore) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	h.ensureIntelligenceMapsLocked()
	if store == nil {
		delete(h.incidentStores, orgID)
	} else {
		h.incidentStores[orgID] = store
	}
	h.intelligenceMu.Unlock()
	if orgID == "default" {
		// Keep the legacy default-org field in sync under stateMu.
		h.stateMu.Lock()
		h.incidentStore = store
		defaultAIService := h.defaultAIService
		h.stateMu.Unlock()
		if defaultAIService != nil {
			defaultAIService.SetIncidentStore(store)
		}
	}
	// Push the store onto the already-cached tenant service, if any.
	h.aiServicesMu.RLock()
	svc := h.aiServices[orgID]
	h.aiServicesMu.RUnlock()
	if svc != nil {
		svc.SetIncidentStore(store)
	}
}
// GetIncidentStoreForOrg returns the incident store registered for orgID.
// The default org falls back to the legacy stateMu-guarded field when no
// per-org entry exists; other orgs get nil. Safe on a nil receiver.
func (h *AISettingsHandler) GetIncidentStoreForOrg(orgID string) *memory.IncidentStore {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.RLock()
	store := h.incidentStores[orgID]
	h.intelligenceMu.RUnlock()
	if store != nil {
		return store
	}
	if orgID != "default" {
		return nil
	}
	h.stateMu.RLock()
	defer h.stateMu.RUnlock()
	return h.incidentStore
}
// SetPatternDetector wires the failure-prediction pattern detector into the
// default org's service.
func (h *AISettingsHandler) SetPatternDetector(detector *ai.PatternDetector) {
	h.stateMu.Lock()
	h.patternDetector = detector
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetPatternDetector(detector)
	}
}
// SetCorrelationDetector wires the multi-resource correlation detector into
// the default org's service.
func (h *AISettingsHandler) SetCorrelationDetector(detector *ai.CorrelationDetector) {
	h.stateMu.Lock()
	h.correlationDetector = detector
	svc := h.defaultAIService
	h.stateMu.Unlock()
	if svc != nil {
		svc.SetCorrelationDetector(detector)
	}
}
// SetCircuitBreaker registers the circuit breaker for the default org; it is
// a convenience wrapper around SetCircuitBreakerForOrg.
func (h *AISettingsHandler) SetCircuitBreaker(breaker *circuit.Breaker) {
	h.SetCircuitBreakerForOrg("default", breaker)
}
// SetCircuitBreakerForOrg registers (or, for a nil breaker, removes) the
// circuit breaker for orgID. The legacy default-org field is updated inside
// the same intelligenceMu critical section — previously it was written after
// Unlock with no lock at all, racing with GetCircuitBreakerForOrg readers.
// Safe on a nil receiver.
func (h *AISettingsHandler) SetCircuitBreakerForOrg(orgID string, breaker *circuit.Breaker) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if breaker == nil {
		delete(h.circuitBreakers, orgID)
	} else {
		h.circuitBreakers[orgID] = breaker
	}
	if orgID == "default" {
		// Keep the legacy default-org field in sync under the same lock.
		h.circuitBreaker = breaker
	}
}
// GetCircuitBreaker returns the default org's circuit breaker; it is a
// convenience wrapper around GetCircuitBreakerForOrg.
func (h *AISettingsHandler) GetCircuitBreaker() *circuit.Breaker {
	return h.GetCircuitBreakerForOrg("default")
}
// GetCircuitBreakerForOrg returns the circuit breaker for orgID, with the
// default org falling back to the legacy field. The whole lookup — including
// the legacy-field read, which previously happened after RUnlock with no
// lock — runs under intelligenceMu so it cannot race with
// SetCircuitBreakerForOrg. Safe on a nil receiver.
func (h *AISettingsHandler) GetCircuitBreakerForOrg(orgID string) *circuit.Breaker {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if breaker := h.circuitBreakers[orgID]; breaker != nil {
		return breaker
	}
	if orgID == "default" {
		return h.circuitBreaker
	}
	return nil
}
// SetLearningStore registers the learning store for the default org; it is a
// convenience wrapper around SetLearningStoreForOrg.
func (h *AISettingsHandler) SetLearningStore(store *learning.LearningStore) {
	h.SetLearningStoreForOrg("default", store)
}
// SetLearningStoreForOrg registers (or, for a nil store, removes) the
// feedback learning store for orgID. The legacy default-org field is updated
// inside the same intelligenceMu critical section — previously it was
// written after Unlock with no lock at all, racing with
// GetLearningStoreForOrg readers. Safe on a nil receiver.
func (h *AISettingsHandler) SetLearningStoreForOrg(orgID string, store *learning.LearningStore) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if store == nil {
		delete(h.learningStores, orgID)
	} else {
		h.learningStores[orgID] = store
	}
	if orgID == "default" {
		// Keep the legacy default-org field in sync under the same lock.
		h.learningStore = store
	}
}
// GetLearningStore returns the default org's learning store; it is a
// convenience wrapper around GetLearningStoreForOrg.
func (h *AISettingsHandler) GetLearningStore() *learning.LearningStore {
	return h.GetLearningStoreForOrg("default")
}
// GetLearningStoreForOrg returns the learning store for orgID, with the
// default org falling back to the legacy field. The whole lookup — including
// the legacy-field read, which previously happened after RUnlock with no
// lock — runs under intelligenceMu so it cannot race with
// SetLearningStoreForOrg. Safe on a nil receiver.
func (h *AISettingsHandler) GetLearningStoreForOrg(orgID string) *learning.LearningStore {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if store := h.learningStores[orgID]; store != nil {
		return store
	}
	if orgID == "default" {
		return h.learningStore
	}
	return nil
}
// SetForecastService registers the forecast service for the default org; it
// is a convenience wrapper around SetForecastServiceForOrg.
func (h *AISettingsHandler) SetForecastService(svc *forecast.Service) {
	h.SetForecastServiceForOrg("default", svc)
}
// SetForecastServiceForOrg registers (or, for a nil service, removes) the
// trend forecast service for orgID. The legacy default-org field is updated
// inside the same intelligenceMu critical section — previously it was
// written after Unlock with no lock at all, racing with
// GetForecastServiceForOrg readers. Safe on a nil receiver.
func (h *AISettingsHandler) SetForecastServiceForOrg(orgID string, svc *forecast.Service) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if svc == nil {
		delete(h.forecastServices, orgID)
	} else {
		h.forecastServices[orgID] = svc
	}
	if orgID == "default" {
		// Keep the legacy default-org field in sync under the same lock.
		h.forecastService = svc
	}
}
// GetForecastService returns the default org's forecast service; it is a
// convenience wrapper around GetForecastServiceForOrg.
func (h *AISettingsHandler) GetForecastService() *forecast.Service {
	return h.GetForecastServiceForOrg("default")
}
// GetForecastServiceForOrg returns the forecast service for orgID, with the
// default org falling back to the legacy field. The whole lookup — including
// the legacy-field read, which previously happened after RUnlock with no
// lock — runs under intelligenceMu so it cannot race with
// SetForecastServiceForOrg. Safe on a nil receiver.
func (h *AISettingsHandler) GetForecastServiceForOrg(orgID string) *forecast.Service {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if svc := h.forecastServices[orgID]; svc != nil {
		return svc
	}
	if orgID == "default" {
		return h.forecastService
	}
	return nil
}
// normalizeAIIntelligenceOrgID trims surrounding whitespace from orgID and
// maps an empty result to the canonical "default" org.
func normalizeAIIntelligenceOrgID(orgID string) string {
	if trimmed := strings.TrimSpace(orgID); trimmed != "" {
		return trimmed
	}
	return "default"
}
// ensureIntelligenceMapsLocked lazily allocates any nil per-org intelligence
// map so callers can index into them safely. The caller must already hold
// intelligenceMu (write-locked).
func (h *AISettingsHandler) ensureIntelligenceMapsLocked() {
	if h.incidentStores == nil {
		h.incidentStores = make(map[string]*memory.IncidentStore)
	}
	if h.circuitBreakers == nil {
		h.circuitBreakers = make(map[string]*circuit.Breaker)
	}
	if h.learningStores == nil {
		h.learningStores = make(map[string]*learning.LearningStore)
	}
	if h.forecastServices == nil {
		h.forecastServices = make(map[string]*forecast.Service)
	}
	if h.proxmoxCorrelators == nil {
		h.proxmoxCorrelators = make(map[string]*proxmox.EventCorrelator)
	}
	if h.remediationEngines == nil {
		h.remediationEngines = make(map[string]aicontracts.RemediationEngine)
	}
	if h.discoveryStores == nil {
		h.discoveryStores = make(map[string]*servicediscovery.Store)
	}
	if h.unifiedStores == nil {
		h.unifiedStores = make(map[string]*unified.UnifiedStore)
	}
	if h.alertBridges == nil {
		h.alertBridges = make(map[string]*unified.AlertBridge)
	}
	if h.triggerManagers == nil {
		h.triggerManagers = make(map[string]*ai.TriggerManager)
	}
	if h.incidentCoordinators == nil {
		h.incidentCoordinators = make(map[string]*ai.IncidentCoordinator)
	}
	if h.incidentRecorders == nil {
		h.incidentRecorders = make(map[string]*metrics.IncidentRecorder)
	}
}
// SetProxmoxCorrelatorForOrg sets the Proxmox event correlator for an org.
// A nil correlator removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetProxmoxCorrelatorForOrg(orgID string, correlator *proxmox.EventCorrelator) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if correlator == nil {
		delete(h.proxmoxCorrelators, orgID)
	} else {
		h.proxmoxCorrelators[orgID] = correlator
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.proxmoxCorrelator = correlator
	}
}

// SetProxmoxCorrelator sets the Proxmox event correlator for the default org.
func (h *AISettingsHandler) SetProxmoxCorrelator(correlator *proxmox.EventCorrelator) {
	h.SetProxmoxCorrelatorForOrg("default", correlator)
}

// GetProxmoxCorrelatorForOrg returns the Proxmox event correlator for an org,
// falling back to the legacy default-org field when orgID is "default".
// Returns nil when none is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetProxmoxCorrelatorForOrg(orgID string) *proxmox.EventCorrelator {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if correlator := h.proxmoxCorrelators[orgID]; correlator != nil {
		return correlator
	}
	if orgID == "default" {
		return h.proxmoxCorrelator
	}
	return nil
}

// GetProxmoxCorrelator returns the Proxmox event correlator for the default org.
func (h *AISettingsHandler) GetProxmoxCorrelator() *proxmox.EventCorrelator {
	return h.GetProxmoxCorrelatorForOrg("default")
}
// SetRemediationEngine sets the remediation engine for the default org.
func (h *AISettingsHandler) SetRemediationEngine(engine aicontracts.RemediationEngine) {
	h.SetRemediationEngineForOrg("default", engine)
}

// SetRemediationEngineForOrg sets the remediation engine for an org.
// A nil engine removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetRemediationEngineForOrg(orgID string, engine aicontracts.RemediationEngine) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if engine == nil {
		delete(h.remediationEngines, orgID)
	} else {
		h.remediationEngines[orgID] = engine
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.remediationEngine = engine
	}
}

// GetRemediationEngine returns the remediation engine for the default org.
func (h *AISettingsHandler) GetRemediationEngine() aicontracts.RemediationEngine {
	return h.GetRemediationEngineForOrg("default")
}

// GetRemediationEngineForOrg returns the remediation engine for an org,
// falling back to the legacy default-org field when orgID is "default".
// Returns nil when none is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetRemediationEngineForOrg(orgID string) aicontracts.RemediationEngine {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if engine := h.remediationEngines[orgID]; engine != nil {
		return engine
	}
	if orgID == "default" {
		return h.remediationEngine
	}
	return nil
}
// SetUnifiedStore sets the unified store for the default org.
func (h *AISettingsHandler) SetUnifiedStore(store *unified.UnifiedStore) {
	h.SetUnifiedStoreForOrg("default", store)
}

// SetUnifiedStoreForOrg sets the unified store for an org.
// A nil store removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetUnifiedStoreForOrg(orgID string, store *unified.UnifiedStore) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if store == nil {
		delete(h.unifiedStores, orgID)
	} else {
		h.unifiedStores[orgID] = store
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.unifiedStore = store
	}
}

// GetUnifiedStore returns the unified store for the default org.
func (h *AISettingsHandler) GetUnifiedStore() *unified.UnifiedStore {
	return h.GetUnifiedStoreForOrg("default")
}

// GetUnifiedStoreForOrg returns the unified store for an org, falling back to
// the legacy default-org field when orgID is "default". Returns nil when none
// is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetUnifiedStoreForOrg(orgID string) *unified.UnifiedStore {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if store := h.unifiedStores[orgID]; store != nil {
		return store
	}
	if orgID == "default" {
		return h.unifiedStore
	}
	return nil
}
// SetDiscoveryStore sets the discovery store for the default org.
func (h *AISettingsHandler) SetDiscoveryStore(store *servicediscovery.Store) {
	h.SetDiscoveryStoreForOrg("default", store)
}

// SetDiscoveryStoreForOrg sets the discovery store for deep infrastructure discovery for an org.
// Unlike the other per-org setters, the legacy default-org field is guarded by
// h.stateMu (not intelligenceMu), and the store is additionally pushed into the
// org's AI service so discovery data is visible there. A nil store removes the
// org's map entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetDiscoveryStoreForOrg(orgID string, store *servicediscovery.Store) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	h.ensureIntelligenceMapsLocked()
	if store == nil {
		delete(h.discoveryStores, orgID)
	} else {
		h.discoveryStores[orgID] = store
	}
	h.intelligenceMu.Unlock()
	if orgID == "default" {
		// Legacy field and defaultAIService live under stateMu; take it
		// separately (never nested inside intelligenceMu).
		h.stateMu.Lock()
		h.discoveryStore = store
		defaultAIService := h.defaultAIService
		h.stateMu.Unlock()
		// Call the service outside the lock.
		if defaultAIService != nil {
			defaultAIService.SetDiscoveryStore(store)
		}
	}
	// Propagate to an org-scoped AI service, if one exists.
	h.aiServicesMu.RLock()
	svc := h.aiServices[orgID]
	h.aiServicesMu.RUnlock()
	if svc != nil {
		svc.SetDiscoveryStore(store)
	}
}

// GetDiscoveryStore returns the discovery store for the default org.
func (h *AISettingsHandler) GetDiscoveryStore() *servicediscovery.Store {
	return h.GetDiscoveryStoreForOrg("default")
}

// GetDiscoveryStoreForOrg returns the discovery store for an org.
// Falls back to the legacy default-org field (guarded by stateMu) when orgID
// is "default"; returns nil when none is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetDiscoveryStoreForOrg(orgID string) *servicediscovery.Store {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.RLock()
	if store := h.discoveryStores[orgID]; store != nil {
		h.intelligenceMu.RUnlock()
		return store
	}
	h.intelligenceMu.RUnlock()
	if orgID == "default" {
		h.stateMu.RLock()
		store := h.discoveryStore
		h.stateMu.RUnlock()
		return store
	}
	return nil
}
// SetAlertBridge sets the alert bridge for the default org.
func (h *AISettingsHandler) SetAlertBridge(bridge *unified.AlertBridge) {
	h.SetAlertBridgeForOrg("default", bridge)
}

// SetAlertBridgeForOrg sets the alert bridge for an org.
// A nil bridge removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetAlertBridgeForOrg(orgID string, bridge *unified.AlertBridge) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if bridge == nil {
		delete(h.alertBridges, orgID)
	} else {
		h.alertBridges[orgID] = bridge
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.alertBridge = bridge
	}
}

// GetAlertBridge returns the alert bridge for the default org.
func (h *AISettingsHandler) GetAlertBridge() *unified.AlertBridge {
	return h.GetAlertBridgeForOrg("default")
}

// GetAlertBridgeForOrg returns the alert bridge for an org, falling back to
// the legacy default-org field when orgID is "default". Returns nil when none
// is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetAlertBridgeForOrg(orgID string) *unified.AlertBridge {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if bridge := h.alertBridges[orgID]; bridge != nil {
		return bridge
	}
	if orgID == "default" {
		return h.alertBridge
	}
	return nil
}
// SetTriggerManager sets the event-driven patrol trigger manager for the default org.
func (h *AISettingsHandler) SetTriggerManager(tm *ai.TriggerManager) {
	h.SetTriggerManagerForOrg("default", tm)
}

// SetTriggerManagerForOrg sets the event-driven patrol trigger manager for an org.
// A nil manager removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetTriggerManagerForOrg(orgID string, tm *ai.TriggerManager) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if tm == nil {
		delete(h.triggerManagers, orgID)
	} else {
		h.triggerManagers[orgID] = tm
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.triggerManager = tm
	}
}

// GetTriggerManager returns the event-driven patrol trigger manager for the default org.
func (h *AISettingsHandler) GetTriggerManager() *ai.TriggerManager {
	return h.GetTriggerManagerForOrg("default")
}

// GetTriggerManagerForOrg returns the event-driven patrol trigger manager for
// an org, falling back to the legacy default-org field when orgID is
// "default". Returns nil when none is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetTriggerManagerForOrg(orgID string) *ai.TriggerManager {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if tm := h.triggerManagers[orgID]; tm != nil {
		return tm
	}
	if orgID == "default" {
		return h.triggerManager
	}
	return nil
}
// SetIncidentCoordinator sets the incident recording coordinator for the default org.
func (h *AISettingsHandler) SetIncidentCoordinator(coordinator *ai.IncidentCoordinator) {
	h.SetIncidentCoordinatorForOrg("default", coordinator)
}

// SetIncidentCoordinatorForOrg sets the incident recording coordinator for an org.
// A nil coordinator removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetIncidentCoordinatorForOrg(orgID string, coordinator *ai.IncidentCoordinator) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if coordinator == nil {
		delete(h.incidentCoordinators, orgID)
	} else {
		h.incidentCoordinators[orgID] = coordinator
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.incidentCoordinator = coordinator
	}
}

// GetIncidentCoordinator returns the incident recording coordinator for the default org.
func (h *AISettingsHandler) GetIncidentCoordinator() *ai.IncidentCoordinator {
	return h.GetIncidentCoordinatorForOrg("default")
}

// GetIncidentCoordinatorForOrg returns the incident recording coordinator for
// an org, falling back to the legacy default-org field when orgID is
// "default". Returns nil when none is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetIncidentCoordinatorForOrg(orgID string) *ai.IncidentCoordinator {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if coordinator := h.incidentCoordinators[orgID]; coordinator != nil {
		return coordinator
	}
	if orgID == "default" {
		return h.incidentCoordinator
	}
	return nil
}
// SetIncidentRecorder sets the high-frequency incident recorder for the default org.
func (h *AISettingsHandler) SetIncidentRecorder(recorder *metrics.IncidentRecorder) {
	h.SetIncidentRecorderForOrg("default", recorder)
}

// SetIncidentRecorderForOrg sets the high-frequency incident recorder for an org.
// A nil recorder removes the org's entry. Safe on a nil receiver.
func (h *AISettingsHandler) SetIncidentRecorderForOrg(orgID string, recorder *metrics.IncidentRecorder) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	h.intelligenceMu.Lock()
	defer h.intelligenceMu.Unlock()
	h.ensureIntelligenceMapsLocked()
	if recorder == nil {
		delete(h.incidentRecorders, orgID)
	} else {
		h.incidentRecorders[orgID] = recorder
	}
	// Keep the legacy default-org field in sync while still holding the
	// lock so concurrent readers never race with this write.
	if orgID == "default" {
		h.incidentRecorder = recorder
	}
}

// GetIncidentRecorder returns the high-frequency incident recorder for the default org.
func (h *AISettingsHandler) GetIncidentRecorder() *metrics.IncidentRecorder {
	return h.GetIncidentRecorderForOrg("default")
}

// GetIncidentRecorderForOrg returns the high-frequency incident recorder for
// an org, falling back to the legacy default-org field when orgID is
// "default". Returns nil when none is registered. Safe on a nil receiver.
func (h *AISettingsHandler) GetIncidentRecorderForOrg(orgID string) *metrics.IncidentRecorder {
	if h == nil {
		return nil
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	// Hold the read lock across the fallback read of the legacy field too.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	if recorder := h.incidentRecorders[orgID]; recorder != nil {
		return recorder
	}
	if orgID == "default" {
		return h.incidentRecorder
	}
	return nil
}
// ListLearningStores returns a snapshot of learning stores keyed by org.
// The legacy default-org field is used as a fallback when the map has no
// "default" entry. Safe on a nil receiver (returns an empty, non-nil map).
func (h *AISettingsHandler) ListLearningStores() map[string]*learning.LearningStore {
	out := make(map[string]*learning.LearningStore)
	if h == nil {
		return out
	}
	// Keep the read lock held through the legacy-field fallback so the read
	// cannot race with a concurrent setter.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	for orgID, store := range h.learningStores {
		if store != nil {
			out[orgID] = store
		}
	}
	if _, ok := out["default"]; !ok && h.learningStore != nil {
		out["default"] = h.learningStore
	}
	return out
}
// ListAlertBridges returns a snapshot of alert bridges keyed by org for
// shutdown/cleanup flows. The legacy default-org field is used as a fallback
// when the map has no "default" entry. Safe on a nil receiver.
func (h *AISettingsHandler) ListAlertBridges() map[string]*unified.AlertBridge {
	out := make(map[string]*unified.AlertBridge)
	if h == nil {
		return out
	}
	// Keep the read lock held through the legacy-field fallback so the read
	// cannot race with a concurrent setter.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	for orgID, bridge := range h.alertBridges {
		if bridge != nil {
			out[orgID] = bridge
		}
	}
	if _, ok := out["default"]; !ok && h.alertBridge != nil {
		out["default"] = h.alertBridge
	}
	return out
}
// ListTriggerManagers returns a snapshot of trigger managers keyed by org for
// shutdown/cleanup flows. The legacy default-org field is used as a fallback
// when the map has no "default" entry. Safe on a nil receiver.
func (h *AISettingsHandler) ListTriggerManagers() map[string]*ai.TriggerManager {
	out := make(map[string]*ai.TriggerManager)
	if h == nil {
		return out
	}
	// Keep the read lock held through the legacy-field fallback so the read
	// cannot race with a concurrent setter.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	for orgID, tm := range h.triggerManagers {
		if tm != nil {
			out[orgID] = tm
		}
	}
	if _, ok := out["default"]; !ok && h.triggerManager != nil {
		out["default"] = h.triggerManager
	}
	return out
}
// ListIncidentCoordinators returns a snapshot of incident coordinators keyed
// by org. The legacy default-org field is used as a fallback when the map has
// no "default" entry. Safe on a nil receiver.
func (h *AISettingsHandler) ListIncidentCoordinators() map[string]*ai.IncidentCoordinator {
	out := make(map[string]*ai.IncidentCoordinator)
	if h == nil {
		return out
	}
	// Keep the read lock held through the legacy-field fallback so the read
	// cannot race with a concurrent setter.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	for orgID, coordinator := range h.incidentCoordinators {
		if coordinator != nil {
			out[orgID] = coordinator
		}
	}
	if _, ok := out["default"]; !ok && h.incidentCoordinator != nil {
		out["default"] = h.incidentCoordinator
	}
	return out
}
// ListIncidentRecorders returns a snapshot of incident recorders keyed by org.
// The legacy default-org field is used as a fallback when the map has no
// "default" entry. Safe on a nil receiver.
func (h *AISettingsHandler) ListIncidentRecorders() map[string]*metrics.IncidentRecorder {
	out := make(map[string]*metrics.IncidentRecorder)
	if h == nil {
		return out
	}
	// Keep the read lock held through the legacy-field fallback so the read
	// cannot race with a concurrent setter.
	h.intelligenceMu.RLock()
	defer h.intelligenceMu.RUnlock()
	for orgID, recorder := range h.incidentRecorders {
		if recorder != nil {
			out[orgID] = recorder
		}
	}
	if _, ok := out["default"]; !ok && h.incidentRecorder != nil {
		out["default"] = h.incidentRecorder
	}
	return out
}
// StopPatrol stops the background AI patrol service for the default org and
// every org-scoped AI service.
func (h *AISettingsHandler) StopPatrol() {
	if h == nil {
		return
	}
	// Read defaultAIService under stateMu, consistent with the handler's
	// other accessors (e.g. SetLicenseHandlers, SetChatHandler).
	h.stateMu.RLock()
	defaultAIService := h.defaultAIService
	h.stateMu.RUnlock()
	if defaultAIService != nil {
		defaultAIService.StopPatrol()
	}
	// Snapshot the org services under the lock, then stop them outside it
	// so a slow StopPatrol cannot block other map accesses.
	h.aiServicesMu.RLock()
	services := make([]*ai.Service, 0, len(h.aiServices))
	for _, svc := range h.aiServices {
		if svc != nil {
			services = append(services, svc)
		}
	}
	h.aiServicesMu.RUnlock()
	for _, svc := range services {
		svc.StopPatrol()
	}
}
// StopPatrolForOrg stops patrol for a single org without affecting others.
// Safe on a nil receiver.
func (h *AISettingsHandler) StopPatrolForOrg(orgID string) {
	if h == nil {
		return
	}
	orgID = normalizeAIIntelligenceOrgID(orgID)
	if orgID == "default" {
		// Read defaultAIService under stateMu, consistent with the
		// handler's other accessors.
		h.stateMu.RLock()
		svc := h.defaultAIService
		h.stateMu.RUnlock()
		if svc != nil {
			svc.StopPatrol()
		}
		return
	}
	h.aiServicesMu.RLock()
	svc := h.aiServices[orgID]
	h.aiServicesMu.RUnlock()
	if svc != nil {
		svc.StopPatrol()
	}
}
// RemoveTenantIntelligence stops and removes org-scoped intelligence components.
// The "default" org is never removed. Components with background work (alert
// bridge, trigger manager, coordinator, recorder) are captured under the lock
// and stopped only after it is released, so Stop calls cannot block other
// users of intelligenceMu.
func (h *AISettingsHandler) RemoveTenantIntelligence(orgID string) {
	orgID = normalizeAIIntelligenceOrgID(orgID)
	if orgID == "default" {
		return
	}
	var (
		bridge      *unified.AlertBridge
		trigger     *ai.TriggerManager
		coordinator *ai.IncidentCoordinator
		recorder    *metrics.IncidentRecorder
	)
	h.intelligenceMu.Lock()
	// delete on a nil map is a no-op, so no ensureIntelligenceMapsLocked needed.
	delete(h.learningStores, orgID)
	delete(h.forecastServices, orgID)
	delete(h.remediationEngines, orgID)
	delete(h.incidentStores, orgID)
	delete(h.circuitBreakers, orgID)
	delete(h.discoveryStores, orgID)
	// Capture stoppable components before deleting their entries.
	bridge = h.alertBridges[orgID]
	trigger = h.triggerManagers[orgID]
	coordinator = h.incidentCoordinators[orgID]
	recorder = h.incidentRecorders[orgID]
	delete(h.unifiedStores, orgID)
	delete(h.alertBridges, orgID)
	delete(h.triggerManagers, orgID)
	delete(h.incidentCoordinators, orgID)
	delete(h.incidentRecorders, orgID)
	delete(h.proxmoxCorrelators, orgID)
	h.intelligenceMu.Unlock()
	// Stop outside the lock.
	if bridge != nil {
		bridge.Stop()
	}
	if trigger != nil {
		trigger.Stop()
	}
	if coordinator != nil {
		coordinator.Stop()
	}
	if recorder != nil {
		recorder.Stop()
	}
}
// GetAlertTriggeredAnalyzer returns the alert-triggered analyzer for wiring into alert callbacks.
// NOTE(review): assumes GetAIService never returns nil for the given context —
// confirm, otherwise this chain dereferences a nil service.
func (h *AISettingsHandler) GetAlertTriggeredAnalyzer(ctx context.Context) aicontracts.AlertAnalyzer {
	return h.GetAIService(ctx).GetAlertTriggeredAnalyzer()
}
// SetLicenseHandlers sets the license handlers for Pro feature gating and, when
// possible, immediately pushes a license checker into the default-org AI
// service. The handler reference and defaultAIService are accessed under
// stateMu; the service call itself happens outside the lock.
func (h *AISettingsHandler) SetLicenseHandlers(handlers *LicenseHandlers) {
	h.stateMu.Lock()
	h.licenseHandlers = handlers
	defaultAIService := h.defaultAIService
	h.stateMu.Unlock()
	if handlers == nil {
		return
	}
	// Update default-org service with the current license checker.
	// We can try to get it using background context (default tenant).
	// A resolution error is deliberately ignored: the checker is wired
	// lazily elsewhere when the tenant becomes available.
	if svc, _, err := handlers.getTenantComponents(context.Background()); err == nil {
		if defaultAIService != nil {
			defaultAIService.SetLicenseChecker(svc)
		}
	}
}
// SetOnModelChange sets a callback to be invoked when model settings change.
// Used by Router to trigger AI chat service restart.
// NOTE(review): the write is not synchronized — presumably called once during
// router setup before concurrent access begins; confirm.
func (h *AISettingsHandler) SetOnModelChange(callback func()) {
	h.onModelChange = callback
}
// shouldRestartAIChat reports whether the update request touches any setting
// that requires restarting the AI chat service: the enabled flag, any model
// selection, the auth method, any provider credential/endpoint, or any of the
// corresponding clear flags. A field counts as "touched" when its pointer is
// non-nil.
func shouldRestartAIChat(req AISettingsUpdateRequest) bool {
	switch {
	// Enablement, model selection and auth method.
	case req.Enabled != nil,
		req.Model != nil,
		req.ChatModel != nil,
		req.PatrolModel != nil,
		req.AutoFixModel != nil,
		req.AuthMethod != nil:
		return true
	// Provider API keys.
	case req.AnthropicAPIKey != nil,
		req.OpenAIAPIKey != nil,
		req.OpenRouterAPIKey != nil,
		req.DeepSeekAPIKey != nil,
		req.GeminiAPIKey != nil:
		return true
	// Provider endpoints and Ollama credentials.
	case req.OllamaBaseURL != nil,
		req.OllamaUsername != nil,
		req.OllamaPassword != nil,
		req.OpenAIBaseURL != nil:
		return true
	// Explicit clear flags.
	case req.ClearAnthropicKey != nil,
		req.ClearOpenAIKey != nil,
		req.ClearOpenRouterKey != nil,
		req.ClearDeepSeekKey != nil,
		req.ClearGeminiKey != nil,
		req.ClearOllamaURL != nil,
		req.ClearOllamaUsername != nil,
		req.ClearOllamaPassword != nil:
		return true
	}
	return false
}
// SetOnControlSettingsChange sets a callback to be invoked when control settings change.
// Used by Router to update MCP tool visibility without restarting AI chat.
// NOTE(review): the write is not synchronized — presumably called once during
// router setup before concurrent access begins; confirm.
func (h *AISettingsHandler) SetOnControlSettingsChange(callback func()) {
	h.onControlSettingsChange = callback
}
// SetChatHandler sets the chat handler for investigation orchestration.
// This enables the patrol service to spawn chat sessions to investigate findings.
// It also eagerly (re)wires investigation orchestrators for the default org and
// any already-registered per-org services when investigation is enabled.
func (h *AISettingsHandler) SetChatHandler(chatHandler *AIHandler) {
	h.stateMu.Lock()
	h.chatHandler = chatHandler
	defaultAIService := h.defaultAIService
	h.stateMu.Unlock()
	// Lazily allocate the per-org investigation store map.
	h.investigationMu.Lock()
	if h.investigationStores == nil {
		h.investigationStores = make(map[string]aicontracts.InvestigationStore)
	}
	h.investigationMu.Unlock()
	// Wire up orchestrator for the default-org service.
	// Note: This usually fails because chat service isn't started yet.
	// The orchestrator will be wired via WireOrchestratorAfterChatStart() instead.
	if defaultAIService != nil && isAIInvestigationEnabled() {
		h.setupInvestigationOrchestrator("default", defaultAIService)
	}
	// Wire up orchestrator for any existing services
	if isAIInvestigationEnabled() {
		h.aiServicesMu.RLock()
		for orgID, svc := range h.aiServices {
			h.setupInvestigationOrchestrator(orgID, svc)
		}
		h.aiServicesMu.RUnlock()
	}
}
// WireOrchestratorAfterChatStart is called after the chat service is started
// to wire up the investigation orchestrator. This must be called after aiHandler.Start()
// because the orchestrator needs an active chat service.
// No-ops (with a log line) when investigation is not enabled or when no chat
// handler has been set via SetChatHandler.
func (h *AISettingsHandler) WireOrchestratorAfterChatStart() {
	if !isAIInvestigationEnabled() {
		log.Info().Msg("WireOrchestratorAfterChatStart skipped (requires Pulse Pro)")
		return
	}
	// Snapshot handler state under stateMu.
	h.stateMu.RLock()
	hasChatHandler := h.chatHandler != nil
	defaultAIService := h.defaultAIService
	h.stateMu.RUnlock()
	if !hasChatHandler {
		log.Warn().Msg("WireOrchestratorAfterChatStart called but chatHandler is nil")
		return
	}
	// Wire up orchestrator for the default-org service.
	if defaultAIService != nil {
		h.setupInvestigationOrchestrator("default", defaultAIService)
	}
	// Wire up orchestrator for any existing services
	h.aiServicesMu.RLock()
	for orgID, svc := range h.aiServices {
		h.setupInvestigationOrchestrator(orgID, svc)
	}
	h.aiServicesMu.RUnlock()
}
// setupInvestigationOrchestrator creates and wires the investigation orchestrator
// for an AI service. It resolves (or lazily creates and loads) the org's
// investigation store, obtains the org's chat service, builds the adapter set
// required by the orchestrator factory, and installs the resulting orchestrator
// on the patrol service. Any missing prerequisite (factory, chat handler,
// patrol service, store, chat service) causes an early return with a log line.
func (h *AISettingsHandler) setupInvestigationOrchestrator(orgID string, svc *ai.Service) {
	// Check factory exists first — if nil, this is OSS (no orchestrator available).
	// Clear any stale orchestrator from a prior setup so the patrol service
	// doesn't keep using a removed enterprise component.
	factory := getCreateInvestigationOrchestrator()
	if factory == nil {
		if patrol := svc.GetPatrolService(); patrol != nil {
			patrol.SetInvestigationOrchestrator(nil)
		}
		log.Debug().Str("orgID", orgID).Msg("Investigation orchestrator factory not registered (requires Pulse Pro)")
		return
	}
	h.stateMu.RLock()
	chatHandler := h.chatHandler
	h.stateMu.RUnlock()
	if chatHandler == nil {
		log.Debug().Str("orgID", orgID).Msg("Chat handler not set, skipping orchestrator setup")
		return
	}
	patrol := svc.GetPatrolService()
	if patrol == nil {
		log.Debug().Str("orgID", orgID).Msg("Patrol service not available, skipping orchestrator setup")
		return
	}
	// Get or create investigation store for this org
	h.investigationMu.Lock()
	store, exists := h.investigationStores[orgID]
	if !exists {
		// Get data directory from persistence: the default org uses the
		// default persistence, other orgs go through multi-tenant persistence.
		var dataDir string
		mtPersistence, _, _, defaultPersistence, _, _ := h.stateRefs()
		if defaultPersistence != nil && orgID == "default" {
			dataDir = defaultPersistence.DataDir()
		} else if mtPersistence != nil {
			if p, err := mtPersistence.GetPersistence(orgID); err == nil {
				dataDir = p.DataDir()
			}
		}
		if storeFactory := getCreateInvestigationStore(); storeFactory != nil {
			store = storeFactory(dataDir)
		}
		if store == nil {
			log.Warn().Str("orgID", orgID).Msg("Investigation store not available (requires Pulse Pro)")
			h.investigationMu.Unlock()
			return
		}
		// A load failure is non-fatal: start with an empty store.
		if err := store.LoadFromDisk(); err != nil {
			log.Warn().Err(err).Str("orgID", orgID).Msg("Failed to load investigation store")
		}
		h.investigationStores[orgID] = store
	}
	h.investigationMu.Unlock()
	// Get chat service for this org using org-scoped context
	ctx := context.WithValue(context.Background(), OrgIDContextKey, orgID)
	chatSvc := chatHandler.GetService(ctx)
	if chatSvc == nil {
		log.Warn().Str("orgID", orgID).Msg("Chat service not available for orchestrator")
		return
	}
	// Create chat adapter - need to cast to *chat.Service
	chatService, ok := chatSvc.(*chat.Service)
	if !ok {
		log.Warn().Str("orgID", orgID).Msg("Chat service is not *chat.Service, cannot create adapter")
		return
	}
	// Mirror default-org router wiring for per-org services so patrol/investigation
	// executions always use the chat backend path with mid-run budget enforcement.
	svc.SetChatService(&chatServiceAdapter{svc: chatService})
	chatService.SetBudgetChecker(func() error {
		return svc.CheckBudget("patrol")
	})
	// Build local adapters that implement aicontracts.Orchestrator* interfaces
	chatAdapter := &orchestratorChatAdapter{svc: chatService}
	// Create findings store adapter
	findingsStore := patrol.GetFindings()
	if findingsStore == nil {
		log.Warn().Str("orgID", orgID).Msg("Findings store not available for orchestrator")
		return
	}
	findingsAdapter := &orchestratorFindingsAdapter{store: &findingsStoreWrapper{store: findingsStore}}
	// Create approval adapter from the global approval store
	var approvalAdapter aicontracts.OrchestratorApprovalStore
	if approvalStoreInst := approval.GetStore(); approvalStoreInst != nil {
		approvalAdapter = &orchestratorApprovalAdapter{store: approvalStoreInst, orgID: approval.NormalizeOrgID(orgID)}
	}
	// Get config for investigation settings; fall back to contract defaults
	// when the service has no config.
	cfg := svc.GetConfig()
	invConfig := aicontracts.DefaultInvestigationConfig()
	if cfg != nil {
		invConfig.MaxTurns = cfg.GetPatrolInvestigationBudget()
		invConfig.Timeout = cfg.GetPatrolInvestigationTimeout()
	}
	// Wire up discovery context to the knowledge store for infrastructure context
	var infraContext aicontracts.OrchestratorInfraContextProvider
	if knowledgeStore := svc.GetKnowledgeStore(); knowledgeStore != nil {
		if discoveryService := svc.GetDiscoveryService(); discoveryService != nil {
			knowledgeStore.SetDiscoveryContextProvider(func() string {
				discoveries, err := discoveryService.ListDiscoveries()
				if err != nil || len(discoveries) == 0 {
					return ""
				}
				return servicediscovery.FormatForAIContext(discoveries)
			})
			// Resource-scoped variant: only include discoveries relevant to
			// the given resource IDs.
			knowledgeStore.SetDiscoveryContextProviderForResources(func(resourceIDs []string) string {
				if len(resourceIDs) == 0 {
					return ""
				}
				discoveries, err := discoveryService.ListDiscoveries()
				if err != nil || len(discoveries) == 0 {
					return ""
				}
				filtered := servicediscovery.FilterDiscoveriesByResourceIDs(discoveries, resourceIDs)
				return servicediscovery.FormatForAIContext(filtered)
			})
		}
		infraContext = knowledgeStore
	}
	// Build deps struct and call factory
	deps := aicontracts.OrchestratorDeps{
		ChatService:   chatAdapter,
		CmdExecutor:   chatAdapter, // ChatServiceAdapter implements both interfaces
		Store:         store,
		FindingsStore: findingsAdapter,
		ApprovalStore: approvalAdapter,
		Config:        invConfig,
		InfraContext:  infraContext,
		Autonomy:      &autonomyLevelProviderAdapter{svc: svc},
		FixVerifier:   &patrolFixVerifierAdapter{patrol: patrol},
		License:       &licenseCheckerForOrchestrator{svc: svc},
		Metrics:       &patrolMetricsCallbackAdapter{},
	}
	orchestrator := factory(deps)
	if orchestrator == nil {
		log.Warn().Str("orgID", orgID).Msg("Investigation orchestrator factory returned nil")
		patrol.SetInvestigationOrchestrator(nil)
		return
	}
	// Set directly on patrol service — factory returns the interface
	patrol.SetInvestigationOrchestrator(orchestrator)
	log.Info().Str("orgID", orgID).Msg("Investigation orchestrator configured for patrol service")
}
// ---------------------------------------------------------------------------
// Local adapters — bridge between OSS singletons and aicontracts interfaces
// ---------------------------------------------------------------------------
// orchestratorChatAdapter wraps *chat.Service to implement
// aicontracts.OrchestratorChatService and OrchestratorCommandExecutor.
// All methods tolerate a nil underlying service and return an error (or a
// zero value) instead of panicking.
type orchestratorChatAdapter struct {
	svc *chat.Service
}

// CreateSession creates a new chat session and returns its ID.
func (a *orchestratorChatAdapter) CreateSession(ctx context.Context) (*aicontracts.OrchestratorChatSession, error) {
	if a.svc == nil {
		return nil, fmt.Errorf("chat service is nil")
	}
	session, err := a.svc.CreateSession(ctx)
	if err != nil {
		return nil, err
	}
	return &aicontracts.OrchestratorChatSession{ID: session.ID}, nil
}

// ExecuteStream runs a prompt against the chat service, translating stream
// events into the orchestrator's event type.
func (a *orchestratorChatAdapter) ExecuteStream(ctx context.Context, req aicontracts.OrchestratorExecuteRequest, callback aicontracts.OrchestratorStreamCallback) error {
	if a.svc == nil {
		return fmt.Errorf("chat service is nil")
	}
	if !a.svc.IsRunning() {
		return fmt.Errorf("chat service is not running")
	}
	chatReq := chat.ExecuteRequest{
		Prompt:         req.Prompt,
		SessionID:      req.SessionID,
		MaxTurns:       req.MaxTurns,
		AutonomousMode: req.AutonomousMode,
	}
	return a.svc.ExecuteStream(ctx, chatReq, func(event chat.StreamEvent) {
		callback(aicontracts.OrchestratorStreamEvent{
			Type: event.Type,
			Data: event.Data,
		})
	})
}

// GetMessages returns a session's messages converted to orchestrator types,
// including tool calls and tool results.
func (a *orchestratorChatAdapter) GetMessages(ctx context.Context, sessionID string) ([]aicontracts.OrchestratorMessage, error) {
	if a.svc == nil {
		return nil, fmt.Errorf("chat service is nil")
	}
	chatMessages, err := a.svc.GetMessages(ctx, sessionID)
	if err != nil {
		return nil, err
	}
	messages := make([]aicontracts.OrchestratorMessage, len(chatMessages))
	for i, msg := range chatMessages {
		m := aicontracts.OrchestratorMessage{
			ID:               msg.ID,
			Role:             msg.Role,
			Content:          msg.Content,
			ReasoningContent: msg.ReasoningContent,
			Timestamp:        msg.Timestamp,
		}
		for _, tc := range msg.ToolCalls {
			m.ToolCalls = append(m.ToolCalls, aicontracts.OrchestratorToolCallInfo{
				ID:    tc.ID,
				Name:  tc.Name,
				Input: tc.Input,
			})
		}
		if msg.ToolResult != nil {
			m.ToolResult = &aicontracts.OrchestratorToolResultInfo{
				ToolUseID: msg.ToolResult.ToolUseID,
				Content:   msg.ToolResult.Content,
				IsError:   msg.ToolResult.IsError,
			}
		}
		messages[i] = m.NormalizeCollections()
	}
	return messages, nil
}

// DeleteSession removes a chat session.
func (a *orchestratorChatAdapter) DeleteSession(ctx context.Context, sessionID string) error {
	if a.svc == nil {
		return fmt.Errorf("chat service is nil")
	}
	return a.svc.DeleteSession(ctx, sessionID)
}

// ListAvailableTools returns the tool names available for a prompt, or nil
// when no service is configured.
func (a *orchestratorChatAdapter) ListAvailableTools(ctx context.Context, prompt string) []string {
	if a.svc == nil {
		return nil
	}
	return a.svc.ListAvailableTools(ctx, prompt)
}

// SetAutonomousMode toggles autonomous mode on the underlying service.
func (a *orchestratorChatAdapter) SetAutonomousMode(enabled bool) {
	if a.svc != nil {
		a.svc.SetAutonomousMode(enabled)
	}
}

// ExecuteCommand runs a shell command on the target host via the chat
// service, returning output, exit code and error.
func (a *orchestratorChatAdapter) ExecuteCommand(ctx context.Context, command, targetHost string) (string, int, error) {
	if a.svc == nil {
		return "", -1, fmt.Errorf("chat service not available")
	}
	return a.svc.ExecuteCommand(ctx, command, targetHost)
}
// orchestratorFindingsAdapter wraps findingsStoreWrapper to implement
// aicontracts.OrchestratorFindingsStore.
type orchestratorFindingsAdapter struct {
	store *findingsStoreWrapper
}

// Get looks up a finding by ID and converts it to the orchestrator's
// aicontracts.Finding shape; returns nil when the store is missing or the ID
// is unknown.
func (a *orchestratorFindingsAdapter) Get(id string) *aicontracts.Finding {
	if a.store == nil {
		return nil
	}
	src := a.store.Get(id)
	if src == nil {
		return nil
	}
	out := &aicontracts.Finding{
		ID:                     src.GetID(),
		Severity:               src.GetSeverity(),
		Category:               src.GetCategory(),
		ResourceID:             src.GetResourceID(),
		ResourceName:           src.GetResourceName(),
		ResourceType:           src.GetResourceType(),
		Title:                  src.GetTitle(),
		Description:            src.GetDescription(),
		Recommendation:         src.GetRecommendation(),
		Evidence:               src.GetEvidence(),
		InvestigationSessionID: src.GetInvestigationSessionID(),
		InvestigationStatus:    src.GetInvestigationStatus(),
		InvestigationOutcome:   src.GetInvestigationOutcome(),
		LastInvestigatedAt:     src.GetLastInvestigatedAt(),
		InvestigationAttempts:  src.GetInvestigationAttempts(),
	}
	return out
}

// Update writes a finding's investigation fields back to the store; reports
// false when the store or finding is absent.
func (a *orchestratorFindingsAdapter) Update(f *aicontracts.Finding) bool {
	if f == nil || a.store == nil {
		return false
	}
	return a.store.UpdateInvestigation(
		f.ID,
		f.InvestigationSessionID,
		f.InvestigationStatus,
		f.InvestigationOutcome,
		f.LastInvestigatedAt,
		f.InvestigationAttempts,
	)
}
// orchestratorApprovalAdapter wraps *approval.Store to implement
// aicontracts.OrchestratorApprovalStore.
type orchestratorApprovalAdapter struct {
	store *approval.Store
	orgID string
}

// Create records an approval request for an investigation fix.
// Both a nil store and a nil approval are treated as a no-op (matching the
// sibling adapters' tolerant nil handling) rather than panicking.
func (a *orchestratorApprovalAdapter) Create(appr *aicontracts.OrchestratorApproval) error {
	if a.store == nil || appr == nil {
		return nil
	}
	// Map the orchestrator's string risk level onto the approval package's
	// enum; unknown values fall back to low risk.
	riskLevel := approval.RiskLow
	switch appr.RiskLevel {
	case "low":
		riskLevel = approval.RiskLow
	case "medium":
		riskLevel = approval.RiskMedium
	case "high", "critical":
		riskLevel = approval.RiskHigh
	}
	req := &approval.ApprovalRequest{
		OrgID:      a.orgID,
		ID:         appr.ID,
		ToolID:     "investigation_fix",
		Command:    appr.Command,
		TargetType: "investigation",
		TargetID:   appr.FindingID,
		TargetName: strings.TrimSpace(appr.TargetHost),
		Context:    "Automated fix from patrol investigation: " + appr.Description,
		RiskLevel:  riskLevel,
	}
	// Fall back to the description when no target host was supplied.
	if req.TargetName == "" {
		req.TargetName = appr.Description
	}
	return a.store.CreateApproval(req)
}
// patrolFixVerifierAdapter wraps *ai.PatrolService to implement
// aicontracts.OrchestratorFixVerifier.
type patrolFixVerifierAdapter struct {
	patrol *ai.PatrolService
}

// VerifyFixResolved asks the patrol service whether the finding's underlying
// issue is resolved. Guards against a nil patrol service or nil finding
// (matching the nil tolerance of the sibling adapters) instead of panicking.
func (v *patrolFixVerifierAdapter) VerifyFixResolved(ctx context.Context, finding *aicontracts.Finding) (bool, error) {
	if v.patrol == nil || finding == nil {
		return false, fmt.Errorf("fix verifier not available")
	}
	return v.patrol.VerifyFixResolved(ctx, finding.ResourceID, finding.ResourceType, finding.Key, finding.ID)
}
// patrolMetricsCallbackAdapter implements aicontracts.OrchestratorMetricsCallback
// by delegating to the global PatrolMetrics singleton.
type patrolMetricsCallbackAdapter struct{}

// RecordInvestigationOutcome forwards an investigation outcome label to the
// global patrol metrics.
func (c *patrolMetricsCallbackAdapter) RecordInvestigationOutcome(outcome string) {
	ai.GetPatrolMetrics().RecordInvestigationOutcome(outcome)
}

// RecordFixVerification forwards a fix-verification result label to the
// global patrol metrics.
func (c *patrolMetricsCallbackAdapter) RecordFixVerification(result string) {
	ai.GetPatrolMetrics().RecordFixVerification(result)
}
// licenseCheckerForOrchestrator adapts *ai.Service to aicontracts.OrchestratorLicenseChecker.
type licenseCheckerForOrchestrator struct {
	svc *ai.Service
}

// HasFeature reports whether the license grants the named feature.
// A nil service means no license information is available, so it reports
// false instead of panicking (consistent with the other nil-tolerant adapters).
func (l *licenseCheckerForOrchestrator) HasFeature(feature string) bool {
	if l.svc == nil {
		return false
	}
	return l.svc.HasLicenseFeature(feature)
}
// findingsStoreWrapper wraps *ai.FindingsStore to implement aicontracts.OrchestratorAIFindingsStore
type findingsStoreWrapper struct {
	store *ai.FindingsStore
}

// Get looks up a finding by ID. The explicit nil check on the concrete
// pointer before returning it as an interface avoids handing callers a
// non-nil interface that wraps a nil value.
func (w *findingsStoreWrapper) Get(id string) aicontracts.OrchestratorAIFinding {
	if w.store == nil {
		return nil
	}
	finding := w.store.Get(id)
	if finding == nil {
		return nil
	}
	return finding
}

// UpdateInvestigation forwards investigation-state updates to the underlying
// store; it reports false when no store is configured.
func (w *findingsStoreWrapper) UpdateInvestigation(id, sessionID, status, outcome string, lastInvestigatedAt *time.Time, attempts int) bool {
	if w.store == nil {
		return false
	}
	return w.store.UpdateInvestigation(id, sessionID, status, outcome, lastInvestigatedAt, attempts)
}
// autonomyLevelProviderAdapter provides current autonomy level from config for re-checking before fix execution
type autonomyLevelProviderAdapter struct {
	svc *ai.Service
}

// GetCurrentAutonomyLevel returns the effective patrol autonomy level, or the
// monitor-only level when no service is configured.
func (a *autonomyLevelProviderAdapter) GetCurrentAutonomyLevel() string {
	if a.svc != nil {
		return a.svc.GetEffectivePatrolAutonomyLevel()
	}
	return config.PatrolAutonomyMonitor
}

// IsFullModeUnlocked reports whether full autonomy mode has been unlocked in
// the service's config; false when the service or its config is missing.
func (a *autonomyLevelProviderAdapter) IsFullModeUnlocked() bool {
	if a.svc == nil {
		return false
	}
	if cfg := a.svc.GetConfig(); cfg != nil {
		return cfg.PatrolFullModeUnlocked
	}
	return false
}
// AISettingsResponse is returned by GET /api/settings/ai
// API keys are masked for security: only per-provider "configured" booleans
// are exposed, never secret material.
type AISettingsResponse struct {
	Enabled       bool   `json:"enabled"`
	Model         string `json:"model"`
	ChatModel     string `json:"chat_model,omitempty"`    // Model for interactive chat (empty = use default)
	PatrolModel   string `json:"patrol_model,omitempty"`  // Model for patrol (empty = use default)
	AutoFixModel  string `json:"auto_fix_model,omitempty"` // Model for auto-fix (empty = use patrol model)
	Configured    bool   `json:"configured"`     // true if AI is ready to use
	CustomContext string `json:"custom_context"` // user-provided infrastructure context
	// OAuth fields for Claude Pro/Max subscription authentication
	AuthMethod     string `json:"auth_method"`     // "api_key" or "oauth"
	OAuthConnected bool   `json:"oauth_connected"` // true if OAuth tokens are configured
	// Patrol settings for token efficiency
	PatrolIntervalMinutes        int                   `json:"patrol_interval_minutes"`         // Patrol interval in minutes (0 = disabled)
	PatrolEnabled                bool                  `json:"patrol_enabled"`                  // true if patrol is enabled
	PatrolAutoFix                bool                  `json:"patrol_auto_fix"`                 // true if patrol can auto-fix issues
	AlertTriggeredAnalysis       bool                  `json:"alert_triggered_analysis"`        // true if AI analyzes when alerts fire
	PatrolEventTriggersEnabled   bool                  `json:"patrol_event_triggers_enabled"`   // Legacy aggregate flag; true when any scoped Patrol trigger source is enabled
	PatrolAlertTriggersEnabled   bool                  `json:"patrol_alert_triggers_enabled"`   // true if alert-driven scoped Patrol triggers are enabled
	PatrolAnomalyTriggersEnabled bool                  `json:"patrol_anomaly_triggers_enabled"` // true if anomaly-driven scoped Patrol triggers are enabled
	UseProactiveThresholds       bool                  `json:"use_proactive_thresholds"`        // true if patrol warns before thresholds (false = use exact thresholds)
	AvailableModels              []providers.ModelInfo `json:"available_models"`                // List of models for current provider
	// Multi-provider credentials - shows which providers are configured
	AnthropicConfigured bool     `json:"anthropic_configured"` // true if Anthropic API key or OAuth is set
	OpenAIConfigured    bool     `json:"openai_configured"`    // true if OpenAI API key is set
	OpenRouterConfigured bool    `json:"openrouter_configured"` // true if OpenRouter API key is set
	DeepSeekConfigured  bool     `json:"deepseek_configured"`  // true if DeepSeek API key is set
	GeminiConfigured    bool     `json:"gemini_configured"`    // true if Gemini API key is set
	OllamaConfigured    bool     `json:"ollama_configured"`    // true (always available for attempt)
	OllamaBaseURL       string   `json:"ollama_base_url"`      // Ollama server URL
	OllamaUsername      string   `json:"ollama_username,omitempty"` // Optional Basic Auth username for Ollama
	OllamaPasswordSet   bool     `json:"ollama_password_set"`  // true if an Ollama password is stored
	OpenAIBaseURL       string   `json:"openai_base_url,omitempty"` // Custom OpenAI base URL
	ConfiguredProviders []string `json:"configured_providers"` // List of provider names with credentials
	// Cost controls
	CostBudgetUSD30d float64 `json:"cost_budget_usd_30d,omitempty"` // 30-day cost budget in USD; omitted from JSON when zero
	// Request timeout (seconds) - for slow hardware running local models
	RequestTimeoutSeconds int `json:"request_timeout_seconds,omitempty"`
	// Infrastructure control settings
	ControlLevel    string   `json:"control_level"`    // "read_only", "controlled", "autonomous"
	ProtectedGuests []string `json:"protected_guests"` // VMIDs/names that AI cannot control
	// Discovery settings
	DiscoveryEnabled       bool `json:"discovery_enabled"`                // true if discovery is enabled
	DiscoveryIntervalHours int  `json:"discovery_interval_hours,omitempty"` // Hours between auto-scans (0 = manual only)
	// Quickstart credits
	QuickstartCreditsTotal     int    `json:"quickstart_credits_total"`     // Total credits granted (25)
	QuickstartCreditsUsed      int    `json:"quickstart_credits_used"`      // Credits consumed
	QuickstartCreditsRemaining int    `json:"quickstart_credits_remaining"` // Credits remaining
	QuickstartCreditsAvailable bool   `json:"quickstart_credits_available"` // true if quickstart credits are usable
	UsingQuickstart            bool   `json:"using_quickstart"`             // true if currently using quickstart provider
	QuickstartBlockedReason    string `json:"quickstart_blocked_reason,omitempty"` // canonical reason when quickstart is not currently usable
}
// EmptyAISettingsResponse returns a zero-valued settings response whose
// slice fields have been normalized to empty (non-nil) collections, so they
// encode as JSON arrays rather than null.
func EmptyAISettingsResponse() AISettingsResponse {
	var resp AISettingsResponse
	return resp.NormalizeCollections()
}
// NormalizeCollections replaces nil slice fields with empty slices so they
// marshal to JSON arrays ([]) instead of null, and returns the adjusted copy.
func (r AISettingsResponse) NormalizeCollections() AISettingsResponse {
	out := r
	if out.AvailableModels == nil {
		out.AvailableModels = make([]providers.ModelInfo, 0)
	}
	if out.ConfiguredProviders == nil {
		out.ConfiguredProviders = make([]string, 0)
	}
	if out.ProtectedGuests == nil {
		out.ProtectedGuests = make([]string, 0)
	}
	return out
}
// AISettingsUpdateRequest is the request body for PUT /api/settings/ai
// All fields are pointers (or nil-able slices) so that absent fields mean
// "leave unchanged"; only non-nil values are applied.
type AISettingsUpdateRequest struct {
	Enabled       *bool   `json:"enabled,omitempty"`
	Model         *string `json:"model,omitempty"`
	ChatModel     *string `json:"chat_model,omitempty"`     // Model for interactive chat
	PatrolModel   *string `json:"patrol_model,omitempty"`   // Model for background patrol
	AutoFixModel  *string `json:"auto_fix_model,omitempty"` // Model for auto-fix remediation
	CustomContext *string `json:"custom_context,omitempty"` // user-provided infrastructure context
	AuthMethod    *string `json:"auth_method,omitempty"`    // "api_key" or "oauth"
	// Patrol settings for token efficiency
	PatrolIntervalMinutes        *int  `json:"patrol_interval_minutes,omitempty"`         // Custom interval in minutes (0 = disabled, minimum 10)
	PatrolEnabled                *bool `json:"patrol_enabled,omitempty"`                  // true if patrol is enabled
	PatrolAutoFix                *bool `json:"patrol_auto_fix,omitempty"`                 // true if patrol can auto-fix issues
	AlertTriggeredAnalysis       *bool `json:"alert_triggered_analysis,omitempty"`        // true if AI analyzes when alerts fire
	PatrolEventTriggersEnabled   *bool `json:"patrol_event_triggers_enabled,omitempty"`   // Legacy aggregate update; applies to both scoped Patrol trigger sources
	PatrolAlertTriggersEnabled   *bool `json:"patrol_alert_triggers_enabled,omitempty"`   // true if alert-driven scoped Patrol triggers are enabled
	PatrolAnomalyTriggersEnabled *bool `json:"patrol_anomaly_triggers_enabled,omitempty"` // true if anomaly-driven scoped Patrol triggers are enabled
	UseProactiveThresholds       *bool `json:"use_proactive_thresholds,omitempty"`        // true if patrol warns before thresholds (default: false = exact thresholds)
	// Multi-provider credentials
	AnthropicAPIKey  *string `json:"anthropic_api_key,omitempty"`  // Set Anthropic API key
	OpenAIAPIKey     *string `json:"openai_api_key,omitempty"`     // Set OpenAI API key
	OpenRouterAPIKey *string `json:"openrouter_api_key,omitempty"` // Set OpenRouter API key
	DeepSeekAPIKey   *string `json:"deepseek_api_key,omitempty"`   // Set DeepSeek API key
	GeminiAPIKey     *string `json:"gemini_api_key,omitempty"`     // Set Gemini API key
	OllamaBaseURL    *string `json:"ollama_base_url,omitempty"`    // Set Ollama server URL
	OllamaUsername   *string `json:"ollama_username,omitempty"`    // Set Ollama Basic Auth username
	OllamaPassword   *string `json:"ollama_password,omitempty"`    // Set Ollama Basic Auth password
	OpenAIBaseURL    *string `json:"openai_base_url,omitempty"`    // Set custom OpenAI base URL
	// Clear flags for removing credentials (take priority over setting new values)
	ClearAnthropicKey   *bool `json:"clear_anthropic_key,omitempty"`   // Clear Anthropic API key
	ClearOpenAIKey      *bool `json:"clear_openai_key,omitempty"`      // Clear OpenAI API key
	ClearOpenRouterKey  *bool `json:"clear_openrouter_key,omitempty"`  // Clear OpenRouter API key
	ClearDeepSeekKey    *bool `json:"clear_deepseek_key,omitempty"`    // Clear DeepSeek API key
	ClearGeminiKey      *bool `json:"clear_gemini_key,omitempty"`      // Clear Gemini API key
	ClearOllamaURL      *bool `json:"clear_ollama_url,omitempty"`      // Clear Ollama URL
	ClearOllamaUsername *bool `json:"clear_ollama_username,omitempty"` // Clear Ollama Basic Auth username
	ClearOllamaPassword *bool `json:"clear_ollama_password,omitempty"` // Clear Ollama Basic Auth password
	// Cost controls
	CostBudgetUSD30d *float64 `json:"cost_budget_usd_30d,omitempty"` // 30-day cost budget in USD (must be >= 0)
	// Request timeout (seconds) - for slow hardware running local models
	RequestTimeoutSeconds *int `json:"request_timeout_seconds,omitempty"`
	// Infrastructure control settings
	ControlLevel    *string  `json:"control_level,omitempty"`    // "read_only", "controlled", "autonomous"
	ProtectedGuests []string `json:"protected_guests,omitempty"` // VMIDs/names that AI cannot control (nil = don't update, empty = clear)
	// Discovery settings
	DiscoveryEnabled       *bool `json:"discovery_enabled,omitempty"`        // Enable discovery
	DiscoveryIntervalHours *int  `json:"discovery_interval_hours,omitempty"` // Hours between auto-scans (0 = manual only)
}
// populateQuickstartFields fills quickstart credit info on an AISettingsResponse.
// It is a no-op when no AI service or quickstart credit manager is available.
func (h *AISettingsHandler) populateQuickstartFields(ctx context.Context, resp *AISettingsResponse) {
	aiSvc := h.GetAIService(ctx)
	if aiSvc == nil {
		return
	}
	credits := aiSvc.GetQuickstartCredits()
	if credits == nil {
		return
	}
	// Bootstrap failures are non-fatal; the error only influences the
	// blocked-reason selection below.
	bootstrapErr := credits.EnsureBootstrap(ctx)
	if bootstrapErr != nil {
		log.Debug().Err(bootstrapErr).Msg("Quickstart bootstrap unavailable while populating AI settings")
	}
	total := credits.CreditsTotal()
	remaining := credits.CreditsRemaining()
	resp.QuickstartCreditsTotal = total
	resp.QuickstartCreditsRemaining = remaining
	if total > remaining {
		resp.QuickstartCreditsUsed = total - remaining
	}
	resp.QuickstartCreditsAvailable = credits.HasCredits()
	resp.UsingQuickstart = aiSvc.IsUsingQuickstart()
	// With BYOK providers configured and quickstart not in use, no blocked
	// reason applies.
	if len(resp.ConfiguredProviders) > 0 && !resp.UsingQuickstart {
		resp.QuickstartBlockedReason = ""
		return
	}
	if resp.QuickstartCreditsAvailable {
		resp.QuickstartBlockedReason = ""
	} else if total > 0 && remaining <= 0 {
		resp.QuickstartBlockedReason = ai.QuickstartCreditsExhaustedReason()
	} else if bootstrapErr != nil {
		resp.QuickstartBlockedReason = strings.TrimSpace(ai.QuickstartBlockedReasonForError(bootstrapErr))
	} else {
		resp.QuickstartBlockedReason = ""
	}
}
// AssistantEnabled reports whether the Pulse Assistant affordance should be
// shown in the authenticated shell without forcing the browser to probe the
// full AI settings API on every route bootstrap.
func (h *AISettingsHandler) AssistantEnabled(ctx context.Context) bool {
	if h == nil {
		return false
	}
	settings, err := h.loadAIConfig(ctx)
	if err != nil {
		return false
	}
	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}
	demoActive := mockmode.IsEnabled()
	if !settings.Enabled && !demoActive {
		return false
	}
	if settings.IsConfigured() || demoActive {
		return true
	}
	// Enabled but not configured: the assistant is still usable when
	// quickstart credits can bridge the gap.
	probe := AISettingsResponse{
		ConfiguredProviders: settings.GetConfiguredProviders(),
	}.NormalizeCollections()
	h.populateQuickstartFields(ctx, &probe)
	return probe.QuickstartCreditsAvailable
}
// aiSettingsRequireModelResolution reports whether the configured model must
// be re-resolved: either no model is set at all, or the model references a
// provider that is no longer configured. Quickstart models and models with no
// parseable provider never require resolution.
func aiSettingsRequireModelResolution(settings *config.AIConfig) bool {
	if settings == nil || !settings.IsConfigured() {
		return false
	}
	model := strings.TrimSpace(settings.GetModel())
	if model == "" {
		return true
	}
	provider, _ := config.ParseModelString(model)
	switch provider {
	case "", config.AIProviderQuickstart:
		return false
	}
	return !settings.HasProvider(provider)
}
// HandleGetAISettings returns the current AI settings (GET /api/settings/ai)
// Secrets are never returned: the response exposes only per-provider
// "configured" booleans and other non-sensitive settings.
func (h *AISettingsHandler) HandleGetAISettings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	if !ensureSettingsReadScope(h.getConfig(r.Context()), w, r) {
		return
	}
	ctx := r.Context()
	settings, err := h.loadAIConfig(ctx)
	if err != nil {
		log.Error().Err(err).Msg("Failed to load Pulse Assistant settings")
		http.Error(w, "Failed to load Pulse Assistant settings", http.StatusInternalServerError)
		return
	}
	// Fall back to defaults when no config has been persisted yet.
	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}
	settings.NormalizeQuickstartModelAliases()
	// Best-effort: resolve a concrete model when the configured one points at
	// a missing provider; resolution errors leave Model unchanged.
	if aiSettingsRequireModelResolution(settings) {
		if resolvedModel, resolveErr := ai.ResolveConfiguredModel(ctx, settings); resolveErr == nil {
			settings.Model = resolvedModel
		}
	}
	// Determine auth method string (defaults to API-key auth when unset)
	authMethod := string(settings.AuthMethod)
	if authMethod == "" {
		authMethod = string(config.AuthMethodAPIKey)
	}
	// Determine if running in demo mode; demo reports enabled/configured
	// regardless of actual configuration.
	isDemo := mockmode.IsEnabled()
	triggerSettings := settings.GetPatrolEventTriggerSettings()
	response := AISettingsResponse{
		Enabled:       settings.Enabled || isDemo,
		Model:         settings.GetModel(),
		ChatModel:     config.NormalizeQuickstartModelString(settings.ChatModel),
		PatrolModel:   config.NormalizeQuickstartModelString(settings.PatrolModel),
		AutoFixModel:  config.NormalizeQuickstartModelString(settings.AutoFixModel),
		Configured:    settings.IsConfigured() || isDemo,
		CustomContext: settings.CustomContext,
		AuthMethod:    authMethod,
		OAuthConnected: settings.OAuthAccessToken != "",
		// Patrol settings
		PatrolIntervalMinutes:  settings.PatrolIntervalMinutes,
		PatrolEnabled:          settings.PatrolEnabled,
		PatrolAutoFix:          settings.PatrolAutoFix,
		AlertTriggeredAnalysis: settings.AlertTriggeredAnalysis,
		PatrolEventTriggersEnabled:   triggerSettings.AlertTriggersEnabled || triggerSettings.AnomalyTriggersEnabled,
		PatrolAlertTriggersEnabled:   triggerSettings.AlertTriggersEnabled,
		PatrolAnomalyTriggersEnabled: triggerSettings.AnomalyTriggersEnabled,
		UseProactiveThresholds:       settings.UseProactiveThresholds,
		AvailableModels:              nil, // Now populated via /api/ai/models endpoint
		// Multi-provider configuration
		AnthropicConfigured:  settings.HasProvider(config.AIProviderAnthropic),
		OpenAIConfigured:     settings.HasProvider(config.AIProviderOpenAI),
		OpenRouterConfigured: settings.HasProvider(config.AIProviderOpenRouter),
		DeepSeekConfigured:   settings.HasProvider(config.AIProviderDeepSeek),
		GeminiConfigured:     settings.HasProvider(config.AIProviderGemini),
		OllamaConfigured:     settings.HasProvider(config.AIProviderOllama),
		OllamaBaseURL:        settings.GetBaseURLForProvider(config.AIProviderOllama),
		OllamaUsername:       settings.OllamaUsername,
		OllamaPasswordSet:    settings.OllamaPassword != "",
		OpenAIBaseURL:        settings.OpenAIBaseURL,
		ConfiguredProviders:  settings.GetConfiguredProviders(),
		CostBudgetUSD30d:     settings.CostBudgetUSD30d,
		RequestTimeoutSeconds: settings.RequestTimeoutSeconds,
		ControlLevel:          settings.GetControlLevel(),
		ProtectedGuests:       settings.GetProtectedGuests(),
		DiscoveryEnabled:      settings.IsDiscoveryEnabled(),
		DiscoveryIntervalHours: settings.DiscoveryIntervalHours,
	}.NormalizeCollections()
	// Populate quickstart credit info
	h.populateQuickstartFields(ctx, &response)
	// If quickstart credits are available, mark as configured even without BYOK
	if response.QuickstartCreditsAvailable && !response.Configured {
		response.Configured = true
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write AI settings response")
	}
}
// HandleUpdateAISettings updates AI settings (PUT /api/settings/ai)
//
// The request body is a partial AISettingsUpdateRequest: only non-nil fields
// are applied. Credential "clear" flags take priority over new values.
// Pro-licensed features (auto-fix, alert analysis, autonomous control) are
// gated before persisting. On success the settings are saved, the AI service
// is reloaded, dependent subsystems are reconfigured, and the refreshed
// (secret-free) settings are returned.
func (h *AISettingsHandler) HandleUpdateAISettings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPut && r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require admin authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	// Check proxy auth admin status if applicable
	if h.getConfig(r.Context()).ProxyAuthSecret != "" {
		if valid, username, isAdmin := CheckProxyAuth(h.getConfig(r.Context()), r); valid && !isAdmin {
			log.Warn().
				Str("ip", r.RemoteAddr).
				Str("path", r.URL.Path).
				Str("method", r.Method).
				Str("username", username).
				Msg("Non-admin user attempted to update AI settings")
			w.Header().Set("Content-Type", "application/json")
			w.WriteHeader(http.StatusForbidden)
			_ = json.NewEncoder(w).Encode(map[string]string{"error": "Admin privileges required"})
			return
		}
	}
	if !ensureSettingsWriteScope(h.getConfig(r.Context()), w, r) {
		return
	}
	// Load existing settings; fall back to defaults so a partial update can
	// still proceed when no config exists yet.
	settings, err := h.loadAIConfig(r.Context())
	if err != nil {
		log.Error().Err(err).Msg("Failed to load existing AI settings")
		settings = config.NewDefaultAIConfig()
	}
	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}
	// Parse request (body capped at 16 KiB to bound memory use)
	r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
	var req AISettingsUpdateRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	// Validate and apply updates
	if req.Model != nil {
		settings.Model = strings.TrimSpace(*req.Model)
	}
	if req.ChatModel != nil {
		settings.ChatModel = strings.TrimSpace(*req.ChatModel)
	}
	if req.PatrolModel != nil {
		settings.PatrolModel = strings.TrimSpace(*req.PatrolModel)
	}
	if req.AutoFixModel != nil {
		settings.AutoFixModel = strings.TrimSpace(*req.AutoFixModel)
	}
	if req.PatrolAutoFix != nil {
		// Auto-fix requires Pro license with ai_autofix feature
		if *req.PatrolAutoFix && !h.GetAIService(r.Context()).HasLicenseFeature(ai.FeatureAIAutoFix) {
			WriteLicenseRequired(w, ai.FeatureAIAutoFix, "Pulse Patrol Auto-Fix requires Pulse Pro")
			return
		}
		settings.PatrolAutoFix = *req.PatrolAutoFix
	}
	if req.UseProactiveThresholds != nil {
		settings.UseProactiveThresholds = *req.UseProactiveThresholds
	}
	if req.CustomContext != nil {
		settings.CustomContext = strings.TrimSpace(*req.CustomContext)
	}
	// Handle multi-provider credentials FIRST - before enabled check
	// This allows the setup flow to send API key + enabled:true together
	// Clear flags take priority over setting new values
	if req.ClearAnthropicKey != nil && *req.ClearAnthropicKey {
		settings.AnthropicAPIKey = ""
	} else if req.AnthropicAPIKey != nil {
		settings.AnthropicAPIKey = strings.TrimSpace(*req.AnthropicAPIKey)
	}
	if req.ClearOpenAIKey != nil && *req.ClearOpenAIKey {
		settings.OpenAIAPIKey = ""
	} else if req.OpenAIAPIKey != nil {
		settings.OpenAIAPIKey = strings.TrimSpace(*req.OpenAIAPIKey)
	}
	if req.ClearOpenRouterKey != nil && *req.ClearOpenRouterKey {
		settings.OpenRouterAPIKey = ""
	} else if req.OpenRouterAPIKey != nil {
		settings.OpenRouterAPIKey = strings.TrimSpace(*req.OpenRouterAPIKey)
	}
	if req.ClearDeepSeekKey != nil && *req.ClearDeepSeekKey {
		settings.DeepSeekAPIKey = ""
	} else if req.DeepSeekAPIKey != nil {
		settings.DeepSeekAPIKey = strings.TrimSpace(*req.DeepSeekAPIKey)
	}
	if req.ClearGeminiKey != nil && *req.ClearGeminiKey {
		settings.GeminiAPIKey = ""
	} else if req.GeminiAPIKey != nil {
		settings.GeminiAPIKey = strings.TrimSpace(*req.GeminiAPIKey)
	}
	if req.ClearOllamaURL != nil && *req.ClearOllamaURL {
		settings.OllamaBaseURL = ""
	} else if req.OllamaBaseURL != nil {
		settings.OllamaBaseURL = strings.TrimSpace(*req.OllamaBaseURL)
	}
	if req.ClearOllamaUsername != nil && *req.ClearOllamaUsername {
		settings.OllamaUsername = ""
	} else if req.OllamaUsername != nil {
		settings.OllamaUsername = strings.TrimSpace(*req.OllamaUsername)
	}
	if req.ClearOllamaPassword != nil && *req.ClearOllamaPassword {
		settings.OllamaPassword = ""
	} else if req.OllamaPassword != nil {
		// Passwords are stored verbatim (no trimming - whitespace may be significant)
		settings.OllamaPassword = *req.OllamaPassword
	}
	if req.OpenAIBaseURL != nil {
		settings.OpenAIBaseURL = strings.TrimSpace(*req.OpenAIBaseURL)
	}
	// Track whether we should opportunistically refresh quickstart state after validation.
	refreshQuickstartState := false
	if req.Enabled != nil {
		// Only allow enabling if at least one provider is configured OR quickstart credits are available
		if *req.Enabled {
			configuredProviders := settings.GetConfiguredProviders()
			if len(configuredProviders) == 0 {
				// Check if quickstart credits can bridge the gap
				aiSvc := h.GetAIService(r.Context())
				hasQuickstart := false
				var bootstrapErr error
				if aiSvc != nil {
					if qsMgr := aiSvc.GetQuickstartCredits(); qsMgr != nil {
						bootstrapErr = qsMgr.EnsureBootstrap(r.Context())
						hasQuickstart = qsMgr.HasCredits() && qsMgr.GetProvider() != nil
					}
				}
				if !hasQuickstart {
					blockedReason := strings.TrimSpace(ai.QuickstartBlockedReasonForError(bootstrapErr))
					switch blockedReason {
					case ai.QuickstartActivationRequiredReason():
						http.Error(w, blockedReason, http.StatusConflict)
						return
					case ai.QuickstartUnavailableReason():
						http.Error(w, ai.QuickstartUnavailableReason(), http.StatusBadGateway)
						return
					}
					http.Error(w, "Please configure a provider (API key or Ollama URL) before enabling Pulse Assistant", http.StatusBadRequest)
					return
				}
				// Quickstart credits available — allow enabling without BYOK.
				// Persist all model strings so chat.Service restart picks them
				// up from disk. All three must point to quickstart — any stale
				// BYOK model reference would cause the chat service to fail.
				quickstartModelStr := config.DefaultModelForProvider(config.AIProviderQuickstart)
				settings.Model = quickstartModelStr
				settings.PatrolModel = quickstartModelStr
				settings.ChatModel = quickstartModelStr
			} else {
				if aiSettingsRequireModelResolution(settings) {
					resolvedModel, resolveErr := ai.ResolveConfiguredModel(r.Context(), settings)
					if resolveErr != nil {
						http.Error(w, fmt.Sprintf("Failed to resolve provider model: %v", resolveErr), http.StatusBadGateway)
						return
					}
					settings.Model = resolvedModel
				}
				// BYOK is configured — refresh quickstart state best-effort so the
				// Patrol status API can still report the current server snapshot.
				refreshQuickstartState = true
			}
		}
		settings.Enabled = *req.Enabled
	}
	// Handle patrol interval - prefer custom minutes over preset
	if req.PatrolIntervalMinutes != nil {
		minutes := *req.PatrolIntervalMinutes
		if minutes < 0 {
			http.Error(w, "patrol_interval_minutes cannot be negative", http.StatusBadRequest)
			return
		}
		if minutes > 0 && minutes < 10 {
			http.Error(w, "patrol_interval_minutes must be at least 10 minutes (or 0 to disable)", http.StatusBadRequest)
			return
		}
		if minutes > 10080 { // 7 days max
			http.Error(w, "patrol_interval_minutes cannot exceed 10080 (7 days)", http.StatusBadRequest)
			return
		}
		settings.PatrolIntervalMinutes = minutes
		if minutes > 0 {
			settings.PatrolEnabled = true // Enable patrol when setting custom interval
		} else {
			settings.PatrolEnabled = false // Disable patrol when setting interval to 0
		}
	}
	if req.PatrolEnabled != nil && req.PatrolIntervalMinutes == nil {
		settings.PatrolEnabled = *req.PatrolEnabled
		if *req.PatrolEnabled {
			// Ensure we have a sane default interval when turning on
			if settings.PatrolIntervalMinutes <= 0 {
				settings.PatrolIntervalMinutes = 360
			}
		}
	}
	if req.CostBudgetUSD30d != nil {
		if *req.CostBudgetUSD30d < 0 {
			http.Error(w, "cost_budget_usd_30d cannot be negative", http.StatusBadRequest)
			return
		}
		settings.CostBudgetUSD30d = *req.CostBudgetUSD30d
	}
	// Handle alert-triggered analysis toggle
	if req.AlertTriggeredAnalysis != nil {
		// Alert analysis requires Pro license with ai_alerts feature
		if *req.AlertTriggeredAnalysis && !h.GetAIService(r.Context()).HasLicenseFeature(ai.FeatureAIAlerts) {
			WriteLicenseRequired(w, ai.FeatureAIAlerts, "Pulse Alert Analysis requires Pulse Pro")
			return
		}
		settings.AlertTriggeredAnalysis = *req.AlertTriggeredAnalysis
	}
	// Handle legacy aggregate event-triggered patrol toggle
	if req.PatrolEventTriggersEnabled != nil {
		settings.SetPatrolEventTriggersEnabled(*req.PatrolEventTriggersEnabled)
	}
	// Handle split scoped patrol trigger toggles
	if req.PatrolAlertTriggersEnabled != nil || req.PatrolAnomalyTriggersEnabled != nil {
		triggerSettings := settings.GetPatrolEventTriggerSettings()
		if req.PatrolAlertTriggersEnabled != nil {
			triggerSettings.AlertTriggersEnabled = *req.PatrolAlertTriggersEnabled
		}
		if req.PatrolAnomalyTriggersEnabled != nil {
			triggerSettings.AnomalyTriggersEnabled = *req.PatrolAnomalyTriggersEnabled
		}
		settings.SetPatrolEventTriggerSettings(
			triggerSettings.AlertTriggersEnabled,
			triggerSettings.AnomalyTriggersEnabled,
		)
	}
	// Handle request timeout (for slow hardware)
	if req.RequestTimeoutSeconds != nil {
		if *req.RequestTimeoutSeconds < 0 {
			http.Error(w, "request_timeout_seconds cannot be negative", http.StatusBadRequest)
			return
		}
		if *req.RequestTimeoutSeconds > 3600 {
			http.Error(w, "request_timeout_seconds cannot exceed 3600 (1 hour)", http.StatusBadRequest)
			return
		}
		settings.RequestTimeoutSeconds = *req.RequestTimeoutSeconds
	}
	// Handle infrastructure control settings
	if req.ControlLevel != nil {
		level := strings.TrimSpace(*req.ControlLevel)
		if !config.IsValidControlLevel(level) {
			http.Error(w, "invalid control_level: must be read_only, controlled, or autonomous", http.StatusBadRequest)
			return
		}
		// "autonomous" requires Pro license
		if level == config.ControlLevelAutonomous {
			if !h.GetAIService(r.Context()).HasLicenseFeature(ai.FeatureAIAutoFix) {
				WriteLicenseRequired(w, ai.FeatureAIAutoFix, "Autonomous control requires Pulse Pro")
				return
			}
		}
		settings.ControlLevel = level
	}
	// Handle protected guests (nil = don't update)
	if req.ProtectedGuests != nil {
		settings.ProtectedGuests = req.ProtectedGuests
	}
	// Handle discovery settings
	if req.DiscoveryEnabled != nil {
		settings.DiscoveryEnabled = *req.DiscoveryEnabled
	}
	if req.DiscoveryIntervalHours != nil {
		if *req.DiscoveryIntervalHours < 0 {
			http.Error(w, "discovery_interval_hours cannot be negative", http.StatusBadRequest)
			return
		}
		settings.DiscoveryIntervalHours = *req.DiscoveryIntervalHours
	}
	// Auto-default discovery interval to 24h when enabled with no interval set.
	// Without this, enabling discovery with interval=0 silently stays in manual-only mode.
	if settings.DiscoveryEnabled && settings.DiscoveryIntervalHours == 0 {
		settings.DiscoveryIntervalHours = 24
	}
	// Refresh quickstart bootstrap state now that validation has passed.
	if refreshQuickstartState {
		if aiSvc := h.GetAIService(r.Context()); aiSvc != nil {
			if qsMgr := aiSvc.GetQuickstartCredits(); qsMgr != nil {
				if err := qsMgr.EnsureBootstrap(r.Context()); err != nil {
					log.Warn().Err(err).Msg("Failed to refresh quickstart bootstrap state")
				}
			}
		}
	}
	if aiSettingsRequireModelResolution(settings) {
		resolvedModel, resolveErr := ai.ResolveConfiguredModel(r.Context(), settings)
		if resolveErr != nil {
			http.Error(w, fmt.Sprintf("Failed to resolve provider model: %v", resolveErr), http.StatusBadGateway)
			return
		}
		settings.Model = resolvedModel
	}
	settings.NormalizeQuickstartModelAliases()
	// Save settings
	if err := h.getPersistence(r.Context()).SaveAIConfig(*settings); err != nil {
		log.Error().Err(err).Msg("Failed to save AI settings")
		http.Error(w, "Failed to save settings", http.StatusInternalServerError)
		return
	}
	// Reload the AI service with new settings
	if err := h.GetAIService(r.Context()).Reload(); err != nil {
		log.Warn().Err(err).Msg("Failed to reload AI service after settings update")
	}
	// Reconfigure patrol service with new settings (applies interval changes immediately)
	h.GetAIService(r.Context()).ReconfigurePatrol()
	// Update alert-triggered analyzer if available
	if analyzer := h.GetAIService(r.Context()).GetAlertTriggeredAnalyzer(); analyzer != nil {
		analyzer.SetEnabled(settings.AlertTriggeredAnalysis)
	}
	// Trigger AI chat service restart when provider-affecting settings change.
	// The running service keeps provider configuration in memory, so credential,
	// base URL, or model updates must restart the chat runtime to take effect.
	if h.onModelChange != nil && shouldRestartAIChat(req) {
		h.onModelChange()
	}
	// Update MCP control settings if control level or protected guests changed
	// This updates tool visibility without restarting AI chat
	if h.onControlSettingsChange != nil && (req.ControlLevel != nil || req.ProtectedGuests != nil) {
		h.onControlSettingsChange()
	}
	providerName, _ := config.ParseModelString(settings.GetModel())
	LogAuditEventForTenant(GetOrgID(r.Context()), "ai_settings_updated", getAuthUsername(h.getConfig(r.Context()), r), GetClientIP(r), r.URL.Path, true,
		fmt.Sprintf("AI settings updated: enabled=%t provider=%s model=%s", settings.Enabled, providerName, settings.GetModel()))
	log.Info().
		Bool("enabled", settings.Enabled).
		Str("provider", providerName).
		Str("model", settings.GetModel()).
		Str("chatModel", config.NormalizeQuickstartModelString(settings.ChatModel)).
		Str("patrolModel", config.NormalizeQuickstartModelString(settings.PatrolModel)).
		Bool("alertTriggeredAnalysis", settings.AlertTriggeredAnalysis).
		Msg("AI settings updated")
	// Determine auth method for response
	authMethod := string(settings.AuthMethod)
	if authMethod == "" {
		authMethod = string(config.AuthMethodAPIKey)
	}
	triggerSettings := settings.GetPatrolEventTriggerSettings()
	// Return updated settings
	response := AISettingsResponse{
		Enabled:       settings.Enabled,
		Model:         settings.GetModel(),
		ChatModel:     config.NormalizeQuickstartModelString(settings.ChatModel),
		PatrolModel:   config.NormalizeQuickstartModelString(settings.PatrolModel),
		AutoFixModel:  config.NormalizeQuickstartModelString(settings.AutoFixModel),
		Configured:    settings.IsConfigured(),
		CustomContext: settings.CustomContext,
		AuthMethod:    authMethod,
		OAuthConnected: settings.OAuthAccessToken != "",
		PatrolIntervalMinutes:  settings.PatrolIntervalMinutes,
		PatrolEnabled:          settings.PatrolEnabled,
		PatrolAutoFix:          settings.PatrolAutoFix,
		AlertTriggeredAnalysis: settings.AlertTriggeredAnalysis,
		PatrolEventTriggersEnabled:   triggerSettings.AlertTriggersEnabled || triggerSettings.AnomalyTriggersEnabled,
		PatrolAlertTriggersEnabled:   triggerSettings.AlertTriggersEnabled,
		PatrolAnomalyTriggersEnabled: triggerSettings.AnomalyTriggersEnabled,
		UseProactiveThresholds:       settings.UseProactiveThresholds,
		AvailableModels:              nil, // Now populated via /api/ai/models endpoint
		// Multi-provider configuration
		AnthropicConfigured:  settings.HasProvider(config.AIProviderAnthropic),
		OpenAIConfigured:     settings.HasProvider(config.AIProviderOpenAI),
		OpenRouterConfigured: settings.HasProvider(config.AIProviderOpenRouter),
		DeepSeekConfigured:   settings.HasProvider(config.AIProviderDeepSeek),
		GeminiConfigured:     settings.HasProvider(config.AIProviderGemini),
		OllamaConfigured:     settings.HasProvider(config.AIProviderOllama),
		OllamaBaseURL:        settings.GetBaseURLForProvider(config.AIProviderOllama),
		OllamaUsername:       settings.OllamaUsername,
		OllamaPasswordSet:    settings.OllamaPassword != "",
		OpenAIBaseURL:        settings.OpenAIBaseURL,
		ConfiguredProviders:  settings.GetConfiguredProviders(),
		// Echo the (possibly just-updated) cost budget; previously omitted,
		// which made the PUT response inconsistent with GET /api/settings/ai.
		CostBudgetUSD30d:      settings.CostBudgetUSD30d,
		RequestTimeoutSeconds: settings.RequestTimeoutSeconds,
		ControlLevel:          settings.GetControlLevel(),
		ProtectedGuests:       settings.GetProtectedGuests(),
		// NOTE(review): GET uses settings.IsDiscoveryEnabled() here; confirm
		// whether the raw field is intended to differ in the PUT response.
		DiscoveryEnabled:       settings.DiscoveryEnabled,
		DiscoveryIntervalHours: settings.DiscoveryIntervalHours,
	}.NormalizeCollections()
	// Populate quickstart credit info
	h.populateQuickstartFields(r.Context(), &response)
	// If quickstart credits are available, mark as configured even without BYOK
	if response.QuickstartCreditsAvailable && !response.Configured {
		response.Configured = true
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write AI settings update response")
	}
}
// HandleTestAIConnection tests the AI provider connection (POST /api/ai/test)
// Auth is enforced by RequirePermission middleware at route registration; with
// default authorizer, non-admin proxy users are hard-denied (with RBAC, deferred
// to authorizer). Token scope is enforced by RequireScope middleware.
// ensureSettingsWriteScope provides the additional admin-session identity guard
// for session-based (non-token) users.
func (h *AISettingsHandler) HandleTestAIConnection(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	if !ensureSettingsWriteScope(h.getConfig(r.Context()), w, r) {
		return
	}
	// Cap the provider round-trip at 30s so a hung provider cannot stall the request.
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	var result struct {
		Success bool   `json:"success"`
		Message string `json:"message"`
		Model   string `json:"model,omitempty"`
	}
	if testErr := h.GetAIService(r.Context()).TestConnection(ctx); testErr != nil {
		result.Success = false
		result.Message = "Connection test failed"
		log.Error().Err(testErr).Msg("AI connection test failed")
	} else {
		result.Success = true
		result.Message = "Connection successful"
		if cfg := h.GetAIService(r.Context()).GetConfig(); cfg != nil {
			result.Model = cfg.GetModel()
		}
	}
	if writeErr := utils.WriteJSONResponse(w, result); writeErr != nil {
		log.Error().Err(writeErr).Msg("Failed to write AI test response")
	}
}
// HandleTestProvider tests a specific AI provider connection (POST /api/ai/test/:provider)
// Auth is enforced by RequirePermission middleware at route registration; with
// default authorizer, non-admin proxy users are hard-denied (with RBAC, deferred
// to authorizer). Token scope is enforced by RequireScope middleware.
// ensureSettingsWriteScope provides the additional admin-session identity guard
// for session-based (non-token) users.
//
// Responds with {success, message, provider}; soft failures (unconfigured
// provider, connection errors) are reported in the body with HTTP 200.
func (h *AISettingsHandler) HandleTestProvider(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	if !ensureSettingsWriteScope(h.getConfig(r.Context()), w, r) {
		return
	}
	// Get provider from URL path (e.g., /api/ai/test/anthropic -> anthropic)
	provider := strings.TrimPrefix(r.URL.Path, "/api/ai/test/")
	if provider == "" || provider == r.URL.Path {
		http.Error(w, `{"error":"Provider is required"}`, http.StatusBadRequest)
		return
	}
	// Validate provider name: only allow lowercase alphanumeric and hyphens,
	// max 64 chars. This rejects path traversal, slashes, and injection attempts.
	if len(provider) > 64 || !isValidProviderName(provider) {
		http.Error(w, `{"error":"Invalid provider name"}`, http.StatusBadRequest)
		return
	}
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	var testResult struct {
		Success  bool   `json:"success"`
		Message  string `json:"message"`
		Provider string `json:"provider"`
	}
	testResult.Provider = provider
	// respond writes the accumulated testResult; the original duplicated this
	// write-and-log epilogue on every exit path.
	respond := func() {
		if err := utils.WriteJSONResponse(w, testResult); err != nil {
			log.Error().Err(err).Msg("Failed to write provider test response")
		}
	}
	// Load config and create provider for testing. Success defaults to false.
	cfg := h.GetAIService(r.Context()).GetConfig()
	if cfg == nil {
		testResult.Message = "Pulse Assistant not configured"
		respond()
		return
	}
	// Check if provider is configured
	if !cfg.HasProvider(provider) {
		testResult.Message = "Provider not configured"
		respond()
		return
	}
	// Create provider and test connection
	model, err := ai.ResolvePreferredModelForProvider(ctx, cfg, provider)
	if err != nil {
		testResult.Message = "Failed to resolve provider model"
		log.Error().Err(err).Str("provider", provider).Msg("AI provider model resolution failed")
		respond()
		return
	}
	testProvider, err := providers.NewForProvider(cfg, provider, model)
	if err != nil {
		testResult.Message = "Failed to create provider"
		log.Error().Err(err).Str("provider", provider).Msg("AI provider creation failed")
		respond()
		return
	}
	if err := testProvider.TestConnection(ctx); err != nil {
		testResult.Message = "Connection test failed"
		log.Error().Err(err).Str("provider", provider).Msg("AI provider connection test failed")
	} else {
		testResult.Success = true
		testResult.Message = "Connection successful"
	}
	respond()
}
// isValidProviderName reports whether s is non-empty and consists solely of
// lowercase letters, digits, and hyphens. This prevents path traversal,
// URL injection, and log injection.
func isValidProviderName(s string) bool {
	if len(s) == 0 {
		return false
	}
	for _, r := range s {
		switch {
		case r >= 'a' && r <= 'z':
		case r >= '0' && r <= '9':
		case r == '-':
		default:
			return false
		}
	}
	return true
}
// HandleListModels fetches available models from the configured AI provider (GET /api/ai/models)
func (h *AISettingsHandler) HandleListModels(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Auth is enforced by RequireAuth + RequireScope middleware at the route level.
	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
	defer cancel()
	// Wire-format types for the response payload.
	type modelEntry struct {
		ID          string `json:"id"`
		Name        string `json:"name"`
		Description string `json:"description,omitempty"`
		CreatedAt   int64  `json:"created_at,omitempty"`
		Notable     bool   `json:"notable"`
	}
	type modelsPayload struct {
		Models []modelEntry `json:"models"`
		Error  string       `json:"error,omitempty"`
		Cached bool         `json:"cached"`
	}
	models, cached, err := h.GetAIService(r.Context()).ListModelsWithCache(ctx)
	if err != nil {
		// Soft failure: report the error in the body so the frontend can
		// fall back, rather than failing the whole request.
		log.Error().Err(err).Msg("Failed to list AI models")
		fallback := modelsPayload{
			Models: []modelEntry{},
			Error:  "Failed to fetch model list",
		}
		if jsonErr := utils.WriteJSONResponse(w, fallback); jsonErr != nil {
			log.Error().Err(jsonErr).Msg("Failed to write AI models response")
		}
		return
	}
	// Translate provider models into the wire format, counting notable ones
	// for the debug log below.
	entries := make([]modelEntry, 0, len(models))
	notable := 0
	for _, m := range models {
		if m.Notable {
			notable++
		}
		entries = append(entries, modelEntry{
			ID:          m.ID,
			Name:        m.Name,
			Description: m.Description,
			CreatedAt:   m.CreatedAt,
			Notable:     m.Notable,
		})
	}
	log.Debug().Int("total", len(entries)).Int("notable", notable).Msg("Returning AI models")
	if err := utils.WriteJSONResponse(w, modelsPayload{Models: entries, Cached: cached}); err != nil {
		log.Error().Err(err).Msg("Failed to write AI models response")
	}
}
// AIConversationMessage represents a single message in conversation history.
type AIConversationMessage struct {
	Role    string `json:"role"` // "user" or "assistant"
	Content string `json:"content"`
}

// AIExecuteRequest is the request body for POST /api/ai/execute.
type AIExecuteRequest struct {
	Prompt     string                  `json:"prompt"`                // Required; the user prompt to execute
	TargetType string                  `json:"target_type,omitempty"` // "agent", "system-container", "vm"
	TargetID   string                  `json:"target_id,omitempty"`   // Max 256 chars (validated in handlers)
	Context    map[string]interface{}  `json:"context,omitempty"`     // Current metrics, state, etc.
	History    []AIConversationMessage `json:"history,omitempty"`     // Previous conversation messages
	FindingID  string                  `json:"finding_id,omitempty"`  // Optional patrol finding this prompt relates to
	Model      string                  `json:"model,omitempty"`       // Optional model override
	UseCase    string                  `json:"use_case,omitempty"`    // "chat" or "patrol"; "autofix"/"remediation" require Pro
}
// normalizeAIExecuteTargetType trims surrounding whitespace from raw and
// lowercases the result.
func normalizeAIExecuteTargetType(raw string) string {
	trimmed := strings.TrimSpace(raw)
	return strings.ToLower(trimmed)
}
// normalizeAndValidateAIExecuteTargetType normalizes raw (trim + lowercase)
// and validates it against the supported execute targets. An empty value is
// allowed and passes through unchanged; anything else must normalize to
// "agent", "system-container", or "vm".
func normalizeAndValidateAIExecuteTargetType(raw string) (string, error) {
	targetType := normalizeAIExecuteTargetType(raw)
	if targetType == "" {
		return "", nil
	}
	switch targetType {
	case "agent", "system-container", "vm":
		return targetType, nil
	default:
		// errors.New instead of fmt.Errorf: no formatting verbs are used.
		return "", errors.New("invalid target_type")
	}
}
// AIExecuteResponse is the response from POST /api/ai/execute.
// Slice fields should be passed through NormalizeCollections before encoding
// so they serialize as JSON arrays rather than null.
type AIExecuteResponse struct {
	Content          string                  `json:"content"`           // Assistant reply text
	Model            string                  `json:"model"`             // Model that produced the reply
	InputTokens      int                     `json:"input_tokens"`      // Prompt token usage
	OutputTokens     int                     `json:"output_tokens"`     // Completion token usage
	ToolCalls        []ai.ToolExecution      `json:"tool_calls"`        // Commands that were executed
	PendingApprovals []ai.ApprovalNeededData `json:"pending_approvals"` // Commands that require approval (non-streaming)
}
// EmptyAIExecuteResponse returns a zero-value AIExecuteResponse whose slice
// fields are initialized to empty (non-nil) slices so they encode as [].
func EmptyAIExecuteResponse() AIExecuteResponse {
	return AIExecuteResponse{}.NormalizeCollections()
}
// NormalizeCollections guarantees that ToolCalls and PendingApprovals are
// non-nil, so JSON encoding emits [] instead of null for each.
func (r AIExecuteResponse) NormalizeCollections() AIExecuteResponse {
	if r.PendingApprovals == nil {
		r.PendingApprovals = []ai.ApprovalNeededData{}
	}
	if r.ToolCalls == nil {
		r.ToolCalls = []ai.ToolExecution{}
	}
	return r
}
// aiExecuteStreamCompleteEvent is the terminal "complete" SSE payload emitted
// by HandleExecuteStream (before the final "done" event), carrying usage
// metadata for the finished request.
type aiExecuteStreamCompleteEvent struct {
	Type         string             `json:"type"` // Always "complete"
	Model        string             `json:"model"`
	InputTokens  int                `json:"input_tokens"`
	OutputTokens int                `json:"output_tokens"`
	ToolCalls    []ai.ToolExecution `json:"tool_calls"`
}
// emptyAIExecuteStreamCompleteEvent returns a zero-value event whose
// ToolCalls slice is non-nil so it JSON-encodes as [].
func emptyAIExecuteStreamCompleteEvent() aiExecuteStreamCompleteEvent {
	return aiExecuteStreamCompleteEvent{}.NormalizeCollections()
}
// NormalizeCollections guarantees ToolCalls is non-nil so it serializes as [].
func (e aiExecuteStreamCompleteEvent) NormalizeCollections() aiExecuteStreamCompleteEvent {
	if e.ToolCalls != nil {
		return e
	}
	e.ToolCalls = []ai.ToolExecution{}
	return e
}
// AIKubernetesAnalyzeRequest is the request body for POST /api/ai/kubernetes/analyze.
type AIKubernetesAnalyzeRequest struct {
	ClusterID string `json:"cluster_id"` // Required; the cluster to analyze
}
// HandleExecute executes an AI prompt (POST /api/ai/execute)
//
// The request body is an AIExecuteRequest (capped at 64KB). The handler
// validates the prompt, target, and history, runs the prompt synchronously
// with a 120s timeout, and writes an AIExecuteResponse. "autofix" and
// "remediation" use cases additionally require the Pro AI Auto-Fix feature.
func (h *AISettingsHandler) HandleExecute(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Authentication is enforced by RequireAdmin middleware at route registration.
	// Check if AI is enabled
	if !h.GetAIService(r.Context()).IsEnabled() {
		http.Error(w, "Pulse Assistant is not enabled or configured", http.StatusBadRequest)
		return
	}
	// Parse request; cap the body at 64KB to bound memory use.
	r.Body = http.MaxBytesReader(w, r.Body, 64*1024)
	bodyBytes, readErr := io.ReadAll(r.Body)
	if readErr != nil {
		log.Error().Err(readErr).Msg("Failed to read request body")
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	var req AIExecuteRequest
	if err := json.Unmarshal(bodyBytes, &req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	// Fine-grained license checks based on UseCase
	useCase := strings.ToLower(strings.TrimSpace(req.UseCase))
	if useCase == "autofix" || useCase == "remediation" {
		if !h.GetAIService(r.Context()).HasLicenseFeature(ai.FeatureAIAutoFix) {
			WriteLicenseRequired(w, ai.FeatureAIAutoFix, "Pulse Patrol Auto-Fix requires Pulse Pro")
			return
		}
	}
	if strings.TrimSpace(req.Prompt) == "" {
		http.Error(w, "Prompt is required", http.StatusBadRequest)
		return
	}
	// Validate and normalize target_type if provided
	targetType, err := normalizeAndValidateAIExecuteTargetType(req.TargetType)
	if err != nil {
		http.Error(w, "Invalid target_type (allowed: agent, system-container, vm)", http.StatusBadRequest)
		return
	}
	// Validate target_id length if provided
	if len(req.TargetID) > 256 {
		http.Error(w, "target_id exceeds maximum length", http.StatusBadRequest)
		return
	}
	// Validate conversation history roles
	for _, msg := range req.History {
		switch msg.Role {
		case "user", "assistant":
			// valid
		default:
			http.Error(w, "Invalid role in history (allowed: user, assistant)", http.StatusBadRequest)
			return
		}
	}
	// Execute the prompt with a timeout
	ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
	defer cancel()
	// Convert history from API type to service type
	var history []ai.ConversationMessage
	for _, msg := range req.History {
		history = append(history, ai.ConversationMessage{
			Role:    msg.Role,
			Content: msg.Content,
		})
	}
	// Default to the interactive chat use case when the client omits one.
	if useCase == "" {
		useCase = "chat"
	}
	resp, err := h.GetAIService(r.Context()).Execute(ctx, ai.ExecuteRequest{
		Prompt:     req.Prompt,
		TargetType: targetType,
		TargetID:   req.TargetID,
		Context:    req.Context,
		History:    history,
		FindingID:  req.FindingID,
		Model:      req.Model,
		UseCase:    useCase,
	})
	if err != nil {
		log.Error().Err(err).Msg("AI execution failed")
		http.Error(w, "Pulse Assistant request failed", http.StatusInternalServerError)
		return
	}
	// NormalizeCollections ensures slice fields encode as [] rather than null.
	response := AIExecuteResponse{
		Content:          resp.Content,
		Model:            resp.Model,
		InputTokens:      resp.InputTokens,
		OutputTokens:     resp.OutputTokens,
		ToolCalls:        resp.ToolCalls,
		PendingApprovals: resp.PendingApprovals,
	}.NormalizeCollections()
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write AI execute response")
	}
}
// HandleAnalyzeKubernetesCluster analyzes a Kubernetes cluster with AI (POST /api/ai/kubernetes/analyze)
//
// Expects an AIKubernetesAnalyzeRequest body and responds with an
// AIExecuteResponse. Collections are normalized so ToolCalls and
// PendingApprovals always encode as JSON arrays, matching HandleExecute.
func (h *AISettingsHandler) HandleAnalyzeKubernetesCluster(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	if !h.GetAIService(r.Context()).IsEnabled() {
		http.Error(w, "Pulse Assistant is not enabled or configured", http.StatusBadRequest)
		return
	}
	// Small body cap: only a cluster ID is expected.
	r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
	var req AIKubernetesAnalyzeRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if strings.TrimSpace(req.ClusterID) == "" {
		http.Error(w, "cluster_id is required", http.StatusBadRequest)
		return
	}
	// Cluster analysis can involve many tool calls; allow up to 3 minutes.
	ctx, cancel := context.WithTimeout(r.Context(), 180*time.Second)
	defer cancel()
	resp, err := h.GetAIService(r.Context()).AnalyzeKubernetesCluster(ctx, req.ClusterID)
	if err != nil {
		switch {
		case errors.Is(err, ai.ErrKubernetesClusterNotFound):
			http.Error(w, "Kubernetes cluster not found", http.StatusNotFound)
			return
		case errors.Is(err, ai.ErrKubernetesStateUnavailable):
			http.Error(w, "Kubernetes state not available", http.StatusServiceUnavailable)
			return
		default:
			log.Error().Err(err).Str("cluster_id", req.ClusterID).Msg("Kubernetes AI analysis failed")
			http.Error(w, "Pulse Assistant request failed", http.StatusInternalServerError)
			return
		}
	}
	// FIX: normalize collections (HandleExecute already does) so nil slices
	// serialize as [] instead of null.
	response := AIExecuteResponse{
		Content:          resp.Content,
		Model:            resp.Model,
		InputTokens:      resp.InputTokens,
		OutputTokens:     resp.OutputTokens,
		ToolCalls:        resp.ToolCalls,
		PendingApprovals: resp.PendingApprovals,
	}.NormalizeCollections()
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write Kubernetes AI response")
	}
}
// HandleExecuteStream executes an AI prompt with SSE streaming (POST /api/ai/execute/stream)
//
// Event ordering guarantee: per-chunk events from the service are forwarded
// as they arrive (the service's own "done" is suppressed), then a single
// "complete" event with usage metadata, then a final "done" emitted by a
// defer. Heartbeat comments keep the connection alive during long tool runs.
func (h *AISettingsHandler) HandleExecuteStream(w http.ResponseWriter, r *http.Request) {
	// Handle CORS for dev mode (frontend on different port)
	h.setSSECORSHeaders(w, r)
	// Handle preflight
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusOK)
		return
	}
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// NOTE: Authentication is enforced by RequireAdmin middleware at route
	// registration (router_routes_ai_relay.go). No redundant CheckAuth needed.
	// Check if AI is enabled
	if !h.GetAIService(r.Context()).IsEnabled() {
		http.Error(w, "Pulse Assistant is not enabled or configured", http.StatusBadRequest)
		return
	}
	// Parse request
	r.Body = http.MaxBytesReader(w, r.Body, 64*1024) // 64KB max
	var req AIExecuteRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		log.Warn().Err(err).Msg("Failed to decode AI execute stream request")
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	// Fine-grained license checks based on UseCase (before SSE headers)
	useCase := strings.ToLower(strings.TrimSpace(req.UseCase))
	if useCase == "autofix" || useCase == "remediation" {
		if !h.GetAIService(r.Context()).HasLicenseFeature(ai.FeatureAIAutoFix) {
			WriteLicenseRequired(w, ai.FeatureAIAutoFix, "Pulse Patrol Auto-Fix requires Pulse Pro")
			return
		}
	}
	if strings.TrimSpace(req.Prompt) == "" {
		http.Error(w, "Prompt is required", http.StatusBadRequest)
		return
	}
	// Validate and normalize target_type if provided
	targetType, err := normalizeAndValidateAIExecuteTargetType(req.TargetType)
	if err != nil {
		http.Error(w, "Invalid target_type (allowed: agent, system-container, vm)", http.StatusBadRequest)
		return
	}
	// Validate target_id length if provided
	if len(req.TargetID) > 256 {
		http.Error(w, "target_id exceeds maximum length", http.StatusBadRequest)
		return
	}
	// Validate conversation history roles
	for _, msg := range req.History {
		switch msg.Role {
		case "user", "assistant":
			// valid
		default:
			http.Error(w, "Invalid role in history (allowed: user, assistant)", http.StatusBadRequest)
			return
		}
	}
	log.Info().
		Int("prompt_len", len(req.Prompt)).
		Str("target_type", targetType).
		Str("target_id", req.TargetID).
		Msg("AI streaming request started")
	// Set up SSE headers
	// IMPORTANT: Set headers BEFORE any writes to prevent Go from auto-adding Transfer-Encoding: chunked
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("X-Accel-Buffering", "no") // Disable nginx buffering
	// Prevent chunked encoding which causes "Invalid character in chunk size" errors in Vite proxy
	w.Header().Set("Transfer-Encoding", "identity")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}
	// Disable the server's write deadline for this SSE connection
	// This is critical for long-running AI requests that can take several minutes
	rc := http.NewResponseController(w)
	if err := rc.SetWriteDeadline(time.Time{}); err != nil {
		log.Warn().Err(err).Msg("Failed to disable write deadline for SSE")
	}
	// Also disable read deadline
	if err := rc.SetReadDeadline(time.Time{}); err != nil {
		log.Warn().Err(err).Msg("Failed to disable read deadline for SSE")
	}
	// Flush headers immediately
	flusher.Flush()
	// Create context with timeout (15 minutes for complex analysis with multiple tool calls)
	// Use background context to avoid browser disconnect canceling the request
	// DeepSeek reasoning models + multiple tool executions can easily take 5+ minutes
	ctx, cancel := context.WithTimeout(context.Background(), 900*time.Second)
	defer cancel()
	// Set up heartbeat to keep connection alive during long tool executions
	// NOTE: We don't check r.Context().Done() because Vite proxy may close
	// the request context prematurely. We detect real disconnection via write failures.
	heartbeatDone := make(chan struct{})
	var clientDisconnected atomic.Bool
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Extend write deadline before heartbeat
				_ = rc.SetWriteDeadline(time.Now().Add(10 * time.Second))
				// Send SSE comment as heartbeat
				_, err := w.Write([]byte(": heartbeat\n\n"))
				if err != nil {
					log.Debug().Err(err).Msg("Heartbeat write failed, stopping heartbeat (AI continues)")
					clientDisconnected.Store(true)
					// Don't cancel the AI request - let it complete with its own timeout
					// The SSE connection may have issues but the AI work can still finish
					return
				}
				flusher.Flush()
				log.Debug().Msg("Sent SSE heartbeat")
			case <-heartbeatDone:
				return
			}
		}
	}()
	defer close(heartbeatDone)
	// Helper to safely write SSE events, tracking if client disconnected
	safeWrite := func(data []byte) bool {
		if clientDisconnected.Load() {
			return false
		}
		_ = rc.SetWriteDeadline(time.Now().Add(10 * time.Second))
		_, err := w.Write(data)
		if err != nil {
			log.Debug().Err(err).Msg("Failed to write SSE event (client may have disconnected)")
			clientDisconnected.Store(true)
			return false
		}
		flusher.Flush()
		return true
	}
	// Stream callback - write SSE events
	callback := func(event ai.StreamEvent) {
		// Skip the 'done' event from service - we'll send our own at the end
		// This ensures 'complete' comes before 'done'
		if event.Type == "done" {
			log.Debug().Msg("Skipping service 'done' event - will send final 'done' after 'complete'")
			return
		}
		data, err := json.Marshal(event)
		if err != nil {
			log.Error().Err(err).Msg("Failed to marshal stream event")
			return
		}
		log.Debug().
			Str("event_type", event.Type).
			Msg("Streaming AI event")
		// SSE format: data: <json>\n\n
		safeWrite([]byte("data: " + string(data) + "\n\n"))
	}
	// Convert history from API type to service type
	var history []ai.ConversationMessage
	for _, msg := range req.History {
		history = append(history, ai.ConversationMessage{
			Role:    msg.Role,
			Content: msg.Content,
		})
	}
	// Default to the interactive chat use case when the client omits one.
	if useCase == "" {
		useCase = "chat"
	}
	// Ensure we always send a final 'done' event
	defer func() {
		if !clientDisconnected.Load() {
			doneEvent := ai.StreamEvent{Type: "done"}
			data, _ := json.Marshal(doneEvent)
			safeWrite([]byte("data: " + string(data) + "\n\n"))
			log.Debug().Msg("Sent final 'done' event")
		}
	}()
	// Execute with streaming
	resp, err := h.GetAIService(r.Context()).ExecuteStream(ctx, ai.ExecuteRequest{
		Prompt:     req.Prompt,
		TargetType: targetType,
		TargetID:   req.TargetID,
		Context:    req.Context,
		History:    history,
		FindingID:  req.FindingID,
		Model:      req.Model,
		UseCase:    useCase,
	}, callback)
	if err != nil {
		log.Error().Err(err).Msg("AI streaming execution failed")
		// Send error event — use generic message to avoid leaking internal details
		errEvent := ai.StreamEvent{Type: "error", Data: map[string]string{"message": "AI request failed. Please try again."}}
		data, _ := json.Marshal(errEvent)
		safeWrite([]byte("data: " + string(data) + "\n\n"))
		return
	}
	log.Info().
		Str("model", resp.Model).
		Int("input_tokens", resp.InputTokens).
		Int("output_tokens", resp.OutputTokens).
		Int("tool_calls", len(resp.ToolCalls)).
		Msg("AI streaming request completed")
	// Send final response with metadata (before 'done')
	finalEvent := aiExecuteStreamCompleteEvent{
		Type:         "complete",
		Model:        resp.Model,
		InputTokens:  resp.InputTokens,
		OutputTokens: resp.OutputTokens,
		ToolCalls:    resp.ToolCalls,
	}.NormalizeCollections()
	data, _ := json.Marshal(finalEvent)
	safeWrite([]byte("data: " + string(data) + "\n\n"))
	// 'done' event is sent by the defer above
}
// AIRunCommandRequest is the request body for POST /api/ai/run-command.
type AIRunCommandRequest struct {
	Command    string `json:"command"`     // Required; the approved shell command to run
	ApprovalID string `json:"approval_id"` // Required; the approval being consumed
	TargetType string `json:"target_type"` // "agent", "system-container", or "vm"
	TargetID   string `json:"target_id"`
	RunOnHost  bool   `json:"run_on_host"` // When true, forces agent execution on TargetHost
	VMID       string `json:"vmid,omitempty"`
	TargetHost string `json:"target_host,omitempty"` // Explicit host for routing
}
// HandleRunCommand executes a single approved command (POST /api/ai/run-command)
//
// Requires the Pro AI Auto-Fix feature. The approval is looked up in the
// org-scoped approval store and consumed (one-shot) before execution; the
// command then runs via the AI service with a 5-minute timeout.
func (h *AISettingsHandler) HandleRunCommand(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	// Gated for AI Auto-Fix (Pro feature)
	if !h.GetAIService(r.Context()).HasLicenseFeature(ai.FeatureAIAutoFix) {
		WriteLicenseRequired(w, ai.FeatureAIAutoFix, "Pulse Patrol Auto-Fix requires Pulse Pro")
		return
	}
	// Parse request; small body cap since only identifiers and a command are expected.
	r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
	bodyBytes, readErr := io.ReadAll(r.Body)
	if readErr != nil {
		log.Error().Err(readErr).Msg("Failed to read request body")
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	log.Debug().Int("body_len", len(bodyBytes)).Msg("run-command request received")
	var req AIRunCommandRequest
	if err := json.Unmarshal(bodyBytes, &req); err != nil {
		// NOTE(review): this logs the raw request body at error level on
		// decode failure — confirm it cannot contain sensitive content.
		log.Error().Err(err).Str("body", string(bodyBytes)).Msg("Failed to decode JSON body")
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if strings.TrimSpace(req.Command) == "" {
		http.Error(w, "Command is required", http.StatusBadRequest)
		return
	}
	if strings.TrimSpace(req.ApprovalID) == "" {
		http.Error(w, "approval_id is required", http.StatusBadRequest)
		return
	}
	// Canonicalize the (type, id) pair the approval was granted for.
	approvalTargetType, approvalTargetID, targetErr := normalizeRunCommandApprovalTarget(req)
	if targetErr != nil {
		http.Error(w, targetErr.Error(), http.StatusBadRequest)
		return
	}
	store := approval.GetStore()
	if store == nil {
		http.Error(w, "Approval store not initialized", http.StatusServiceUnavailable)
		return
	}
	// Scope the lookup to the caller's org so approvals cannot cross tenants.
	orgID := approval.NormalizeOrgID(GetOrgID(r.Context()))
	approvalReq, ok := store.GetApproval(req.ApprovalID)
	if !ok || !approval.BelongsToOrg(approvalReq, orgID) {
		http.Error(w, "Approval request not found", http.StatusNotFound)
		return
	}
	// ConsumeApproval is one-shot: it also verifies the command/target match
	// what was approved, so a 409 here means reuse or mismatch.
	if _, err := store.ConsumeApproval(req.ApprovalID, req.Command, approvalTargetType, approvalTargetID); err != nil {
		log.Error().Err(err).Str("approval_id", req.ApprovalID).Msg("Failed to consume approval")
		http.Error(w, "Failed to consume approval", http.StatusConflict)
		return
	}
	log.Info().
		Str("command", req.Command).
		Str("approval_id", req.ApprovalID).
		Str("target_type", approvalTargetType).
		Str("target_id", approvalTargetID).
		Bool("run_on_host", req.RunOnHost).
		Str("target_host", req.TargetHost).
		Msg("Executing approved command")
	// Execute with timeout (5 minutes for long-running commands)
	ctx, cancel := context.WithTimeout(r.Context(), 300*time.Second)
	defer cancel()
	resp, err := h.GetAIService(r.Context()).RunCommand(ctx, ai.RunCommandRequest{
		Command:    req.Command,
		ApprovalID: req.ApprovalID,
		TargetType: approvalTargetType,
		TargetID:   approvalTargetID,
		RunOnHost:  req.RunOnHost,
		VMID:       req.VMID,
		TargetHost: strings.ToLower(strings.TrimSpace(req.TargetHost)),
	})
	if err != nil {
		log.Error().Err(err).Msg("Failed to execute command")
		http.Error(w, "Failed to execute command", http.StatusInternalServerError)
		return
	}
	if err := utils.WriteJSONResponse(w, resp); err != nil {
		log.Error().Err(err).Msg("Failed to write run command response")
	}
}
// normalizeRunCommandApprovalTarget derives the canonical (targetType, targetID)
// pair used to consume an approval. RunOnHost forces the "agent" type and
// requires an explicit target_host; container/VM targets may be overridden by
// a non-empty VMID; agent IDs are lowercased for stable matching.
func normalizeRunCommandApprovalTarget(req AIRunCommandRequest) (string, string, error) {
	kind := normalizeAIExecuteTargetType(req.TargetType)
	id := strings.TrimSpace(req.TargetID)
	host := strings.ToLower(strings.TrimSpace(req.TargetHost))
	if req.RunOnHost {
		if host == "" {
			return "", "", errors.New("target_host is required when run_on_host is true")
		}
		kind = "agent"
		id = host
	}
	if kind == "" {
		kind = "agent"
	}
	switch kind {
	case "agent", "system-container", "vm":
		// supported
	default:
		return "", "", fmt.Errorf("unsupported target_type %q (allowed: agent, system-container, vm)", kind)
	}
	if vmid := strings.TrimSpace(req.VMID); vmid != "" && (kind == "system-container" || kind == "vm") {
		id = vmid
	}
	if kind == "agent" {
		if id == "" {
			id = host
		}
		if id == "" {
			return "", "", errors.New("target_id or target_host is required for agent commands")
		}
		id = strings.ToLower(id)
	}
	if id == "" {
		return "", "", fmt.Errorf("target_id is required for target_type '%s'", kind)
	}
	return kind, id, nil
}
// maxGuestIDLength is the maximum allowed length (in bytes) for guest_id
// across all knowledge endpoints, preventing abuse via oversized identifiers.
const maxGuestIDLength = 256

// maxImportNotes is the maximum number of notes allowed in a single import
// request. This bounds per-note work even within the 1MB body size limit.
const maxImportNotes = 500
// sanitizeFilenameComponent strips characters unsafe for Content-Disposition
// filenames, keeping only ASCII alphanumerics, '-', '_', and '.'. If nothing
// survives the filter it returns "export" so the header never ends up empty.
func sanitizeFilenameComponent(s string) string {
	safe := func(r rune) bool {
		switch {
		case r >= 'a' && r <= 'z', r >= 'A' && r <= 'Z', r >= '0' && r <= '9':
			return true
		case r == '-', r == '_', r == '.':
			return true
		}
		return false
	}
	var out strings.Builder
	out.Grow(len(s))
	for _, r := range s {
		if safe(r) {
			out.WriteRune(r)
		}
	}
	if out.Len() == 0 {
		return "export"
	}
	return out.String()
}
// HandleGetGuestKnowledge returns all notes for a guest
func (h *AISettingsHandler) HandleGetGuestKnowledge(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	id := r.URL.Query().Get("guest_id")
	switch {
	case id == "":
		http.Error(w, "guest_id is required", http.StatusBadRequest)
		return
	case len(id) > maxGuestIDLength:
		http.Error(w, "guest_id too long", http.StatusBadRequest)
		return
	}
	notes, err := h.GetAIService(r.Context()).GetGuestKnowledge(id)
	if err != nil {
		http.Error(w, sanitizeErrorForClient(err, "Failed to get knowledge"), http.StatusInternalServerError)
		return
	}
	if err := utils.WriteJSONResponse(w, notes); err != nil {
		log.Error().Err(err).Msg("Failed to write knowledge response")
	}
}
// HandleSaveGuestNote saves a note for a guest.
//
// Validates required fields and per-field size limits, persists via the AI
// service, and responds with {"success": true} as application/json.
func (h *AISettingsHandler) HandleSaveGuestNote(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	r.Body = http.MaxBytesReader(w, r.Body, 64*1024) // 64KB max
	var req struct {
		GuestID   string `json:"guest_id"`
		GuestName string `json:"guest_name"`
		GuestType string `json:"guest_type"`
		Category  string `json:"category"`
		Title     string `json:"title"`
		Content   string `json:"content"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.GuestID == "" || req.Category == "" || req.Title == "" || req.Content == "" {
		http.Error(w, "guest_id, category, title, and content are required", http.StatusBadRequest)
		return
	}
	if len(req.GuestID) > maxGuestIDLength {
		http.Error(w, "guest_id too long", http.StatusBadRequest)
		return
	}
	if len(req.Category) > 128 {
		http.Error(w, "category too long", http.StatusBadRequest)
		return
	}
	if len(req.Title) > 1024 {
		http.Error(w, "title too long", http.StatusBadRequest)
		return
	}
	if len(req.Content) > 32*1024 {
		http.Error(w, "content too long", http.StatusBadRequest)
		return
	}
	if err := h.GetAIService(r.Context()).SaveGuestNote(req.GuestID, req.GuestName, req.GuestType, req.Category, req.Title, req.Content); err != nil {
		http.Error(w, sanitizeErrorForClient(err, "Failed to save note"), http.StatusInternalServerError)
		return
	}
	// FIX: declare the JSON content type explicitly; without it Go sniffs the
	// body as text/plain. Headers must be set before WriteHeader.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// Write error is deliberately ignored: the status is already committed.
	_, _ = w.Write([]byte(`{"success": true}`))
}
// HandleDeleteGuestNote deletes a note from a guest.
//
// Responds 404 when the note does not exist, otherwise {"success": true} as
// application/json.
func (h *AISettingsHandler) HandleDeleteGuestNote(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	r.Body = http.MaxBytesReader(w, r.Body, 4*1024) // 4KB max — only needs guest_id + note_id
	var req struct {
		GuestID string `json:"guest_id"`
		NoteID  string `json:"note_id"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.GuestID == "" || req.NoteID == "" {
		http.Error(w, "guest_id and note_id are required", http.StatusBadRequest)
		return
	}
	if len(req.GuestID) > maxGuestIDLength {
		http.Error(w, "guest_id too long", http.StatusBadRequest)
		return
	}
	if len(req.NoteID) > maxGuestIDLength {
		http.Error(w, "note_id too long", http.StatusBadRequest)
		return
	}
	if err := h.GetAIService(r.Context()).DeleteGuestNote(req.GuestID, req.NoteID); err != nil {
		if strings.Contains(err.Error(), "not found") {
			http.Error(w, "Note not found", http.StatusNotFound)
			return
		}
		http.Error(w, sanitizeErrorForClient(err, "Failed to delete note"), http.StatusInternalServerError)
		return
	}
	// FIX: declare the JSON content type explicitly; without it Go sniffs the
	// body as text/plain. Headers must be set before WriteHeader.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	// Write error is deliberately ignored: the status is already committed.
	_, _ = w.Write([]byte(`{"success": true}`))
}
// HandleExportGuestKnowledge exports all knowledge for a guest as JSON
func (h *AISettingsHandler) HandleExportGuestKnowledge(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	id := r.URL.Query().Get("guest_id")
	if id == "" {
		http.Error(w, "guest_id is required", http.StatusBadRequest)
		return
	}
	if len(id) > maxGuestIDLength {
		http.Error(w, "guest_id too long", http.StatusBadRequest)
		return
	}
	knowledge, err := h.GetAIService(r.Context()).GetGuestKnowledge(id)
	if err != nil {
		http.Error(w, sanitizeErrorForClient(err, "Failed to get knowledge"), http.StatusInternalServerError)
		return
	}
	// The guest ID feeds a Content-Disposition filename, so strip anything
	// outside [A-Za-z0-9._-] to rule out header injection.
	filename := "pulse-notes-" + sanitizeFilenameComponent(id) + ".json"
	w.Header().Set("Content-Type", "application/json")
	w.Header().Set("Content-Disposition", "attachment; filename=\""+filename+"\"")
	if err := json.NewEncoder(w).Encode(knowledge); err != nil {
		log.Error().Err(err).Msg("Failed to encode knowledge export")
	}
}
// HandleImportGuestKnowledge imports knowledge from a JSON export.
//
// With merge=false the guest's existing notes are deleted first — but only
// after confirming at least one incoming note passes validation, so a fully
// invalid import cannot wipe existing data. Per-note failures are logged and
// skipped; the response reports imported vs. total counts.
func (h *AISettingsHandler) HandleImportGuestKnowledge(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Limit request body size to 1MB
	r.Body = http.MaxBytesReader(w, r.Body, 1024*1024)
	var importData struct {
		GuestID   string `json:"guest_id"`
		GuestName string `json:"guest_name"`
		GuestType string `json:"guest_type"`
		Notes     []struct {
			Category string `json:"category"`
			Title    string `json:"title"`
			Content  string `json:"content"`
		} `json:"notes"`
		Merge bool `json:"merge"` // If true, add to existing notes; if false, replace
	}
	if err := json.NewDecoder(r.Body).Decode(&importData); err != nil {
		http.Error(w, "Invalid import data", http.StatusBadRequest)
		return
	}
	if importData.GuestID == "" {
		http.Error(w, "guest_id is required in import data", http.StatusBadRequest)
		return
	}
	if len(importData.GuestID) > maxGuestIDLength {
		http.Error(w, "guest_id too long", http.StatusBadRequest)
		return
	}
	if len(importData.Notes) == 0 {
		http.Error(w, "No notes to import", http.StatusBadRequest)
		return
	}
	if len(importData.Notes) > maxImportNotes {
		http.Error(w, fmt.Sprintf("Too many notes: %d exceeds maximum of %d", len(importData.Notes), maxImportNotes), http.StatusBadRequest)
		return
	}
	// Pre-filter valid notes before deleting in replace mode to avoid data loss
	// when all incoming notes fail validation.
	isValidNote := func(n struct {
		Category string `json:"category"`
		Title    string `json:"title"`
		Content  string `json:"content"`
	}) bool {
		if n.Category == "" || n.Title == "" || n.Content == "" {
			return false
		}
		if len(n.Category) > 128 || len(n.Title) > 1024 || len(n.Content) > 32*1024 {
			return false
		}
		return true
	}
	validCount := 0
	for _, note := range importData.Notes {
		if isValidNote(note) {
			validCount++
		}
	}
	if validCount == 0 {
		http.Error(w, "No valid notes to import", http.StatusBadRequest)
		return
	}
	// If not merging, we need to delete existing notes first
	if !importData.Merge {
		existing, err := h.GetAIService(r.Context()).GetGuestKnowledge(importData.GuestID)
		if err == nil && existing != nil {
			for _, note := range existing.Notes {
				_ = h.GetAIService(r.Context()).DeleteGuestNote(importData.GuestID, note.ID)
			}
		}
	}
	// Import each note; invalid or failing notes are skipped, not fatal.
	imported := 0
	for _, note := range importData.Notes {
		if !isValidNote(note) {
			continue
		}
		if err := h.GetAIService(r.Context()).SaveGuestNote(
			importData.GuestID,
			importData.GuestName,
			importData.GuestType,
			note.Category,
			note.Title,
			note.Content,
		); err != nil {
			log.Warn().Err(err).Str("title", note.Title).Msg("Failed to import note")
			continue
		}
		imported++
	}
	w.Header().Set("Content-Type", "application/json")
	// FIX: check the encode error (it was silently dropped), matching the
	// error handling used by every other JSON write in this file.
	if err := json.NewEncoder(w).Encode(map[string]interface{}{
		"success":  true,
		"imported": imported,
		"total":    len(importData.Notes),
	}); err != nil {
		log.Error().Err(err).Msg("Failed to write knowledge import response")
	}
}
// HandleClearGuestKnowledge deletes all notes for a guest (POST).
//
// Requires an explicit "confirm": true in the body as a safety latch against
// accidental bulk deletion. Individual note-delete failures are logged and
// skipped; the response reports how many notes were actually deleted.
func (h *AISettingsHandler) HandleClearGuestKnowledge(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	r.Body = http.MaxBytesReader(w, r.Body, 4*1024) // 4KB max — only needs guest_id + confirm
	var req struct {
		GuestID string `json:"guest_id"`
		Confirm bool   `json:"confirm"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.GuestID == "" {
		http.Error(w, "guest_id is required", http.StatusBadRequest)
		return
	}
	if len(req.GuestID) > maxGuestIDLength {
		http.Error(w, "guest_id too long", http.StatusBadRequest)
		return
	}
	if !req.Confirm {
		http.Error(w, "confirm must be true to clear all notes", http.StatusBadRequest)
		return
	}
	// Resolve the service once, then fetch and delete every note.
	svc := h.GetAIService(r.Context())
	existing, err := svc.GetGuestKnowledge(req.GuestID)
	if err != nil {
		http.Error(w, sanitizeErrorForClient(err, "Failed to get knowledge"), http.StatusInternalServerError)
		return
	}
	deleted := 0
	// Guard against a nil knowledge result (no stored knowledge for this
	// guest) — the import handler performs the same check.
	if existing != nil {
		for _, note := range existing.Notes {
			if err := svc.DeleteGuestNote(req.GuestID, note.ID); err != nil {
				log.Warn().Err(err).Str("note_id", note.ID).Msg("Failed to delete note")
				continue
			}
			deleted++
		}
	}
	w.Header().Set("Content-Type", "application/json")
	if err := json.NewEncoder(w).Encode(map[string]interface{}{
		"success": true,
		"deleted": deleted,
	}); err != nil {
		log.Error().Err(err).Msg("Failed to encode clear-knowledge response")
	}
}
// HandleDebugContext returns the system prompt and context that would be sent
// to the AI for a given target. Useful for debugging when the AI gives
// incorrect information.
func (h *AISettingsHandler) HandleDebugContext(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	query := r.URL.Query()
	// Construct a representative request so the service can render the exact
	// context it would hand to the model for this target.
	sample := ai.ExecuteRequest{
		Prompt:     "Debug context request",
		TargetType: query.Get("target_type"),
		TargetID:   query.Get("target_id"),
	}
	debugInfo := h.GetAIService(r.Context()).GetDebugContext(sample)
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(debugInfo)
}
// HandleGetConnectedAgents returns the list of agents currently connected via
// WebSocket. Useful for debugging when the AI can't reach certain agents.
func (h *AISettingsHandler) HandleGetConnectedAgents(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	type agentInfo struct {
		AgentID     string `json:"agent_id"`
		Hostname    string `json:"hostname"`
		Version     string `json:"version"`
		Platform    string `json:"platform"`
		ConnectedAt string `json:"connected_at"`
	}
	// Snapshot the currently connected agents; nil agentServer simply yields
	// an empty list.
	var connected []agentInfo
	if h.agentServer != nil {
		for _, conn := range h.agentServer.GetConnectedAgents() {
			connected = append(connected, agentInfo{
				AgentID:     conn.AgentID,
				Hostname:    conn.Hostname,
				Version:     conn.Version,
				Platform:    conn.Platform,
				ConnectedAt: conn.ConnectedAt.Format(time.RFC3339),
			})
		}
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"count":  len(connected),
		"agents": connected,
		"note":   "Agents connect via WebSocket to /api/agent/ws. If an agent is missing, check that pulse-agent is installed and can reach the Pulse server.",
	})
}
// AIInvestigateAlertRequest is the request body for POST /api/ai/investigate-alert
type AIInvestigateAlertRequest struct {
	// AlertIdentifier correlates the investigation with a specific alert;
	// may be empty, in which case no incident analysis is recorded.
	AlertIdentifier string  `json:"alertIdentifier"`
	ResourceID      string  `json:"resource_id"`
	ResourceName    string  `json:"resource_name"`
	ResourceType    string  `json:"resource_type"` // canonical v6 resource type
	AlertType       string  `json:"alert_type"`    // cpu, memory, disk, offline, etc.
	Level           string  `json:"level"`         // warning, critical
	Value           float64 `json:"value"`
	Threshold       float64 `json:"threshold"`
	Message         string  `json:"message"`
	Duration        string  `json:"duration"` // How long the alert has been active
	Node            string  `json:"node,omitempty"`
	VMID            int     `json:"vmid,omitempty"`
}

// alertIdentifier returns the alert identifier with surrounding whitespace removed.
func (r AIInvestigateAlertRequest) alertIdentifier() string {
	return strings.TrimSpace(r.AlertIdentifier)
}
// normalizeInvestigateAlertTargetType maps a raw resource-type string onto one
// of the routing targets understood by alert investigation: "vm",
// "system-container", or "agent". An empty type is an error, as is any type
// not in the allowed set.
func normalizeInvestigateAlertTargetType(raw string) (string, error) {
	switch rt := normalizeAITransportResourceType(raw); rt {
	case "":
		return "", errors.New("resource_type is required")
	case "vm":
		return "vm", nil
	case "system-container", "oci-container":
		return "system-container", nil
	case "agent", "node", "docker-host", "app-container", "pod", "k8s-node", "k8s-cluster", "k8s-deployment", "k8s-service", "storage", "disk", "pbs", "pmg", "proxmox", "ceph":
		return "agent", nil
	}
	return "", fmt.Errorf("unsupported resource_type %q (allowed: vm, system-container, oci-container, app-container, pod, agent, node, docker-host, k8s-cluster, k8s-node, k8s-deployment, k8s-service, storage, disk, pbs, pmg, proxmox, ceph)", raw)
}
// HandleInvestigateAlert investigates an alert using AI (POST /api/ai/investigate-alert)
// This is a dedicated endpoint for one-click alert investigation from the UI.
//
// Progress is streamed to the client via Server-Sent Events: heartbeat
// comments keep intermediaries from timing out, each ai.StreamEvent is
// forwarded as a "data:" frame, and a final "complete" (or "error") event
// carries model/token statistics. When an alert identifier is supplied, the
// finished investigation is recorded once for incident history.
func (h *AISettingsHandler) HandleInvestigateAlert(w http.ResponseWriter, r *http.Request) {
	// Handle CORS (including the OPTIONS preflight for SSE clients)
	h.setSSECORSHeaders(w, r)
	if r.Method == http.MethodOptions {
		w.WriteHeader(http.StatusOK)
		return
	}
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	// Resolve the tenant-scoped AI service once; it is used for the enabled
	// check, streaming execution, and incident recording below.
	svc := h.GetAIService(r.Context())
	if !svc.IsEnabled() {
		http.Error(w, "Pulse Assistant is not enabled or configured", http.StatusBadRequest)
		return
	}
	// Parse request (bounded — alert metadata is small)
	r.Body = http.MaxBytesReader(w, r.Body, 16*1024)
	var req AIInvestigateAlertRequest
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	// Build investigation prompt
	alertIdentifier := req.alertIdentifier()
	investigationPrompt := ai.GenerateAlertInvestigationPrompt(ai.AlertInvestigationRequest{
		AlertIdentifier: alertIdentifier,
		ResourceID:      req.ResourceID,
		ResourceName:    req.ResourceName,
		ResourceType:    req.ResourceType,
		AlertType:       req.AlertType,
		Level:           req.Level,
		Value:           req.Value,
		Threshold:       req.Threshold,
		Message:         req.Message,
		Duration:        req.Duration,
		Node:            req.Node,
		VMID:            req.VMID,
	})
	log.Info().
		Str("alert_identifier", alertIdentifier).
		Str("resource", req.ResourceName).
		Str("type", req.AlertType).
		Msg("AI alert investigation started")
	targetType, err := normalizeInvestigateAlertTargetType(req.ResourceType)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Derive the routing target ID: guests route by VMID; host-like resources
	// route by node name or an explicit host-like resource ID.
	resourceType := strings.ToLower(strings.TrimSpace(req.ResourceType))
	targetID := strings.TrimSpace(req.ResourceID)
	if targetType == "vm" || targetType == "system-container" {
		if req.VMID > 0 {
			targetID = strconv.Itoa(req.VMID)
		}
	} else if targetType == "agent" {
		if node := strings.ToLower(strings.TrimSpace(req.Node)); node != "" {
			targetID = node
		} else if resourceType != "app-container" {
			// Keep explicit host-like IDs for node/agent resources.
			targetID = strings.ToLower(strings.TrimSpace(req.ResourceID))
		} else {
			// app-container IDs are container-scoped, not host routing IDs.
			targetID = ""
		}
	}
	// Set up SSE streaming
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	w.Header().Set("X-Accel-Buffering", "no")
	w.Header().Set("Transfer-Encoding", "identity")
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}
	// Disable write/read deadlines for SSE
	rc := http.NewResponseController(w)
	_ = rc.SetWriteDeadline(time.Time{})
	_ = rc.SetReadDeadline(time.Time{})
	flusher.Flush()
	// Create context with timeout. Deliberately detached from the request
	// context so the investigation keeps its own 300s budget.
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)
	defer cancel()
	// Heartbeat routine: keeps the connection alive and detects disconnects.
	heartbeatDone := make(chan struct{})
	var clientDisconnected atomic.Bool
	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				_ = rc.SetWriteDeadline(time.Now().Add(10 * time.Second))
				_, err := w.Write([]byte(": heartbeat\n\n"))
				if err != nil {
					clientDisconnected.Store(true)
					return
				}
				flusher.Flush()
			case <-heartbeatDone:
				return
			}
		}
	}()
	defer close(heartbeatDone)
	// safeWrite writes a frame unless the client has already gone away;
	// a write failure marks the client disconnected for all later writers.
	safeWrite := func(data []byte) bool {
		if clientDisconnected.Load() {
			return false
		}
		_ = rc.SetWriteDeadline(time.Now().Add(10 * time.Second))
		_, err := w.Write(data)
		if err != nil {
			clientDisconnected.Store(true)
			return false
		}
		flusher.Flush()
		return true
	}
	// Stream callback: forward every event except "done" (we emit our own
	// terminal event from the deferred block below).
	callback := func(event ai.StreamEvent) {
		if event.Type == "done" {
			return
		}
		data, err := json.Marshal(event)
		if err != nil {
			return
		}
		safeWrite([]byte("data: " + string(data) + "\n\n"))
	}
	// Always terminate the stream with a "done" event (runs before the
	// heartbeat channel is closed, per LIFO defer order).
	defer func() {
		if !clientDisconnected.Load() {
			doneEvent := ai.StreamEvent{Type: "done"}
			data, _ := json.Marshal(doneEvent)
			safeWrite([]byte("data: " + string(data) + "\n\n"))
		}
	}()
	resp, err := svc.ExecuteStream(ctx, ai.ExecuteRequest{
		Prompt:     investigationPrompt,
		TargetType: targetType,
		TargetID:   targetID,
		Context: map[string]interface{}{
			"alertIdentifier": alertIdentifier,
			"alertType":       req.AlertType,
			"alertLevel":      req.Level,
			"alertMessage":    req.Message,
			"guestName":       req.ResourceName,
			"node":            req.Node,
		},
	}, callback)
	if err != nil {
		log.Error().Err(err).Msg("AI alert investigation failed")
		errEvent := ai.StreamEvent{Type: "error", Data: map[string]string{"message": "Alert investigation failed. Please try again."}}
		data, _ := json.Marshal(errEvent)
		safeWrite([]byte("data: " + string(data) + "\n\n"))
		return
	}
	// Send completion event
	finalEvent := aiExecuteStreamCompleteEvent{
		Type:         "complete",
		Model:        resp.Model,
		InputTokens:  resp.InputTokens,
		OutputTokens: resp.OutputTokens,
		ToolCalls:    resp.ToolCalls,
	}.NormalizeCollections()
	data, _ := json.Marshal(finalEvent)
	safeWrite([]byte("data: " + string(data) + "\n\n"))
	// Record the investigation against the alert exactly once.
	if alertIdentifier != "" {
		svc.RecordIncidentAnalysis(alertIdentifier, "Pulse Assistant alert investigation completed", map[string]interface{}{
			"model":         resp.Model,
			"tool_calls":    len(resp.ToolCalls),
			"input_tokens":  resp.InputTokens,
			"output_tokens": resp.OutputTokens,
		})
	}
	log.Info().
		Str("alert_identifier", alertIdentifier).
		Str("model", resp.Model).
		Int("tool_calls", len(resp.ToolCalls)).
		Msg("AI alert investigation completed")
}
// SetAlertProvider sets the alert provider for AI context.
// The provider is applied to the default-org service as well as every
// tenant-scoped service so multi-tenant deployments stay in sync.
func (h *AISettingsHandler) SetAlertProvider(ap ai.AlertProvider) {
	h.defaultAIService.SetAlertProvider(ap)
	h.aiServicesMu.RLock()
	for _, tenantSvc := range h.aiServices {
		tenantSvc.SetAlertProvider(ap)
	}
	h.aiServicesMu.RUnlock()
}
// SetAlertResolver sets the alert resolver for AI Patrol autonomous alert
// management. The resolver is applied to the default-org service as well as
// every tenant-scoped service so multi-tenant deployments stay in sync.
func (h *AISettingsHandler) SetAlertResolver(resolver ai.AlertResolver) {
	h.defaultAIService.SetAlertResolver(resolver)
	h.aiServicesMu.RLock()
	for _, tenantSvc := range h.aiServices {
		tenantSvc.SetAlertResolver(resolver)
	}
	h.aiServicesMu.RUnlock()
}
// oauthSessionBinding pairs a pending OAuth session with the organization
// that initiated the flow, so the exchange/callback completes against the
// correct tenant's persistence.
type oauthSessionBinding struct {
	session *providers.OAuthSession
	orgID   string
}

// oauthSessionTTL bounds how long a started-but-unfinished OAuth flow stays valid.
const oauthSessionTTL = 15 * time.Minute

// oauthSessions stores active OAuth sessions (state -> session), guarded by
// oauthSessionsMu. In production, consider using a more robust session store
// with expiry.
var oauthSessions = make(map[string]*oauthSessionBinding)
var oauthSessionsMu sync.Mutex

// Function indirections over the providers package — presumably overridable
// in tests; confirm before relying on that.
var exchangeOAuthCodeForTokens = providers.ExchangeCodeForTokens
var createAPIKeyFromOAuth = providers.CreateAPIKeyFromOAuth
// storeOAuthSession records a pending OAuth session keyed by its state value,
// binding it to the organization that initiated the flow. Stale entries
// (older than oauthSessionTTL, or malformed) are pruned opportunistically on
// each store so the map cannot grow without bound.
func storeOAuthSession(session *providers.OAuthSession, orgID string) {
	oauthSessionsMu.Lock()
	defer oauthSessionsMu.Unlock()
	expiredBefore := time.Now().Add(-oauthSessionTTL)
	for key, existing := range oauthSessions {
		stale := existing == nil || existing.session == nil || existing.session.CreatedAt.Before(expiredBefore)
		if stale {
			delete(oauthSessions, key)
		}
	}
	oauthSessions[session.State] = &oauthSessionBinding{session: session, orgID: orgID}
}
// consumeOAuthSession looks up and removes the session for the given state
// (sessions are strictly one-time use). It returns false when the state is
// unknown, the session is malformed or past its TTL, or the bound org ID is
// invalid.
func consumeOAuthSession(state string) (*oauthSessionBinding, bool) {
	oauthSessionsMu.Lock()
	binding, found := oauthSessions[state]
	if found {
		delete(oauthSessions, state) // single use: remove even if validation fails below
	}
	oauthSessionsMu.Unlock()
	switch {
	case !found, binding == nil, binding.session == nil:
		return nil, false
	case time.Since(binding.session.CreatedAt) > oauthSessionTTL:
		return nil, false
	case !isValidOrganizationID(binding.orgID):
		return nil, false
	}
	return binding, true
}
// HandleOAuthStart initiates the OAuth flow for Claude Pro/Max subscription (POST /api/ai/oauth/start)
// Returns an authorization URL for the user to visit manually; the resulting
// code is pasted back through HandleOAuthExchange. The session is bound to the
// tenant (org) that initiated the flow.
func (h *AISettingsHandler) HandleOAuthStart(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet && r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Generate OAuth session (redirect URI is not used since we use Anthropic's callback)
	session, err := providers.GenerateOAuthSession("")
	if err != nil {
		log.Error().Err(err).Msg("Failed to generate OAuth session")
		http.Error(w, "Failed to start OAuth flow", http.StatusInternalServerError)
		return
	}
	orgID := strings.TrimSpace(GetOrgID(r.Context()))
	if !isValidOrganizationID(orgID) {
		orgID = "default"
	}
	// Store session, bound to the tenant context that initiated OAuth.
	storeOAuthSession(session, orgID)
	// Get authorization URL
	authURL := providers.GetAuthorizationURL(session)
	log.Info().
		Str("state", safePrefixForLog(session.State, 8)+"...").
		Str("org_id", orgID).
		Int("verifier_len", len(session.CodeVerifier)). // Int field: avoids the fmt.Sprintf round-trip
		Str("auth_url", authURL).
		Msg("Starting Claude OAuth flow - user must visit URL and paste code back")
	// Return the URL for the user to visit
	response := map[string]string{
		"auth_url": authURL,
		"state":    session.State,
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write OAuth start response")
	}
}
// HandleOAuthExchange exchanges a manually-pasted authorization code for tokens (POST /api/ai/oauth/exchange)
//
// Flow: validate the pasted code/state, consume the one-time OAuth session
// (which also restores the originating tenant), exchange the code for tokens,
// opportunistically mint an API key (Team/Enterprise only — Pro/Max accounts
// lack the scope and use OAuth tokens directly), then persist the credentials
// and reload the tenant's AI service.
func (h *AISettingsHandler) HandleOAuthExchange(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Limit body size — OAuth codes are short strings, 4 KB is plenty.
	r.Body = http.MaxBytesReader(w, r.Body, 4*1024)
	// Parse request body
	var req struct {
		Code  string `json:"code"`
		State string `json:"state"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.Code == "" || req.State == "" {
		http.Error(w, "Missing code or state", http.StatusBadRequest)
		return
	}
	// Trim any whitespace from the code (user might have copied extra spaces)
	code := strings.TrimSpace(req.Code)
	// Anthropic's callback page displays the code as "code#state"
	// We need to extract just the code part before the #
	if idx := strings.Index(code, "#"); idx > 0 {
		code = code[:idx]
	}
	// Log only the length of the code — the authorization code is a
	// credential and must not appear in logs, even truncated.
	log.Debug().
		Int("code_len", len(code)).
		Str("state_prefix", req.State[:min(8, len(req.State))]).
		Msg("Processing OAuth code exchange")
	// Consume one-time OAuth session and enforce TTL.
	binding, ok := consumeOAuthSession(req.State)
	if !ok {
		log.Error().Str("state", req.State[:min(8, len(req.State))]+"...").Msg("OAuth exchange with unknown state")
		http.Error(w, "Invalid or expired session. Please start the OAuth flow again.", http.StatusBadRequest)
		return
	}
	// Re-establish the originating tenant so persistence/service lookups hit
	// the right org regardless of the caller's current context.
	orgID := binding.orgID
	oauthCtx := context.WithValue(r.Context(), OrgIDContextKey, orgID)
	// Exchange code for tokens
	ctx, cancel := context.WithTimeout(oauthCtx, 30*time.Second)
	defer cancel()
	tokens, err := exchangeOAuthCodeForTokens(ctx, code, binding.session)
	if err != nil {
		log.Error().Err(err).Msg("Failed to exchange OAuth code for tokens")
		http.Error(w, "Failed to exchange authorization code", http.StatusBadRequest)
		return
	}
	// Try to create an API key from the OAuth access token
	// Team/Enterprise users get org:create_api_key scope and can create API keys
	// Pro/Max users don't have this scope and will use OAuth tokens directly
	apiKey, err := createAPIKeyFromOAuth(ctx, tokens.AccessToken)
	if err != nil {
		// Check if it's a permission error (Pro/Max users)
		if strings.Contains(err.Error(), "org:create_api_key") || strings.Contains(err.Error(), "403") {
			log.Info().Msg("User doesn't have org:create_api_key permission - will use OAuth tokens directly")
			// This is fine for Pro/Max users - they'll use OAuth tokens
		} else {
			log.Error().Err(err).Msg("Failed to create API key from OAuth token")
			http.Error(w, "Failed to create API key", http.StatusBadRequest)
			return
		}
	}
	if apiKey != "" {
		log.Info().Msg("Successfully created API key from OAuth - using subscription-based billing")
	}
	// Load existing settings
	persistence := h.getPersistence(oauthCtx)
	if persistence == nil {
		http.Error(w, "Tenant persistence unavailable", http.StatusInternalServerError)
		return
	}
	settings, err := h.loadAIConfig(oauthCtx)
	if err != nil {
		log.Error().Err(err).Msg("Failed to load Pulse Assistant settings for OAuth")
		settings = config.NewDefaultAIConfig()
	}
	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}
	// Update settings
	settings.AuthMethod = config.AuthMethodOAuth
	settings.OAuthAccessToken = tokens.AccessToken
	settings.OAuthRefreshToken = tokens.RefreshToken
	settings.OAuthExpiresAt = tokens.ExpiresAt
	settings.Enabled = true
	// If we got an API key, use it; otherwise use OAuth tokens directly
	if apiKey != "" {
		settings.AnthropicAPIKey = apiKey
	} else {
		// Pro/Max users: clear any old API key, will use OAuth client
		settings.ClearAPIKey()
	}
	// Save settings
	if err := persistence.SaveAIConfig(*settings); err != nil {
		log.Error().Err(err).Msg("Failed to save OAuth tokens")
		http.Error(w, "Failed to save OAuth credentials", http.StatusInternalServerError)
		return
	}
	// Reload the AI service with new settings
	if svc := h.GetAIService(oauthCtx); svc != nil {
		if err := svc.Reload(); err != nil {
			log.Warn().Err(err).Msg("Failed to reload AI service after OAuth setup")
		}
	}
	log.Info().Msg("Claude OAuth authentication successful")
	response := map[string]interface{}{
		"success": true,
		"message": "Successfully connected to Claude with your subscription",
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write OAuth exchange response")
	}
}
// HandleOAuthCallback handles the OAuth callback (GET /api/ai/oauth/callback)
// This is kept for backwards compatibility but mainly serves as a fallback.
//
// Unlike HandleOAuthExchange, all failures redirect back to the settings page
// with an ai_oauth_error query parameter rather than returning an HTTP error,
// since the browser is mid-redirect-flow here. Note this path never attempts
// API-key creation — it always stores OAuth tokens directly.
func (h *AISettingsHandler) HandleOAuthCallback(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Get code and state from query params
	code := r.URL.Query().Get("code")
	state := r.URL.Query().Get("state")
	errParam := r.URL.Query().Get("error")
	errDesc := r.URL.Query().Get("error_description")
	// Check for OAuth error reported by the provider
	if errParam != "" {
		log.Error().
			Str("error", errParam).
			Str("description", errDesc).
			Msg("OAuth authorization failed")
		// Redirect to settings page with error (URL-encode to prevent injection)
		http.Redirect(w, r, "/settings?ai_oauth_error="+url.QueryEscape(errParam), http.StatusTemporaryRedirect)
		return
	}
	if code == "" || state == "" {
		log.Error().Msg("OAuth callback missing code or state")
		http.Redirect(w, r, "/settings?ai_oauth_error=missing_params", http.StatusTemporaryRedirect)
		return
	}
	// Consume one-time OAuth session and enforce TTL.
	binding, ok := consumeOAuthSession(state)
	if !ok {
		log.Error().Str("state", state).Msg("OAuth callback with unknown state")
		http.Redirect(w, r, "/settings?ai_oauth_error=invalid_state", http.StatusTemporaryRedirect)
		return
	}
	// Re-establish the tenant that started the flow so settings are saved
	// against the correct org.
	orgID := binding.orgID
	oauthCtx := context.WithValue(r.Context(), OrgIDContextKey, orgID)
	// Exchange code for tokens (bounded by a 30s timeout)
	ctx, cancel := context.WithTimeout(oauthCtx, 30*time.Second)
	defer cancel()
	tokens, err := exchangeOAuthCodeForTokens(ctx, code, binding.session)
	if err != nil {
		log.Error().Err(err).Msg("Failed to exchange OAuth code for tokens")
		http.Redirect(w, r, "/settings?ai_oauth_error=token_exchange_failed", http.StatusTemporaryRedirect)
		return
	}
	// Load existing settings; fall back to defaults on error or nil
	persistence := h.getPersistence(oauthCtx)
	if persistence == nil {
		http.Redirect(w, r, "/settings?ai_oauth_error=save_failed", http.StatusTemporaryRedirect)
		return
	}
	settings, err := h.loadAIConfig(oauthCtx)
	if err != nil {
		log.Error().Err(err).Msg("Failed to load Pulse Assistant settings for OAuth")
		settings = config.NewDefaultAIConfig()
	}
	if settings == nil {
		settings = config.NewDefaultAIConfig()
	}
	// Update settings with OAuth tokens
	settings.AuthMethod = config.AuthMethodOAuth
	settings.OAuthAccessToken = tokens.AccessToken
	settings.OAuthRefreshToken = tokens.RefreshToken
	settings.OAuthExpiresAt = tokens.ExpiresAt
	settings.Enabled = true
	// Clear API key since we're using OAuth
	settings.ClearAPIKey()
	// Save settings
	if err := persistence.SaveAIConfig(*settings); err != nil {
		log.Error().Err(err).Msg("Failed to save OAuth tokens")
		http.Redirect(w, r, "/settings?ai_oauth_error=save_failed", http.StatusTemporaryRedirect)
		return
	}
	// Reload the AI service with new settings
	if svc := h.GetAIService(oauthCtx); svc != nil {
		if err := svc.Reload(); err != nil {
			log.Warn().Err(err).Msg("Failed to reload AI service after OAuth setup")
		}
	}
	log.Info().Msg("Claude OAuth authentication successful")
	// Redirect to settings page with success
	http.Redirect(w, r, "/settings?ai_oauth_success=true", http.StatusTemporaryRedirect)
}
// HandleOAuthDisconnect disconnects OAuth and clears tokens (POST /api/ai/oauth/disconnect)
// Reverts the auth method to API-key mode and reloads the tenant's AI service
// so the credential change takes effect immediately.
func (h *AISettingsHandler) HandleOAuthDisconnect(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Only admins may sever the OAuth connection.
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	ctx := r.Context()
	store := h.getPersistence(ctx)
	if store == nil {
		log.Error().Msg("No persistence available for OAuth disconnect")
		http.Error(w, "Failed to load settings", http.StatusInternalServerError)
		return
	}
	cfg, err := h.loadAIConfig(ctx)
	if err != nil {
		log.Error().Err(err).Msg("Failed to load Pulse Assistant settings for OAuth disconnect")
		http.Error(w, "Failed to load settings", http.StatusInternalServerError)
		return
	}
	if cfg == nil {
		cfg = config.NewDefaultAIConfig()
	}
	// Drop the stored OAuth tokens and fall back to API-key auth.
	cfg.ClearOAuthTokens()
	cfg.AuthMethod = config.AuthMethodAPIKey
	if err := store.SaveAIConfig(*cfg); err != nil {
		log.Error().Err(err).Msg("Failed to save settings after OAuth disconnect")
		http.Error(w, "Failed to save settings", http.StatusInternalServerError)
		return
	}
	// Reload so the running service stops using the cleared credentials.
	if svc := h.GetAIService(ctx); svc != nil {
		if err := svc.Reload(); err != nil {
			log.Warn().Err(err).Msg("Failed to reload AI service after OAuth disconnect")
		}
	}
	log.Info().Msg("Claude OAuth disconnected")
	payload := map[string]interface{}{
		"success": true,
		"message": "OAuth disconnected successfully",
	}
	if err := utils.WriteJSONResponse(w, payload); err != nil {
		log.Error().Err(err).Msg("Failed to write OAuth disconnect response")
	}
}
// PatrolStatusResponse is the response for GET /api/ai/patrol/status
type PatrolStatusResponse struct {
	RuntimeState     ai.PatrolRuntimeState `json:"runtime_state"`
	Running          bool                  `json:"running"`
	Enabled          bool                  `json:"enabled"`
	LastPatrolAt     *time.Time            `json:"last_patrol_at,omitempty"`
	LastActivityAt   *time.Time            `json:"last_activity_at,omitempty"`
	TriggerStatus    *ai.TriggerStatus     `json:"trigger_status,omitempty"`
	NextPatrolAt     *time.Time            `json:"next_patrol_at,omitempty"`
	LastDurationMs   int64                 `json:"last_duration_ms"` // Duration of the last patrol run in milliseconds
	ResourcesChecked int                   `json:"resources_checked"`
	FindingsCount    int                   `json:"findings_count"`
	ErrorCount       int                   `json:"error_count"`
	Healthy          bool                  `json:"healthy"`
	IntervalMs       int64                 `json:"interval_ms"` // Patrol interval in milliseconds
	FixedCount       int                   `json:"fixed_count"` // Number of issues auto-fixed by Patrol
	BlockedReason    string                `json:"blocked_reason,omitempty"`
	BlockedAt        *time.Time            `json:"blocked_at,omitempty"`
	// Quickstart credit info for Patrol quickstart mode
	QuickstartCreditsRemaining int  `json:"quickstart_credits_remaining"`
	QuickstartCreditsTotal     int  `json:"quickstart_credits_total"`
	UsingQuickstart            bool `json:"using_quickstart"`
	// License status for Pro feature gating
	LicenseRequired bool   `json:"license_required"` // True if Pro license needed for full features
	LicenseStatus   string `json:"license_status"`   // "active", "expired", "grace_period", "none"
	UpgradeURL      string `json:"upgrade_url,omitempty"`
	// Summary buckets findings by severity for quick display.
	Summary struct {
		Critical int `json:"critical"`
		Warning  int `json:"warning"`
		Watch    int `json:"watch"`
		Info     int `json:"info"`
	} `json:"summary"`
}
// getPatrolService returns the patrol service for the tenant in ctx, or nil
// when no AI service is available for that tenant.
func (h *AISettingsHandler) getPatrolService(ctx context.Context) *ai.PatrolService {
	if svc := h.GetAIService(ctx); svc != nil {
		return svc.GetPatrolService()
	}
	return nil
}
// writePatrolServiceUnavailableResponse writes the standard 503 error payload
// used whenever the Pulse Patrol service has not been initialized.
func writePatrolServiceUnavailableResponse(w http.ResponseWriter) {
	writeErrorResponse(w, http.StatusServiceUnavailable, "service_unavailable", "Pulse Patrol service not available", nil)
}
// HandleGetPatrolStatus returns the current patrol status (GET /api/ai/patrol/status)
//
// Three response shapes share PatrolStatusResponse:
//   - no AI service yet: safe defaults, runtime state "unavailable", license required
//   - AI service but no patrol service: license info only, runtime state "unavailable"
//   - patrol present: full runtime status, findings summary, and license gating
func (h *AISettingsHandler) HandleGetPatrolStatus(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	aiService := h.GetAIService(r.Context())
	if aiService == nil {
		// Service not initialized (e.g. no persistence/config yet). Return safe defaults.
		response := PatrolStatusResponse{
			RuntimeState:    ai.PatrolRuntimeStateUnavailable,
			Running:         false,
			Enabled:         false,
			Healthy:         true,
			LicenseRequired: true,
			LicenseStatus:   "none",
			UpgradeURL:      upgradeURLForFeatureFromLicensing(featureAIAutoFixValue),
		}
		if err := utils.WriteJSONResponse(w, response); err != nil {
			log.Error().Err(err).Msg("Failed to write patrol status response (no AI service)")
		}
		return
	}
	patrol := aiService.GetPatrolService()
	if patrol == nil {
		// Patrol not initialized — still report license state so the UI can gate features.
		licenseStatus, _ := aiService.GetLicenseState()
		hasAutoFixFeature := aiService.HasLicenseFeature(featureAIAutoFixValue)
		response := PatrolStatusResponse{
			RuntimeState:    ai.PatrolRuntimeStateUnavailable,
			Running:         false,
			Enabled:         false,
			Healthy:         true,
			LicenseRequired: !hasAutoFixFeature,
			LicenseStatus:   licenseStatus,
		}
		if !hasAutoFixFeature {
			response.UpgradeURL = upgradeURLForFeatureFromLicensing(featureAIAutoFixValue)
		}
		if err := utils.WriteJSONResponse(w, response); err != nil {
			log.Error().Err(err).Msg("Failed to write patrol status response")
		}
		return
	}
	status := patrol.GetStatus()
	summary := patrol.GetFindingsSummary()
	// Determine license status for Pro feature gating
	// GetLicenseState returns accurate state: none, active, expired, grace_period
	licenseStatus, _ := aiService.GetLicenseState()
	// Check for auto-fix feature - patrol itself is free, auto-fix requires Pro
	hasAutoFixFeature := aiService.HasLicenseFeature(featureAIAutoFixValue)
	// Get fixed count from investigation orchestrator (zero when absent)
	fixedCount := 0
	if orchestrator := patrol.GetInvestigationOrchestrator(); orchestrator != nil {
		fixedCount = orchestrator.GetFixedCount()
	}
	// Map the patrol runtime status field-for-field onto the wire response.
	response := PatrolStatusResponse{
		RuntimeState:               status.RuntimeState,
		Running:                    status.Running,
		Enabled:                    status.Enabled,
		LastPatrolAt:               status.LastPatrolAt,
		LastActivityAt:             status.LastActivityAt,
		TriggerStatus:              status.TriggerStatus,
		NextPatrolAt:               status.NextPatrolAt,
		LastDurationMs:             status.LastDuration.Milliseconds(),
		ResourcesChecked:           status.ResourcesChecked,
		FindingsCount:              status.FindingsCount,
		ErrorCount:                 status.ErrorCount,
		Healthy:                    status.Healthy,
		IntervalMs:                 status.IntervalMs,
		FixedCount:                 fixedCount,
		BlockedReason:              status.BlockedReason,
		BlockedAt:                  status.BlockedAt,
		QuickstartCreditsRemaining: status.QuickstartCreditsRemaining,
		QuickstartCreditsTotal:     status.QuickstartCreditsTotal,
		UsingQuickstart:            status.UsingQuickstart,
		LicenseRequired:            !hasAutoFixFeature,
		LicenseStatus:              licenseStatus,
	}
	if !hasAutoFixFeature {
		response.UpgradeURL = upgradeURLForFeatureFromLicensing(featureAIAutoFixValue)
	}
	response.Summary.Critical = summary.Critical
	response.Summary.Warning = summary.Warning
	response.Summary.Watch = summary.Watch
	response.Summary.Info = summary.Info
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write patrol status response")
	}
}
// HandleGetIntelligence returns the unified AI intelligence summary (GET /api/ai/intelligence)
// This provides a single endpoint for system-wide AI insights including:
// - Overall health score and grade
// - Active findings summary
// - Upcoming predictions
// - Recent activity
// - Learning progress
// - Resources at risk
//
// With a resource_id query parameter, returns resource-specific intelligence
// instead of the system-wide summary.
func (h *AISettingsHandler) HandleGetIntelligence(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// writeUnavailable emits a 503 with a JSON error payload; used for every
	// branch where the intelligence facade cannot be reached (previously this
	// block was duplicated three times).
	writeUnavailable := func(msg string) {
		w.WriteHeader(http.StatusServiceUnavailable)
		if err := utils.WriteJSONResponse(w, map[string]interface{}{"error": msg}); err != nil {
			log.Error().Err(err).Msg("Failed to write intelligence response")
		}
	}
	aiService := h.GetAIService(r.Context())
	if aiService == nil {
		writeUnavailable("Pulse Patrol service not available")
		return
	}
	patrol := aiService.GetPatrolService()
	if patrol == nil {
		writeUnavailable("Pulse Patrol service not available")
		return
	}
	// Get unified intelligence facade
	intel := patrol.GetIntelligence()
	if intel == nil {
		writeUnavailable("Intelligence not initialized")
		return
	}
	// Check for resource_id query parameter for resource-specific intelligence
	resourceID := r.URL.Query().Get("resource_id")
	if resourceID != "" {
		if len(resourceID) > 500 {
			http.Error(w, "resource_id too long", http.StatusBadRequest)
			return
		}
		// Return resource-specific intelligence
		resourceIntel := intel.GetResourceIntelligence(resourceID)
		if err := utils.WriteJSONResponse(w, resourceIntel); err != nil {
			log.Error().Err(err).Msg("Failed to write resource intelligence response")
		}
		return
	}
	// Return system-wide intelligence summary
	summary := intel.GetSummary()
	if err := utils.WriteJSONResponse(w, summary); err != nil {
		log.Error().Err(err).Msg("Failed to write intelligence summary response")
	}
}
// HandlePatrolStream streams real-time patrol analysis via SSE (GET /api/ai/patrol/stream)
//
// The stream supports resumption via the standard Last-Event-ID header, with a
// last_event_id query parameter as a fallback (EventSource cannot set custom
// headers on reconnect-with-new-instance). A comment heartbeat is written
// every 15 seconds so intermediaries do not idle out the connection.
func (h *AISettingsHandler) HandlePatrolStream(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	aiService := h.GetAIService(r.Context())
	if aiService == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	patrol := aiService.GetPatrolService()
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	// Set SSE headers
	h.setSSECORSHeaders(w, r)
	w.Header().Set("Content-Type", "text/event-stream")
	w.Header().Set("Cache-Control", "no-cache")
	w.Header().Set("Connection", "keep-alive")
	// Disable proxy buffering (e.g. nginx) so events reach clients promptly.
	w.Header().Set("X-Accel-Buffering", "no")
	// SSE needs incremental writes; bail out if the ResponseWriter does not
	// expose http.Flusher (e.g. wrapped by middleware that hides it).
	flusher, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "Streaming not supported", http.StatusInternalServerError)
		return
	}
	// Send an SSE comment to flush headers immediately so clients get the
	// HTTP 200 response right away instead of blocking until the first event.
	if _, err := fmt.Fprintf(w, ": connected\n\n"); err != nil {
		log.Debug().Err(err).Msg("Patrol stream: failed to write initial SSE comment")
		return
	}
	flusher.Flush()
	// Subscribe to patrol stream
	// Note: SubscribeToStream already sends the current buffered output to the channel
	lastID := int64(0)
	if raw := strings.TrimSpace(r.Header.Get("Last-Event-ID")); raw != "" {
		if v, err := strconv.ParseInt(raw, 10, 64); err == nil && v > 0 {
			lastID = v
		}
	}
	// EventSource doesn't allow setting headers manually. Accept a query param fallback
	// so clients can resume even when they create a new EventSource instance.
	if lastID == 0 {
		if raw := strings.TrimSpace(r.URL.Query().Get("last_event_id")); raw != "" {
			if v, err := strconv.ParseInt(raw, 10, 64); err == nil && v > 0 {
				lastID = v
			}
		}
	}
	ch := patrol.SubscribeToStreamFrom(lastID)
	defer patrol.UnsubscribeFromStream(ch)
	// Stream events until client disconnects
	ctx := r.Context()
	heartbeat := time.NewTicker(15 * time.Second)
	defer heartbeat.Stop()
	for {
		select {
		case <-ctx.Done():
			// Client disconnected (or server shutting down); the deferred
			// UnsubscribeFromStream releases the subscription.
			return
		case <-heartbeat.C:
			// SSE comment heartbeat keeps intermediaries from timing out the stream.
			if _, err := fmt.Fprintf(w, ": ping %d\n\n", time.Now().Unix()); err != nil {
				log.Debug().Err(err).Msg("Patrol stream: failed to write heartbeat")
				return
			}
			flusher.Flush()
		case event, ok := <-ch:
			if !ok {
				// Publisher closed the channel; end the stream.
				return
			}
			data, err := json.Marshal(event)
			if err != nil {
				// Skip events that fail to serialize rather than killing the stream.
				continue
			}
			// Best-effort event id for Last-Event-ID support.
			if event.Seq != 0 {
				if _, err := fmt.Fprintf(w, "id: %d\n", event.Seq); err != nil {
					log.Debug().Err(err).Msg("Patrol stream: failed to write event id")
					return
				}
			}
			if _, err := fmt.Fprintf(w, "data: %s\n\n", data); err != nil {
				log.Debug().Err(err).Msg("Patrol stream: failed to write event data")
				return
			}
			flusher.Flush()
		}
	}
}
// HandleGetPatrolFindings returns all active findings (GET /api/ai/patrol/findings)
//
// Supports optional resource_id (scope to one resource) and limit (truncate
// the result list) query parameters. When no AI or patrol service is
// available, an empty list is returned rather than an error.
func (h *AISettingsHandler) HandleGetPatrolFindings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		// No AI service configured: report an empty findings list.
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write patrol findings response (no AI service)")
		}
		return
	}
	patrolSvc := svc.GetPatrolService()
	if patrolSvc == nil {
		// Patrol not running: same empty-list contract as above.
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write patrol findings response")
		}
		return
	}
	// Scope to a single resource when resource_id is supplied.
	var result []*ai.Finding
	if id := r.URL.Query().Get("resource_id"); id != "" {
		result = patrolSvc.GetFindingsForResource(id)
	} else {
		result = patrolSvc.GetAllFindings()
	}
	// Optional limit parameter (for relay proxy clients with body size constraints)
	if raw := r.URL.Query().Get("limit"); raw != "" {
		if n, err := strconv.Atoi(raw); err == nil && n > 0 && n < len(result) {
			result = result[:n]
		}
	}
	if err := utils.WriteJSONResponse(w, result); err != nil {
		log.Error().Err(err).Msg("Failed to write patrol findings response")
	}
}
// HandleForcePatrol triggers an immediate patrol run (POST /api/ai/patrol/run)
//
// Community tier (no ai_autofix license feature) is rate-limited to one run
// per hour; a 429 with the remaining wait time is returned when capped.
func (h *AISettingsHandler) HandleForcePatrol(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require admin authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	patrolSvc := svc.GetPatrolService()
	if patrolSvc == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	// Cadence cap: Community tier is limited to 1 patrol run per hour.
	// Patrol itself is free (ai_patrol), but higher cadence is gated behind Pro/Cloud.
	if !svc.HasLicenseFeature(featureAIAutoFixValue) {
		if lastRun := patrolSvc.GetStatus().LastPatrolAt; lastRun != nil {
			elapsed := time.Since(*lastRun)
			if elapsed < 1*time.Hour {
				wait := (1*time.Hour - elapsed).Round(time.Minute)
				writeErrorResponse(w, http.StatusTooManyRequests, "patrol_rate_limited",
					fmt.Sprintf("Community tier is limited to 1 patrol run per hour. Try again in %s.", wait), nil)
				return
			}
		}
	}
	// Trigger patrol asynchronously
	patrolSvc.ForcePatrol(r.Context())
	if err := utils.WriteJSONResponse(w, map[string]interface{}{
		"success": true,
		"message": "Triggered patrol run",
	}); err != nil {
		log.Error().Err(err).Msg("Failed to write force patrol response")
	}
}
// HandleAcknowledgeFinding acknowledges a finding (POST /api/ai/patrol/acknowledge)
// This marks the finding as seen but keeps it visible (dimmed). Auto-resolve removes it when condition clears.
// This matches alert acknowledgement behavior for UI consistency.
//
// A finding may live in the patrol findings store (AI-generated), the unified
// store (which also holds threshold alerts), or both. The acknowledgement is
// applied to every store that holds it, and the action is recorded to the
// learning store for feedback analytics.
func (h *AISettingsHandler) HandleAcknowledgeFinding(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	var req struct {
		FindingID string `json:"finding_id"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.FindingID == "" {
		http.Error(w, "finding_id is required", http.StatusBadRequest)
		return
	}
	findings := patrol.GetFindings()
	// Try patrol findings store first
	// Capture the finding's metadata before mutating any store so the
	// learning record below reflects the pre-acknowledge state.
	var detectedAt time.Time
	var category, severity, resourceID, findingKey string
	foundInPatrol := false
	finding := findings.Get(req.FindingID)
	if finding != nil {
		foundInPatrol = true
		detectedAt = finding.DetectedAt
		category = string(finding.Category)
		severity = string(finding.Severity)
		resourceID = finding.ResourceID
		findingKey = finding.Key
	}
	// If not in patrol findings, check the unified store (for threshold alerts)
	unifiedStore := h.GetUnifiedStoreForOrg(GetOrgID(r.Context()))
	if !foundInPatrol && unifiedStore != nil {
		unifiedFinding := unifiedStore.Get(req.FindingID)
		if unifiedFinding != nil {
			detectedAt = unifiedFinding.DetectedAt
			category = string(unifiedFinding.Category)
			severity = string(unifiedFinding.Severity)
			resourceID = unifiedFinding.ResourceID
			// Unified findings carry no separate key; the ID doubles as one.
			findingKey = unifiedFinding.ID
		} else {
			http.Error(w, "Finding not found", http.StatusNotFound)
			return
		}
	} else if !foundInPatrol {
		// Not in the patrol store and the unified store is unavailable.
		http.Error(w, "Finding not found", http.StatusNotFound)
		return
	}
	if foundInPatrol {
		// Cancel any pending manual action for this finding first; a
		// conflict means the finding is mid-action and cannot be
		// acknowledged right now.
		if err := patrol.RejectManualActionForRuntimeFinding(req.FindingID, "acknowledged"); err != nil {
			http.Error(w, err.Error(), http.StatusConflict)
			return
		}
	}
	// Acknowledge in patrol findings if it exists there
	if foundInPatrol {
		if !findings.Acknowledge(req.FindingID) {
			http.Error(w, "Finding not found", http.StatusNotFound)
			return
		}
	}
	// Acknowledge in unified store (for both patrol and threshold alerts)
	if unifiedStore != nil {
		unifiedStore.Acknowledge(req.FindingID)
	}
	// Record to learning store
	if learningStore := h.GetLearningStoreForOrg(GetOrgID(r.Context())); learningStore != nil {
		learningStore.RecordFeedback(learning.FeedbackRecord{
			FindingID:    req.FindingID,
			FindingKey:   findingKey,
			ResourceID:   resourceID,
			Category:     category,
			Severity:     severity,
			Action:       learning.ActionAcknowledge,
			TimeToAction: time.Since(detectedAt),
		})
	}
	log.Info().
		Str("finding_id", req.FindingID).
		Msg("AI Patrol: Finding acknowledged by user")
	response := map[string]interface{}{
		"success": true,
		"message": "Finding acknowledged",
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write acknowledge response")
	}
}
// HandleSnoozeFinding snoozes a finding for a specified duration (POST /api/ai/patrol/snooze)
// Snoozed findings are hidden from the active list but will reappear if condition persists after snooze expires
//
// duration_hours must be positive and is silently capped at 168 (7 days).
// Like acknowledge/dismiss, the snooze is applied to the patrol findings
// store and/or the unified store — whichever holds the finding — and the
// action is recorded to the learning store.
func (h *AISettingsHandler) HandleSnoozeFinding(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	var req struct {
		FindingID     string `json:"finding_id"`
		DurationHours int    `json:"duration_hours"`
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.FindingID == "" {
		http.Error(w, "finding_id is required", http.StatusBadRequest)
		return
	}
	if req.DurationHours <= 0 {
		http.Error(w, "duration_hours must be positive", http.StatusBadRequest)
		return
	}
	// Cap snooze duration at 7 days
	if req.DurationHours > 168 {
		req.DurationHours = 168
	}
	findings := patrol.GetFindings()
	duration := time.Duration(req.DurationHours) * time.Hour
	// Try patrol findings store first
	// Capture metadata before mutating any store so the learning record
	// reflects the pre-snooze state.
	var detectedAt time.Time
	var category, severity, resourceID, findingKey string
	foundInPatrol := false
	finding := findings.Get(req.FindingID)
	if finding != nil {
		foundInPatrol = true
		detectedAt = finding.DetectedAt
		category = string(finding.Category)
		severity = string(finding.Severity)
		resourceID = finding.ResourceID
		findingKey = finding.Key
	}
	// If not in patrol findings, check the unified store (for threshold alerts)
	unifiedStore := h.GetUnifiedStoreForOrg(GetOrgID(r.Context()))
	if !foundInPatrol && unifiedStore != nil {
		unifiedFinding := unifiedStore.Get(req.FindingID)
		if unifiedFinding != nil {
			detectedAt = unifiedFinding.DetectedAt
			category = string(unifiedFinding.Category)
			severity = string(unifiedFinding.Severity)
			resourceID = unifiedFinding.ResourceID
			// Unified findings carry no separate key; the ID doubles as one.
			findingKey = unifiedFinding.ID
		} else {
			http.Error(w, "Finding not found or already resolved", http.StatusNotFound)
			return
		}
	} else if !foundInPatrol {
		// Not in the patrol store and the unified store is unavailable.
		http.Error(w, "Finding not found or already resolved", http.StatusNotFound)
		return
	}
	if foundInPatrol {
		// Cancel any pending manual action first; a conflict means the
		// finding is mid-action and cannot be snoozed right now.
		if err := patrol.RejectManualActionForRuntimeFinding(req.FindingID, "snoozed"); err != nil {
			http.Error(w, err.Error(), http.StatusConflict)
			return
		}
	}
	// Snooze in patrol findings if it exists there
	if foundInPatrol {
		if !findings.Snooze(req.FindingID, duration) {
			http.Error(w, "Finding not found or already resolved", http.StatusNotFound)
			return
		}
	}
	// Snooze in unified store (for both patrol and threshold alerts)
	if unifiedStore != nil {
		unifiedStore.Snooze(req.FindingID, duration)
	}
	// Record to learning store
	if learningStore := h.GetLearningStoreForOrg(GetOrgID(r.Context())); learningStore != nil {
		learningStore.RecordFeedback(learning.FeedbackRecord{
			FindingID:    req.FindingID,
			FindingKey:   findingKey,
			ResourceID:   resourceID,
			Category:     category,
			Severity:     severity,
			Action:       learning.ActionSnooze,
			TimeToAction: time.Since(detectedAt),
		})
	}
	log.Info().
		Str("finding_id", req.FindingID).
		Int("hours", req.DurationHours).
		Msg("AI Patrol: Finding snoozed by user")
	response := map[string]interface{}{
		"success": true,
		"message": fmt.Sprintf("Finding snoozed for %d hours", req.DurationHours),
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write snooze response")
	}
}
// HandleResolveFinding manually marks a finding as resolved (POST /api/ai/patrol/resolve)
// Use this when the user has fixed the issue and wants to mark it as resolved
//
// The resolution is mirrored into the unified store and recorded to the
// learning store as a quick-fix action (the user actively addressed it).
func (h *AISettingsHandler) HandleResolveFinding(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	var body struct {
		FindingID string `json:"finding_id"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if body.FindingID == "" {
		http.Error(w, "finding_id is required", http.StatusBadRequest)
		return
	}
	findingsStore := patrol.GetFindings()
	// Look the finding up first so its details survive for learning/analytics.
	target := findingsStore.Get(body.FindingID)
	if target == nil {
		http.Error(w, "Finding not found or already resolved", http.StatusNotFound)
		return
	}
	if err := patrol.RejectManualActionForRuntimeFinding(body.FindingID, "resolved"); err != nil {
		http.Error(w, err.Error(), http.StatusConflict)
		return
	}
	// Snapshot fields before mutating the store.
	firstSeen := target.DetectedAt
	cat := string(target.Category)
	sev := string(target.Severity)
	resID := target.ResourceID
	// Mark as manually resolved (auto=false since user did it)
	if !findingsStore.Resolve(body.FindingID, false) {
		http.Error(w, "Finding not found or already resolved", http.StatusNotFound)
		return
	}
	// Mirror into unified store for consistent UI state
	orgID := GetOrgID(r.Context())
	if ustore := h.GetUnifiedStoreForOrg(orgID); ustore != nil {
		ustore.Resolve(body.FindingID)
	}
	// Record to learning store - manual resolve = user fixed the issue
	if ls := h.GetLearningStoreForOrg(orgID); ls != nil {
		ls.RecordFeedback(learning.FeedbackRecord{
			FindingID:    body.FindingID,
			FindingKey:   target.Key,
			ResourceID:   resID,
			Category:     cat,
			Severity:     sev,
			Action:       learning.ActionQuickFix, // Manual resolve means user took action to fix
			TimeToAction: time.Since(firstSeen),
		})
	}
	log.Info().
		Str("finding_id", body.FindingID).
		Msg("AI Patrol: Finding manually resolved by user")
	if err := utils.WriteJSONResponse(w, map[string]interface{}{
		"success": true,
		"message": "Finding marked as resolved",
	}); err != nil {
		log.Error().Err(err).Msg("Failed to write resolve response")
	}
}
// HandleSetFindingNote sets or updates a user note on a finding (POST /api/ai/patrol/findings/note)
// Notes provide context that Patrol sees on future runs (e.g., "PBS server was decommissioned").
func (h *AISettingsHandler) HandleSetFindingNote(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	var body struct {
		FindingID string `json:"finding_id"`
		Note      string `json:"note"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if body.FindingID == "" {
		http.Error(w, "finding_id is required", http.StatusBadRequest)
		return
	}
	if !patrol.GetFindings().SetUserNote(body.FindingID, body.Note) {
		http.Error(w, "Finding not found", http.StatusNotFound)
		return
	}
	// Mirror the note to the unified store immediately so it's visible
	// without waiting for the next patrol sync cycle
	if ustore := h.GetUnifiedStoreForOrg(GetOrgID(r.Context())); ustore != nil {
		ustore.SetUserNote(body.FindingID, body.Note)
	}
	if err := utils.WriteJSONResponse(w, map[string]interface{}{
		"success": true,
		"message": "Note updated",
	}); err != nil {
		log.Error().Err(err).Msg("Failed to write set-note response")
	}
}
// HandleDismissFinding dismisses a finding with a reason and optional note (POST /api/ai/patrol/dismiss)
// This is part of the LLM memory system - dismissed findings are included in context to prevent re-raising
// Valid reasons: "not_an_issue", "expected_behavior", "will_fix_later"
//
// A finding may live in the patrol findings store (AI-generated), the unified
// store (threshold alerts), or both; the dismissal is applied wherever the
// finding lives and the action is recorded to the learning store.
func (h *AISettingsHandler) HandleDismissFinding(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	var req struct {
		FindingID string `json:"finding_id"`
		Reason    string `json:"reason"` // "not_an_issue", "expected_behavior", "will_fix_later"
		Note      string `json:"note"`   // Optional freeform note
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if req.FindingID == "" {
		http.Error(w, "finding_id is required", http.StatusBadRequest)
		return
	}
	if req.Reason == "" {
		http.Error(w, "reason is required", http.StatusBadRequest)
		return
	}
	// Validate reason. (The previous `req.Reason != "" && ...` guard was
	// redundant: an empty reason is already rejected above.)
	switch req.Reason {
	case "not_an_issue", "expected_behavior", "will_fix_later":
		// valid
	default:
		http.Error(w, "Invalid reason. Valid values: not_an_issue, expected_behavior, will_fix_later", http.StatusBadRequest)
		return
	}
	findings := patrol.GetFindings()
	// Try patrol findings store first, capturing the metadata needed for the
	// learning record before any store is mutated.
	var detectedAt time.Time
	var category, severity, resourceID, findingKey string
	foundInPatrol := false
	finding := findings.Get(req.FindingID)
	if finding != nil {
		foundInPatrol = true
		detectedAt = finding.DetectedAt
		category = string(finding.Category)
		severity = string(finding.Severity)
		resourceID = finding.ResourceID
		findingKey = finding.Key
	}
	// If not in patrol findings, check the unified store (for threshold alerts)
	unifiedStore := h.GetUnifiedStoreForOrg(GetOrgID(r.Context()))
	if !foundInPatrol && unifiedStore != nil {
		unifiedFinding := unifiedStore.Get(req.FindingID)
		if unifiedFinding != nil {
			detectedAt = unifiedFinding.DetectedAt
			category = string(unifiedFinding.Category)
			severity = string(unifiedFinding.Severity)
			resourceID = unifiedFinding.ResourceID
			findingKey = unifiedFinding.ID // Use ID as key for unified findings
		} else {
			http.Error(w, "Finding not found", http.StatusNotFound)
			return
		}
	} else if !foundInPatrol {
		// Not in the patrol store and the unified store is unavailable.
		http.Error(w, "Finding not found", http.StatusNotFound)
		return
	}
	if foundInPatrol {
		// Cancel any pending manual action first; a conflict means the
		// finding is mid-action and cannot be dismissed right now.
		if err := patrol.RejectManualActionForRuntimeFinding(req.FindingID, "dismissed"); err != nil {
			http.Error(w, err.Error(), http.StatusConflict)
			return
		}
		// Dismiss in patrol findings if it exists there
		if !findings.Dismiss(req.FindingID, req.Reason, req.Note) {
			http.Error(w, "Finding not found", http.StatusNotFound)
			return
		}
	}
	// Dismiss in unified store (for both patrol and threshold alerts)
	if unifiedStore != nil {
		unifiedStore.Dismiss(req.FindingID, req.Reason, req.Note)
	}
	// Map dismiss reason to learning action
	var learningAction learning.UserAction
	switch req.Reason {
	case "expected_behavior":
		learningAction = learning.ActionDismissExpected
	case "will_fix_later":
		learningAction = learning.ActionDismissWillFixLater
	default:
		// "not_an_issue" is the only remaining value after validation above.
		learningAction = learning.ActionDismissNotAnIssue
	}
	// Record to learning store
	if learningStore := h.GetLearningStoreForOrg(GetOrgID(r.Context())); learningStore != nil {
		learningStore.RecordFeedback(learning.FeedbackRecord{
			FindingID:    req.FindingID,
			FindingKey:   findingKey,
			ResourceID:   resourceID,
			Category:     category,
			Severity:     severity,
			Action:       learningAction,
			UserNote:     req.Note,
			TimeToAction: time.Since(detectedAt),
		})
	}
	log.Info().
		Str("finding_id", req.FindingID).
		Str("reason", req.Reason).
		Bool("has_note", req.Note != "").
		Msg("AI Patrol: Finding dismissed by user with reason")
	response := map[string]interface{}{
		"success": true,
		"message": "Finding dismissed",
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write dismiss response")
	}
}
// HandleSuppressFinding permanently suppresses similar findings for a resource (POST /api/ai/patrol/suppress)
// The LLM will be told never to re-raise findings of this type for this resource
//
// The suppression is mirrored into the unified store as a permanent
// "not_an_issue" dismissal and recorded to the learning store.
func (h *AISettingsHandler) HandleSuppressFinding(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	var body struct {
		FindingID string `json:"finding_id"`
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	if body.FindingID == "" {
		http.Error(w, "finding_id is required", http.StatusBadRequest)
		return
	}
	findingsStore := patrol.GetFindings()
	// Look the finding up first so its details survive for learning/analytics.
	target := findingsStore.Get(body.FindingID)
	if target == nil {
		http.Error(w, "Finding not found", http.StatusNotFound)
		return
	}
	if err := patrol.RejectManualActionForRuntimeFinding(body.FindingID, "suppressed"); err != nil {
		http.Error(w, err.Error(), http.StatusConflict)
		return
	}
	// Snapshot fields before mutating the store.
	firstSeen := target.DetectedAt
	cat := string(target.Category)
	sev := string(target.Severity)
	resID := target.ResourceID
	if !findingsStore.Suppress(body.FindingID) {
		http.Error(w, "Finding not found", http.StatusNotFound)
		return
	}
	// Mirror into unified store for consistent UI state
	orgID := GetOrgID(r.Context())
	if ustore := h.GetUnifiedStoreForOrg(orgID); ustore != nil {
		ustore.Dismiss(body.FindingID, "not_an_issue", "Permanently suppressed by user")
	}
	// Record to learning store - suppress is a strong "not an issue" signal
	if ls := h.GetLearningStoreForOrg(orgID); ls != nil {
		ls.RecordFeedback(learning.FeedbackRecord{
			FindingID:    body.FindingID,
			FindingKey:   target.Key,
			ResourceID:   resID,
			Category:     cat,
			Severity:     sev,
			Action:       learning.ActionDismissNotAnIssue, // Suppress = permanent dismissal
			UserNote:     "Permanently suppressed by user",
			TimeToAction: time.Since(firstSeen),
		})
	}
	log.Info().
		Str("finding_id", body.FindingID).
		Msg("AI Patrol: Finding type permanently suppressed by user")
	if err := utils.WriteJSONResponse(w, map[string]interface{}{
		"success": true,
		"message": "Finding type suppressed - similar issues will not be raised again",
	}); err != nil {
		log.Error().Err(err).Msg("Failed to write suppress response")
	}
}
// HandleClearAllFindings clears all AI findings (DELETE /api/ai/patrol/findings)
// This allows users to clear accumulated findings, especially useful for users who
// accumulated findings before the patrol-without-AI bug was fixed.
func (h *AISettingsHandler) HandleClearAllFindings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodDelete {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication (already wrapped by RequireAuth in router)
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	// Destructive operation: demand an explicit confirmation flag.
	if r.URL.Query().Get("confirm") != "true" {
		http.Error(w, "confirm=true query parameter required", http.StatusBadRequest)
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	cleared := patrol.GetFindings().ClearAll()
	log.Info().Int("count", cleared).Msg("Cleared all AI findings")
	if err := utils.WriteJSONResponse(w, map[string]interface{}{
		"success": true,
		"cleared": cleared,
		"message": fmt.Sprintf("Cleared %d findings", cleared),
	}); err != nil {
		log.Error().Err(err).Msg("Failed to write clear findings response")
	}
}
// HandleGetFindingsHistory returns all findings including resolved for history (GET /api/ai/patrol/history)
//
// An optional start_time query parameter (RFC 3339) filters the history;
// unparseable values are ignored. Without a patrol service the history is
// reported as an empty list.
func (h *AISettingsHandler) HandleGetFindingsHistory(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		// No patrol service: history is empty by definition.
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write findings history response")
		}
		return
	}
	// Parse optional startTime query parameter
	var since *time.Time
	if raw := r.URL.Query().Get("start_time"); raw != "" {
		if parsed, err := time.Parse(time.RFC3339, raw); err == nil {
			since = &parsed
		}
	}
	history := patrol.GetFindingsHistory(since)
	if err := utils.WriteJSONResponse(w, history); err != nil {
		log.Error().Err(err).Msg("Failed to write findings history response")
	}
}
// HandleGetPatrolRunHistory returns the history of patrol check runs (GET /api/ai/patrol/runs)
//
// limit defaults to 30 and is capped at 100. Tool call arrays are omitted
// unless ?include=tool_calls is given, to keep payloads lean.
func (h *AISettingsHandler) HandleGetPatrolRunHistory(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		// No AI service configured: report an empty run history.
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write patrol run history response (no AI service)")
		}
		return
	}
	patrolSvc := svc.GetPatrolService()
	if patrolSvc == nil {
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write patrol run history response")
		}
		return
	}
	// Parse optional limit query parameter (default: 30)
	limit := 30
	if raw := r.URL.Query().Get("limit"); raw != "" {
		if n, err := strconv.Atoi(raw); err == nil && n > 0 {
			limit = n
			if limit > 100 {
				limit = 100 // Cap at MaxPatrolRunHistory
			}
		}
	}
	runs := patrolSvc.GetRunHistory(limit)
	// By default, omit full tool call arrays to keep payloads lean.
	// Use ?include=tool_calls to get the full array.
	if r.URL.Query().Get("include") != "tool_calls" {
		for i := range runs {
			runs[i].ToolCalls = nil
		}
	}
	if err := utils.WriteJSONResponse(w, runs); err != nil {
		log.Error().Err(err).Msg("Failed to write patrol run history response")
	}
}
// HandleGetPatrolRun returns a single patrol run by ID (GET /api/ai/patrol/runs/{id})
//
// The run ID is taken from the URL path suffix and percent-decoded
// best-effort. Tool call arrays are omitted unless ?include=tool_calls.
func (h *AISettingsHandler) HandleGetPatrolRun(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	const prefix = "/api/ai/patrol/runs/"
	if !strings.HasPrefix(r.URL.Path, prefix) {
		http.Error(w, "Invalid path", http.StatusBadRequest)
		return
	}
	runID := strings.TrimPrefix(r.URL.Path, prefix)
	if runID == "" {
		http.Error(w, "run_id is required", http.StatusBadRequest)
		return
	}
	// Best-effort percent-decode; an undecodable ID is used as-is.
	if decoded, err := url.PathUnescape(runID); err == nil {
		runID = decoded
	}
	if strings.TrimSpace(runID) == "" {
		http.Error(w, "run_id is required", http.StatusBadRequest)
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		writeErrorResponse(
			w,
			http.StatusServiceUnavailable,
			"service_unavailable",
			"Pulse Patrol service not available",
			nil,
		)
		return
	}
	patrolSvc := svc.GetPatrolService()
	if patrolSvc == nil {
		writeErrorResponse(
			w,
			http.StatusServiceUnavailable,
			"service_unavailable",
			"Pulse Patrol service not available",
			nil,
		)
		return
	}
	run, found := patrolSvc.GetRunByID(runID)
	if !found {
		writeErrorResponse(w, http.StatusNotFound, "not_found", "Patrol run not found", nil)
		return
	}
	// By default, omit full tool call arrays to keep payloads lean.
	// Use ?include=tool_calls to get the full array.
	if r.URL.Query().Get("include") != "tool_calls" {
		run.ToolCalls = nil
	}
	if err := utils.WriteJSONResponse(w, run); err != nil {
		log.Error().Err(err).Msg("Failed to write patrol run response")
	}
}
// HandleGetAICostSummary returns AI usage rollups (GET /api/ai/cost/summary?days=N).
//
// days defaults to 30 and is clamped to at most 365. When no AI service is
// configured, an empty summary for the requested window is returned so the
// UI can still render a zeroed dashboard.
func (h *AISettingsHandler) HandleGetAICostSummary(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Parse optional days query parameter (default: 30, max: 365).
	// strconv.Atoi matches how the other handlers in this file parse integer
	// query params; the previous fmt.Sscanf accepted values with trailing
	// garbage (e.g. "10abc"), which Atoi rejects outright.
	days := 30
	if daysStr := r.URL.Query().Get("days"); daysStr != "" {
		if parsed, err := strconv.Atoi(daysStr); err == nil && parsed > 0 {
			days = parsed
			if days > 365 {
				days = 365
			}
		}
	}
	// Resolve the service once: the previous code called GetAIService twice,
	// so the nil check and the use could observe different values if the
	// service was swapped out in between.
	var summary cost.Summary
	if svc := h.GetAIService(r.Context()); svc != nil {
		summary = svc.GetCostSummary(days)
	} else {
		summary = cost.EmptySummary(days, cost.DefaultMaxDays, days, false)
	}
	if err := utils.WriteJSONResponse(w, summary); err != nil {
		log.Error().Err(err).Msg("Failed to write AI cost summary response")
	}
}
// HandleResetAICostHistory deletes retained AI usage events (POST /api/ai/cost/reset).
//
// Before clearing, any on-disk ai_usage_history.json is renamed to a
// timestamped .bak file so the data can be recovered; when a backup was
// created its filename is included in the JSON response as "backup_file".
func (h *AISettingsHandler) HandleResetAICostHistory(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Limit body size — this endpoint takes no body but cap it to prevent abuse.
	r.Body = http.MaxBytesReader(w, r.Body, 1024)
	// Resolve the service once: the previous code looked it up three times,
	// so the nil check and the later ClearCostHistory call could observe
	// different values if the service was swapped out in between.
	svc := h.GetAIService(r.Context())
	if svc == nil {
		http.Error(w, "Pulse Assistant service unavailable", http.StatusServiceUnavailable)
		return
	}
	backupFile := ""
	if persistence := h.getPersistence(r.Context()); persistence != nil {
		configDir := persistence.DataDir()
		if strings.TrimSpace(configDir) != "" {
			usagePath := filepath.Join(configDir, "ai_usage_history.json")
			if _, err := os.Stat(usagePath); err == nil {
				// Move the history file aside rather than deleting it outright.
				ts := time.Now().UTC().Format("20060102-150405")
				backupFile = fmt.Sprintf("ai_usage_history.json.bak-%s", ts)
				backupPath := filepath.Join(configDir, backupFile)
				if err := os.Rename(usagePath, backupPath); err != nil {
					log.Error().Err(err).Str("path", usagePath).Msg("Failed to backup Pulse Assistant usage history before reset")
					http.Error(w, "Failed to backup Pulse Assistant usage history", http.StatusInternalServerError)
					return
				}
			}
		}
	}
	if err := svc.ClearCostHistory(); err != nil {
		log.Error().Err(err).Msg("Failed to clear Pulse Assistant usage history")
		http.Error(w, "Failed to clear Pulse Assistant usage history", http.StatusInternalServerError)
		return
	}
	resp := map[string]any{"ok": true}
	if backupFile != "" {
		resp["backup_file"] = backupFile
	}
	if err := utils.WriteJSONResponse(w, resp); err != nil {
		log.Error().Err(err).Msg("Failed to write clear cost history response")
	}
}
// HandleExportAICostHistory exports recent AI usage history as JSON or CSV (GET /api/ai/cost/export?days=N&format=csv|json).
//
// "days" defaults to 30 and is clamped to [1, 365]; "format" defaults to json.
// Each exported row includes a best-effort USD estimate plus a flag telling
// whether pricing for the provider/model pair was actually known.
func (h *AISettingsHandler) HandleExportAICostHistory(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Resolve the service once so the later ListCostEvents call cannot hit a
	// nil service if the org-scoped instance disappears after the check.
	svc := h.GetAIService(r.Context())
	if svc == nil {
		http.Error(w, "Pulse Assistant service unavailable", http.StatusServiceUnavailable)
		return
	}
	days := 30
	if daysStr := r.URL.Query().Get("days"); daysStr != "" {
		if v, err := strconv.Atoi(daysStr); err == nil && v > 0 {
			if v > 365 {
				v = 365
			}
			days = v
		}
	}
	format := strings.ToLower(strings.TrimSpace(r.URL.Query().Get("format")))
	if format == "" {
		format = "json"
	}
	if format != "json" && format != "csv" {
		http.Error(w, "format must be 'json' or 'csv'", http.StatusBadRequest)
		return
	}
	events := svc.ListCostEvents(days)
	filename := fmt.Sprintf("pulse-ai-usage-%s-%dd.%s", time.Now().UTC().Format("20060102"), days, format)
	w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%q", filename))
	if format == "json" {
		w.Header().Set("Content-Type", "application/json")
		// exportEvent augments the stored usage event with a derived USD
		// estimate; PricingKnown is false when the estimate is a guess.
		type exportEvent struct {
			cost.UsageEvent
			EstimatedUSD float64 `json:"estimated_usd,omitempty"`
			PricingKnown bool    `json:"pricing_known"`
		}
		exported := make([]exportEvent, 0, len(events))
		for _, e := range events {
			provider, model := cost.ResolveProviderAndModel(e.Provider, e.RequestModel, e.ResponseModel)
			usd, ok, _ := cost.EstimateUSD(provider, model, int64(e.InputTokens), int64(e.OutputTokens))
			exported = append(exported, exportEvent{
				UsageEvent:   e,
				EstimatedUSD: usd,
				PricingKnown: ok,
			})
		}
		resp := map[string]any{
			"days":   days,
			"events": exported,
		}
		if err := json.NewEncoder(w).Encode(resp); err != nil {
			log.Error().Err(err).Msg("Failed to write AI cost export JSON")
		}
		return
	}
	w.Header().Set("Content-Type", "text/csv")
	cw := csv.NewWriter(w)
	// csv.Writer accumulates write errors; they are surfaced once via
	// cw.Error() after Flush, so individual Write errors are ignored here.
	_ = cw.Write([]string{
		"timestamp",
		"provider",
		"request_model",
		"response_model",
		"use_case",
		"input_tokens",
		"output_tokens",
		"estimated_usd",
		"pricing_known",
		"target_type",
		"target_id",
		"finding_id",
	})
	for _, e := range events {
		provider, model := cost.ResolveProviderAndModel(e.Provider, e.RequestModel, e.ResponseModel)
		usd, ok, _ := cost.EstimateUSD(provider, model, int64(e.InputTokens), int64(e.OutputTokens))
		_ = cw.Write([]string{
			e.Timestamp.UTC().Format(time.RFC3339Nano),
			e.Provider,
			e.RequestModel,
			e.ResponseModel,
			e.UseCase,
			strconv.Itoa(e.InputTokens),
			strconv.Itoa(e.OutputTokens),
			strconv.FormatFloat(usd, 'f', 6, 64),
			strconv.FormatBool(ok),
			e.TargetType,
			e.TargetID,
			e.FindingID,
		})
	}
	cw.Flush()
	if err := cw.Error(); err != nil {
		log.Error().Err(err).Msg("Failed to write AI cost export CSV")
	}
}
// HandleGetSuppressionRules returns all suppression rules (GET /api/ai/patrol/suppressions)
func (h *AISettingsHandler) HandleGetSuppressionRules(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Only authenticated callers may read suppression rules.
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		// Without a patrol service there are no rules; report an empty set.
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write suppression rules response")
		}
		return
	}
	rules := patrol.GetFindings().GetSuppressionRules()
	if err := utils.WriteJSONResponse(w, rules); err != nil {
		log.Error().Err(err).Msg("Failed to write suppression rules response")
	}
}
// HandleAddSuppressionRule creates a new suppression rule (POST /api/ai/patrol/suppressions)
//
// Body: {resource_id?, resource_name?, category?, description}. An empty
// resource_id or category means "any"; description is required and records
// the user's reason for suppressing findings.
func (h *AISettingsHandler) HandleAddSuppressionRule(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Require authentication
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	// Cap the request body, consistent with other POST endpoints on this handler.
	r.Body = http.MaxBytesReader(w, r.Body, 1<<20)
	var req struct {
		ResourceID   string `json:"resource_id"`   // Can be empty for "any resource"
		ResourceName string `json:"resource_name"` // Human-readable name
		Category     string `json:"category"`      // Can be empty for "any category"
		Description  string `json:"description"`   // Required - user's reason
	}
	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
		http.Error(w, "Invalid request body", http.StatusBadRequest)
		return
	}
	// Reject blank (including whitespace-only) descriptions: the description
	// is the audit trail explaining why findings are being suppressed.
	if strings.TrimSpace(req.Description) == "" {
		http.Error(w, "description is required", http.StatusBadRequest)
		return
	}
	// Convert category string to FindingCategory
	var category ai.FindingCategory
	switch req.Category {
	case "performance":
		category = ai.FindingCategoryPerformance
	case "capacity":
		category = ai.FindingCategoryCapacity
	case "reliability":
		category = ai.FindingCategoryReliability
	case "backup":
		category = ai.FindingCategoryBackup
	case "security":
		category = ai.FindingCategorySecurity
	case "general":
		category = ai.FindingCategoryGeneral
	case "":
		category = "" // Any category
	default:
		http.Error(w, "Invalid category", http.StatusBadRequest)
		return
	}
	rule := patrol.GetFindings().AddSuppressionRule(req.ResourceID, req.ResourceName, category, req.Description)
	log.Info().
		Str("rule_id", rule.ID).
		Str("resource_id", req.ResourceID).
		Str("category", req.Category).
		Str("description", req.Description).
		Msg("AI Patrol: Manual suppression rule created")
	response := map[string]interface{}{
		"success": true,
		"message": "Suppression rule created",
		"rule":    rule,
	}
	if err := utils.WriteJSONResponse(w, response); err != nil {
		log.Error().Err(err).Msg("Failed to write add suppression rule response")
	}
}
// HandleDeleteSuppressionRule removes a suppression rule (DELETE /api/ai/patrol/suppressions/:id)
func (h *AISettingsHandler) HandleDeleteSuppressionRule(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodDelete {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Only authenticated callers may delete suppression rules.
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		writePatrolServiceUnavailableResponse(w)
		return
	}
	// The rule ID is the final path segment after the fixed prefix.
	ruleID := strings.TrimPrefix(r.URL.Path, "/api/ai/patrol/suppressions/")
	if ruleID == "" {
		http.Error(w, "rule_id is required", http.StatusBadRequest)
		return
	}
	if ok := patrol.GetFindings().DeleteSuppressionRule(ruleID); !ok {
		http.Error(w, "Rule not found", http.StatusNotFound)
		return
	}
	log.Info().Str("rule_id", ruleID).Msg("AI Patrol: Suppression rule deleted")
	resp := map[string]interface{}{
		"success": true,
		"message": "Suppression rule deleted",
	}
	if err := utils.WriteJSONResponse(w, resp); err != nil {
		log.Error().Err(err).Msg("Failed to write delete suppression rule response")
	}
}
// HandleGetDismissedFindings returns all dismissed/suppressed findings (GET /api/ai/patrol/dismissed)
func (h *AISettingsHandler) HandleGetDismissedFindings(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Only authenticated callers may read dismissed findings.
	if !CheckAuth(h.getConfig(r.Context()), w, r) {
		return
	}
	patrol := h.getPatrolService(r.Context())
	if patrol == nil {
		// Without a patrol service there is nothing dismissed; return an empty list.
		if err := utils.WriteJSONResponse(w, []interface{}{}); err != nil {
			log.Error().Err(err).Msg("Failed to write dismissed findings response")
		}
		return
	}
	dismissed := patrol.GetFindings().GetDismissedFindings()
	if err := utils.WriteJSONResponse(w, dismissed); err != nil {
		log.Error().Err(err).Msg("Failed to write dismissed findings response")
	}
}
// getAuthUsername extracts the username from the current auth context.
// Sources are checked in precedence order: API token, OIDC session cookie,
// trusted proxy auth, then the configured basic-auth user. An empty string
// means single-user mode without authentication.
func getAuthUsername(cfg *config.Config, r *http.Request) string {
	// API token identity wins over any session-based auth.
	if token := getAPITokenRecordFromRequest(r); token != nil {
		if name := apiTokenAuthenticatedUser(token); name != "" {
			return name
		}
	}
	// Next, an OIDC session cookie.
	if cookie, err := readSessionCookie(r); err == nil && cookie.Value != "" {
		if name := GetSessionUsername(cookie.Value); name != "" {
			return name
		}
	}
	// Then trusted proxy auth, when configured.
	if cfg.ProxyAuthSecret != "" {
		valid, name, _ := CheckProxyAuth(cfg, r)
		if valid && name != "" {
			return name
		}
	}
	// Fall back to the basic-auth user; empty when no auth is configured.
	return cfg.AuthUser
}
// ============================================================================
// Approval Workflow Handlers (Pro Feature)
// ============================================================================
// HandleListApprovals has been moved to enterprise.
// The route now delegates to aiAutoFixEndpoints.HandleListApprovals.
// HandleGetApproval returns a specific approval request.
func (h *AISettingsHandler) HandleGetApproval(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// Path shape: /api/ai/approvals/{id}[/approve|/deny] — keep only the ID segment.
	rest := strings.TrimSuffix(strings.TrimPrefix(r.URL.Path, "/api/ai/approvals/"), "/")
	approvalID, _, _ := strings.Cut(rest, "/")
	if approvalID == "" {
		writeErrorResponse(w, http.StatusBadRequest, "missing_id", "Approval ID is required", nil)
		return
	}
	store := approval.GetStore()
	if store == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Approval store not initialized", nil)
		return
	}
	// Scope the lookup to the caller's org so cross-tenant IDs read as missing.
	orgID := approval.NormalizeOrgID(GetOrgID(r.Context()))
	req, ok := store.GetApproval(approvalID)
	if !ok || !approval.BelongsToOrg(req, orgID) {
		writeErrorResponse(w, http.StatusNotFound, "not_found", "Approval request not found", nil)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(req)
}
// HandleApproveCommand approves a pending command and executes it.
//
// Route: POST /api/ai/approvals/{id}/approve. Investigation-fix approvals are
// delegated to the AI auto-fix extension point (enterprise); all other
// approvals are marked approved in the store, and the agentic chat loop picks
// them up and performs the actual execution.
func (h *AISettingsHandler) HandleApproveCommand(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// SECURITY: Approval execution accepts the dedicated mobile relay capability
	// for new pairings while remaining backward-compatible with older mobile
	// tokens that still carry ai:execute.
	if !ensureRelayMobileRuntimeRoute(w, r, relayMobileRouteApprovalApprove) {
		return
	}
	// Extract ID from path: /api/ai/approvals/{id}/approve
	path := strings.TrimPrefix(r.URL.Path, "/api/ai/approvals/")
	path = strings.TrimSuffix(path, "/approve")
	approvalID := strings.TrimSuffix(path, "/")
	if approvalID == "" {
		writeErrorResponse(w, http.StatusBadRequest, "missing_id", "Approval ID is required", nil)
		return
	}
	store := approval.GetStore()
	if store == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Approval store not initialized", nil)
		return
	}
	// Org-scoped lookup: approvals outside the caller's org read as not found,
	// deliberately indistinguishable from a nonexistent ID.
	orgID := approval.NormalizeOrgID(GetOrgID(r.Context()))
	existingReq, ok := store.GetApproval(approvalID)
	if !ok || !approval.BelongsToOrg(existingReq, orgID) {
		writeErrorResponse(w, http.StatusNotFound, "not_found", "Approval request not found", nil)
		return
	}
	// Investigation fix approvals are gated by the AI auto-fix extension point.
	// The free adapter returns 402; the enterprise adapter executes the fix.
	if existingReq.ToolID == "investigation_fix" {
		if h.aiAutoFixEndpoints != nil {
			h.aiAutoFixEndpoints.HandleApproveInvestigationFix(w, r)
		} else {
			WriteLicenseRequired(w, featureAIAutoFixValue, "Pulse Patrol Auto-Fix feature requires Pulse Pro")
		}
		return
	}
	// Attribute the approval to the authenticated user for the audit trail;
	// "anonymous" covers single-user mode without auth.
	username := getAuthUsername(h.getConfig(r.Context()), r)
	if username == "" {
		username = "anonymous"
	}
	req, err := store.Approve(approvalID, username)
	if err != nil {
		writeErrorResponse(w, http.StatusBadRequest, "approval_failed", err.Error(), nil)
		return
	}
	// Log audit event
	LogAuditEvent("ai_command_approved", username, GetClientIP(r), r.URL.Path, true,
		fmt.Sprintf("Approved command: %s", truncateForLog(req.Command, 100)))
	// For chat sidebar approvals, the agentic loop will detect approval and execute
	response := map[string]interface{}{
		"approved":    true,
		"request":     req,
		"approval_id": req.ID,
		"message":     "Command approved. Pulse Assistant will now execute it.",
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(response)
}
// HandleApproveAndExecuteInvestigationFix has been moved to enterprise.
// The route now delegates to aiAutoFixEndpoints.HandleApproveInvestigationFix.
// executeInvestigationFix has been moved to enterprise.
// isMCPToolCall, cleanTargetHost, executeMCPToolFix, parseMCPToolCall,
// splitToolArgs, and findAgentForTarget have been moved to
// router_routes_ai_relay.go as package-level functions used by the
// AIAutoFixHandlerDeps adapters.
// updateFindingOutcome updates the investigation outcome on a finding.
// Missing services or an unknown finding ID are logged as warnings; the
// method never fails loudly since it runs as a best-effort side effect.
func (h *AISettingsHandler) updateFindingOutcome(ctx context.Context, orgID, findingID, outcome string) {
	svc := h.GetAIService(ctx)
	if svc == nil {
		log.Warn().Str("orgID", orgID).Msg("AI service not available for finding update")
		return
	}
	patrol := svc.GetPatrolService()
	if patrol == nil {
		log.Warn().Str("orgID", orgID).Msg("Patrol service not available for finding update")
		return
	}
	store := patrol.GetFindings()
	if store == nil {
		log.Warn().Str("orgID", orgID).Msg("Findings store not available for finding update")
		return
	}
	if store.UpdateInvestigationOutcome(findingID, outcome) {
		log.Info().Str("findingID", findingID).Str("outcome", outcome).Msg("Updated finding investigation outcome")
		return
	}
	log.Warn().Str("findingID", findingID).Msg("Finding not found for outcome update")
}
// HandleDenyCommand denies a pending command.
//
// Route: POST /api/ai/approvals/{id}/deny. The body may optionally carry
// {"reason": "..."} — an empty body is valid.
func (h *AISettingsHandler) HandleDenyCommand(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// SECURITY: same mobile-relay capability gate as the approve route.
	if !ensureRelayMobileRuntimeRoute(w, r, relayMobileRouteApprovalDeny) {
		return
	}
	// Extract ID from path: /api/ai/approvals/{id}/deny
	path := strings.TrimPrefix(r.URL.Path, "/api/ai/approvals/")
	path = strings.TrimSuffix(path, "/deny")
	approvalID := strings.TrimSuffix(path, "/")
	if approvalID == "" {
		writeErrorResponse(w, http.StatusBadRequest, "missing_id", "Approval ID is required", nil)
		return
	}
	// Parse optional reason from body. Decode returns io.EOF on an empty
	// body, which is an expected case here — don't log a spurious warning.
	var body struct {
		Reason string `json:"reason"`
	}
	if r.Body != nil {
		if err := json.NewDecoder(r.Body).Decode(&body); err != nil && !errors.Is(err, io.EOF) {
			log.Warn().Err(err).Msg("Failed to decode deny request body")
		}
	}
	store := approval.GetStore()
	if store == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Approval store not initialized", nil)
		return
	}
	// Org-scoped lookup: approvals outside the caller's org read as not found.
	orgID := approval.NormalizeOrgID(GetOrgID(r.Context()))
	existingReq, ok := store.GetApproval(approvalID)
	if !ok || !approval.BelongsToOrg(existingReq, orgID) {
		writeErrorResponse(w, http.StatusNotFound, "not_found", "Approval request not found", nil)
		return
	}
	// Attribute the denial to the authenticated user for the audit trail.
	username := getAuthUsername(h.getConfig(r.Context()), r)
	if username == "" {
		username = "anonymous"
	}
	req, err := store.Deny(approvalID, username, body.Reason)
	if err != nil {
		writeErrorResponse(w, http.StatusBadRequest, "denial_failed", err.Error(), nil)
		return
	}
	// Log audit event
	LogAuditEvent("ai_command_denied", username, GetClientIP(r), r.URL.Path, true,
		fmt.Sprintf("Denied command: %s (reason: %s)", truncateForLog(req.Command, 100), body.Reason))
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"denied":  true,
		"request": req,
		"message": "Command denied.",
	})
}
// truncateForLog truncates a string for logging purposes.
//
// Strings at or under maxLen bytes are returned unchanged. Longer strings are
// cut to at most maxLen bytes and suffixed with "..."; the cut point backs
// off so a multi-byte UTF-8 sequence is never split mid-rune (the previous
// byte-slice version could emit invalid UTF-8 into logs). A negative maxLen
// is treated as 0 instead of panicking.
func truncateForLog(s string, maxLen int) string {
	if len(s) <= maxLen {
		return s
	}
	if maxLen < 0 {
		maxLen = 0
	}
	cut := maxLen
	// Bytes matching 0b10xxxxxx are UTF-8 continuation bytes; step back to
	// the start of the rune so the prefix remains valid UTF-8.
	for cut > 0 && s[cut]&0xC0 == 0x80 {
		cut--
	}
	return s[:cut] + "..."
}
// PatrolAutonomySettings represents the patrol autonomy configuration for API requests.
// Uses a pointer for FullModeUnlocked to distinguish "not sent" from "sent as false".
type PatrolAutonomySettings struct {
	AutonomyLevel           string `json:"autonomy_level"`               // "monitor", "approval", "assisted", "full"
	FullModeUnlocked        *bool  `json:"full_mode_unlocked,omitempty"` // User has acknowledged Full mode risks (nil = preserve existing)
	InvestigationBudget     int    `json:"investigation_budget"`         // Max turns per investigation (5-30)
	InvestigationTimeoutSec int    `json:"investigation_timeout_sec"`    // Max seconds per investigation (60-1800)
}
// PatrolAutonomyResponse represents the patrol autonomy configuration for API responses.
// Uses a plain bool for FullModeUnlocked since responses always include the actual value.
type PatrolAutonomyResponse struct {
	AutonomyLevel           string `json:"autonomy_level"`            // Effective level ("monitor" when unlicensed)
	FullModeUnlocked        bool   `json:"full_mode_unlocked"`        // User has acknowledged Full mode risks
	InvestigationBudget     int    `json:"investigation_budget"`      // Max turns per investigation
	InvestigationTimeoutSec int    `json:"investigation_timeout_sec"` // Max seconds per investigation
}
// HandleGetPatrolAutonomy returns the current patrol autonomy settings (GET /api/ai/patrol/autonomy)
func (h *AISettingsHandler) HandleGetPatrolAutonomy(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "service_unavailable", "Pulse Patrol service not available", nil)
		return
	}
	cfg := svc.GetConfig()
	if cfg == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_configured", "Pulse Patrol not configured", nil)
		return
	}
	level := cfg.GetPatrolAutonomyLevel()
	// Community tier lock: without ai_autofix, patrol autonomy is findings-only
	// ("monitor"). A higher level persisted during a previous Pro/trial period
	// is clamped at read time so the UI reflects runtime enforcement.
	hasAutoFix := svc.HasLicenseFeature(featureAIAutoFixValue)
	if !hasAutoFix && level != config.PatrolAutonomyMonitor {
		level = config.PatrolAutonomyMonitor
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(PatrolAutonomyResponse{
		AutonomyLevel:           level,
		FullModeUnlocked:        cfg.PatrolFullModeUnlocked,
		InvestigationBudget:     cfg.GetPatrolInvestigationBudget(),
		InvestigationTimeoutSec: int(cfg.GetPatrolInvestigationTimeout().Seconds()),
	})
}
// HandleUpdatePatrolAutonomy has been moved to enterprise.
// The route now delegates to aiAutoFixEndpoints.HandleUpdatePatrolAutonomy.
// maxFindingIDLength is the maximum allowed length for finding IDs in URL paths.
// Real finding IDs are 16 hex chars (SHA256[:8]), but we accept up to 256 for
// forward compatibility with any future ID scheme; longer values are rejected
// before any lookup to bound work on attacker-controlled paths.
const maxFindingIDLength = 256
// HandleGetInvestigation returns investigation details for a finding (GET /api/ai/findings/{id}/investigation)
func (h *AISettingsHandler) HandleGetInvestigation(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// The finding ID sits between the fixed prefix and suffix.
	findingID := strings.TrimSuffix(strings.TrimPrefix(r.URL.Path, "/api/ai/findings/"), "/investigation")
	if findingID == "" {
		writeErrorResponse(w, http.StatusBadRequest, "missing_id", "Finding ID is required", nil)
		return
	}
	if len(findingID) > maxFindingIDLength {
		writeErrorResponse(w, http.StatusBadRequest, "invalid_id", "Finding ID is too long", nil)
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Patrol service not initialized", nil)
		return
	}
	patrol := svc.GetPatrolService()
	if patrol == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Patrol service not initialized", nil)
		return
	}
	// Investigations are owned by the orchestrator, keyed by finding ID.
	orch := patrol.GetInvestigationOrchestrator()
	if orch == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Investigation orchestrator not initialized", nil)
		return
	}
	investigation := orch.GetInvestigationByFinding(findingID)
	if investigation == nil {
		writeErrorResponse(w, http.StatusNotFound, "not_found", "No investigation found for this finding", nil)
		return
	}
	normalized := investigation.NormalizeCollections()
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(normalized)
}
// HandleReapproveInvestigationFix has been moved to enterprise.
// The route now delegates to aiAutoFixEndpoints.HandleReapproveInvestigationFix.
// HandleGetInvestigationMessages returns chat messages for an investigation (GET /api/ai/findings/{id}/investigation/messages)
func (h *AISettingsHandler) HandleGetInvestigationMessages(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// The finding ID sits between the fixed prefix and suffix.
	findingID := strings.TrimSuffix(strings.TrimPrefix(r.URL.Path, "/api/ai/findings/"), "/investigation/messages")
	if findingID == "" {
		writeErrorResponse(w, http.StatusBadRequest, "missing_id", "Finding ID is required", nil)
		return
	}
	if len(findingID) > maxFindingIDLength {
		writeErrorResponse(w, http.StatusBadRequest, "invalid_id", "Finding ID is too long", nil)
		return
	}
	svc := h.GetAIService(r.Context())
	if svc == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Patrol service not initialized", nil)
		return
	}
	patrol := svc.GetPatrolService()
	if patrol == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Patrol service not initialized", nil)
		return
	}
	orch := patrol.GetInvestigationOrchestrator()
	if orch == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Investigation orchestrator not initialized", nil)
		return
	}
	investigation := orch.GetInvestigationByFinding(findingID)
	if investigation == nil {
		writeErrorResponse(w, http.StatusNotFound, "not_found", "No investigation found for this finding", nil)
		return
	}
	// The messages live in the chat session the investigation ran under.
	chatSvc := svc.GetChatService()
	if chatSvc == nil {
		writeErrorResponse(w, http.StatusServiceUnavailable, "not_initialized", "Chat service not initialized", nil)
		return
	}
	messages, err := chatSvc.GetMessages(r.Context(), investigation.SessionID)
	if err != nil {
		writeErrorResponse(w, http.StatusInternalServerError, "fetch_failed", "Failed to get investigation messages", nil)
		return
	}
	for i := range messages {
		messages[i] = messages[i].NormalizeCollections()
	}
	normalized := investigation.NormalizeCollections()
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"investigation_id": normalized.ID,
		"session_id":       normalized.SessionID,
		"messages":         messages,
	})
}
// HandleReinvestigateFinding has been moved to enterprise.
// The route now delegates to aiAutoFixEndpoints.HandleReinvestigateFinding.