Mirror of https://github.com/rcourtman/Pulse.git (synced 2026-04-28 03:20:11 +00:00)
feat: add multi-tenancy foundation (directory-per-tenant)
Implements Phase 1-2 of multi-tenancy support using a directory-per-tenant strategy that preserves the existing file-based persistence. Key changes:

- Add MultiTenantPersistence manager for org-scoped config routing
- Add TenantMiddleware for X-Pulse-Org-ID header extraction and context propagation
- Add MultiTenantMonitor for per-tenant monitor lifecycle management
- Refactor handlers (ConfigHandlers, AlertHandlers, AIHandlers, etc.) to be context-aware with getConfig(ctx)/getMonitor(ctx) helpers
- Add Organization model for future tenant metadata
- Update server and router to wire the multi-tenant components

All handlers maintain backward compatibility via legacy field fallbacks, so single-tenant deployments continue to work under the "default" org. A minimal sketch of the header-to-context flow is shown below.
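The following is a minimal, self-contained sketch of what a header-based tenant middleware like TenantMiddleware might look like. The names here (tenantMiddleware, getOrgID, the "default" fallback) are assumptions inferred from the GetOrgID(ctx) call sites in the diff below, not the repository's actual implementation:

package main

import (
	"context"
	"fmt"
	"net/http"
)

type ctxKey string

const orgIDKey ctxKey = "orgID"

// tenantMiddleware reads X-Pulse-Org-ID and stores the org ID in the request
// context; downstream handlers route to per-tenant state based on it.
func tenantMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		orgID := r.Header.Get("X-Pulse-Org-ID")
		if orgID == "" {
			orgID = "default" // single-tenant deployments fall through to the default org
		}
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), orgIDKey, orgID)))
	})
}

// getOrgID mirrors the shape of the GetOrgID(ctx) helper the handlers below call.
func getOrgID(ctx context.Context) string {
	if v, ok := ctx.Value(orgIDKey).(string); ok {
		return v
	}
	return "default"
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/whoami", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, getOrgID(r.Context()))
	})
	http.ListenAndServe(":8080", tenantMiddleware(mux))
}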
parent 31989803f0
commit 289d95374f
29 changed files with 5980 additions and 772 deletions
coverage_summary.txt (new file, 3873 changed lines)
File diff suppressed because it is too large
@@ -4,6 +4,7 @@ import (
+	"context"
 	"encoding/json"
 	"net/http"
 	"sync"
 	"sync/atomic"
 	"time"

@@ -12,12 +13,14 @@ import (
 	"github.com/rcourtman/pulse-go-rewrite/internal/ai/chat"
 	"github.com/rcourtman/pulse-go-rewrite/internal/config"
 	"github.com/rcourtman/pulse-go-rewrite/internal/models"
+	"github.com/rcourtman/pulse-go-rewrite/internal/monitoring"
 	"github.com/rs/zerolog/log"
 )

 // AIPersistence interface for loading/saving AI config
 type AIPersistence interface {
 	LoadAIConfig() (*config.AIConfig, error)
+	DataDir() string
 }

 // AIService interface for the AI chat service - enables mocking in tests

@@ -57,10 +60,16 @@ type AIService interface {

 // AIHandler handles all AI endpoints using direct AI integration
 type AIHandler struct {
-	config      *config.Config
-	persistence AIPersistence
-	service     AIService
-	agentServer *agentexec.Server
+	mtPersistence *config.MultiTenantPersistence
+	mtMonitor     *monitoring.MultiTenantMonitor
+	legacyConfig      *config.Config
+	legacyPersistence AIPersistence
+	legacyService     AIService
+	agentServer       *agentexec.Server
+	services         map[string]AIService
+	servicesMu       sync.RWMutex
+	stateProviders   map[string]AIStateProvider
+	stateProvidersMu sync.RWMutex
 }

 // newChatService is the factory function for creating the AI service.
@@ -70,13 +79,176 @@ var newChatService = func(cfg chat.Config) AIService {
 }

 // NewAIHandler creates a new AI handler
-func NewAIHandler(cfg *config.Config, persistence AIPersistence, agentServer *agentexec.Server) *AIHandler {
-	return &AIHandler{
-		config:      cfg,
-		persistence: persistence,
-		agentServer: agentServer,
-		// service will be initialized in Start()
+func NewAIHandler(mtp *config.MultiTenantPersistence, mtm *monitoring.MultiTenantMonitor, agentServer *agentexec.Server) *AIHandler {
+	var defaultConfig *config.Config
+	var defaultPersistence AIPersistence
+
+	if mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil && m != nil {
+			defaultConfig = m.GetConfig()
+		}
+	}
+	if mtp != nil {
+		if p, err := mtp.GetPersistence("default"); err == nil {
+			defaultPersistence = p
+		}
+	}
+
+	return &AIHandler{
+		mtPersistence:     mtp,
+		mtMonitor:         mtm,
+		legacyConfig:      defaultConfig,
+		legacyPersistence: defaultPersistence,
+		agentServer:       agentServer,
+		services:          make(map[string]AIService),
+		stateProviders:    make(map[string]AIStateProvider),
 	}
 }

+// GetService returns the AI service for the current context
+func (h *AIHandler) GetService(ctx context.Context) AIService {
+	orgID := GetOrgID(ctx)
+	if orgID == "default" || orgID == "" {
+		return h.legacyService
+	}
+
+	h.servicesMu.RLock()
+	svc, exists := h.services[orgID]
+	h.servicesMu.RUnlock()
+
+	if exists {
+		return svc
+	}
+
+	h.servicesMu.Lock()
+	defer h.servicesMu.Unlock()
+
+	// Double check
+	if svc, exists = h.services[orgID]; exists {
+		return svc
+	}
+
+	// Create and start service for this tenant
+	svc = h.initTenantService(ctx, orgID)
+	if svc != nil {
+		h.services[orgID] = svc
+	}
+	return svc
+}
+
+// RemoveTenantService stops and removes the AI service for a specific tenant.
+// This should be called when a tenant is offboarded to free resources.
+func (h *AIHandler) RemoveTenantService(ctx context.Context, orgID string) error {
+	if orgID == "default" || orgID == "" {
+		return nil // Don't remove legacy service
+	}
+
+	h.servicesMu.Lock()
+	defer h.servicesMu.Unlock()
+
+	svc, exists := h.services[orgID]
+	if !exists {
+		return nil // Nothing to remove
+	}
+
+	if svc != nil {
+		if err := svc.Stop(ctx); err != nil {
+			log.Warn().Str("orgID", orgID).Err(err).Msg("Error stopping AI service for removed tenant")
+		}
+	}
+
+	delete(h.services, orgID)
+	log.Info().Str("orgID", orgID).Msg("Removed AI service for tenant")
+	return nil
+}
+
+func (h *AIHandler) initTenantService(ctx context.Context, orgID string) AIService {
+	if h.mtPersistence == nil {
+		return nil
+	}
+
+	persistence, err := h.mtPersistence.GetPersistence(orgID)
+	if err != nil {
+		log.Warn().Str("orgID", orgID).Err(err).Msg("Failed to get persistence for AI service")
+		return nil
+	}
+
+	// We need the config to get the data directory
+	aiCfg, _ := persistence.LoadAIConfig()
+
+	dataDir := h.getDataDir(aiCfg, persistence.DataDir())
+
+	// Create chat config
+	chatCfg := chat.Config{
+		AIConfig:    aiCfg,
+		DataDir:     dataDir,
+		AgentServer: h.agentServer,
+	}
+
+	// Get monitor for state provider
+	if h.mtMonitor != nil {
+		if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+			chatCfg.StateProvider = m
+		}
+	}
+
+	svc := newChatService(chatCfg)
+	if err := svc.Start(ctx); err != nil {
+		log.Error().Str("orgID", orgID).Err(err).Msg("Failed to start AI service for tenant")
+	}
+
+	return svc
+}
+
+func (h *AIHandler) getDataDir(aiCfg *config.AIConfig, baseDir string) string {
+	dataDir := baseDir
+	if dataDir == "" {
+		dataDir = "data"
+	}
+	return dataDir
+}
+
+func (h *AIHandler) getConfig(ctx context.Context) *config.Config {
+	orgID := GetOrgID(ctx)
+	if h.mtMonitor != nil {
+		if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+			return m.GetConfig()
+		}
+	}
+	return h.legacyConfig
+}
+
+func (h *AIHandler) getPersistence(ctx context.Context) AIPersistence {
+	orgID := GetOrgID(ctx)
+	if h.mtPersistence != nil {
+		if p, err := h.mtPersistence.GetPersistence(orgID); err == nil {
+			return p
+		}
+	}
+	return h.legacyPersistence
+}
+
+// loadAIConfig loads AI config for the current context
+func (h *AIHandler) loadAIConfig(ctx context.Context) *config.AIConfig {
+	p := h.getPersistence(ctx)
+	if p == nil {
+		return nil
+	}
+	cfg, err := p.LoadAIConfig()
+	if err != nil {
+		return nil
+	}
+	return cfg
+}
+
+// SetMultiTenantPersistence updates the persistence manager
+func (h *AIHandler) SetMultiTenantPersistence(mtp *config.MultiTenantPersistence) {
+	h.mtPersistence = mtp
+}
+
+// SetMultiTenantMonitor updates the monitor manager
+func (h *AIHandler) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+	h.mtMonitor = mtm
+}
+
 // StateProvider interface for infrastructure state
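The GetService method above uses classic double-checked locking: an RLock fast path for the common case, then a write lock with a re-check before the expensive per-tenant initialization, so a service is created at most once even under concurrent requests. A self-contained sketch of the same pattern with generic names (not code from this repository):

package main

import (
	"fmt"
	"sync"
)

// lazyRegistry caches one expensive-to-build value per key, creating it at
// most once under concurrent access -- the same shape as GetService above.
type lazyRegistry struct {
	mu    sync.RWMutex
	items map[string]*resource
}

type resource struct{ key string }

func (r *lazyRegistry) get(key string) *resource {
	// Fast path: shared lock; most calls hit the cache.
	r.mu.RLock()
	res, ok := r.items[key]
	r.mu.RUnlock()
	if ok {
		return res
	}

	// Slow path: exclusive lock, then re-check in case another goroutine
	// created the entry between our RUnlock and Lock.
	r.mu.Lock()
	defer r.mu.Unlock()
	if res, ok = r.items[key]; ok {
		return res
	}
	res = &resource{key: key} // expensive tenant init would go here
	r.items[key] = res
	return res
}

func main() {
	reg := &lazyRegistry{items: make(map[string]*resource)}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); _ = reg.get("tenant-a") }()
	}
	wg.Wait()
	fmt.Println(reg.get("tenant-a").key) // prints: tenant-a
}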
@@ -87,7 +259,7 @@ type AIStateProvider interface {
 // Start initializes and starts the AI chat service
 func (h *AIHandler) Start(ctx context.Context, stateProvider AIStateProvider) error {
 	log.Info().Msg("AIHandler.Start called")
-	aiCfg := h.loadAIConfig()
+	aiCfg := h.loadAIConfig(ctx)
 	if aiCfg == nil {
 		log.Info().Msg("AI config is nil, AI is disabled")
 		return nil

@@ -98,17 +270,19 @@ func (h *AIHandler) Start(ctx context.Context, stateProvider AIStateProvider) error {
 	}

 	// Determine data directory
-	dataDir := "/tmp/pulse-ai"
+	persistence := h.getPersistence(ctx)
+	dataDir := h.getDataDir(aiCfg, persistence.DataDir())

 	log.Info().Bool("enabled", aiCfg.Enabled).Str("model", aiCfg.Model).Msg("Starting AI chat service")
-	h.service = newChatService(chat.Config{
+	// Create chat config
+	chatCfg := chat.Config{
 		AIConfig:      aiCfg,
+		DataDir:       dataDir,
 		StateProvider: stateProvider,
 		AgentServer:   h.agentServer,
-		DataDir:       dataDir,
-	})
+	}

-	if err := h.service.Start(ctx); err != nil {
+	h.legacyService = newChatService(chatCfg)
+	if err := h.legacyService.Start(ctx); err != nil {
 		log.Error().Err(err).Msg("Failed to start AI chat service")
 		return err
 	}

@@ -133,8 +307,8 @@ func (h *AIHandler) Start(ctx context.Context, stateProvider AIStateProvider) error {

 // Stop stops the AI chat service
 func (h *AIHandler) Stop(ctx context.Context) error {
-	if h.service != nil {
-		return h.service.Stop(ctx)
+	if h.legacyService != nil {
+		return h.legacyService.Stop(ctx)
 	}
 	return nil
 }

@@ -142,39 +316,24 @@ func (h *AIHandler) Stop(ctx context.Context) error {
 // Restart restarts the AI chat service with updated configuration
 // Call this when model or other settings change
 func (h *AIHandler) Restart(ctx context.Context) error {
-	if h.service == nil || !h.service.IsRunning() {
+	if h.legacyService == nil || !h.legacyService.IsRunning() {
 		return nil // Not running, nothing to restart
 	}
 	// Load fresh config from persistence to get latest settings
-	newCfg := h.loadAIConfig()
-	return h.service.Restart(ctx, newCfg)
+	newCfg := h.loadAIConfig(ctx)
+	return h.legacyService.Restart(ctx, newCfg)
 }

-// IsRunning returns whether AI is running
-func (h *AIHandler) IsRunning() bool {
-	return h.service != nil && h.service.IsRunning()
-}
-
-// GetService returns the underlying AI chat service
-func (h *AIHandler) GetService() AIService {
-	return h.service
-}
-
 // GetAIConfig returns the current AI configuration
-func (h *AIHandler) GetAIConfig() *config.AIConfig {
-	return h.loadAIConfig()
+func (h *AIHandler) GetAIConfig(ctx context.Context) *config.AIConfig {
+	return h.loadAIConfig(ctx)
 }

-func (h *AIHandler) loadAIConfig() *config.AIConfig {
-	if h.persistence == nil {
-		return nil
-	}
-	cfg, err := h.persistence.LoadAIConfig()
-	if err != nil {
-		log.Warn().Err(err).Msg("Failed to load AI config")
-		return nil
-	}
-	return cfg
-}
+// IsRunning returns true if the AI chat service is running
+func (h *AIHandler) IsRunning(ctx context.Context) bool {
+	svc := h.GetService(ctx)
+	return svc != nil && svc.IsRunning()
+}

 // ChatRequest represents a chat request

@@ -208,10 +367,16 @@ func (h *AIHandler) HandleChat(w http.ResponseWriter, r *http.Request) {

 	// Auth already handled by RequireAuth wrapper - no need to check again

-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}

 	// Parse request
 	var req ChatRequest
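Every session handler that follows repeats the same two-step guard: IsRunning(ctx) first, then a nil check on GetService(ctx). A hypothetical test sketch of the guard's behavior when no service is configured, assuming the package compiles as shown in this diff (the route path and test name are illustrative):

package api

import (
	"net/http"
	"net/http/httptest"
	"testing"
)

// TestChatGuard sketches the expected behavior with no tenants and no legacy
// service wired: the handler should reply 503 rather than panic on a nil service.
func TestChatGuard(t *testing.T) {
	h := NewAIHandler(nil, nil, nil) // no multi-tenant managers, no agent server
	req := httptest.NewRequest(http.MethodPost, "/api/ai/chat", nil)
	rec := httptest.NewRecorder()

	h.HandleChat(rec, req)

	if rec.Code != http.StatusServiceUnavailable {
		t.Fatalf("expected 503, got %d", rec.Code)
	}
}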
@@ -296,7 +461,7 @@ func (h *AIHandler) HandleChat(w http.ResponseWriter, r *http.Request) {
 	}

 	// Stream from AI chat service
-	err := h.service.ExecuteStream(ctx, chat.ExecuteRequest{
+	err := svc.ExecuteStream(ctx, chat.ExecuteRequest{
 		Prompt:    req.Prompt,
 		SessionID: req.SessionID,
 		Model:     req.Model,

@@ -316,12 +481,19 @@ func (h *AIHandler) HandleChat(w http.ResponseWriter, r *http.Request) {

 // HandleSessions handles GET /api/ai/sessions - list sessions
 func (h *AIHandler) HandleSessions(w http.ResponseWriter, r *http.Request) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	sessions, err := h.service.ListSessions(r.Context())
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	sessions, err := svc.ListSessions(ctx)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -333,12 +505,19 @@ func (h *AIHandler) HandleSessions(w http.ResponseWriter, r *http.Request) {

 // HandleCreateSession handles POST /api/ai/sessions - create session
 func (h *AIHandler) HandleCreateSession(w http.ResponseWriter, r *http.Request) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	session, err := h.service.CreateSession(r.Context())
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	session, err := svc.CreateSession(ctx)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -350,12 +529,19 @@ func (h *AIHandler) HandleCreateSession(w http.ResponseWriter, r *http.Request)

 // HandleDeleteSession handles DELETE /api/ai/sessions/{id}
 func (h *AIHandler) HandleDeleteSession(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	if err := h.service.DeleteSession(r.Context(), sessionID); err != nil {
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	if err := svc.DeleteSession(ctx, sessionID); err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}

@@ -365,12 +551,19 @@ func (h *AIHandler) HandleDeleteSession(w http.ResponseWriter, r *http.Request,

 // HandleMessages handles GET /api/ai/sessions/{id}/messages
 func (h *AIHandler) HandleMessages(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	messages, err := h.service.GetMessages(r.Context(), sessionID)
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	messages, err := svc.GetMessages(ctx, sessionID)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -382,12 +575,19 @@ func (h *AIHandler) HandleMessages(w http.ResponseWriter, r *http.Request, sessionID string) {

 // HandleAbort handles POST /api/ai/sessions/{id}/abort
 func (h *AIHandler) HandleAbort(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	if err := h.service.AbortSession(r.Context(), sessionID); err != nil {
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	if err := svc.AbortSession(ctx, sessionID); err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}

@@ -398,7 +598,7 @@ func (h *AIHandler) HandleAbort(w http.ResponseWriter, r *http.Request, sessionID string) {
 // HandleStatus handles GET /api/ai/status
 func (h *AIHandler) HandleStatus(w http.ResponseWriter, r *http.Request) {
 	status := map[string]interface{}{
-		"running": h.IsRunning(),
+		"running": h.IsRunning(r.Context()),
 		"engine":  "direct",
 	}

@@ -409,12 +609,19 @@ func (h *AIHandler) HandleStatus(w http.ResponseWriter, r *http.Request) {
 // HandleSummarize handles POST /api/ai/sessions/{id}/summarize
 // Compresses context when nearing model limits
 func (h *AIHandler) HandleSummarize(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	result, err := h.service.SummarizeSession(r.Context(), sessionID)
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	result, err := svc.SummarizeSession(ctx, sessionID)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -427,12 +634,19 @@ func (h *AIHandler) HandleSummarize(w http.ResponseWriter, r *http.Request, sessionID string) {
 // HandleDiff handles GET /api/ai/sessions/{id}/diff
 // Returns file changes made during the session
 func (h *AIHandler) HandleDiff(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	diff, err := h.service.GetSessionDiff(r.Context(), sessionID)
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	diff, err := svc.GetSessionDiff(ctx, sessionID)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -445,12 +659,19 @@ func (h *AIHandler) HandleDiff(w http.ResponseWriter, r *http.Request, sessionID string) {
 // HandleFork handles POST /api/ai/sessions/{id}/fork
 // Creates a branch point in the conversation
 func (h *AIHandler) HandleFork(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	session, err := h.service.ForkSession(r.Context(), sessionID)
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	session, err := svc.ForkSession(ctx, sessionID)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -463,12 +684,19 @@ func (h *AIHandler) HandleFork(w http.ResponseWriter, r *http.Request, sessionID string) {
 // HandleRevert handles POST /api/ai/sessions/{id}/revert
 // Reverts file changes from the session
 func (h *AIHandler) HandleRevert(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	result, err := h.service.RevertSession(r.Context(), sessionID)
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	result, err := svc.RevertSession(ctx, sessionID)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -481,12 +709,19 @@ func (h *AIHandler) HandleRevert(w http.ResponseWriter, r *http.Request, sessionID string) {
 // HandleUnrevert handles POST /api/ai/sessions/{id}/unrevert
 // Restores previously reverted changes
 func (h *AIHandler) HandleUnrevert(w http.ResponseWriter, r *http.Request, sessionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

-	result, err := h.service.UnrevertSession(r.Context(), sessionID)
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	result, err := svc.UnrevertSession(ctx, sessionID)
 	if err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -506,7 +741,8 @@ type AnswerQuestionRequest struct {

 // HandleAnswerQuestion handles POST /api/ai/question/{questionID}/answer
 func (h *AIHandler) HandleAnswerQuestion(w http.ResponseWriter, r *http.Request, questionID string) {
-	if !h.IsRunning() {
+	ctx := r.Context()
+	if !h.IsRunning(ctx) {
 		http.Error(w, "AI is not running", http.StatusServiceUnavailable)
 		return
 	}

@@ -531,7 +767,13 @@ func (h *AIHandler) HandleAnswerQuestion(w http.ResponseWriter, r *http.Request, questionID string) {
 		Int("answers_count", len(answers)).
 		Msg("AIHandler: Received answer to question")

-	if err := h.service.AnswerQuestion(r.Context(), questionID, answers); err != nil {
+	svc := h.GetService(ctx)
+	if svc == nil {
+		http.Error(w, "AI service not available", http.StatusServiceUnavailable)
+		return
+	}
+
+	if err := svc.AnswerQuestion(ctx, questionID, answers); err != nil {
 		log.Error().Err(err).Str("questionID", questionID).Msg("Failed to answer question")
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return

@@ -542,91 +784,91 @@ func (h *AIHandler) HandleAnswerQuestion(w http.ResponseWriter, r *http.Request, questionID string) {

 // SetAlertProvider sets the alert provider for MCP tools
 func (h *AIHandler) SetAlertProvider(provider chat.MCPAlertProvider) {
-	if h.service != nil {
-		h.service.SetAlertProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetAlertProvider(provider)
 	}
 }

 // SetFindingsProvider sets the findings provider for MCP tools
 func (h *AIHandler) SetFindingsProvider(provider chat.MCPFindingsProvider) {
-	if h.service != nil {
-		h.service.SetFindingsProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetFindingsProvider(provider)
 	}
 }

 // SetBaselineProvider sets the baseline provider for MCP tools
 func (h *AIHandler) SetBaselineProvider(provider chat.MCPBaselineProvider) {
-	if h.service != nil {
-		h.service.SetBaselineProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetBaselineProvider(provider)
 	}
 }

 // SetPatternProvider sets the pattern provider for MCP tools
 func (h *AIHandler) SetPatternProvider(provider chat.MCPPatternProvider) {
-	if h.service != nil {
-		h.service.SetPatternProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetPatternProvider(provider)
 	}
 }

 // SetMetricsHistory sets the metrics history provider for MCP tools
 func (h *AIHandler) SetMetricsHistory(provider chat.MCPMetricsHistoryProvider) {
-	if h.service != nil {
-		h.service.SetMetricsHistory(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetMetricsHistory(provider)
 	}
 }

 // SetAgentProfileManager sets the agent profile manager for MCP tools
 func (h *AIHandler) SetAgentProfileManager(manager chat.AgentProfileManager) {
-	if h.service != nil {
-		h.service.SetAgentProfileManager(manager)
+	if h.legacyService != nil {
+		h.legacyService.SetAgentProfileManager(manager)
 	}
 }

 // SetStorageProvider sets the storage provider for MCP tools
 func (h *AIHandler) SetStorageProvider(provider chat.MCPStorageProvider) {
-	if h.service != nil {
-		h.service.SetStorageProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetStorageProvider(provider)
 	}
 }

 // SetBackupProvider sets the backup provider for MCP tools
 func (h *AIHandler) SetBackupProvider(provider chat.MCPBackupProvider) {
-	if h.service != nil {
-		h.service.SetBackupProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetBackupProvider(provider)
 	}
 }

 // SetDiskHealthProvider sets the disk health provider for MCP tools
 func (h *AIHandler) SetDiskHealthProvider(provider chat.MCPDiskHealthProvider) {
-	if h.service != nil {
-		h.service.SetDiskHealthProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetDiskHealthProvider(provider)
 	}
 }

 // SetUpdatesProvider sets the updates provider for MCP tools
 func (h *AIHandler) SetUpdatesProvider(provider chat.MCPUpdatesProvider) {
-	if h.service != nil {
-		h.service.SetUpdatesProvider(provider)
+	if h.legacyService != nil {
+		h.legacyService.SetUpdatesProvider(provider)
 	}
 }

 // SetFindingsManager sets the findings manager for MCP tools
 func (h *AIHandler) SetFindingsManager(manager chat.FindingsManager) {
-	if h.service != nil {
-		h.service.SetFindingsManager(manager)
+	if h.legacyService != nil {
+		h.legacyService.SetFindingsManager(manager)
 	}
 }

 // SetMetadataUpdater sets the metadata updater for MCP tools
 func (h *AIHandler) SetMetadataUpdater(updater chat.MetadataUpdater) {
-	if h.service != nil {
-		h.service.SetMetadataUpdater(updater)
+	if h.legacyService != nil {
+		h.legacyService.SetMetadataUpdater(updater)
 	}
 }

 // UpdateControlSettings updates control settings in the service
 func (h *AIHandler) UpdateControlSettings(cfg *config.AIConfig) {
-	if h.service != nil {
-		h.service.UpdateControlSettings(cfg)
+	if h.legacyService != nil {
+		h.legacyService.UpdateControlSettings(cfg)
 	}
 }
File diff suppressed because it is too large
@@ -20,8 +20,8 @@ func (h *AISettingsHandler) HandleGetPatterns(w http.ResponseWriter, r *http.Request) {
 		return
 	}

 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	aiService := h.GetAIService(r.Context())
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"patterns": []interface{}{},
 			"message":  "AI is not enabled",

@@ -31,7 +31,7 @@ func (h *AISettingsHandler) HandleGetPatterns(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"patterns": []interface{}{},

@@ -75,7 +75,7 @@ func (h *AISettingsHandler) HandleGetPatterns(w http.ResponseWriter, r *http.Request) {
 		})
 	}

-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIPatrol)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIPatrol)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIPatrol)

@@ -103,8 +103,9 @@ func (h *AISettingsHandler) HandleGetPredictions(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"predictions": []interface{}{},
 			"message":     "AI is not enabled",

@@ -114,7 +115,7 @@ func (h *AISettingsHandler) HandleGetPredictions(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"predictions": []interface{}{},

@@ -160,7 +161,7 @@ func (h *AISettingsHandler) HandleGetPredictions(w http.ResponseWriter, r *http.Request) {
 		})
 	}

-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIPatrol)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIPatrol)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIPatrol)

@@ -188,8 +189,9 @@ func (h *AISettingsHandler) HandleGetCorrelations(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"correlations": []interface{}{},
 			"message":      "AI is not enabled",

@@ -199,7 +201,7 @@ func (h *AISettingsHandler) HandleGetCorrelations(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"correlations": []interface{}{},

@@ -249,7 +251,7 @@ func (h *AISettingsHandler) HandleGetCorrelations(w http.ResponseWriter, r *http.Request) {
 		})
 	}

-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIPatrol)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIPatrol)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIPatrol)

@@ -277,8 +279,9 @@ func (h *AISettingsHandler) HandleGetRecentChanges(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"changes": []interface{}{},
 			"message": "AI is not enabled",

@@ -288,7 +291,7 @@ func (h *AISettingsHandler) HandleGetRecentChanges(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"changes": []interface{}{},

@@ -337,7 +340,7 @@ func (h *AISettingsHandler) HandleGetRecentChanges(w http.ResponseWriter, r *http.Request) {
 		})
 	}

-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIPatrol)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIPatrol)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIPatrol)

@@ -366,8 +369,9 @@ func (h *AISettingsHandler) HandleGetBaselines(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"baselines": []interface{}{},
 			"message":   "AI is not enabled",

@@ -377,7 +381,7 @@ func (h *AISettingsHandler) HandleGetBaselines(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"baselines": []interface{}{},

@@ -422,7 +426,7 @@ func (h *AISettingsHandler) HandleGetBaselines(w http.ResponseWriter, r *http.Request) {
 		})
 	}

-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIPatrol)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIPatrol)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIPatrol)

@@ -450,15 +454,16 @@ func (h *AISettingsHandler) HandleGetRemediations(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// Check for Pulse Pro license (soft-lock)
-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIAutoFix)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIAutoFix)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIAutoFix)
 	}

 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"remediations": []interface{}{},
 			"message":      "AI is not enabled",

@@ -470,7 +475,7 @@ func (h *AISettingsHandler) HandleGetRemediations(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"remediations": []interface{}{},

@@ -612,8 +617,9 @@ func (h *AISettingsHandler) HandleGetAnomalies(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// AI must be enabled to return intelligence data
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"anomalies": []interface{}{},
 			"message":   "AI is not enabled",

@@ -623,7 +629,7 @@ func (h *AISettingsHandler) HandleGetAnomalies(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"anomalies": []interface{}{},

@@ -646,7 +652,7 @@ func (h *AISettingsHandler) HandleGetAnomalies(w http.ResponseWriter, r *http.Request) {
 	}

 	// Get current metrics from state provider
-	stateProvider := h.aiService.GetStateProvider()
+	stateProvider := aiService.GetStateProvider()
 	if stateProvider == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"anomalies": []interface{}{},

@@ -853,8 +859,9 @@ func (h *AISettingsHandler) HandleGetLearningStatus(w http.ResponseWriter, r *http.Request) {
 		return
 	}

+	aiService := h.GetAIService(r.Context())
 	// AI must be enabled to return learning status
-	if !h.aiService.IsEnabled() {
+	if aiService == nil || !aiService.IsEnabled() {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"resources_baselined": 0,
 			"total_metrics":       0,

@@ -866,7 +873,7 @@ func (h *AISettingsHandler) HandleGetLearningStatus(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	patrol := h.aiService.GetPatrolService()
+	patrol := aiService.GetPatrolService()
 	if patrol == nil {
 		if err := utils.WriteJSONResponse(w, map[string]interface{}{
 			"resources_baselined": 0,

@@ -918,7 +925,7 @@ func (h *AISettingsHandler) HandleGetLearningStatus(w http.ResponseWriter, r *http.Request) {
 		message = "Baselines established and anomaly detection is active"
 	}

-	locked := !h.aiService.HasLicenseFeature(license.FeatureAIPatrol)
+	locked := aiService == nil || !aiService.HasLicenseFeature(license.FeatureAIPatrol)
 	if locked {
 		w.Header().Set("X-License-Required", "true")
 		w.Header().Set("X-License-Feature", license.FeatureAIPatrol)
@@ -1,6 +1,7 @@
 package api

 import (
+	"context"
 	"encoding/json"
 	"net/http"
 	"net/url"

@@ -13,6 +14,7 @@ import (
 	"github.com/rcourtman/pulse-go-rewrite/internal/config"
 	"github.com/rcourtman/pulse-go-rewrite/internal/mock"
 	"github.com/rcourtman/pulse-go-rewrite/internal/models"
+	"github.com/rcourtman/pulse-go-rewrite/internal/monitoring"
 	"github.com/rcourtman/pulse-go-rewrite/internal/notifications"
 	"github.com/rcourtman/pulse-go-rewrite/internal/utils"
 	"github.com/rcourtman/pulse-go-rewrite/internal/websocket"
@@ -50,21 +52,49 @@ type AlertMonitor interface {

 // AlertHandlers handles alert-related HTTP endpoints
 type AlertHandlers struct {
-	monitor AlertMonitor
-	wsHub   *websocket.Hub
+	mtMonitor     *monitoring.MultiTenantMonitor
+	legacyMonitor AlertMonitor
+	wsHub         *websocket.Hub
 }

 // NewAlertHandlers creates new alert handlers
-func NewAlertHandlers(monitor AlertMonitor, wsHub *websocket.Hub) *AlertHandlers {
+func NewAlertHandlers(mtm *monitoring.MultiTenantMonitor, monitor AlertMonitor, wsHub *websocket.Hub) *AlertHandlers {
+	// If mtm is provided, try to populate legacyMonitor from "default" org if not provided
+	if monitor == nil && mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil {
+			monitor = NewAlertMonitorWrapper(m)
+		}
+	}
 	return &AlertHandlers{
-		monitor: monitor,
-		wsHub:   wsHub,
+		mtMonitor:     mtm,
+		legacyMonitor: monitor,
+		wsHub:         wsHub,
 	}
 }

+// SetMultiTenantMonitor updates the multi-tenant monitor reference
+func (h *AlertHandlers) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+	h.mtMonitor = mtm
+	if mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil {
+			h.legacyMonitor = NewAlertMonitorWrapper(m)
+		}
+	}
+}
+
 // SetMonitor updates the monitor reference for alert handlers.
 func (h *AlertHandlers) SetMonitor(m AlertMonitor) {
-	h.monitor = m
+	h.legacyMonitor = m
 }

+func (h *AlertHandlers) getMonitor(ctx context.Context) AlertMonitor {
+	orgID := GetOrgID(ctx)
+	if h.mtMonitor != nil {
+		if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+			return NewAlertMonitorWrapper(m)
+		}
+	}
+	return h.legacyMonitor
+}
+
 // validateAlertID validates an alert ID for security.
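With getMonitor(ctx) in place, tenant selection for all the alert endpoints below is driven entirely by the incoming header. A quick way to exercise this from a client, under assumptions: the base URL, port, and exact endpoint path are illustrative, though GetActiveAlerts does appear in this diff:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical local Pulse instance and route; adjust to the real router.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:7655/api/alerts/active", nil)
	if err != nil {
		panic(err)
	}
	// Route the request to tenant "acme"; omit the header (or send "default")
	// to hit the legacy single-tenant state.
	req.Header.Set("X-Pulse-Org-ID", "acme")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}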
@@ -100,7 +130,7 @@ func validateAlertID(alertID string) bool {

 // GetAlertConfig returns the current alert configuration
 func (h *AlertHandlers) GetAlertConfig(w http.ResponseWriter, r *http.Request) {
-	config := h.monitor.GetAlertManager().GetConfig()
+	config := h.getMonitor(r.Context()).GetAlertManager().GetConfig()

 	if err := utils.WriteJSONResponse(w, config); err != nil {
 		log.Error().Err(err).Msg("Failed to write alert config response")

@@ -129,25 +159,25 @@ func (h *AlertHandlers) UpdateAlertConfig(w http.ResponseWriter, r *http.Request) {
 			Msg("Migrated deprecated GroupingWindow to Grouping.Window")
 	}

-	h.monitor.GetAlertManager().UpdateConfig(config)
-	updatedConfig := h.monitor.GetAlertManager().GetConfig()
+	h.getMonitor(r.Context()).GetAlertManager().UpdateConfig(config)
+	updatedConfig := h.getMonitor(r.Context()).GetAlertManager().GetConfig()

 	// Update notification manager with schedule settings
-	h.monitor.GetNotificationManager().SetCooldown(updatedConfig.Schedule.Cooldown)
+	h.getMonitor(r.Context()).GetNotificationManager().SetCooldown(updatedConfig.Schedule.Cooldown)

 	groupWindow = updatedConfig.Schedule.Grouping.Window
 	if groupWindow == 0 && updatedConfig.Schedule.GroupingWindow != 0 {
 		groupWindow = updatedConfig.Schedule.GroupingWindow
 	}
-	h.monitor.GetNotificationManager().SetGroupingWindow(groupWindow)
-	h.monitor.GetNotificationManager().SetGroupingOptions(
+	h.getMonitor(r.Context()).GetNotificationManager().SetGroupingWindow(groupWindow)
+	h.getMonitor(r.Context()).GetNotificationManager().SetGroupingOptions(
 		updatedConfig.Schedule.Grouping.ByNode,
 		updatedConfig.Schedule.Grouping.ByGuest,
 	)
-	h.monitor.GetNotificationManager().SetNotifyOnResolve(updatedConfig.Schedule.NotifyOnResolve)
+	h.getMonitor(r.Context()).GetNotificationManager().SetNotifyOnResolve(updatedConfig.Schedule.NotifyOnResolve)

 	// Save to persistent storage
-	if err := h.monitor.GetConfigPersistence().SaveAlertConfig(updatedConfig); err != nil {
+	if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveAlertConfig(updatedConfig); err != nil {
 		// Log error but don't fail the request
 		log.Error().Err(err).Msg("Failed to save alert configuration")
 	}

@@ -163,7 +193,7 @@ func (h *AlertHandlers) UpdateAlertConfig(w http.ResponseWriter, r *http.Request) {
 // ActivateAlerts activates alert notifications
 func (h *AlertHandlers) ActivateAlerts(w http.ResponseWriter, r *http.Request) {
 	// Get current config
-	config := h.monitor.GetAlertManager().GetConfig()
+	config := h.getMonitor(r.Context()).GetAlertManager().GetConfig()

 	// Check if already active
 	if config.ActivationState == alerts.ActivationActive {

@@ -184,22 +214,22 @@ func (h *AlertHandlers) ActivateAlerts(w http.ResponseWriter, r *http.Request) {
 	config.ActivationTime = &now

 	// Update config
-	h.monitor.GetAlertManager().UpdateConfig(config)
+	h.getMonitor(r.Context()).GetAlertManager().UpdateConfig(config)

 	// Save to persistent storage
-	if err := h.monitor.GetConfigPersistence().SaveAlertConfig(config); err != nil {
+	if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveAlertConfig(config); err != nil {
 		log.Error().Err(err).Msg("Failed to save alert configuration after activation")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
 	}

 	// Notify about existing critical alerts after activation
-	activeAlerts := h.monitor.GetAlertManager().GetActiveAlerts()
+	activeAlerts := h.getMonitor(r.Context()).GetAlertManager().GetActiveAlerts()
 	criticalCount := 0
 	for _, alert := range activeAlerts {
 		if alert.Level == alerts.AlertLevelCritical && !alert.Acknowledged {
 			// Re-dispatch critical alerts to trigger notifications
-			h.monitor.GetAlertManager().NotifyExistingAlert(alert.ID)
+			h.getMonitor(r.Context()).GetAlertManager().NotifyExistingAlert(alert.ID)
 			criticalCount++
 		}
 	}

@@ -224,7 +254,7 @@ func (h *AlertHandlers) ActivateAlerts(w http.ResponseWriter, r *http.Request) {

 // GetActiveAlerts returns all active alerts
 func (h *AlertHandlers) GetActiveAlerts(w http.ResponseWriter, r *http.Request) {
-	alerts := h.monitor.GetAlertManager().GetActiveAlerts()
+	alerts := h.getMonitor(r.Context()).GetAlertManager().GetActiveAlerts()

 	if err := utils.WriteJSONResponse(w, alerts); err != nil {
 		log.Error().Err(err).Msg("Failed to write active alerts response")

@@ -370,12 +400,12 @@ func (h *AlertHandlers) GetAlertHistory(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	if h.monitor == nil {
+	if h.getMonitor(r.Context()) == nil {
 		http.Error(w, "monitor is not initialized", http.StatusServiceUnavailable)
 		return
 	}

-	manager := h.monitor.GetAlertManager()
+	manager := h.getMonitor(r.Context()).GetAlertManager()
 	var history []alerts.Alert
 	if startTime != nil {
 		history = manager.GetAlertHistorySince(*startTime, fetchLimit)

@@ -403,7 +433,7 @@ func (h *AlertHandlers) GetAlertIncidentTimeline(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	store := h.monitor.GetIncidentStore()
+	store := h.getMonitor(r.Context()).GetIncidentStore()
 	if store == nil {
 		http.Error(w, "Incident store unavailable", http.StatusServiceUnavailable)
 		return

@@ -472,7 +502,7 @@ func (h *AlertHandlers) SaveAlertIncidentNote(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	store := h.monitor.GetIncidentStore()
+	store := h.getMonitor(r.Context()).GetIncidentStore()
 	if store == nil {
 		http.Error(w, "Incident store unavailable", http.StatusServiceUnavailable)
 		return

@@ -522,7 +552,7 @@ func (h *AlertHandlers) SaveAlertIncidentNote(w http.ResponseWriter, r *http.Request) {

 // ClearAlertHistory clears all alert history
 func (h *AlertHandlers) ClearAlertHistory(w http.ResponseWriter, r *http.Request) {
-	if err := h.monitor.GetAlertManager().ClearAlertHistory(); err != nil {
+	if err := h.getMonitor(r.Context()).GetAlertManager().ClearAlertHistory(); err != nil {
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}

@@ -570,7 +600,7 @@ func (h *AlertHandlers) UnacknowledgeAlert(w http.ResponseWriter, r *http.Request) {
 		Str("path", r.URL.Path).
 		Msg("Attempting to unacknowledge alert")

-	if err := h.monitor.GetAlertManager().UnacknowledgeAlert(alertID); err != nil {
+	if err := h.getMonitor(r.Context()).GetAlertManager().UnacknowledgeAlert(alertID); err != nil {
 		log.Error().
 			Err(err).
 			Str("alertID", alertID).

@@ -579,7 +609,7 @@ func (h *AlertHandlers) UnacknowledgeAlert(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	h.monitor.SyncAlertState()
+	h.getMonitor(r.Context()).SyncAlertState()

 	log.Info().
 		Str("alertID", alertID).

@@ -594,7 +624,7 @@ func (h *AlertHandlers) UnacknowledgeAlert(w http.ResponseWriter, r *http.Request) {
 	// Do this in a goroutine to avoid blocking the HTTP response
 	if h.wsHub != nil {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 			log.Debug().Msg("Broadcasted state after alert unacknowledgment")
 		}()

@@ -648,7 +678,7 @@ func (h *AlertHandlers) AcknowledgeAlert(w http.ResponseWriter, r *http.Request)
 		Str("alertID", alertID).
 		Msg("About to call AcknowledgeAlert on manager")

-	if err := h.monitor.GetAlertManager().AcknowledgeAlert(alertID, user); err != nil {
+	if err := h.getMonitor(r.Context()).GetAlertManager().AcknowledgeAlert(alertID, user); err != nil {
 		log.Error().
 			Err(err).
 			Str("alertID", alertID).

@@ -657,7 +687,7 @@ func (h *AlertHandlers) AcknowledgeAlert(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	h.monitor.SyncAlertState()
+	h.getMonitor(r.Context()).SyncAlertState()

 	log.Info().
 		Str("alertID", alertID).

@@ -673,7 +703,7 @@ func (h *AlertHandlers) AcknowledgeAlert(w http.ResponseWriter, r *http.Request)
 	// Do this in a goroutine to avoid blocking the HTTP response
 	if h.wsHub != nil {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 			log.Debug().Msg("Broadcasted state after alert acknowledgment")
 		}()

@@ -711,13 +741,13 @@ func (h *AlertHandlers) AcknowledgeAlertByBody(w http.ResponseWriter, r *http.Request) {

 	user := "admin"

-	if err := h.monitor.GetAlertManager().AcknowledgeAlert(alertID, user); err != nil {
+	if err := h.getMonitor(r.Context()).GetAlertManager().AcknowledgeAlert(alertID, user); err != nil {
 		log.Error().Err(err).Str("alertID", alertID).Msg("Failed to acknowledge alert")
 		http.Error(w, err.Error(), http.StatusNotFound)
 		return
 	}

-	h.monitor.SyncAlertState()
+	h.getMonitor(r.Context()).SyncAlertState()

 	log.Info().Str("alertID", alertID).Str("user", user).Msg("Alert acknowledged successfully")

@@ -727,7 +757,7 @@ func (h *AlertHandlers) AcknowledgeAlertByBody(w http.ResponseWriter, r *http.Request) {

 	if h.wsHub != nil {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 		}()
 	}

@@ -755,13 +785,13 @@ func (h *AlertHandlers) UnacknowledgeAlertByBody(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if err := h.monitor.GetAlertManager().UnacknowledgeAlert(alertID); err != nil {
+	if err := h.getMonitor(r.Context()).GetAlertManager().UnacknowledgeAlert(alertID); err != nil {
 		log.Error().Err(err).Str("alertID", alertID).Msg("Failed to unacknowledge alert")
 		http.Error(w, err.Error(), http.StatusNotFound)
 		return
 	}

-	h.monitor.SyncAlertState()
+	h.getMonitor(r.Context()).SyncAlertState()

 	log.Info().Str("alertID", alertID).Msg("Alert unacknowledged successfully")

@@ -771,7 +801,7 @@ func (h *AlertHandlers) UnacknowledgeAlertByBody(w http.ResponseWriter, r *http.Request) {

 	if h.wsHub != nil {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 		}()
 	}

@@ -799,12 +829,12 @@ func (h *AlertHandlers) ClearAlertByBody(w http.ResponseWriter, r *http.Request)
 		return
 	}

-	if !h.monitor.GetAlertManager().ClearAlert(alertID) {
+	if !h.getMonitor(r.Context()).GetAlertManager().ClearAlert(alertID) {
 		http.Error(w, "Alert not found", http.StatusNotFound)
 		return
 	}

-	h.monitor.SyncAlertState()
+	h.getMonitor(r.Context()).SyncAlertState()

 	log.Info().Str("alertID", alertID).Msg("Alert cleared successfully")

@@ -814,7 +844,7 @@ func (h *AlertHandlers) ClearAlertByBody(w http.ResponseWriter, r *http.Request)

 	if h.wsHub != nil {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 		}()
 	}

@@ -854,11 +884,11 @@ func (h *AlertHandlers) ClearAlert(w http.ResponseWriter, r *http.Request) {
 		return
 	}

-	if !h.monitor.GetAlertManager().ClearAlert(alertID) {
+	if !h.getMonitor(r.Context()).GetAlertManager().ClearAlert(alertID) {
 		http.Error(w, "Alert not found", http.StatusNotFound)
 		return
 	}
-	h.monitor.SyncAlertState()
+	h.getMonitor(r.Context()).SyncAlertState()

 	// Send response immediately
 	if err := utils.WriteJSONResponse(w, map[string]bool{"success": true}); err != nil {

@@ -869,7 +899,7 @@ func (h *AlertHandlers) ClearAlert(w http.ResponseWriter, r *http.Request) {
 	// Do this in a goroutine to avoid blocking the HTTP response
 	if h.wsHub != nil {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 			log.Debug().Msg("Broadcasted state after alert clear")
 		}()

@@ -910,7 +940,7 @@ func (h *AlertHandlers) BulkAcknowledgeAlerts(w http.ResponseWriter, r *http.Request) {
 			"alertId": alertID,
 			"success": true,
 		}
-		if err := h.monitor.GetAlertManager().AcknowledgeAlert(alertID, user); err != nil {
+		if err := h.getMonitor(r.Context()).GetAlertManager().AcknowledgeAlert(alertID, user); err != nil {
 			result["success"] = false
 			result["error"] = err.Error()
 		} else {

@@ -920,7 +950,7 @@ func (h *AlertHandlers) BulkAcknowledgeAlerts(w http.ResponseWriter, r *http.Request) {
 	}

 	if anySuccess {
-		h.monitor.SyncAlertState()
+		h.getMonitor(r.Context()).SyncAlertState()
 	}

 	// Send response immediately

@@ -934,7 +964,7 @@ func (h *AlertHandlers) BulkAcknowledgeAlerts(w http.ResponseWriter, r *http.Request) {
 	// Do this in a goroutine to avoid blocking the HTTP response
 	if h.wsHub != nil && anySuccess {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 			log.Debug().Msg("Broadcasted state after bulk alert acknowledgment")
 		}()

@@ -969,7 +999,7 @@ func (h *AlertHandlers) BulkClearAlerts(w http.ResponseWriter, r *http.Request)
 			"alertId": alertID,
 			"success": true,
 		}
-		if h.monitor.GetAlertManager().ClearAlert(alertID) {
+		if h.getMonitor(r.Context()).GetAlertManager().ClearAlert(alertID) {
 			anySuccess = true
 		} else {
 			result["success"] = false

@@ -979,7 +1009,7 @@ func (h *AlertHandlers) BulkClearAlerts(w http.ResponseWriter, r *http.Request)
 	}

 	if anySuccess {
-		h.monitor.SyncAlertState()
+		h.getMonitor(r.Context()).SyncAlertState()
 	}

 	// Send response immediately

@@ -993,7 +1023,7 @@ func (h *AlertHandlers) BulkClearAlerts(w http.ResponseWriter, r *http.Request)
 	// Do this in a goroutine to avoid blocking the HTTP response
 	if h.wsHub != nil && anySuccess {
 		go func() {
-			state := h.monitor.GetState()
+			state := h.getMonitor(r.Context()).GetState()
 			h.wsHub.BroadcastState(state.ToFrontend())
 			log.Debug().Msg("Broadcasted state after bulk alert clear")
 		}()
@@ -84,13 +84,18 @@ type SetupCode struct {
 	Used     bool
 	NodeType string // "pve" or "pbs"
 	Host     string // The host URL for validation
+	OrgID    string // Organization ID creating this code
 }
 
 // ConfigHandlers handles configuration-related API endpoints
 type ConfigHandlers struct {
-	config      *config.Config
-	persistence *config.ConfigPersistence
-	monitor     *monitoring.Monitor
+	mtPersistence *config.MultiTenantPersistence
+	mtMonitor     *monitoring.MultiTenantMonitor
+	// Legacy fields - to be removed or used as fallback
+	legacyConfig      *config.Config
+	legacyPersistence *config.ConfigPersistence
+	legacyMonitor     *monitoring.Monitor
+
 	reloadFunc               func() error
 	reloadSystemSettingsFunc func() // Function to reload cached system settings
 	wsHub                    *websocket.Hub
@@ -105,11 +110,33 @@ type ConfigHandlers struct {
 }
 
 // NewConfigHandlers creates a new ConfigHandlers instance
-func NewConfigHandlers(cfg *config.Config, monitor *monitoring.Monitor, reloadFunc func() error, wsHub *websocket.Hub, guestMetadataHandler *GuestMetadataHandler, reloadSystemSettingsFunc func()) *ConfigHandlers {
+func NewConfigHandlers(mtp *config.MultiTenantPersistence, mtm *monitoring.MultiTenantMonitor, reloadFunc func() error, wsHub *websocket.Hub, guestMetadataHandler *GuestMetadataHandler, reloadSystemSettingsFunc func()) *ConfigHandlers {
+	// Initialize with default (legacy) values if available, for backward compat during migration
+	// Ideally we fetch them from mtp/mtm for "default" org.
+	var defaultConfig *config.Config
+	var defaultMonitor *monitoring.Monitor
+	var defaultPersistence *config.ConfigPersistence
+
+	if mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil {
+			defaultMonitor = m
+			if m != nil {
+				defaultConfig = m.GetConfig()
+			}
+		}
+	}
+	if mtp != nil {
+		if p, err := mtp.GetPersistence("default"); err == nil {
+			defaultPersistence = p
+		}
+	}
+
 	h := &ConfigHandlers{
-		config:                   cfg,
-		persistence:              config.NewConfigPersistence(cfg.DataPath),
-		monitor:                  monitor,
+		mtPersistence:            mtp,
+		mtMonitor:                mtm,
+		legacyConfig:             defaultConfig,
+		legacyMonitor:            defaultMonitor,
+		legacyPersistence:        defaultPersistence,
 		reloadFunc:               reloadFunc,
 		reloadSystemSettingsFunc: reloadSystemSettingsFunc,
 		wsHub:                    wsHub,
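A wiring sketch for the new constructor signature. The two New* constructor names are assumptions, not confirmed by this diff, and dataDir, wsHub, and the callbacks are placeholders:

mtp, err := config.NewMultiTenantPersistence(dataDir) // assumed constructor
if err != nil {
	log.Fatal().Err(err).Msg("multi-tenant persistence init failed")
}
mtm := monitoring.NewMultiTenantMonitor(mtp) // assumed constructor
configHandlers := NewConfigHandlers(mtp, mtm, reloadMonitors, wsHub, guestMetadataHandler, reloadSystemSettings)
_ = configHandlers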
@@ -126,9 +153,23 @@ func NewConfigHandlers(cfg *config.Config, monitor *monitoring.Monitor, reloadFu
 	return h
 }
 
-// SetMonitor updates the monitor reference used by the config handlers.
+// SetMultiTenantMonitor updates the monitor reference used by the config handlers.
+func (h *ConfigHandlers) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+	h.mtMonitor = mtm
+	if mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil {
+			h.legacyMonitor = m
+			h.legacyConfig = m.GetConfig()
+		}
+	}
+}
+
+// SetMonitor updates the monitor reference used by the config handlers (legacy support).
 func (h *ConfigHandlers) SetMonitor(m *monitoring.Monitor) {
-	h.monitor = m
+	h.legacyMonitor = m
+	if m != nil {
+		h.legacyConfig = m.GetConfig()
+	}
 }
 
 // SetConfig updates the configuration reference used by the handlers.
@@ -136,7 +177,49 @@ func (h *ConfigHandlers) SetConfig(cfg *config.Config) {
 	if cfg == nil {
 		return
 	}
-	h.config = cfg
+	h.legacyConfig = cfg
 }
 
+// getContextState helper to retrieve tenant-specific state
+func (h *ConfigHandlers) getContextState(ctx context.Context) (*config.Config, *config.ConfigPersistence, *monitoring.Monitor) {
+	orgID := "default"
+	if ctx != nil {
+		if id := GetOrgID(ctx); id != "" {
+			orgID = id
+		}
+	}
+
+	// Try to get from multi-tenant managers first
+	if h.mtMonitor != nil {
+		if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+			cfg := m.GetConfig()
+			var p *config.ConfigPersistence
+			if h.mtPersistence != nil {
+				p, _ = h.mtPersistence.GetPersistence(orgID)
+			}
+			return cfg, p, m
+		} else if err != nil {
+			log.Warn().Str("orgID", orgID).Err(err).Msg("Falling back to legacy config - failed to get tenant monitor")
+		}
+	}
+
+	// Fallback to legacy (should mostly happen for "default" or initialization)
+	return h.legacyConfig, h.legacyPersistence, h.legacyMonitor
+}
+
+func (h *ConfigHandlers) getConfig(ctx context.Context) *config.Config {
+	c, _, _ := h.getContextState(ctx)
+	return c
+}
+
+func (h *ConfigHandlers) getPersistence(ctx context.Context) *config.ConfigPersistence {
+	_, p, _ := h.getContextState(ctx)
+	return p
+}
+
+func (h *ConfigHandlers) getMonitor(ctx context.Context) *monitoring.Monitor {
+	_, _, m := h.getContextState(ctx)
+	return m
+}
+
 // cleanupExpiredCodes removes expired or used setup codes periodically
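With these helpers in place, every handler follows one call pattern. A sketch (the handler itself is invented; the helpers come from the hunk above):

func (h *ConfigHandlers) handleNodeCount(w http.ResponseWriter, r *http.Request) {
	cfg := h.getConfig(r.Context()) // tenant config, or legacy fallback
	if cfg == nil {
		http.Error(w, "tenant not initialized", http.StatusServiceUnavailable)
		return
	}
	json.NewEncoder(w).Encode(map[string]int{"pveNodes": len(cfg.PVEInstances)})
}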
@@ -226,16 +309,16 @@ func (h *ConfigHandlers) isRecentlyAutoRegistered(nodeType, nodeName string) boo
 	return true
 }
 
-func (h *ConfigHandlers) findInstanceNameByHost(nodeType, host string) string {
+func (h *ConfigHandlers) findInstanceNameByHost(ctx context.Context, nodeType, host string) string {
 	switch nodeType {
 	case "pve":
-		for _, node := range h.config.PVEInstances {
+		for _, node := range h.getConfig(ctx).PVEInstances {
 			if node.Host == host {
 				return node.Name
 			}
 		}
 	case "pbs":
-		for _, node := range h.config.PBSInstances {
+		for _, node := range h.getConfig(ctx).PBSInstances {
 			if node.Host == host {
 				return node.Name
 			}
@@ -290,7 +373,7 @@ func shouldSkipClusterAutoDetection(host, name string) bool {
 		strings.Contains(lowerName, "concurrent-")
 }
 
-func (h *ConfigHandlers) maybeRefreshClusterInfo(instance *config.PVEInstance) {
+func (h *ConfigHandlers) maybeRefreshClusterInfo(ctx context.Context, instance *config.PVEInstance) {
 	if instance == nil {
 		return
 	}
@@ -348,8 +431,8 @@ func (h *ConfigHandlers) maybeRefreshClusterInfo(instance *config.PVEInstance) {
 			Int("endpoints", len(clusterEndpoints)).
 			Msg("Updated cluster metadata after validation retry")
 
-		if h.persistence != nil {
-			if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+		if h.getPersistence(ctx) != nil {
+			if err := h.getPersistence(ctx).SaveNodesConfig(h.getConfig(ctx).PVEInstances, h.getConfig(ctx).PBSInstances, h.getConfig(ctx).PMGInstances); err != nil {
 				log.Warn().
 					Err(err).
 					Str("instance", instance.Name).
@@ -951,14 +1034,14 @@ func defaultDetectPVECluster(clientConfig proxmox.ClientConfig, nodeName string,
 }
 
 // GetAllNodesForAPI returns all configured nodes for API responses
-func (h *ConfigHandlers) GetAllNodesForAPI() []NodeResponse {
+func (h *ConfigHandlers) GetAllNodesForAPI(ctx context.Context) []NodeResponse {
 	nodes := []NodeResponse{}
 
 	// Add PVE nodes
-	for i := range h.config.PVEInstances {
+	for i := range h.getConfig(ctx).PVEInstances {
 		// Refresh cluster metadata if we previously failed to detect endpoints
-		h.maybeRefreshClusterInfo(&h.config.PVEInstances[i])
-		pve := h.config.PVEInstances[i]
+		h.maybeRefreshClusterInfo(ctx, &h.getConfig(ctx).PVEInstances[i])
+		pve := h.getConfig(ctx).PVEInstances[i]
 		node := NodeResponse{
 			ID:   generateNodeID("pve", i),
 			Type: "pve",
@@ -978,7 +1061,7 @@ func (h *ConfigHandlers) GetAllNodesForAPI() []NodeResponse {
 			MonitorPhysicalDisks:         pve.MonitorPhysicalDisks,
 			PhysicalDiskPollingMinutes:   pve.PhysicalDiskPollingMinutes,
 			TemperatureMonitoringEnabled: pve.TemperatureMonitoringEnabled,
-			Status:                       h.getNodeStatus("pve", pve.Name),
+			Status:                       h.getNodeStatus(ctx, "pve", pve.Name),
 			IsCluster:                    pve.IsCluster,
 			ClusterName:                  pve.ClusterName,
 			ClusterEndpoints:             pve.ClusterEndpoints,
@@ -988,7 +1071,7 @@ func (h *ConfigHandlers) GetAllNodesForAPI() []NodeResponse {
 	}
 
 	// Add PBS nodes
-	for i, pbs := range h.config.PBSInstances {
+	for i, pbs := range h.getConfig(ctx).PBSInstances {
 		node := NodeResponse{
 			ID:   generateNodeID("pbs", i),
 			Type: "pbs",
@@ -1008,14 +1091,14 @@ func (h *ConfigHandlers) GetAllNodesForAPI() []NodeResponse {
 			MonitorPruneJobs:   pbs.MonitorPruneJobs,
 			MonitorGarbageJobs: pbs.MonitorGarbageJobs,
 			ExcludeDatastores:  pbs.ExcludeDatastores,
-			Status:             h.getNodeStatus("pbs", pbs.Name),
+			Status:             h.getNodeStatus(ctx, "pbs", pbs.Name),
 			Source:             pbs.Source,
 		}
 		nodes = append(nodes, node)
 	}
 
 	// Add PMG nodes
-	for i, pmgInst := range h.config.PMGInstances {
+	for i, pmgInst := range h.getConfig(ctx).PMGInstances {
 		monitorMailStats := pmgInst.MonitorMailStats
 		if !pmgInst.MonitorMailStats && !pmgInst.MonitorQueues && !pmgInst.MonitorQuarantine && !pmgInst.MonitorDomainStats {
 			monitorMailStats = true
@@ -1038,7 +1121,7 @@ func (h *ConfigHandlers) GetAllNodesForAPI() []NodeResponse {
 			MonitorQueues:      pmgInst.MonitorQueues,
 			MonitorQuarantine:  pmgInst.MonitorQuarantine,
 			MonitorDomainStats: pmgInst.MonitorDomainStats,
-			Status:             h.getNodeStatus("pmg", pmgInst.Name),
+			Status:             h.getNodeStatus(ctx, "pmg", pmgInst.Name),
 		}
 		nodes = append(nodes, node)
 	}
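Each loop iteration above calls h.getConfig(ctx) again, which re-runs getContextState every time. Assuming the resolution is stable for the life of one request, a single snapshot is equivalent; a sketch, not what the commit actually ships:

func (h *ConfigHandlers) GetAllNodesForAPI(ctx context.Context) []NodeResponse {
	cfg := h.getConfig(ctx) // one org-scoped lookup for the whole walk
	nodes := []NodeResponse{}
	for i := range cfg.PVEInstances {
		h.maybeRefreshClusterInfo(ctx, &cfg.PVEInstances[i])
		// ... build the NodeResponse from cfg.PVEInstances[i] as above ...
	}
	// ... PBS and PMG loops over cfg.PBSInstances / cfg.PMGInstances ...
	return nodes
}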
@@ -1054,7 +1137,7 @@ func (h *ConfigHandlers) HandleGetNodes(w http.ResponseWriter, r *http.Request)
 		mockNodes := []NodeResponse{}
 
 		// Get mock state to extract node information
-		state := h.monitor.GetState()
+		state := h.getMonitor(r.Context()).GetState()
 
 		// Get all cluster nodes and standalone nodes
 		var clusterNodes []models.Node
@@ -1177,7 +1260,7 @@ func (h *ConfigHandlers) HandleGetNodes(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	nodes := h.GetAllNodesForAPI()
+	nodes := h.GetAllNodesForAPI(r.Context())
 
 	w.Header().Set("Content-Type", "application/json")
 	json.NewEncoder(w).Encode(nodes)
@@ -1349,7 +1432,7 @@ func resolveHostnameToIP(hostURL string) string {
 // disambiguateNodeName ensures a node name is unique by appending the host IP if needed.
 // This handles cases where multiple Proxmox hosts have the same hostname (e.g., "px1" on different networks).
 // Returns the original name if unique, or "name (ip)" if duplicates exist.
-func (h *ConfigHandlers) disambiguateNodeName(name, host, nodeType string) string {
+func (h *ConfigHandlers) disambiguateNodeName(ctx context.Context, name, host, nodeType string) string {
 	if name == "" {
 		return name
 	}
@@ -1357,14 +1440,14 @@ func (h *ConfigHandlers) disambiguateNodeName(name, host, nodeType string) strin
 	// Check if any existing node has the same name
 	hasDuplicate := false
 	if nodeType == "pve" {
-		for _, node := range h.config.PVEInstances {
+		for _, node := range h.getConfig(ctx).PVEInstances {
 			if strings.EqualFold(node.Name, name) && node.Host != host {
 				hasDuplicate = true
 				break
 			}
 		}
 	} else if nodeType == "pbs" {
-		for _, node := range h.config.PBSInstances {
+		for _, node := range h.getConfig(ctx).PBSInstances {
 			if strings.EqualFold(node.Name, name) && node.Host != host {
 				hasDuplicate = true
 				break
@@ -1489,21 +1572,21 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 	// We disambiguate names later, but Host URLs must be unique.
 	switch req.Type {
 	case "pve":
-		for _, node := range h.config.PVEInstances {
+		for _, node := range h.getConfig(r.Context()).PVEInstances {
 			if node.Host == normalizedHost {
 				http.Error(w, "A node with this host URL already exists", http.StatusConflict)
 				return
 			}
 		}
 	case "pbs":
-		for _, node := range h.config.PBSInstances {
+		for _, node := range h.getConfig(r.Context()).PBSInstances {
 			if node.Host == normalizedHost {
 				http.Error(w, "A node with this host URL already exists", http.StatusConflict)
 				return
 			}
 		}
 	case "pmg":
-		for _, node := range h.config.PMGInstances {
+		for _, node := range h.getConfig(r.Context()).PMGInstances {
 			if node.Host == normalizedHost {
 				http.Error(w, "A node with this host URL already exists", http.StatusConflict)
 				return
@@ -1545,8 +1628,8 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 	// the node as an endpoint to the existing cluster instead of creating a new instance.
 	// This prevents duplicate VMs/containers when users install agents on multiple cluster nodes.
 	if isCluster && clusterName != "" {
-		for i := range h.config.PVEInstances {
-			existingInstance := &h.config.PVEInstances[i]
+		for i := range h.getConfig(r.Context()).PVEInstances {
+			existingInstance := &h.getConfig(r.Context()).PVEInstances[i]
 			if existingInstance.IsCluster && existingInstance.ClusterName == clusterName {
 				// Found existing cluster with same name - merge endpoints!
 				log.Info().
@@ -1571,8 +1654,8 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 				}
 
 				// Save the updated configuration
-				if h.persistence != nil {
-					if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+				if h.getPersistence(r.Context()) != nil {
+					if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 						log.Warn().Err(err).Msg("Failed to persist cluster endpoint merge")
 					}
 				}
@@ -1629,7 +1712,7 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 		}
 
 		// Disambiguate name if duplicate hostnames exist (Issue #891)
-		displayName := h.disambiguateNodeName(req.Name, host, "pve")
+		displayName := h.disambiguateNodeName(r.Context(), req.Name, host, "pve")
 
 		pve := config.PVEInstance{
 			Name: displayName,
@@ -1656,7 +1739,7 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 			pve.PhysicalDiskPollingMinutes = *req.PhysicalDiskPollingMinutes
 		}
 
-		h.config.PVEInstances = append(h.config.PVEInstances, pve)
+		h.getConfig(r.Context()).PVEInstances = append(h.getConfig(r.Context()).PVEInstances, pve)
 
 		if isCluster {
 			log.Info().
@@ -1764,7 +1847,7 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 		}
 
 		// Disambiguate name if duplicate hostnames exist (Issue #891)
-		pbsDisplayName := h.disambiguateNodeName(req.Name, host, "pbs")
+		pbsDisplayName := h.disambiguateNodeName(r.Context(), req.Name, host, "pbs")
 
 		pbs := config.PBSInstance{
 			Name: pbsDisplayName,
@@ -1784,7 +1867,7 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 			MonitorGarbageJobs:           monitorGarbageJobs,
 			TemperatureMonitoringEnabled: req.TemperatureMonitoringEnabled,
 		}
-		h.config.PBSInstances = append(h.config.PBSInstances, pbs)
+		h.getConfig(r.Context()).PBSInstances = append(h.getConfig(r.Context()).PBSInstances, pbs)
 	} else if req.Type == "pmg" {
 		host := normalizedHost
 
@@ -1840,7 +1923,7 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 		// Disambiguate name if duplicate hostnames exist (Issue #891)
 		// Note: PMG uses similar logic to PBS - we check against PMG instances
 		pmgDisplayName := req.Name
-		for _, node := range h.config.PMGInstances {
+		for _, node := range h.getConfig(r.Context()).PMGInstances {
 			if strings.EqualFold(node.Name, req.Name) && node.Host != host {
 				parsed, err := url.Parse(host)
 				if err == nil && parsed.Host != "" {
@@ -1866,11 +1949,11 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 			MonitorDomainStats:           monitorDomainStats,
 			TemperatureMonitoringEnabled: req.TemperatureMonitoringEnabled,
 		}
-		h.config.PMGInstances = append(h.config.PMGInstances, pmgInstance)
+		h.getConfig(r.Context()).PMGInstances = append(h.getConfig(r.Context()).PMGInstances, pmgInstance)
 	}
 
 	// Save configuration to disk using our persistence instance
-	if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+	if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 		log.Error().Err(err).Msg("Failed to save nodes configuration")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
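The three duplicate-host loops at the top of HandleAddNode could share one predicate over a single config snapshot. A sketch; hostExists is invented for illustration and is not part of this commit:

func hostExists(cfg *config.Config, nodeType, host string) bool {
	switch nodeType {
	case "pve":
		for _, n := range cfg.PVEInstances {
			if n.Host == host {
				return true
			}
		}
	case "pbs":
		for _, n := range cfg.PBSInstances {
			if n.Host == host {
				return true
			}
		}
	case "pmg":
		for _, n := range cfg.PMGInstances {
			if n.Host == host {
				return true
			}
		}
	}
	return false
}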
@@ -2232,8 +2315,8 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 	}
 
 	// Update the node
-	if nodeType == "pve" && index < len(h.config.PVEInstances) {
-		pve := &h.config.PVEInstances[index]
+	if nodeType == "pve" && index < len(h.getConfig(r.Context()).PVEInstances) {
+		pve := &h.getConfig(r.Context()).PVEInstances[index]
 
 		// Only update name if provided
 		if req.Name != "" {
@@ -2310,8 +2393,8 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 		if req.TemperatureMonitoringEnabled != nil {
 			pve.TemperatureMonitoringEnabled = req.TemperatureMonitoringEnabled
 		}
-	} else if nodeType == "pbs" && index < len(h.config.PBSInstances) {
-		pbs := &h.config.PBSInstances[index]
+	} else if nodeType == "pbs" && index < len(h.getConfig(r.Context()).PBSInstances) {
+		pbs := &h.getConfig(r.Context()).PBSInstances[index]
 		pbs.Name = req.Name
 
 		host, err := normalizeNodeHost(req.Host, nodeType)
@@ -2395,8 +2478,8 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 		if req.ExcludeDatastores != nil {
 			pbs.ExcludeDatastores = req.ExcludeDatastores
 		}
-	} else if nodeType == "pmg" && index < len(h.config.PMGInstances) {
-		pmgInst := &h.config.PMGInstances[index]
+	} else if nodeType == "pmg" && index < len(h.getConfig(r.Context()).PMGInstances) {
+		pmgInst := &h.getConfig(r.Context()).PMGInstances[index]
 		pmgInst.Name = req.Name
 
 		if req.Host != "" {
@@ -2476,7 +2559,7 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 	}
 
 	// Save configuration to disk using our persistence instance
-	if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+	if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 		log.Error().Err(err).Msg("Failed to save nodes configuration")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
@@ -2486,15 +2569,15 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 	// This fixes issue #440 where PBS alert thresholds were being reset
 	// Alert overrides are stored separately from node configuration
 	// and must be explicitly preserved during node updates
-	if h.monitor != nil {
+	if h.getMonitor(r.Context()) != nil {
 		// Load current alert configuration to preserve overrides
-		alertConfig, err := h.persistence.LoadAlertConfig()
+		alertConfig, err := h.getPersistence(r.Context()).LoadAlertConfig()
 		if err == nil && alertConfig != nil {
 			// For PBS nodes, we need to handle ID mapping
 			// PBS monitoring uses "pbs-<name>" but config uses "pbs-<index>"
 			// We need to preserve overrides by the monitoring ID
-			if nodeType == "pbs" && index < len(h.config.PBSInstances) {
-				pbsName := h.config.PBSInstances[index].Name
+			if nodeType == "pbs" && index < len(h.getConfig(r.Context()).PBSInstances) {
+				pbsName := h.getConfig(r.Context()).PBSInstances[index].Name
 				monitoringID := "pbs-" + pbsName
 
 				// Check if there are overrides for this PBS node
@@ -2510,7 +2593,7 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 			}
 
 			// Apply the alert configuration to preserve all overrides
-			h.monitor.GetAlertManager().UpdateConfig(*alertConfig)
+			h.getMonitor(r.Context()).GetAlertManager().UpdateConfig(*alertConfig)
 			log.Debug().
 				Str("nodeID", nodeID).
 				Str("nodeType", nodeType).
@@ -2528,16 +2611,16 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request
 	}
 
 	// Trigger discovery refresh after adding node
-	if h.monitor != nil && h.monitor.GetDiscoveryService() != nil {
+	if h.getMonitor(r.Context()) != nil && h.getMonitor(r.Context()).GetDiscoveryService() != nil {
 		log.Info().Msg("Triggering discovery refresh after adding node")
-		h.monitor.GetDiscoveryService().ForceRefresh()
+		h.getMonitor(r.Context()).GetDiscoveryService().ForceRefresh()
 
 		// Broadcast discovery update via WebSocket
 		if h.wsHub != nil {
 			// Wait a moment for discovery to complete
 			go func() {
 				time.Sleep(2 * time.Second)
-				result, _ := h.monitor.GetDiscoveryService().GetCachedResult()
+				result, _ := h.getMonitor(r.Context()).GetDiscoveryService().GetCachedResult()
 				if result != nil {
 					h.wsHub.BroadcastMessage(websocket.Message{
 						Type: "discovery_update",
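The delayed goroutine above calls h.getMonitor(r.Context()) two seconds after the handler has likely returned. That still works, since cancelling a context does not remove its stored values, but capturing the monitor before sleeping avoids leaning on that subtlety. A sketch:

mon := h.getMonitor(r.Context()) // resolved while the request is still live
go func() {
	time.Sleep(2 * time.Second)
	result, _ := mon.GetDiscoveryService().GetCachedResult()
	if result != nil {
		h.wsHub.BroadcastMessage(websocket.Message{
			Type: "discovery_update",
			// ... payload as in the hunk above ...
		})
	}
}()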
@@ -2592,41 +2675,41 @@ func (h *ConfigHandlers) HandleDeleteNode(w http.ResponseWriter, r *http.Request
 		Str("nodeID", nodeID).
 		Str("nodeType", nodeType).
 		Int("index", index).
-		Int("pveCount", len(h.config.PVEInstances)).
-		Int("pbsCount", len(h.config.PBSInstances)).
-		Int("pmgCount", len(h.config.PMGInstances)).
+		Int("pveCount", len(h.getConfig(r.Context()).PVEInstances)).
+		Int("pbsCount", len(h.getConfig(r.Context()).PBSInstances)).
+		Int("pmgCount", len(h.getConfig(r.Context()).PMGInstances)).
 		Msg("Attempting to delete node")
 
 	var deletedNodeHost string
 
 	// Delete the node
-	if nodeType == "pve" && index < len(h.config.PVEInstances) {
-		deletedNodeHost = h.config.PVEInstances[index].Host
+	if nodeType == "pve" && index < len(h.getConfig(r.Context()).PVEInstances) {
+		deletedNodeHost = h.getConfig(r.Context()).PVEInstances[index].Host
 		log.Info().Str("nodeID", nodeID).Int("index", index).Msg("Deleting PVE node")
-		h.config.PVEInstances = append(h.config.PVEInstances[:index], h.config.PVEInstances[index+1:]...)
-	} else if nodeType == "pbs" && index < len(h.config.PBSInstances) {
-		deletedNodeHost = h.config.PBSInstances[index].Host
+		h.getConfig(r.Context()).PVEInstances = append(h.getConfig(r.Context()).PVEInstances[:index], h.getConfig(r.Context()).PVEInstances[index+1:]...)
+	} else if nodeType == "pbs" && index < len(h.getConfig(r.Context()).PBSInstances) {
+		deletedNodeHost = h.getConfig(r.Context()).PBSInstances[index].Host
 		log.Info().Str("nodeID", nodeID).Int("index", index).Msg("Deleting PBS node")
-		h.config.PBSInstances = append(h.config.PBSInstances[:index], h.config.PBSInstances[index+1:]...)
-	} else if nodeType == "pmg" && index < len(h.config.PMGInstances) {
-		deletedNodeHost = h.config.PMGInstances[index].Host
+		h.getConfig(r.Context()).PBSInstances = append(h.getConfig(r.Context()).PBSInstances[:index], h.getConfig(r.Context()).PBSInstances[index+1:]...)
+	} else if nodeType == "pmg" && index < len(h.getConfig(r.Context()).PMGInstances) {
+		deletedNodeHost = h.getConfig(r.Context()).PMGInstances[index].Host
 		log.Info().Str("nodeID", nodeID).Int("index", index).Msg("Deleting PMG node")
-		h.config.PMGInstances = append(h.config.PMGInstances[:index], h.config.PMGInstances[index+1:]...)
+		h.getConfig(r.Context()).PMGInstances = append(h.getConfig(r.Context()).PMGInstances[:index], h.getConfig(r.Context()).PMGInstances[index+1:]...)
 	} else {
 		log.Warn().
 			Str("nodeID", nodeID).
 			Str("nodeType", nodeType).
 			Int("index", index).
-			Int("pveCount", len(h.config.PVEInstances)).
-			Int("pbsCount", len(h.config.PBSInstances)).
-			Int("pmgCount", len(h.config.PMGInstances)).
+			Int("pveCount", len(h.getConfig(r.Context()).PVEInstances)).
+			Int("pbsCount", len(h.getConfig(r.Context()).PBSInstances)).
+			Int("pmgCount", len(h.getConfig(r.Context()).PMGInstances)).
 			Msg("Node not found for deletion")
 		http.Error(w, "Node not found", http.StatusNotFound)
 		return
 	}
 
 	// Save configuration to disk using our persistence instance
-	if err := h.persistence.SaveNodesConfigAllowEmpty(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+	if err := h.getPersistence(r.Context()).SaveNodesConfigAllowEmpty(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 		log.Error().Err(err).Msg("Failed to save nodes configuration")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
@@ -2666,8 +2749,8 @@ func (h *ConfigHandlers) HandleDeleteNode(w http.ResponseWriter, r *http.Request
 		time.Sleep(500 * time.Millisecond)
 
 		// Trigger full discovery refresh
-		if h.monitor != nil && h.monitor.GetDiscoveryService() != nil {
-			h.monitor.GetDiscoveryService().ForceRefresh()
+		if h.getMonitor(r.Context()) != nil && h.getMonitor(r.Context()).GetDiscoveryService() != nil {
+			h.getMonitor(r.Context()).GetDiscoveryService().ForceRefresh()
 			log.Info().Msg("Triggered background discovery refresh after node deletion")
 		}
 	}()
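The deletion branches use Go's append-splice idiom, which removes one element in place and shifts the tail left. A two-line demonstration; since Go 1.21 the standard library's slices.Delete is the equivalent library form:

s := []int{10, 20, 30, 40}
s = append(s[:1], s[2:]...) // removes index 1; s == [10 30 40]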
@@ -2719,12 +2802,12 @@ func (h *ConfigHandlers) HandleRefreshClusterNodes(w http.ResponseWriter, r *htt
 		return
 	}
 
-	if index >= len(h.config.PVEInstances) {
+	if index >= len(h.getConfig(r.Context()).PVEInstances) {
 		http.Error(w, "Node not found", http.StatusNotFound)
 		return
 	}
 
-	pve := &h.config.PVEInstances[index]
+	pve := &h.getConfig(r.Context()).PVEInstances[index]
 
 	// Create client config for cluster detection
 	clientConfig := config.CreateProxmoxConfig(pve)
@@ -2753,7 +2836,7 @@ func (h *ConfigHandlers) HandleRefreshClusterNodes(w http.ResponseWriter, r *htt
 	pve.ClusterEndpoints = clusterEndpoints
 
 	// Save configuration
-	if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+	if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 		log.Error().Err(err).Msg("Failed to save nodes configuration after cluster refresh")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
@@ -2977,8 +3060,8 @@ func (h *ConfigHandlers) HandleTestNode(w http.ResponseWriter, r *http.Request)
 	// Find the node to test
 	var testResult map[string]interface{}
 
-	if nodeType == "pve" && index < len(h.config.PVEInstances) {
-		pve := h.config.PVEInstances[index]
+	if nodeType == "pve" && index < len(h.getConfig(r.Context()).PVEInstances) {
+		pve := h.getConfig(r.Context()).PVEInstances[index]
 
 		// Create a temporary client to test connection
 		authUser := pve.User
@@ -3020,8 +3103,8 @@ func (h *ConfigHandlers) HandleTestNode(w http.ResponseWriter, r *http.Request)
 				}
 			}
 		}
-	} else if nodeType == "pbs" && index < len(h.config.PBSInstances) {
-		pbsInstance := h.config.PBSInstances[index]
+	} else if nodeType == "pbs" && index < len(h.getConfig(r.Context()).PBSInstances) {
+		pbsInstance := h.getConfig(r.Context()).PBSInstances[index]
 
 		// Create a temporary client to test connection
 		clientConfig := pbs.ClientConfig{
@@ -3059,8 +3142,8 @@ func (h *ConfigHandlers) HandleTestNode(w http.ResponseWriter, r *http.Request)
 				}
 			}
 		}
-	} else if nodeType == "pmg" && index < len(h.config.PMGInstances) {
-		pmgInstance := h.config.PMGInstances[index]
+	} else if nodeType == "pmg" && index < len(h.getConfig(r.Context()).PMGInstances) {
+		pmgInstance := h.getConfig(r.Context()).PMGInstances[index]
 
 		clientConfig := config.CreatePMGConfig(&pmgInstance)
 		if pmgInstance.Password != "" && pmgInstance.TokenName == "" && pmgInstance.TokenValue == "" {
@@ -3132,8 +3215,8 @@ func (h *ConfigHandlers) HandleTestNode(w http.ResponseWriter, r *http.Request)
 }
 
 // getNodeStatus returns the connection status for a node
-func (h *ConfigHandlers) getNodeStatus(nodeType, nodeName string) string {
-	if h.monitor == nil {
+func (h *ConfigHandlers) getNodeStatus(ctx context.Context, nodeType, nodeName string) string {
+	if h.getMonitor(ctx) == nil {
 		if h.isRecentlyAutoRegistered(nodeType, nodeName) {
 			return "connected"
 		}
@@ -3141,7 +3224,7 @@ func (h *ConfigHandlers) getNodeStatus(nodeType, nodeName string) string {
 	}
 
 	// Get connection statuses from monitor
-	connectionStatus := h.monitor.GetConnectionStatuses()
+	connectionStatus := h.getMonitor(ctx).GetConnectionStatuses()
 
 	key := fmt.Sprintf("%s-%s", nodeType, nodeName)
 	if connected, ok := connectionStatus[key]; ok {
@@ -3165,7 +3248,7 @@ func (h *ConfigHandlers) getNodeStatus(nodeType, nodeName string) string {
 // HandleGetSystemSettings returns current system settings
 func (h *ConfigHandlers) HandleGetSystemSettings(w http.ResponseWriter, r *http.Request) {
 	// Load settings from persistence to get all fields including theme
-	persistedSettings, err := h.persistence.LoadSystemSettings()
+	persistedSettings, err := h.getPersistence(r.Context()).LoadSystemSettings()
 	if err != nil {
 		log.Warn().Err(err).Msg("Failed to load persisted system settings")
 		persistedSettings = config.DefaultSystemSettings()
@@ -3176,22 +3259,22 @@ func (h *ConfigHandlers) HandleGetSystemSettings(w http.ResponseWriter, r *http.
 
 	// Get current values from running config
 	settings := *persistedSettings
-	settings.PVEPollingInterval = int(h.config.PVEPollingInterval.Seconds())
-	settings.PBSPollingInterval = int(h.config.PBSPollingInterval.Seconds())
-	settings.BackupPollingInterval = int(h.config.BackupPollingInterval.Seconds())
-	settings.BackendPort = h.config.BackendPort
-	settings.FrontendPort = h.config.FrontendPort
-	settings.AllowedOrigins = h.config.AllowedOrigins
-	settings.ConnectionTimeout = int(h.config.ConnectionTimeout.Seconds())
-	settings.UpdateChannel = h.config.UpdateChannel
-	settings.AutoUpdateEnabled = h.config.AutoUpdateEnabled
-	settings.AutoUpdateCheckInterval = int(h.config.AutoUpdateCheckInterval.Hours())
-	settings.AutoUpdateTime = h.config.AutoUpdateTime
-	settings.LogLevel = h.config.LogLevel
-	settings.DiscoveryEnabled = h.config.DiscoveryEnabled
-	settings.DiscoverySubnet = h.config.DiscoverySubnet
-	settings.DiscoveryConfig = config.CloneDiscoveryConfig(h.config.Discovery)
-	backupEnabled := h.config.EnableBackupPolling
+	settings.PVEPollingInterval = int(h.getConfig(r.Context()).PVEPollingInterval.Seconds())
+	settings.PBSPollingInterval = int(h.getConfig(r.Context()).PBSPollingInterval.Seconds())
+	settings.BackupPollingInterval = int(h.getConfig(r.Context()).BackupPollingInterval.Seconds())
+	settings.BackendPort = h.getConfig(r.Context()).BackendPort
+	settings.FrontendPort = h.getConfig(r.Context()).FrontendPort
+	settings.AllowedOrigins = h.getConfig(r.Context()).AllowedOrigins
+	settings.ConnectionTimeout = int(h.getConfig(r.Context()).ConnectionTimeout.Seconds())
+	settings.UpdateChannel = h.getConfig(r.Context()).UpdateChannel
+	settings.AutoUpdateEnabled = h.getConfig(r.Context()).AutoUpdateEnabled
+	settings.AutoUpdateCheckInterval = int(h.getConfig(r.Context()).AutoUpdateCheckInterval.Hours())
+	settings.AutoUpdateTime = h.getConfig(r.Context()).AutoUpdateTime
+	settings.LogLevel = h.getConfig(r.Context()).LogLevel
+	settings.DiscoveryEnabled = h.getConfig(r.Context()).DiscoveryEnabled
+	settings.DiscoverySubnet = h.getConfig(r.Context()).DiscoverySubnet
+	settings.DiscoveryConfig = config.CloneDiscoveryConfig(h.getConfig(r.Context()).Discovery)
+	backupEnabled := h.getConfig(r.Context()).EnableBackupPolling
 	settings.BackupPollingEnabled = &backupEnabled
 
 	// Create response structure that includes environment overrides
@@ -3245,7 +3328,7 @@ func (h *ConfigHandlers) HandleVerifyTemperatureSSH(w http.ResponseWriter, r *ht
 		homeDir = "/home/pulse"
 	}
 	sshKeyPath := filepath.Join(homeDir, ".ssh/id_ed25519_sensors")
-	tempCollector := monitoring.NewTemperatureCollectorWithPort("root", sshKeyPath, h.config.SSHPort)
+	tempCollector := monitoring.NewTemperatureCollectorWithPort("root", sshKeyPath, h.getConfig(r.Context()).SSHPort)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()
 
|
|||
}
|
||||
|
||||
// Export configuration
|
||||
exportedData, err := h.persistence.ExportConfig(req.Passphrase)
|
||||
exportedData, err := h.getPersistence(r.Context()).ExportConfig(req.Passphrase)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("Failed to export configuration")
|
||||
http.Error(w, "Failed to export configuration", http.StatusInternalServerError)
|
||||
|
|
@ -3374,7 +3457,7 @@ func (h *ConfigHandlers) HandleImportConfig(w http.ResponseWriter, r *http.Reque
|
|||
}
|
||||
|
||||
// Import configuration
|
||||
if err := h.persistence.ImportConfig(req.Data, req.Passphrase); err != nil {
|
||||
if err := h.getPersistence(r.Context()).ImportConfig(req.Data, req.Passphrase); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to import configuration")
|
||||
http.Error(w, "Failed to import configuration: "+err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
|
|
@ -3389,7 +3472,7 @@ func (h *ConfigHandlers) HandleImportConfig(w http.ResponseWriter, r *http.Reque
|
|||
}
|
||||
|
||||
// Update the config reference
|
||||
*h.config = *newConfig
|
||||
*h.getConfig(r.Context()) = *newConfig
|
||||
|
||||
// Reload monitor with new configuration
|
||||
if h.reloadFunc != nil {
|
||||
|
|
@ -3402,19 +3485,19 @@ func (h *ConfigHandlers) HandleImportConfig(w http.ResponseWriter, r *http.Reque
|
|||
|
||||
// Also reload alert and notification configs explicitly
|
||||
// (the monitor reload only reloads nodes unless it's a full reload)
|
||||
if h.monitor != nil {
|
||||
if h.getMonitor(r.Context()) != nil {
|
||||
// Reload alert configuration
|
||||
if alertConfig, err := h.persistence.LoadAlertConfig(); err == nil {
|
||||
h.monitor.GetAlertManager().UpdateConfig(*alertConfig)
|
||||
if alertConfig, err := h.getPersistence(r.Context()).LoadAlertConfig(); err == nil {
|
||||
h.getMonitor(r.Context()).GetAlertManager().UpdateConfig(*alertConfig)
|
||||
log.Info().Msg("Reloaded alert configuration after import")
|
||||
} else {
|
||||
log.Warn().Err(err).Msg("Failed to reload alert configuration after import")
|
||||
}
|
||||
|
||||
// Reload webhook configuration
|
||||
if webhooks, err := h.persistence.LoadWebhooks(); err == nil {
|
||||
if webhooks, err := h.getPersistence(r.Context()).LoadWebhooks(); err == nil {
|
||||
// Clear existing webhooks and add new ones
|
||||
notificationMgr := h.monitor.GetNotificationManager()
|
||||
notificationMgr := h.getMonitor(r.Context()).GetNotificationManager()
|
||||
// Get current webhooks to clear them
|
||||
for _, webhook := range notificationMgr.GetWebhooks() {
|
||||
if err := notificationMgr.DeleteWebhook(webhook.ID); err != nil {
|
||||
|
|
@ -3431,8 +3514,8 @@ func (h *ConfigHandlers) HandleImportConfig(w http.ResponseWriter, r *http.Reque
|
|||
}
|
||||
|
||||
// Reload email configuration
|
||||
if emailConfig, err := h.persistence.LoadEmailConfig(); err == nil {
|
||||
h.monitor.GetNotificationManager().SetEmailConfig(*emailConfig)
|
||||
if emailConfig, err := h.getPersistence(r.Context()).LoadEmailConfig(); err == nil {
|
||||
h.getMonitor(r.Context()).GetNotificationManager().SetEmailConfig(*emailConfig)
|
||||
log.Info().Msg("Reloaded email configuration after import")
|
||||
} else {
|
||||
log.Warn().Err(err).Msg("Failed to reload email configuration after import")
|
||||
|
|
@ -3463,7 +3546,7 @@ func (h *ConfigHandlers) HandleDiscoverServers(w http.ResponseWriter, r *http.Re
|
|||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
// Return cached results from background discovery service
|
||||
if discoveryService := h.monitor.GetDiscoveryService(); discoveryService != nil {
|
||||
if discoveryService := h.getMonitor(r.Context()).GetDiscoveryService(); discoveryService != nil {
|
||||
result, updated := discoveryService.GetCachedResult()
|
||||
|
||||
var updatedUnix int64
|
||||
|
|
@ -3513,7 +3596,7 @@ func (h *ConfigHandlers) HandleDiscoverServers(w http.ResponseWriter, r *http.Re
|
|||
}
|
||||
|
||||
if req.UseCache {
|
||||
if discoveryService := h.monitor.GetDiscoveryService(); discoveryService != nil {
|
||||
if discoveryService := h.getMonitor(r.Context()).GetDiscoveryService(); discoveryService != nil {
|
||||
result, updated := discoveryService.GetCachedResult()
|
||||
|
||||
var updatedUnix int64
|
||||
|
|
@ -3545,7 +3628,7 @@ func (h *ConfigHandlers) HandleDiscoverServers(w http.ResponseWriter, r *http.Re
|
|||
|
||||
log.Info().Str("subnet", subnet).Msg("Starting manual discovery scan")
|
||||
|
||||
scanner, buildErr := discoveryinternal.BuildScanner(h.config.Discovery)
|
||||
scanner, buildErr := discoveryinternal.BuildScanner(h.getConfig(r.Context()).Discovery)
|
||||
if buildErr != nil {
|
||||
log.Warn().Err(buildErr).Msg("Falling back to default scanner for manual discovery")
|
||||
scanner = pkgdiscovery.NewScanner()
|
||||
|
|
@ -3659,7 +3742,7 @@ func (h *ConfigHandlers) HandleSetupScript(w http.ResponseWriter, r *http.Reques
|
|||
log.Info().
|
||||
Str("type", serverType).
|
||||
Str("host", serverHost).
|
||||
Bool("has_auth", h.config.AuthUser != "" || h.config.AuthPass != "" || h.config.HasAPITokens()).
|
||||
Bool("has_auth", h.getConfig(r.Context()).AuthUser != "" || h.getConfig(r.Context()).AuthPass != "" || h.getConfig(r.Context()).HasAPITokens()).
|
||||
Msg("HandleSetupScript called")
|
||||
|
||||
// The setup script is now public - authentication happens via setup code
|
||||
|
|
@ -4697,6 +4780,7 @@ func (h *ConfigHandlers) HandleSetupScriptURL(w http.ResponseWriter, r *http.Req
|
|||
Used: false,
|
||||
NodeType: req.Type,
|
||||
Host: req.Host,
|
||||
OrgID: GetOrgID(r.Context()),
|
||||
}
|
||||
h.codeMutex.Unlock()
|
||||
|
||||
|
|
@ -4710,9 +4794,9 @@ func (h *ConfigHandlers) HandleSetupScriptURL(w http.ResponseWriter, r *http.Req
|
|||
host := r.Host
|
||||
|
||||
if parsedHost, parsedPort, err := net.SplitHostPort(host); err == nil {
|
||||
if (parsedHost == "127.0.0.1" || parsedHost == "localhost") && parsedPort == strconv.Itoa(h.config.FrontendPort) {
|
||||
if (parsedHost == "127.0.0.1" || parsedHost == "localhost") && parsedPort == strconv.Itoa(h.getConfig(r.Context()).FrontendPort) {
|
||||
// Prefer a user-configured public URL when we're running on loopback.
|
||||
if publicURL := strings.TrimSpace(h.config.PublicURL); publicURL != "" {
|
||||
if publicURL := strings.TrimSpace(h.getConfig(r.Context()).PublicURL); publicURL != "" {
|
||||
if parsedURL, err := url.Parse(publicURL); err == nil && parsedURL.Host != "" {
|
||||
host = parsedURL.Host
|
||||
}
|
||||
|
|
@ -4844,8 +4928,8 @@ func (h *ConfigHandlers) HandleUpdateMockMode(w http.ResponseWriter, r *http.Req
|
|||
mock.SetMockConfig(currentCfg)
|
||||
|
||||
if req.Enabled != nil {
|
||||
if h.monitor != nil {
|
||||
h.monitor.SetMockMode(*req.Enabled)
|
||||
if h.getMonitor(r.Context()) != nil {
|
||||
h.getMonitor(r.Context()).SetMockMode(*req.Enabled)
|
||||
} else {
|
||||
mock.SetEnabled(*req.Enabled)
|
||||
}
|
||||
|
|
@ -4914,14 +4998,14 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
|
|||
log.Debug().
|
||||
Bool("hasAuthToken", strings.TrimSpace(req.AuthToken) != "").
|
||||
Bool("hasSetupCode", strings.TrimSpace(authCode) != "").
|
||||
Bool("hasConfigToken", h.config.HasAPITokens()).
|
||||
Bool("hasConfigToken", h.getConfig(r.Context()).HasAPITokens()).
|
||||
Msg("Checking authentication for auto-register")
|
||||
|
||||
// First check for setup code/auth token in the request
|
||||
if authCode != "" {
|
||||
matchedAPIToken := false
|
||||
if h.config.HasAPITokens() {
|
||||
if _, ok := h.config.ValidateAPIToken(authCode); ok {
|
||||
if h.getConfig(r.Context()).HasAPITokens() {
|
||||
if _, ok := h.getConfig(r.Context()).ValidateAPIToken(authCode); ok {
|
||||
authenticated = true
|
||||
matchedAPIToken = true
|
||||
log.Info().
|
||||
|
|
@ -4950,6 +5034,12 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
|
|||
// what's entered in the UI and what's provided in the setup script URL
|
||||
if setupCode.NodeType == req.Type {
|
||||
setupCode.Used = true // Mark as used immediately
|
||||
|
||||
// Inject OrgID from setup code into context for subsequent processing
|
||||
if setupCode.OrgID != "" {
|
||||
ctx := context.WithValue(r.Context(), OrgIDContextKey, setupCode.OrgID)
|
||||
r = r.WithContext(ctx)
|
||||
}
|
||||
// Allow a short grace period for follow-up actions without keeping tokens alive too long
|
||||
graceExpiry := time.Now().Add(1 * time.Minute)
|
||||
if setupCode.ExpiresAt.Before(graceExpiry) {
|
||||
|
|
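The injected value makes every later h.getConfig(r.Context()) and h.getPersistence(r.Context()) call in this handler resolve against the organization that generated the setup code. Compressed view of the flow, assuming GetOrgID reads the same OrgIDContextKey:

ctx := context.WithValue(r.Context(), OrgIDContextKey, setupCode.OrgID)
r = r.WithContext(ctx)
// From here on, getContextState sees setupCode.OrgID instead of "default",
// so the auto-registered node lands in that tenant's config directory.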
@@ -4980,9 +5070,9 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 	}
 
 	// If not authenticated via setup code, check API token if configured
-	if !authenticated && h.config.HasAPITokens() {
+	if !authenticated && h.getConfig(r.Context()).HasAPITokens() {
 		apiToken := r.Header.Get("X-API-Token")
-		if _, ok := h.config.ValidateAPIToken(apiToken); ok {
+		if _, ok := h.getConfig(r.Context()).ValidateAPIToken(apiToken); ok {
 			authenticated = true
 			log.Info().Msg("Auto-register authenticated via API token")
 		}
@@ -5100,7 +5190,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 	newHostIP := extractHostIP(host)
 
 	if req.Type == "pve" {
-		for i, node := range h.config.PVEInstances {
+		for i, node := range h.getConfig(r.Context()).PVEInstances {
 			if node.Host == host {
 				existingIndex = i
 				break
@@ -5141,7 +5231,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 			}
 		}
 	} else {
-		for i, node := range h.config.PBSInstances {
+		for i, node := range h.getConfig(r.Context()).PBSInstances {
 			if node.Host == host {
 				existingIndex = i
 				break
@@ -5182,7 +5272,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 	if existingIndex >= 0 {
 		// Update existing node
 		if req.Type == "pve" {
-			instance := &h.config.PVEInstances[existingIndex]
+			instance := &h.getConfig(r.Context()).PVEInstances[existingIndex]
 			// Update host in case IP changed (DHCP scenario)
 			// But preserve user's configured hostname when matched by IP resolution (Issue #940)
 			if !preserveHost {
@@ -5220,7 +5310,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 			}
 			// Keep other settings as they were
 		} else {
-			instance := &h.config.PBSInstances[existingIndex]
+			instance := &h.getConfig(r.Context()).PBSInstances[existingIndex]
 			// Update host in case IP changed (DHCP scenario)
 			// But preserve user's configured hostname when matched by IP resolution (Issue #940)
 			if !preserveHost {
@@ -5263,8 +5353,8 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 		// CLUSTER DEDUPLICATION: Check if we already have this cluster configured
 		// If so, merge this node as an endpoint instead of creating a duplicate instance
 		if isCluster && clusterName != "" {
-			for i := range h.config.PVEInstances {
-				existingInstance := &h.config.PVEInstances[i]
+			for i := range h.getConfig(r.Context()).PVEInstances {
+				existingInstance := &h.getConfig(r.Context()).PVEInstances[i]
 				if existingInstance.IsCluster && existingInstance.ClusterName == clusterName {
 					// Found existing cluster with same name - merge endpoints!
 					log.Info().
@@ -5289,8 +5379,8 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 					}
 
 					// Save and reload
-					if h.persistence != nil {
-						if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+					if h.getPersistence(r.Context()) != nil {
+						if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 							log.Warn().Err(err).Msg("Failed to persist cluster endpoint merge during auto-registration")
 						}
 					}
@@ -5333,7 +5423,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 			}
 
 			// Disambiguate node name if duplicate hostnames exist
-			displayName := h.disambiguateNodeName(nodeConfig.Name, nodeConfig.Host, "pve")
+			displayName := h.disambiguateNodeName(r.Context(), nodeConfig.Name, nodeConfig.Host, "pve")
 
 			newInstance := config.PVEInstance{
 				Name: displayName,
@@ -5350,7 +5440,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 				ClusterEndpoints: clusterEndpoints,
 				Source:           req.Source, // Track how this node was registered
 			}
-			h.config.PVEInstances = append(h.config.PVEInstances, newInstance)
+			h.getConfig(r.Context()).PVEInstances = append(h.getConfig(r.Context()).PVEInstances, newInstance)
 
 			if isCluster {
 				log.Info().
@@ -5385,7 +5475,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 			}
 
 			// Disambiguate node name if duplicate hostnames exist
-			pbsDisplayName := h.disambiguateNodeName(nodeConfig.Name, nodeConfig.Host, "pbs")
+			pbsDisplayName := h.disambiguateNodeName(r.Context(), nodeConfig.Name, nodeConfig.Host, "pbs")
 
 			newInstance := config.PBSInstance{
 				Name: pbsDisplayName,
@@ -5401,14 +5491,14 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 				MonitorGarbageJobs: monitorGarbageJobs,
 				Source:             req.Source, // Track how this node was registered
 			}
-			h.config.PBSInstances = append(h.config.PBSInstances, newInstance)
+			h.getConfig(r.Context()).PBSInstances = append(h.getConfig(r.Context()).PBSInstances, newInstance)
 		}
 		log.Info().Str("host", req.Host).Str("type", req.Type).Msg("Added new node via auto-registration")
 	}
 
 	// Log what we're about to save
-	if req.Type == "pve" && len(h.config.PVEInstances) > 0 {
-		lastNode := h.config.PVEInstances[len(h.config.PVEInstances)-1]
+	if req.Type == "pve" && len(h.getConfig(r.Context()).PVEInstances) > 0 {
+		lastNode := h.getConfig(r.Context()).PVEInstances[len(h.getConfig(r.Context()).PVEInstances)-1]
 		log.Info().
 			Str("name", lastNode.Name).
 			Str("host", lastNode.Host).
@@ -5418,7 +5508,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 	}
 
 	// Save configuration
-	if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+	if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 		log.Error().Err(err).Msg("Failed to save auto-registered node")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
@@ -5426,7 +5516,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 
 	log.Info().Msg("Configuration saved successfully")
 
-	actualName := h.findInstanceNameByHost(req.Type, host)
+	actualName := h.findInstanceNameByHost(r.Context(), req.Type, host)
 	if actualName == "" {
 		actualName = strings.TrimSpace(req.ServerName)
 	}
@@ -5452,9 +5542,9 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 	}
 
 	// Trigger a discovery refresh to remove the node from discovered list
-	if h.monitor != nil && h.monitor.GetDiscoveryService() != nil {
+	if h.getMonitor(r.Context()) != nil && h.getMonitor(r.Context()).GetDiscoveryService() != nil {
 		log.Info().Msg("Triggering discovery refresh after auto-registration")
-		h.monitor.GetDiscoveryService().ForceRefresh()
+		h.getMonitor(r.Context()).GetDiscoveryService().ForceRefresh()
 	}
 
 	// Broadcast auto-registration success via WebSocket
@@ -5477,8 +5567,8 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 	})
 
 	// Also broadcast a discovery update to refresh the UI
-	if h.monitor != nil && h.monitor.GetDiscoveryService() != nil {
-		result, _ := h.monitor.GetDiscoveryService().GetCachedResult()
+	if h.getMonitor(r.Context()) != nil && h.getMonitor(r.Context()).GetDiscoveryService() != nil {
+		result, _ := h.getMonitor(r.Context()).GetDiscoveryService().GetCachedResult()
 		if result != nil {
 			h.wsHub.BroadcastMessage(websocket.Message{
 				Type: "discovery_update",
@@ -5512,7 +5602,7 @@ func (h *ConfigHandlers) HandleAutoRegister(w http.ResponseWriter, r *http.Reque
 }
 
 // handleSecureAutoRegister handles the new secure registration flow where Pulse generates the token
-func (h *ConfigHandlers) handleSecureAutoRegister(w http.ResponseWriter, _ *http.Request, req *AutoRegisterRequest, clientIP string) {
+func (h *ConfigHandlers) handleSecureAutoRegister(w http.ResponseWriter, r *http.Request, req *AutoRegisterRequest, clientIP string) {
 	log.Info().
 		Str("type", req.Type).
 		Str("host", req.Host).
@@ -5629,7 +5719,7 @@ func (h *ConfigHandlers) handleSecureAutoRegister(w http.ResponseWriter, _ *http
 			MonitorStorage: true,
 			MonitorBackups: true,
 		}
-		h.config.PVEInstances = append(h.config.PVEInstances, pveNode)
+		h.getConfig(r.Context()).PVEInstances = append(h.getConfig(r.Context()).PVEInstances, pveNode)
 	} else if req.Type == "pbs" {
 		pbsNode := config.PBSInstance{
 			Name: serverName,
@@ -5644,17 +5734,17 @@ func (h *ConfigHandlers) handleSecureAutoRegister(w http.ResponseWriter, _ *http
 			MonitorVerifyJobs: true,
 			MonitorPruneJobs:  true,
 		}
-		h.config.PBSInstances = append(h.config.PBSInstances, pbsNode)
+		h.getConfig(r.Context()).PBSInstances = append(h.getConfig(r.Context()).PBSInstances, pbsNode)
 	}
 
 	// Save configuration
-	if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances, h.config.PMGInstances); err != nil {
+	if err := h.getPersistence(r.Context()).SaveNodesConfig(h.getConfig(r.Context()).PVEInstances, h.getConfig(r.Context()).PBSInstances, h.getConfig(r.Context()).PMGInstances); err != nil {
 		log.Error().Err(err).Msg("Failed to save auto-registered node")
 		http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 		return
 	}
 
-	actualName := h.findInstanceNameByHost(req.Type, host)
+	actualName := h.findInstanceNameByHost(r.Context(), req.Type, host)
 	if actualName == "" {
 		actualName = serverName
 	}
@@ -5849,13 +5939,13 @@ func (h *ConfigHandlers) HandleAgentInstallCommand(w http.ResponseWriter, r *htt
 
 	// Persist the token
 	config.Mu.Lock()
-	h.config.APITokens = append(h.config.APITokens, *record)
-	h.config.SortAPITokens()
+	h.getConfig(r.Context()).APITokens = append(h.getConfig(r.Context()).APITokens, *record)
+	h.getConfig(r.Context()).SortAPITokens()
 
-	if h.persistence != nil {
-		if err := h.persistence.SaveAPITokens(h.config.APITokens); err != nil {
+	if h.getPersistence(r.Context()) != nil {
+		if err := h.getPersistence(r.Context()).SaveAPITokens(h.getConfig(r.Context()).APITokens); err != nil {
 			// Rollback the in-memory addition
-			h.config.APITokens = h.config.APITokens[:len(h.config.APITokens)-1]
+			h.getConfig(r.Context()).APITokens = h.getConfig(r.Context()).APITokens[:len(h.getConfig(r.Context()).APITokens)-1]
 			config.Mu.Unlock()
 			log.Error().Err(err).Msg("Failed to persist API tokens after creation")
 			http.Error(w, "Failed to save token to disk: "+err.Error(), http.StatusInternalServerError)
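The token-persistence hunk mutates shared config under config.Mu and undoes the in-memory append when the save fails. Distilled shape of that pattern, with the sort and the HTTP error response omitted:

config.Mu.Lock()
cfg := h.getConfig(r.Context())
cfg.APITokens = append(cfg.APITokens, *record)
if err := h.getPersistence(r.Context()).SaveAPITokens(cfg.APITokens); err != nil {
	cfg.APITokens = cfg.APITokens[:len(cfg.APITokens)-1] // undo the in-memory add
	config.Mu.Unlock()
	return
}
config.Mu.Unlock()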
@@ -5867,9 +5957,9 @@ func (h *ConfigHandlers) HandleAgentInstallCommand(w http.ResponseWriter, r *htt
 	// Derive Pulse URL from the request
 	host := r.Host
 	if parsedHost, parsedPort, err := net.SplitHostPort(host); err == nil {
-		if (parsedHost == "127.0.0.1" || parsedHost == "localhost") && parsedPort == strconv.Itoa(h.config.FrontendPort) {
+		if (parsedHost == "127.0.0.1" || parsedHost == "localhost") && parsedPort == strconv.Itoa(h.getConfig(r.Context()).FrontendPort) {
 			// Prefer a user-configured public URL when we're running on loopback
-			if publicURL := strings.TrimSpace(h.config.PublicURL); publicURL != "" {
+			if publicURL := strings.TrimSpace(h.getConfig(r.Context()).PublicURL); publicURL != "" {
 				if parsedURL, err := url.Parse(publicURL); err == nil && parsedURL.Host != "" {
 					host = parsedURL.Host
 				}
@@ -1516,13 +1516,14 @@ func buildAIChatDiagnostic(cfg *config.Config, aiHandler *AIHandler) *AIChatDiag
 	// Calculate enabled state based on AI config
 	// NOTE: aiHandler might be nil during early startup
 	if aiHandler != nil {
-		aiCfg := aiHandler.GetAIConfig()
+		ctx := context.Background()
+		aiCfg := aiHandler.GetAIConfig(ctx)
 		if aiCfg != nil {
 			diag.Enabled = aiCfg.Enabled
 			diag.Model = aiCfg.GetChatModel()
 		}
 
-		svc := aiHandler.GetService()
+		svc := aiHandler.GetService(ctx)
 		if svc != nil {
 			diag.Running = svc.IsRunning()
 			diag.Healthy = svc.IsRunning() // Consolidate for now
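buildAIChatDiagnostic runs outside any HTTP request, so there is no X-Pulse-Org-ID header to propagate. Passing context.Background() means no org ID is stored in the context, and the handler helpers fall back to the "default" org, which matches the legacy single-tenant behaviour. Assuming GetOrgID returns the empty string for a missing key:

ctx := context.Background()
if GetOrgID(ctx) == "" {
	// getContextState and the AI handler helpers treat this as the "default" org
}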

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "encoding/json"
 "errors"
 "net/http"

@@ -17,9 +18,10 @@ import (
 // DockerAgentHandlers manages ingest from the external Docker agent.
 type DockerAgentHandlers struct {
-monitor *monitoring.Monitor
-wsHub *websocket.Hub
-config *config.Config
+mtMonitor *monitoring.MultiTenantMonitor
+legacyMonitor *monitoring.Monitor
+wsHub *websocket.Hub
+config *config.Config
 }

 type dockerCommandAckRequest struct {

@@ -56,13 +58,40 @@ func normalizeCommandStatus(status string) (string, error) {
 }

 // NewDockerAgentHandlers constructs a new Docker agent handler group.
-func NewDockerAgentHandlers(m *monitoring.Monitor, hub *websocket.Hub, cfg *config.Config) *DockerAgentHandlers {
-return &DockerAgentHandlers{monitor: m, wsHub: hub, config: cfg}
+func NewDockerAgentHandlers(mtm *monitoring.MultiTenantMonitor, m *monitoring.Monitor, hub *websocket.Hub, cfg *config.Config) *DockerAgentHandlers {
+// If no monitor was passed in but mtm is available, populate legacyMonitor from the "default" org
+if m == nil && mtm != nil {
+if mon, err := mtm.GetMonitor("default"); err == nil {
+m = mon
+}
+}
+return &DockerAgentHandlers{mtMonitor: mtm, legacyMonitor: m, wsHub: hub, config: cfg}
 }

 // SetMonitor updates the monitor reference for docker agent handlers.
 func (h *DockerAgentHandlers) SetMonitor(m *monitoring.Monitor) {
-h.monitor = m
+h.legacyMonitor = m
 }

+// SetMultiTenantMonitor updates the multi-tenant monitor reference.
+func (h *DockerAgentHandlers) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+h.mtMonitor = mtm
+if mtm != nil {
+if m, err := mtm.GetMonitor("default"); err == nil {
+h.legacyMonitor = m
+}
+}
+}
+
+// getMonitor resolves the monitor for the org in the request context, falling back to the legacy monitor.
+func (h *DockerAgentHandlers) getMonitor(ctx context.Context) *monitoring.Monitor {
+orgID := GetOrgID(ctx)
+if h.mtMonitor != nil {
+if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+return m
+}
+}
+return h.legacyMonitor
+}
+
 // HandleReport accepts heartbeat payloads from the Docker agent.
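
The getMonitor helper above silently falls back to the legacy single-tenant monitor when no multi-tenant monitor is configured or the per-org lookup fails. A minimal check of that contract might look like the following (a hypothetical test sketch, not part of this commit):

    // Hypothetical test for the fallback contract of getMonitor.
    func TestDockerGetMonitorFallsBackToLegacy(t *testing.T) {
        legacy := &monitoring.Monitor{}
        h := NewDockerAgentHandlers(nil, legacy, nil, nil)
        // With no MultiTenantMonitor wired in, the legacy monitor is
        // returned regardless of which org is in the context.
        if got := h.getMonitor(context.Background()); got != legacy {
            t.Fatalf("expected fallback to the legacy monitor")
        }
    }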

@@ -89,7 +118,7 @@ func (h *DockerAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Reques
 tokenRecord := getAPITokenRecordFromRequest(r)

-host, err := h.monitor.ApplyDockerReport(report, tokenRecord)
+host, err := h.getMonitor(r.Context()).ApplyDockerReport(report, tokenRecord)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "invalid_report", err.Error(), nil)
 return

@@ -101,7 +130,7 @@ func (h *DockerAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Reques
 Msg("Docker agent report processed")

 // Broadcast the updated state for near-real-time UI updates
-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 response := map[string]any{
 "success": true,

@@ -110,7 +139,7 @@ func (h *DockerAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Reques
 "lastSeen": host.LastSeen,
 }

-if payload, cmd := h.monitor.FetchDockerCommandForHost(host.ID); cmd != nil {
+if payload, cmd := h.getMonitor(r.Context()).FetchDockerCommandForHost(host.ID); cmd != nil {
 commandResponse := map[string]any{
 "id": cmd.ID,
 "type": cmd.Type,

@@ -202,25 +231,25 @@ func (h *DockerAgentHandlers) HandleCommandAck(w http.ResponseWriter, r *http.Re
 return
 }

-commandStatus, hostID, shouldRemove, err := h.monitor.AcknowledgeDockerHostCommand(commandID, req.HostID, status, req.Message)
+commandStatus, hostID, shouldRemove, err := h.getMonitor(r.Context()).AcknowledgeDockerHostCommand(commandID, req.HostID, status, req.Message)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "docker_command_ack_failed", err.Error(), nil)
 return
 }

 if shouldRemove {
-if _, removeErr := h.monitor.RemoveDockerHost(hostID); removeErr != nil {
+if _, removeErr := h.getMonitor(r.Context()).RemoveDockerHost(hostID); removeErr != nil {
 log.Error().Err(removeErr).Str("dockerHostID", hostID).Str("commandID", commandID).Msg("Failed to remove docker host after command completion")
 } else {
 // Clear the removal block since the agent has confirmed it stopped successfully.
 // This allows immediate re-enrollment without waiting for the 24-hour TTL.
-if reenrollErr := h.monitor.AllowDockerHostReenroll(hostID); reenrollErr != nil {
+if reenrollErr := h.getMonitor(r.Context()).AllowDockerHostReenroll(hostID); reenrollErr != nil {
 log.Warn().Err(reenrollErr).Str("dockerHostID", hostID).Msg("Failed to clear removal block after successful stop")
 }
 }
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -252,20 +281,20 @@ func (h *DockerAgentHandlers) HandleDeleteHost(w http.ResponseWriter, r *http.Re
 forceParam := strings.ToLower(r.URL.Query().Get("force"))
 force := forceParam == "true" || strings.ToLower(r.URL.Query().Get("mode")) == "force"

-priorHost, hostExists := h.monitor.GetDockerHost(hostID)
+priorHost, hostExists := h.getMonitor(r.Context()).GetDockerHost(hostID)

 if shouldHide {
 if !hostExists {
 writeErrorResponse(w, http.StatusNotFound, "docker_host_not_found", "Docker host not found", nil)
 return
 }
-host, err := h.monitor.HideDockerHost(hostID)
+host, err := h.getMonitor(r.Context()).HideDockerHost(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "docker_host_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -294,13 +323,13 @@ func (h *DockerAgentHandlers) HandleDeleteHost(w http.ResponseWriter, r *http.Re
 }

 if !force && strings.EqualFold(priorHost.Status, "online") {
-command, err := h.monitor.QueueDockerHostStop(hostID)
+command, err := h.getMonitor(r.Context()).QueueDockerHostStop(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "docker_command_failed", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -313,13 +342,13 @@ func (h *DockerAgentHandlers) HandleDeleteHost(w http.ResponseWriter, r *http.Re
 return
 }

-host, err := h.monitor.RemoveDockerHost(hostID)
+host, err := h.getMonitor(r.Context()).RemoveDockerHost(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "docker_host_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -345,13 +374,13 @@ func (h *DockerAgentHandlers) HandleAllowReenroll(w http.ResponseWriter, r *http
 return
 }

-if err := h.monitor.AllowDockerHostReenroll(hostID); err != nil {
+if err := h.getMonitor(r.Context()).AllowDockerHostReenroll(hostID); err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "docker_host_reenroll_failed", err.Error(), nil)
 return
 }

 // Broadcast updated state to ensure the frontend reflects the change
-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -376,13 +405,13 @@ func (h *DockerAgentHandlers) HandleUnhideHost(w http.ResponseWriter, r *http.Re
 return
 }

-host, err := h.monitor.UnhideDockerHost(hostID)
+host, err := h.getMonitor(r.Context()).UnhideDockerHost(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "docker_host_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -408,13 +437,13 @@ func (h *DockerAgentHandlers) HandleMarkPendingUninstall(w http.ResponseWriter,
 return
 }

-host, err := h.monitor.MarkDockerHostPendingUninstall(hostID)
+host, err := h.getMonitor(r.Context()).MarkDockerHostPendingUninstall(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "docker_host_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -454,13 +483,13 @@ func (h *DockerAgentHandlers) HandleSetCustomDisplayName(w http.ResponseWriter,

 customName := strings.TrimSpace(req.DisplayName)

-host, err := h.monitor.SetDockerHostCustomDisplayName(hostID, customName)
+host, err := h.getMonitor(r.Context()).SetDockerHostCustomDisplayName(hostID, customName)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "docker_host_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -510,13 +539,13 @@ func (h *DockerAgentHandlers) HandleContainerUpdate(w http.ResponseWriter, r *ht
 }

 // Queue the update command
-commandStatus, err := h.monitor.QueueDockerContainerUpdateCommand(req.HostID, req.ContainerID, req.ContainerName)
+commandStatus, err := h.getMonitor(r.Context()).QueueDockerContainerUpdateCommand(req.HostID, req.ContainerID, req.ContainerName)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "update_command_failed", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -550,13 +579,13 @@ func (h *DockerAgentHandlers) HandleCheckUpdates(w http.ResponseWriter, r *http.
 }

 // Queue the check updates command
-commandStatus, err := h.monitor.QueueDockerCheckUpdatesCommand(hostID)
+commandStatus, err := h.getMonitor(r.Context()).QueueDockerCheckUpdatesCommand(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "check_updates_command_failed", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "encoding/json"
 "net/http"
 "net/url"

@@ -12,19 +13,30 @@
 // DockerMetadataHandler handles Docker resource metadata operations
 type DockerMetadataHandler struct {
-store *config.DockerMetadataStore
+mtPersistence *config.MultiTenantPersistence
 }

 // NewDockerMetadataHandler creates a new Docker metadata handler
-func NewDockerMetadataHandler(dataPath string) *DockerMetadataHandler {
+func NewDockerMetadataHandler(mtPersistence *config.MultiTenantPersistence) *DockerMetadataHandler {
 return &DockerMetadataHandler{
-store: config.NewDockerMetadataStore(dataPath, nil),
+mtPersistence: mtPersistence,
 }
 }

-// Store returns the underlying metadata store
+func (h *DockerMetadataHandler) getStore(ctx context.Context) *config.DockerMetadataStore {
+orgID := "default"
+if ctx != nil {
+if id := GetOrgID(ctx); id != "" {
+orgID = id
+}
+}
+p, _ := h.mtPersistence.GetPersistence(orgID)
+return p.GetDockerMetadataStore()
+}
+
+// Store returns the underlying metadata store for the default tenant
 func (h *DockerMetadataHandler) Store() *config.DockerMetadataStore {
-return h.store
+return h.getStore(context.Background())
 }

 // HandleGetMetadata retrieves metadata for a specific Docker resource or all resources
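
One thing to note in getStore above: the error from GetPersistence is discarded, so if an org ID ever reached a handler without passing through the tenant middleware (which rejects invalid IDs with a 400), p could be nil and the method call would panic. A defensive variant, offered here only as a suggestion and not part of the commit, would fall back to the default tenant:

    // Suggested defensive variant: fall back to the "default" tenant
    // when the per-org persistence cannot be resolved.
    func (h *DockerMetadataHandler) getStore(ctx context.Context) *config.DockerMetadataStore {
        orgID := "default"
        if ctx != nil {
            if id := GetOrgID(ctx); id != "" {
                orgID = id
            }
        }
        p, err := h.mtPersistence.GetPersistence(orgID)
        if err != nil || p == nil {
            p, _ = h.mtPersistence.GetPersistence("default")
        }
        return p.GetDockerMetadataStore()
    }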

@@ -40,7 +52,8 @@ func (h *DockerMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http
 if path == "/api/docker/metadata" || path == "/api/docker/metadata/" {
 // Get all metadata
 w.Header().Set("Content-Type", "application/json")
-allMeta := h.store.GetAll()
+store := h.getStore(r.Context())
+allMeta := store.GetAll()
 if allMeta == nil {
 // Return empty object instead of null
 json.NewEncoder(w).Encode(make(map[string]*config.DockerMetadata))

@@ -57,7 +70,8 @@ func (h *DockerMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http

 if resourceID != "" {
 // Get specific Docker resource metadata
-meta := h.store.Get(resourceID)
+store := h.getStore(r.Context())
+meta := store.Get(resourceID)
 if meta == nil {
 // Return empty metadata instead of 404
 json.NewEncoder(w).Encode(&config.DockerMetadata{ID: resourceID})

@@ -120,7 +134,8 @@ func (h *DockerMetadataHandler) HandleUpdateMetadata(w http.ResponseWriter, r *h
 }
 }

-if err := h.store.Set(resourceID, &meta); err != nil {
+store := h.getStore(r.Context())
+if err := store.Set(resourceID, &meta); err != nil {
 log.Error().Err(err).Str("resourceID", resourceID).Msg("Failed to save Docker metadata")
 // Provide more specific error message
 errMsg := "Failed to save metadata"

@@ -152,7 +167,8 @@ func (h *DockerMetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *h
 return
 }

-if err := h.store.Delete(resourceID); err != nil {
+store := h.getStore(r.Context())
+if err := store.Delete(resourceID); err != nil {
 log.Error().Err(err).Str("resourceID", resourceID).Msg("Failed to delete Docker metadata")
 http.Error(w, "Failed to delete metadata", http.StatusInternalServerError)
 return

@@ -176,7 +192,8 @@ func (h *DockerMetadataHandler) HandleGetHostMetadata(w http.ResponseWriter, r *
 if path == "/api/docker/hosts/metadata" || path == "/api/docker/hosts/metadata/" {
 // Get all host metadata
 w.Header().Set("Content-Type", "application/json")
-allMeta := h.store.GetAllHostMetadata()
+store := h.getStore(r.Context())
+allMeta := store.GetAllHostMetadata()
 if allMeta == nil {
 // Return empty object instead of null
 json.NewEncoder(w).Encode(make(map[string]*config.DockerHostMetadata))

@@ -193,7 +210,8 @@ func (h *DockerMetadataHandler) HandleGetHostMetadata(w http.ResponseWriter, r *

 if hostID != "" {
 // Get specific Docker host metadata
-meta := h.store.GetHostMetadata(hostID)
+store := h.getStore(r.Context())
+meta := store.GetHostMetadata(hostID)
 if meta == nil {
 // Return empty metadata instead of 404
 json.NewEncoder(w).Encode(&config.DockerHostMetadata{})

@@ -257,7 +275,8 @@ func (h *DockerMetadataHandler) HandleUpdateHostMetadata(w http.ResponseWriter,
 }

 // Get existing metadata to merge with new data
-existing := h.store.GetHostMetadata(hostID)
+store := h.getStore(r.Context())
+existing := store.GetHostMetadata(hostID)
 if existing != nil {
 // Merge: only update fields that are provided
 if meta.CustomDisplayName != "" || existing.CustomDisplayName != "" {

@@ -271,7 +290,7 @@ func (h *DockerMetadataHandler) HandleUpdateHostMetadata(w http.ResponseWriter,
 }
 }

-if err := h.store.SetHostMetadata(hostID, &meta); err != nil {
+if err := store.SetHostMetadata(hostID, &meta); err != nil {
 log.Error().Err(err).Str("hostID", hostID).Msg("Failed to save Docker host metadata")
 // Provide more specific error message
 errMsg := "Failed to save metadata"

@@ -300,10 +319,9 @@ func (h *DockerMetadataHandler) HandleDeleteHostMetadata(w http.ResponseWriter,
 hostID := strings.TrimPrefix(r.URL.Path, "/api/docker/hosts/metadata/")
 if hostID == "" || hostID == "metadata" {
 http.Error(w, "Host ID required", http.StatusBadRequest)
 return
 }

-if err := h.store.SetHostMetadata(hostID, nil); err != nil {
+store := h.getStore(r.Context())
+if err := store.SetHostMetadata(hostID, nil); err != nil {
 log.Error().Err(err).Str("hostID", hostID).Msg("Failed to delete Docker host metadata")
 http.Error(w, "Failed to delete metadata", http.StatusInternalServerError)
 return

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "encoding/json"
 "net/http"
 "net/url"

@@ -12,24 +13,42 @@
 // GuestMetadataHandler handles guest metadata operations
 type GuestMetadataHandler struct {
-store *config.GuestMetadataStore
+mtPersistence *config.MultiTenantPersistence
 }

 // NewGuestMetadataHandler creates a new guest metadata handler
-func NewGuestMetadataHandler(dataPath string) *GuestMetadataHandler {
+func NewGuestMetadataHandler(mtPersistence *config.MultiTenantPersistence) *GuestMetadataHandler {
 return &GuestMetadataHandler{
-store: config.NewGuestMetadataStore(dataPath, nil),
+mtPersistence: mtPersistence,
 }
 }

+func (h *GuestMetadataHandler) getStore(ctx context.Context) *config.GuestMetadataStore {
+// Default to the "default" org if none is specified (though middleware should always set it)
+orgID := "default"
+if ctx != nil {
+if id := GetOrgID(ctx); id != "" {
+orgID = id
+}
+}
+p, _ := h.mtPersistence.GetPersistence(orgID)
+return p.GetGuestMetadataStore()
+}
+
 // Reload reloads the guest metadata from disk
 func (h *GuestMetadataHandler) Reload() error {
-return h.store.Load()
+// Per-tenant stores are cached inside ConfigPersistence and load on first access,
+// so there is no easy way to iterate every loaded store from here. A deep
+// multi-tenant reload is left as a TODO; for legacy compatibility this reloads
+// only the "default" tenant's store, which also covers the case where its
+// files were modified on disk externally.
+return h.getStore(context.Background()).Load()
 }

-// Store returns the underlying metadata store
+// Store returns the underlying metadata store for the default tenant (legacy support)
 func (h *GuestMetadataHandler) Store() *config.GuestMetadataStore {
-return h.store
+return h.getStore(context.Background())
 }

 // HandleGetMetadata retrieves metadata for a specific guest or all guests

@@ -45,7 +64,8 @@ func (h *GuestMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http.
 if path == "/api/guests/metadata" || path == "/api/guests/metadata/" {
 // Get all metadata
 w.Header().Set("Content-Type", "application/json")
-allMeta := h.store.GetAll()
+store := h.getStore(r.Context())
+allMeta := store.GetAll()
 if allMeta == nil {
 // Return empty object instead of null
 json.NewEncoder(w).Encode(make(map[string]*config.GuestMetadata))

@@ -62,7 +82,8 @@ func (h *GuestMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http.

 if guestID != "" {
 // Get specific guest metadata
-meta := h.store.Get(guestID)
+store := h.getStore(r.Context())
+meta := store.Get(guestID)
 if meta == nil {
 // Return empty metadata instead of 404
 json.NewEncoder(w).Encode(&config.GuestMetadata{ID: guestID})

@@ -125,7 +146,8 @@ func (h *GuestMetadataHandler) HandleUpdateMetadata(w http.ResponseWriter, r *ht
 }
 }

-if err := h.store.Set(guestID, &meta); err != nil {
+store := h.getStore(r.Context())
+if err := store.Set(guestID, &meta); err != nil {
 log.Error().Err(err).Str("guestID", guestID).Msg("Failed to save guest metadata")
 // Provide more specific error message
 errMsg := "Failed to save metadata"

@@ -157,7 +179,8 @@ func (h *GuestMetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *ht
 return
 }

-if err := h.store.Delete(guestID); err != nil {
+store := h.getStore(r.Context())
+if err := store.Delete(guestID); err != nil {
 log.Error().Err(err).Str("guestID", guestID).Msg("Failed to delete guest metadata")
 http.Error(w, "Failed to delete metadata", http.StatusInternalServerError)
 return

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "crypto/ed25519"
 "encoding/json"
 "fmt"

@@ -30,18 +31,46 @@ var configSigningState struct {
 // HostAgentHandlers manages ingest from the pulse-host-agent.
 type HostAgentHandlers struct {
-monitor *monitoring.Monitor
-wsHub *websocket.Hub
+mtMonitor *monitoring.MultiTenantMonitor
+legacyMonitor *monitoring.Monitor
+wsHub *websocket.Hub
 }

 // NewHostAgentHandlers constructs a new handler set for host agents.
-func NewHostAgentHandlers(m *monitoring.Monitor, hub *websocket.Hub) *HostAgentHandlers {
-return &HostAgentHandlers{monitor: m, wsHub: hub}
+func NewHostAgentHandlers(mtm *monitoring.MultiTenantMonitor, m *monitoring.Monitor, hub *websocket.Hub) *HostAgentHandlers {
+// If no monitor was passed in but mtm is available, populate legacyMonitor from the "default" org
+if m == nil && mtm != nil {
+if mon, err := mtm.GetMonitor("default"); err == nil {
+m = mon
+}
+}
+return &HostAgentHandlers{mtMonitor: mtm, legacyMonitor: m, wsHub: hub}
 }

 // SetMonitor updates the monitor reference for host agent handlers.
 func (h *HostAgentHandlers) SetMonitor(m *monitoring.Monitor) {
-h.monitor = m
+h.legacyMonitor = m
 }

+// SetMultiTenantMonitor updates the multi-tenant monitor reference.
+func (h *HostAgentHandlers) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+h.mtMonitor = mtm
+if mtm != nil {
+if m, err := mtm.GetMonitor("default"); err == nil {
+h.legacyMonitor = m
+}
+}
+}
+
+// getMonitor resolves the monitor for the org in the request context, falling back to the legacy monitor.
+func (h *HostAgentHandlers) getMonitor(ctx context.Context) *monitoring.Monitor {
+orgID := GetOrgID(ctx)
+if h.mtMonitor != nil {
+if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+return m
+}
+}
+return h.legacyMonitor
+}
+
 // HandleReport ingests host agent reports.

@@ -67,7 +96,7 @@ func (h *HostAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Request)
 tokenRecord := getAPITokenRecordFromRequest(r)

-host, err := h.monitor.ApplyHostReport(report, tokenRecord)
+host, err := h.getMonitor(r.Context()).ApplyHostReport(report, tokenRecord)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "invalid_report", err.Error(), nil)
 return

@@ -79,10 +108,10 @@ func (h *HostAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Request)
 Str("platform", host.Platform).
 Msg("Host agent report processed")

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 // Include any server-side config overrides in the response
-serverConfig := h.monitor.GetHostAgentConfig(host.ID)
+serverConfig := h.getMonitor(r.Context()).GetHostAgentConfig(host.ID)

 resp := map[string]any{
 "success": true,

@@ -120,7 +149,7 @@ func (h *HostAgentHandlers) HandleLookup(w http.ResponseWriter, r *http.Request)
 return
 }

-state := h.monitor.GetState()
+state := h.getMonitor(r.Context()).GetState()

 var (
 host models.Host

@@ -218,13 +247,13 @@ func (h *HostAgentHandlers) HandleDeleteHost(w http.ResponseWriter, r *http.Requ
 }

 // Remove the host from state
-host, err := h.monitor.RemoveHostAgent(hostID)
+host, err := h.getMonitor(r.Context()).RemoveHostAgent(hostID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "host_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -268,8 +297,8 @@ func (h *HostAgentHandlers) canReadConfig(record *config.APITokenRecord) bool {
 record.HasScope(config.ScopeSettingsWrite)
 }

-func (h *HostAgentHandlers) resolveConfigHost(hostID string, record *config.APITokenRecord) (models.Host, bool) {
-state := h.monitor.GetState()
+func (h *HostAgentHandlers) resolveConfigHost(ctx context.Context, hostID string, record *config.APITokenRecord) (models.Host, bool) {
+state := h.getMonitor(ctx).GetState()

 if record == nil || record.HasScope(config.ScopeHostManage) || record.HasScope(config.ScopeSettingsWrite) {
 for _, candidate := range state.Hosts {

@@ -363,7 +392,7 @@ func (h *HostAgentHandlers) handleGetConfig(w http.ResponseWriter, r *http.Reque
 return
 }

-host, ok := h.resolveConfigHost(hostID, record)
+host, ok := h.resolveConfigHost(r.Context(), hostID, record)
 if !ok {
 writeErrorResponse(w, http.StatusNotFound, "host_not_found", "Host has not registered with Pulse yet", nil)
 LogAuditEvent("host_agent_config_fetch", auth.GetUser(r.Context()), GetClientIP(r), r.URL.Path, false,

@@ -373,7 +402,7 @@

 hostID = host.ID

-config := h.monitor.GetHostAgentConfig(hostID)
+config := h.getMonitor(r.Context()).GetHostAgentConfig(hostID)
 signedConfig, err := h.signHostConfig(hostID, config)
 if err != nil {
 log.Error().Err(err).Msg("Failed to sign host config payload")

@@ -417,7 +446,7 @@ func (h *HostAgentHandlers) ensureHostTokenMatch(w http.ResponseWriter, r *http.
 return true
 }

-state := h.monitor.GetState()
+state := h.getMonitor(r.Context()).GetState()
 for _, host := range state.Hosts {
 if host.ID != hostID {
 continue

@@ -446,12 +475,12 @@ func (h *HostAgentHandlers) handlePatchConfig(w http.ResponseWriter, r *http.Req
 return
 }

-if err := h.monitor.UpdateHostAgentConfig(hostID, req.CommandsEnabled); err != nil {
+if err := h.getMonitor(r.Context()).UpdateHostAgentConfig(hostID, req.CommandsEnabled); err != nil {
 writeErrorResponse(w, http.StatusInternalServerError, "update_failed", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 log.Info().
 Str("hostId", hostID).

@@ -499,13 +528,13 @@ func (h *HostAgentHandlers) HandleUninstall(w http.ResponseWriter, r *http.Reque
 log.Info().Str("hostId", hostID).Msg("Received unregistration request from agent uninstaller")

 // Remove the host from state
-_, err := h.monitor.RemoveHostAgent(hostID)
+_, err := h.getMonitor(r.Context()).RemoveHostAgent(hostID)
 if err != nil {
 // If host not found, we still return success because the goal is reached
 log.Warn().Err(err).Str("hostId", hostID).Msg("Host not found during unregistration request")
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -548,12 +577,12 @@ func (h *HostAgentHandlers) HandleLink(w http.ResponseWriter, r *http.Request) {
 return
 }

-if err := h.monitor.LinkHostAgent(hostID, nodeID); err != nil {
+if err := h.getMonitor(r.Context()).LinkHostAgent(hostID, nodeID); err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "link_failed", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -590,12 +619,12 @@ func (h *HostAgentHandlers) HandleUnlink(w http.ResponseWriter, r *http.Request)
 return
 }

-if err := h.monitor.UnlinkHostAgent(hostID); err != nil {
+if err := h.getMonitor(r.Context()).UnlinkHostAgent(hostID); err != nil {
 writeErrorResponse(w, http.StatusNotFound, "unlink_failed", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "encoding/json"
 "net/http"
 "net/url"

@@ -12,19 +13,30 @@
 // HostMetadataHandler handles host metadata operations
 type HostMetadataHandler struct {
-store *config.HostMetadataStore
+mtPersistence *config.MultiTenantPersistence
 }

 // NewHostMetadataHandler creates a new host metadata handler
-func NewHostMetadataHandler(dataPath string) *HostMetadataHandler {
+func NewHostMetadataHandler(mtPersistence *config.MultiTenantPersistence) *HostMetadataHandler {
 return &HostMetadataHandler{
-store: config.NewHostMetadataStore(dataPath, nil),
+mtPersistence: mtPersistence,
 }
 }

-// Store returns the underlying metadata store
+func (h *HostMetadataHandler) getStore(ctx context.Context) *config.HostMetadataStore {
+orgID := "default"
+if ctx != nil {
+if id := GetOrgID(ctx); id != "" {
+orgID = id
+}
+}
+p, _ := h.mtPersistence.GetPersistence(orgID)
+return p.GetHostMetadataStore()
+}
+
+// Store returns the underlying metadata store for the default tenant
 func (h *HostMetadataHandler) Store() *config.HostMetadataStore {
-return h.store
+return h.getStore(context.Background())
 }

 // HandleGetMetadata retrieves metadata for a specific host or all hosts

@@ -40,7 +52,8 @@ func (h *HostMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http.R
 if path == "/api/hosts/metadata" || path == "/api/hosts/metadata/" {
 // Get all metadata
 w.Header().Set("Content-Type", "application/json")
-allMeta := h.store.GetAll()
+store := h.getStore(r.Context())
+allMeta := store.GetAll()
 if allMeta == nil {
 // Return empty object instead of null
 json.NewEncoder(w).Encode(make(map[string]*config.HostMetadata))

@@ -57,7 +70,8 @@ func (h *HostMetadataHandler) HandleGetMetadata(w http.ResponseWriter, r *http.R

 if hostID != "" {
 // Get specific host metadata
-meta := h.store.Get(hostID)
+store := h.getStore(r.Context())
+meta := store.Get(hostID)
 if meta == nil {
 // Return empty metadata instead of 404
 json.NewEncoder(w).Encode(&config.HostMetadata{ID: hostID})

@@ -119,8 +133,8 @@ func (h *HostMetadataHandler) HandleUpdateMetadata(w http.ResponseWriter, r *htt
 return
 }
 }

-if err := h.store.Set(hostID, &meta); err != nil {
+store := h.getStore(r.Context())
+if err := store.Set(hostID, &meta); err != nil {
 log.Error().Err(err).Str("hostID", hostID).Msg("Failed to save host metadata")
 // Provide more specific error message
 errMsg := "Failed to save metadata"

@@ -151,8 +165,8 @@ func (h *HostMetadataHandler) HandleDeleteMetadata(w http.ResponseWriter, r *htt
 http.Error(w, "Host ID required", http.StatusBadRequest)
 return
 }

-if err := h.store.Delete(hostID); err != nil {
+store := h.getStore(r.Context())
+if err := store.Delete(hostID); err != nil {
 log.Error().Err(err).Str("hostID", hostID).Msg("Failed to delete host metadata")
 http.Error(w, "Failed to delete metadata", http.StatusInternalServerError)
 return

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "encoding/json"
 "net/http"
 "strings"

@@ -15,18 +16,46 @@ import (
 // KubernetesAgentHandlers manages ingest from the Kubernetes agent.
 type KubernetesAgentHandlers struct {
-monitor *monitoring.Monitor
-wsHub *websocket.Hub
+mtMonitor *monitoring.MultiTenantMonitor
+legacyMonitor *monitoring.Monitor
+wsHub *websocket.Hub
 }

 // NewKubernetesAgentHandlers constructs a new Kubernetes agent handler group.
-func NewKubernetesAgentHandlers(m *monitoring.Monitor, hub *websocket.Hub) *KubernetesAgentHandlers {
-return &KubernetesAgentHandlers{monitor: m, wsHub: hub}
+func NewKubernetesAgentHandlers(mtm *monitoring.MultiTenantMonitor, m *monitoring.Monitor, hub *websocket.Hub) *KubernetesAgentHandlers {
+// If no monitor was passed in but mtm is available, populate legacyMonitor from the "default" org
+if m == nil && mtm != nil {
+if mon, err := mtm.GetMonitor("default"); err == nil {
+m = mon
+}
+}
+return &KubernetesAgentHandlers{mtMonitor: mtm, legacyMonitor: m, wsHub: hub}
 }

 // SetMonitor updates the monitor reference for kubernetes agent handlers.
 func (h *KubernetesAgentHandlers) SetMonitor(m *monitoring.Monitor) {
-h.monitor = m
+h.legacyMonitor = m
 }

+// SetMultiTenantMonitor updates the multi-tenant monitor reference.
+func (h *KubernetesAgentHandlers) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+h.mtMonitor = mtm
+if mtm != nil {
+if m, err := mtm.GetMonitor("default"); err == nil {
+h.legacyMonitor = m
+}
+}
+}
+
+// getMonitor resolves the monitor for the org in the request context, falling back to the legacy monitor.
+func (h *KubernetesAgentHandlers) getMonitor(ctx context.Context) *monitoring.Monitor {
+orgID := GetOrgID(ctx)
+if h.mtMonitor != nil {
+if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+return m
+}
+}
+return h.legacyMonitor
+}
+
 // HandleReport accepts heartbeat payloads from the Kubernetes agent.

@@ -52,7 +81,7 @@ func (h *KubernetesAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Re
 tokenRecord := getAPITokenRecordFromRequest(r)

-cluster, err := h.monitor.ApplyKubernetesReport(report, tokenRecord)
+cluster, err := h.getMonitor(r.Context()).ApplyKubernetesReport(report, tokenRecord)
 if err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "invalid_report", err.Error(), nil)
 return

@@ -66,7 +95,7 @@ func (h *KubernetesAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Re
 Int("deployments", len(cluster.Deployments)).
 Msg("Kubernetes agent report processed")

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -130,13 +159,13 @@ func (h *KubernetesAgentHandlers) HandleDeleteCluster(w http.ResponseWriter, r *
 return
 }

-cluster, err := h.monitor.RemoveKubernetesCluster(clusterID)
+cluster, err := h.getMonitor(r.Context()).RemoveKubernetesCluster(clusterID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "kubernetes_cluster_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -162,7 +191,7 @@ func (h *KubernetesAgentHandlers) HandleAllowReenroll(w http.ResponseWriter, r *
 return
 }

-if err := h.monitor.AllowKubernetesClusterReenroll(clusterID); err != nil {
+if err := h.getMonitor(r.Context()).AllowKubernetesClusterReenroll(clusterID); err != nil {
 writeErrorResponse(w, http.StatusBadRequest, "kubernetes_cluster_reenroll_failed", err.Error(), nil)
 return
 }

@@ -190,13 +219,13 @@ func (h *KubernetesAgentHandlers) HandleUnhideCluster(w http.ResponseWriter, r *
 return
 }

-cluster, err := h.monitor.UnhideKubernetesCluster(clusterID)
+cluster, err := h.getMonitor(r.Context()).UnhideKubernetesCluster(clusterID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "kubernetes_cluster_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -222,13 +251,13 @@ func (h *KubernetesAgentHandlers) HandleMarkPendingUninstall(w http.ResponseWrit
 return
 }

-cluster, err := h.monitor.MarkKubernetesClusterPendingUninstall(clusterID)
+cluster, err := h.getMonitor(r.Context()).MarkKubernetesClusterPendingUninstall(clusterID)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "kubernetes_cluster_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

@@ -268,13 +297,13 @@ func (h *KubernetesAgentHandlers) HandleSetCustomDisplayName(w http.ResponseWrit

 customName := strings.TrimSpace(req.DisplayName)

-cluster, err := h.monitor.SetKubernetesClusterCustomDisplayName(clusterID, customName)
+cluster, err := h.getMonitor(r.Context()).SetKubernetesClusterCustomDisplayName(clusterID, customName)
 if err != nil {
 writeErrorResponse(w, http.StatusNotFound, "kubernetes_cluster_not_found", err.Error(), nil)
 return
 }

-go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend())
+go h.wsHub.BroadcastState(h.getMonitor(r.Context()).GetState().ToFrontend())

 if err := utils.WriteJSONResponse(w, map[string]any{
 "success": true,

86
internal/api/middleware_tenant.go
Normal file

@@ -0,0 +1,86 @@
+package api
+
+import (
+"context"
+"net/http"
+
+"github.com/rcourtman/pulse-go-rewrite/internal/config"
+"github.com/rcourtman/pulse-go-rewrite/internal/models"
+)
+
+type OrganizationContextKey string
+
+const (
+OrgIDContextKey OrganizationContextKey = "org_id"
+OrgContextKey OrganizationContextKey = "org_object"
+)
+
+// TenantMiddleware extracts the organization ID from the request and
+// sets up the context for multi-tenant isolation.
+type TenantMiddleware struct {
+persistence *config.MultiTenantPersistence
+}
+
+func NewTenantMiddleware(p *config.MultiTenantPersistence) *TenantMiddleware {
+return &TenantMiddleware{persistence: p}
+}
+
+func (m *TenantMiddleware) Middleware(next http.Handler) http.Handler {
+return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+// 1. Extract the org ID.
+// Priority:
+// 1. Header: X-Pulse-Org-ID (for API clients/agents)
+// 2. Cookie: pulse_org_id (for browser sessions)
+// 3. Fallback: "default" (for backward compatibility)
+
+orgID := r.Header.Get("X-Pulse-Org-ID")
+if orgID == "" {
+// Check the cookie
+if cookie, err := r.Cookie("pulse_org_id"); err == nil {
+orgID = cookie.Value
+}
+}
+
+// Fall back to the default org
+if orgID == "" {
+orgID = "default"
+}
+
+// 2. Validate/load the organization.
+// In a full implementation we would check that the user has access to this org.
+// For Phase 1 (persistence), we only ensure the org is valid in the persistence layer.
+
+// Ensure the organization persistence is initialized.
+// This creates the directory if it doesn't exist for valid IDs.
+_, err := m.persistence.GetPersistence(orgID)
+if err != nil {
+http.Error(w, "Invalid Organization ID", http.StatusBadRequest)
+return
+}
+
+// 3. Inject into the context.
+ctx := context.WithValue(r.Context(), OrgIDContextKey, orgID)
+
+// Also store a mock organization object for now
+org := &models.Organization{ID: orgID, DisplayName: orgID}
+ctx = context.WithValue(ctx, OrgContextKey, org)
+
+next.ServeHTTP(w, r.WithContext(ctx))
+})
+}
+
+// GetOrgID returns the org ID stored in the context, or "default".
+func GetOrgID(ctx context.Context) string {
+if id, ok := ctx.Value(OrgIDContextKey).(string); ok {
+return id
+}
+return "default"
+}
+
+// GetOrganization returns the organization stored in the context, or the default organization.
+func GetOrganization(ctx context.Context) *models.Organization {
+if org, ok := ctx.Value(OrgContextKey).(*models.Organization); ok {
+return org
+}
+return &models.Organization{ID: "default", DisplayName: "Default Organization"}
+}
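
For reference, a client or agent scopes its requests to a tenant by sending the X-Pulse-Org-ID header; without it (or the pulse_org_id cookie) the middleware routes the request to the "default" org. A minimal client sketch, assuming a Pulse instance on localhost:7655 (the port is an assumption; the /api/guests/metadata endpoint appears earlier in this diff, and only the header name comes from the middleware above):

    package main

    import (
        "log"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest(http.MethodGet, "http://localhost:7655/api/guests/metadata", nil)
        if err != nil {
            log.Fatal(err)
        }
        // Route the request to the "customer-a" tenant.
        req.Header.Set("X-Pulse-Org-ID", "customer-a")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        log.Println("status:", resp.Status)
    }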
80
internal/api/middleware_tenant_test.go
Normal file

@@ -0,0 +1,80 @@
+package api
+
+import (
+"net/http"
+"net/http/httptest"
+"os"
+"path/filepath"
+"testing"
+
+"github.com/rcourtman/pulse-go-rewrite/internal/config"
+"github.com/stretchr/testify/assert"
+"github.com/stretchr/testify/require"
+)
+
+func TestTenantMiddleware(t *testing.T) {
+// Set up a temporary directory for testing
+tmpDir, err := os.MkdirTemp("", "pulse-tenant-test-*")
+require.NoError(t, err)
+defer os.RemoveAll(tmpDir)
+
+// Create the MultiTenantPersistence
+mtp := config.NewMultiTenantPersistence(tmpDir)
+
+// Create the middleware
+middleware := NewTenantMiddleware(mtp)
+
+// Test handler that checks the context
+testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+orgID := GetOrgID(r.Context())
+if orgID == "" {
+w.WriteHeader(http.StatusInternalServerError)
+w.Write([]byte("OrgID missing"))
+return
+}
+w.WriteHeader(http.StatusOK)
+w.Write([]byte("OrgID: " + orgID))
+})
+
+// Wrap the handler
+handler := middleware.Middleware(testHandler)
+
+t.Run("Default Org (No Header)", func(t *testing.T) {
+req := httptest.NewRequest("GET", "/", nil)
+rec := httptest.NewRecorder()
+
+handler.ServeHTTP(rec, req)
+
+assert.Equal(t, http.StatusOK, rec.Code)
+assert.Equal(t, "OrgID: default", rec.Body.String())
+
+// Verify the default directory was created
+_, err := os.Stat(filepath.Join(tmpDir, "orgs", "default"))
+assert.NoError(t, err)
+})
+
+t.Run("Custom Org (Header)", func(t *testing.T) {
+req := httptest.NewRequest("GET", "/", nil)
+req.Header.Set("X-Pulse-Org-ID", "customer-a")
+rec := httptest.NewRecorder()
+
+handler.ServeHTTP(rec, req)
+
+assert.Equal(t, http.StatusOK, rec.Code)
+assert.Equal(t, "OrgID: customer-a", rec.Body.String())
+
+// Verify the custom directory was created
+_, err := os.Stat(filepath.Join(tmpDir, "orgs", "customer-a"))
+assert.NoError(t, err)
+})
+
+t.Run("Invalid Org ID (Directory Traversal Attempt)", func(t *testing.T) {
+req := httptest.NewRequest("GET", "/", nil)
+req.Header.Set("X-Pulse-Org-ID", "../../../etc/passwd")
+rec := httptest.NewRecorder()
+
+handler.ServeHTTP(rec, req)
+
+assert.Equal(t, http.StatusBadRequest, rec.Code)
+})
+}
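
The test exercises the middleware in isolation. In the server it presumably wraps the API router so every handler receives an org-scoped context; the commit message mentions router wiring, but those hunks are not shown here. A hypothetical sketch of that wiring (mux, apiHandler, and dataDir are assumed names, not taken from this diff):

    // Hypothetical wiring sketch; not taken from this diff.
    mtp := config.NewMultiTenantPersistence(dataDir)
    tenant := NewTenantMiddleware(mtp)
    mux := http.NewServeMux()
    mux.Handle("/api/", tenant.Middleware(apiHandler))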

@@ -1,6 +1,7 @@
 package api

 import (
+"context"
 "encoding/json"
 "fmt"
 "io"

@@ -9,6 +10,7 @@ import (

 "github.com/rcourtman/pulse-go-rewrite/internal/config"
 "github.com/rcourtman/pulse-go-rewrite/internal/models"
+"github.com/rcourtman/pulse-go-rewrite/internal/monitoring"
 "github.com/rcourtman/pulse-go-rewrite/internal/notifications"
 "github.com/rcourtman/pulse-go-rewrite/internal/utils"
 "github.com/rs/zerolog/log"

@@ -51,24 +53,52 @@ type NotificationMonitor interface {
 // NotificationHandlers handles notification-related HTTP endpoints
 type NotificationHandlers struct {
-monitor NotificationMonitor
+mtMonitor *monitoring.MultiTenantMonitor
+legacyMonitor NotificationMonitor
 }

 // NewNotificationHandlers creates new notification handlers
-func NewNotificationHandlers(monitor NotificationMonitor) *NotificationHandlers {
+func NewNotificationHandlers(mtm *monitoring.MultiTenantMonitor, monitor NotificationMonitor) *NotificationHandlers {
+// If no monitor was passed in but mtm is available, populate legacyMonitor from the "default" org
+if monitor == nil && mtm != nil {
+if m, err := mtm.GetMonitor("default"); err == nil {
+monitor = NewNotificationMonitorWrapper(m)
+}
+}
 return &NotificationHandlers{
-monitor: monitor,
+mtMonitor: mtm,
+legacyMonitor: monitor,
 }
 }

 // SetMonitor updates the monitor reference for notification handlers.
 func (h *NotificationHandlers) SetMonitor(m NotificationMonitor) {
-h.monitor = m
+h.legacyMonitor = m
 }

+// SetMultiTenantMonitor updates the multi-tenant monitor reference.
+func (h *NotificationHandlers) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+h.mtMonitor = mtm
+if mtm != nil {
+if m, err := mtm.GetMonitor("default"); err == nil {
+h.legacyMonitor = NewNotificationMonitorWrapper(m)
+}
+}
+}
+
+func (h *NotificationHandlers) getMonitor(ctx context.Context) NotificationMonitor {
+orgID := GetOrgID(ctx)
+if h.mtMonitor != nil {
+if m, err := h.mtMonitor.GetMonitor(orgID); err == nil && m != nil {
+return NewNotificationMonitorWrapper(m)
+}
+}
+return h.legacyMonitor
+}
+
 // GetEmailConfig returns the current email configuration
 func (h *NotificationHandlers) GetEmailConfig(w http.ResponseWriter, r *http.Request) {
-config := h.monitor.GetNotificationManager().GetEmailConfig()
+config := h.getMonitor(r.Context()).GetNotificationManager().GetEmailConfig()

 // For security, don't return the password
 config.Password = ""
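
Note that the rewritten notification handlers below resolve h.getMonitor(r.Context()) several times within a single request (UpdateEmailConfig does so three times, UpdateWebhook four). Each call repeats the org lookup; hoisting it into a local variable keeps that to one resolution per request, e.g.:

    // Suggested pattern (not applied in this commit): resolve once per request.
    m := h.getMonitor(r.Context())
    m.GetNotificationManager().SetEmailConfig(config)
    if err := m.GetConfigPersistence().SaveEmailConfig(config); err != nil {
        log.Error().Err(err).Msg("Failed to save email configuration")
    }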

@@ -102,7 +132,7 @@ func (h *NotificationHandlers) UpdateEmailConfig(w http.ResponseWriter, r *http.

 // If password is empty, preserve the existing password
 if config.Password == "" {
-existingConfig := h.monitor.GetNotificationManager().GetEmailConfig()
+existingConfig := h.getMonitor(r.Context()).GetNotificationManager().GetEmailConfig()
 config.Password = existingConfig.Password
 }

@@ -114,10 +144,10 @@ func (h *NotificationHandlers) UpdateEmailConfig(w http.ResponseWriter, r *http.
 Bool("hasPassword", config.Password != "").
 Msg("Parsed email config")

-h.monitor.GetNotificationManager().SetEmailConfig(config)
+h.getMonitor(r.Context()).GetNotificationManager().SetEmailConfig(config)

 // Save to persistent storage
-if err := h.monitor.GetConfigPersistence().SaveEmailConfig(config); err != nil {
+if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveEmailConfig(config); err != nil {
 // Log error but don't fail the request
 log.Error().Err(err).Msg("Failed to save email configuration")
 }

@@ -128,7 +158,7 @@ func (h *NotificationHandlers) UpdateEmailConfig(w http.ResponseWriter, r *http.

 // GetAppriseConfig returns the current Apprise configuration.
 func (h *NotificationHandlers) GetAppriseConfig(w http.ResponseWriter, r *http.Request) {
-config := h.monitor.GetNotificationManager().GetAppriseConfig()
+config := h.getMonitor(r.Context()).GetNotificationManager().GetAppriseConfig()

 w.Header().Set("Content-Type", "application/json")
 if err := json.NewEncoder(w).Encode(config); err != nil {

@@ -166,13 +196,13 @@ func (h *NotificationHandlers) UpdateAppriseConfig(w http.ResponseWriter, r *htt
 Int("timeoutSeconds", config.TimeoutSeconds).
 Msg("Parsed Apprise configuration update")

-h.monitor.GetNotificationManager().SetAppriseConfig(config)
+h.getMonitor(r.Context()).GetNotificationManager().SetAppriseConfig(config)

-if err := h.monitor.GetConfigPersistence().SaveAppriseConfig(config); err != nil {
+if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveAppriseConfig(config); err != nil {
 log.Error().Err(err).Msg("Failed to save Apprise configuration")
 }

-normalized := h.monitor.GetNotificationManager().GetAppriseConfig()
+normalized := h.getMonitor(r.Context()).GetNotificationManager().GetAppriseConfig()

 w.Header().Set("Content-Type", "application/json")
 if err := json.NewEncoder(w).Encode(normalized); err != nil {

@@ -182,7 +212,7 @@ func (h *NotificationHandlers) UpdateAppriseConfig(w http.ResponseWriter, r *htt

 // GetWebhooks returns all webhook configurations with secrets masked
 func (h *NotificationHandlers) GetWebhooks(w http.ResponseWriter, r *http.Request) {
-webhooks := h.monitor.GetNotificationManager().GetWebhooks()
+webhooks := h.getMonitor(r.Context()).GetNotificationManager().GetWebhooks()

 // Mask sensitive fields in headers and customFields
 maskedWebhooks := make([]map[string]interface{}, len(webhooks))

@@ -245,7 +275,7 @@ func (h *NotificationHandlers) CreateWebhook(w http.ResponseWriter, r *http.Requ
 }

 // Validate webhook URL
-if err := h.monitor.GetNotificationManager().ValidateWebhookURL(webhook.URL); err != nil {
+if err := h.getMonitor(r.Context()).GetNotificationManager().ValidateWebhookURL(webhook.URL); err != nil {
 http.Error(w, fmt.Sprintf("Invalid webhook URL: %v", err), http.StatusBadRequest)
 return
 }

@@ -255,11 +285,11 @@ func (h *NotificationHandlers) CreateWebhook(w http.ResponseWriter, r *http.Requ
 webhook.ID = utils.GenerateID("webhook")
 }

-h.monitor.GetNotificationManager().AddWebhook(webhook)
+h.getMonitor(r.Context()).GetNotificationManager().AddWebhook(webhook)

 // Save webhooks to persistent storage with all fields
-webhooks := h.monitor.GetNotificationManager().GetWebhooks()
-if err := h.monitor.GetConfigPersistence().SaveWebhooks(webhooks); err != nil {
+webhooks := h.getMonitor(r.Context()).GetNotificationManager().GetWebhooks()
+if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveWebhooks(webhooks); err != nil {
 log.Error().Err(err).Msg("Failed to save webhooks")
 }

@@ -306,7 +336,7 @@ func (h *NotificationHandlers) UpdateWebhook(w http.ResponseWriter, r *http.Requ

 // Preserve original headers/customFields if the incoming values are redacted
 // This happens when the frontend sends back masked values from GetWebhooks
-existingWebhooks := h.monitor.GetNotificationManager().GetWebhooks()
+existingWebhooks := h.getMonitor(r.Context()).GetNotificationManager().GetWebhooks()
 for _, existing := range existingWebhooks {
 if existing.ID == webhookID {
 // Preserve headers if incoming contains redacted values

@@ -340,20 +370,20 @@ func (h *NotificationHandlers) UpdateWebhook(w http.ResponseWriter, r *http.Requ
 }

 // Validate webhook URL
-if err := h.monitor.GetNotificationManager().ValidateWebhookURL(webhook.URL); err != nil {
+if err := h.getMonitor(r.Context()).GetNotificationManager().ValidateWebhookURL(webhook.URL); err != nil {
 http.Error(w, fmt.Sprintf("Invalid webhook URL: %v", err), http.StatusBadRequest)
 return
 }

 webhook.ID = webhookID
-if err := h.monitor.GetNotificationManager().UpdateWebhook(webhookID, webhook); err != nil {
+if err := h.getMonitor(r.Context()).GetNotificationManager().UpdateWebhook(webhookID, webhook); err != nil {
 http.Error(w, err.Error(), http.StatusNotFound)
 return
 }

 // Save webhooks to persistent storage
-webhooks := h.monitor.GetNotificationManager().GetWebhooks()
-if err := h.monitor.GetConfigPersistence().SaveWebhooks(webhooks); err != nil {
+webhooks := h.getMonitor(r.Context()).GetNotificationManager().GetWebhooks()
+if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveWebhooks(webhooks); err != nil {
 log.Error().Err(err).Msg("Failed to save webhooks")
 }

@@ -382,14 +412,14 @@ func (h *NotificationHandlers) DeleteWebhook(w http.ResponseWriter, r *http.Requ
 return
 }

-if err := h.monitor.GetNotificationManager().DeleteWebhook(webhookID); err != nil {
+if err := h.getMonitor(r.Context()).GetNotificationManager().DeleteWebhook(webhookID); err != nil {
 http.Error(w, err.Error(), http.StatusNotFound)
 return
 }

 // Save webhooks to persistent storage
-webhooks := h.monitor.GetNotificationManager().GetWebhooks()
-if err := h.monitor.GetConfigPersistence().SaveWebhooks(webhooks); err != nil {
+webhooks := h.getMonitor(r.Context()).GetNotificationManager().GetWebhooks()
+if err := h.getMonitor(r.Context()).GetConfigPersistence().SaveWebhooks(webhooks); err != nil {
 log.Error().Err(err).Msg("Failed to save webhooks")
 }
@ -429,7 +459,7 @@ func (h *NotificationHandlers) TestNotification(w http.ResponseWriter, r *http.R
|
|||
}
|
||||
|
||||
// Get actual node info from monitor state
|
||||
state := h.monitor.GetState()
|
||||
state := h.getMonitor(r.Context()).GetState()
|
||||
var nodeInfo *notifications.TestNodeInfo
|
||||
|
||||
// Use first available node and instance
|
||||
|
|
@ -450,7 +480,7 @@ func (h *NotificationHandlers) TestNotification(w http.ResponseWriter, r *http.R
|
|||
Msg("Testing specific webhook")
|
||||
|
||||
// Get the webhook by ID and test it
|
||||
webhooks := h.monitor.GetNotificationManager().GetWebhooks()
|
||||
webhooks := h.getMonitor(r.Context()).GetNotificationManager().GetWebhooks()
|
||||
var foundWebhook *notifications.WebhookConfig
|
||||
for _, wh := range webhooks {
|
||||
if wh.ID == req.WebhookID {
|
||||
|
|
@ -465,7 +495,7 @@ func (h *NotificationHandlers) TestNotification(w http.ResponseWriter, r *http.R
|
|||
}
|
||||
|
||||
// Send test webhook
|
||||
if err := h.monitor.GetNotificationManager().SendTestWebhook(*foundWebhook); err != nil {
|
||||
if err := h.getMonitor(r.Context()).GetNotificationManager().SendTestWebhook(*foundWebhook); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
|
@ -478,7 +508,7 @@ func (h *NotificationHandlers) TestNotification(w http.ResponseWriter, r *http.R
|
|||
|
||||
// If password is empty, use the saved password
|
||||
if emailConfig.Password == "" {
|
||||
savedConfig := h.monitor.GetNotificationManager().GetEmailConfig()
|
||||
savedConfig := h.getMonitor(r.Context()).GetNotificationManager().GetEmailConfig()
|
||||
emailConfig.Password = savedConfig.Password
|
||||
}
|
||||
|
||||
|
|
@ -491,7 +521,7 @@ func (h *NotificationHandlers) TestNotification(w http.ResponseWriter, r *http.R
|
|||
Bool("hasPassword", emailConfig.Password != "").
|
||||
Msg("Testing email with provided config")
|
||||
|
||||
if err := h.monitor.GetNotificationManager().SendTestNotificationWithConfig(req.Method, &emailConfig, nodeInfo); err != nil {
|
||||
if err := h.getMonitor(r.Context()).GetNotificationManager().SendTestNotificationWithConfig(req.Method, &emailConfig, nodeInfo); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
|
@ -502,13 +532,13 @@ func (h *NotificationHandlers) TestNotification(w http.ResponseWriter, r *http.R
|
|||
return
|
||||
}
|
||||
|
||||
if err := h.monitor.GetNotificationManager().SendTestAppriseWithConfig(appriseConfig); err != nil {
|
||||
if err := h.getMonitor(r.Context()).GetNotificationManager().SendTestAppriseWithConfig(appriseConfig); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Use saved config
|
||||
if err := h.monitor.GetNotificationManager().SendTestNotification(req.Method); err != nil {
|
||||
if err := h.getMonitor(r.Context()).GetNotificationManager().SendTestNotification(req.Method); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
|
@ -528,7 +558,7 @@ func (h *NotificationHandlers) GetWebhookTemplates(w http.ResponseWriter, r *htt
|
|||
|
||||
// GetWebhookHistory returns recent webhook delivery history with URLs redacted
|
||||
func (h *NotificationHandlers) GetWebhookHistory(w http.ResponseWriter, r *http.Request) {
|
||||
history := h.monitor.GetNotificationManager().GetWebhookHistory()
|
||||
history := h.getMonitor(r.Context()).GetNotificationManager().GetWebhookHistory()
|
||||
|
||||
// Redact secrets from URLs in history
|
||||
for i := range history {
|
||||
|
|
@ -705,7 +735,7 @@ func (h *NotificationHandlers) TestWebhook(w http.ResponseWriter, r *http.Reques
|
|||
}
|
||||
|
||||
// Test the webhook
|
||||
status, response, err := h.monitor.GetNotificationManager().TestEnhancedWebhook(webhook)
|
||||
status, response, err := h.getMonitor(r.Context()).GetNotificationManager().TestEnhancedWebhook(webhook)
|
||||
|
||||
result := map[string]interface{}{
|
||||
"status": status,
|
||||
|
|
@ -732,7 +762,7 @@ func (h *NotificationHandlers) TestWebhook(w http.ResponseWriter, r *http.Reques
|
|||
func (h *NotificationHandlers) GetNotificationHealth(w http.ResponseWriter, r *http.Request) {
|
||||
// Get queue stats
|
||||
queueStats := make(map[string]interface{})
|
||||
stats, err := h.monitor.GetNotificationManager().GetQueueStats()
|
||||
stats, err := h.getMonitor(r.Context()).GetNotificationManager().GetQueueStats()
|
||||
if err != nil {
|
||||
log.Warn().Err(err).Msg("Failed to get queue stats for health check")
|
||||
queueStats["error"] = err.Error()
|
||||
|
|
@ -749,7 +779,7 @@ func (h *NotificationHandlers) GetNotificationHealth(w http.ResponseWriter, r *h
|
|||
}
|
||||
|
||||
// Get config status
|
||||
nm := h.monitor.GetNotificationManager()
|
||||
nm := h.getMonitor(r.Context()).GetNotificationManager()
|
||||
emailCfg := nm.GetEmailConfig()
|
||||
webhooks := nm.GetWebhooks()
|
||||
|
||||
|
|
@ -764,7 +794,7 @@ func (h *NotificationHandlers) GetNotificationHealth(w http.ResponseWriter, r *h
|
|||
"enabled": countEnabledWebhooks(webhooks),
|
||||
},
|
||||
"encryption": map[string]interface{}{
|
||||
"enabled": h.monitor.GetConfigPersistence().IsEncryptionEnabled(),
|
||||
"enabled": h.getMonitor(r.Context()).GetConfigPersistence().IsEncryptionEnabled(),
|
||||
},
|
||||
"overall_healthy": queueStats["healthy"] == true,
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -49,7 +49,7 @@ func (h *ProfileSuggestionHandler) HandleSuggestProfile(w http.ResponseWriter, r *http.Request) {
	}

	// Check if AI is running
-	if h.aiHandler == nil || !h.aiHandler.IsRunning() {
+	if h.aiHandler == nil || !h.aiHandler.IsRunning(r.Context()) {
		http.Error(w, "AI service is not available", http.StatusServiceUnavailable)
		return
	}

@@ -115,7 +115,7 @@ Only include settings that are relevant to the user's request. Do not include se
	ctx, cancel := context.WithTimeout(r.Context(), 120*time.Second)
	defer cancel()

-	response, err := h.aiHandler.GetService().Execute(ctx, chat.ExecuteRequest{
+	response, err := h.aiHandler.GetService(ctx).Execute(ctx, chat.ExecuteRequest{
		Prompt: fullPrompt,
	})
	if err != nil {

@@ -46,7 +46,8 @@ import (
type Router struct {
	mux                  *http.ServeMux
	config               *config.Config
-	monitor              *monitoring.Monitor
+	monitor              *monitoring.Monitor            // Legacy/Default support
+	mtMonitor            *monitoring.MultiTenantMonitor // Multi-tenant manager
	alertHandlers        *AlertHandlers
	configHandlers       *ConfigHandlers
	notificationHandlers *NotificationHandlers

@@ -69,6 +70,7 @@ type Router struct {
	exportLimiter   *RateLimiter
	downloadLimiter *RateLimiter
	persistence     *config.ConfigPersistence
+	multiTenant     *config.MultiTenantPersistence
	oidcMu          sync.Mutex
	oidcService     *OIDCService
	samlManager     *SAMLServiceManager

@@ -117,7 +119,7 @@ func isDirectLoopbackRequest(req *http.Request) bool {
}

// NewRouter creates a new router instance
-func NewRouter(cfg *config.Config, monitor *monitoring.Monitor, wsHub *websocket.Hub, reloadFunc func() error, serverVersion string) *Router {
+func NewRouter(cfg *config.Config, monitor *monitoring.Monitor, mtMonitor *monitoring.MultiTenantMonitor, wsHub *websocket.Hub, reloadFunc func() error, serverVersion string) *Router {
	// Initialize persistent session and CSRF stores
	InitSessionStore(cfg.DataPath)
	InitCSRFStore(cfg.DataPath)

@@ -139,6 +141,7 @@ func NewRouter(cfg *config.Config, monitor *monitoring.Monitor, wsHub *websocket.Hub, reloadFunc func() error, serverVersion string) *Router {
		mux:           http.NewServeMux(),
		config:        cfg,
		monitor:       monitor,
+		mtMonitor:     mtMonitor,
		wsHub:         wsHub,
		reloadFunc:    reloadFunc,
		updateManager: updateManager,

@@ -146,6 +149,7 @@ func NewRouter(cfg *config.Config, monitor *monitoring.Monitor, wsHub *websocket.Hub, reloadFunc func() error, serverVersion string) *Router {
		exportLimiter:   NewRateLimiter(5, 1*time.Minute),  // 5 attempts per minute
		downloadLimiter: NewRateLimiter(60, 1*time.Minute), // downloads/installers per minute per IP
		persistence:     config.NewConfigPersistence(cfg.DataPath),
+		multiTenant:     config.NewMultiTenantPersistence(cfg.DataPath),
		authorizer:      auth.GetAuthorizer(),
		serverVersion:   strings.TrimSpace(serverVersion),
		projectRoot:     projectRoot,

@@ -189,6 +193,7 @@ func NewRouter(cfg *config.Config, monitor *monitoring.Monitor, wsHub *websocket.Hub, reloadFunc func() error, serverVersion string) *Router {
	handler := SecurityHeadersWithConfig(r, allowEmbedding, allowedOrigins)
	handler = ErrorHandler(handler)
	handler = DemoModeMiddleware(cfg, handler)
+	handler = NewTenantMiddleware(r.multiTenant).Middleware(handler)
	handler = UniversalRateLimitMiddleware(handler)
	r.wrapped = handler
	return r

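The middleware chain above now runs tenant resolution between demo-mode handling and rate limiting, so every request carries its org ID before any handler executes. The tenant middleware itself is not part of this excerpt; as a hedged sketch of the behavior the commit describes (the `X-Pulse-Org-ID` header name comes from the commit message, `GetOrgID` is the helper used by router_helpers.go below; the context key and handler body are assumptions, not the committed code):

```go
// Hypothetical sketch of the tenant middleware; not the committed implementation.
type orgIDKey struct{} // assumed context key; the real one lives in the middleware file

// GetOrgID returns the org ID stored by the middleware, or "" if absent.
func GetOrgID(ctx context.Context) string {
	if v, ok := ctx.Value(orgIDKey{}).(string); ok {
		return v
	}
	return ""
}

func tenantMiddlewareSketch(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// A missing header leaves orgID == "", which downstream helpers
		// treat as the legacy/default tenant.
		orgID := r.Header.Get("X-Pulse-Org-ID")
		ctx := context.WithValue(r.Context(), orgIDKey{}, orgID)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}
```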
@@ -197,17 +202,20 @@ func NewRouter(cfg *config.Config, monitor *monitoring.Monitor, wsHub *websocket.Hub, reloadFunc func() error, serverVersion string) *Router {
// setupRoutes configures all routes
func (r *Router) setupRoutes() {
	// Create handlers
-	r.alertHandlers = NewAlertHandlers(NewAlertMonitorWrapper(r.monitor), r.wsHub)
-	r.notificationHandlers = NewNotificationHandlers(NewNotificationMonitorWrapper(r.monitor))
+	r.alertHandlers = NewAlertHandlers(r.mtMonitor, NewAlertMonitorWrapper(r.monitor), r.wsHub)
+	r.notificationHandlers = NewNotificationHandlers(r.mtMonitor, NewNotificationMonitorWrapper(r.monitor))
	r.notificationQueueHandlers = NewNotificationQueueHandlers(r.monitor)
-	guestMetadataHandler := NewGuestMetadataHandler(r.config.DataPath)
-	dockerMetadataHandler := NewDockerMetadataHandler(r.config.DataPath)
-	hostMetadataHandler := NewHostMetadataHandler(r.config.DataPath)
-	r.configHandlers = NewConfigHandlers(r.config, r.monitor, r.reloadFunc, r.wsHub, guestMetadataHandler, r.reloadSystemSettings)
+	guestMetadataHandler := NewGuestMetadataHandler(r.multiTenant)
+	dockerMetadataHandler := NewDockerMetadataHandler(r.multiTenant)
+	hostMetadataHandler := NewHostMetadataHandler(r.multiTenant)
+	r.configHandlers = NewConfigHandlers(r.multiTenant, r.mtMonitor, r.reloadFunc, r.wsHub, guestMetadataHandler, r.reloadSystemSettings)
+	if r.monitor != nil {
+		r.configHandlers.SetMonitor(r.monitor)
+	}
	updateHandlers := NewUpdateHandlers(r.updateManager, r.updateHistory)
-	r.dockerAgentHandlers = NewDockerAgentHandlers(r.monitor, r.wsHub, r.config)
-	r.kubernetesAgentHandlers = NewKubernetesAgentHandlers(r.monitor, r.wsHub)
-	r.hostAgentHandlers = NewHostAgentHandlers(r.monitor, r.wsHub)
+	r.dockerAgentHandlers = NewDockerAgentHandlers(r.mtMonitor, r.monitor, r.wsHub, r.config)
+	r.kubernetesAgentHandlers = NewKubernetesAgentHandlers(r.mtMonitor, r.monitor, r.wsHub)
+	r.hostAgentHandlers = NewHostAgentHandlers(r.mtMonitor, r.monitor, r.wsHub)
	r.resourceHandlers = NewResourceHandlers()
	r.configProfileHandler = NewConfigProfileHandler(r.persistence)
	r.licenseHandlers = NewLicenseHandlers(r.config.DataPath)

@@ -413,9 +421,9 @@ func (r *Router) setupRoutes() {
	r.mux.HandleFunc("/api/config/nodes", func(w http.ResponseWriter, req *http.Request) {
		switch req.Method {
		case http.MethodGet:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetNodes))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetNodes))(w, req)
		case http.MethodPost:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleAddNode))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleAddNode))(w, req)
		default:
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}

@@ -425,7 +433,7 @@ func (r *Router) setupRoutes() {
	// Test node configuration endpoint (for new nodes)
	r.mux.HandleFunc("/api/config/nodes/test-config", func(w http.ResponseWriter, req *http.Request) {
		if req.Method == http.MethodPost {
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestNodeConfig))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestNodeConfig))(w, req)
		} else {
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}

@@ -434,7 +442,7 @@ func (r *Router) setupRoutes() {
	// Test connection endpoint
	r.mux.HandleFunc("/api/config/nodes/test-connection", func(w http.ResponseWriter, req *http.Request) {
		if req.Method == http.MethodPost {
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestConnection))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestConnection))(w, req)
		} else {
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}

@@ -442,15 +450,15 @@ func (r *Router) setupRoutes() {
	r.mux.HandleFunc("/api/config/nodes/", func(w http.ResponseWriter, req *http.Request) {
		switch req.Method {
		case http.MethodPut:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateNode))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateNode))(w, req)
		case http.MethodDelete:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleDeleteNode))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleDeleteNode))(w, req)
		case http.MethodPost:
			// Handle test endpoint and refresh-cluster endpoint
			if strings.HasSuffix(req.URL.Path, "/test") {
-				RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestNode))(w, req)
+				RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestNode))(w, req)
			} else if strings.HasSuffix(req.URL.Path, "/refresh-cluster") {
-				RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleRefreshClusterNodes))(w, req)
+				RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleRefreshClusterNodes))(w, req)
			} else {
				http.Error(w, "Not found", http.StatusNotFound)
			}

@@ -469,7 +477,7 @@ func (r *Router) setupRoutes() {
	r.mux.HandleFunc("/api/config/system", func(w http.ResponseWriter, req *http.Request) {
		switch req.Method {
		case http.MethodGet:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetSystemSettings))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetSystemSettings))(w, req)
		default:
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}

@@ -479,9 +487,9 @@ func (r *Router) setupRoutes() {
	r.mux.HandleFunc("/api/system/mock-mode", func(w http.ResponseWriter, req *http.Request) {
		switch req.Method {
		case http.MethodGet:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetMockMode))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetMockMode))(w, req)
		case http.MethodPost, http.MethodPut:
-			RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateMockMode))(w, req)
+			RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateMockMode))(w, req)
		default:
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}

@@ -1181,7 +1189,7 @@ func (r *Router) setupRoutes() {
	}))

	// System settings and API token management
-	r.systemSettingsHandler = NewSystemSettingsHandler(r.config, r.persistence, r.wsHub, r.monitor, r.reloadSystemSettings, r.reloadFunc)
+	r.systemSettingsHandler = NewSystemSettingsHandler(r.config, r.persistence, r.wsHub, r.mtMonitor, r.monitor, r.reloadSystemSettings, r.reloadFunc)
	r.mux.HandleFunc("/api/system/settings", RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, r.systemSettingsHandler.HandleGetSystemSettings)))
	r.mux.HandleFunc("/api/system/settings/update", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.systemSettingsHandler.HandleUpdateSystemSettings)))
	r.mux.HandleFunc("/api/system/ssh-config", r.handleSSHConfig)

@@ -1206,7 +1214,7 @@ func (r *Router) setupRoutes() {
	})

	// AI settings endpoints
-	r.aiSettingsHandler = NewAISettingsHandler(r.config, r.persistence, r.agentExecServer)
+	r.aiSettingsHandler = NewAISettingsHandler(r.multiTenant, r.mtMonitor, r.agentExecServer)
	// Inject state provider so AI has access to full infrastructure context (VMs, containers, IPs)
	if r.monitor != nil {
		r.aiSettingsHandler.SetStateProvider(r.monitor)

@@ -1232,7 +1240,7 @@ func (r *Router) setupRoutes() {
	r.aiSettingsHandler.SetMetadataProvider(metadataProvider)

	// AI chat handler
-	r.aiHandler = NewAIHandler(r.config, r.persistence, r.agentExecServer)
+	r.aiHandler = NewAIHandler(r.multiTenant, r.mtMonitor, r.agentExecServer)
	// Wire license checker for Pro feature gating (AI Patrol, Alert Analysis, Auto-Fix)
	r.aiSettingsHandler.SetLicenseChecker(r.licenseHandlers.Service())
	// Wire model change callback to restart AI chat service when model is changed

@@ -1242,8 +1250,9 @@ func (r *Router) setupRoutes() {
	// Wire control settings change callback to update MCP tool visibility
	r.aiSettingsHandler.SetOnControlSettingsChange(func() {
		if r.aiHandler != nil {
-			if svc := r.aiHandler.GetService(); svc != nil {
-				cfg := r.aiHandler.GetAIConfig()
+			ctx := context.Background()
+			if svc := r.aiHandler.GetService(ctx); svc != nil {
+				cfg := r.aiHandler.GetAIConfig(ctx)
				if cfg != nil {
					svc.UpdateControlSettings(cfg)
					log.Info().Str("control_level", cfg.GetControlLevel()).Msg("Updated AI control settings")

@@ -1777,7 +1786,7 @@ func (r *Router) StartPatrol(ctx context.Context) {

	// Only initialize baseline learning if AI is enabled
	// This prevents anomaly data from being collected and displayed when AI is disabled
-	if r.aiSettingsHandler.IsAIEnabled() {
+	if r.aiSettingsHandler.IsAIEnabled(context.Background()) {
		// Initialize baseline store for anomaly detection
		// Uses config dir for persistence
		baselineCfg := ai.DefaultBaselineConfig()

@@ -1819,7 +1828,7 @@ func (r *Router) StartPatrol(ctx context.Context) {

	// Only initialize pattern and correlation detectors if AI is enabled
	// This prevents these subsystems from collecting data and displaying findings when AI is disabled
-	if r.aiSettingsHandler.IsAIEnabled() {
+	if r.aiSettingsHandler.IsAIEnabled(context.Background()) {
		// Initialize pattern detector for failure prediction
		patternDetector := ai.NewPatternDetector(ai.PatternDetectorConfig{
			MaxEvents: 5000,

@@ -1912,9 +1921,9 @@ func (r *Router) StartAIChat(ctx context.Context) {
	r.wireAIChatProviders()

	// Wire up AI patrol if AI is running
-	aiCfg := r.aiHandler.GetAIConfig()
-	if aiCfg != nil && r.aiHandler.IsRunning() {
-		service := r.aiHandler.GetService()
+	aiCfg := r.aiHandler.GetAIConfig(context.Background())
+	if aiCfg != nil && r.aiHandler.IsRunning(context.Background()) {
+		service := r.aiHandler.GetService(context.Background())
		if service != nil {
			// Create patrol service - need concrete type for patrol
			chatService, ok := service.(*chat.Service)

@@ -1925,7 +1934,7 @@ func (r *Router) StartAIChat(ctx context.Context) {

			// Wire to existing patrol service
			if r.aiSettingsHandler != nil {
-				if patrolSvc := r.aiSettingsHandler.GetAIService().GetPatrolService(); patrolSvc != nil {
+				if patrolSvc := r.aiSettingsHandler.GetAIService(context.Background()).GetPatrolService(); patrolSvc != nil {
					patrolSvc.SetChatPatrol(aiPatrol, true)
					log.Info().Msg("AI patrol integration enabled")
				}

@@ -1936,11 +1945,11 @@ func (r *Router) StartAIChat(ctx context.Context) {

// wireAIChatProviders wires up all MCP tool providers for AI chat
func (r *Router) wireAIChatProviders() {
-	if r.aiHandler == nil || !r.aiHandler.IsRunning() {
+	if r.aiHandler == nil || !r.aiHandler.IsRunning(context.Background()) {
		return
	}

-	service := r.aiHandler.GetService()
+	service := r.aiHandler.GetService(context.Background())
	if service == nil {
		return
	}

@@ -1958,7 +1967,7 @@ func (r *Router) wireAIChatProviders() {

	// Wire findings provider from patrol service
	if r.aiSettingsHandler != nil {
-		if patrolSvc := r.aiSettingsHandler.GetAIService().GetPatrolService(); patrolSvc != nil {
+		if patrolSvc := r.aiSettingsHandler.GetAIService(context.Background()).GetPatrolService(); patrolSvc != nil {
			if findingsStore := patrolSvc.GetFindings(); findingsStore != nil {
				findingsAdapter := ai.NewFindingsMCPAdapter(findingsStore)
				if findingsAdapter != nil {

@@ -2027,7 +2036,7 @@ func (r *Router) wireAIChatProviders() {

	// Wire baseline provider
	if r.aiSettingsHandler != nil {
-		if patrolSvc := r.aiSettingsHandler.GetAIService().GetPatrolService(); patrolSvc != nil {
+		if patrolSvc := r.aiSettingsHandler.GetAIService(context.Background()).GetPatrolService(); patrolSvc != nil {
			if baselineStore := patrolSvc.GetBaselineStore(); baselineStore != nil {
				baselineAdapter := tools.NewBaselineMCPAdapter(&baselineSourceWrapper{store: baselineStore})
				if baselineAdapter != nil {

@@ -2040,7 +2049,7 @@ func (r *Router) wireAIChatProviders() {

	// Wire pattern provider
	if r.aiSettingsHandler != nil {
-		if patrolSvc := r.aiSettingsHandler.GetAIService().GetPatrolService(); patrolSvc != nil {
+		if patrolSvc := r.aiSettingsHandler.GetAIService(context.Background()).GetPatrolService(); patrolSvc != nil {
			if patternDetector := patrolSvc.GetPatternDetector(); patternDetector != nil {
				patternAdapter := tools.NewPatternMCPAdapter(
					&patternSourceWrapper{detector: patternDetector},

@@ -2056,7 +2065,7 @@ func (r *Router) wireAIChatProviders() {

	// Wire findings manager
	if r.aiSettingsHandler != nil {
-		if patrolSvc := r.aiSettingsHandler.GetAIService().GetPatrolService(); patrolSvc != nil {
+		if patrolSvc := r.aiSettingsHandler.GetAIService(context.Background()).GetPatrolService(); patrolSvc != nil {
			findingsManagerAdapter := tools.NewFindingsManagerMCPAdapter(patrolSvc)
			if findingsManagerAdapter != nil {
				service.SetFindingsManager(findingsManagerAdapter)

@@ -2067,7 +2076,7 @@ func (r *Router) wireAIChatProviders() {

	// Wire metadata updater
	if r.aiSettingsHandler != nil {
-		metadataAdapter := tools.NewMetadataUpdaterMCPAdapter(r.aiSettingsHandler.GetAIService())
+		metadataAdapter := tools.NewMetadataUpdaterMCPAdapter(r.aiSettingsHandler.GetAIService(context.Background()))
		if metadataAdapter != nil {
			service.SetMetadataUpdater(metadataAdapter)
			log.Debug().Msg("AI chat: Metadata updater wired")

@@ -2360,7 +2369,7 @@ func (r *Router) learnBaselines(store *ai.BaselineStore, metricsHistory *monitor
// This enables AI to analyze specific resources when alerts fire, providing token-efficient real-time insights
func (r *Router) GetAlertTriggeredAnalyzer() *ai.AlertTriggeredAnalyzer {
	if r.aiSettingsHandler != nil {
-		return r.aiSettingsHandler.GetAlertTriggeredAnalyzer()
+		return r.aiSettingsHandler.GetAlertTriggeredAnalyzer(context.Background())
	}
	return nil
}

@@ -3839,6 +3848,8 @@ func (r *Router) handleCharts(w http.ResponseWriter, req *http.Request) {
		duration = time.Hour
	case "4h":
		duration = 4 * time.Hour
+	case "8h":
+		duration = 8 * time.Hour
	case "12h":
		duration = 12 * time.Hour
	case "24h":

52
internal/api/router_helpers.go
Normal file

@@ -0,0 +1,52 @@
package api

import (
	"net/http"

	"github.com/rcourtman/pulse-go-rewrite/internal/monitoring"
)

// getMonitor returns the tenant-specific monitor instance for the request.
// It uses the OrgID from the context (injected by TenantMiddleware).
// If no tenant monitor is found, or if not in multi-tenant mode, it returns the default monitor.
func (r *Router) getMonitor(req *http.Request) (*monitoring.Monitor, error) {
	if r.mtMonitor == nil {
		return r.monitor, nil
	}

	orgID := GetOrgID(req.Context())
	if orgID == "" {
		return r.monitor, nil
	}

	return r.mtMonitor.GetMonitor(orgID)
}

// SetMultiTenantMonitor updates the multi-tenant monitor manager.
// Used during reload.
func (r *Router) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
	r.mtMonitor = mtm
	if r.alertHandlers != nil {
		r.alertHandlers.SetMultiTenantMonitor(mtm)
	}
	if r.notificationHandlers != nil {
		r.notificationHandlers.SetMultiTenantMonitor(mtm)
	}
	if r.dockerAgentHandlers != nil {
		r.dockerAgentHandlers.SetMultiTenantMonitor(mtm)
	}
	if r.hostAgentHandlers != nil {
		r.hostAgentHandlers.SetMultiTenantMonitor(mtm)
	}
	if r.kubernetesAgentHandlers != nil {
		r.kubernetesAgentHandlers.SetMultiTenantMonitor(mtm)
	}
	if r.systemSettingsHandler != nil {
		r.systemSettingsHandler.SetMultiTenantMonitor(mtm)
	}
	if mtm != nil {
		if m, err := mtm.GetMonitor("default"); err == nil {
			r.monitor = m
		}
	}
}

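Every context-aware handler follows the same resolve-then-act shape that getMonitor enables. A minimal sketch of the pattern as a hypothetical endpoint (the handler name and JSON response are illustrative, not part of this commit):

```go
// Hypothetical endpoint demonstrating per-request tenant resolution.
func (r *Router) handleTenantState(w http.ResponseWriter, req *http.Request) {
	monitor, err := r.getMonitor(req) // resolved from GetOrgID(req.Context())
	if err != nil || monitor == nil {
		http.Error(w, "tenant not available", http.StatusServiceUnavailable)
		return
	}
	// Everything below is scoped to the caller's organization.
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(monitor.GetState())
}
```

Because the fallback path returns the legacy monitor, single-tenant deployments that never send X-Pulse-Org-ID behave exactly as before.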
@@ -16,12 +16,23 @@ import (

	"github.com/rcourtman/pulse-go-rewrite/internal/config"
	"github.com/rcourtman/pulse-go-rewrite/internal/discovery"
+	"github.com/rcourtman/pulse-go-rewrite/internal/monitoring"
	"github.com/rcourtman/pulse-go-rewrite/internal/notifications"
	"github.com/rcourtman/pulse-go-rewrite/internal/utils"
	"github.com/rcourtman/pulse-go-rewrite/internal/websocket"
	"github.com/rs/zerolog/log"
)

+// SystemSettingsMonitor defines the monitor interface needed by system settings
+type SystemSettingsMonitor interface {
+	GetDiscoveryService() *discovery.Service
+	StartDiscoveryService(ctx context.Context, wsHub *websocket.Hub, subnet string)
+	StopDiscoveryService()
+	EnableTemperatureMonitoring()
+	DisableTemperatureMonitoring()
+	GetNotificationManager() *notifications.NotificationManager
+}
+
// SystemSettingsHandler handles system settings
type SystemSettingsHandler struct {
	config *config.Config

@@ -29,45 +40,63 @@ type SystemSettingsHandler struct {
	wsHub                    *websocket.Hub
	reloadSystemSettingsFunc func() // Function to reload cached system settings
	reloadMonitorFunc        func() error
-	monitor interface {
-		GetDiscoveryService() *discovery.Service
-		StartDiscoveryService(ctx context.Context, wsHub *websocket.Hub, subnet string)
-		StopDiscoveryService()
-		EnableTemperatureMonitoring()
-		DisableTemperatureMonitoring()
-		GetNotificationManager() *notifications.NotificationManager
+	mtMonitor interface {
+		GetMonitor(string) (*monitoring.Monitor, error)
	}
+	legacyMonitor SystemSettingsMonitor
}

// NewSystemSettingsHandler creates a new system settings handler
-func NewSystemSettingsHandler(cfg *config.Config, persistence *config.ConfigPersistence, wsHub *websocket.Hub, monitor interface {
-	GetDiscoveryService() *discovery.Service
-	StartDiscoveryService(ctx context.Context, wsHub *websocket.Hub, subnet string)
-	StopDiscoveryService()
-	EnableTemperatureMonitoring()
-	DisableTemperatureMonitoring()
-	GetNotificationManager() *notifications.NotificationManager
-}, reloadSystemSettingsFunc func(), reloadMonitorFunc func() error) *SystemSettingsHandler {
+func NewSystemSettingsHandler(cfg *config.Config, persistence *config.ConfigPersistence, wsHub *websocket.Hub, mtm *monitoring.MultiTenantMonitor, monitor SystemSettingsMonitor, reloadSystemSettingsFunc func(), reloadMonitorFunc func() error) *SystemSettingsHandler {
+	// If mtm is provided and no legacy monitor was passed in, populate it from the "default" org
+	if monitor == nil && mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil {
+			monitor = m
+		}
+	}
	return &SystemSettingsHandler{
		config:                   cfg,
		persistence:              persistence,
		wsHub:                    wsHub,
-		monitor:                  monitor,
+		mtMonitor:                mtm,
+		legacyMonitor:            monitor,
		reloadSystemSettingsFunc: reloadSystemSettingsFunc,
		reloadMonitorFunc:        reloadMonitorFunc,
	}
}

// SetMonitor updates the monitor reference used by the handler at runtime.
-func (h *SystemSettingsHandler) SetMonitor(m interface {
-	GetDiscoveryService() *discovery.Service
-	StartDiscoveryService(ctx context.Context, wsHub *websocket.Hub, subnet string)
-	StopDiscoveryService()
-	EnableTemperatureMonitoring()
-	DisableTemperatureMonitoring()
-	GetNotificationManager() *notifications.NotificationManager
-}) {
-	h.monitor = m
+func (h *SystemSettingsHandler) SetMonitor(m SystemSettingsMonitor) {
+	h.legacyMonitor = m
}

+// SetMultiTenantMonitor updates the multi-tenant monitor reference
+func (h *SystemSettingsHandler) SetMultiTenantMonitor(mtm *monitoring.MultiTenantMonitor) {
+	h.mtMonitor = mtm
+	if mtm != nil {
+		if m, err := mtm.GetMonitor("default"); err == nil {
+			h.legacyMonitor = m
+		}
+	}
+}
+
+func (h *SystemSettingsHandler) getMonitor(ctx context.Context) SystemSettingsMonitor {
+	// Note: the SystemSettingsMonitor interface methods (GetDiscoveryService etc.)
+	// must be implemented by *monitoring.Monitor directly. Since decoupling
+	// *monitoring.Monitor from specific interfaces involves casting or wrappers,
+	// we assume here that *monitoring.Monitor satisfies SystemSettingsMonitor.
+
+	if h.mtMonitor != nil {
+		// Resolve the org via the api package's GetOrgID context helper
+		orgID := GetOrgID(ctx)
+		if mtm, ok := h.mtMonitor.(*monitoring.MultiTenantMonitor); ok {
+			if m, err := mtm.GetMonitor(orgID); err == nil && m != nil {
+				return m
+			}
+		}
+	}
+	return h.legacyMonitor
+}
+
// SetConfig updates the configuration reference used by the handler.

@@ -680,44 +709,44 @@ func (h *SystemSettingsHandler) HandleUpdateSystemSettings(w http.ResponseWriter, r *http.Request) {
	}

	// Start or stop discovery service based on setting change
-	if h.monitor != nil {
+	if h.getMonitor(r.Context()) != nil {
		if settings.DiscoveryEnabled && !prevDiscoveryEnabled {
			// Discovery was just enabled, start the service
			subnet := h.config.DiscoverySubnet
			if subnet == "" {
				subnet = "auto"
			}
-			h.monitor.StartDiscoveryService(context.Background(), h.wsHub, subnet)
+			h.getMonitor(r.Context()).StartDiscoveryService(context.Background(), h.wsHub, subnet)
			log.Info().Msg("Discovery service started via settings update")
		} else if !settings.DiscoveryEnabled && prevDiscoveryEnabled {
			// Discovery was just disabled, stop the service
-			h.monitor.StopDiscoveryService()
+			h.getMonitor(r.Context()).StopDiscoveryService()
			log.Info().Msg("Discovery service stopped via settings update")
		} else if settings.DiscoveryEnabled && settings.DiscoverySubnet != "" {
			// Subnet changed while discovery is enabled, update it
-			if svc := h.monitor.GetDiscoveryService(); svc != nil {
+			if svc := h.getMonitor(r.Context()).GetDiscoveryService(); svc != nil {
				svc.SetSubnet(settings.DiscoverySubnet)
			}
		}
		if discoveryConfigUpdated && settings.DiscoveryEnabled {
-			if svc := h.monitor.GetDiscoveryService(); svc != nil {
+			if svc := h.getMonitor(r.Context()).GetDiscoveryService(); svc != nil {
				log.Info().Msg("Discovery configuration changed; triggering refresh")
				svc.ForceRefresh()
			}
		}
	}

-	if tempToggleRequested && h.monitor != nil {
+	if tempToggleRequested && h.getMonitor(r.Context()) != nil {
		if settings.TemperatureMonitoringEnabled && !prevTempEnabled {
-			h.monitor.EnableTemperatureMonitoring()
+			h.getMonitor(r.Context()).EnableTemperatureMonitoring()
		} else if !settings.TemperatureMonitoringEnabled && prevTempEnabled {
-			h.monitor.DisableTemperatureMonitoring()
+			h.getMonitor(r.Context()).DisableTemperatureMonitoring()
		}
	}

	// Update webhook allowed private CIDRs if changed
-	if _, ok := rawRequest["webhookAllowedPrivateCIDRs"]; ok && h.monitor != nil {
-		if nm := h.monitor.GetNotificationManager(); nm != nil {
+	if _, ok := rawRequest["webhookAllowedPrivateCIDRs"]; ok && h.getMonitor(r.Context()) != nil {
+		if nm := h.getMonitor(r.Context()).GetNotificationManager(); nm != nil {
			if err := nm.UpdateAllowedPrivateCIDRs(settings.WebhookAllowedPrivateCIDRs); err != nil {
				log.Error().Err(err).Msg("Failed to update webhook allowed private CIDRs")
				http.Error(w, fmt.Sprintf("Invalid webhook allowed private CIDRs: %v", err), http.StatusBadRequest)

@@ -729,8 +758,8 @@ func (h *SystemSettingsHandler) HandleUpdateSystemSettings(w http.ResponseWriter, r *http.Request) {
	// Update public URL for notifications if changed
	if _, ok := rawRequest["publicURL"]; ok {
		h.config.PublicURL = settings.PublicURL
-		if h.monitor != nil {
-			if nm := h.monitor.GetNotificationManager(); nm != nil {
+		if h.getMonitor(r.Context()) != nil {
+			if nm := h.getMonitor(r.Context()).GetNotificationManager(); nm != nil {
				nm.SetPublicURL(settings.PublicURL)
				log.Info().Str("publicURL", settings.PublicURL).Msg("Updated notification public URL from settings")
			}

86
internal/config/multi_tenant.go
Normal file

@@ -0,0 +1,86 @@
package config

import (
	"fmt"
	"path/filepath"
	"sync"

	"github.com/rcourtman/pulse-go-rewrite/internal/models"
	"github.com/rs/zerolog/log"
)

// MultiTenantPersistence manages a collection of TenantPersistence instances,
// one for each organization.
type MultiTenantPersistence struct {
	baseDataDir string
	mu          sync.RWMutex
	tenants     map[string]*ConfigPersistence
}

// NewMultiTenantPersistence creates a new multi-tenant persistence manager.
func NewMultiTenantPersistence(baseDataDir string) *MultiTenantPersistence {
	return &MultiTenantPersistence{
		baseDataDir: baseDataDir,
		tenants:     make(map[string]*ConfigPersistence),
	}
}

// GetPersistence returns the persistence instance for a specific organization.
// It initializes the persistence if it hasn't been loaded yet.
func (mtp *MultiTenantPersistence) GetPersistence(orgID string) (*ConfigPersistence, error) {
	mtp.mu.RLock()
	persistence, exists := mtp.tenants[orgID]
	mtp.mu.RUnlock()

	if exists {
		return persistence, nil
	}

	mtp.mu.Lock()
	defer mtp.mu.Unlock()

	// Double-check locking pattern
	if persistence, exists = mtp.tenants[orgID]; exists {
		return persistence, nil
	}

	// Validate OrgID (prevent directory traversal)
	if filepath.Base(orgID) != orgID || orgID == "" || orgID == "." || orgID == ".." {
		return nil, fmt.Errorf("invalid organization ID: %s", orgID)
	}

	// Determine the org data directory.
	// All orgs, including "default", live under /data/orgs/<org-id>.
	var orgDir string
	if orgID == "default" {
		orgDir = filepath.Join(mtp.baseDataDir, "orgs", "default")
	} else {
		orgDir = filepath.Join(mtp.baseDataDir, "orgs", orgID)
	}

	log.Info().Str("org_id", orgID).Str("dir", orgDir).Msg("Initializing tenant persistence")

	cp, err := newConfigPersistence(orgDir)
	if err != nil {
		return nil, fmt.Errorf("failed to initialize persistence for org %s: %w", orgID, err)
	}

	// Ensure the directory exists
	if err := cp.EnsureConfigDir(); err != nil {
		return nil, err
	}

	mtp.tenants[orgID] = cp
	return cp, nil
}

// LoadOrganization loads basic metadata for an organization.
// This is separate from the tenant's internal config.
func (mtp *MultiTenantPersistence) LoadOrganization(orgID string) (*models.Organization, error) {
	// TODO: implement organization metadata storage (e.g. in system.json) later
	return &models.Organization{
		ID:          orgID,
		DisplayName: orgID, // Placeholder
	}, nil
}

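In practice the directory-per-tenant strategy means each org gets its own copy of the existing file layout under `orgs/`. A quick usage sketch (the base path is illustrative; error handling trimmed; `GetConfigDir` is the alias added to ConfigPersistence below):

```go
// Sketch: two tenants resolve to isolated config directories.
mtp := config.NewMultiTenantPersistence("/etc/pulse")

defaultCP, _ := mtp.GetPersistence("default")     // /etc/pulse/orgs/default
customerCP, _ := mtp.GetPersistence("customer-a") // /etc/pulse/orgs/customer-a

// IDs that don't survive filepath.Base are rejected before anything is created.
if _, err := mtp.GetPersistence("../escape"); err != nil {
	fmt.Println(err) // invalid organization ID: ../escape
}

fmt.Println(defaultCP.GetConfigDir(), customerCP.GetConfigDir())
```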
@@ -44,6 +44,12 @@ type ConfigPersistence struct {
	aiChatSessionsFile string
	crypto             *crypto.CryptoManager
	fs                 FileSystem
+
+	// Lazy loaded metadata stores
+	guestMetadataStore  *GuestMetadataStore
+	dockerMetadataStore *DockerMetadataStore
+	hostMetadataStore   *HostMetadataStore
+	metadataMu          sync.Mutex
}

// FileSystem interface for mocking file operations

@@ -138,6 +144,11 @@ func (c *ConfigPersistence) DataDir() string {
	return c.configDir
}

+// GetConfigDir returns the configuration directory path (alias for DataDir to match interface expectations)
+func (c *ConfigPersistence) GetConfigDir() string {
+	return c.configDir
+}
+
// EnsureConfigDir ensures the configuration directory exists
func (c *ConfigPersistence) EnsureConfigDir() error {
	return c.fs.MkdirAll(c.configDir, 0700)

34
internal/config/persistence_metadata_accessors.go
Normal file

@@ -0,0 +1,34 @@
package config

// GetGuestMetadataStore returns the guest metadata store, creating it if necessary
func (c *ConfigPersistence) GetGuestMetadataStore() *GuestMetadataStore {
	c.metadataMu.Lock()
	defer c.metadataMu.Unlock()

	if c.guestMetadataStore == nil {
		c.guestMetadataStore = NewGuestMetadataStore(c.configDir, c.fs)
	}
	return c.guestMetadataStore
}

// GetDockerMetadataStore returns the docker metadata store, creating it if necessary
func (c *ConfigPersistence) GetDockerMetadataStore() *DockerMetadataStore {
	c.metadataMu.Lock()
	defer c.metadataMu.Unlock()

	if c.dockerMetadataStore == nil {
		c.dockerMetadataStore = NewDockerMetadataStore(c.configDir, c.fs)
	}
	return c.dockerMetadataStore
}

// GetHostMetadataStore returns the host metadata store, creating it if necessary
func (c *ConfigPersistence) GetHostMetadataStore() *HostMetadataStore {
	c.metadataMu.Lock()
	defer c.metadataMu.Unlock()

	if c.hostMetadataStore == nil {
		c.hostMetadataStore = NewHostMetadataStore(c.configDir, c.fs)
	}
	return c.hostMetadataStore
}

20
internal/models/organization.go
Normal file

@@ -0,0 +1,20 @@
package models

import "time"

// Organization represents a distinct tenant in the system.
type Organization struct {
	// ID is the unique identifier for the organization (e.g., "customer-a").
	// It is used as the directory name for data isolation.
	ID string `json:"id"`

	// DisplayName is the human-readable name of the organization.
	DisplayName string `json:"displayName"`

	// CreatedAt is when the organization was registered.
	CreatedAt time.Time `json:"createdAt"`

	// EncryptionKeyID refers to the specific encryption key used for this org's data
	// (future proofing for per-tenant encryption keys)
	EncryptionKeyID string `json:"encryptionKeyId,omitempty"`
}

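With these tags, a freshly provisioned default org would serialize roughly as below; note that `omitempty` drops the unset key ID. A small sketch (the timestamp is illustrative):

```go
org := models.Organization{
	ID:          "default",
	DisplayName: "default",
	CreatedAt:   time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC),
}
b, _ := json.MarshalIndent(org, "", "  ")
fmt.Println(string(b))
// {
//   "id": "default",
//   "displayName": "default",
//   "createdAt": "2026-01-01T00:00:00Z"
// }
```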
10
internal/monitoring/monitor_accessors.go
Normal file

@@ -0,0 +1,10 @@
package monitoring

import "github.com/rcourtman/pulse-go-rewrite/internal/config"

// GetConfig returns the current configuration used by the monitor
func (m *Monitor) GetConfig() *config.Config {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.config
}

115
internal/monitoring/multi_tenant_monitor.go
Normal file

@@ -0,0 +1,115 @@
package monitoring

import (
	"context"
	"fmt"
	"sync"

	"github.com/rcourtman/pulse-go-rewrite/internal/config"
	"github.com/rcourtman/pulse-go-rewrite/internal/websocket"
	"github.com/rs/zerolog/log"
)

// MultiTenantMonitor manages a dedicated Monitor instance for each organization.
type MultiTenantMonitor struct {
	mu           sync.RWMutex
	monitors     map[string]*Monitor
	persistence  *config.MultiTenantPersistence
	baseConfig   *config.Config
	wsHub        *websocket.Hub
	globalCtx    context.Context
	globalCancel context.CancelFunc
}

// NewMultiTenantMonitor creates a new multi-tenant monitor manager.
func NewMultiTenantMonitor(baseCfg *config.Config, persistence *config.MultiTenantPersistence, wsHub *websocket.Hub) *MultiTenantMonitor {
	ctx, cancel := context.WithCancel(context.Background())
	return &MultiTenantMonitor{
		monitors:     make(map[string]*Monitor),
		persistence:  persistence,
		baseConfig:   baseCfg, // Used as a template or for global settings
		wsHub:        wsHub,
		globalCtx:    ctx,
		globalCancel: cancel,
	}
}

// GetMonitor returns the monitor instance for a specific organization.
// It lazily initializes the monitor if it doesn't exist.
func (mtm *MultiTenantMonitor) GetMonitor(orgID string) (*Monitor, error) {
	mtm.mu.RLock()
	monitor, exists := mtm.monitors[orgID]
	mtm.mu.RUnlock()

	if exists {
		return monitor, nil
	}

	mtm.mu.Lock()
	defer mtm.mu.Unlock()

	// Double-check locking pattern
	if monitor, exists = mtm.monitors[orgID]; exists {
		return monitor, nil
	}

	// Initialize a new monitor for this tenant
	log.Info().Str("org_id", orgID).Msg("Initializing tenant monitor")

	// 1. Load tenant config.
	// We need a specific config for this tenant. For now, we clone the base
	// config (assuming shared defaults); in the future, we'll load overrides
	// from persistence.GetPersistence(orgID).
	tenantConfig := *mtm.baseConfig // Shallow copy

	// Ensure the DataPath is correct for this tenant to isolate storage (sqlite, etc.)
	tenantPersistence, err := mtm.persistence.GetPersistence(orgID)
	if err != nil {
		return nil, fmt.Errorf("failed to get persistence for org %s: %w", orgID, err)
	}
	tenantConfig.DataPath = tenantPersistence.GetConfigDir()

	// 2. Create the monitor via the internal New constructor.
	monitor, err = New(&tenantConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create monitor for org %s: %w", orgID, err)
	}

	// 3. Start the monitor.
	// We pass the global context, but maybe we should give it a derived one?
	// Using globalCtx ensures all monitors stop when MultiTenantMonitor stops.
	// NOTE: Monitor.Start is async.
	go monitor.Start(mtm.globalCtx, mtm.wsHub)

	mtm.monitors[orgID] = monitor
	return monitor, nil
}

// Stop stops all tenant monitors.
func (mtm *MultiTenantMonitor) Stop() {
	mtm.mu.Lock()
	defer mtm.mu.Unlock()

	log.Info().Msg("Stopping MultiTenantMonitor and all tenant instances")
	mtm.globalCancel()

	for _, monitor := range mtm.monitors {
		monitor.Stop()
	}
	// Clear the map
	mtm.monitors = make(map[string]*Monitor)
}

// RemoveTenant stops and removes a specific tenant's monitor.
// Useful for offboarding or manual reloading.
func (mtm *MultiTenantMonitor) RemoveTenant(orgID string) {
	mtm.mu.Lock()
	defer mtm.mu.Unlock()

	if monitor, exists := mtm.monitors[orgID]; exists {
		log.Info().Str("org_id", orgID).Msg("Stopping and removing tenant monitor")
		monitor.Stop()
		delete(mtm.monitors, orgID)
	}
}

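Because GetMonitor both provisions and starts a tenant on first use, callers never need a separate setup step. A minimal usage sketch (the org ID is illustrative; assume baseCfg, mtp, and wsHub were built as in the server wiring further down):

```go
// Sketch: lazy per-tenant monitor lifecycle.
mtm := monitoring.NewMultiTenantMonitor(baseCfg, mtp, wsHub)

// The first call creates the tenant's config dir, clones the base config,
// and starts the monitor goroutine under the shared global context.
m1, err := mtm.GetMonitor("customer-a")
if err != nil {
	log.Fatal().Err(err).Msg("tenant init failed")
}

// Subsequent calls hit the fast RLock path and return the same instance.
m2, _ := mtm.GetMonitor("customer-a")
fmt.Println(m1 == m2) // true

mtm.Stop() // cancels globalCtx and stops every tenant monitor
```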
@@ -12,28 +12,28 @@ import (

// ReloadableMonitor wraps a Monitor with reload capability
type ReloadableMonitor struct {
-	mu         sync.RWMutex
-	monitor    *Monitor
-	config     *config.Config
-	wsHub      *websocket.Hub
-	ctx        context.Context
-	cancel     context.CancelFunc
-	parentCtx  context.Context
-	reloadChan chan chan error
+	mu          sync.RWMutex
+	mtMonitor   *MultiTenantMonitor
+	persistence *config.MultiTenantPersistence
+	config      *config.Config
+	wsHub       *websocket.Hub
+	ctx         context.Context
+	cancel      context.CancelFunc
+	parentCtx   context.Context
+	reloadChan  chan chan error
}

// NewReloadableMonitor creates a new reloadable monitor
-func NewReloadableMonitor(cfg *config.Config, wsHub *websocket.Hub) (*ReloadableMonitor, error) {
-	monitor, err := New(cfg)
-	if err != nil {
-		return nil, err
-	}
+func NewReloadableMonitor(cfg *config.Config, persistence *config.MultiTenantPersistence, wsHub *websocket.Hub) (*ReloadableMonitor, error) {
+	// NewMultiTenantMonitor does not return an error, so no error check is needed.
+	mtMonitor := NewMultiTenantMonitor(cfg, persistence, wsHub)

	rm := &ReloadableMonitor{
-		monitor:    monitor,
-		config:     cfg,
-		wsHub:      wsHub,
-		reloadChan: make(chan chan error, 1),
+		mtMonitor:   mtMonitor,
+		config:      cfg,
+		persistence: persistence,
+		wsHub:       wsHub,
+		reloadChan:  make(chan chan error, 1),
	}

	return rm, nil

@@ -46,8 +46,10 @@ func (rm *ReloadableMonitor) Start(ctx context.Context) {
	rm.ctx, rm.cancel = context.WithCancel(ctx)
	rm.mu.Unlock()

-	// Start the monitor
-	go rm.monitor.Start(rm.ctx, rm.wsHub)
+	// Start the multi-tenant monitor manager.
+	// Note: it doesn't start individual monitors until they are requested via GetMonitor().
+	// We might want to eagerly start the "default" monitor here;
+	// for now, lazy loading handles it.

	// Watch for reload signals
	go rm.watchReload(ctx)

@@ -101,31 +103,37 @@ func (rm *ReloadableMonitor) doReload() error {
	// Wait a moment for cleanup
	time.Sleep(1 * time.Second)

-	// Create new monitor
-	newMonitor, err := New(cfg)
-	if err != nil {
-		// Restart old monitor if new one fails
-		rm.ctx, rm.cancel = context.WithCancel(rm.parentCtx)
-		go rm.monitor.Start(rm.ctx, rm.wsHub)
-		return err
-	}
+	// Create a new multi-tenant monitor.
+	// Note: existing per-tenant instance state is lost here, which is expected on a full reload.
+	newMTMonitor := NewMultiTenantMonitor(cfg, rm.persistence, rm.wsHub)

	// Replace monitor
-	rm.monitor = newMonitor
+	rm.mtMonitor = newMTMonitor
	rm.config = cfg

-	// Start new monitor
+	// Start new monitor context (individual monitors are lazily loaded/started)
	rm.ctx, rm.cancel = context.WithCancel(rm.parentCtx)
-	go rm.monitor.Start(rm.ctx, rm.wsHub)

	return nil
}

-// GetMonitor returns the current monitor instance
+// GetMultiTenantMonitor returns the current multi-tenant monitor instance
+func (rm *ReloadableMonitor) GetMultiTenantMonitor() *MultiTenantMonitor {
+	rm.mu.RLock()
+	defer rm.mu.RUnlock()
+	return rm.mtMonitor
+}
+
+// GetMonitor returns the default monitor instance (compatibility shim).
+// It ensures the "default" tenant is initialized.
func (rm *ReloadableMonitor) GetMonitor() *Monitor {
	rm.mu.RLock()
	defer rm.mu.RUnlock()
-	return rm.monitor
+	if rm.mtMonitor == nil {
+		return nil
+	}
+	m, _ := rm.mtMonitor.GetMonitor("default")
+	return m
}

// GetConfig returns the current configuration used by the monitor.

@@ -140,7 +148,13 @@ func (rm *ReloadableMonitor) GetConfig() *config.Config {

// GetState returns the current state
func (rm *ReloadableMonitor) GetState() interface{} {
-	return rm.GetMonitor().GetState()
+	// For backward compatibility / frontend simplicity, return the default org's state.
+	// TODO: make the WebSocket state getter tenant-aware.
+	monitor, err := rm.GetMultiTenantMonitor().GetMonitor("default")
+	if err != nil {
+		return nil
+	}
+	return monitor.GetState()
}

// Stop stops the monitor

@@ -152,7 +166,7 @@ func (rm *ReloadableMonitor) Stop() {
		rm.cancel()
	}

-	if rm.monitor != nil {
-		rm.monitor.Stop()
+	if rm.mtMonitor != nil {
+		rm.mtMonitor.Stop()
	}
}

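One caveat of the compatibility shim: a *Monitor fetched before doReload points at a stopped instance afterwards, since the reload swaps in a fresh MultiTenantMonitor. Callers should therefore re-resolve through the wrapper instead of caching. A hedged sketch (the reload trigger is assumed to flow through reloadChan; the exact entry point isn't shown in this diff):

```go
before := rm.GetMonitor() // lazily starts the "default" tenant

// ...something feeds rm.reloadChan, driving doReload()...

after := rm.GetMonitor()     // a brand-new default tenant from the new manager
fmt.Println(before == after) // false: the old instance was torn down
```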
9
mock.env
Normal file

@@ -0,0 +1,9 @@
# Mock Mode Configuration
PULSE_MOCK_MODE=true
PULSE_MOCK_NODES=7
PULSE_MOCK_VMS_PER_NODE=5
PULSE_MOCK_LXCS_PER_NODE=8
PULSE_MOCK_DOCKER_HOSTS=3
PULSE_MOCK_DOCKER_CONTAINERS=12
PULSE_MOCK_RANDOM_METRICS=true
PULSE_MOCK_STOPPED_PERCENT=20

@@ -132,7 +132,8 @@ func Run(ctx context.Context, version string) error {
	go wsHub.Run()

	// Initialize reloadable monitoring system
-	reloadableMonitor, err := monitoring.NewReloadableMonitor(cfg, wsHub)
+	mtPersistence := config.NewMultiTenantPersistence(cfg.DataPath)
+	reloadableMonitor, err := monitoring.NewReloadableMonitor(cfg, mtPersistence, wsHub)
	if err != nil {
		return fmt.Errorf("failed to initialize monitoring system: %w", err)
	}

@@ -181,13 +182,14 @@ func Run(ctx context.Context, version string) error {
		}
		if router != nil {
-			router.SetMonitor(reloadableMonitor.GetMonitor())
+			router.SetMultiTenantMonitor(reloadableMonitor.GetMultiTenantMonitor())
			if cfg := reloadableMonitor.GetConfig(); cfg != nil {
				router.SetConfig(cfg)
			}
		}
		return nil
	}
-	router = api.NewRouter(cfg, reloadableMonitor.GetMonitor(), wsHub, reloadFunc, version)
+	router = api.NewRouter(cfg, reloadableMonitor.GetMonitor(), reloadableMonitor.GetMultiTenantMonitor(), wsHub, reloadFunc, version)

	// Inject resource store into monitor for WebSocket broadcasts
	router.SetMonitor(reloadableMonitor.GetMonitor())

@@ -221,6 +223,7 @@ func Run(ctx context.Context, version string) error {
			log.Error().Err(err).Msg("Failed to reload monitor after mock.env change")
		} else if router != nil {
			router.SetMonitor(reloadableMonitor.GetMonitor())
+			router.SetMultiTenantMonitor(reloadableMonitor.GetMultiTenantMonitor())
			if cfg := reloadableMonitor.GetConfig(); cfg != nil {
				router.SetConfig(cfg)
			}