Pulse/internal/config/config.go
2026-03-18 16:06:30 +00:00

1732 lines
70 KiB
Go

// Package config manages Pulse configuration from multiple sources.
//
// Configuration File Separation:
// - .env: Authentication credentials ONLY (PULSE_AUTH_USER, PULSE_AUTH_PASS)
// - system.json: Application settings (polling interval, timeouts, update settings, etc.)
// - nodes.enc: Encrypted node credentials (PVE/PBS passwords and tokens)
//
// This separation ensures security, clarity, and proper access control.
// See docs/CONFIGURATION.md for detailed documentation.
package config
import (
"encoding/json"
"fmt"
"net"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"time"
"sync"
"github.com/joho/godotenv"
"github.com/rcourtman/pulse-go-rewrite/internal/logging"
"github.com/rcourtman/pulse-go-rewrite/internal/utils"
"github.com/rcourtman/pulse-go-rewrite/pkg/auth"
pkglicensing "github.com/rcourtman/pulse-go-rewrite/pkg/licensing"
"github.com/rcourtman/pulse-go-rewrite/pkg/tlsutil"
"github.com/rs/zerolog/log"
)
// Mu protects concurrent access to the configuration,
// particularly for fields updated by the config watcher (AuthUser, AuthPass, APITokens).
// Readers take RLock; the watcher takes the write lock when swapping values in.
var Mu sync.RWMutex
// Default tuning values for the guest metadata refresh scheduler.
// They seed the corresponding Config fields in Load.
const (
DefaultGuestMetadataMinRefresh = 2 * time.Minute
DefaultGuestMetadataRefreshJitter = 45 * time.Second
DefaultGuestMetadataRetryBackoff = 30 * time.Second
DefaultGuestMetadataMaxConcurrent = 4
)
// Vars for mocking system calls in tests.
// Tests can swap these function values out to avoid touching the real
// filesystem, spawning processes, or opening network connections.
var (
osStat = os.Stat
execCommand = exec.Command
netDial = net.Dial
netInterfaceAddrs = net.InterfaceAddrs
)
// CanonicalUpdateChannel returns the supported v6 update channel if valid.
// Input is matched case-insensitively after trimming whitespace; anything
// other than "stable" or "rc" yields ("", false).
func CanonicalUpdateChannel(channel string) (string, bool) {
	normalized := strings.ToLower(strings.TrimSpace(channel))
	if normalized == "stable" || normalized == "rc" {
		return normalized, true
	}
	return "", false
}
// EffectiveUpdateChannel resolves the runtime update channel using persisted
// settings first and a fallback runtime/config value second. v6 only supports
// stable and rc, and defaults to stable when unset.
func EffectiveUpdateChannel(channel string, fallback string) string {
	// First valid candidate wins; otherwise fall back to "stable".
	for _, candidate := range []string{channel, fallback} {
		if canonical, ok := CanonicalUpdateChannel(candidate); ok {
			return canonical
		}
	}
	return "stable"
}
// EffectiveAutoUpdateEnabled enforces the v6 policy that unattended
// auto-updates only run on the stable channel.
func EffectiveAutoUpdateEnabled(channel string, enabled bool) bool {
	if !enabled {
		return false
	}
	return EffectiveUpdateChannel(channel, "") == "stable"
}
// IsPasswordHashed reports whether password looks like a bcrypt hash.
// Bcrypt hashes start with "$2" (e.g. $2a$, $2b$, $2y$) and are exactly
// 60 characters long. Values in the 55-59 range are logged as likely
// truncated hashes but still treated as plaintext so the user fixes them.
func IsPasswordHashed(password string) bool {
	if !strings.HasPrefix(password, "$2") {
		return false
	}
	switch n := len(password); {
	case n == 60:
		// Well-formed bcrypt hash.
		return true
	case n >= 55 && n < 60:
		// Close enough to a hash that truncation is the likely cause.
		log.Error().
			Int("length", n).
			Msg("Bcrypt hash appears truncated; expected 60 characters; treating value as plaintext")
		return false
	default:
		return false
	}
}
// Config holds all application configuration assembled from .env,
// system.json, and nodes.enc (see the package comment for the file split).
// NOTE: The envconfig tags are legacy and not used - configuration is loaded from encrypted JSON files
type Config struct {
// Server settings
BindAddress string // HTTP listen address (Load defaults this to "0.0.0.0")
FrontendPort int `envconfig:"FRONTEND_PORT" default:"7655"`
ConfigPath string // Directory containing configuration files (the resolved runtime data dir)
DataPath string // Directory containing runtime data (same default as ConfigPath)
AppRoot string `json:"-"` // Root directory of the application (where binary lives)
PublicURL string `envconfig:"PULSE_PUBLIC_URL" default:""` // Full URL to access Pulse (e.g., http://198.51.100.100:7655)
AgentConnectURL string `envconfig:"PULSE_AGENT_CONNECT_URL" default:""` // Dedicated direct connect URL for agents (e.g. http://192.0.2.5:7655)
// Proxmox VE connections
PVEInstances []PVEInstance
// Proxmox Backup Server connections
PBSInstances []PBSInstance
// Proxmox Mail Gateway connections
PMGInstances []PMGInstance
// Monitoring settings
PVEPollingInterval time.Duration `envconfig:"PVE_POLLING_INTERVAL"` // PVE polling interval (10s default)
PBSPollingInterval time.Duration `envconfig:"PBS_POLLING_INTERVAL"` // PBS polling interval (60s default)
PMGPollingInterval time.Duration `envconfig:"PMG_POLLING_INTERVAL"` // PMG polling interval (60s default)
ConnectionTimeout time.Duration `envconfig:"CONNECTION_TIMEOUT" default:"60s"` // Default 60s for slow storage operations
BackupPollingCycles int `envconfig:"BACKUP_POLLING_CYCLES" default:"10"`
BackupPollingInterval time.Duration `envconfig:"BACKUP_POLLING_INTERVAL"`
EnableBackupPolling bool `envconfig:"ENABLE_BACKUP_POLLING" default:"true"`
TemperatureMonitoringEnabled bool `json:"temperatureMonitoringEnabled"`
WebhookBatchDelay time.Duration `envconfig:"WEBHOOK_BATCH_DELAY" default:"10s"`
AdaptivePollingEnabled bool `envconfig:"ADAPTIVE_POLLING_ENABLED" default:"false"`
AdaptivePollingBaseInterval time.Duration `envconfig:"ADAPTIVE_POLLING_BASE_INTERVAL" default:"10s"`
AdaptivePollingMinInterval time.Duration `envconfig:"ADAPTIVE_POLLING_MIN_INTERVAL" default:"5s"`
AdaptivePollingMaxInterval time.Duration `envconfig:"ADAPTIVE_POLLING_MAX_INTERVAL" default:"5m"`
GuestMetadataMinRefreshInterval time.Duration `envconfig:"GUEST_METADATA_MIN_REFRESH_INTERVAL" default:"2m" json:"guestMetadataMinRefreshInterval"`
GuestMetadataRefreshJitter time.Duration `envconfig:"GUEST_METADATA_REFRESH_JITTER" default:"45s" json:"guestMetadataRefreshJitter"`
GuestMetadataRetryBackoff time.Duration `envconfig:"GUEST_METADATA_RETRY_BACKOFF" default:"30s" json:"guestMetadataRetryBackoff"`
GuestMetadataMaxConcurrent int `envconfig:"GUEST_METADATA_MAX_CONCURRENT" default:"4" json:"guestMetadataMaxConcurrent"`
DNSCacheTimeout time.Duration `envconfig:"DNS_CACHE_TIMEOUT" default:"5m" json:"dnsCacheTimeout"`
SSHPort int `envconfig:"SSH_PORT" default:"22" json:"sshPort"` // Default SSH port for temperature monitoring
MaxPollTimeout time.Duration `envconfig:"MAX_POLL_TIMEOUT" default:"3m" json:"-"` // Maximum poll timeout for large clusters (default 3m)
// Metrics retention settings (tiered storage)
// These control how long historical metrics are retained at each aggregation level.
MetricsRetentionRawHours int `json:"metricsRetentionRawHours"` // Raw data (~5s intervals), default: 2 hours
MetricsRetentionMinuteHours int `json:"metricsRetentionMinuteHours"` // Minute averages, default: 24 hours
MetricsRetentionHourlyDays int `json:"metricsRetentionHourlyDays"` // Hourly averages, default: 7 days
MetricsRetentionDailyDays int `json:"metricsRetentionDailyDays"` // Daily averages, default: 90 days
// Logging settings
LogLevel string `envconfig:"LOG_LEVEL" default:"info"`
LogFormat string `envconfig:"LOG_FORMAT" default:"auto"` // "json", "console", or "auto"
LogFile string `envconfig:"LOG_FILE" default:""`
LogMaxSize int `envconfig:"LOG_MAX_SIZE" default:"100"` // MB
LogMaxAge int `envconfig:"LOG_MAX_AGE" default:"30"` // days
LogCompress bool `envconfig:"LOG_COMPRESS" default:"true"`
// Security settings
APIToken string // Not loaded from system.json; kept in sync from APITokens
APITokens []APITokenRecord `json:"-"`
AuthUser string `envconfig:"PULSE_AUTH_USER"` // Authentication username; credentials live in .env (see package comment)
AuthPass string `envconfig:"PULSE_AUTH_PASS"` // Authentication password from .env (see IsPasswordHashed for bcrypt-hash detection)
DemoMode bool `envconfig:"DEMO_MODE" default:"false"` // Read-only demo mode
AllowedOrigins string `envconfig:"ALLOWED_ORIGINS" default:"*"`
HideLocalLogin bool `envconfig:"PULSE_AUTH_HIDE_LOCAL_LOGIN" default:"false"`
DisableDockerUpdateActions bool `envconfig:"PULSE_DISABLE_DOCKER_UPDATE_ACTIONS" default:"false"` // Hide Docker update buttons (read-only mode for containers)
DisableLocalUpgradeMetrics bool `envconfig:"PULSE_DISABLE_LOCAL_UPGRADE_METRICS" default:"false"` // Disable local-only upgrade UX metrics collection
TelemetryEnabled bool `envconfig:"PULSE_TELEMETRY" default:"true"` // Anonymous telemetry enabled by default (install ID, version, resource counts, feature flags — opt out any time)
MultiTenantEnabled bool `envconfig:"PULSE_MULTI_TENANT_ENABLED" default:"false"` // Enable multi-tenant support
MetricsToken string `envconfig:"PULSE_METRICS_TOKEN" default:"" json:"-"` // Bearer token for /metrics endpoint (empty = unauthenticated)
ProTrialSignupURL string `envconfig:"PULSE_PRO_TRIAL_SIGNUP_URL" default:""` // Hosted signup/checkout URL for starting Pulse Pro trials
// Proxy authentication settings
ProxyAuthSecret string `envconfig:"PROXY_AUTH_SECRET"`
ProxyAuthUserHeader string `envconfig:"PROXY_AUTH_USER_HEADER"`
ProxyAuthRoleHeader string `envconfig:"PROXY_AUTH_ROLE_HEADER"`
ProxyAuthRoleSeparator string `envconfig:"PROXY_AUTH_ROLE_SEPARATOR" default:"|"`
ProxyAuthAdminRole string `envconfig:"PROXY_AUTH_ADMIN_ROLE" default:"admin"`
ProxyAuthLogoutURL string `envconfig:"PROXY_AUTH_LOGOUT_URL"`
// HTTPS/TLS settings
HTTPSEnabled bool `envconfig:"HTTPS_ENABLED" default:"false"`
TLSCertFile string `envconfig:"TLS_CERT_FILE" default:""`
TLSKeyFile string `envconfig:"TLS_KEY_FILE" default:""`
HTTPRedirectPort int `envconfig:"HTTP_REDIRECT_PORT" default:"0"` // When HTTPS is enabled, start an HTTP listener on this port that redirects to HTTPS (0 = disabled)
// Update settings
UpdateChannel string // "stable" or "rc" (see CanonicalUpdateChannel)
AutoUpdateEnabled bool // Only honored on the stable channel (see EffectiveAutoUpdateEnabled)
AutoUpdateCheckInterval time.Duration // Persisted as whole hours in system.json
AutoUpdateTime string
// Discovery settings
DiscoveryEnabled bool `envconfig:"DISCOVERY_ENABLED" default:"false"`
DiscoverySubnet string `envconfig:"DISCOVERY_SUBNET" default:"auto"`
Discovery DiscoveryConfig `json:"discoveryConfig"` // Structured discovery overrides (normalized on load)
// Track which settings are overridden by environment variables
EnvOverrides map[string]bool `json:"-"`
}
// DiscoveryConfig captures overrides for network discovery behaviour.
// Zero/empty fields fall back to DefaultDiscoveryConfig values via
// NormalizeDiscoveryConfig.
type DiscoveryConfig struct {
EnvironmentOverride string `json:"environment_override,omitempty"` // Canonical environment token (see CanonicalDiscoveryEnvironment); "auto" by default
SubnetAllowlist []string `json:"subnet_allowlist,omitempty"` // CIDRs to scan; empty means no restriction
SubnetBlocklist []string `json:"subnet_blocklist,omitempty"` // CIDRs to skip; defaults include the link-local range
IPBlocklist []string `json:"ip_blocklist,omitempty"` // Individual IPs to skip (auto-populated with configured Proxmox hosts)
MaxHostsPerScan int `json:"max_hosts_per_scan,omitempty"` // Upper bound on hosts per scan (default 1024)
MaxConcurrent int `json:"max_concurrent,omitempty"` // Concurrent probe limit (default 50)
EnableReverseDNS bool `json:"enable_reverse_dns"`
ScanGateways bool `json:"scan_gateways"`
DialTimeout int `json:"dial_timeout_ms,omitempty"` // Dial timeout in milliseconds (default 1000)
HTTPTimeout int `json:"http_timeout_ms,omitempty"` // HTTP probe timeout in milliseconds (default 2000)
}
// DefaultDiscoveryConfig returns opinionated defaults for discovery behaviour.
func DefaultDiscoveryConfig() DiscoveryConfig {
	cfg := DiscoveryConfig{
		EnvironmentOverride: "auto",
		// Block the IPv4 link-local range by default; no allowlist (nil).
		SubnetBlocklist:  []string{"169.254.0.0/16"},
		MaxHostsPerScan:  1024,
		MaxConcurrent:    50,
		EnableReverseDNS: true,
		ScanGateways:     true,
		DialTimeout:      1000,
		HTTPTimeout:      2000,
	}
	return cfg
}
// CloneDiscoveryConfig returns a deep copy of the provided discovery config.
// Nil slices stay nil in the clone; non-nil slices get fresh backing arrays.
func CloneDiscoveryConfig(cfg DiscoveryConfig) DiscoveryConfig {
	dup := func(src []string) []string {
		if src == nil {
			return nil
		}
		return append([]string(nil), src...)
	}
	out := cfg
	out.SubnetAllowlist = dup(cfg.SubnetAllowlist)
	out.SubnetBlocklist = dup(cfg.SubnetBlocklist)
	out.IPBlocklist = dup(cfg.IPBlocklist)
	return out
}
// DeepCopy creates a deep copy of the Config struct.
// This is important for tenant isolation: each tenant must own independent
// slices/maps so mutations never leak between configs.
func (c *Config) DeepCopy() *Config {
	if c == nil {
		return nil
	}
	// Shallow copy first, then replace every reference-typed field.
	clone := *c
	if len(c.PVEInstances) > 0 {
		clone.PVEInstances = append([]PVEInstance(nil), c.PVEInstances...)
	}
	if len(c.PBSInstances) > 0 {
		clone.PBSInstances = append([]PBSInstance(nil), c.PBSInstances...)
	}
	if len(c.PMGInstances) > 0 {
		clone.PMGInstances = append([]PMGInstance(nil), c.PMGInstances...)
	}
	// API tokens provide their own deep-clone method.
	if len(c.APITokens) > 0 {
		tokens := make([]APITokenRecord, len(c.APITokens))
		for i := range c.APITokens {
			tokens[i] = c.APITokens[i].Clone()
		}
		clone.APITokens = tokens
	}
	if len(c.EnvOverrides) > 0 {
		overrides := make(map[string]bool, len(c.EnvOverrides))
		for key, val := range c.EnvOverrides {
			overrides[key] = val
		}
		clone.EnvOverrides = overrides
	}
	clone.Discovery = CloneDiscoveryConfig(c.Discovery)
	return &clone
}
// NormalizeDiscoveryConfig ensures a discovery config contains sane values and defaults.
func NormalizeDiscoveryConfig(cfg DiscoveryConfig) DiscoveryConfig {
	defaults := DefaultDiscoveryConfig()
	result := CloneDiscoveryConfig(cfg)
	// Replace an unrecognised environment override with the default ("auto").
	canonical, ok := CanonicalDiscoveryEnvironment(result.EnvironmentOverride)
	if !ok {
		log.Warn().
			Str("environment", result.EnvironmentOverride).
			Msg("Unknown discovery environment override detected; falling back to auto")
		canonical = defaults.EnvironmentOverride
	}
	result.EnvironmentOverride = canonical
	// An empty allowlist is persisted as [] (no restriction); a missing
	// blocklist falls back to the default link-local range.
	result.SubnetAllowlist = sanitizeCIDRList(result.SubnetAllowlist)
	if result.SubnetAllowlist == nil {
		result.SubnetAllowlist = []string{}
	}
	result.SubnetBlocklist = sanitizeCIDRList(result.SubnetBlocklist)
	if result.SubnetBlocklist == nil {
		result.SubnetBlocklist = append([]string(nil), defaults.SubnetBlocklist...)
	}
	// Clamp non-positive tuning knobs back to their defaults.
	if result.MaxHostsPerScan < 1 {
		result.MaxHostsPerScan = defaults.MaxHostsPerScan
	}
	if result.MaxConcurrent < 1 {
		result.MaxConcurrent = defaults.MaxConcurrent
	}
	if result.DialTimeout < 1 {
		result.DialTimeout = defaults.DialTimeout
	}
	if result.HTTPTimeout < 1 {
		result.HTTPTimeout = defaults.HTTPTimeout
	}
	return result
}
// sanitizeCIDRList trims whitespace from every entry and drops blanks and
// duplicates (first occurrence wins). A nil/empty input yields nil; a
// non-empty input always yields a non-nil (possibly empty) slice.
func sanitizeCIDRList(values []string) []string {
	if len(values) == 0 {
		return nil
	}
	seen := make(map[string]struct{}, len(values))
	result := []string{}
	for _, value := range values {
		trimmed := strings.TrimSpace(value)
		if trimmed == "" {
			continue
		}
		if _, dup := seen[trimmed]; dup {
			continue
		}
		seen[trimmed] = struct{}{}
		result = append(result, trimmed)
	}
	return result
}
// UnmarshalJSON decodes the persisted snake_case discovery config format.
// Fields absent from the payload keep DefaultDiscoveryConfig values, and
// the decoded result is run through NormalizeDiscoveryConfig before being
// assigned to d. Returns an error only when the raw JSON cannot be decoded.
func (d *DiscoveryConfig) UnmarshalJSON(data []byte) error {
	// Pointer fields distinguish "absent from payload" from "explicit zero".
	type payload struct {
		EnvironmentOverride *string   `json:"environment_override"`
		SubnetAllowlist     *[]string `json:"subnet_allowlist"`
		SubnetBlocklist     *[]string `json:"subnet_blocklist"`
		IPBlocklist         *[]string `json:"ip_blocklist"`
		MaxHostsPerScan     *int      `json:"max_hosts_per_scan"`
		MaxConcurrent       *int      `json:"max_concurrent"`
		EnableReverseDNS    *bool     `json:"enable_reverse_dns"`
		ScanGateways        *bool     `json:"scan_gateways"`
		DialTimeout         *int      `json:"dial_timeout_ms"`
		HTTPTimeout         *int      `json:"http_timeout_ms"`
	}
	var in payload
	if err := json.Unmarshal(data, &in); err != nil {
		return fmt.Errorf("unmarshal discovery config: %w", err)
	}
	cfg := DefaultDiscoveryConfig()
	if in.EnvironmentOverride != nil {
		cfg.EnvironmentOverride = strings.TrimSpace(*in.EnvironmentOverride)
	}
	// An absent allowlist means "no restriction", represented as an empty list.
	if in.SubnetAllowlist != nil {
		cfg.SubnetAllowlist = sanitizeCIDRList(*in.SubnetAllowlist)
	} else {
		cfg.SubnetAllowlist = []string{}
	}
	// An absent blocklist keeps the defaults (link-local range).
	if in.SubnetBlocklist != nil {
		cfg.SubnetBlocklist = sanitizeCIDRList(*in.SubnetBlocklist)
	}
	// Bug fix: ip_blocklist was previously missing from the payload struct,
	// so persisted IP exclusions (see the DiscoveryConfig json tags) were
	// silently dropped on every load. Decode it so they survive a restart.
	if in.IPBlocklist != nil {
		cfg.IPBlocklist = sanitizeCIDRList(*in.IPBlocklist)
	}
	if in.MaxHostsPerScan != nil {
		cfg.MaxHostsPerScan = *in.MaxHostsPerScan
	}
	if in.MaxConcurrent != nil {
		cfg.MaxConcurrent = *in.MaxConcurrent
	}
	if in.EnableReverseDNS != nil {
		cfg.EnableReverseDNS = *in.EnableReverseDNS
	}
	if in.ScanGateways != nil {
		cfg.ScanGateways = *in.ScanGateways
	}
	if in.DialTimeout != nil {
		cfg.DialTimeout = *in.DialTimeout
	}
	if in.HTTPTimeout != nil {
		cfg.HTTPTimeout = *in.HTTPTimeout
	}
	*d = NormalizeDiscoveryConfig(cfg)
	return nil
}
// IsValidDiscoveryEnvironment reports whether the supplied override is recognised.
func IsValidDiscoveryEnvironment(value string) bool {
	if _, ok := CanonicalDiscoveryEnvironment(value); ok {
		return true
	}
	return false
}
// CanonicalDiscoveryEnvironment returns the canonical discovery environment token.
// Matching is case-insensitive after trimming; an empty value maps to "auto".
// Only canonical v6 hyphen-separated tokens are accepted.
func CanonicalDiscoveryEnvironment(value string) (string, bool) {
	token := strings.ToLower(strings.TrimSpace(value))
	if token == "" {
		return "auto", true
	}
	switch token {
	case "auto", "native", "docker-host", "docker-bridge", "lxc-privileged", "lxc-unprivileged":
		return token, true
	}
	return "", false
}
// splitAndTrim splits a comma-separated string into trimmed, non-empty
// pieces. The result is always non-nil; an empty or all-blank input yields
// an empty slice.
func splitAndTrim(value string) []string {
	result := []string{}
	for _, piece := range strings.Split(value, ",") {
		if trimmed := strings.TrimSpace(piece); trimmed != "" {
			result = append(result, trimmed)
		}
	}
	return result
}
// parsePortOverride parses value as a TCP/UDP port number (1-65535).
// envVar names the environment variable being parsed and is used only in
// error messages.
func parsePortOverride(envVar, value string) (int, error) {
	port, err := strconv.Atoi(strings.TrimSpace(value))
	switch {
	case err != nil:
		return 0, fmt.Errorf("%s must be an integer", envVar)
	case port < 1 || port > 65535:
		return 0, fmt.Errorf("%s must be between 1 and 65535", envVar)
	}
	return port, nil
}
func parseConnectionTimeoutOverride(value string) (time.Duration, error) {
raw := strings.TrimSpace(value)
if raw == "" {
return 0, fmt.Errorf("connection timeout value cannot be empty")
}
if looksLikeNumericSeconds(raw) {
return time.ParseDuration(raw + "s")
}
return time.ParseDuration(raw)
}
// looksLikeNumericSeconds reports whether value is a bare decimal number:
// an optional leading sign followed by digits containing at most one
// decimal point. No exponent or unit suffix is accepted.
func looksLikeNumericSeconds(value string) bool {
	if value == "" {
		return false
	}
	if value[0] == '+' || value[0] == '-' {
		value = value[1:]
		if value == "" {
			return false
		}
	}
	dots := 0
	for i := 0; i < len(value); i++ {
		switch c := value[i]; {
		case c == '.':
			dots++
			if dots > 1 {
				return false
			}
		case c < '0' || c > '9':
			return false
		}
	}
	return true
}
// PVEInstance represents a Proxmox VE connection.
// Authentication uses either User/Password or TokenName/TokenValue.
type PVEInstance struct {
Name string // Identifier for this connection
Host string // Primary endpoint (user-provided)
GuestURL string // Optional guest-accessible URL (for navigation)
User string // Username for password-based authentication
Password string // Password for password-based authentication
TokenName string // API token name (token-based authentication)
TokenValue string // API token secret (token-based authentication)
Fingerprint string // TLS certificate fingerprint (see ClusterEndpoint.Fingerprint)
VerifySSL bool // Verify the server TLS certificate
MonitorVMs bool
MonitorContainers bool
MonitorStorage bool
MonitorBackups bool
MonitorPhysicalDisks *bool // Monitor physical disks (nil = enabled by default, can be explicitly disabled)
PhysicalDiskPollingMinutes int // How often to poll physical disks (0 = use default)
TemperatureMonitoringEnabled *bool // Monitor temperature via SSH (nil = use global setting, true/false = override)
SSHPort int // SSH port for temperature monitoring (0 = use global default)
// Cluster support
IsCluster bool // True if this is a cluster
ClusterName string // Cluster name if applicable
ClusterEndpoints []ClusterEndpoint // All discovered cluster nodes
// Agent tracking
Source string // "agent" or "script" - how this node was registered (empty = legacy/manual)
DisableCeph bool // Disable Ceph status polling for this instance
}
// ClusterEndpoint represents a single node in a cluster.
// Use EffectiveIP to resolve the address, honouring IPOverride.
type ClusterEndpoint struct {
NodeID string // Node ID in cluster
NodeName string // Node name
Host string // Full URL (e.g., https://node1.lan:8006)
GuestURL string // Optional guest-accessible URL (for navigation)
IP string // IP address (auto-discovered from cluster)
IPOverride string // User-specified IP override (takes precedence over IP if set)
Fingerprint string // TLS certificate fingerprint (SHA256, auto-captured via TOFU)
Online bool // Current online status from Proxmox
LastSeen time.Time // Last successful connection
PulseReachable *bool // Pulse's view: can Pulse reach this endpoint? nil = not yet checked
LastPulseCheck *time.Time // Last time Pulse checked connectivity
PulseError string // Last error Pulse encountered connecting to this endpoint
}
// EffectiveIP returns the IP to use for this endpoint, preferring IPOverride if set.
func (e ClusterEndpoint) EffectiveIP() string {
	if override := e.IPOverride; override != "" {
		return override
	}
	return e.IP
}
// PBSInstance represents a Proxmox Backup Server connection.
// Authentication uses either User/Password or TokenName/TokenValue.
type PBSInstance struct {
Name string // Identifier for this connection
Host string // Server endpoint
GuestURL string // Optional guest-accessible URL (for navigation)
User string // Username for password-based authentication
Password string // Password for password-based authentication
TokenName string // API token name (token-based authentication)
TokenValue string // API token secret (token-based authentication)
Fingerprint string // TLS certificate fingerprint (see ClusterEndpoint.Fingerprint)
VerifySSL bool // Verify the server TLS certificate
MonitorBackups bool
MonitorDatastores bool
MonitorSyncJobs bool
MonitorVerifyJobs bool
MonitorPruneJobs bool
MonitorGarbageJobs bool
TemperatureMonitoringEnabled *bool // Monitor temperature via SSH (nil = use global setting, true/false = override)
SSHPort int // SSH port for temperature monitoring (0 = use global default)
// Agent tracking
Source string // "agent" or "script" - how this node was registered (empty = legacy/manual)
DisableCeph bool // Disable Ceph status polling for this instance
// Datastore exclusion (for unmounted/removable datastores that cause log noise)
ExcludeDatastores []string
}
// PMGInstance represents a Proxmox Mail Gateway connection.
// Authentication uses either User/Password or TokenName/TokenValue.
type PMGInstance struct {
Name string // Identifier for this connection
Host string // Server endpoint
GuestURL string // Optional guest-accessible URL (for navigation)
User string // Username for password-based authentication
Password string // Password for password-based authentication
TokenName string // API token name (token-based authentication)
TokenValue string // API token secret (token-based authentication)
Fingerprint string // TLS certificate fingerprint (see ClusterEndpoint.Fingerprint)
VerifySSL bool // Verify the server TLS certificate
MonitorMailStats bool
MonitorQueues bool
MonitorQuarantine bool
MonitorDomainStats bool
TemperatureMonitoringEnabled *bool // Monitor temperature via SSH (nil = use global setting, true/false = override)
SSHPort int // SSH port for temperature monitoring (0 = use global default)
}
// globalPersistence is the package-level persistence handle, set during Load
// and used when saving configuration back to disk.
var globalPersistence *ConfigPersistence
// defaultDataDir is the fallback config/data directory when PULSE_DATA_DIR is not set.
// Kept as a var to allow tests to override without requiring /etc/pulse to exist.
var defaultDataDir = "/etc/pulse"
// ResolveRuntimeDataDir returns the canonical runtime data directory for Pulse.
// Callers may provide an explicit owner path; otherwise the resolved directory
// comes from PULSE_DATA_DIR and finally the package default
// (defaultDataDir, normally /etc/pulse).
func ResolveRuntimeDataDir(explicitPath string) string {
return utils.ResolveDataDirWithDefault(explicitPath, defaultDataDir)
}
// parsePollingIntervalEnv reads an environment variable that represents a polling
// interval (accepts Go duration strings like "30s" or plain integer seconds),
// validates that it meets a minimum, and returns the parsed duration. Returns
// (0, false) when the env var is unset or the value is invalid/below minimum.
func parsePollingIntervalEnv(envName string, minDuration time.Duration) (time.Duration, bool) {
	raw := utils.GetenvTrim(envName)
	if raw == "" {
		return 0, false
	}
	// Log messages label the interval by the env var minus its suffix.
	label := strings.TrimSuffix(envName, "_POLLING_INTERVAL")
	// Duration syntax takes precedence over plain integer seconds.
	if dur, err := time.ParseDuration(raw); err == nil {
		if dur < minDuration {
			log.Warn().Dur("interval", dur).Msgf("Ignoring %s below %s from environment", envName, minDuration)
			return 0, false
		}
		log.Info().Dur("interval", dur).Msgf("Overriding %s polling interval from environment", label)
		return dur, true
	}
	if seconds, err := strconv.Atoi(raw); err == nil {
		dur := time.Duration(seconds) * time.Second
		if dur < minDuration {
			log.Warn().Int("seconds", seconds).Msgf("Ignoring %s below %s from environment", envName, minDuration)
			return 0, false
		}
		log.Info().Int("seconds", seconds).Msgf("Overriding %s polling interval (seconds) from environment", label)
		return dur, true
	}
	log.Warn().Str("value", raw).Msgf("Invalid %s value, expected duration or seconds", envName)
	return 0, false
}
// Load reads configuration from encrypted persistence files
func Load() (*Config, error) {
// Get data directory from environment
dataDir := ResolveRuntimeDataDir("")
// Load .env file if it exists (for deployment overrides)
envFile := filepath.Join(dataDir, ".env")
if _, err := os.Stat(envFile); err == nil {
if err := godotenv.Load(envFile); err != nil {
log.Warn().Err(err).Str("file", envFile).Msg("Failed to load .env file")
} else {
log.Info().Str("file", envFile).Msg("Loaded .env file for deployment overrides")
}
}
// Also try loading from current directory for development
if err := godotenv.Load(); err == nil {
log.Info().Msg("Loaded configuration from .env in current directory")
}
// Initialize config with defaults
cfg := &Config{
BindAddress: "0.0.0.0",
FrontendPort: 7655,
ConfigPath: dataDir,
DataPath: dataDir,
AppRoot: detectAppRoot(),
ConnectionTimeout: 60 * time.Second,
BackupPollingCycles: 10,
BackupPollingInterval: 0,
EnableBackupPolling: true,
PVEPollingInterval: 10 * time.Second,
AdaptivePollingEnabled: false,
AdaptivePollingBaseInterval: 10 * time.Second,
AdaptivePollingMinInterval: 5 * time.Second,
AdaptivePollingMaxInterval: 5 * time.Minute,
GuestMetadataMinRefreshInterval: DefaultGuestMetadataMinRefresh,
GuestMetadataRefreshJitter: DefaultGuestMetadataRefreshJitter,
GuestMetadataRetryBackoff: DefaultGuestMetadataRetryBackoff,
GuestMetadataMaxConcurrent: DefaultGuestMetadataMaxConcurrent,
DNSCacheTimeout: 5 * time.Minute, // Default DNS cache timeout
LogLevel: "info",
LogFormat: "auto",
LogMaxSize: 100,
LogMaxAge: 30,
LogCompress: true,
AllowedOrigins: "", // Empty means no CORS headers (same-origin only)
PBSPollingInterval: 60 * time.Second, // Default PBS polling (slower)
PMGPollingInterval: 60 * time.Second, // Default PMG polling (aggregated stats)
DiscoveryEnabled: false,
DiscoverySubnet: "auto",
TemperatureMonitoringEnabled: true,
TelemetryEnabled: true, // Enabled by default; opt out via PULSE_TELEMETRY=false or Settings
MaxPollTimeout: 3 * time.Minute, // Default max poll timeout for large clusters
ProTrialSignupURL: pkglicensing.DefaultProTrialSignupURL,
EnvOverrides: make(map[string]bool),
AgentConnectURL: "",
// Metrics retention defaults (tiered)
MetricsRetentionRawHours: 2, // 2 hours of raw ~5s data
MetricsRetentionMinuteHours: 24, // 24 hours of minute averages
MetricsRetentionHourlyDays: 7, // 7 days of hourly averages
MetricsRetentionDailyDays: 90, // 90 days of daily averages
}
cfg.Discovery = DefaultDiscoveryConfig()
// Initialize persistence
persistence := NewConfigPersistence(dataDir)
if persistence != nil {
// Store global persistence for saving
globalPersistence = persistence
// Load nodes configuration
if nodesConfig, err := persistence.LoadNodesConfig(); err == nil && nodesConfig != nil {
cfg.PVEInstances = nodesConfig.PVEInstances
cfg.PBSInstances = nodesConfig.PBSInstances
cfg.PMGInstances = nodesConfig.PMGInstances
log.Info().
Int("pve", len(cfg.PVEInstances)).
Int("pbs", len(cfg.PBSInstances)).
Int("pmg", len(cfg.PMGInstances)).
Msg("Loaded nodes configuration")
} else if err != nil {
log.Warn().Err(err).Msg("Failed to load nodes configuration")
}
// Load system configuration
if systemSettings, err := persistence.LoadSystemSettings(); err == nil && systemSettings != nil {
// Load polling intervals if configured
if systemSettings.PVEPollingInterval > 0 {
cfg.PVEPollingInterval = time.Duration(systemSettings.PVEPollingInterval) * time.Second
}
if systemSettings.PBSPollingInterval > 0 {
cfg.PBSPollingInterval = time.Duration(systemSettings.PBSPollingInterval) * time.Second
}
if systemSettings.PMGPollingInterval > 0 {
cfg.PMGPollingInterval = time.Duration(systemSettings.PMGPollingInterval) * time.Second
}
if systemSettings.BackupPollingInterval > 0 {
cfg.BackupPollingInterval = time.Duration(systemSettings.BackupPollingInterval) * time.Second
} else if systemSettings.BackupPollingInterval == 0 {
cfg.BackupPollingInterval = 0
}
if systemSettings.BackupPollingEnabled != nil {
cfg.EnableBackupPolling = *systemSettings.BackupPollingEnabled
}
if systemSettings.AdaptivePollingEnabled != nil {
cfg.AdaptivePollingEnabled = *systemSettings.AdaptivePollingEnabled
}
if systemSettings.AdaptivePollingBaseInterval > 0 {
cfg.AdaptivePollingBaseInterval = time.Duration(systemSettings.AdaptivePollingBaseInterval) * time.Second
}
if systemSettings.AdaptivePollingMinInterval > 0 {
cfg.AdaptivePollingMinInterval = time.Duration(systemSettings.AdaptivePollingMinInterval) * time.Second
}
if systemSettings.AdaptivePollingMaxInterval > 0 {
cfg.AdaptivePollingMaxInterval = time.Duration(systemSettings.AdaptivePollingMaxInterval) * time.Second
}
cfg.UpdateChannel = EffectiveUpdateChannel(systemSettings.UpdateChannel, cfg.UpdateChannel)
cfg.AutoUpdateEnabled = EffectiveAutoUpdateEnabled(cfg.UpdateChannel, systemSettings.AutoUpdateEnabled)
if systemSettings.AutoUpdateCheckInterval > 0 {
cfg.AutoUpdateCheckInterval = time.Duration(systemSettings.AutoUpdateCheckInterval) * time.Hour
}
if systemSettings.AutoUpdateTime != "" {
cfg.AutoUpdateTime = systemSettings.AutoUpdateTime
}
if systemSettings.AllowedOrigins != "" {
cfg.AllowedOrigins = systemSettings.AllowedOrigins
}
if systemSettings.ConnectionTimeout > 0 {
cfg.ConnectionTimeout = time.Duration(systemSettings.ConnectionTimeout) * time.Second
}
if systemSettings.LogLevel != "" {
cfg.LogLevel = systemSettings.LogLevel
}
// Always load DiscoveryEnabled even if false
cfg.DiscoveryEnabled = systemSettings.DiscoveryEnabled
if systemSettings.DiscoverySubnet != "" {
cfg.DiscoverySubnet = systemSettings.DiscoverySubnet
}
cfg.Discovery = NormalizeDiscoveryConfig(CloneDiscoveryConfig(systemSettings.DiscoveryConfig))
cfg.TemperatureMonitoringEnabled = systemSettings.TemperatureMonitoringEnabled
// Load DNS cache timeout
if systemSettings.DNSCacheTimeout > 0 {
cfg.DNSCacheTimeout = time.Duration(systemSettings.DNSCacheTimeout) * time.Second
}
// Load SSH port
if systemSettings.SSHPort > 0 {
cfg.SSHPort = systemSettings.SSHPort
} else {
cfg.SSHPort = 22 // Default SSH port
}
// Load HideLocalLogin
cfg.HideLocalLogin = systemSettings.HideLocalLogin
// Load DisableDockerUpdateActions (hide Docker update buttons)
cfg.DisableDockerUpdateActions = systemSettings.DisableDockerUpdateActions
// Load DisableLocalUpgradeMetrics (privacy: local-only upgrade UX metrics)
cfg.DisableLocalUpgradeMetrics = systemSettings.DisableLocalUpgradeMetrics
// Load TelemetryEnabled (enabled by default; nil means true for upgrading users)
if systemSettings.TelemetryEnabled != nil {
cfg.TelemetryEnabled = *systemSettings.TelemetryEnabled
} else {
cfg.TelemetryEnabled = true // default: enabled
}
// Load PublicURL from settings (will be overridden by env var if set)
if systemSettings.PublicURL != "" {
cfg.PublicURL = systemSettings.PublicURL
}
// Load metrics retention settings (only override if explicitly set)
if systemSettings.MetricsRetentionRawHours > 0 {
cfg.MetricsRetentionRawHours = systemSettings.MetricsRetentionRawHours
}
if systemSettings.MetricsRetentionMinuteHours > 0 {
cfg.MetricsRetentionMinuteHours = systemSettings.MetricsRetentionMinuteHours
}
if systemSettings.MetricsRetentionHourlyDays > 0 {
cfg.MetricsRetentionHourlyDays = systemSettings.MetricsRetentionHourlyDays
}
if systemSettings.MetricsRetentionDailyDays > 0 {
cfg.MetricsRetentionDailyDays = systemSettings.MetricsRetentionDailyDays
}
// APIToken is not loaded from system.json; it is kept in sync from APITokens.
log.Info().
Str("updateChannel", cfg.UpdateChannel).
Str("logLevel", cfg.LogLevel).
Dur("dnsCacheTimeout", cfg.DNSCacheTimeout).
Int("metricsRetentionDailyDays", cfg.MetricsRetentionDailyDays).
Msg("Loaded system configuration")
} else {
// No system.json exists - create default one
log.Info().Msg("No system.json found, creating default")
defaultSettings := DefaultSystemSettings()
defaultSettings.ConnectionTimeout = int(cfg.ConnectionTimeout.Seconds())
if err := persistence.SaveSystemSettings(*defaultSettings); err != nil {
log.Warn().Err(err).Msg("Failed to create default system.json")
}
}
}
// Load API tokens
if tokens, err := persistence.LoadAPITokens(); err == nil {
if migrated := bindMissingAPITokenIDs(tokens); migrated > 0 {
if err := persistence.SaveAPITokens(tokens); err != nil {
log.Error().Err(err).Int("count", migrated).Msg("Failed to persist API token ID migration")
} else {
log.Warn().Int("count", migrated).Msg("Migrated API tokens missing IDs")
}
}
if migrated := bindLegacyAPITokensToDefault(tokens); migrated > 0 {
if err := persistence.SaveAPITokens(tokens); err != nil {
log.Error().Err(err).Int("count", migrated).Msg("Failed to persist legacy API token org binding migration")
} else {
log.Warn().Int("count", migrated).Msg("Migrated legacy API tokens to default organization binding")
}
}
cfg.APITokens = tokens
cfg.SortAPITokens()
log.Info().Int("count", len(tokens)).Msg("Loaded API tokens from persistence")
} else {
log.Warn().Err(err).Msg("Failed to load API tokens from persistence")
}
// Ensure polling intervals have sane defaults if not set
if cfg.PVEPollingInterval <= 0 {
cfg.PVEPollingInterval = 10 * time.Second
}
if cfg.PVEPollingInterval > time.Hour {
cfg.PVEPollingInterval = time.Hour
}
if cfg.PBSPollingInterval == 0 {
cfg.PBSPollingInterval = 60 * time.Second
}
if cfg.PMGPollingInterval == 0 {
cfg.PMGPollingInterval = 60 * time.Second
}
// Limited environment variable support
// NOTE: Node configuration is NOT done via env vars - use the web UI instead
if cyclesStr := utils.GetenvTrim("BACKUP_POLLING_CYCLES"); cyclesStr != "" {
if cycles, err := strconv.Atoi(cyclesStr); err == nil {
if cycles < 0 {
log.Warn().Str("value", cyclesStr).Msg("Ignoring negative BACKUP_POLLING_CYCLES from environment")
} else {
cfg.BackupPollingCycles = cycles
cfg.EnvOverrides["BACKUP_POLLING_CYCLES"] = true
log.Info().Int("cycles", cycles).Msg("Overriding backup polling cycles from environment")
}
} else {
log.Warn().Str("value", cyclesStr).Msg("Invalid BACKUP_POLLING_CYCLES value, ignoring")
}
}
if intervalStr := utils.GetenvTrim("BACKUP_POLLING_INTERVAL"); intervalStr != "" {
if dur, err := time.ParseDuration(intervalStr); err == nil {
if dur < 0 {
log.Warn().Str("value", intervalStr).Msg("Ignoring negative BACKUP_POLLING_INTERVAL from environment")
} else {
cfg.BackupPollingInterval = dur
cfg.EnvOverrides["BACKUP_POLLING_INTERVAL"] = true
log.Info().Dur("interval", dur).Msg("Overriding backup polling interval from environment")
}
} else if seconds, err := strconv.Atoi(intervalStr); err == nil {
if seconds < 0 {
log.Warn().Str("value", intervalStr).Msg("Ignoring negative BACKUP_POLLING_INTERVAL (seconds) from environment")
} else {
cfg.BackupPollingInterval = time.Duration(seconds) * time.Second
cfg.EnvOverrides["BACKUP_POLLING_INTERVAL"] = true
log.Info().Int("seconds", seconds).Msg("Overriding backup polling interval (seconds) from environment")
}
} else {
log.Warn().Str("value", intervalStr).Msg("Invalid BACKUP_POLLING_INTERVAL value, expected duration or seconds")
}
}
if dur, ok := parsePollingIntervalEnv("PVE_POLLING_INTERVAL", 10*time.Second); ok {
cfg.PVEPollingInterval = dur
cfg.EnvOverrides["PVE_POLLING_INTERVAL"] = true
}
if dur, ok := parsePollingIntervalEnv("PBS_POLLING_INTERVAL", 10*time.Second); ok {
cfg.PBSPollingInterval = dur
cfg.EnvOverrides["PBS_POLLING_INTERVAL"] = true
}
if dur, ok := parsePollingIntervalEnv("PMG_POLLING_INTERVAL", 10*time.Second); ok {
cfg.PMGPollingInterval = dur
cfg.EnvOverrides["PMG_POLLING_INTERVAL"] = true
}
if enabledStr := utils.GetenvTrim("ENABLE_TEMPERATURE_MONITORING"); enabledStr != "" {
if enabled, err := strconv.ParseBool(enabledStr); err == nil {
cfg.TemperatureMonitoringEnabled = enabled
cfg.EnvOverrides["temperatureMonitoringEnabled"] = true
log.Info().
Bool("enabled", enabled).
Msg("Overriding temperature monitoring setting from environment")
} else {
log.Warn().
Str("value", enabledStr).
Msg("Invalid ENABLE_TEMPERATURE_MONITORING value, ignoring")
}
}
if hideLocalLoginStr := utils.GetenvTrim("PULSE_AUTH_HIDE_LOCAL_LOGIN"); hideLocalLoginStr != "" {
if hide, err := strconv.ParseBool(hideLocalLoginStr); err == nil {
cfg.HideLocalLogin = hide
cfg.EnvOverrides["PULSE_AUTH_HIDE_LOCAL_LOGIN"] = true
log.Info().Bool("hide", hide).Msg("Overriding hide local login setting from environment")
} else {
log.Warn().Str("value", hideLocalLoginStr).Msg("Invalid PULSE_AUTH_HIDE_LOCAL_LOGIN value, ignoring")
}
}
if disableDockerUpdateActionsStr := utils.GetenvTrim("PULSE_DISABLE_DOCKER_UPDATE_ACTIONS"); disableDockerUpdateActionsStr != "" {
if disabled, err := strconv.ParseBool(disableDockerUpdateActionsStr); err == nil {
cfg.DisableDockerUpdateActions = disabled
cfg.EnvOverrides["PULSE_DISABLE_DOCKER_UPDATE_ACTIONS"] = true
cfg.EnvOverrides["disableDockerUpdateActions"] = true
log.Info().Bool("disabled", disabled).Msg("Overriding Docker update actions setting from environment")
} else {
log.Warn().
Str("value", disableDockerUpdateActionsStr).
Msg("Invalid PULSE_DISABLE_DOCKER_UPDATE_ACTIONS value, ignoring")
}
}
if disableLocalUpgradeMetricsStr := utils.GetenvTrim("PULSE_DISABLE_LOCAL_UPGRADE_METRICS"); disableLocalUpgradeMetricsStr != "" {
if disabled, err := strconv.ParseBool(disableLocalUpgradeMetricsStr); err == nil {
cfg.DisableLocalUpgradeMetrics = disabled
cfg.EnvOverrides["PULSE_DISABLE_LOCAL_UPGRADE_METRICS"] = true
cfg.EnvOverrides["disableLocalUpgradeMetrics"] = true
log.Info().Bool("disabled", disabled).Msg("Overriding local upgrade metrics setting from environment")
} else {
log.Warn().Str("value", disableLocalUpgradeMetricsStr).Msg("Invalid PULSE_DISABLE_LOCAL_UPGRADE_METRICS value, ignoring")
}
}
if telemetryStr := utils.GetenvTrim("PULSE_TELEMETRY"); telemetryStr != "" {
if enabled, err := strconv.ParseBool(telemetryStr); err == nil {
cfg.TelemetryEnabled = enabled
cfg.EnvOverrides["PULSE_TELEMETRY"] = true
cfg.EnvOverrides["telemetryEnabled"] = true
log.Info().Bool("enabled", enabled).Msg("Overriding telemetry setting from environment")
} else {
log.Warn().Str("value", telemetryStr).Msg("Invalid PULSE_TELEMETRY value, ignoring")
}
}
if trialSignupURL := utils.GetenvTrim(pkglicensing.ProTrialSignupURLEnvVar); trialSignupURL != "" {
resolved := pkglicensing.ResolveProTrialSignupURL(trialSignupURL)
if resolved != pkglicensing.DefaultProTrialSignupURL {
cfg.ProTrialSignupURL = resolved
cfg.EnvOverrides[pkglicensing.ProTrialSignupURLEnvVar] = true
cfg.EnvOverrides["proTrialSignupURL"] = true
log.Info().Str("url", resolved).Msg("Overriding Pulse Pro trial signup URL from environment")
} else {
log.Warn().Str("value", trialSignupURL).Msg("Invalid PULSE_PRO_TRIAL_SIGNUP_URL value, ignoring")
}
}
if enabledStr := utils.GetenvTrim("ENABLE_BACKUP_POLLING"); enabledStr != "" {
switch strings.ToLower(enabledStr) {
case "0", "false", "no", "off":
cfg.EnableBackupPolling = false
default:
cfg.EnableBackupPolling = true
}
cfg.EnvOverrides["ENABLE_BACKUP_POLLING"] = true
log.Info().Bool("enabled", cfg.EnableBackupPolling).Msg("Overriding backup polling enabled flag from environment")
}
if adaptiveEnabled := utils.GetenvTrim("ADAPTIVE_POLLING_ENABLED"); adaptiveEnabled != "" {
switch strings.ToLower(adaptiveEnabled) {
case "0", "false", "no", "off":
cfg.AdaptivePollingEnabled = false
default:
cfg.AdaptivePollingEnabled = true
}
cfg.EnvOverrides["ADAPTIVE_POLLING_ENABLED"] = true
log.Info().Bool("enabled", cfg.AdaptivePollingEnabled).Msg("Adaptive polling feature flag overridden by environment")
}
if baseInterval := utils.GetenvTrim("ADAPTIVE_POLLING_BASE_INTERVAL"); baseInterval != "" {
if dur, err := time.ParseDuration(baseInterval); err == nil {
if dur <= 0 {
log.Warn().Str("value", baseInterval).Msg("Ignoring non-positive ADAPTIVE_POLLING_BASE_INTERVAL from environment")
} else {
cfg.AdaptivePollingBaseInterval = dur
cfg.EnvOverrides["ADAPTIVE_POLLING_BASE_INTERVAL"] = true
log.Info().Dur("interval", dur).Msg("Adaptive polling base interval overridden by environment")
}
} else {
log.Warn().Str("value", baseInterval).Msg("Invalid ADAPTIVE_POLLING_BASE_INTERVAL value, expected duration string")
}
}
if minInterval := utils.GetenvTrim("ADAPTIVE_POLLING_MIN_INTERVAL"); minInterval != "" {
if dur, err := time.ParseDuration(minInterval); err == nil {
if dur <= 0 {
log.Warn().Str("value", minInterval).Msg("Ignoring non-positive ADAPTIVE_POLLING_MIN_INTERVAL from environment")
} else {
cfg.AdaptivePollingMinInterval = dur
cfg.EnvOverrides["ADAPTIVE_POLLING_MIN_INTERVAL"] = true
log.Info().Dur("interval", dur).Msg("Adaptive polling min interval overridden by environment")
}
} else {
log.Warn().Str("value", minInterval).Msg("Invalid ADAPTIVE_POLLING_MIN_INTERVAL value, expected duration string")
}
}
if maxInterval := utils.GetenvTrim("ADAPTIVE_POLLING_MAX_INTERVAL"); maxInterval != "" {
if dur, err := time.ParseDuration(maxInterval); err == nil {
if dur <= 0 {
log.Warn().Str("value", maxInterval).Msg("Ignoring non-positive ADAPTIVE_POLLING_MAX_INTERVAL from environment")
} else {
cfg.AdaptivePollingMaxInterval = dur
cfg.EnvOverrides["ADAPTIVE_POLLING_MAX_INTERVAL"] = true
log.Info().Dur("interval", dur).Msg("Adaptive polling max interval overridden by environment")
}
} else {
log.Warn().Str("value", maxInterval).Msg("Invalid ADAPTIVE_POLLING_MAX_INTERVAL value, expected duration string")
}
}
if minRefresh := utils.GetenvTrim("GUEST_METADATA_MIN_REFRESH_INTERVAL"); minRefresh != "" {
if dur, err := time.ParseDuration(minRefresh); err == nil {
if dur <= 0 {
log.Warn().Str("value", minRefresh).Msg("Ignoring non-positive GUEST_METADATA_MIN_REFRESH_INTERVAL from environment")
} else {
cfg.GuestMetadataMinRefreshInterval = dur
cfg.EnvOverrides["GUEST_METADATA_MIN_REFRESH_INTERVAL"] = true
log.Info().Dur("interval", dur).Msg("Guest metadata min refresh interval overridden by environment")
}
} else {
log.Warn().Str("value", minRefresh).Msg("Invalid GUEST_METADATA_MIN_REFRESH_INTERVAL value, expected duration string")
}
}
if jitter := utils.GetenvTrim("GUEST_METADATA_REFRESH_JITTER"); jitter != "" {
if dur, err := time.ParseDuration(jitter); err == nil {
if dur < 0 {
log.Warn().Str("value", jitter).Msg("Ignoring negative GUEST_METADATA_REFRESH_JITTER from environment")
} else {
cfg.GuestMetadataRefreshJitter = dur
cfg.EnvOverrides["GUEST_METADATA_REFRESH_JITTER"] = true
log.Info().Dur("jitter", dur).Msg("Guest metadata refresh jitter overridden by environment")
}
} else {
log.Warn().Str("value", jitter).Msg("Invalid GUEST_METADATA_REFRESH_JITTER value, expected duration string")
}
}
if backoff := utils.GetenvTrim("GUEST_METADATA_RETRY_BACKOFF"); backoff != "" {
if dur, err := time.ParseDuration(backoff); err == nil {
if dur <= 0 {
log.Warn().Str("value", backoff).Msg("Ignoring non-positive GUEST_METADATA_RETRY_BACKOFF from environment")
} else {
cfg.GuestMetadataRetryBackoff = dur
cfg.EnvOverrides["GUEST_METADATA_RETRY_BACKOFF"] = true
log.Info().Dur("backoff", dur).Msg("Guest metadata retry backoff overridden by environment")
}
} else {
log.Warn().Str("value", backoff).Msg("Invalid GUEST_METADATA_RETRY_BACKOFF value, expected duration string")
}
}
if concurrent := utils.GetenvTrim("GUEST_METADATA_MAX_CONCURRENT"); concurrent != "" {
if val, err := strconv.Atoi(concurrent); err == nil {
if val <= 0 {
log.Warn().Str("value", concurrent).Msg("Ignoring non-positive GUEST_METADATA_MAX_CONCURRENT from environment")
} else {
cfg.GuestMetadataMaxConcurrent = val
cfg.EnvOverrides["GUEST_METADATA_MAX_CONCURRENT"] = true
log.Info().Int("maxConcurrent", val).Msg("Guest metadata max concurrency overridden by environment")
}
} else {
log.Warn().Str("value", concurrent).Msg("Invalid GUEST_METADATA_MAX_CONCURRENT value, expected integer")
}
}
if dnsCacheTimeout := utils.GetenvTrim("DNS_CACHE_TIMEOUT"); dnsCacheTimeout != "" {
if dur, err := time.ParseDuration(dnsCacheTimeout); err == nil {
if dur <= 0 {
log.Warn().Str("value", dnsCacheTimeout).Msg("Ignoring non-positive DNS_CACHE_TIMEOUT from environment")
} else {
cfg.DNSCacheTimeout = dur
cfg.EnvOverrides["DNS_CACHE_TIMEOUT"] = true
log.Info().Dur("timeout", dur).Msg("DNS cache timeout overridden by environment")
}
} else {
log.Warn().Str("value", dnsCacheTimeout).Msg("Invalid DNS_CACHE_TIMEOUT value, expected duration string")
}
}
// HTTPS Configuration
if httpsEnabled := utils.GetenvTrim("HTTPS_ENABLED"); httpsEnabled != "" {
if enabled, err := strconv.ParseBool(httpsEnabled); err == nil {
cfg.HTTPSEnabled = enabled
cfg.EnvOverrides["HTTPS_ENABLED"] = true
log.Info().Bool("enabled", enabled).Msg("HTTPS enabled override from environment")
} else {
log.Warn().Str("value", httpsEnabled).Msg("Invalid HTTPS_ENABLED value, ignoring")
}
}
if certFile := utils.GetenvTrim("TLS_CERT_FILE"); certFile != "" {
cfg.TLSCertFile = certFile
cfg.EnvOverrides["TLS_CERT_FILE"] = true
}
if keyFile := utils.GetenvTrim("TLS_KEY_FILE"); keyFile != "" {
cfg.TLSKeyFile = keyFile
cfg.EnvOverrides["TLS_KEY_FILE"] = true
}
if allowedOrigins := utils.GetenvTrim("ALLOWED_ORIGINS"); allowedOrigins != "" {
cfg.AllowedOrigins = allowedOrigins
cfg.EnvOverrides["ALLOWED_ORIGINS"] = true
}
if bindAddr := utils.GetenvTrim("BIND_ADDRESS"); bindAddr != "" {
cfg.BindAddress = bindAddr
cfg.EnvOverrides["BIND_ADDRESS"] = true
}
if sshPort := utils.GetenvTrim("SSH_PORT"); sshPort != "" {
if port, err := strconv.Atoi(sshPort); err == nil {
if port <= 0 || port > 65535 {
log.Warn().Str("value", sshPort).Msg("Ignoring invalid SSH_PORT from environment (must be 1-65535)")
} else {
cfg.SSHPort = port
cfg.EnvOverrides["SSH_PORT"] = true
log.Info().Int("port", port).Msg("SSH port overridden by environment")
}
} else {
log.Warn().Str("value", sshPort).Msg("Invalid SSH_PORT value, expected integer")
}
}
if frontendPort := utils.GetenvTrim("FRONTEND_PORT"); frontendPort != "" {
if p, err := parsePortOverride("FRONTEND_PORT", frontendPort); err == nil {
cfg.FrontendPort = p
cfg.EnvOverrides["FRONTEND_PORT"] = true
log.Info().Int("port", p).Msg("Overriding frontend port from FRONTEND_PORT env var")
} else {
log.Warn().Str("value", frontendPort).Msg("Ignoring invalid FRONTEND_PORT from environment")
}
}
// Detect deprecated DISABLE_AUTH flag and strip it from the runtime env so downstream
// components behave as if it were never set.
if disableAuthEnv := os.Getenv("DISABLE_AUTH"); disableAuthEnv != "" {
if err := os.Unsetenv("DISABLE_AUTH"); err != nil {
log.Warn().
Str("DISABLE_AUTH", disableAuthEnv).
Err(err).
Msg("Failed to remove legacy DISABLE_AUTH environment variable; continuing with authentication enabled")
} else {
log.Warn().
Str("DISABLE_AUTH", disableAuthEnv).
Msg("Removed legacy DISABLE_AUTH environment variable. Authentication remains enabled.")
}
}
// Check if demo mode is enabled
demoModeEnv := os.Getenv("DEMO_MODE")
if demoModeEnv != "" {
cfg.DemoMode = demoModeEnv == "true" || demoModeEnv == "1"
if cfg.DemoMode {
log.Warn().Msg("DEMO MODE - All modifications disabled (read-only)")
}
}
// Load proxy authentication settings
if proxyAuthSecret := os.Getenv("PROXY_AUTH_SECRET"); proxyAuthSecret != "" {
cfg.ProxyAuthSecret = proxyAuthSecret
log.Info().Msg("Proxy authentication secret configured")
// Load other proxy auth settings
if userHeader := os.Getenv("PROXY_AUTH_USER_HEADER"); userHeader != "" {
cfg.ProxyAuthUserHeader = userHeader
log.Info().Str("header", userHeader).Msg("Proxy auth user header configured")
}
if roleHeader := os.Getenv("PROXY_AUTH_ROLE_HEADER"); roleHeader != "" {
cfg.ProxyAuthRoleHeader = roleHeader
log.Info().Str("header", roleHeader).Msg("Proxy auth role header configured")
}
if roleSeparator := os.Getenv("PROXY_AUTH_ROLE_SEPARATOR"); roleSeparator != "" {
cfg.ProxyAuthRoleSeparator = roleSeparator
log.Info().Str("separator", roleSeparator).Msg("Proxy auth role separator configured")
}
if adminRole := os.Getenv("PROXY_AUTH_ADMIN_ROLE"); adminRole != "" {
cfg.ProxyAuthAdminRole = adminRole
log.Info().Str("role", adminRole).Msg("Proxy auth admin role configured")
}
if logoutURL := os.Getenv("PROXY_AUTH_LOGOUT_URL"); logoutURL != "" {
cfg.ProxyAuthLogoutURL = logoutURL
log.Info().Str("url", logoutURL).Msg("Proxy auth logout URL configured")
}
}
if authUser := os.Getenv("PULSE_AUTH_USER"); authUser != "" {
cfg.AuthUser = authUser
log.Info().Msg("Overriding auth user from env var")
}
if authPass := os.Getenv("PULSE_AUTH_PASS"); authPass != "" {
// Auto-hash plain text passwords for security
if !IsPasswordHashed(authPass) {
// Plain text password - hash it immediately
hashedPass, err := auth.HashPassword(authPass)
if err != nil {
log.Error().Err(err).Msg("Failed to hash password from environment variable")
// Fall back to plain text if hashing fails (shouldn't happen)
cfg.AuthPass = authPass
} else {
cfg.AuthPass = hashedPass
log.Info().Msg("Auto-hashed plain text password from environment variable")
}
} else {
// Already hashed - validate it's complete
if len(authPass) != 60 {
log.Error().
Int("length", len(authPass)).
Str("env_var", "PULSE_AUTH_PASS").
Msg("Bcrypt hash appears truncated; expected 60 characters; authentication may fail")
log.Error().
Str("env_var", "PULSE_AUTH_PASS").
Msg("Ensure the full bcrypt hash is enclosed in single quotes in your .env file or Docker environment")
}
cfg.AuthPass = authPass
log.Debug().Msg("Loaded pre-hashed password from env var")
}
}
// HTTPS/TLS configuration from environment
if httpsEnabled := os.Getenv("HTTPS_ENABLED"); httpsEnabled != "" {
cfg.HTTPSEnabled = httpsEnabled == "true" || httpsEnabled == "1"
log.Debug().Bool("enabled", cfg.HTTPSEnabled).Msg("HTTPS enabled status from env var")
}
if tlsCertFile := os.Getenv("TLS_CERT_FILE"); tlsCertFile != "" {
cfg.TLSCertFile = tlsCertFile
log.Debug().Str("cert_file", tlsCertFile).Msg("TLS cert file from env var")
}
if tlsKeyFile := os.Getenv("TLS_KEY_FILE"); tlsKeyFile != "" {
cfg.TLSKeyFile = tlsKeyFile
log.Debug().Str("key_file", tlsKeyFile).Msg("TLS key file from env var")
}
if redirectPort := os.Getenv("HTTP_REDIRECT_PORT"); redirectPort != "" {
if p, err := strconv.Atoi(redirectPort); err == nil && p > 0 && p <= 65535 {
cfg.HTTPRedirectPort = p
cfg.EnvOverrides["HTTP_REDIRECT_PORT"] = true
log.Debug().Int("port", p).Msg("HTTP redirect port from env var")
}
}
if metricsToken := utils.GetenvTrim("PULSE_METRICS_TOKEN"); metricsToken != "" {
cfg.MetricsToken = metricsToken
cfg.EnvOverrides["PULSE_METRICS_TOKEN"] = true
log.Debug().Msg("Metrics token configured from env var")
}
// Support PULSE_AGENT_URL as an alias for PULSE_AGENT_CONNECT_URL
// (Check this first so standard PULSE_AGENT_CONNECT_URL takes precedence if both set)
if agentURL := utils.GetenvTrim("PULSE_AGENT_URL"); agentURL != "" {
cfg.AgentConnectURL = agentURL
cfg.EnvOverrides["PULSE_AGENT_CONNECT_URL"] = true
log.Info().Str("url", agentURL).Msg("Using dedicated agent connect URL from PULSE_AGENT_URL")
}
if agentConnectURL := utils.GetenvTrim("PULSE_AGENT_CONNECT_URL"); agentConnectURL != "" {
cfg.AgentConnectURL = agentConnectURL
cfg.EnvOverrides["PULSE_AGENT_CONNECT_URL"] = true
log.Info().Str("url", agentConnectURL).Msg("Using dedicated agent connect URL from environment")
}
// Update settings are system.json-only; legacy env vars remain ignored.
// Normalize PVE user fields for password authentication
for i := range cfg.PVEInstances {
if cfg.PVEInstances[i].TokenName == "" && cfg.PVEInstances[i].TokenValue == "" && cfg.PVEInstances[i].User != "" && !strings.Contains(cfg.PVEInstances[i].User, "@") {
cfg.PVEInstances[i].User = cfg.PVEInstances[i].User + "@pam"
}
}
if cfg.AllowedOrigins == "" {
// If not configured and we're in development mode (different ports for frontend/backend)
// allow localhost for development convenience
if os.Getenv("NODE_ENV") == "development" || os.Getenv("PULSE_DEV") == "true" {
cfg.AllowedOrigins = "http://localhost:5173,http://localhost:7655"
log.Info().Msg("Development mode: allowing localhost origins")
}
}
// Support env vars for important settings (override system.json)
// NOTE: Environment variables always take precedence over UI/system.json settings
if discoveryEnabled := os.Getenv("DISCOVERY_ENABLED"); discoveryEnabled != "" {
cfg.DiscoveryEnabled = discoveryEnabled == "true" || discoveryEnabled == "1"
cfg.EnvOverrides["discoveryEnabled"] = true
log.Info().Bool("enabled", cfg.DiscoveryEnabled).Msg("Discovery enabled overridden by DISCOVERY_ENABLED env var")
}
if discoverySubnet := os.Getenv("DISCOVERY_SUBNET"); discoverySubnet != "" {
cfg.DiscoverySubnet = discoverySubnet
cfg.EnvOverrides["discoverySubnet"] = true
log.Info().Str("subnet", discoverySubnet).Msg("Discovery subnet overridden by DISCOVERY_SUBNET env var")
}
if envOverride := utils.GetenvTrim("DISCOVERY_ENVIRONMENT_OVERRIDE"); envOverride != "" {
if canonicalEnv, ok := CanonicalDiscoveryEnvironment(envOverride); ok {
cfg.Discovery.EnvironmentOverride = canonicalEnv
cfg.EnvOverrides["discoveryEnvironmentOverride"] = true
log.Info().Str("environment", cfg.Discovery.EnvironmentOverride).Msg("Discovery environment override set by DISCOVERY_ENVIRONMENT_OVERRIDE")
} else {
log.Warn().Str("value", envOverride).Msg("Ignoring invalid DISCOVERY_ENVIRONMENT_OVERRIDE value")
}
}
if allowlistEnv := utils.GetenvTrim("DISCOVERY_SUBNET_ALLOWLIST"); allowlistEnv != "" {
parts := splitAndTrim(allowlistEnv)
cfg.Discovery.SubnetAllowlist = sanitizeCIDRList(parts)
cfg.EnvOverrides["discoverySubnetAllowlist"] = true
log.Info().Int("allowlistCount", len(cfg.Discovery.SubnetAllowlist)).Msg("Discovery subnet allowlist overridden by DISCOVERY_SUBNET_ALLOWLIST")
}
if blocklistEnv := utils.GetenvTrim("DISCOVERY_SUBNET_BLOCKLIST"); blocklistEnv != "" {
parts := splitAndTrim(blocklistEnv)
cfg.Discovery.SubnetBlocklist = sanitizeCIDRList(parts)
cfg.EnvOverrides["discoverySubnetBlocklist"] = true
log.Info().Int("blocklistCount", len(cfg.Discovery.SubnetBlocklist)).Msg("Discovery subnet blocklist overridden by DISCOVERY_SUBNET_BLOCKLIST")
}
if maxHostsEnv := utils.GetenvTrim("DISCOVERY_MAX_HOSTS_PER_SCAN"); maxHostsEnv != "" {
if v, err := strconv.Atoi(maxHostsEnv); err == nil && v > 0 {
cfg.Discovery.MaxHostsPerScan = v
cfg.EnvOverrides["discoveryMaxHostsPerScan"] = true
log.Info().Int("maxHostsPerScan", v).Msg("Discovery max hosts per scan overridden by DISCOVERY_MAX_HOSTS_PER_SCAN")
} else {
log.Warn().Str("value", maxHostsEnv).Msg("Ignoring invalid DISCOVERY_MAX_HOSTS_PER_SCAN value")
}
}
if maxConcurrentEnv := utils.GetenvTrim("DISCOVERY_MAX_CONCURRENT"); maxConcurrentEnv != "" {
if v, err := strconv.Atoi(maxConcurrentEnv); err == nil && v > 0 {
cfg.Discovery.MaxConcurrent = v
cfg.EnvOverrides["discoveryMaxConcurrent"] = true
log.Info().Int("maxConcurrent", v).Msg("Discovery concurrency overridden by DISCOVERY_MAX_CONCURRENT")
} else {
log.Warn().Str("value", maxConcurrentEnv).Msg("Ignoring invalid DISCOVERY_MAX_CONCURRENT value")
}
}
if reverseDNSEnv := utils.GetenvTrim("DISCOVERY_ENABLE_REVERSE_DNS"); reverseDNSEnv != "" {
switch strings.ToLower(reverseDNSEnv) {
case "0", "false", "no", "off":
cfg.Discovery.EnableReverseDNS = false
default:
cfg.Discovery.EnableReverseDNS = true
}
cfg.EnvOverrides["discoveryEnableReverseDns"] = true
log.Info().Bool("enableReverseDNS", cfg.Discovery.EnableReverseDNS).Msg("Discovery reverse DNS setting overridden by DISCOVERY_ENABLE_REVERSE_DNS")
}
if scanGatewaysEnv := utils.GetenvTrim("DISCOVERY_SCAN_GATEWAYS"); scanGatewaysEnv != "" {
switch strings.ToLower(scanGatewaysEnv) {
case "0", "false", "no", "off":
cfg.Discovery.ScanGateways = false
default:
cfg.Discovery.ScanGateways = true
}
cfg.EnvOverrides["discoveryScanGateways"] = true
log.Info().Bool("scanGateways", cfg.Discovery.ScanGateways).Msg("Discovery gateway scanning overridden by DISCOVERY_SCAN_GATEWAYS")
}
if dialTimeoutEnv := utils.GetenvTrim("DISCOVERY_DIAL_TIMEOUT_MS"); dialTimeoutEnv != "" {
if v, err := strconv.Atoi(dialTimeoutEnv); err == nil && v > 0 {
cfg.Discovery.DialTimeout = v
cfg.EnvOverrides["discoveryDialTimeoutMs"] = true
log.Info().Int("dialTimeoutMs", v).Msg("Discovery dial timeout overridden by DISCOVERY_DIAL_TIMEOUT_MS")
} else {
log.Warn().Str("value", dialTimeoutEnv).Msg("Ignoring invalid DISCOVERY_DIAL_TIMEOUT_MS value")
}
}
if httpTimeoutEnv := utils.GetenvTrim("DISCOVERY_HTTP_TIMEOUT_MS"); httpTimeoutEnv != "" {
if v, err := strconv.Atoi(httpTimeoutEnv); err == nil && v > 0 {
cfg.Discovery.HTTPTimeout = v
cfg.EnvOverrides["discoveryHttpTimeoutMs"] = true
log.Info().Int("httpTimeoutMs", v).Msg("Discovery HTTP timeout overridden by DISCOVERY_HTTP_TIMEOUT_MS")
} else {
log.Warn().Str("value", httpTimeoutEnv).Msg("Ignoring invalid DISCOVERY_HTTP_TIMEOUT_MS value")
}
}
if logLevel := os.Getenv("LOG_LEVEL"); logLevel != "" {
cfg.LogLevel = logLevel
cfg.EnvOverrides["logLevel"] = true
log.Info().Str("level", logLevel).Msg("Log level overridden by LOG_LEVEL env var")
}
if logFormat := os.Getenv("LOG_FORMAT"); logFormat != "" {
cfg.LogFormat = logFormat
cfg.EnvOverrides["logFormat"] = true
log.Info().Str("format", logFormat).Msg("Log format overridden by LOG_FORMAT env var")
}
if logFile := os.Getenv("LOG_FILE"); logFile != "" {
cfg.LogFile = logFile
cfg.EnvOverrides["logFile"] = true
log.Info().Str("file", logFile).Msg("Log file overridden by LOG_FILE env var")
}
if logMaxSize := os.Getenv("LOG_MAX_SIZE"); logMaxSize != "" {
if size, err := strconv.Atoi(logMaxSize); err == nil {
if size <= 0 {
log.Warn().Str("value", logMaxSize).Msg("Ignoring non-positive LOG_MAX_SIZE from environment")
} else {
cfg.LogMaxSize = size
cfg.EnvOverrides["logMaxSize"] = true
log.Info().Int("size_mb", size).Msg("Log max size overridden by LOG_MAX_SIZE env var")
}
} else {
log.Warn().Str("value", logMaxSize).Msg("Invalid LOG_MAX_SIZE value, expected integer")
}
}
if logMaxAge := os.Getenv("LOG_MAX_AGE"); logMaxAge != "" {
if age, err := strconv.Atoi(logMaxAge); err == nil {
if age <= 0 {
log.Warn().Str("value", logMaxAge).Msg("Ignoring non-positive LOG_MAX_AGE from environment")
} else {
cfg.LogMaxAge = age
cfg.EnvOverrides["logMaxAge"] = true
log.Info().Int("days", age).Msg("Log max age overridden by LOG_MAX_AGE env var")
}
} else {
log.Warn().Str("value", logMaxAge).Msg("Invalid LOG_MAX_AGE value, expected integer")
}
}
if logCompress := os.Getenv("LOG_COMPRESS"); logCompress != "" {
cfg.LogCompress = logCompress == "true" || logCompress == "1"
cfg.EnvOverrides["logCompress"] = true
log.Info().Bool("compress", cfg.LogCompress).Msg("Log compression overridden by LOG_COMPRESS env var")
}
cfg.Discovery = NormalizeDiscoveryConfig(cfg.Discovery)
if connectionTimeout := utils.GetenvTrim("CONNECTION_TIMEOUT"); connectionTimeout != "" {
if d, err := parseConnectionTimeoutOverride(connectionTimeout); err == nil {
if d < time.Second {
log.Warn().Dur("value", d).Msg("Ignoring CONNECTION_TIMEOUT below 1s from environment")
} else {
cfg.ConnectionTimeout = d
cfg.EnvOverrides["connectionTimeout"] = true
log.Info().Dur("timeout", d).Msg("Connection timeout overridden by CONNECTION_TIMEOUT env var")
}
} else {
log.Warn().Str("value", connectionTimeout).Msg("Invalid CONNECTION_TIMEOUT value, expected seconds or duration string")
}
}
if maxPollTimeout := os.Getenv("MAX_POLL_TIMEOUT"); maxPollTimeout != "" {
if d, err := time.ParseDuration(maxPollTimeout); err == nil {
if d >= 30*time.Second { // Minimum 30 seconds
cfg.MaxPollTimeout = d
cfg.EnvOverrides["maxPollTimeout"] = true
log.Info().Dur("timeout", d).Msg("Max poll timeout overridden by MAX_POLL_TIMEOUT env var")
} else {
log.Warn().Dur("value", d).Msg("MAX_POLL_TIMEOUT too low (minimum 30s), using default")
}
} else {
log.Warn().Str("value", maxPollTimeout).Msg("Invalid MAX_POLL_TIMEOUT value, using default")
}
}
if allowedOrigins := os.Getenv("ALLOWED_ORIGINS"); allowedOrigins != "" {
cfg.AllowedOrigins = allowedOrigins
cfg.EnvOverrides["allowedOrigins"] = true
log.Info().Str("origins", allowedOrigins).Msg("Allowed origins overridden by ALLOWED_ORIGINS env var")
}
if publicURL := os.Getenv("PULSE_PUBLIC_URL"); publicURL != "" {
cfg.PublicURL = publicURL
cfg.EnvOverrides["publicURL"] = true
log.Info().Str("url", publicURL).Msg("Public URL configured from PULSE_PUBLIC_URL env var")
} else {
// In hosted mode, fail closed unless explicitly configured.
if os.Getenv("PULSE_HOSTED_MODE") == "true" {
log.Warn().Msg("Hosted mode enabled: public URL not configured; external links (e.g., magic links) will be disabled. Set PULSE_PUBLIC_URL.")
} else {
// Try to auto-detect public URL if not explicitly configured
if detectedURL := detectPublicURL(cfg.FrontendPort); detectedURL != "" {
cfg.PublicURL = detectedURL
log.Info().Str("url", detectedURL).Msg("Auto-detected public URL for webhook notifications")
}
}
}
// Initialize logging with configuration values
logging.Init(logging.Config{
Format: cfg.LogFormat,
Level: cfg.LogLevel,
Component: "pulse-config",
FilePath: cfg.LogFile,
MaxSizeMB: cfg.LogMaxSize,
MaxAgeDays: cfg.LogMaxAge,
Compress: cfg.LogCompress,
})
// Initialize DNS cache with configured timeout
// This must be done before any HTTP clients are created
tlsutil.SetDNSCacheTTL(cfg.DNSCacheTimeout)
// Validate configuration
if err := cfg.Validate(); err != nil {
return nil, fmt.Errorf("load config: invalid configuration: %w", err)
}
return cfg, nil
}
// Validate checks if the configuration is valid.
//
// Hard failures (returned as errors): out-of-range ports, a redirect port
// that collides with the frontend port, polling intervals outside their
// allowed bounds, inconsistent adaptive polling intervals, and PVE instances
// missing a host, scheme, or credentials.
//
// Soft failures (logged, auto-fixed): PBS instances missing a host or
// credentials are dropped, and PBS hosts without a scheme get "https://"
// prepended. As a side effect, c.PBSInstances is rewritten to contain only
// the valid (possibly corrected) entries.
func (c *Config) Validate() error {
	// Validate server settings.
	if c.FrontendPort <= 0 || c.FrontendPort > 65535 {
		return fmt.Errorf("invalid frontend port: %d", c.FrontendPort)
	}
	// SSHPort of 0 means "unset"; any other value must be a real port.
	if c.SSHPort != 0 && (c.SSHPort < 1 || c.SSHPort > 65535) {
		return fmt.Errorf("invalid SSH port: %d (must be between 1 and 65535)", c.SSHPort)
	}
	if c.HTTPRedirectPort != 0 {
		if c.HTTPRedirectPort < 1 || c.HTTPRedirectPort > 65535 {
			return fmt.Errorf("invalid HTTP redirect port: %d (must be between 1 and 65535)", c.HTTPRedirectPort)
		}
		// The redirect listener cannot share the main frontend port.
		if c.HTTPRedirectPort == c.FrontendPort {
			return fmt.Errorf("HTTP redirect port (%d) must differ from frontend port", c.HTTPRedirectPort)
		}
	}

	// Validate monitoring settings.
	if c.PVEPollingInterval < 10*time.Second {
		return fmt.Errorf("PVE polling interval must be at least 10 seconds")
	}
	if c.PVEPollingInterval > time.Hour {
		return fmt.Errorf("PVE polling interval cannot exceed 1 hour")
	}
	if c.ConnectionTimeout < time.Second {
		return fmt.Errorf("connection timeout must be at least 1 second")
	}

	// Adaptive polling intervals must all be positive and satisfy
	// min <= base <= max.
	if c.AdaptivePollingMinInterval <= 0 {
		return fmt.Errorf("adaptive polling min interval must be greater than 0")
	}
	if c.AdaptivePollingBaseInterval <= 0 {
		return fmt.Errorf("adaptive polling base interval must be greater than 0")
	}
	if c.AdaptivePollingMaxInterval <= 0 {
		return fmt.Errorf("adaptive polling max interval must be greater than 0")
	}
	if c.AdaptivePollingMinInterval > c.AdaptivePollingMaxInterval {
		return fmt.Errorf("adaptive polling min interval cannot exceed max interval")
	}
	if c.AdaptivePollingBaseInterval < c.AdaptivePollingMinInterval || c.AdaptivePollingBaseInterval > c.AdaptivePollingMaxInterval {
		return fmt.Errorf("adaptive polling base interval must be between min and max intervals")
	}

	// Validate PVE instances. Unlike PBS below, a bad PVE entry is a hard
	// error: the scheme must be explicit and credentials must be present.
	for i, pve := range c.PVEInstances {
		if pve.Host == "" {
			return fmt.Errorf("PVE instance %d: host is required", i+1)
		}
		if !strings.HasPrefix(pve.Host, "http://") && !strings.HasPrefix(pve.Host, "https://") {
			return fmt.Errorf("PVE instance %d: host must start with http:// or https://", i+1)
		}
		// Must have either password or a complete API token pair.
		if pve.Password == "" && (pve.TokenName == "" || pve.TokenValue == "") {
			return fmt.Errorf("PVE instance %d: either password or token authentication is required", i+1)
		}
	}

	// Validate and auto-fix PBS instances: unusable entries are dropped with
	// a warning rather than failing the whole configuration. Preallocate to
	// the common case where every entry is valid.
	validPBS := make([]PBSInstance, 0, len(c.PBSInstances))
	for i, pbs := range c.PBSInstances {
		if pbs.Host == "" {
			log.Warn().Int("instance", i+1).Msg("PBS instance missing host, skipping")
			continue
		}
		// Auto-fix a missing protocol by assuming HTTPS.
		if !strings.HasPrefix(pbs.Host, "http://") && !strings.HasPrefix(pbs.Host, "https://") {
			pbs.Host = "https://" + pbs.Host
			log.Info().Str("host", pbs.Host).Msg("PBS host auto-corrected to include https://")
		}
		// Check authentication: password or a complete token pair.
		if pbs.Password == "" && (pbs.TokenName == "" || pbs.TokenValue == "") {
			log.Warn().Int("instance", i+1).Str("host", pbs.Host).Msg("PBS instance missing authentication, skipping")
			continue
		}
		validPBS = append(validPBS, pbs)
	}
	c.PBSInstances = validPBS
	return nil
}
// detectPublicURL attempts to automatically detect the public URL for Pulse.
// It returns an HTTP URL built from the best locally visible IPv4 address and
// the given port, or "" when no externally meaningful address can be
// determined (the caller treats "" as "no public URL configured").
func detectPublicURL(port int) string {
	// When running inside Docker we can't reliably determine an externally
	// reachable address. Returning an empty string avoids surfacing
	// container-only IPs (e.g., 172.x) in notifications.
	if _, err := osStat("/.dockerenv"); err == nil {
		log.Info().Msg("Docker environment detected - skipping public URL auto-detect. Set PULSE_PUBLIC_URL to expose external links.")
		return ""
	}

	// Method 1: Check if we're in a Proxmox container (most common deployment).
	if _, err := osStat("/etc/pve"); err == nil {
		// We're likely in a Proxmox VE container.
		// Try to get the container's IP from `hostname -I`.
		if output, err := execCommand("hostname", "-I").Output(); err == nil {
			for _, ip := range strings.Fields(string(output)) {
				// Skip loopback and IPv6 addresses.
				if !strings.HasPrefix(ip, "127.") && !strings.Contains(ip, ":") {
					return fmt.Sprintf("http://%s:%d", ip, port)
				}
			}
		}
	}

	// Method 2: Try to get the primary (outbound) network interface IP.
	if ip := getOutboundIP(); ip != "" {
		return fmt.Sprintf("http://%s:%d", ip, port)
	}

	// Method 3: Scan all non-loopback IPv4 addresses, preferring RFC 1918
	// private ranges since Pulse is typically reached over the LAN.
	if addrs, err := netInterfaceAddrs(); err == nil {
		for _, addr := range addrs {
			ipnet, ok := addr.(*net.IPNet)
			if !ok || ipnet.IP.IsLoopback() {
				continue
			}
			// net.IP.IsPrivate matches exactly the RFC 1918 ranges
			// (10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16); a plain "172."
			// prefix check would wrongly classify public 172.x addresses
			// outside 172.16/12 as private.
			if ip4 := ipnet.IP.To4(); ip4 != nil && ip4.IsPrivate() {
				return fmt.Sprintf("http://%s:%d", ip4.String(), port)
			}
		}
		// No private IPv4 found: fall back to the first public IPv4 address.
		for _, addr := range addrs {
			if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
				if ipnet.IP.To4() != nil {
					return fmt.Sprintf("http://%s:%d", ipnet.IP.String(), port)
				}
			}
		}
	}

	return ""
}
// getOutboundIP gets the preferred outbound IP of this machine.
// Dialing UDP does not send any packets; it only asks the kernel which local
// address would be used to route toward the target. Google DNS is tried
// first with Cloudflare DNS as a fallback. Returns "" if no route can be
// resolved.
func getOutboundIP() string {
	var conn net.Conn
	for _, target := range []string{"8.8.8.8:80", "1.1.1.1:80"} {
		c, dialErr := netDial("udp", target)
		if dialErr == nil {
			conn = c
			break
		}
	}
	if conn == nil {
		return ""
	}
	defer conn.Close()

	udpAddr, ok := conn.LocalAddr().(*net.UDPAddr)
	if !ok || udpAddr == nil || udpAddr.IP == nil {
		return ""
	}
	return udpAddr.IP.String()
}