Mirror of https://github.com/rcourtman/Pulse.git, synced 2026-04-28 03:20:11 +00:00.
Improve installer temperature proxy and backup polling
This commit is contained in:
parent
9d34327dbe
commit
a03f8115b6
5 changed files with 127 additions and 6 deletions
27
install.sh
27
install.sh
|
|
@ -278,6 +278,29 @@ safe_read_with_default() {
|
|||
return 0
|
||||
}
|
||||
|
||||
# Block until the Pulse HTTP API answers on its health endpoint, or give up.
#
# $1 - base URL of the Pulse instance (e.g. http://10.0.0.5:7655)
# $2 - maximum number of attempts (default: 60)
# $3 - delay in seconds between attempts (default: 1)
#
# Returns 0 when the API responded, or when the check is impossible
# (no URL / no curl) and the installer must proceed optimistically.
# Returns 1 when every attempt failed.
wait_for_pulse_ready() {
    local pulse_url="$1"
    local retries="${2:-60}"
    local delay="${3:-1}"

    # Without a URL or curl there is nothing we can probe; don't block install.
    if [[ -z "$pulse_url" ]] || ! command -v curl >/dev/null 2>&1; then
        return 0
    fi

    # Strip a trailing slash so we never produce "//api/health".
    local api_endpoint="${pulse_url%/}/api/health"
    print_info "Waiting for Pulse API at ${api_endpoint}..."
    for attempt in $(seq 1 "$retries"); do
        if curl -fsS --max-time 2 "$api_endpoint" >/dev/null 2>&1; then
            print_info "Pulse API is reachable"
            return 0
        fi
        sleep "$delay"
    done

    # Count attempts, not seconds: the old "${retries}s" message was wrong
    # whenever the caller passed a delay other than 1.
    print_warn "Pulse API did not respond after ${retries} attempts; continuing anyway"
    return 1
}
|
||||
|
||||
# Print an error message to stderr, colored red.
# $1 - message text. Relies on RED/NC color variables defined at file scope.
print_error() {
    echo -e "${RED}[ERROR] $1${NC}" >&2
}
|
||||
|
|
@ -1487,6 +1510,8 @@ create_lxc_container() {
|
|||
# Get container IP
|
||||
local IP=$(pct exec $CTID -- hostname -I | awk '{print $1}')
|
||||
|
||||
local PULSE_BASE_URL="http://${IP}:${frontend_port}"
|
||||
|
||||
# Automatically register the Proxmox host with Pulse so temperature proxy sync succeeds
|
||||
auto_register_pve_node "$CTID" "$IP" "$frontend_port"
|
||||
|
||||
|
|
@ -1510,6 +1535,8 @@ fi'; then
|
|||
fi
|
||||
fi
|
||||
|
||||
wait_for_pulse_ready "$PULSE_BASE_URL" 120 1
|
||||
|
||||
# Determine if we should install temperature proxy
|
||||
local install_proxy=false
|
||||
local docker_in_container=false
|
||||
|
|
|
|||
|
|
@ -6470,8 +6470,14 @@ func (m *Monitor) pollPVEInstance(ctx context.Context, instanceName string, clie
|
|||
Dur("timeout", timeout).
|
||||
Msg("Starting background backup/snapshot polling")
|
||||
|
||||
// Use parent context for proper cancellation chain
|
||||
backupCtx, cancel := context.WithTimeout(ctx, timeout)
|
||||
// The per-cycle ctx is canceled as soon as the main polling loop finishes,
|
||||
// so derive the backup poll context from the long-lived runtime context instead.
|
||||
parentCtx := m.runtimeCtx
|
||||
if parentCtx == nil {
|
||||
parentCtx = context.Background()
|
||||
}
|
||||
|
||||
backupCtx, cancel := context.WithTimeout(parentCtx, timeout)
|
||||
defer cancel()
|
||||
|
||||
// Poll backup tasks
|
||||
|
|
|
|||
|
|
@ -18,5 +18,17 @@ func storageContentQueryable(storage proxmox.Storage) bool {
|
|||
if storage.Enabled == 0 {
|
||||
return false
|
||||
}
|
||||
return storage.Active == 1
|
||||
|
||||
if storage.Active == 1 {
|
||||
return true
|
||||
}
|
||||
|
||||
// PBS storages report Active=0 on every node because they are accessed remotely via the
|
||||
// backup proxy. We still need to inspect them so the UI can surface PBS-backed Proxmox
|
||||
// backups even when no dedicated PBS instance is configured inside Pulse.
|
||||
if strings.Contains(storage.Content, "backup") && storage.Type == "pbs" {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
|
|
|||
74
internal/monitoring/storage_filters_test.go
Normal file
74
internal/monitoring/storage_filters_test.go
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
package monitoring

import (
	"testing"

	"github.com/rcourtman/pulse-go-rewrite/pkg/proxmox"
)

// TestStorageContentQueryable covers the three decision axes of
// storageContentQueryable: the Enabled gate, the Active fast path, and the
// PBS-backup exception for inactive storages.
func TestStorageContentQueryable(t *testing.T) {
	tests := []struct {
		name    string
		storage proxmox.Storage
		want    bool
	}{
		{
			name: "disabled storage skipped",
			storage: proxmox.Storage{
				Storage: "local",
				Enabled: 0,
				Active:  1,
			},
			want: false,
		},
		{
			name: "active storage allowed",
			storage: proxmox.Storage{
				Storage: "local-zfs",
				Enabled: 1,
				Active:  1,
			},
			want: true,
		},
		{
			name: "pbs backup storage allowed even when inactive",
			storage: proxmox.Storage{
				Storage: "pbs-datastore",
				Type:    "pbs",
				Content: "backup",
				Enabled: 1,
				Active:  0,
			},
			want: true,
		},
		{
			name: "non-backup inactive storage skipped",
			storage: proxmox.Storage{
				Storage: "nfs-images",
				Content: "images",
				Enabled: 1,
				Active:  0,
			},
			want: false,
		},
		{
			name: "non-pbs inactive backup storage skipped",
			storage: proxmox.Storage{
				Storage: "backup-dir",
				Type:    "dir",
				Content: "backup",
				Enabled: 1,
				Active:  0,
			},
			want: false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := storageContentQueryable(tt.storage); got != tt.want {
				t.Fatalf("storageContentQueryable() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
||||
|
|
@ -91,14 +91,16 @@ func NewTemperatureCollectorWithPort(sshUser, sshKeyPath string, sshPort int) *T
|
|||
tc.hostKeys = manager
|
||||
}
|
||||
|
||||
// Check if proxy is available
|
||||
// Always keep a proxy client so we can detect the socket later even if it
|
||||
// isn't present during startup. Without this, containerized deployments that
|
||||
// mount the socket after Pulse starts never re-enable the hardened proxy.
|
||||
proxyClient := tempproxy.NewClient()
|
||||
tc.proxyClient = proxyClient
|
||||
if proxyClient.IsAvailable() {
|
||||
log.Info().Msg("Temperature proxy detected - using secure host-side bridge")
|
||||
tc.proxyClient = proxyClient
|
||||
tc.useProxy = true
|
||||
} else {
|
||||
log.Debug().Msg("Temperature proxy not available - using direct SSH")
|
||||
log.Debug().Msg("Temperature proxy not available yet - falling back to SSH until socket appears")
|
||||
tc.useProxy = false
|
||||
}
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue