fix: Docker persistence actually fixed this time

The ConfigHandlers were calling config.SaveConfig(), which uses the
globalPersistence instance initialized at startup with a potentially
wrong path. The handlers now use their own persistence instance
directly, which is initialized with the correct DataPath.

This was causing Docker configurations to still be saved to /etc/pulse,
even though we thought we had fixed it in v4.0.6.
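
A minimal sketch of the pattern the fix moves to (type and constructor
names here are assumptions for illustration, not the actual Pulse API):

```go
package api

// Sketch only: names are illustrative. The point is that each handler
// owns a persistence instance built from the configured data path,
// instead of relying on a package-level global set at startup.

// Persistence writes configuration files under a fixed data path.
type Persistence struct {
	dataPath string // e.g. /data in Docker, rather than the default /etc/pulse
}

func NewPersistence(dataPath string) *Persistence {
	return &Persistence{dataPath: dataPath}
}

// ConfigHandlers holds its own persistence instance.
type ConfigHandlers struct {
	persistence *Persistence
}

func NewConfigHandlers(dataPath string) *ConfigHandlers {
	return &ConfigHandlers{persistence: NewPersistence(dataPath)}
}
```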

Fixes #253 (for real this time)
Author: Pulse Monitor
Date:   2025-08-06 17:14:34 +00:00
Parent: 9c77f6e7db
Commit: 9bc050371b

4 changed files with 57 additions and 8 deletions


@@ -0,0 +1,49 @@
# Docker Push Instructions for v4.0.6
The Docker image has been built locally but needs to be pushed from a machine with Docker Hub credentials.
## Option 1: Push from this machine
```bash
# Login to Docker Hub
sudo docker login -u rcourtman
# Push all tags
sudo docker push rcourtman/pulse:v4.0.6
sudo docker push rcourtman/pulse:4.0.6
sudo docker push rcourtman/pulse:4.0
sudo docker push rcourtman/pulse:4
sudo docker push rcourtman/pulse:latest
```
## Option 2: Build and push from docker-builder container (192.168.0.174)
```bash
ssh root@192.168.0.174
cd /root/Pulse
git pull
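# Log in first if Docker Hub credentials are not already cached on this builder
docker login -u rcourtman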
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
-t rcourtman/pulse:v4.0.6 \
-t rcourtman/pulse:4.0.6 \
-t rcourtman/pulse:4.0 \
-t rcourtman/pulse:4 \
-t rcourtman/pulse:latest \
--push .
```
## Option 3: Build multi-arch locally with buildx
```bash
# Create the buildx builder if it does not already exist, then select it
docker buildx inspect multiarch >/dev/null 2>&1 || docker buildx create --name multiarch
docker buildx use multiarch
# Build and push
docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 \
-t rcourtman/pulse:v4.0.6 \
-t rcourtman/pulse:4.0.6 \
-t rcourtman/pulse:4.0 \
-t rcourtman/pulse:4 \
-t rcourtman/pulse:latest \
--push .
```
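If the arm builds fail because QEMU emulation is not registered on the
host (an assumption; many hosts already have it), it can be installed
with `docker run --privileged --rm tonistiigi/binfmt --install all`.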
The v4.0.6 release fixes:
- Docker persistence issue (#253)
- Windows VM memory reporting with balloon drivers (#258)


@@ -1 +1 @@
-4.0.6
+4.0.7


@@ -344,8 +344,8 @@ func (h *ConfigHandlers) HandleAddNode(w http.ResponseWriter, r *http.Request) {
 h.config.PBSInstances = append(h.config.PBSInstances, pbs)
 }
-// Save configuration to disk
-if err := config.SaveConfig(h.config); err != nil {
+// Save configuration to disk using our persistence instance
+if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances); err != nil {
 log.Error().Err(err).Msg("Failed to save nodes configuration")
 http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 return
@@ -663,8 +663,8 @@ func (h *ConfigHandlers) HandleUpdateNode(w http.ResponseWriter, r *http.Request) {
 return
 }
-// Save configuration to disk
-if err := config.SaveConfig(h.config); err != nil {
+// Save configuration to disk using our persistence instance
+if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances); err != nil {
 log.Error().Err(err).Msg("Failed to save nodes configuration")
 http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 return
@@ -715,8 +715,8 @@ func (h *ConfigHandlers) HandleDeleteNode(w http.ResponseWriter, r *http.Request) {
 return
 }
-// Save configuration to disk
-if err := config.SaveConfig(h.config); err != nil {
+// Save configuration to disk using our persistence instance
+if err := h.persistence.SaveNodesConfig(h.config.PVEInstances, h.config.PBSInstances); err != nil {
 log.Error().Err(err).Msg("Failed to save nodes configuration")
 http.Error(w, "Failed to save configuration", http.StatusInternalServerError)
 return


@@ -143,7 +143,7 @@ func GetCurrentVersion() (*VersionInfo, error) {
 // Final fallback
 return &VersionInfo{
-Version: "4.0.6",
+Version: "4.0.7",
 Build: "release",
 Runtime: "go",
 IsDevelopment: false,