Fix waiting for log writers on shutdown, improve persistence enabling

Daniel 2023-10-12 17:05:53 +02:00
parent 7872911480
commit 05bdc44611
5 changed files with 25 additions and 21 deletions

View file

@@ -11,7 +11,6 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portbase/utils"
 )

 func registerAPI() error {
@@ -140,11 +139,7 @@ func writeMetricsTo(ctx context.Context, url string) error {
 	)
 }

-var metricsPusherDone = utils.NewBroadcastFlag()
-
 func metricsWriter(ctx context.Context) error {
-	defer metricsPusherDone.NotifyAndReset()
-
 	pushURL := pushOption()
 	ticker := module.NewSleepyTicker(1*time.Minute, 0)
 	defer ticker.Stop()

View file

@@ -16,7 +16,7 @@ import (
 const hostStatTTL = 1 * time.Second

-func registeHostMetrics() (err error) {
+func registerHostMetrics() (err error) {
 	// Register load average metrics.
 	_, err = NewGauge("host/load/avg/1", nil, getFloat64HostStat(LoadAvg1), &Options{Name: "Host Load Avg 1min", Permission: api.PermitUser})
 	if err != nil {

View file

@@ -5,7 +5,7 @@ import (
 	"github.com/safing/portbase/log"
 )

-func registeLogMetrics() (err error) {
+func registerLogMetrics() (err error) {
 	_, err = NewFetchingCounter(
 		"logs/warning/total",
 		nil,

View file

@@ -5,8 +5,8 @@ import (
 	"fmt"
 	"sort"
 	"sync"
-	"time"

+	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
 )
@@ -36,7 +36,7 @@ var (
 )

 func init() {
-	module = modules.Register("metrics", prep, start, stop, "config", "database", "api")
+	module = modules.Register("metrics", prep, start, stop, "config", "database", "api", "base")
 }

 func prep() error {
@@ -59,11 +59,11 @@ func start() error {
 		return err
 	}

-	if err := registeHostMetrics(); err != nil {
+	if err := registerHostMetrics(); err != nil {
 		return err
 	}

-	if err := registeLogMetrics(); err != nil {
+	if err := registerLogMetrics(); err != nil {
 		return err
 	}
@@ -71,6 +71,10 @@ func start() error {
 		return err
 	}

+	if err := loadPersistentMetrics(); err != nil {
+		log.Errorf("metrics: failed to load persistent metrics: %s", err)
+	}
+
 	if pushOption() != "" {
 		module.StartServiceWorker("metric pusher", 0, metricsWriter)
 	}
@@ -82,16 +86,13 @@ func stop() error {
 	// Wait until the metrics pusher is done, as it may have started reporting
 	// and may report a higher number than we store to disk. For persistent
 	// metrics it can then happen that the first report is lower than the
-	// previous report, making prometheus think that al that happened since the
+	// previous report, making prometheus think that all that happened since the
 	// last report, due to the automatic restart detection.
-	done := metricsPusherDone.NewFlag()
-	done.Refresh()
-	if !done.IsSet() {
-		select {
-		case <-done.Signal():
-		case <-time.After(10 * time.Second):
-		}
-	}
+	// The registry is read locked when writing metrics.
+	// Write lock the registry to make sure all writes are finished.
+	registryLock.Lock()
+	registryLock.Unlock() //nolint:staticcheck

 	storePersistentMetrics()
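
Taking and immediately releasing the write lock works as a shutdown barrier here because metric writers hold the registry's read lock while reporting. Below is a minimal, self-contained sketch of that pattern, assuming a plain sync.RWMutex; it is illustrative only and not the module's actual registry code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// registryLock guards a hypothetical metrics registry. Metric writers only
// take the read lock, so many of them can report concurrently.
var registryLock sync.RWMutex

// reportMetrics stands in for a pusher or writer that holds the read lock
// while it serializes metric values.
func reportMetrics(id int) {
	registryLock.RLock()
	defer registryLock.RUnlock()
	time.Sleep(50 * time.Millisecond) // simulate writing metrics
	fmt.Printf("writer %d finished\n", id)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			reportMetrics(id)
		}(i)
	}
	time.Sleep(10 * time.Millisecond) // give the writers time to grab the read lock

	// Barrier: acquiring the write lock blocks until every read-locked writer
	// has released the lock; releasing it right away keeps shutdown from
	// holding anything longer than needed.
	registryLock.Lock()
	registryLock.Unlock() //nolint:staticcheck // empty critical section is intentional

	fmt.Println("all in-flight writers are done, safe to persist metrics")
	wg.Wait()
}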

View file

@@ -52,10 +52,18 @@ func EnableMetricPersistence(key string) error {
 	// Set storage key.
 	storageKey = key

+	return nil
+}
+
+func loadPersistentMetrics() error {
+	// Abort if storage is not enabled.
+	if storageInit.SetToIf(false, true) {
+		return nil
+	}
+
 	// Load metrics from storage.
 	var err error
-	storage, err = getMetricsStorage(key)
+	storage, err = getMetricsStorage(storageKey)
 	switch {
 	case err == nil:
 		// Continue.
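
With this change, EnableMetricPersistence only records the storage key, and the actual load moves to loadPersistentMetrics, which start() calls later. The following is a rough, self-contained sketch of that enable-then-load split, using the standard library's atomic.Bool in place of the project's flag type; all names and the example key are illustrative, not the module's real implementation.

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// Illustrative stand-ins for the metrics package's internals; the real code
// uses its own flag type and database-backed storage.
var (
	persistenceEnabled atomic.Bool
	storageKey         string
)

// EnableMetricPersistence only records where metrics should be stored.
// Nothing is loaded here anymore; that happens during module start.
func EnableMetricPersistence(key string) error {
	if !persistenceEnabled.CompareAndSwap(false, true) {
		return errors.New("metric persistence already enabled")
	}
	storageKey = key
	return nil
}

// loadPersistentMetrics is called once from start() and is a no-op when
// persistence was never enabled.
func loadPersistentMetrics() error {
	if !persistenceEnabled.Load() {
		return nil
	}
	// In the real module this fetches the stored record (getMetricsStorage)
	// and feeds the saved values back into the counters.
	fmt.Println("loading persisted metrics from", storageKey)
	return nil
}

func main() {
	_ = EnableMetricPersistence("example:metrics/storage") // hypothetical key
	if err := loadPersistentMetrics(); err != nil {
		fmt.Println("failed to load persistent metrics:", err)
	}
}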