Fix waiting for log writers on shutdown, improve persistence enabling

Daniel 2023-10-12 17:05:53 +02:00
parent 7872911480
commit 05bdc44611
5 changed files with 25 additions and 21 deletions


@@ -11,7 +11,6 @@ import (
 	"github.com/safing/portbase/api"
 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/log"
-	"github.com/safing/portbase/utils"
 )

 func registerAPI() error {
@@ -140,11 +139,7 @@ func writeMetricsTo(ctx context.Context, url string) error {
 	)
 }

-var metricsPusherDone = utils.NewBroadcastFlag()
-
 func metricsWriter(ctx context.Context) error {
-	defer metricsPusherDone.NotifyAndReset()
-
 	pushURL := pushOption()
 	ticker := module.NewSleepyTicker(1*time.Minute, 0)
 	defer ticker.Stop()


@@ -16,7 +16,7 @@ import (
 const hostStatTTL = 1 * time.Second

-func registeHostMetrics() (err error) {
+func registerHostMetrics() (err error) {
 	// Register load average metrics.
 	_, err = NewGauge("host/load/avg/1", nil, getFloat64HostStat(LoadAvg1), &Options{Name: "Host Load Avg 1min", Permission: api.PermitUser})
 	if err != nil {

@@ -5,7 +5,7 @@ import (
 	"github.com/safing/portbase/log"
 )

-func registeLogMetrics() (err error) {
+func registerLogMetrics() (err error) {
 	_, err = NewFetchingCounter(
 		"logs/warning/total",
 		nil,


@@ -5,8 +5,8 @@ import (
 	"fmt"
 	"sort"
 	"sync"
-	"time"

+	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/modules"
 )
@@ -36,7 +36,7 @@
 )

 func init() {
-	module = modules.Register("metrics", prep, start, stop, "config", "database", "api")
+	module = modules.Register("metrics", prep, start, stop, "config", "database", "api", "base")
 }

 func prep() error {
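
Adding "base" to the dependency list presumably guarantees that the base module is started before metrics; the persistent-metrics loading introduced in start() below is the likely reason for the stricter ordering.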
@@ -59,11 +59,11 @@ func start() error {
 		return err
 	}

-	if err := registeHostMetrics(); err != nil {
+	if err := registerHostMetrics(); err != nil {
 		return err
 	}

-	if err := registeLogMetrics(); err != nil {
+	if err := registerLogMetrics(); err != nil {
 		return err
 	}
@@ -71,6 +71,10 @@ func start() error {
 		return err
 	}

+	if err := loadPersistentMetrics(); err != nil {
+		log.Errorf("metrics: failed to load persistent metrics: %s", err)
+	}
+
 	if pushOption() != "" {
 		module.StartServiceWorker("metric pusher", 0, metricsWriter)
 	}
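
Note that a failed load is only logged, not returned: a missing or unreadable persistence record degrades to starting with fresh counters instead of aborting module startup.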
@@ -82,16 +86,13 @@ func stop() error {
 	// Wait until the metrics pusher is done, as it may have started reporting
 	// and may report a higher number than we store to disk. For persistent
 	// metrics it can then happen that the first report is lower than the
-	// previous report, making prometheus think that al that happened since the
+	// previous report, making prometheus think that all that happened since the
 	// last report, due to the automatic restart detection.
-	done := metricsPusherDone.NewFlag()
-	done.Refresh()
-	if !done.IsSet() {
-		select {
-		case <-done.Signal():
-		case <-time.After(10 * time.Second):
-		}
-	}
+
+	// The registry is read locked when writing metrics.
+	// Write lock the registry to make sure all writes are finished.
+	registryLock.Lock()
+	registryLock.Unlock() //nolint:staticcheck

 	storePersistentMetrics()
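
The empty Lock/Unlock pair above acts as a barrier rather than a guard: metric writers hold registryLock read-locked while reporting, so acquiring the write lock can only succeed once every in-flight write has drained. A minimal self-contained sketch of the idiom, assuming registryLock is a sync.RWMutex and writeMetric stands in for the real reporting code:

package main

import (
	"fmt"
	"sync"
	"time"
)

// registryLock guards the metric registry: writers report under the read
// lock so they can run concurrently; shutdown takes the write lock once.
var registryLock sync.RWMutex

// writeMetric is a hypothetical stand-in for a metric writer.
func writeMetric(id int) {
	registryLock.RLock()
	defer registryLock.RUnlock()
	time.Sleep(50 * time.Millisecond) // simulate an in-flight write
	fmt.Println("write", id, "finished")
}

func main() {
	for i := 0; i < 3; i++ {
		go writeMetric(i)
	}
	time.Sleep(10 * time.Millisecond) // demo shortcut: let writers grab read locks

	// Barrier: Lock() blocks until every read-locked writer has released.
	registryLock.Lock()
	registryLock.Unlock() //nolint:staticcheck // empty critical section is intentional
	fmt.Println("all writes drained; safe to persist metrics")
}

Unlike the removed broadcast-flag wait, the barrier cannot give up after a timeout and needs no extra signaling state.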


@@ -52,10 +52,18 @@ func EnableMetricPersistence(key string) error {
 	// Set storage key.
 	storageKey = key

+	return nil
+}
+
+func loadPersistentMetrics() error {
+	// Abort if storage is not enabled.
+	if storageInit.SetToIf(false, true) {
+		return nil
+	}
+
 	// Load metrics from storage.
 	var err error
-	storage, err = getMetricsStorage(key)
+	storage, err = getMetricsStorage(storageKey)
 	switch {
 	case err == nil:
 		// Continue.
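
The storageInit check reads as a compare-and-swap: assuming EnableMetricPersistence sets the flag (abool-style, where SetToIf(false, true) only succeeds if the value was still false), loadPersistentMetrics winning the false-to-true transition means persistence was never enabled, so it aborts. A rough sketch of the idiom using the standard library's sync/atomic.Bool in place of the abool-style flag; the function names here are illustrative, not portbase API:

package main

import (
	"fmt"
	"sync/atomic"
)

// storageInit mimics an abool-style flag; CompareAndSwap plays the role of
// SetToIf(false, true) and succeeds for exactly one caller.
var storageInit atomic.Bool

// enablePersistence is a hypothetical stand-in for EnableMetricPersistence.
func enablePersistence() {
	storageInit.Store(true)
}

// loadPersistent mirrors the abort logic above: winning the false-to-true
// swap means persistence was never enabled, so there is nothing to load.
func loadPersistent() {
	if storageInit.CompareAndSwap(false, true) {
		fmt.Println("persistence not enabled; skipping load")
		return
	}
	fmt.Println("loading persisted metrics...")
}

func main() {
	// Without enabling persistence first, the load is skipped:
	loadPersistent()

	// After enabling, the swap fails and the load proceeds:
	storageInit.Store(false) // reset for demonstration only
	enablePersistence()
	loadPersistent()
}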