Merge pull request #218 from safing/feature/key-reset-and-metrics-race-condition

Fix metrics race condition and add key reset method
Daniel Hovie 2023-09-19 16:58:58 +02:00 committed by GitHub
commit 900a654a4d
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 27 additions and 0 deletions


@@ -44,6 +44,13 @@ func (b *Base) SetKey(key string) {
	}
}

// ResetKey resets the database name and key.
// Use with caution!
func (b *Base) ResetKey() {
	b.dbName = ""
	b.dbKey = ""
}

// Key returns the key of the database record.
// As the key must be set before any usage and can only be set once, this
// function may be used without locking the record.
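
For context, a minimal usage sketch of the new ResetKey method. The ExampleRecord type and the reuse scenario are hypothetical; only SetKey, Key, KeyIsSet and ResetKey are taken from the existing record.Base API and the diff above.

package main

import (
	"fmt"

	"github.com/safing/portbase/database/record"
)

// ExampleRecord is a hypothetical record type embedding record.Base.
type ExampleRecord struct {
	record.Base
	Value string
}

func main() {
	r := &ExampleRecord{Value: "hello"}

	// Normally the key may be set only once per record.
	r.SetKey("cache:example/1")
	fmt.Println(r.KeyIsSet()) // true

	// ResetKey clears the database name and key so the struct can be
	// reused under a different key. Use with caution!
	r.ResetKey()
	r.SetKey("cache:example/2")
	fmt.Println(r.Key()) // cache:example/2
}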


@@ -11,6 +11,7 @@ import (
	"github.com/safing/portbase/api"
	"github.com/safing/portbase/config"
	"github.com/safing/portbase/log"
	"github.com/safing/portbase/utils"
)

func registerAPI() error {

@@ -139,7 +140,11 @@ func writeMetricsTo(ctx context.Context, url string) error {
	)
}

var metricsPusherDone = utils.NewBroadcastFlag()

func metricsWriter(ctx context.Context) error {
	defer metricsPusherDone.NotifyAndReset()

	pushURL := pushOption()
	ticker := module.NewSleepyTicker(1*time.Minute, 0)
	defer ticker.Stop()
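
A self-contained sketch of the broadcast-flag handshake introduced above, using only the utils calls visible in the diff (NewBroadcastFlag, NotifyAndReset, NewFlag, Refresh, IsSet, Signal); the pushOnce worker, the sleep and the printed messages are illustrative stand-ins for the real metrics pusher.

package main

import (
	"fmt"
	"time"

	"github.com/safing/portbase/utils"
)

// pusherDone mirrors metricsPusherDone from the diff above.
var pusherDone = utils.NewBroadcastFlag()

// pushOnce stands in for metricsWriter: when it returns, all waiters are notified.
func pushOnce() {
	defer pusherDone.NotifyAndReset()
	time.Sleep(100 * time.Millisecond) // simulated metrics push
}

func main() {
	// Create and refresh a flag before starting the worker, then wait for
	// the notification or give up after a timeout - the same pattern used
	// in stop() below.
	done := pusherDone.NewFlag()
	done.Refresh()

	go pushOnce()

	if !done.IsSet() {
		select {
		case <-done.Signal():
			fmt.Println("pusher finished")
		case <-time.After(10 * time.Second):
			fmt.Println("timed out waiting for pusher")
		}
	}
}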


@@ -5,6 +5,7 @@ import (
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/safing/portbase/modules"
)

@@ -78,6 +79,20 @@ func start() error {
	}
}

func stop() error {
	// Wait until the metrics pusher is done, as it may have started reporting
	// and may report a higher number than we store to disk. For persistent
	// metrics it can then happen that the first report after a restart is
	// lower than the previous report, making Prometheus think that all of it
	// happened since the last report, due to the automatic restart detection.
	done := metricsPusherDone.NewFlag()
	done.Refresh()
	if !done.IsSet() {
		select {
		case <-done.Signal():
		case <-time.After(10 * time.Second):
		}
	}

	storePersistentMetrics()
	return nil
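
To illustrate the comment in stop(): a rough approximation (not Prometheus's actual code) of how counter reset detection turns a persisted value that is lower than the last pushed one into a spuriously large increase.

package main

import "fmt"

// increase roughly mimics how Prometheus treats two consecutive counter
// samples: if the value drops, it assumes the counter was reset (e.g. by a
// restart) and counts the entire new value as increase.
func increase(prev, cur float64) float64 {
	if cur < prev {
		return cur // assumed reset: everything counts as new since the last report
	}
	return cur - prev
}

func main() {
	// The pusher reported 105 just before shutdown, but only 100 was
	// persisted to disk because stop() did not wait for the pusher.
	fmt.Println(increase(100, 105)) // regular increase: 5
	// After the restart the metric resumes at the persisted 100, which is
	// below the last report of 105, so the drop looks like a restart.
	fmt.Println(increase(105, 100)) // spurious increase: 100
}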