mirror of https://github.com/safing/portbase
synced 2025-09-01 01:59:48 +00:00
commit 3be9f93001
16 changed files with 265 additions and 77 deletions
@@ -7,13 +7,17 @@ import (
    "github.com/safing/portbase/modules"
)

var (
    module *modules.Module
)

// API Errors
var (
    ErrAuthenticationAlreadySet = errors.New("the authentication function has already been set")
)

func init() {
    modules.Register("api", prep, start, stop, "base", "database", "config")
    module = modules.Register("api", prep, start, stop, "base", "database", "config")
}

func prep() error {
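This hunk changes init() to keep the *modules.Module handle returned by modules.Register; the next file then uses that handle to run the HTTP listener as a worker. A minimal sketch of the same registration pattern in some other package (the package name and the empty hook bodies are placeholders; the Register call is used exactly as it appears in the diff above):

```go
package example

import (
	"github.com/safing/portbase/modules"
)

// module holds the handle returned by Register so that later code
// (workers, tasks, shutdown helpers) can be tied to this module.
var module *modules.Module

func init() {
	// As in the diff: name, prep/start/stop hooks, then dependency module names.
	module = modules.Register("example", prep, start, stop, "base", "database", "config")
}

func prep() error  { return nil }
func start() error { return nil }
func stop() error  { return nil }
```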
@@ -1,8 +1,10 @@
package api

import (
    "context"
    "net/http"
    "sync"
    "time"

    "github.com/gorilla/mux"

@@ -56,8 +58,20 @@ func Serve() {

    // start serving
    log.Infof("api: starting to listen on %s", server.Addr)
    // TODO: retry if failed
    log.Errorf("api: failed to listen on %s: %s", server.Addr, server.ListenAndServe())
    backoffDuration := 10 * time.Second
    for {
        // always returns an error
        err := module.RunWorker("http endpoint", func(ctx context.Context) error {
            return server.ListenAndServe()
        })
        // return on shutdown error
        if err == http.ErrServerClosed {
            return
        }
        // log error and restart
        log.Errorf("api: http endpoint failed: %s - restarting in %s", err, backoffDuration)
        time.Sleep(backoffDuration)
    }
}

// GetMuxVars wraps github.com/gorilla/mux.Vars in order to mitigate context key issues in multi-repo projects.
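For context, the change above replaces a single ListenAndServe call with a supervise-and-restart loop that only exits on a clean shutdown. A standalone sketch of the same pattern using only the standard library (the module.RunWorker wrapper from the diff is replaced by a direct call here, so this is an approximation of the idea, not the project's API):

```go
package main

import (
	"errors"
	"log"
	"net/http"
	"time"
)

func serveWithRestart(addr string) {
	server := &http.Server{Addr: addr}
	backoff := 10 * time.Second

	for {
		// ListenAndServe always returns a non-nil error.
		err := server.ListenAndServe()

		// ErrServerClosed means Shutdown/Close was called: stop supervising.
		if errors.Is(err, http.ErrServerClosed) {
			return
		}

		// Anything else is a failure: log it and retry after a fixed backoff.
		log.Printf("http endpoint failed: %s - restarting in %s", err, backoff)
		time.Sleep(backoff)
	}
}

func main() {
	serveWithRestart("127.0.0.1:8080")
}
```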
config/doc.go (new file, +2)
@@ -0,0 +1,2 @@
// Package config provides a versatile configuration management system.
package config
config/expertise.go (new file, +72)
@@ -0,0 +1,72 @@
// Package config ... (linter fix)
//nolint:dupl
package config

import (
    "fmt"
    "sync/atomic"
)

// Expertise Level constants
const (
    ExpertiseLevelUser      uint8 = 0
    ExpertiseLevelExpert    uint8 = 1
    ExpertiseLevelDeveloper uint8 = 2

    ExpertiseLevelNameUser      = "user"
    ExpertiseLevelNameExpert    = "expert"
    ExpertiseLevelNameDeveloper = "developer"

    expertiseLevelKey = "core/expertiseLevel"
)

var (
    expertiseLevel *int32
)

func init() {
    var expertiseLevelVal int32
    expertiseLevel = &expertiseLevelVal

    registerExpertiseLevelOption()
}

func registerExpertiseLevelOption() {
    err := Register(&Option{
        Name:        "Expertise Level",
        Key:         expertiseLevelKey,
        Description: "The Expertise Level controls the perceived complexity. Higher settings will show you more complex settings and information. This might also affect various other things relying on this setting. Modified settings in higher expertise levels stay in effect when switching back. (Unlike the Release Level)",

        OptType:        OptTypeString,
        ExpertiseLevel: ExpertiseLevelUser,
        ReleaseLevel:   ExpertiseLevelUser,

        RequiresRestart: false,
        DefaultValue:    ExpertiseLevelNameUser,

        ExternalOptType: "string list",
        ValidationRegex: fmt.Sprintf("^(%s|%s|%s)$", ExpertiseLevelNameUser, ExpertiseLevelNameExpert, ExpertiseLevelNameDeveloper),
    })
    if err != nil {
        panic(err)
    }
}

func updateExpertiseLevel() {
    new := findStringValue(expertiseLevelKey, "")
    switch new {
    case ExpertiseLevelNameUser:
        atomic.StoreInt32(expertiseLevel, int32(ExpertiseLevelUser))
    case ExpertiseLevelNameExpert:
        atomic.StoreInt32(expertiseLevel, int32(ExpertiseLevelExpert))
    case ExpertiseLevelNameDeveloper:
        atomic.StoreInt32(expertiseLevel, int32(ExpertiseLevelDeveloper))
    default:
        atomic.StoreInt32(expertiseLevel, int32(ExpertiseLevelUser))
    }
}

// GetExpertiseLevel returns the current active expertise level.
func GetExpertiseLevel() uint8 {
    return uint8(atomic.LoadInt32(expertiseLevel))
}
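The new file stores the level in an int32 behind a pointer so that readers and the config-change callback can use sync/atomic instead of a mutex. A reduced sketch of that pattern outside the config package (names here are illustrative, not the package's API):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Levels are ordered so that a simple numeric comparison works.
const (
	LevelUser      uint8 = 0
	LevelExpert    uint8 = 1
	LevelDeveloper uint8 = 2
)

// currentLevel is read and written atomically; int32 is used because
// sync/atomic has no uint8 operations.
var currentLevel int32

func setLevel(l uint8) { atomic.StoreInt32(&currentLevel, int32(l)) }
func getLevel() uint8  { return uint8(atomic.LoadInt32(&currentLevel)) }

// visible reports whether a setting that requires optLevel should be shown.
func visible(optLevel uint8) bool { return optLevel <= getLevel() }

func main() {
	setLevel(LevelExpert)
	fmt.Println(visible(LevelUser), visible(LevelDeveloper)) // true false
}
```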
@@ -81,21 +81,7 @@ func findValue(key string) interface{} {
    option.Lock()
    defer option.Unlock()

    // check if option is active
    optionActive := true
    switch getReleaseLevel() {
    case ReleaseLevelStable:
        // In stable, only stable is active
        optionActive = option.ReleaseLevel == ReleaseLevelStable
    case ReleaseLevelBeta:
        // In beta, only stable and beta are active
        optionActive = option.ReleaseLevel == ReleaseLevelStable || option.ReleaseLevel == ReleaseLevelBeta
    case ReleaseLevelExperimental:
        // In experimental, everything is active
        optionActive = true
    }

    if optionActive && option.activeValue != nil {
    if option.ReleaseLevel <= getReleaseLevel() && option.activeValue != nil {
        return option.activeValue
    }
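The string-based switch collapses into a single comparison because release levels are now ordered numbers (stable=0, beta=1, experimental=2, as defined later in this commit), so "option is available" becomes "the option's required level is at or below the active level". A tiny illustration of why the comparison matches the old switch:

```go
package main

import "fmt"

const (
	ReleaseLevelStable       uint8 = 0
	ReleaseLevelBeta         uint8 = 1
	ReleaseLevelExperimental uint8 = 2
)

// optionActive mirrors the new check in findValue: an option is active
// when its required release level does not exceed the currently set one.
func optionActive(optLevel, activeLevel uint8) bool {
	return optLevel <= activeLevel
}

func main() {
	// In beta, stable and beta options are active, experimental ones are not.
	fmt.Println(optionActive(ReleaseLevelStable, ReleaseLevelBeta))       // true
	fmt.Println(optionActive(ReleaseLevelBeta, ReleaseLevelBeta))         // true
	fmt.Println(optionActive(ReleaseLevelExperimental, ReleaseLevelBeta)) // false
}
```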
@@ -160,21 +160,21 @@ func TestReleaseLevel(t *testing.T) {

    // test option level stable
    subsystemOption.ReleaseLevel = ReleaseLevelStable
    err = SetConfigOption(releaseLevelKey, ReleaseLevelStable)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameStable)
    if err != nil {
        t.Fatal(err)
    }
    if !testSubsystem() {
        t.Error("should be active")
    }
    err = SetConfigOption(releaseLevelKey, ReleaseLevelBeta)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameBeta)
    if err != nil {
        t.Fatal(err)
    }
    if !testSubsystem() {
        t.Error("should be active")
    }
    err = SetConfigOption(releaseLevelKey, ReleaseLevelExperimental)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameExperimental)
    if err != nil {
        t.Fatal(err)
    }

@@ -184,21 +184,21 @@ func TestReleaseLevel(t *testing.T) {

    // test option level beta
    subsystemOption.ReleaseLevel = ReleaseLevelBeta
    err = SetConfigOption(releaseLevelKey, ReleaseLevelStable)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameStable)
    if err != nil {
        t.Fatal(err)
    }
    if testSubsystem() {
        t.Errorf("should be inactive: opt=%s system=%s", subsystemOption.ReleaseLevel, releaseLevel)
        t.Errorf("should be inactive: opt=%d system=%d", subsystemOption.ReleaseLevel, getReleaseLevel())
    }
    err = SetConfigOption(releaseLevelKey, ReleaseLevelBeta)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameBeta)
    if err != nil {
        t.Fatal(err)
    }
    if !testSubsystem() {
        t.Error("should be active")
    }
    err = SetConfigOption(releaseLevelKey, ReleaseLevelExperimental)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameExperimental)
    if err != nil {
        t.Fatal(err)
    }

@@ -208,21 +208,21 @@ func TestReleaseLevel(t *testing.T) {

    // test option level experimental
    subsystemOption.ReleaseLevel = ReleaseLevelExperimental
    err = SetConfigOption(releaseLevelKey, ReleaseLevelStable)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameStable)
    if err != nil {
        t.Fatal(err)
    }
    if testSubsystem() {
        t.Error("should be inactive")
    }
    err = SetConfigOption(releaseLevelKey, ReleaseLevelBeta)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameBeta)
    if err != nil {
        t.Fatal(err)
    }
    if testSubsystem() {
        t.Error("should be inactive")
    }
    err = SetConfigOption(releaseLevelKey, ReleaseLevelExperimental)
    err = SetConfigOption(releaseLevelKey, ReleaseLevelNameExperimental)
    if err != nil {
        t.Fatal(err)
    }
@@ -17,14 +17,6 @@ const (
    OptTypeStringArray uint8 = 2
    OptTypeInt         uint8 = 3
    OptTypeBool        uint8 = 4

    ExpertiseLevelUser      uint8 = 1
    ExpertiseLevelExpert    uint8 = 2
    ExpertiseLevelDeveloper uint8 = 3

    ReleaseLevelStable       = "stable"
    ReleaseLevelBeta         = "beta"
    ReleaseLevelExperimental = "experimental"
)

func getTypeName(t uint8) string {

@@ -50,9 +42,9 @@ type Option struct {
    Key         string // in path format: category/sub/key
    Description string

    ReleaseLevel   string
    ExpertiseLevel uint8
    OptType        uint8
    ExpertiseLevel uint8
    ReleaseLevel   uint8

    RequiresRestart bool
    DefaultValue    interface{}
@@ -1,7 +1,6 @@
package config

import (
    "errors"
    "fmt"
    "regexp"
    "sync"

@@ -10,21 +9,21 @@ import (
var (
    optionsLock sync.RWMutex
    options     = make(map[string]*Option)

    // ErrIncompleteCall is return when RegisterOption is called with empty mandatory values.
    ErrIncompleteCall = errors.New("could not register config option: all fields, except for the validationRegex are mandatory")
)

// Register registers a new configuration option.
func Register(option *Option) error {

    if option.Name == "" ||
        option.Key == "" ||
        option.Description == "" ||
        option.OptType == 0 ||
        option.ExpertiseLevel == 0 ||
        option.ReleaseLevel == "" {
        return ErrIncompleteCall
    if option.Name == "" {
        return fmt.Errorf("failed to register option: please set option.Name")
    }
    if option.Key == "" {
        return fmt.Errorf("failed to register option: please set option.Key")
    }
    if option.Description == "" {
        return fmt.Errorf("failed to register option: please set option.Description")
    }
    if option.OptType == 0 {
        return fmt.Errorf("failed to register option: please set option.OptType")
    }

    if option.ValidationRegex != "" {

@@ -37,7 +36,6 @@ func Register(option *Option) error {

    optionsLock.Lock()
    defer optionsLock.Unlock()

    options[option.Key] = option

    return nil
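Register validates options at registration time and, per the hunk above, now reports which mandatory field is missing instead of returning the single generic ErrIncompleteCall. A hedged sketch of the same idea for a stand-alone Option type (the field names and error texts follow the diff; the compiledRegex field and the rest are illustrative), including compiling ValidationRegex up front so a bad pattern fails at registration rather than later:

```go
package main

import (
	"fmt"
	"regexp"
)

type Option struct {
	Name            string
	Key             string
	OptType         uint8
	ValidationRegex string

	compiledRegex *regexp.Regexp
}

// register checks each mandatory field separately so the caller learns
// exactly what is missing, then compiles the optional validation regex.
func register(option *Option) error {
	if option.Name == "" {
		return fmt.Errorf("failed to register option: please set option.Name")
	}
	if option.Key == "" {
		return fmt.Errorf("failed to register option: please set option.Key")
	}
	if option.OptType == 0 {
		return fmt.Errorf("failed to register option: please set option.OptType")
	}

	if option.ValidationRegex != "" {
		var err error
		option.compiledRegex, err = regexp.Compile(option.ValidationRegex)
		if err != nil {
			return fmt.Errorf("config: invalid validation regex for %s: %w", option.Key, err)
		}
	}
	return nil
}

func main() {
	err := register(&Option{
		Name:            "Release Level",
		Key:             "core/releaseLevel",
		OptType:         1,
		ValidationRegex: "^(stable|beta|experimental)$",
	})
	fmt.Println(err) // <nil>
}
```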
@@ -1,38 +1,51 @@
// Package config ... (linter fix)
//nolint:dupl
package config

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// Release Level constants
const (
    releaseLevelKey = "core/release_level"
    ReleaseLevelStable       uint8 = 0
    ReleaseLevelBeta         uint8 = 1
    ReleaseLevelExperimental uint8 = 2

    ReleaseLevelNameStable       = "stable"
    ReleaseLevelNameBeta         = "beta"
    ReleaseLevelNameExperimental = "experimental"

    releaseLevelKey = "core/releaseLevel"
)

var (
    releaseLevel     = ReleaseLevelStable
    releaseLevelLock sync.Mutex
    releaseLevel *int32
)

func init() {
    var releaseLevelVal int32
    releaseLevel = &releaseLevelVal

    registerReleaseLevelOption()
}

func registerReleaseLevelOption() {
    err := Register(&Option{
        Name: "Release Selection",
        Name: "Release Level",
        Key:  releaseLevelKey,
        Description: "Select maturity level of features that should be available",
        Description: "The Release Level changes which features are available to you. Some beta or experimental features are also available in the stable release channel. Unavailable settings are set to the default value.",

        OptType:        OptTypeString,
        ExpertiseLevel: ExpertiseLevelExpert,
        ReleaseLevel:   ReleaseLevelStable,

        RequiresRestart: false,
        DefaultValue:    ReleaseLevelStable,
        DefaultValue:    ReleaseLevelNameStable,

        ExternalOptType: "string list",
        ValidationRegex: fmt.Sprintf("^(%s|%s|%s)$", ReleaseLevelStable, ReleaseLevelBeta, ReleaseLevelExperimental),
        ValidationRegex: fmt.Sprintf("^(%s|%s|%s)$", ReleaseLevelNameStable, ReleaseLevelNameBeta, ReleaseLevelNameExperimental),
    })
    if err != nil {
        panic(err)

@@ -41,17 +54,18 @@ func registerReleaseLevelOption() {

func updateReleaseLevel() {
    new := findStringValue(releaseLevelKey, "")
    releaseLevelLock.Lock()
    if new == "" {
        releaseLevel = ReleaseLevelStable
    } else {
        releaseLevel = new
    switch new {
    case ReleaseLevelNameStable:
        atomic.StoreInt32(releaseLevel, int32(ReleaseLevelStable))
    case ReleaseLevelNameBeta:
        atomic.StoreInt32(releaseLevel, int32(ReleaseLevelBeta))
    case ReleaseLevelNameExperimental:
        atomic.StoreInt32(releaseLevel, int32(ReleaseLevelExperimental))
    default:
        atomic.StoreInt32(releaseLevel, int32(ReleaseLevelStable))
    }
    releaseLevelLock.Unlock()
}

func getReleaseLevel() string {
    releaseLevelLock.Lock()
    defer releaseLevelLock.Unlock()
    return releaseLevel
func getReleaseLevel() uint8 {
    return uint8(atomic.LoadInt32(releaseLevel))
}
@@ -36,8 +36,9 @@ func Changed() <-chan struct{} {
}

func signalChanges() {
    // refetch and save release level
    // refetch and save release level and expertise level
    updateReleaseLevel()
    updateExpertiseLevel()

    // reset validity flag
    validityFlagLock.Lock()
@@ -47,12 +47,22 @@ func SetMaxConcurrentMicroTasks(n int) {

// StartMicroTask starts a new MicroTask with high priority. It will start immediately. The given function will be executed and panics caught. The supplied name should be a constant - the variable should never change as it won't be copied.
func (m *Module) StartMicroTask(name *string, fn func(context.Context) error) error {
    if m == nil {
        log.Errorf(`modules: cannot start microtask "%s" with nil module`, *name)
        return errNoModule
    }

    atomic.AddInt32(microTasks, 1)
    return m.runMicroTask(name, fn)
}

// StartMediumPriorityMicroTask starts a new MicroTask with medium priority. It will wait until given a go (max 3 seconds). The given function will be executed and panics caught. The supplied name should be a constant - the variable should never change as it won't be copied.
func (m *Module) StartMediumPriorityMicroTask(name *string, fn func(context.Context) error) error {
    if m == nil {
        log.Errorf(`modules: cannot start microtask "%s" with nil module`, *name)
        return errNoModule
    }

    // check if we can go immediately
    select {
    case <-mediumPriorityClearance:

@@ -68,6 +78,11 @@ func (m *Module) StartMediumPriorityMicroTask(name *string, fn func(context.Cont

// StartLowPriorityMicroTask starts a new MicroTask with low priority. It will wait until given a go (max 15 seconds). The given function will be executed and panics caught. The supplied name should be a constant - the variable should never change as it won't be copied.
func (m *Module) StartLowPriorityMicroTask(name *string, fn func(context.Context) error) error {
    if m == nil {
        log.Errorf(`modules: cannot start microtask "%s" with nil module`, *name)
        return errNoModule
    }

    // check if we can go immediately
    select {
    case <-lowPriorityClearance:
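All of these entry points gain the same guard: methods on *Module tolerate a nil receiver by logging and returning a shared sentinel error instead of dereferencing nil. A compact sketch of that pattern outside the modules package (the Module type, method name and errNoModule value here are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

var errNoModule = errors.New("missing module (is nil!)")

type Module struct {
	Name string
}

// RunTask guards against a nil receiver: calling a method on a nil
// pointer is legal in Go, so the check turns a would-be nil-pointer
// panic into a normal error the caller can handle.
func (m *Module) RunTask(name string, fn func() error) error {
	if m == nil {
		return fmt.Errorf("cannot run task %q: %w", name, errNoModule)
	}
	return fn()
}

func main() {
	var m *Module // nil: e.g. the module was never registered
	err := m.RunTask("cleanup", func() error { return nil })
	fmt.Println(err) // cannot run task "cleanup": missing module (is nil!)
}
```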
@@ -67,14 +67,32 @@ func (m *Module) shutdown() error {
    m.shutdownFlag.Set()
    m.cancelCtx()

    // start shutdown function
    m.waitGroup.Add(1)
    stopFnError := make(chan error)
    go func() {
        stopFnError <- m.runModuleCtrlFn("stop module", m.stop)
        m.waitGroup.Done()
    }()

    // wait for workers
    done := make(chan struct{})
    go func() {
        m.waitGroup.Wait()
        close(done)
    }()

    // wait for results
    select {
    case err := <-stopFnError:
        return err
    case <-done:
        select {
        case err := <-stopFnError:
            return err
        default:
            return nil
        }
    case <-time.After(3 * time.Second):
        log.Warningf(
            "%s: timed out while waiting for workers/tasks to finish: workers=%d tasks=%d microtasks=%d, continuing shutdown...",

@@ -84,9 +102,7 @@ func (m *Module) shutdown() error {
            atomic.LoadInt32(m.microTaskCnt),
        )
    }

    // call shutdown function
    return m.stop()
    return nil
}

func dummyAction() error {
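The reworked shutdown runs the module's stop function and the worker WaitGroup concurrently, then selects on whichever finishes first, with a 3-second timeout so a stuck worker cannot block shutdown forever. A self-contained sketch of the underlying "wait for a WaitGroup, but give up after a timeout" building block (names and durations are illustrative):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitTimeout waits for wg, but returns false if the timeout elapses first.
// The helper goroutine closes done once Wait returns, so the select can race
// the WaitGroup against the timer.
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		return true
	case <-time.After(timeout):
		return false
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(100 * time.Millisecond) // a worker finishing its last job
	}()

	if waitTimeout(&wg, 3*time.Second) {
		fmt.Println("all workers finished")
	} else {
		fmt.Println("timed out, continuing shutdown anyway")
	}
}
```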
@@ -106,7 +106,7 @@ func prepareModules() error {
    go func() {
        reports <- &report{
            module: execM,
            err:    execM.prep(),
            err:    execM.runModuleCtrlFn("prep module", execM.prep),
        }
    }()
}

@@ -154,7 +154,7 @@ func startModules() error {
    go func() {
        reports <- &report{
            module: execM,
            err:    execM.start(),
            err:    execM.runModuleCtrlFn("start module", execM.start),
        }
    }()
}
@@ -3,6 +3,7 @@ package modules
import (
    "container/list"
    "context"
    "fmt"
    "sync"
    "sync/atomic"
    "time"

@@ -59,6 +60,10 @@ const (

// NewTask creates a new task with a descriptive name (non-unique), a optional deadline, and the task function to be executed. You must call one of Queue, Prioritize, StartASAP, Schedule or Repeat in order to have the Task executed.
func (m *Module) NewTask(name string, fn func(context.Context, *Task)) *Task {
    if m == nil {
        log.Errorf(`modules: cannot create task "%s" with nil module`, name)
    }

    return &Task{
        name:   name,
        module: m,

@@ -68,6 +73,10 @@ func (m *Module) NewTask(name string, fn func(context.Context, *Task)) *Task {
}

func (t *Task) isActive() bool {
    if t.module == nil {
        return false
    }

    return !t.canceled && !t.module.ShutdownInProgress()
}

@@ -178,6 +187,7 @@ func (t *Task) Repeat(interval time.Duration) *Task {
    t.lock.Lock()
    t.repeat = interval
    t.executeAt = time.Now().Add(t.repeat)
    t.addToSchedule()
    t.lock.Unlock()

    return t

@@ -194,6 +204,10 @@ func (t *Task) Cancel() {
}

func (t *Task) runWithLocking() {
    if t.module == nil {
        return
    }

    // wait for good timeslot regarding microtasks
    select {
    case <-taskTimeslot:

@@ -308,6 +322,7 @@ func (t *Task) getExecuteAtWithLocking() time.Time {
func (t *Task) addToSchedule() {
    scheduleLock.Lock()
    defer scheduleLock.Unlock()
    // defer printTaskList(taskSchedule) // for debugging

    // notify scheduler
    defer func() {

@@ -439,3 +454,23 @@ func taskScheduleHandler() {
        }
    }
}

func printTaskList(*list.List) { //nolint:unused,deadcode // for debugging, NOT production use
    fmt.Println("Modules Task List:")
    for e := taskSchedule.Front(); e != nil; e = e.Next() {
        t, ok := e.Value.(*Task)
        if ok {
            fmt.Printf(
                "%s:%s qu=%v ca=%v exec=%v at=%s rep=%s delay=%s\n",
                t.module.Name,
                t.name,
                t.queued,
                t.canceled,
                t.executing,
                t.executeAt,
                t.repeat,
                t.maxDelay,
            )
        }
    }
}
@@ -2,6 +2,7 @@ package modules

import (
    "context"
    "errors"
    "sync/atomic"
    "time"

@@ -13,8 +14,17 @@ const (
    DefaultBackoffDuration = 2 * time.Second
)

var (
    errNoModule = errors.New("missing module (is nil!)")
)

// RunWorker directly runs a generic worker that does not fit to be a Task or MicroTask, such as long running (and possibly mostly idle) sessions. A call to RunWorker blocks until the worker is finished.
func (m *Module) RunWorker(name string, fn func(context.Context) error) error {
    if m == nil {
        log.Errorf(`modules: cannot start worker "%s" with nil module`, name)
        return errNoModule
    }

    atomic.AddInt32(m.workerCnt, 1)
    m.waitGroup.Add(1)
    defer func() {

@@ -27,6 +37,11 @@ func (m *Module) RunWorker(name string, fn func(context.Context) error) error {

// StartServiceWorker starts a generic worker, which is automatically restarted in case of an error. A call to StartServiceWorker runs the service-worker in a new goroutine and returns immediately. `backoffDuration` specifies how to long to wait before restarts, multiplied by the number of failed attempts. Pass `0` for the default backoff duration. For custom error remediation functionality, build your own error handling procedure using calls to RunWorker.
func (m *Module) StartServiceWorker(name string, backoffDuration time.Duration, fn func(context.Context) error) {
    if m == nil {
        log.Errorf(`modules: cannot start service worker "%s" with nil module`, name)
        return
    }

    go m.runServiceWorker(name, backoffDuration, fn)
}

@@ -42,6 +57,7 @@ func (m *Module) runServiceWorker(name string, backoffDuration time.Duration, fn
        backoffDuration = DefaultBackoffDuration
    }
    failCnt := 0
    lastFail := time.Now()

    for {
        if m.ShutdownInProgress() {

@@ -50,11 +66,18 @@ func (m *Module) runServiceWorker(name string, backoffDuration time.Duration, fn

        err := m.runWorker(name, fn)
        if err != nil {
            // log error and restart
            // reset fail counter if running without error for some time
            if time.Now().Add(-5 * time.Minute).After(lastFail) {
                failCnt = 0
            }
            // increase fail counter and set last failed time
            failCnt++
            lastFail = time.Now()
            // log error
            sleepFor := time.Duration(failCnt) * backoffDuration
            log.Errorf("%s: service-worker %s failed (%d): %s - restarting in %s", m.Name, name, failCnt, err, sleepFor)
            time.Sleep(sleepFor)
            // loop to restart
        } else {
            // finish
            return

@@ -77,3 +100,19 @@ func (m *Module) runWorker(name string, fn func(context.Context) error) (err err
    err = fn(m.Ctx)
    return
}

func (m *Module) runModuleCtrlFn(name string, fn func() error) (err error) {
    defer func() {
        // recover from panic
        panicVal := recover()
        if panicVal != nil {
            me := m.NewPanicError(name, "module-control", panicVal)
            me.Report()
            err = me
        }
    }()

    // run
    err = fn()
    return
}
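runModuleCtrlFn is the new common wrapper for a module's prep/start/stop hooks: a deferred recover plus a named return value turn a panic inside the hook into an ordinary returned error. A minimal standalone sketch of that mechanism (the PanicError type from the diff is simplified to a plain error here):

```go
package main

import (
	"fmt"
)

// runCtrlFn runs fn and, thanks to the named return value, can overwrite
// err from inside the deferred recover when fn panics.
func runCtrlFn(name string, fn func() error) (err error) {
	defer func() {
		if panicVal := recover(); panicVal != nil {
			err = fmt.Errorf("%s: panic: %v", name, panicVal)
		}
	}()

	return fn()
}

func main() {
	err := runCtrlFn("start module", func() error {
		panic("something went badly wrong")
	})
	fmt.Println(err) // start module: panic: something went badly wrong
}
```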
@@ -11,7 +11,7 @@ var (
)

func init() {
    modules.Register("notifications", nil, start, nil, "base", "database")
    module = modules.Register("notifications", nil, start, nil, "base", "database")
}

func start() error {