safing-portmaster/service/mgr/workermgr_test.go
Daniel Hååvi 80664d1a27
Restructure modules ()
* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
2024-08-09 18:15:48 +03:00

package mgr

import (
	"sync/atomic"
	"testing"
	"time"
)

func TestWorkerMgrDelay(t *testing.T) {
	t.Parallel()
	m := New("DelayTest")
	value := atomic.Bool{}
	value.Store(false)
	// Create a task that will run once after 1 second.
	m.NewWorkerMgr("test", func(w *WorkerCtx) error {
		value.Store(true)
		return nil
	}, nil).Delay(1 * time.Second)
	// Check that the value is set after about 1 second, not earlier or later.
	iterations := 0
	for !value.Load() {
		iterations++
		time.Sleep(10 * time.Millisecond)
	}
	// A 5% deviation is acceptable, since time.Sleep is not perfectly accurate and timing may vary between computers.
	if iterations < 95 || iterations > 105 {
		t.Errorf("WorkerMgr did not delay for a whole second it=%d", iterations)
	}
}
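
// delayedFlagSketch is an illustrative sketch, not part of the mgr API: the
// one-shot semantics exercised in TestWorkerMgrDelay resemble the standard
// library's time.AfterFunc, which runs a function once after a duration.
// The function name and signature here are hypothetical.
func delayedFlagSketch(value *atomic.Bool) *time.Timer {
	// Set the flag once, 1 second from now; Stop the returned timer to cancel.
	return time.AfterFunc(1*time.Second, func() { value.Store(true) })
}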

func TestWorkerMgrRepeat(t *testing.T) {
	t.Parallel()
	m := New("RepeatTest")
	value := atomic.Bool{}
	value.Store(false)
	// Create a task that should repeat every 100 milliseconds.
	m.NewWorkerMgr("test", func(w *WorkerCtx) error {
		value.Store(true)
		return nil
	}, nil).Repeat(100 * time.Millisecond)
	// Check 10 consecutive runs; each should be delayed by around 100 milliseconds.
	for range 10 {
		iterations := 0
		for !value.Load() {
			iterations++
			time.Sleep(10 * time.Millisecond)
		}
		// A 10% deviation is acceptable at this scale, since time.Sleep is not perfectly accurate and timing may vary between computers.
		if iterations < 9 || iterations > 11 {
			t.Errorf("Worker was not delayed for 100 milliseconds it=%d", iterations)
			return
		}
		// Reset value.
		value.Store(false)
	}
}
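
// repeatFlagSketch is an illustrative sketch, not part of the mgr API: the
// repeating semantics exercised in TestWorkerMgrRepeat resemble a time.Ticker
// loop that fires every 100 milliseconds until stopped. The function name,
// signature, and stop channel are hypothetical.
func repeatFlagSketch(value *atomic.Bool, stop <-chan struct{}) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			value.Store(true) // Signal one tick, like one worker run above.
		case <-stop:
			return
		}
	}
}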

func TestWorkerMgrDelayAndRepeat(t *testing.T) { //nolint:dupl
	t.Parallel()
	m := New("DelayAndRepeatTest")
	value := atomic.Bool{}
	value.Store(false)
	// Create a task that should delay for 1 second and then repeat every 100 milliseconds.
	m.NewWorkerMgr("test", func(w *WorkerCtx) error {
		value.Store(true)
		return nil
	}, nil).Delay(1 * time.Second).Repeat(100 * time.Millisecond)
	iterations := 0
	for !value.Load() {
		iterations++
		time.Sleep(10 * time.Millisecond)
	}
	// A 5% deviation is acceptable, since time.Sleep is not perfectly accurate and timing may vary between computers.
	if iterations < 95 || iterations > 105 {
		t.Errorf("WorkerMgr did not delay for a whole second it=%d", iterations)
	}
	// Reset value.
	value.Store(false)
	// Check 10 consecutive runs; each should be delayed by around 100 milliseconds.
	for range 10 {
		iterations = 0
		for !value.Load() {
			iterations++
			time.Sleep(10 * time.Millisecond)
		}
		// A 10% deviation is acceptable at this scale, since time.Sleep is not perfectly accurate and timing may vary between computers.
		if iterations < 9 || iterations > 11 {
			t.Errorf("Worker was not delayed for 100 milliseconds it=%d", iterations)
			return
		}
		// Reset value.
		value.Store(false)
	}
}

func TestWorkerMgrRepeatAndDelay(t *testing.T) { //nolint:dupl
	t.Parallel()
	m := New("RepeatAndDelayTest")
	value := atomic.Bool{}
	value.Store(false)
	// Create a task that should delay for 1 second and then repeat every 100 milliseconds, but with the calls in reverse order.
	m.NewWorkerMgr("test", func(w *WorkerCtx) error {
		value.Store(true)
		return nil
	}, nil).Repeat(100 * time.Millisecond).Delay(1 * time.Second)
	iterations := 0
	for !value.Load() {
		iterations++
		time.Sleep(10 * time.Millisecond)
	}
	// A 5% deviation is acceptable, since time.Sleep is not perfectly accurate and timing may vary between computers.
	if iterations < 95 || iterations > 105 {
		t.Errorf("WorkerMgr did not delay for a whole second it=%d", iterations)
	}
	// Reset value.
	value.Store(false)
	// Check 10 consecutive runs; each should be delayed by around 100 milliseconds.
	for range 10 {
		iterations := 0
		for !value.Load() {
			iterations++
			time.Sleep(10 * time.Millisecond)
		}
		// A 10% deviation is acceptable at this scale, since time.Sleep is not perfectly accurate and timing may vary between computers.
		if iterations < 9 || iterations > 11 {
			t.Errorf("Worker was not delayed for 100 milliseconds it=%d", iterations)
			return
		}
		// Reset value.
		value.Store(false)
	}
}
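
// waitForFlag is a hypothetical helper consolidating the poll-and-count loop
// that the four tests above repeat; a minimal sketch, assuming the same
// 10 millisecond polling interval and expressing the tolerance as a fraction
// of the expected iteration count. The name and signature are not part of
// the mgr API.
func waitForFlag(t *testing.T, value *atomic.Bool, expectedIterations int, tolerance float64) {
	t.Helper()
	// Poll until the worker sets the flag, counting sleep intervals.
	iterations := 0
	for !value.Load() {
		iterations++
		time.Sleep(10 * time.Millisecond)
	}
	// Accept a deviation of tolerance*expectedIterations, since time.Sleep
	// is not perfectly accurate and timing varies between computers.
	low := expectedIterations - int(float64(expectedIterations)*tolerance)
	high := expectedIterations + int(float64(expectedIterations)*tolerance)
	if iterations < low || iterations > high {
		t.Errorf("expected around %d iterations, got %d", expectedIterations, iterations)
	}
	// Reset the flag for the next measurement.
	value.Store(false)
}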