mirror of
https://github.com/safing/portmaster
synced 2025-04-19 10:29:11 +00:00
* Move portbase into monorepo * Add new simple module mgr * [WIP] Switch to new simple module mgr * Add StateMgr and more worker variants * [WIP] Switch more modules * [WIP] Switch more modules * [WIP] switch more modules * [WIP] switch all SPN modules * [WIP] switch all service modules * [WIP] Convert all workers to the new module system * [WIP] add new task system to module manager * [WIP] Add second take for scheduling workers * [WIP] Add FIXME for bugs in new scheduler * [WIP] Add minor improvements to scheduler * [WIP] Add new worker scheduler * [WIP] Fix more bugs related to new module system * [WIP] Fix start handling of the new module system * [WIP] Improve startup process * [WIP] Fix minor issues * [WIP] Fix missing subsystem in settings * [WIP] Initialize managers in constructor * [WIP] Move module event initialization to constructors * [WIP] Fix setting for enabling and disabling the SPN module * [WIP] Move API registration into module construction * [WIP] Update states mgr for all modules * [WIP] Add CmdLine operation support * Add state helper methods to module group and instance * Add notification and module status handling to status package * Fix starting issues * Remove pilot widget and update security lock to new status data * Remove debug logs * Improve http server shutdown * Add workaround for cleanly shutting down firewall+netquery * Improve logging * Add syncing states with notifications for new module system * Improve starting, stopping, shutdown; resolve FIXMEs/TODOs * [WIP] Fix most unit tests * Review new module system and fix minor issues * Push shutdown and restart events again via API * Set sleep mode via interface * Update example/template module * [WIP] Fix spn/cabin unit test * Remove deprecated UI elements * Make log output more similar for the logging transition phase * Switch spn hub and observer cmds to new module system * Fix log sources * Make worker mgr less error prone * Fix tests and minor issues * Fix observation hub * 
Improve shutdown and restart handling * Split up big connection.go source file * Move varint and dsd packages to structures repo * Improve expansion test * Fix linter warnings * Fix interception module on windows * Fix linter errors --------- Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
118 lines
2.2 KiB
Go
118 lines
2.2 KiB
Go
package utils
|
|
|
|
import (
|
|
"fmt"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/stretchr/testify/assert"
|
|
)
|
|
|
|
func TestStablePoolRealWorld(t *testing.T) {
|
|
t.Parallel()
|
|
// "real world" simulation
|
|
|
|
cnt := 0
|
|
testPool := &StablePool{
|
|
New: func() interface{} {
|
|
cnt++
|
|
return cnt
|
|
},
|
|
}
|
|
var testWg sync.WaitGroup
|
|
var testWorkerWg sync.WaitGroup
|
|
|
|
// for i := 0; i < 100; i++ {
|
|
// cnt++
|
|
// testPool.Put(cnt)
|
|
// }
|
|
for range 100 {
|
|
// block round
|
|
testWg.Add(1)
|
|
// add workers
|
|
testWorkerWg.Add(100)
|
|
for j := range 100 {
|
|
go func() {
|
|
// wait for round to start
|
|
testWg.Wait()
|
|
// get value
|
|
x := testPool.Get()
|
|
// fmt.Println(x)
|
|
// "work"
|
|
time.Sleep(5 * time.Microsecond)
|
|
// re-insert 99%
|
|
if j%100 > 0 {
|
|
testPool.Put(x)
|
|
}
|
|
// mark as finished
|
|
testWorkerWg.Done()
|
|
}()
|
|
}
|
|
// start round
|
|
testWg.Done()
|
|
// wait for round to finish
|
|
testWorkerWg.Wait()
|
|
}
|
|
t.Logf("real world simulation: cnt=%d p.cnt=%d p.max=%d\n", cnt, testPool.Size(), testPool.Max())
|
|
assert.GreaterOrEqual(t, 200, cnt, "should not use more than 200 values")
|
|
assert.GreaterOrEqual(t, 100, testPool.Max(), "pool should have at most this max size")
|
|
|
|
// optimal usage test
|
|
|
|
optPool := &StablePool{}
|
|
for range 1000 {
|
|
for j := range 100 {
|
|
optPool.Put(j)
|
|
}
|
|
for k := range 100 {
|
|
assert.Equal(t, k, optPool.Get(), "should match")
|
|
}
|
|
}
|
|
assert.Equal(t, 100, optPool.Max(), "pool should have exactly this max size")
|
|
}
|
|
|
|
func TestStablePoolFuzzing(t *testing.T) {
|
|
t.Parallel()
|
|
// fuzzing test
|
|
|
|
fuzzPool := &StablePool{}
|
|
var fuzzWg sync.WaitGroup
|
|
var fuzzWorkerWg sync.WaitGroup
|
|
// start goroutines and wait
|
|
fuzzWg.Add(1)
|
|
for i := range 1000 {
|
|
fuzzWorkerWg.Add(2)
|
|
go func() {
|
|
fuzzWg.Wait()
|
|
fuzzPool.Put(i)
|
|
fuzzWorkerWg.Done()
|
|
}()
|
|
go func() {
|
|
fuzzWg.Wait()
|
|
fmt.Print(fuzzPool.Get())
|
|
fuzzWorkerWg.Done()
|
|
}()
|
|
}
|
|
// kick off
|
|
fuzzWg.Done()
|
|
// wait for all to finish
|
|
fuzzWorkerWg.Wait()
|
|
}
|
|
|
|
func TestStablePoolBreaking(t *testing.T) {
|
|
t.Parallel()
|
|
// try to break it
|
|
|
|
breakPool := &StablePool{}
|
|
for range 10 {
|
|
for j := range 100 {
|
|
breakPool.Put(nil)
|
|
breakPool.Put(j)
|
|
breakPool.Put(nil)
|
|
}
|
|
for k := range 100 {
|
|
assert.Equal(t, k, breakPool.Get(), "should match")
|
|
}
|
|
}
|
|
}
|