Mirror of https://github.com/safing/portmaster
Synced 2025-09-10 23:14:35 +00:00
Restructure modules (#1572)
* Move portbase into monorepo
* Add new simple module mgr
* [WIP] Switch to new simple module mgr
* Add StateMgr and more worker variants
* [WIP] Switch more modules
* [WIP] Switch more modules
* [WIP] Switch more modules
* [WIP] Switch all SPN modules
* [WIP] Switch all service modules
* [WIP] Convert all workers to the new module system
* [WIP] Add new task system to module manager
* [WIP] Add second take for scheduling workers
* [WIP] Add FIXME for bugs in new scheduler
* [WIP] Add minor improvements to scheduler
* [WIP] Add new worker scheduler
* [WIP] Fix more bugs related to new module system
* [WIP] Fix start handling of the new module system
* [WIP] Improve startup process
* [WIP] Fix minor issues
* [WIP] Fix missing subsystem in settings
* [WIP] Initialize managers in constructor
* [WIP] Move module event initialization to constructors
* [WIP] Fix setting for enabling and disabling the SPN module
* [WIP] Move API registration into module construction
* [WIP] Update states mgr for all modules
* [WIP] Add CmdLine operation support
* Add state helper methods to module group and instance
* Add notification and module status handling to status package
* Fix starting issues
* Remove pilot widget and update security lock to new status data
* Remove debug logs
* Improve HTTP server shutdown
* Add workaround for cleanly shutting down firewall+netquery
* Improve logging
* Add syncing states with notifications for new module system
* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs
* [WIP] Fix most unit tests
* Review new module system and fix minor issues
* Push shutdown and restart events again via API
* Set sleep mode via interface
* Update example/template module
* [WIP] Fix spn/cabin unit test
* Remove deprecated UI elements
* Make log output more similar for the logging transition phase
* Switch spn hub and observer cmds to new module system
* Fix log sources
* Make worker mgr less error-prone
* Fix tests and minor issues
* Fix observation hub
* Improve shutdown and restart handling
* Split up big connection.go source file
* Move varint and dsd packages to structures repo
* Improve expansion test
* Fix linter warnings
* Fix interception module on windows
* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
parent 10a77498f4
commit 80664d1a27
647 changed files with 37690 additions and 3366 deletions
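The test file added below exercises the new module manager introduced by this commit. For orientation, here is a minimal sketch of the worker pattern as it appears in the diff: mgr.New creates a manager, m.Go starts a named worker that receives a *mgr.WorkerCtx, and m.Cancel signals shutdown. Everything beyond those three calls, including the package, function, and worker names, is an assumption for illustration and not part of this commit.

package mgrsketch

import (
	"log"

	"github.com/safing/portmaster/service/mgr"
)

// runSketch shows the worker lifecycle used by the benchmark below:
// create a manager, start a named worker, then cancel it again.
func runSketch() {
	m := mgr.New("sketch")

	m.Go("sketch worker", func(wc *mgr.WorkerCtx) error {
		// The benchmark hands wc straight to a long-running helper
		// (db.DelayedCacheWriter); this sketch only logs and returns.
		log.Println("worker started")
		return nil
	})

	// Cancel signals the workers started via this manager to stop.
	m.Cancel()
}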
base/database/interface_cache_test.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package database

import (
	"fmt"
	"strconv"
	"sync"
	"testing"

	"github.com/safing/portmaster/service/mgr"
)

func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo,thelper
	b.Run(fmt.Sprintf("CacheWriting_%s_%d_%d_%v", storageType, cacheSize, sampleSize, delayWrites), func(b *testing.B) {
		// Setup Benchmark.

		// Create database.
		dbName := fmt.Sprintf("cache-w-benchmark-%s-%d-%d-%v", storageType, cacheSize, sampleSize, delayWrites)
		_, err := Register(&Database{
			Name:        dbName,
			Description: fmt.Sprintf("Cache Benchmark Database for %s", storageType),
			StorageType: storageType,
		})
		if err != nil {
			b.Fatal(err)
		}

		// Create benchmark interface.
		options := &Options{
			Local:     true,
			Internal:  true,
			CacheSize: cacheSize,
		}
		if cacheSize > 0 && delayWrites {
			options.DelayCachedWrites = dbName
		}
		db := NewInterface(options)

		// Start the delayed cache writer worker, if needed.
		m := mgr.New("Cache writing benchmark test")
		var wg sync.WaitGroup
		if cacheSize > 0 && delayWrites {
			wg.Add(1)
			m.Go("Cache writing benchmark worker", func(wc *mgr.WorkerCtx) error {
				err := db.DelayedCacheWriter(wc)
				if err != nil {
					panic(err)
				}
				wg.Done()
				return nil
			})
		}

		// Start Benchmark.
		b.ResetTimer()
		for i := range b.N {
			testRecordID := i % sampleSize
			r := NewExample(
				dbName+":"+strconv.Itoa(testRecordID),
				"A",
				1,
			)
			err = db.Put(r)
			if err != nil {
				b.Fatal(err)
			}
		}

		// End cache writer and wait.
		m.Cancel()
		wg.Wait()
	})
}

func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo,thelper
	b.Run(fmt.Sprintf("CacheReadWrite_%s_%d_%d_%v", storageType, cacheSize, sampleSize, delayWrites), func(b *testing.B) {
		// Setup Benchmark.

		// Create database.
		dbName := fmt.Sprintf("cache-rw-benchmark-%s-%d-%d-%v", storageType, cacheSize, sampleSize, delayWrites)
		_, err := Register(&Database{
			Name:        dbName,
			Description: fmt.Sprintf("Cache Benchmark Database for %s", storageType),
			StorageType: storageType,
		})
		if err != nil {
			b.Fatal(err)
		}

		// Create benchmark interface.
		options := &Options{
			Local:     true,
			Internal:  true,
			CacheSize: cacheSize,
		}
		if cacheSize > 0 && delayWrites {
			options.DelayCachedWrites = dbName
		}
		db := NewInterface(options)

		// Start the delayed cache writer worker, if needed.
		m := mgr.New("Cache read/write benchmark test")
		var wg sync.WaitGroup
		if cacheSize > 0 && delayWrites {
			wg.Add(1)
			m.Go("Cache read/write benchmark worker", func(wc *mgr.WorkerCtx) error {
				err := db.DelayedCacheWriter(wc)
				if err != nil {
					panic(err)
				}
				wg.Done()
				return nil
			})
		}

		// Start Benchmark.
		b.ResetTimer()
		writing := true
		for i := range b.N {
			testRecordID := i % sampleSize
			key := dbName + ":" + strconv.Itoa(testRecordID)

			if i > 0 && testRecordID == 0 {
				writing = !writing // Switch between reading and writing every sampleSize records.
			}

			if writing {
				r := NewExample(key, "A", 1)
				err = db.Put(r)
			} else {
				_, err = db.Get(key)
			}
			if err != nil {
				b.Fatal(err)
			}
		}

		// End cache writer and wait.
		m.Cancel()
		wg.Wait()
	})
}

func BenchmarkCache(b *testing.B) {
	for _, storageType := range []string{"bbolt", "hashmap"} {
		benchmarkCacheWriting(b, storageType, 32, 8, false)
		benchmarkCacheWriting(b, storageType, 32, 8, true)
		benchmarkCacheWriting(b, storageType, 32, 1024, false)
		benchmarkCacheWriting(b, storageType, 32, 1024, true)
		benchmarkCacheWriting(b, storageType, 512, 1024, false)
		benchmarkCacheWriting(b, storageType, 512, 1024, true)

		benchmarkCacheReadWrite(b, storageType, 32, 8, false)
		benchmarkCacheReadWrite(b, storageType, 32, 8, true)
		benchmarkCacheReadWrite(b, storageType, 32, 1024, false)
		benchmarkCacheReadWrite(b, storageType, 32, 1024, true)
		benchmarkCacheReadWrite(b, storageType, 512, 1024, false)
		benchmarkCacheReadWrite(b, storageType, 512, 1024, true)
	}
}
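These benchmarks can be run with standard Go tooling from the repository root, for example: go test -bench BenchmarkCache -run '^$' ./base/database/ (the package path is taken from the file location above).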