mirror of
https://github.com/safing/portmaster
synced 2025-09-01 18:19:12 +00:00
* Move portbase into monorepo * Add new simple module mgr * [WIP] Switch to new simple module mgr * Add StateMgr and more worker variants * [WIP] Switch more modules * [WIP] Switch more modules * [WIP] switch more modules * [WIP] switch all SPN modules * [WIP] switch all service modules * [WIP] Convert all workers to the new module system * [WIP] add new task system to module manager * [WIP] Add second take for scheduling workers * [WIP] Add FIXME for bugs in new scheduler * [WIP] Add minor improvements to scheduler * [WIP] Add new worker scheduler * [WIP] Fix more bugs related to new module system * [WIP] Fix start handling of the new module system * [WIP] Improve startup process * [WIP] Fix minor issues * [WIP] Fix missing subsystem in settings * [WIP] Initialize managers in constructor * [WIP] Move module event initialization to constructors * [WIP] Fix setting for enabling and disabling the SPN module * [WIP] Move API registration into module construction * [WIP] Update states mgr for all modules * [WIP] Add CmdLine operation support * Add state helper methods to module group and instance * Add notification and module status handling to status package * Fix starting issues * Remove pilot widget and update security lock to new status data * Remove debug logs * Improve http server shutdown * Add workaround for cleanly shutting down firewall+netquery * Improve logging * Add syncing states with notifications for new module system * Improve starting, stopping, shutdown; resolve FIXMEs/TODOs * [WIP] Fix most unit tests * Review new module system and fix minor issues * Push shutdown and restart events again via API * Set sleep mode via interface * Update example/template module * [WIP] Fix spn/cabin unit test * Remove deprecated UI elements * Make log output more similar for the logging transition phase * Switch spn hub and observer cmds to new module system * Fix log sources * Make worker mgr less error prone * Fix tests and minor issues * Fix observation hub * 
Improve shutdown and restart handling * Split up big connection.go source file * Move varint and dsd packages to structures repo * Improve expansion test * Fix linter warnings * Fix interception module on windows * Fix linter errors --------- Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
85 lines
2.1 KiB
Go
85 lines
2.1 KiB
Go
package docks
|
|
|
|
import (
|
|
"testing"
|
|
"time"
|
|
|
|
"github.com/safing/portmaster/spn/terminal"
|
|
)
|
|
|
|
// Default parameters for the capacity test runs below.

// testCapacityTestVolume is the data volume used by the default runs.
var testCapacityTestVolume = 1_000_000

// testCapacitytestMaxTime caps how long a default run may take.
var testCapacitytestMaxTime = time.Second
|
|
|
|
func TestCapacityOp(t *testing.T) { //nolint:paralleltest // Performance test.
|
|
// Defaults.
|
|
testCapacityOp(t, &CapacityTestOptions{
|
|
TestVolume: testCapacityTestVolume,
|
|
MaxTime: testCapacitytestMaxTime,
|
|
testing: true,
|
|
})
|
|
|
|
// Hit max time first.
|
|
testCapacityOp(t, &CapacityTestOptions{
|
|
TestVolume: testCapacityTestVolume,
|
|
MaxTime: 100 * time.Millisecond,
|
|
testing: true,
|
|
})
|
|
|
|
// Hit volume first.
|
|
testCapacityOp(t, &CapacityTestOptions{
|
|
TestVolume: 100_000,
|
|
MaxTime: testCapacitytestMaxTime,
|
|
testing: true,
|
|
})
|
|
}
|
|
|
|
func testCapacityOp(t *testing.T, opts *CapacityTestOptions) {
|
|
t.Helper()
|
|
|
|
var (
|
|
capTestDelay = 5 * time.Millisecond
|
|
capTestQueueSize uint32 = 10
|
|
)
|
|
|
|
// Create test terminal pair.
|
|
a, b, err := terminal.NewSimpleTestTerminalPair(
|
|
capTestDelay,
|
|
int(capTestQueueSize),
|
|
&terminal.TerminalOpts{
|
|
FlowControl: terminal.FlowControlDFQ,
|
|
FlowControlSize: capTestQueueSize,
|
|
},
|
|
)
|
|
if err != nil {
|
|
t.Fatalf("failed to create test terminal pair: %s", err)
|
|
}
|
|
|
|
// Grant permission for op on remote terminal and start op.
|
|
b.GrantPermission(terminal.IsCraneController)
|
|
op, tErr := NewCapacityTestOp(a, opts)
|
|
if tErr != nil {
|
|
t.Fatalf("failed to start op: %s", tErr)
|
|
}
|
|
|
|
// Wait for result and check error.
|
|
tErr = <-op.Result()
|
|
if !tErr.IsOK() {
|
|
t.Fatalf("op failed: %s", tErr)
|
|
}
|
|
t.Logf("measured capacity: %d bit/s", op.testResult)
|
|
|
|
// Calculate expected bandwidth.
|
|
expectedBitsPerSecond := float64(capacityTestMsgSize*8*int64(capTestQueueSize)) / float64(capTestDelay) * float64(time.Second)
|
|
t.Logf("expected capacity: %f bit/s", expectedBitsPerSecond)
|
|
|
|
// Check if measured bandwidth is within parameters.
|
|
if float64(op.testResult) > expectedBitsPerSecond*1.6 {
|
|
t.Fatal("measured capacity too high")
|
|
}
|
|
// TODO: Check if we can raise this to at least 90%.
|
|
if float64(op.testResult) < expectedBitsPerSecond*0.2 {
|
|
t.Fatal("measured capacity too low")
|
|
}
|
|
}
|