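// Package status assembles the overall system status of the Portmaster from
// module state updates and publishes it as a read-only runtime record.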
package status

import (
	"slices"
	"strings"
	"sync"

	"github.com/safing/portmaster/base/database/record"
	"github.com/safing/portmaster/base/runtime"
	"github.com/safing/portmaster/service/mgr"
	"github.com/safing/portmaster/service/netenv"
)

// SystemStatusRecord describes the overall status of the Portmaster.
// It's a read-only record exposed via runtime:system/status.
type SystemStatusRecord struct {
	record.Base
	sync.Mutex

	// OnlineStatus holds the current online status as
	// seen by the netenv package.
	OnlineStatus netenv.OnlineStatus
	// CaptivePortal holds all information about the captive
	// portal of the network the portmaster is currently
	// connected to, if any.
	CaptivePortal *netenv.CaptivePortal

	Modules    []mgr.StateUpdate
	WorstState struct {
		Module string
		mgr.State
	}
}

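// handleModuleStatusUpdate stores the received module state update, derives
// notifications from it and triggers publishing of the system status.
// It always returns a false cancel value, so the handler keeps receiving updates.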
func (s *Status) handleModuleStatusUpdate(_ *mgr.WorkerCtx, update mgr.StateUpdate) (cancel bool, err error) {
	s.statesLock.Lock()
	defer s.statesLock.Unlock()

	s.states[update.Module] = update
	s.deriveNotificationsFromStateUpdate(update)
	s.triggerPublishStatus()

	return false, nil
}

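// triggerPublishStatus signals the status publisher that a new system status
// should be published. The send is non-blocking; if a trigger is already
// pending, that pending trigger covers this update as well.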
func (s *Status) triggerPublishStatus() {
	select {
	case s.triggerUpdate <- struct{}{}:
	default:
	}
}

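// statusPublisher publishes the system status whenever an update is
// triggered, until the worker context is done.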
func (s *Status) statusPublisher(w *mgr.WorkerCtx) error {
	for {
		select {
		case <-w.Done():
			return nil
		case <-s.triggerUpdate:
			s.publishSystemStatus()
		}
	}
}

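// setupRuntimeProvider registers the system status record under
// runtime:system/status and keeps the returned push function for
// publishing updates.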
func (s *Status) setupRuntimeProvider() (err error) {
	// register the system status getter
	statusProvider := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
		return []record.Record{s.buildSystemStatus()}, nil
	})
	s.publishUpdate, err = runtime.Register("system/status", statusProvider)
	if err != nil {
		return err
	}

	return nil
}

// buildSystemStatus builds a new system status record.
func (s *Status) buildSystemStatus() *SystemStatusRecord {
	s.statesLock.Lock()
	defer s.statesLock.Unlock()

	status := &SystemStatusRecord{
		CaptivePortal: netenv.GetCaptivePortal(),
		OnlineStatus:  netenv.GetOnlineStatus(),
		Modules:       make([]mgr.StateUpdate, 0, len(s.states)),
	}
	for _, newStateUpdate := range s.states {
		// Deep copy state.
		newStateUpdate.States = append([]mgr.State(nil), newStateUpdate.States...)
		status.Modules = append(status.Modules, newStateUpdate)

		// Check if state is worst so far.
		for _, state := range newStateUpdate.States {
			if state.Type.Severity() > status.WorstState.Type.Severity() {
				s.mgr.Error("new worst state", "state", state)
				status.WorstState.State = state
				status.WorstState.Module = newStateUpdate.Module
			}
		}
	}
	slices.SortFunc(status.Modules, func(a, b mgr.StateUpdate) int {
		return strings.Compare(a.Module, b.Module)
	})

	status.CreateMeta()
	status.SetKey("runtime:system/status")
	return status
}

// publishSystemStatus pushes a new system status via
// the runtime database.
func (s *Status) publishSystemStatus() {
	if s.publishUpdate == nil {
		return
	}

	record := s.buildSystemStatus()
	record.Lock()
	defer record.Unlock()

	s.publishUpdate(record)
}