safing-portmaster/cmds/notifier/subsystems.go
Daniel Hååvi 80664d1a27
Restructure modules ()
* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
2024-08-09 18:15:48 +03:00


package main

import (
	"sync"

	"github.com/safing/portmaster/base/api/client"
	"github.com/safing/portmaster/base/log"
	"github.com/safing/structures/dsd"
)

const (
	subsystemsKeySpace = "runtime:subsystems/"

	// Module failure status values.
	// FailureNone = 0 // unused.
	// FailureHint = 1 // unused.
	FailureWarning = 2
	FailureError   = 3
)

var (
	subsystems     = make(map[string]*Subsystem)
	subsystemsLock sync.Mutex
)

// Subsystem describes a subset of modules that represent a part of a
// service or program to the user. Subsystems can be (de-)activated, causing
// all related modules to be brought down or up.
type Subsystem struct { //nolint:maligned // not worth the effort
	// ID is a unique identifier for the subsystem.
	ID string
	// Name holds a human-readable name of the subsystem.
	Name string
	// Description may hold an optional description of
	// the subsystem's purpose.
	Description string
	// Modules contains all modules that are related to the subsystem.
	// Note that this slice also contains a reference to the subsystem
	// module itself.
	Modules []*ModuleStatus
	// FailureStatus is the worst failure status that is currently
	// set in one of the subsystem's dependencies.
	FailureStatus uint8
}

// ModuleStatus describes the status of a module.
type ModuleStatus struct {
	Name          string
	Enabled       bool
	Status        uint8
	FailureStatus uint8
	FailureID     string
	FailureMsg    string
}

// GetFailure returns the worst of all subsystem failures.
func GetFailure() (failureStatus uint8, failureMsg string) {
	subsystemsLock.Lock()
	defer subsystemsLock.Unlock()

	for _, subsystem := range subsystems {
		for _, module := range subsystem.Modules {
			if failureStatus < module.FailureStatus {
				failureStatus = module.FailureStatus
				failureMsg = module.FailureMsg
			}
		}
	}
	return
}
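
// Illustrative only (not in the original file): a hypothetical caller could
// map the aggregated failure to a UI state like this; exampleTrayState is an
// assumed name, not part of the notifier:
//
//	func exampleTrayState() string {
//		status, msg := GetFailure()
//		switch status {
//		case FailureError:
//			return "error: " + msg
//		case FailureWarning:
//			return "warning: " + msg
//		default:
//			return "ok"
//		}
//	}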

// updateSubsystem stores or replaces the given subsystem in the local cache.
func updateSubsystem(s *Subsystem) {
	subsystemsLock.Lock()
	defer subsystemsLock.Unlock()

	subsystems[s.ID] = s
}

// clearSubsystems empties the local subsystem cache, e.g. when the
// connection to the Portmaster goes offline.
func clearSubsystems() {
	subsystemsLock.Lock()
	defer subsystemsLock.Unlock()

	for key := range subsystems {
		delete(subsystems, key)
	}
}

// subsystemsClient subscribes to all records in the subsystems key space
// and re-establishes the subscription after reconnects.
func subsystemsClient() {
	subsystemsOp := apiClient.Qsub("query "+subsystemsKeySpace, handleSubsystem)
	subsystemsOp.EnableResuscitation()
}

// handleSubsystem processes a single message from the subsystems
// subscription and updates the local cache accordingly.
func handleSubsystem(m *client.Message) {
	switch m.Type {
	case client.MsgError:
	case client.MsgDone:
	case client.MsgSuccess:
	case client.MsgOk, client.MsgUpdate, client.MsgNew:
		newSubsystem := &Subsystem{}
		_, err := dsd.Load(m.RawValue, newSubsystem)
		if err != nil {
			log.Warningf("subsystems: failed to parse new subsystem: %s", err)
			return
		}
		updateSubsystem(newSubsystem)
		triggerTrayUpdate()

	case client.MsgDelete:
	case client.MsgWarning:
	case client.MsgOffline:
		clearSubsystems()
	}
}
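
// Illustrative only (not in the original file): a minimal sketch of how this
// file's pieces could be wired together, assuming an apiClient set up
// elsewhere in the notifier; the address and call order are assumptions:
//
//	apiClient = client.New("127.0.0.1:817") // connect to the Portmaster API
//	subsystemsClient()                      // subscribe to runtime:subsystems/
//	go apiClient.StayConnected()            // keep the subscription alive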