safing-portmaster/base/log/logging.go
Daniel Hååvi 80664d1a27
Restructure modules ()
* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to the new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
2024-08-09 18:15:48 +03:00

249 lines
5.3 KiB
Go

package log
import (
"fmt"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tevino/abool"
)
// concept
/*
- Logging function:
- check if file-based levelling enabled
- if yes, check if level is active on this file
- check if level is active
- send data to backend via big buffered channel
- Backend:
- wait until there is time for writing logs
- write logs
- configurable if logged to folder (buffer + rollingFileAppender) and/or console
- console: log everything above INFO to stderr
- Channel overbuffering protection:
- if buffer is full, trigger write
- Anti-Importing-Loop:
- everything imports logging
- logging is configured by main module and is supplied access to configuration and taskmanager
*/
// Severity describes a log level. Severities are ordered from
// TraceLevel (lowest) to CriticalLevel (highest); see the Log Levels
// constants below. The zero value is used as an "invalid" sentinel
// by ParseLevel.
type Severity uint32
// Message describes a log level message and is implemented
// by logLine.
type Message interface {
	// Text returns the log message text.
	Text() string
	// Severity returns the severity the message was logged with.
	Severity() Severity
	// Time returns when the message was logged.
	Time() time.Time
	// File returns the source file the message originated from.
	File() string
	// LineNumber returns the line number within File.
	LineNumber() int
}
// logLine is the internal implementation of Message. It carries a
// single log entry from the logging call site to the writer backend
// via logBuffer.
type logLine struct {
	msg       string         // formatted message text
	tracer    *ContextTracer // optional trace context; nil for plain lines
	level     Severity       // severity the line was logged at
	timestamp time.Time      // when the line was created
	file      string         // source file of the log call
	line      int            // source line of the log call
}
// Text returns the log message text.
func (ll *logLine) Text() string {
	return ll.msg
}

// Severity returns the severity of the log line.
func (ll *logLine) Severity() Severity {
	return ll.level
}

// Time returns the timestamp of the log line.
func (ll *logLine) Time() time.Time {
	return ll.timestamp
}

// File returns the source file that emitted the log line.
func (ll *logLine) File() string {
	return ll.file
}

// LineNumber returns the source line number that emitted the log line.
func (ll *logLine) LineNumber() int {
	return ll.line
}
// Equal reports whether ll and ol represent the same log line: same
// message, severity and origin (file and line). Lines that carry a
// tracer are never considered equal, and timestamps are deliberately
// not compared, so repeated identical lines can be detected.
func (ll *logLine) Equal(ol *logLine) bool {
	// Traced lines are always treated as distinct.
	if ll.tracer != nil || ol.tracer != nil {
		return false
	}
	if ll.msg != ol.msg || ll.level != ol.level {
		return false
	}
	return ll.file == ol.file && ll.line == ol.line
}
// Log Levels.
// A higher value means a more severe message. InfoLevel is the
// package default (see logLevelInt), and the zero value is reserved
// as "invalid" (see ParseLevel).
const (
	TraceLevel    Severity = 1
	DebugLevel    Severity = 2
	InfoLevel     Severity = 3
	WarningLevel  Severity = 4
	ErrorLevel    Severity = 5
	CriticalLevel Severity = 6
)
var (
	// logBuffer carries log lines from logging calls to the writer
	// backend; created with capacity 1024 in Start().
	logBuffer chan *logLine
	// forceEmptyingOfBuffer signals the backend to flush immediately
	// (consumed by the writer, which is defined elsewhere).
	forceEmptyingOfBuffer = make(chan struct{})

	// logLevelInt holds the active log level; accessed atomically via
	// the logLevel pointer. Defaults to InfoLevel.
	logLevelInt = uint32(InfoLevel)
	logLevel    = &logLevelInt

	// pkgLevelsActive gates whether per-package levels are consulted;
	// pkgLevels maps package names to levels, guarded by pkgLevelsLock.
	pkgLevelsActive = abool.NewBool(false)
	pkgLevels       = make(map[string]Severity)
	pkgLevelsLock   sync.Mutex

	// logsWaiting (capacity 1) and logsWaitingFlag coalesce "logs are
	// pending" notifications to the writer.
	logsWaiting     = make(chan struct{}, 1)
	logsWaitingFlag = abool.NewBool(false)

	// shutdownSignal is closed exactly once by Shutdown (guarded by
	// shutdownFlag); shutdownWaitGroup is waited on until the writer
	// finishes.
	shutdownFlag      = abool.NewBool(false)
	shutdownSignal    = make(chan struct{})
	shutdownWaitGroup sync.WaitGroup

	// initializing guards Start() against double initialization;
	// started/startedSignal announce that the system is running.
	initializing  = abool.NewBool(false)
	started       = abool.NewBool(false)
	startedSignal = make(chan struct{})
)
// SetPkgLevels sets individual log levels for packages. The given map
// is copied, so the caller may keep and modify it afterwards.
// Only effective after Start().
func SetPkgLevels(levels map[string]Severity) {
	// Copy the map at the API boundary: the original code stored the
	// caller's map by reference, so later mutation by the caller could
	// race with concurrent readers of pkgLevels.
	newLevels := make(map[string]Severity, len(levels))
	for pkg, level := range levels {
		newLevels[pkg] = level
	}

	pkgLevelsLock.Lock()
	pkgLevels = newLevels
	pkgLevelsLock.Unlock()

	// Enable per-package levels only after the map is in place.
	pkgLevelsActive.Set()
}
// UnSetPkgLevels removes all individual log levels for packages.
// It only clears the pkgLevelsActive flag; the stored pkgLevels map
// is not emptied — presumably readers check the flag before
// consulting the map (confirm in the logging fast path).
func UnSetPkgLevels() {
	pkgLevelsActive.UnSet()
}
// GetLogLevel returns the currently active log level.
func GetLogLevel() Severity {
	// Atomic load keeps this safe against concurrent SetLogLevel calls.
	current := atomic.LoadUint32(logLevel)
	return Severity(current)
}
// SetLogLevel sets a new log level. Only effective after Start().
func SetLogLevel(level Severity) {
	// Atomic store pairs with the atomic load in GetLogLevel.
	atomic.StoreUint32(logLevel, uint32(level))

	// Setup slog here for the transition period.
	setupSLog(level)
}
// Name returns the lowercase name of the log level, or "none" for any
// value outside the defined levels.
func (s Severity) Name() string {
	// Constant-indexed table; index 0 is intentionally empty so it
	// falls through to "none", matching the unknown-level case.
	names := [...]string{
		TraceLevel:    "trace",
		DebugLevel:    "debug",
		InfoLevel:     "info",
		WarningLevel:  "warning",
		ErrorLevel:    "error",
		CriticalLevel: "critical",
	}
	if int(s) < len(names) && names[s] != "" {
		return names[s]
	}
	return "none"
}
// ParseLevel returns the level severity of a log level name. Matching
// is case-insensitive. It returns 0 for unknown names, which no valid
// level uses and which callers treat as invalid (see Start).
func ParseLevel(level string) Severity {
	// Use the named severity constants instead of magic numbers so
	// this mapping cannot drift from the Log Levels block and Name().
	switch strings.ToLower(level) {
	case "trace":
		return TraceLevel
	case "debug":
		return DebugLevel
	case "info":
		return InfoLevel
	case "warning":
		return WarningLevel
	case "error":
		return ErrorLevel
	case "critical":
		return CriticalLevel
	default:
		return 0
	}
}
// Start starts the logging system. Must be called in order to see logs.
// It is safe to call repeatedly: only the first call initializes the
// system, later calls return nil immediately.
func Start() (err error) {
	// Guard against double initialization (atomic false -> true flip).
	if !initializing.SetToIf(false, true) {
		return nil
	}

	logBuffer = make(chan *logLine, 1024)

	// Apply the command-line log level, if set. logLevelFlag is
	// defined elsewhere in this package.
	if logLevelFlag != "" {
		initialLogLevel := ParseLevel(logLevelFlag)
		if initialLogLevel == 0 {
			// Unknown level name: warn on stderr and fall back to info.
			fmt.Fprintf(os.Stderr, "log warning: invalid log level \"%s\", falling back to level info\n", logLevelFlag)
			initialLogLevel = InfoLevel
		}
		SetLogLevel(initialLogLevel)
	} else {
		// Setup slog here for the transition period.
		setupSLog(GetLogLevel())
	}

	// get and set file loglevels
	// Expected format: "pkg1=debug,pkg2=trace". pkgLogLevelsFlag is
	// defined elsewhere in this package.
	pkgLogLevels := pkgLogLevelsFlag
	if len(pkgLogLevels) > 0 {
		newPkgLevels := make(map[string]Severity)
		for _, pair := range strings.Split(pkgLogLevels, ",") {
			splitted := strings.Split(pair, "=")
			if len(splitted) != 2 {
				err = fmt.Errorf("log warning: invalid file log level \"%s\", ignoring", pair)
				fmt.Fprintf(os.Stderr, "%s\n", err.Error())
				// NOTE(review): break stops parsing the remaining pairs,
				// yet the levels collected so far are still applied below
				// — the message says "ignoring"; confirm whether
				// `continue` was intended here.
				break
			}
			fileLevel := ParseLevel(splitted[1])
			if fileLevel == 0 {
				err = fmt.Errorf("log warning: invalid file log level \"%s\", ignoring", pair)
				fmt.Fprintf(os.Stderr, "%s\n", err.Error())
				break
			}
			newPkgLevels[splitted[0]] = fileLevel
		}
		SetPkgLevels(newPkgLevels)
	}

	// schedulingEnabled and writeTrigger are defined elsewhere —
	// presumably closing writeTrigger makes the writer run
	// continuously instead of waiting for scheduling; confirm in the
	// writer source.
	if !schedulingEnabled {
		close(writeTrigger)
	}
	startWriter()

	// Announce that the system is up; startedSignal is closed exactly
	// once because initializing gates this whole function.
	started.Set()
	close(startedSignal)

	return err
}
// Shutdown writes remaining log lines and then stops the log system.
// It is safe to call multiple times: shutdownSignal is closed exactly
// once, and every caller blocks until the writer has finished.
func Shutdown() {
	// Atomic false -> true flip ensures the channel is closed once.
	if shutdownFlag.SetToIf(false, true) {
		close(shutdownSignal)
	}
	// Wait for whatever registered in shutdownWaitGroup — presumably
	// the writer started by startWriter; confirm in the writer source.
	shutdownWaitGroup.Wait()
}