Mirror of https://github.com/safing/portmaster (synced 2025-09-01 10:09:11 +00:00)

Commit 7bc1c3b764 (parent 0f3f3c360f): Replace dataroot module with BinDir and DataDir on instance, adapt modules

39 changed files with 819 additions and 482 deletions
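The change drops the shared dataroot package: instead of reading a global *utils.DirStructure, each module now declares a small instance interface with the directory getters it needs (DataDir(), plus BinDir() where binaries are involved) and builds its paths with filepath.Join and os.MkdirAll. A minimal sketch of that pattern, distilled from the hunks below (module and method names simplified, not the exact Portmaster code):

package example

import (
	"fmt"
	"os"
	"path/filepath"
)

// instance is declared per module and lists only the getters that module uses.
type instance interface {
	DataDir() string // read/write directory for variable data
	BinDir() string  // directory for executable binaries, possibly read-only
}

// Module keeps a reference to its instance instead of a global data root.
type Module struct {
	instance instance
}

// start derives its storage location from the instance-provided data directory.
func (m *Module) start() error {
	dbDir := filepath.Join(m.instance.DataDir(), "databases")
	if err := os.MkdirAll(dbDir, 0o0700); err != nil {
		return fmt.Errorf("could not create database directory (%s): %w", dbDir, err)
	}
	return nil
}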
|
@@ -6,30 +6,50 @@ import (
	"testing"
)

type testInstance struct{}
type testInstance struct {
	dataDir string
}

var _ instance = testInstance{}

func (stub testInstance) DataDir() string {
	return stub.dataDir
}

func (stub testInstance) SetCmdLineOperation(f func() error) {}

func runTest(m *testing.M) error {
	ds, err := InitializeUnitTestDataroot("test-config")
func newTestInstance(testName string) (*testInstance, error) {
	testDir, err := os.MkdirTemp("", fmt.Sprintf("portmaster-%s", testName))
	if err != nil {
		return fmt.Errorf("failed to initialize dataroot: %w", err)
	}
	defer func() { _ = os.RemoveAll(ds) }()
	module, err = New(&testInstance{})
	if err != nil {
		return fmt.Errorf("failed to initialize module: %w", err)
		return nil, fmt.Errorf("failed to make tmp dir: %w", err)
	}

	m.Run()
	return nil
	return &testInstance{
		dataDir: testDir,
	}, nil
}

func TestMain(m *testing.M) {
	if err := runTest(m); err != nil {
		fmt.Printf("%s\n", err)
		os.Exit(1)
func TestConfigPersistence(t *testing.T) {
	t.Parallel()

	instance, err := newTestInstance("test-config")
	if err != nil {
		t.Fatalf("failed to create test instance: %s", err)
	}
	defer func() { _ = os.RemoveAll(instance.DataDir()) }()

	module, err = New(instance)
	if err != nil {
		t.Fatalf("failed to initialize module: %s", err)
	}

	err = SaveConfig()
	if err != nil {
		t.Fatal(err)
	}

	err = loadConfig(true)
	if err != nil {
		t.Fatal(err)
	}
}
|
|
@ -10,8 +10,6 @@ import (
|
|||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/utils"
|
||||
"github.com/safing/portmaster/base/utils/debug"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
)
|
||||
|
@ -19,29 +17,13 @@ import (
|
|||
// ChangeEvent is the name of the config change event.
|
||||
const ChangeEvent = "config change"
|
||||
|
||||
var (
|
||||
dataRoot *utils.DirStructure
|
||||
|
||||
exportConfig bool
|
||||
)
|
||||
|
||||
// SetDataRoot sets the data root from which the updates module derives its paths.
|
||||
func SetDataRoot(root *utils.DirStructure) {
|
||||
if dataRoot == nil {
|
||||
dataRoot = root
|
||||
}
|
||||
}
|
||||
var exportConfig bool
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&exportConfig, "export-config-options", false, "export configuration registry and exit")
|
||||
}
|
||||
|
||||
func prep() error {
|
||||
SetDataRoot(dataroot.Root())
|
||||
if dataRoot == nil {
|
||||
return errors.New("data root is not set")
|
||||
}
|
||||
|
||||
if exportConfig {
|
||||
module.instance.SetCmdLineOperation(exportConfigCmd)
|
||||
return mgr.ErrExecuteCmdLineOp
|
||||
|
@ -51,7 +33,7 @@ func prep() error {
|
|||
}
|
||||
|
||||
func start() error {
|
||||
configFilePath = filepath.Join(dataRoot.Path, "config.json")
|
||||
configFilePath = filepath.Join(module.instance.DataDir(), "config.json")
|
||||
|
||||
// Load log level from log package after it started.
|
||||
err := loadLogLevel()
|
||||
|
@ -136,20 +118,3 @@ func GetActiveConfigValues() map[string]interface{} {
|
|||
|
||||
return values
|
||||
}
|
||||
|
||||
// InitializeUnitTestDataroot initializes a new random tmp directory for running tests.
|
||||
func InitializeUnitTestDataroot(testName string) (string, error) {
|
||||
basePath, err := os.MkdirTemp("", fmt.Sprintf("portmaster-%s", testName))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to make tmp dir: %w", err)
|
||||
}
|
||||
|
||||
ds := utils.NewDirStructure(basePath, 0o0755)
|
||||
SetDataRoot(ds)
|
||||
err = dataroot.Initialize(basePath, 0o0755)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to initialize dataroot: %w", err)
|
||||
}
|
||||
|
||||
return basePath, nil
|
||||
}
|
||||
|
|
|
@@ -56,5 +56,6 @@ func New(instance instance) (*Config, error) {
}

type instance interface {
	DataDir() string
	SetCmdLineOperation(f func() error)
}
|
|
|
@@ -26,7 +26,7 @@ func TestMain(m *testing.M) {
		panic(err)
	}

	err = InitializeWithPath(testDir)
	err = Initialize(testDir)
	if err != nil {
		panic(err)
	}
|
|
@ -2,11 +2,10 @@ package dbmodule
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/safing/portmaster/base/database"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/utils"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
)
|
||||
|
||||
|
@ -27,18 +26,18 @@ func (dbm *DBModule) Stop() error {
|
|||
return stop()
|
||||
}
|
||||
|
||||
var databaseStructureRoot *utils.DirStructure
|
||||
var databasesRootDir string
|
||||
|
||||
// SetDatabaseLocation sets the location of the database for initialization. Supply either a path or dir structure.
|
||||
func SetDatabaseLocation(dirStructureRoot *utils.DirStructure) {
|
||||
if databaseStructureRoot == nil {
|
||||
databaseStructureRoot = dirStructureRoot
|
||||
func SetDatabaseLocation(dir string) {
|
||||
if databasesRootDir == "" {
|
||||
databasesRootDir = dir
|
||||
}
|
||||
}
|
||||
|
||||
func prep() error {
|
||||
SetDatabaseLocation(dataroot.Root())
|
||||
if databaseStructureRoot == nil {
|
||||
SetDatabaseLocation(filepath.Join(module.instance.DataDir(), "databases"))
|
||||
if databasesRootDir == "" {
|
||||
return errors.New("database location not specified")
|
||||
}
|
||||
|
||||
|
@ -64,16 +63,16 @@ func New(instance instance) (*DBModule, error) {
|
|||
return nil, errors.New("only one instance allowed")
|
||||
}
|
||||
|
||||
if err := prep(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := mgr.New("DBModule")
|
||||
module = &DBModule{
|
||||
mgr: m,
|
||||
instance: instance,
|
||||
}
|
||||
if err := prep(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err := database.Initialize(databaseStructureRoot)
|
||||
err := database.Initialize(databasesRootDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -81,4 +80,6 @@ func New(instance instance) (*DBModule, error) {
|
|||
return module, nil
|
||||
}
|
||||
|
||||
type instance interface{}
|
||||
type instance interface {
|
||||
DataDir() string
|
||||
}
|
||||
|
|
|
@ -3,14 +3,10 @@ package database
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/tevino/abool"
|
||||
|
||||
"github.com/safing/portmaster/base/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
databasesSubDir = "databases"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -19,25 +15,18 @@ var (
|
|||
shuttingDown = abool.NewBool(false)
|
||||
shutdownSignal = make(chan struct{})
|
||||
|
||||
rootStructure *utils.DirStructure
|
||||
databasesStructure *utils.DirStructure
|
||||
rootDir string
|
||||
)
|
||||
|
||||
// InitializeWithPath initializes the database at the specified location using a path.
|
||||
func InitializeWithPath(dirPath string) error {
|
||||
return Initialize(utils.NewDirStructure(dirPath, 0o0755))
|
||||
}
|
||||
|
||||
// Initialize initializes the database at the specified location using a dir structure.
|
||||
func Initialize(dirStructureRoot *utils.DirStructure) error {
|
||||
// Initialize initializes the database at the specified location.
|
||||
func Initialize(databasesRootDir string) error {
|
||||
if initialized.SetToIf(false, true) {
|
||||
rootStructure = dirStructureRoot
|
||||
rootDir = databasesRootDir
|
||||
|
||||
// ensure root and databases dirs
|
||||
databasesStructure = rootStructure.ChildDir(databasesSubDir, 0o0700)
|
||||
err := databasesStructure.Ensure()
|
||||
// Ensure database root dir exists.
|
||||
err := os.MkdirAll(rootDir, 0o0700)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create/open database directory (%s): %w", rootStructure.Path, err)
|
||||
return fmt.Errorf("could not create/open database directory (%s): %w", rootDir, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -67,11 +56,12 @@ func Shutdown() (err error) {
|
|||
|
||||
// getLocation returns the storage location for the given name and type.
|
||||
func getLocation(name, storageType string) (string, error) {
|
||||
location := databasesStructure.ChildDir(name, 0o0700).ChildDir(storageType, 0o0700)
|
||||
// check location
|
||||
err := location.Ensure()
|
||||
location := filepath.Join(rootDir, name, storageType)
|
||||
|
||||
// Make sure location exists.
|
||||
err := os.MkdirAll(location, 0o0700)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf(`failed to create/check database dir "%s": %w`, location.Path, err)
|
||||
return "", fmt.Errorf("failed to create/check database dir %q: %w", location, err)
|
||||
}
|
||||
return location.Path, nil
|
||||
return location, nil
|
||||
}
|
||||
|
|
|
@@ -1,25 +0,0 @@
package dataroot

import (
	"errors"
	"os"

	"github.com/safing/portmaster/base/utils"
)

var root *utils.DirStructure

// Initialize initializes the data root directory.
func Initialize(rootDir string, perm os.FileMode) error {
	if root != nil {
		return errors.New("already initialized")
	}

	root = utils.NewDirStructure(rootDir, perm)
	return root.Ensure()
}

// Root returns the data root directory.
func Root() *utils.DirStructure {
	return root
}
|
@ -10,7 +10,6 @@ import (
|
|||
"github.com/shirou/gopsutil/mem"
|
||||
|
||||
"github.com/safing/portmaster/base/api"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/log"
|
||||
)
|
||||
|
||||
|
@ -209,18 +208,9 @@ func getDiskStat() *disk.UsageStat {
|
|||
return diskStat
|
||||
}
|
||||
|
||||
// Check if we have a data root.
|
||||
dataRoot := dataroot.Root()
|
||||
if dataRoot == nil {
|
||||
log.Warning("metrics: cannot get disk stats without data root")
|
||||
diskStat = nil
|
||||
diskStatExpires = time.Now().Add(hostStatTTL)
|
||||
return diskStat
|
||||
}
|
||||
|
||||
// Refresh.
|
||||
var err error
|
||||
diskStat, err = disk.Usage(dataRoot.Path)
|
||||
diskStat, err = disk.Usage(module.instance.DataDir())
|
||||
if err != nil {
|
||||
log.Warningf("metrics: failed to get load avg: %s", err)
|
||||
diskStat = nil
|
||||
|
|
|
@@ -213,4 +213,6 @@ func New(instance instance) (*Metrics, error) {
	return module, nil
}

type instance interface{}
type instance interface {
	DataDir() string
}
|
|
@ -1,47 +1,49 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"syscall"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/safing/portmaster/base/info"
|
||||
"github.com/safing/portmaster/base/metrics"
|
||||
"github.com/safing/portmaster/service"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
"github.com/safing/portmaster/service/updates"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
)
|
||||
|
||||
var (
|
||||
printStackOnExit bool
|
||||
enableInputSignals bool
|
||||
rootCmd = &cobra.Command{
|
||||
Use: "portmaster-core",
|
||||
PersistentPreRun: initializeGlobals,
|
||||
Run: cmdRun,
|
||||
}
|
||||
|
||||
sigUSR1 = syscall.Signal(0xa) // dummy for windows
|
||||
binDir string
|
||||
dataDir string
|
||||
|
||||
svcCfg *service.ServiceConfig
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
|
||||
flag.BoolVar(&enableInputSignals, "input-signals", false, "emulate signals using stdin")
|
||||
// Add Go's default flag set.
|
||||
rootCmd.Flags().AddGoFlagSet(flag.CommandLine)
|
||||
|
||||
// Add persistent flags for all commands.
|
||||
rootCmd.PersistentFlags().StringVar(&binDir, "bin-dir", "", "set directory for executable binaries (rw/ro)")
|
||||
rootCmd.PersistentFlags().StringVar(&dataDir, "data-dir", "", "set directory for variable data (rw)")
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
// Call platform specific checks, that will execute commands like "recover-iptables"
|
||||
platformSpecificChecks()
|
||||
|
||||
instance := initialize()
|
||||
run(instance)
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func initialize() *service.Instance {
|
||||
func initializeGlobals(cmd *cobra.Command, args []string) {
|
||||
// set information
|
||||
info.Set("Portmaster", "", "GPLv3")
|
||||
|
||||
|
@ -51,66 +53,13 @@ func initialize() *service.Instance {
|
|||
// Configure user agent.
|
||||
updates.UserAgent = fmt.Sprintf("Portmaster Core (%s %s)", runtime.GOOS, runtime.GOARCH)
|
||||
|
||||
// enable SPN client mode
|
||||
conf.EnableClient(true)
|
||||
conf.EnableIntegration(true)
|
||||
|
||||
// Create instance.
|
||||
var execCmdLine bool
|
||||
instance, err := service.New(&service.ServiceConfig{})
|
||||
switch {
|
||||
case err == nil:
|
||||
// Continue
|
||||
case errors.Is(err, mgr.ErrExecuteCmdLineOp):
|
||||
execCmdLine = true
|
||||
default:
|
||||
fmt.Printf("error creating an instance: %s\n", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
// Execute command line operation, if requested or available.
|
||||
switch {
|
||||
case !execCmdLine:
|
||||
// Run service.
|
||||
case instance.CommandLineOperation == nil:
|
||||
fmt.Println("command line operation execution requested, but not set")
|
||||
os.Exit(3)
|
||||
default:
|
||||
// Run the function and exit.
|
||||
err = instance.CommandLineOperation()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
|
||||
os.Exit(3)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
return instance
|
||||
}
|
||||
|
||||
func printStackTo(writer io.Writer, msg string) {
|
||||
_, err := fmt.Fprintf(writer, "===== %s =====\n", msg)
|
||||
if err == nil {
|
||||
err = pprof.Lookup("goroutine").WriteTo(writer, 1)
|
||||
}
|
||||
if err != nil {
|
||||
slog.Error("failed to write stack trace", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func inputSignals(signalCh chan os.Signal) {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
switch scanner.Text() {
|
||||
case "SIGHUP":
|
||||
signalCh <- syscall.SIGHUP
|
||||
case "SIGINT":
|
||||
signalCh <- syscall.SIGINT
|
||||
case "SIGQUIT":
|
||||
signalCh <- syscall.SIGQUIT
|
||||
case "SIGTERM":
|
||||
signalCh <- syscall.SIGTERM
|
||||
case "SIGUSR1":
|
||||
signalCh <- sigUSR1
|
||||
}
|
||||
// Create service config.
|
||||
svcCfg = &service.ServiceConfig{
|
||||
BinDir: binDir,
|
||||
DataDir: dataDir,
|
||||
BinariesIndexURLs: service.DefaultBinaryIndexURLs,
|
||||
IntelIndexURLs: service.DefaultIntelIndexURLs,
|
||||
VerifyBinaryUpdates: service.BinarySigningTrustStore,
|
||||
VerifyIntelUpdates: service.BinarySigningTrustStore,
|
||||
}
|
||||
}
|
||||
|
|
cmds/portmaster-core/run.go (new file, 108 lines)
|
@ -0,0 +1,108 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"syscall"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/safing/portmaster/service"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
)
|
||||
|
||||
var (
|
||||
printStackOnExit bool
|
||||
enableInputSignals bool
|
||||
|
||||
sigUSR1 = syscall.Signal(0xa) // dummy for windows
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
|
||||
flag.BoolVar(&enableInputSignals, "input-signals", false, "emulate signals using stdin")
|
||||
}
|
||||
|
||||
func cmdRun(cmd *cobra.Command, args []string) {
|
||||
// Call platform specific checks, that will execute commands like "recover-iptables"
|
||||
platformSpecificChecks()
|
||||
|
||||
svcCfg.VerifyBinaryUpdates = nil // FIXME
|
||||
svcCfg.VerifyIntelUpdates = nil // FIXME
|
||||
|
||||
instance := createInstance()
|
||||
run(instance)
|
||||
}
|
||||
|
||||
func createInstance() *service.Instance {
|
||||
// enable SPN client mode
|
||||
conf.EnableClient(true)
|
||||
conf.EnableIntegration(true)
|
||||
|
||||
// Create instance.
|
||||
var execCmdLine bool
|
||||
instance, err := service.New(svcCfg)
|
||||
switch {
|
||||
case err == nil:
|
||||
// Continue
|
||||
case errors.Is(err, mgr.ErrExecuteCmdLineOp):
|
||||
execCmdLine = true
|
||||
default:
|
||||
fmt.Printf("error creating an instance: %s\n", err)
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
// Execute module command line operation, if requested or available.
|
||||
switch {
|
||||
case !execCmdLine:
|
||||
// Run service.
|
||||
case instance.CommandLineOperation == nil:
|
||||
fmt.Println("command line operation execution requested, but not set")
|
||||
os.Exit(3)
|
||||
default:
|
||||
// Run the function and exit.
|
||||
fmt.Println("executing cmdline op")
|
||||
err = instance.CommandLineOperation()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
|
||||
os.Exit(3)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
return instance
|
||||
}
|
||||
|
||||
func printStackTo(writer io.Writer, msg string) {
|
||||
_, err := fmt.Fprintf(writer, "===== %s =====\n", msg)
|
||||
if err == nil {
|
||||
err = pprof.Lookup("goroutine").WriteTo(writer, 1)
|
||||
}
|
||||
if err != nil {
|
||||
slog.Error("failed to write stack trace", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func inputSignals(signalCh chan os.Signal) {
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for scanner.Scan() {
|
||||
switch scanner.Text() {
|
||||
case "SIGHUP":
|
||||
signalCh <- syscall.SIGHUP
|
||||
case "SIGINT":
|
||||
signalCh <- syscall.SIGINT
|
||||
case "SIGQUIT":
|
||||
signalCh <- syscall.SIGQUIT
|
||||
case "SIGTERM":
|
||||
signalCh <- syscall.SIGTERM
|
||||
case "SIGUSR1":
|
||||
signalCh <- sigUSR1
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -129,7 +129,7 @@ func isRunningAsService() bool {
	// Get the current process ID
	pid := os.Getpid()

	currentProcess, err := processInfo.NewProcess(int32(pid))
	currentProcess, err := processInfo.NewProcess(int32(pid)) //nolint:gosec
	if err != nil {
		return false
	}
cmds/portmaster-core/update.go (new file, 77 lines)
|
@ -0,0 +1,77 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/safing/portmaster/base/log"
|
||||
"github.com/safing/portmaster/base/notifications"
|
||||
"github.com/safing/portmaster/service"
|
||||
"github.com/safing/portmaster/service/updates"
|
||||
)
|
||||
|
||||
var updateCmd = &cobra.Command{
|
||||
Use: "update",
|
||||
Short: "Force an update of all components.",
|
||||
RunE: update,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(updateCmd)
|
||||
}
|
||||
|
||||
func update(cmd *cobra.Command, args []string) error {
|
||||
// Finalize config.
|
||||
svcCfg.VerifyBinaryUpdates = nil // FIXME
|
||||
svcCfg.VerifyIntelUpdates = nil // FIXME
|
||||
err := svcCfg.Init()
|
||||
if err != nil {
|
||||
return fmt.Errorf("internal configuration error: %w", err)
|
||||
}
|
||||
|
||||
// Start logging.
|
||||
log.SetLogLevel(log.InfoLevel)
|
||||
_ = log.Start()
|
||||
defer log.Shutdown()
|
||||
|
||||
// Create updaters.
|
||||
instance := &updateDummyInstance{}
|
||||
binaryUpdateConfig, intelUpdateConfig, err := service.MakeUpdateConfigs(svcCfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("init updater config: %w", err)
|
||||
}
|
||||
binaryUpdates, err := updates.New(instance, "Binary Updater", *binaryUpdateConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("configure binary updates: %w", err)
|
||||
}
|
||||
intelUpdates, err := updates.New(instance, "Intel Updater", *intelUpdateConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("configure intel updates: %w", err)
|
||||
}
|
||||
|
||||
// Force update all.
|
||||
binErr := binaryUpdates.ForceUpdate()
|
||||
if binErr != nil {
|
||||
log.Errorf("binary update failed: %s", binErr)
|
||||
}
|
||||
intelErr := intelUpdates.ForceUpdate()
|
||||
if intelErr != nil {
|
||||
log.Errorf("intel update failed: %s", intelErr)
|
||||
}
|
||||
|
||||
// Return error.
|
||||
if binErr != nil {
|
||||
return fmt.Errorf("binary update failed: %w", binErr)
|
||||
}
|
||||
if intelErr != nil {
|
||||
return fmt.Errorf("intel update failed: %w", intelErr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type updateDummyInstance struct{}
|
||||
|
||||
func (udi *updateDummyInstance) Restart() {}
|
||||
func (udi *updateDummyInstance) Shutdown() {}
|
||||
func (udi *updateDummyInstance) Notifications() *notifications.Notifications { return nil }
|
|
@@ -9,7 +9,7 @@ import (
)

var (
	bundleSettings = updates.IndexScanConfig{
	scanConfig     = updates.IndexScanConfig{
		Name:            "Portmaster Binaries",
		PrimaryArtifact: "linux_amd64/portmaster-core",
		BaseURL:         "https://updates.safing.io/",

@@ -60,17 +60,17 @@
		RunE: scan,
	}

	bundleDir string
	scanDir   string
)

func init() {
	rootCmd.AddCommand(scanCmd)
	scanCmd.Flags().StringVarP(&bundleDir, "dir", "d", "", "directory to create index from (required)")
	scanCmd.Flags().StringVarP(&scanDir, "dir", "d", "", "directory to create index from (required)")
	_ = scanCmd.MarkFlagRequired("dir")
}

func scan(cmd *cobra.Command, args []string) error {
	bundle, err := updates.GenerateBundleFromDir(bundleDir, bundleSettings)
	bundle, err := updates.GenerateIndexFromDir(scanDir, scanConfig)
	if err != nil {
		return err
	}
|
|
@ -4,6 +4,7 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
|
@ -111,6 +112,9 @@ func (ii *InstallInfo) checkVersion() {
|
|||
// MakeNumericVersion makes a numeric version with the first three version
|
||||
// segment always using three digits.
|
||||
func MakeNumericVersion(version string) (numericVersion int64, err error) {
|
||||
// Remove any comments.
|
||||
version = strings.SplitN(version, " ", 2)[0]
|
||||
|
||||
// Parse version string.
|
||||
ver, err := semver.NewVersion(version)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,3 +1,89 @@
|
|||
package service
|
||||
|
||||
type ServiceConfig struct{}
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/safing/jess"
|
||||
)
|
||||
|
||||
type ServiceConfig struct {
|
||||
BinDir string
|
||||
DataDir string
|
||||
|
||||
BinariesIndexURLs []string
|
||||
IntelIndexURLs []string
|
||||
VerifyBinaryUpdates jess.TrustStore
|
||||
VerifyIntelUpdates jess.TrustStore
|
||||
}
|
||||
|
||||
func (sc *ServiceConfig) Init() error {
|
||||
// Check directories
|
||||
switch runtime.GOOS {
|
||||
case "windows":
|
||||
if sc.BinDir == "" {
|
||||
exeDir, err := getCurrentBinaryFolder() // Default: C:/Program Files/Portmaster
|
||||
if err != nil {
|
||||
return fmt.Errorf("derive bin dir from runnning exe: %w", err)
|
||||
}
|
||||
sc.BinDir = exeDir
|
||||
}
|
||||
if sc.DataDir == "" {
|
||||
sc.DataDir = filepath.FromSlash("$ProgramData/Portmaster")
|
||||
}
|
||||
|
||||
case "linux":
|
||||
// Fall back to defaults.
|
||||
if sc.BinDir == "" {
|
||||
sc.BinDir = "/usr/lib/portmaster"
|
||||
}
|
||||
if sc.DataDir == "" {
|
||||
sc.DataDir = "/var/lib/portmaster"
|
||||
}
|
||||
|
||||
default:
|
||||
// Fail if not configured on other platforms.
|
||||
if sc.BinDir == "" {
|
||||
return errors.New("binary directory must be configured - auto-detection not supported on this platform")
|
||||
}
|
||||
if sc.DataDir == "" {
|
||||
return errors.New("binary directory must be configured - auto-detection not supported on this platform")
|
||||
}
|
||||
}
|
||||
|
||||
// Expand path variables.
|
||||
sc.BinDir = os.ExpandEnv(sc.BinDir)
|
||||
sc.DataDir = os.ExpandEnv(sc.DataDir)
|
||||
|
||||
// Apply defaults for required fields.
|
||||
if len(sc.BinariesIndexURLs) == 0 {
|
||||
sc.BinariesIndexURLs = DefaultBinaryIndexURLs
|
||||
}
|
||||
if len(sc.IntelIndexURLs) == 0 {
|
||||
sc.IntelIndexURLs = DefaultIntelIndexURLs
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getCurrentBinaryFolder() (string, error) {
|
||||
// Get the path of the currently running executable
|
||||
exePath, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get executable path: %w", err)
|
||||
}
|
||||
|
||||
// Get the absolute path
|
||||
absPath, err := filepath.Abs(exePath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get absolute path: %w", err)
|
||||
}
|
||||
|
||||
// Get the directory of the executable
|
||||
installDir := filepath.Dir(absPath)
|
||||
|
||||
return installDir, nil
|
||||
}
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
"fmt"
|
||||
|
||||
"github.com/safing/portmaster/base/api"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/info"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
)
|
||||
|
@ -15,14 +14,10 @@ import (
|
|||
var (
|
||||
DefaultAPIListenAddress = "127.0.0.1:817"
|
||||
|
||||
dataDir string
|
||||
databaseDir string
|
||||
showVersion bool
|
||||
)
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&dataDir, "data", "", "set data directory")
|
||||
flag.StringVar(&databaseDir, "db", "", "alias to --data (deprecated)")
|
||||
flag.BoolVar(&showVersion, "version", false, "show version and exit")
|
||||
}
|
||||
|
||||
|
@ -39,27 +34,6 @@ func prep(instance instance) error {
|
|||
return mgr.ErrExecuteCmdLineOp
|
||||
}
|
||||
|
||||
// check data root
|
||||
if dataroot.Root() == nil {
|
||||
// initialize data dir
|
||||
|
||||
// backwards compatibility
|
||||
if dataDir == "" {
|
||||
dataDir = databaseDir
|
||||
}
|
||||
|
||||
// check data dir
|
||||
if dataDir == "" {
|
||||
return errors.New("please set the data directory using --data=/path/to/data/dir")
|
||||
}
|
||||
|
||||
// initialize structure
|
||||
err := dataroot.Initialize(dataDir, 0o0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// set api listen address
|
||||
api.SetDefaultAPIListenAddress(DefaultAPIListenAddress)
|
||||
|
||||
|
|
|
@@ -7,7 +7,6 @@ import (
	"strings"
	"time"

	"github.com/safing/portmaster/base/dataroot"
	"github.com/safing/portmaster/base/log"
	"github.com/safing/portmaster/service/mgr"
)

@@ -26,7 +25,7 @@ func logCleaner(_ *mgr.WorkerCtx) error {
	ageThreshold := time.Now().Add(-logTTL)

	return filepath.Walk(
		filepath.Join(dataroot.Root().Path, logFileDir),
		filepath.Join(module.instance.DataDir(), logFileDir),
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				if !errors.Is(err, os.ErrNotExist) {
|
|
|
@@ -58,5 +58,6 @@ func New(instance instance) (*Base, error) {
}

type instance interface {
	DataDir() string
	SetCmdLineOperation(f func() error)
}
|
|
@ -11,9 +11,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/safing/portmaster/base/api"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/log"
|
||||
"github.com/safing/portmaster/base/utils"
|
||||
"github.com/safing/portmaster/service/netenv"
|
||||
"github.com/safing/portmaster/service/network/netutils"
|
||||
"github.com/safing/portmaster/service/network/packet"
|
||||
|
@ -38,15 +36,12 @@ For production use please create an API key in the settings.`
|
|||
)
|
||||
|
||||
var (
|
||||
dataRoot *utils.DirStructure
|
||||
|
||||
apiPortSet bool
|
||||
apiIP net.IP
|
||||
apiPort uint16
|
||||
)
|
||||
|
||||
func prepAPIAuth() error {
|
||||
dataRoot = dataroot.Root()
|
||||
return api.SetAuthenticator(apiAuthenticator)
|
||||
}
|
||||
|
||||
|
@ -132,7 +127,7 @@ func authenticateAPIRequest(ctx context.Context, pktInfo *packet.Info) (retry bo
|
|||
var originalPid int
|
||||
|
||||
// Get authenticated path.
|
||||
authenticatedPath := module.instance.BinaryUpdates().GetRootPath()
|
||||
authenticatedPath := module.instance.BinaryUpdates().GetMainDir()
|
||||
if authenticatedPath == "" {
|
||||
return false, fmt.Errorf(deniedMsgMisconfigured, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user
|
||||
}
|
||||
|
@ -214,7 +209,7 @@ func authenticateAPIRequest(ctx context.Context, pktInfo *packet.Info) (retry bo
|
|||
return false, fmt.Errorf(deniedMsgSystem, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user
|
||||
|
||||
default: // normal process
|
||||
log.Tracer(ctx).Warningf("filter: denying api access to %s - also checked %s (trusted root is %s)", procsChecked[0], strings.Join(procsChecked[1:], " "), dataRoot.Path)
|
||||
log.Tracer(ctx).Warningf("filter: denying api access to %s - also checked %s (trusted root is %s)", procsChecked[0], strings.Join(procsChecked[1:], " "), module.instance.BinDir())
|
||||
return false, fmt.Errorf( //nolint:stylecheck // message for user
|
||||
deniedMsgUnauthorized,
|
||||
api.ErrAPIAccessDeniedMessage,
|
||||
|
|
|
@ -160,6 +160,7 @@ func New(instance instance) (*Firewall, error) {
|
|||
}
|
||||
|
||||
type instance interface {
|
||||
BinDir() string
|
||||
Config() *config.Config
|
||||
BinaryUpdates() *updates.Updater
|
||||
Profile() *profile.ProfileModule
|
||||
|
|
|
@ -4,8 +4,6 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
go_runtime "runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
|
@ -55,6 +53,9 @@ type Instance struct {
|
|||
cancelCtx context.CancelFunc
|
||||
serviceGroup *mgr.Group
|
||||
|
||||
binDir string
|
||||
dataDir string
|
||||
|
||||
exitCode atomic.Int32
|
||||
|
||||
database *dbmodule.DBModule
|
||||
|
@ -105,83 +106,27 @@ type Instance struct {
|
|||
ShouldRestart bool
|
||||
}
|
||||
|
||||
func getCurrentBinaryFolder() (string, error) {
|
||||
// Get the path of the currently running executable
|
||||
exePath, err := os.Executable()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get executable path: %w", err)
|
||||
}
|
||||
|
||||
// Get the absolute path
|
||||
absPath, err := filepath.Abs(exePath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get absolute path: %w", err)
|
||||
}
|
||||
|
||||
// Get the directory of the executable
|
||||
installDir := filepath.Dir(absPath)
|
||||
|
||||
return installDir, nil
|
||||
}
|
||||
|
||||
// New returns a new Portmaster service instance.
|
||||
func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
|
||||
var binaryUpdateIndex updates.Config
|
||||
var intelUpdateIndex updates.Config
|
||||
if go_runtime.GOOS == "windows" {
|
||||
binaryFolder, err := getCurrentBinaryFolder()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
binaryUpdateIndex = updates.Config{
|
||||
Directory: binaryFolder, // Default: C:/Program Files/Portmaster
|
||||
DownloadDirectory: os.ExpandEnv("$ProgramData/Portmaster/new_binary"),
|
||||
PurgeDirectory: filepath.Join(binaryFolder, "old_binary"), // Default: C:/Program Files/Portmaster/old_binary
|
||||
Ignore: []string{"databases", "intel", "config.json"},
|
||||
IndexURLs: []string{"http://192.168.88.11:8000/test-binary.json"},
|
||||
IndexFile: "bin-index.json",
|
||||
AutoApply: false,
|
||||
NeedsRestart: true,
|
||||
}
|
||||
// Initialize config.
|
||||
err := svcCfg.Init()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("internal service config error: %w", err)
|
||||
}
|
||||
|
||||
intelUpdateIndex = updates.Config{
|
||||
Directory: os.ExpandEnv("$ProgramData/Portmaster/intel"),
|
||||
DownloadDirectory: os.ExpandEnv("$ProgramData/Portmaster/new_intel"),
|
||||
PurgeDirectory: os.ExpandEnv("$ProgramData/Portmaster/old_intel"),
|
||||
IndexURLs: []string{"http://192.168.88.11:8000/test-intel.json"},
|
||||
IndexFile: "intel-index.json",
|
||||
AutoApply: true,
|
||||
NeedsRestart: false,
|
||||
}
|
||||
} else if go_runtime.GOOS == "linux" {
|
||||
binaryUpdateIndex = updates.Config{
|
||||
Directory: "/usr/lib/portmaster",
|
||||
DownloadDirectory: "/var/lib/portmaster/new_bin",
|
||||
PurgeDirectory: "/var/lib/portmaster/old_bin",
|
||||
Ignore: []string{"databases", "intel", "config.json"},
|
||||
IndexURLs: []string{"http://localhost:8000/test-binary.json"},
|
||||
IndexFile: "bin-index.json",
|
||||
AutoApply: false,
|
||||
NeedsRestart: true,
|
||||
}
|
||||
|
||||
intelUpdateIndex = updates.Config{
|
||||
Directory: "/var/lib/portmaster/intel",
|
||||
DownloadDirectory: "/var/lib/portmaster/new_intel",
|
||||
PurgeDirectory: "/var/lib/portmaster/intel_bin",
|
||||
IndexURLs: []string{"http://localhost:8000/test-intel.json"},
|
||||
IndexFile: "intel-index.json",
|
||||
AutoApply: true,
|
||||
NeedsRestart: false,
|
||||
}
|
||||
// Make sure data dir exists, so that child directories don't dictate the permissions.
|
||||
err = os.MkdirAll(svcCfg.DataDir, 0o0755)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("data directory %s is not accessible: %w", svcCfg.DataDir, err)
|
||||
}
|
||||
|
||||
// Create instance to pass it to modules.
|
||||
instance := &Instance{}
|
||||
instance := &Instance{
|
||||
binDir: svcCfg.BinDir,
|
||||
dataDir: svcCfg.DataDir,
|
||||
}
|
||||
instance.ctx, instance.cancelCtx = context.WithCancel(context.Background())
|
||||
|
||||
var err error
|
||||
|
||||
// Base modules
|
||||
instance.base, err = base.New(instance)
|
||||
if err != nil {
|
||||
|
@ -221,11 +166,15 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
|
|||
if err != nil {
|
||||
return instance, fmt.Errorf("create core module: %w", err)
|
||||
}
|
||||
instance.binaryUpdates, err = updates.New(instance, "Binary Updater", binaryUpdateIndex)
|
||||
binaryUpdateConfig, intelUpdateConfig, err := MakeUpdateConfigs(svcCfg)
|
||||
if err != nil {
|
||||
return instance, fmt.Errorf("create updates config: %w", err)
|
||||
}
|
||||
instance.binaryUpdates, err = updates.New(instance, "Binary Updater", *binaryUpdateConfig)
|
||||
if err != nil {
|
||||
return instance, fmt.Errorf("create updates module: %w", err)
|
||||
}
|
||||
instance.intelUpdates, err = updates.New(instance, "Intel Updater", intelUpdateIndex)
|
||||
instance.intelUpdates, err = updates.New(instance, "Intel Updater", *intelUpdateConfig)
|
||||
if err != nil {
|
||||
return instance, fmt.Errorf("create updates module: %w", err)
|
||||
}
|
||||
|
@ -413,6 +362,18 @@ func (i *Instance) SetSleep(enabled bool) {
|
|||
}
|
||||
}
|
||||
|
||||
// BinDir returns the directory for binaries.
|
||||
// This directory may be read-only.
|
||||
func (i *Instance) BinDir() string {
|
||||
return i.binDir
|
||||
}
|
||||
|
||||
// DataDir returns the directory for variable data.
|
||||
// This directory is expected to be read/writeable.
|
||||
func (i *Instance) DataDir() string {
|
||||
return i.dataDir
|
||||
}
|
||||
|
||||
// Database returns the database module.
|
||||
func (i *Instance) Database() *dbmodule.DBModule {
|
||||
return i.database
|
||||
|
|
|
@ -39,9 +39,9 @@ var (
|
|||
filterListLock sync.RWMutex
|
||||
|
||||
// Updater files for tracking upgrades.
|
||||
baseFile *updates.File
|
||||
intermediateFile *updates.File
|
||||
urgentFile *updates.File
|
||||
baseFile *updates.Artifact
|
||||
intermediateFile *updates.Artifact
|
||||
urgentFile *updates.Artifact
|
||||
|
||||
filterListsLoaded chan struct{}
|
||||
)
|
||||
|
@ -77,7 +77,7 @@ func isLoaded() bool {
|
|||
|
||||
// processListFile opens the latest version of file and decodes it's DSDL
|
||||
// content. It calls processEntry for each decoded filterlists entry.
|
||||
func processListFile(ctx context.Context, filter *scopedBloom, file *updates.File) error {
|
||||
func processListFile(ctx context.Context, filter *scopedBloom, file *updates.Artifact) error {
|
||||
f, err := os.Open(file.Path())
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
|
@ -162,7 +162,7 @@ func getListIndexFromCache() (*ListIndexFile, error) {
|
|||
|
||||
var (
|
||||
// listIndexUpdate must only be used by updateListIndex.
|
||||
listIndexUpdate *updates.File
|
||||
listIndexUpdate *updates.Artifact
|
||||
listIndexUpdateLock sync.Mutex
|
||||
)
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ func performUpdate(ctx context.Context) error {
|
|||
// First, update the list index.
|
||||
err := updateListIndex()
|
||||
if err != nil {
|
||||
log.Errorf("intel/filterlists: failed update list index: %s", err)
|
||||
log.Warningf("intel/filterlists: failed update list index: %s", err)
|
||||
}
|
||||
|
||||
upgradables, err := getUpgradableFiles()
|
||||
|
@ -83,7 +83,7 @@ func performUpdate(ctx context.Context) error {
|
|||
// perform the actual upgrade by processing each file
|
||||
// in the returned order.
|
||||
for idx, file := range upgradables {
|
||||
log.Debugf("intel/filterlists: applying update (%d) %s version %s", idx, file.Identifier(), file.Version())
|
||||
log.Debugf("intel/filterlists: applying update (%d) %s version %s", idx, file.Filename, file.Version)
|
||||
|
||||
if file == baseFile {
|
||||
if idx != 0 {
|
||||
|
@ -101,7 +101,7 @@ func performUpdate(ctx context.Context) error {
|
|||
}
|
||||
|
||||
if err := processListFile(ctx, filterToUpdate, file); err != nil {
|
||||
return fmt.Errorf("failed to process upgrade %s: %w", file.Identifier(), err)
|
||||
return fmt.Errorf("failed to process upgrade %s version %s: %w", file.Filename, file.Version, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -145,10 +145,10 @@ func performUpdate(ctx context.Context) error {
|
|||
|
||||
// try to save the highest version of our files.
|
||||
highestVersion := upgradables[len(upgradables)-1]
|
||||
if err := setCacheDatabaseVersion(highestVersion.Version()); err != nil {
|
||||
if err := setCacheDatabaseVersion(highestVersion.Version); err != nil {
|
||||
log.Errorf("intel/filterlists: failed to save cache database version: %s", err)
|
||||
} else {
|
||||
log.Infof("intel/filterlists: successfully migrated cache database to %s", highestVersion.Version())
|
||||
log.Infof("intel/filterlists: successfully migrated cache database to %s", highestVersion.Version)
|
||||
}
|
||||
|
||||
// The list update succeeded, resolve any states.
|
||||
|
@ -174,51 +174,51 @@ func removeAllObsoleteFilterEntries(wc *mgr.WorkerCtx) error {
|
|||
// getUpgradableFiles returns a slice of filterlists files
|
||||
// that should be updated. The files MUST be updated and
|
||||
// processed in the returned order!
|
||||
func getUpgradableFiles() ([]*updates.File, error) {
|
||||
var updateOrder []*updates.File
|
||||
func getUpgradableFiles() ([]*updates.Artifact, error) {
|
||||
var updateOrder []*updates.Artifact
|
||||
|
||||
// cacheDBInUse := isLoaded()
|
||||
cacheDBInUse := isLoaded()
|
||||
|
||||
// if baseFile == nil || !cacheDBInUse { // TODO(vladimir): || baseFile.UpgradeAvailable()
|
||||
// var err error
|
||||
// baseFile, err = module.instance.Updates().GetFile(baseListFilePath)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// log.Tracef("intel/filterlists: base file needs update, selected version %s", baseFile.Version())
|
||||
// updateOrder = append(updateOrder, baseFile)
|
||||
// }
|
||||
newBaseFile, err := module.instance.IntelUpdates().GetFile(baseListFilePath)
|
||||
if err != nil {
|
||||
log.Warningf("intel/filterlists: failed to get base update: %s", err)
|
||||
} else if newer, _ := newBaseFile.IsNewerThan(baseFile); newer || !cacheDBInUse {
|
||||
log.Tracef("intel/filterlists: base file needs update to version %s", newBaseFile.Version)
|
||||
if newBaseFile.SemVer() == nil {
|
||||
log.Warningf("intel/filterlists: base file needs update to version %s, but semver is invalid", newBaseFile.Version)
|
||||
} else {
|
||||
updateOrder = append(updateOrder, newBaseFile)
|
||||
}
|
||||
}
|
||||
|
||||
// if intermediateFile == nil || intermediateFile.UpgradeAvailable() || !cacheDBInUse {
|
||||
// var err error
|
||||
// intermediateFile, err = getFile(intermediateListFilePath)
|
||||
// if err != nil && !errors.Is(err, updater.ErrNotFound) {
|
||||
// return nil, err
|
||||
// }
|
||||
newIntermediateFile, err := module.instance.IntelUpdates().GetFile(intermediateListFilePath)
|
||||
if err != nil {
|
||||
log.Warningf("intel/filterlists: failed to get intermediate update: %s", err)
|
||||
} else if newer, _ := newIntermediateFile.IsNewerThan(intermediateFile); newer || !cacheDBInUse {
|
||||
log.Tracef("intel/filterlists: intermediate file needs update to version %s", newIntermediateFile.Version)
|
||||
if newIntermediateFile.SemVer() == nil {
|
||||
log.Warningf("intel/filterlists: intermediate file needs update to version %s, but semver is invalid", newIntermediateFile.Version)
|
||||
} else {
|
||||
updateOrder = append(updateOrder, newIntermediateFile)
|
||||
}
|
||||
}
|
||||
|
||||
// if err == nil {
|
||||
// log.Tracef("intel/filterlists: intermediate file needs update, selected version %s", intermediateFile.Version())
|
||||
// updateOrder = append(updateOrder, intermediateFile)
|
||||
// }
|
||||
// }
|
||||
|
||||
// if urgentFile == nil || urgentFile.UpgradeAvailable() || !cacheDBInUse {
|
||||
// var err error
|
||||
// urgentFile, err = getFile(urgentListFilePath)
|
||||
// if err != nil && !errors.Is(err, updater.ErrNotFound) {
|
||||
// return nil, err
|
||||
// }
|
||||
|
||||
// if err == nil {
|
||||
// log.Tracef("intel/filterlists: urgent file needs update, selected version %s", urgentFile.Version())
|
||||
// updateOrder = append(updateOrder, urgentFile)
|
||||
// }
|
||||
// }
|
||||
newUrgentFile, err := module.instance.IntelUpdates().GetFile(urgentListFilePath)
|
||||
if err != nil {
|
||||
log.Warningf("intel/filterlists: failed to get urgent update: %s", err)
|
||||
} else if newer, _ := newUrgentFile.IsNewerThan(urgentFile); newer || !cacheDBInUse {
|
||||
log.Tracef("intel/filterlists: urgent file needs update to version %s", newUrgentFile.Version)
|
||||
if newUrgentFile.SemVer() == nil {
|
||||
log.Warningf("intel/filterlists: urgent file needs update to version %s, but semver is invalid", newUrgentFile.Version)
|
||||
} else {
|
||||
updateOrder = append(updateOrder, newUrgentFile)
|
||||
}
|
||||
}
|
||||
|
||||
return resolveUpdateOrder(updateOrder)
|
||||
}
|
||||
|
||||
func resolveUpdateOrder(updateOrder []*updates.File) ([]*updates.File, error) {
|
||||
func resolveUpdateOrder(updateOrder []*updates.Artifact) ([]*updates.Artifact, error) {
|
||||
// sort the update order by ascending version
|
||||
sort.Sort(byAscVersion(updateOrder))
|
||||
log.Tracef("intel/filterlists: order of updates: %v", updateOrder)
|
||||
|
@ -239,9 +239,8 @@ func resolveUpdateOrder(updateOrder []*updates.File) ([]*updates.File, error) {
|
|||
|
||||
startAtIdx := -1
|
||||
for idx, file := range updateOrder {
|
||||
ver, _ := version.NewSemver(file.Version())
|
||||
log.Tracef("intel/filterlists: checking file with version %s against %s", ver, cacheDBVersion)
|
||||
if ver.GreaterThan(cacheDBVersion) && (startAtIdx == -1 || file == baseFile) {
|
||||
log.Tracef("intel/filterlists: checking file with version %s against %s", file.SemVer(), cacheDBVersion)
|
||||
if file.SemVer().GreaterThan(cacheDBVersion) && (startAtIdx == -1 || file == baseFile) {
|
||||
startAtIdx = idx
|
||||
}
|
||||
}
|
||||
|
@ -258,15 +257,12 @@ func resolveUpdateOrder(updateOrder []*updates.File) ([]*updates.File, error) {
|
|||
return updateOrder[startAtIdx:], nil
|
||||
}
|
||||
|
||||
type byAscVersion []*updates.File
|
||||
type byAscVersion []*updates.Artifact
|
||||
|
||||
func (fs byAscVersion) Len() int { return len(fs) }
|
||||
|
||||
func (fs byAscVersion) Less(i, j int) bool {
|
||||
vi, _ := version.NewSemver(fs[i].Version())
|
||||
vj, _ := version.NewSemver(fs[j].Version())
|
||||
|
||||
return vi.LessThan(vj)
|
||||
return fs[i].SemVer().LessThan(fs[j].SemVer())
|
||||
}
|
||||
|
||||
func (fs byAscVersion) Swap(i, j int) {
|
||||
|
|
|
@ -17,6 +17,12 @@ var worker *updateWorker
|
|||
func init() {
|
||||
worker = &updateWorker{
|
||||
trigger: make(chan struct{}),
|
||||
v4: updateBroadcaster{
|
||||
dbName: v4MMDBResource,
|
||||
},
|
||||
v6: updateBroadcaster{
|
||||
dbName: v6MMDBResource,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -27,26 +33,50 @@ const (
|
|||
|
||||
type geoIPDB struct {
|
||||
*maxminddb.Reader
|
||||
file *updates.File
|
||||
update *updates.Artifact
|
||||
}
|
||||
|
||||
// updateBroadcaster stores a geoIPDB and provides synchronized
|
||||
// access to the MMDB reader. It also supports broadcasting to
|
||||
// multiple waiters when a new database becomes available.
|
||||
type updateBroadcaster struct {
|
||||
rw sync.RWMutex
|
||||
db *geoIPDB
|
||||
rw sync.RWMutex
|
||||
db *geoIPDB
|
||||
dbName string
|
||||
|
||||
waiter chan struct{}
|
||||
}
|
||||
|
||||
// NeedsUpdate returns true if the current broadcaster needs a
|
||||
// database update.
|
||||
func (ub *updateBroadcaster) NeedsUpdate() bool {
|
||||
// AvailableUpdate returns a new update artifact if the current broadcaster
|
||||
// needs a database update.
|
||||
func (ub *updateBroadcaster) AvailableUpdate() *updates.Artifact {
|
||||
ub.rw.RLock()
|
||||
defer ub.rw.RUnlock()
|
||||
|
||||
return ub.db == nil // TODO(vladimir) is this needed: || ub.db.file.UpgradeAvailable()
|
||||
// Get artifact.
|
||||
artifact, err := module.instance.IntelUpdates().GetFile(ub.dbName)
|
||||
if err != nil {
|
||||
// Check if the geoip database is included in the binary index instead.
|
||||
// TODO: Remove when intelhub builds the geoip database.
|
||||
if artifact2, err2 := module.instance.BinaryUpdates().GetFile(ub.dbName); err2 == nil {
|
||||
artifact = artifact2
|
||||
err = nil
|
||||
} else {
|
||||
log.Warningf("geoip: failed to get geoip update: %s", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Return artifact if not yet initialized.
|
||||
if ub.db == nil {
|
||||
return artifact
|
||||
}
|
||||
|
||||
// Compare and return artifact only when confirmed newer.
|
||||
if newer, _ := artifact.IsNewerThan(ub.db.update); newer {
|
||||
return artifact
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReplaceDatabase replaces (or initially sets) the mmdb database.
|
||||
|
@ -153,16 +183,18 @@ func (upd *updateWorker) start() {
|
|||
|
||||
func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error {
|
||||
for {
|
||||
if upd.v4.NeedsUpdate() {
|
||||
if v4, err := getGeoIPDB(v4MMDBResource); err == nil {
|
||||
update := upd.v4.AvailableUpdate()
|
||||
if update != nil {
|
||||
if v4, err := getGeoIPDB(update); err == nil {
|
||||
upd.v4.ReplaceDatabase(v4)
|
||||
} else {
|
||||
log.Warningf("geoip: failed to get v4 database: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if upd.v6.NeedsUpdate() {
|
||||
if v6, err := getGeoIPDB(v6MMDBResource); err == nil {
|
||||
update = upd.v6.AvailableUpdate()
|
||||
if update != nil {
|
||||
if v6, err := getGeoIPDB(update); err == nil {
|
||||
upd.v6.ReplaceDatabase(v6)
|
||||
} else {
|
||||
log.Warningf("geoip: failed to get v6 database: %s", err)
|
||||
|
@ -177,36 +209,17 @@ func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error {
|
|||
}
|
||||
}
|
||||
|
||||
func getGeoIPDB(resource string) (*geoIPDB, error) {
|
||||
log.Debugf("geoip: opening database %s", resource)
|
||||
func getGeoIPDB(update *updates.Artifact) (*geoIPDB, error) {
|
||||
log.Debugf("geoip: opening database %s", update.Path())
|
||||
|
||||
file, err := open(resource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader, err := maxminddb.Open(file.Path())
|
||||
reader, err := maxminddb.Open(update.Path())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to open: %w", err)
|
||||
}
|
||||
log.Debugf("geoip: successfully opened database %s", resource)
|
||||
log.Debugf("geoip: successfully opened database %s", update.Filename)
|
||||
|
||||
return &geoIPDB{
|
||||
Reader: reader,
|
||||
file: file,
|
||||
update: update,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func open(resource string) (*updates.File, error) {
|
||||
f, err := module.instance.IntelUpdates().GetFile(resource)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting file: %w", err)
|
||||
}
|
||||
|
||||
// unpacked, err := f.Unpack(".gz", updater.UnpackGZIP)
|
||||
// if err != nil {
|
||||
// return nil, "", fmt.Errorf("unpacking file: %w", err)
|
||||
// }
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
|
|
@@ -66,5 +66,6 @@ func New(instance instance) (*GeoIP, error) {
}

type instance interface {
	BinaryUpdates() *updates.Updater
	IntelUpdates() *updates.Updater
}
|
|
@ -5,6 +5,7 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
@ -17,7 +18,6 @@ import (
|
|||
"zombiezen.com/go/sqlite/sqlitex"
|
||||
|
||||
"github.com/safing/portmaster/base/config"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/log"
|
||||
"github.com/safing/portmaster/service/netquery/orm"
|
||||
"github.com/safing/portmaster/service/network"
|
||||
|
@ -127,13 +127,13 @@ type (
|
|||
// Note that write connections are serialized by the Database object before being
|
||||
// handed over to SQLite.
|
||||
func New(dbPath string) (*Database, error) {
|
||||
historyParentDir := dataroot.Root().ChildDir("databases", 0o700)
|
||||
if err := historyParentDir.Ensure(); err != nil {
|
||||
historyParentDir := filepath.Join(module.instance.DataDir(), "databases")
|
||||
if err := os.MkdirAll(historyParentDir, 0o0700); err != nil {
|
||||
return nil, fmt.Errorf("failed to ensure database directory exists: %w", err)
|
||||
}
|
||||
|
||||
// Get file location of history database.
|
||||
historyFile := filepath.Join(historyParentDir.Path, "history.db")
|
||||
historyFile := filepath.Join(historyParentDir, "history.db")
|
||||
// Convert to SQLite URI path.
|
||||
historyURI := "file:///" + strings.TrimPrefix(filepath.ToSlash(historyFile), "/")
|
||||
|
||||
|
@ -225,13 +225,13 @@ func (db *Database) Close() error {
|
|||
|
||||
// VacuumHistory rewrites the history database in order to purge deleted records.
|
||||
func VacuumHistory(ctx context.Context) (err error) {
|
||||
historyParentDir := dataroot.Root().ChildDir("databases", 0o700)
|
||||
if err := historyParentDir.Ensure(); err != nil {
|
||||
historyParentDir := filepath.Join(module.instance.DataDir(), "databases")
|
||||
if err := os.MkdirAll(historyParentDir, 0o0700); err != nil {
|
||||
return fmt.Errorf("failed to ensure database directory exists: %w", err)
|
||||
}
|
||||
|
||||
// Get file location of history database.
|
||||
historyFile := filepath.Join(historyParentDir.Path, "history.db")
|
||||
historyFile := filepath.Join(historyParentDir, "history.db")
|
||||
// Convert to SQLite URI path.
|
||||
historyURI := "file:///" + strings.TrimPrefix(filepath.ToSlash(historyFile), "/")
|
||||
|
||||
|
|
|
@ -310,5 +310,6 @@ func NewModule(instance instance) (*NetQuery, error) {
|
|||
}
|
||||
|
||||
type instance interface {
|
||||
DataDir() string
|
||||
Profile() *profile.ProfileModule
|
||||
}
|
||||
|
|
|
@ -3,12 +3,13 @@ package profile
|
|||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/safing/portmaster/base/config"
|
||||
"github.com/safing/portmaster/base/database"
|
||||
"github.com/safing/portmaster/base/database/migration"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/log"
|
||||
_ "github.com/safing/portmaster/service/core/base"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
|
@ -65,11 +66,11 @@ func prep() error {
|
|||
}
|
||||
|
||||
// Setup icon storage location.
|
||||
iconsDir := dataroot.Root().ChildDir("databases", 0o0700).ChildDir("icons", 0o0700)
|
||||
if err := iconsDir.Ensure(); err != nil {
|
||||
iconsDir := filepath.Join(module.instance.DataDir(), "databases", "icons")
|
||||
if err := os.MkdirAll(iconsDir, 0o0700); err != nil {
|
||||
return fmt.Errorf("failed to create/check icons directory: %w", err)
|
||||
}
|
||||
binmeta.ProfileIconStoragePath = iconsDir.Path
|
||||
binmeta.ProfileIconStoragePath = iconsDir
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -151,5 +152,6 @@ func NewModule(instance instance) (*ProfileModule, error) {
|
|||
}
|
||||
|
||||
type instance interface {
|
||||
DataDir() string
|
||||
Config() *config.Config
|
||||
}
|
||||
|
|
|
@@ -197,7 +197,7 @@ func (profile *Profile) parseConfig() error {
	if ok {
		profile.filterListIDs, err = filterlists.ResolveListIDs(list)
		if err != nil {
			lastErr = err
			log.Warningf("profiles: failed to resolve filter list IDs: %s", err)
		} else {
			profile.filterListsSet = true
		}
|
|
@ -2,10 +2,11 @@ package ui
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/safing/portmaster/base/api"
|
||||
"github.com/safing/portmaster/base/dataroot"
|
||||
"github.com/safing/portmaster/base/log"
|
||||
"github.com/safing/portmaster/service/mgr"
|
||||
"github.com/safing/portmaster/service/updates"
|
||||
|
@ -28,7 +29,8 @@ func start() error {
|
|||
// may seem dangerous, but proper permission on the parent directory provide
|
||||
// (some) protection.
|
||||
// Processes must _never_ read from this directory.
|
||||
err := dataroot.Root().ChildDir("exec", 0o0777).Ensure()
|
||||
execDir := filepath.Join(module.instance.DataDir(), "exec")
|
||||
err := os.MkdirAll(execDir, 0o0777) //nolint:gosec // This is intentional.
|
||||
if err != nil {
|
||||
log.Warningf("ui: failed to create safe exec dir: %s", err)
|
||||
}
|
||||
|
@ -81,6 +83,7 @@ func New(instance instance) (*UI, error) {
|
|||
}
|
||||
|
||||
type instance interface {
|
||||
DataDir() string
|
||||
API() *api.API
|
||||
BinaryUpdates() *updates.Updater
|
||||
}
|
||||
|
|
service/updates.go (new file, 106 lines)
|
@ -0,0 +1,106 @@
package service

import (
    "path/filepath"
    go_runtime "runtime"

    "github.com/safing/jess"
    "github.com/safing/portmaster/service/updates"
)

var (
    DefaultBinaryIndexURLs = []string{
        "https://updates.safing.io/stable.v3.json",
    }
    DefaultIntelIndexURLs = []string{
        "https://updates.safing.io/intel.v3.json",
    }

    // BinarySigningKeys holds the signing keys in text format.
    BinarySigningKeys = []string{
        // Safing Code Signing Key #1
        "recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
        // Safing Code Signing Key #2
        "recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
    }

    // BinarySigningTrustStore is an in-memory trust store with the signing keys.
    BinarySigningTrustStore = jess.NewMemTrustStore()
)

func init() {
    for _, signingKey := range BinarySigningKeys {
        rcpt, err := jess.RecipientFromTextFormat(signingKey)
        if err != nil {
            panic(err)
        }
        err = BinarySigningTrustStore.StoreSignet(rcpt)
        if err != nil {
            panic(err)
        }
    }
}

func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateConfig *updates.Config, err error) {
    switch go_runtime.GOOS {
    case "windows":
        binaryUpdateConfig = &updates.Config{
            Name:              "binaries",
            Directory:         svcCfg.BinDir,
            DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"),
            PurgeDirectory:    filepath.Join(svcCfg.BinDir, "upgrade_obsolete_binaries"),
            Ignore:            []string{"databases", "intel", "config.json"},
            IndexURLs:         svcCfg.BinariesIndexURLs,
            IndexFile:         "index.json",
            Verify:            svcCfg.VerifyBinaryUpdates,
            AutoDownload:      false,
            AutoApply:         false,
            NeedsRestart:      true,
            Notify:            true,
        }
        intelUpdateConfig = &updates.Config{
            Name:              "intel",
            Directory:         filepath.Join(svcCfg.DataDir, "intel"),
            DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"),
            PurgeDirectory:    filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"),
            IndexURLs:         svcCfg.IntelIndexURLs,
            IndexFile:         "index.json",
            Verify:            svcCfg.VerifyIntelUpdates,
            AutoDownload:      true,
            AutoApply:         true,
            NeedsRestart:      false,
            Notify:            false,
        }

    case "linux":
        binaryUpdateConfig = &updates.Config{
            Name:              "binaries",
            Directory:         svcCfg.BinDir,
            DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"),
            PurgeDirectory:    filepath.Join(svcCfg.DataDir, "upgrade_obsolete_binaries"),
            Ignore:            []string{"databases", "intel", "config.json"},
            IndexURLs:         svcCfg.BinariesIndexURLs,
            IndexFile:         "index.json",
            Verify:            svcCfg.VerifyBinaryUpdates,
            AutoDownload:      false,
            AutoApply:         false,
            NeedsRestart:      true,
            Notify:            true,
        }
        intelUpdateConfig = &updates.Config{
            Name:              "intel",
            Directory:         filepath.Join(svcCfg.DataDir, "intel"),
            DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"),
            PurgeDirectory:    filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"),
            IndexURLs:         svcCfg.IntelIndexURLs,
            IndexFile:         "index.json",
            Verify:            svcCfg.VerifyIntelUpdates,
            AutoDownload:      true,
            AutoApply:         true,
            NeedsRestart:      false,
            Notify:            false,
        }
    }

    return
}

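How these two configs are meant to be consumed is easiest to see from the caller's side. The following is a rough wiring sketch, not part of this commit: it assumes the package service context above with "errors" imported, and the error handling is illustrative. Note that MakeUpdateConfigs never assigns its named err return in the code shown, so the nil checks on the returned configs are what actually catch unsupported platforms.

    func exampleUpdateConfigs(svcCfg *ServiceConfig) (*updates.Config, *updates.Config, error) {
        binCfg, intelCfg, err := MakeUpdateConfigs(svcCfg)
        if err != nil {
            return nil, nil, err
        }
        // On platforms other than windows and linux both configs stay nil.
        if binCfg == nil || intelCfg == nil {
            return nil, nil, errors.New("unsupported platform")
        }
        // Validate the generated configs before constructing the updaters.
        if err := binCfg.Check(); err != nil {
            return nil, nil, err
        }
        if err := intelCfg.Check(); err != nil {
            return nil, nil, err
        }
        return binCfg, intelCfg, nil
    }
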
@ -55,7 +55,7 @@ func (d *Downloader) updateIndex(ctx context.Context) error {
            break
        }

        log.Warningf("updates: failed to update index from %q: %s", url, err)
        log.Warningf("updates/%s: failed to update index from %q: %s", d.u.cfg.Name, url, err)
        err = fmt.Errorf("update index file from %q: %s", url, err)
    }
    if err != nil {

@ -111,7 +111,7 @@ func (d *Downloader) gatherExistingFiles(dir string) error {
        // Read full file.
        fileData, err := os.ReadFile(fullpath)
        if err != nil {
            log.Debugf("updates: failed to read file %q while searching for existing files: %w", fullpath, err)
            log.Debugf("updates/%s: failed to read file %q while searching for existing files: %w", d.u.cfg.Name, fullpath, err)
            return fmt.Errorf("failed to read file %s: %w", fullpath, err)
        }

@ -150,7 +150,7 @@ artifacts:
            if err == nil {
                continue artifacts
            }
            log.Debugf("updates: failed to copy existing file %s: %w", artifact.Filename, err)
            log.Debugf("updates/%s: failed to copy existing file %s: %w", d.u.cfg.Name, artifact.Filename, err)
        }

        // Try to download the artifact from one of the URLs.

@ -182,7 +182,7 @@ artifacts:
            return fmt.Errorf("rename %s after write: %w", artifact.Filename, err)
        }

        log.Infof("updates: downloaded and verified %s", artifact.Filename)
        log.Infof("updates/%s: downloaded and verified %s", d.u.cfg.Name, artifact.Filename)
    }
    return nil
}

@ -35,7 +35,8 @@ type Artifact struct {
    Unpack   string `json:"Unpack,omitempty"`
    Version  string `json:"Version,omitempty"`

    localFile string
    localFile  string
    versionNum *semver.Version
}

// GetFileMode returns the required filesystem permission for the artifact.

@ -52,6 +53,67 @@ func (a *Artifact) GetFileMode() os.FileMode {
    return defaultFileMode
}

// Path returns the absolute path to the local file.
func (a *Artifact) Path() string {
    return a.localFile
}

// SemVer returns the version of the artifact.
func (a *Artifact) SemVer() *semver.Version {
    return a.versionNum
}

// IsNewerThan returns whether the artifact is newer than the given artifact.
// Returns true if the given artifact is nil.
// The second return value "ok" is false when the versions could not be compared.
// In this case, it is up to the caller to decide how to proceed.
func (a *Artifact) IsNewerThan(b *Artifact) (newer, ok bool) {
    switch {
    case a == nil:
        return false, false
    case b == nil:
        return true, true
    case a.versionNum == nil:
        return false, false
    case b.versionNum == nil:
        return false, false
    case a.versionNum.GreaterThan(b.versionNum):
        return true, true
    default:
        return false, true
    }
}

func (a *Artifact) export(dir string, indexVersion *semver.Version) *Artifact {
    copy := &Artifact{
        Filename:   a.Filename,
        SHA256:     a.SHA256,
        URLs:       a.URLs,
        Platform:   a.Platform,
        Unpack:     a.Unpack,
        Version:    a.Version,
        localFile:  filepath.Join(dir, a.Filename),
        versionNum: a.versionNum,
    }

    // Make sure we have a version number.
    switch {
    case copy.versionNum != nil:
        // Version already parsed.
    case copy.Version != "":
        // Need to parse version.
        v, err := semver.NewVersion(copy.Version)
        if err == nil {
            copy.versionNum = v
        }
    default:
        // No version defined, inherit index version.
        copy.versionNum = indexVersion
    }

    return copy
}

// Index represents a collection of artifacts with metadata.
type Index struct {
    Name string `json:"Name"`

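The newer/ok contract of IsNewerThan is easiest to read from the caller's side. A minimal sketch with hypothetical variables a and b, assumed to carry parsed version numbers (for example because they were exported from an index):

    newer, ok := a.IsNewerThan(b)
    switch {
    case !ok:
        // Versions could not be compared (nil receiver or missing version);
        // the caller decides how to proceed, e.g. update defensively.
    case newer:
        // a is strictly newer than b.
    default:
        // a is the same version as b, or older.
    }
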
@ -90,16 +152,26 @@ func ParseIndex(jsonContent []byte, trustStore jess.TrustStore) (*Index, error)
        return nil, fmt.Errorf("parse index: %w", err)
    }

    // Initialize data.
    err = index.init()
    if err != nil {
        return nil, err
    }

    return &index, nil
}

func (index *Index) init() error {
    // Parse version number, if set.
    if index.Version != "" {
        versionNum, err := semver.NewVersion(index.Version)
        if err != nil {
            return nil, fmt.Errorf("invalid index version %q: %w", index.Version, err)
            return fmt.Errorf("invalid index version %q: %w", index.Version, err)
        }
        index.versionNum = versionNum
    }

    // Filter artifacts by currnet platform.
    // Filter artifacts by current platform.
    filtered := make([]Artifact, 0)
    for _, a := range index.Artifacts {
        if a.Platform == "" || a.Platform == currentPlatform {

@ -108,7 +180,19 @@ func ParseIndex(jsonContent []byte, trustStore jess.TrustStore) (*Index, error)
        }
    }
    index.Artifacts = filtered

    return &index, nil
    // Parse artifact version numbers.
    for _, a := range index.Artifacts {
        if a.Version != "" {
            v, err := semver.NewVersion(a.Version)
            if err == nil {
                a.versionNum = v
            }
        } else {
            a.versionNum = index.versionNum
        }
    }

    return nil
}

// CanDoUpgrades returns whether the index is able to follow a secure upgrade path.

@ -45,6 +45,8 @@ var (

// Config holds the configuration for the updates module.
type Config struct {
    // Name of the updater.
    Name string
    // Directory is the main directory where the currently to-be-used artifacts live.
    Directory string
    // DownloadDirectory is the directory where new artifacts are downloaded to and prepared for upgrading.

@ -80,6 +82,8 @@ type Config struct {
func (cfg *Config) Check() error {
    // Check if required fields are set.
    switch {
    case cfg.Name == "":
        return errors.New("name must be set")
    case cfg.Directory == "":
        return errors.New("directory must be set")
    case cfg.DownloadDirectory == "":

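For comparison with the generated configs in service/updates.go, a hand-written config could look roughly like the sketch below. The values are illustrative, and Check may require more fields than the three cases visible in this hunk, so treat it only as a sketch of the shape of a valid Config:

    cfg := &updates.Config{
        Name:              "intel",
        Directory:         "/var/lib/portmaster/intel",
        DownloadDirectory: "/var/lib/portmaster/download_intel",
        PurgeDirectory:    "/var/lib/portmaster/upgrade_obsolete_intel",
        IndexURLs:         []string{"https://updates.safing.io/intel.v3.json"},
        IndexFile:         "index.json",
        AutoDownload:      true,
        AutoApply:         true,
    }
    if err := cfg.Check(); err != nil {
        // e.g. "name must be set" if Name were left empty.
        return err
    }
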
@ -157,19 +161,22 @@ func New(instance instance, name string, cfg Config) (*Updater, error) {

    // Load index.
    index, err := LoadIndex(filepath.Join(cfg.Directory, cfg.IndexFile), cfg.Verify)
    if err != nil {
        if !errors.Is(err, os.ErrNotExist) {
            log.Errorf("updates: invalid index file, falling back to dir scan: %w", err)
        }

        // Fall back to scanning the directory.
        index, err = GenerateIndexFromDir(cfg.Directory, IndexScanConfig{Version: "0.0.0"})
        if err != nil {
            return nil, fmt.Errorf("updates index load and dir scan failed: %w", err)
        }
    if err == nil {
        module.index = index
        return module, nil
    }
    module.index = index

    // Fall back to scanning the directory.
    if !errors.Is(err, os.ErrNotExist) {
        log.Errorf("updates/%s: invalid index file, falling back to dir scan: %w", cfg.Name, err)
    }
    index, err = GenerateIndexFromDir(cfg.Directory, IndexScanConfig{Version: "0.0.0"})
    if err == nil && index.init() == nil {
        module.index = index
        return module, nil
    }

    // Fall back to empty index.
    return module, nil
}

@ -207,7 +214,7 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV
    u.indexLock.Unlock()
    // Check with local pointer to index.
    if err := index.ShouldUpgradeTo(downloader.index); err != nil {
        log.Infof("updates: no new or eligible update: %s", err)
        log.Infof("updates/%s: no new or eligible update: %s", u.cfg.Name, err)
        if u.cfg.Notify && u.instance.Notifications() != nil {
            u.instance.Notifications().NotifyInfo(
                noNewUpdateNotificationID,

@ -247,12 +254,12 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV

    // Download any remaining needed files.
    // If everything is already found in the download directory, then this is a no-op.
    log.Infof("updates: downloading new version: %s %s", downloader.index.Name, downloader.index.Version)
    log.Infof("updates/%s: downloading new version: %s %s", u.cfg.Name, downloader.index.Name, downloader.index.Version)
    err = downloader.downloadArtifacts(w.Ctx())
    if err != nil {
        log.Errorf("updates: failed to download update: %s", err)
        log.Errorf("updates/%s: failed to download update: %s", u.cfg.Name, err)
        if err := u.deleteUnfinishedFiles(u.cfg.DownloadDirectory); err != nil {
            log.Debugf("updates: failed to delete unfinished files in download directory %s", u.cfg.DownloadDirectory)
            log.Debugf("updates/%s: failed to delete unfinished files in download directory %s", u.cfg.Name, u.cfg.DownloadDirectory)
        }
        return fmt.Errorf("downloading failed: %w", err)
    }

@ -282,7 +289,7 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV
    err = u.upgrade(downloader, ignoreVersion)
    if err != nil {
        if err := u.deleteUnfinishedFiles(u.cfg.PurgeDirectory); err != nil {
            log.Debugf("updates: failed to delete unfinished files in purge directory %s", u.cfg.PurgeDirectory)
            log.Debugf("updates/%s: failed to delete unfinished files in purge directory %s", u.cfg.Name, u.cfg.PurgeDirectory)
        }
        return err
    }

@ -334,6 +341,14 @@ func (u *Updater) upgradeWorker(w *mgr.WorkerCtx) error {
    return nil
}

// ForceUpdate executes a forced update and upgrade directly and synchronously
// and is intended to be used only within a tool, not a service.
func (u *Updater) ForceUpdate() error {
    return u.m.Do("update and upgrade", func(w *mgr.WorkerCtx) error {
        return u.updateAndUpgrade(w, u.cfg.IndexURLs, true, true)
    })
}

// UpdateFromURL installs an update from the provided url.
func (u *Updater) UpdateFromURL(url string) error {
    u.m.Go("custom update from url", func(w *mgr.WorkerCtx) error {

@ -383,10 +398,15 @@ func (u *Updater) GetMainDir() string {
}

// GetFile returns the path of a file given the name. Returns ErrNotFound if file is not found.
func (u *Updater) GetFile(name string) (string, error) {
func (u *Updater) GetFile(name string) (*Artifact, error) {
    u.indexLock.Lock()
    defer u.indexLock.Unlock()

    // Check if any index is active.
    if u.index == nil {
        return nil, ErrNotFound
    }

    for _, artifact := range u.index.Artifacts {
        switch {
        case artifact.Filename != name:

@ -396,11 +416,11 @@ func (u *Updater) GetFile(name string) (string, error) {
        // Platforms are usually pre-filtered, but just to be sure.
        default:
            // Artifact matches!
            return filepath.Join(u.cfg.Directory, artifact.Filename), nil
            return artifact.export(u.cfg.Directory, u.index.versionNum), nil
        }
    }

    return "", ErrNotFound
    return nil, ErrNotFound
}

// Stop stops the module.

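Call sites adapt from a plain path string to the exported artifact. A rough before/after sketch with hypothetical names (updater and the artifact name are placeholders, not taken from this commit):

    // Before: path, err := updater.GetFile("portmaster-core")
    artifact, err := updater.GetFile("portmaster-core")
    if err != nil {
        return err
    }
    // The exported artifact carries both the absolute path and the version.
    path := artifact.Path()
    if v := artifact.SemVer(); v != nil {
        log.Infof("updates: using %s v%s at %s", artifact.Filename, v, path)
    }
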
@ -43,7 +43,7 @@ func (u *Updater) upgrade(downloader *Downloader, ignoreVersion bool) error {
    }

    // Recovery failed too.
    return fmt.Errorf("upgrade (including recovery) failed: %s", upgradeError)
    return fmt.Errorf("updates/%s: upgrade (including recovery) failed: %s", u.cfg.Name, upgradeError)
}

func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) error {

@ -60,7 +62,9 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) e
    }

    // Move current version files into purge folder.
    log.Debugf("updates: removing the old version (v%s from %s)", u.index.Version, u.index.Published)
    if u.index != nil {
        log.Debugf("updates/%s: removing the old version (v%s from %s)", u.cfg.Name, u.index.Version, u.index.Published)
    }
    files, err := os.ReadDir(u.cfg.Directory)
    if err != nil {
        return fmt.Errorf("read current directory: %w", err)

@ -74,17 +76,17 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) e
        // Otherwise, move file to purge dir.
        src := filepath.Join(u.cfg.Directory, file.Name())
        dst := filepath.Join(u.cfg.PurgeDirectory, file.Name())
        err := moveFile(src, dst, "", file.Type().Perm())
        err := u.moveFile(src, dst, "", file.Type().Perm())
        if err != nil {
            return fmt.Errorf("failed to move current file %s to purge dir: %w", file.Name(), err)
        }
    }

    // Move the new index file into main directory.
    log.Debugf("updates: installing the new version (v%s from %s)", u.index.Version, u.index.Published)
    log.Debugf("updates/%s: installing the new version (v%s from %s)", u.cfg.Name, downloader.index.Version, downloader.index.Published)
    src := filepath.Join(u.cfg.DownloadDirectory, u.cfg.IndexFile)
    dst := filepath.Join(u.cfg.Directory, u.cfg.IndexFile)
    err = moveFile(src, dst, "", defaultFileMode)
    err = u.moveFile(src, dst, "", defaultFileMode)
    if err != nil {
        return fmt.Errorf("failed to move index file to %s: %w", dst, err)
    }

@ -93,30 +95,30 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) e
    for _, artifact := range downloader.index.Artifacts {
        src = filepath.Join(u.cfg.DownloadDirectory, artifact.Filename)
        dst = filepath.Join(u.cfg.Directory, artifact.Filename)
        err = moveFile(src, dst, artifact.SHA256, artifact.GetFileMode())
        err = u.moveFile(src, dst, artifact.SHA256, artifact.GetFileMode())
        if err != nil {
            return fmt.Errorf("failed to move file %s: %w", artifact.Filename, err)
        } else {
            log.Debugf("updates: %s moved", artifact.Filename)
            log.Debugf("updates/%s: %s moved", u.cfg.Name, artifact.Filename)
        }
    }

    // Set new index on module.
    u.index = downloader.index
    log.Infof("updates: update complete (v%s from %s)", u.index.Version, u.index.Published)
    log.Infof("updates/%s: update complete (v%s from %s)", u.cfg.Name, u.index.Version, u.index.Published)

    return nil
}

// moveFile moves a file and falls back to copying if it fails.
func moveFile(currentPath, newPath string, sha256sum string, fileMode fs.FileMode) error {
func (u *Updater) moveFile(currentPath, newPath string, sha256sum string, fileMode fs.FileMode) error {
    // Try to simply move file.
    err := os.Rename(currentPath, newPath)
    if err == nil {
        // Moving was successful, return.
        return nil
    }
    log.Tracef("updates: failed to move to %q, falling back to copy+delete: %w", newPath, err)
    log.Tracef("updates/%s: failed to move to %q, falling back to copy+delete: %w", u.cfg.Name, newPath, err)

    // Copy and check the checksum while we are at it.
    err = copyAndCheckSHA256Sum(currentPath, newPath, sha256sum, fileMode)

@ -139,10 +141,10 @@ func (u *Updater) recoverFromFailedUpgrade() error {
    for _, file := range files {
        purgedFile := filepath.Join(u.cfg.PurgeDirectory, file.Name())
        activeFile := filepath.Join(u.cfg.Directory, file.Name())
        err := moveFile(purgedFile, activeFile, "", file.Type().Perm())
        err := u.moveFile(purgedFile, activeFile, "", file.Type().Perm())
        if err != nil {
            // Only warn and continue to recover as many files as possible.
            log.Warningf("updates: failed to roll back file %s: %w", file.Name(), err)
            log.Warningf("updates/%s: failed to roll back file %s: %w", u.cfg.Name, file.Name(), err)
        }
    }

@ -176,18 +178,18 @@ func (u *Updater) deleteUnfinishedFiles(dir string) error {

        case strings.HasSuffix(e.Name(), ".download"):
            path := filepath.Join(dir, e.Name())
            log.Warningf("updates: deleting unfinished download file: %s\n", path)
            log.Warningf("updates/%s: deleting unfinished download file: %s", u.cfg.Name, path)
            err := os.Remove(path)
            if err != nil {
                log.Errorf("updates: failed to delete unfinished download file %s: %s", path, err)
                log.Errorf("updates/%s: failed to delete unfinished download file %s: %s", u.cfg.Name, path, err)
            }

        case strings.HasSuffix(e.Name(), ".copy"):
            path := filepath.Join(dir, e.Name())
            log.Warningf("updates: deleting unfinished copied file: %s\n", path)
            log.Warningf("updates/%s: deleting unfinished copied file: %s", u.cfg.Name, path)
            err := os.Remove(path)
            if err != nil {
                log.Errorf("updates: failed to delete unfinished copied file %s: %s", path, err)
                log.Errorf("updates/%s: failed to delete unfinished copied file %s: %s", u.cfg.Name, path, err)
            }
        }
    }

@ -15,8 +15,8 @@ import (
)

var (
    intelResource *updates.File
    intelResourcePath = "intel/spn/main-intel.yaml"
    intelResource        *updates.Artifact
    intelResourceName    = "main-intel.yaml"
    intelResourceMapName = "main"
    intelResourceUpdateLock sync.Mutex
)

@ -42,18 +42,21 @@ func updateSPNIntel(_ context.Context, _ interface{}) (err error) {
        return fmt.Errorf("intel resource not for map %q", conf.MainMapName)
    }

    // Check if there is something to do.
    // TODO(vladimir): is update check needed
    if intelResource != nil { // && !intelResource.UpgradeAvailable() {
        return nil
    }

    // Get intel file and load it from disk.
    intelResource, err = module.instance.IntelUpdates().GetFile(intelResourcePath)
    // Get possibly updated file.
    file, err := module.instance.IntelUpdates().GetFile(intelResourceName)
    if err != nil {
        return fmt.Errorf("failed to get SPN intel update: %w", err)
    }
    intelData, err := os.ReadFile(intelResource.Path())

    // Check if file is newer.
    // Continue on check failure.
    newer, ok := file.IsNewerThan(intelResource)
    if ok && !newer {
        return nil
    }

    // Load intel file from disk.
    intelData, err := os.ReadFile(file.Path())
    if err != nil {
        return fmt.Errorf("failed to load SPN intel update: %w", err)
    }

@ -64,8 +67,15 @@ func updateSPNIntel(_ context.Context, _ interface{}) (err error) {
        return fmt.Errorf("failed to parse SPN intel update: %w", err)
    }

    // Apply new intel.
    setVirtualNetworkConfig(intel.VirtualNetworks)
    return navigator.Main.UpdateIntel(intel, cfgOptionTrustNodeNodes())
    err = navigator.Main.UpdateIntel(intel, cfgOptionTrustNodeNodes())
    if err != nil {
        return fmt.Errorf("failed to update intel on map: %w", err)
    }

    intelResource = file
    return nil
}

func resetSPNIntel() {