Replace dataroot module with BinDir and DataDir on instance, adapt modules

This commit is contained in:
Daniel 2024-11-06 10:48:02 +01:00
parent 0f3f3c360f
commit 7bc1c3b764
39 changed files with 819 additions and 482 deletions

View file

@ -6,30 +6,50 @@ import (
"testing" "testing"
) )
type testInstance struct{} type testInstance struct {
dataDir string
}
var _ instance = testInstance{} var _ instance = testInstance{}
func (stub testInstance) DataDir() string {
return stub.dataDir
}
func (stub testInstance) SetCmdLineOperation(f func() error) {} func (stub testInstance) SetCmdLineOperation(f func() error) {}
func runTest(m *testing.M) error { func newTestInstance(testName string) (*testInstance, error) {
ds, err := InitializeUnitTestDataroot("test-config") testDir, err := os.MkdirTemp("", fmt.Sprintf("portmaster-%s", testName))
if err != nil { if err != nil {
return fmt.Errorf("failed to initialize dataroot: %w", err) return nil, fmt.Errorf("failed to make tmp dir: %w", err)
}
defer func() { _ = os.RemoveAll(ds) }()
module, err = New(&testInstance{})
if err != nil {
return fmt.Errorf("failed to initialize module: %w", err)
} }
m.Run() return &testInstance{
return nil dataDir: testDir,
}, nil
} }
func TestMain(m *testing.M) { func TestConfigPersistence(t *testing.T) {
if err := runTest(m); err != nil { t.Parallel()
fmt.Printf("%s\n", err)
os.Exit(1) instance, err := newTestInstance("test-config")
if err != nil {
t.Fatalf("failed to create test instance: %s", err)
}
defer func() { _ = os.RemoveAll(instance.DataDir()) }()
module, err = New(instance)
if err != nil {
t.Fatalf("failed to initialize module: %s", err)
}
err = SaveConfig()
if err != nil {
t.Fatal(err)
}
err = loadConfig(true)
if err != nil {
t.Fatal(err)
} }
} }

View file

@ -10,8 +10,6 @@ import (
"path/filepath" "path/filepath"
"sort" "sort"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/base/utils/debug" "github.com/safing/portmaster/base/utils/debug"
"github.com/safing/portmaster/service/mgr" "github.com/safing/portmaster/service/mgr"
) )
@ -19,29 +17,13 @@ import (
// ChangeEvent is the name of the config change event. // ChangeEvent is the name of the config change event.
const ChangeEvent = "config change" const ChangeEvent = "config change"
var ( var exportConfig bool
dataRoot *utils.DirStructure
exportConfig bool
)
// SetDataRoot sets the data root from which the updates module derives its paths.
func SetDataRoot(root *utils.DirStructure) {
if dataRoot == nil {
dataRoot = root
}
}
func init() { func init() {
flag.BoolVar(&exportConfig, "export-config-options", false, "export configuration registry and exit") flag.BoolVar(&exportConfig, "export-config-options", false, "export configuration registry and exit")
} }
func prep() error { func prep() error {
SetDataRoot(dataroot.Root())
if dataRoot == nil {
return errors.New("data root is not set")
}
if exportConfig { if exportConfig {
module.instance.SetCmdLineOperation(exportConfigCmd) module.instance.SetCmdLineOperation(exportConfigCmd)
return mgr.ErrExecuteCmdLineOp return mgr.ErrExecuteCmdLineOp
@ -51,7 +33,7 @@ func prep() error {
} }
func start() error { func start() error {
configFilePath = filepath.Join(dataRoot.Path, "config.json") configFilePath = filepath.Join(module.instance.DataDir(), "config.json")
// Load log level from log package after it started. // Load log level from log package after it started.
err := loadLogLevel() err := loadLogLevel()
@ -136,20 +118,3 @@ func GetActiveConfigValues() map[string]interface{} {
return values return values
} }
// InitializeUnitTestDataroot initializes a new random tmp directory for running tests.
func InitializeUnitTestDataroot(testName string) (string, error) {
basePath, err := os.MkdirTemp("", fmt.Sprintf("portmaster-%s", testName))
if err != nil {
return "", fmt.Errorf("failed to make tmp dir: %w", err)
}
ds := utils.NewDirStructure(basePath, 0o0755)
SetDataRoot(ds)
err = dataroot.Initialize(basePath, 0o0755)
if err != nil {
return "", fmt.Errorf("failed to initialize dataroot: %w", err)
}
return basePath, nil
}

View file

@ -56,5 +56,6 @@ func New(instance instance) (*Config, error) {
} }
type instance interface { type instance interface {
DataDir() string
SetCmdLineOperation(f func() error) SetCmdLineOperation(f func() error)
} }

View file

@ -26,7 +26,7 @@ func TestMain(m *testing.M) {
panic(err) panic(err)
} }
err = InitializeWithPath(testDir) err = Initialize(testDir)
if err != nil { if err != nil {
panic(err) panic(err)
} }

View file

@ -2,11 +2,10 @@ package dbmodule
import ( import (
"errors" "errors"
"path/filepath"
"sync/atomic" "sync/atomic"
"github.com/safing/portmaster/base/database" "github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/service/mgr" "github.com/safing/portmaster/service/mgr"
) )
@ -27,18 +26,18 @@ func (dbm *DBModule) Stop() error {
return stop() return stop()
} }
var databaseStructureRoot *utils.DirStructure var databasesRootDir string
// SetDatabaseLocation sets the location of the database for initialization. Supply either a path or dir structure. // SetDatabaseLocation sets the location of the database for initialization. Supply a directory path.
func SetDatabaseLocation(dirStructureRoot *utils.DirStructure) { func SetDatabaseLocation(dir string) {
if databaseStructureRoot == nil { if databasesRootDir == "" {
databaseStructureRoot = dirStructureRoot databasesRootDir = dir
} }
} }
func prep() error { func prep() error {
SetDatabaseLocation(dataroot.Root()) SetDatabaseLocation(filepath.Join(module.instance.DataDir(), "databases"))
if databaseStructureRoot == nil { if databasesRootDir == "" {
return errors.New("database location not specified") return errors.New("database location not specified")
} }
@ -64,16 +63,16 @@ func New(instance instance) (*DBModule, error) {
return nil, errors.New("only one instance allowed") return nil, errors.New("only one instance allowed")
} }
if err := prep(); err != nil {
return nil, err
}
m := mgr.New("DBModule") m := mgr.New("DBModule")
module = &DBModule{ module = &DBModule{
mgr: m, mgr: m,
instance: instance, instance: instance,
} }
if err := prep(); err != nil {
return nil, err
}
err := database.Initialize(databaseStructureRoot) err := database.Initialize(databasesRootDir)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -81,4 +80,6 @@ func New(instance instance) (*DBModule, error) {
return module, nil return module, nil
} }
type instance interface{} type instance interface {
DataDir() string
}

View file

@ -3,14 +3,10 @@ package database
import ( import (
"errors" "errors"
"fmt" "fmt"
"os"
"path/filepath"
"github.com/tevino/abool" "github.com/tevino/abool"
"github.com/safing/portmaster/base/utils"
)
const (
databasesSubDir = "databases"
) )
var ( var (
@ -19,25 +15,18 @@ var (
shuttingDown = abool.NewBool(false) shuttingDown = abool.NewBool(false)
shutdownSignal = make(chan struct{}) shutdownSignal = make(chan struct{})
rootStructure *utils.DirStructure rootDir string
databasesStructure *utils.DirStructure
) )
// InitializeWithPath initializes the database at the specified location using a path. // Initialize initializes the database at the specified location.
func InitializeWithPath(dirPath string) error { func Initialize(databasesRootDir string) error {
return Initialize(utils.NewDirStructure(dirPath, 0o0755))
}
// Initialize initializes the database at the specified location using a dir structure.
func Initialize(dirStructureRoot *utils.DirStructure) error {
if initialized.SetToIf(false, true) { if initialized.SetToIf(false, true) {
rootStructure = dirStructureRoot rootDir = databasesRootDir
// ensure root and databases dirs // Ensure database root dir exists.
databasesStructure = rootStructure.ChildDir(databasesSubDir, 0o0700) err := os.MkdirAll(rootDir, 0o0700)
err := databasesStructure.Ensure()
if err != nil { if err != nil {
return fmt.Errorf("could not create/open database directory (%s): %w", rootStructure.Path, err) return fmt.Errorf("could not create/open database directory (%s): %w", rootDir, err)
} }
return nil return nil
@ -67,11 +56,12 @@ func Shutdown() (err error) {
// getLocation returns the storage location for the given name and type. // getLocation returns the storage location for the given name and type.
func getLocation(name, storageType string) (string, error) { func getLocation(name, storageType string) (string, error) {
location := databasesStructure.ChildDir(name, 0o0700).ChildDir(storageType, 0o0700) location := filepath.Join(rootDir, name, storageType)
// check location
err := location.Ensure() // Make sure location exists.
err := os.MkdirAll(location, 0o0700)
if err != nil { if err != nil {
return "", fmt.Errorf(`failed to create/check database dir "%s": %w`, location.Path, err) return "", fmt.Errorf("failed to create/check database dir %q: %w", location, err)
} }
return location.Path, nil return location, nil
} }

View file

@ -1,25 +0,0 @@
package dataroot
import (
"errors"
"os"
"github.com/safing/portmaster/base/utils"
)
var root *utils.DirStructure
// Initialize initializes the data root directory.
func Initialize(rootDir string, perm os.FileMode) error {
if root != nil {
return errors.New("already initialized")
}
root = utils.NewDirStructure(rootDir, perm)
return root.Ensure()
}
// Root returns the data root directory.
func Root() *utils.DirStructure {
return root
}

View file

@ -10,7 +10,6 @@ import (
"github.com/shirou/gopsutil/mem" "github.com/shirou/gopsutil/mem"
"github.com/safing/portmaster/base/api" "github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/log"
) )
@ -209,18 +208,9 @@ func getDiskStat() *disk.UsageStat {
return diskStat return diskStat
} }
// Check if we have a data root.
dataRoot := dataroot.Root()
if dataRoot == nil {
log.Warning("metrics: cannot get disk stats without data root")
diskStat = nil
diskStatExpires = time.Now().Add(hostStatTTL)
return diskStat
}
// Refresh. // Refresh.
var err error var err error
diskStat, err = disk.Usage(dataRoot.Path) diskStat, err = disk.Usage(module.instance.DataDir())
if err != nil { if err != nil {
log.Warningf("metrics: failed to get load avg: %s", err) log.Warningf("metrics: failed to get disk usage: %s", err)
diskStat = nil diskStat = nil

View file

@ -213,4 +213,6 @@ func New(instance instance) (*Metrics, error) {
return module, nil return module, nil
} }
type instance interface{} type instance interface {
DataDir() string
}

View file

@ -1,47 +1,49 @@
package main package main
import ( import (
"bufio"
"errors"
"flag" "flag"
"fmt" "fmt"
"io"
"log/slog"
"os" "os"
"runtime" "runtime"
"runtime/pprof"
"syscall" "github.com/spf13/cobra"
"github.com/safing/portmaster/base/info" "github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/metrics" "github.com/safing/portmaster/base/metrics"
"github.com/safing/portmaster/service" "github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates" "github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/spn/conf"
) )
var ( var (
printStackOnExit bool rootCmd = &cobra.Command{
enableInputSignals bool Use: "portmaster-core",
PersistentPreRun: initializeGlobals,
Run: cmdRun,
}
sigUSR1 = syscall.Signal(0xa) // dummy for windows binDir string
dataDir string
svcCfg *service.ServiceConfig
) )
func init() { func init() {
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before of shutting down") // Add Go's default flag set.
flag.BoolVar(&enableInputSignals, "input-signals", false, "emulate signals using stdin") rootCmd.Flags().AddGoFlagSet(flag.CommandLine)
// Add persisent flags for all commands.
rootCmd.PersistentFlags().StringVar(&binDir, "bin-dir", "", "set directory for executable binaries (rw/ro)")
rootCmd.PersistentFlags().StringVar(&dataDir, "data-dir", "", "set directory for variable data (rw)")
} }
func main() { func main() {
flag.Parse() if err := rootCmd.Execute(); err != nil {
// Call platform specific checks, that will execute commands like "recover-iptables" fmt.Println(err)
platformSpecificChecks() os.Exit(1)
}
instance := initialize()
run(instance)
} }
func initialize() *service.Instance { func initializeGlobals(cmd *cobra.Command, args []string) {
// set information // set information
info.Set("Portmaster", "", "GPLv3") info.Set("Portmaster", "", "GPLv3")
@ -51,66 +53,13 @@ func initialize() *service.Instance {
// Configure user agent. // Configure user agent.
updates.UserAgent = fmt.Sprintf("Portmaster Core (%s %s)", runtime.GOOS, runtime.GOARCH) updates.UserAgent = fmt.Sprintf("Portmaster Core (%s %s)", runtime.GOOS, runtime.GOARCH)
// enable SPN client mode // Create service config.
conf.EnableClient(true) svcCfg = &service.ServiceConfig{
conf.EnableIntegration(true) BinDir: binDir,
DataDir: dataDir,
// Create instance. BinariesIndexURLs: service.DefaultBinaryIndexURLs,
var execCmdLine bool IntelIndexURLs: service.DefaultIntelIndexURLs,
instance, err := service.New(&service.ServiceConfig{}) VerifyBinaryUpdates: service.BinarySigningTrustStore,
switch { VerifyIntelUpdates: service.BinarySigningTrustStore,
case err == nil:
// Continue
case errors.Is(err, mgr.ErrExecuteCmdLineOp):
execCmdLine = true
default:
fmt.Printf("error creating an instance: %s\n", err)
os.Exit(2)
}
// Execute command line operation, if requested or available.
switch {
case !execCmdLine:
// Run service.
case instance.CommandLineOperation == nil:
fmt.Println("command line operation execution requested, but not set")
os.Exit(3)
default:
// Run the function and exit.
err = instance.CommandLineOperation()
if err != nil {
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
os.Exit(3)
}
os.Exit(0)
}
return instance
}
func printStackTo(writer io.Writer, msg string) {
_, err := fmt.Fprintf(writer, "===== %s =====\n", msg)
if err == nil {
err = pprof.Lookup("goroutine").WriteTo(writer, 1)
}
if err != nil {
slog.Error("failed to write stack trace", "err", err)
}
}
func inputSignals(signalCh chan os.Signal) {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
switch scanner.Text() {
case "SIGHUP":
signalCh <- syscall.SIGHUP
case "SIGINT":
signalCh <- syscall.SIGINT
case "SIGQUIT":
signalCh <- syscall.SIGQUIT
case "SIGTERM":
signalCh <- syscall.SIGTERM
case "SIGUSR1":
signalCh <- sigUSR1
}
} }
} }

108
cmds/portmaster-core/run.go Normal file
View file

@ -0,0 +1,108 @@
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"log/slog"
"os"
"runtime/pprof"
"syscall"
"github.com/spf13/cobra"
"github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/spn/conf"
)
var (
printStackOnExit bool
enableInputSignals bool
sigUSR1 = syscall.Signal(0xa) // dummy for windows
)
func init() {
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
flag.BoolVar(&enableInputSignals, "input-signals", false, "emulate signals using stdin")
}
func cmdRun(cmd *cobra.Command, args []string) {
// Call platform specific checks, that will execute commands like "recover-iptables"
platformSpecificChecks()
svcCfg.VerifyBinaryUpdates = nil // FIXME
svcCfg.VerifyIntelUpdates = nil // FIXME
instance := createInstance()
run(instance)
}
func createInstance() *service.Instance {
// enable SPN client mode
conf.EnableClient(true)
conf.EnableIntegration(true)
// Create instance.
var execCmdLine bool
instance, err := service.New(svcCfg)
switch {
case err == nil:
// Continue
case errors.Is(err, mgr.ErrExecuteCmdLineOp):
execCmdLine = true
default:
fmt.Printf("error creating an instance: %s\n", err)
os.Exit(2)
}
// Execute module command line operation, if requested or available.
switch {
case !execCmdLine:
// Run service.
case instance.CommandLineOperation == nil:
fmt.Println("command line operation execution requested, but not set")
os.Exit(3)
default:
// Run the function and exit.
fmt.Println("executing cmdline op")
err = instance.CommandLineOperation()
if err != nil {
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
os.Exit(3)
}
os.Exit(0)
}
return instance
}
func printStackTo(writer io.Writer, msg string) {
_, err := fmt.Fprintf(writer, "===== %s =====\n", msg)
if err == nil {
err = pprof.Lookup("goroutine").WriteTo(writer, 1)
}
if err != nil {
slog.Error("failed to write stack trace", "err", err)
}
}
func inputSignals(signalCh chan os.Signal) {
scanner := bufio.NewScanner(os.Stdin)
for scanner.Scan() {
switch scanner.Text() {
case "SIGHUP":
signalCh <- syscall.SIGHUP
case "SIGINT":
signalCh <- syscall.SIGINT
case "SIGQUIT":
signalCh <- syscall.SIGQUIT
case "SIGTERM":
signalCh <- syscall.SIGTERM
case "SIGUSR1":
signalCh <- sigUSR1
}
}
}

View file

@ -129,7 +129,7 @@ func isRunningAsService() bool {
// Get the current process ID // Get the current process ID
pid := os.Getpid() pid := os.Getpid()
currentProcess, err := processInfo.NewProcess(int32(pid)) currentProcess, err := processInfo.NewProcess(int32(pid)) //nolint:gosec
if err != nil { if err != nil {
return false return false
} }

View file

@ -0,0 +1,77 @@
package main
import (
"fmt"
"github.com/spf13/cobra"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/updates"
)
var updateCmd = &cobra.Command{
Use: "update",
Short: "Force an update of all components.",
RunE: update,
}
func init() {
rootCmd.AddCommand(updateCmd)
}
func update(cmd *cobra.Command, args []string) error {
// Finalize config.
svcCfg.VerifyBinaryUpdates = nil // FIXME
svcCfg.VerifyIntelUpdates = nil // FIXME
err := svcCfg.Init()
if err != nil {
return fmt.Errorf("internal configuration error: %w", err)
}
// Start logging.
log.SetLogLevel(log.InfoLevel)
_ = log.Start()
defer log.Shutdown()
// Create updaters.
instance := &updateDummyInstance{}
binaryUpdateConfig, intelUpdateConfig, err := service.MakeUpdateConfigs(svcCfg)
if err != nil {
return fmt.Errorf("init updater config: %w", err)
}
binaryUpdates, err := updates.New(instance, "Binary Updater", *binaryUpdateConfig)
if err != nil {
return fmt.Errorf("configure binary updates: %w", err)
}
intelUpdates, err := updates.New(instance, "Intel Updater", *intelUpdateConfig)
if err != nil {
return fmt.Errorf("configure intel updates: %w", err)
}
// Force update all.
binErr := binaryUpdates.ForceUpdate()
if binErr != nil {
log.Errorf("binary update failed: %s", binErr)
}
intelErr := intelUpdates.ForceUpdate()
if intelErr != nil {
log.Errorf("intel update failed: %s", intelErr)
}
// Return error.
if binErr != nil {
return fmt.Errorf("binary update failed: %w", binErr)
}
if intelErr != nil {
return fmt.Errorf("intel update failed: %w", intelErr)
}
return nil
}
type updateDummyInstance struct{}
func (udi *updateDummyInstance) Restart() {}
func (udi *updateDummyInstance) Shutdown() {}
func (udi *updateDummyInstance) Notifications() *notifications.Notifications { return nil }

View file

@ -9,7 +9,7 @@ import (
) )
var ( var (
bundleSettings = updates.IndexScanConfig{ scanConfig = updates.IndexScanConfig{
Name: "Portmaster Binaries", Name: "Portmaster Binaries",
PrimaryArtifact: "linux_amd64/portmaster-core", PrimaryArtifact: "linux_amd64/portmaster-core",
BaseURL: "https://updates.safing.io/", BaseURL: "https://updates.safing.io/",
@ -60,17 +60,17 @@ var (
RunE: scan, RunE: scan,
} }
bundleDir string scanDir string
) )
func init() { func init() {
rootCmd.AddCommand(scanCmd) rootCmd.AddCommand(scanCmd)
scanCmd.Flags().StringVarP(&bundleDir, "dir", "d", "", "directory to create index from (required)") scanCmd.Flags().StringVarP(&scanDir, "dir", "d", "", "directory to create index from (required)")
_ = scanCmd.MarkFlagRequired("dir") _ = scanCmd.MarkFlagRequired("dir")
} }
func scan(cmd *cobra.Command, args []string) error { func scan(cmd *cobra.Command, args []string) error {
bundle, err := updates.GenerateBundleFromDir(bundleDir, bundleSettings) bundle, err := updates.GenerateIndexFromDir(scanDir, scanConfig)
if err != nil { if err != nil {
return err return err
} }

View file

@ -4,6 +4,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"strconv" "strconv"
"strings"
"sync" "sync"
"time" "time"
@ -111,6 +112,9 @@ func (ii *InstallInfo) checkVersion() {
// MakeNumericVersion makes a numeric version with the first three version // MakeNumericVersion makes a numeric version with the first three version
// segment always using three digits. // segment always using three digits.
func MakeNumericVersion(version string) (numericVersion int64, err error) { func MakeNumericVersion(version string) (numericVersion int64, err error) {
// Remove any comments.
version = strings.SplitN(version, " ", 2)[0]
// Parse version string. // Parse version string.
ver, err := semver.NewVersion(version) ver, err := semver.NewVersion(version)
if err != nil { if err != nil {

View file

@ -1,3 +1,89 @@
package service package service
type ServiceConfig struct{} import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/safing/jess"
)
type ServiceConfig struct {
BinDir string
DataDir string
BinariesIndexURLs []string
IntelIndexURLs []string
VerifyBinaryUpdates jess.TrustStore
VerifyIntelUpdates jess.TrustStore
}
func (sc *ServiceConfig) Init() error {
// Check directories
switch runtime.GOOS {
case "windows":
if sc.BinDir == "" {
exeDir, err := getCurrentBinaryFolder() // Default: C:/Program Files/Portmaster
if err != nil {
return fmt.Errorf("derive bin dir from running exe: %w", err)
}
sc.BinDir = exeDir
}
if sc.DataDir == "" {
sc.DataDir = filepath.FromSlash("$ProgramData/Portmaster")
}
case "linux":
// Fall back to defaults.
if sc.BinDir == "" {
sc.BinDir = "/usr/lib/portmaster"
}
if sc.DataDir == "" {
sc.DataDir = "/var/lib/portmaster"
}
default:
// Fail if not configured on other platforms.
if sc.BinDir == "" {
return errors.New("binary directory must be configured - auto-detection not supported on this platform")
}
if sc.DataDir == "" {
return errors.New("data directory must be configured - auto-detection not supported on this platform")
}
}
// Expand path variables.
sc.BinDir = os.ExpandEnv(sc.BinDir)
sc.DataDir = os.ExpandEnv(sc.DataDir)
// Apply defaults for required fields.
if len(sc.BinariesIndexURLs) == 0 {
sc.BinariesIndexURLs = DefaultBinaryIndexURLs
}
if len(sc.IntelIndexURLs) == 0 {
sc.IntelIndexURLs = DefaultIntelIndexURLs
}
return nil
}
func getCurrentBinaryFolder() (string, error) {
// Get the path of the currently running executable
exePath, err := os.Executable()
if err != nil {
return "", fmt.Errorf("failed to get executable path: %w", err)
}
// Get the absolute path
absPath, err := filepath.Abs(exePath)
if err != nil {
return "", fmt.Errorf("failed to get absolute path: %w", err)
}
// Get the directory of the executable
installDir := filepath.Dir(absPath)
return installDir, nil
}

View file

@ -6,7 +6,6 @@ import (
"fmt" "fmt"
"github.com/safing/portmaster/base/api" "github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/info" "github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/service/mgr" "github.com/safing/portmaster/service/mgr"
) )
@ -15,14 +14,10 @@ import (
var ( var (
DefaultAPIListenAddress = "127.0.0.1:817" DefaultAPIListenAddress = "127.0.0.1:817"
dataDir string
databaseDir string
showVersion bool showVersion bool
) )
func init() { func init() {
flag.StringVar(&dataDir, "data", "", "set data directory")
flag.StringVar(&databaseDir, "db", "", "alias to --data (deprecated)")
flag.BoolVar(&showVersion, "version", false, "show version and exit") flag.BoolVar(&showVersion, "version", false, "show version and exit")
} }
@ -39,27 +34,6 @@ func prep(instance instance) error {
return mgr.ErrExecuteCmdLineOp return mgr.ErrExecuteCmdLineOp
} }
// check data root
if dataroot.Root() == nil {
// initialize data dir
// backwards compatibility
if dataDir == "" {
dataDir = databaseDir
}
// check data dir
if dataDir == "" {
return errors.New("please set the data directory using --data=/path/to/data/dir")
}
// initialize structure
err := dataroot.Initialize(dataDir, 0o0755)
if err != nil {
return err
}
}
// set api listen address // set api listen address
api.SetDefaultAPIListenAddress(DefaultAPIListenAddress) api.SetDefaultAPIListenAddress(DefaultAPIListenAddress)

View file

@ -7,7 +7,6 @@ import (
"strings" "strings"
"time" "time"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr" "github.com/safing/portmaster/service/mgr"
) )
@ -26,7 +25,7 @@ func logCleaner(_ *mgr.WorkerCtx) error {
ageThreshold := time.Now().Add(-logTTL) ageThreshold := time.Now().Add(-logTTL)
return filepath.Walk( return filepath.Walk(
filepath.Join(dataroot.Root().Path, logFileDir), filepath.Join(module.instance.DataDir(), logFileDir),
func(path string, info os.FileInfo, err error) error { func(path string, info os.FileInfo, err error) error {
if err != nil { if err != nil {
if !errors.Is(err, os.ErrNotExist) { if !errors.Is(err, os.ErrNotExist) {

View file

@ -58,5 +58,6 @@ func New(instance instance) (*Base, error) {
} }
type instance interface { type instance interface {
DataDir() string
SetCmdLineOperation(f func() error) SetCmdLineOperation(f func() error)
} }

View file

@ -11,9 +11,7 @@ import (
"time" "time"
"github.com/safing/portmaster/base/api" "github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/service/netenv" "github.com/safing/portmaster/service/netenv"
"github.com/safing/portmaster/service/network/netutils" "github.com/safing/portmaster/service/network/netutils"
"github.com/safing/portmaster/service/network/packet" "github.com/safing/portmaster/service/network/packet"
@ -38,15 +36,12 @@ For production use please create an API key in the settings.`
) )
var ( var (
dataRoot *utils.DirStructure
apiPortSet bool apiPortSet bool
apiIP net.IP apiIP net.IP
apiPort uint16 apiPort uint16
) )
func prepAPIAuth() error { func prepAPIAuth() error {
dataRoot = dataroot.Root()
return api.SetAuthenticator(apiAuthenticator) return api.SetAuthenticator(apiAuthenticator)
} }
@ -132,7 +127,7 @@ func authenticateAPIRequest(ctx context.Context, pktInfo *packet.Info) (retry bo
var originalPid int var originalPid int
// Get authenticated path. // Get authenticated path.
authenticatedPath := module.instance.BinaryUpdates().GetRootPath() authenticatedPath := module.instance.BinaryUpdates().GetMainDir()
if authenticatedPath == "" { if authenticatedPath == "" {
return false, fmt.Errorf(deniedMsgMisconfigured, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user return false, fmt.Errorf(deniedMsgMisconfigured, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user
} }
@ -214,7 +209,7 @@ func authenticateAPIRequest(ctx context.Context, pktInfo *packet.Info) (retry bo
return false, fmt.Errorf(deniedMsgSystem, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user return false, fmt.Errorf(deniedMsgSystem, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user
default: // normal process default: // normal process
log.Tracer(ctx).Warningf("filter: denying api access to %s - also checked %s (trusted root is %s)", procsChecked[0], strings.Join(procsChecked[1:], " "), dataRoot.Path) log.Tracer(ctx).Warningf("filter: denying api access to %s - also checked %s (trusted root is %s)", procsChecked[0], strings.Join(procsChecked[1:], " "), module.instance.BinDir())
return false, fmt.Errorf( //nolint:stylecheck // message for user return false, fmt.Errorf( //nolint:stylecheck // message for user
deniedMsgUnauthorized, deniedMsgUnauthorized,
api.ErrAPIAccessDeniedMessage, api.ErrAPIAccessDeniedMessage,

View file

@ -160,6 +160,7 @@ func New(instance instance) (*Firewall, error) {
} }
type instance interface { type instance interface {
BinDir() string
Config() *config.Config Config() *config.Config
BinaryUpdates() *updates.Updater BinaryUpdates() *updates.Updater
Profile() *profile.ProfileModule Profile() *profile.ProfileModule

View file

@ -4,8 +4,6 @@ import (
"context" "context"
"fmt" "fmt"
"os" "os"
"path/filepath"
go_runtime "runtime"
"sync/atomic" "sync/atomic"
"time" "time"
@ -55,6 +53,9 @@ type Instance struct {
cancelCtx context.CancelFunc cancelCtx context.CancelFunc
serviceGroup *mgr.Group serviceGroup *mgr.Group
binDir string
dataDir string
exitCode atomic.Int32 exitCode atomic.Int32
database *dbmodule.DBModule database *dbmodule.DBModule
@ -105,83 +106,27 @@ type Instance struct {
ShouldRestart bool ShouldRestart bool
} }
func getCurrentBinaryFolder() (string, error) {
// Get the path of the currently running executable
exePath, err := os.Executable()
if err != nil {
return "", fmt.Errorf("failed to get executable path: %w", err)
}
// Get the absolute path
absPath, err := filepath.Abs(exePath)
if err != nil {
return "", fmt.Errorf("failed to get absolute path: %w", err)
}
// Get the directory of the executable
installDir := filepath.Dir(absPath)
return installDir, nil
}
// New returns a new Portmaster service instance. // New returns a new Portmaster service instance.
func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
var binaryUpdateIndex updates.Config // Initialize config.
var intelUpdateIndex updates.Config err := svcCfg.Init()
if go_runtime.GOOS == "windows" { if err != nil {
binaryFolder, err := getCurrentBinaryFolder() return nil, fmt.Errorf("internal service config error: %w", err)
if err != nil { }
return nil, err
}
binaryUpdateIndex = updates.Config{
Directory: binaryFolder, // Default: C:/Program Files/Portmaster
DownloadDirectory: os.ExpandEnv("$ProgramData/Portmaster/new_binary"),
PurgeDirectory: filepath.Join(binaryFolder, "old_binary"), // Default: C:/Program Files/Portmaster/old_binary
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: []string{"http://192.168.88.11:8000/test-binary.json"},
IndexFile: "bin-index.json",
AutoApply: false,
NeedsRestart: true,
}
intelUpdateIndex = updates.Config{ // Make sure data dir exists, so that child directories don't dictate the permissions.
Directory: os.ExpandEnv("$ProgramData/Portmaster/intel"), err = os.MkdirAll(svcCfg.DataDir, 0o0755)
DownloadDirectory: os.ExpandEnv("$ProgramData/Portmaster/new_intel"), if err != nil {
PurgeDirectory: os.ExpandEnv("$ProgramData/Portmaster/old_intel"), return nil, fmt.Errorf("data directory %s is not accessible: %w", svcCfg.DataDir, err)
IndexURLs: []string{"http://192.168.88.11:8000/test-intel.json"},
IndexFile: "intel-index.json",
AutoApply: true,
NeedsRestart: false,
}
} else if go_runtime.GOOS == "linux" {
binaryUpdateIndex = updates.Config{
Directory: "/usr/lib/portmaster",
DownloadDirectory: "/var/lib/portmaster/new_bin",
PurgeDirectory: "/var/lib/portmaster/old_bin",
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: []string{"http://localhost:8000/test-binary.json"},
IndexFile: "bin-index.json",
AutoApply: false,
NeedsRestart: true,
}
intelUpdateIndex = updates.Config{
Directory: "/var/lib/portmaster/intel",
DownloadDirectory: "/var/lib/portmaster/new_intel",
PurgeDirectory: "/var/lib/portmaster/intel_bin",
IndexURLs: []string{"http://localhost:8000/test-intel.json"},
IndexFile: "intel-index.json",
AutoApply: true,
NeedsRestart: false,
}
} }
// Create instance to pass it to modules. // Create instance to pass it to modules.
instance := &Instance{} instance := &Instance{
binDir: svcCfg.BinDir,
dataDir: svcCfg.DataDir,
}
instance.ctx, instance.cancelCtx = context.WithCancel(context.Background()) instance.ctx, instance.cancelCtx = context.WithCancel(context.Background())
var err error
// Base modules // Base modules
instance.base, err = base.New(instance) instance.base, err = base.New(instance)
if err != nil { if err != nil {
@ -221,11 +166,15 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
if err != nil { if err != nil {
return instance, fmt.Errorf("create core module: %w", err) return instance, fmt.Errorf("create core module: %w", err)
} }
instance.binaryUpdates, err = updates.New(instance, "Binary Updater", binaryUpdateIndex) binaryUpdateConfig, intelUpdateConfig, err := MakeUpdateConfigs(svcCfg)
if err != nil {
return instance, fmt.Errorf("create updates config: %w", err)
}
instance.binaryUpdates, err = updates.New(instance, "Binary Updater", *binaryUpdateConfig)
if err != nil { if err != nil {
return instance, fmt.Errorf("create updates module: %w", err) return instance, fmt.Errorf("create updates module: %w", err)
} }
instance.intelUpdates, err = updates.New(instance, "Intel Updater", intelUpdateIndex) instance.intelUpdates, err = updates.New(instance, "Intel Updater", *intelUpdateConfig)
if err != nil { if err != nil {
return instance, fmt.Errorf("create updates module: %w", err) return instance, fmt.Errorf("create updates module: %w", err)
} }
@ -413,6 +362,18 @@ func (i *Instance) SetSleep(enabled bool) {
} }
} }
// BinDir returns the directory for binaries.
// This directory may be read-only.
func (i *Instance) BinDir() string {
return i.binDir
}
// DataDir returns the directory for variable data.
// This directory is expected to be read/writeable.
func (i *Instance) DataDir() string {
return i.dataDir
}
// Database returns the database module. // Database returns the database module.
func (i *Instance) Database() *dbmodule.DBModule { func (i *Instance) Database() *dbmodule.DBModule {
return i.database return i.database

View file

@ -39,9 +39,9 @@ var (
filterListLock sync.RWMutex filterListLock sync.RWMutex
// Updater files for tracking upgrades. // Updater files for tracking upgrades.
baseFile *updates.File baseFile *updates.Artifact
intermediateFile *updates.File intermediateFile *updates.Artifact
urgentFile *updates.File urgentFile *updates.Artifact
filterListsLoaded chan struct{} filterListsLoaded chan struct{}
) )
@ -77,7 +77,7 @@ func isLoaded() bool {
// processListFile opens the latest version of file and decodes it's DSDL // processListFile opens the latest version of file and decodes it's DSDL
// content. It calls processEntry for each decoded filterlists entry. // content. It calls processEntry for each decoded filterlists entry.
func processListFile(ctx context.Context, filter *scopedBloom, file *updates.File) error { func processListFile(ctx context.Context, filter *scopedBloom, file *updates.Artifact) error {
f, err := os.Open(file.Path()) f, err := os.Open(file.Path())
if err != nil { if err != nil {
return err return err

View file

@ -162,7 +162,7 @@ func getListIndexFromCache() (*ListIndexFile, error) {
var ( var (
// listIndexUpdate must only be used by updateListIndex. // listIndexUpdate must only be used by updateListIndex.
listIndexUpdate *updates.File listIndexUpdate *updates.Artifact
listIndexUpdateLock sync.Mutex listIndexUpdateLock sync.Mutex
) )

View file

@ -63,7 +63,7 @@ func performUpdate(ctx context.Context) error {
// First, update the list index. // First, update the list index.
err := updateListIndex() err := updateListIndex()
if err != nil { if err != nil {
log.Errorf("intel/filterlists: failed update list index: %s", err) log.Warningf("intel/filterlists: failed update list index: %s", err)
} }
upgradables, err := getUpgradableFiles() upgradables, err := getUpgradableFiles()
@ -83,7 +83,7 @@ func performUpdate(ctx context.Context) error {
// perform the actual upgrade by processing each file // perform the actual upgrade by processing each file
// in the returned order. // in the returned order.
for idx, file := range upgradables { for idx, file := range upgradables {
log.Debugf("intel/filterlists: applying update (%d) %s version %s", idx, file.Identifier(), file.Version()) log.Debugf("intel/filterlists: applying update (%d) %s version %s", idx, file.Filename, file.Version)
if file == baseFile { if file == baseFile {
if idx != 0 { if idx != 0 {
@ -101,7 +101,7 @@ func performUpdate(ctx context.Context) error {
} }
if err := processListFile(ctx, filterToUpdate, file); err != nil { if err := processListFile(ctx, filterToUpdate, file); err != nil {
return fmt.Errorf("failed to process upgrade %s: %w", file.Identifier(), err) return fmt.Errorf("failed to process upgrade %s version %s: %w", file.Filename, file.Version, err)
} }
} }
@ -145,10 +145,10 @@ func performUpdate(ctx context.Context) error {
// try to save the highest version of our files. // try to save the highest version of our files.
highestVersion := upgradables[len(upgradables)-1] highestVersion := upgradables[len(upgradables)-1]
if err := setCacheDatabaseVersion(highestVersion.Version()); err != nil { if err := setCacheDatabaseVersion(highestVersion.Version); err != nil {
log.Errorf("intel/filterlists: failed to save cache database version: %s", err) log.Errorf("intel/filterlists: failed to save cache database version: %s", err)
} else { } else {
log.Infof("intel/filterlists: successfully migrated cache database to %s", highestVersion.Version()) log.Infof("intel/filterlists: successfully migrated cache database to %s", highestVersion.Version)
} }
// The list update succeeded, resolve any states. // The list update succeeded, resolve any states.
@ -174,51 +174,51 @@ func removeAllObsoleteFilterEntries(wc *mgr.WorkerCtx) error {
// getUpgradableFiles returns a slice of filterlists files // getUpgradableFiles returns a slice of filterlists files
// that should be updated. The files MUST be updated and // that should be updated. The files MUST be updated and
// processed in the returned order! // processed in the returned order!
func getUpgradableFiles() ([]*updates.File, error) { func getUpgradableFiles() ([]*updates.Artifact, error) {
var updateOrder []*updates.File var updateOrder []*updates.Artifact
// cacheDBInUse := isLoaded() cacheDBInUse := isLoaded()
// if baseFile == nil || !cacheDBInUse { // TODO(vladimir): || baseFile.UpgradeAvailable() newBaseFile, err := module.instance.IntelUpdates().GetFile(baseListFilePath)
// var err error if err != nil {
// baseFile, err = module.instance.Updates().GetFile(baseListFilePath) log.Warningf("intel/filterlists: failed to get base update: %s", err)
// if err != nil { } else if newer, _ := newBaseFile.IsNewerThan(baseFile); newer || !cacheDBInUse {
// return nil, err log.Tracef("intel/filterlists: base file needs update to version %s", newBaseFile.Version)
// } if newBaseFile.SemVer() == nil {
// log.Tracef("intel/filterlists: base file needs update, selected version %s", baseFile.Version()) log.Warningf("intel/filterlists: base file needs update to version %s, but semver is invalid", newBaseFile.Version)
// updateOrder = append(updateOrder, baseFile) } else {
// } updateOrder = append(updateOrder, newBaseFile)
}
}
// if intermediateFile == nil || intermediateFile.UpgradeAvailable() || !cacheDBInUse { newIntermediateFile, err := module.instance.IntelUpdates().GetFile(intermediateListFilePath)
// var err error if err != nil {
// intermediateFile, err = getFile(intermediateListFilePath) log.Warningf("intel/filterlists: failed to get intermediate update: %s", err)
// if err != nil && !errors.Is(err, updater.ErrNotFound) { } else if newer, _ := newIntermediateFile.IsNewerThan(intermediateFile); newer || !cacheDBInUse {
// return nil, err log.Tracef("intel/filterlists: intermediate file needs update to version %s", newIntermediateFile.Version)
// } if newIntermediateFile.SemVer() == nil {
log.Warningf("intel/filterlists: intermediate file needs update to version %s, but semver is invalid", newIntermediateFile.Version)
} else {
updateOrder = append(updateOrder, newIntermediateFile)
}
}
// if err == nil { newUrgentFile, err := module.instance.IntelUpdates().GetFile(urgentListFilePath)
// log.Tracef("intel/filterlists: intermediate file needs update, selected version %s", intermediateFile.Version()) if err != nil {
// updateOrder = append(updateOrder, intermediateFile) log.Warningf("intel/filterlists: failed to get urgent update: %s", err)
// } } else if newer, _ := newUrgentFile.IsNewerThan(urgentFile); newer || !cacheDBInUse {
// } log.Tracef("intel/filterlists: urgent file needs update to version %s", newUrgentFile.Version)
if newUrgentFile.SemVer() == nil {
// if urgentFile == nil || urgentFile.UpgradeAvailable() || !cacheDBInUse { log.Warningf("intel/filterlists: urgent file needs update to version %s, but semver is invalid", newUrgentFile.Version)
// var err error } else {
// urgentFile, err = getFile(urgentListFilePath) updateOrder = append(updateOrder, newUrgentFile)
// if err != nil && !errors.Is(err, updater.ErrNotFound) { }
// return nil, err }
// }
// if err == nil {
// log.Tracef("intel/filterlists: urgent file needs update, selected version %s", urgentFile.Version())
// updateOrder = append(updateOrder, urgentFile)
// }
// }
return resolveUpdateOrder(updateOrder) return resolveUpdateOrder(updateOrder)
} }
func resolveUpdateOrder(updateOrder []*updates.File) ([]*updates.File, error) { func resolveUpdateOrder(updateOrder []*updates.Artifact) ([]*updates.Artifact, error) {
// sort the update order by ascending version // sort the update order by ascending version
sort.Sort(byAscVersion(updateOrder)) sort.Sort(byAscVersion(updateOrder))
log.Tracef("intel/filterlists: order of updates: %v", updateOrder) log.Tracef("intel/filterlists: order of updates: %v", updateOrder)
@ -239,9 +239,8 @@ func resolveUpdateOrder(updateOrder []*updates.File) ([]*updates.File, error) {
startAtIdx := -1 startAtIdx := -1
for idx, file := range updateOrder { for idx, file := range updateOrder {
ver, _ := version.NewSemver(file.Version()) log.Tracef("intel/filterlists: checking file with version %s against %s", file.SemVer(), cacheDBVersion)
log.Tracef("intel/filterlists: checking file with version %s against %s", ver, cacheDBVersion) if file.SemVer().GreaterThan(cacheDBVersion) && (startAtIdx == -1 || file == baseFile) {
if ver.GreaterThan(cacheDBVersion) && (startAtIdx == -1 || file == baseFile) {
startAtIdx = idx startAtIdx = idx
} }
} }
@ -258,15 +257,12 @@ func resolveUpdateOrder(updateOrder []*updates.File) ([]*updates.File, error) {
return updateOrder[startAtIdx:], nil return updateOrder[startAtIdx:], nil
} }
type byAscVersion []*updates.File type byAscVersion []*updates.Artifact
func (fs byAscVersion) Len() int { return len(fs) } func (fs byAscVersion) Len() int { return len(fs) }
func (fs byAscVersion) Less(i, j int) bool { func (fs byAscVersion) Less(i, j int) bool {
vi, _ := version.NewSemver(fs[i].Version()) return fs[i].SemVer().LessThan(fs[j].SemVer())
vj, _ := version.NewSemver(fs[j].Version())
return vi.LessThan(vj)
} }
func (fs byAscVersion) Swap(i, j int) { func (fs byAscVersion) Swap(i, j int) {

View file

@ -17,6 +17,12 @@ var worker *updateWorker
func init() { func init() {
worker = &updateWorker{ worker = &updateWorker{
trigger: make(chan struct{}), trigger: make(chan struct{}),
v4: updateBroadcaster{
dbName: v4MMDBResource,
},
v6: updateBroadcaster{
dbName: v6MMDBResource,
},
} }
} }
@ -27,26 +33,50 @@ const (
type geoIPDB struct { type geoIPDB struct {
*maxminddb.Reader *maxminddb.Reader
file *updates.File update *updates.Artifact
} }
// updateBroadcaster stores a geoIPDB and provides synchronized // updateBroadcaster stores a geoIPDB and provides synchronized
// access to the MMDB reader. It also supports broadcasting to // access to the MMDB reader. It also supports broadcasting to
// multiple waiters when a new database becomes available. // multiple waiters when a new database becomes available.
type updateBroadcaster struct { type updateBroadcaster struct {
rw sync.RWMutex rw sync.RWMutex
db *geoIPDB db *geoIPDB
dbName string
waiter chan struct{} waiter chan struct{}
} }
// NeedsUpdate returns true if the current broadcaster needs a // AvailableUpdate returns a new update artifact if the current broadcaster
// database update. // needs a database update.
func (ub *updateBroadcaster) NeedsUpdate() bool { func (ub *updateBroadcaster) AvailableUpdate() *updates.Artifact {
ub.rw.RLock() ub.rw.RLock()
defer ub.rw.RUnlock() defer ub.rw.RUnlock()
return ub.db == nil // TODO(vladimir) is this needed: || ub.db.file.UpgradeAvailable() // Get artifact.
artifact, err := module.instance.IntelUpdates().GetFile(ub.dbName)
if err != nil {
// Check if the geoip database is included in the binary index instead.
// TODO: Remove when intelhub builds the geoip database.
if artifact2, err2 := module.instance.BinaryUpdates().GetFile(ub.dbName); err2 == nil {
artifact = artifact2
err = nil
} else {
log.Warningf("geoip: failed to get geoip update: %s", err)
return nil
}
}
// Return artifact if not yet initialized.
if ub.db == nil {
return artifact
}
// Compare and return artifact only when confirmed newer.
if newer, _ := artifact.IsNewerThan(ub.db.update); newer {
return artifact
}
return nil
} }
// ReplaceDatabase replaces (or initially sets) the mmdb database. // ReplaceDatabase replaces (or initially sets) the mmdb database.
@ -153,16 +183,18 @@ func (upd *updateWorker) start() {
func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error { func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error {
for { for {
if upd.v4.NeedsUpdate() { update := upd.v4.AvailableUpdate()
if v4, err := getGeoIPDB(v4MMDBResource); err == nil { if update != nil {
if v4, err := getGeoIPDB(update); err == nil {
upd.v4.ReplaceDatabase(v4) upd.v4.ReplaceDatabase(v4)
} else { } else {
log.Warningf("geoip: failed to get v4 database: %s", err) log.Warningf("geoip: failed to get v4 database: %s", err)
} }
} }
if upd.v6.NeedsUpdate() { update = upd.v6.AvailableUpdate()
if v6, err := getGeoIPDB(v6MMDBResource); err == nil { if update != nil {
if v6, err := getGeoIPDB(update); err == nil {
upd.v6.ReplaceDatabase(v6) upd.v6.ReplaceDatabase(v6)
} else { } else {
log.Warningf("geoip: failed to get v6 database: %s", err) log.Warningf("geoip: failed to get v6 database: %s", err)
@ -177,36 +209,17 @@ func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error {
} }
} }
func getGeoIPDB(resource string) (*geoIPDB, error) { func getGeoIPDB(update *updates.Artifact) (*geoIPDB, error) {
log.Debugf("geoip: opening database %s", resource) log.Debugf("geoip: opening database %s", update.Path())
file, err := open(resource) reader, err := maxminddb.Open(update.Path())
if err != nil {
return nil, err
}
reader, err := maxminddb.Open(file.Path())
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to open: %w", err) return nil, fmt.Errorf("failed to open: %w", err)
} }
log.Debugf("geoip: successfully opened database %s", resource) log.Debugf("geoip: successfully opened database %s", update.Filename)
return &geoIPDB{ return &geoIPDB{
Reader: reader, Reader: reader,
file: file, update: update,
}, nil }, nil
} }
func open(resource string) (*updates.File, error) {
f, err := module.instance.IntelUpdates().GetFile(resource)
if err != nil {
return nil, fmt.Errorf("getting file: %w", err)
}
// unpacked, err := f.Unpack(".gz", updater.UnpackGZIP)
// if err != nil {
// return nil, "", fmt.Errorf("unpacking file: %w", err)
// }
return f, nil
}

View file

@ -66,5 +66,6 @@ func New(instance instance) (*GeoIP, error) {
} }
type instance interface { type instance interface {
BinaryUpdates() *updates.Updater
IntelUpdates() *updates.Updater IntelUpdates() *updates.Updater
} }

View file

@ -5,6 +5,7 @@ import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"io" "io"
"os"
"path/filepath" "path/filepath"
"sort" "sort"
"strings" "strings"
@ -17,7 +18,6 @@ import (
"zombiezen.com/go/sqlite/sqlitex" "zombiezen.com/go/sqlite/sqlitex"
"github.com/safing/portmaster/base/config" "github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/netquery/orm" "github.com/safing/portmaster/service/netquery/orm"
"github.com/safing/portmaster/service/network" "github.com/safing/portmaster/service/network"
@ -127,13 +127,13 @@ type (
// Note that write connections are serialized by the Database object before being // Note that write connections are serialized by the Database object before being
// handed over to SQLite. // handed over to SQLite.
func New(dbPath string) (*Database, error) { func New(dbPath string) (*Database, error) {
historyParentDir := dataroot.Root().ChildDir("databases", 0o700) historyParentDir := filepath.Join(module.instance.DataDir(), "databases")
if err := historyParentDir.Ensure(); err != nil { if err := os.MkdirAll(historyParentDir, 0o0700); err != nil {
return nil, fmt.Errorf("failed to ensure database directory exists: %w", err) return nil, fmt.Errorf("failed to ensure database directory exists: %w", err)
} }
// Get file location of history database. // Get file location of history database.
historyFile := filepath.Join(historyParentDir.Path, "history.db") historyFile := filepath.Join(historyParentDir, "history.db")
// Convert to SQLite URI path. // Convert to SQLite URI path.
historyURI := "file:///" + strings.TrimPrefix(filepath.ToSlash(historyFile), "/") historyURI := "file:///" + strings.TrimPrefix(filepath.ToSlash(historyFile), "/")
@ -225,13 +225,13 @@ func (db *Database) Close() error {
// VacuumHistory rewrites the history database in order to purge deleted records. // VacuumHistory rewrites the history database in order to purge deleted records.
func VacuumHistory(ctx context.Context) (err error) { func VacuumHistory(ctx context.Context) (err error) {
historyParentDir := dataroot.Root().ChildDir("databases", 0o700) historyParentDir := filepath.Join(module.instance.DataDir(), "databases")
if err := historyParentDir.Ensure(); err != nil { if err := os.MkdirAll(historyParentDir, 0o0700); err != nil {
return fmt.Errorf("failed to ensure database directory exists: %w", err) return fmt.Errorf("failed to ensure database directory exists: %w", err)
} }
// Get file location of history database. // Get file location of history database.
historyFile := filepath.Join(historyParentDir.Path, "history.db") historyFile := filepath.Join(historyParentDir, "history.db")
// Convert to SQLite URI path. // Convert to SQLite URI path.
historyURI := "file:///" + strings.TrimPrefix(filepath.ToSlash(historyFile), "/") historyURI := "file:///" + strings.TrimPrefix(filepath.ToSlash(historyFile), "/")

View file

@ -310,5 +310,6 @@ func NewModule(instance instance) (*NetQuery, error) {
} }
type instance interface { type instance interface {
DataDir() string
Profile() *profile.ProfileModule Profile() *profile.ProfileModule
} }

View file

@ -3,12 +3,13 @@ package profile
import ( import (
"errors" "errors"
"fmt" "fmt"
"os"
"path/filepath"
"sync/atomic" "sync/atomic"
"github.com/safing/portmaster/base/config" "github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/database" "github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/migration" "github.com/safing/portmaster/base/database/migration"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/log"
_ "github.com/safing/portmaster/service/core/base" _ "github.com/safing/portmaster/service/core/base"
"github.com/safing/portmaster/service/mgr" "github.com/safing/portmaster/service/mgr"
@ -65,11 +66,11 @@ func prep() error {
} }
// Setup icon storage location. // Setup icon storage location.
iconsDir := dataroot.Root().ChildDir("databases", 0o0700).ChildDir("icons", 0o0700) iconsDir := filepath.Join(module.instance.DataDir(), "databases", "icons")
if err := iconsDir.Ensure(); err != nil { if err := os.MkdirAll(iconsDir, 0o0700); err != nil {
return fmt.Errorf("failed to create/check icons directory: %w", err) return fmt.Errorf("failed to create/check icons directory: %w", err)
} }
binmeta.ProfileIconStoragePath = iconsDir.Path binmeta.ProfileIconStoragePath = iconsDir
return nil return nil
} }
@ -151,5 +152,6 @@ func NewModule(instance instance) (*ProfileModule, error) {
} }
type instance interface { type instance interface {
DataDir() string
Config() *config.Config Config() *config.Config
} }

View file

@ -197,7 +197,7 @@ func (profile *Profile) parseConfig() error {
if ok { if ok {
profile.filterListIDs, err = filterlists.ResolveListIDs(list) profile.filterListIDs, err = filterlists.ResolveListIDs(list)
if err != nil { if err != nil {
lastErr = err log.Warningf("profiles: failed to resolve filter list IDs: %s", err)
} else { } else {
profile.filterListsSet = true profile.filterListsSet = true
} }

View file

@ -2,10 +2,11 @@ package ui
import ( import (
"errors" "errors"
"os"
"path/filepath"
"sync/atomic" "sync/atomic"
"github.com/safing/portmaster/base/api" "github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr" "github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates" "github.com/safing/portmaster/service/updates"
@ -28,7 +29,8 @@ func start() error {
// may seem dangerous, but proper permission on the parent directory provide // may seem dangerous, but proper permission on the parent directory provide
// (some) protection. // (some) protection.
// Processes must _never_ read from this directory. // Processes must _never_ read from this directory.
err := dataroot.Root().ChildDir("exec", 0o0777).Ensure() execDir := filepath.Join(module.instance.DataDir(), "exec")
err := os.MkdirAll(execDir, 0o0777) //nolint:gosec // This is intentional.
if err != nil { if err != nil {
log.Warningf("ui: failed to create safe exec dir: %s", err) log.Warningf("ui: failed to create safe exec dir: %s", err)
} }
@ -81,6 +83,7 @@ func New(instance instance) (*UI, error) {
} }
type instance interface { type instance interface {
DataDir() string
API() *api.API API() *api.API
BinaryUpdates() *updates.Updater BinaryUpdates() *updates.Updater
} }

106
service/updates.go Normal file
View file

@ -0,0 +1,106 @@
package service
import (
"path/filepath"
go_runtime "runtime"
"github.com/safing/jess"
"github.com/safing/portmaster/service/updates"
)
var (
DefaultBinaryIndexURLs = []string{
"https://updates.safing.io/stable.v3.json",
}
DefaultIntelIndexURLs = []string{
"https://updates.safing.io/intel.v3.json",
}
// BinarySigningKeys holds the signing keys in text format.
BinarySigningKeys = []string{
// Safing Code Signing Key #1
"recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
// Safing Code Signing Key #2
"recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
}
// BinarySigningTrustStore is an in-memory trust store with the signing keys.
BinarySigningTrustStore = jess.NewMemTrustStore()
)
func init() {
for _, signingKey := range BinarySigningKeys {
rcpt, err := jess.RecipientFromTextFormat(signingKey)
if err != nil {
panic(err)
}
err = BinarySigningTrustStore.StoreSignet(rcpt)
if err != nil {
panic(err)
}
}
}
func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateConfig *updates.Config, err error) {
switch go_runtime.GOOS {
case "windows":
binaryUpdateConfig = &updates.Config{
Name: "binaries",
Directory: svcCfg.BinDir,
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"),
PurgeDirectory: filepath.Join(svcCfg.BinDir, "upgrade_obsolete_binaries"),
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: svcCfg.BinariesIndexURLs,
IndexFile: "index.json",
Verify: svcCfg.VerifyBinaryUpdates,
AutoDownload: false,
AutoApply: false,
NeedsRestart: true,
Notify: true,
}
intelUpdateConfig = &updates.Config{
Name: "intel",
Directory: filepath.Join(svcCfg.DataDir, "intel"),
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"),
PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"),
IndexURLs: svcCfg.IntelIndexURLs,
IndexFile: "index.json",
Verify: svcCfg.VerifyIntelUpdates,
AutoDownload: true,
AutoApply: true,
NeedsRestart: false,
Notify: false,
}
case "linux":
binaryUpdateConfig = &updates.Config{
Name: "binaries",
Directory: svcCfg.BinDir,
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"),
PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_binaries"),
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: svcCfg.BinariesIndexURLs,
IndexFile: "index.json",
Verify: svcCfg.VerifyBinaryUpdates,
AutoDownload: false,
AutoApply: false,
NeedsRestart: true,
Notify: true,
}
intelUpdateConfig = &updates.Config{
Name: "intel",
Directory: filepath.Join(svcCfg.DataDir, "intel"),
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"),
PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"),
IndexURLs: svcCfg.IntelIndexURLs,
IndexFile: "index.json",
Verify: svcCfg.VerifyIntelUpdates,
AutoDownload: true,
AutoApply: true,
NeedsRestart: false,
Notify: false,
}
}
return
}

View file

@ -55,7 +55,7 @@ func (d *Downloader) updateIndex(ctx context.Context) error {
break break
} }
log.Warningf("updates: failed to update index from %q: %s", url, err) log.Warningf("updates/%s: failed to update index from %q: %s", d.u.cfg.Name, url, err)
err = fmt.Errorf("update index file from %q: %s", url, err) err = fmt.Errorf("update index file from %q: %s", url, err)
} }
if err != nil { if err != nil {
@ -111,7 +111,7 @@ func (d *Downloader) gatherExistingFiles(dir string) error {
// Read full file. // Read full file.
fileData, err := os.ReadFile(fullpath) fileData, err := os.ReadFile(fullpath)
if err != nil { if err != nil {
log.Debugf("updates: failed to read file %q while searching for existing files: %w", fullpath, err) log.Debugf("updates/%s: failed to read file %q while searching for existing files: %w", d.u.cfg.Name, fullpath, err)
return fmt.Errorf("failed to read file %s: %w", fullpath, err) return fmt.Errorf("failed to read file %s: %w", fullpath, err)
} }
@ -150,7 +150,7 @@ artifacts:
if err == nil { if err == nil {
continue artifacts continue artifacts
} }
log.Debugf("updates: failed to copy existing file %s: %w", artifact.Filename, err) log.Debugf("updates/%s: failed to copy existing file %s: %w", d.u.cfg.Name, artifact.Filename, err)
} }
// Try to download the artifact from one of the URLs. // Try to download the artifact from one of the URLs.
@ -182,7 +182,7 @@ artifacts:
return fmt.Errorf("rename %s after write: %w", artifact.Filename, err) return fmt.Errorf("rename %s after write: %w", artifact.Filename, err)
} }
log.Infof("updates: downloaded and verified %s", artifact.Filename) log.Infof("updates/%s: downloaded and verified %s", d.u.cfg.Name, artifact.Filename)
} }
return nil return nil
} }

View file

@ -35,7 +35,8 @@ type Artifact struct {
Unpack string `json:"Unpack,omitempty"` Unpack string `json:"Unpack,omitempty"`
Version string `json:"Version,omitempty"` Version string `json:"Version,omitempty"`
localFile string localFile string
versionNum *semver.Version
} }
// GetFileMode returns the required filesystem permission for the artifact. // GetFileMode returns the required filesystem permission for the artifact.
@ -52,6 +53,67 @@ func (a *Artifact) GetFileMode() os.FileMode {
return defaultFileMode return defaultFileMode
} }
// Path returns the absolute path to the local file.
func (a *Artifact) Path() string {
return a.localFile
}
// SemVer returns the version of the artifact.
func (a *Artifact) SemVer() *semver.Version {
return a.versionNum
}
// IsNewerThan returns whether the artifact is newer than the given artifact.
// Returns true if the given artifact is nil.
// The second return value "ok" is false when version could not be compared.
// In this case, it is up to the caller to decide how to proceed.
func (a *Artifact) IsNewerThan(b *Artifact) (newer, ok bool) {
switch {
case a == nil:
return false, false
case b == nil:
return true, true
case a.versionNum == nil:
return false, false
case b.versionNum == nil:
return false, false
case a.versionNum.GreaterThan(b.versionNum):
return true, true
default:
return false, true
}
}
func (a *Artifact) export(dir string, indexVersion *semver.Version) *Artifact {
copy := &Artifact{
Filename: a.Filename,
SHA256: a.SHA256,
URLs: a.URLs,
Platform: a.Platform,
Unpack: a.Unpack,
Version: a.Version,
localFile: filepath.Join(dir, a.Filename),
versionNum: a.versionNum,
}
// Make sure we have a version number.
switch {
case copy.versionNum != nil:
// Version already parsed.
case copy.Version != "":
// Need to parse version.
v, err := semver.NewVersion(copy.Version)
if err == nil {
copy.versionNum = v
}
default:
// No version defined, inherit index version.
copy.versionNum = indexVersion
}
return copy
}
// Index represents a collection of artifacts with metadata. // Index represents a collection of artifacts with metadata.
type Index struct { type Index struct {
Name string `json:"Name"` Name string `json:"Name"`
@ -90,16 +152,26 @@ func ParseIndex(jsonContent []byte, trustStore jess.TrustStore) (*Index, error)
return nil, fmt.Errorf("parse index: %w", err) return nil, fmt.Errorf("parse index: %w", err)
} }
// Initialize data.
err = index.init()
if err != nil {
return nil, err
}
return &index, nil
}
func (index *Index) init() error {
// Parse version number, if set. // Parse version number, if set.
if index.Version != "" { if index.Version != "" {
versionNum, err := semver.NewVersion(index.Version) versionNum, err := semver.NewVersion(index.Version)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid index version %q: %w", index.Version, err) return fmt.Errorf("invalid index version %q: %w", index.Version, err)
} }
index.versionNum = versionNum index.versionNum = versionNum
} }
// Filter artifacts by currnet platform. // Filter artifacts by current platform.
filtered := make([]Artifact, 0) filtered := make([]Artifact, 0)
for _, a := range index.Artifacts { for _, a := range index.Artifacts {
if a.Platform == "" || a.Platform == currentPlatform { if a.Platform == "" || a.Platform == currentPlatform {
@ -108,7 +180,19 @@ func ParseIndex(jsonContent []byte, trustStore jess.TrustStore) (*Index, error)
} }
index.Artifacts = filtered index.Artifacts = filtered
return &index, nil // Parse artifact version numbers.
for _, a := range index.Artifacts {
if a.Version != "" {
v, err := semver.NewVersion(a.Version)
if err == nil {
a.versionNum = v
}
} else {
a.versionNum = index.versionNum
}
}
return nil
} }
// CanDoUpgrades returns whether the index is able to follow a secure upgrade path. // CanDoUpgrades returns whether the index is able to follow a secure upgrade path.

View file

@ -45,6 +45,8 @@ var (
// Config holds the configuration for the updates module. // Config holds the configuration for the updates module.
type Config struct { type Config struct {
// Name of the updater.
Name string
// Directory is the main directory where the currently to-be-used artifacts live. // Directory is the main directory where the currently to-be-used artifacts live.
Directory string Directory string
// DownloadDirectory is the directory where new artifacts are downloaded to and prepared for upgrading. // DownloadDirectory is the directory where new artifacts are downloaded to and prepared for upgrading.
@ -80,6 +82,8 @@ type Config struct {
func (cfg *Config) Check() error { func (cfg *Config) Check() error {
// Check if required fields are set. // Check if required fields are set.
switch { switch {
case cfg.Name == "":
return errors.New("name must be set")
case cfg.Directory == "": case cfg.Directory == "":
return errors.New("directory must be set") return errors.New("directory must be set")
case cfg.DownloadDirectory == "": case cfg.DownloadDirectory == "":
@ -157,19 +161,22 @@ func New(instance instance, name string, cfg Config) (*Updater, error) {
// Load index. // Load index.
index, err := LoadIndex(filepath.Join(cfg.Directory, cfg.IndexFile), cfg.Verify) index, err := LoadIndex(filepath.Join(cfg.Directory, cfg.IndexFile), cfg.Verify)
if err != nil { if err == nil {
if !errors.Is(err, os.ErrNotExist) { module.index = index
log.Errorf("updates: invalid index file, falling back to dir scan: %w", err) return module, nil
}
// Fall back to scanning the directory.
index, err = GenerateIndexFromDir(cfg.Directory, IndexScanConfig{Version: "0.0.0"})
if err != nil {
return nil, fmt.Errorf("updates index load and dir scan failed: %w", err)
}
} }
module.index = index
// Fall back to scanning the directory.
if !errors.Is(err, os.ErrNotExist) {
log.Errorf("updates/%s: invalid index file, falling back to dir scan: %s", cfg.Name, err)
}
index, err = GenerateIndexFromDir(cfg.Directory, IndexScanConfig{Version: "0.0.0"})
if err == nil && index.init() == nil {
module.index = index
return module, nil
}
// Fall back to empty index.
return module, nil return module, nil
} }
@ -207,7 +214,7 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV
u.indexLock.Unlock() u.indexLock.Unlock()
// Check with local pointer to index. // Check with local pointer to index.
if err := index.ShouldUpgradeTo(downloader.index); err != nil { if err := index.ShouldUpgradeTo(downloader.index); err != nil {
log.Infof("updates: no new or eligible update: %s", err) log.Infof("updates/%s: no new or eligible update: %s", u.cfg.Name, err)
if u.cfg.Notify && u.instance.Notifications() != nil { if u.cfg.Notify && u.instance.Notifications() != nil {
u.instance.Notifications().NotifyInfo( u.instance.Notifications().NotifyInfo(
noNewUpdateNotificationID, noNewUpdateNotificationID,
@ -247,12 +254,12 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV
// Download any remaining needed files. // Download any remaining needed files.
// If everything is already found in the download directory, then this is a no-op. // If everything is already found in the download directory, then this is a no-op.
log.Infof("updates: downloading new version: %s %s", downloader.index.Name, downloader.index.Version) log.Infof("updates/%s: downloading new version: %s %s", u.cfg.Name, downloader.index.Name, downloader.index.Version)
err = downloader.downloadArtifacts(w.Ctx()) err = downloader.downloadArtifacts(w.Ctx())
if err != nil { if err != nil {
log.Errorf("updates: failed to download update: %s", err) log.Errorf("updates/%s: failed to download update: %s", u.cfg.Name, err)
if err := u.deleteUnfinishedFiles(u.cfg.DownloadDirectory); err != nil { if err := u.deleteUnfinishedFiles(u.cfg.DownloadDirectory); err != nil {
log.Debugf("updates: failed to delete unfinished files in download directory %s", u.cfg.DownloadDirectory) log.Debugf("updates/%s: failed to delete unfinished files in download directory %s", u.cfg.Name, u.cfg.DownloadDirectory)
} }
return fmt.Errorf("downloading failed: %w", err) return fmt.Errorf("downloading failed: %w", err)
} }
@ -282,7 +289,7 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV
err = u.upgrade(downloader, ignoreVersion) err = u.upgrade(downloader, ignoreVersion)
if err != nil { if err != nil {
if err := u.deleteUnfinishedFiles(u.cfg.PurgeDirectory); err != nil { if err := u.deleteUnfinishedFiles(u.cfg.PurgeDirectory); err != nil {
log.Debugf("updates: failed to delete unfinished files in purge directory %s", u.cfg.PurgeDirectory) log.Debugf("updates/%s: failed to delete unfinished files in purge directory %s", u.cfg.Name, u.cfg.PurgeDirectory)
} }
return err return err
} }
@ -334,6 +341,14 @@ func (u *Updater) upgradeWorker(w *mgr.WorkerCtx) error {
return nil return nil
} }
// ForceUpdate executes a forced update and upgrade directly and synchronously
// and is intended to be used only within a tool, not a service.
// It blocks until the update-and-upgrade worker finishes and returns its error.
func (u *Updater) ForceUpdate() error {
return u.m.Do("update and upgrade", func(w *mgr.WorkerCtx) error {
// NOTE(review): both trailing bool args are true here — one forces past
// version checks (ignoreVersion); confirm the meaning of the other flag.
return u.updateAndUpgrade(w, u.cfg.IndexURLs, true, true)
})
}
// UpdateFromURL installs an update from the provided url. // UpdateFromURL installs an update from the provided url.
func (u *Updater) UpdateFromURL(url string) error { func (u *Updater) UpdateFromURL(url string) error {
u.m.Go("custom update from url", func(w *mgr.WorkerCtx) error { u.m.Go("custom update from url", func(w *mgr.WorkerCtx) error {
@ -383,10 +398,15 @@ func (u *Updater) GetMainDir() string {
} }
// GetFile returns the path of a file given the name. Returns ErrNotFound if file is not found. // GetFile returns the path of a file given the name. Returns ErrNotFound if file is not found.
func (u *Updater) GetFile(name string) (string, error) { func (u *Updater) GetFile(name string) (*Artifact, error) {
u.indexLock.Lock() u.indexLock.Lock()
defer u.indexLock.Unlock() defer u.indexLock.Unlock()
// Check if any index is active.
if u.index == nil {
return nil, ErrNotFound
}
for _, artifact := range u.index.Artifacts { for _, artifact := range u.index.Artifacts {
switch { switch {
case artifact.Filename != name: case artifact.Filename != name:
@ -396,11 +416,11 @@ func (u *Updater) GetFile(name string) (string, error) {
// Platforms are usually pre-filtered, but just to be sure. // Platforms are usually pre-filtered, but just to be sure.
default: default:
// Artifact matches! // Artifact matches!
return filepath.Join(u.cfg.Directory, artifact.Filename), nil return artifact.export(u.cfg.Directory, u.index.versionNum), nil
} }
} }
return "", ErrNotFound return nil, ErrNotFound
} }
// Stop stops the module. // Stop stops the module.

View file

@ -43,7 +43,7 @@ func (u *Updater) upgrade(downloader *Downloader, ignoreVersion bool) error {
} }
// Recovery failed too. // Recovery failed too.
return fmt.Errorf("upgrade (including recovery) failed: %s", upgradeError) return fmt.Errorf("upgrade of %s (including recovery) failed: %s", u.cfg.Name, upgradeError)
} }
func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) error { func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) error {
@ -60,7 +60,9 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) e
} }
// Move current version files into purge folder. // Move current version files into purge folder.
log.Debugf("updates: removing the old version (v%s from %s)", u.index.Version, u.index.Published) if u.index != nil {
log.Debugf("updates/%s: removing the old version (v%s from %s)", u.cfg.Name, u.index.Version, u.index.Published)
}
files, err := os.ReadDir(u.cfg.Directory) files, err := os.ReadDir(u.cfg.Directory)
if err != nil { if err != nil {
return fmt.Errorf("read current directory: %w", err) return fmt.Errorf("read current directory: %w", err)
@ -74,17 +76,17 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) e
// Otherwise, move file to purge dir. // Otherwise, move file to purge dir.
src := filepath.Join(u.cfg.Directory, file.Name()) src := filepath.Join(u.cfg.Directory, file.Name())
dst := filepath.Join(u.cfg.PurgeDirectory, file.Name()) dst := filepath.Join(u.cfg.PurgeDirectory, file.Name())
err := moveFile(src, dst, "", file.Type().Perm()) err := u.moveFile(src, dst, "", file.Type().Perm())
if err != nil { if err != nil {
return fmt.Errorf("failed to move current file %s to purge dir: %w", file.Name(), err) return fmt.Errorf("failed to move current file %s to purge dir: %w", file.Name(), err)
} }
} }
// Move the new index file into main directory. // Move the new index file into main directory.
log.Debugf("updates: installing the new version (v%s from %s)", u.index.Version, u.index.Published) log.Debugf("updates/%s: installing the new version (v%s from %s)", u.cfg.Name, downloader.index.Version, downloader.index.Published)
src := filepath.Join(u.cfg.DownloadDirectory, u.cfg.IndexFile) src := filepath.Join(u.cfg.DownloadDirectory, u.cfg.IndexFile)
dst := filepath.Join(u.cfg.Directory, u.cfg.IndexFile) dst := filepath.Join(u.cfg.Directory, u.cfg.IndexFile)
err = moveFile(src, dst, "", defaultFileMode) err = u.moveFile(src, dst, "", defaultFileMode)
if err != nil { if err != nil {
return fmt.Errorf("failed to move index file to %s: %w", dst, err) return fmt.Errorf("failed to move index file to %s: %w", dst, err)
} }
@ -93,30 +95,30 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) e
for _, artifact := range downloader.index.Artifacts { for _, artifact := range downloader.index.Artifacts {
src = filepath.Join(u.cfg.DownloadDirectory, artifact.Filename) src = filepath.Join(u.cfg.DownloadDirectory, artifact.Filename)
dst = filepath.Join(u.cfg.Directory, artifact.Filename) dst = filepath.Join(u.cfg.Directory, artifact.Filename)
err = moveFile(src, dst, artifact.SHA256, artifact.GetFileMode()) err = u.moveFile(src, dst, artifact.SHA256, artifact.GetFileMode())
if err != nil { if err != nil {
return fmt.Errorf("failed to move file %s: %w", artifact.Filename, err) return fmt.Errorf("failed to move file %s: %w", artifact.Filename, err)
} else { } else {
log.Debugf("updates: %s moved", artifact.Filename) log.Debugf("updates/%s: %s moved", u.cfg.Name, artifact.Filename)
} }
} }
// Set new index on module. // Set new index on module.
u.index = downloader.index u.index = downloader.index
log.Infof("updates: update complete (v%s from %s)", u.index.Version, u.index.Published) log.Infof("updates/%s: update complete (v%s from %s)", u.cfg.Name, u.index.Version, u.index.Published)
return nil return nil
} }
// moveFile moves a file and falls back to copying if it fails. // moveFile moves a file and falls back to copying if it fails.
func moveFile(currentPath, newPath string, sha256sum string, fileMode fs.FileMode) error { func (u *Updater) moveFile(currentPath, newPath string, sha256sum string, fileMode fs.FileMode) error {
// Try to simply move file. // Try to simply move file.
err := os.Rename(currentPath, newPath) err := os.Rename(currentPath, newPath)
if err == nil { if err == nil {
// Moving was successful, return. // Moving was successful, return.
return nil return nil
} }
log.Tracef("updates: failed to move to %q, falling back to copy+delete: %w", newPath, err) log.Tracef("updates/%s: failed to move to %q, falling back to copy+delete: %s", u.cfg.Name, newPath, err)
// Copy and check the checksum while we are at it. // Copy and check the checksum while we are at it.
err = copyAndCheckSHA256Sum(currentPath, newPath, sha256sum, fileMode) err = copyAndCheckSHA256Sum(currentPath, newPath, sha256sum, fileMode)
@ -139,10 +141,10 @@ func (u *Updater) recoverFromFailedUpgrade() error {
for _, file := range files { for _, file := range files {
purgedFile := filepath.Join(u.cfg.PurgeDirectory, file.Name()) purgedFile := filepath.Join(u.cfg.PurgeDirectory, file.Name())
activeFile := filepath.Join(u.cfg.Directory, file.Name()) activeFile := filepath.Join(u.cfg.Directory, file.Name())
err := moveFile(purgedFile, activeFile, "", file.Type().Perm()) err := u.moveFile(purgedFile, activeFile, "", file.Type().Perm())
if err != nil { if err != nil {
// Only warn and continue to recover as many files as possible. // Only warn and continue to recover as many files as possible.
log.Warningf("updates: failed to roll back file %s: %w", file.Name(), err) log.Warningf("updates/%s: failed to roll back file %s: %s", u.cfg.Name, file.Name(), err)
} }
} }
@ -176,18 +178,18 @@ func (u *Updater) deleteUnfinishedFiles(dir string) error {
case strings.HasSuffix(e.Name(), ".download"): case strings.HasSuffix(e.Name(), ".download"):
path := filepath.Join(dir, e.Name()) path := filepath.Join(dir, e.Name())
log.Warningf("updates: deleting unfinished download file: %s\n", path) log.Warningf("updates/%s: deleting unfinished download file: %s", u.cfg.Name, path)
err := os.Remove(path) err := os.Remove(path)
if err != nil { if err != nil {
log.Errorf("updates: failed to delete unfinished download file %s: %s", path, err) log.Errorf("updates/%s: failed to delete unfinished download file %s: %s", u.cfg.Name, path, err)
} }
case strings.HasSuffix(e.Name(), ".copy"): case strings.HasSuffix(e.Name(), ".copy"):
path := filepath.Join(dir, e.Name()) path := filepath.Join(dir, e.Name())
log.Warningf("updates: deleting unfinished copied file: %s\n", path) log.Warningf("updates/%s: deleting unfinished copied file: %s", u.cfg.Name, path)
err := os.Remove(path) err := os.Remove(path)
if err != nil { if err != nil {
log.Errorf("updates: failed to delete unfinished copied file %s: %s", path, err) log.Errorf("updates/%s: failed to delete unfinished copied file %s: %s", u.cfg.Name, path, err)
} }
} }
} }

View file

@ -15,8 +15,8 @@ import (
) )
var ( var (
intelResource *updates.File intelResource *updates.Artifact
intelResourcePath = "intel/spn/main-intel.yaml" intelResourceName = "main-intel.yaml"
intelResourceMapName = "main" intelResourceMapName = "main"
intelResourceUpdateLock sync.Mutex intelResourceUpdateLock sync.Mutex
) )
@ -42,18 +42,21 @@ func updateSPNIntel(_ context.Context, _ interface{}) (err error) {
return fmt.Errorf("intel resource not for map %q", conf.MainMapName) return fmt.Errorf("intel resource not for map %q", conf.MainMapName)
} }
// Check if there is something to do. // Get possibly updated file.
// TODO(vladimir): is update check needed file, err := module.instance.IntelUpdates().GetFile(intelResourceName)
if intelResource != nil { // && !intelResource.UpgradeAvailable() {
return nil
}
// Get intel file and load it from disk.
intelResource, err = module.instance.IntelUpdates().GetFile(intelResourcePath)
if err != nil { if err != nil {
return fmt.Errorf("failed to get SPN intel update: %w", err) return fmt.Errorf("failed to get SPN intel update: %w", err)
} }
intelData, err := os.ReadFile(intelResource.Path())
// Check if file is newer.
// Continue on check failure.
newer, ok := file.IsNewerThan(intelResource)
if ok && !newer {
return nil
}
// Load intel file from disk.
intelData, err := os.ReadFile(file.Path())
if err != nil { if err != nil {
return fmt.Errorf("failed to load SPN intel update: %w", err) return fmt.Errorf("failed to load SPN intel update: %w", err)
} }
@ -64,8 +67,15 @@ func updateSPNIntel(_ context.Context, _ interface{}) (err error) {
return fmt.Errorf("failed to parse SPN intel update: %w", err) return fmt.Errorf("failed to parse SPN intel update: %w", err)
} }
// Apply new intel.
setVirtualNetworkConfig(intel.VirtualNetworks) setVirtualNetworkConfig(intel.VirtualNetworks)
return navigator.Main.UpdateIntel(intel, cfgOptionTrustNodeNodes()) err = navigator.Main.UpdateIntel(intel, cfgOptionTrustNodeNodes())
if err != nil {
return fmt.Errorf("failed to update intel on map: %w", err)
}
intelResource = file
return nil
} }
func resetSPNIntel() { func resetSPNIntel() {