Merge pull request #146 from safing/feature/ui-revamp

Summary PR for PM v0.6 related changes
Daniel 2020-11-24 16:51:01 +01:00 committed by GitHub
commit 8dfcab43c0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
98 changed files with 3735 additions and 2160 deletions

View file

@ -3,14 +3,16 @@
baseDir="$( cd "$(dirname "$0")" && pwd )"
cd "$baseDir"
COL_OFF="\033[00m"
COL_OFF="\033[0m"
COL_BOLD="\033[01;01m"
COL_RED="\033[31m"
COL_GREEN="\033[32m"
COL_YELLOW="\033[33m"
destDirPart1="../../dist"
destDirPart2="core"
function check {
function prep {
# output
output="main"
# get version
@ -25,46 +27,47 @@ function check {
fi
# build destination path
destPath=${destDirPart1}/${platform}/${destDirPart2}/$filename
}
function check {
prep
# check if file exists
if [[ -f $destPath ]]; then
echo "[core] $platform $version already built"
echo "[core] $platform v$version already built"
else
echo -e "${COL_BOLD}[core] $platform $version${COL_OFF}"
echo -e "${COL_BOLD}[core] $platform v$version${COL_OFF}"
fi
}
function build {
# output
output="main"
# get version
version=$(grep "info.Set" main.go | cut -d'"' -f4)
# build versioned file name
filename="portmaster-core_v${version//./-}"
# platform
platform="${GOOS}_${GOARCH}"
if [[ $GOOS == "windows" ]]; then
filename="${filename}.exe"
output="${output}.exe"
fi
# build destination path
destPath=${destDirPart1}/${platform}/${destDirPart2}/$filename
prep
# check if file exists
if [[ -f $destPath ]]; then
echo "[core] $platform already built in version $version, skipping..."
echo "[core] $platform already built in v$version, skipping..."
return
fi
# build
./build main.go
if [[ $? -ne 0 ]]; then
echo -e "\n${COL_BOLD}[core] $platform: ${COL_RED}BUILD FAILED.${COL_OFF}"
echo -e "\n${COL_BOLD}[core] $platform v$version: ${COL_RED}BUILD FAILED.${COL_OFF}"
exit 1
fi
mkdir -p $(dirname $destPath)
cp $output $destPath
echo -e "\n${COL_BOLD}[core] $platform: successfully built.${COL_OFF}"
echo -e "\n${COL_BOLD}[core] $platform v$version: ${COL_GREEN}successfully built.${COL_OFF}"
}
function reset {
prep
# delete if file exists
if [[ -f $destPath ]]; then
rm $destPath
echo "[core] $platform v$version deleted."
fi
}
function check_all {
@ -79,6 +82,12 @@ function build_all {
GOOS=darwin GOARCH=amd64 build
}
function reset_all {
GOOS=linux GOARCH=amd64 reset
GOOS=windows GOARCH=amd64 reset
GOOS=darwin GOARCH=amd64 reset
}
case $1 in
"check" )
check_all
@ -86,6 +95,9 @@ case $1 in
"build" )
build_all
;;
"reset" )
reset_all
;;
* )
echo ""
echo "build list:"

View file

@ -22,6 +22,7 @@ import (
var (
dataDir string
staging bool
maxRetries int
dataRoot *utils.DirStructure
logsRoot *utils.DirStructure
@ -41,8 +42,8 @@ var (
Use: "portmaster-start",
Short: "Start Portmaster components",
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) {
mustLoadIndex := cmd == updatesCmd
if err := configureDataRoot(mustLoadIndex); err != nil {
mustLoadIndex := indexRequired(cmd)
if err := configureRegistry(mustLoadIndex); err != nil {
return err
}
@ -64,8 +65,9 @@ func init() {
{
flags.StringVar(&dataDir, "data", "", "Configures the data directory. Alternatively, this can also be set via the environment variable PORTMASTER_DATA.")
flags.StringVar(&registry.UserAgent, "update-agent", "Start", "Sets the user agent for requests to the update server")
flags.BoolVar(&staging, "staging", false, "Use staging update channel (for testing only)")
flags.IntVar(&maxRetries, "max-retries", 5, "Maximum number of retries when starting a Portmaster component")
flags.BoolVar(&stdinSignals, "input-signals", false, "Emulate signals using stdid.")
flags.BoolVar(&stdinSignals, "input-signals", false, "Emulate signals using stdin.")
_ = rootCmd.MarkPersistentFlagDirname("data")
_ = flags.MarkHidden("input-signals")
}
@ -131,34 +133,32 @@ func initCobra() {
portlog.SetLogLevel(portlog.CriticalLevel)
}
func configureDataRoot(mustLoadIndex bool) error {
// The data directory is not
// check for environment variable
// PORTMASTER_DATA
func configureRegistry(mustLoadIndex bool) error {
// If dataDir is not set, check the environment variable.
if dataDir == "" {
dataDir = os.Getenv("PORTMASTER_DATA")
}
// if it's still empty try to auto-detect it
// If it's still empty, try to auto-detect it.
if dataDir == "" {
dataDir = detectInstallationDir()
}
// finally, if it's still empty the user must provide it
// Finally, if it's still empty, the user must provide it.
if dataDir == "" {
return errors.New("please set the data directory using --data=/path/to/data/dir")
}
// remove redundant escape characters and quotes
// Remove left over quotes.
dataDir = strings.Trim(dataDir, `\"`)
// initialize dataroot
// Initialize data root.
err := dataroot.Initialize(dataDir, 0755)
if err != nil {
return fmt.Errorf("failed to initialize data root: %s", err)
}
dataRoot = dataroot.Root()
// initialize registry
// Initialize registry.
err = registry.Initialize(dataRoot.ChildDir("updates", 0755))
if err != nil {
return err
@ -177,6 +177,19 @@ func configureDataRoot(mustLoadIndex bool) error {
// Beta: true,
// })
if stagingActive() {
// Set flag no matter how staging was activated.
staging = true
log.Println("WARNING: staging environment is active.")
registry.AddIndex(updater.Index{
Path: "staging.json",
Stable: true,
Beta: true,
})
}
return updateRegistryIndex(mustLoadIndex)
}
@ -233,3 +246,14 @@ func detectInstallationDir() string {
return parent
}
func stagingActive() bool {
// Check flag and env variable.
if staging || os.Getenv("PORTMASTER_STAGING") == "enabled" {
return true
}
// Check if staging index is present and accessible.
_, err := os.Stat(filepath.Join(registry.StorageDir().Path, "staging.json"))
return err == nil
}

View file

@ -3,14 +3,16 @@
baseDir="$( cd "$(dirname "$0")" && pwd )"
cd "$baseDir"
COL_OFF="\033[00m"
COL_OFF="\033[0m"
COL_BOLD="\033[01;01m"
COL_RED="\033[31m"
COL_GREEN="\033[32m"
COL_YELLOW="\033[33m"
destDirPart1="../../dist"
destDirPart2="start"
function check {
function prep {
# output
output="portmaster-start"
# get version
@ -25,46 +27,47 @@ function check {
fi
# build destination path
destPath=${destDirPart1}/${platform}/${destDirPart2}/$filename
}
function check {
prep
# check if file exists
if [[ -f $destPath ]]; then
echo "[start] $platform $version already built"
else
echo -e "${COL_BOLD}[start] $platform $version${COL_OFF}"
echo -e "${COL_BOLD}[start] $platform v$version${COL_OFF}"
fi
}
function build {
# output
output="portmaster-start"
# get version
version=$(grep "info.Set" main.go | cut -d'"' -f4)
# build versioned file name
filename="portmaster-start_v${version//./-}"
# platform
platform="${GOOS}_${GOARCH}"
if [[ $GOOS == "windows" ]]; then
filename="${filename}.exe"
output="${output}.exe"
fi
# build destination path
destPath=${destDirPart1}/${platform}/${destDirPart2}/$filename
prep
# check if file exists
if [[ -f $destPath ]]; then
echo "[start] $platform already built in version $version, skipping..."
echo "[start] $platform already built in v$version, skipping..."
return
fi
# build
./build
if [[ $? -ne 0 ]]; then
echo -e "\n${COL_BOLD}[start] $platform: ${COL_RED}BUILD FAILED.${COL_OFF}"
echo -e "\n${COL_BOLD}[start] $platform v$version: ${COL_RED}BUILD FAILED.${COL_OFF}"
exit 1
fi
mkdir -p $(dirname $destPath)
cp $output $destPath
echo -e "\n${COL_BOLD}[start] $platform: successfully built.${COL_OFF}"
echo -e "\n${COL_BOLD}[start] $platform v$version: ${COL_GREEN}successfully built.${COL_OFF}"
}
function reset {
prep
# delete if file exists
if [[ -f $destPath ]]; then
rm $destPath
echo "[start] $platform v$version deleted."
fi
}
function check_all {
@ -79,6 +82,12 @@ function build_all {
GOOS=darwin GOARCH=amd64 build
}
function reset_all {
GOOS=linux GOARCH=amd64 reset
GOOS=windows GOARCH=amd64 reset
GOOS=darwin GOARCH=amd64 reset
}
case $1 in
"check" )
check_all
@ -86,6 +95,9 @@ case $1 in
"build" )
build_all
;;
"reset" )
reset_all
;;
* )
echo ""
echo "build list:"

View file

@ -7,6 +7,7 @@ import (
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"time"
@ -19,6 +20,9 @@ const (
// RestartExitCode is the exit code that any service started by portmaster-start
// can return in order to trigger a restart after a clean shutdown.
RestartExitCode = 23
exeSuffix = ".exe"
zipSuffix = ".zip"
)
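
To illustrate the restart contract described above, here is a minimal, hypothetical sketch of a managed component that requests a restart by exiting with RestartExitCode (the constant is mirrored locally; the trigger condition is an assumption made for the example):

package main

import (
	"fmt"
	"os"
)

// RestartExitCode mirrors the constant used by portmaster-start: a component
// that exits with this code after a clean shutdown is restarted instead of
// being treated as finished.
const RestartExitCode = 23

func main() {
	// ... the component runs until it decides to shut down ...
	restartRequested := true // assumption: set by the component's own logic

	if restartRequested {
		fmt.Println("shutting down cleanly, requesting restart")
		os.Exit(RestartExitCode) // portmaster-start restarts this component
	}
	// Exiting normally (code 0) is treated as a final shutdown.
}
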
var (
@ -49,7 +53,7 @@ func init() {
},
{
Name: "Portmaster App",
Identifier: "app/portmaster-app",
Identifier: "app/portmaster-app.zip",
AllowDownload: false,
AllowHidingWindow: false,
},
@ -62,7 +66,6 @@ func init() {
{
Name: "Safing Privacy Network",
Identifier: "hub/spn-hub",
ShortIdentifier: "hub",
AllowDownload: true,
AllowHidingWindow: true,
},
@ -147,8 +150,8 @@ func run(opts *Options, cmdArgs []string) (err error) {
}()
// adapt identifier
if onWindows {
opts.Identifier += ".exe"
if onWindows && !strings.HasSuffix(opts.Identifier, zipSuffix) {
opts.Identifier += exeSuffix
}
// setup logging
@ -275,16 +278,30 @@ func execute(opts *Options, args []string) (cont bool, err error) {
if err != nil {
return true, fmt.Errorf("could not get component: %w", err)
}
binPath := file.Path()
// Adapt path for packaged software.
if strings.HasSuffix(binPath, zipSuffix) {
// Remove suffix from binary path.
binPath = strings.TrimSuffix(binPath, zipSuffix)
// Add binary with the same name to access the unpacked binary.
binPath = filepath.Join(binPath, filepath.Base(binPath))
// Adapt binary path on Windows.
if onWindows {
binPath += exeSuffix
}
}
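// Example of the rewrite above (hypothetical versioned path, Linux):
//   .../updates/linux_amd64/app/portmaster-app_v0-6-0.zip
//   -> .../updates/linux_amd64/app/portmaster-app_v0-6-0/portmaster-app_v0-6-0
// On Windows, ".exe" is appended to the final element.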
// check permission
if err := fixExecPerm(file.Path()); err != nil {
if err := fixExecPerm(binPath); err != nil {
return true, err
}
log.Printf("starting %s %s\n", file.Path(), strings.Join(args, " "))
log.Printf("starting %s %s\n", binPath, strings.Join(args, " "))
// create command
exc := exec.Command(file.Path(), args...) //nolint:gosec // everything is okay
exc := exec.Command(binPath, args...) //nolint:gosec // everything is okay
if !runningInConsole && opts.AllowHidingWindow {
// Windows only:

View file

@ -24,6 +24,7 @@ var (
RunE: runAndLogControlError(func(cmd *cobra.Command, args []string) error {
return runService(cmd, &Options{
Identifier: "core/portmaster-core",
ShortIdentifier: "core",
AllowDownload: true,
AllowHidingWindow: false,
NoOutput: true,

View file

@ -15,8 +15,8 @@ func init() {
var showCmd = &cobra.Command{
Use: "show",
PersistentPreRunE: func(*cobra.Command, []string) error {
// all show sub-commands need the data-root but no logging.
return configureDataRoot(false)
// All show sub-commands need the registry but no logging.
return configureRegistry(false)
},
Short: "Show the command to run a Portmaster component yourself",
}
@ -27,7 +27,7 @@ func show(opts *Options, cmdArgs []string) error {
// adapt identifier
if onWindows {
opts.Identifier += ".exe"
opts.Identifier += exeSuffix
}
file, err := registry.GetFile(platform(opts.Identifier))

View file

@ -3,22 +3,49 @@ package main
import (
"context"
"fmt"
"os"
"runtime"
"github.com/safing/portbase/log"
"github.com/spf13/cobra"
)
var reset bool
func init() {
rootCmd.AddCommand(updatesCmd)
rootCmd.AddCommand(updateCmd)
rootCmd.AddCommand(purgeCmd)
flags := updateCmd.Flags()
flags.BoolVar(&reset, "reset", false, "Delete all resources and re-download the basic set")
}
var updatesCmd = &cobra.Command{
Use: "update",
Short: "Run a manual update process",
RunE: func(cmd *cobra.Command, args []string) error {
return downloadUpdates()
},
var (
updateCmd = &cobra.Command{
Use: "update",
Short: "Run a manual update process",
RunE: func(cmd *cobra.Command, args []string) error {
return downloadUpdates()
},
}
purgeCmd = &cobra.Command{
Use: "purge",
Short: "Remove old resource versions that are superseded by at least three versions",
RunE: func(cmd *cobra.Command, args []string) error {
return purge()
},
}
)
func indexRequired(cmd *cobra.Command) bool {
switch cmd {
case updateCmd,
purgeCmd:
return true
default:
return false
}
}
func downloadUpdates() error {
@ -26,8 +53,9 @@ func downloadUpdates() error {
if onWindows {
registry.MandatoryUpdates = []string{
platform("core/portmaster-core.exe"),
platform("kext/portmaster-kext.dll"),
platform("kext/portmaster-kext.sys"),
platform("start/portmaster-start.exe"),
platform("app/portmaster-app.exe"),
platform("notifier/portmaster-notifier.exe"),
platform("notifier/portmaster-snoretoast.exe"),
}
@ -35,7 +63,6 @@ func downloadUpdates() error {
registry.MandatoryUpdates = []string{
platform("core/portmaster-core"),
platform("start/portmaster-start"),
platform("app/portmaster-app"),
platform("notifier/portmaster-notifier"),
}
}
@ -43,10 +70,64 @@ func downloadUpdates() error {
// add updates that we require on all platforms.
registry.MandatoryUpdates = append(
registry.MandatoryUpdates,
"all/ui/modules/base.zip",
platform("app/portmaster-app.zip"),
"all/ui/modules/portmaster.zip",
)
log.SetLogLevel(log.InfoLevel)
// Add assets that need unpacking.
registry.AutoUnpack = []string{
platform("app/portmaster-app.zip"),
}
// logging is configured as a persistent pre-run method inherited from
// the root command but since we don't use run.Run() we need to start
// logging ourselves.
log.SetLogLevel(log.TraceLevel)
err := log.Start()
if err != nil {
fmt.Printf("failed to start logging: %s\n", err)
}
defer log.Shutdown()
if reset {
// Delete storage.
err = os.RemoveAll(registry.StorageDir().Path)
if err != nil {
return fmt.Errorf("failed to reset update dir: %s", err)
}
err = registry.StorageDir().Ensure()
if err != nil {
return fmt.Errorf("failed to create update dir: %s", err)
}
// Reset registry state.
registry.Reset()
}
// Update all indexes.
err = registry.UpdateIndexes(context.TODO())
if err != nil {
return err
}
// Download all required updates.
err = registry.DownloadUpdates(context.TODO())
if err != nil {
return err
}
// Select versions and unpack the selected.
registry.SelectVersions()
err = registry.UnpackResources()
if err != nil {
return fmt.Errorf("failed to unpack resources: %s", err)
}
return nil
}
func purge() error {
log.SetLogLevel(log.TraceLevel)
// logging is configured as a persistent pre-run method inherited from
// the root command but since we don't use run.Run() we need to start
@ -57,7 +138,8 @@ func downloadUpdates() error {
}
defer log.Shutdown()
return registry.DownloadUpdates(context.TODO())
registry.Purge(3)
return nil
}
func platform(identifier string) string {

View file

@ -20,9 +20,9 @@ var versionCmd = &cobra.Command{
Args: cobra.NoArgs,
PersistentPreRunE: func(*cobra.Command, []string) error {
if showAllVersions {
// if we are going to show all component versions
// we need the dataroot to be configured.
if err := configureDataRoot(false); err != nil {
// If we are going to show all component versions,
// we need the registry to be configured.
if err := configureRegistry(false); err != nil {
return err
}
}

2
cmds/updatemgr/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
updatemgr
updatemgr.exe

20
cmds/updatemgr/confirm.go Normal file
View file

@ -0,0 +1,20 @@
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
func confirm(msg string) bool {
fmt.Printf("%s: [y|n] ", msg)
scanner := bufio.NewScanner(os.Stdin)
ok := scanner.Scan()
if ok && strings.TrimSpace(scanner.Text()) == "y" {
return true
}
return false
}

View file

@ -12,7 +12,7 @@ import (
var registry *updater.ResourceRegistry
var rootCmd = &cobra.Command{
Use: "uptool",
Use: "updatemgr",
Short: "A simple tool to assist in the update and release process",
Args: cobra.ExactArgs(1),
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

58
cmds/updatemgr/purge.go Normal file
View file

@ -0,0 +1,58 @@
package main
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/safing/portbase/log"
"github.com/safing/portbase/updater"
)
func init() {
rootCmd.AddCommand(purgeCmd)
}
var purgeCmd = &cobra.Command{
Use: "purge",
Short: "Remove old resource versions that are superseded by at least three versions",
Args: cobra.ExactArgs(1),
RunE: purge,
}
func purge(cmd *cobra.Command, args []string) error {
log.SetLogLevel(log.TraceLevel)
err := log.Start()
if err != nil {
fmt.Printf("failed to start logging: %s\n", err)
}
defer log.Shutdown()
registry.AddIndex(updater.Index{
Path: "stable.json",
Stable: true,
Beta: false,
})
registry.AddIndex(updater.Index{
Path: "beta.json",
Stable: false,
Beta: true,
})
err = registry.LoadIndexes(context.TODO())
if err != nil {
return err
}
err = scanStorage()
if err != nil {
return err
}
registry.SelectVersions()
registry.Purge(3)
return nil
}

122
cmds/updatemgr/staging.go Normal file
View file

@ -0,0 +1,122 @@
package main
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/safing/portbase/updater"
"github.com/spf13/cobra"
)
var (
stageReset bool
)
func init() {
rootCmd.AddCommand(stageCmd)
stageCmd.Flags().BoolVar(&stageReset, "reset", false, "Reset staging assets")
}
var stageCmd = &cobra.Command{
Use: "stage",
Short: "Stage scans the specified directory and loads the indexes - it then creates a staging index with all files newer than the stable and beta indexes",
Args: cobra.ExactArgs(1),
RunE: stage,
}
func stage(cmd *cobra.Command, args []string) error {
registry.AddIndex(updater.Index{
Path: "stable.json",
Stable: true,
Beta: false,
})
registry.AddIndex(updater.Index{
Path: "beta.json",
Stable: false,
Beta: true,
})
err := registry.LoadIndexes(context.TODO())
if err != nil {
return err
}
err = scanStorage()
if err != nil {
return err
}
// Check if we want to reset staging instead.
if stageReset {
for _, stagedPath := range exportStaging(true) {
err = os.Remove(stagedPath)
if err != nil {
return err
}
}
return nil
}
// Export all staged versions and format them.
stagingData, err := json.MarshalIndent(exportStaging(false), "", " ")
if err != nil {
return err
}
// Build destination path.
stagingIndexFilePath := filepath.Join(registry.StorageDir().Path, "staging.json")
// Print preview.
fmt.Printf("staging (%s):\n", stagingIndexFilePath)
fmt.Println(string(stagingData))
// Ask for confirmation.
if !confirm("\nDo you want to write this index?") {
fmt.Println("aborted...")
return nil
}
// Write new index to disk.
err = ioutil.WriteFile(stagingIndexFilePath, stagingData, 0o644) //nolint:gosec // 0644 is intended
if err != nil {
return err
}
fmt.Printf("written %s\n", stagingIndexFilePath)
return nil
}
func exportStaging(storagePath bool) map[string]string {
// Sort all versions.
registry.SetBeta(false)
registry.SelectVersions()
export := registry.Export()
// Go through all versions and save the highest version, if not stable or beta.
versions := make(map[string]string)
for _, rv := range export {
// Get highest version.
v := rv.Versions[0]
// Do not take stable or beta releases into account.
if v.StableRelease || v.BetaRelease {
continue
}
// Add highest version to staging
if storagePath {
rv.SelectedVersion = v
versions[rv.Identifier] = rv.GetFile().Path()
} else {
versions[rv.Identifier] = v.VersionNumber
}
}
return versions
}

77
cmds/updatemgr/update.go Normal file
View file

@ -0,0 +1,77 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(updateCmd)
}
var updateCmd = &cobra.Command{
Use: "update",
Short: "Update scans the specified directory and registry the index and symlink structure",
Args: cobra.ExactArgs(1),
RunE: update,
}
func update(cmd *cobra.Command, args []string) error {
err := scanStorage()
if err != nil {
return err
}
// Export versions.
betaData, err := json.MarshalIndent(exportSelected(true), "", " ")
if err != nil {
return err
}
stableData, err := json.MarshalIndent(exportSelected(false), "", " ")
if err != nil {
return err
}
// Build destination paths.
betaIndexFilePath := filepath.Join(registry.StorageDir().Path, "beta.json")
stableIndexFilePath := filepath.Join(registry.StorageDir().Path, "stable.json")
// Print previews.
fmt.Printf("beta (%s):\n", betaIndexFilePath)
fmt.Println(string(betaData))
fmt.Printf("\nstable: (%s)\n", stableIndexFilePath)
fmt.Println(string(stableData))
// Ask for confirmation.
if !confirm("\nDo you want to write these new indexes (and update latest symlinks)?") {
fmt.Println("aborted...")
return nil
}
// Write indexes.
err = ioutil.WriteFile(betaIndexFilePath, betaData, 0o644) //nolint:gosec // 0644 is intended
if err != nil {
return err
}
fmt.Printf("written %s\n", betaIndexFilePath)
err = ioutil.WriteFile(stableIndexFilePath, stableData, 0o644) //nolint:gosec // 0644 is intended
if err != nil {
return err
}
fmt.Printf("written %s\n", stableIndexFilePath)
// Create symlinks to latest stable versions.
symlinksDir := registry.StorageDir().ChildDir("latest", 0o755)
err = registry.CreateSymlinks(symlinksDir)
if err != nil {
return err
}
fmt.Printf("updated stable symlinks in %s\n", symlinksDir.Path)
return nil
}

View file

@ -1 +0,0 @@
uptool

View file

@ -1,64 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"github.com/spf13/cobra"
)
func init() {
rootCmd.AddCommand(updateCmd)
}
var updateCmd = &cobra.Command{
Use: "update",
Short: "Update scans the specified directory and registry the index and symlink structure",
Args: cobra.ExactArgs(1),
RunE: update,
}
func update(cmd *cobra.Command, args []string) error {
err := scanStorage()
if err != nil {
return err
}
// export beta
data, err := json.MarshalIndent(exportSelected(true), "", " ")
if err != nil {
return err
}
// print
fmt.Println("beta:")
fmt.Println(string(data))
// write index
err = ioutil.WriteFile(filepath.Join(registry.StorageDir().Dir, "beta.json"), data, 0o644) //nolint:gosec // 0644 is intended
if err != nil {
return err
}
// export stable
data, err = json.MarshalIndent(exportSelected(false), "", " ")
if err != nil {
return err
}
// print
fmt.Println("\nstable:")
fmt.Println(string(data))
// write index
err = ioutil.WriteFile(filepath.Join(registry.StorageDir().Dir, "stable.json"), data, 0o644) //nolint:gosec // 0644 is intended
if err != nil {
return err
}
// create symlinks
err = registry.CreateSymlinks(registry.StorageDir().ChildDir("latest", 0o755))
if err != nil {
return err
}
fmt.Println("\nstable symlinks created")
return nil
}

View file

@ -9,8 +9,6 @@ import (
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/info"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/modules/subsystems"
"github.com/safing/portbase/notifications"
)
// Default Values (changeable for testing)
@ -67,11 +65,5 @@ func globalPrep() error {
// set api listen address
api.SetDefaultAPIListenAddress(DefaultAPIListenAddress)
// set notification persistence
notifications.SetPersistenceBasePath("core:notifications")
// set subsystem status dir
subsystems.SetDatabaseKeySpace("core:status/subsystems")
return nil
}

View file

@ -29,26 +29,32 @@ func registerConfig() error {
err := config.Register(&config.Option{
Name: "Development Mode",
Key: CfgDevModeKey,
Description: "In Development Mode security restrictions are lifted/softened to enable easier access to Portmaster for debugging and testing purposes.",
Order: 127,
Description: "In Development Mode, security restrictions are lifted/softened to enable easier access to Portmaster for debugging and testing purposes.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: defaultDevMode,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: 512,
config.CategoryAnnotation: "Development",
},
})
if err != nil {
return err
}
err = config.Register(&config.Option{
Name: "Use System Notifications",
Name: "Desktop Notifications",
Key: CfgUseSystemNotificationsKey,
Description: "Send notifications to your operating system's notification system. When this setting is turned off, notifications will only be visible in the Portmaster App. This affects both alerts from the Portmaster and questions from the Privacy Filter.",
Order: 32,
Description: "In addition to showing notifications in the Portmaster App, also send them to the Desktop. This requires the Portmaster Notifier to be running.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: true, // TODO: turn off by default on unsupported systems
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -15,
config.CategoryAnnotation: "User Interface",
},
})
if err != nil {
return err

View file

@ -8,6 +8,10 @@ import (
"github.com/safing/portmaster/profile/endpoints"
)
var (
resolverFilterLists = []string{"17-DNS"}
)
// PreventBypassing checks if the connection should be denied or permitted
// based on some bypass protection checks.
func PreventBypassing(conn *network.Connection) (endpoints.EPResult, string, nsutil.Responder) {
@ -18,5 +22,11 @@ func PreventBypassing(conn *network.Connection) (endpoints.EPResult, string, nsu
nsutil.NxDomain()
}
if conn.Entity.MatchLists(resolverFilterLists) {
return endpoints.Denied,
"blocked rogue connection to DNS resolver",
nsutil.ZeroIP()
}
return endpoints.NoMatch, "", nil
}

View file

@ -11,14 +11,14 @@ var (
CfgOptionEnableFilterKey = "filter/enable"
CfgOptionAskWithSystemNotificationsKey = "filter/askWithSystemNotifications"
CfgOptionAskWithSystemNotificationsOrder = 2
cfgOptionAskWithSystemNotificationsOrder = 2
CfgOptionAskTimeoutKey = "filter/askTimeout"
CfgOptionAskTimeoutOrder = 3
cfgOptionAskTimeoutOrder = 3
askTimeout config.IntOption
CfgOptionPermanentVerdictsKey = "filter/permanentVerdicts"
CfgOptionPermanentVerdictsOrder = 128
cfgOptionPermanentVerdictsOrder = 96
permanentVerdicts config.BoolOption
devMode config.BoolOption
@ -29,12 +29,15 @@ func registerConfig() error {
err := config.Register(&config.Option{
Name: "Permanent Verdicts",
Key: CfgOptionPermanentVerdictsKey,
Description: "With permanent verdicts, control of a connection is fully handed back to the OS after the initial decision in order to drastically increase performance.",
Order: CfgOptionPermanentVerdictsOrder,
Description: "The Portmaster's system integration intercepts every single packet. Usually the first packet is enough for the Portmaster to set the verdict for a connection - ie. to allow or deny it. Making these verdicts permanent means that the Portmaster will tell the system integration that is does not want to see any more packets of that single connection. This brings a major performance increase.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionPermanentVerdictsOrder,
config.CategoryAnnotation: "Advanced",
},
})
if err != nil {
return err
@ -42,33 +45,42 @@ func registerConfig() error {
permanentVerdicts = config.Concurrent.GetAsBool(CfgOptionPermanentVerdictsKey, true)
err = config.Register(&config.Option{
Name: "Ask with System Notifications",
Name: "Prompt Desktop Notifications",
Key: CfgOptionAskWithSystemNotificationsKey,
Description: `Ask about connections using your operating system's notification system. For this to be enabled, the setting "Use System Notifications" must enabled too. This only affects questions from the Privacy Filter, and does not affect alerts from the Portmaster.`,
Order: CfgOptionAskWithSystemNotificationsOrder,
Description: `In addition to showing prompt notifications in the Portmaster App, also send them to the Desktop. This requires the Portmaster Notifier to be running. Requires Desktop Notifications to be enabled.`,
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionAskWithSystemNotificationsOrder,
config.CategoryAnnotation: "General",
config.RequiresAnnotation: config.ValueRequirement{
Key: core.CfgUseSystemNotificationsKey,
Value: true,
},
},
})
if err != nil {
return err
}
err = config.Register(&config.Option{
Name: "Timeout for Ask Notifications",
Name: "Prompt Timeout",
Key: CfgOptionAskTimeoutKey,
Description: "Amount of time (in seconds) how long the Portmaster will wait for a response when prompting about a connection via a notification. Please note that system notifications might not respect this or have it's own limits.",
Order: CfgOptionAskTimeoutOrder,
Description: "How long the Portmaster will wait for a reply to a prompt notification. Please note that Desktop Notifications might not respect this or have their own limits.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: 60,
DefaultValue: 20,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionAskTimeoutOrder,
config.UnitAnnotation: "seconds",
config.CategoryAnnotation: "General",
},
})
if err != nil {
return err
}
askTimeout = config.Concurrent.GetAsInt(CfgOptionAskTimeoutKey, 60)
askTimeout = config.Concurrent.GetAsInt(CfgOptionAskTimeoutKey, 15)
devMode = config.Concurrent.GetAsBool(core.CfgDevModeKey, false)
apiListenAddress = config.GetAsString(api.CfgDefaultListenAddressKey, "")

View file

@ -17,13 +17,14 @@ import (
"github.com/safing/portmaster/resolver"
)
func filterDNSSection(entries []dns.RR, p *profile.LayeredProfile, scope int8) ([]dns.RR, []string, int) {
func filterDNSSection(entries []dns.RR, p *profile.LayeredProfile, scope int8) ([]dns.RR, []string, int, string) {
goodEntries := make([]dns.RR, 0, len(entries))
filteredRecords := make([]string, 0, len(entries))
// keeps track of the number of valid and allowed
// A and AAAA records.
var allowedAddressRecords int
var interveningOptionKey string
for _, rr := range entries {
// get IP and classification
@ -45,10 +46,12 @@ func filterDNSSection(entries []dns.RR, p *profile.LayeredProfile, scope int8) (
case classification == netutils.HostLocal:
// No DNS should return localhost addresses
filteredRecords = append(filteredRecords, rr.String())
interveningOptionKey = profile.CfgOptionRemoveOutOfScopeDNSKey
continue
case scope == netutils.Global && (classification == netutils.SiteLocal || classification == netutils.LinkLocal):
// No global DNS should return LAN addresses
filteredRecords = append(filteredRecords, rr.String())
interveningOptionKey = profile.CfgOptionRemoveOutOfScopeDNSKey
continue
}
}
@ -58,12 +61,15 @@ func filterDNSSection(entries []dns.RR, p *profile.LayeredProfile, scope int8) (
switch {
case p.BlockScopeInternet() && classification == netutils.Global:
filteredRecords = append(filteredRecords, rr.String())
interveningOptionKey = profile.CfgOptionBlockScopeInternetKey
continue
case p.BlockScopeLAN() && (classification == netutils.SiteLocal || classification == netutils.LinkLocal):
filteredRecords = append(filteredRecords, rr.String())
interveningOptionKey = profile.CfgOptionBlockScopeLANKey
continue
case p.BlockScopeLocal() && classification == netutils.HostLocal:
filteredRecords = append(filteredRecords, rr.String())
interveningOptionKey = profile.CfgOptionBlockScopeLocalKey
continue
}
@ -75,7 +81,7 @@ func filterDNSSection(entries []dns.RR, p *profile.LayeredProfile, scope int8) (
goodEntries = append(goodEntries, rr)
}
return goodEntries, filteredRecords, allowedAddressRecords
return goodEntries, filteredRecords, allowedAddressRecords, interveningOptionKey
}
func filterDNSResponse(conn *network.Connection, rrCache *resolver.RRCache) *resolver.RRCache {
@ -97,18 +103,30 @@ func filterDNSResponse(conn *network.Connection, rrCache *resolver.RRCache) *res
var filteredRecords []string
var validIPs int
var interveningOptionKey string
rrCache.Answer, filteredRecords, validIPs = filterDNSSection(rrCache.Answer, p, rrCache.ServerScope)
rrCache.Answer, filteredRecords, validIPs, interveningOptionKey = filterDNSSection(rrCache.Answer, p, rrCache.ServerScope)
rrCache.FilteredEntries = append(rrCache.FilteredEntries, filteredRecords...)
// we don't count the valid IPs in the extra section
rrCache.Extra, filteredRecords, _ = filterDNSSection(rrCache.Extra, p, rrCache.ServerScope)
rrCache.Extra, filteredRecords, _, _ = filterDNSSection(rrCache.Extra, p, rrCache.ServerScope)
rrCache.FilteredEntries = append(rrCache.FilteredEntries, filteredRecords...)
if len(rrCache.FilteredEntries) > 0 {
rrCache.Filtered = true
if validIPs == 0 {
conn.Block("no addresses returned for this domain are permitted")
switch interveningOptionKey {
case profile.CfgOptionBlockScopeInternetKey:
conn.Block("Internet access blocked", interveningOptionKey)
case profile.CfgOptionBlockScopeLANKey:
conn.Block("LAN access blocked", interveningOptionKey)
case profile.CfgOptionBlockScopeLocalKey:
conn.Block("Localhost access blocked", interveningOptionKey)
case profile.CfgOptionRemoveOutOfScopeDNSKey:
conn.Block("DNS global/private split-view violation", interveningOptionKey)
default:
conn.Block("DNS response only contained to-be-blocked IPs", interveningOptionKey)
}
// If all entries are filtered, this could mean that these are broken/bogus resource records.
if rrCache.Expired() {
@ -151,12 +169,6 @@ func DecideOnResolvedDNS(
rrCache *resolver.RRCache,
) *resolver.RRCache {
// check profile
if checkProfileExists(ctx, conn, nil) {
// returns true if check triggered
return nil
}
// special grant for connectivity domains
if checkConnectivityDomain(ctx, conn, nil) {
// returns true if check triggered
@ -186,14 +198,14 @@ func mayBlockCNAMEs(ctx context.Context, conn *network.Connection) bool {
result, reason := conn.Process().Profile().MatchEndpoint(ctx, conn.Entity)
if result == endpoints.Denied {
conn.BlockWithContext(reason.String(), reason.Context())
conn.BlockWithContext(reason.String(), profile.CfgOptionFilterCNAMEKey, reason.Context())
return true
}
if result == endpoints.NoMatch {
result, reason = conn.Process().Profile().MatchFilterLists(ctx, conn.Entity)
if result == endpoints.Denied {
conn.BlockWithContext(reason.String(), reason.Context())
conn.BlockWithContext(reason.String(), profile.CfgOptionFilterCNAMEKey, reason.Context())
return true
}
}

View file

@ -24,13 +24,16 @@ func init() {
filterModule,
"config:filter/",
&config.Option{
Name: "Enable Privacy Filter",
Name: "Privacy Filter",
Key: CfgOptionEnableFilterKey,
Description: "Enable the Privacy Filter Subsystem to filter DNS queries and network requests.",
Description: "Enable the DNS and Network Filter.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelBeta,
DefaultValue: true,
Annotations: config.Annotations{
config.CategoryAnnotation: "General",
},
},
)
}

View file

@ -85,11 +85,11 @@ func RunInspectors(conn *network.Connection, pkt packet.Packet) (network.Verdict
verdict = network.VerdictDrop
continueInspection = true
case BLOCK_CONN:
conn.SetVerdict(network.VerdictBlock, "", nil)
conn.SetVerdict(network.VerdictBlock, "", "", nil)
verdict = conn.Verdict
activeInspectors[key] = true
case DROP_CONN:
conn.SetVerdict(network.VerdictDrop, "", nil)
conn.SetVerdict(network.VerdictDrop, "", "", nil)
verdict = conn.Verdict
activeInspectors[key] = true
case STOP_INSPECTING:

View file

@ -2,6 +2,7 @@ package firewall
import (
"context"
"net"
"os"
"sync/atomic"
"time"
@ -29,6 +30,9 @@ var (
packetsBlocked = new(uint64)
packetsDropped = new(uint64)
packetsFailed = new(uint64)
blockedIPv4 = net.IPv4(0, 0, 0, 17)
blockedIPv6 = net.ParseIP("::17")
)
func init() {
@ -84,6 +88,11 @@ func handlePacket(ctx context.Context, pkt packet.Packet) {
func fastTrackedPermit(pkt packet.Packet) (handled bool) {
meta := pkt.Info()
// Check for blocked IP
if meta.Dst.Equal(blockedIPv4) || meta.Dst.Equal(blockedIPv6) {
_ = pkt.PermanentBlock()
}
switch meta.Protocol {
case packet.ICMP:
// Always permit ICMP.
@ -171,7 +180,7 @@ func initialHandler(conn *network.Connection, pkt packet.Packet) {
ps := getPortStatusAndMarkUsed(pkt.Info().LocalPort())
if ps.isMe {
// approve
conn.Accept("internally approved")
conn.Accept("connection by Portmaster", noReasonOptionKey)
conn.Internal = true
// finish
conn.StopFirewallHandler()
@ -191,7 +200,7 @@ func initialHandler(conn *network.Connection, pkt packet.Packet) {
// check if filtering is enabled
if !filterEnabled() {
conn.Inspecting = false
conn.SetVerdict(network.VerdictAccept, "privacy filter disabled", nil)
conn.Accept("privacy filter disabled", noReasonOptionKey)
conn.StopFirewallHandler()
issueVerdict(conn, pkt, 0, true)
return

View file

@ -70,7 +70,7 @@ func handleWindowsDNSCache() {
func notifyDisableDNSCache() {
(&notifications.Notification{
ID: "windows-disable-dns-cache",
EventID: "interception:windows-disable-dns-cache",
Message: "The Portmaster needs the Windows Service \"DNS Client\" (dnscache) to be disabled for best effectiveness.",
Type: notifications.Warning,
}).Save()
@ -78,7 +78,7 @@ func notifyDisableDNSCache() {
func notifyRebootRequired() {
(&notifications.Notification{
ID: "windows-dnscache-reboot-required",
EventID: "interception:windows-dnscache-reboot-required",
Message: "Please restart your system to complete Portmaster integration.",
Type: notifications.Warning,
}).Save()

View file

@ -122,6 +122,12 @@ func (pkt *packet) Accept() error {
}
func (pkt *packet) Block() error {
if pkt.Info().Protocol == pmpacket.ICMP {
// ICMP packets attributed to a blocked connection are always allowed, as
// rejection ICMP packets will have the same mark as the blocked
// connection. This is why we need to drop blocked ICMP packets instead.
return pkt.mark(MarkDrop)
}
return pkt.mark(MarkBlock)
}
@ -134,6 +140,12 @@ func (pkt *packet) PermanentAccept() error {
}
func (pkt *packet) PermanentBlock() error {
if pkt.Info().Protocol == pmpacket.ICMP {
// ICMP packets attributed to a blocked connection are always allowed, as
// rejection ICMP packets will have the same mark as the blocked
// connection. This is why we need to drop blocked ICMP packets instead.
return pkt.mark(MarkDropAlways)
}
return pkt.mark(MarkBlockAlways)
}

View file

@ -60,10 +60,18 @@ func init() {
"filter C17 -m mark --mark 0 -j DROP",
"filter C17 -m mark --mark 1700 -j RETURN",
// Accepting ICMP packets with mark 1701 is required for rejecting to work,
// as the rejection ICMP packet will have the same mark. Blocked ICMP
// packets will always result in a drop within the Portmaster.
"filter C17 -m mark --mark 1701 -p icmp -j RETURN",
"filter C17 -m mark --mark 1701 -j REJECT --reject-with icmp-host-prohibited",
"filter C17 -m mark --mark 1702 -j DROP",
"filter C17 -j CONNMARK --save-mark",
"filter C17 -m mark --mark 1710 -j RETURN",
// Accepting ICMP packets with mark 1711 is required for rejecting to work,
// as the rejection ICMP packet will have the same mark. Blocked ICMP
// packets will always result in a drop within the Portmaster.
"filter C17 -m mark --mark 1711 -p icmp -j RETURN",
"filter C17 -m mark --mark 1711 -j REJECT --reject-with icmp-host-prohibited",
"filter C17 -m mark --mark 1712 -j DROP",
"filter C17 -m mark --mark 1717 -j RETURN",

View file

@ -36,44 +36,83 @@ import (
// 3. DecideOnConnection
// is called with the first packet of a network connection.
const noReasonOptionKey = ""
type deciderFn func(context.Context, *network.Connection, packet.Packet) bool
var deciders = []deciderFn{
checkPortmasterConnection,
checkSelfCommunication,
checkConnectionType,
checkConnectionScope,
checkEndpointLists,
checkConnectivityDomain,
checkBypassPrevention,
checkFilterLists,
dropInbound,
checkDomainHeuristics,
checkAutoPermitRelated,
}
// DecideOnConnection makes a decision about a connection.
// When called, the connection and profile is already locked.
func DecideOnConnection(ctx context.Context, conn *network.Connection, pkt packet.Packet) {
// update profiles and check if communication needs reevaluation
if conn.UpdateAndCheck() {
// Check if we have a process and profile.
layeredProfile := conn.Process().Profile()
if layeredProfile == nil {
conn.Deny("unknown process or profile", noReasonOptionKey)
return
}
// Check if the layered profile needs updating.
if layeredProfile.NeedsUpdate() {
// Update revision counter in connection.
conn.ProfileRevisionCounter = layeredProfile.Update()
conn.SaveWhenFinished()
// Reset verdict for connection.
log.Tracer(ctx).Infof("filter: re-evaluating verdict on %s", conn)
conn.Verdict = network.VerdictUndecided
// Reset entity if it exists.
if conn.Entity != nil {
conn.Entity.ResetLists()
}
}
var deciders = []func(context.Context, *network.Connection, packet.Packet) bool{
checkPortmasterConnection,
checkSelfCommunication,
checkProfileExists,
checkConnectionType,
checkConnectivityDomain,
checkConnectionScope,
checkEndpointLists,
checkBypassPrevention,
checkFilterLists,
checkInbound,
checkDomainHeuristics,
checkDefaultPermit,
checkAutoPermitRelated,
checkDefaultAction,
// Run all deciders and return if they came to a conclusion.
done, defaultAction := runDeciders(ctx, conn, pkt)
if done {
return
}
// Deciders did not conclude, use default action.
switch defaultAction {
case profile.DefaultActionPermit:
conn.Accept("default permit", profile.CfgOptionDefaultActionKey)
case profile.DefaultActionAsk:
prompt(ctx, conn, pkt)
default:
conn.Deny("default block", profile.CfgOptionDefaultActionKey)
}
}
func runDeciders(ctx context.Context, conn *network.Connection, pkt packet.Packet) (done bool, defaultAction uint8) {
layeredProfile := conn.Process().Profile()
// Read-lock all the profiles.
layeredProfile.LockForUsage()
defer layeredProfile.UnlockForUsage()
// Go through all deciders, return if one sets an action.
for _, decider := range deciders {
if decider(ctx, conn, pkt) {
return
return true, profile.DefaultActionNotSet
}
}
// DefaultAction == DefaultActionBlock
conn.Deny("endpoint is not allowed (default=block)")
// Return the default action.
return false, layeredProfile.DefaultAction()
}
// checkPortmasterConnection allows all connection that originate from
@ -82,7 +121,7 @@ func checkPortmasterConnection(ctx context.Context, conn *network.Connection, pk
// grant self
if conn.Process().Pid == os.Getpid() {
log.Tracer(ctx).Infof("filter: granting own connection %s", conn)
conn.Verdict = network.VerdictAccept
conn.Accept("connection by Portmaster", noReasonOptionKey)
conn.Internal = true
return true
}
@ -115,7 +154,7 @@ func checkSelfCommunication(ctx context.Context, conn *network.Connection, pkt p
if err != nil {
log.Tracer(ctx).Warningf("filter: failed to find load local peer process with PID %d: %s", otherPid, err)
} else if otherProcess.Pid == conn.Process().Pid {
conn.Accept("connection to self")
conn.Accept("process internal connection", noReasonOptionKey)
conn.Internal = true
return true
}
@ -126,14 +165,6 @@ func checkSelfCommunication(ctx context.Context, conn *network.Connection, pkt p
return false
}
func checkProfileExists(_ context.Context, conn *network.Connection, _ packet.Packet) bool {
if conn.Process().Profile() == nil {
conn.Block("unknown process or profile")
return true
}
return false
}
func checkEndpointLists(ctx context.Context, conn *network.Connection, _ packet.Packet) bool {
var result endpoints.EPResult
var reason endpoints.Reason
@ -142,17 +173,20 @@ func checkEndpointLists(ctx context.Context, conn *network.Connection, _ packet.
p := conn.Process().Profile()
// check endpoints list
var optionKey string
if conn.Inbound {
result, reason = p.MatchServiceEndpoint(ctx, conn.Entity)
optionKey = profile.CfgOptionServiceEndpointsKey
} else {
result, reason = p.MatchEndpoint(ctx, conn.Entity)
optionKey = profile.CfgOptionEndpointsKey
}
switch result {
case endpoints.Denied:
conn.DenyWithContext(reason.String(), reason.Context())
conn.DenyWithContext(reason.String(), optionKey, reason.Context())
return true
case endpoints.Permitted:
conn.AcceptWithContext(reason.String(), reason.Context())
conn.AcceptWithContext(reason.String(), optionKey, reason.Context())
return true
}
@ -167,16 +201,16 @@ func checkConnectionType(ctx context.Context, conn *network.Connection, _ packet
case network.IncomingLAN, network.IncomingInternet, network.IncomingInvalid:
if p.BlockInbound() {
if conn.Scope == network.IncomingHost {
conn.Block("inbound connections blocked")
conn.Block("inbound connections blocked", profile.CfgOptionBlockInboundKey)
} else {
conn.Drop("inbound connections blocked")
conn.Drop("inbound connections blocked", profile.CfgOptionBlockInboundKey)
}
return true
}
case network.PeerInternet:
// BlockP2P only applies to connections to the Internet
if p.BlockP2P() {
conn.Block("direct connections (P2P) blocked")
conn.Block("direct connections (P2P) blocked", profile.CfgOptionBlockP2PKey)
return true
}
}
@ -202,7 +236,7 @@ func checkConnectivityDomain(_ context.Context, conn *network.Connection, _ pack
case netenv.IsConnectivityDomain(conn.Entity.Domain):
// Special grant!
conn.Accept("special grant for connectivity domain during network bootstrap")
conn.Accept("special grant for connectivity domain during network bootstrap", noReasonOptionKey)
return true
default:
@ -221,29 +255,29 @@ func checkConnectionScope(_ context.Context, conn *network.Connection, _ packet.
switch classification {
case netutils.Global, netutils.GlobalMulticast:
if p.BlockScopeInternet() {
conn.Deny("Internet access blocked") // Block Outbound / Drop Inbound
conn.Deny("Internet access blocked", profile.CfgOptionBlockScopeInternetKey) // Block Outbound / Drop Inbound
return true
}
case netutils.SiteLocal, netutils.LinkLocal, netutils.LocalMulticast:
if p.BlockScopeLAN() {
conn.Block("LAN access blocked") // Block Outbound / Drop Inbound
conn.Block("LAN access blocked", profile.CfgOptionBlockScopeLANKey) // Block Outbound / Drop Inbound
return true
}
case netutils.HostLocal:
if p.BlockScopeLocal() {
conn.Block("Localhost access blocked") // Block Outbound / Drop Inbound
conn.Block("Localhost access blocked", profile.CfgOptionBlockScopeLocalKey) // Block Outbound / Drop Inbound
return true
}
default: // netutils.Invalid
conn.Deny("invalid IP") // Block Outbound / Drop Inbound
conn.Deny("invalid IP", noReasonOptionKey) // Block Outbound / Drop Inbound
return true
}
} else if conn.Entity.Domain != "" {
// DNS Query
// DNS is expected to resolve to LAN or Internet addresses
// TODO: handle domains mapped to localhost
// This is a DNS Request.
// DNS is expected to resolve to LAN or Internet addresses.
// Localhost queries are immediately responded to by the nameserver.
if p.BlockScopeInternet() && p.BlockScopeLAN() {
conn.Block("Internet and LAN access blocked")
conn.Block("Internet and LAN access blocked", profile.CfgOptionBlockScopeInternetKey)
return true
}
}
@ -256,10 +290,10 @@ func checkBypassPrevention(_ context.Context, conn *network.Connection, _ packet
result, reason, reasonCtx := PreventBypassing(conn)
switch result {
case endpoints.Denied:
conn.BlockWithContext("bypass prevention: "+reason, reasonCtx)
conn.BlockWithContext("bypass prevention: "+reason, profile.CfgOptionPreventBypassingKey, reasonCtx)
return true
case endpoints.Permitted:
conn.AcceptWithContext("bypass prevention: "+reason, reasonCtx)
conn.AcceptWithContext("bypass prevention: "+reason, profile.CfgOptionPreventBypassingKey, reasonCtx)
return true
case endpoints.NoMatch:
}
@ -274,7 +308,7 @@ func checkFilterLists(ctx context.Context, conn *network.Connection, pkt packet.
result, reason := p.MatchFilterLists(ctx, conn.Entity)
switch result {
case endpoints.Denied:
conn.DenyWithContext(reason.String(), reason.Context())
conn.DenyWithContext(reason.String(), profile.CfgOptionFilterListsKey, reason.Context())
return true
case endpoints.NoMatch:
// nothing to do
@ -315,7 +349,7 @@ func checkDomainHeuristics(ctx context.Context, conn *network.Connection, _ pack
domainToCheck,
score,
)
conn.Block("possible DGA domain commonly used by malware")
conn.Block("possible DGA domain commonly used by malware", profile.CfgOptionDomainHeuristicsKey)
return true
}
log.Tracer(ctx).Tracef("filter: LMS score of eTLD+1 %s is %.2f", etld1, score)
@ -335,7 +369,7 @@ func checkDomainHeuristics(ctx context.Context, conn *network.Connection, _ pack
domainToCheck,
score,
)
conn.Block("possible data tunnel for covert communication and protection bypassing")
conn.Block("possible data tunnel for covert communication and protection bypassing", profile.CfgOptionDomainHeuristicsKey)
return true
}
log.Tracer(ctx).Tracef("filter: LMS score of entire domain is %.2f", score)
@ -344,20 +378,10 @@ func checkDomainHeuristics(ctx context.Context, conn *network.Connection, _ pack
return false
}
func checkInbound(_ context.Context, conn *network.Connection, _ packet.Packet) bool {
func dropInbound(_ context.Context, conn *network.Connection, _ packet.Packet) bool {
// implicit default=block for inbound
if conn.Inbound {
conn.Drop("endpoint is not allowed (incoming is always default=block)")
return true
}
return false
}
func checkDefaultPermit(_ context.Context, conn *network.Connection, _ packet.Packet) bool {
// check default action
p := conn.Process().Profile()
if p.DefaultAction() == profile.DefaultActionPermit {
conn.Accept("endpoint is not blocked (default=permit)")
conn.Drop("incoming connection blocked by default", profile.CfgOptionServiceEndpointsKey)
return true
}
return false
@ -365,22 +389,24 @@ func checkDefaultPermit(_ context.Context, conn *network.Connection, _ packet.Pa
func checkAutoPermitRelated(_ context.Context, conn *network.Connection, _ packet.Packet) bool {
p := conn.Process().Profile()
if !p.DisableAutoPermit() {
related, reason := checkRelation(conn)
if related {
conn.Accept(reason)
return true
}
}
return false
}
func checkDefaultAction(_ context.Context, conn *network.Connection, pkt packet.Packet) bool {
p := conn.Process().Profile()
if p.DefaultAction() == profile.DefaultActionAsk {
prompt(conn, pkt)
// Auto permit is disabled for default action permit.
if p.DefaultAction() == profile.DefaultActionPermit {
return false
}
// Check if auto permit is disabled.
if p.DisableAutoPermit() {
return false
}
// Check for relation to auto permit.
related, reason := checkRelation(conn)
if related {
conn.Accept(reason, profile.CfgOptionDisableAutoPermitKey)
return true
}
return false
}
@ -426,7 +452,7 @@ matchLoop:
}
if related {
reason = fmt.Sprintf("domain is related to process: %s is related to %s", domainElement, processElement)
reason = fmt.Sprintf("auto permitted: domain is related to process: %s is related to %s", domainElement, processElement)
}
return related, reason
}

View file

@ -1,172 +1,266 @@
package firewall
import (
"context"
"fmt"
"sync"
"time"
"github.com/safing/portmaster/profile/endpoints"
"github.com/safing/portbase/log"
"github.com/safing/portbase/notifications"
"github.com/safing/portmaster/intel"
"github.com/safing/portmaster/network"
"github.com/safing/portmaster/network/packet"
"github.com/safing/portmaster/profile"
"github.com/safing/portmaster/profile/endpoints"
)
const (
// notification action IDs
permitDomainAll = "permit-domain-all"
permitDomainDistinct = "permit-domain-distinct"
denyDomainAll = "deny-domain-all"
denyDomainDistinct = "deny-domain-distinct"
allowDomainAll = "allow-domain-all"
allowDomainDistinct = "allow-domain-distinct"
blockDomainAll = "block-domain-all"
blockDomainDistinct = "block-domain-distinct"
permitIP = "permit-ip"
denyIP = "deny-ip"
permitServingIP = "permit-serving-ip"
denyServingIP = "deny-serving-ip"
allowIP = "allow-ip"
blockIP = "block-ip"
allowServingIP = "allow-serving-ip"
blockServingIP = "block-serving-ip"
cancelPrompt = "cancel"
)
func prompt(conn *network.Connection, pkt packet.Packet) { //nolint:gocognit // TODO
nTTL := time.Duration(askTimeout()) * time.Second
var (
promptNotificationCreation sync.Mutex
)
type promptData struct {
Entity *intel.Entity
Profile promptProfile
}
type promptProfile struct {
Source string
ID string
LinkedPath string
}
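
To show roughly what the UI receives in EventData, here is a small, self-contained sketch that marshals the structs above (the intel.Entity field is replaced by a placeholder map, and the JSON key casing is an assumption since no struct tags appear in this diff):

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for the types above, for illustration only.
type promptProfile struct {
	Source     string
	ID         string
	LinkedPath string
}

type promptData struct {
	Entity  map[string]string // placeholder for *intel.Entity
	Profile promptProfile
}

func main() {
	d := promptData{
		Entity: map[string]string{"Domain": "example.com."},
		Profile: promptProfile{
			Source:     "local",
			ID:         "example-profile-id",
			LinkedPath: "/usr/bin/example",
		},
	}
	out, _ := json.MarshalIndent(d, "", "  ")
	fmt.Println(string(out)) // approximate shape of the prompt's EventData
}
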
func prompt(ctx context.Context, conn *network.Connection, pkt packet.Packet) { //nolint:gocognit // TODO
// Create notification.
n := createPrompt(ctx, conn, pkt)
// wait for response/timeout
select {
case promptResponse := <-n.Response():
switch promptResponse {
case allowDomainAll, allowDomainDistinct, allowIP, allowServingIP:
conn.Accept("permitted via prompt", profile.CfgOptionEndpointsKey)
default: // deny
conn.Deny("blocked via prompt", profile.CfgOptionEndpointsKey)
}
case <-time.After(1 * time.Second):
log.Tracer(ctx).Debugf("filter: continuing prompting async")
conn.Deny("prompting in progress, please respond to prompt", profile.CfgOptionDefaultActionKey)
case <-ctx.Done():
log.Tracer(ctx).Debugf("filter: aborting prompting because of shutdown")
conn.Drop("shutting down", noReasonOptionKey)
}
}
// promptIDPrefix is an identifier for privacy filter prompts. This is also used
// in the UI, so don't change!
const promptIDPrefix = "filter:prompt"
func createPrompt(ctx context.Context, conn *network.Connection, pkt packet.Packet) (n *notifications.Notification) {
expires := time.Now().Add(time.Duration(askTimeout()) * time.Second).Unix()
// Get local profile.
profile := conn.Process().Profile()
if profile == nil {
log.Tracer(ctx).Warningf("filter: tried creating prompt for connection without profile")
return
}
localProfile := profile.LocalProfile()
if localProfile == nil {
log.Tracer(ctx).Warningf("filter: tried creating prompt for connection without local profile")
return
}
// first check if there is an existing notification for this.
// build notification ID
var nID string
switch {
case conn.Inbound, conn.Entity.Domain == "": // connection to/from IP
nID = fmt.Sprintf("filter:prompt-%d-%s-%s", conn.Process().Pid, conn.Scope, pkt.Info().RemoteIP())
nID = fmt.Sprintf(
"%s-%s-%s-%s",
promptIDPrefix,
localProfile.ID,
conn.Scope,
pkt.Info().RemoteIP(),
)
default: // connection to domain
nID = fmt.Sprintf("filter:prompt-%d-%s", conn.Process().Pid, conn.Scope)
nID = fmt.Sprintf(
"%s-%s-%s",
promptIDPrefix,
localProfile.ID,
conn.Scope,
)
}
n := notifications.Get(nID)
saveResponse := true
// Only handle one notification at a time.
promptNotificationCreation.Lock()
defer promptNotificationCreation.Unlock()
n = notifications.Get(nID)
// If there already is a notification, just update the expiry.
if n != nil {
// update with new expiry
n.Update(time.Now().Add(nTTL).Unix())
// do not save response to profile
saveResponse = false
} else {
// create new notification
n = (&notifications.Notification{
ID: nID,
Type: notifications.Prompt,
Expires: time.Now().Add(nTTL).Unix(),
})
// add message and actions
switch {
case conn.Inbound:
n.Message = fmt.Sprintf("Application %s wants to accept connections from %s (%d/%d)", conn.Process(), conn.Entity.IP.String(), conn.Entity.Protocol, conn.Entity.Port)
n.AvailableActions = []*notifications.Action{
{
ID: permitServingIP,
Text: "Permit",
},
{
ID: denyServingIP,
Text: "Deny",
},
}
case conn.Entity.Domain == "": // direct connection
n.Message = fmt.Sprintf("Application %s wants to connect to %s (%d/%d)", conn.Process(), conn.Entity.IP.String(), conn.Entity.Protocol, conn.Entity.Port)
n.AvailableActions = []*notifications.Action{
{
ID: permitIP,
Text: "Permit",
},
{
ID: denyIP,
Text: "Deny",
},
}
default: // connection to domain
if pkt != nil {
n.Message = fmt.Sprintf("Application %s wants to connect to %s (%s %d/%d)", conn.Process(), conn.Entity.Domain, conn.Entity.IP.String(), conn.Entity.Protocol, conn.Entity.Port)
} else {
n.Message = fmt.Sprintf("Application %s wants to connect to %s", conn.Process(), conn.Entity.Domain)
}
n.AvailableActions = []*notifications.Action{
{
ID: permitDomainAll,
Text: "Permit all",
},
{
ID: permitDomainDistinct,
Text: "Permit",
},
{
ID: denyDomainDistinct,
Text: "Deny",
},
}
}
// save new notification
n.Save()
n.Update(expires)
log.Tracer(ctx).Debugf("filter: updated existing prompt notification")
return
}
// wait for response/timeout
select {
case promptResponse := <-n.Response():
switch promptResponse {
case permitDomainAll, permitDomainDistinct, permitIP, permitServingIP:
conn.Accept("permitted by user")
default: // deny
conn.Deny("denied by user")
}
// Reference relevant data for save function
entity := conn.Entity
// Also needed: localProfile
// end here if we won't save the response to the profile
if !saveResponse {
return
}
// get profile
p := conn.Process().Profile()
var ep endpoints.Endpoint
switch promptResponse {
case permitDomainAll:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: true},
Domain: "." + conn.Entity.Domain,
}
case permitDomainDistinct:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: true},
Domain: conn.Entity.Domain,
}
case denyDomainAll:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: false},
Domain: "." + conn.Entity.Domain,
}
case denyDomainDistinct:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: false},
Domain: conn.Entity.Domain,
}
case permitIP, permitServingIP:
ep = &endpoints.EndpointIP{
EndpointBase: endpoints.EndpointBase{Permitted: true},
IP: conn.Entity.IP,
}
case denyIP, denyServingIP:
ep = &endpoints.EndpointIP{
EndpointBase: endpoints.EndpointBase{Permitted: false},
IP: conn.Entity.IP,
}
default:
log.Warningf("filter: unknown prompt response: %s", promptResponse)
return
}
switch promptResponse {
case permitServingIP, denyServingIP:
p.AddServiceEndpoint(ep.String())
default:
p.AddEndpoint(ep.String())
}
case <-n.Expired():
conn.Deny("no response to prompt")
// Create new notification.
n = &notifications.Notification{
EventID: nID,
Type: notifications.Prompt,
Title: "Connection Prompt",
Category: "Privacy Filter",
EventData: &promptData{
Entity: entity,
Profile: promptProfile{
Source: string(localProfile.Source),
ID: localProfile.ID,
LinkedPath: localProfile.LinkedPath,
},
},
Expires: expires,
}
// Set action function.
n.SetActionFunction(func(_ context.Context, n *notifications.Notification) error {
return saveResponse(
localProfile,
entity,
n.SelectedActionID,
)
})
// Get name of profile for notification. The profile is read-locked by the firewall handler.
profileName := localProfile.Name
// add message and actions
switch {
case conn.Inbound:
n.Message = fmt.Sprintf("%s wants to accept connections from %s (%d/%d)", profileName, conn.Entity.IP.String(), conn.Entity.Protocol, conn.Entity.Port)
n.AvailableActions = []*notifications.Action{
{
ID: allowServingIP,
Text: "Allow",
},
{
ID: blockServingIP,
Text: "Block",
},
}
case conn.Entity.Domain == "": // direct connection
n.Message = fmt.Sprintf("%s wants to connect to %s (%d/%d)", profileName, conn.Entity.IP.String(), conn.Entity.Protocol, conn.Entity.Port)
n.AvailableActions = []*notifications.Action{
{
ID: allowIP,
Text: "Allow",
},
{
ID: blockIP,
Text: "Block",
},
}
default: // connection to domain
n.Message = fmt.Sprintf("%s wants to connect to %s", profileName, conn.Entity.Domain)
n.AvailableActions = []*notifications.Action{
{
ID: allowDomainAll,
Text: "Allow",
},
{
ID: blockDomainAll,
Text: "Block",
},
}
}
n.Save()
log.Tracer(ctx).Debugf("filter: sent prompt notification")
return n
}
func saveResponse(p *profile.Profile, entity *intel.Entity, promptResponse string) error {
if promptResponse == cancelPrompt {
return nil
}
// Update the profile if necessary.
if p.IsOutdated() {
var err error
p, err = profile.GetProfile(p.Source, p.ID, p.LinkedPath)
if err != nil {
return err
}
}
var ep endpoints.Endpoint
switch promptResponse {
case allowDomainAll:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: true},
OriginalValue: "." + entity.Domain,
}
case allowDomainDistinct:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: true},
OriginalValue: entity.Domain,
}
case blockDomainAll:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: false},
OriginalValue: "." + entity.Domain,
}
case blockDomainDistinct:
ep = &endpoints.EndpointDomain{
EndpointBase: endpoints.EndpointBase{Permitted: false},
OriginalValue: entity.Domain,
}
case allowIP, allowServingIP:
ep = &endpoints.EndpointIP{
EndpointBase: endpoints.EndpointBase{Permitted: true},
IP: entity.IP,
}
case blockIP, blockServingIP:
ep = &endpoints.EndpointIP{
EndpointBase: endpoints.EndpointBase{Permitted: false},
IP: entity.IP,
}
case cancelPrompt:
return nil
default:
return fmt.Errorf("unknown prompt response: %s", promptResponse)
}
switch promptResponse {
case allowServingIP, blockServingIP:
p.AddServiceEndpoint(ep.String())
log.Infof("filter: added incoming rule to profile %s: %q", p, ep.String())
default:
p.AddEndpoint(ep.String())
log.Infof("filter: added outgoing rule to profile %s: %q", p, ep.String())
}
return nil
}
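For orientation, the "all" prompt responses store the domain with a leading dot, which is meant to act as a subdomain wildcard, while the "distinct" responses store the exact domain. A minimal, self-contained sketch of that assumed matching convention (an illustration of the intended semantics, not the endpoints package implementation):

package main

import (
	"fmt"
	"strings"
)

// matchesRule illustrates the assumed semantics: a rule value starting
// with "." matches the domain itself and all of its subdomains, while a
// plain value matches only that exact domain.
func matchesRule(ruleValue, domain string) bool {
	if strings.HasPrefix(ruleValue, ".") {
		return domain == strings.TrimPrefix(ruleValue, ".") ||
			strings.HasSuffix(domain, ruleValue)
	}
	return domain == ruleValue
}

func main() {
	fmt.Println(matchesRule(".example.com", "cdn.example.com")) // true
	fmt.Println(matchesRule("example.com", "cdn.example.com"))  // false
}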

View file

@ -4,15 +4,20 @@ import (
"fmt"
"sync"
"github.com/safing/portbase/database"
"github.com/hashicorp/go-version"
"github.com/safing/portbase/database/record"
)
const resetVersion = "v0.6.0"
type cacheVersionRecord struct {
record.Base
sync.Mutex
Version string
Reset string
}
// getCacheDatabaseVersion reads and returns the cache
@ -37,6 +42,10 @@ func getCacheDatabaseVersion() (*version.Version, error) {
}
}
if verRecord.Reset != resetVersion {
return nil, database.ErrNotFound
}
ver, err := version.NewSemver(verRecord.Version)
if err != nil {
return nil, err
@ -50,6 +59,7 @@ func getCacheDatabaseVersion() (*version.Version, error) {
func setCacheDatabaseVersion(ver string) error {
verRecord := &cacheVersionRecord{
Version: ver,
Reset: resetVersion,
}
verRecord.SetKey(filterListCacheVersionKey)
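The added Reset field acts as a version gate: when the stored value does not match resetVersion, getCacheDatabaseVersion reports database.ErrNotFound, so the caller treats the filter list cache as missing and rebuilds it. A minimal, self-contained sketch of the same gate idea (made-up types, not the filterlists package):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("record not found")

const resetMarker = "v0.6.0" // bump to force a one-time cache rebuild

type versionRecord struct {
	Version string
	Reset   string
}

// loadVersion pretends the record is absent whenever the stored reset
// marker is outdated, forcing the caller to rebuild the cache.
func loadVersion(stored versionRecord) (string, error) {
	if stored.Reset != resetMarker {
		return "", errNotFound
	}
	return stored.Version, nil
}

func main() {
	_, err := loadVersion(versionRecord{Version: "20201101", Reset: "v0.5.0"})
	fmt.Println(errors.Is(err, errNotFound)) // true: cache will be rebuilt
}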

View file

@ -200,14 +200,14 @@ func processEntry(ctx context.Context, filter *scopedBloom, entry *listEntry, re
normalizeEntry(entry)
// Only add the entry to the bloom filter if it has any sources.
if len(entry.Sources) > 0 {
if len(entry.Resources) > 0 {
filter.add(entry.Type, entry.Entity)
}
r := &entityRecord{
Value: entry.Entity,
Type: entry.Type,
Sources: entry.Sources,
Sources: entry.getSources(),
UpdatedAt: time.Now().Unix(),
}

View file

@ -8,13 +8,31 @@ import (
"io"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/utils"
)
type listEntry struct {
Entity string `json:"entity"`
Sources []string `json:"sources"`
Whitelist bool `json:"whitelist"`
Type string `json:"type"`
Type string `json:"type"`
Entity string `json:"entity"`
Whitelist bool `json:"whitelist"`
Resources []entryResource `json:"resources"`
}
type entryResource struct {
SourceID string `json:"sourceID"`
ResourceID string `json:"resourceID"`
}
func (entry *listEntry) getSources() (sourceIDs []string) {
sourceIDs = make([]string, 0, len(entry.Resources))
for _, resource := range entry.Resources {
if !utils.StringInSlice(sourceIDs, resource.SourceID) {
sourceIDs = append(sourceIDs, resource.SourceID)
}
}
return
}
// decodeFile decodes a DSDL filterlists file and sends decoded entities to

View file

@ -166,6 +166,8 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
// Get connection for this request. This identifies the process behind the request.
conn := network.NewConnectionFromDNSRequest(ctx, q.FQDN, nil, packet.IPv4, remoteAddr.IP, uint16(remoteAddr.Port))
conn.Lock()
defer conn.Unlock()
// Once we decided on the connection we might need to save it to the database,
// so we defer that check for now.
@ -195,11 +197,11 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
// A reason for this might be that the request is sink-holed to a forced
// IP address in which case we "accept" it, but let the firewall handle
// the resolving as it wishes.
if responder, ok := conn.ReasonContext.(nsutil.Responder); ok {
if responder, ok := conn.Reason.Context.(nsutil.Responder); ok {
// Save the request as open, as we don't know if there will be a connection or not.
network.SaveOpenDNSRequest(conn)
tracer.Infof("nameserver: handing over request for %s to special filter responder: %s", q.ID(), conn.Reason)
tracer.Infof("nameserver: handing over request for %s to special filter responder: %s", q.ID(), conn.Reason.Msg)
return reply(responder)
}
@ -241,11 +243,11 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
rrCache = firewall.DecideOnResolvedDNS(ctx, conn, q, rrCache)
if rrCache == nil {
// Check again if there is a responder from the firewall.
if responder, ok := conn.ReasonContext.(nsutil.Responder); ok {
if responder, ok := conn.Reason.Context.(nsutil.Responder); ok {
// Save the request as open, as we don't know if there will be a connection or not.
network.SaveOpenDNSRequest(conn)
tracer.Infof("nameserver: handing over request for %s to filter responder: %s", q.ID(), conn.Reason)
tracer.Infof("nameserver: handing over request for %s to filter responder: %s", q.ID(), conn.Reason.Msg)
return reply(responder)
}

View file

@ -58,9 +58,9 @@ func ZeroIP(msgs ...string) ResponderFunc {
switch question.Qtype {
case dns.TypeA:
rr, err = dns.NewRR(question.Name + " 0 IN A 0.0.0.0")
rr, err = dns.NewRR(question.Name + " 1 IN A 0.0.0.17")
case dns.TypeAAAA:
rr, err = dns.NewRR(question.Name + " 0 IN AAAA ::")
rr, err = dns.NewRR(question.Name + " 1 IN AAAA ::17")
}
switch {
@ -100,9 +100,9 @@ func Localhost(msgs ...string) ResponderFunc {
switch question.Qtype {
case dns.TypeA:
rr, err = dns.NewRR("localhost. 0 IN A 127.0.0.1")
rr, err = dns.NewRR("localhost. 1 IN A 127.0.0.1")
case dns.TypeAAAA:
rr, err = dns.NewRR("localhost. 0 IN AAAA ::1")
rr, err = dns.NewRR("localhost. 1 IN AAAA ::1")
}
switch {

View file

@ -47,11 +47,16 @@ func checkForConflictingService() error {
// wait for a short duration for the other service to shut down
time.Sleep(10 * time.Millisecond)
// notify user
(&notifications.Notification{
ID: "nameserver-stopped-conflicting-service",
Message: fmt.Sprintf("Portmaster stopped a conflicting name service (pid %d) to gain required system integration.", pid),
}).Save()
notifications.Notify(&notifications.Notification{
EventID: "namserver:stopped-conflicting-service",
Type: notifications.Info,
Title: "Conflicting DNS Service",
Category: "Secure DNS",
Message: fmt.Sprintf(
"The Portmaster stopped a conflicting name service (pid %d) to gain required system integration.",
pid,
),
})
// restart via service-worker logic
return fmt.Errorf("%w: stopped conflicting name service with pid %d", modules.ErrRestartNow, pid)

View file

@ -213,16 +213,6 @@ func setCaptivePortal(portalURL *url.URL) {
return
}
// notify
cleanUpPortalNotification()
defer func() {
// TODO: add "open" button
captivePortalNotification = notifications.NotifyInfo(
"netenv:captive-portal:"+captivePortal.Domain,
"Portmaster detected a captive portal at "+captivePortal.Domain,
)
}()
// set
captivePortal = &CaptivePortal{
URL: portalURL.String(),
@ -234,6 +224,20 @@ func setCaptivePortal(portalURL *url.URL) {
} else {
captivePortal.Domain = portalURL.Hostname()
}
// notify
cleanUpPortalNotification()
captivePortalNotification = notifications.Notify(&notifications.Notification{
EventID: "netenv:captive-portal",
Type: notifications.Info,
Title: "Captive Portal",
Category: "Core",
Message: fmt.Sprintf(
"Portmaster detected a captive portal at %s",
captivePortal.Domain,
),
EventData: captivePortal,
})
}
func cleanUpPortalNotification() {

View file

@ -12,7 +12,7 @@ import (
"github.com/safing/portmaster/process"
)
var (
const (
cleanerTickDuration = 5 * time.Second
deleteConnsAfterEndedThreshold = 5 * time.Minute
)
@ -46,15 +46,8 @@ func cleanConnections() (activePIDs map[int]struct{}) {
nowUnix := now.Unix()
deleteOlderThan := now.Add(-deleteConnsAfterEndedThreshold).Unix()
// lock both together because we cannot fully guarantee in which map a connection lands
// of course every connection should land in the correct map, but this increases resilience
connsLock.Lock()
defer connsLock.Unlock()
dnsConnsLock.Lock()
defer dnsConnsLock.Unlock()
// network connections
for _, conn := range conns {
for _, conn := range conns.clone() {
conn.Lock()
// delete inactive connections
@ -70,15 +63,13 @@ func cleanConnections() (activePIDs map[int]struct{}) {
Dst: conn.Entity.IP,
DstPort: conn.Entity.Port,
}, now)
activePIDs[conn.process.Pid] = struct{}{}
if !exists {
// Step 2: mark end
conn.Ended = nowUnix
if conn.KeyIsSet() {
// Be absolutely sure that we have a key set here, else conn.Save() will deadlock.
conn.Save()
}
conn.Save()
}
case conn.Ended < deleteOlderThan:
// Step 3: delete
@ -90,7 +81,7 @@ func cleanConnections() (activePIDs map[int]struct{}) {
}
// dns requests
for _, conn := range dnsConns {
for _, conn := range dnsConns.clone() {
conn.Lock()
// delete old dns connections

View file

@ -4,7 +4,6 @@ import (
"context"
"fmt"
"net"
"strconv"
"sync"
"time"
@ -19,49 +18,173 @@ import (
"github.com/safing/portmaster/resolver"
)
// FirewallHandler defines the function signature for a firewall handle function
// FirewallHandler defines the function signature for a firewall
// handle function. A firewall handler is responsible for finding
// a reasonable verdict for the connection conn. The connection is
// locked before the firewall handler is called.
type FirewallHandler func(conn *Connection, pkt packet.Packet)
// Connection describes a distinct physical network connection identified by the IP/Port pair.
// ProcessContext holds additional information about the process
// that initiated a connection.
type ProcessContext struct {
// ProcessName is the name of the process.
ProcessName string
// ProfileName is the name of the profile.
ProfileName string
// BinaryPath is the path to the process binary.
BinaryPath string
// PID is the process identifier.
PID int
// Profile is the ID of the main profile that
// is applied to the process.
Profile string
// Source is the source of the profile.
Source string
}
// Connection describes a distinct physical network connection
// identified by the IP/Port pair.
type Connection struct { //nolint:maligned // TODO: fix alignment
record.Base
sync.Mutex
ID string
Scope string
// ID may hold a unique connection ID. It is only set for non-DNS
// request connections and is considered immutable after a
// connection object has been created.
ID string
// Scope defines the scope of a connection. For DNS requests, the
// scope is always set to the domain name. For direct packet
// connections the scope consists of the involved network environment
// and the packet direction. Once a connection object is created,
// Scope is considered immutable.
Scope string
// IPVersion is set to the packet IP version. It is not set (0) for
// connections created from a DNS request.
IPVersion packet.IPVersion
Inbound bool
// local endpoint
// Inbound is set to true if the connection is incoming. Inbound is
// only set when a connection object is created and is considered
// immutable afterwards.
Inbound bool
// IPProtocol is set to the transport protocol used by the connection.
// It is considered immutable once a connection object has been
// created. IPProtocol is not set for connections that have been
// created from a DNS request.
IPProtocol packet.IPProtocol
LocalIP net.IP
LocalPort uint16
process *process.Process
// remote endpoint
// LocalIP holds the local IP address of the connection. It is not
// set for connections created from DNS requests. LocalIP is
// considered immutable once a connection object has been created.
LocalIP net.IP
// LocalPort holds the local port of the connection. It is not
// set for connections created from DNS requests. LocalPort is
// considered immutable once a connection object has been created.
LocalPort uint16
// Entity describes the remote entity that the connection has been
// established to. The entity might be changed or information might
// be added to it during the lifetime of a connection. Access to
// entity must be guarded by the connection lock.
Entity *intel.Entity
Verdict Verdict
Reason string
ReasonContext interface{}
ReasonID string // format source[:id[:id]] // TODO
Started int64
Ended int64
Tunneled bool
// Verdict is the final decision that has been made for a connection.
// The verdict may change so any access to it must be guarded by the
// connection lock.
Verdict Verdict
// Reason holds information justifying the verdict, as well as additional
// information about the reason.
// Access to Reason must be guarded by the connection lock.
Reason Reason
// Started holds the number of seconds in UNIX epoch time at which
// the connection has been initiated and first seen by the Portmaster.
// Started is only ever set when creating a new connection object
// and is considered immutable afterwards.
Started int64
// Ended is set to the number of seconds in UNIX epoch time at which
// the connection is considered terminated. Ended may be set at any
// time so access must be guarded by the connection lock.
Ended int64
// VerdictPermanent is set to true if the final verdict is permanent
// and the connection has been (or will be) handed back to the kernel.
// VerdictPermanent may be changed together with the Verdict and Reason
// properties and must be guarded using the connection lock.
VerdictPermanent bool
Inspecting bool
Encrypted bool // TODO
Internal bool // Portmaster internal connections are marked in order to easily filter these out in the UI
pktQueue chan packet.Packet
// Inspecting is set to true if the connection is being inspected
// by one or more of the registered inspectors. This property may
// be changed during the lifetime of a connection and must be guarded
// using the connection lock.
Inspecting bool
// Tunneled is currently unused and MUST be ignored.
Tunneled bool
// Encrypted is currently unused and MUST be ignored.
Encrypted bool
// ProcessContext holds additional information about the process
// that initiated the connection. It is set once when the connection
// object is created and is considered immutable afterwards.
ProcessContext ProcessContext
// Internal is set to true if the connection is attributed as a
// Portmaster-internal connection. Internal may be set at different
// points and access to it must be guarded by the connection lock.
Internal bool
// process holds a reference to the actor process. That is, the
// process instance that initiated the connection.
process *process.Process
// pktQueue is used to serialize packet handling for a single
// connection and is served by the connection's packetHandler.
pktQueue chan packet.Packet
// firewallHandler is the firewall handler that is called for
// each packet sent to pktQueue.
firewallHandler FirewallHandler
// saveWhenFinished can be set to true during the lifetime of
// a connection and signals the firewallHandler that a Save()
// should be issued after processing the connection.
saveWhenFinished bool
// activeInspectors is a slice of booleans where each entry
// maps to the index of an available inspector. If the value
// is true the inspector is currently active. False indicates
// that the inspector has finished and should be skipped.
activeInspectors []bool
inspectorData map[uint8]interface{}
// inspectorData holds additional metadata for the inspectors,
// using the inspector's index as the map key.
inspectorData map[uint8]interface{}
// ProfileRevisionCounter is used to track changes to the process
// profile and is required for correct re-evaluation of a connection's
// verdict.
ProfileRevisionCounter uint64
}
saveWhenFinished bool
profileRevisionCounter uint64
// Reason holds information justifying a verdict, as well as additional
// information about the reason.
type Reason struct {
// Msg is a human readable description of the reason.
Msg string
// OptionKey is the configuration option key of the setting that
// was responsible for the verdict.
OptionKey string
// Profile is the database key of the profile that held the setting
// that was responsible for the verdict.
Profile string
// Context may hold additional reason-specific information and
// any access must be guarded by the connection lock.
Context interface{}
}
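Given the per-field locking rules documented above, a hypothetical helper (not part of this change, assuming the surrounding network package) would read mutable connection state like this:

// Illustrative only: mutable fields such as Verdict, Reason and Ended
// must be accessed under the connection lock; immutable fields such as
// ID, Scope and Started may be read without it.
func describeConn(conn *Connection) string {
	conn.Lock()
	verdict := conn.Verdict
	reason := conn.Reason.Msg
	conn.Unlock()
	return fmt.Sprintf("%s: %s (%s)", conn.ID, verdict.Verb(), reason)
}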
func getProcessContext(ctx context.Context, proc *process.Process) ProcessContext {
// Gather process information.
pCtx := ProcessContext{
BinaryPath: proc.Path,
ProcessName: proc.Name,
PID: proc.Pid,
}
// Get local profile.
localProfile := proc.Profile().LocalProfile()
if localProfile == nil {
log.Tracer(ctx).Warningf("network: process %s has no profile", proc)
return pCtx
}
// Add profile information and return.
pCtx.ProfileName = localProfile.Name
pCtx.Profile = localProfile.ID
pCtx.Source = string(localProfile.Source)
return pCtx
}
// NewConnectionFromDNSRequest returns a new connection based on the given dns request.
@ -91,9 +214,10 @@ func NewConnectionFromDNSRequest(ctx context.Context, fqdn string, cnames []stri
Domain: fqdn,
CNAME: cnames,
},
process: proc,
Started: timestamp,
Ended: timestamp,
process: proc,
ProcessContext: getProcessContext(ctx, proc),
Started: timestamp,
Ended: timestamp,
}
return dnsConn
}
@ -120,7 +244,10 @@ func NewConnectionFromFirstPacket(pkt packet.Packet) *Connection {
scope = IncomingLAN
case netutils.Global, netutils.GlobalMulticast:
scope = IncomingInternet
default: // netutils.Invalid
case netutils.Invalid:
fallthrough
default:
scope = IncomingInvalid
}
entity = &intel.Entity{
@ -169,7 +296,10 @@ func NewConnectionFromFirstPacket(pkt packet.Packet) *Connection {
scope = PeerLAN
case netutils.Global, netutils.GlobalMulticast:
scope = PeerInternet
default: // netutils.Invalid
case netutils.Invalid:
fallthrough
default:
scope = PeerInvalid
}
@ -182,95 +312,96 @@ func NewConnectionFromFirstPacket(pkt packet.Packet) *Connection {
IPVersion: pkt.Info().Version,
Inbound: inbound,
// local endpoint
IPProtocol: pkt.Info().Protocol,
LocalIP: pkt.Info().LocalIP(),
LocalPort: pkt.Info().LocalPort(),
process: proc,
IPProtocol: pkt.Info().Protocol,
LocalIP: pkt.Info().LocalIP(),
LocalPort: pkt.Info().LocalPort(),
ProcessContext: getProcessContext(pkt.Ctx(), proc),
process: proc,
// remote endpoint
Entity: entity,
// meta
Started: time.Now().Unix(),
profileRevisionCounter: proc.Profile().RevisionCnt(),
ProfileRevisionCounter: proc.Profile().RevisionCnt(),
}
}
// GetConnection fetches a Connection from the database.
func GetConnection(id string) (*Connection, bool) {
connsLock.RLock()
defer connsLock.RUnlock()
conn, ok := conns[id]
return conn, ok
return conns.get(id)
}
// AcceptWithContext accepts the connection.
func (conn *Connection) AcceptWithContext(reason string, ctx interface{}) {
if !conn.SetVerdict(VerdictAccept, reason, ctx) {
func (conn *Connection) AcceptWithContext(reason, reasonOptionKey string, ctx interface{}) {
if !conn.SetVerdict(VerdictAccept, reason, reasonOptionKey, ctx) {
log.Warningf("filter: tried to accept %s, but current verdict is %s", conn, conn.Verdict)
}
}
// Accept is like AcceptWithContext but only accepts a reason.
func (conn *Connection) Accept(reason string) {
conn.AcceptWithContext(reason, nil)
func (conn *Connection) Accept(reason, reasonOptionKey string) {
conn.AcceptWithContext(reason, reasonOptionKey, nil)
}
// BlockWithContext blocks the connection.
func (conn *Connection) BlockWithContext(reason string, ctx interface{}) {
if !conn.SetVerdict(VerdictBlock, reason, ctx) {
func (conn *Connection) BlockWithContext(reason, reasonOptionKey string, ctx interface{}) {
if !conn.SetVerdict(VerdictBlock, reason, reasonOptionKey, ctx) {
log.Warningf("filter: tried to block %s, but current verdict is %s", conn, conn.Verdict)
}
}
// Block is like BlockWithContext but does only accepts a reason.
func (conn *Connection) Block(reason string) {
conn.BlockWithContext(reason, nil)
func (conn *Connection) Block(reason, reasonOptionKey string) {
conn.BlockWithContext(reason, reasonOptionKey, nil)
}
// DropWithContext drops the connection.
func (conn *Connection) DropWithContext(reason string, ctx interface{}) {
if !conn.SetVerdict(VerdictDrop, reason, ctx) {
func (conn *Connection) DropWithContext(reason, reasonOptionKey string, ctx interface{}) {
if !conn.SetVerdict(VerdictDrop, reason, reasonOptionKey, ctx) {
log.Warningf("filter: tried to drop %s, but current verdict is %s", conn, conn.Verdict)
}
}
// Drop is like DropWithContext but does only accepts a reason.
func (conn *Connection) Drop(reason string) {
conn.DropWithContext(reason, nil)
func (conn *Connection) Drop(reason, reasonOptionKey string) {
conn.DropWithContext(reason, reasonOptionKey, nil)
}
// DenyWithContext blocks or drops the link depending on the connection direction.
func (conn *Connection) DenyWithContext(reason string, ctx interface{}) {
func (conn *Connection) DenyWithContext(reason, reasonOptionKey string, ctx interface{}) {
if conn.Inbound {
conn.DropWithContext(reason, ctx)
conn.DropWithContext(reason, reasonOptionKey, ctx)
} else {
conn.BlockWithContext(reason, ctx)
conn.BlockWithContext(reason, reasonOptionKey, ctx)
}
}
// Deny is like DenyWithContext but only accepts a reason.
func (conn *Connection) Deny(reason string) {
conn.DenyWithContext(reason, nil)
func (conn *Connection) Deny(reason, reasonOptionKey string) {
conn.DenyWithContext(reason, reasonOptionKey, nil)
}
// FailedWithContext marks the connection with VerdictFailed and stores the reason.
func (conn *Connection) FailedWithContext(reason string, ctx interface{}) {
if !conn.SetVerdict(VerdictFailed, reason, ctx) {
func (conn *Connection) FailedWithContext(reason, reasonOptionKey string, ctx interface{}) {
if !conn.SetVerdict(VerdictFailed, reason, reasonOptionKey, ctx) {
log.Warningf("filter: tried to drop %s due to error but current verdict is %s", conn, conn.Verdict)
}
}
// Failed is like FailedWithContext but only accepts a string.
func (conn *Connection) Failed(reason string) {
conn.FailedWithContext(reason, nil)
func (conn *Connection) Failed(reason, reasonOptionKey string) {
conn.FailedWithContext(reason, reasonOptionKey, nil)
}
// SetVerdict sets a new verdict for the connection, making sure it does not interfere with previous verdicts.
func (conn *Connection) SetVerdict(newVerdict Verdict, reason string, reasonCtx interface{}) (ok bool) {
func (conn *Connection) SetVerdict(newVerdict Verdict, reason, reasonOptionKey string, reasonCtx interface{}) (ok bool) {
if newVerdict >= conn.Verdict {
conn.Verdict = newVerdict
conn.Reason = reason
conn.ReasonContext = reasonCtx
conn.Reason.Msg = reason
conn.Reason.Context = reasonCtx
if reasonOptionKey != "" && conn.Process() != nil {
conn.Reason.OptionKey = reasonOptionKey
conn.Reason.Profile = conn.Process().Profile().GetProfileSource(conn.Reason.OptionKey)
}
return true
}
return false
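With the expanded signature, every verdict now carries both a human-readable message and the configuration option key that produced it, which lets the UI link a decision back to the responsible setting. An illustrative call from a filter decision (the option key below is a placeholder; the real keys are defined in the profile package):

// Illustrative only: deny a connection and record which setting decided.
conn.Deny(
	"connection blocked by example setting", // becomes Reason.Msg
	"filter/someOptionKey",                  // placeholder for Reason.OptionKey
)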
@ -286,32 +417,24 @@ func (conn *Connection) SaveWhenFinished() {
conn.saveWhenFinished = true
}
// Save saves the connection in the storage and propagates the change through the database system.
// Save saves the connection in the storage and propagates the change
// through the database system. Save may lock dnsConnsLock or connsLock
// if Save() is called for the first time.
// Callers must make sure to lock the connection itself before calling
// Save().
func (conn *Connection) Save() {
conn.UpdateMeta()
if !conn.KeyIsSet() {
// A connection without an ID has been created from
// a DNS request rather than a packet. Choose the correct
// connection store here.
if conn.ID == "" {
// dns request
// set key
conn.SetKey(fmt.Sprintf("network:tree/%d/%s", conn.process.Pid, conn.Scope))
mapKey := strconv.Itoa(conn.process.Pid) + "/" + conn.Scope
// save
dnsConnsLock.Lock()
dnsConns[mapKey] = conn
dnsConnsLock.Unlock()
dnsConns.add(conn)
} else {
// network connection
// set key
conn.SetKey(fmt.Sprintf("network:tree/%d/%s/%s", conn.process.Pid, conn.Scope, conn.ID))
// save
connsLock.Lock()
conns[conn.ID] = conn
connsLock.Unlock()
conns.add(conn)
}
}
@ -319,34 +442,25 @@ func (conn *Connection) Save() {
dbController.PushUpdate(conn)
}
// delete deletes a link from the storage and propagates the change. Nothing is locked - both the conns map and the connection itself require locking
// delete deletes a link from the storage and propagates the change.
// delete may lock either the dnsConnsLock or connsLock. Callers
// must still make sure to lock the connection itself.
func (conn *Connection) delete() {
// A connection without an ID has been created from
// a DNS request rather than a packet. Choose the correct
// connection store here.
if conn.ID == "" {
delete(dnsConns, strconv.Itoa(conn.process.Pid)+"/"+conn.Scope)
dnsConns.delete(conn)
} else {
delete(conns, conn.ID)
conns.delete(conn)
}
conn.Meta().Delete()
dbController.PushUpdate(conn)
}
// UpdateAndCheck updates profiles and checks whether a reevaluation is needed.
func (conn *Connection) UpdateAndCheck() (needsReevaluation bool) {
p := conn.process.Profile()
if p == nil {
return false
}
revCnt := p.Update()
if conn.profileRevisionCounter != revCnt {
conn.profileRevisionCounter = revCnt
needsReevaluation = true
}
return
}
// SetFirewallHandler sets the firewall handler for this link, and starts a worker to handle the packets.
// SetFirewallHandler sets the firewall handler for this link, and starts a
// worker to handle the packets.
func (conn *Connection) SetFirewallHandler(handler FirewallHandler) {
if conn.firewallHandler == nil {
conn.pktQueue = make(chan packet.Packet, 1000)
@ -382,13 +496,13 @@ func (conn *Connection) HandlePacket(pkt packet.Packet) {
// packetHandler sequentially handles queued packets
func (conn *Connection) packetHandler() {
for {
pkt := <-conn.pktQueue
for pkt := range conn.pktQueue {
if pkt == nil {
return
}
// get handler
conn.Lock()
// execute handler or verdict
if conn.firewallHandler != nil {
conn.firewallHandler(conn, pkt)
@ -396,14 +510,16 @@ func (conn *Connection) packetHandler() {
defaultFirewallHandler(conn, pkt)
}
// log verdict
log.Tracer(pkt.Ctx()).Infof("filter: connection %s %s: %s", conn, conn.Verdict.Verb(), conn.Reason)
conn.Unlock()
log.Tracer(pkt.Ctx()).Infof("filter: connection %s %s: %s", conn, conn.Verdict.Verb(), conn.Reason.Msg)
// save does not touch any changing data
// must not be locked, will deadlock with cleaner functions
if conn.saveWhenFinished {
conn.saveWhenFinished = false
conn.Save()
}
conn.Unlock()
// submit trace logs
log.Tracer(pkt.Ctx()).Submit()
}

View file

@ -0,0 +1,57 @@
package network
import (
"strconv"
"sync"
)
type connectionStore struct {
rw sync.RWMutex
items map[string]*Connection
}
func newConnectionStore() *connectionStore {
return &connectionStore{
items: make(map[string]*Connection, 100),
}
}
func (cs *connectionStore) getID(conn *Connection) string {
if conn.ID != "" {
return conn.ID
}
return strconv.Itoa(conn.process.Pid) + "/" + conn.Scope
}
func (cs *connectionStore) add(conn *Connection) {
cs.rw.Lock()
defer cs.rw.Unlock()
cs.items[cs.getID(conn)] = conn
}
func (cs *connectionStore) delete(conn *Connection) {
cs.rw.Lock()
defer cs.rw.Unlock()
delete(cs.items, cs.getID(conn))
}
func (cs *connectionStore) get(id string) (*Connection, bool) {
cs.rw.RLock()
defer cs.rw.RUnlock()
conn, ok := cs.items[id]
return conn, ok
}
func (cs *connectionStore) clone() map[string]*Connection {
cs.rw.RLock()
defer cs.rw.RUnlock()
m := make(map[string]*Connection, len(cs.items))
for key, conn := range cs.items {
m[key] = conn
}
return m
}
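The new connectionStore replaces the two raw maps and their separate mutexes. A hypothetical sketch of how the rest of the package uses it (assuming a *Connection value obtained elsewhere): writers add or delete, readers fetch by ID, and iteration always happens over a clone so per-connection locks are never taken while the store lock is held.

// Illustrative usage only, within the network package.
func exampleStoreUsage(conn *Connection) {
	store := newConnectionStore()
	store.add(conn) // keyed by conn.ID, or "<PID>/<Scope>" for DNS requests

	if c, ok := store.get(conn.ID); ok {
		_ = c // found by connection ID
	}

	// Iterate over a snapshot; the store lock is released before any
	// per-connection lock is acquired.
	for _, c := range store.clone() {
		c.Lock()
		_ = c.Scope
		c.Unlock()
	}

	store.delete(conn)
}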

View file

@ -3,7 +3,6 @@ package network
import (
"strconv"
"strings"
"sync"
"github.com/safing/portmaster/network/state"
@ -16,15 +15,14 @@ import (
)
var (
dnsConns = make(map[string]*Connection) // key: <PID>/Scope
dnsConnsLock sync.RWMutex
conns = make(map[string]*Connection) // key: Connection ID
connsLock sync.RWMutex
dbController *database.Controller
dnsConns = newConnectionStore()
conns = newConnectionStore()
)
// StorageInterface provices a storage.Interface to the configuration manager.
// StorageInterface provides a storage.Interface to the
// configuration manager.
type StorageInterface struct {
storage.InjectBase
}
@ -45,18 +43,12 @@ func (s *StorageInterface) Get(key string) (record.Record, error) {
}
}
case 3:
dnsConnsLock.RLock()
defer dnsConnsLock.RUnlock()
conn, ok := dnsConns[splitted[1]+"/"+splitted[2]]
if ok {
return conn, nil
if r, ok := dnsConns.get(splitted[1] + "/" + splitted[2]); ok {
return r, nil
}
case 4:
connsLock.RLock()
defer connsLock.RUnlock()
conn, ok := conns[splitted[3]]
if ok {
return conn, nil
if r, ok := conns.get(splitted[3]); ok {
return r, nil
}
}
case "system":
@ -97,28 +89,24 @@ func (s *StorageInterface) processQuery(q *query.Query, it *iterator.Iterator) {
if slashes <= 2 {
// dns scopes only
dnsConnsLock.RLock()
for _, dnsConn := range dnsConns {
for _, dnsConn := range dnsConns.clone() {
dnsConn.Lock()
if q.Matches(dnsConn) {
it.Next <- dnsConn
}
dnsConn.Unlock()
}
dnsConnsLock.RUnlock()
}
if slashes <= 3 {
// connections
connsLock.RLock()
for _, conn := range conns {
for _, conn := range conns.clone() {
conn.Lock()
if q.Matches(conn) {
it.Next <- conn
}
conn.Unlock()
}
connsLock.RUnlock()
}
it.Finish(nil)

View file

@ -17,14 +17,16 @@ var (
openDNSRequests = make(map[string]*Connection) // key: <pid>/fqdn
openDNSRequestsLock sync.Mutex
// scope prefix
unidentifiedProcessScopePrefix = strconv.Itoa(process.UnidentifiedProcessID) + "/"
)
const (
// write open dns requests every
writeOpenDNSRequestsTickDuration = 5 * time.Second
// duration after which DNS requests without a following connection are logged
openDNSRequestLimit = 3 * time.Second
// scope prefix
unidentifiedProcessScopePrefix = strconv.Itoa(process.UnidentifiedProcessID) + "/"
)
func getDNSRequestCacheKey(pid int, fqdn string) string {
@ -122,15 +124,15 @@ func (conn *Connection) GetExtraRRs(ctx context.Context, request *dns.Msg) []dns
}
// Create resource record with verdict and reason.
rr, err := nsutil.MakeMessageRecord(level, fmt.Sprintf("%s: %s", conn.Verdict.Verb(), conn.Reason))
rr, err := nsutil.MakeMessageRecord(level, fmt.Sprintf("%s: %s", conn.Verdict.Verb(), conn.Reason.Msg))
if err != nil {
log.Tracer(ctx).Warningf("filter: failed to add informational record to reply: %s", err)
return nil
}
extra := []dns.RR{rr}
// Add additional records from ReasonContext.
if rrProvider, ok := conn.ReasonContext.(nsutil.RRProvider); ok {
// Add additional records from Reason.Context.
if rrProvider, ok := conn.Reason.Context.(nsutil.RRProvider); ok {
rrs := rrProvider.GetExtraRRs(ctx, request)
extra = append(extra, rrs...)
}

View file

@ -207,6 +207,7 @@ func procDelimiter(c rune) bool {
}
func convertIPv4(data string) net.IP {
// Decode and sanity-check the data length.
decoded, err := hex.DecodeString(data)
if err != nil {
log.Warningf("proc: could not parse IPv4 %s: %s", data, err)
@ -216,11 +217,14 @@ func convertIPv4(data string) net.IP {
log.Warningf("proc: decoded IPv4 %s has wrong length", decoded)
return nil
}
// Build the IPv4 address with the reversed byte order.
ip := net.IPv4(decoded[3], decoded[2], decoded[1], decoded[0])
return ip
}
func convertIPv6(data string) net.IP {
// Decode and sanity-check the data length.
decoded, err := hex.DecodeString(data)
if err != nil {
log.Warningf("proc: could not parse IPv6 %s: %s", data, err)
@ -230,6 +234,11 @@ func convertIPv6(data string) net.IP {
log.Warningf("proc: decoded IPv6 %s has wrong length", decoded)
return nil
}
// Build the IPv6 address with the translated byte order.
for i := 0; i < 16; i += 4 {
decoded[i], decoded[i+1], decoded[i+2], decoded[i+3] = decoded[i+3], decoded[i+2], decoded[i+1], decoded[i]
}
ip := net.IP(decoded)
return ip
}
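The /proc/net tables print each 32-bit word of an IPv6 address in host byte order, so on little-endian systems the decoder has to swap every 4-byte group back, as done above. A self-contained sketch of the same transformation (assuming a little-endian host, as on typical x86/arm64 Linux systems):

package main

import (
	"encoding/hex"
	"fmt"
	"net"
)

// procHexToIPv6 converts a /proc/net/tcp6-style hex address by reversing
// each 4-byte word back into network byte order.
func procHexToIPv6(data string) (net.IP, error) {
	decoded, err := hex.DecodeString(data)
	if err != nil || len(decoded) != 16 {
		return nil, fmt.Errorf("invalid IPv6 hex %q", data)
	}
	for i := 0; i < 16; i += 4 {
		decoded[i], decoded[i+1], decoded[i+2], decoded[i+3] =
			decoded[i+3], decoded[i+2], decoded[i+1], decoded[i]
	}
	return net.IP(decoded), nil
}

func main() {
	// "::1" as printed by the kernel on a little-endian machine.
	ip, _ := procHexToIPv6("00000000000000000000000001000000")
	fmt.Println(ip) // ::1
}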

View file

@ -29,6 +29,8 @@ type BindInfo struct {
PID int
UID int
Inode int
ListensAny bool
}
// Address is an IP + Port pair.
@ -108,3 +110,7 @@ func (i *BindInfo) GetUIDandInode() (int, int) {
return i.UID, i.Inode
}
// compile time checks
var _ Info = new(ConnectionInfo)
var _ Info = new(BindInfo)
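The trailing var declarations are the usual Go idiom for compile-time interface checks: the package stops compiling if ConnectionInfo or BindInfo ever ceases to implement Info. A generic sketch of the idiom with made-up types:

package main

type stringer interface{ String() string }

type label string

func (l label) String() string { return string(l) }

// Compile-time assertion: the build fails if label loses String().
var _ stringer = label("")

func main() {}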

View file

@ -31,7 +31,7 @@ var (
var (
baseWaitTime = 3 * time.Millisecond
lookupRetries = 7
lookupRetries = 7 * 2 // Every retry takes two full passes.
)
// Lookup looks for the given connection in the system state tables and returns the PID of the associated process and whether the connection is inbound.
@ -68,97 +68,147 @@ func (table *tcpTable) lookup(pktInfo *packet.Info) (
inbound bool,
err error,
) {
// Search pattern: search, wait, search, refresh, search, wait, search, refresh, ...
localIP := pktInfo.LocalIP()
localPort := pktInfo.LocalPort()
// search until we find something
// Search for the socket until found.
for i := 0; i <= lookupRetries; i++ {
table.lock.RLock()
// always search listeners first
for _, socketInfo := range table.listeners {
if localPort == socketInfo.Local.Port &&
(socketInfo.Local.IP[0] == 0 || localIP.Equal(socketInfo.Local.IP)) {
table.lock.RUnlock()
return checkPID(socketInfo, true)
}
// Check main table for socket.
socketInfo, inbound := table.findSocket(pktInfo)
if socketInfo == nil && table.dualStack != nil {
// If there was no match in the main table and we are dual-stack, check
// the dual-stack table for the socket.
socketInfo, inbound = table.dualStack.findSocket(pktInfo)
}
// search connections
for _, socketInfo := range table.connections {
if localPort == socketInfo.Local.Port &&
localIP.Equal(socketInfo.Local.IP) {
table.lock.RUnlock()
return checkPID(socketInfo, false)
}
// If there's a match, check we have the PID and return.
if socketInfo != nil {
return checkPID(socketInfo, inbound)
}
table.lock.RUnlock()
// every time, except for the last iteration
if i < lookupRetries {
// we found nothing, we could have been too fast, give the kernel some time to think
// back off timer: with 3ms baseWaitTime: 3, 6, 9, 12, 15, 18, 21ms - 84ms in total
time.Sleep(time.Duration(i+1) * baseWaitTime)
// refetch lists
table.updateTables()
// Take turns in waiting and refreshing in order to satisfy the search pattern.
if i%2 == 0 {
// we found nothing, we could have been too fast, give the kernel some time to think
// back off timer: the wait time grows linearly with each retry
time.Sleep(time.Duration(i+1) * baseWaitTime)
} else {
// refetch lists
table.updateTables()
if table.dualStack != nil {
table.dualStack.updateTables()
}
}
}
}
return socket.UnidentifiedProcessID, pktInfo.Inbound, ErrConnectionNotFound
}
func (table *tcpTable) findSocket(pktInfo *packet.Info) (
socketInfo socket.Info,
inbound bool,
) {
localIP := pktInfo.LocalIP()
localPort := pktInfo.LocalPort()
table.lock.RLock()
defer table.lock.RUnlock()
// always search listeners first
for _, socketInfo := range table.listeners {
if localPort == socketInfo.Local.Port &&
(socketInfo.ListensAny || localIP.Equal(socketInfo.Local.IP)) {
return socketInfo, false
}
}
// search connections
for _, socketInfo := range table.connections {
if localPort == socketInfo.Local.Port &&
localIP.Equal(socketInfo.Local.IP) {
return socketInfo, false
}
}
return nil, false
}
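The lookup loop above alternates between backing off and refreshing the socket tables, which is also why lookupRetries was doubled. A generic, self-contained sketch of that alternating retry pattern (made-up find/refresh callbacks, not the state package API):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errNotFound = errors.New("not found")

// retryAlternating searches, and on a miss alternates between waiting
// (even iterations) and refreshing the underlying data (odd iterations).
func retryAlternating(retries int, baseWait time.Duration,
	find func() (int, bool), refresh func()) (int, error) {

	for i := 0; i <= retries; i++ {
		if v, ok := find(); ok {
			return v, nil
		}
		if i%2 == 0 {
			time.Sleep(time.Duration(i+1) * baseWait) // back off
		} else {
			refresh() // refetch the tables
		}
	}
	return 0, errNotFound
}

func main() {
	attempts := 0
	v, err := retryAlternating(4, time.Millisecond,
		func() (int, bool) { attempts++; return 42, attempts > 3 },
		func() {},
	)
	fmt.Println(v, err) // 42 <nil>
}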
func (table *udpTable) lookup(pktInfo *packet.Info) (
pid int,
inbound bool,
err error,
) {
localIP := pktInfo.LocalIP()
localPort := pktInfo.LocalPort()
// Search pattern: search, wait, search, refresh, search, wait, search, refresh, ...
isInboundMulticast := pktInfo.Inbound && netutils.ClassifyIP(localIP) == netutils.LocalMulticast
// TODO: Currently broadcast/multicast scopes are not checked, so we might
// attribute an incoming broadcast/multicast packet to the wrong process if
// there are multiple processes listening on the same local port, but
// binding to different addresses. This is highly unusual for clients.
isInboundMulticast := pktInfo.Inbound && netutils.ClassifyIP(pktInfo.LocalIP()) == netutils.LocalMulticast
// search until we find something
// Search for the socket until found.
for i := 0; i <= lookupRetries; i++ {
table.lock.RLock()
// search binds
for _, socketInfo := range table.binds {
if localPort == socketInfo.Local.Port &&
(socketInfo.Local.IP[0] == 0 || // zero IP
isInboundMulticast || // inbound broadcast, multicast
localIP.Equal(socketInfo.Local.IP)) {
table.lock.RUnlock()
// do not check direction if remoteIP/Port is not given
if pktInfo.RemotePort() == 0 {
return checkPID(socketInfo, pktInfo.Inbound)
}
// get direction and return
connInbound := table.getDirection(socketInfo, pktInfo)
return checkPID(socketInfo, connInbound)
}
// Check main table for socket.
socketInfo := table.findSocket(pktInfo, isInboundMulticast)
if socketInfo == nil && table.dualStack != nil {
// If there was no match in the main table and we are dual-stack, check
// the dual-stack table for the socket.
socketInfo = table.dualStack.findSocket(pktInfo, isInboundMulticast)
}
table.lock.RUnlock()
// If there's a match, get the direction and check we have the PID, then return.
if socketInfo != nil {
// If there is no remote port, do not check the direction of the
// connection. This will be the case for pure checking functions
// that do not want to change direction state.
if pktInfo.RemotePort() == 0 {
return checkPID(socketInfo, pktInfo.Inbound)
}
// Get (and save) the direction of the connection.
connInbound := table.getDirection(socketInfo, pktInfo)
// Check we have the PID and return.
return checkPID(socketInfo, connInbound)
}
// every time, except for the last iteration
if i < lookupRetries {
// we found nothing, we could have been too fast, give the kernel some time to think
// back off timer: with 3ms baseWaitTime: 3, 6, 9, 12, 15, 18, 21ms - 84ms in total
time.Sleep(time.Duration(i+1) * baseWaitTime)
// refetch lists
table.updateTable()
// Take turns in waiting and refreshing in order to satisfy the search pattern.
if i%2 == 0 {
// we found nothing, we could have been too fast, give the kernel some time to think
// back off timer: the wait time grows linearly with each retry
time.Sleep(time.Duration(i+1) * baseWaitTime)
} else {
// refetch lists
table.updateTable()
if table.dualStack != nil {
table.dualStack.updateTable()
}
}
}
}
return socket.UnidentifiedProcessID, pktInfo.Inbound, ErrConnectionNotFound
}
func (table *udpTable) findSocket(pktInfo *packet.Info, isInboundMulticast bool) (socketInfo *socket.BindInfo) {
localIP := pktInfo.LocalIP()
localPort := pktInfo.LocalPort()
table.lock.RLock()
defer table.lock.RUnlock()
// search binds
for _, socketInfo := range table.binds {
if localPort == socketInfo.Local.Port &&
(socketInfo.ListensAny || // zero IP (dual-stack)
isInboundMulticast || // inbound broadcast, multicast
localIP.Equal(socketInfo.Local.IP)) {
return socketInfo
}
}
return nil
}

View file

@ -1,6 +1,8 @@
package state
import (
"net"
"github.com/safing/portbase/log"
)
@ -15,6 +17,11 @@ func (table *tcpTable) updateTables() {
return
}
// Pre-compute whether each listener binds to the unspecified (any) address.
for _, bindInfo := range listeners {
bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
}
table.connections = connections
table.listeners = listeners
})
@ -31,6 +38,11 @@ func (table *udpTable) updateTable() {
return
}
// Pre-compute whether each listener binds to the unspecified (any) address.
for _, bindInfo := range binds {
bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
}
table.binds = binds
})
}

View file

@ -16,16 +16,19 @@ type tcpTable struct {
fetchOnceAgain utils.OnceAgain
fetchTable func() (connections []*socket.ConnectionInfo, listeners []*socket.BindInfo, err error)
dualStack *tcpTable
}
var (
tcp4Table = &tcpTable{
version: 4,
fetchTable: getTCP4Table,
}
tcp6Table = &tcpTable{
version: 6,
fetchTable: getTCP6Table,
}
tcp4Table = &tcpTable{
version: 4,
fetchTable: getTCP4Table,
dualStack: tcp6Table,
}
)

View file

@ -22,6 +22,8 @@ type udpTable struct {
states map[string]map[string]*udpState
statesLock sync.Mutex
dualStack *udpTable
}
type udpState struct {
@ -41,17 +43,18 @@ const (
)
var (
udp4Table = &udpTable{
version: 4,
fetchTable: getUDP4Table,
states: make(map[string]map[string]*udpState),
}
udp6Table = &udpTable{
version: 6,
fetchTable: getUDP6Table,
states: make(map[string]map[string]*udpState),
}
udp4Table = &udpTable{
version: 4,
fetchTable: getUDP4Table,
states: make(map[string]map[string]*udpState),
dualStack: udp6Table,
}
)
// CleanUDPStates cleans the udp connection states which save connection directions.

View file

@ -3,9 +3,10 @@ package network
// Verdict describes the decision made about a connection or link.
type Verdict int8
// List of values a Status can have
// All possible verdicts that can be applied to a network
// connection.
const (
// UNDECIDED is the default status of new connections
// VerdictUndecided is the default status of new connections.
VerdictUndecided Verdict = 0
VerdictUndeterminable Verdict = 1
VerdictAccept Verdict = 2
@ -63,7 +64,7 @@ func (v Verdict) Verb() string {
}
}
// Packer Directions
// Packet Directions
const (
Inbound = true
Outbound = false

View file

@ -3,33 +3,60 @@
baseDir="$( cd "$(dirname "$0")" && pwd )"
cd "$baseDir"
# first check what will be built
COL_OFF="\033[0m"
COL_BOLD="\033[01;01m"
COL_RED="\033[31m"
COL_GREEN="\033[32m"
COL_YELLOW="\033[33m"
function packAll() {
for i in ./cmds/* ; do
if [ -e $i/pack ]; then
$i/pack $1
fi
done
function safe_execute {
echo -e "\n[....] $*"
$*
if [[ $? -eq 0 ]]; then
echo -e "[${COL_GREEN} OK ${COL_OFF}] $*"
else
echo -e "[${COL_RED}FAIL${COL_OFF}] $*" >/dev/stderr
echo -e "[${COL_RED}CRIT${COL_OFF}] ABORTING..." >/dev/stderr
exit 1
fi
}
echo ""
echo "pack list:"
echo ""
function check {
./cmds/portmaster-core/pack check
./cmds/portmaster-start/pack check
}
packAll check
function build {
safe_execute ./cmds/portmaster-core/pack build
safe_execute ./cmds/portmaster-start/pack build
}
# confirm
function reset {
./cmds/portmaster-core/pack reset
./cmds/portmaster-start/pack reset
}
echo ""
read -p "press [Enter] to start packing" x
echo ""
# build
set -e
packAll build
echo ""
echo "finished packing."
echo ""
case $1 in
"check" )
check
;;
"build" )
build
;;
"reset" )
reset
;;
* )
echo ""
echo "build list:"
echo ""
check
echo ""
read -p "press [Enter] to start building" x
echo ""
build
echo ""
echo "finished building."
echo ""
;;
esac

View file

@ -14,13 +14,16 @@ func registerConfiguration() error {
// Enable Process Detection
// This should always be enabled. Provided as an option to disable in case there are severe problems on a system, or for debugging.
err := config.Register(&config.Option{
Name: "Enable Process Detection",
Name: "Process Detection",
Key: CfgOptionEnableProcessDetectionKey,
Description: "This option enables the attribution of network traffic to processes. This should be always enabled, and effectively disables app profiles if disabled.",
Order: 144,
Description: "This option enables the attribution of network traffic to processes. This should always be enabled, and effectively disables app profiles if disabled.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: 528,
config.CategoryAnnotation: "Development",
},
})
if err != nil {
return err

View file

@ -66,7 +66,7 @@ func (p *Process) Save() {
}
if dbControllerFlag.IsSet() {
go dbController.PushUpdate(p)
dbController.PushUpdate(p)
}
}
@ -83,7 +83,7 @@ func (p *Process) Delete() {
// propagate delete
p.Meta().Delete()
if dbControllerFlag.IsSet() {
go dbController.PushUpdate(p)
dbController.PushUpdate(p)
}
// TODO: maybe mark the assigned profiles as no longer needed?
@ -106,32 +106,34 @@ func CleanProcessStorage(activePIDs map[int]struct{}) {
// clean primary processes
for _, p := range processesCopy {
p.Lock()
// The PID of a process does not change.
_, active := activePIDs[p.Pid]
switch {
case p.Pid == UnidentifiedProcessID:
// internal
case p.Pid == SystemProcessID:
// internal
case active:
// process in system process table or recently seen on the network
default:
// delete now or soon
switch {
case p.LastSeen == 0:
// add last
p.LastSeen = time.Now().Unix()
case p.LastSeen > threshold:
// within keep period
default:
// delete now
log.Tracef("process.clean: deleted %s", p.DatabaseKey())
go p.Delete()
}
// Check if this is a special process.
if p.Pid == UnidentifiedProcessID || p.Pid == SystemProcessID {
p.profile.MarkStillActive()
continue
}
p.Unlock()
// Check if process is active.
_, active := activePIDs[p.Pid]
if active {
p.profile.MarkStillActive()
continue
}
// Process is inactive, start deletion process
lastSeen := p.GetLastSeen()
switch {
case lastSeen == 0:
// add last seen timestamp
p.SetLastSeen(time.Now().Unix())
case lastSeen > threshold:
// within keep period
default:
// delete now
p.Delete()
log.Tracef("process: cleaned %s", p.DatabaseKey())
}
}
}

View file

@ -30,10 +30,14 @@ func GetProcessByConnection(ctx context.Context, pktInfo *packet.Info) (process
return nil, connInbound, err
}
err = process.GetProfile(ctx)
changed, err := process.GetProfile(ctx)
if err != nil {
log.Tracer(ctx).Errorf("process: failed to get profile for process %s: %s", process, err)
}
if changed {
process.Save()
}
return process, connInbound, nil
}

View file

@ -30,40 +30,56 @@ type Process struct {
record.Base
sync.Mutex
// Constant attributes.
Name string
UserID int
UserName string
UserHome string
Pid int
ParentPid int
Path string
ExecName string
Cwd string
CmdLine string
FirstArg string
ExecName string
ExecHashes map[string]string
// ExecOwner ...
// ExecSignature ...
LocalProfileKey string
profile *profile.LayeredProfile
Name string
Icon string
// Icon is a path to the icon and is either prefixed "f:" for filepath, "d:" for database cache path or "c:"/"a:" for the icon key to fetch it from a company / authoritative node and cache it in its own cache.
// Mutable attributes.
FirstSeen int64
LastSeen int64
Virtual bool // This process is either merged into another process or is not needed.
Error string // Cache errors
Virtual bool // This process is either merged into another process or is not needed.
Error string // Cache errors
ExecHashes map[string]string
}
// Profile returns the assigned layered profile.
func (p *Process) Profile() *profile.LayeredProfile {
if p == nil {
return nil
}
return p.profile
}
// GetLastSeen returns the unix timestamp when the process was last seen.
func (p *Process) GetLastSeen() int64 {
p.Lock()
defer p.Unlock()
return p.profile
return p.LastSeen
}
// SetLastSeen sets the unix timestamp when the process was last seen.
func (p *Process) SetLastSeen(lastSeen int64) {
p.Lock()
defer p.Unlock()
p.LastSeen = lastSeen
}
// Strings returns a string representation of process.
@ -72,8 +88,6 @@ func (p *Process) String() string {
return "?"
}
p.Lock()
defer p.Unlock()
return fmt.Sprintf("%s:%s:%d", p.UserName, p.Path, p.Pid)
}
@ -218,165 +232,83 @@ func loadProcess(ctx context.Context, pid int) (*Process, error) {
defer markRequestFinished()
}
// create new process
// Create a new process object.
new := &Process{
Pid: pid,
Virtual: true, // caller must decide to actually use the process - we need to save now.
FirstSeen: time.Now().Unix(),
}
switch {
case new.IsKernel():
new.UserName = "Kernel"
new.Name = "Operating System"
default:
pInfo, err := processInfo.NewProcess(int32(pid))
if err != nil {
return nil, err
}
// UID
// net yet implemented for windows
if runtime.GOOS == "linux" {
var uids []int32
uids, err = pInfo.Uids()
if err != nil {
return nil, fmt.Errorf("failed to get UID for p%d: %s", pid, err)
}
new.UserID = int(uids[0])
}
// Username
new.UserName, err = pInfo.Username()
if err != nil {
return nil, fmt.Errorf("process: failed to get Username for p%d: %s", pid, err)
}
// TODO: User Home
// new.UserHome, err =
// PPID
ppid, err := pInfo.Ppid()
if err != nil {
return nil, fmt.Errorf("failed to get PPID for p%d: %s", pid, err)
}
new.ParentPid = int(ppid)
// Path
new.Path, err = pInfo.Exe()
if err != nil {
return nil, fmt.Errorf("failed to get Path for p%d: %s", pid, err)
}
// remove linux " (deleted)" suffix for deleted files
if onLinux {
new.Path = strings.TrimSuffix(new.Path, " (deleted)")
}
// Executable Name
_, new.ExecName = filepath.Split(new.Path)
// Current working directory
// net yet implemented for windows
// new.Cwd, err = pInfo.Cwd()
// if err != nil {
// log.Warningf("process: failed to get Cwd: %s", err)
// }
// Command line arguments
new.CmdLine, err = pInfo.Cmdline()
if err != nil {
return nil, fmt.Errorf("failed to get Cmdline for p%d: %s", pid, err)
}
// Name
new.Name, err = pInfo.Name()
if err != nil {
return nil, fmt.Errorf("failed to get Name for p%d: %s", pid, err)
}
if new.Name == "" {
new.Name = new.ExecName
}
// OS specifics
new.specialOSInit()
// TODO: App Icon
// new.Icon, err =
// get Profile
// processPath := new.Path
// var applyProfile *profiles.Profile
// iterations := 0
// for applyProfile == nil {
//
// iterations++
// if iterations > 10 {
// log.Warningf("process: got into loop while getting profile for %s", new)
// break
// }
//
// applyProfile, err = profiles.GetActiveProfileByPath(processPath)
// if err == database.ErrNotFound {
// applyProfile, err = profiles.FindProfileByPath(processPath, new.UserHome)
// }
// if err != nil {
// log.Warningf("process: could not get profile for %s: %s", new, err)
// } else if applyProfile == nil {
// log.Warningf("process: no default profile found for %s", new)
// } else {
//
// // TODO: there is a lot of undefined behaviour if chaining framework profiles
//
// // process framework
// if applyProfile.Framework != nil {
// if applyProfile.Framework.FindParent > 0 {
// var ppid int32
// for i := uint8(1); i < applyProfile.Framework.FindParent; i++ {
// parent, err := pInfo.Parent()
// if err != nil {
// return nil, err
// }
// ppid = parent.Pid
// }
// if applyProfile.Framework.MergeWithParent {
// return GetOrFindProcess(int(ppid))
// }
// // processPath, err = os.Readlink(fmt.Sprintf("/proc/%d/exe", pid))
// // if err != nil {
// // return nil, fmt.Errorf("could not read /proc/%d/exe: %s", pid, err)
// // }
// continue
// }
//
// newCommand, err := applyProfile.Framework.GetNewPath(new.CmdLine, new.Cwd)
// if err != nil {
// return nil, err
// }
//
// // assign
// new.CmdLine = newCommand
// new.Path = strings.SplitN(newCommand, " ", 2)[0]
// processPath = new.Path
//
// // make sure we loop
// applyProfile = nil
// continue
// }
//
// // apply profile to process
// log.Debugf("process: applied profile to %s: %s", new, applyProfile)
// new.Profile = applyProfile
// new.ProfileKey = applyProfile.GetKey().String()
//
// // update Profile with Process icon if Profile does not have one
// if !new.Profile.Default && new.Icon != "" && new.Profile.Icon == "" {
// new.Profile.Icon = new.Icon
// new.Profile.Save()
// }
// }
// }
// Get process information from the system.
pInfo, err := processInfo.NewProcess(int32(pid))
if err != nil {
return nil, err
}
// UID
// not yet implemented for Windows
if runtime.GOOS == "linux" {
var uids []int32
uids, err = pInfo.Uids()
if err != nil {
return nil, fmt.Errorf("failed to get UID for p%d: %s", pid, err)
}
new.UserID = int(uids[0])
}
// Username
new.UserName, err = pInfo.Username()
if err != nil {
return nil, fmt.Errorf("process: failed to get Username for p%d: %s", pid, err)
}
// TODO: User Home
// new.UserHome, err =
// PPID
ppid, err := pInfo.Ppid()
if err != nil {
return nil, fmt.Errorf("failed to get PPID for p%d: %s", pid, err)
}
new.ParentPid = int(ppid)
// Path
new.Path, err = pInfo.Exe()
if err != nil {
return nil, fmt.Errorf("failed to get Path for p%d: %s", pid, err)
}
// remove linux " (deleted)" suffix for deleted files
if onLinux {
new.Path = strings.TrimSuffix(new.Path, " (deleted)")
}
// Executable Name
_, new.ExecName = filepath.Split(new.Path)
// Current working directory
// not yet implemented for Windows
// new.Cwd, err = pInfo.Cwd()
// if err != nil {
// log.Warningf("process: failed to get Cwd: %s", err)
// }
// Command line arguments
new.CmdLine, err = pInfo.Cmdline()
if err != nil {
return nil, fmt.Errorf("failed to get Cmdline for p%d: %s", pid, err)
}
// Name
new.Name, err = pInfo.Name()
if err != nil {
return nil, fmt.Errorf("failed to get Name for p%d: %s", pid, err)
}
if new.Name == "" {
new.Name = new.ExecName
}
// OS specifics
new.specialOSInit()
new.Save()
return new, nil
}

View file

@ -2,10 +2,8 @@
package process
// IsKernel returns whether the process is the Kernel.
func (p *Process) IsKernel() bool {
return p.Pid == 0
}
// SystemProcessID is the PID of the System/Kernel itself.
const SystemProcessID = 0
// specialOSInit does special OS specific Process initialization.
func (p *Process) specialOSInit() {

View file

@ -1,9 +1,7 @@
package process
// IsKernel returns whether the process is the Kernel.
func (p *Process) IsKernel() bool {
return p.Pid == 0
}
// SystemProcessID is the PID of the System/Kernel itself.
const SystemProcessID = 0
// specialOSInit does special OS specific Process initialization.
func (p *Process) specialOSInit() {

View file

@ -7,10 +7,8 @@ import (
"github.com/safing/portbase/utils/osdetail"
)
// IsKernel returns whether the process is the Kernel.
func (p *Process) IsKernel() bool {
return p.Pid == 4
}
// SystemProcessID is the PID of the System/Kernel itself.
const SystemProcessID = 4
// specialOSInit does special OS specific Process initialization.
func (p *Process) specialOSInit() {

View file

@ -8,35 +8,51 @@ import (
)
// GetProfile finds and assigns a profile set to the process.
func (p *Process) GetProfile(ctx context.Context) error {
func (p *Process) GetProfile(ctx context.Context) (changed bool, err error) {
p.Lock()
defer p.Unlock()
// only find profiles if not already done.
if p.profile != nil {
log.Tracer(ctx).Trace("process: profile already loaded")
// mark profile as used
// Mark profile as used.
p.profile.MarkUsed()
return nil
return false, nil
}
log.Tracer(ctx).Trace("process: loading profile")
// get profile
localProfile, new, err := profile.FindOrCreateLocalProfileByPath(p.Path)
// Check if we need a special profile.
profileID := ""
switch p.Pid {
case UnidentifiedProcessID:
profileID = profile.UnidentifiedProfileID
case SystemProcessID:
profileID = profile.SystemProfileID
}
// Get the (linked) local profile.
localProfile, err := profile.GetProfile(profile.SourceLocal, profileID, p.Path)
if err != nil {
return err
}
// add more information if new
if new {
localProfile.Name = p.ExecName
return false, err
}
// mark profile as used
localProfile.MarkUsed()
// Update metadata of profile.
metadataUpdated := localProfile.UpdateMetadata(p.Name)
// Mark profile as used.
profileChanged := localProfile.MarkUsed()
// Save the profile if we changed something.
if metadataUpdated || profileChanged {
err := localProfile.Save()
if err != nil {
log.Warningf("process: failed to save profile %s: %s", localProfile.ScopedID(), err)
}
}
// Assign profile to process.
p.LocalProfileKey = localProfile.Key()
p.profile = profile.NewLayeredProfile(localProfile)
p.profile = localProfile.LayeredProfile()
go p.Save()
return nil
return true, nil
}
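A brief usage sketch of the new return value; the caller below is hypothetical and only illustrative, it is not part of this changeset:
func ensureProfile(ctx context.Context, p *Process) error {
	changed, err := p.GetProfile(ctx)
	if err != nil {
		return err
	}
	if changed {
		// React to a newly assigned profile, e.g. for tracing.
		log.Tracer(ctx).Trace("process: profile newly assigned")
	}
	return nil
}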

View file

@ -2,17 +2,16 @@ package process
import (
"context"
"strconv"
"time"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/profile"
"golang.org/x/sync/singleflight"
)
// Special Process IDs
const (
UnidentifiedProcessID = -1
SystemProcessID = 0
)
// UnidentifiedProcessID is the PID used for anything that could not be
// attributed to a PID for any reason.
const UnidentifiedProcessID = -1
var (
// unidentifiedProcess is used when a process cannot be found.
@ -32,53 +31,41 @@ var (
ParentPid: SystemProcessID,
Name: "Operating System",
}
getSpecialProcessSingleInflight singleflight.Group
)
// GetUnidentifiedProcess returns the special process assigned to unidentified processes.
func GetUnidentifiedProcess(ctx context.Context) *Process {
return getSpecialProcess(ctx, UnidentifiedProcessID, unidentifiedProcess, profile.GetUnidentifiedProfile)
return getSpecialProcess(ctx, unidentifiedProcess)
}
// GetSystemProcess returns the special process used for the Kernel.
func GetSystemProcess(ctx context.Context) *Process {
return getSpecialProcess(ctx, SystemProcessID, systemProcess, profile.GetSystemProfile)
return getSpecialProcess(ctx, systemProcess)
}
func getSpecialProcess(ctx context.Context, pid int, template *Process, getProfile func() *profile.Profile) *Process {
// check storage
p, ok := GetProcessFromStorage(pid)
if ok {
return p
}
func getSpecialProcess(ctx context.Context, template *Process) *Process {
p, _, _ := getSpecialProcessSingleInflight.Do(strconv.Itoa(template.Pid), func() (interface{}, error) {
// Check if we have already loaded the special process.
process, ok := GetProcessFromStorage(template.Pid)
if ok {
return process, nil
}
// assign template
p = template
// Create new process from template
process = template
process.FirstSeen = time.Now().Unix()
p.Lock()
defer p.Unlock()
// Get profile.
_, err := process.GetProfile(ctx)
if err != nil {
log.Tracer(ctx).Errorf("process: failed to get profile for process %s: %s", process, err)
}
if p.FirstSeen == 0 {
p.FirstSeen = time.Now().Unix()
}
// only find profiles if not already done.
if p.profile != nil {
log.Tracer(ctx).Trace("process: special profile already loaded")
// mark profile as used
p.profile.MarkUsed()
return p
}
log.Tracer(ctx).Trace("process: loading special profile")
// get profile
localProfile := getProfile()
// mark profile as used
localProfile.MarkUsed()
p.LocalProfileKey = localProfile.Key()
p.profile = profile.NewLayeredProfile(localProfile)
go p.Save()
return p
// Save process to storage.
process.Save()
return process, nil
})
return p.(*Process)
}
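For reference, the singleflight group used above deduplicates concurrent lookups for the same key. A minimal, standalone sketch (the expensiveLookup helper is hypothetical):
package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

var lookups singleflight.Group

// expensiveLookup stands in for loading a process from the OS.
func expensiveLookup(pid int) (string, error) {
	return fmt.Sprintf("proc-%d", pid), nil
}

func getShared(pid int) (string, error) {
	// All concurrent callers with the same key wait for a single execution
	// of the function and receive the same result.
	v, err, _ := lookups.Do(fmt.Sprint(pid), func() (interface{}, error) {
		return expensiveLookup(pid)
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func main() {
	s, _ := getShared(1)
	fmt.Println(s)
}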

View file

@ -7,46 +7,69 @@ import (
)
const (
activeProfileCleanerTickDuration = 10 * time.Minute
activeProfileCleanerThreshold = 1 * time.Hour
activeProfileCleanerTickDuration = 1 * time.Minute
activeProfileCleanerThreshold = 5 * time.Minute
)
var (
// TODO: periodically clean up inactive profiles
activeProfiles = make(map[string]*Profile)
activeProfilesLock sync.RWMutex
)
// getActiveProfile returns a cached copy of an active profile and nil if it isn't found.
// getActiveProfile returns a cached copy of an active profile and
// nil if it isn't found.
func getActiveProfile(scopedID string) *Profile {
activeProfilesLock.Lock()
defer activeProfilesLock.Unlock()
activeProfilesLock.RLock()
defer activeProfilesLock.RUnlock()
profile, ok := activeProfiles[scopedID]
if ok {
return profile
return activeProfiles[scopedID]
}
// getAllActiveProfiles returns a slice of active profiles.
func getAllActiveProfiles() []*Profile {
activeProfilesLock.RLock()
defer activeProfilesLock.RUnlock()
result := make([]*Profile, 0, len(activeProfiles))
for _, p := range activeProfiles {
result = append(result, p)
}
return result
}
// findActiveProfile searches for an active local profile using the linked path.
func findActiveProfile(linkedPath string) *Profile {
activeProfilesLock.RLock()
defer activeProfilesLock.RUnlock()
for _, activeProfile := range activeProfiles {
if activeProfile.LinkedPath == linkedPath {
activeProfile.MarkStillActive()
return activeProfile
}
}
return nil
}
// markProfileActive registers a profile as active.
func markProfileActive(profile *Profile) {
// addActiveProfile registers an active profile.
func addActiveProfile(profile *Profile) {
activeProfilesLock.Lock()
defer activeProfilesLock.Unlock()
profile.MarkStillActive()
activeProfiles[profile.ScopedID()] = profile
}
// markActiveProfileAsOutdated marks an active profile as outdated, so that it will be refetched from the database.
// markActiveProfileAsOutdated marks an active profile as outdated.
func markActiveProfileAsOutdated(scopedID string) {
activeProfilesLock.Lock()
defer activeProfilesLock.Unlock()
activeProfilesLock.RLock()
defer activeProfilesLock.RUnlock()
profile, ok := activeProfiles[scopedID]
if ok {
profile.outdated.Set()
delete(activeProfiles, scopedID)
}
}
@ -55,16 +78,12 @@ func cleanActiveProfiles(ctx context.Context) error {
select {
case <-time.After(activeProfileCleanerTickDuration):
threshold := time.Now().Add(-activeProfileCleanerThreshold)
threshold := time.Now().Add(-activeProfileCleanerThreshold).Unix()
activeProfilesLock.Lock()
for id, profile := range activeProfiles {
// get last used
profile.Lock()
lastUsed := profile.lastUsed
profile.Unlock()
// remove if not used for a while
if lastUsed.Before(threshold) {
// Remove profile if it hasn't been used for a while.
if profile.LastActive() < threshold {
profile.outdated.Set()
delete(activeProfiles, id)
}

View file

@ -4,9 +4,10 @@ import (
"context"
"fmt"
"sync"
"time"
"github.com/safing/portbase/config"
"github.com/safing/portbase/modules"
"github.com/safing/portmaster/intel/filterlists"
"github.com/safing/portmaster/profile/endpoints"
)
@ -25,11 +26,15 @@ func registerConfigUpdater() error {
"config",
"config change",
"update global config profile",
updateGlobalConfigProfile,
func(ctx context.Context, _ interface{}) error {
return updateGlobalConfigProfile(ctx, nil)
},
)
}
func updateGlobalConfigProfile(ctx context.Context, data interface{}) error {
const globalConfigProfileErrorID = "profile:global-profile-error"
func updateGlobalConfigProfile(ctx context.Context, task *modules.Task) error {
cfgLock.Lock()
defer cfgLock.Unlock()
@ -71,13 +76,9 @@ func updateGlobalConfigProfile(ctx context.Context, data interface{}) error {
}
// build global profile for reference
profile := &Profile{
ID: "global-config",
Source: SourceSpecial,
Name: "Global Configuration",
Config: make(map[string]interface{}),
internalSave: true,
}
profile := New(SourceSpecial, "global-config", "")
profile.Name = "Global Configuration"
profile.Internal = true
newConfig := make(map[string]interface{})
// fill profile config options
@ -104,5 +105,27 @@ func updateGlobalConfigProfile(ctx context.Context, data interface{}) error {
lastErr = err
}
// If there was any error, try again later until it succeeds.
if lastErr == nil {
module.Resolve(globalConfigProfileErrorID)
} else {
// Create task after first failure.
if task == nil {
task = module.NewTask(
"retry updating global config profile",
updateGlobalConfigProfile,
)
}
// Schedule task.
task.Schedule(time.Now().Add(15 * time.Second))
// Add module warning to inform user.
module.Warning(
globalConfigProfileErrorID,
fmt.Sprintf("Failed to process global settings: %s", err),
)
}
return lastErr
}

View file

@ -1,7 +1,10 @@
package profile
import (
"strings"
"github.com/safing/portbase/config"
"github.com/safing/portmaster/profile/endpoints"
"github.com/safing/portmaster/status"
)
@ -12,13 +15,18 @@ var (
cfgIntOptions = make(map[string]config.IntOption)
cfgBoolOptions = make(map[string]config.BoolOption)
// General
// Enable Filter Order = 0
CfgOptionDefaultActionKey = "filter/defaultAction"
cfgOptionDefaultAction config.StringOption
cfgOptionDefaultActionOrder = 1
// Prompt Timeout Order = 2
// Prompt Desktop Notifications Order = 2
// Prompt Timeout Order = 3
// Network Scopes
CfgOptionBlockScopeInternetKey = "filter/blockInternet"
cfgOptionBlockScopeInternet config.IntOption // security level option
@ -32,6 +40,8 @@ var (
cfgOptionBlockScopeLocal config.IntOption // security level option
cfgOptionBlockScopeLocalOrder = 18
// Connection Types
CfgOptionBlockP2PKey = "filter/blockP2P"
cfgOptionBlockP2P config.IntOption // security level option
cfgOptionBlockP2POrder = 19
@ -40,6 +50,8 @@ var (
cfgOptionBlockInbound config.IntOption // security level option
cfgOptionBlockInboundOrder = 20
// Rules
CfgOptionEndpointsKey = "filter/endpoints"
cfgOptionEndpoints config.StringArrayOption
cfgOptionEndpointsOrder = 32
@ -48,43 +60,47 @@ var (
cfgOptionServiceEndpoints config.StringArrayOption
cfgOptionServiceEndpointsOrder = 33
CfgOptionPreventBypassingKey = "filter/preventBypassing"
cfgOptionPreventBypassing config.IntOption // security level option
cfgOptionPreventBypassingOrder = 48
CfgOptionFilterListsKey = "filter/lists"
cfgOptionFilterLists config.StringArrayOption
cfgOptionFilterListsOrder = 64
cfgOptionFilterListsOrder = 34
CfgOptionFilterSubDomainsKey = "filter/includeSubdomains"
cfgOptionFilterSubDomains config.IntOption // security level option
cfgOptionFilterSubDomainsOrder = 65
cfgOptionFilterSubDomainsOrder = 35
// DNS Filtering
CfgOptionFilterCNAMEKey = "filter/includeCNAMEs"
cfgOptionFilterCNAME config.IntOption // security level option
cfgOptionFilterCNAMEOrder = 66
CfgOptionDisableAutoPermitKey = "filter/disableAutoPermit"
cfgOptionDisableAutoPermit config.IntOption // security level option
cfgOptionDisableAutoPermitOrder = 80
CfgOptionEnforceSPNKey = "filter/enforceSPN"
cfgOptionEnforceSPN config.IntOption // security level option
cfgOptionEnforceSPNOrder = 96
cfgOptionFilterCNAMEOrder = 48
CfgOptionRemoveOutOfScopeDNSKey = "filter/removeOutOfScopeDNS"
cfgOptionRemoveOutOfScopeDNS config.IntOption // security level option
cfgOptionRemoveOutOfScopeDNSOrder = 112
cfgOptionRemoveOutOfScopeDNSOrder = 49
CfgOptionRemoveBlockedDNSKey = "filter/removeBlockedDNS"
cfgOptionRemoveBlockedDNS config.IntOption // security level option
cfgOptionRemoveBlockedDNSOrder = 113
cfgOptionRemoveBlockedDNSOrder = 50
CfgOptionDomainHeuristicsKey = "filter/domainHeuristics"
cfgOptionDomainHeuristics config.IntOption // security level option
cfgOptionDomainHeuristicsOrder = 114
cfgOptionDomainHeuristicsOrder = 51
// Permanent Verdicts Order = 128
// Advanced
CfgOptionPreventBypassingKey = "filter/preventBypassing"
cfgOptionPreventBypassing config.IntOption // security level option
cfgOptionPreventBypassingOrder = 64
CfgOptionDisableAutoPermitKey = "filter/disableAutoPermit"
cfgOptionDisableAutoPermit config.IntOption // security level option
cfgOptionDisableAutoPermitOrder = 65
// Permanent Verdicts Order = 96
CfgOptionUseSPNKey = "spn/useSPN"
cfgOptionUseSPN config.BoolOption
cfgOptionUseSPNOrder = 129
)
func registerConfiguration() error {
@ -93,15 +109,33 @@ func registerConfiguration() error {
// ask - ask mode: if no verdict is found, the user is consulted
// block - allowlist mode: everything is blocked unless permitted
err := config.Register(&config.Option{
Name: "Default Filter Action",
Key: CfgOptionDefaultActionKey,
Description: `The default filter action when nothing else permits or blocks a connection.`,
Order: cfgOptionDefaultActionOrder,
OptType: config.OptTypeString,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: "permit",
ExternalOptType: "string list",
ValidationRegex: "^(permit|ask|block)$",
Name: "Default Action",
Key: CfgOptionDefaultActionKey,
Description: `The default action when nothing else permits or blocks an outgoing connection. Incoming connections are always blocked by default.`,
OptType: config.OptTypeString,
DefaultValue: "permit",
Annotations: config.Annotations{
config.DisplayHintAnnotation: config.DisplayHintOneOf,
config.DisplayOrderAnnotation: cfgOptionDefaultActionOrder,
config.CategoryAnnotation: "General",
},
PossibleValues: []config.PossibleValue{
{
Name: "Permit",
Value: "permit",
Description: "Permit all connections",
},
{
Name: "Block",
Value: "block",
Description: "Block all connections",
},
{
Name: "Prompt",
Value: "ask",
Description: "Prompt for decisions",
},
},
})
if err != nil {
return err
@ -111,14 +145,19 @@ func registerConfiguration() error {
// Disable Auto Permit
err = config.Register(&config.Option{
Name: "Disable Auto Permit",
Key: CfgOptionDisableAutoPermitKey,
Description: "Auto Permit searches for a relation between an app and the destionation of a connection - if there is a correlation, the connection will be permitted. This setting is negated in order to provide a streamlined user experience, where higher settings are better.",
Order: cfgOptionDisableAutoPermitOrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)$",
// TODO: Check how to best handle negation here.
Name: "Disable Auto Permit",
Key: CfgOptionDisableAutoPermitKey,
Description: `Auto Permit searches for a relation between an app and the destination of a connection - if there is a correlation, the connection will be permitted.`,
OptType: config.OptTypeInt,
ReleaseLevel: config.ReleaseLevelBeta,
DefaultValue: status.SecurityLevelsAll,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionDisableAutoPermitOrder,
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.CategoryAnnotation: "Advanced",
},
PossibleValues: status.SecurityLevelValues,
})
if err != nil {
return err
@ -126,42 +165,39 @@ func registerConfiguration() error {
cfgOptionDisableAutoPermit = config.Concurrent.GetAsInt(CfgOptionDisableAutoPermitKey, int64(status.SecurityLevelsAll))
cfgIntOptions[CfgOptionDisableAutoPermitKey] = cfgOptionDisableAutoPermit
filterListHelp := `Format:
Permission:
"+": permit
"-": block
Host Matching:
IP, CIDR, Country Code, ASN, Filterlist, Network Scope, "*" for any
Domains:
"example.com": exact match
".example.com": exact match + subdomains
"*xample.com": prefix wildcard
"example.*": suffix wildcard
"*example*": prefix and suffix wildcard
Protocol and Port Matching (optional):
<protocol>/<port>
rulesHelp := strings.ReplaceAll(`Rules are checked from top to bottom, stopping after the first match. They can match:
Examples:
+ .example.com */HTTP
- .example.com
+ 192.168.0.1
+ 192.168.1.1/24
+ Localhost,LAN
- AS123456789
- L:MAL
+ AT
- *`
- By address: "192.168.0.1"
- By network: "192.168.0.1/24"
- By domain:
- Matching a distinct domain: "example.com"
- Matching a domain with subdomains: ".example.com"
- Matching with a wildcard prefix: "*xample.com"
- Matching with a wildcard suffix: "example.*"
- Matching domains containing text: "*example*"
- By country (based on IP): "US"
- By filter list - use the filterlist ID prefixed with "L:": "L:MAL"
- Match anything: "*"
Additionally, you may supply a protocol and port just behind that using numbers ("6/80") or names ("TCP/HTTP").
In this case the rule is only matched if the protocol and port also match.
Example: "192.168.0.1 TCP/HTTP"
`, `"`, "`")
// Endpoint Filter List
err = config.Register(&config.Option{
Name: "Endpoint Filter List",
Key: CfgOptionEndpointsKey,
Description: "Filter outgoing connections by matching the destination endpoint. Network Scope restrictions still apply.",
Help: filterListHelp,
Order: cfgOptionEndpointsOrder,
OptType: config.OptTypeStringArray,
DefaultValue: []string{},
ExternalOptType: "endpoint list",
Name: "Outgoing Rules",
Key: CfgOptionEndpointsKey,
Description: "Rules that apply to outgoing network connections. Cannot overrule Network Scopes and Connection Types (see above).",
Help: rulesHelp,
OptType: config.OptTypeStringArray,
DefaultValue: []string{},
Annotations: config.Annotations{
config.StackableAnnotation: true,
config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
config.DisplayOrderAnnotation: cfgOptionEndpointsOrder,
config.CategoryAnnotation: "Rules",
},
ValidationRegex: `^(\+|\-) [A-z0-9\.:\-*/]+( [A-z0-9/]+)?$`,
})
if err != nil {
@ -172,14 +208,36 @@ Examples:
// Service Endpoint Filter List
err = config.Register(&config.Option{
Name: "Service Endpoint Filter List",
Key: CfgOptionServiceEndpointsKey,
Description: "Filter incoming connections by matching the source endpoint. Network Scope restrictions and the inbound permission still apply. Also not that the implicit default action of this list is to always block.",
Help: filterListHelp,
Order: cfgOptionServiceEndpointsOrder,
OptType: config.OptTypeStringArray,
DefaultValue: []string{"+ Localhost"},
ExternalOptType: "endpoint list",
Name: "Incoming Rules",
Key: CfgOptionServiceEndpointsKey,
Description: "Rules that apply to incoming network connections. Cannot overrule Network Scopes and Connection Types (see above). Also note that the default action for incoming connections is to always block.",
Help: rulesHelp,
OptType: config.OptTypeStringArray,
DefaultValue: []string{"+ Localhost"},
ExpertiseLevel: config.ExpertiseLevelExpert,
Annotations: config.Annotations{
config.StackableAnnotation: true,
config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
config.DisplayOrderAnnotation: cfgOptionServiceEndpointsOrder,
config.CategoryAnnotation: "Rules",
config.QuickSettingsAnnotation: []config.QuickSetting{
{
Name: "SSH",
Action: config.QuickMergeTop,
Value: []string{"+ * tcp/22"},
},
{
Name: "HTTP/s",
Action: config.QuickMergeTop,
Value: []string{"+ * tcp/80", "+ * tcp/443"},
},
{
Name: "RDP",
Action: config.QuickMergeTop,
Value: []string{"+ * */3389"},
},
},
},
ValidationRegex: `^(\+|\-) [A-z0-9\.:\-*/]+( [A-z0-9/]+)?$`,
})
if err != nil {
@ -188,15 +246,40 @@ Examples:
cfgOptionServiceEndpoints = config.Concurrent.GetAsStringArray(CfgOptionServiceEndpointsKey, []string{})
cfgStringArrayOptions[CfgOptionServiceEndpointsKey] = cfgOptionServiceEndpoints
filterListsHelp := strings.ReplaceAll(`Filter lists contain domains and IP addresses that are known to be used adversarially. The data is collected from many public sources and put into the following categories. In order to activate a category, add its "ID" to the list.
**Ads & Trackers** - ID: "TRAC"
Services that track and profile people online, including as ads, analytics and telemetry.
**Malware** - ID: "MAL"
Services that are (ab)used for attacking devices through technical means.
**Deception** - ID: "DECEP"
Services that trick humans into thinking the service is genuine, while it is not, including phishing, fake news and fraud.
**Bad Stuff (Mixed)** - ID: "BAD"
Miscellaneous services that are believed to be harmful to security or privacy, but their exact use is unknown, not categorized, or lists have mixed categories.
**NSFW** - ID: "NSFW"
Services that are generally not accepted in work environments, including pornography, violence and gambling.
The lists are automatically updated every hour using incremental updates.
[See here](https://github.com/safing/intel-data) for more detail about these lists, their sources and how to help to improve them.
`, `"`, "`")
// Filter list IDs
err = config.Register(&config.Option{
Name: "Filter List",
Key: CfgOptionFilterListsKey,
Description: "Filter connections by matching the endpoint against configured filterlists",
Order: cfgOptionFilterListsOrder,
OptType: config.OptTypeStringArray,
DefaultValue: []string{"TRAC", "MAL"},
ExternalOptType: "filter list",
Name: "Filter Lists",
Key: CfgOptionFilterListsKey,
Description: "Block connections that match enabled filter lists.",
Help: filterListsHelp,
OptType: config.OptTypeStringArray,
DefaultValue: []string{"TRAC", "MAL"},
Annotations: config.Annotations{
config.DisplayHintAnnotation: "filter list",
config.DisplayOrderAnnotation: cfgOptionFilterListsOrder,
config.CategoryAnnotation: "Rules",
},
ValidationRegex: `^[a-zA-Z0-9\-]+$`,
})
if err != nil {
@ -207,15 +290,18 @@ Examples:
// Include CNAMEs
err = config.Register(&config.Option{
Name: "Filter CNAMEs",
Key: CfgOptionFilterCNAMEKey,
Description: "Also filter requests where a CNAME would be blocked",
Order: cfgOptionFilterCNAMEOrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)$",
ExpertiseLevel: config.ExpertiseLevelExpert,
Name: "Block Domain Aliases",
Key: CfgOptionFilterCNAMEKey,
Description: "Block a domain if a resolved CNAME (alias) is blocked by a rule or filter list.",
OptType: config.OptTypeInt,
DefaultValue: status.SecurityLevelsAll,
ExpertiseLevel: config.ExpertiseLevelExpert,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionFilterCNAMEOrder,
config.CategoryAnnotation: "DNS Filtering",
},
PossibleValues: status.SecurityLevelValues,
})
if err != nil {
return err
@ -225,14 +311,17 @@ Examples:
// Include subdomains
err = config.Register(&config.Option{
Name: "Filter Subdomains",
Key: CfgOptionFilterSubDomainsKey,
Description: "Also filter a domain if any parent domain is blocked by a filter list",
Order: cfgOptionFilterSubDomainsOrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)$",
Name: "Block Subdomains of Filter List Entries",
Key: CfgOptionFilterSubDomainsKey,
Description: "Additionally block all subdomains of entries in selected filter lists.",
OptType: config.OptTypeInt,
DefaultValue: status.SecurityLevelsAll,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionFilterSubDomainsOrder,
config.CategoryAnnotation: "Rules",
},
})
if err != nil {
return err
@ -242,15 +331,18 @@ Examples:
// Block Scope Local
err = config.Register(&config.Option{
Name: "Block Scope Local",
Key: CfgOptionBlockScopeLocalKey,
Description: "Block internal connections on your own device, ie. localhost.",
Order: cfgOptionBlockScopeLocalOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelOff,
ValidationRegex: "^(0|4|6|7)$",
Name: "Block Device-Local Connections",
Key: CfgOptionBlockScopeLocalKey,
Description: "Block all internal connections on your own device, ie. localhost. Is stronger than Rules (see below).",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
DefaultValue: status.SecurityLevelOff,
PossibleValues: status.AllSecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionBlockScopeLocalOrder,
config.CategoryAnnotation: "Network Scope",
},
})
if err != nil {
return err
@ -260,14 +352,17 @@ Examples:
// Block Scope LAN
err = config.Register(&config.Option{
Name: "Block Scope LAN",
Key: CfgOptionBlockScopeLANKey,
Description: "Block connections to the Local Area Network.",
Order: cfgOptionBlockScopeLANOrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsHighAndExtreme,
ValidationRegex: "^(0|4|6|7)$",
Name: "Block LAN",
Key: CfgOptionBlockScopeLANKey,
Description: "Block all connections from and to the Local Area Network. Is stronger than Rules (see below).",
OptType: config.OptTypeInt,
DefaultValue: status.SecurityLevelsHighAndExtreme,
PossibleValues: status.AllSecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionBlockScopeLANOrder,
config.CategoryAnnotation: "Network Scope",
},
})
if err != nil {
return err
@ -277,14 +372,17 @@ Examples:
// Block Scope Internet
err = config.Register(&config.Option{
Name: "Block Scope Internet",
Key: CfgOptionBlockScopeInternetKey,
Description: "Block connections to the Internet.",
Order: cfgOptionBlockScopeInternetOrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelOff,
ValidationRegex: "^(0|4|6|7)$",
Name: "Block Internet Access",
Key: CfgOptionBlockScopeInternetKey,
Description: "Block connections from and to the Internet. Is stronger than Rules (see below).",
OptType: config.OptTypeInt,
DefaultValue: status.SecurityLevelOff,
PossibleValues: status.AllSecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionBlockScopeInternetOrder,
config.CategoryAnnotation: "Network Scope",
},
})
if err != nil {
return err
@ -294,14 +392,17 @@ Examples:
// Block Peer to Peer Connections
err = config.Register(&config.Option{
Name: "Block Peer to Peer Connections",
Key: CfgOptionBlockP2PKey,
Description: "These are connections that are established directly to an IP address on the Internet without resolving a domain name via DNS first.",
Order: cfgOptionBlockP2POrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelExtreme,
ValidationRegex: "^(4|6|7)$",
Name: "Block P2P/Direct Connections",
Key: CfgOptionBlockP2PKey,
Description: "These are connections that are established directly to an IP address or peer on the Internet without resolving a domain name via DNS first. Is stronger than Rules (see below).",
OptType: config.OptTypeInt,
DefaultValue: status.SecurityLevelExtreme,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionBlockP2POrder,
config.CategoryAnnotation: "Connection Types",
},
})
if err != nil {
return err
@ -311,14 +412,17 @@ Examples:
// Block Inbound Connections
err = config.Register(&config.Option{
Name: "Block Inbound Connections",
Key: CfgOptionBlockInboundKey,
Description: "Connections initiated towards your device from the LAN or Internet. This will usually only be the case if you are running a network service or are using peer to peer software.",
Order: cfgOptionBlockInboundOrder,
OptType: config.OptTypeInt,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsHighAndExtreme,
ValidationRegex: "^(4|6|7)$",
Name: "Block Incoming Connections",
Key: CfgOptionBlockInboundKey,
Description: "Connections initiated towards your device from the LAN or Internet. This will usually only be the case if you are running a network service or are using peer to peer software. Is stronger than Rules (see below).",
OptType: config.OptTypeInt,
DefaultValue: status.SecurityLevelsHighAndExtreme,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionBlockInboundOrder,
config.CategoryAnnotation: "Connection Types",
},
})
if err != nil {
return err
@ -326,36 +430,20 @@ Examples:
cfgOptionBlockInbound = config.Concurrent.GetAsInt(CfgOptionBlockInboundKey, int64(status.SecurityLevelsHighAndExtreme))
cfgIntOptions[CfgOptionBlockInboundKey] = cfgOptionBlockInbound
// Enforce SPN
err = config.Register(&config.Option{
Name: "Enforce SPN",
Key: CfgOptionEnforceSPNKey,
Description: "This setting enforces connections to be routed over the SPN. If this is not possible for any reason, connections will be blocked.",
Order: cfgOptionEnforceSPNOrder,
OptType: config.OptTypeInt,
ReleaseLevel: config.ReleaseLevelExperimental,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelOff,
ValidationRegex: "^(0|4|6|7)$",
})
if err != nil {
return err
}
cfgOptionEnforceSPN = config.Concurrent.GetAsInt(CfgOptionEnforceSPNKey, int64(status.SecurityLevelOff))
cfgIntOptions[CfgOptionEnforceSPNKey] = cfgOptionEnforceSPN
// Filter Out-of-Scope DNS Records
err = config.Register(&config.Option{
Name: "Filter Out-of-Scope DNS Records",
Key: CfgOptionRemoveOutOfScopeDNSKey,
Description: "Filter DNS answers that are outside of the scope of the server. A server on the public Internet may not respond with a private LAN address.",
Order: cfgOptionRemoveOutOfScopeDNSOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelBeta,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)$",
Name: "Enforce Global/Private Split-View",
Key: CfgOptionRemoveOutOfScopeDNSKey,
Description: "Reject private IP addresses (RFC1918 et al.) from public DNS responses.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
DefaultValue: status.SecurityLevelsAll,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionRemoveOutOfScopeDNSOrder,
config.CategoryAnnotation: "DNS Filtering",
},
})
if err != nil {
return err
@ -365,16 +453,18 @@ Examples:
// Filter DNS Records that would be blocked
err = config.Register(&config.Option{
Name: "Filter DNS Records that would be blocked",
Key: CfgOptionRemoveBlockedDNSKey,
Description: "Pre-filter DNS answers that an application would not be allowed to connect to.",
Order: cfgOptionRemoveBlockedDNSOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelBeta,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)$",
Name: "Reject Blocked IPs",
Key: CfgOptionRemoveBlockedDNSKey,
Description: "Reject blocked IP addresses directly from the DNS response instead of handing them over to the app and blocking a resulting connection.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
DefaultValue: status.SecurityLevelsAll,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionRemoveBlockedDNSOrder,
config.CategoryAnnotation: "DNS Filtering",
},
})
if err != nil {
return err
@ -384,15 +474,18 @@ Examples:
// Domain heuristics
err = config.Register(&config.Option{
Name: "Enable Domain Heuristics",
Key: CfgOptionDomainHeuristicsKey,
Description: "Domain Heuristics checks for suspicious looking domain names and blocks them. Ths option currently targets domains generated by malware and DNS data tunnels.",
Order: cfgOptionDomainHeuristicsOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(0|4|6|7)$",
Name: "Enable Domain Heuristics",
Key: CfgOptionDomainHeuristicsKey,
Description: "Checks for suspicious domain names and blocks them. This option currently targets domain names generated by malware and DNS data exfiltration channels.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
DefaultValue: status.SecurityLevelsAll,
PossibleValues: status.AllSecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionDomainHeuristicsOrder,
config.CategoryAnnotation: "DNS Filtering",
},
})
if err != nil {
return err
@ -401,16 +494,22 @@ Examples:
// Bypass prevention
err = config.Register(&config.Option{
Name: "Prevent Bypassing",
Key: CfgOptionPreventBypassingKey,
Description: "Prevent apps from bypassing the privacy filter: Firefox by disabling DNS-over-HTTPs",
Order: cfgOptionPreventBypassingOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelBeta,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)",
Name: "Block Bypassing",
Key: CfgOptionPreventBypassingKey,
Description: `Prevent apps from bypassing the privacy filter.
Current Features:
- Disable Firefox's internal DNS-over-HTTPS resolver
- Block direct access to public DNS resolvers`,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelBeta,
DefaultValue: status.SecurityLevelsAll,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.DisplayOrderAnnotation: cfgOptionPreventBypassingOrder,
config.CategoryAnnotation: "Advanced",
},
})
if err != nil {
return err
@ -418,5 +517,23 @@ Examples:
cfgOptionPreventBypassing = config.Concurrent.GetAsInt((CfgOptionPreventBypassingKey), int64(status.SecurityLevelsAll))
cfgIntOptions[CfgOptionPreventBypassingKey] = cfgOptionPreventBypassing
// Use SPN
err = config.Register(&config.Option{
Name: "Use SPN",
Key: CfgOptionUseSPNKey,
Description: "Route connections through the Safing Privacy Network. If it is disabled or unavailable for any reason, connections will be blocked.",
OptType: config.OptTypeBool,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionUseSPNOrder,
config.CategoryAnnotation: "General",
},
})
if err != nil {
return err
}
cfgOptionUseSPN = config.Concurrent.GetAsBool(CfgOptionUseSPNKey, true)
cfgBoolOptions[CfgOptionUseSPNKey] = cfgOptionUseSPN
return nil
}

View file

@ -27,12 +27,12 @@ var (
})
)
func makeScopedID(source, id string) string {
return source + "/" + id
func makeScopedID(source profileSource, id string) string {
return string(source) + "/" + id
}
func makeProfileKey(source, id string) string {
return profilesDBPath + source + "/" + id
func makeProfileKey(source profileSource, id string) string {
return profilesDBPath + string(source) + "/" + id
}
func registerValidationDBHook() (err error) {

View file

@ -0,0 +1,24 @@
package endpoints
// DisplayHintEndpointList marks an option as an endpoint
// list option. It's meant to be used with DisplayHintAnnotation.
const DisplayHintEndpointList = "endpoint list"
// EndpointListAnnotation is the annotation identifier used in configuration
// options to hint the UI on available endpoint list types. If configured, only
// the specified set of entities is allowed to be used. The value is expected
// to be a single string or []string. If this annotation is missing, all
// values are expected to be allowed.
const EndpointListAnnotation = "safing/portmaster:ui:endpoint-list"
// Allowed values for the EndpointListAnnotation.
const (
EndpointListIP = "ip"
EndpointListAsn = "asn"
EndpointListCountry = "country"
EndpointListDomain = "domain"
EndpointListIPRange = "iprange"
EndpointListLists = "lists"
EndpointListScopes = "scopes"
EndpointListProtocolAndPorts = "protocol-port"
)
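A minimal sketch of how these annotations might be attached to a config option from a consumer package (e.g. profile); the option name and key below are hypothetical and not part of this changeset:
func registerExampleRulesOption() error {
	return config.Register(&config.Option{
		Name:         "Example Rules",       // hypothetical option
		Key:          "filter/exampleRules", // hypothetical key
		OptType:      config.OptTypeStringArray,
		DefaultValue: []string{},
		Annotations: config.Annotations{
			config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
			// Restrict the UI to IP and CIDR entry types for this option.
			endpoints.EndpointListAnnotation: []string{
				endpoints.EndpointListIP,
				endpoints.EndpointListIPRange,
			},
		},
	})
}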

View file

@ -21,9 +21,9 @@ type reason struct {
}
func (r *reason) String() string {
prefix := "endpoint in blocklist: "
prefix := "denied by rule: "
if r.Permitted {
prefix = "endpoint in allowlist: "
prefix = "permitted by rule: "
}
return prefix + r.description + " " + r.Value

View file

@ -1,55 +0,0 @@
package profile
import (
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/log"
)
// FindOrCreateLocalProfileByPath returns an existing or new profile for the given application path.
func FindOrCreateLocalProfileByPath(fullPath string) (profile *Profile, new bool, err error) {
// find local profile
it, err := profileDB.Query(
query.New(makeProfileKey(SourceLocal, "")).Where(
query.Where("LinkedPath", query.SameAs, fullPath),
),
)
if err != nil {
return nil, false, err
}
// get first result
r := <-it.Next
// cancel immediately
it.Cancel()
// return new if none was found
if r == nil {
profile = New()
profile.LinkedPath = fullPath
return profile, true, nil
}
// ensure its a profile
profile, err = EnsureProfile(r)
if err != nil {
return nil, false, err
}
// prepare config
err = profile.prepConfig()
if err != nil {
log.Warningf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
}
// parse config
err = profile.parseConfig()
if err != nil {
log.Warningf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
}
// mark active
markProfileActive(profile)
// return parsed profile
return profile, false, nil
}

204
profile/get.go Normal file
View file

@ -0,0 +1,204 @@
package profile
import (
"errors"
"os"
"strings"
"github.com/safing/portbase/database"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"golang.org/x/sync/singleflight"
)
const (
// UnidentifiedProfileID is the profile ID used for unidentified processes.
UnidentifiedProfileID = "_unidentified"
// SystemProfileID is the profile ID used for the system/kernel.
SystemProfileID = "_system"
)
var getProfileSingleInflight singleflight.Group
// GetProfile fetches a profile. This function ensures that the profile loaded
// is shared among all callers. You must always supply both the scopedID and
// linkedPath parameters whenever available.
func GetProfile(source profileSource, id, linkedPath string) ( //nolint:gocognit
profile *Profile,
err error,
) {
// Select correct key for single in flight.
singleInflightKey := linkedPath
if singleInflightKey == "" {
singleInflightKey = makeScopedID(source, id)
}
p, err, _ := getProfileSingleInflight.Do(singleInflightKey, func() (interface{}, error) {
var previousVersion *Profile
// Fetch profile depending on the available information.
switch {
case id != "":
scopedID := makeScopedID(source, id)
// Get profile via the scoped ID.
// Check if there already is an active and not outdated profile.
profile = getActiveProfile(scopedID)
if profile != nil {
profile.MarkStillActive()
if profile.outdated.IsSet() {
previousVersion = profile
} else {
return profile, nil
}
}
// Get from database.
profile, err = getProfile(scopedID)
// If we cannot find a profile, check if the request is for a special
// profile we can create.
if errors.Is(err, database.ErrNotFound) {
switch id {
case UnidentifiedProfileID:
profile = New(SourceLocal, UnidentifiedProfileID, linkedPath)
err = nil
case SystemProfileID:
profile = New(SourceLocal, SystemProfileID, linkedPath)
err = nil
}
}
case linkedPath != "":
// Search for profile via a linked path.
// Check if there already is an active and not outdated profile for
// the linked path.
profile = findActiveProfile(linkedPath)
if profile != nil {
if profile.outdated.IsSet() {
previousVersion = profile
} else {
return profile, nil
}
}
// Get from database.
profile, err = findProfile(linkedPath)
default:
return nil, errors.New("cannot fetch profile without ID or path")
}
if err != nil {
return nil, err
}
// Process profiles coming directly from the database.
// As we don't use any caching, these will be new objects.
// Mark the profile as being saved internally in order to not trigger an
// update after saving it to the database.
profile.internalSave = true
// Add a layeredProfile to local profiles.
if profile.Source == SourceLocal {
// If we are refetching, assign the layered profile from the previous version.
if previousVersion != nil {
profile.layeredProfile = previousVersion.layeredProfile
}
// Local profiles must have a layered profile, create a new one if it
// does not yet exist.
if profile.layeredProfile == nil {
profile.layeredProfile = NewLayeredProfile(profile)
}
}
// Add the profile to the currently active profiles.
addActiveProfile(profile)
return profile, nil
})
if err != nil {
return nil, err
}
if p == nil {
return nil, errors.New("profile getter returned nil")
}
return p.(*Profile), nil
}
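A brief usage sketch, mirroring the calls made from the process package; the executable path and helper name are illustrative only:
func exampleProfileLookups() error {
	// Regular process: fetch or create the shared local profile by linked path.
	byPath, err := GetProfile(SourceLocal, "", "/usr/bin/example")
	if err != nil {
		return err
	}
	_ = byPath

	// Special process: fetch the predefined system profile by its ID.
	system, err := GetProfile(SourceLocal, SystemProfileID, "")
	if err != nil {
		return err
	}
	_ = system
	return nil
}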
// getProfile fetches the profile for the given scoped ID.
func getProfile(scopedID string) (profile *Profile, err error) {
// Get profile from the database.
r, err := profileDB.Get(profilesDBPath + scopedID)
if err != nil {
return nil, err
}
// Parse and prepare the profile, return the result.
return prepProfile(r)
}
// findProfile searches for a profile with the given linked path. If it cannot
// find one, it will create a new profile for the given linked path.
func findProfile(linkedPath string) (profile *Profile, err error) {
// Search the database for a matching profile.
it, err := profileDB.Query(
query.New(makeProfileKey(SourceLocal, "")).Where(
query.Where("LinkedPath", query.SameAs, linkedPath),
),
)
if err != nil {
return nil, err
}
// Only wait for the first result, or until the query ends.
r := <-it.Next
// Then cancel the query, should it still be running.
it.Cancel()
// Prep and return an existing profile.
if r != nil {
profile, err = prepProfile(r)
return profile, err
}
// If there was no profile in the database, create a new one, and return it.
profile = New(SourceLocal, "", linkedPath)
// Check if the profile should be marked as internal.
// This is the case whenever the binary resides within the data root dir.
if strings.HasPrefix(linkedPath, dataroot.Root().Dir+string(os.PathSeparator)) {
profile.Internal = true
}
return profile, nil
}
func prepProfile(r record.Record) (*Profile, error) {
// ensure its a profile
profile, err := EnsureProfile(r)
if err != nil {
return nil, err
}
// prepare config
err = profile.prepConfig()
if err != nil {
log.Warningf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
}
// parse config
err = profile.parseConfig()
if err != nil {
log.Warningf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
}
// return parsed profile
return profile, nil
}

View file

@ -38,6 +38,11 @@ func start() error {
return err
}
err = registerRevisionProvider()
if err != nil {
return err
}
err = startProfileUpdateChecker()
if err != nil {
return err

View file

@ -1,11 +0,0 @@
package profile
import (
"testing"
"github.com/safing/portmaster/core/pmtesting"
)
func TestMain(m *testing.M) {
pmtesting.TestMain(m, module)
}

View file

@ -0,0 +1,83 @@
package profile
import (
"errors"
"strings"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/runtime"
)
const (
revisionProviderPrefix = "layeredProfile/"
)
var (
errProfileNotActive = errors.New("profile not active")
errNoLayeredProfile = errors.New("profile has no layered profile")
pushLayeredProfile runtime.PushFunc = func(...record.Record) {}
)
func registerRevisionProvider() error {
push, err := runtime.Register(
revisionProviderPrefix,
runtime.SimpleValueGetterFunc(getRevisions),
)
if err != nil {
return err
}
pushLayeredProfile = push
return nil
}
func getRevisions(key string) ([]record.Record, error) {
key = strings.TrimPrefix(key, revisionProviderPrefix)
var profiles []*Profile
if key == "" {
profiles = getAllActiveProfiles()
} else {
// Get active profile.
profile := getActiveProfile(key)
if profile == nil {
return nil, errProfileNotActive
}
profiles = append(profiles, profile)
}
records := make([]record.Record, 0, len(profiles))
for _, p := range profiles {
layered, err := getProfileRevision(p)
if err != nil {
log.Warningf("failed to get layered profile for %s: %s", p.ID, err)
continue
}
records = append(records, layered)
}
return records, nil
}
// getProfileRevision returns the layered profile for p.
// It also updates the layered profile if required.
func getProfileRevision(p *Profile) (*LayeredProfile, error) {
// Get layered profile.
layeredProfile := p.LayeredProfile()
if layeredProfile == nil {
return nil, errNoLayeredProfile
}
// Update profiles if necessary.
if layeredProfile.NeedsUpdate() {
layeredProfile.Update()
}
return layeredProfile, nil
}
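For orientation, a sketch of the runtime database key under which a layered profile is pushed; "runtime" is an assumption about the default runtime registry's database name, and "local/abc123" stands in for a real scoped profile ID:
func exampleRevisionKey() string {
	// The resulting key is "runtime:layeredProfile/local/abc123"; subscribing
	// to the prefix "runtime:layeredProfile/" yields pushes for all active
	// layered profiles.
	return "runtime" + ":" + revisionProviderPrefix + "local/abc123"
}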

View file

@ -5,48 +5,51 @@ import (
"sync"
"sync/atomic"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/status"
"github.com/tevino/abool"
"github.com/safing/portbase/config"
"github.com/safing/portmaster/intel"
"github.com/safing/portmaster/profile/endpoints"
)
var (
no = abool.NewBool(false)
)
// LayeredProfile combines multiple Profiles.
type LayeredProfile struct {
lock sync.Mutex
record.Base
sync.RWMutex
localProfile *Profile
layers []*Profile
revisionCounter uint64
localProfile *Profile
layers []*Profile
validityFlag *abool.AtomicBool
validityFlagLock sync.Mutex
LayerIDs []string
RevisionCounter uint64
globalValidityFlag *config.ValidityFlag
securityLevel *uint32
DisableAutoPermit config.BoolOption
BlockScopeLocal config.BoolOption
BlockScopeLAN config.BoolOption
BlockScopeInternet config.BoolOption
BlockP2P config.BoolOption
BlockInbound config.BoolOption
EnforceSPN config.BoolOption
RemoveOutOfScopeDNS config.BoolOption
RemoveBlockedDNS config.BoolOption
FilterSubDomains config.BoolOption
FilterCNAMEs config.BoolOption
PreventBypassing config.BoolOption
DomainHeuristics config.BoolOption
// These functions give layered access to configuration options and require
// the layered profile to be read locked.
// TODO(ppacher): we need JSON tags here so the layeredProfile can be exposed
// via the API. If we ever switch away from JSON to something else supported
// by DSD this WILL BREAK!
DisableAutoPermit config.BoolOption `json:"-"`
BlockScopeLocal config.BoolOption `json:"-"`
BlockScopeLAN config.BoolOption `json:"-"`
BlockScopeInternet config.BoolOption `json:"-"`
BlockP2P config.BoolOption `json:"-"`
BlockInbound config.BoolOption `json:"-"`
RemoveOutOfScopeDNS config.BoolOption `json:"-"`
RemoveBlockedDNS config.BoolOption `json:"-"`
FilterSubDomains config.BoolOption `json:"-"`
FilterCNAMEs config.BoolOption `json:"-"`
PreventBypassing config.BoolOption `json:"-"`
DomainHeuristics config.BoolOption `json:"-"`
UseSPN config.BoolOption `json:"-"`
}
// NewLayeredProfile returns a new layered profile based on the given local profile.
@ -56,8 +59,7 @@ func NewLayeredProfile(localProfile *Profile) *LayeredProfile {
new := &LayeredProfile{
localProfile: localProfile,
layers: make([]*Profile, 0, len(localProfile.LinkedProfiles)+1),
revisionCounter: 0,
validityFlag: abool.NewBool(true),
LayerIDs: make([]string, 0, len(localProfile.LinkedProfiles)+1),
globalValidityFlag: config.NewValidityFlag(),
securityLevel: &securityLevelVal,
}
@ -86,10 +88,6 @@ func NewLayeredProfile(localProfile *Profile) *LayeredProfile {
CfgOptionBlockInboundKey,
cfgOptionBlockInbound,
)
new.EnforceSPN = new.wrapSecurityLevelOption(
CfgOptionEnforceSPNKey,
cfgOptionEnforceSPN,
)
new.RemoveOutOfScopeDNS = new.wrapSecurityLevelOption(
CfgOptionRemoveOutOfScopeDNSKey,
cfgOptionRemoveOutOfScopeDNS,
@ -114,22 +112,56 @@ func NewLayeredProfile(localProfile *Profile) *LayeredProfile {
CfgOptionDomainHeuristicsKey,
cfgOptionDomainHeuristics,
)
new.UseSPN = new.wrapBoolOption(
CfgOptionUseSPNKey,
cfgOptionUseSPN,
)
// TODO: load linked profiles.
// FUTURE: load forced company profile
new.LayerIDs = append(new.LayerIDs, localProfile.ScopedID())
new.layers = append(new.layers, localProfile)
// FUTURE: load company profile
// FUTURE: load community profile
// TODO: Load additional profiles.
new.updateCaches()
new.CreateMeta()
new.SetKey(runtime.DefaultRegistry.DatabaseName() + ":" + revisionProviderPrefix + localProfile.ScopedID())
// Inform database subscribers about the new layered profile.
new.Lock()
defer new.Unlock()
pushLayeredProfile(new)
return new
}
func (lp *LayeredProfile) getValidityFlag() *abool.AtomicBool {
lp.validityFlagLock.Lock()
defer lp.validityFlagLock.Unlock()
return lp.validityFlag
// LockForUsage locks the layered profile, including all layers individually.
func (lp *LayeredProfile) LockForUsage() {
lp.RLock()
for _, layer := range lp.layers {
layer.RLock()
}
}
// UnlockForUsage unlocks the layered profile, including all layers individually.
func (lp *LayeredProfile) UnlockForUsage() {
lp.RUnlock()
for _, layer := range lp.layers {
layer.RUnlock()
}
}
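A short usage sketch of the locking contract (the helper is hypothetical): the per-option accessors below expect the layered profile to be read locked, which LockForUsage and UnlockForUsage take care of.
func readSettings(lp *LayeredProfile) (defaultAction uint8, blockInbound bool) {
	lp.LockForUsage()
	defer lp.UnlockForUsage()
	return lp.DefaultAction(), lp.BlockInbound()
}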
// LocalProfile returns the local profile associated with this layered profile.
func (lp *LayeredProfile) LocalProfile() *Profile {
if lp == nil {
return nil
}
lp.RLock()
defer lp.RUnlock()
return lp.localProfile
}
// RevisionCnt returns the current profile revision counter.
@ -138,23 +170,57 @@ func (lp *LayeredProfile) RevisionCnt() (revisionCounter uint64) {
return 0
}
lp.lock.Lock()
defer lp.lock.Unlock()
lp.RLock()
defer lp.RUnlock()
return lp.revisionCounter
return lp.RevisionCounter
}
// Update checks for updated profiles and replaces any outdated profiles.
// MarkStillActive marks all the layers as still active.
func (lp *LayeredProfile) MarkStillActive() {
if lp == nil {
return
}
lp.RLock()
defer lp.RUnlock()
for _, layer := range lp.layers {
layer.MarkStillActive()
}
}
// NeedsUpdate checks for outdated profiles.
func (lp *LayeredProfile) NeedsUpdate() (outdated bool) {
lp.RLock()
defer lp.RUnlock()
// Check global config state.
if !lp.globalValidityFlag.IsValid() {
return true
}
// Check config in layers.
for _, layer := range lp.layers {
if layer.outdated.IsSet() {
return true
}
}
return false
}
// Update checks for and replaces any outdated profiles.
func (lp *LayeredProfile) Update() (revisionCounter uint64) {
lp.lock.Lock()
defer lp.lock.Unlock()
lp.Lock()
defer lp.Unlock()
var changed bool
for i, layer := range lp.layers {
if layer.outdated.IsSet() {
changed = true
// update layer
newLayer, err := GetProfile(layer.Source, layer.ID)
newLayer, err := GetProfile(layer.Source, layer.ID, layer.LinkedPath)
if err != nil {
log.Errorf("profiles: failed to update profile %s", layer.ScopedID())
} else {
@ -167,11 +233,6 @@ func (lp *LayeredProfile) Update() (revisionCounter uint64) {
}
if changed {
// reset validity flag
lp.validityFlagLock.Lock()
lp.validityFlag.SetTo(false)
lp.validityFlag = abool.NewBool(true)
lp.validityFlagLock.Unlock()
// get global config validity flag
lp.globalValidityFlag.Refresh()
@ -179,10 +240,12 @@ func (lp *LayeredProfile) Update() (revisionCounter uint64) {
lp.updateCaches()
// bump revision counter
lp.revisionCounter++
lp.RevisionCounter++
pushLayeredProfile(lp)
}
return lp.revisionCounter
return lp.RevisionCounter
}
func (lp *LayeredProfile) updateCaches() {
@ -194,8 +257,6 @@ func (lp *LayeredProfile) updateCaches() {
}
}
atomic.StoreUint32(lp.securityLevel, uint32(newLevel))
// TODO: ignore community profiles
}
// MarkUsed marks the localProfile as used.
@ -203,12 +264,12 @@ func (lp *LayeredProfile) MarkUsed() {
lp.localProfile.MarkUsed()
}
// SecurityLevel returns the highest security level of all layered profiles.
// SecurityLevel returns the highest security level of all layered profiles. This function is atomic and does not require any locking.
func (lp *LayeredProfile) SecurityLevel() uint8 {
return uint8(atomic.LoadUint32(lp.securityLevel))
}
// DefaultAction returns the active default action ID.
// DefaultAction returns the active default action ID. This function requires the layered profile to be read locked.
func (lp *LayeredProfile) DefaultAction() uint8 {
for _, layer := range lp.layers {
if layer.defaultAction > 0 {
@ -221,7 +282,7 @@ func (lp *LayeredProfile) DefaultAction() uint8 {
return cfgDefaultAction
}
// MatchEndpoint checks if the given endpoint matches an entry in any of the profiles.
// MatchEndpoint checks if the given endpoint matches an entry in any of the profiles. This function requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchEndpoint(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
for _, layer := range lp.layers {
if layer.endpoints.IsSet() {
@ -237,7 +298,7 @@ func (lp *LayeredProfile) MatchEndpoint(ctx context.Context, entity *intel.Entit
return cfgEndpoints.Match(ctx, entity)
}
// MatchServiceEndpoint checks if the given endpoint of an inbound connection matches an entry in any of the profiles.
// MatchServiceEndpoint checks if the given endpoint of an inbound connection matches an entry in any of the profiles. This function requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchServiceEndpoint(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
entity.EnableReverseResolving()
@ -256,7 +317,7 @@ func (lp *LayeredProfile) MatchServiceEndpoint(ctx context.Context, entity *inte
}
// MatchFilterLists matches the entity against the set of filter
// lists.
// lists. This function requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchFilterLists(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
entity.ResolveSubDomainLists(ctx, lp.FilterSubDomains())
entity.EnableCNAMECheck(ctx, lp.FilterCNAMEs())
@ -287,16 +348,6 @@ func (lp *LayeredProfile) MatchFilterLists(ctx context.Context, entity *intel.En
return endpoints.NoMatch, nil
}
// AddEndpoint adds an endpoint to the local endpoint list, saves the local profile and reloads the configuration.
func (lp *LayeredProfile) AddEndpoint(newEntry string) {
lp.localProfile.AddEndpoint(newEntry)
}
// AddServiceEndpoint adds a service endpoint to the local endpoint list, saves the local profile and reloads the configuration.
func (lp *LayeredProfile) AddServiceEndpoint(newEntry string) {
lp.localProfile.AddServiceEndpoint(newEntry)
}
func (lp *LayeredProfile) wrapSecurityLevelOption(configKey string, globalConfig config.IntOption) config.BoolOption {
activeAtLevels := lp.wrapIntOption(configKey, globalConfig)
@ -308,22 +359,27 @@ func (lp *LayeredProfile) wrapSecurityLevelOption(configKey string, globalConfig
}
}
func (lp *LayeredProfile) wrapIntOption(configKey string, globalConfig config.IntOption) config.IntOption {
valid := no
var value int64
func (lp *LayeredProfile) wrapBoolOption(configKey string, globalConfig config.BoolOption) config.BoolOption {
revCnt := lp.RevisionCounter
var value bool
var refreshLock sync.Mutex
return func() int64 {
if !valid.IsSet() {
valid = lp.getValidityFlag()
return func() bool {
refreshLock.Lock()
defer refreshLock.Unlock()
// Check if we need to refresh the value.
if revCnt != lp.RevisionCounter {
revCnt = lp.RevisionCounter
// Go through all layers to find an active value.
found := false
layerLoop:
for _, layer := range lp.layers {
layerValue, ok := layer.configPerspective.GetAsInt(configKey)
layerValue, ok := layer.configPerspective.GetAsBool(configKey)
if ok {
found = true
value = layerValue
break layerLoop
break
}
}
if !found {
@ -335,25 +391,76 @@ func (lp *LayeredProfile) wrapIntOption(configKey string, globalConfig config.In
}
}
func (lp *LayeredProfile) wrapIntOption(configKey string, globalConfig config.IntOption) config.IntOption {
revCnt := lp.RevisionCounter
var value int64
var refreshLock sync.Mutex
return func() int64 {
refreshLock.Lock()
defer refreshLock.Unlock()
// Check if we need to refresh the value.
if revCnt != lp.RevisionCounter {
revCnt = lp.RevisionCounter
// Go through all layers to find an active value.
found := false
for _, layer := range lp.layers {
layerValue, ok := layer.configPerspective.GetAsInt(configKey)
if ok {
found = true
value = layerValue
break
}
}
if !found {
value = globalConfig()
}
}
return value
}
}
// GetProfileSource returns the database key of the first profile in the
// layers that has the given configuration key set. If it returns an empty
// string, the global profile can be assumed to have been effective.
func (lp *LayeredProfile) GetProfileSource(configKey string) string {
for _, layer := range lp.layers {
if layer.configPerspective.Has(configKey) {
return layer.Key()
}
}
// Global Profile
return ""
}
/*
For later:
func (lp *LayeredProfile) wrapStringOption(configKey string, globalConfig config.StringOption) config.StringOption {
valid := no
revCnt := lp.RevisionCounter
var value string
var refreshLock sync.Mutex
return func() string {
if !valid.IsSet() {
valid = lp.getValidityFlag()
refreshLock.Lock()
defer refreshLock.Unlock()
// Check if we need to refresh the value.
if revCnt != lp.RevisionCounter {
revCnt = lp.RevisionCounter
// Go through all layers to find an active value.
found := false
layerLoop:
for _, layer := range lp.layers {
layerValue, ok := layer.configPerspective.GetAsString(configKey)
if ok {
found = true
value = layerValue
break layerLoop
break
}
}
if !found {

View file

@ -1,9 +1,13 @@
package profile
import (
"context"
"errors"
"fmt"
"path/filepath"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tevino/abool"
@ -12,6 +16,7 @@ import (
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/utils"
"github.com/safing/portbase/utils/osdetail"
"github.com/safing/portmaster/intel/filterlists"
"github.com/safing/portmaster/profile/endpoints"
)
@ -20,12 +25,15 @@ var (
lastUsedUpdateThreshold = 24 * time.Hour
)
// profileSource is the source of the profile.
type profileSource string
// Profile Sources
const (
SourceLocal string = "local" // local, editable
SourceSpecial string = "special" // specials (read-only)
SourceCommunity string = "community"
SourceEnterprise string = "enterprise"
SourceLocal profileSource = "local" // local, editable
SourceSpecial profileSource = "special" // specials (read-only)
SourceCommunity profileSource = "community"
SourceEnterprise profileSource = "enterprise"
)
// Default Action IDs
@ -36,35 +44,76 @@ const (
DefaultActionPermit uint8 = 3
)
// iconType describes the type of the Icon property
// of a profile.
type iconType string
// Supported icon types.
const (
IconTypeFile iconType = "path"
IconTypeDatabase iconType = "database"
IconTypeBlob iconType = "blob"
)
// Profile is used to predefine a security profile for applications.
type Profile struct { //nolint:maligned // not worth the effort
record.Base
sync.Mutex
sync.RWMutex
// Identity
ID string
Source string
// App Information
Name string
// ID is a unique identifier for the profile.
ID string // constant
// Source describes the source of the profile.
Source profileSource // constant
// Name is a human readable name of the profile. It
// defaults to the basename of the application.
Name string
// Description may hold an optional description of the
// profile or the purpose of the application.
Description string
Homepage string
// Icon is a path to the icon and is either prefixed "f:" for filepath, "d:" for a database path or "e:" for the encoded data.
// Homepage may refer to the website of the application
// vendor.
Homepage string
// Icon holds the icon of the application. The value
// may either be a filepath, a database key or a blob URL.
// See IconType for more information.
Icon string
// References - local profiles only
// LinkedPath is a filesystem path to the executable this profile was created for.
LinkedPath string
// IconType describes the type of the Icon property.
IconType iconType
// LinkedPath is a filesystem path to the executable this
// profile was created for.
LinkedPath string // constant
// LinkedProfiles is a list of other profiles
LinkedProfiles []string
// Fingerprints
// TODO: Fingerprints []*Fingerprint
// Configuration
// The minimum security level to apply to connections made with this profile
// SecurityLevel is the minimum security level to apply to
// connections made with this profile.
// Note(ppacher): we may deprecate this one as it can easily
// be "simulated" by adjusting the settings
// directly.
SecurityLevel uint8
Config map[string]interface{}
// Config holds profile-specific settings. It's a nested
// object with keys defining the settings database path. All keys
// until the actual settings value (which is everything that is not
// an object) need to be concatenated for the settings database
// path.
Config map[string]interface{}
// ApproxLastUsed holds a UTC timestamp in seconds of
// when this Profile was approximately last used.
// For performance reasons not every single usage is saved.
ApproxLastUsed int64
// Created holds the UTC timestamp in seconds when the
// profile has been created.
Created int64
// Internal is set to true if the profile is attributed to a
// Portmaster internal process. Internal is set during profile
// creation and may be accessed without lock.
Internal bool
// layeredProfile is a link to the layered profile with this profile as the
// main profile.
// All processes with the same binary should share the same instance of the
// local profile and the associated layered profile.
layeredProfile *LayeredProfile
// Interpreted Data
configPerspective *config.Perspective
@ -75,17 +124,8 @@ type Profile struct { //nolint:maligned // not worth the effort
filterListIDs []string
// Lifecycle Management
outdated *abool.AtomicBool
lastUsed time.Time
// Framework
// If a Profile is declared as a Framework (i.e. an Interpreter and the likes), then the real process/actor must be found
// TODO: Framework *Framework
// When this Profile was approximately last used.
// For performance reasons not every single usage is saved.
ApproxLastUsed int64
Created int64
outdated *abool.AtomicBool
lastActive *int64
internalSave bool
}
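To make the Config convention described above concrete, here is a minimal sketch (not part of this diff): the keys of the nested object are concatenated, "/"-separated, into the settings database path. The keys and values below are hypothetical.
func exampleNestedConfig() {
	// A nested Config object; "filter" and "exampleOption" are hypothetical
	// keys used only for illustration.
	cfg := map[string]interface{}{
		"filter": map[string]interface{}{
			"exampleOption": []string{"+ .example.com"},
		},
	}
	// The value above is addressed by the concatenated settings path
	// "filter/exampleOption". The helper used by addEndpointyEntry below
	// writes a value at such a path directly:
	config.PutValueIntoHierarchicalConfig(cfg, "filter/exampleOption", []string{"+ .example.com"})
}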
@ -94,6 +134,7 @@ func (profile *Profile) prepConfig() (err error) {
// prepare configuration
profile.configPerspective, err = config.NewPerspective(profile.Config)
profile.outdated = abool.New()
profile.lastActive = new(int64)
return
}
@ -153,16 +194,25 @@ func (profile *Profile) parseConfig() error {
}
// New returns a new Profile.
func New() *Profile {
func New(source profileSource, id string, linkedPath string) *Profile {
profile := &Profile{
ID: utils.RandomUUID("").String(),
Source: SourceLocal,
ID: id,
Source: source,
LinkedPath: linkedPath,
Created: time.Now().Unix(),
Config: make(map[string]interface{}),
internalSave: true,
}
// create placeholders
// Generate random ID if none is given.
if id == "" {
profile.ID = utils.RandomUUID("").String()
}
// Make key from ID and source.
profile.makeKey()
// Prepare profile to create placeholders.
_ = profile.prepConfig()
_ = profile.parseConfig()
@ -174,6 +224,11 @@ func (profile *Profile) ScopedID() string {
return makeScopedID(profile.Source, profile.ID)
}
// makeKey derives and sets the record Key from the profile attributes.
func (profile *Profile) makeKey() {
profile.SetKey(makeProfileKey(profile.Source, profile.ID))
}
// Save saves the profile to the database
func (profile *Profile) Save() error {
if profile.ID == "" {
@ -183,38 +238,41 @@ func (profile *Profile) Save() error {
return fmt.Errorf("profile: profile %s does not specify a source", profile.ID)
}
if !profile.KeyIsSet() {
profile.SetKey(makeProfileKey(profile.Source, profile.ID))
}
return profileDB.Put(profile)
}
// MarkUsed marks the profile as used and saves it when it has changed.
func (profile *Profile) MarkUsed() {
profile.Lock()
// lastUsed
profile.lastUsed = time.Now()
// MarkStillActive marks the profile as still active.
func (profile *Profile) MarkStillActive() {
atomic.StoreInt64(profile.lastActive, time.Now().Unix())
}
// LastActive returns the unix timestamp when the profile was last marked as
// still active.
func (profile *Profile) LastActive() int64 {
return atomic.LoadInt64(profile.lastActive)
}
// MarkUsed updates ApproxLastUsed when it's been a while and returns whether the profile was changed and should be saved.
func (profile *Profile) MarkUsed() (changed bool) {
profile.Lock()
defer profile.Unlock()
// ApproxLastUsed
save := false
if time.Now().Add(-lastUsedUpdateThreshold).Unix() > profile.ApproxLastUsed {
profile.ApproxLastUsed = time.Now().Unix()
save = true
return true
}
profile.Unlock()
if save {
err := profile.Save()
if err != nil {
log.Warningf("profiles: failed to save profile %s after marking as used: %s", profile.ScopedID(), err)
}
}
return false
}
// String returns a string representation of the Profile.
func (profile *Profile) String() string {
return profile.Name
return fmt.Sprintf("<%s %s/%s>", profile.Name, profile.Source, profile.ID)
}
// IsOutdated returns whether this instance of the profile is marked as outdated.
func (profile *Profile) IsOutdated() bool {
return profile.outdated.IsSet()
}
// AddEndpoint adds an endpoint to the endpoint list, saves the profile and reloads the configuration.
@ -228,82 +286,79 @@ func (profile *Profile) AddServiceEndpoint(newEntry string) {
}
func (profile *Profile) addEndpointyEntry(cfgKey, newEntry string) {
changed := false
// When finished, save the profile.
defer func() {
if !changed {
return
}
err := profile.Save()
if err != nil {
log.Warningf("profile: failed to save profile %s after add an endpoint rule: %s", profile.ScopedID(), err)
}
}()
// When finished increase the revision counter of the layered profile.
defer func() {
if !changed || profile.layeredProfile == nil {
return
}
profile.layeredProfile.Lock()
defer profile.layeredProfile.Unlock()
profile.layeredProfile.RevisionCounter++
}()
// Lock the profile for editing.
profile.Lock()
// get, update, save endpoints list
defer profile.Unlock()
// Get the endpoint list configuration value and add the new entry.
endpointList, ok := profile.configPerspective.GetAsStringArray(cfgKey)
if !ok {
endpointList = make([]string, 0, 1)
if ok {
// A list already exists, check for duplicates within the same prefix.
newEntryPrefix := strings.Split(newEntry, " ")[0] + " "
for _, entry := range endpointList {
if !strings.HasPrefix(entry, newEntryPrefix) {
// We found an entry with a different prefix than the new entry.
// Beyond this entry we cannot possibly know if identical entries will
// match, so we will have to add the new entry no matter what the rest
// of the list has.
break
}
if entry == newEntry {
// An identical entry is already in the list, abort.
log.Debugf("profile: ingoring new endpoint rule for %s, as identical is already present: %s", profile, newEntry)
return
}
}
endpointList = append([]string{newEntry}, endpointList...)
} else {
endpointList = []string{newEntry}
}
endpointList = append(endpointList, newEntry)
// Save new value back to profile.
config.PutValueIntoHierarchicalConfig(profile.Config, cfgKey, endpointList)
changed = true
profile.Unlock()
err := profile.Save()
if err != nil {
log.Warningf("profile: failed to save profile after adding endpoint: %s", err)
}
// reload manually
profile.Lock()
// Reload the profile manually in order to parse the newly added entry.
profile.dataParsed = false
err = profile.parseConfig()
err := profile.parseConfig()
if err != nil {
log.Warningf("profile: failed to parse profile config after adding endpoint: %s", err)
log.Warningf("profile: failed to parse %s config after adding endpoint: %s", profile, err)
}
profile.Unlock()
}
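A worked illustration of the duplicate check above (hypothetical rules, not part of this diff):
// Existing list ["+ .example.com", "- *"], new entry "+ .example.com":
//   the leading "+ " entry is identical, so the new rule is dropped.
// Existing list ["+ .example.com", "- *"], new entry "+ .other.com":
//   the scan stops at "- *" (different prefix) and the rule is prepended,
//   giving ["+ .other.com", "+ .example.com", "- *"].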
// GetProfile loads a profile from the database.
func GetProfile(source, id string) (*Profile, error) {
return GetProfileByScopedID(makeScopedID(source, id))
}
// GetProfileByScopedID loads a profile from the database using a scoped ID like "local/id" or "community/id".
func GetProfileByScopedID(scopedID string) (*Profile, error) {
// check cache
profile := getActiveProfile(scopedID)
if profile != nil {
profile.MarkUsed()
return profile, nil
}
// get from database
r, err := profileDB.Get(profilesDBPath + scopedID)
if err != nil {
return nil, err
}
// convert
profile, err = EnsureProfile(r)
if err != nil {
return nil, err
}
// lock for prepping
// LayeredProfile returns the layered profile associated with this profile.
func (profile *Profile) LayeredProfile() *LayeredProfile {
profile.Lock()
defer profile.Unlock()
// prepare config
err = profile.prepConfig()
if err != nil {
log.Warningf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
}
// parse config
err = profile.parseConfig()
if err != nil {
log.Warningf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
}
// mark as internal
profile.internalSave = true
profile.Unlock()
// mark active
profile.MarkUsed()
markProfileActive(profile)
return profile, nil
return profile.layeredProfile
}
// EnsureProfile ensures that the given record is a *Profile, and returns it.
@ -326,3 +381,108 @@ func EnsureProfile(r record.Record) (*Profile, error) {
}
return new, nil
}
// UpdateMetadata updates metadata fields on the profile and returns whether
// the profile was changed. If there is data that needs to be fetched from the
// operating system, it will start an async worker to fetch that data and save
// the profile afterwards.
func (profile *Profile) UpdateMetadata(processName string) (changed bool) {
// Check if this is a local profile, else warn and return.
if profile.Source != SourceLocal {
log.Warningf("tried to update metadata for non-local profile %s", profile.ScopedID())
return false
}
profile.Lock()
defer profile.Unlock()
// Check if this is a special profile.
if profile.LinkedPath == "" {
// This is a special profile, just assign the processName, if needed, and
// return.
if profile.Name != processName {
profile.Name = processName
return true
}
return false
}
var needsUpdateFromSystem bool
// Check profile name.
_, filename := filepath.Split(profile.LinkedPath)
// Update profile name if it is empty or equals the filename, which is the
// case for older profiles.
if profile.Name == "" || profile.Name == filename {
// Generate a default profile name if one does not exist.
profile.Name = osdetail.GenerateBinaryNameFromPath(profile.LinkedPath)
if profile.Name == filename {
// TODO: Theoretically, the generated name could be identical to the
// filename.
// As a quick fix, append a space to the name.
profile.Name += " "
}
changed = true
needsUpdateFromSystem = true
}
// If needed, get more/better data from the operating system.
if needsUpdateFromSystem {
module.StartWorker("get profile metadata", profile.updateMetadataFromSystem)
}
return changed
}
// updateMetadataFromSystem updates the profile metadata with data from the
// operating system and saves it afterwards.
func (profile *Profile) updateMetadataFromSystem(ctx context.Context) error {
// This function is only valid for local profiles.
if profile.Source != SourceLocal || profile.LinkedPath == "" {
return fmt.Errorf("tried to update metadata for non-local / non-linked profile %s", profile.ScopedID())
}
// Save the profile when finished, if needed.
save := false
defer func() {
if save {
err := profile.Save()
if err != nil {
log.Warningf("profile: failed to save %s after metadata update: %s", profile.ScopedID(), err)
}
}
}()
// Get binary name from linked path.
newName, err := osdetail.GetBinaryNameFromSystem(profile.LinkedPath)
if err != nil {
if !errors.Is(err, osdetail.ErrNotSupported) {
log.Warningf("profile: error while getting binary name for %s: %s", profile.LinkedPath, err)
}
return nil
}
// Get filename of linked path for comparison.
_, filename := filepath.Split(profile.LinkedPath)
// TODO: Theoretically, the generated name from the system could be identical
// to the filename. This would mean that the worker is triggered every time
// the profile is freshly loaded.
if newName == filename {
// As a quick fix, append a space to the name.
newName += " "
}
// Lock profile for applying metadata.
profile.Lock()
defer profile.Unlock()
// Apply new name if it changed.
if profile.Name != newName {
profile.Name = newName
save = true
}
return nil
}

View file

@ -1,56 +0,0 @@
package profile
import (
"github.com/safing/portbase/log"
)
const (
unidentifiedProfileID = "_unidentified"
systemProfileID = "_system"
)
// GetUnidentifiedProfile returns the special profile assigned to unidentified processes.
func GetUnidentifiedProfile() *Profile {
// get profile
profile, err := GetProfile(SourceLocal, unidentifiedProfileID)
if err == nil {
return profile
}
// create if not available (or error)
profile = New()
profile.Name = "Unidentified Processes"
profile.Source = SourceLocal
profile.ID = unidentifiedProfileID
// save to db
err = profile.Save()
if err != nil {
log.Warningf("profiles: failed to save %s: %s", profile.ScopedID(), err)
}
return profile
}
// GetSystemProfile returns the special profile used for the Kernel.
func GetSystemProfile() *Profile {
// get profile
profile, err := GetProfile(SourceLocal, systemProfileID)
if err == nil {
return profile
}
// create if not available (or error)
profile = New()
profile.Name = "Operating System"
profile.Source = SourceLocal
profile.ID = systemProfileID
// save to db
err = profile.Save()
if err != nil {
log.Warningf("profiles: failed to save %s: %s", profile.ScopedID(), err)
}
return profile
}

View file

@ -57,19 +57,19 @@ var (
cfgOptionNameServersOrder = 0
CfgOptionNoAssignedNameserversKey = "dns/noAssignedNameservers"
noAssignedNameservers status.SecurityLevelOption
noAssignedNameservers status.SecurityLevelOptionFunc
cfgOptionNoAssignedNameserversOrder = 1
CfgOptionNoMulticastDNSKey = "dns/noMulticastDNS"
noMulticastDNS status.SecurityLevelOption
noMulticastDNS status.SecurityLevelOptionFunc
cfgOptionNoMulticastDNSOrder = 2
CfgOptionNoInsecureProtocolsKey = "dns/noInsecureProtocols"
noInsecureProtocols status.SecurityLevelOption
noInsecureProtocols status.SecurityLevelOptionFunc
cfgOptionNoInsecureProtocolsOrder = 3
CfgOptionDontResolveSpecialDomainsKey = "dns/dontResolveSpecialDomains"
dontResolveSpecialDomains status.SecurityLevelOption
dontResolveSpecialDomains status.SecurityLevelOptionFunc
cfgOptionDontResolveSpecialDomainsOrder = 16
CfgOptionNameserverRetryRateKey = "dns/nameserverRetryRate"
@ -82,36 +82,71 @@ func prepConfig() error {
Name: "DNS Servers",
Key: CfgOptionNameServersKey,
Description: "DNS Servers to use for resolving DNS requests.",
Help: `Format:
Help: strings.ReplaceAll(`DNS Servers are used in the order entered. The first one will be used as the primary DNS Server. Only if it fails will the other servers be used as a fallback, in their respective order. If all fail, or if no DNS Server is configured here, the Portmaster will use the one configured in your system or network.
DNS Servers are configured in a URL format. This allows you to specify special settings for a resolver. If you just want to use a resolver at IP 10.2.3.4, please enter: dns://10.2.3.4:53
The format is: protocol://ip:port?parameter=value&parameter=value
Additionally, if it is more likely that the DNS Servers of your system or network have a (better) answer to a request, they will be asked first. This will be the case for special local domains and domain spaces announced on the current network.
Protocols:
dot: DNS-over-TLS (recommended)
dns: plain old DNS
tcp: plain old DNS over TCP
DNS Servers are configured in a URL format. This allows you to specify special settings for a resolver. If you just want to use a resolver at IP 10.2.3.4, please enter: "dns://10.2.3.4"
The format is: "protocol://ip:port?parameter=value&parameter=value"
IP:
always use the IP address and _not_ the domain name!
Port:
optionally define a custom port
Parameters:
name: give your DNS Server a name that is used for messages and logs
verify: domain name to verify for "dot", required and only valid for "dot"
blockedif: detect if the name server blocks a query, options:
empty: server replies with NXDomain status, but without any other record in any section
refused: server replies with Refused status
zeroip: server replies with an IP address, but it is zero
`,
Order: cfgOptionNameServersOrder,
- Protocol
- "dot": DNS-over-TLS (recommended)
- "dns": plain old DNS
- "tcp": plain old DNS over TCP
- IP: always use the IP address and _not_ the domain name!
- Port: optionally define a custom port
- Parameters:
- "name": give your DNS Server a name that is used for messages and logs
- "verify": domain name to verify for "dot", required and only valid for protocol "dot"
- "blockedif": detect if the name server blocks a query, options:
- "empty": server replies with NXDomain status, but without any other record in any section
- "refused": server replies with Refused status
- "zeroip": server replies with an IP address, but it is zero
`, `"`, "`"),
OptType: config.OptTypeStringArray,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: defaultNameServers,
ValidationRegex: fmt.Sprintf("^(%s|%s|%s)://.*", ServerTypeDoT, ServerTypeDNS, ServerTypeTCP),
Annotations: config.Annotations{
config.DisplayHintAnnotation: config.DisplayHintOrdered,
config.DisplayOrderAnnotation: cfgOptionNameServersOrder,
config.CategoryAnnotation: "Servers",
config.QuickSettingsAnnotation: []config.QuickSetting{
{
Name: "Quad9",
Action: config.QuickReplace,
Value: []string{
"dot://9.9.9.9:853?verify=dns.quad9.net&name=Quad9&blockedif=empty",
"dot://149.112.112.112:853?verify=dns.quad9.net&name=Quad9&blockedif=empty",
},
},
{
Name: "AdGuard",
Action: config.QuickReplace,
Value: []string{
"dot://94.140.14.14:853?verify=dns.adguard.com&name=AdGuard&blockedif=zeroip",
"dot://94.140.15.15:853?verify=dns.adguard.com&name=AdGuard&blockedif=zeroip",
},
},
{
Name: "Foundation for Applied Privacy",
Action: config.QuickReplace,
Value: []string{
"dot://94.130.106.88:853?verify=dot1.applied-privacy.net&name=AppliedPrivacy",
"dot://94.130.106.88:443?verify=dot1.applied-privacy.net&name=AppliedPrivacy",
},
},
{
Name: "Cloudflare",
Action: config.QuickReplace,
Value: []string{
"dot://1.1.1.2:853?verify=cloudflare-dns.com&name=Cloudflare&blockedif=zeroip",
"dot://1.0.0.2:853?verify=cloudflare-dns.com&name=Cloudflare&blockedif=zeroip",
},
},
},
},
})
if err != nil {
return err
@ -119,14 +154,18 @@ Parameters:
configuredNameServers = config.Concurrent.GetAsStringArray(CfgOptionNameServersKey, defaultNameServers)
err = config.Register(&config.Option{
Name: "DNS Server Retry Rate",
Name: "Retry Timeout",
Key: CfgOptionNameserverRetryRateKey,
Description: "Rate at which to retry failed DNS Servers, in seconds.",
Order: cfgOptionNameserverRetryRateOrder,
Description: "Timeout between retries when a DNS server fails.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: 600,
DefaultValue: 300,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionNameserverRetryRateOrder,
config.UnitAnnotation: "seconds",
config.CategoryAnnotation: "Servers",
},
})
if err != nil {
return err
@ -134,72 +173,87 @@ Parameters:
nameserverRetryRate = config.Concurrent.GetAsInt(CfgOptionNameserverRetryRateKey, 600)
err = config.Register(&config.Option{
Name: "Do not use Multicast DNS",
Key: CfgOptionNoMulticastDNSKey,
Description: "Multicast DNS queries other devices in the local network",
Order: cfgOptionNoMulticastDNSOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsHighAndExtreme,
ValidationRegex: "^(4|6|7)$",
Name: "Ignore System/Network Servers",
Key: CfgOptionNoAssignedNameserversKey,
Description: "Ignore DNS servers configured in your system or network.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: status.SecurityLevelsHighAndExtreme,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionNoAssignedNameserversOrder,
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.CategoryAnnotation: "Servers",
},
})
if err != nil {
return err
}
noMulticastDNS = status.ConfigIsActiveConcurrent(CfgOptionNoMulticastDNSKey)
noAssignedNameservers = status.SecurityLevelOption(CfgOptionNoAssignedNameserversKey)
err = config.Register(&config.Option{
Name: "Do not use assigned Nameservers",
Key: CfgOptionNoAssignedNameserversKey,
Description: "that were acquired by the network (dhcp) or system",
Order: cfgOptionNoAssignedNameserversOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsHighAndExtreme,
ValidationRegex: "^(4|6|7)$",
Name: "Ignore Multicast DNS",
Key: CfgOptionNoMulticastDNSKey,
Description: "Do not resolve using Multicast DNS. This may break certain Plug and Play devices or services.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: status.SecurityLevelsHighAndExtreme,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionNoMulticastDNSOrder,
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.CategoryAnnotation: "Resolving",
},
})
if err != nil {
return err
}
noAssignedNameservers = status.ConfigIsActiveConcurrent(CfgOptionNoAssignedNameserversKey)
noMulticastDNS = status.SecurityLevelOption(CfgOptionNoMulticastDNSKey)
err = config.Register(&config.Option{
Name: "Do not resolve insecurely",
Key: CfgOptionNoInsecureProtocolsKey,
Description: "Do not resolve domains with insecure protocols, ie. plain DNS",
Order: cfgOptionNoInsecureProtocolsOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsHighAndExtreme,
ValidationRegex: "^(4|6|7)$",
Name: "Enforce Secure DNS",
Key: CfgOptionNoInsecureProtocolsKey,
Description: "Never resolve using insecure protocols, ie. plain DNS.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: status.SecurityLevelsHighAndExtreme,
PossibleValues: status.SecurityLevelValues,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionNoInsecureProtocolsOrder,
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.CategoryAnnotation: "Resolving",
},
})
if err != nil {
return err
}
noInsecureProtocols = status.ConfigIsActiveConcurrent(CfgOptionNoInsecureProtocolsKey)
noInsecureProtocols = status.SecurityLevelOption(CfgOptionNoInsecureProtocolsKey)
err = config.Register(&config.Option{
Name: "Do not resolve special domains",
Key: CfgOptionDontResolveSpecialDomainsKey,
Description: fmt.Sprintf("Do not resolve the special top level domains %s", formatScopeList(specialServiceDomains)),
Order: cfgOptionDontResolveSpecialDomainsOrder,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
ExternalOptType: "security level",
DefaultValue: status.SecurityLevelsAll,
ValidationRegex: "^(4|6|7)$",
Name: "Block Unofficial TLDs",
Key: CfgOptionDontResolveSpecialDomainsKey,
Description: fmt.Sprintf(
"Block %s. Unofficial domains may pose a security risk. This does not affect .onion domains in the Tor Browser.",
formatScopeList(specialServiceDomains),
),
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: status.SecurityLevelsAll,
PossibleValues: status.AllSecurityLevelValues,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionDontResolveSpecialDomainsOrder,
config.DisplayHintAnnotation: status.DisplayHintSecurityLevel,
config.CategoryAnnotation: "Resolving",
},
})
if err != nil {
return err
}
dontResolveSpecialDomains = status.ConfigIsActiveConcurrent(CfgOptionDontResolveSpecialDomainsKey)
dontResolveSpecialDomains = status.SecurityLevelOption(CfgOptionDontResolveSpecialDomainsKey)
return nil
}
@ -207,7 +261,7 @@ Parameters:
func formatScopeList(list []string) string {
formatted := make([]string, 0, len(list))
for _, domain := range list {
formatted = append(formatted, strings.Trim(domain, "."))
formatted = append(formatted, strings.TrimRight(domain, "."))
}
return strings.Join(formatted, ", ")
}

View file

@ -204,28 +204,32 @@ func getSystemResolvers() (resolvers []*Resolver) {
return resolvers
}
const missingResolversErrorID = "missing-resolvers"
func loadResolvers() {
// TODO: what happens when a lot of processes want to reload at once? we do not need to run this multiple times in a short time frame.
resolversLock.Lock()
defer resolversLock.Unlock()
// Resolve module error about missing resolvers.
module.Resolve(missingResolversErrorID)
newResolvers := append(
getConfiguredResolvers(configuredNameServers()),
getSystemResolvers()...,
)
if len(newResolvers) == 0 {
msg := "no (valid) dns servers found in (user) configuration or system, falling back to defaults"
msg := "no (valid) dns servers found in configuration or system, falling back to defaults"
log.Warningf("resolver: %s", msg)
module.Warning("no-valid-user-resolvers", msg)
module.Warning(missingResolversErrorID, msg)
// load defaults directly, overriding config system
newResolvers = getConfiguredResolvers(defaultNameServers)
if len(newResolvers) == 0 {
msg = "no (valid) dns servers found in configuration or system"
log.Criticalf("resolver: %s", msg)
module.Error("no-valid-default-resolvers", msg)
return
module.Error(missingResolversErrorID, msg)
}
}

36
status/autopilot.go Normal file
View file

@ -0,0 +1,36 @@
package status
import "context"
var runAutoPilot = make(chan struct{})
func triggerAutopilot() {
select {
case runAutoPilot <- struct{}{}:
default:
}
}
func autoPilot(ctx context.Context) error {
for {
select {
case <-ctx.Done():
return nil
case <-runAutoPilot:
}
selected := SelectedSecurityLevel()
mitigation := getHighestMitigationLevel()
active := SecurityLevelNormal
if selected != SecurityLevelOff {
active = selected
} else if mitigation != SecurityLevelOff {
active = mitigation
}
setActiveLevel(active)
pushSystemStatus()
}
}
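As a worked sketch of the selection logic above (not part of this diff; the helper name is hypothetical):
func exampleAutopilotDecision(selected, mitigation uint8) uint8 {
	// Mirrors autoPilot: a user-selected level always wins, otherwise the
	// highest threat mitigation level applies, and SecurityLevelNormal is
	// the fallback if neither is set.
	active := SecurityLevelNormal
	if selected != SecurityLevelOff {
		active = selected
	} else if mitigation != SecurityLevelOff {
		active = mitigation
	}
	return active
}
// For example: exampleAutopilotDecision(SecurityLevelOff, SecurityLevelHigh) yields
// SecurityLevelHigh, while exampleAutopilotDecision(SecurityLevelExtreme, SecurityLevelHigh)
// yields SecurityLevelExtreme, because the user's selection overrules the autopilot.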

View file

@ -1,20 +0,0 @@
package status
// Definitions of Security and Status Levels.
const (
SecurityLevelOff uint8 = 0
SecurityLevelNormal uint8 = 1
SecurityLevelHigh uint8 = 2
SecurityLevelExtreme uint8 = 4
SecurityLevelsNormalAndHigh uint8 = SecurityLevelNormal | SecurityLevelHigh
SecurityLevelsNormalAndExtreme uint8 = SecurityLevelNormal | SecurityLevelExtreme
SecurityLevelsHighAndExtreme uint8 = SecurityLevelHigh | SecurityLevelExtreme
SecurityLevelsAll uint8 = SecurityLevelNormal | SecurityLevelHigh | SecurityLevelExtreme
StatusOff uint8 = 0
StatusError uint8 = 1
StatusWarning uint8 = 2
StatusOk uint8 = 3
)

View file

@ -1,62 +0,0 @@
package status
import (
"context"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
)
const (
statusDBKey = "core:status/status"
)
var (
statusDB = database.NewInterface(&database.Options{
Local: true,
Internal: true,
})
hook *database.RegisteredHook
)
type statusHook struct {
database.HookBase
}
// UsesPrePut implements the Hook interface.
func (sh *statusHook) UsesPrePut() bool {
return true
}
// PrePut implements the Hook interface.
func (sh *statusHook) PrePut(r record.Record) (record.Record, error) {
// record is already locked!
newStatus, err := EnsureSystemStatus(r)
if err != nil {
return nil, err
}
// apply applicable settings
if SelectedSecurityLevel() != newStatus.SelectedSecurityLevel {
module.StartWorker("set selected security level", func(_ context.Context) error {
setSelectedSecurityLevel(newStatus.SelectedSecurityLevel)
return nil
})
}
// TODO: allow setting of Gate17 status (on/off)
// return original status
return status, nil
}
func initStatusHook() (err error) {
hook, err = database.RegisterHook(query.New(statusDBKey), &statusHook{})
return err
}
func stopStatusHook() error {
return hook.Cancel()
}

View file

@ -1,33 +0,0 @@
package status
import (
"github.com/safing/portbase/config"
)
type (
// SecurityLevelOption defines the returned function by ConfigIsActive.
SecurityLevelOption func(minSecurityLevel uint8) bool
)
func max(a, b uint8) uint8 {
if a > b {
return a
}
return b
}
// ConfigIsActive returns whether the given security level dependent config option is on or off.
func ConfigIsActive(name string) SecurityLevelOption {
activeAtLevel := config.GetAsInt(name, int64(SecurityLevelsAll))
return func(minSecurityLevel uint8) bool {
return uint8(activeAtLevel())&max(ActiveSecurityLevel(), minSecurityLevel) > 0
}
}
// ConfigIsActiveConcurrent returns whether the given security level dependent config option is on or off and is concurrency safe.
func ConfigIsActiveConcurrent(name string) SecurityLevelOption {
activeAtLevel := config.Concurrent.GetAsInt(name, int64(SecurityLevelsAll))
return func(minSecurityLevel uint8) bool {
return uint8(activeAtLevel())&max(ActiveSecurityLevel(), minSecurityLevel) > 0
}
}

View file

@ -1,30 +0,0 @@
package status
import (
"sync/atomic"
)
var (
activeSecurityLevel *uint32
selectedSecurityLevel *uint32
)
func init() {
var (
activeSecurityLevelValue uint32
selectedSecurityLevelValue uint32
)
activeSecurityLevel = &activeSecurityLevelValue
selectedSecurityLevel = &selectedSecurityLevelValue
}
// ActiveSecurityLevel returns the current security level.
func ActiveSecurityLevel() uint8 {
return uint8(atomic.LoadUint32(activeSecurityLevel))
}
// SelectedSecurityLevel returns the selected security level.
func SelectedSecurityLevel() uint8 {
return uint8(atomic.LoadUint32(selectedSecurityLevel))
}

View file

@ -1,16 +0,0 @@
package status
import "testing"
func TestGet(t *testing.T) {
// only test for panics
// TODO: write real tests
ActiveSecurityLevel()
SelectedSecurityLevel()
option := ConfigIsActive("invalid")
option(0)
option = ConfigIsActiveConcurrent("invalid")
option(0)
}

60
status/mitigation.go Normal file
View file

@ -0,0 +1,60 @@
package status
import (
"sync"
"github.com/safing/portbase/log"
)
type knownThreats struct {
sync.RWMutex
// active threats and their recommended mitigation level
list map[string]uint8
}
var threats = &knownThreats{
list: make(map[string]uint8),
}
// SetMitigationLevel sets the mitigation level for id
// to mitigation. If mitigation is SecurityLevelOff, the
// mitigation record will be removed. If mitigation is
// an invalid level, the call to SetMitigationLevel is a
// no-op.
func SetMitigationLevel(id string, mitigation uint8) {
if !IsValidSecurityLevel(mitigation) {
log.Warningf("tried to set invalid mitigation level %d for threat %s", mitigation, id)
return
}
defer triggerAutopilot()
threats.Lock()
defer threats.Unlock()
if mitigation == 0 {
delete(threats.list, id)
} else {
threats.list[id] = mitigation
}
}
// DeleteMitigationLevel deletes the mitigation level for id.
func DeleteMitigationLevel(id string) {
SetMitigationLevel(id, SecurityLevelOff)
}
// getHighestMitigationLevel returns the highest mitigation
// level set on a threat.
func getHighestMitigationLevel() uint8 {
threats.RLock()
defer threats.RUnlock()
var level uint8 = SecurityLevelNormal
for _, lvl := range threats.list {
if lvl > level {
level = lvl
}
}
return level
}
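A minimal usage sketch for the mitigation API above (not part of this diff; the threat ID is hypothetical):
func exampleMitigation() {
	// Record a threat that recommends the High security level. The call
	// validates the level, stores it and triggers the autopilot worker,
	// which raises the active level asynchronously.
	SetMitigationLevel("example-portscan", SecurityLevelHigh)

	// When the threat is over, drop its record again; the autopilot then
	// falls back to the highest remaining mitigation level, or Normal.
	DeleteMitigationLevel("example-portscan")
}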

View file

@ -1,9 +1,10 @@
package status
import (
"github.com/safing/portbase/database"
"github.com/safing/portbase/log"
"context"
"github.com/safing/portbase/modules"
"github.com/safing/portmaster/netenv"
)
var (
@ -11,56 +12,30 @@ var (
)
func init() {
module = modules.Register("status", nil, start, stop, "base")
module = modules.Register("status", nil, start, nil, "base")
}
func start() error {
err := initSystemStatus()
if err != nil {
if err := setupRuntimeProvider(); err != nil {
return err
}
err = startNetEnvHooking()
module.StartWorker("auto-pilot", autoPilot)
triggerAutopilot()
err := module.RegisterEventHook(
"netenv",
netenv.OnlineStatusChangedEvent,
"update online status in system status",
func(_ context.Context, _ interface{}) error {
triggerAutopilot()
return nil
},
)
if err != nil {
return err
}
status.Save()
return initStatusHook()
}
func initSystemStatus() error {
// load status from database
r, err := statusDB.Get(statusDBKey)
switch err {
case nil:
loadedStatus, err := EnsureSystemStatus(r)
if err != nil {
log.Criticalf("status: failed to unwrap system status: %s", err)
} else {
status = loadedStatus
}
case database.ErrNotFound:
// create new status
default:
log.Criticalf("status: failed to load system status: %s", err)
}
status.Lock()
defer status.Unlock()
// load status into atomic getters
atomicUpdateSelectedSecurityLevel(status.SelectedSecurityLevel)
// update status
status.updateThreatMitigationLevel()
status.autopilot()
status.updateOnlineStatus()
return nil
}
func stop() error {
return stopStatusHook()
}

View file

@ -1,28 +0,0 @@
package status
import (
"context"
"github.com/safing/portmaster/netenv"
)
// startNetEnvHooking starts the listener for online status changes.
func startNetEnvHooking() error {
return module.RegisterEventHook(
"netenv",
netenv.OnlineStatusChangedEvent,
"update online status in system status",
func(_ context.Context, _ interface{}) error {
status.Lock()
status.updateOnlineStatus()
status.Unlock()
status.Save()
return nil
},
)
}
func (s *SystemStatus) updateOnlineStatus() {
s.OnlineStatus = netenv.GetOnlineStatus()
s.CaptivePortal = netenv.GetCaptivePortal()
}

97
status/provider.go Normal file
View file

@ -0,0 +1,97 @@
package status
import (
"fmt"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/netenv"
)
var (
pushUpdate runtime.PushFunc
)
func setupRuntimeProvider() (err error) {
// register the system status getter
//
statusProvider := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
return []record.Record{buildSystemStatus()}, nil
})
pushUpdate, err = runtime.Register("system/status", statusProvider)
if err != nil {
return err
}
// register the selected security level setter
//
levelProvider := runtime.SimpleValueSetterFunc(setSelectedSecurityLevel)
_, err = runtime.Register("system/security-level", levelProvider)
if err != nil {
return err
}
return nil
}
// setSelectedSecurityLevel updates the selected security level
func setSelectedSecurityLevel(r record.Record) (record.Record, error) {
var upd *SelectedSecurityLevelRecord
if r.IsWrapped() {
upd = new(SelectedSecurityLevelRecord)
if err := record.Unwrap(r, upd); err != nil {
return nil, err
}
} else {
// TODO(ppacher): this can actually never happen
// as we're write-only and ValueProvider.Set() should
// only ever be called from the HTTP API (so r must be wrapped).
// Though, make sure we handle the case as well ...
var ok bool
upd, ok = r.(*SelectedSecurityLevelRecord)
if !ok {
return nil, fmt.Errorf("expected *SelectedSecurityLevelRecord but got %T", r)
}
}
if !IsValidSecurityLevel(upd.SelectedSecurityLevel) {
return nil, fmt.Errorf("invalid security level: %d", upd.SelectedSecurityLevel)
}
if SelectedSecurityLevel() != upd.SelectedSecurityLevel {
setSelectedLevel(upd.SelectedSecurityLevel)
triggerAutopilot()
}
return r, nil
}
// buildSystemStatus builds a new system status record.
func buildSystemStatus() *SystemStatusRecord {
status := &SystemStatusRecord{
ActiveSecurityLevel: ActiveSecurityLevel(),
SelectedSecurityLevel: SelectedSecurityLevel(),
ThreatMitigationLevel: getHighestMitigationLevel(),
CaptivePortal: netenv.GetCaptivePortal(),
OnlineStatus: netenv.GetOnlineStatus(),
}
status.CreateMeta()
status.SetKey("runtime:system/status")
return status
}
// pushSystemStatus pushes a new system status via
// the runtime database.
func pushSystemStatus() {
if pushUpdate == nil {
return
}
record := buildSystemStatus()
record.Lock()
defer record.Unlock()
pushUpdate(record)
}

42
status/records.go Normal file
View file

@ -0,0 +1,42 @@
package status
import (
"sync"
"github.com/safing/portbase/database/record"
"github.com/safing/portmaster/netenv"
)
// SystemStatusRecord describes the overall status of the Portmaster.
// It's a read-only record exposed via runtime:system/status.
type SystemStatusRecord struct {
record.Base
sync.Mutex
// ActiveSecurityLevel holds the currently
// active security level.
ActiveSecurityLevel uint8
// SelectedSecurityLevel holds the security level
// as selected by the user.
SelectedSecurityLevel uint8
// ThreatMitigationLevel holds the security level
// as selected by the auto-pilot.
ThreatMitigationLevel uint8
// OnlineStatus holds the current online status as
// seen by the netenv package.
OnlineStatus netenv.OnlineStatus
// CaptivePortal holds all information about the captive
// portal of the network the portmaster is currently
// connected to, if any.
CaptivePortal *netenv.CaptivePortal
}
// SelectedSecurityLevelRecord is used as a dummy record.Record
// to provide a simple runtime configuration for the user.
// It is write-only and exposed at runtime:system/security-level.
type SelectedSecurityLevelRecord struct {
record.Base
sync.Mutex
SelectedSecurityLevel uint8
}

114
status/security_level.go Normal file
View file

@ -0,0 +1,114 @@
package status
import "github.com/safing/portbase/config"
type (
// SecurityLevelOptionFunc can be called with a minimum security level
// and returns whether a given security option is enabled at that level.
// Use SecurityLevelOption() to get a SecurityLevelOptionFunc for a
// specific option.
SecurityLevelOptionFunc func(minSecurityLevel uint8) bool
)
// DisplayHintSecurityLevel is an external option hint for security levels.
// It's meant to be used as a value for config.DisplayHintAnnotation.
const DisplayHintSecurityLevel string = "security level"
// Security levels
const (
SecurityLevelOff uint8 = 0
SecurityLevelNormal uint8 = 1
SecurityLevelHigh uint8 = 2
SecurityLevelExtreme uint8 = 4
SecurityLevelsNormalAndHigh uint8 = SecurityLevelNormal | SecurityLevelHigh
SecurityLevelsNormalAndExtreme uint8 = SecurityLevelNormal | SecurityLevelExtreme
SecurityLevelsHighAndExtreme uint8 = SecurityLevelHigh | SecurityLevelExtreme
SecurityLevelsAll uint8 = SecurityLevelNormal | SecurityLevelHigh | SecurityLevelExtreme
)
// SecurityLevelValues defines all possible security levels.
var SecurityLevelValues = []config.PossibleValue{
{
Name: "Normal",
Value: SecurityLevelsAll,
},
{
Name: "High",
Value: SecurityLevelsHighAndExtreme,
},
{
Name: "Extreme",
Value: SecurityLevelExtreme,
},
}
// AllSecurityLevelValues is like SecurityLevelValues but also includes Off.
var AllSecurityLevelValues = append([]config.PossibleValue{
{
Name: "Off",
Value: SecurityLevelOff,
},
},
SecurityLevelValues...,
)
// IsValidSecurityLevel returns true if level is a valid,
// single security level. Level is also invalid if it's a
// bitmask with more than one security level set.
func IsValidSecurityLevel(level uint8) bool {
return level == SecurityLevelOff ||
level == SecurityLevelNormal ||
level == SecurityLevelHigh ||
level == SecurityLevelExtreme
}
// IsValidSecurityLevelMask returns true if level is a valid
// security level mask. It's like IsValidSecurityLevel but
// also allows bitmask combinations.
func IsValidSecurityLevelMask(level uint8) bool {
return level <= 7
}
func max(a, b uint8) uint8 {
if a > b {
return a
}
return b
}
// SecurityLevelOption returns a function to check if the option
// identified by name is active at a given minimum security level.
// The returned function is safe for concurrent use with configuration
// updates.
func SecurityLevelOption(name string) SecurityLevelOptionFunc {
activeAtLevel := config.Concurrent.GetAsInt(name, int64(SecurityLevelsAll))
return func(minSecurityLevel uint8) bool {
return uint8(activeAtLevel())&max(ActiveSecurityLevel(), minSecurityLevel) > 0
}
}
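A brief worked example of the bitmask check performed by the returned function (hypothetical option value, not part of this diff):
func exampleSecurityLevelCheck(minSecurityLevel uint8) bool {
	// An option configured for SecurityLevelsHighAndExtreme stores 2|4 = 6.
	// At SecurityLevelNormal (1): 6&1 == 0, so the option is inactive.
	// At SecurityLevelHigh (2) or SecurityLevelExtreme (4) the AND is
	// non-zero and the option is active.
	activeAtLevels := SecurityLevelsHighAndExtreme
	return activeAtLevels&max(ActiveSecurityLevel(), minSecurityLevel) > 0
}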
// SecurityLevelString returns the given security level as a string.
func SecurityLevelString(level uint8) string {
switch level {
case SecurityLevelOff:
return "Off"
case SecurityLevelNormal:
return "Normal"
case SecurityLevelHigh:
return "High"
case SecurityLevelExtreme:
return "Extreme"
case SecurityLevelsNormalAndHigh:
return "Normal and High"
case SecurityLevelsNormalAndExtreme:
return "Normal and Extreme"
case SecurityLevelsHighAndExtreme:
return "High and Extreme"
case SecurityLevelsAll:
return "Normal, High and Extreme"
default:
return "INVALID"
}
}

View file

@ -1,54 +0,0 @@
package status
import (
"sync/atomic"
"github.com/safing/portbase/log"
)
// autopilot automatically adjusts the security level as needed.
func (s *SystemStatus) autopilot() {
// check if users is overruling
if s.SelectedSecurityLevel > SecurityLevelOff {
s.ActiveSecurityLevel = s.SelectedSecurityLevel
atomicUpdateActiveSecurityLevel(s.SelectedSecurityLevel)
return
}
// update active security level
switch s.ThreatMitigationLevel {
case SecurityLevelOff:
s.ActiveSecurityLevel = SecurityLevelNormal
atomicUpdateActiveSecurityLevel(SecurityLevelNormal)
case SecurityLevelNormal, SecurityLevelHigh, SecurityLevelExtreme:
s.ActiveSecurityLevel = s.ThreatMitigationLevel
atomicUpdateActiveSecurityLevel(s.ThreatMitigationLevel)
default:
log.Errorf("status: threat mitigation level is set to invalid value: %d", s.ThreatMitigationLevel)
}
}
// setSelectedSecurityLevel sets the selected security level.
func setSelectedSecurityLevel(level uint8) {
switch level {
case SecurityLevelOff, SecurityLevelNormal, SecurityLevelHigh, SecurityLevelExtreme:
status.Lock()
status.SelectedSecurityLevel = level
atomicUpdateSelectedSecurityLevel(level)
status.autopilot()
status.Unlock()
status.Save()
default:
log.Errorf("status: tried to set selected security level to invalid value: %d", level)
}
}
func atomicUpdateActiveSecurityLevel(level uint8) {
atomic.StoreUint32(activeSecurityLevel, uint32(level))
}
func atomicUpdateSelectedSecurityLevel(level uint8) {
atomic.StoreUint32(selectedSecurityLevel, uint32(level))
}

View file

@ -1,11 +0,0 @@
package status
import "testing"
func TestSet(t *testing.T) {
// only test for panics
// TODO: write real tests
setSelectedSecurityLevel(0)
}

30
status/state.go Normal file
View file

@ -0,0 +1,30 @@
package status
import (
"sync/atomic"
)
var (
activeLevel = new(uint32)
selectedLevel = new(uint32)
)
func setActiveLevel(lvl uint8) {
atomic.StoreUint32(activeLevel, uint32(lvl))
}
func setSelectedLevel(lvl uint8) {
atomic.StoreUint32(selectedLevel, uint32(lvl))
}
// ActiveSecurityLevel returns the currently active security
// level.
func ActiveSecurityLevel() uint8 {
return uint8(atomic.LoadUint32(activeLevel))
}
// SelectedSecurityLevel returns the security level as selected
// by the user.
func SelectedSecurityLevel() uint8 {
return uint8(atomic.LoadUint32(selectedLevel))
}

View file

@ -1,113 +0,0 @@
package status
import (
"context"
"fmt"
"sync"
"github.com/safing/portmaster/netenv"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
)
var (
status *SystemStatus
)
func init() {
status = &SystemStatus{
Threats: make(map[string]*Threat),
}
status.SetKey(statusDBKey)
}
// SystemStatus saves basic information about the current system status.
//nolint:maligned // TODO
type SystemStatus struct {
record.Base
sync.Mutex
ActiveSecurityLevel uint8
SelectedSecurityLevel uint8
OnlineStatus netenv.OnlineStatus
CaptivePortal *netenv.CaptivePortal
ThreatMitigationLevel uint8
Threats map[string]*Threat
}
// SaveAsync saves the SystemStatus to the database asynchronously.
func (s *SystemStatus) SaveAsync() {
module.StartWorker("save system status", func(_ context.Context) error {
s.Save()
return nil
})
}
// Save saves the SystemStatus to the database.
func (s *SystemStatus) Save() {
err := statusDB.Put(s)
if err != nil {
log.Errorf("status: could not save status to database: %s", err)
}
}
// EnsureSystemStatus ensures that the given record is of type SystemStatus and unwraps it, if needed.
func EnsureSystemStatus(r record.Record) (*SystemStatus, error) {
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
new := &SystemStatus{}
err := record.Unwrap(r, new)
if err != nil {
return nil, err
}
return new, nil
}
// or adjust type
new, ok := r.(*SystemStatus)
if !ok {
return nil, fmt.Errorf("record not of type *SystemStatus, but %T", r)
}
return new, nil
}
// FmtActiveSecurityLevel returns the current security level as a string.
func FmtActiveSecurityLevel() string {
status.Lock()
mitigationLevel := status.ThreatMitigationLevel
status.Unlock()
active := ActiveSecurityLevel()
s := FmtSecurityLevel(active)
if mitigationLevel > 0 && active != mitigationLevel {
s += "*"
}
return s
}
// FmtSecurityLevel returns the given security level as a string.
func FmtSecurityLevel(level uint8) string {
switch level {
case SecurityLevelOff:
return "Off"
case SecurityLevelNormal:
return "Normal"
case SecurityLevelHigh:
return "High"
case SecurityLevelExtreme:
return "Extreme"
case SecurityLevelsNormalAndHigh:
return "Normal and High"
case SecurityLevelsNormalAndExtreme:
return "Normal and Extreme"
case SecurityLevelsHighAndExtreme:
return "High and Extreme"
case SecurityLevelsAll:
return "Normal, High and Extreme"
default:
return "INVALID"
}
}

View file

@ -1,34 +0,0 @@
package status
import "testing"
func TestStatus(t *testing.T) {
setSelectedSecurityLevel(SecurityLevelOff)
if FmtActiveSecurityLevel() != "Normal" {
t.Errorf("unexpected string representation: %s", FmtActiveSecurityLevel())
}
setSelectedSecurityLevel(SecurityLevelNormal)
AddOrUpdateThreat(&Threat{MitigationLevel: SecurityLevelHigh})
if FmtActiveSecurityLevel() != "Normal*" {
t.Errorf("unexpected string representation: %s", FmtActiveSecurityLevel())
}
setSelectedSecurityLevel(SecurityLevelHigh)
if FmtActiveSecurityLevel() != "High" {
t.Errorf("unexpected string representation: %s", FmtActiveSecurityLevel())
}
setSelectedSecurityLevel(SecurityLevelHigh)
AddOrUpdateThreat(&Threat{MitigationLevel: SecurityLevelExtreme})
if FmtActiveSecurityLevel() != "High*" {
t.Errorf("unexpected string representation: %s", FmtActiveSecurityLevel())
}
setSelectedSecurityLevel(SecurityLevelExtreme)
if FmtActiveSecurityLevel() != "Extreme" {
t.Errorf("unexpected string representation: %s", FmtActiveSecurityLevel())
}
}

View file

@ -1,73 +1,133 @@
package status
import (
"strings"
"sync"
"time"
"github.com/safing/portbase/log"
"github.com/safing/portbase/notifications"
)
// Threat describes a detected threat.
// Threat represents a threat to the system.
// A threat is basically a notification with strongly
// typed EventData. Use the methods exported on Threat
// to manipulate the EventData field and push updates
// of the notification.
// Do not use EventData directly!
type Threat struct {
ID string // A unique ID chosen by reporting module (eg. modulePrefix-incident) to periodically check threat existence
Name string // Descriptive (human readable) name for detected threat
Description string // Simple description
AdditionalData interface{} // Additional data a module wants to make available for the user
MitigationLevel uint8 // Recommended Security Level to switch to for mitigation
Started int64
Ended int64
// TODO: add locking
*notifications.Notification
}
// AddOrUpdateThreat adds or updates a new threat in the system status.
func AddOrUpdateThreat(new *Threat) {
status.Lock()
defer status.Unlock()
status.Threats[new.ID] = new
status.updateThreatMitigationLevel()
status.autopilot()
status.SaveAsync()
// ThreatPayload holds threat-related information.
type ThreatPayload struct {
// MitigationLevel holds the recommended security
// level to mitigate the threat.
MitigationLevel uint8
// Started holds the UNIX epoch timestamp in seconds
// at which the threat has been detected the first time.
Started int64
// Ended holds the UNIX epoch timestamp in seconds
// at which the threat has been detected the last time.
Ended int64
// Data may hold threat-specific data.
Data interface{}
}
// DeleteThreat deletes a threat from the system status.
func DeleteThreat(id string) {
status.Lock()
defer status.Unlock()
delete(status.Threats, id)
status.updateThreatMitigationLevel()
status.autopilot()
status.SaveAsync()
}
// GetThreats returns all threats who's IDs are prefixed by the given string, and also a locker for editing them.
func GetThreats(idPrefix string) ([]*Threat, sync.Locker) {
status.Lock()
defer status.Unlock()
var exportedThreats []*Threat
for id, threat := range status.Threats {
if strings.HasPrefix(id, idPrefix) {
exportedThreats = append(exportedThreats, threat)
}
// NewThreat returns a new threat. Note that the
// threat only gets published once Publish is called.
//
// Example:
//
// threat := NewThreat("portscan", "Someone is scanning you").
// SetData(portscanResult).
// SetMitigationLevel(SecurityLevelExtreme).
// Publish()
//
// // Once you're done, delete the threat
// threat.Delete().Publish()
//
func NewThreat(id, title, msg string) *Threat {
t := &Threat{
Notification: &notifications.Notification{
EventID: id,
Type: notifications.Warning,
Title: title,
Category: "Threat",
Message: msg,
},
}
return exportedThreats, &status.Mutex
t.threatData().Started = time.Now().Unix()
return t
}
func (s *SystemStatus) updateThreatMitigationLevel() {
// get highest mitigationLevel
var mitigationLevel uint8
for _, threat := range s.Threats {
switch threat.MitigationLevel {
case SecurityLevelNormal, SecurityLevelHigh, SecurityLevelExtreme:
if threat.MitigationLevel > mitigationLevel {
mitigationLevel = threat.MitigationLevel
}
}
// SetData sets the data member of the threat payload.
func (t *Threat) SetData(data interface{}) *Threat {
t.Lock()
defer t.Unlock()
t.threatData().Data = data
return t
}
// SetMitigationLevel sets the mitigation level of the
// threat data.
func (t *Threat) SetMitigationLevel(lvl uint8) *Threat {
t.Lock()
defer t.Unlock()
t.threatData().MitigationLevel = lvl
return t
}
// Delete sets the ended timestamp of the threat.
func (t *Threat) Delete() *Threat {
t.Lock()
defer t.Unlock()
t.threatData().Ended = time.Now().Unix()
return t
}
// Payload returns a copy of the threat payload.
func (t *Threat) Payload() ThreatPayload {
t.Lock()
defer t.Unlock()
return *t.threatData() // creates a copy
}
// Publish publishes the current threat.
// Publish should always be called when changes to
// the threat are recorded.
func (t *Threat) Publish() *Threat {
data := t.Payload()
if data.Ended > 0 {
DeleteMitigationLevel(t.EventID)
} else {
SetMitigationLevel(t.EventID, data.MitigationLevel)
}
// set new ThreatMitigationLevel
s.ThreatMitigationLevel = mitigationLevel
t.Save()
return t
}
// threatData returns the threat payload associated with this
// threat. If no data has been created yet, a new ThreatPayload
// is attached to t and returned. The caller must make sure to
// hold appropriate locks when working with the returned payload.
func (t *Threat) threatData() *ThreatPayload {
if t.EventData == nil {
t.EventData = new(ThreatPayload)
}
payload, ok := t.EventData.(*ThreatPayload)
if !ok {
log.Warningf("unexpected type %T in thread notification payload", t.EventData)
return new(ThreatPayload)
}
return payload
}
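A sketch (not part of this diff) of how a published threat feeds the mitigation map and, through it, the autopilot; the ID and texts are hypothetical:
func exampleThreatLifecycle() {
	// Publishing a threat with a mitigation level stores it via
	// SetMitigationLevel, which triggers the autopilot.
	threat := NewThreat("example-portscan", "Port Scan", "Someone is scanning you.").
		SetMitigationLevel(SecurityLevelHigh).
		Publish()

	// Marking the threat as ended and publishing again removes the
	// mitigation record and lets the autopilot relax the active level.
	threat.Delete().Publish()
}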

View file

@ -28,7 +28,7 @@ func registerRoutes() error {
api.RegisterHandleFunc("/ui/modules/{moduleName:[a-z]+}", redirAddSlash).Methods("GET", "HEAD")
api.RegisterHandleFunc("/ui/modules/{moduleName:[a-z]+}/", ServeBundle("")).Methods("GET", "HEAD")
api.RegisterHandleFunc("/ui/modules/{moduleName:[a-z]+}/{resPath:[a-zA-Z0-9/\\._-]+}", ServeBundle("")).Methods("GET", "HEAD")
api.RegisterHandleFunc("/", RedirectToBase)
api.RegisterHandleFunc("/", redirectToDefault)
return nil
}
@ -97,13 +97,21 @@ func ServeFileFromBundle(w http.ResponseWriter, r *http.Request, bundleName stri
readCloser, err := bundle.Open(path)
if err != nil {
if err == resources.ErrNotFound {
log.Tracef("ui: requested resource \"%s\" not found in bundle %s: %s", path, bundleName, err)
http.Error(w, err.Error(), http.StatusNotFound)
// Check if there is a base index.html file we can serve instead.
var indexErr error
path = "index.html"
readCloser, indexErr = bundle.Open(path)
if indexErr != nil {
// If we cannot get an index, continue with handling the original error.
log.Tracef("ui: requested resource \"%s\" not found in bundle %s: %s", path, bundleName, err)
http.Error(w, err.Error(), http.StatusNotFound)
return
}
} else {
log.Tracef("ui: error opening module %s: %s", bundleName, err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
return
}
// set content type
@ -131,9 +139,9 @@ func ServeFileFromBundle(w http.ResponseWriter, r *http.Request, bundleName stri
readCloser.Close()
}
// RedirectToBase redirects the requests to the control app
func RedirectToBase(w http.ResponseWriter, r *http.Request) {
u, err := url.Parse("/ui/modules/base/")
// redirectToDefault redirects the request to the default UI module.
func redirectToDefault(w http.ResponseWriter, r *http.Request) {
u, err := url.Parse("/ui/modules/portmaster/")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@ -141,6 +149,7 @@ func RedirectToBase(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, r.URL.ResolveReference(u).String(), http.StatusTemporaryRedirect)
}
// redirAddSlash redirects the request to the same URL, but with a trailing slash.
func redirAddSlash(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, r.RequestURI+"/", http.StatusPermanentRedirect)
}

View file

@ -2,7 +2,6 @@ package updates
import (
"context"
"fmt"
"github.com/safing/portbase/config"
"github.com/safing/portbase/log"
@ -15,42 +14,56 @@ const (
var (
releaseChannel config.StringOption
devMode config.BoolOption
disableUpdates config.BoolOption
enableUpdates config.BoolOption
previousReleaseChannel string
updatesCurrentlyDisabled bool
previousDevMode bool
previousReleaseChannel string
updatesCurrentlyEnabled bool
previousDevMode bool
)
func registerConfig() error {
err := config.Register(&config.Option{
Name: "Release Channel",
Key: releaseChannelKey,
Description: "The Release Channel changes which updates are applied. When using beta, you will receive new features earlier and Portmaster will update more frequently. Some beta or experimental features are also available in the stable release channel.",
Order: 1,
Description: "Switch release channel.",
OptType: config.OptTypeString,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelBeta,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
RequiresRestart: false,
DefaultValue: releaseChannelStable,
ExternalOptType: "string list",
ValidationRegex: fmt.Sprintf("^(%s|%s)$", releaseChannelStable, releaseChannelBeta),
PossibleValues: []config.PossibleValue{
{
Name: "Stable",
Value: releaseChannelStable,
},
{
Name: "Beta",
Value: releaseChannelBeta,
},
},
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -4,
config.DisplayHintAnnotation: config.DisplayHintOneOf,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
err = config.Register(&config.Option{
Name: "Disable Updates",
Key: disableUpdatesKey,
Description: "Disable automatic updates.",
Order: 64,
Name: "Automatic Updates",
Key: enableUpdatesKey,
Description: "Enable automatic checking, downloading and applying of updates. This affects all kinds of updates, including intelligence feeds and broadcast notifications.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: false,
ExternalOptType: "disable updates",
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -12,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
@ -63,8 +76,8 @@ func initConfig() {
releaseChannel = config.GetAsString(releaseChannelKey, releaseChannelStable)
previousReleaseChannel = releaseChannel()
disableUpdates = config.GetAsBool(disableUpdatesKey, false)
updatesCurrentlyDisabled = disableUpdates()
enableUpdates = config.GetAsBool(enableUpdatesKey, true)
updatesCurrentlyEnabled = enableUpdates()
devMode = config.GetAsBool(cfgDevModeKey, false)
previousDevMode = devMode()
@ -86,10 +99,10 @@ func updateRegistryConfig(_ context.Context, _ interface{}) error {
changed = true
}
if disableUpdates() != updatesCurrentlyDisabled {
updatesCurrentlyDisabled = disableUpdates()
if enableUpdates() != updatesCurrentlyEnabled {
updatesCurrentlyEnabled = enableUpdates()
changed = true
forceUpdate = !updatesCurrentlyDisabled
forceUpdate = updatesCurrentlyEnabled
}
if changed {
@ -100,7 +113,7 @@ func updateRegistryConfig(_ context.Context, _ interface{}) error {
module.Resolve(updateFailed)
_ = TriggerUpdate()
log.Infof("updates: automatic updates enabled again.")
} else if updatesCurrentlyDisabled {
} else if !updatesCurrentlyEnabled {
module.Warning(updateFailed, "Automatic updates are disabled! This also affects security updates and threat intelligence.")
log.Warningf("updates: automatic updates are now disabled.")
}
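
The handler above only reacts when the stored setting actually flips, and forces an immediate update run the moment updates are switched back on so nothing stays stale. A reduced, dependency-free sketch of that toggle logic (plain Go, hypothetical types in place of the portbase config options):

```go
package main

import "fmt"

// updateToggle tracks the last applied state so the handler only acts on real changes.
type updateToggle struct {
	currentlyEnabled bool
}

// apply returns whether the state changed and whether an immediate update
// should be forced.
func (t *updateToggle) apply(enabled bool) (changed, forceUpdate bool) {
	if enabled == t.currentlyEnabled {
		return false, false
	}
	t.currentlyEnabled = enabled
	// Force a run when updates come back on, so nothing stays stale.
	return true, enabled
}

func main() {
	t := &updateToggle{currentlyEnabled: true}
	fmt.Println(t.apply(true))  // false false: no change
	fmt.Println(t.apply(false)) // true false: disabled, nothing to force
	fmt.Println(t.apply(true))  // true true: re-enabled, force an update
}
```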

View file

@ -35,6 +35,8 @@ type versions struct {
Core *info.Info
Resources map[string]*updater.Resource
Beta bool
Staging bool
internalSave bool
}
@ -43,6 +45,8 @@ func initVersionExport() (err error) {
// init export struct
versionExport = &versions{
internalSave: true,
Beta: registry.Beta,
Staging: staging,
}
versionExport.SetKey(versionsDBKey)

View file

@ -4,6 +4,8 @@ import (
"context"
"flag"
"fmt"
"os"
"path/filepath"
"runtime"
"time"
@ -20,7 +22,7 @@ const (
releaseChannelStable = "stable"
releaseChannelBeta = "beta"
disableUpdatesKey = "core/disableUpdates"
enableUpdatesKey = "core/automaticUpdates"
// ModuleName is the name of the update module
// and can be used when declaring module dependencies.
@ -49,6 +51,7 @@ var (
module *modules.Module
registry *updater.ResourceRegistry
userAgentFromFlag string
staging bool
updateTask *modules.Task
updateASAP bool
@ -76,13 +79,13 @@ func init() {
module.RegisterEvent(ResourceUpdateEvent)
flag.StringVar(&userAgentFromFlag, "update-agent", "", "Sets the user agent for requests to the update server")
flag.BoolVar(&staging, "staging", false, "Use staging update channel (for testing only)")
// initialize mandatory updates
if onWindows {
MandatoryUpdates = []string{
platform("core/portmaster-core.exe"),
platform("start/portmaster-start.exe"),
platform("app/portmaster-app.exe"),
platform("notifier/portmaster-notifier.exe"),
platform("notifier/portmaster-snoretoast.exe"),
}
@ -90,10 +93,15 @@ func init() {
MandatoryUpdates = []string{
platform("core/portmaster-core"),
platform("start/portmaster-start"),
platform("app/portmaster-app"),
platform("notifier/portmaster-notifier"),
}
}
MandatoryUpdates = append(
MandatoryUpdates,
platform("app/portmaster-app.zip"),
"all/ui/modules/portmaster.zip",
)
}
func prep() error {
@ -139,9 +147,12 @@ func start() error {
},
UserAgent: UserAgent,
MandatoryUpdates: MandatoryUpdates,
Beta: releaseChannel() == releaseChannelBeta,
DevMode: devMode(),
Online: true,
AutoUnpack: []string{
platform("app/portmaster-app.zip"),
},
Beta: releaseChannel() == releaseChannelBeta,
DevMode: devMode(),
Online: true,
}
if userAgentFromFlag != "" {
// override with flag value
@ -159,18 +170,33 @@ func start() error {
Beta: false,
})
registry.AddIndex(updater.Index{
Path: "beta.json",
Stable: false,
Beta: true,
})
if registry.Beta {
registry.AddIndex(updater.Index{
Path: "beta.json",
Stable: false,
Beta: true,
})
}
registry.AddIndex(updater.Index{
Path: "all/intel/intel.json",
Stable: true,
Beta: false,
Beta: true,
})
if stagingActive() {
// Set flag no matter how staging was activated.
staging = true
log.Warning("updates: staging environment is active")
registry.AddIndex(updater.Index{
Path: "staging.json",
Stable: true,
Beta: true,
})
}
err = registry.LoadIndexes(module.Ctx)
if err != nil {
log.Warningf("updates: failed to load indexes: %s", err)
@ -184,6 +210,7 @@ func start() error {
registry.SelectVersions()
module.TriggerEvent(VersionUpdateEvent, nil)
// Initialize the version export - this requires the registry to be set up.
err = initVersionExport()
if err != nil {
return err
@ -245,7 +272,7 @@ func DisableUpdateSchedule() error {
}
func checkForUpdates(ctx context.Context) (err error) {
if updatesCurrentlyDisabled {
if !updatesCurrentlyEnabled {
log.Debugf("updates: automatic updates are disabled")
return nil
}
@ -257,7 +284,7 @@ func checkForUpdates(ctx context.Context) (err error) {
if err == nil {
module.Resolve(updateInProgress)
} else {
module.Warning(updateFailed, "Failed to check for updates: "+err.Error())
module.Warning(updateFailed, "Failed to update: "+err.Error())
}
}()
@ -273,6 +300,13 @@ func checkForUpdates(ctx context.Context) (err error) {
registry.SelectVersions()
// Unpack selected resources.
err = registry.UnpackResources()
if err != nil {
err = fmt.Errorf("failed to update: %w", err)
return
}
module.TriggerEvent(ResourceUpdateEvent, nil)
return nil
}
@ -288,3 +322,14 @@ func stop() error {
func platform(identifier string) string {
return fmt.Sprintf("%s_%s/%s", runtime.GOOS, runtime.GOARCH, identifier)
}
func stagingActive() bool {
// Check flag and env variable.
if staging || os.Getenv("PORTMASTER_STAGING") == "enabled" {
return true
}
// Check if the staging index is present and accessible.
_, err := os.Stat(filepath.Join(registry.StorageDir().Path, "staging.json"))
return err == nil
}
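
stagingActive treats three signals as equivalent: the --staging flag, the PORTMASTER_STAGING=enabled environment variable, and a staging.json already present in the registry's storage directory, which keeps the channel sticky across restarts. A standalone sketch of the same check (hypothetical storage path, plain Go):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// stagingRequested reports whether staging should be used, given an explicit
// flag value, the environment, and the update storage directory on disk.
func stagingRequested(flagValue bool, storageDir string) bool {
	if flagValue || os.Getenv("PORTMASTER_STAGING") == "enabled" {
		return true
	}
	// A leftover staging index keeps the channel active across restarts.
	_, err := os.Stat(filepath.Join(storageDir, "staging.json"))
	return err == nil
}

func main() {
	// Hypothetical storage directory, for illustration only.
	fmt.Println(stagingRequested(false, "/var/lib/portmaster/updates"))
}
```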

View file

@ -15,6 +15,7 @@ import (
processInfo "github.com/shirou/gopsutil/process"
"github.com/tevino/abool"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/info"
"github.com/safing/portbase/log"
"github.com/safing/portbase/notifications"
@ -99,18 +100,30 @@ func upgradeCoreNotify() error {
// check for new version
if info.GetInfo().Version != pmCoreUpdate.Version() {
n := notifications.NotifyInfo(
"updates:core-update-available",
fmt.Sprintf("There is an update available for the Portmaster core (v%s), please restart the Portmaster to apply the update.", pmCoreUpdate.Version()),
notifications.Action{
ID: "later",
Text: "Later",
n := notifications.Notify(&notifications.Notification{
EventID: "updates:core-update-available",
Type: notifications.Info,
Title: fmt.Sprintf(
"Portmaster Update v%s",
pmCoreUpdate.Version(),
),
Category: "Core",
Message: fmt.Sprintf(
`:tada: Update to **Portmaster v%s** is available!
Please restart the Portmaster to apply the update.`,
pmCoreUpdate.Version(),
),
AvailableActions: []*notifications.Action{
{
ID: "restart",
Text: "Restart",
},
{
ID: "later",
Text: "Not now",
},
},
notifications.Action{
ID: "restart",
Text: "Restart Portmaster Now",
},
)
})
n.SetActionFunction(upgradeCoreNotifyActionHandler)
log.Debugf("updates: new portmaster version available, sending notification to user")
@ -119,7 +132,7 @@ func upgradeCoreNotify() error {
return nil
}
func upgradeCoreNotifyActionHandler(n *notifications.Notification) {
func upgradeCoreNotifyActionHandler(_ context.Context, n *notifications.Notification) error {
switch n.SelectedActionID {
case "restart":
// Cannot directly trigger due to import loop.
@ -130,11 +143,13 @@ func upgradeCoreNotifyActionHandler(n *notifications.Notification) {
nil,
)
if err != nil {
log.Warningf("updates: failed to trigger restart via notification: %s", err)
return fmt.Errorf("failed to trigger restart via notification: %s", err)
}
case "later":
n.Expires = time.Now().Unix() // expire immediately
return n.Delete()
}
return nil
}
func upgradeHub() error {
@ -192,12 +207,11 @@ func upgradePortmasterStart() error {
}
// update portmaster-start in data root
rootPmStartPath := filepath.Join(filepath.Dir(registry.StorageDir().Path), filename)
rootPmStartPath := filepath.Join(dataroot.Root().Path, filename)
err := upgradeFile(rootPmStartPath, pmCtrlUpdate)
if err != nil {
return err
}
log.Infof("updates: upgraded %s", rootPmStartPath)
return nil
}
@ -244,10 +258,18 @@ func warnOnIncorrectParentPath() {
if !strings.HasPrefix(absPath, root) {
log.Warningf("detected unexpected path %s for portmaster-start", absPath)
notifications.NotifyWarn(
"updates:unsupported-parent",
fmt.Sprintf("The portmaster has been launched by an unexpected %s binary at %s. Please configure your system to use the binary at %s as this version will be kept up to date automatically.", expectedFileName, absPath, filepath.Join(root, expectedFileName)),
)
notifications.Notify(&notifications.Notification{
EventID: "updates:unsupported-parent",
Type: notifications.Warning,
Title: "Unsupported Launcher",
Category: "Core",
Message: fmt.Sprintf(
"The portmaster has been launched by an unexpected %s binary at %s. Please configure your system to use the binary at %s as this version will be kept up to date automatically.",
expectedFileName,
absPath,
filepath.Join(root, expectedFileName),
),
})
}
}
@ -268,6 +290,7 @@ func upgradeFile(fileToUpgrade string, file *updater.File) error {
// abort if version matches
currentVersion = strings.Trim(strings.TrimSpace(string(out)), "*")
if currentVersion == file.Version() {
log.Tracef("updates: %s is already v%s", fileToUpgrade, file.Version())
// already up to date!
return nil
}
@ -330,6 +353,8 @@ func upgradeFile(fileToUpgrade string, file *updater.File) error {
}
}
}
log.Infof("updates: upgraded %s to v%s", fileToUpgrade, file.Version())
return nil
}
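
upgradeCoreNotifyActionHandler now follows the revamped action-handler signature, taking a context and returning an error, and dispatches on the selected action ID: "restart" triggers the restart path, "later" deletes the notification. A dependency-free sketch of that dispatch shape (hypothetical stand-in types, not the portbase notifications API):

```go
package main

import (
	"context"
	"fmt"
)

// notification is a hypothetical stand-in for the real notification type.
type notification struct {
	SelectedActionID string
}

func (n *notification) delete() error { return nil }

// handleUpdateAction mirrors the switch in upgradeCoreNotifyActionHandler:
// "restart" triggers the restart path, "later" dismisses the notification.
func handleUpdateAction(_ context.Context, n *notification, restart func() error) error {
	switch n.SelectedActionID {
	case "restart":
		if err := restart(); err != nil {
			return fmt.Errorf("failed to trigger restart via notification: %w", err)
		}
	case "later":
		return n.delete()
	}
	return nil
}

func main() {
	err := handleUpdateAction(context.Background(),
		&notification{SelectedActionID: "later"},
		func() error { return nil })
	fmt.Println(err) // <nil>
}
```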