Merge pull request #1798 from safing/feature/new-installer

Feature/new installer
This commit is contained in:
Daniel Hååvi 2025-02-03 13:52:28 +01:00 committed by GitHub
commit c62d744656
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
157 changed files with 6994 additions and 3727 deletions

View file

@ -12,11 +12,11 @@ desktop/tauri/src-tauri/target
# Copy from .gitignore:
# Compiled binaries
*.exe
dist/
# *.exe
# dist/
# Dist dir
dist
# dist
# Custom dev deps
go.mod.*

View file

@ -1,6 +1,12 @@
name: Release
on:
push:
branches:
- v2.0
- feature/new-installer
tags:
- v*
workflow_dispatch:
jobs:
@ -26,7 +32,6 @@ jobs:
- name: Upload Dist
uses: actions/upload-artifact@v4
with:
name: dist
path: ./dist/
if-no-files-found: error
@ -51,6 +56,12 @@ jobs:
run: earthly --ci --remote-cache=ghcr.io/safing/build-cache --push +installer-linux
# --ci includes the --no-output flag
- name: Upload Installers
uses: actions/upload-artifact@v4
with:
path: ./dist/linux_amd64/
if-no-files-found: error
installer-windows:
name: Installer windows
runs-on: windows-latest
@ -62,9 +73,13 @@ jobs:
- name: Download Dist
uses: actions/download-artifact@v4
with:
name: dist
path: dist/
- name: Build windows artifacts
run: powershell -NoProfile -File ./packaging/windows/generate_windows_installers.ps1
- name: Upload Installers
uses: actions/upload-artifact@v4
with:
path: ./dist/windows_amd64/
if-no-files-found: error

41
.github/workflows/windows-dll.yml vendored Normal file
View file

@ -0,0 +1,41 @@
name: Windows Portmaster Core DLL
on:
push:
paths:
- 'windows_core_dll/**'
branches:
- master
- develop
pull_request:
paths:
- 'windows_core_dll/**'
branches:
- master
- develop
workflow_dispatch:
jobs:
build:
name: Build
runs-on: windows-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Add msbuild to PATH
uses: microsoft/setup-msbuild@v2
- name: Build DLL
run: msbuild windows_core_dll\windows_core_dll.sln -t:rebuild -property:Configuration=Release
- name: Verify DLL
shell: powershell
run: |
if (!(Test-Path "windows_core_dll/x64/Release/portmaster-core.dll")) {
Write-Error "DLL build failed: portmaster-core.dll not found"
exit 1
}
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: portmaster-core-dll
path: windows_core_dll/x64/Release/portmaster-core.dll

4
.gitignore vendored
View file

@ -12,8 +12,7 @@ go.mod.*
vendor
# testing
testing
spn/testing/simple/testdata
testdata
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.a
@ -52,3 +51,4 @@ go.work.sum
# Kext releases
windows_kext/release/kext_release_*.zip
windows_core_dll/.vs/windows_core_dll

View file

@ -70,6 +70,11 @@ build:
# ./dist/all/assets.zip
BUILD +assets
build-spn:
BUILD +go-build --CMDS="hub" --GOOS="linux" --GOARCH="amd64"
BUILD +go-build --CMDS="hub" --GOOS="linux" --GOARCH="arm64"
# TODO: Add other platforms
go-ci:
BUILD +go-build --GOOS="linux" --GOARCH="amd64"
BUILD +go-build --GOOS="linux" --GOARCH="arm64"
@ -421,7 +426,7 @@ rust-base:
DO rust+INIT --keep_fingerprints=true
# For now we need tauri-cli 2.0.0 for building
DO rust+CARGO --args="install tauri-cli --version ${tauri_version} --locked"
DO rust+CARGO --args="install tauri-cli --version 2.1.0 --locked"
# Explicitly cache here.
SAVE IMAGE --cache-hint
@ -475,10 +480,11 @@ tauri-build:
# Binaries
SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/portmaster" AS LOCAL "${outputDir}/${GO_ARCH_STRING}/portmaster"
SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/portmaster.exe" AS LOCAL "${outputDir}/${GO_ARCH_STRING}/portmaster.exe"
# SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/WebView2Loader.dll" AS LOCAL "${outputDir}/${GO_ARCH_STRING}/WebView2Loader.dll"
SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/WebView2Loader.dll" AS LOCAL "${outputDir}/${GO_ARCH_STRING}/WebView2Loader.dll"
SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/portmaster" ./output/portmaster
SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/portmaster.exe" ./output/portmaster.exe
SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/WebView2Loader.dll" ./output/WebView2Loader.dll
tauri-release:
@ -512,43 +518,37 @@ tauri-lint:
release-prep:
FROM +rust-base
WORKDIR /app
# Linux specific
COPY (+tauri-build/output/portmaster --target="x86_64-unknown-linux-gnu") ./output/binary/linux_amd64/portmaster
COPY (+go-build/output/portmaster-core --GOARCH=amd64 --GOOS=linux --CMDS=portmaster-core) ./output/binary/linux_amd64/portmaster-core
# Windows specific
COPY (+tauri-build/output/portmaster.exe --target="x86_64-pc-windows-gnu") ./output/binary/windows_amd64/portmaster.exe
COPY (+tauri-build/output/WebView2Loader.dll --target="x86_64-pc-windows-gnu") ./output/binary/windows_amd64/WebView2Loader.dll
COPY (+go-build/output/portmaster-core.exe --GOARCH=amd64 --GOOS=windows --CMDS=portmaster-core) ./output/binary/windows_amd64/portmaster-core.exe
# TODO(vladimir): figure out a way to get the latest release of the kext.
RUN touch ./output/binary/windows_amd64/portmaster-kext.sys
# All platforms
COPY (+assets/assets.zip) ./output/binary/all/assets.zip
COPY (+angular-project/output/portmaster.zip --project=portmaster --dist=./dist --configuration=production --baseHref=/ui/modules/portmaster/) ./output/binary/all/portmaster.zip
# Intel
# TODO(vladimir): figure out a way to download all of the latest intel data.
RUN mkdir -p ./output/intel
RUN wget -O ./output/intel/geoipv4.mmdb.gz "https://updates.safing.io/all/intel/geoip/geoipv4_v20240820-0-1.mmdb.gz" && \
wget -O ./output/intel/geoipv6.mmdb.gz "https://updates.safing.io/all/intel/geoip/geoipv6_v20240820-0-1.mmdb.gz" && \
wget -O ./output/intel/index.dsd "https://updates.safing.io/all/intel/lists/index_v2023-6-13.dsd" && \
wget -O ./output/intel/base.dsdl "https://updates.safing.io/all/intel/lists/base_v20241001-0-9.dsdl" && \
wget -O ./output/intel/intermediate.dsdl "https://updates.safing.io/all/intel/lists/intermediate_v20240929-0-0.dsdl" && \
wget -O ./output/intel/urgent.dsdl "https://updates.safing.io/all/intel/lists/urgent_v20241002-2-14.dsdl"
# Build update manager
COPY (+go-build/output/updatemgr --GOARCH=amd64 --GOOS=linux --CMDS=updatemgr) ./updatemgr
RUN ./updatemgr scan --dir "./output/binary" > ./output/binary/index.json
RUN ./updatemgr scan --dir "./output/intel" > ./output/intel/index.json
# Get binary artifacts from current release
RUN mkdir -p ./output/download/windows_amd64 && ./updatemgr download https://updates.safing.io/stable.v3.json --platform windows_amd64 ./output/download/windows_amd64
# Intel Extracted (needed for the installers)
RUN mkdir -p ./output/intel_decompressed
RUN cp ./output/intel/index.json ./output/intel_decompressed/index.json
RUN gzip -dc ./output/intel/geoipv4.mmdb.gz > ./output/intel_decompressed/geoipv4.mmdb
RUN gzip -dc ./output/intel/geoipv6.mmdb.gz > ./output/intel_decompressed/geoipv6.mmdb
RUN cp ./output/intel/index.dsd ./output/intel_decompressed/index.dsd
RUN cp ./output/intel/base.dsdl ./output/intel_decompressed/base.dsdl
RUN cp ./output/intel/intermediate.dsdl ./output/intel_decompressed/intermediate.dsdl
RUN cp ./output/intel/urgent.dsdl ./output/intel_decompressed/urgent.dsdl
# Copy required artifacts
RUN cp ./output/download/windows_amd64/portmaster-kext.sys ./output/binary/windows_amd64/portmaster-kext.sys
RUN cp ./output/download/windows_amd64/portmaster-kext.pdb ./output/binary/windows_amd64/portmaster-kext.pdb
RUN cp ./output/download/windows_amd64/portmaster-core.dll ./output/binary/windows_amd64/portmaster-core.dll
# Create new binary index from artifacts
RUN ./updatemgr scan --dir "./output/binary" > ./output/binary/index.json
# Get intel index and assets
RUN mkdir -p ./output/intel && ./updatemgr download https://updates.safing.io/intel.v3.json ./output/intel
# Save all artifacts to output folder
SAVE ARTIFACT --if-exists --keep-ts "output/binary/index.json" AS LOCAL "${outputDir}/binary/index.json"
@ -556,7 +556,6 @@ release-prep:
SAVE ARTIFACT --if-exists --keep-ts "output/binary/linux_amd64/*" AS LOCAL "${outputDir}/binary/linux_amd64/"
SAVE ARTIFACT --if-exists --keep-ts "output/binary/windows_amd64/*" AS LOCAL "${outputDir}/binary/windows_amd64/"
SAVE ARTIFACT --if-exists --keep-ts "output/intel/*" AS LOCAL "${outputDir}/intel/"
SAVE ARTIFACT --if-exists --keep-ts "output/intel_decompressed/*" AS LOCAL "${outputDir}/intel_decompressed/"
# Save all artifacts to the container output folder so other containers can access it.
SAVE ARTIFACT --if-exists --keep-ts "output/binary/index.json" "output/binary/index.json"
@ -564,7 +563,7 @@ release-prep:
SAVE ARTIFACT --if-exists --keep-ts "output/binary/linux_amd64/*" "output/binary/linux_amd64/"
SAVE ARTIFACT --if-exists --keep-ts "output/binary/windows_amd64/*" "output/binary/windows_amd64/"
SAVE ARTIFACT --if-exists --keep-ts "output/intel/*" "output/intel/"
SAVE ARTIFACT --if-exists --keep-ts "output/intel_decompressed/*" "output/intel_decompressed/"
SAVE ARTIFACT --if-exists --keep-ts "output/download/*" "output/download/"
installer-linux:
FROM +rust-base
@ -594,7 +593,7 @@ installer-linux:
# Download the intel data
RUN mkdir -p intel
COPY (+release-prep/output/intel_decompressed/*) ./intel/
COPY (+release-prep/output/intel/*) ./intel/
# build the installers
RUN cargo tauri bundle --ci --target="${target}"

View file

@ -3,9 +3,9 @@ package database
import (
"errors"
"fmt"
"os"
"path/filepath"
"github.com/safing/portmaster/base/utils"
"github.com/tevino/abool"
)
@ -23,10 +23,10 @@ func Initialize(databasesRootDir string) error {
if initialized.SetToIf(false, true) {
rootDir = databasesRootDir
// Ensure database root dir exists.
err := os.MkdirAll(rootDir, 0o0700)
// Ensure root and databases dirs exist.
err := utils.EnsureDirectory(rootDir, utils.AdminOnlyExecPermission)
if err != nil {
return fmt.Errorf("could not create/open database directory (%s): %w", rootDir, err)
return fmt.Errorf("failed to create/check database dir %q: %w", rootDir, err)
}
return nil
@ -59,7 +59,7 @@ func getLocation(name, storageType string) (string, error) {
location := filepath.Join(rootDir, name, storageType)
// Make sure location exists.
err := os.MkdirAll(location, 0o0700)
err := utils.EnsureDirectory(location, utils.AdminOnlyExecPermission)
if err != nil {
return "", fmt.Errorf("failed to create/check database dir %q: %w", location, err)
}

View file

@ -288,10 +288,10 @@ func writeFile(filename string, data []byte, perm os.FileMode) error {
defer t.Cleanup() //nolint:errcheck
// Set permissions before writing data, in case the data is sensitive.
if !onWindows {
if err := t.Chmod(perm); err != nil {
return err
}
// TODO(vladimir): to set permissions on windows we need the full path of the file.
err = t.Chmod(perm)
if err != nil {
return err
}
if _, err := t.Write(data); err != nil {

View file

@ -10,8 +10,6 @@ import (
"sync"
)
// FIXME: version does not show in portmaster
var (
name string
license string
@ -76,6 +74,7 @@ func Set(setName string, setVersion string, setLicenseName string) {
if setVersion != "" {
version = setVersion
versionNumber = setVersion
}
}
@ -167,9 +166,9 @@ func CondensedVersion() string {
}
return fmt.Sprintf(
"%s %s (%s; built with %s [%s %s] from %s [%s] at %s)",
"%s %s (%s/%s; built with %s [%s %s] from %s [%s] at %s)",
info.Name, version,
runtime.GOOS,
runtime.GOOS, runtime.GOARCH,
runtime.Version(), runtime.Compiler, cgoInfo,
info.Commit, dirtyInfo, info.CommitTime,
)

View file

@ -191,7 +191,7 @@ func ParseLevel(level string) Severity {
}
// Start starts the logging system. Must be called in order to see logs.
func Start(level string, logToStdout bool, logDir string) (err error) {
func Start(level string, logToStdout bool, logDir string) error {
if !initializing.SetToIf(false, true) {
return nil
}
@ -232,13 +232,13 @@ func Start(level string, logToStdout bool, logDir string) (err error) {
// Delete all logs older than one month.
if !logToStdout {
err = CleanOldLogs(logDir, 30*24*time.Hour)
err := CleanOldLogs(logDir, 30*24*time.Hour)
if err != nil {
Errorf("log: failed to clean old log files: %s", err)
}
}
return err
return nil
}
// Shutdown writes remaining log lines and then stops the log system.

View file

@ -1,14 +1,19 @@
package log
import (
"io"
"log/slog"
"os"
"runtime"
"github.com/lmittmann/tint"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
func setupSLog(level Severity) {
// TODO: Changes in the log level are not yet reflected onto the slog handlers in the modules.
// Set highest possible level, so it can be changed at runtime.
handlerLogLevel := level.toSLogLevel()
@ -17,21 +22,23 @@ func setupSLog(level Severity) {
switch runtime.GOOS {
case "windows":
logHandler = tint.NewHandler(
GlobalWriter,
windowsColoring(GlobalWriter), // Enable coloring on Windows.
&tint.Options{
AddSource: true,
Level: handlerLogLevel,
TimeFormat: timeFormat,
NoColor: !GlobalWriter.IsStdout(), // FIXME: also check for tty.
NoColor: !( /* Color: */ GlobalWriter.IsStdout() && isatty.IsTerminal(GlobalWriter.file.Fd())),
},
)
case "linux":
logHandler = tint.NewHandler(GlobalWriter, &tint.Options{
AddSource: true,
Level: handlerLogLevel,
TimeFormat: timeFormat,
NoColor: !GlobalWriter.IsStdout(), // FIXME: also check for tty.
NoColor: !( /* Color: */ GlobalWriter.IsStdout() && isatty.IsTerminal(GlobalWriter.file.Fd())),
})
default:
logHandler = tint.NewHandler(os.Stdout, &tint.Options{
AddSource: true,
@ -43,6 +50,11 @@ func setupSLog(level Severity) {
// Set as default logger.
slog.SetDefault(slog.New(logHandler))
// Set actual log level.
slog.SetLogLoggerLevel(handlerLogLevel)
}
func windowsColoring(lw *LogWriter) io.Writer {
if lw.IsStdout() {
return colorable.NewColorable(lw.file)
}
return lw
}

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"io/fs"
"log/slog"
"os"
"runtime"
)
@ -12,7 +13,11 @@ const isWindows = runtime.GOOS == "windows"
// EnsureDirectory ensures that the given directory exists and that it has the given permissions set.
// If path is a file, it is deleted and a directory created.
func EnsureDirectory(path string, perm os.FileMode) error {
func EnsureDirectory(path string, perm FSPermission) error {
if !perm.IsExecPermission() {
slog.Warn("utils: setting non-executable permission for directory", "dir", path)
}
// open path
f, err := os.Stat(path)
if err == nil {
@ -20,10 +25,11 @@ func EnsureDirectory(path string, perm os.FileMode) error {
if f.IsDir() {
// directory exists, check permissions
if isWindows {
// TODO: set correct permission on windows
// acl.Chmod(path, perm)
} else if f.Mode().Perm() != perm {
return os.Chmod(path, perm)
// Ignore Windows permission errors. For non-admin users it will always fail.
_ = SetFilePermission(path, perm)
return nil
} else if f.Mode().Perm() != perm.AsUnixPermission() {
return SetFilePermission(path, perm)
}
return nil
}
@ -34,11 +40,17 @@ func EnsureDirectory(path string, perm os.FileMode) error {
}
// file does not exist (or has been deleted)
if err == nil || errors.Is(err, fs.ErrNotExist) {
err = os.Mkdir(path, perm)
err = os.MkdirAll(path, perm.AsUnixPermission())
if err != nil {
return fmt.Errorf("could not create dir %s: %w", path, err)
}
return os.Chmod(path, perm)
// Set permissions.
err = SetFilePermission(path, perm)
// Ignore Windows permission errors. For non-admin users it will always fail.
if !isWindows {
return err
}
return nil
}
// other error opening path
return fmt.Errorf("failed to access %s: %w", path, err)

10
base/utils/permissions.go Normal file
View file

@ -0,0 +1,10 @@
//go:build !windows
package utils
import "os"
// SetFilePermission sets the permission of a file or directory.
func SetFilePermission(path string, perm FSPermission) error {
return os.Chmod(path, perm.AsUnixPermission())
}

View file

@ -0,0 +1,68 @@
//go:build windows
package utils
import (
"github.com/hectane/go-acl"
"golang.org/x/sys/windows"
)
var (
systemSID *windows.SID
adminsSID *windows.SID
usersSID *windows.SID
)
func init() {
// Initialize security IDs for all needed groups.
// Reference: https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/manage/understand-security-identifiers
var err error
systemSID, err = windows.StringToSid("S-1-5-18") // SYSTEM (Local System)
if err != nil {
panic(err)
}
adminsSID, err = windows.StringToSid("S-1-5-32-544") // Administrators
if err != nil {
panic(err)
}
usersSID, err = windows.StringToSid("S-1-5-32-545") // Users
if err != nil {
panic(err)
}
}
// SetFilePermission sets the permission of a file or directory.
func SetFilePermission(path string, perm FSPermission) error {
switch perm {
case AdminOnlyPermission, AdminOnlyExecPermission:
// Set only admin rights, remove all others.
acl.Apply(
path,
true,
false,
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, systemSID),
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, adminsSID),
)
case PublicReadPermission, PublicReadExecPermission:
// Set admin rights and read/execute rights for users, remove all others.
acl.Apply(
path,
true,
false,
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, systemSID),
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, adminsSID),
acl.GrantSid(windows.GENERIC_READ|windows.GENERIC_EXECUTE, usersSID),
)
case PublicWritePermission, PublicWriteExecPermission:
// Set full control to admin and regular users. Guest users will not have access.
acl.Apply(
path,
true,
false,
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, systemSID),
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, adminsSID),
acl.GrantSid(windows.GENERIC_ALL|windows.STANDARD_RIGHTS_ALL, usersSID),
)
}
return nil
}

View file

@ -1,6 +1,11 @@
package renameio
import "os"
import (
"os"
"runtime"
"github.com/hectane/go-acl"
)
// WriteFile mirrors os.WriteFile, replacing an existing file with the same
// name atomically.
@ -14,7 +19,12 @@ func WriteFile(filename string, data []byte, perm os.FileMode) error {
}()
// Set permissions before writing data, in case the data is sensitive.
if err := t.Chmod(perm); err != nil {
if runtime.GOOS == "windows" {
err = acl.Chmod(t.path, perm)
} else {
err = t.Chmod(perm)
}
if err != nil {
return err
}

View file

@ -2,25 +2,64 @@ package utils
import (
"fmt"
"os"
"io/fs"
"path/filepath"
"strings"
"sync"
)
type FSPermission uint8
const (
AdminOnlyPermission FSPermission = iota
AdminOnlyExecPermission
PublicReadPermission
PublicReadExecPermission
PublicWritePermission
PublicWriteExecPermission
)
// AsUnixPermission returns the corresponding Unix permission for a directory or executable.
func (perm FSPermission) AsUnixPermission() fs.FileMode {
switch perm {
case AdminOnlyPermission:
return 0o600
case AdminOnlyExecPermission:
return 0o700
case PublicReadPermission:
return 0o644
case PublicReadExecPermission:
return 0o755
case PublicWritePermission:
return 0o666
case PublicWriteExecPermission:
return 0o777
}
return 0
}
// IsExecPermission reports whether the permission includes the executable bit.
func (perm FSPermission) IsExecPermission() bool {
switch perm {
case AdminOnlyExecPermission, PublicReadExecPermission, PublicWriteExecPermission:
return true
}
return false
}
// DirStructure represents a directory structure with permissions that should be enforced.
type DirStructure struct {
sync.Mutex
Path string
Dir string
Perm os.FileMode
Perm FSPermission
Parent *DirStructure
Children map[string]*DirStructure
}
// NewDirStructure returns a new DirStructure.
func NewDirStructure(path string, perm os.FileMode) *DirStructure {
func NewDirStructure(path string, perm FSPermission) *DirStructure {
return &DirStructure{
Path: path,
Perm: perm,
@ -29,7 +68,7 @@ func NewDirStructure(path string, perm os.FileMode) *DirStructure {
}
// ChildDir adds a new child DirStructure and returns it. Should the child already exist, the existing child is returned and the permissions are updated.
func (ds *DirStructure) ChildDir(dirName string, perm os.FileMode) (child *DirStructure) {
func (ds *DirStructure) ChildDir(dirName string, perm FSPermission) (child *DirStructure) {
ds.Lock()
defer ds.Unlock()
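As a quick reference (an illustrative sketch, not part of the diff), the mapping from the new FSPermission constants to Unix modes can be pinned down with a small table test against AsUnixPermission as defined above:
package utils
import "testing"
// Sketch of a table test for the FSPermission-to-Unix-mode mapping.
func TestAsUnixPermission(t *testing.T) {
	cases := map[FSPermission]uint32{
		AdminOnlyPermission:       0o600,
		AdminOnlyExecPermission:   0o700,
		PublicReadPermission:      0o644,
		PublicReadExecPermission:  0o755,
		PublicWritePermission:     0o666,
		PublicWriteExecPermission: 0o777,
	}
	for perm, want := range cases {
		if got := uint32(perm.AsUnixPermission()); got != want {
			t.Errorf("perm %d: got %o, want %o", perm, got, want)
		}
	}
}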

View file

@ -13,13 +13,13 @@ func ExampleDirStructure() {
// output:
// / [755]
// /repo [777]
// /repo/b [707]
// /repo/b/c [750]
// /repo/b/d [707]
// /repo/b/d/e [707]
// /repo/b/d/f [707]
// /repo/b/d/f/g [707]
// /repo/b/d/f/g/h [707]
// /repo/b [755]
// /repo/b/c [777]
// /repo/b/d [755]
// /repo/b/d/e [755]
// /repo/b/d/f [755]
// /repo/b/d/f/g [755]
// /repo/b/d/f/g/h [755]
// /secret [700]
basePath, err := os.MkdirTemp("", "")
@ -28,12 +28,12 @@ func ExampleDirStructure() {
return
}
ds := NewDirStructure(basePath, 0o0755)
secret := ds.ChildDir("secret", 0o0700)
repo := ds.ChildDir("repo", 0o0777)
_ = repo.ChildDir("a", 0o0700)
b := repo.ChildDir("b", 0o0707)
c := b.ChildDir("c", 0o0750)
ds := NewDirStructure(basePath, PublicReadPermission)
secret := ds.ChildDir("secret", AdminOnlyPermission)
repo := ds.ChildDir("repo", PublicWritePermission)
_ = repo.ChildDir("a", AdminOnlyPermission)
b := repo.ChildDir("b", PublicReadPermission)
c := b.ChildDir("c", PublicWritePermission)
err = ds.Ensure()
if err != nil {

View file

@ -1,12 +1,14 @@
package main
package cmdbase
import (
"context"
"errors"
"flag"
"fmt"
"io"
"log/slog"
"os"
"os/exec"
"runtime"
"runtime/pprof"
"time"
@ -15,14 +17,12 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/spn/conf"
)
var printStackOnExit bool
func init() {
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
}
var (
RebootOnRestart bool
PrintStackOnExit bool
)
type SystemService interface {
Run()
@ -30,21 +30,47 @@ type SystemService interface {
RestartService() error
}
func cmdRun(cmd *cobra.Command, args []string) {
// Run platform specific setup or switches.
runPlatformSpecifics(cmd, args)
type ServiceInstance interface {
Ready() bool
Start() error
Stop() error
Restart()
Shutdown()
Ctx() context.Context
IsShuttingDown() bool
ShuttingDown() <-chan struct{}
ShutdownCtx() context.Context
IsShutDown() bool
ShutdownComplete() <-chan struct{}
ExitCode() int
ShouldRestartIsSet() bool
CommandLineOperationIsSet() bool
CommandLineOperationExecute() error
}
// SETUP
var (
SvcFactory func(*service.ServiceConfig) (ServiceInstance, error)
SvcConfig *service.ServiceConfig
)
// Enable SPN client mode.
// TODO: Move this to service config.
conf.EnableClient(true)
conf.EnableIntegration(true)
func RunService(cmd *cobra.Command, args []string) {
if SvcFactory == nil || SvcConfig == nil {
fmt.Fprintln(os.Stderr, "internal error: service not set up in cmdbase")
os.Exit(1)
}
// Start logging.
// Note: Logging must be started before the service instance, so that it uses the right logger.
err := log.Start(SvcConfig.LogLevel, SvcConfig.LogToStdout, SvcConfig.LogDir)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(4)
}
// Create instance.
// Instance modules might request a cmdline execution of a function.
var execCmdLine bool
instance, err := service.New(svcCfg)
instance, err := SvcFactory(SvcConfig)
switch {
case err == nil:
// Continue
@ -59,13 +85,13 @@ func cmdRun(cmd *cobra.Command, args []string) {
switch {
case !execCmdLine:
// Run service.
case instance.CommandLineOperation == nil:
case !instance.CommandLineOperationIsSet():
fmt.Println("command line operation execution requested, but not set")
os.Exit(3)
default:
// Run the function and exit.
fmt.Println("executing cmdline op")
err = instance.CommandLineOperation()
err = instance.CommandLineOperationExecute()
if err != nil {
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
os.Exit(3)
@ -75,16 +101,6 @@ func cmdRun(cmd *cobra.Command, args []string) {
// START
// FIXME: fix color and duplicate level when logging with slog
// FIXME: check for tty for color enabling
// Start logging.
err = log.Start(svcCfg.LogLevel, svcCfg.LogToStdout, svcCfg.LogDir)
if err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(4)
}
// Create system service.
service := NewSystemService(instance)
@ -102,7 +118,7 @@ func cmdRun(cmd *cobra.Command, args []string) {
select {
case <-instance.ShutdownComplete():
// Print stack on shutdown, if enabled.
if printStackOnExit {
if PrintStackOnExit {
printStackTo(log.GlobalWriter, "PRINTING STACK ON EXIT")
}
case <-time.After(3 * time.Minute):
@ -110,9 +126,22 @@ func cmdRun(cmd *cobra.Command, args []string) {
}
// Check if restart was triggered and send start service command if true.
if instance.ShouldRestart && service.IsService() {
if err := service.RestartService(); err != nil {
slog.Error("failed to restart service", "err", err)
if instance.ShouldRestartIsSet() && service.IsService() {
// Check if we should reboot instead.
var rebooting bool
if RebootOnRestart {
// Trigger system reboot and record success.
rebooting = triggerSystemReboot()
if !rebooting {
log.Warningf("updates: rebooting failed, only restarting service instead")
}
}
// Restart service if not rebooting.
if !rebooting {
if err := service.RestartService(); err != nil {
slog.Error("failed to restart service", "err", err)
}
}
}
@ -138,3 +167,19 @@ func printStackTo(writer io.Writer, msg string) {
slog.Error("failed to write stack trace", "err", err)
}
}
func triggerSystemReboot() (success bool) {
switch runtime.GOOS {
case "linux":
err := exec.Command("systemctl", "reboot").Run()
if err != nil {
log.Errorf("updates: triggering reboot with systemctl failed: %s", err)
return false
}
default:
log.Warningf("updates: rebooting is not support on %s", runtime.GOOS)
return false
}
return true
}
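To summarize the new entry-point contract (an illustrative sketch; the actual wiring appears in the main.go diffs further down), a binary that embeds cmdbase sets the service factory and config before executing the root command; paths below are hypothetical:
package main
import (
	"github.com/spf13/cobra"
	"github.com/safing/portmaster/cmds/cmdbase"
	"github.com/safing/portmaster/service"
)
var rootCmd = &cobra.Command{
	Use:              "portmaster-core",
	PersistentPreRun: initializeGlobals,
	Run:              cmdbase.RunService,
}
func initializeGlobals(cmd *cobra.Command, args []string) {
	// Wire the service factory and config that cmdbase.RunService expects.
	cmdbase.SvcFactory = func(svcCfg *service.ServiceConfig) (cmdbase.ServiceInstance, error) {
		svc, err := service.New(svcCfg)
		return svc, err
	}
	cmdbase.SvcConfig = &service.ServiceConfig{
		BinDir:  "/usr/lib/portmaster", // hypothetical paths for illustration
		DataDir: "/var/lib/portmaster",
	}
}
func main() {
	if err := rootCmd.Execute(); err != nil {
		panic(err)
	}
}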

View file

@ -1,4 +1,4 @@
package main
package cmdbase
import (
"fmt"
@ -9,17 +9,15 @@ import (
"syscall"
processInfo "github.com/shirou/gopsutil/process"
"github.com/spf13/cobra"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service"
)
type LinuxSystemService struct {
instance *service.Instance
instance ServiceInstance
}
func NewSystemService(instance *service.Instance) *LinuxSystemService {
func NewSystemService(instance ServiceInstance) *LinuxSystemService {
return &LinuxSystemService{instance: instance}
}
@ -30,7 +28,7 @@ func (s *LinuxSystemService) Run() {
slog.Error("failed to start", "err", err)
// Print stack on start failure, if enabled.
if printStackOnExit {
if PrintStackOnExit {
printStackTo(log.GlobalWriter, "PRINTING STACK ON START FAILURE")
}
@ -62,7 +60,7 @@ wait:
continue wait
} else {
// Trigger shutdown.
fmt.Printf(" <SIGNAL: %v>", sig) // CLI output.
fmt.Printf(" <SIGNAL: %v>\n", sig) // CLI output.
slog.Warn("received stop signal", "signal", sig)
s.instance.Shutdown()
break wait
@ -128,18 +126,3 @@ func (s *LinuxSystemService) IsService() bool {
// Check if the parent process ID is 1 == init system
return ppid == 1
}
func runPlatformSpecifics(cmd *cobra.Command, args []string) {
// If recover-iptables flag is set, run the recover-iptables command.
// This is for backwards compatibility
if recoverIPTables {
exitCode := 0
err := recover(cmd, args)
if err != nil {
fmt.Printf("failed: %s", err)
exitCode = 1
}
os.Exit(exitCode)
}
}

View file

@ -1,4 +1,4 @@
package main
package cmdbase
// Based on the official Go examples from
// https://github.com/golang/sys/blob/master/windows/svc/example
@ -13,21 +13,19 @@ import (
"os/signal"
"syscall"
"github.com/spf13/cobra"
"golang.org/x/sys/windows/svc"
"golang.org/x/sys/windows/svc/debug"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service"
)
const serviceName = "PortmasterCore"
type WindowsSystemService struct {
instance *service.Instance
instance ServiceInstance
}
func NewSystemService(instance *service.Instance) *WindowsSystemService {
func NewSystemService(instance ServiceInstance) *WindowsSystemService {
return &WindowsSystemService{instance: instance}
}
@ -67,7 +65,7 @@ func (s *WindowsSystemService) Execute(args []string, changeRequests <-chan svc.
fmt.Printf("failed to start: %s\n", err)
// Print stack on start failure, if enabled.
if printStackOnExit {
if PrintStackOnExit {
printStackTo(log.GlobalWriter, "PRINTING STACK ON START FAILURE")
}
@ -102,7 +100,7 @@ waitSignal:
select {
case sig := <-signalCh:
// Trigger shutdown.
fmt.Printf(" <SIGNAL: %v>", sig) // CLI output.
fmt.Printf(" <SIGNAL: %v>\n", sig) // CLI output.
slog.Warn("received stop signal", "signal", sig)
break waitSignal
@ -112,7 +110,7 @@ waitSignal:
changes <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
fmt.Printf(" <SERVICE CMD: %v>", serviceCmdName(c.Cmd)) // CLI output.
fmt.Printf(" <SERVICE CMD: %v>\n", serviceCmdName(c.Cmd)) // CLI output.
slog.Warn("received service shutdown command", "cmd", c.Cmd)
break waitSignal
@ -201,8 +199,6 @@ sc.exe start $serviceName`
return nil
}
func runPlatformSpecifics(cmd *cobra.Command, args []string)
func serviceCmdName(cmd svc.Cmd) string {
switch cmd {
case svc.Stop:

View file

@ -1,4 +1,4 @@
package main
package cmdbase
import (
"fmt"
@ -12,32 +12,28 @@ import (
"github.com/safing/portmaster/service/updates"
)
var updateCmd = &cobra.Command{
var UpdateCmd = &cobra.Command{
Use: "update",
Short: "Force an update of all components.",
RunE: update,
}
func init() {
rootCmd.AddCommand(updateCmd)
}
func update(cmd *cobra.Command, args []string) error {
// Finalize config.
err := svcCfg.Init()
err := SvcConfig.Init()
if err != nil {
return fmt.Errorf("internal configuration error: %w", err)
}
// Force logging to stdout.
svcCfg.LogToStdout = true
SvcConfig.LogToStdout = true
// Start logging.
_ = log.Start(svcCfg.LogLevel, svcCfg.LogToStdout, svcCfg.LogDir)
_ = log.Start(SvcConfig.LogLevel, SvcConfig.LogToStdout, SvcConfig.LogDir)
defer log.Shutdown()
// Create updaters.
instance := &updateDummyInstance{}
binaryUpdateConfig, intelUpdateConfig, err := service.MakeUpdateConfigs(svcCfg)
binaryUpdateConfig, intelUpdateConfig, err := service.MakeUpdateConfigs(SvcConfig)
if err != nil {
return fmt.Errorf("init updater config: %w", err)
}

20
cmds/cmdbase/version.go Normal file
View file

@ -0,0 +1,20 @@
package cmdbase
import (
"fmt"
"github.com/spf13/cobra"
"github.com/safing/portmaster/base/info"
)
var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show version and related metadata.",
RunE: Version,
}
func Version(cmd *cobra.Command, args []string) error {
fmt.Println(info.FullVersion())
return nil
}

View file

@ -1,158 +1,95 @@
package main
import (
"errors"
"flag"
"fmt"
"io"
"log/slog"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"syscall"
"time"
"github.com/spf13/cobra"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/metrics"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/cmds/cmdbase"
"github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/configure"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/spn"
"github.com/safing/portmaster/spn/conf"
)
var (
rootCmd = &cobra.Command{
Use: "spn-hub",
PersistentPreRun: initializeGlobals,
Run: cmdbase.RunService,
}
binDir string
dataDir string
logToStdout bool
logDir string
logLevel string
)
func init() {
// flag.BoolVar(&updates.RebootOnRestart, "reboot-on-restart", false, "reboot server on auto-upgrade")
// FIXME
// Add persistent flags for all commands.
rootCmd.PersistentFlags().StringVar(&binDir, "bin-dir", "", "set directory for executable binaries (rw/ro)")
rootCmd.PersistentFlags().StringVar(&dataDir, "data-dir", "", "set directory for variable data (rw)")
// Add flags for service only.
rootCmd.Flags().BoolVar(&logToStdout, "log-stdout", false, "log to stdout instead of file")
rootCmd.Flags().StringVar(&logDir, "log-dir", "", "set directory for logs")
rootCmd.Flags().StringVar(&logLevel, "log", "", "set log level to [trace|debug|info|warning|error|critical]")
rootCmd.Flags().BoolVar(&cmdbase.PrintStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
rootCmd.Flags().BoolVar(&cmdbase.RebootOnRestart, "reboot-on-restart", false, "reboot server instead of service restart")
// Add other commands.
rootCmd.AddCommand(cmdbase.VersionCmd)
rootCmd.AddCommand(cmdbase.UpdateCmd)
}
var sigUSR1 = syscall.Signal(0xa)
func main() {
flag.Parse()
// Add Go's default flag set.
// TODO: Move flags throughout Portmaster to here and add their values to the service config.
rootCmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func initializeGlobals(cmd *cobra.Command, args []string) {
// Set name and license.
info.Set("SPN Hub", "", "GPLv3")
info.Set("SPN Hub", "0.7.8", "GPLv3")
// Configure metrics.
_ = metrics.SetNamespace("hub")
// Configure user agent and updates.
// Configure user agent.
updates.UserAgent = fmt.Sprintf("SPN Hub (%s %s)", runtime.GOOS, runtime.GOARCH)
// helper.IntelOnly()
// Set SPN public hub mode.
conf.EnablePublicHub(true)
// Start logger with default log level.
_ = log.Start(log.WarningLevel)
// FIXME: Use service?
// Create instance.
var execCmdLine bool
instance, err := spn.New()
switch {
case err == nil:
// Continue
case errors.Is(err, mgr.ErrExecuteCmdLineOp):
execCmdLine = true
default:
fmt.Printf("error creating an instance: %s\n", err)
os.Exit(2)
// Configure service.
cmdbase.SvcFactory = func(svcCfg *service.ServiceConfig) (cmdbase.ServiceInstance, error) {
svc, err := service.New(svcCfg)
return svc, err
}
// Execute command line operation, if requested or available.
switch {
case !execCmdLine:
// Run service.
case instance.CommandLineOperation == nil:
fmt.Println("command line operation execution requested, but not set")
os.Exit(3)
default:
// Run the function and exit.
err = instance.CommandLineOperation()
if err != nil {
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
os.Exit(3)
}
os.Exit(0)
}
cmdbase.SvcConfig = &service.ServiceConfig{
BinDir: binDir,
DataDir: dataDir,
// Start
go func() {
err = instance.Start()
if err != nil {
fmt.Printf("instance start failed: %s\n", err)
os.Exit(1)
}
}()
LogToStdout: logToStdout,
LogDir: logDir,
LogLevel: logLevel,
// Wait for signal.
signalCh := make(chan os.Signal, 1)
signal.Notify(
signalCh,
os.Interrupt,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT,
sigUSR1,
)
select {
case sig := <-signalCh:
// Only print and continue to wait if SIGUSR1
if sig == sigUSR1 {
printStackTo(os.Stderr, "PRINTING STACK ON REQUEST")
} else {
fmt.Println(" <INTERRUPT>") // CLI output.
slog.Warn("program was interrupted, stopping")
}
case <-instance.ShutdownComplete():
log.Shutdown()
os.Exit(instance.ExitCode())
}
// Catch signals during shutdown.
// Rapid unplanned disassembly after 5 interrupts.
go func() {
forceCnt := 5
for {
<-signalCh
forceCnt--
if forceCnt > 0 {
fmt.Printf(" <INTERRUPT> again, but already shutting down - %d more to force\n", forceCnt)
} else {
printStackTo(os.Stderr, "PRINTING STACK ON FORCED EXIT")
os.Exit(1)
}
}
}()
// Rapid unplanned disassembly after 3 minutes.
go func() {
time.Sleep(3 * time.Minute)
printStackTo(os.Stderr, "PRINTING STACK - TAKING TOO LONG FOR SHUTDOWN")
os.Exit(1)
}()
// Stop instance.
if err := instance.Stop(); err != nil {
slog.Error("failed to stop", "err", err)
}
log.Shutdown()
os.Exit(instance.ExitCode())
}
func printStackTo(writer io.Writer, msg string) {
_, err := fmt.Fprintf(writer, "===== %s =====\n", msg)
if err == nil {
err = pprof.Lookup("goroutine").WriteTo(writer, 1)
}
if err != nil {
slog.Error("failed to write stack trace", "err", err)
BinariesIndexURLs: configure.DefaultStableBinaryIndexURLs,
IntelIndexURLs: configure.DefaultIntelIndexURLs,
VerifyBinaryUpdates: configure.BinarySigningTrustStore,
VerifyIntelUpdates: configure.BinarySigningTrustStore,
}
}

View file

@ -1,41 +1,75 @@
package main
import (
"errors"
"flag"
"fmt"
"io"
"log/slog"
"os"
"os/signal"
"runtime"
"runtime/pprof"
"syscall"
"time"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/metrics"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/cmds/cmdbase"
"github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/configure"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/spn"
"github.com/safing/portmaster/spn/captain"
"github.com/safing/portmaster/spn/conf"
"github.com/safing/portmaster/spn/sluice"
"github.com/spf13/cobra"
)
var sigUSR1 = syscall.Signal(0xa)
var (
rootCmd = &cobra.Command{
Use: "observation-hub",
PersistentPreRun: initializeGlobals,
Run: cmdbase.RunService,
}
binDir string
dataDir string
logToStdout bool
logDir string
logLevel string
)
func init() {
// Add persistent flags for all commands.
rootCmd.PersistentFlags().StringVar(&binDir, "bin-dir", "", "set directory for executable binaries (rw/ro)")
rootCmd.PersistentFlags().StringVar(&dataDir, "data-dir", "", "set directory for variable data (rw)")
// Add flags for service only.
rootCmd.Flags().BoolVar(&logToStdout, "log-stdout", false, "log to stdout instead of file")
rootCmd.Flags().StringVar(&logDir, "log-dir", "", "set directory for logs")
rootCmd.Flags().StringVar(&logLevel, "log", "", "set log level to [trace|debug|info|warning|error|critical]")
rootCmd.Flags().BoolVar(&cmdbase.PrintStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
rootCmd.Flags().BoolVar(&cmdbase.RebootOnRestart, "reboot-on-restart", false, "reboot server instead of service restart")
// Add other commands.
rootCmd.AddCommand(cmdbase.VersionCmd)
rootCmd.AddCommand(cmdbase.UpdateCmd)
}
func main() {
flag.Parse()
// Add Go's default flag set.
// TODO: Move flags throughout Portmaster to here and add their values to the service config.
rootCmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func initializeGlobals(cmd *cobra.Command, args []string) {
// Set version info.
info.Set("SPN Observation Hub", "", "GPLv3")
// Configure metrics.
_ = metrics.SetNamespace("observer")
// Configure user agent and updates.
// Configure user agent.
updates.UserAgent = fmt.Sprintf("SPN Observation Hub (%s %s)", runtime.GOOS, runtime.GOARCH)
// Configure SPN mode.
@ -46,129 +80,37 @@ func main() {
sluice.EnableListener = false
api.EnableServer = false
// Start logger with default log level.
_ = log.Start(log.WarningLevel)
// Configure service.
cmdbase.SvcFactory = func(svcCfg *service.ServiceConfig) (cmdbase.ServiceInstance, error) {
svc, err := service.New(svcCfg)
// Create instance.
var execCmdLine bool
instance, err := spn.New()
switch {
case err == nil:
// Continue
case errors.Is(err, mgr.ErrExecuteCmdLineOp):
execCmdLine = true
default:
fmt.Printf("error creating an instance: %s\n", err)
os.Exit(2)
}
// Add additional modules.
observer, err := New(instance)
if err != nil {
fmt.Printf("error creating an instance: create observer module: %s\n", err)
os.Exit(2)
}
instance.AddModule(observer)
_, err = NewApprise(instance)
if err != nil {
fmt.Printf("error creating an instance: create apprise module: %s\n", err)
os.Exit(2)
}
instance.AddModule(observer)
// FIXME: Use service?
// Execute command line operation, if requested or available.
switch {
case !execCmdLine:
// Run service.
case instance.CommandLineOperation == nil:
fmt.Println("command line operation execution requested, but not set")
os.Exit(3)
default:
// Run the function and exit.
err = instance.CommandLineOperation()
// Add additional modules.
observer, err := New(svc)
if err != nil {
fmt.Fprintf(os.Stderr, "command line operation failed: %s\n", err)
os.Exit(3)
fmt.Printf("error creating an instance: create observer module: %s\n", err)
os.Exit(2)
}
os.Exit(0)
}
// Start
go func() {
err = instance.Start()
svc.AddModule(observer)
_, err = NewApprise(svc)
if err != nil {
fmt.Printf("instance start failed: %s\n", err)
os.Exit(1)
fmt.Printf("error creating an instance: create apprise module: %s\n", err)
os.Exit(2)
}
}()
svc.AddModule(observer)
// Wait for signal.
signalCh := make(chan os.Signal, 1)
signal.Notify(
signalCh,
os.Interrupt,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT,
sigUSR1,
)
select {
case sig := <-signalCh:
// Only print and continue to wait if SIGUSR1
if sig == sigUSR1 {
printStackTo(os.Stderr, "PRINTING STACK ON REQUEST")
} else {
fmt.Println(" <INTERRUPT>") // CLI output.
slog.Warn("program was interrupted, stopping")
}
case <-instance.ShuttingDown():
log.Shutdown()
os.Exit(instance.ExitCode())
return svc, err
}
cmdbase.SvcConfig = &service.ServiceConfig{
BinDir: binDir,
DataDir: dataDir,
// Catch signals during shutdown.
// Rapid unplanned disassembly after 5 interrupts.
go func() {
forceCnt := 5
for {
<-signalCh
forceCnt--
if forceCnt > 0 {
fmt.Printf(" <INTERRUPT> again, but already shutting down - %d more to force\n", forceCnt)
} else {
printStackTo(os.Stderr, "PRINTING STACK ON FORCED EXIT")
os.Exit(1)
}
}
}()
LogToStdout: logToStdout,
LogDir: logDir,
LogLevel: logLevel,
// Rapid unplanned disassembly after 3 minutes.
go func() {
time.Sleep(3 * time.Minute)
printStackTo(os.Stderr, "PRINTING STACK - TAKING TOO LONG FOR SHUTDOWN")
os.Exit(1)
}()
// Stop instance.
if err := instance.Stop(); err != nil {
slog.Error("failed to stop", "err", err)
}
log.Shutdown()
os.Exit(instance.ExitCode())
}
func printStackTo(writer io.Writer, msg string) {
_, err := fmt.Fprintf(writer, "===== %s =====\n", msg)
if err == nil {
err = pprof.Lookup("goroutine").WriteTo(writer, 1)
}
if err != nil {
slog.Error("failed to write stack trace", "err", err)
BinariesIndexURLs: configure.DefaultStableBinaryIndexURLs,
IntelIndexURLs: configure.DefaultIntelIndexURLs,
VerifyBinaryUpdates: configure.BinarySigningTrustStore,
VerifyIntelUpdates: configure.BinarySigningTrustStore,
}
}

View file

@ -10,7 +10,9 @@ import (
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/metrics"
"github.com/safing/portmaster/cmds/cmdbase"
"github.com/safing/portmaster/service"
"github.com/safing/portmaster/service/configure"
"github.com/safing/portmaster/service/updates"
)
@ -18,7 +20,7 @@ var (
rootCmd = &cobra.Command{
Use: "portmaster-core",
PersistentPreRun: initializeGlobals,
Run: cmdRun,
Run: mainRun,
}
binDir string
@ -28,15 +30,11 @@ var (
logDir string
logLevel string
svcCfg *service.ServiceConfig
printVersion bool
)
func init() {
// Add Go's default flag set.
// TODO: Move flags throughout Portmaster to here and add their values to the service config.
rootCmd.Flags().AddGoFlagSet(flag.CommandLine)
// Add persisent flags for all commands.
// Add persistent flags for all commands.
rootCmd.PersistentFlags().StringVar(&binDir, "bin-dir", "", "set directory for executable binaries (rw/ro)")
rootCmd.PersistentFlags().StringVar(&dataDir, "data-dir", "", "set directory for variable data (rw)")
@ -44,17 +42,32 @@ func init() {
rootCmd.Flags().BoolVar(&logToStdout, "log-stdout", false, "log to stdout instead of file")
rootCmd.Flags().StringVar(&logDir, "log-dir", "", "set directory for logs")
rootCmd.Flags().StringVar(&logLevel, "log", "", "set log level to [trace|debug|info|warning|error|critical]")
rootCmd.Flags().BoolVar(&printVersion, "version", false, "print version (backward compatibility; use command instead)")
rootCmd.Flags().BoolVar(&cmdbase.PrintStackOnExit, "print-stack-on-exit", false, "prints the stack before shutting down")
// Add other commands.
rootCmd.AddCommand(cmdbase.VersionCmd)
rootCmd.AddCommand(cmdbase.UpdateCmd)
}
func main() {
// Add Go's default flag set.
// TODO: Move flags throughout Portmaster to here and add their values to the service config.
rootCmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func mainRun(cmd *cobra.Command, args []string) {
runPlatformSpecifics(cmd, args)
cmdbase.RunService(cmd, args)
}
func initializeGlobals(cmd *cobra.Command, args []string) {
// set information
// Set version info.
info.Set("Portmaster", "", "GPLv3")
// Configure metrics.
@ -63,8 +76,12 @@ func initializeGlobals(cmd *cobra.Command, args []string) {
// Configure user agent.
updates.UserAgent = fmt.Sprintf("Portmaster Core (%s %s)", runtime.GOOS, runtime.GOARCH)
// Create service config.
svcCfg = &service.ServiceConfig{
// Configure service.
cmdbase.SvcFactory = func(svcCfg *service.ServiceConfig) (cmdbase.ServiceInstance, error) {
svc, err := service.New(svcCfg)
return svc, err
}
cmdbase.SvcConfig = &service.ServiceConfig{
BinDir: binDir,
DataDir: dataDir,
@ -72,9 +89,18 @@ func initializeGlobals(cmd *cobra.Command, args []string) {
LogDir: logDir,
LogLevel: logLevel,
BinariesIndexURLs: service.DefaultStableBinaryIndexURLs,
IntelIndexURLs: service.DefaultIntelIndexURLs,
VerifyBinaryUpdates: service.BinarySigningTrustStore,
VerifyIntelUpdates: service.BinarySigningTrustStore,
BinariesIndexURLs: configure.DefaultStableBinaryIndexURLs,
IntelIndexURLs: configure.DefaultIntelIndexURLs,
VerifyBinaryUpdates: configure.BinarySigningTrustStore,
VerifyIntelUpdates: configure.BinarySigningTrustStore,
}
}
func runFlagCmd(fn func(cmd *cobra.Command, args []string) error, cmd *cobra.Command, args []string) {
if err := fn(cmd, args); err != nil {
fmt.Printf("failed: %s\n", err)
os.Exit(1)
}
os.Exit(0)
}

View file

@ -0,0 +1,21 @@
package main
import (
"github.com/safing/portmaster/cmds/cmdbase"
"github.com/spf13/cobra"
)
var recoverIPTablesFlag bool
func init() {
rootCmd.Flags().BoolVar(&recoverIPTablesFlag, "recover-iptables", false, "recovers ip table rules (backward compatibility; use command instead)")
}
func runPlatformSpecifics(cmd *cobra.Command, args []string) {
switch {
case printVersion:
runFlagCmd(cmdbase.Version, cmd, args)
case recoverIPTablesFlag:
runFlagCmd(recoverIPTables, cmd, args)
}
}

View file

@ -0,0 +1,13 @@
package main
import (
"github.com/safing/portmaster/cmds/cmdbase"
"github.com/spf13/cobra"
)
func runPlatformSpecifics(cmd *cobra.Command, args []string) {
switch {
case printVersion:
runFlagCmd(cmdbase.Version, cmd, args)
}
}

View file

@ -2,7 +2,6 @@ package main
import (
"errors"
"flag"
"fmt"
"os"
"strings"
@ -13,23 +12,17 @@ import (
"github.com/safing/portmaster/service/firewall/interception"
)
var (
recoverCmd = &cobra.Command{
Use: "recover-iptables",
Short: "Force an update of all components.",
RunE: update,
}
recoverIPTables bool
)
var recoverCmd = &cobra.Command{
Use: "recover-iptables",
Short: "Clean up Portmaster rules in iptables",
RunE: recoverIPTables,
}
func init() {
rootCmd.AddCommand(recoverCmd)
flag.BoolVar(&recoverIPTables, "recover-iptables", false, "recovers ip table rules (backward compatibility; use command instead)")
}
func recover(cmd *cobra.Command, args []string) error {
func recoverIPTables(cmd *cobra.Command, args []string) error {
// interception.DeactiveNfqueueFirewall uses coreos/go-iptables
// which shells out to the /sbin/iptables binary. As a result,
// we don't get the errno of the actual error and need to parse the

View file

@ -6,7 +6,7 @@ import (
)
func setupDatabases(path string) error {
err := database.InitializeWithPath(path)
err := database.Initialize(path)
if err != nil {
return err
}

View file

@ -37,13 +37,12 @@ func main() {
}
// Start logging.
err := log.Start()
err := log.Start("trace", true, "")
if err != nil {
fmt.Printf("failed to start logging: %s\n", err)
os.Exit(1)
}
defer log.Shutdown()
log.SetLogLevel(log.TraceLevel)
log.Info("starting traffic generator")
// Execute requests

123
cmds/updatemgr/convert.go Normal file
View file

@ -0,0 +1,123 @@
package main
import (
"encoding/json"
"fmt"
"path"
"strings"
"time"
"github.com/safing/portmaster/service/updates"
)
func convertV1(indexData []byte, baseURL string, lastUpdate time.Time) (*updates.Index, error) {
// Parse old index.
oldIndex := make(map[string]string)
err := json.Unmarshal(indexData, &oldIndex)
if err != nil {
return nil, fmt.Errorf("failed to parse old v1 index: %w", err)
}
// Create new index.
newIndex := &updates.Index{
Published: lastUpdate,
Artifacts: make([]*updates.Artifact, 0, len(oldIndex)),
}
// Convert all entries.
if err := convertEntries(newIndex, baseURL, oldIndex); err != nil {
return nil, err
}
return newIndex, nil
}
type IndexV2 struct {
Channel string
Published time.Time
Releases map[string]string
}
func convertV2(indexData []byte, baseURL string) (*updates.Index, error) {
// Parse old index.
oldIndex := &IndexV2{}
err := json.Unmarshal(indexData, oldIndex)
if err != nil {
return nil, fmt.Errorf("failed to parse old v2 index: %w", err)
}
// Create new index.
newIndex := &updates.Index{
Published: oldIndex.Published,
Artifacts: make([]*updates.Artifact, 0, len(oldIndex.Releases)),
}
// Convert all entries.
if err := convertEntries(newIndex, baseURL, oldIndex.Releases); err != nil {
return nil, err
}
return newIndex, nil
}
func convertEntries(index *updates.Index, baseURL string, entries map[string]string) error {
entries:
for identifier, version := range entries {
dir, filename := path.Split(identifier)
artifactPath := GetVersionedPath(identifier, version)
// Check if file is to be ignored.
if scanConfig.IsIgnored(artifactPath) {
continue entries
}
// Get the platform.
var platform string
splittedPath := strings.Split(dir, "/")
if len(splittedPath) >= 1 {
platform = splittedPath[0]
if platform == "all" {
platform = ""
}
} else {
continue entries
}
// Create new artifact.
newArtifact := &updates.Artifact{
Filename: filename,
URLs: []string{baseURL + artifactPath},
Platform: platform,
Version: version,
}
// Derive unpack setting.
unpack, err := scanConfig.UnpackSetting(filename)
if err != nil {
return fmt.Errorf("failed to get unpack setting for %s: %w", filename, err)
}
newArtifact.Unpack = unpack
// Add to new index.
index.Artifacts = append(index.Artifacts, newArtifact)
}
return nil
}
// GetVersionedPath combines the identifier and version and returns it as a file path.
func GetVersionedPath(identifier, version string) (versionedPath string) {
identifierPath, filename := path.Split(identifier)
// Split the filename where the version should go.
splittedFilename := strings.SplitN(filename, ".", 2)
// Replace `.` with `-` for the filename format.
transformedVersion := strings.Replace(version, ".", "-", 2)
// Put everything back together and return it.
versionedPath = identifierPath + splittedFilename[0] + "_v" + transformedVersion
if len(splittedFilename) > 1 {
versionedPath += "." + splittedFilename[1]
}
return versionedPath
}
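For clarity (illustrative only, not part of the diff), the versioned-path transformation performed by GetVersionedPath looks like this for hypothetical identifiers:
// Hypothetical identifiers, shown to illustrate GetVersionedPath above.
func exampleVersionedPaths() {
	fmt.Println(GetVersionedPath("windows_amd64/portmaster-core.exe", "1.6.9"))
	// -> windows_amd64/portmaster-core_v1-6-9.exe
	fmt.Println(GetVersionedPath("all/ui/modules/portmaster.zip", "0.9.1"))
	// -> all/ui/modules/portmaster_v0-9-1.zip
}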

View file

@ -0,0 +1,88 @@
package main
import (
"errors"
"fmt"
"os"
"runtime"
"github.com/spf13/cobra"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/updates"
)
const currentPlatform = runtime.GOOS + "_" + runtime.GOARCH
var (
downloadCmd = &cobra.Command{
Use: "download [index URL] [download dir]",
Short: "Download all artifacts by an index to a directory",
RunE: download,
Args: cobra.ExactArgs(2),
}
downloadPlatform string
)
func init() {
rootCmd.AddCommand(downloadCmd)
downloadCmd.Flags().StringVarP(&downloadPlatform, "platform", "p", currentPlatform, "Define platform to download artifacts for")
}
func download(cmd *cobra.Command, args []string) error {
// Args.
indexURL := args[0]
targetDir := args[1]
// Check target dir.
stat, err := os.Stat(targetDir)
if err != nil {
return fmt.Errorf("failed to access target dir: %w", err)
}
if !stat.IsDir() {
return errors.New("target is not a directory")
}
// Create temporary directories.
tmpDownload, err := os.MkdirTemp("", "portmaster-updatemgr-download-")
if err != nil {
return err
}
tmpPurge, err := os.MkdirTemp("", "portmaster-updatemgr-purge-")
if err != nil {
return err
}
// Create updater.
u, err := updates.New(nil, "", updates.Config{
Name: "Downloader",
Directory: targetDir,
DownloadDirectory: tmpDownload,
PurgeDirectory: tmpPurge,
IndexURLs: []string{indexURL},
IndexFile: "index.json",
Platform: downloadPlatform,
})
if err != nil {
return err
}
// Start logging.
err = log.Start(log.InfoLevel.Name(), true, "")
if err != nil {
return err
}
// Download artifacts.
err = u.ForceUpdate()
// Stop logging.
log.Shutdown()
// Remove tmp dirs
os.RemoveAll(tmpDownload)
os.RemoveAll(tmpPurge)
return err
}

215
cmds/updatemgr/mirror.go Normal file
View file

@ -0,0 +1,215 @@
package main
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/safing/portmaster/service/updates"
)
var (
// UserAgent is an HTTP User-Agent that is used to add
// more context to requests made by the registry when
// fetching resources from the update server.
UserAgent = fmt.Sprintf("Portmaster Update Mgr (%s %s)", runtime.GOOS, runtime.GOARCH)
client http.Client
)
func init() {
rootCmd.AddCommand(mirrorCmd)
}
var (
mirrorCmd = &cobra.Command{
Use: "mirror [index URL] [mirror dir]",
Short: "Mirror all artifacts by an index to a directory, keeping the directory structure and file names intact",
RunE: mirror,
Args: cobra.ExactArgs(2),
}
)
func mirror(cmd *cobra.Command, args []string) error {
// Args.
indexURL := args[0]
targetDir := args[1]
// Check target dir.
stat, err := os.Stat(targetDir)
if err != nil {
return fmt.Errorf("failed to access target dir: %w", err)
}
if !stat.IsDir() {
return errors.New("target is not a directory")
}
// Calculate Base URL.
u, err := url.Parse(indexURL)
if err != nil {
return fmt.Errorf("invalid index URL: %w", err)
}
indexPath := u.Path
u.RawQuery = ""
u.RawFragment = ""
u.Path = ""
u.RawPath = ""
baseURL := u.String() + "/"
// Download Index.
fmt.Println("downloading index...")
indexData, err := downloadData(cmd.Context(), indexURL)
if err != nil {
return fmt.Errorf("download index: %w", err)
}
// Parse (and convert) index.
var index *updates.Index
_, newIndexName := path.Split(indexPath)
switch {
case strings.HasSuffix(indexPath, ".v3.json"):
index = &updates.Index{}
err := json.Unmarshal(indexData, index)
if err != nil {
return fmt.Errorf("parse v3 index: %w", err)
}
case strings.HasSuffix(indexPath, ".v2.json"):
index, err = convertV2(indexData, baseURL)
if err != nil {
return fmt.Errorf("convert v2 index: %w", err)
}
newIndexName = strings.TrimSuffix(newIndexName, ".v2.json") + ".v3.json"
case strings.HasSuffix(indexPath, ".json"):
index, err = convertV1(indexData, baseURL, time.Now())
if err != nil {
return fmt.Errorf("convert v1 index: %w", err)
}
newIndexName = strings.TrimSuffix(newIndexName, ".json") + ".v3.json"
default:
return errors.New("invalid index file extension")
}
// Download and save artifacts.
for _, artifact := range index.Artifacts {
fmt.Printf("downloading %s...\n", artifact.Filename)
// Download artifact and add any missing checksums.
artifactData, artifactLocation, err := getArtifact(cmd.Context(), artifact)
if err != nil {
return fmt.Errorf("get artifact %s: %w", artifact.Filename, err)
}
// Write artifact to correct location.
artifactDst := filepath.Join(targetDir, filepath.FromSlash(artifactLocation))
artifactDir, _ := filepath.Split(artifactDst)
err = os.MkdirAll(artifactDir, 0o0755)
if err != nil {
return fmt.Errorf("create artifact dir %s: %w", artifactDir, err)
}
err = os.WriteFile(artifactDst, artifactData, 0o0644)
if err != nil {
return fmt.Errorf("save artifact %s: %w", artifact.Filename, err)
}
}
// Save index.
indexJson, err := json.MarshalIndent(index, "", " ")
if err != nil {
return fmt.Errorf("marshal index: %w", err)
}
indexDst := filepath.Join(targetDir, newIndexName)
err = os.WriteFile(indexDst, indexJson, 0o0644)
if err != nil {
return fmt.Errorf("write index to %s: %w", indexDst, err)
}
return err
}
func getArtifact(ctx context.Context, artifact *updates.Artifact) (artifactData []byte, artifactLocation string, err error) {
// Check URL.
if len(artifact.URLs) == 0 {
return nil, "", errors.New("no URLs defined")
}
u, err := url.Parse(artifact.URLs[0])
if err != nil {
return nil, "", fmt.Errorf("invalid URL: %w", err)
}
// Download data from URL.
artifactData, err = downloadData(ctx, artifact.URLs[0])
if err != nil {
return nil, "", fmt.Errorf("GET artifact: %w", err)
}
// Decompress artifact data, if configured.
var finalArtifactData []byte
if artifact.Unpack != "" {
finalArtifactData, err = updates.Decompress(artifact.Unpack, artifactData)
if err != nil {
return nil, "", fmt.Errorf("decompress: %w", err)
}
} else {
finalArtifactData = artifactData
}
// Verify or generate checksum.
if artifact.SHA256 != "" {
if err := updates.CheckSHA256Sum(finalArtifactData, artifact.SHA256); err != nil {
return nil, "", err
}
} else {
fileHash := sha256.New()
if _, err := io.Copy(fileHash, bytes.NewReader(finalArtifactData)); err != nil {
return nil, "", fmt.Errorf("digest file: %w", err)
}
artifact.SHA256 = hex.EncodeToString(fileHash.Sum(nil))
}
return artifactData, u.Path, nil
}
func downloadData(ctx context.Context, url string) ([]byte, error) {
// Setup request.
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
if err != nil {
return nil, fmt.Errorf("failed to create GET request to %s: %w", url, err)
}
if UserAgent != "" {
req.Header.Set("User-Agent", UserAgent)
}
// Start request with shared http client.
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("GET request to %s failed: %w", url, err)
}
defer func() { _ = resp.Body.Close() }()
// Check for HTTP status errors.
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("server returned non-OK status: %s", resp.Status)
}
// Read the full body and return it.
content, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read body of response: %w", err)
}
return content, nil
}

View file

@ -69,7 +69,7 @@ func sign(cmd *cobra.Command, args []string) error {
}
// Parse index and check if it is valid.
index, err := updates.ParseIndex(unsignedIndexData, nil)
index, err := updates.ParseIndex(unsignedIndexData, "", nil)
if err != nil {
return fmt.Errorf("invalid index: %w", err)
}
@ -85,7 +85,7 @@ func sign(cmd *cobra.Command, args []string) error {
}
// Check by parsing again.
index, err = updates.ParseIndex(signedIndexData, nil)
index, err = updates.ParseIndex(signedIndexData, "", nil)
if err != nil {
return fmt.Errorf("invalid index after signing: %w", err)
}

View file

@ -23,13 +23,13 @@
"@fortawesome/free-brands-svg-icons": "^6.4.0",
"@fortawesome/free-regular-svg-icons": "^6.4.0",
"@fortawesome/free-solid-svg-icons": "^6.4.0",
"@tauri-apps/api": ">=2.0.0-rc.1",
"@tauri-apps/plugin-cli": ">=2.0.0-rc.1",
"@tauri-apps/plugin-clipboard-manager": ">=2.0.0-rc.1",
"@tauri-apps/plugin-dialog": ">=2.0.0-rc.1",
"@tauri-apps/plugin-notification": ">=2.0.0-rc.1",
"@tauri-apps/plugin-os": ">=2.0.0-rc.1",
"@tauri-apps/plugin-shell": "^2.0.0-rc",
"@tauri-apps/api": ">=2.1.1",
"@tauri-apps/plugin-cli": ">=2.0.0",
"@tauri-apps/plugin-clipboard-manager": ">=2.0.0",
"@tauri-apps/plugin-dialog": ">=2.0.0",
"@tauri-apps/plugin-notification": ">=2.0.0",
"@tauri-apps/plugin-os": ">=2.0.0",
"@tauri-apps/plugin-shell": "^2.0.1",
"autoprefixer": "^10.4.14",
"d3": "^7.8.4",
"data-urls": "^5.0.0",
@ -4406,9 +4406,9 @@
"peer": true
},
"node_modules/@tauri-apps/api": {
"version": "2.0.0-rc.4",
"resolved": "https://registry.npmjs.org/@tauri-apps/api/-/api-2.0.0-rc.4.tgz",
"integrity": "sha512-UNiIhhKG08j4ooss2oEEVexffmWkgkYlC2M3GcX3VPtNsqFgVNL8Mcw/4Y7rO9M9S+ffAMnLOF5ypzyuyb8tyg==",
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/api/-/api-2.1.1.tgz",
"integrity": "sha512-fzUfFFKo4lknXGJq8qrCidkUcKcH2UHhfaaCNt4GzgzGaW2iS26uFOg4tS3H4P8D6ZEeUxtiD5z0nwFF0UN30A==",
"license": "Apache-2.0 OR MIT",
"funding": {
"type": "opencollective",
@ -4416,57 +4416,57 @@
}
},
"node_modules/@tauri-apps/plugin-cli": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-cli/-/plugin-cli-2.0.0-rc.1.tgz",
"integrity": "sha512-EcSTRfEU3zzlNbgwVtZVzqB19z3PNjyXD9H+YXuuLpV+Hwuh6Oi1fhUdCI0mp5zr9HSMWE+HzHkpBI7sVP1RyA==",
"license": "MIT or APACHE-2.0",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-cli/-/plugin-cli-2.0.0.tgz",
"integrity": "sha512-glQmlL1IiCGEa1FHYa/PTPSeYhfu56omLRgHXWlJECDt6DbJyRuJWVgtkQfUxtqnVdYnnU+DGIGeiInoEqtjLw==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"node_modules/@tauri-apps/plugin-clipboard-manager": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-clipboard-manager/-/plugin-clipboard-manager-2.0.0-rc.1.tgz",
"integrity": "sha512-hFgUABMmQuVGKwHb8PR9fuqfk0WRkedbWUt/ZV5sL4Q6kLrsp3JYJvtzVPeMYdeBvMqHl8WXNxAc/zwSld2h9w==",
"license": "MIT or APACHE-2.0",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-clipboard-manager/-/plugin-clipboard-manager-2.0.0.tgz",
"integrity": "sha512-V1sXmbjnwfXt/r48RJMwfUmDMSaP/8/YbH4CLNxt+/sf1eHlIP8PRFdFDQwLN0cNQKu2rqQVbG/Wc/Ps6cDUhw==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"node_modules/@tauri-apps/plugin-dialog": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-dialog/-/plugin-dialog-2.0.0-rc.1.tgz",
"integrity": "sha512-H28gh6BfZtjflHQ+HrmWwunDriBI3AQLAKnMs50GA6zeNUULqbQr7VXbAAKeJL/0CmWcecID4PKXVoSlaWRhEg==",
"license": "MIT or APACHE-2.0",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-dialog/-/plugin-dialog-2.0.1.tgz",
"integrity": "sha512-fnUrNr6EfvTqdls/ufusU7h6UbNFzLKvHk/zTuOiBq01R3dTODqwctZlzakdbfSp/7pNwTKvgKTAgl/NAP/Z0Q==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"node_modules/@tauri-apps/plugin-notification": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-notification/-/plugin-notification-2.0.0-rc.1.tgz",
"integrity": "sha512-ddDj7xM8XR7Zv2vdpofNXlLjcp49p/VjlL0D+/eBcMuyooaLNMor3jz/+H6s23iHerdxMWA50mzy26BRN1BySA==",
"license": "MIT or APACHE-2.0",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-notification/-/plugin-notification-2.0.0.tgz",
"integrity": "sha512-6qEDYJS7mgXZWLXA0EFL+DVCJh8sJlzSoyw6B50pxhLPVFjc5Vr5DVzl5W3mUHaYhod5wsC984eQnlCCGqxYDA==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"node_modules/@tauri-apps/plugin-os": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-os/-/plugin-os-2.0.0-rc.1.tgz",
"integrity": "sha512-PV8zlSTmYfiN2xzILUmlDSEycS7UYbH2yXk/ZqF+qQU6/s+OVQvmSth4EhllFjcpvPbtqELvpzfjw+2qEouchA==",
"license": "MIT or APACHE-2.0",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-os/-/plugin-os-2.0.0.tgz",
"integrity": "sha512-M7hG/nNyQYTJxVG/UhTKhp9mpXriwWzrs9mqDreB8mIgqA3ek5nHLdwRZJWhkKjZrnDT4v9CpA9BhYeplTlAiA==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"node_modules/@tauri-apps/plugin-shell": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-shell/-/plugin-shell-2.0.0-rc.1.tgz",
"integrity": "sha512-JtNROc0rqEwN/g93ig5pK4cl1vUo2yn+osCpY9de64cy/d9hRzof7AuYOgvt/Xcd5VPQmlgo2AGvUh5sQRSR1A==",
"license": "MIT or APACHE-2.0",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-shell/-/plugin-shell-2.0.1.tgz",
"integrity": "sha512-akU1b77sw3qHiynrK0s930y8zKmcdrSD60htjH+mFZqv5WaakZA/XxHR3/sF1nNv9Mgmt/Shls37HwnOr00aSw==",
"license": "MIT OR Apache-2.0",
"dependencies": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"node_modules/@tootallnate/once": {
@ -21067,56 +21067,56 @@
"peer": true
},
"@tauri-apps/api": {
"version": "2.0.0-rc.4",
"resolved": "https://registry.npmjs.org/@tauri-apps/api/-/api-2.0.0-rc.4.tgz",
"integrity": "sha512-UNiIhhKG08j4ooss2oEEVexffmWkgkYlC2M3GcX3VPtNsqFgVNL8Mcw/4Y7rO9M9S+ffAMnLOF5ypzyuyb8tyg=="
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/api/-/api-2.1.1.tgz",
"integrity": "sha512-fzUfFFKo4lknXGJq8qrCidkUcKcH2UHhfaaCNt4GzgzGaW2iS26uFOg4tS3H4P8D6ZEeUxtiD5z0nwFF0UN30A=="
},
"@tauri-apps/plugin-cli": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-cli/-/plugin-cli-2.0.0-rc.1.tgz",
"integrity": "sha512-EcSTRfEU3zzlNbgwVtZVzqB19z3PNjyXD9H+YXuuLpV+Hwuh6Oi1fhUdCI0mp5zr9HSMWE+HzHkpBI7sVP1RyA==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-cli/-/plugin-cli-2.0.0.tgz",
"integrity": "sha512-glQmlL1IiCGEa1FHYa/PTPSeYhfu56omLRgHXWlJECDt6DbJyRuJWVgtkQfUxtqnVdYnnU+DGIGeiInoEqtjLw==",
"requires": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"@tauri-apps/plugin-clipboard-manager": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-clipboard-manager/-/plugin-clipboard-manager-2.0.0-rc.1.tgz",
"integrity": "sha512-hFgUABMmQuVGKwHb8PR9fuqfk0WRkedbWUt/ZV5sL4Q6kLrsp3JYJvtzVPeMYdeBvMqHl8WXNxAc/zwSld2h9w==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-clipboard-manager/-/plugin-clipboard-manager-2.0.0.tgz",
"integrity": "sha512-V1sXmbjnwfXt/r48RJMwfUmDMSaP/8/YbH4CLNxt+/sf1eHlIP8PRFdFDQwLN0cNQKu2rqQVbG/Wc/Ps6cDUhw==",
"requires": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"@tauri-apps/plugin-dialog": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-dialog/-/plugin-dialog-2.0.0-rc.1.tgz",
"integrity": "sha512-H28gh6BfZtjflHQ+HrmWwunDriBI3AQLAKnMs50GA6zeNUULqbQr7VXbAAKeJL/0CmWcecID4PKXVoSlaWRhEg==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-dialog/-/plugin-dialog-2.0.1.tgz",
"integrity": "sha512-fnUrNr6EfvTqdls/ufusU7h6UbNFzLKvHk/zTuOiBq01R3dTODqwctZlzakdbfSp/7pNwTKvgKTAgl/NAP/Z0Q==",
"requires": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"@tauri-apps/plugin-notification": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-notification/-/plugin-notification-2.0.0-rc.1.tgz",
"integrity": "sha512-ddDj7xM8XR7Zv2vdpofNXlLjcp49p/VjlL0D+/eBcMuyooaLNMor3jz/+H6s23iHerdxMWA50mzy26BRN1BySA==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-notification/-/plugin-notification-2.0.0.tgz",
"integrity": "sha512-6qEDYJS7mgXZWLXA0EFL+DVCJh8sJlzSoyw6B50pxhLPVFjc5Vr5DVzl5W3mUHaYhod5wsC984eQnlCCGqxYDA==",
"requires": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"@tauri-apps/plugin-os": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-os/-/plugin-os-2.0.0-rc.1.tgz",
"integrity": "sha512-PV8zlSTmYfiN2xzILUmlDSEycS7UYbH2yXk/ZqF+qQU6/s+OVQvmSth4EhllFjcpvPbtqELvpzfjw+2qEouchA==",
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-os/-/plugin-os-2.0.0.tgz",
"integrity": "sha512-M7hG/nNyQYTJxVG/UhTKhp9mpXriwWzrs9mqDreB8mIgqA3ek5nHLdwRZJWhkKjZrnDT4v9CpA9BhYeplTlAiA==",
"requires": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"@tauri-apps/plugin-shell": {
"version": "2.0.0-rc.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-shell/-/plugin-shell-2.0.0-rc.1.tgz",
"integrity": "sha512-JtNROc0rqEwN/g93ig5pK4cl1vUo2yn+osCpY9de64cy/d9hRzof7AuYOgvt/Xcd5VPQmlgo2AGvUh5sQRSR1A==",
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@tauri-apps/plugin-shell/-/plugin-shell-2.0.1.tgz",
"integrity": "sha512-akU1b77sw3qHiynrK0s930y8zKmcdrSD60htjH+mFZqv5WaakZA/XxHR3/sF1nNv9Mgmt/Shls37HwnOr00aSw==",
"requires": {
"@tauri-apps/api": "^2.0.0-rc.4"
"@tauri-apps/api": "^2.0.0"
}
},
"@tootallnate/once": {

View file

@ -37,13 +37,13 @@
"@fortawesome/free-brands-svg-icons": "^6.4.0",
"@fortawesome/free-regular-svg-icons": "^6.4.0",
"@fortawesome/free-solid-svg-icons": "^6.4.0",
"@tauri-apps/api": ">=2.0.0-rc.1",
"@tauri-apps/plugin-cli": ">=2.0.0-rc.1",
"@tauri-apps/plugin-clipboard-manager": ">=2.0.0-rc.1",
"@tauri-apps/plugin-dialog": ">=2.0.0-rc.1",
"@tauri-apps/plugin-notification": ">=2.0.0-rc.1",
"@tauri-apps/plugin-os": ">=2.0.0-rc.1",
"@tauri-apps/plugin-shell": "^2.0.0-rc",
"@tauri-apps/api": ">=2.1.1",
"@tauri-apps/plugin-cli": ">=2.0.0",
"@tauri-apps/plugin-clipboard-manager": ">=2.0.0",
"@tauri-apps/plugin-dialog": ">=2.0.0",
"@tauri-apps/plugin-notification": ">=2.0.0",
"@tauri-apps/plugin-os": ">=2.0.0",
"@tauri-apps/plugin-shell": "^2.0.1",
"autoprefixer": "^10.4.14",
"d3": "^7.8.4",
"data-urls": "^5.0.0",

View file

@ -238,13 +238,13 @@ export class EditProfileDialog implements OnInit, OnDestroy {
this.portapi.delete(icon.Value).subscribe();
}
// FIXME(ppacher): we cannot yet delete API based icons ...
// TODO(ppacher): we cannot yet delete API based icons ...
});
if (this.iconData !== '') {
// save the new icon in the cache database
// FIXME(ppacher): we currently need to calls because the icon API in portmaster
// TODO(ppacher): we currently need two calls because the icon API in portmaster
// does not update the profile but just saves the file and returns the filename.
// So we still need to update the profile manually.
updateIcon = this.profileService
@ -261,7 +261,7 @@ export class EditProfileDialog implements OnInit, OnDestroy {
})
);
// FIXME(ppacher): reset presentationpath
// TODO(ppacher): reset presentationpath
} else {
// just clear out that there was an icon
this.profile.Icons = [];

View file

@ -543,7 +543,7 @@ export class SfngNetqueryLineChartComponent<D extends SeriesData = any> implemen
.append("title")
.text(d => d.text)
// FIXME(ppacher): somehow d3 does not recognize which data points must be removed
// TODO(ppacher): somehow d3 does not recognize which data points must be removed
// or re-placed. For now, just remove them all
this.svgInner
.select('.points')

View file

@ -184,7 +184,7 @@ export class SfngNetquerySearchbarComponent implements ControlValueAccessor, OnI
const queries: Observable<SfngSearchbarSuggestion<any>>[] = [];
const queryKeys: (keyof Partial<NetqueryConnection>)[] = [];
// FIXME(ppacher): confirm .type is an actually allowed field
// TODO(ppacher): confirm .type is an actually allowed field
if (!!parser.lastUnterminatedCondition) {
fields = [parser.lastUnterminatedCondition.type as keyof NetqueryConnection];
limit = 0;

File diff suppressed because it is too large

View file

@ -12,21 +12,21 @@ rust-version = "1.64"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[build-dependencies]
tauri-build = { version = "2.0.1", features = [] }
tauri-build = { version = "2.0.3", features = [] }
[dependencies]
# Tauri
tauri = { version = "2.0.1", features = ["tray-icon", "image-png", "config-json5", "devtools"] }
tauri-plugin-shell = "2.0.1"
tauri-plugin-dialog = "2.0.1"
tauri-plugin-clipboard-manager = "2.0.1"
tauri = { version = "2.1.1", features = ["tray-icon", "image-png", "config-json5", "devtools"] }
tauri-plugin-shell = "2.0.2"
tauri-plugin-dialog = "2.0.3"
tauri-plugin-clipboard-manager = "2.0.2"
tauri-plugin-os = "2.0.1"
tauri-plugin-single-instance = "2.0.1"
tauri-plugin-notification = "2.0.1"
tauri-plugin-log = "2.0.1"
tauri-plugin-window-state = "2.0.1"
tauri-plugin-log = "2.0.2"
tauri-plugin-window-state = "2.0.2"
tauri-cli = "2.0.1"
tauri-cli = "2.1.0"
clap_lex = "0.7.2"
# General
@ -82,4 +82,4 @@ ctor = "0.2.6"
custom-protocol = [ "tauri/custom-protocol" ]
[package.metadata.clippy]
allow = ["clippy::collapsible_else_if"]
allow = ["clippy::collapsible_else_if"]

View file

@ -37,7 +37,7 @@
],
"definitions": {
"Capability": {
"description": "A grouping and boundary mechanism developers can use to isolate access to the IPC layer.\n\nIt controls application windows fine grained access to the Tauri core, application, or plugin commands. If a window is not matching any capability then it has no access to the IPC layer at all.\n\nThis can be done to create groups of windows, based on their required system access, which can reduce impact of frontend vulnerabilities in less privileged windows. Windows can be added to a capability by exact name (e.g. `main-window`) or glob patterns like `*` or `admin-*`. A Window can have none, one, or multiple associated capabilities.\n\n## Example\n\n```json { \"identifier\": \"main-user-files-write\", \"description\": \"This capability allows the `main` window on macOS and Windows access to `filesystem` write related commands and `dialog` commands to enable programatic access to files selected by the user.\", \"windows\": [ \"main\" ], \"permissions\": [ \"core:default\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] }, \"platforms\": [\"macOS\",\"windows\"] } ```",
"description": "A grouping and boundary mechanism developers can use to isolate access to the IPC layer.\n\nIt controls application windows fine grained access to the Tauri core, application, or plugin commands. If a window is not matching any capability then it has no access to the IPC layer at all.\n\nThis can be done to create groups of windows, based on their required system access, which can reduce impact of frontend vulnerabilities in less privileged windows. Windows can be added to a capability by exact name (e.g. `main-window`) or glob patterns like `*` or `admin-*`. A Window can have none, one, or multiple associated capabilities.\n\n## Example\n\n```json { \"identifier\": \"main-user-files-write\", \"description\": \"This capability allows the `main` window on macOS and Windows access to `filesystem` write related commands and `dialog` commands to enable programatic access to files selected by the user.\", \"windows\": [ \"main\" ], \"permissions\": [ \"core:default\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] }, ], \"platforms\": [\"macOS\",\"windows\"] } ```",
"type": "object",
"required": [
"identifier",
@ -84,7 +84,7 @@
}
},
"permissions": {
"description": "List of permissions attached to this capability.\n\nMust include the plugin name as prefix in the form of `${plugin-name}:${permission-name}`. For commands directly implemented in the application itself only `${permission-name}` is required.\n\n## Example\n\n```json [ \"core:default\", \"shell:allow-open\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] } ```",
"description": "List of permissions attached to this capability.\n\nMust include the plugin name as prefix in the form of `${plugin-name}:${permission-name}`. For commands directly implemented in the application itself only `${permission-name}` is required.\n\n## Example\n\n```json [ \"core:default\", \"shell:allow-open\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] } ] ```",
"type": "array",
"items": {
"$ref": "#/definitions/PermissionEntry"

View file

@ -37,7 +37,7 @@
],
"definitions": {
"Capability": {
"description": "A grouping and boundary mechanism developers can use to isolate access to the IPC layer.\n\nIt controls application windows fine grained access to the Tauri core, application, or plugin commands. If a window is not matching any capability then it has no access to the IPC layer at all.\n\nThis can be done to create groups of windows, based on their required system access, which can reduce impact of frontend vulnerabilities in less privileged windows. Windows can be added to a capability by exact name (e.g. `main-window`) or glob patterns like `*` or `admin-*`. A Window can have none, one, or multiple associated capabilities.\n\n## Example\n\n```json { \"identifier\": \"main-user-files-write\", \"description\": \"This capability allows the `main` window on macOS and Windows access to `filesystem` write related commands and `dialog` commands to enable programatic access to files selected by the user.\", \"windows\": [ \"main\" ], \"permissions\": [ \"core:default\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] }, \"platforms\": [\"macOS\",\"windows\"] } ```",
"description": "A grouping and boundary mechanism developers can use to isolate access to the IPC layer.\n\nIt controls application windows fine grained access to the Tauri core, application, or plugin commands. If a window is not matching any capability then it has no access to the IPC layer at all.\n\nThis can be done to create groups of windows, based on their required system access, which can reduce impact of frontend vulnerabilities in less privileged windows. Windows can be added to a capability by exact name (e.g. `main-window`) or glob patterns like `*` or `admin-*`. A Window can have none, one, or multiple associated capabilities.\n\n## Example\n\n```json { \"identifier\": \"main-user-files-write\", \"description\": \"This capability allows the `main` window on macOS and Windows access to `filesystem` write related commands and `dialog` commands to enable programatic access to files selected by the user.\", \"windows\": [ \"main\" ], \"permissions\": [ \"core:default\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] }, ], \"platforms\": [\"macOS\",\"windows\"] } ```",
"type": "object",
"required": [
"identifier",
@ -84,7 +84,7 @@
}
},
"permissions": {
"description": "List of permissions attached to this capability.\n\nMust include the plugin name as prefix in the form of `${plugin-name}:${permission-name}`. For commands directly implemented in the application itself only `${permission-name}` is required.\n\n## Example\n\n```json [ \"core:default\", \"shell:allow-open\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] } ```",
"description": "List of permissions attached to this capability.\n\nMust include the plugin name as prefix in the form of `${plugin-name}:${permission-name}`. For commands directly implemented in the application itself only `${permission-name}` is required.\n\n## Example\n\n```json [ \"core:default\", \"shell:allow-open\", \"dialog:open\", { \"identifier\": \"fs:allow-write-text-file\", \"allow\": [{ \"path\": \"$HOME/test.txt\" }] } ] ```",
"type": "array",
"items": {
"$ref": "#/definitions/PermissionEntry"

File diff suppressed because it is too large

View file

@ -1,9 +1,9 @@
use log::LevelFilter;
#[cfg(not(debug_assertions))]
const DEFAULT_LOG_LEVEL: log::LevelFilter = log::LevelFilter::Warn;
// #[cfg(not(debug_assertions))]
// const DEFAULT_LOG_LEVEL: log::LevelFilter = log::LevelFilter::Warn;
#[cfg(debug_assertions)]
// #[cfg(debug_assertions)]
const DEFAULT_LOG_LEVEL: log::LevelFilter = log::LevelFilter::Debug;
#[derive(Debug)]
@ -43,8 +43,8 @@ pub fn parse(raw: impl IntoIterator<Item = impl Into<std::ffi::OsString>>) -> Cl
data: None,
log_level: DEFAULT_LOG_LEVEL,
background: false,
with_prompts: false,
with_notifications: false,
with_prompts: true,
with_notifications: true,
};
let raw = clap_lex::RawArgs::new(raw);
@ -67,11 +67,11 @@ pub fn parse(raw: impl IntoIterator<Item = impl Into<std::ffi::OsString>>) -> Cl
Ok("background") => {
cli.background = true;
}
Ok("with_prompts") => {
cli.with_prompts = true;
Ok("no-prompts") => {
cli.with_prompts = false;
}
Ok("with_notifications") => {
cli.with_notifications = true;
Ok("no-notifications") => {
cli.with_notifications = false;
}
_ => {
// Ignore unexpected flags

View file

@ -126,15 +126,16 @@ fn main() {
let cli_args = cli::parse(std::env::args());
// TODO(vladimir): Support for other log targets?
#[cfg(target_os = "linux")]
let log_target = if let Some(data_dir) = cli_args.data {
tauri_plugin_log::Target::new(tauri_plugin_log::TargetKind::Folder {
path: Path::new(&format!("{}/logs/app2", data_dir)).into(),
file_name: None,
})
} else {
tauri_plugin_log::Target::new(tauri_plugin_log::TargetKind::Stdout)
};
let log_target = tauri_plugin_log::Target::new(tauri_plugin_log::TargetKind::Stdout);
// let log_target = if let Some(data_dir) = cli_args.data {
// tauri_plugin_log::Target::new(tauri_plugin_log::TargetKind::Folder {
// path: Path::new(&format!("{}/logs/app2", data_dir)).into(),
// file_name: None,
// })
// } else {
// };
// TODO(vladimir): Permissions for the logs/app2 folder are not guaranteed. Use the default location for now.
#[cfg(target_os = "windows")]

View file

@ -2,6 +2,7 @@ use crate::portapi::client::*;
use crate::portapi::message::*;
use crate::portapi::models::notification::*;
use crate::portapi::types::*;
use log::debug;
use log::error;
use serde_json::json;
use tauri::async_runtime;
@ -25,12 +26,12 @@ pub async fn notification_handler(cli: PortAPI) {
Ok(n) => {
// Skip if this one should not be shown using the system notifications
if !n.show_on_system {
return;
continue;
}
// Skip if this action has already been acted on
if n.selected_action_id.is_empty() {
return;
if !n.selected_action_id.is_empty() {
continue;
}
show_notification(&cli, key, n).await;
}

View file

@ -30,7 +30,7 @@ use crate::{
portmaster::PortmasterExt,
window::{create_main_window, may_navigate_to_ui, open_window},
};
use tauri_plugin_dialog::DialogExt;
use tauri_plugin_dialog::{DialogExt, MessageDialogButtons};
pub type AppIcon = TrayIcon<Wry>;
@ -185,7 +185,7 @@ pub fn setup_tray_menu(
app.dialog()
.message("This does not stop the Portmaster system service")
.title("Do you really want to quit the user interface?")
.buttons(tauri_plugin_dialog::MessageDialogButtons::OkCancelCustom(
.buttons(MessageDialogButtons::OkCancelCustom(
"Yes, exit".to_owned(),
"No".to_owned(),
))

View file

@ -28,12 +28,12 @@
"takesValue": true
},
{
"name": "with-notifications",
"description": "Enable experimental notifications via Tauri. Replaces the notifier app."
"name": "no-notifications",
"description": "Disable notifications via Tauri."
},
{
"name": "with-prompts",
"description": "Enable experimental prompt support via Tauri. Replaces the notifier app."
"name": "no-prompts",
"description": "Disable prompt support via Tauri."
},
]
}
@ -63,13 +63,13 @@
"/usr/lib/systemd/system/portmaster.service": "../../../packaging/linux/portmaster.service",
// Binary files
"/usr/lib/portmaster/bin-index.json": "binary/bin-index.json",
"/usr/lib/portmaster/index.json": "binary/index.json",
"/usr/lib/portmaster/portmaster-core": "binary/portmaster-core",
"/usr/lib/portmaster/portmaster.zip": "binary/portmaster.zip",
"/usr/lib/portmaster/assets.zip": "binary/assets.zip",
// Intel files
"/var/lib/portmaster/intel/intel-index.json": "intel/intel-index.json",
"/var/lib/portmaster/intel/index.json": "intel/index.json",
"/var/lib/portmaster/intel/base.dsdl": "intel/base.dsdl",
"/var/lib/portmaster/intel/geoipv4.mmdb": "intel/geoipv4.mmdb",
"/var/lib/portmaster/intel/geoipv6.mmdb": "intel/geoipv6.mmdb",
@ -94,13 +94,13 @@
"/usr/lib/systemd/system/portmaster.service": "../../../packaging/linux/portmaster.service",
// Binary files
"/usr/lib/portmaster/bin-index.json": "binary/bin-index.json",
"/usr/lib/portmaster/index.json": "binary/index.json",
"/usr/lib/portmaster/portmaster-core": "binary/portmaster-core",
"/usr/lib/portmaster/portmaster.zip": "binary/portmaster.zip",
"/usr/lib/portmaster/assets.zip": "binary/assets.zip",
// Intel files
"/var/lib/portmaster/intel/intel-index.json": "intel/intel-index.json",
"/var/lib/portmaster/intel/index.json": "intel/index.json",
"/var/lib/portmaster/intel/base.dsdl": "intel/base.dsdl",
"/var/lib/portmaster/intel/geoipv4.mmdb": "intel/geoipv4.mmdb",
"/var/lib/portmaster/intel/geoipv6.mmdb": "intel/geoipv6.mmdb",

View file

@ -12,15 +12,17 @@
<Fragment>
<Component Id="BinaryFiles" Directory="INSTALLDIR" Guid="850cdd31-424d-45f5-b8f0-95df950ebd0d">
<File Id="BinIndexJson" Source="..\..\..\..\binary\bin-index.json" />
<File Id="BinIndexJson" Source="..\..\..\..\binary\index.json" />
<File Id="PortmasterCoreExe" Source="..\..\..\..\binary\portmaster-core.exe" />
<File Id="PortmasterCoreDLL" Source="..\..\..\..\binary\portmaster-core.dll" />
<File Id="PortmasterKextSys" Source="..\..\..\..\binary\portmaster-kext.sys" />
<File Id="WebView2Loader" Source="..\..\..\..\binary\WebView2Loader.dll" />
<File Id="PortmasterZip" Source="..\..\..\..\binary\portmaster.zip" />
<File Id="AssetsZip" Source="..\..\..\..\binary\assets.zip" />
</Component>
<Component Id="IntelFiles" Directory="IntelDir" Guid="0bb439f1-2075-45b0-95bf-78ed3dffeb69">
<File Id="IntelIndexJson" Source="..\..\..\..\intel\intel-index.json" />
<File Id="IntelIndexJson" Source="..\..\..\..\intel\index.json" />
<File Id="BaseDsdl" Source="..\..\..\..\intel\base.dsdl" />
<File Id="Geoipv4Mmdb" Source="..\..\..\..\intel\geoipv4.mmdb" />
<File Id="Geoipv6Mmdb" Source="..\..\..\..\intel\geoipv6.mmdb" />

View file

@ -3,15 +3,17 @@
SetOutPath "$INSTDIR"
File "..\..\..\..\binary\bin-index.json"
File "..\..\..\..\binary\index.json"
File "..\..\..\..\binary\portmaster-core.exe"
File "..\..\..\..\binary\portmaster-kext.sys"
File "..\..\..\..\binary\portmaster-core.dll"
File "..\..\..\..\binary\WebView2Loader.dll"
File "..\..\..\..\binary\portmaster.zip"
File "..\..\..\..\binary\assets.zip"
SetOutPath "$COMMONPROGRAMDATA\Portmaster\intel"
File "..\..\..\..\intel\intel-index.json"
File "..\..\..\..\intel\index.json"
File "..\..\..\..\intel\base.dsdl"
File "..\..\..\..\intel\geoipv4.mmdb"
File "..\..\..\..\intel\geoipv6.mmdb"
@ -25,7 +27,7 @@
!macroend
!macro NSIS_HOOK_POSTINSTALL
ExecWait 'sc.exe create PortmasterCore binPath= "$INSTDIR\portmaster-core.exe" --data="$COMMONPROGRAMDATA\Portmaster\data"'
ExecWait 'sc.exe create PortmasterCore binPath= "$INSTDIR\portmaster-core.exe --log-dir=%PROGRAMDATA%\Portmaster\logs"'
!macroend
!macro NSIS_HOOK_PREUNINSTALL

View file

@ -3,7 +3,7 @@
<Fragment>
<CustomAction Id="InstallPortmasterService"
Directory="INSTALLDIR"
ExeCommand="sc.exe create PortmasterCore binPath= &quot;[INSTALLDIR]portmaster-core.exe --data [CommonAppDataFolder]Portmaster\data&quot;"
ExeCommand="sc.exe create PortmasterCore binPath= &quot;[INSTALLDIR]portmaster-core.exe --log-dir=%PROGRAMDATA%\Portmaster\logs&quot;"
Execute="commit"
Return="check"
Impersonate="no"

8
go.mod
View file

@ -31,10 +31,13 @@ require (
github.com/gorilla/websocket v1.5.3
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-version v1.7.0
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
github.com/jackc/puddle/v2 v2.2.1
github.com/lmittmann/tint v1.0.5
github.com/maruel/panicparse/v2 v2.3.1
github.com/mat/besticon v3.12.0+incompatible
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.20
github.com/miekg/dns v1.1.62
github.com/mitchellh/copystructure v1.2.0
github.com/mitchellh/go-server-timing v1.0.1
@ -55,6 +58,7 @@ require (
github.com/tidwall/gjson v1.17.3
github.com/tidwall/sjson v1.2.5
github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26
github.com/varlink/go v0.4.0
github.com/vincent-petithory/dataurl v1.0.0
go.etcd.io/bbolt v1.3.10
golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa
@ -69,7 +73,9 @@ require (
require (
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect
github.com/aead/ecdh v0.2.0 // indirect
github.com/alessio/shellescape v1.4.2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/danieljoos/wincred v1.2.1 // indirect
github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@ -85,7 +91,6 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/native v1.1.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.8 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mdlayher/netlink v1.7.2 // indirect
github.com/mdlayher/socket v0.5.1 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
@ -107,6 +112,7 @@ require (
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zalando/go-keyring v0.2.5 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/mod v0.20.0 // indirect

19
go.sum
View file

@ -14,6 +14,8 @@ github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpY
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0=
github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@ -39,6 +41,8 @@ github.com/coreos/go-iptables v0.7.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFE
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs=
github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -129,6 +133,8 @@ github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKe
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v0.0.0-20170914154624-68e816d1c783/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM=
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
github.com/inconshreveable/log15 v0.0.0-20170622235902-74a0988b5f80/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@ -171,8 +177,11 @@ github.com/mat/besticon v3.12.0+incompatible h1:1KTD6wisfjfnX+fk9Kx/6VEZL+MAW1Lh
github.com/mat/besticon v3.12.0+incompatible/go.mod h1:mA1auQYHt6CW5e7L9HJLmqVQC8SzNk2gVwouO0AbiEU=
github.com/mattn/go-colorable v0.0.10-0.20170816031813-ad5389df28cd/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo=
@ -235,8 +244,6 @@ github.com/rot256/pblind v0.0.0-20240730113005-f3275049ead5 h1:R/qQ2Hw5/BgVQS87p
github.com/rot256/pblind v0.0.0-20240730113005-f3275049ead5/go.mod h1:NTdpGnZ/E2cKXTiAz824w1p6OIm0mBbXcyuiYPCi/Ps=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/safing/jess v0.3.4 h1:/p6ensqEUn2jI/z1EB9JUdwH4MJQirh/C9jEwNBzxw8=
github.com/safing/jess v0.3.4/go.mod h1:+B6UJnXVxi406Wk08SDnoC5NNBL7t3N0vZGokEbkVQI=
github.com/safing/jess v0.3.5 h1:KS5elTKfWcDUow8SUoCj5QdyyGJNoExJNySerNkbxUU=
github.com/safing/jess v0.3.5/go.mod h1:+B6UJnXVxi406Wk08SDnoC5NNBL7t3N0vZGokEbkVQI=
github.com/safing/structures v1.1.0 h1:QzHBQBjaZSLzw2f6PM4ibSmPcfBHAOB5CKJ+k4FYkhQ=
@ -269,6 +276,8 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spkg/zipfs v0.7.1 h1:+2X5lvNHTybnDMQZAIHgedRXZK1WXdc+94R/P5v2XWE=
github.com/spkg/zipfs v0.7.1/go.mod h1:48LW+/Rh1G7aAav1ew1PdlYn52T+LM+ARmSHfDNJvg8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@ -300,6 +309,8 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
github.com/varlink/go v0.4.0 h1:+/BQoUO9eJK/+MTSHwFcJch7TMsb6N6Dqp6g0qaXXRo=
github.com/varlink/go v0.4.0/go.mod h1:DKg9Y2ctoNkesREGAEak58l+jOC6JU2aqZvUYs5DynU=
github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=
github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
@ -314,6 +325,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zalando/go-keyring v0.2.5 h1:Bc2HHpjALryKD62ppdEzaFG6VxL6Bc+5v0LYpN8Lba8=
github.com/zalando/go-keyring v0.2.5/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk=
github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI=
@ -375,6 +388,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -403,6 +417,7 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View file

@ -35,7 +35,7 @@ CapabilityBoundingSet=cap_chown cap_kill cap_net_admin cap_net_bind_service cap_
StateDirectory=portmaster
# TODO(ppacher): add --disable-software-updates once it's merged and the release process changed.
WorkingDirectory=/var/lib/portmaster/data
ExecStart=/usr/lib/portmaster/portmaster-core --data /var/lib/portmaster/data -devmode -- $PORTMASTER_ARGS
ExecStart=/usr/lib/portmaster/portmaster-core --log-dir=/var/lib/portmaster/log -- $PORTMASTER_ARGS
ExecStopPost=-/usr/lib/portmaster/portmaster-core -recover-iptables
[Install]

View file

@ -19,11 +19,10 @@ if (-not (Test-Path -Path $binaryDir)) {
New-Item -ItemType Directory -Path $binaryDir > $null
}
Write-Output "Copying binary files"
Copy-Item -Force -Path "dist/binary/index.json" -Destination "$binaryDir/index.json"
Copy-Item -Force -Path "dist/binary/windows_amd64/portmaster-core.exe" -Destination "$binaryDir/portmaster-core.exe"
Copy-Item -Force -Path "dist/binary/windows_amd64/portmaster-kext.sys" -Destination "$binaryDir/portmaster-kext.sys"
Copy-Item -Force -Path "dist/download/windows_amd64/portmaster-core.exe" -Destination "$binaryDir/portmaster-core.exe"
Copy-Item -Force -Path "dist/download/windows_amd64/portmaster-kext.sys" -Destination "$binaryDir/portmaster-kext.sys"
Copy-Item -Force -Path "dist/download/windows_amd64/portmaster-kext.dll" -Destination "$binaryDir/portmaster-kext.dll"
Copy-Item -Force -Path "dist/binary/all/portmaster.zip" -Destination "$binaryDir/portmaster.zip"
Copy-Item -Force -Path "dist/binary/all/assets.zip" -Destination "$binaryDir/assets.zip"
@ -39,7 +38,7 @@ if (-not (Test-Path -Path $intelDir)) {
}
Write-Output "Copying intel files"
Copy-Item -Force -Path "dist/intel_decompressed/*" -Destination "$intelDir/"
Copy-Item -Force -Path "dist/intel/*" -Destination "$intelDir/"
Set-Location $destinationDir
@ -53,7 +52,8 @@ if (-not (Get-Command cargo -ErrorAction SilentlyContinue)) {
}
Write-Output "Downloading tauri-cli"
Invoke-WebRequest -Uri https://github.com/tauri-apps/tauri/releases/download/tauri-cli-v2.0.1/cargo-tauri-x86_64-pc-windows-msvc.zip -OutFile tauri-cli.zip
Invoke-WebRequest -Uri https://github.com/tauri-apps/tauri/releases/download/tauri-cli-v2.1.0/cargo-tauri-x86_64-pc-windows-msvc.zip -OutFile tauri-cli.zip
Expand-Archive -Force tauri-cli.zip
./tauri-cli/cargo-tauri.exe bundle

View file

@ -5,6 +5,7 @@ import (
"time"
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/service/core"
"github.com/safing/portmaster/service/intel/geoip"
"github.com/safing/portmaster/service/netenv"
"github.com/safing/portmaster/spn/access"
@ -17,19 +18,18 @@ var portmasterStarted = time.Now()
func collectData() interface{} {
data := make(map[string]interface{})
// TODO(vladimir)
// Get data about versions.
// versions := updates.GetSimpleVersions()
// data["Updates"] = versions
// data["Version"] = versions.Build.Version
// numericVersion, err := MakeNumericVersion(versions.Build.Version)
// if err != nil {
// data["NumericVersion"] = &DataError{
// Error: err,
// }
// } else {
// data["NumericVersion"] = numericVersion
// }
versions := core.GetSimpleVersions()
data["Updates"] = versions
data["Version"] = versions.Build.Version
numericVersion, err := MakeNumericVersion(versions.Build.Version)
if err != nil {
data["NumericVersion"] = &DataError{
Error: err,
}
} else {
data["NumericVersion"] = numericVersion
}
// Get data about install.
installInfo, err := GetInstallInfo()

View file

@ -43,10 +43,6 @@ var (
startOnce sync.Once
)
func init() {
// module = modules.Register("broadcasts", prep, start, nil, "updates", "netenv", "notifications")
}
func prep() error {
// Register API endpoints.
if err := registerAPIEndpoints(); err != nil {

View file

@ -21,7 +21,7 @@ import (
)
const (
broadcastsResourcePath = "intel/portmaster/notifications.yaml"
broadcastsResourceName = "notifications.yaml"
broadcastNotificationIDPrefix = "broadcasts:"
@ -67,7 +67,7 @@ type BroadcastNotification struct {
func broadcastNotify(ctx *mgr.WorkerCtx) error {
// Get broadcast notifications file, load it from disk and parse it.
broadcastsResource, err := module.instance.IntelUpdates().GetFile(broadcastsResourcePath)
broadcastsResource, err := module.instance.IntelUpdates().GetFile(broadcastsResourceName)
if err != nil {
return fmt.Errorf("failed to get broadcast notifications update: %w", err)
}

View file

@ -3,6 +3,7 @@ package compat
import (
"net"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/network/packet"
"github.com/safing/portmaster/service/process"
)
@ -31,10 +32,16 @@ func SubmitDNSCheckDomain(subdomain string) (respondWith net.IP) {
// ReportSecureDNSBypassIssue reports a DNS bypassing issue for the given process.
func ReportSecureDNSBypassIssue(p *process.Process) {
secureDNSBypassIssue.notify(p)
module.mgr.Go("report secure dns bypass issue", func(w *mgr.WorkerCtx) error {
secureDNSBypassIssue.notify(p)
return nil
})
}
// ReportMultiPeerUDPTunnelIssue reports a multi-peer UDP tunnel for the given process.
func ReportMultiPeerUDPTunnelIssue(p *process.Process) {
multiPeerUDPTunnelIssue.notify(p)
module.mgr.Go("report multi-peer udp tunnel issue", func(w *mgr.WorkerCtx) error {
multiPeerUDPTunnelIssue.notify(p)
return nil
})
}

View file

@ -181,4 +181,5 @@ func New(instance instance) (*Compat, error) {
type instance interface {
NetEnv() *netenv.NetEnv
Resolver() *resolver.ResolverModule
}

View file

@ -158,6 +158,12 @@ func selfcheck(ctx context.Context) (issue *systemIssue, err error) {
// Step 3: Have the nameserver respond with random data in the answer section.
// If the resolver is disabled, skip the rest of the check.
if module.instance.Resolver().IsDisabled() {
// There is no control over the response, so there is nothing more that can be checked.
return nil, nil
}
// Wait for the reply from the resolver.
select {
case err := <-dnsCheckLookupError:

View file

@ -9,6 +9,8 @@ import (
"github.com/safing/jess"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/configure"
"github.com/safing/portmaster/service/updates"
)
type ServiceConfig struct {
@ -76,11 +78,10 @@ func (sc *ServiceConfig) Init() error {
// Apply defaults for required fields.
if len(sc.BinariesIndexURLs) == 0 {
// FIXME: Select based on setting.
sc.BinariesIndexURLs = DefaultStableBinaryIndexURLs
sc.BinariesIndexURLs = configure.DefaultStableBinaryIndexURLs
}
if len(sc.IntelIndexURLs) == 0 {
sc.IntelIndexURLs = DefaultIntelIndexURLs
sc.IntelIndexURLs = configure.DefaultIntelIndexURLs
}
// Check log level.
@ -109,3 +110,71 @@ func getCurrentBinaryFolder() (string, error) {
return installDir, nil
}
func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateConfig *updates.Config, err error) {
switch runtime.GOOS {
case "windows":
binaryUpdateConfig = &updates.Config{
Name: "binaries",
Directory: svcCfg.BinDir,
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"),
PurgeDirectory: filepath.Join(svcCfg.BinDir, "upgrade_obsolete_binaries"),
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: svcCfg.BinariesIndexURLs, // May be changed by config during instance startup.
IndexFile: "index.json",
Verify: svcCfg.VerifyBinaryUpdates,
AutoCheck: true, // May be changed by config during instance startup.
AutoDownload: false,
AutoApply: false,
NeedsRestart: true,
Notify: true,
}
intelUpdateConfig = &updates.Config{
Name: "intel",
Directory: filepath.Join(svcCfg.DataDir, "intel"),
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"),
PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"),
IndexURLs: svcCfg.IntelIndexURLs,
IndexFile: "index.json",
Verify: svcCfg.VerifyIntelUpdates,
AutoCheck: true, // May be changed by config during instance startup.
AutoDownload: true,
AutoApply: true,
NeedsRestart: false,
Notify: false,
}
case "linux":
binaryUpdateConfig = &updates.Config{
Name: "binaries",
Directory: svcCfg.BinDir,
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"),
PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_binaries"),
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: svcCfg.BinariesIndexURLs, // May be changed by config during instance startup.
IndexFile: "index.json",
Verify: svcCfg.VerifyBinaryUpdates,
AutoCheck: true, // May be changed by config during instance startup.
AutoDownload: false,
AutoApply: false,
NeedsRestart: true,
Notify: true,
}
intelUpdateConfig = &updates.Config{
Name: "intel",
Directory: filepath.Join(svcCfg.DataDir, "intel"),
DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"),
PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"),
IndexURLs: svcCfg.IntelIndexURLs,
IndexFile: "index.json",
Verify: svcCfg.VerifyIntelUpdates,
AutoCheck: true, // May be changed by config during instance startup.
AutoDownload: true,
AutoApply: true,
NeedsRestart: false,
Notify: false,
}
}
return
}
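A minimal sketch of how these two configs might be obtained after initializing the service config; the directory values and the surrounding wiring are assumptions for illustration, not lifted from this change:

svcCfg := &ServiceConfig{
    BinDir:  "/usr/lib/portmaster",
    DataDir: "/var/lib/portmaster/data",
}
if err := svcCfg.Init(); err != nil {
    return err
}
binaryCfg, intelCfg, err := MakeUpdateConfigs(svcCfg)
if err != nil {
    return err
}
// binaryCfg: staged binary updates, needs a restart and notifies the user.
// intelCfg: intel data updates, downloaded and applied automatically.
_ = binaryCfg
_ = intelCfg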

View file

@ -0,0 +1,65 @@
package configure
import (
"github.com/safing/jess"
)
var (
DefaultStableBinaryIndexURLs = []string{
"https://updates.safing.io/stable.v3.json",
}
DefaultBetaBinaryIndexURLs = []string{
"https://updates.safing.io/beta.v3.json",
}
DefaultStagingBinaryIndexURLs = []string{
"https://updates.safing.io/staging.v3.json",
}
DefaultSupportBinaryIndexURLs = []string{
"https://updates.safing.io/support.v3.json",
}
DefaultIntelIndexURLs = []string{
"https://updates.safing.io/intel.v3.json",
}
// BinarySigningKeys holds the signing keys in text format.
BinarySigningKeys = []string{
// Safing Code Signing Key #1
"recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
// Safing Code Signing Key #2
"recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
}
// BinarySigningTrustStore is an in-memory trust store with the signing keys.
BinarySigningTrustStore = jess.NewMemTrustStore()
)
func init() {
for _, signingKey := range BinarySigningKeys {
rcpt, err := jess.RecipientFromTextFormat(signingKey)
if err != nil {
panic(err)
}
err = BinarySigningTrustStore.StoreSignet(rcpt)
if err != nil {
panic(err)
}
}
}
// GetBinaryUpdateURLs returns the correct binary update URLs for the given release channel.
// Silently falls back to stable if release channel is invalid.
func GetBinaryUpdateURLs(releaseChannel string) []string {
switch releaseChannel {
case "stable":
return DefaultStableBinaryIndexURLs
case "beta":
return DefaultBetaBinaryIndexURLs
case "staging":
return DefaultStagingBinaryIndexURLs
case "support":
return DefaultSupportBinaryIndexURLs
default:
return DefaultStableBinaryIndexURLs
}
}
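In short, the helper maps a release channel string to its index URLs and quietly treats anything unknown as stable. A brief usage sketch (variable names are illustrative):

urls := configure.GetBinaryUpdateURLs("beta")       // beta index URLs
fallback := configure.GetBinaryUpdateURLs("custom") // unknown channel, falls back to stable
_ = urls
_ = fallback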

View file

@ -1,19 +1,27 @@
package core
import (
"bytes"
"context"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
"time"
"github.com/ghodss/yaml"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/base/rng"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/base/utils/debug"
"github.com/safing/portmaster/service/compat"
"github.com/safing/portmaster/service/process"
@ -149,6 +157,17 @@ func registerAPIEndpoints() error {
return err
}
if err := api.RegisterEndpoint(api.Endpoint{
Name: "Get Resource",
Description: "Returns the requested resource from the update system",
Path: `updates/get/?{artifact_path:[A-Za-z0-9/\.\-_]{1,255}}/{artifact_name:[A-Za-z0-9\.\-_]{1,255}}`,
Read: api.PermitUser,
ReadMethod: http.MethodGet,
HandlerFunc: getUpdateResource,
}); err != nil {
return err
}
return nil
}
@ -170,6 +189,113 @@ func restart(_ *api.Request) (msg string, err error) {
return "restart initiated", nil
}
func getUpdateResource(w http.ResponseWriter, r *http.Request) {
// Get identifier from URL.
var identifier string
if ar := api.GetAPIRequest(r); ar != nil {
identifier = ar.URLVars["artifact_name"]
}
if identifier == "" {
http.Error(w, "no resource specified", http.StatusBadRequest)
return
}
// Get resource.
artifact, err := module.instance.BinaryUpdates().GetFile(identifier)
if err != nil {
intelArtifact, intelErr := module.instance.IntelUpdates().GetFile(identifier)
if intelErr == nil {
artifact = intelArtifact
} else {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
}
// Open file for reading.
file, err := os.Open(artifact.Path())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close() //nolint:errcheck,gosec
// Assign file to reader
var reader io.Reader = file
// Add version and hash to header.
if artifact.Version != "" {
w.Header().Set("Resource-Version", artifact.Version)
}
if artifact.SHA256 != "" {
w.Header().Set("Resource-SHA256", artifact.SHA256)
}
// Set Content-Type.
contentType, _ := utils.MimeTypeByExtension(filepath.Ext(artifact.Path()))
w.Header().Set("Content-Type", contentType)
// Check if the content type may be returned.
accept := r.Header.Get("Accept")
if accept != "" {
mimeTypes := strings.Split(accept, ",")
// First, clean mime types.
for i, mimeType := range mimeTypes {
mimeType = strings.TrimSpace(mimeType)
mimeType, _, _ = strings.Cut(mimeType, ";")
mimeTypes[i] = mimeType
}
// Second, check if we may return anything.
var acceptsAny bool
for _, mimeType := range mimeTypes {
switch mimeType {
case "*", "*/*":
acceptsAny = true
}
}
// Third, check if we can convert.
if !acceptsAny {
var converted bool
sourceType, _, _ := strings.Cut(contentType, ";")
findConvertiblePair:
for _, mimeType := range mimeTypes {
switch {
case sourceType == "application/yaml" && mimeType == "application/json":
yamlData, err := io.ReadAll(reader)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonData, err := yaml.YAMLToJSON(yamlData)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
reader = bytes.NewReader(jsonData)
converted = true
break findConvertiblePair
}
}
// If we could not convert to acceptable format, return an error.
if !converted {
http.Error(w, "conversion to requested format not supported", http.StatusNotAcceptable)
return
}
}
}
// Write file.
w.WriteHeader(http.StatusOK)
if r.Method != http.MethodHead {
_, err = io.Copy(w, reader)
if err != nil {
log.Errorf("updates: failed to serve resource file: %s", err)
return
}
}
}
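A client-side sketch of the negotiation above: asking for a YAML resource with Accept: application/json makes the endpoint convert it before responding. The exact URL path prefix and the resource name are assumptions for illustration; the listen address matches the default API address used elsewhere in this change:

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:817/updates/get/notifications.yaml", nil)
    if err != nil {
        panic(err)
    }
    req.Header.Set("Accept", "application/json") // triggers YAML -> JSON conversion server-side
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, _ := io.ReadAll(resp.Body)
    fmt.Println(resp.Header.Get("Resource-Version"), resp.Header.Get("Resource-SHA256"))
    fmt.Println(string(body))
}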
// debugInfo returns the debugging information for support requests.
func debugInfo(ar *api.Request) (data []byte, err error) {
// Create debug information helper.
@ -192,7 +318,7 @@ func debugInfo(ar *api.Request) (data []byte, err error) {
config.AddToDebugInfo(di)
// Detailed information.
// TODO(vladimir): updates.AddToDebugInfo(di)
AddVersionsToDebugInfo(di)
compat.AddToDebugInfo(di)
module.instance.AddWorkerInfoToDebugInfo(di)
di.AddGoroutineStack()

View file

@ -1,46 +0,0 @@
package base
import (
"errors"
"flag"
"fmt"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/service/mgr"
)
// Default Values (changeable for testing).
var (
DefaultAPIListenAddress = "127.0.0.1:817"
showVersion bool
)
func init() {
flag.BoolVar(&showVersion, "version", false, "show version and exit")
}
func prep(instance instance) error {
// check if meta info is ok
err := info.CheckVersion()
if err != nil {
return errors.New("compile error: please compile using the provided build script")
}
// print version
if showVersion {
instance.SetCmdLineOperation(printVersion)
return mgr.ErrExecuteCmdLineOp
}
// set api listen address
api.SetDefaultAPIListenAddress(DefaultAPIListenAddress)
return nil
}
func printVersion() error {
fmt.Println(info.FullVersion())
return nil
}

View file

@ -4,9 +4,13 @@ import (
"errors"
"sync/atomic"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/service/mgr"
)
// DefaultAPIListenAddress is the default listen address for the API.
var DefaultAPIListenAddress = "127.0.0.1:817"
// Base is the base module.
type Base struct {
mgr *mgr.Manager
@ -21,7 +25,7 @@ func (b *Base) Manager() *mgr.Manager {
// Start starts the module.
func (b *Base) Start() error {
startProfiling()
registerLogCleaner()
// registerLogCleaner()
return nil
}
@ -47,9 +51,9 @@ func New(instance instance) (*Base, error) {
instance: instance,
}
if err := prep(instance); err != nil {
return nil, err
}
// Set api listen address.
api.SetDefaultAPIListenAddress(DefaultAPIListenAddress)
if err := registerDatabases(); err != nil {
return nil, err
}

View file

@ -6,10 +6,11 @@ import (
"fmt"
"sync/atomic"
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/metrics"
"github.com/safing/portmaster/base/utils/debug"
_ "github.com/safing/portmaster/service/broadcasts"
"github.com/safing/portmaster/service/mgr"
_ "github.com/safing/portmaster/service/netenv"
_ "github.com/safing/portmaster/service/netquery"
@ -19,6 +20,11 @@ import (
"github.com/safing/portmaster/service/updates"
)
var db = database.NewInterface(&database.Options{
Local: true,
Internal: true,
})
// Core is the core service module.
type Core struct {
m *mgr.Manager
@ -56,8 +62,10 @@ func init() {
func prep() error {
// init config
err := registerConfig()
if err != nil {
if err := registerConfig(); err != nil {
return err
}
if err := registerUpdateConfig(); err != nil {
return err
}
@ -77,6 +85,10 @@ func start() error {
return fmt.Errorf("failed to start plattform-specific components: %w", err)
}
// Setup update system.
initUpdateConfig()
initVersionExport()
// Enable persistent metrics.
if err := metrics.EnableMetricPersistence("core:metrics/storage"); err != nil {
log.Warningf("core: failed to enable persisted metrics: %s", err)
@ -116,6 +128,7 @@ type instance interface {
Shutdown()
Restart()
AddWorkerInfoToDebugInfo(di *debug.Info)
Config() *config.Config
BinaryUpdates() *updates.Updater
IntelUpdates() *updates.Updater
}

View file

@ -0,0 +1,134 @@
package core
import (
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/service/configure"
"github.com/safing/portmaster/service/mgr"
)
// Release Channel Configuration Keys.
const (
ReleaseChannelKey = "core/releaseChannel"
ReleaseChannelJSONKey = "core.releaseChannel"
)
// Release Channels.
const (
ReleaseChannelStable = "stable"
ReleaseChannelBeta = "beta"
ReleaseChannelStaging = "staging"
ReleaseChannelSupport = "support"
)
const (
enableSoftwareUpdatesKey = "core/automaticUpdates"
enableIntelUpdatesKey = "core/automaticIntelUpdates"
)
var (
releaseChannel config.StringOption
enableSoftwareUpdates config.BoolOption
enableIntelUpdates config.BoolOption
initialReleaseChannel string
)
func registerUpdateConfig() error {
err := config.Register(&config.Option{
Name: "Release Channel",
Key: ReleaseChannelKey,
Description: `Use "Stable" for the best experience. The "Beta" channel will have the newest features and fixes, but may also break and cause interruption. Use others only temporarily and when instructed.`,
OptType: config.OptTypeString,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: true,
DefaultValue: ReleaseChannelStable,
PossibleValues: []config.PossibleValue{
{
Name: "Stable",
Description: "Production releases.",
Value: ReleaseChannelStable,
},
{
Name: "Beta",
Description: "Production releases for testing new features that may break and cause interruption.",
Value: ReleaseChannelBeta,
},
{
Name: "Support",
Description: "Support releases or version changes for troubleshooting. Only use temporarily and when instructed.",
Value: ReleaseChannelSupport,
},
{
Name: "Staging",
Description: "Dangerous development releases for testing random things and experimenting. Only use temporarily and when instructed.",
Value: ReleaseChannelStaging,
},
},
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -4,
config.DisplayHintAnnotation: config.DisplayHintOneOf,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
err = config.Register(&config.Option{
Name: "Automatic Software Updates",
Key: enableSoftwareUpdatesKey,
Description: "Automatically check for and download software updates. This does not include intelligence data updates.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -12,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
err = config.Register(&config.Option{
Name: "Automatic Intelligence Data Updates",
Key: enableIntelUpdatesKey,
Description: "Automatically check for and download intelligence data updates. This includes filter lists, geo-ip data, and more. Does not include software updates.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -11,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
return nil
}
func initUpdateConfig() {
releaseChannel = config.Concurrent.GetAsString(ReleaseChannelKey, ReleaseChannelStable)
enableSoftwareUpdates = config.Concurrent.GetAsBool(enableSoftwareUpdatesKey, true)
enableIntelUpdates = config.Concurrent.GetAsBool(enableIntelUpdatesKey, true)
initialReleaseChannel = releaseChannel()
module.instance.Config().EventConfigChange.AddCallback("configure updates", func(wc *mgr.WorkerCtx, s struct{}) (cancel bool, err error) {
configureUpdates()
return false, nil
})
configureUpdates()
}
func configureUpdates() {
module.instance.BinaryUpdates().Configure(enableSoftwareUpdates(), configure.GetBinaryUpdateURLs(releaseChannel()))
module.instance.IntelUpdates().Configure(enableIntelUpdates(), configure.DefaultIntelIndexURLs)
}

View file

@ -0,0 +1,176 @@
package core
import (
"bytes"
"fmt"
"sync"
"text/tabwriter"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/utils/debug"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
const (
// versionsDBKey is the database key for update version information.
versionsDBKey = "core:status/versions"
// simpleVersionsDBKey is the database key for simplified update version information.
simpleVersionsDBKey = "core:status/simple-versions"
)
// Versions holds update versions and status information.
type Versions struct {
record.Base
sync.Mutex
Core *info.Info
Resources map[string]*updates.Artifact
Channel string
Beta bool
Staging bool
}
// SimpleVersions holds simplified update versions and status information.
type SimpleVersions struct {
record.Base
sync.Mutex
Build *info.Info
Resources map[string]*SimplifiedResourceVersion
Channel string
}
// SimplifiedResourceVersion holds version information about one resource.
type SimplifiedResourceVersion struct {
Version string
}
// GetVersions returns the update versions and status information.
// Resources must be locked when accessed.
func GetVersions() *Versions {
// Get all artifacts.
resources := make(map[string]*updates.Artifact)
if artifacts, err := module.instance.BinaryUpdates().GetFiles(); err == nil {
for _, artifact := range artifacts {
resources[artifact.Filename] = artifact
}
}
if artifacts, err := module.instance.IntelUpdates().GetFiles(); err == nil {
for _, artifact := range artifacts {
resources[artifact.Filename] = artifact
}
}
return &Versions{
Core: info.GetInfo(),
Resources: resources,
Channel: initialReleaseChannel,
Beta: initialReleaseChannel == ReleaseChannelBeta,
Staging: initialReleaseChannel == ReleaseChannelStaging,
}
}
// GetSimpleVersions returns the simplified update versions and status information.
func GetSimpleVersions() *SimpleVersions {
// Get all artifacts and map them to simplified versions.
resources := make(map[string]*SimplifiedResourceVersion)
if artifacts, err := module.instance.BinaryUpdates().GetFiles(); err == nil {
for _, artifact := range artifacts {
resources[artifact.Filename] = &SimplifiedResourceVersion{
Version: artifact.Version,
}
}
}
if artifacts, err := module.instance.IntelUpdates().GetFiles(); err == nil {
for _, artifact := range artifacts {
resources[artifact.Filename] = &SimplifiedResourceVersion{
Version: artifact.Version,
}
}
}
// Fill base info.
return &SimpleVersions{
Build: info.GetInfo(),
Resources: resources,
Channel: initialReleaseChannel,
}
}
func initVersionExport() {
module.instance.BinaryUpdates().EventResourcesUpdated.AddCallback("export version status", export)
module.instance.IntelUpdates().EventResourcesUpdated.AddCallback("export version status", export)
_, _ = export(nil, struct{}{})
}
func (v *Versions) save() error {
if !v.KeyIsSet() {
v.SetKey(versionsDBKey)
}
return db.Put(v)
}
func (v *SimpleVersions) save() error {
if !v.KeyIsSet() {
v.SetKey(simpleVersionsDBKey)
}
return db.Put(v)
}
// export is an event hook.
func export(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// Export versions.
if err := GetVersions().save(); err != nil {
return false, err
}
if err := GetSimpleVersions().save(); err != nil {
return false, err
}
return false, nil
}
// AddVersionsToDebugInfo adds the update system status to the given debug.Info.
func AddVersionsToDebugInfo(di *debug.Info) {
overviewBuf := bytes.NewBuffer(nil)
tableBuf := bytes.NewBuffer(nil)
tabWriter := tabwriter.NewWriter(tableBuf, 8, 4, 3, ' ', 0)
fmt.Fprint(tabWriter, "\nFile\tVersion\tIndex\tSHA256\n")
// Collect data for debug info.
var cnt int
if index, err := module.instance.BinaryUpdates().GetIndex(); err == nil {
fmt.Fprintf(overviewBuf, "Binaries Index: v%s from %s\n", index.Version, index.Published)
for _, artifact := range index.Artifacts {
fmt.Fprintf(tabWriter, "\n%s\t%s\t%s\t%s", artifact.Filename, vStr(artifact.Version), "binaries", artifact.SHA256)
cnt++
}
}
if index, err := module.instance.IntelUpdates().GetIndex(); err == nil {
fmt.Fprintf(overviewBuf, "Intel Index: v%s from %s\n", index.Version, index.Published)
for _, artifact := range index.Artifacts {
fmt.Fprintf(tabWriter, "\n%s\t%s\t%s\t%s", artifact.Filename, vStr(artifact.Version), "intel", artifact.SHA256)
cnt++
}
}
_ = tabWriter.Flush()
// Add section.
di.AddSection(
fmt.Sprintf("Updates: %s (%d)", initialReleaseChannel, cnt),
debug.UseCodeSection,
overviewBuf.String(),
tableBuf.String(),
)
}
func vStr(v string) string {
if v != "" {
return v
}
return "unknown"
}

View file

@ -43,8 +43,24 @@ func PreventBypassing(ctx context.Context, conn *network.Connection) (endpoints.
return endpoints.NoMatch, "", nil
}
// If the Portmaster resolver is disabled, allow requests going to the system DNS resolver,
// and allow all connections originating from the system resolver.
if module.instance.Resolver().IsDisabled() {
// TODO(vladimir): Is there a more specific check that can be done?
if conn.Process().IsSystemResolver() {
return endpoints.NoMatch, "", nil
}
if conn.Entity.Port == 53 && conn.Entity.IPScope.IsLocalhost() {
return endpoints.NoMatch, "", nil
}
}
// Block bypass attempts using an (encrypted) DNS server.
switch {
case looksLikeOutgoingDNSRequest(conn) && module.instance.Resolver().IsDisabled():
// Allow. The packet will be analyzed before it is sent and blocked if it is not a DNS request.
conn.Inspecting = true
return endpoints.NoMatch, "", nil
case conn.Entity.Port == 53:
return endpoints.Denied,
"blocked DNS query, manual dns setup required",
@ -62,3 +78,17 @@ func PreventBypassing(ctx context.Context, conn *network.Connection) (endpoints.
return endpoints.NoMatch, "", nil
}
func looksLikeOutgoingDNSRequest(conn *network.Connection) bool {
// Outbound on remote port 53, UDP.
if conn.Inbound {
return false
}
if conn.Entity.Port != 53 {
return false
}
if conn.IPProtocol != packet.UDP {
return false
}
return true
}

View file

@ -287,6 +287,30 @@ func UpdateIPsAndCNAMEs(q *resolver.Query, rrCache *resolver.RRCache, conn *netw
}
}
// Create a new record for this resolved domain.
record := resolver.ResolvedDomain{
Domain: q.FQDN,
Resolver: rrCache.Resolver,
DNSRequestContext: rrCache.ToDNSRequestContext(),
Expires: rrCache.Expires,
}
// Process CNAMEs
record.AddCNAMEs(cnames)
// Link connection with cnames.
if conn.Type == network.DNSRequest {
conn.Entity.CNAME = record.CNAMEs
}
SaveIPsInCache(ips, profileID, record)
}
// formatRR is a friendlier alternative to miekg/dns.RR.String().
func formatRR(rr dns.RR) string {
return strings.ReplaceAll(rr.String(), "\t", " ")
}
// SaveIPsInCache saves the provided IPs in the DNS cache, associated with the record's Domain and CNAMEs.
func SaveIPsInCache(ips []net.IP, profileID string, record resolver.ResolvedDomain) {
// Package IPs and CNAMEs into IPInfo structs.
for _, ip := range ips {
// Never save domain attributions for localhost IPs.
@ -294,31 +318,6 @@ func UpdateIPsAndCNAMEs(q *resolver.Query, rrCache *resolver.RRCache, conn *netw
continue
}
// Create new record for this IP.
record := resolver.ResolvedDomain{
Domain: q.FQDN,
Resolver: rrCache.Resolver,
DNSRequestContext: rrCache.ToDNSRequestContext(),
Expires: rrCache.Expires,
}
// Resolve all CNAMEs in the correct order and add them to the record.
domain := q.FQDN
for {
nextDomain, isCNAME := cnames[domain]
if !isCNAME {
break
}
record.CNAMEs = append(record.CNAMEs, nextDomain)
domain = nextDomain
}
// Update the entity to include the CNAMEs of the query response.
conn.Entity.CNAME = record.CNAMEs
// Check if there is an existing record for this DNS response.
// Else create a new one.
ipString := ip.String()
info, err := resolver.GetIPInfo(profileID, ipString)
if err != nil {
@ -341,8 +340,3 @@ func UpdateIPsAndCNAMEs(q *resolver.Query, rrCache *resolver.RRCache, conn *netw
}
}
}
// formatRR is a friendlier alternative to miekg/dns.RR.String().
func formatRR(rr dns.RR) string {
return strings.ReplaceAll(rr.String(), "\t", " ")
}

View file

@ -0,0 +1,109 @@
//go:build windows
// +build windows
package dnsmonitor
import (
"fmt"
"runtime"
"sync"
"sync/atomic"
"github.com/safing/portmaster/service/integration"
"golang.org/x/sys/windows"
)
type ETWSession struct {
i *integration.ETWFunctions
shutdownGuard atomic.Bool
shutdownMutex sync.Mutex
state uintptr
}
// NewSession creates a new ETW event listener and initializes it. This is a low-level interface; make sure to call DestroySession when you are done using it.
func NewSession(etwInterface *integration.ETWFunctions, callback func(domain string, pid uint32, result string)) (*ETWSession, error) {
if etwInterface == nil {
return nil, fmt.Errorf("etw interface was nil")
}
etwSession := &ETWSession{
i: etwInterface,
}
// Make sure sessions from previous instances are not running.
_ = etwSession.i.StopOldSession()
// Initialize notification activated callback
win32Callback := windows.NewCallback(func(domain *uint16, pid uint32, result *uint16) uintptr {
callback(windows.UTF16PtrToString(domain), pid, windows.UTF16PtrToString(result))
return 0
})
// The function only allocates memory; it will not fail.
etwSession.state = etwSession.i.CreateState(win32Callback)
// Make sure DestroySession is called even if caller forgets to call it.
runtime.SetFinalizer(etwSession, func(s *ETWSession) {
_ = s.i.DestroySession(s.state)
})
// Initialize session.
err := etwSession.i.InitializeSession(etwSession.state)
if err != nil {
return nil, fmt.Errorf("failed to initialize session: %q", err)
}
return etwSession, nil
}
// StartTrace starts the tracing session for DNS events. This is a blocking call; it will not return until the trace is stopped.
func (l *ETWSession) StartTrace() error {
return l.i.StartTrace(l.state)
}
// IsRunning returns true if DestroySession has NOT been called.
func (l *ETWSession) IsRunning() bool {
return !l.shutdownGuard.Load()
}
// FlushTrace flushes the trace buffer.
func (l *ETWSession) FlushTrace() error {
if l.i == nil {
return fmt.Errorf("session not initialized")
}
l.shutdownMutex.Lock()
defer l.shutdownMutex.Unlock()
// Make sure session is still running.
if l.shutdownGuard.Load() {
return nil
}
return l.i.FlushTrace(l.state)
}
// StopTrace stops the trace. This will cause StartTrace to return.
func (l *ETWSession) StopTrace() error {
return l.i.StopTrace(l.state)
}
// DestroySession closes the session and frees the allocated memory. Listener cannot be used after this function is called.
func (l *ETWSession) DestroySession() error {
if l.i == nil {
return fmt.Errorf("session not initialized")
}
l.shutdownMutex.Lock()
defer l.shutdownMutex.Unlock()
if l.shutdownGuard.Swap(true) {
return nil
}
err := l.i.DestroySession(l.state)
if err != nil {
return err
}
l.state = 0
return nil
}

View file
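A minimal lifecycle sketch for the ETW session type above, assumed to live in the dnsmonitor package alongside the listener (so the integration and log imports are already available); the callback body and error handling are illustrative, and the *integration.ETWFunctions value is assumed to come from the OS integration module:

func runDNSTrace(etw *integration.ETWFunctions) error {
	// Create the session with a callback that receives parsed DNS events.
	session, err := NewSession(etw, func(domain string, pid uint32, result string) {
		log.Debugf("dnsmonitor: pid %d resolved %s -> %s", pid, domain, result)
	})
	if err != nil {
		return err
	}

	// StartTrace blocks until the trace is stopped, so run it in a goroutine.
	go func() {
		if err := session.StartTrace(); err != nil {
			log.Errorf("dnsmonitor: trace ended with error: %s", err)
		}
	}()

	// ... later, on shutdown: stop the trace and free the native state.
	if err := session.StopTrace(); err != nil {
		log.Errorf("dnsmonitor: failed to stop trace: %s", err)
	}
	return session.DestroySession()
}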

@ -0,0 +1,19 @@
//go:build !linux && !windows
// +build !linux,!windows
package dnsmonitor
type Listener struct{}
func newListener(_ *DNSMonitor) (*Listener, error) {
return &Listener{}, nil
}
func (l *Listener) flush() error {
// Nothing to flush
return nil
}
func (l *Listener) stop() error {
return nil
}

View file

@ -0,0 +1,145 @@
//go:build linux
// +build linux
package dnsmonitor
import (
"errors"
"fmt"
"net"
"os"
"github.com/miekg/dns"
"github.com/varlink/go/varlink"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/resolver"
)
type Listener struct {
varlinkConn *varlink.Connection
}
func newListener(module *DNSMonitor) (*Listener, error) {
// Set source of the resolver.
ResolverInfo.Source = resolver.ServerSourceSystemd
// Check if the system has systemd-resolver.
_, err := os.Stat("/run/systemd/resolve/io.systemd.Resolve.Monitor")
if err != nil {
return nil, fmt.Errorf("system does not support systemd resolver monitor")
}
listener := &Listener{}
restartAttempts := 0
module.mgr.Go("systemd-resolver-event-listener", func(w *mgr.WorkerCtx) error {
// Abort initialization if the connection failed after too many tries.
if restartAttempts > 10 {
return nil
}
restartAttempts += 1
// Initialize varlink connection
varlinkConn, err := varlink.NewConnection(module.mgr.Ctx(), "unix:/run/systemd/resolve/io.systemd.Resolve.Monitor")
if err != nil {
return fmt.Errorf("failed to connect to systemd-resolver varlink service: %w", err)
}
defer func() {
if varlinkConn != nil {
err = varlinkConn.Close()
if err != nil {
log.Errorf("dnsmonitor: failed to close varlink connection: %s", err)
}
}
}()
listener.varlinkConn = varlinkConn
// Subscribe to the dns query events
receive, err := listener.varlinkConn.Send(w.Ctx(), "io.systemd.Resolve.Monitor.SubscribeQueryResults", nil, varlink.More)
if err != nil {
var varlinkErr *varlink.Error
if errors.As(err, &varlinkErr) {
return fmt.Errorf("failed to issue Varlink call: %+v", varlinkErr.Parameters)
} else {
return fmt.Errorf("failed to issue Varlink call: %w", err)
}
}
for {
queryResult := QueryResult{}
// Receive the next event from the resolver.
flags, err := receive(w.Ctx(), &queryResult)
if err != nil {
var varlinkErr *varlink.Error
if errors.As(err, &varlinkErr) {
return fmt.Errorf("failed to receive Varlink reply: %+v", varlinkErr.Parameters)
} else {
return fmt.Errorf("failed to receive Varlink reply: %w", err)
}
}
// Check if the reply indicates the end of the stream
if flags&varlink.Continues == 0 {
break
}
// Ignore if there is no question.
if queryResult.Question == nil || len(*queryResult.Question) == 0 {
continue
}
// Portmaster self-check.
domain := (*queryResult.Question)[0].Name
if processIfSelfCheckDomain(dns.Fqdn(domain)) {
// No need to process the result.
continue
}
if queryResult.Rcode != nil {
continue // Ignore DNS errors
}
listener.processAnswer(domain, &queryResult)
}
return nil
})
return listener, nil
}
func (l *Listener) flush() error {
// Nothing to flush
return nil
}
func (l *Listener) stop() error {
return nil
}
func (l *Listener) processAnswer(domain string, queryResult *QueryResult) {
// Allocate data structures for the parsed result.
cnames := make(map[string]string)
ips := make([]net.IP, 0, 5)
// Check if the query is valid
if queryResult.Answer == nil {
return
}
// Go through each answer entry.
for _, a := range *queryResult.Answer {
if a.RR.Address != nil {
ip := net.IP(*a.RR.Address)
// Answer contains ip address.
ips = append(ips, ip)
} else if a.RR.Name != nil {
// Answer is a CNAME.
cnames[domain] = *a.RR.Name
}
}
saveDomain(domain, ips, cnames, resolver.IPInfoProfileScopeGlobal)
}

View file

@ -0,0 +1,130 @@
//go:build windows
// +build windows
package dnsmonitor
import (
"context"
"fmt"
"net"
"strconv"
"strings"
"github.com/miekg/dns"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/process"
"github.com/safing/portmaster/service/resolver"
)
type Listener struct {
etw *ETWSession
}
func newListener(module *DNSMonitor) (*Listener, error) {
// Set source of the resolver.
ResolverInfo.Source = resolver.ServerSourceETW
listener := &Listener{}
// Initialize new dns event session.
err := initializeSessions(module, listener)
if err != nil {
// Listen for the event that is emitted once the DLL has been loaded.
module.instance.OSIntegration().OnInitializedEvent.AddCallback("loader-listener", func(wc *mgr.WorkerCtx, s struct{}) (cancel bool, err error) {
err = initializeSessions(module, listener)
if err != nil {
return false, err
}
return true, nil
})
}
return listener, nil
}
func initializeSessions(module *DNSMonitor, listener *Listener) error {
var err error
listener.etw, err = NewSession(module.instance.OSIntegration().GetETWInterface(), listener.processEvent)
if err != nil {
return err
}
// Start listener
module.mgr.Go("etw-dns-event-listener", func(w *mgr.WorkerCtx) error {
return listener.etw.StartTrace()
})
return nil
}
func (l *Listener) flush() error {
if l.etw == nil {
return fmt.Errorf("etw not initialized")
}
return l.etw.FlushTrace()
}
func (l *Listener) stop() error {
if l == nil {
return fmt.Errorf("listener is nil")
}
if l.etw == nil {
return fmt.Errorf("invalid etw session")
}
// Stop and destroy trace. Destroy should be called even if stop fails for some reason.
err := l.etw.StopTrace()
err2 := l.etw.DestroySession()
if err != nil {
return fmt.Errorf("StopTrace failed: %w", err)
}
if err2 != nil {
return fmt.Errorf("DestroySession failed: %w", err2)
}
return nil
}
func (l *Listener) processEvent(domain string, pid uint32, result string) {
if processIfSelfCheckDomain(dns.Fqdn(domain)) {
// No need to process the result.
return
}
// Ignore empty results
if len(result) == 0 {
return
}
profileScope := resolver.IPInfoProfileScopeGlobal
// Get the profile ID if the process can be found
if proc, err := process.GetOrFindProcess(context.Background(), int(pid)); err == nil {
if profile := proc.Profile(); profile != nil {
if localProfile := profile.LocalProfile(); localProfile != nil {
profileScope = localProfile.ID
}
}
}
cnames := make(map[string]string)
ips := []net.IP{}
resultArray := strings.Split(result, ";")
for _, r := range resultArray {
// For results other than IP addresses, the string starts with "type:"
if strings.HasPrefix(r, "type:") {
dnsValueArray := strings.Split(r, " ")
if len(dnsValueArray) < 3 {
continue
}
// Ignore everything except CNAME records
if value, err := strconv.ParseInt(dnsValueArray[1], 10, 16); err == nil && value == int64(dns.TypeCNAME) {
cnames[domain] = dnsValueArray[2]
}
} else {
// If the event doesn't start with "type:", it's an IP address
ip := net.ParseIP(r)
if ip != nil {
ips = append(ips, ip)
}
}
}
saveDomain(domain, ips, cnames, profileScope)
}
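// Illustrative result format, inferred from the parsing above rather than from
// a documented contract: entries are separated by ";", plain entries are IP
// addresses, and other records are encoded as "type: <qtype> <value>". For
// example (hypothetical values):
//   "type: 5 edge.example-cdn.net;93.184.216.34;2606:2800:21f:cb07:6820:80da:af6b:8b2c"
// where 5 is dns.TypeCNAME, so the CNAME is recorded and both IPs are saved.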

View file

@ -0,0 +1,139 @@
package dnsmonitor
import (
"errors"
"net"
"strings"
"github.com/miekg/dns"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/compat"
"github.com/safing/portmaster/service/integration"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/network/netutils"
"github.com/safing/portmaster/service/resolver"
)
var ResolverInfo = resolver.ResolverInfo{
Name: "SystemResolver",
Type: resolver.ServerTypeMonitor,
}
type DNSMonitor struct {
instance instance
mgr *mgr.Manager
listener *Listener
}
// Manager returns the module manager.
func (dl *DNSMonitor) Manager() *mgr.Manager {
return dl.mgr
}
// Start starts the module.
func (dl *DNSMonitor) Start() error {
// Initialize dns event listener
var err error
dl.listener, err = newListener(dl)
if err != nil {
log.Warningf("dnsmonitor: failed to start dns listener: %s", err)
}
return nil
}
// Stop stops the module.
func (dl *DNSMonitor) Stop() error {
if dl.listener != nil {
err := dl.listener.stop()
if err != nil {
log.Errorf("dnsmonitor: failed to close listener: %s", err)
}
}
return nil
}
// Flush flushes the buffer forcing all events to be processed.
func (dl *DNSMonitor) Flush() error {
return dl.listener.flush()
}
func saveDomain(domain string, ips []net.IP, cnames map[string]string, profileScope string) {
fqdn := dns.Fqdn(domain)
// Create a new record for this domain.
record := resolver.ResolvedDomain{
Domain: fqdn,
Resolver: &ResolverInfo,
DNSRequestContext: &resolver.DNSRequestContext{},
Expires: 0,
}
// Process cnames
record.AddCNAMEs(cnames)
// Add to cache
saveIPsInCache(ips, profileScope, record)
}
func New(instance instance) (*DNSMonitor, error) {
// Initialize module
m := mgr.New("DNSMonitor")
module := &DNSMonitor{
mgr: m,
instance: instance,
}
return module, nil
}
type instance interface {
OSIntegration() *integration.OSIntegration
}
func processIfSelfCheckDomain(fqdn string) bool {
// Check for compat check dns request.
if strings.HasSuffix(fqdn, compat.DNSCheckInternalDomainScope) {
subdomain := strings.TrimSuffix(fqdn, compat.DNSCheckInternalDomainScope)
_ = compat.SubmitDNSCheckDomain(subdomain)
log.Infof("dnsmonitor: self-check domain received")
// No need to parse the answer.
return true
}
return false
}
// saveIPsInCache saves the provided IPs in the DNS cache, associated with the record's Domain and CNAMEs.
func saveIPsInCache(ips []net.IP, profileID string, record resolver.ResolvedDomain) {
// Package IPs and CNAMEs into IPInfo structs.
for _, ip := range ips {
// Never save domain attributions for localhost IPs.
if netutils.GetIPScope(ip) == netutils.HostLocal {
continue
}
ipString := ip.String()
info, err := resolver.GetIPInfo(profileID, ipString)
if err != nil {
if !errors.Is(err, database.ErrNotFound) {
log.Errorf("dnsmonitor: failed to search for IP info record: %s", err)
}
info = &resolver.IPInfo{
IP: ipString,
ProfileID: profileID,
}
}
// Add the new record to the resolved domains for this IP and scope.
info.AddDomain(record)
// Save if the record is new or has been updated.
if err := info.Save(); err != nil {
log.Errorf("dnsmonitor: failed to save IP info record: %s", err)
}
}
}

View file

@ -0,0 +1,83 @@
//go:build linux
// +build linux
package dnsmonitor
// List of structs that define the systemd-resolver varlink DNS event protocol.
// Source: `sudo varlinkctl introspect /run/systemd/resolve/io.systemd.Resolve.Monitor io.systemd.Resolve.Monitor`
type ResourceKey struct {
Class int `json:"class"`
Type int `json:"type"`
Name string `json:"name"`
}
type ResourceRecord struct {
Key ResourceKey `json:"key"`
Name *string `json:"name,omitempty"`
Address *[]byte `json:"address,omitempty"`
// Rest of the fields are not used.
// Priority *int `json:"priority,omitempty"`
// Weight *int `json:"weight,omitempty"`
// Port *int `json:"port,omitempty"`
// CPU *string `json:"cpu,omitempty"`
// OS *string `json:"os,omitempty"`
// Items *[]string `json:"items,omitempty"`
// MName *string `json:"mname,omitempty"`
// RName *string `json:"rname,omitempty"`
// Serial *int `json:"serial,omitempty"`
// Refresh *int `json:"refresh,omitempty"`
// Expire *int `json:"expire,omitempty"`
// Minimum *int `json:"minimum,omitempty"`
// Exchange *string `json:"exchange,omitempty"`
// Version *int `json:"version,omitempty"`
// Size *int `json:"size,omitempty"`
// HorizPre *int `json:"horiz_pre,omitempty"`
// VertPre *int `json:"vert_pre,omitempty"`
// Latitude *int `json:"latitude,omitempty"`
// Longitude *int `json:"longitude,omitempty"`
// Altitude *int `json:"altitude,omitempty"`
// KeyTag *int `json:"key_tag,omitempty"`
// Algorithm *int `json:"algorithm,omitempty"`
// DigestType *int `json:"digest_type,omitempty"`
// Digest *string `json:"digest,omitempty"`
// FPType *int `json:"fptype,omitempty"`
// Fingerprint *string `json:"fingerprint,omitempty"`
// Flags *int `json:"flags,omitempty"`
// Protocol *int `json:"protocol,omitempty"`
// DNSKey *string `json:"dnskey,omitempty"`
// Signer *string `json:"signer,omitempty"`
// TypeCovered *int `json:"type_covered,omitempty"`
// Labels *int `json:"labels,omitempty"`
// OriginalTTL *int `json:"original_ttl,omitempty"`
// Expiration *int `json:"expiration,omitempty"`
// Inception *int `json:"inception,omitempty"`
// Signature *string `json:"signature,omitempty"`
// NextDomain *string `json:"next_domain,omitempty"`
// Types *[]int `json:"types,omitempty"`
// Iterations *int `json:"iterations,omitempty"`
// Salt *string `json:"salt,omitempty"`
// Hash *string `json:"hash,omitempty"`
// CertUsage *int `json:"cert_usage,omitempty"`
// Selector *int `json:"selector,omitempty"`
// MatchingType *int `json:"matching_type,omitempty"`
// Data *string `json:"data,omitempty"`
// Tag *string `json:"tag,omitempty"`
// Value *string `json:"value,omitempty"`
}
type Answer struct {
RR *ResourceRecord `json:"rr,omitempty"`
Raw string `json:"raw"`
IfIndex *int `json:"ifindex,omitempty"`
}
type QueryResult struct {
Ready *bool `json:"ready,omitempty"`
State *string `json:"state,omitempty"`
Rcode *int `json:"rcode,omitempty"`
Errno *int `json:"errno,omitempty"`
Question *[]ResourceKey `json:"question,omitempty"`
CollectedQuestions *[]ResourceKey `json:"collectedQuestions,omitempty"`
Answer *[]Answer `json:"answer,omitempty"`
}
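// Illustrative (hypothetical) event shape matching the fields above; the
// authoritative schema is the introspection output referenced at the top of
// this file:
//
//  {
//    "question": [{"class": 1, "type": 28, "name": "example.com"}],
//    "rcode": 0,
//    "answer": [
//      {"rr": {"key": {"class": 1, "type": 5, "name": "example.com"}, "name": "cdn.example.net"}, "raw": "..."}
//    ]
//  }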

View file

@ -188,7 +188,7 @@ func (q *Queue) packetHandler(ctx context.Context) func(nfqueue.Attribute) int {
return 0
}
if err := pmpacket.Parse(*attrs.Payload, &pkt.Base); err != nil {
if err := pmpacket.ParseLayer3(*attrs.Payload, &pkt.Base); err != nil {
log.Warningf("nfqueue: failed to parse payload: %s", err)
_ = pkt.Drop()
return 0

View file

@ -59,7 +59,7 @@ func (pkt *Packet) LoadPacketData() error {
return packet.ErrFailedToLoadPayload
}
err = packet.Parse(payload, &pkt.Base)
err = packet.ParseLayer3(payload, &pkt.Base)
if err != nil {
log.Tracer(pkt.Ctx()).Warningf("windowskext: failed to parse payload: %s", err)
return packet.ErrFailedToLoadPayload

View file

@ -55,6 +55,7 @@ func Handler(ctx context.Context, packets chan packet.Packet, bandwidthUpdate ch
newPacket := &Packet{
verdictRequest: conn.ID,
payload: conn.Payload,
payloadLayer: conn.PayloadLayer,
verdictSet: abool.NewBool(false),
}
info := newPacket.Info()

View file

@ -4,6 +4,7 @@
package windowskext
import (
"fmt"
"sync"
"github.com/tevino/abool"
@ -19,6 +20,7 @@ type Packet struct {
verdictRequest uint64
payload []byte
payloadLayer uint8
verdictSet *abool.AtomicBool
payloadLoaded bool
@ -51,7 +53,15 @@ func (pkt *Packet) LoadPacketData() error {
pkt.payloadLoaded = true
if len(pkt.payload) > 0 {
err := packet.Parse(pkt.payload, &pkt.Base)
var err error
switch pkt.payloadLayer {
case 3:
err = packet.ParseLayer3(pkt.payload, &pkt.Base)
case 4:
err = packet.ParseLayer4(pkt.payload, &pkt.Base)
default:
err = fmt.Errorf("unsupported payload layer: %d", pkt.payloadLayer)
}
if err != nil {
log.Tracef("payload: %#v", pkt.payload)
log.Tracer(pkt.Ctx()).Warningf("windowskext: failed to parse payload: %s", err)

View file

@ -16,6 +16,7 @@ import (
"github.com/safing/portmaster/service/netquery"
"github.com/safing/portmaster/service/network"
"github.com/safing/portmaster/service/profile"
"github.com/safing/portmaster/service/resolver"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/spn/access"
"github.com/safing/portmaster/spn/captain"
@ -35,8 +36,7 @@ func (ss *stringSliceFlag) Set(value string) error {
var allowedClients stringSliceFlag
type Firewall struct {
mgr *mgr.Manager
mgr *mgr.Manager
instance instance
}
@ -168,4 +168,5 @@ type instance interface {
Access() *access.Access
Network() *network.Network
NetQuery() *netquery.NetQuery
Resolver() *resolver.ResolverModule
}

View file

@ -6,10 +6,12 @@ import (
"fmt"
"net"
"os"
"strings"
"sync/atomic"
"time"
"github.com/google/gopacket/layers"
"github.com/miekg/dns"
"github.com/tevino/abool"
"github.com/safing/portmaster/base/log"
@ -23,6 +25,7 @@ import (
"github.com/safing/portmaster/service/network/netutils"
"github.com/safing/portmaster/service/network/packet"
"github.com/safing/portmaster/service/process"
"github.com/safing/portmaster/service/resolver"
"github.com/safing/portmaster/spn/access"
)
@ -444,8 +447,9 @@ func filterHandler(conn *network.Connection, pkt packet.Packet) {
filterConnection = false
log.Tracer(pkt.Ctx()).Infof("filter: granting own pre-authenticated connection %s", conn)
// Redirect outbound DNS packets if enabled,
// Redirect outbound DNS packets if enabled,
case dnsQueryInterception() &&
!module.instance.Resolver().IsDisabled() &&
pkt.IsOutbound() &&
pkt.Info().DstPort == 53 &&
// that don't match the address of our nameserver,
@ -478,11 +482,13 @@ func filterHandler(conn *network.Connection, pkt packet.Packet) {
// Decide how to continue handling connection.
switch {
case conn.Inspecting && looksLikeOutgoingDNSRequest(conn):
inspectDNSPacket(conn, pkt)
conn.UpdateFirewallHandler(inspectDNSPacket)
case conn.Inspecting:
log.Tracer(pkt.Ctx()).Trace("filter: start inspecting")
conn.UpdateFirewallHandler(inspectAndVerdictHandler)
inspectAndVerdictHandler(conn, pkt)
default:
conn.StopFirewallHandler()
verdictHandler(conn, pkt)
@ -506,7 +512,7 @@ func FilterConnection(ctx context.Context, conn *network.Connection, pkt packet.
}
// TODO: Enable inspection framework again.
conn.Inspecting = false
// conn.Inspecting = false
// TODO: Quick fix for the SPN.
// Use inspection framework for proper encryption detection.
@ -580,6 +586,98 @@ func inspectAndVerdictHandler(conn *network.Connection, pkt packet.Packet) {
issueVerdict(conn, pkt, 0, true)
}
func inspectDNSPacket(conn *network.Connection, pkt packet.Packet) {
// Ignore info-only packets in this handler.
if pkt.InfoOnly() {
return
}
dnsPacket := new(dns.Msg)
err := pkt.LoadPacketData()
if err != nil {
_ = pkt.Block()
log.Errorf("filter: failed to load packet payload: %s", err)
return
}
// Parse and block invalid packets.
err = dnsPacket.Unpack(pkt.Payload())
if err != nil {
err = pkt.PermanentBlock()
if err != nil {
log.Errorf("filter: failed to block packet: %s", err)
}
_ = conn.SetVerdict(network.VerdictBlock, "none DNS data on DNS port", "", nil)
conn.VerdictPermanent = true
conn.Save()
return
}
// Packet was parsed.
// Allow it, but only after the answer has been added to the cache.
defer func() {
err = pkt.Accept()
if err != nil {
log.Errorf("filter: failed to accept dns packet: %s", err)
}
}()
// Check if packet has a question.
if len(dnsPacket.Question) == 0 {
return
}
// Create structs with the needed data.
question := dnsPacket.Question[0]
fqdn := dns.Fqdn(question.Name)
// Check for compat check dns request.
if strings.HasSuffix(fqdn, compat.DNSCheckInternalDomainScope) {
subdomain := strings.TrimSuffix(fqdn, compat.DNSCheckInternalDomainScope)
_ = compat.SubmitDNSCheckDomain(subdomain)
log.Infof("packet_handler: self-check domain received")
// No need to parse the answer.
return
}
// Check if there is an answer.
if len(dnsPacket.Answer) == 0 {
return
}
resolverInfo := &resolver.ResolverInfo{
Name: "DNSRequestObserver",
Type: resolver.ServerTypeFirewall,
Source: resolver.ServerSourceFirewall,
IP: conn.Entity.IP,
Domain: conn.Entity.Domain,
IPScope: conn.Entity.IPScope,
}
rrCache := &resolver.RRCache{
Domain: fqdn,
Question: dns.Type(question.Qtype),
RCode: dnsPacket.Rcode,
Answer: dnsPacket.Answer,
Ns: dnsPacket.Ns,
Extra: dnsPacket.Extra,
Resolver: resolverInfo,
}
query := &resolver.Query{
FQDN: fqdn,
QType: dns.Type(question.Qtype),
NoCaching: false,
IgnoreFailing: false,
LocalResolversOnly: false,
ICANNSpace: false,
DomainRoot: "",
}
// Save to cache
UpdateIPsAndCNAMEs(query, rrCache, conn)
}
func icmpFilterHandler(conn *network.Connection, pkt packet.Packet) {
// Load packet data.
err := pkt.LoadPacketData()

View file

@ -3,7 +3,6 @@ package service
import (
"context"
"fmt"
"os"
"sync/atomic"
"time"
@ -14,12 +13,15 @@ import (
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/base/rng"
"github.com/safing/portmaster/base/runtime"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/service/broadcasts"
"github.com/safing/portmaster/service/compat"
"github.com/safing/portmaster/service/core"
"github.com/safing/portmaster/service/core/base"
"github.com/safing/portmaster/service/firewall"
"github.com/safing/portmaster/service/firewall/interception"
"github.com/safing/portmaster/service/firewall/interception/dnsmonitor"
"github.com/safing/portmaster/service/integration"
"github.com/safing/portmaster/service/intel/customlists"
"github.com/safing/portmaster/service/intel/filterlists"
"github.com/safing/portmaster/service/intel/geoip"
@ -74,6 +76,7 @@ type Instance struct {
core *core.Core
binaryUpdates *updates.Updater
intelUpdates *updates.Updater
integration *integration.OSIntegration
geoip *geoip.GeoIP
netenv *netenv.NetEnv
ui *ui.UI
@ -83,6 +86,7 @@ type Instance struct {
firewall *firewall.Firewall
filterLists *filterlists.FilterLists
interception *interception.Interception
dnsmonitor *dnsmonitor.DNSMonitor
customlist *customlists.CustomList
status *status.Status
broadcasts *broadcasts.Broadcasts
@ -119,7 +123,7 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
}
// Make sure data dir exists, so that child directories don't dictate the permissions.
err = os.MkdirAll(svcCfg.DataDir, 0o0755)
err = utils.EnsureDirectory(svcCfg.DataDir, utils.PublicReadExecPermission)
if err != nil {
return nil, fmt.Errorf("data directory %s is not accessible: %w", svcCfg.DataDir, err)
}
@ -167,10 +171,6 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
}
// Service modules
instance.core, err = core.New(instance)
if err != nil {
return instance, fmt.Errorf("create core module: %w", err)
}
binaryUpdateConfig, intelUpdateConfig, err := MakeUpdateConfigs(svcCfg)
if err != nil {
return instance, fmt.Errorf("create updates config: %w", err)
@ -183,6 +183,14 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
if err != nil {
return instance, fmt.Errorf("create updates module: %w", err)
}
instance.core, err = core.New(instance)
if err != nil {
return instance, fmt.Errorf("create core module: %w", err)
}
instance.integration, err = integration.New(instance)
if err != nil {
return instance, fmt.Errorf("create integration module: %w", err)
}
instance.geoip, err = geoip.New(instance)
if err != nil {
return instance, fmt.Errorf("create customlist module: %w", err)
@ -219,6 +227,10 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
if err != nil {
return instance, fmt.Errorf("create interception module: %w", err)
}
instance.dnsmonitor, err = dnsmonitor.New(instance)
if err != nil {
return instance, fmt.Errorf("create dns-listener module: %w", err)
}
instance.customlist, err = customlists.New(instance)
if err != nil {
return instance, fmt.Errorf("create customlist module: %w", err)
@ -309,6 +321,7 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
instance.core,
instance.binaryUpdates,
instance.intelUpdates,
instance.integration,
instance.geoip,
instance.netenv,
@ -322,6 +335,7 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
instance.filterLists,
instance.customlist,
instance.interception,
instance.dnsmonitor,
instance.compat,
instance.status,
@ -429,6 +443,11 @@ func (i *Instance) IntelUpdates() *updates.Updater {
return i.intelUpdates
}
// OSIntegration returns the integration module.
func (i *Instance) OSIntegration() *integration.OSIntegration {
return i.integration
}
// GeoIP returns the geoip module.
func (i *Instance) GeoIP() *geoip.GeoIP {
return i.geoip
@ -514,6 +533,11 @@ func (i *Instance) Interception() *interception.Interception {
return i.interception
}
// DNSMonitor returns the dns-listener module.
func (i *Instance) DNSMonitor() *dnsmonitor.DNSMonitor {
return i.dnsmonitor
}
// CustomList returns the customlist module.
func (i *Instance) CustomList() *customlists.CustomList {
return i.customlist
@ -708,3 +732,23 @@ func (i *Instance) ShutdownComplete() <-chan struct{} {
func (i *Instance) ExitCode() int {
return int(i.exitCode.Load())
}
// ShouldRestartIsSet returns whether the service/instance should be restarted.
func (i *Instance) ShouldRestartIsSet() bool {
return i.ShouldRestart
}
// CommandLineOperationIsSet returns whether the command line option is set.
func (i *Instance) CommandLineOperationIsSet() bool {
return i.CommandLineOperation != nil
}
// CommandLineOperationExecute executes the set command line option.
func (i *Instance) CommandLineOperationExecute() error {
return i.CommandLineOperation()
}
// AddModule adds a module to the service group.
func (i *Instance) AddModule(m mgr.Module) {
i.serviceGroup.Add(m)
}

View file

@ -0,0 +1,114 @@
//go:build windows
// +build windows
package integration
import (
"fmt"
"golang.org/x/sys/windows"
)
type ETWFunctions struct {
createState *windows.Proc
initializeSession *windows.Proc
startTrace *windows.Proc
flushTrace *windows.Proc
stopTrace *windows.Proc
destroySession *windows.Proc
stopOldSession *windows.Proc
}
func initializeETW(dll *windows.DLL) (*ETWFunctions, error) {
functions := &ETWFunctions{}
var err error
functions.createState, err = dll.FindProc("PM_ETWCreateState")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWCreateState: %q", err)
}
functions.initializeSession, err = dll.FindProc("PM_ETWInitializeSession")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWInitializeSession: %q", err)
}
functions.startTrace, err = dll.FindProc("PM_ETWStartTrace")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWStartTrace: %q", err)
}
functions.flushTrace, err = dll.FindProc("PM_ETWFlushTrace")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWFlushTrace: %q", err)
}
functions.stopTrace, err = dll.FindProc("PM_ETWStopTrace")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWStopTrace: %q", err)
}
functions.destroySession, err = dll.FindProc("PM_ETWDestroySession")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWDestroySession: %q", err)
}
functions.stopOldSession, err = dll.FindProc("PM_ETWStopOldSession")
if err != nil {
return functions, fmt.Errorf("failed to load function PM_ETWDestroySession: %q", err)
}
return functions, nil
}
// CreateState calls the dll createState C function.
func (etw ETWFunctions) CreateState(callback uintptr) uintptr {
state, _, _ := etw.createState.Call(callback)
return state
}
// InitializeSession calls the dll initializeSession C function.
func (etw ETWFunctions) InitializeSession(state uintptr) error {
rc, _, _ := etw.initializeSession.Call(state)
if rc != 0 {
return fmt.Errorf("failed with status code: %d", rc)
}
return nil
}
// StartTrace calls the dll startTrace C function.
func (etw ETWFunctions) StartTrace(state uintptr) error {
rc, _, _ := etw.startTrace.Call(state)
if rc != 0 {
return fmt.Errorf("failed with status code: %d", rc)
}
return nil
}
// FlushTrace calls the dll flushTrace C function.
func (etw ETWFunctions) FlushTrace(state uintptr) error {
rc, _, _ := etw.flushTrace.Call(state)
if rc != 0 {
return fmt.Errorf("failed with status code: %d", rc)
}
return nil
}
// StopTrace calls the dll stopTrace C function.
func (etw ETWFunctions) StopTrace(state uintptr) error {
rc, _, _ := etw.stopTrace.Call(state)
if rc != 0 {
return fmt.Errorf("failed with status code: %d", rc)
}
return nil
}
// DestroySession calls the dll destroySession C function.
func (etw ETWFunctions) DestroySession(state uintptr) error {
rc, _, _ := etw.destroySession.Call(state)
if rc != 0 {
return fmt.Errorf("failed with status code: %d", rc)
}
return nil
}
// StopOldSession calls the dll stopOldSession C function.
func (etw ETWFunctions) StopOldSession() error {
rc, _, _ := etw.stopOldSession.Call()
if rc != 0 {
return fmt.Errorf("failed with status code: %d", rc)
}
return nil
}

View file

@ -0,0 +1,16 @@
//go:build !windows
// +build !windows
package integration
type OSSpecific struct{}
// Initialize is a no-op on any OS other than Windows.
func (i *OSIntegration) Initialize() error {
return nil
}
// CleanUp releases any resources allocated during initialization.
func (i *OSIntegration) CleanUp() error {
return nil
}

View file

@ -0,0 +1,85 @@
//go:build windows
// +build windows
package integration
import (
"fmt"
"sync"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"golang.org/x/sys/windows"
)
type OSSpecific struct {
dll *windows.DLL
etwFunctions *ETWFunctions
}
// Initialize loads the dll and finds all the needed functions from it.
func (i *OSIntegration) Initialize() error {
// Try to load dll
err := i.loadDLL()
if err != nil {
log.Errorf("integration: failed to load dll: %s", err)
callbackLock := sync.Mutex{}
// Listen for events from the updater and try to load the DLL again when one arrives.
i.instance.BinaryUpdates().EventResourcesUpdated.AddCallback("core-dll-loader", func(wc *mgr.WorkerCtx, s struct{}) (cancel bool, err error) {
// Make sure multiple calls are not executed at the same time.
callbackLock.Lock()
defer callbackLock.Unlock()
// Try to load again.
err = i.loadDLL()
if err != nil {
log.Errorf("integration: failed to load dll: %s", err)
} else {
log.Info("integration: initialize successful after updater event")
}
return false, nil
})
} else {
log.Info("integration: initialize successful")
}
return nil
}
func (i *OSIntegration) loadDLL() error {
// Find path to the dll.
file, err := i.instance.BinaryUpdates().GetFile("portmaster-core.dll")
if err != nil {
return err
}
// Load the DLL.
i.os.dll, err = windows.LoadDLL(file.Path())
if err != nil {
return fmt.Errorf("failed to load dll: %q", err)
}
// Enumerate all needed dll functions.
i.os.etwFunctions, err = initializeETW(i.os.dll)
if err != nil {
return err
}
// Notify listeners
i.OnInitializedEvent.Submit(struct{}{})
return nil
}
// CleanUp releases any resources allocated during initialization.
func (i *OSIntegration) CleanUp() error {
if i.os.dll != nil {
return i.os.dll.Release()
}
return nil
}
// GetETWInterface returns a struct containing all ETW-related functions, or nil if the DLL has not been loaded yet.
func (i *OSIntegration) GetETWInterface() *ETWFunctions {
return i.os.etwFunctions
}

View file

@ -0,0 +1,49 @@
package integration
import (
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
// OSIntegration module provides special integration with the OS.
type OSIntegration struct {
m *mgr.Manager
OnInitializedEvent *mgr.EventMgr[struct{}]
//nolint:unused
os OSSpecific
instance instance
}
// New returns a new OSIntegration module.
func New(instance instance) (*OSIntegration, error) {
m := mgr.New("OSIntegration")
module := &OSIntegration{
m: m,
OnInitializedEvent: mgr.NewEventMgr[struct{}]("on-initialized", m),
instance: instance,
}
return module, nil
}
// Manager returns the module manager.
func (i *OSIntegration) Manager() *mgr.Manager {
return i.m
}
// Start starts the module.
func (i *OSIntegration) Start() error {
return i.Initialize()
}
// Stop stops the module.
func (i *OSIntegration) Stop() error {
return i.CleanUp()
}
type instance interface {
BinaryUpdates() *updates.Updater
}

View file

@ -49,7 +49,7 @@ var (
var cache = database.NewInterface(&database.Options{
Local: true,
Internal: true,
CacheSize: 2 ^ 8,
CacheSize: 256,
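// Note: the previous value "2 ^ 8" is bitwise XOR in Go and evaluates to 10, not 256.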
})
// getFileFunc is the function used to get a file from

View file

@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"os"
"strings"
"sync"
"github.com/safing/portmaster/base/database"
@ -181,18 +182,18 @@ func updateListIndex() error {
}
// Check if the version in the cache is current.
_, err = getListIndexFromCache()
index, err := getListIndexFromCache()
switch {
case errors.Is(err, database.ErrNotFound):
log.Info("filterlists: index not in cache, starting update")
case err != nil:
log.Warningf("filterlists: failed to load index from cache, starting update: %s", err)
// case !listIndexUpdate.EqualsVersion(strings.TrimPrefix(index.Version, "v")):
// log.Infof(
// "filterlists: index from cache is outdated, starting update (%s != %s)",
// strings.TrimPrefix(index.Version, "v"),
// listIndexUpdate.Version(),
// )
case listIndexUpdate.Version != strings.TrimPrefix(index.Version, "v"):
log.Infof(
"filterlists: index from cache is outdated, starting update (%s != %s)",
strings.TrimPrefix(index.Version, "v"),
listIndexUpdate.Version,
)
default:
// List is in cache and current, there is nothing to do.
log.Debug("filterlists: index is up to date")
@ -202,8 +203,6 @@ func updateListIndex() error {
return nil
}
// case listIndexUpdate.UpgradeAvailable():
// log.Info("filterlists: index update available, starting update")
default:
// Index is loaded and no update is available, there is nothing to do.
return nil
@ -236,19 +235,22 @@ func updateListIndex() error {
// ResolveListIDs resolves a slice of source or category IDs into
// a slice of distinct source IDs.
func ResolveListIDs(ids []string) ([]string, error) {
// Try to get the list index.
index, err := getListIndexFromCache()
if err != nil {
if errors.Is(err, database.ErrNotFound) {
if err := updateListIndex(); err != nil {
// Update the list index
if err = updateListIndex(); err != nil {
return nil, err
}
// retry resolving IDs
return ResolveListIDs(ids)
// Retry getting the list.
if index, err = getListIndexFromCache(); err != nil {
return nil, err
}
} else {
log.Errorf("failed to resolved ids %v: %s", ids, err)
return nil, err
}
log.Errorf("failed to resolved ids %v: %s", ids, err)
return nil, err
}
resolved := index.getDistictSourceIDs(ids...)

View file

@ -4,7 +4,7 @@ import "time"
// SleepyTicker is a wrapper over time.Ticker that respects the sleep mode of the module.
type SleepyTicker struct {
ticker time.Ticker
ticker *time.Ticker
normalDuration time.Duration
sleepDuration time.Duration
sleepMode bool
@ -16,7 +16,7 @@ type SleepyTicker struct {
// If sleepDuration is set to 0 ticker will not tick during sleep.
func NewSleepyTicker(normalDuration time.Duration, sleepDuration time.Duration) *SleepyTicker {
st := &SleepyTicker{
ticker: *time.NewTicker(normalDuration),
ticker: time.NewTicker(normalDuration),
normalDuration: normalDuration,
sleepDuration: sleepDuration,
sleepMode: false,

View file

@ -0,0 +1,57 @@
package mgr
import (
"testing"
"time"
)
func TestSleepyTickerStop(t *testing.T) {
normalDuration := 100 * time.Millisecond
sleepDuration := 200 * time.Millisecond
st := NewSleepyTicker(normalDuration, sleepDuration)
st.Stop() // no panic expected here
}
func TestSleepyTicker(t *testing.T) {
normalDuration := 100 * time.Millisecond
sleepDuration := 200 * time.Millisecond
st := NewSleepyTicker(normalDuration, sleepDuration)
// Test normal mode
select {
case <-st.Wait():
// Expected tick
case <-time.After(normalDuration + 50*time.Millisecond):
t.Error("expected tick in normal mode")
}
// Test sleep mode
st.SetSleep(true)
select {
case <-st.Wait():
// Expected tick
case <-time.After(sleepDuration + 50*time.Millisecond):
t.Error("expected tick in sleep mode")
}
// Test sleep mode with sleepDuration == 0
st = NewSleepyTicker(normalDuration, 0)
st.SetSleep(true)
select {
case <-st.Wait():
t.Error("did not expect tick when sleepDuration is 0")
case <-time.After(normalDuration):
// Expected no tick
}
// Test stopping the ticker
st.Stop()
select {
case <-st.Wait():
t.Error("did not expect tick after stopping the ticker")
case <-time.After(normalDuration):
// Expected no tick
}
}

View file

@ -224,8 +224,8 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
}
// Save the request as open, as we don't know if there will be a connection or not.
network.SaveOpenDNSRequest(q, rrCache, conn)
firewall.UpdateIPsAndCNAMEs(q, rrCache, conn)
network.SaveOpenDNSRequest(q, rrCache, conn)
case network.VerdictUndeterminable:
fallthrough

View file

@ -5,7 +5,6 @@ import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
@ -19,6 +18,7 @@ import (
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/service/netquery/orm"
"github.com/safing/portmaster/service/network"
"github.com/safing/portmaster/service/network/netutils"
@ -128,7 +128,8 @@ type (
// handed over to SQLite.
func New(dbPath string) (*Database, error) {
historyParentDir := filepath.Join(module.instance.DataDir(), "databases")
if err := os.MkdirAll(historyParentDir, 0o0700); err != nil {
err := utils.EnsureDirectory(historyParentDir, utils.AdminOnlyExecPermission)
if err != nil {
return nil, fmt.Errorf("failed to ensure database directory exists: %w", err)
}
@ -226,7 +227,8 @@ func (db *Database) Close() error {
// VacuumHistory rewrites the history database in order to purge deleted records.
func VacuumHistory(ctx context.Context) (err error) {
historyParentDir := filepath.Join(module.instance.DataDir(), "databases")
if err := os.MkdirAll(historyParentDir, 0o0700); err != nil {
err = utils.EnsureDirectory(historyParentDir, utils.AdminOnlyExecPermission)
if err != nil {
return fmt.Errorf("failed to ensure database directory exists: %w", err)
}

View file

@ -93,7 +93,6 @@ func debugInfo(ar *api.Request) (data []byte, err error) {
config.AddToDebugInfo(di)
// Detailed information.
// TODO(vladimir): updates.AddToDebugInfo(di)
// compat.AddToDebugInfo(di) // TODO: Cannot use due to interception import requirement which we don't want for SPN Hubs.
di.AddGoroutineStack()

View file

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"net"
"runtime"
"sync"
"sync/atomic"
"time"
@ -18,6 +19,7 @@ import (
"github.com/safing/portmaster/service/netenv"
"github.com/safing/portmaster/service/network/netutils"
"github.com/safing/portmaster/service/network/packet"
"github.com/safing/portmaster/service/network/reference"
"github.com/safing/portmaster/service/process"
_ "github.com/safing/portmaster/service/process/tags"
"github.com/safing/portmaster/service/resolver"
@ -536,12 +538,41 @@ func (conn *Connection) GatherConnectionInfo(pkt packet.Packet) (err error) {
// Find domain and DNS context of entity.
if conn.Entity.Domain == "" && conn.process.Profile() != nil {
profileScope := conn.process.Profile().LocalProfile().ID
// check if we can find a domain for that IP
ipinfo, err := resolver.GetIPInfo(conn.process.Profile().LocalProfile().ID, pkt.Info().RemoteIP().String())
ipinfo, err := resolver.GetIPInfo(profileScope, pkt.Info().RemoteIP().String())
if err != nil {
// Try again with the global scope, in case DNS went through the system resolver.
ipinfo, err = resolver.GetIPInfo(resolver.IPInfoProfileScopeGlobal, pkt.Info().RemoteIP().String())
}
if runtime.GOOS == "windows" && err != nil {
// On Windows, domains may arrive with a delay.
if module.instance.Resolver().IsDisabled() && conn.shouldWaitForDomain() {
// Flush the dns listener buffer and try again.
for i := range 4 {
err = module.instance.DNSMonitor().Flush()
if err != nil {
// Error while flushing; don't try again.
break
}
// Try with profile scope
ipinfo, err = resolver.GetIPInfo(profileScope, pkt.Info().RemoteIP().String())
if err == nil {
log.Tracer(pkt.Ctx()).Debugf("network: found domain with scope (%s) from dnsmonitor after %d tries", profileScope, +1)
break
}
// Try again with the global scope
ipinfo, err = resolver.GetIPInfo(resolver.IPInfoProfileScopeGlobal, pkt.Info().RemoteIP().String())
if err == nil {
log.Tracer(pkt.Ctx()).Debugf("network: found domain from dnsmonitor after %d tries", i+1)
break
}
time.Sleep(5 * time.Millisecond)
}
}
}
if err == nil {
lastResolvedDomain := ipinfo.MostRecentDomain()
if lastResolvedDomain != nil {
@ -869,3 +900,17 @@ func (conn *Connection) String() string {
return fmt.Sprintf("%s -> %s", conn.process, conn.Entity.IP)
}
}
func (conn *Connection) shouldWaitForDomain() bool {
// Only wait for global-unicast, outgoing, non-ICMP connections.
switch {
case conn.Entity.IPScope != netutils.Global:
return false
case conn.Inbound:
return false
case reference.IsICMP(conn.Entity.Protocol):
return false
}
return true
}

View file

@ -9,10 +9,12 @@ import (
"sync/atomic"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/firewall/interception/dnsmonitor"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/netenv"
"github.com/safing/portmaster/service/network/state"
"github.com/safing/portmaster/service/profile"
"github.com/safing/portmaster/service/resolver"
)
// Events.
@ -188,4 +190,6 @@ func New(instance instance) (*Network, error) {
type instance interface {
Profile() *profile.ProfileModule
Resolver() *resolver.ResolverModule
DNSMonitor() *dnsmonitor.DNSMonitor
}

View file

@ -106,11 +106,12 @@ func checkError(packet gopacket.Packet, info *Info) error {
return nil
}
// Parse parses an IP packet and saves the information in the given packet object.
func Parse(packetData []byte, pktBase *Base) (err error) {
// ParseLayer3 parses an IP packet and saves the information in the given packet object.
func ParseLayer3(packetData []byte, pktBase *Base) (err error) {
if len(packetData) == 0 {
return errors.New("empty packet")
}
pktBase.layer3Data = packetData
ipVersion := packetData[0] >> 4
@ -155,6 +156,62 @@ func Parse(packetData []byte, pktBase *Base) (err error) {
return nil
}
// ParseLayer4 parses a layer 4 packet and saves the information in the given packet object.
func ParseLayer4(packetData []byte, pktBase *Base) (err error) {
if len(packetData) == 0 {
return errors.New("empty packet")
}
var layer gopacket.LayerType
switch pktBase.info.Protocol {
case ICMP:
layer = layers.LayerTypeICMPv4
case IGMP:
layer = layers.LayerTypeIGMP
case TCP:
layer = layers.LayerTypeTCP
case UDP:
layer = layers.LayerTypeUDP
case ICMPv6:
layer = layers.LayerTypeICMPv6
case UDPLite:
return fmt.Errorf("UDPLite not supported")
case RAW:
return fmt.Errorf("RAW protocol not supported")
case AnyHostInternalProtocol61:
return fmt.Errorf("AnyHostInternalProtocol61 protocol not supported")
default:
return fmt.Errorf("protocol not supported")
}
packet := gopacket.NewPacket(packetData, layer, gopacket.DecodeOptions{
Lazy: true,
NoCopy: true,
})
availableDecoders := []func(gopacket.Packet, *Info) error{
parseTCP,
parseUDP,
// parseUDPLite, // We don't yet support udplite.
parseICMPv4,
parseICMPv6,
parseIGMP,
checkError,
}
for _, dec := range availableDecoders {
if err := dec(packet, pktBase.Info()); err != nil {
return err
}
}
pktBase.layers = packet
if transport := packet.TransportLayer(); transport != nil {
pktBase.layer5Data = transport.LayerPayload()
}
return nil
}
func init() {
genIPProtocolFromLayerType()
}

View file

@ -143,7 +143,7 @@ func updateGlobalConfigProfile(_ context.Context) error {
module.states.Add(mgr.State{
ID: globalConfigProfileErrorID,
Name: "Internal Settings Failure",
Message: fmt.Sprintf("Some global settings might not be applied correctly. You can try restarting the Portmaster to resolve this problem. Error: %s", err),
Message: fmt.Sprintf("Some global settings might not be applied correctly. You can try restarting the Portmaster to resolve this problem. Error: %s", lastErr),
Type: mgr.StateTypeWarning,
})
}

View file

@ -3,7 +3,6 @@ package profile
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync/atomic"
@ -11,6 +10,7 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/migration"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
_ "github.com/safing/portmaster/service/core/base"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/profile/binmeta"
@ -66,10 +66,18 @@ func prep() error {
}
// Setup icon storage location.
iconsDir := filepath.Join(module.instance.DataDir(), "databases", "icons")
if err := os.MkdirAll(iconsDir, 0o0700); err != nil {
return fmt.Errorf("failed to create/check icons directory: %w", err)
databaseDir := filepath.Join(module.instance.DataDir(), "databases")
// Ensure the folder exists and has the correct permissions.
err := utils.EnsureDirectory(databaseDir, utils.AdminOnlyExecPermission)
if err != nil {
return fmt.Errorf("failed to ensure directory existence %s: %w", databaseDir, err)
}
iconsDir := filepath.Join(databaseDir, "icons")
err = utils.EnsureDirectory(iconsDir, utils.AdminOnlyExecPermission)
if err != nil {
return fmt.Errorf("failed to ensure directory existence %s: %w", iconsDir, err)
}
binmeta.ProfileIconStoragePath = iconsDir
return nil

Some files were not shown because too many files have changed in this diff.