Merge pull request #1 from safing/develop

Merge upstream changes
This commit is contained in:
Vladimir Stoilov 2022-07-21 15:50:42 +02:00 committed by GitHub
commit b71417dde6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
91 changed files with 6704 additions and 796 deletions

11
.github/dependabot.yml vendored Normal file
View file

@ -0,0 +1,11 @@
# To get started with Dependabot version updates, you'll need to specify which
# package ecosystems to update and where the package manifests are located.
# Please see the documentation for all configuration options:
# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"

72
.github/workflows/codeql-analysis.yml vendored Normal file
View file

@ -0,0 +1,72 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
push:
branches: [ "develop", master ]
pull_request:
# The branches below must be a subset of the branches above
branches: [ "develop" ]
schedule:
- cron: '43 14 * * 4'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: [ 'go' ]
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v3
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# Modify them (or add more) to build your code if needed. For guidance on building your project, please refer to the example below.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2

View file

@ -16,31 +16,18 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
uses: actions/checkout@v3
- uses: actions/setup-go@v2
- uses: actions/setup-go@v3
with:
go-version: '^1.15'
# nektos/act does not have sudo install but we need it on GH actions so
# try to install it.
- name: Install sudo
run: bash -c "apt-get update || true ; apt-get install sudo || true"
env:
DEBIAN_FRONTEND: noninteractive
- name: Install git and gcc
run: sudo bash -c "apt-get update && apt-get install -y git gcc libc6-dev"
env:
DEBIAN_FRONTEND: noninteractive
go-version: '^1.18'
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v2
uses: golangci/golangci-lint-action@v3
with:
version: v1.44
version: v1.45.1
only-new-issues: true
args: -c ./.golangci.yml
skip-go-installation: true
- name: Get dependencies
run: go mod download
@ -48,42 +35,20 @@ jobs:
- name: Run go vet
run: go vet ./...
# golint is run (sufficiently; with excludes) as a part of golangci-lint.
# - name: Install golint
# run: bash -c "GOBIN=$(pwd) go get -u golang.org/x/lint/golint"
#
# - name: Run golint
# run: ./golint -set_exit_status -min_confidence 1.0 ./...
# gofmt is run (sufficiently; with excludes) as a part of golangci-lint.
# - name: Run gofmt
# run: bash -c 'test -z "$(gofmt -s -l .)"'
test:
name: Test
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- name: Check out code
uses: actions/checkout@v3
- uses: actions/setup-go@v2
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '^1.15'
# nektos/act does not have sudo install but we need it on GH actions so
# try to install it.
- name: Install sudo
run: bash -c "apt-get update || true ; apt-get install sudo || true"
env:
DEBIAN_FRONTEND: noninteractive
- name: Install git and gcc
run: sudo bash -c "apt-get update && apt-get install -y git gcc libc6-dev"
env:
DEBIAN_FRONTEND: noninteractive
go-version: '^1.18'
- name: Get dependencies
run: go mod download
- name: Test
- name: Run tests
run: ./test --test-only

View file

@ -1,4 +1,4 @@
# Take Back Control of Your Computer
# Control Your Computer,<br>[Get Peace of Mind](https://safing.io/portmaster/)
Portmaster is a free and open-source application that puts you back in charge over all your computer's network connections.
Developed in the EU 🇪🇺, Austria.

116
broadcasts/api.go Normal file
View file

@ -0,0 +1,116 @@
package broadcasts
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"github.com/safing/portbase/api"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/accessor"
)
// registerAPIEndpoints registers the broadcasts API endpoints with the API
// module. It returns the first registration error encountered, if any.
func registerAPIEndpoints() error {
	endpoints := []api.Endpoint{
		{
			Path:        `broadcasts/matching-data`,
			Read:        api.PermitAdmin,
			BelongsTo:   module,
			StructFunc:  handleMatchingData,
			Name:        "Get Broadcast Notifications Matching Data",
			Description: "Returns the data used by the broadcast notifications to match the instance.",
		},
		{
			Path:        `broadcasts/reset-state`,
			Write:       api.PermitAdmin,
			WriteMethod: http.MethodPost,
			BelongsTo:   module,
			ActionFunc:  handleResetState,
			Name:        "Resets the Broadcast Notification States",
			Description: "Delete the cache of Broadcast Notifications, making them appear again.",
		},
		{
			Path:        `broadcasts/simulate`,
			Write:       api.PermitAdmin,
			WriteMethod: http.MethodPost,
			BelongsTo:   module,
			ActionFunc:  handleSimulate,
			Name:        "Simulate Broadcast Notifications",
			Description: "Test broadcast notifications by sending a valid source file in the body.",
			Parameters: []api.Parameter{
				{
					Method:      http.MethodPost,
					Field:       "state",
					Value:       "true",
					Description: "Check against state when deciding to display a broadcast notification. Acknowledgements are always saved.",
				},
			},
		},
	}

	for _, endpoint := range endpoints {
		if err := api.RegisterEndpoint(endpoint); err != nil {
			return err
		}
	}
	return nil
}
// handleMatchingData serves the current broadcast matching data of this
// instance for inspection via the API.
func handleMatchingData(ar *api.Request) (i interface{}, err error) {
	return collectData(), nil
}
// handleResetState deletes the persisted broadcast states from the database,
// which makes all broadcast notifications appear again.
func handleResetState(ar *api.Request) (msg string, err error) {
	if err := db.Delete(broadcastStatesDBKey); err != nil {
		return "", err
	}
	return "Reset complete.", nil
}
// handleSimulate handles the "broadcasts/simulate" API endpoint. It parses a
// broadcast notification source file from the request body and runs every
// broadcast against the live matching data, returning one result line per
// broadcast. If the "state" query parameter is "true", the stored broadcast
// states are also taken into account; acknowledgements are saved either way.
func handleSimulate(ar *api.Request) (msg string, err error) {
	// Parse broadcast notification data.
	broadcasts, err := parseBroadcastSource(ar.InputData)
	if err != nil {
		return "", fmt.Errorf("failed to parse broadcast notifications update: %w", err)
	}

	// Get and marshal matching data.
	matchingData := collectData()
	matchingJSON, err := json.Marshal(matchingData)
	if err != nil {
		return "", fmt.Errorf("failed to marshal broadcast notifications matching data: %w", err)
	}
	matchingDataAccessor := accessor.NewJSONBytesAccessor(&matchingJSON)

	// bss stays nil unless state checking is requested; handleBroadcast
	// skips state checks for a nil state set.
	var bss *BroadcastStates
	if ar.URL.Query().Get("state") == "true" {
		// Get broadcast notification states.
		bss, err = getBroadcastStates()
		if err != nil {
			if !errors.Is(err, database.ErrNotFound) {
				return "", fmt.Errorf("failed to get broadcast notifications states: %w", err)
			}
			bss = newBroadcastStates()
		}
	}

	// Go through all broadcast notifications and check if they match.
	var results []string
	for _, bn := range broadcasts.Notifications {
		err := handleBroadcast(bn, matchingDataAccessor, bss)
		switch {
		case err == nil:
			results = append(results, fmt.Sprintf("%30s: displayed", bn.id))
		case errors.Is(err, ErrSkip):
			results = append(results, fmt.Sprintf("%30s: %s", bn.id, err))
		default:
			results = append(results, fmt.Sprintf("FAILED %23s: %s", bn.id, err))
		}
	}

	return strings.Join(results, "\n"), nil
}

102
broadcasts/data.go Normal file
View file

@ -0,0 +1,102 @@
package broadcasts
import (
"time"
"github.com/safing/portbase/config"
"github.com/safing/portmaster/intel/geoip"
"github.com/safing/portmaster/netenv"
"github.com/safing/portmaster/updates"
"github.com/safing/spn/access"
"github.com/safing/spn/captain"
)
var portmasterStarted = time.Now()
// collectData gathers the broadcast matching data from the various Portmaster
// subsystems and returns it as a map for JSON-based query matching. Items
// that fail to load are reported in-place as a *DataError.
func collectData() interface{} {
	collected := make(map[string]interface{})

	// Version data.
	versions := updates.GetSimpleVersions()
	collected["Updates"] = versions
	collected["Version"] = versions.Build.Version
	if numericVersion, err := MakeNumericVersion(versions.Build.Version); err != nil {
		collected["NumericVersion"] = &DataError{Error: err}
	} else {
		collected["NumericVersion"] = numericVersion
	}

	// Install data.
	if installInfo, err := GetInstallInfo(); err != nil {
		collected["Install"] = &DataError{Error: err}
	} else {
		collected["Install"] = installInfo
	}

	// Global configuration.
	collected["Config"] = config.GetActiveConfigValues()

	// Device location, if available.
	if locs, ok := netenv.GetInternetLocation(); ok && locs.Best().LocationOrNil() != nil {
		best := locs.Best()
		collected["Location"] = &Location{
			Country:        best.Location.Country.ISOCode,
			Coordinates:    best.Location.Coordinates,
			ASN:            best.Location.AutonomousSystemNumber,
			ASOrg:          best.Location.AutonomousSystemOrganization,
			Source:         best.Source,
			SourceAccuracy: best.SourceAccuracy,
		}
	}

	// SPN status.
	collected["SPN"] = captain.GetSPNStatus()

	// SPN account data.
	if userRecord, err := access.GetUser(); err != nil {
		collected["Account"] = &DataError{Error: err}
	} else {
		collected["Account"] = &Account{
			UserRecord: userRecord,
			UpToDate:   userRecord.Meta().Modified > time.Now().Add(-7*24*time.Hour).Unix(),
			MayUseUSP:  userRecord.MayUseSPN(),
		}
	}

	// Uptime in full hours.
	collected["UptimeHours"] = int(time.Since(portmasterStarted).Hours())

	return collected
}
// Location holds location matching data.
type Location struct {
	// Country is the ISO country code of the device location.
	Country string
	// Coordinates are the geographic coordinates of the device location.
	Coordinates geoip.Coordinates
	// ASN is the autonomous system number of the internet connection.
	ASN uint
	// ASOrg is the organization operating the autonomous system.
	ASOrg string
	// Source describes how the location was determined.
	Source netenv.DeviceLocationSource
	// SourceAccuracy is the accuracy rating of the location source.
	SourceAccuracy int
}

// Account holds SPN account matching data.
type Account struct {
	*access.UserRecord

	// UpToDate is whether the user record was modified within the last 7 days.
	UpToDate bool
	// MayUseUSP holds the result of UserRecord.MayUseSPN().
	// NOTE(review): the field name looks like a typo for "MayUseSPN", but
	// renaming would change the matching data schema and could break existing
	// broadcast matching queries - confirm before changing.
	MayUseUSP bool
}

// DataError represents an error getting some matching data.
type DataError struct {
	// Error is the error that occurred while collecting the data item.
	Error error
}

175
broadcasts/install_info.go Normal file
View file

@ -0,0 +1,175 @@
package broadcasts
import (
"errors"
"fmt"
"strconv"
"sync"
"time"
semver "github.com/hashicorp/go-version"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/info"
"github.com/safing/portbase/log"
)
// installInfoDBKey is the database key under which the install info record is
// stored.
const installInfoDBKey = "core:status/install-info"

// InstallInfo holds generic info about the install.
type InstallInfo struct {
	record.Base
	sync.Mutex

	// Version is the Portmaster version recorded at install time.
	Version string
	// NumericVersion is Version encoded as a single number, as produced by
	// MakeNumericVersion.
	NumericVersion int64

	// Time is the estimated install time, derived from the oldest "core"
	// database entry.
	Time time.Time
	// NumericDate is the install date encoded as YYYYMMDD.
	NumericDate int64
	// DaysSinceInstall is the number of full days since the install time.
	DaysSinceInstall int64
	// UnixTimestamp is the install time as a unix timestamp in seconds.
	UnixTimestamp int64
}
// GetInstallInfo returns the install info from the database.
func GetInstallInfo() (*InstallInfo, error) {
	r, err := db.Get(installInfoDBKey)
	if err != nil {
		return nil, err
	}

	// Unwrap a generic record into a typed struct, if necessary.
	if r.IsWrapped() {
		// Only allocate a new struct when it is actually needed.
		installInfo := &InstallInfo{}
		if err := record.Unwrap(r, installInfo); err != nil {
			return nil, err
		}
		return installInfo, nil
	}

	// Otherwise, the record should already have the correct type.
	installInfo, ok := r.(*InstallInfo)
	if !ok {
		return nil, fmt.Errorf("record not of type *InstallInfo, but %T", r)
	}
	return installInfo, nil
}
// ensureInstallInfo loads the install info from the database, fills in any
// missing fields and saves it back. Load and save failures are only logged.
func ensureInstallInfo() {
	installInfo, err := GetInstallInfo()
	if err != nil {
		// A missing record is expected on first run; anything else is logged.
		if !errors.Is(err, database.ErrNotFound) {
			log.Warningf("updates: failed to load install info: %s", err)
		}
		installInfo = &InstallInfo{}
	}

	// Fill in missing data and save.
	installInfo.checkAll()
	if err := installInfo.save(); err != nil {
		log.Warningf("updates: failed to save install info: %s", err)
	}
}
// save writes the install info record to the database, setting its database
// key first if it is not set yet.
func (ii *InstallInfo) save() error {
	if !ii.KeyIsSet() {
		ii.SetKey(installInfoDBKey)
	}
	return db.Put(ii)
}

// checkAll fills in any missing version and install date information.
func (ii *InstallInfo) checkAll() {
	ii.checkVersion()
	ii.checkInstallDate()
}
// checkVersion fills in the version fields from the build info if they are
// not fully set yet. Failures to derive the numeric version are only logged.
func (ii *InstallInfo) checkVersion() {
	if ii.Version != "" && ii.NumericVersion > 0 {
		// Everything is already present.
		return
	}

	// Update version information from the build info.
	versionInfo := info.GetInfo()
	ii.Version = versionInfo.Version
	if versionInfo.Version == "" {
		return
	}

	// Derive the numeric version representation.
	numericVersion, err := MakeNumericVersion(versionInfo.Version)
	if err != nil {
		log.Warningf("updates: failed to make numeric version: %s", err)
		return
	}
	ii.NumericVersion = numericVersion
}
// MakeNumericVersion makes a numeric version with the first three version
// segments always using three digits, i.e. major*1_000_000 + minor*1_000 +
// patch. Segments greater than 999 are capped at 999.
func MakeNumericVersion(version string) (numericVersion int64, err error) {
	// Parse version string.
	ver, err := semver.NewVersion(version)
	if err != nil {
		return 0, fmt.Errorf("failed to parse core version: %w", err)
	}

	// Fold the first three segments into one number, three digits each.
	multipliers := []int64{1000000, 1000, 1}
	segments := ver.Segments()
	for i, multiplier := range multipliers {
		if i >= len(segments) {
			break
		}
		segment := int64(segments[i])
		if segment > 999 {
			segment = 999
		}
		numericVersion += segment * multiplier
	}

	return numericVersion, nil
}
// checkInstallDate fills in the install date fields if they are not fully set
// yet, estimating the install time from the creation time of the oldest
// "core" database entry. Failures are only logged.
func (ii *InstallInfo) checkInstallDate() {
	if ii.UnixTimestamp > 0 &&
		ii.NumericDate > 0 &&
		ii.DaysSinceInstall > 0 &&
		!ii.Time.IsZero() {
		// Everything is already present.
		return
	}

	// Find the oldest created database entry and use it as install time.
	oldest := time.Now().Unix()
	it, err := db.Query(query.New("core"))
	if err != nil {
		log.Warningf("updates: failed to create iterator for searching DB for install time: %s", err)
		return
	}
	defer it.Cancel()
	for r := range it.Next {
		if created := r.Meta().Created; created < oldest {
			oldest = created
		}
	}

	// Derive all date representations from the found timestamp.
	ii.UnixTimestamp = oldest
	ii.Time = time.Unix(oldest, 0)
	ii.DaysSinceInstall = int64(time.Since(ii.Time).Hours()) / 24

	// Transform date for numeric representation (YYYYMMDD).
	numericDate, err := strconv.ParseInt(ii.Time.Format("20060102"), 10, 64)
	if err != nil {
		log.Warningf("updates: failed to make numeric date from %s: %s", ii.Time, err)
		return
	}
	ii.NumericDate = numericDate
}

46
broadcasts/module.go Normal file
View file

@ -0,0 +1,46 @@
package broadcasts
import (
"sync"
"time"
"github.com/safing/portbase/database"
"github.com/safing/portbase/modules"
)
var (
	// module is the broadcasts module instance.
	module *modules.Module

	// db is the database interface used for the install info and broadcast
	// state records, which are local and internal only.
	db = database.NewInterface(&database.Options{
		Local:    true,
		Internal: true,
	})

	// startOnce guards the broadcast notifier task so it is started only once,
	// even if the module is restarted.
	startOnce sync.Once
)

func init() {
	// The broadcasts module depends on updates (notification source file),
	// netenv (matching data) and notifications (displaying broadcasts).
	module = modules.Register("broadcasts", prep, start, nil, "updates", "netenv", "notifications")
}
// prep runs during module preparation and registers the broadcasts API
// endpoints.
func prep() error {
	return registerAPIEndpoints()
}
// start ensures the install info is present and launches the broadcast
// notifier task, which checks for matching broadcasts every 10 minutes.
func start() error {
	// Ensure the install info is up to date.
	ensureInstallInfo()

	// Start broadcast notifier task only once, even across module restarts.
	startOnce.Do(func() {
		module.NewTask("broadcast notifier", broadcastNotify).
			Repeat(10 * time.Minute).Queue()
	})

	return nil
}

285
broadcasts/notify.go Normal file
View file

@ -0,0 +1,285 @@
package broadcasts
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"strings"
"sync"
"time"
"github.com/ghodss/yaml"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/accessor"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/notifications"
"github.com/safing/portmaster/updates"
)
const (
	// broadcastsResourcePath is the update resource path of the broadcast
	// notifications source file.
	broadcastsResourcePath = "intel/portmaster/notifications.yaml"

	// broadcastNotificationIDPrefix is prepended to a broadcast ID to build
	// the notification EventID.
	broadcastNotificationIDPrefix = "broadcasts:"

	// minRepeatDuration is the minimum allowed repeat interval for repeating
	// broadcasts; smaller configured values are raised to this.
	minRepeatDuration = 1 * time.Hour
)

// Errors.
var (
	// ErrSkip is the base error for all reasons a broadcast is intentionally
	// skipped; check with errors.Is.
	ErrSkip                  = errors.New("broadcast skipped")
	ErrSkipDoesNotMatch      = fmt.Errorf("%w: does not match", ErrSkip)
	ErrSkipAlreadyActive     = fmt.Errorf("%w: already active", ErrSkip)
	ErrSkipAlreadyShown      = fmt.Errorf("%w: already shown", ErrSkip)
	ErrSkipRemovedByMismatch = fmt.Errorf("%w: removed due to mismatch", ErrSkip)
	ErrSkipRemovedBySource   = fmt.Errorf("%w: removed by source", ErrSkip)
)

// BroadcastNotifications holds the data structure of the broadcast
// notifications update file.
type BroadcastNotifications struct {
	// Notifications maps broadcast IDs to their notifications.
	Notifications map[string]*BroadcastNotification
}

// BroadcastNotification is a single broadcast notification.
type BroadcastNotification struct {
	*notifications.Notification
	// id is the map key from the source file, copied here for easier handling.
	id string

	// Match holds a query string that needs to match the local matching data in
	// order for the broadcast to be displayed.
	Match string
	// matchingQuery is the pre-compiled form of Match.
	matchingQuery *query.Query
	// AttachToModule signifies if the broadcast notification should be attached to the module.
	AttachToModule bool
	// Remove signifies that the broadcast should be canceled and its state removed.
	Remove bool
	// Permanent signifies that the broadcast cannot be acknowledged by the user
	// and remains in the UI indefinitely.
	Permanent bool
	// Repeat specifies a duration after which the broadcast should be shown again.
	Repeat string
	// repeatDuration is the parsed form of Repeat, raised to minRepeatDuration.
	repeatDuration time.Duration
}
// broadcastNotify is the notifier task function. It loads and parses the
// broadcast notifications source file from the updates system, collects the
// local matching data and the saved broadcast states, and then displays or
// skips each broadcast accordingly. Per-broadcast failures are only logged.
func broadcastNotify(ctx context.Context, t *modules.Task) error {
	// Get broadcast notifications file, load it from disk and parse it.
	broadcastsResource, err := updates.GetFile(broadcastsResourcePath)
	if err != nil {
		return fmt.Errorf("failed to get broadcast notifications update: %w", err)
	}
	broadcastsData, err := ioutil.ReadFile(broadcastsResource.Path())
	if err != nil {
		return fmt.Errorf("failed to load broadcast notifications update: %w", err)
	}
	broadcasts, err := parseBroadcastSource(broadcastsData)
	if err != nil {
		return fmt.Errorf("failed to parse broadcast notifications update: %w", err)
	}

	// Get and marshal matching data.
	matchingData := collectData()
	matchingJSON, err := json.Marshal(matchingData)
	if err != nil {
		return fmt.Errorf("failed to marshal broadcast notifications matching data: %w", err)
	}
	matchingDataAccessor := accessor.NewJSONBytesAccessor(&matchingJSON)

	// Get broadcast notification states; a missing record is expected on
	// first run and replaced with a fresh state set.
	bss, err := getBroadcastStates()
	if err != nil {
		if !errors.Is(err, database.ErrNotFound) {
			return fmt.Errorf("failed to get broadcast notifications states: %w", err)
		}
		bss = newBroadcastStates()
	}

	// Go through all broadcast notifications and check if they match.
	for _, bn := range broadcasts.Notifications {
		err := handleBroadcast(bn, matchingDataAccessor, bss)
		switch {
		case err == nil:
			log.Infof("broadcasts: displaying broadcast %s", bn.id)
		case errors.Is(err, ErrSkip):
			log.Tracef("broadcasts: skipped displaying broadcast %s: %s", bn.id, err)
		default:
			log.Warningf("broadcasts: failed to handle broadcast %s: %s", bn.id, err)
		}
	}

	return nil
}
// parseBroadcastSource parses the YAML broadcast notifications source file,
// copies the map keys into the structs, pre-compiles the matching queries and
// parses the repeat durations, enforcing the minimum repeat interval.
func parseBroadcastSource(yamlData []byte) (*BroadcastNotifications, error) {
	broadcasts := &BroadcastNotifications{}
	if err := yaml.Unmarshal(yamlData, broadcasts); err != nil {
		return nil, err
	}

	for id, bn := range broadcasts.Notifications {
		// Copy the map key into the struct for easier handling.
		bn.id = id

		// Pre-compile the matching query, if set.
		if bn.Match != "" {
			matchingQuery, err := query.ParseQuery("query / where " + bn.Match)
			if err != nil {
				return nil, fmt.Errorf("failed to parse query of broadcast notification %s: %w", bn.id, err)
			}
			bn.matchingQuery = matchingQuery
		}

		// Parse the repeat duration, if set, and raise it to the minimum.
		if bn.Repeat == "" {
			continue
		}
		repeatDuration, err := time.ParseDuration(bn.Repeat)
		if err != nil {
			return nil, fmt.Errorf("failed to parse repeat duration of broadcast notification %s: %w", bn.id, err)
		}
		if repeatDuration < minRepeatDuration {
			repeatDuration = minRepeatDuration
		}
		bn.repeatDuration = repeatDuration
	}

	return broadcasts, nil
}
// handleBroadcast checks whether the given broadcast notification should be
// displayed and, if so, prepares and saves the notification. It returns nil
// when the broadcast was displayed, an ErrSkip-based error when it was
// intentionally skipped, or another error when handling failed. bss may be
// nil, in which case state checks are skipped.
func handleBroadcast(bn *BroadcastNotification, matchingDataAccessor accessor.Accessor, bss *BroadcastStates) error {
	// Check if broadcast was already shown.
	if bss != nil {
		state, ok := bss.States[bn.id]
		switch {
		case !ok || state.Read.IsZero():
			// Was never shown, continue.
		case bn.repeatDuration == 0:
			// Was already shown and is not repeated, skip.
			return ErrSkipAlreadyShown
		case time.Now().Add(-bn.repeatDuration).After(state.Read):
			// Was already shown, but the repeat interval has elapsed, continue.
		default:
			// Was already shown and the repeat interval has not yet elapsed, skip.
			// BUGFIX: previously there was no default case, so an acknowledged
			// repeating broadcast fell through and was re-displayed on the very
			// next task run, ignoring the repeat interval.
			return ErrSkipAlreadyShown
		}
	}

	// Check if broadcast should be removed.
	if bn.Remove {
		removeBroadcast(bn, bss)
		return ErrSkipRemovedBySource
	}

	// Skip if broadcast does not match.
	if bn.matchingQuery != nil && !bn.matchingQuery.MatchesAccessor(matchingDataAccessor) {
		if removeBroadcast(bn, bss) {
			return ErrSkipRemovedByMismatch
		}
		return ErrSkipDoesNotMatch
	}

	// Check if there is already an active notification for this broadcast.
	eventID := broadcastNotificationIDPrefix + bn.id
	n := notifications.Get(eventID)
	if n != nil {
		// Already active!
		return ErrSkipAlreadyActive
	}

	// Prepare notification for displaying.
	// It is okay to edit the notification, as it is freshly loaded from the
	// source file every time.
	n = bn.Notification
	n.EventID = eventID
	n.GUID = ""
	n.State = ""
	n.SelectedActionID = ""

	// Add dismiss button if the notification is not permanent.
	if !bn.Permanent {
		n.AvailableActions = append(n.AvailableActions, &notifications.Action{
			ID:   "ack",
			Text: "Got it!",
		})
	}
	n.SetActionFunction(markBroadcastAsRead)

	// Display notification.
	n.Save()

	// Attach to module to raise more awareness.
	if bn.AttachToModule {
		n.AttachToModule(module)
	}

	return nil
}
// removeBroadcast cancels any active notification for the given broadcast and
// drops its saved state. It reports whether an active notification was
// removed. bss may be nil.
func removeBroadcast(bn *BroadcastNotification, bss *BroadcastStates) (removed bool) {
	// Cancel the active notification, if there is one.
	if n := notifications.Get(broadcastNotificationIDPrefix + bn.id); n != nil {
		n.Delete()
		removed = true
	}

	// Drop any saved state for this broadcast.
	if bss != nil {
		delete(bss.States, bn.id)
	}

	return removed
}
// savingBroadcastStateLock serializes the read-modify-write cycle on the
// broadcast states record performed by markBroadcastAsRead.
var savingBroadcastStateLock sync.Mutex

// markBroadcastAsRead is the notification action function for broadcast
// notifications. When the user selects the "ack" action, it deletes the
// notification and persists the read timestamp, so the broadcast is not shown
// again until its repeat interval elapses, if any.
func markBroadcastAsRead(ctx context.Context, n *notifications.Notification) error {
	// Lock persisting broadcast state.
	savingBroadcastStateLock.Lock()
	defer savingBroadcastStateLock.Unlock()

	// Get notification data while holding the notification lock.
	var broadcastID, actionID string
	func() {
		n.Lock()
		defer n.Unlock()
		broadcastID = strings.TrimPrefix(n.EventID, broadcastNotificationIDPrefix)
		actionID = n.SelectedActionID
	}()

	// Check response.
	switch actionID {
	case "ack":
		// Acknowledged - continue below.
	case "":
		return fmt.Errorf("no action ID for %s", broadcastID)
	default:
		return fmt.Errorf("unexpected action ID for %s: %s", broadcastID, actionID)
	}

	// Get broadcast notification states; a missing record is expected and
	// replaced with a fresh state set.
	bss, err := getBroadcastStates()
	if err != nil {
		if !errors.Is(err, database.ErrNotFound) {
			return fmt.Errorf("failed to get broadcast notifications states: %w", err)
		}
		bss = newBroadcastStates()
	}

	// Get state for this notification, creating it if needed.
	bs, ok := bss.States[broadcastID]
	if !ok {
		bs = &BroadcastState{}
		bss.States[broadcastID] = bs
	}

	// Delete the notification to allow for timely repeats.
	n.Delete()

	// Mark as read and save to DB.
	log.Infof("broadcasts: user acknowledged broadcast %s", broadcastID)
	bs.Read = time.Now()
	return bss.save()
}

64
broadcasts/state.go Normal file
View file

@ -0,0 +1,64 @@
package broadcasts
import (
"fmt"
"sync"
"time"
"github.com/safing/portbase/database/record"
)
// broadcastStatesDBKey is the database key under which the broadcast states
// record is stored.
const broadcastStatesDBKey = "core:broadcasts/state"

// BroadcastStates holds states for broadcast notifications.
type BroadcastStates struct {
	record.Base
	sync.Mutex

	// States maps a broadcast ID to its state.
	States map[string]*BroadcastState
}

// BroadcastState holds state for a single broadcast notification.
type BroadcastState struct {
	// Read is the time the user acknowledged the broadcast; the zero value
	// means it was never acknowledged.
	Read time.Time
}

// save writes the broadcast states record to the database.
func (bss *BroadcastStates) save() error {
	return db.Put(bss)
}
// getBroadcastStates returns the broadcast states from the database.
func getBroadcastStates() (*BroadcastStates, error) {
	r, err := db.Get(broadcastStatesDBKey)
	if err != nil {
		return nil, err
	}

	// Unwrap a generic record into the typed struct, if necessary.
	if r.IsWrapped() {
		// Only allocate a new struct, if we need it.
		newRecord := &BroadcastStates{}
		err = record.Unwrap(r, newRecord)
		if err != nil {
			return nil, err
		}
		return newRecord, nil
	}

	// Otherwise, the record should already have the correct type.
	newRecord, ok := r.(*BroadcastStates)
	if !ok {
		return nil, fmt.Errorf("record not of type *BroadcastStates, but %T", r)
	}
	return newRecord, nil
}
// newBroadcastStates returns a fresh, empty BroadcastStates record with its
// database key already set.
func newBroadcastStates() *BroadcastStates {
	bss := &BroadcastStates{
		States: map[string]*BroadcastState{},
	}
	bss.SetKey(broadcastStatesDBKey)
	return bss
}

9
broadcasts/testdata/README.md vendored Normal file
View file

@ -0,0 +1,9 @@
# Testing Broadcast Notifications
```
# Reset state
curl -X POST http://127.0.0.1:817/api/v1/broadcasts/reset-state
# Simulate notifications
curl --upload-file notifications.yaml http://127.0.0.1:817/api/v1/broadcasts/simulate
```

22
broadcasts/testdata/notifications.yaml vendored Normal file
View file

@ -0,0 +1,22 @@
notifications:
test1:
title: "[TEST] Normal Broadcast"
message: "This is a normal broadcast without matching. (#1)"
test2:
title: "[TEST] Permanent Broadcast"
message: "This is a permanent broadcast without matching. (#2)"
type: 1 # Warning
permanent: true
test3:
title: "[TEST] Repeating Broadcast"
message: "This is a repeating broadcast without matching. (#3)"
repeat: "1m"
test4:
title: "[TEST] Matching Broadcast: PM version"
message: "This is a normal broadcast that matches the PM version. (#4)"
match: "NumericVersion > 8000"
test5:
title: "[TEST] Important Update"
message: "A critical update has been released, please update immediately. (#5)"
type: 3 # Error
attachToModule: true

View file

@ -19,7 +19,7 @@ import ( //nolint:gci,nolintlint
func main() {
// set information
info.Set("Portmaster", "0.8.7", "AGPLv3", true)
info.Set("Portmaster", "0.9.0", "AGPLv3", true)
// Configure metrics.
_ = metrics.SetNamespace("portmaster")

View file

@ -17,7 +17,7 @@ import (
)
func initializeLogFile(logFilePath string, identifier string, version string) *os.File {
logFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, 0o0444)
logFile, err := os.OpenFile(logFilePath, os.O_RDWR|os.O_CREATE, 0o0440)
if err != nil {
log.Printf("failed to create log file %s: %s\n", logFilePath, err)
return nil

View file

@ -77,7 +77,7 @@ func main() {
cobra.OnInitialize(initCobra)
// set meta info
info.Set("Portmaster Start", "0.8.6", "AGPLv3", false)
info.Set("Portmaster Start", "0.9.0", "AGPLv3", false)
// catch interrupt for clean shutdown
signalCh := make(chan os.Signal, 2)

View file

@ -121,6 +121,21 @@ func getExecArgs(opts *Options, cmdArgs []string) []string {
if stdinSignals {
args = append(args, "--input-signals")
}
if opts.Identifier == "app/portmaster-app.zip" {
// see https://www.freedesktop.org/software/systemd/man/pam_systemd.html#type=
if xdgSessionType := os.Getenv("XDG_SESSION_TYPE"); xdgSessionType == "wayland" {
// we're running the Portmaster UI App under Wayland so make sure we add some arguments
// required by Electron
args = append(args,
[]string{
"--enable-features=UseOzonePlatform",
"--ozone-platform=wayland",
}...,
)
}
}
args = append(args, cmdArgs...)
return args
}

View file

@ -11,7 +11,10 @@ import (
"github.com/safing/portmaster/updates/helper"
)
var reset bool
var (
reset bool
intelOnly bool
)
func init() {
rootCmd.AddCommand(updateCmd)
@ -19,6 +22,7 @@ func init() {
flags := updateCmd.Flags()
flags.BoolVar(&reset, "reset", false, "Delete all resources and re-download the basic set")
flags.BoolVar(&intelOnly, "intel-only", false, "Only make downloading intel updates mandatory")
}
var (
@ -49,6 +53,11 @@ func indexRequired(cmd *cobra.Command) bool {
}
func downloadUpdates() error {
// Check if only intel data is mandatory.
if intelOnly {
helper.IntelOnly()
}
// Set required updates.
registry.MandatoryUpdates = helper.MandatoryUpdates()
registry.AutoUnpack = helper.AutoUnpackUpdates()
@ -97,9 +106,11 @@ func downloadUpdates() error {
return fmt.Errorf("failed to unpack resources: %w", err)
}
// Fix chrome-sandbox permissions
if err := helper.EnsureChromeSandboxPermissions(registry); err != nil {
return fmt.Errorf("failed to fix electron permissions: %w", err)
if !intelOnly {
// Fix chrome-sandbox permissions
if err := helper.EnsureChromeSandboxPermissions(registry); err != nil {
return fmt.Errorf("failed to fix electron permissions: %w", err)
}
}
return nil

View file

@ -16,7 +16,7 @@ var (
module *modules.Module
selfcheckTask *modules.Task
selfcheckTaskRetryAfter = 10 * time.Second
selfcheckTaskRetryAfter = 5 * time.Second
// selfCheckIsFailing holds whether or not the self-check is currently
// failing. This helps other failure systems to not make noise when there is
@ -26,8 +26,16 @@ var (
// selfcheckFails counts how often the self check failed successively.
// selfcheckFails is not locked as it is only accessed by the self-check task.
selfcheckFails int
// selfcheckNetworkChangedFlag is used to track changed to the network for
// the self-check.
selfcheckNetworkChangedFlag = netenv.GetNetworkChangedFlag()
)
// selfcheckFailThreshold holds the threshold of how many times the selfcheck
// must fail before it is reported.
const selfcheckFailThreshold = 5
func init() {
module = modules.Register("compat", prep, start, stop, "base", "network", "interception", "netenv", "notifications")
@ -43,11 +51,17 @@ func prep() error {
}
func start() error {
startNotify()
selfcheckNetworkChangedFlag.Refresh()
selfcheckTask = module.NewTask("compatibility self-check", selfcheckTaskFunc).
Repeat(5 * time.Minute).
MaxDelay(selfcheckTaskRetryAfter).
Schedule(time.Now().Add(selfcheckTaskRetryAfter))
module.NewTask("clean notify thresholds", cleanNotifyThreshold).
Repeat(10 * time.Minute)
return module.RegisterEventHook(
netenv.ModuleName,
netenv.NetworkChangedEvent,
@ -67,35 +81,47 @@ func stop() error {
}
func selfcheckTaskFunc(ctx context.Context, task *modules.Task) error {
// Create tracing logger.
ctx, tracer := log.AddTracer(ctx)
defer tracer.Submit()
tracer.Tracef("compat: running self-check")
// Run selfcheck and return if successful.
issue, err := selfcheck(ctx)
if err == nil {
selfCheckIsFailing.UnSet()
selfcheckFails = 0
resetSystemIssue()
return nil
}
switch {
case err == nil:
// Successful.
tracer.Debugf("compat: self-check successful")
case issue == nil:
// Internal error.
tracer.Warningf("compat: %s", err)
case selfcheckNetworkChangedFlag.IsSet():
// The network changed, ignore the issue.
default:
// The self-check failed.
// Log result.
if issue != nil {
// Set state and increase counter.
selfCheckIsFailing.Set()
selfcheckFails++
log.Errorf("compat: %s", err)
if selfcheckFails >= 3 {
// Log and notify.
tracer.Errorf("compat: %s", err)
if selfcheckFails >= selfcheckFailThreshold {
issue.notify(err)
}
// Retry quicker when failed.
task.Schedule(time.Now().Add(selfcheckTaskRetryAfter))
} else {
selfCheckIsFailing.UnSet()
selfcheckFails = 0
// Only log internal errors, but don't notify.
log.Warningf("compat: %s", err)
return nil
}
// Reset self-check state.
selfcheckNetworkChangedFlag.Refresh()
selfCheckIsFailing.UnSet()
selfcheckFails = 0
resetSystemIssue()
return nil
}

View file

@ -3,21 +3,25 @@ package compat
import (
"context"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/safing/portbase/config"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/notifications"
"github.com/safing/portmaster/process"
"github.com/safing/portmaster/profile"
)
type baseIssue struct {
id string //nolint:structcheck // Inherited.
title string //nolint:structcheck // Inherited.
message string //nolint:structcheck // Inherited.
level notifications.Type //nolint:structcheck // Inherited.
id string //nolint:structcheck // Inherited.
title string //nolint:structcheck // Inherited.
message string //nolint:structcheck // Inherited.
level notifications.Type //nolint:structcheck // Inherited.
actions []*notifications.Action //nolint:structcheck // Inherited.
}
type systemIssue baseIssue
@ -25,6 +29,10 @@ type systemIssue baseIssue
type appIssue baseIssue
var (
// Copy of firewall.CfgOptionDNSQueryInterceptionKey.
cfgOptionDNSQueryInterceptionKey = "filter/dnsQueryInterception"
dnsQueryInterception config.BoolOption
systemIssueNotification *notifications.Notification
systemIssueNotificationLock sync.Mutex
@ -40,12 +48,27 @@ var (
message: "Portmaster detected that something is interfering with its operation. This could be a VPN, an Anti-Virus or another network protection software. Please check if you are running an incompatible [VPN client](https://docs.safing.io/portmaster/install/status/vpn-compatibility) or [software](https://docs.safing.io/portmaster/install/status/software-compatibility). Otherwise, please report the issue via [GitHub](https://github.com/safing/portmaster/issues) or send a mail to [support@safing.io](mailto:support@safing.io) so we can help you out.",
level: notifications.Error,
}
// manualDNSSetupRequired is additionally initialized in startNotify().
manualDNSSetupRequired = &systemIssue{
id: "compat:manual-dns-setup-required",
title: "Manual DNS Setup Required",
level: notifications.Error,
actions: []*notifications.Action{
{
Text: "Revert",
Type: notifications.ActionTypeOpenSetting,
Payload: &notifications.ActionTypeOpenSettingPayload{
Key: cfgOptionDNSQueryInterceptionKey,
},
},
},
}
manualDNSSetupRequiredMessage = "You have disabled Seamless DNS Integration. As a result, Portmaster can no longer protect you or filter connections reliably. To fix this, you have to manually configure %s as the DNS Server in your system and in any conflicting application. This message will disappear 10 seconds after correct configuration."
secureDNSBypassIssue = &appIssue{
id: "compat:secure-dns-bypass-%s",
title: "Detected %s Bypass Attempt",
message: `[APPNAME] is bypassing Portmaster's firewall functions through its Secure DNS resolver. Portmaster can no longer protect or filter connections coming from [APPNAME]. Disable Secure DNS within [APPNAME] to restore functionality.
Rest assured that Portmaster already handles Secure DNS for your whole device.`,
id: "compat:secure-dns-bypass-%s",
title: "Blocked Bypass Attempt by %s",
message: `[APPNAME] is using its own Secure DNS resolver, which would bypass Portmaster's firewall protections. If [APPNAME] experiences problems, disable Secure DNS within [APPNAME] to restore functionality. Rest assured that Portmaster handles Secure DNS for your whole device, including [APPNAME].`,
// TODO: Add this when the new docs page is finished:
// , or [find out about other options](link to new docs page)
level: notifications.Warning,
@ -58,6 +81,37 @@ Rest assured that Portmaster already handles Secure DNS for your whole device.`,
}
)
// startNotify initializes the notification state of this package: it wires
// up the dnsQueryInterception config option and sets the initial message of
// the manual-DNS-setup issue, using the default listen IP until
// SetNameserverListenIP is called.
func startNotify() {
	dnsQueryInterception = config.Concurrent.GetAsBool(cfgOptionDNSQueryInterceptionKey, true)

	// Build the message first, then swap it in under the lock.
	msg := fmt.Sprintf(manualDNSSetupRequiredMessage, `"127.0.0.1"`)

	systemIssueNotificationLock.Lock()
	defer systemIssueNotificationLock.Unlock()
	manualDNSSetupRequired.message = msg
}
// SetNameserverListenIP sets the IP address the nameserver is listening on.
// The IP address is used in compatibility notifications.
func SetNameserverListenIP(ip net.IP) {
	// Quote the IP the same way the default message does.
	quoted := `"` + ip.String() + `"`

	systemIssueNotificationLock.Lock()
	defer systemIssueNotificationLock.Unlock()
	manualDNSSetupRequired.message = fmt.Sprintf(manualDNSSetupRequiredMessage, quoted)
}
// systemCompatOrManualDNSIssue selects which system issue applies to a failed
// DNS self-check: the generic compatibility issue while DNS query
// interception is enabled, or the manual-DNS-setup issue when the user has
// disabled interception.
func systemCompatOrManualDNSIssue() *systemIssue {
	if !dnsQueryInterception() {
		return manualDNSSetupRequired
	}
	return systemCompatibilityIssue
}
func (issue *systemIssue) notify(err error) {
systemIssueNotificationLock.Lock()
defer systemIssueNotificationLock.Unlock()
@ -74,11 +128,12 @@ func (issue *systemIssue) notify(err error) {
// Create new notification.
n := &notifications.Notification{
EventID: issue.id,
Type: issue.level,
Title: issue.title,
Message: issue.message,
ShowOnSystem: true,
EventID: issue.id,
Type: issue.level,
Title: issue.title,
Message: issue.message,
ShowOnSystem: true,
AvailableActions: issue.actions,
}
notifications.Notify(n)
@ -124,9 +179,6 @@ func (issue *appIssue) notify(proc *process.Process) {
proc.Path,
)
// Build message.
message := strings.ReplaceAll(issue.message, "[APPNAME]", p.Name)
// Check if we already have this notification.
eventID := fmt.Sprintf(issue.id, p.ID)
n := notifications.Get(eventID)
@ -134,19 +186,30 @@ func (issue *appIssue) notify(proc *process.Process) {
return
}
// Otherwise, create a new one.
// Check if we reach the threshold to actually send a notification.
if !isOverThreshold(eventID) {
return
}
// Build message.
message := strings.ReplaceAll(issue.message, "[APPNAME]", p.Name)
// Create a new notification.
n = &notifications.Notification{
EventID: eventID,
Type: issue.level,
Title: fmt.Sprintf(issue.title, p.Name),
Message: message,
ShowOnSystem: true,
AvailableActions: []*notifications.Action{
EventID: eventID,
Type: issue.level,
Title: fmt.Sprintf(issue.title, p.Name),
Message: message,
ShowOnSystem: true,
AvailableActions: issue.actions,
}
if len(n.AvailableActions) == 0 {
n.AvailableActions = []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
},
}
}
notifications.Notify(n)
@ -171,3 +234,54 @@ func (issue *appIssue) notify(proc *process.Process) {
return nil
})
}
const (
	// notifyThresholdMinIncidents is the number of incidents for the same
	// event ID that must occur within the reset window before a
	// notification is actually sent.
	notifyThresholdMinIncidents = 11

	// notifyThresholdResetAfter is the window after which an incident
	// counter is considered stale and starts over.
	notifyThresholdResetAfter = 2 * time.Minute
)

var (
	// notifyThresholds tracks incident counters per event ID.
	notifyThresholds = make(map[string]*notifyThreshold)

	// notifyThresholdsLock guards notifyThresholds.
	notifyThresholdsLock sync.Mutex
)

// notifyThreshold counts incidents of one event ID within a time window.
type notifyThreshold struct {
	FirstSeen time.Time
	Incidents uint
}

// expired reports whether this counter's time window has elapsed.
func (nt *notifyThreshold) expired() bool {
	return time.Since(nt.FirstSeen) > notifyThresholdResetAfter
}

// isOverThreshold records one incident for id and reports whether enough
// incidents have accumulated within the window to warrant a notification.
func isOverThreshold(id string) bool {
	notifyThresholdsLock.Lock()
	defer notifyThresholdsLock.Unlock()

	// Bump the existing counter if it is still fresh.
	if entry, ok := notifyThresholds[id]; ok && !entry.expired() {
		entry.Incidents++
		return entry.Incidents >= notifyThresholdMinIncidents
	}

	// Start (or restart) the counter for this ID.
	notifyThresholds[id] = &notifyThreshold{
		FirstSeen: time.Now(),
		Incidents: 1,
	}
	return false
}
// cleanNotifyThreshold removes expired incident counters from the threshold
// map. It is meant to run periodically as a module task and never returns an
// error.
func cleanNotifyThreshold(ctx context.Context, task *modules.Task) error {
	notifyThresholdsLock.Lock()
	defer notifyThresholdsLock.Unlock()

	for id, entry := range notifyThresholds {
		if entry.expired() {
			delete(notifyThresholds, id)
		}
	}

	return nil
}

View file

@ -28,12 +28,12 @@ var (
systemIntegrationCheckDialNet = fmt.Sprintf("ip4:%d", uint8(SystemIntegrationCheckProtocol))
systemIntegrationCheckDialIP = SystemIntegrationCheckDstIP.String()
systemIntegrationCheckPackets = make(chan packet.Packet, 1)
systemIntegrationCheckWaitDuration = 10 * time.Second
systemIntegrationCheckWaitDuration = 20 * time.Second
// DNSCheckInternalDomainScope is the domain scope to use for dns checks.
DNSCheckInternalDomainScope = ".self-check." + resolver.InternalSpecialUseDomain
dnsCheckReceivedDomain = make(chan string, 1)
dnsCheckWaitDuration = 10 * time.Second
dnsCheckWaitDuration = 20 * time.Second
dnsCheckAnswerLock sync.Mutex
dnsCheckAnswer net.IP
)
@ -61,7 +61,7 @@ func selfcheck(ctx context.Context) (issue *systemIssue, err error) {
if err != nil {
return nil, fmt.Errorf("failed to create system integration conn: %w", err)
}
_, err = conn.Write([]byte("SELF-CHECK"))
_, err = conn.Write([]byte("PORTMASTER SELF CHECK"))
if err != nil {
return nil, fmt.Errorf("failed to send system integration packet: %w", err)
}
@ -70,7 +70,7 @@ func selfcheck(ctx context.Context) (issue *systemIssue, err error) {
select {
case <-systemIntegrationCheckPackets:
// Check passed!
log.Tracef("compat: self-check #1: system integration check passed")
log.Tracer(ctx).Tracef("compat: self-check #1: system integration check passed")
case <-time.After(systemIntegrationCheckWaitDuration):
return systemIntegrationIssue, fmt.Errorf("self-check #1: system integration check failed: did not receive test packet after %s", systemIntegrationCheckWaitDuration)
case <-ctx.Done():
@ -139,12 +139,12 @@ func selfcheck(ctx context.Context) (issue *systemIssue, err error) {
select {
case receivedTestDomain := <-dnsCheckReceivedDomain:
if receivedTestDomain != randomSubdomain {
return systemCompatibilityIssue, fmt.Errorf("self-check #2: dns integration check failed: received unmatching subdomain %q", receivedTestDomain)
return systemCompatOrManualDNSIssue(), fmt.Errorf("self-check #2: dns integration check failed: received unmatching subdomain %q", receivedTestDomain)
}
case <-time.After(dnsCheckWaitDuration):
return systemCompatibilityIssue, fmt.Errorf("self-check #2: dns integration check failed: did not receive test query after %s", dnsCheckWaitDuration)
return systemCompatOrManualDNSIssue(), fmt.Errorf("self-check #2: dns integration check failed: did not receive test query after %s", dnsCheckWaitDuration)
}
log.Tracef("compat: self-check #2: dns integration query check passed")
log.Tracer(ctx).Tracef("compat: self-check #2: dns integration query check passed")
// Step 3: Have the nameserver respond with random data in the answer section.
@ -164,7 +164,7 @@ func selfcheck(ctx context.Context) (issue *systemIssue, err error) {
if !dnsCheckReturnedIP.Equal(randomAnswer) {
return systemCompatibilityIssue, fmt.Errorf("self-check #3: dns integration check failed: received unmatching response %q", dnsCheckReturnedIP)
}
log.Tracef("compat: self-check #3: dns integration response check passed")
log.Tracer(ctx).Tracef("compat: self-check #3: dns integration response check passed")
return nil, nil
}

View file

@ -2,8 +2,6 @@ package base
import (
"github.com/safing/portbase/database"
// Dependencies.
_ "github.com/safing/portbase/database/dbmodule"
_ "github.com/safing/portbase/database/storage/bbolt"
)

View file

@ -7,10 +7,12 @@ import (
"github.com/safing/portbase/modules"
"github.com/safing/portbase/modules/subsystems"
"github.com/safing/portmaster/updates"
_ "github.com/safing/portmaster/broadcasts"
_ "github.com/safing/portmaster/netenv"
_ "github.com/safing/portmaster/netquery"
_ "github.com/safing/portmaster/status"
_ "github.com/safing/portmaster/ui"
"github.com/safing/portmaster/updates"
)
const (
@ -25,7 +27,7 @@ var (
)
func init() {
module = modules.Register("core", prep, start, nil, "base", "subsystems", "status", "updates", "api", "notifications", "ui", "netenv", "network", "interception", "compat")
module = modules.Register("core", prep, start, nil, "base", "subsystems", "status", "updates", "api", "notifications", "ui", "netenv", "network", "netquery", "interception", "compat", "broadcasts")
subsystems.Register(
"core",
"Core",

View file

@ -22,11 +22,7 @@ import (
)
const (
deniedMsgUnidentified = `%wFailed to identify the requesting process.
You can enable the Development Mode to disable API authentication for development purposes.
If you are seeing this message in the Portmaster App, please restart the app or right-click and select "Reload".
In the future, this issue will be remediated automatically.`
deniedMsgUnidentified = `%wFailed to identify the requesting process. Reload to try again.`
deniedMsgSystem = `%wSystem access to the Portmaster API is not permitted.
You can enable the Development Mode to disable API authentication for development purposes.`
@ -136,6 +132,12 @@ func authenticateAPIRequest(ctx context.Context, pktInfo *packet.Info) (retry bo
if authenticatedPath == "" {
return false, fmt.Errorf(deniedMsgMisconfigured, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user
}
// Get real path.
authenticatedPath, err = filepath.EvalSymlinks(authenticatedPath)
if err != nil {
return false, fmt.Errorf(deniedMsgUnidentified, api.ErrAPIAccessDeniedMessage) //nolint:stylecheck // message for user
}
// Add filepath separator to confine to directory.
authenticatedPath += string(filepath.Separator)
// Get process of request.
@ -157,8 +159,10 @@ func authenticateAPIRequest(ctx context.Context, pktInfo *packet.Info) (retry bo
break checkLevelsLoop
default: // normal process
// Check if the requesting process is in database root / updates dir.
if strings.HasPrefix(proc.Path, authenticatedPath) {
return false, nil
if realPath, err := filepath.EvalSymlinks(proc.Path); err == nil {
if strings.HasPrefix(realPath, authenticatedPath) {
return false, nil
}
}
}

View file

@ -43,8 +43,12 @@ func PreventBypassing(ctx context.Context, conn *network.Connection) (endpoints.
return endpoints.NoMatch, "", nil
}
// Block bypass attempts using an encrypted DNS server.
// Block bypass attempts using an (encrypted) DNS server.
switch {
case conn.Entity.Port == 53:
return endpoints.Denied,
"blocked DNS query, manual dns setup required",
nsutil.BlockIP()
case conn.Entity.Port == 853:
// Block connections to port 853 - DNS over TLS.
fallthrough

View file

@ -23,6 +23,10 @@ var (
cfgOptionPermanentVerdictsOrder = 96
permanentVerdicts config.BoolOption
CfgOptionDNSQueryInterceptionKey = "filter/dnsQueryInterception"
cfgOptionDNSQueryInterceptionOrder = 97
dnsQueryInterception config.BoolOption
devMode config.BoolOption
apiListenAddress config.StringOption
)
@ -46,6 +50,24 @@ func registerConfig() error {
}
permanentVerdicts = config.Concurrent.GetAsBool(CfgOptionPermanentVerdictsKey, true)
err = config.Register(&config.Option{
Name: "Seamless DNS Integration",
Key: CfgOptionDNSQueryInterceptionKey,
Description: "Intercept and redirect astray DNS queries to the Portmaster's internal DNS server. This enables seamless DNS integration without having to configure the system or other software. However, this may lead to compatibility issues with other software that attempts the same.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: cfgOptionDNSQueryInterceptionOrder,
config.CategoryAnnotation: "Advanced",
},
})
if err != nil {
return err
}
dnsQueryInterception = config.Concurrent.GetAsBool(CfgOptionDNSQueryInterceptionKey, true)
err = config.Register(&config.Option{
Name: "Prompt Desktop Notifications",
Key: CfgOptionAskWithSystemNotificationsKey,

View file

@ -263,7 +263,7 @@ func UpdateIPsAndCNAMEs(q *resolver.Query, rrCache *resolver.RRCache, conn *netw
// Package IPs and CNAMEs into IPInfo structs.
for _, ip := range ips {
// Never save domain attributions for localhost IPs.
if netutils.ClassifyIP(ip) == netutils.HostLocal {
if netutils.GetIPScope(ip) == netutils.HostLocal {
continue
}

View file

@ -2,11 +2,11 @@ package firewall
import (
"github.com/safing/portbase/config"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/modules/subsystems"
// Dependency.
_ "github.com/safing/portmaster/core"
"github.com/safing/portmaster/intel/filterlists"
"github.com/safing/spn/captain"
)
@ -14,10 +14,13 @@ var (
filterModule *modules.Module
filterEnabled config.BoolOption
tunnelEnabled config.BoolOption
unbreakFilterListIDs = []string{"UNBREAK"}
resolvedUnbreakFilterListIDs []string
)
func init() {
filterModule = modules.Register("filter", filterPrep, nil, nil, "core", "intel")
filterModule = modules.Register("filter", filterPrep, filterStart, nil, "core", "intel")
subsystems.Register(
"filter",
"Privacy Filter",
@ -49,3 +52,14 @@ func filterPrep() (err error) {
tunnelEnabled = config.Concurrent.GetAsBool(captain.CfgOptionEnableSPNKey, false)
return nil
}
// filterStart resolves the "unbreak" filter list IDs once at module start.
// Resolution failures are only logged, so the filter module starts regardless.
// TODO: Re-resolve IDs when filterlist index changes.
func filterStart() error {
	ids, err := filterlists.ResolveListIDs(unbreakFilterListIDs)
	if err != nil {
		log.Warningf("filter: failed to resolve unbreak filter list IDs: %s", err)
		return nil
	}

	resolvedUnbreakFilterListIDs = ids
	return nil
}

View file

@ -16,8 +16,6 @@ import (
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portmaster/compat"
// Dependency.
_ "github.com/safing/portmaster/core/base"
"github.com/safing/portmaster/firewall/inspection"
"github.com/safing/portmaster/firewall/interception"
@ -332,8 +330,9 @@ func initialHandler(conn *network.Connection, pkt packet.Packet) {
conn.Accept("connection by Portmaster", noReasonOptionKey)
conn.Internal = true
// Redirect outbound DNS packests,
case pkt.IsOutbound() &&
// Redirect outbound DNS packets if enabled,
case dnsQueryInterception() &&
pkt.IsOutbound() &&
pkt.Info().DstPort == 53 &&
// that don't match the address of our nameserver,
nameserverIPMatcherReady.IsSet() &&
@ -341,7 +340,7 @@ func initialHandler(conn *network.Connection, pkt packet.Packet) {
// and are not broadcast queries by us.
// Context:
// - Unicast queries by the resolver are pre-authenticated.
// - Unicast qeries by the compat self-check should be redirected.
// - Unicast queries by the compat self-check should be redirected.
!(conn.Process().Pid == ownPID &&
conn.Entity.IPScope == netutils.LocalMulticast):

View file

@ -153,7 +153,8 @@ func (q *Queue) handleError(e error) int {
// Close the existing socket
if nf := q.getNfq(); nf != nil {
_ = nf.Close()
// Call Close() on the Con directly, as nf.Close() calls waitgroup.Wait(), which then may deadlock.
_ = nf.Con.Close()
}
// Trigger a restart of the queue

View file

@ -141,6 +141,13 @@ func (pkt *packet) Drop() error {
}
// PermanentAccept accepts the packet with the always-accept mark, so that
// the rest of the connection is handled in-kernel via the connection mark.
// Outbound loopback packets are only accepted once (see below).
func (pkt *packet) PermanentAccept() error {
	// If the packet is localhost only, do not permanently accept the outgoing
	// packet, as the packet mark will be copied to the connection mark, which
	// will stick and it will bypass the incoming queue.
	if !pkt.Info().Inbound && pkt.Info().Dst.IsLoopback() {
		// Fall back to a one-time accept instead.
		return pkt.Accept()
	}
	return pkt.mark(MarkAcceptAlways)
}

View file

@ -10,8 +10,8 @@ import (
"github.com/hashicorp/go-multierror"
"github.com/safing/portbase/log"
"github.com/safing/portbase/notifications"
"github.com/safing/portmaster/firewall/interception/nfq"
"github.com/safing/portmaster/netenv"
"github.com/safing/portmaster/network/packet"
)
@ -46,83 +46,89 @@ type nfQueue interface {
func init() {
v4chains = []string{
"mangle C170",
"mangle C171",
"filter C17",
"mangle PORTMASTER-INGEST-OUTPUT",
"mangle PORTMASTER-INGEST-INPUT",
"filter PORTMASTER-FILTER",
"nat PORTMASTER-REDIRECT",
}
v4rules = []string{
"mangle C170 -j CONNMARK --restore-mark",
"mangle C170 -m mark --mark 0 -j NFQUEUE --queue-num 17040 --queue-bypass",
"mangle PORTMASTER-INGEST-OUTPUT -j CONNMARK --restore-mark",
"mangle PORTMASTER-INGEST-OUTPUT -m mark --mark 0 -j NFQUEUE --queue-num 17040 --queue-bypass",
"mangle C171 -j CONNMARK --restore-mark",
"mangle C171 -m mark --mark 0 -j NFQUEUE --queue-num 17140 --queue-bypass",
"mangle PORTMASTER-INGEST-INPUT -j CONNMARK --restore-mark",
"mangle PORTMASTER-INGEST-INPUT -m mark --mark 0 -j NFQUEUE --queue-num 17140 --queue-bypass",
"filter C17 -m mark --mark 0 -j DROP",
"filter C17 -m mark --mark 1700 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 0 -j DROP",
"filter PORTMASTER-FILTER -m mark --mark 1700 -j RETURN",
// Accepting ICMP packets with mark 1701 is required for rejecting to work,
// as the rejection ICMP packet will have the same mark. Blocked ICMP
// packets will always result in a drop within the Portmaster.
"filter C17 -m mark --mark 1701 -p icmp -j RETURN",
"filter C17 -m mark --mark 1701 -j REJECT --reject-with icmp-host-prohibited",
"filter C17 -m mark --mark 1702 -j DROP",
"filter C17 -j CONNMARK --save-mark",
"filter C17 -m mark --mark 1710 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1701 -p icmp -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1701 -j REJECT --reject-with icmp-host-prohibited",
"filter PORTMASTER-FILTER -m mark --mark 1702 -j DROP",
"filter PORTMASTER-FILTER -j CONNMARK --save-mark",
"filter PORTMASTER-FILTER -m mark --mark 1710 -j RETURN",
// Accepting ICMP packets with mark 1711 is required for rejecting to work,
// as the rejection ICMP packet will have the same mark. Blocked ICMP
// packets will always result in a drop within the Portmaster.
"filter C17 -m mark --mark 1711 -p icmp -j RETURN",
"filter C17 -m mark --mark 1711 -j REJECT --reject-with icmp-host-prohibited",
"filter C17 -m mark --mark 1712 -j DROP",
"filter C17 -m mark --mark 1717 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1711 -p icmp -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1711 -j REJECT --reject-with icmp-host-prohibited",
"filter PORTMASTER-FILTER -m mark --mark 1712 -j DROP",
"filter PORTMASTER-FILTER -m mark --mark 1717 -j RETURN",
"nat PORTMASTER-REDIRECT -m mark --mark 1799 -p udp -j DNAT --to 127.0.0.17:53",
"nat PORTMASTER-REDIRECT -m mark --mark 1717 -p tcp -j DNAT --to 127.0.0.17:717",
"nat PORTMASTER-REDIRECT -m mark --mark 1717 -p udp -j DNAT --to 127.0.0.17:717",
// "nat PORTMASTER-REDIRECT -m mark --mark 1717 ! -p tcp ! -p udp -j DNAT --to 127.0.0.17",
}
v4once = []string{
"mangle OUTPUT -j C170",
"mangle INPUT -j C171",
"filter OUTPUT -j C17",
"filter INPUT -j C17",
"nat OUTPUT -m mark --mark 1799 -p udp -j DNAT --to 127.0.0.17:53",
"nat OUTPUT -m mark --mark 1717 -p tcp -j DNAT --to 127.0.0.17:717",
"nat OUTPUT -m mark --mark 1717 -p udp -j DNAT --to 127.0.0.17:717",
// "nat OUTPUT -m mark --mark 1717 ! -p tcp ! -p udp -j DNAT --to 127.0.0.17",
"mangle OUTPUT -j PORTMASTER-INGEST-OUTPUT",
"mangle INPUT -j PORTMASTER-INGEST-INPUT",
"filter OUTPUT -j PORTMASTER-FILTER",
"filter INPUT -j PORTMASTER-FILTER",
"nat OUTPUT -j PORTMASTER-REDIRECT",
}
v6chains = []string{
"mangle C170",
"mangle C171",
"filter C17",
"mangle PORTMASTER-INGEST-OUTPUT",
"mangle PORTMASTER-INGEST-INPUT",
"filter PORTMASTER-FILTER",
"nat PORTMASTER-REDIRECT",
}
v6rules = []string{
"mangle C170 -j CONNMARK --restore-mark",
"mangle C170 -m mark --mark 0 -j NFQUEUE --queue-num 17060 --queue-bypass",
"mangle PORTMASTER-INGEST-OUTPUT -j CONNMARK --restore-mark",
"mangle PORTMASTER-INGEST-OUTPUT -m mark --mark 0 -j NFQUEUE --queue-num 17060 --queue-bypass",
"mangle C171 -j CONNMARK --restore-mark",
"mangle C171 -m mark --mark 0 -j NFQUEUE --queue-num 17160 --queue-bypass",
"mangle PORTMASTER-INGEST-INPUT -j CONNMARK --restore-mark",
"mangle PORTMASTER-INGEST-INPUT -m mark --mark 0 -j NFQUEUE --queue-num 17160 --queue-bypass",
"filter C17 -m mark --mark 0 -j DROP",
"filter C17 -m mark --mark 1700 -j RETURN",
"filter C17 -m mark --mark 1701 -p icmpv6 -j RETURN",
"filter C17 -m mark --mark 1701 -j REJECT --reject-with icmp6-adm-prohibited",
"filter C17 -m mark --mark 1702 -j DROP",
"filter C17 -j CONNMARK --save-mark",
"filter C17 -m mark --mark 1710 -j RETURN",
"filter C17 -m mark --mark 1711 -p icmpv6 -j RETURN",
"filter C17 -m mark --mark 1711 -j REJECT --reject-with icmp6-adm-prohibited",
"filter C17 -m mark --mark 1712 -j DROP",
"filter C17 -m mark --mark 1717 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 0 -j DROP",
"filter PORTMASTER-FILTER -m mark --mark 1700 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1701 -p icmpv6 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1701 -j REJECT --reject-with icmp6-adm-prohibited",
"filter PORTMASTER-FILTER -m mark --mark 1702 -j DROP",
"filter PORTMASTER-FILTER -j CONNMARK --save-mark",
"filter PORTMASTER-FILTER -m mark --mark 1710 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1711 -p icmpv6 -j RETURN",
"filter PORTMASTER-FILTER -m mark --mark 1711 -j REJECT --reject-with icmp6-adm-prohibited",
"filter PORTMASTER-FILTER -m mark --mark 1712 -j DROP",
"filter PORTMASTER-FILTER -m mark --mark 1717 -j RETURN",
"nat PORTMASTER-REDIRECT -m mark --mark 1799 -p udp -j DNAT --to [::1]:53",
"nat PORTMASTER-REDIRECT -m mark --mark 1717 -p tcp -j DNAT --to [::1]:717",
"nat PORTMASTER-REDIRECT -m mark --mark 1717 -p udp -j DNAT --to [::1]:717",
// "nat PORTMASTER-REDIRECT -m mark --mark 1717 ! -p tcp ! -p udp -j DNAT --to [::1]",
}
v6once = []string{
"mangle OUTPUT -j C170",
"mangle INPUT -j C171",
"filter OUTPUT -j C17",
"filter INPUT -j C17",
"nat OUTPUT -m mark --mark 1799 -p udp -j DNAT --to [::1]:53",
"nat OUTPUT -m mark --mark 1717 -p tcp -j DNAT --to [::1]:717",
"nat OUTPUT -m mark --mark 1717 -p udp -j DNAT --to [::1]:717",
// "nat OUTPUT -m mark --mark 1717 ! -p tcp ! -p udp -j DNAT --to [::1]",
"mangle OUTPUT -j PORTMASTER-INGEST-OUTPUT",
"mangle INPUT -j PORTMASTER-INGEST-INPUT",
"filter OUTPUT -j PORTMASTER-FILTER",
"filter INPUT -j PORTMASTER-FILTER",
"nat OUTPUT -j PORTMASTER-REDIRECT",
}
// Reverse because we'd like to insert in a loop
@ -135,13 +141,10 @@ func activateNfqueueFirewall() error {
return err
}
if err := activateIPTables(iptables.ProtocolIPv6, v6rules, v6once, v6chains); err != nil {
notifications.NotifyError(
"interception:ipv6-possibly-disabled",
"Is IPv6 enabled?",
"The Portmaster succeeded with IPv4 network integration, but failed with IPv6 integration. Please make sure IPv6 is enabled on your device.",
)
return err
if netenv.IPv6Enabled() {
if err := activateIPTables(iptables.ProtocolIPv6, v6rules, v6once, v6chains); err != nil {
return err
}
}
return nil
@ -157,8 +160,10 @@ func DeactivateNfqueueFirewall() error {
}
// IPv6
if err := deactivateIPTables(iptables.ProtocolIPv6, v6once, v6chains); err != nil {
result = multierror.Append(result, err)
if netenv.IPv6Enabled() {
if err := deactivateIPTables(iptables.ProtocolIPv6, v6once, v6chains); err != nil {
result = multierror.Append(result, err)
}
}
return result.ErrorOrNil()
@ -258,15 +263,22 @@ func StartNfqueueInterception(packets chan<- packet.Packet) (err error) {
_ = Stop()
return fmt.Errorf("nfqueue(IPv4, in): %w", err)
}
out6Queue, err = nfq.New(17060, true)
if err != nil {
_ = Stop()
return fmt.Errorf("nfqueue(IPv6, out): %w", err)
}
in6Queue, err = nfq.New(17160, true)
if err != nil {
_ = Stop()
return fmt.Errorf("nfqueue(IPv6, in): %w", err)
if netenv.IPv6Enabled() {
out6Queue, err = nfq.New(17060, true)
if err != nil {
_ = Stop()
return fmt.Errorf("nfqueue(IPv6, out): %w", err)
}
in6Queue, err = nfq.New(17160, true)
if err != nil {
_ = Stop()
return fmt.Errorf("nfqueue(IPv6, in): %w", err)
}
} else {
log.Warningf("interception: no IPv6 stack detected, disabling IPv6 network integration")
out6Queue = &disabledNfQueue{}
in6Queue = &disabledNfQueue{}
}
go handleInterception(packets)
@ -321,3 +333,11 @@ func handleInterception(packets chan<- packet.Packet) {
}
}
}
// disabledNfQueue is a no-op stand-in for a real nfqueue, used when no IPv6
// stack is detected and no IPv6 queues are created.
type disabledNfQueue struct{}

// PacketChannel returns a nil channel; receiving from it blocks forever.
func (dnfq *disabledNfQueue) PacketChannel() <-chan packet.Packet {
	return nil
}

// Destroy is a no-op, as there is nothing to tear down.
func (dnfq *disabledNfQueue) Destroy() {}

View file

@ -3,7 +3,9 @@ package firewall
import (
"context"
"fmt"
"net"
"path/filepath"
"strconv"
"strings"
"github.com/agext/levenshtein"
@ -41,6 +43,7 @@ type deciderFn func(context.Context, *network.Connection, *profile.LayeredProfil
var defaultDeciders = []deciderFn{
checkPortmasterConnection,
checkSelfCommunication,
checkIfBroadcastReply,
checkConnectionType,
checkConnectionScope,
checkEndpointLists,
@ -182,6 +185,46 @@ func checkSelfCommunication(ctx context.Context, conn *network.Connection, _ *pr
return false
}
// checkIfBroadcastReply accepts an inbound connection when it matches an
// earlier outbound multicast/broadcast query into the same local network (as
// found by network.GetMulticastRequestConn). It returns true when a verdict
// was set.
func checkIfBroadcastReply(ctx context.Context, conn *network.Connection, _ *profile.LayeredProfile, _ packet.Packet) bool {
	// Guard clauses: only inbound connections of identified processes are
	// eligible.
	switch {
	case !conn.Inbound:
		return false
	case !conn.Process().IsIdentified():
		return false
	}

	// The remote IP must belong to one of our local networks.
	localNet, err := netenv.GetLocalNetwork(conn.Entity.IP)
	if err != nil {
		log.Tracer(ctx).Warningf("filter: failed to get local network: %s", err)
		return false
	}
	if localNet == nil {
		return false
	}

	// Look for the outbound multicast/broadcast request that this
	// connection replies to.
	request := network.GetMulticastRequestConn(conn, localNet)
	if request == nil {
		return false
	}

	// Accept, citing the original request as the reason.
	remote := net.JoinHostPort(
		request.Entity.IP.String(),
		strconv.Itoa(int(request.Entity.Port)),
	)
	reason := fmt.Sprintf(
		"response to multi/broadcast query to %s/%s",
		packet.IPProtocol(request.Entity.Protocol),
		remote,
	)
	conn.Accept(reason, "")
	return true
}
func checkEndpointLists(ctx context.Context, conn *network.Connection, p *profile.LayeredProfile, _ packet.Packet) bool {
// DNS request from the system resolver require a special decision process,
// because the original requesting process is not known. Here, we only check
@ -389,6 +432,16 @@ func checkFilterLists(ctx context.Context, conn *network.Connection, p *profile.
result, reason := p.MatchFilterLists(ctx, conn.Entity)
switch result {
case endpoints.Denied:
// If the connection matches a filter list, check if the "unbreak" list matches too and abort blocking.
for _, blockedListID := range conn.Entity.BlockedByLists {
for _, unbreakListID := range resolvedUnbreakFilterListIDs {
if blockedListID == unbreakListID {
log.Tracer(ctx).Debugf("filter: unbreak filter %s matched, ignoring other filter list matches", unbreakListID)
return false
}
}
}
// Otherwise, continue with blocking.
conn.DenyWithContext(reason.String(), profile.CfgOptionFilterListsKey, reason.Context())
return true
case endpoints.NoMatch:
@ -439,10 +492,7 @@ func checkDomainHeuristics(ctx context.Context, conn *network.Connection, p *pro
trimmedDomain := strings.TrimRight(conn.Entity.Domain, ".")
etld1, err := publicsuffix.EffectiveTLDPlusOne(trimmedDomain)
if err != nil {
// we don't apply any checks here and let the request through
// because a malformed domain-name will likely be dropped by
// checks better suited for that.
log.Tracer(ctx).Warningf("filter: failed to get eTLD+1: %s", err)
// Don't run the check if the domain is a TLD.
return false
}

View file

@ -34,7 +34,7 @@ func checkTunneling(ctx context.Context, conn *network.Connection, pkt packet.Pa
case conn.Process().Pid == ownPID:
// Bypass tunneling for certain own connections.
switch {
case captain.ClientBootstrapping():
case !captain.ClientReady():
return
case captain.IsExcepted(conn.Entity.IP):
return
@ -42,10 +42,10 @@ func checkTunneling(ctx context.Context, conn *network.Connection, pkt packet.Pa
}
// Check more extensively for Local/LAN connections.
myNet, err := netenv.IsMyNet(conn.Entity.IP)
localNet, err := netenv.GetLocalNetwork(conn.Entity.IP)
if err != nil {
log.Warningf("firewall: failed to check if %s is in my net: %s", conn.Entity.IP, err)
} else if myNet {
} else if localNet != nil {
// With IPv6, just checking the IP scope is not enough, as the host very
// likely has a public IPv6 address.
// Don't tunnel LAN connections.

87
go.mod
View file

@ -1,30 +1,87 @@
module github.com/safing/portmaster
go 1.15
go 1.18
require (
github.com/agext/levenshtein v1.2.3
github.com/cookieo9/resources-go v0.0.0-20150225115733-d27c04069d0d
github.com/coreos/go-iptables v0.6.0
github.com/florianl/go-nfqueue v1.3.0
github.com/florianl/go-nfqueue v1.3.1
github.com/ghodss/yaml v1.0.0
github.com/godbus/dbus/v5 v5.1.0
github.com/google/gopacket v1.1.19
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-version v1.4.0
github.com/mdlayher/socket v0.2.3 // indirect
github.com/miekg/dns v1.1.47
github.com/oschwald/maxminddb-golang v1.8.0
github.com/safing/portbase v0.14.1
github.com/safing/spn v0.4.5
github.com/hashicorp/go-version v1.6.0
github.com/miekg/dns v1.1.50
github.com/oschwald/maxminddb-golang v1.9.0
github.com/safing/portbase v0.14.5
github.com/safing/spn v0.4.13
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/spf13/cobra v1.4.0
github.com/stretchr/testify v1.7.0
github.com/spf13/cobra v1.5.0
github.com/stretchr/testify v1.8.0
github.com/tannerryan/ring v1.1.2
github.com/tevino/abool v1.2.0
github.com/umahmood/haversine v0.0.0-20151105152445-808ab04add26
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8
golang.org/x/tools v0.1.10 // indirect
golang.org/x/net v0.0.0-20220708220712-1185a9018129
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
zombiezen.com/go/sqlite v0.10.1
)
require (
github.com/VictoriaMetrics/metrics v1.18.1 // indirect
github.com/aead/ecdh v0.2.0 // indirect
github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
github.com/armon/go-radix v1.0.0 // indirect
github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect
github.com/bluele/gcache v0.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fxamacker/cbor/v2 v2.4.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gofrs/uuid v4.2.0+incompatible // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/josharian/native v1.0.0 // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mdlayher/netlink v1.6.0 // indirect
github.com/mdlayher/socket v0.2.3 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rot256/pblind v0.0.0-20211117203330-22455f90b565 // indirect
github.com/safing/jess v0.2.3 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/seehuhn/fortuna v1.0.1 // indirect
github.com/seehuhn/sha256d v1.0.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tidwall/gjson v1.14.1 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tidwall/sjson v1.2.4 // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.5.0 // indirect
github.com/valyala/fastrand v1.1.0 // indirect
github.com/valyala/histogram v1.2.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/tools v0.1.11 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.16.17 // indirect
modernc.org/mathutil v1.4.1 // indirect
modernc.org/memory v1.1.1 // indirect
modernc.org/sqlite v1.17.3 // indirect
)

130
go.sum
View file

@ -66,6 +66,7 @@ cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW
contrib.go.opencensus.io/exporter/aws v0.0.0-20200617204711-c478e41e60e9/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA=
contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ=
contrib.go.opencensus.io/integrations/ocsql v0.1.7/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797/go.mod h1:sXBiorCo8c46JlQV3oXPKINnZ8mcqnye1EkVkqsectk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/AlecAivazis/survey/v2 v2.0.7/go.mod h1:mlizQTaPjnR4jcpwRSaSlkbsRfYFEyKgLQvYTzxxiHA=
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
@ -250,6 +251,7 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@ -263,7 +265,6 @@ github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70d
github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI=
github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
@ -298,8 +299,9 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/florianl/go-nfqueue v1.3.0 h1:cvZGUM6k1zxkokHM79Hg/q39cVjf3WAQZ/46ncpuhkc=
github.com/florianl/go-nfqueue v1.3.0/go.mod h1:sA7IQtpB3zxpdwJ4y4999SjK+1lx91TEqBBB4CIlFX0=
github.com/florianl/go-nfqueue v1.3.1 h1:khQ9fYCrjbu5CF8dZF55G2RTIEIQRI0Aj5k3msJR6Gw=
github.com/florianl/go-nfqueue v1.3.1/go.mod h1:aHWbgkhryJxF5XxYvJ3oRZpdD4JP74Zu/hP1zuhja+M=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@ -354,7 +356,6 @@ github.com/go-test/deep v1.0.2/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.5/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
@ -424,8 +425,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk=
github.com/google/go-replayers/httpreplay v1.0.0/go.mod h1:LJhKoTwS5Wy5Ld/peq8dFFG5OfJyHEz7ft+DsTUv25M=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -459,6 +461,7 @@ github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@ -530,8 +533,10 @@ github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=
github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.5.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -646,7 +651,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo=
github.com/mdlayher/ethtool v0.0.0-20211028163843-288d040e9d60/go.mod h1:aYbhishWc4Ai3I2U4Gaa2n3kHWSwzme6EsG/46HRQbE=
@ -686,8 +693,11 @@ github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0
github.com/miekg/dns v1.1.44/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.47 h1:J9bWiXbqMbnZPcY8Qi2E3EWIBsIm6MZzzJB9VRg5gL8=
github.com/miekg/dns v1.1.47/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.48/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.49/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
@ -743,8 +753,9 @@ github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/oschwald/maxminddb-golang v1.8.0 h1:Uh/DSnGoxsyp/KYbY1AuP0tYEwfs0sCph9p/UMXK/Hk=
github.com/oschwald/maxminddb-golang v1.8.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
github.com/oschwald/maxminddb-golang v1.9.0 h1:tIk4nv6VT9OiPyrnDAfJS1s1xKDQMZOsGojab6EjC1Y=
github.com/oschwald/maxminddb-golang v1.9.0/go.mod h1:TK+s/Z2oZq0rSl4PSeAEoP0bgm82Cp5HyvYbt8K3zLY=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@ -785,6 +796,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -812,8 +825,12 @@ github.com/safing/portbase v0.13.4/go.mod h1:5vj5IK5WJoSGareDe6yCMZfnF7txVRx7jZy
github.com/safing/portbase v0.13.5/go.mod h1:5vj5IK5WJoSGareDe6yCMZfnF7txVRx7jZyTZInISP0=
github.com/safing/portbase v0.13.6/go.mod h1:G0maDSQxYDuluNhMzA1zVd/nfXawfECv5H7+fnTfVhM=
github.com/safing/portbase v0.14.0/go.mod h1:z9sRR/vqohAGdYSSx2B+o8tND4WVvcxPL6XBBtN3bDI=
github.com/safing/portbase v0.14.1 h1:k/SgywE5QutDbf5hHbEArrQnjBByerGm0nhh5n8VnDw=
github.com/safing/portbase v0.14.1/go.mod h1:z9sRR/vqohAGdYSSx2B+o8tND4WVvcxPL6XBBtN3bDI=
github.com/safing/portbase v0.14.2/go.mod h1:z9sRR/vqohAGdYSSx2B+o8tND4WVvcxPL6XBBtN3bDI=
github.com/safing/portbase v0.14.3/go.mod h1:z9sRR/vqohAGdYSSx2B+o8tND4WVvcxPL6XBBtN3bDI=
github.com/safing/portbase v0.14.4/go.mod h1:z9sRR/vqohAGdYSSx2B+o8tND4WVvcxPL6XBBtN3bDI=
github.com/safing/portbase v0.14.5 h1:+8H+mQ7AFjA04M7UPq0490pj3/+nvJj3pEUP1PYTMYc=
github.com/safing/portbase v0.14.5/go.mod h1:z9sRR/vqohAGdYSSx2B+o8tND4WVvcxPL6XBBtN3bDI=
github.com/safing/portmaster v0.7.3/go.mod h1:o//kZ8eE+5vT1V22mgnxHIAdlEz42sArsK5OF2Lf/+s=
github.com/safing/portmaster v0.7.4/go.mod h1:Q93BWdF1oAL0oUMukshl8W1aPZhmrlTGi6tFTFc3pTw=
github.com/safing/portmaster v0.7.6/go.mod h1:qOs9hQtvAzTVICRbwLg3vddqOaqJHeWBjWQ0C+TJ/Bw=
@ -828,6 +845,10 @@ github.com/safing/portmaster v0.7.21/go.mod h1:Jy0G6x6m5dE36Mv9grXHI77cxysQ0fIQV
github.com/safing/portmaster v0.8.0/go.mod h1:lY2/WvOlH8kl1AwkixdWCjlo+PZQv+oEOQhIaSS/+wA=
github.com/safing/portmaster v0.8.5-interdep/go.mod h1:A+zAVEKjr057ktgiMSJRdUmOF+FPW8XY/5LqGnbsKbU=
github.com/safing/portmaster v0.8.5/go.mod h1:MqOlFwHcIx/109Ugutz/CG23znuuXCRVHcIcfX0VC/c=
github.com/safing/portmaster v0.8.7/go.mod h1:RUgCWt5v22jDUOtJfOwApi//Kt8RTZQhlREcBc+L4z8=
github.com/safing/portmaster v0.8.9-interdep/go.mod h1:1hK7QpvFVlb/sglkc3SKj+RXMGBuk0wqO2s3pvMg1Xs=
github.com/safing/portmaster v0.8.9/go.mod h1:tv0rxO76hrpBLdArN7YTypOaseH6zgQ2gLI2zCknk9Q=
github.com/safing/portmaster v0.8.14-interdep/go.mod h1:HIkaE8wCXr8ULyZSWFkQNNY9obpMufxizXZugnjHLK0=
github.com/safing/spn v0.3.4/go.mod h1:TfzNsZCbnlWv0UFDILFOUSudVKJZlnBVoR1fDXrjOK0=
github.com/safing/spn v0.3.5/go.mod h1:jHkFF2Yu1fnjFu4KXjVA+iagMr/z4eB4p3jiwikvKj8=
github.com/safing/spn v0.3.6/go.mod h1:RSeFb/h5Wt3yDVezXj3lhXJ/Iwd7FbtsGf5E+p5J2YQ=
@ -841,12 +862,15 @@ github.com/safing/spn v0.3.17/go.mod h1:Fq/70Hl0OUxtYuY5NATv5q468hvfDDEFwN3mivEe
github.com/safing/spn v0.3.19/go.mod h1:phCnWjWOgdVMXaMsmDr6izR/ROVElSZGdIm7j7PIit4=
github.com/safing/spn v0.4.0/go.mod h1:0jBetnYCfxqO5PJskhPOxJ/v6VRfE+bQU98XW240BNw=
github.com/safing/spn v0.4.2/go.mod h1:yZPezHDEYyhei8n13tTxjQCGq6LRr5svz9WFAAeDPec=
github.com/safing/spn v0.4.3 h1:iEFmpzyrThJ8QF9Qpbxk/m4w2+ZvbVPyuqJ4EwnpfDg=
github.com/safing/spn v0.4.3/go.mod h1:YHtg3FkZviN8T7db4BdRffbYO1pO7w9SydQatLmvW2M=
github.com/safing/spn v0.4.4 h1:DGCkHaCgkQ0ivsD3J3BkUJ2auMpCEhzZdlD3JDmK/U4=
github.com/safing/spn v0.4.4/go.mod h1:mkQA5pYM1SUd4JkTyuwXFycFMGQXLTd9RUJuY2vqccM=
github.com/safing/spn v0.4.5 h1:I3nv0YOD7Rh+hTpWHdsIlilK10Dz/ZZF//OfJm+Un3I=
github.com/safing/spn v0.4.5/go.mod h1:mkQA5pYM1SUd4JkTyuwXFycFMGQXLTd9RUJuY2vqccM=
github.com/safing/spn v0.4.6/go.mod h1:AmZ+rore+6DQp0GSchIAXPn8ij0Knyw7uy4PbMLljXg=
github.com/safing/spn v0.4.7/go.mod h1:NoSG9K0OK9hrPC76yqWFS6RtvbqZdIc/KGOsC4T3hV8=
github.com/safing/spn v0.4.11/go.mod h1:nro/I6b2JnafeeqoMsQRqf6TaQeL9uLLZkUREtxLVDE=
github.com/safing/spn v0.4.12 h1:Tw7TUZEZR4yZy7L+ICRCketDk5L5x0s0pvrSUHFaKs4=
github.com/safing/spn v0.4.12/go.mod h1:AUNgBrRwCcspC98ljptDnrPuHLn/BHSG+rSprV/5Wlc=
github.com/safing/spn v0.4.13 h1:5NXWUl/2EWyotrQhW3tD+3DYw7hEqQk0n0lHa+w4eFo=
github.com/safing/spn v0.4.13/go.mod h1:rBeimIc1FHQOhX7lTh/LaFGRotmnwZIDWUSsPyeIDog=
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
@ -890,8 +914,9 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@ -907,14 +932,17 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tannerryan/ring v1.1.2 h1:iXayOjqHQOLzuy9GwSKuG3nhWfzQkldMlQivcgIr7gQ=
github.com/tannerryan/ring v1.1.2/go.mod h1:DkELJEjbZhJBtFKR9Xziwj3HKZnb/knRgljNqp65vH4=
@ -934,8 +962,9 @@ github.com/tidwall/gjson v1.11.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vl
github.com/tidwall/gjson v1.12.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.13.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w=
github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.1 h1:iymTbGkQBhveq21bEvAQ81I0LEBork8BFe1CUZXdyuo=
github.com/tidwall/gjson v1.14.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/match v1.1.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
@ -957,8 +986,9 @@ github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03O
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tklauser/numcpus v0.5.0 h1:ooe7gN0fg6myJ0EKoTAf5hebTZrH52px3New/D9iJ+A=
github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
@ -1072,8 +1102,14 @@ golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220213190939-1e6e3497d506/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 h1:S25/rfnfsMVgORT4/J61MJ7rdyseOZOyvLIrZEZ7s6s=
golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220513210258-46612604a0f9/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -1111,8 +1147,9 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1190,8 +1227,13 @@ golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220121210141-e204ce36a2ba/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220513224357-95641704303c/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220621193019-9d032be2e588/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1222,8 +1264,10 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1344,8 +1388,15 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220224120231-95c6836cb0e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 h1:OH54vjqzRWmbJ62fjuhxy7AxFFgoHN0/DPc/UrL8cAs=
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220325203850-36772127a21f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220513210249-45d2b4557a2a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -1425,6 +1476,7 @@ golang.org/x/tools v0.0.0-20200808161706-5bf02b21f123/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -1441,13 +1493,14 @@ golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@ -1642,8 +1695,9 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
@ -1665,6 +1719,32 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A=
modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU=
modernc.org/libc v1.16.7/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
modernc.org/libc v1.16.17 h1:rXo8IZJvP+QSN1KrlV23dtkM3XfGYXjx3RbLLzBtndM=
modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1 h1:bDOL0DIDLQv7bWhP3gMvIrnoFw+Eo6F7a2QK9HPDiFU=
modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.17.3 h1:iE+coC5g17LtByDYDWKpR6m2Z9022YrSh3bumwOnIrI=
modernc.org/sqlite v1.17.3/go.mod h1:10hPVYar9C0kfXuTWGz8s0XtB8uAGymUy51ZzStYe3k=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
@ -1674,3 +1754,5 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
zombiezen.com/go/sqlite v0.10.1 h1:PSgVSHeIVOGKbX7ZIQNXGKn3wcqM6JBnT4yS1OLjWbM=
zombiezen.com/go/sqlite v0.10.1/go.mod h1:tOd9u3peffVYnXOedepSJmX92n/mbqf594wcJ+29jf8=

View file

@ -24,6 +24,11 @@ var updateInProgress = abool.New()
func tryListUpdate(ctx context.Context) error {
err := performUpdate(ctx)
if err != nil {
// Check if we are shutting down.
if module.IsStopping() {
return nil
}
// Check if the module already has a failure status set. If not, set a
// generic one with the returned error.
failureStatus, _, _ := module.FailureStatus()

View file

@ -26,3 +26,8 @@ func GetLocation(ip net.IP) (*Location, error) {
record.FillMissingInfo()
return record, nil
}
// IsInitialized reports whether the geoip database is ready for use. If wait
// is set, the check blocks until initialization has finished.
func IsInitialized(v6, wait bool) bool {
	reader := worker.GetReader(v6, wait)
	return reader != nil
}

75
nameserver/conflict.go Normal file
View file

@ -0,0 +1,75 @@
package nameserver
import (
"net"
"os"
processInfo "github.com/shirou/gopsutil/process"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/network/packet"
"github.com/safing/portmaster/network/state"
)
// commonResolverIPs lists addresses that system DNS resolvers typically bind
// to. They are probed when the configured listen IP is unspecified (0.0.0.0
// or ::), since a conflicting resolver may be on any of them.
var commonResolverIPs = []net.IP{
	net.IPv4zero,
	net.IPv4(127, 0, 0, 1), // default
	net.IPv4(127, 0, 0, 53), // some resolvers on Linux
	net.IPv6zero,
	net.IPv6loopback,
}
// findConflictingProcess returns the process listening on the given IP and
// port, or nil if none could be found. When the given IP is unspecified, all
// common resolver addresses are probed instead.
func findConflictingProcess(ip net.IP, port uint16) (conflictingProcess *processInfo.Process) {
	// Probe all common resolver addresses when listening on a wildcard IP.
	candidates := []net.IP{ip}
	if ip.Equal(net.IPv4zero) || ip.Equal(net.IPv6zero) {
		candidates = commonResolverIPs
	}

	// Return the first listening process found.
	for _, candidate := range candidates {
		proc, err := getListeningProcess(candidate, port)
		if err != nil {
			// Log the error and let the worker try again.
			log.Warningf("nameserver: failed to find conflicting service: %s", err)
			continue
		}
		if proc != nil {
			// Conflicting service found.
			return proc
		}
	}

	return nil
}
// getListeningProcess returns the process bound to the given UDP address.
// It returns nil (without error) when nothing is listening there, or when
// the listener turns out to be the Portmaster itself.
func getListeningProcess(resolverIP net.IP, resolverPort uint16) (*processInfo.Process, error) {
	lookupKey := &packet.Info{
		Inbound:  true,
		Version:  0, // auto-detect
		Protocol: packet.UDP,
		Src:      nil, // do not record direction
		SrcPort:  0,   // do not record direction
		Dst:      resolverIP,
		DstPort:  resolverPort,
	}
	pid, _, err := state.Lookup(lookupKey, true)
	if err != nil {
		// there may be nothing listening on :53
		return nil, nil //nolint:nilerr // Treat lookup error as "not found".
	}

	// Ignore if it's us for some reason.
	if pid == os.Getpid() {
		return nil, nil
	}

	proc, procErr := processInfo.NewProcess(int32(pid))
	if procErr != nil {
		// Process may have disappeared already.
		return nil, procErr
	}
	return proc, nil
}

View file

@ -13,6 +13,8 @@ import (
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/modules/subsystems"
"github.com/safing/portbase/notifications"
"github.com/safing/portmaster/compat"
"github.com/safing/portmaster/firewall"
"github.com/safing/portmaster/netenv"
)
@ -24,6 +26,9 @@ var (
stopListener1 func() error
stopListener2 func() error
stopListenersLock sync.Mutex
eventIDConflictingService = "nameserver:conflicting-service"
eventIDListenerFailed = "nameserver:listener-failed"
)
func init() {
@ -53,6 +58,9 @@ func start() error {
return fmt.Errorf("failed to parse nameserver listen address: %w", err)
}
// Tell the compat module where we are listening.
compat.SetNameserverListenIP(ip1)
// Get own hostname.
hostname, err = os.Hostname()
if err != nil {
@ -129,24 +137,93 @@ func startListener(ip net.IP, port uint16, first bool) {
return nil
}
// Resolve generic listener error, if primary listener.
if first {
module.Resolve(eventIDListenerFailed)
}
// Start listening.
log.Infof("nameserver: starting to listen on %s", dnsServer.Addr)
err := dnsServer.ListenAndServe()
if err != nil {
// check if we are shutting down
// Stop worker without error if we are shutting down.
if module.IsStopping() {
return nil
}
// is something blocking our port?
checkErr := checkForConflictingService(ip, port)
if checkErr != nil {
return checkErr
}
log.Warningf("nameserver: failed to listen on %s: %s", dnsServer.Addr, err)
handleListenError(err, ip, port, first)
}
return err
})
}
// handleListenError reports a failed DNS listener to the user. If another
// process is found listening on the given address, it is reported as a
// conflicting service; otherwise the raw listen error is reported directly.
// For the primary listener, the notification is also attached to the module
// as a failure status.
func handleListenError(err error, ip net.IP, port uint16, primaryListener bool) {
	var n *notifications.Notification

	// Create suffix for secondary listener, so that the primary and secondary
	// listeners use separate event IDs and do not override each other.
	var secondaryEventIDSuffix string
	if !primaryListener {
		secondaryEventIDSuffix = "-secondary"
	}

	// Find a conflicting service.
	cfProcess := findConflictingProcess(ip, port)
	if cfProcess != nil {
		// Report the conflicting process.

		// Build conflicting process description: "name (path)" when both are
		// available, otherwise whichever one is.
		var cfDescription string
		cfName, nameErr := cfProcess.Name()
		if nameErr == nil && cfName != "" {
			cfDescription = cfName
		}
		cfExe, exeErr := cfProcess.Exe()
		// Fixed: previously this condition checked cfDescription instead of
		// cfExe, which made the else branch unreachable and silently dropped
		// the executable path whenever the process name was unavailable. The
		// else branch also assigned cfName (empty here) instead of cfExe.
		if exeErr == nil && cfExe != "" {
			if cfDescription != "" {
				cfDescription += " (" + cfExe + ")"
			} else {
				cfDescription = cfExe
			}
		}

		// Notify user about conflicting service.
		n = notifications.Notify(&notifications.Notification{
			EventID: eventIDConflictingService + secondaryEventIDSuffix,
			Type:    notifications.Error,
			Title:   "Conflicting DNS Software",
			Message: fmt.Sprintf(
				"Restart Portmaster after you have deactivated or properly configured the conflicting software: %s",
				cfDescription,
			),
			ShowOnSystem: true,
			AvailableActions: []*notifications.Action{
				{
					Text:    "Open Docs",
					Type:    notifications.ActionTypeOpenURL,
					Payload: "https://docs.safing.io/portmaster/install/status/software-compatibility",
				},
			},
		})
	} else {
		// If no conflict is found, report the error directly.
		n = notifications.Notify(&notifications.Notification{
			EventID: eventIDListenerFailed + secondaryEventIDSuffix,
			Type:    notifications.Error,
			Title:   "Secure DNS Error",
			Message: fmt.Sprintf(
				"The internal DNS server failed. Restart Portmaster to try again. Error: %s",
				err,
			),
			ShowOnSystem: true,
		})
	}

	// Attach error to module, if primary listener.
	if primaryListener {
		n.AttachToModule(module)
	}
}
func stop() error {
stopListenersLock.Lock()
defer stopListenersLock.Unlock()
@ -182,7 +259,11 @@ func getListenAddresses(listenAddress string) (ip1, ip2 net.IP, port uint16, err
// listen separately for IPv4 and IPv6.
if ipString == "localhost" {
ip1 = net.IPv4(127, 0, 0, 17)
ip2 = net.IPv6loopback
if netenv.IPv6Enabled() {
ip2 = net.IPv6loopback
} else {
log.Warningf("nameserver: no IPv6 stack detected, disabling IPv6 nameserver listener")
}
} else {
ip1 = net.ParseIP(ipString)
if ip1 == nil {

View file

@ -106,6 +106,9 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
return reply(nsutil.Refused("invalid domain"))
}
// Get public suffix after validation.
q.InitPublicSuffixData()
// Check if query is failing.
// Some software retries failing queries excessively. This might not be a
// problem normally, but handling a request is pretty expensive for the
@ -168,7 +171,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
}
default:
tracer.Warningf("nameserver: external request for %s%s, ignoring", q.FQDN, q.QType)
tracer.Warningf("nameserver: external request from %s for %s%s, ignoring", remoteAddr, q.FQDN, q.QType)
return reply(nsutil.Refused("external queries are not permitted"))
}
conn.Lock()
@ -252,10 +255,13 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
if err != nil {
switch {
case errors.Is(err, resolver.ErrNotFound):
tracer.Tracef("nameserver: %s", err)
conn.Failed("domain does not exist", "")
return reply(nsutil.NxDomain("nxdomain: " + err.Error()))
// Try alternatives domain names for unofficial domain spaces.
rrCache = checkAlternativeCaches(ctx, q)
if rrCache == nil {
tracer.Tracef("nameserver: %s", err)
conn.Failed("domain does not exist", "")
return reply(nsutil.NxDomain("nxdomain: " + err.Error()))
}
case errors.Is(err, resolver.ErrBlocked):
tracer.Tracef("nameserver: %s", err)
conn.Block(err.Error(), "")
@ -268,7 +274,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case errors.Is(err, resolver.ErrOffline):
if rrCache == nil {
log.Tracer(ctx).Debugf("nameserver: not resolving %s, device is offline", q.ID())
tracer.Debugf("nameserver: not resolving %s, device is offline", q.ID())
conn.Failed("not resolving, device is offline", "")
return reply(nsutil.ServerFailure(err.Error()))
}
@ -290,8 +296,12 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
addFailingQuery(q, errors.New("emptry reply from resolver"))
return reply(nsutil.ServerFailure("internal error: empty reply"))
case rrCache.RCode == dns.RcodeNameError:
// Return now if NXDomain.
return reply(nsutil.NxDomain("no answer found (NXDomain)"))
// Try alternatives domain names for unofficial domain spaces.
rrCache = checkAlternativeCaches(ctx, q)
if rrCache == nil {
// Return now if NXDomain.
return reply(nsutil.NxDomain("no answer found (NXDomain)"))
}
}
// Check with firewall again after resolving.
@ -336,3 +346,52 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
)
return reply(rrCache, conn, rrCache)
}
// checkAlternativeCaches tries to answer a failed (NXDOMAIN) query via
// alternative names in unofficial domain spaces: first the internal env
// resolver under the special-use domain, then the plain ".local." variant
// from the regular RR cache. It returns nil if no usable alternative exists.
func checkAlternativeCaches(ctx context.Context, q *resolver.Query) *resolver.RRCache {
	// Do not try alternatives when the query is in a public suffix.
	// This also includes arpa. and local.
	if q.ICANNSpace {
		return nil
	}

	// Check if the env resolver has something.
	// q.FQDN ends with a dot, so this builds
	// "<fqdn>local.<InternalSpecialUseDomain>".
	pmEnvQ := &resolver.Query{
		FQDN:  q.FQDN + "local." + resolver.InternalSpecialUseDomain,
		QType: q.QType,
	}
	rrCache, err := resolver.QueryPortmasterEnv(ctx, pmEnvQ)
	if err == nil && rrCache != nil && rrCache.RCode == dns.RcodeSuccess {
		// Rewrite the cached answer to match the original query.
		makeAlternativeRecord(ctx, q, rrCache, pmEnvQ.FQDN)
		return rrCache
	}

	// Check if we have anything in cache for the ".local." variant.
	localFQDN := q.FQDN + "local."
	rrCache, err = resolver.GetRRCache(localFQDN, q.QType)
	if err == nil && rrCache != nil && rrCache.RCode == dns.RcodeSuccess {
		makeAlternativeRecord(ctx, q, rrCache, localFQDN)
		return rrCache
	}

	return nil
}
// makeAlternativeRecord rewrites rrCache - which was resolved for altName -
// so that its answers also match the originally queried name q.FQDN.
func makeAlternativeRecord(ctx context.Context, q *resolver.Query, rrCache *resolver.RRCache, altName string) {
	log.Tracer(ctx).Debugf("using %s to answer query", altName)

	// Duplicate answers so they match the query.
	copied := make([]dns.RR, 0, len(rrCache.Answer))
	for _, answer := range rrCache.Answer {
		if strings.ToLower(answer.Header().Name) == altName {
			copiedAnswer := dns.Copy(answer)
			copiedAnswer.Header().Name = q.FQDN
			copied = append(copied, copiedAnswer)
		}
	}
	// NOTE(review): the renamed copies are appended in addition to the
	// original records, so the answer section carries both names - confirm
	// this is intended rather than replacing the answer section.
	if len(copied) > 0 {
		rrCache.Answer = append(rrCache.Answer, copied...)
	}

	// Update the question.
	rrCache.Domain = q.FQDN
}

View file

@ -1,164 +0,0 @@
package nameserver
import (
"fmt"
"net"
"os"
"strconv"
"time"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/notifications"
"github.com/safing/portmaster/network/packet"
"github.com/safing/portmaster/network/state"
)
var (
	// commonResolverIPs lists addresses that system DNS resolvers typically
	// bind to. They are probed when the configured listen IP is unspecified.
	commonResolverIPs = []net.IP{
		net.IPv4zero,
		net.IPv4(127, 0, 0, 1), // default
		net.IPv4(127, 0, 0, 53), // some resolvers on Linux
		net.IPv6zero,
		net.IPv6loopback,
	}

	// lastKilledPID holds the PID of the last killed conflicting service.
	// It is only accessed by checkForConflictingService, which is only called by
	// the nameserver worker.
	lastKilledPID int
)
// checkForConflictingService checks whether another process is listening on
// the nameserver address and, if so, tries to stop it so the Portmaster can
// take over. The user is notified about the outcome. After a successful
// takeover, an ErrRestartNow error is returned to restart the listener via
// the service-worker logic.
func checkForConflictingService(ip net.IP, port uint16) error {
	// Evaluate which IPs to check.
	var ipsToCheck []net.IP
	if ip.Equal(net.IPv4zero) || ip.Equal(net.IPv6zero) {
		ipsToCheck = commonResolverIPs
	} else {
		ipsToCheck = []net.IP{ip}
	}

	// Check if there is another resolver that we need to take over.
	var killed int
	var killingFailed bool
ipsToCheckLoop:
	for _, resolverIP := range ipsToCheck {
		pid, err := takeover(resolverIP, port)
		switch {
		case err != nil:
			// Log the error and let the worker try again.
			log.Infof("nameserver: failed to stop conflicting service: %s", err)
			killingFailed = true
			break ipsToCheckLoop
		case pid != 0:
			// Conflicting service identified and killed!
			killed = pid
			break ipsToCheckLoop
		}
	}

	// Notify user of failed killing or repeated kill.
	if killingFailed || (killed != 0 && killed == lastKilledPID) {
		// Notify the user that we failed to kill something.
		notifications.Notify(&notifications.Notification{
			// NOTE(review): "namserver" is a typo, but the event ID is kept
			// as-is in case it is matched elsewhere (eg. by the UI).
			EventID:      "namserver:failed-to-kill-conflicting-service",
			Type:         notifications.Error,
			Title:        "Failed to Stop Conflicting DNS Client",
			Message:      "The Portmaster failed to stop a conflicting DNS client to gain required system integration. If there is another DNS Client (Nameserver; Resolver) on this device, please disable it.",
			ShowOnSystem: true,
			AvailableActions: []*notifications.Action{
				{
					ID:   "ack",
					Text: "OK",
				},
				{
					Text:    "Open Docs",
					Type:    notifications.ActionTypeOpenURL,
					Payload: "https://docs.safing.io/portmaster/install/status/software-compatibility",
				},
			},
		})
		return nil
	}

	// Check if something was killed.
	if killed == 0 {
		return nil
	}
	lastKilledPID = killed

	// Notify the user that we killed something.
	// Fixed: message previously read "you can the check the documentation".
	notifications.Notify(&notifications.Notification{
		EventID: "namserver:stopped-conflicting-service",
		Type:    notifications.Info,
		Title:   "Stopped Conflicting DNS Client",
		Message: fmt.Sprintf(
			"The Portmaster stopped a conflicting DNS client (pid %d) to gain required system integration. If you are running another DNS client on this device on purpose, you can check the documentation if it is compatible with the Portmaster.",
			killed,
		),
		ShowOnSystem: true,
		AvailableActions: []*notifications.Action{
			{
				ID:   "ack",
				Text: "OK",
			},
			{
				Text:    "Open Docs",
				Type:    notifications.ActionTypeOpenURL,
				Payload: "https://docs.safing.io/portmaster/install/status/software-compatibility",
			},
		},
	})

	// Restart nameserver via service-worker logic.
	// Wait shortly so that the other process can shut down.
	time.Sleep(10 * time.Millisecond)
	return fmt.Errorf("%w: stopped conflicting name service with pid %d", modules.ErrRestartNow, killed)
}
// takeover looks up the process listening on the given UDP address and tries
// to stop it, first with an interrupt signal, then with a hard kill. It
// returns the PID of the stopped process, or 0 if nothing was listening or
// the listener was the Portmaster itself.
func takeover(resolverIP net.IP, resolverPort uint16) (int, error) {
	pid, _, err := state.Lookup(&packet.Info{
		Inbound:  true,
		Version:  0, // auto-detect
		Protocol: packet.UDP,
		Src:      nil, // do not record direction
		SrcPort:  0,   // do not record direction
		Dst:      resolverIP,
		DstPort:  resolverPort,
	}, true)
	if err != nil {
		// there may be nothing listening on :53
		return 0, nil //nolint:nilerr // Treat lookup error as "not found".
	}

	// Just don't, uh, kill ourselves...
	if pid == os.Getpid() {
		return 0, nil
	}

	proc, err := os.FindProcess(pid)
	if err != nil {
		// huh. gone already? I guess we'll wait then...
		return 0, err
	}

	// Try a graceful interrupt first, fall back to a hard kill.
	err = proc.Signal(os.Interrupt)
	if err != nil {
		err = proc.Kill()
		if err != nil {
			log.Errorf("nameserver: failed to stop conflicting service (pid %d): %s", pid, err)
			return 0, err
		}
	}
	log.Warningf(
		"nameserver: killed conflicting service with PID %d over %s",
		pid,
		net.JoinHostPort(
			resolverIP.String(),
			strconv.Itoa(int(resolverPort)),
		),
	)

	return pid, nil
}

View file

@ -152,11 +152,9 @@ func IsMyIP(ip net.IP) (yes bool, err error) {
return false, nil
}
// IsMyNet returns whether the given IP is currently in the host's broadcast
// domain - ie. the networks that the host is directly attached to.
// Function is optimized with the assumption that is unlikely that the IP is
// in the broadcast domain.
func IsMyNet(ip net.IP) (yes bool, err error) {
// GetLocalNetwork uses the given IP to search for a network configured on the
// device and returns it.
func GetLocalNetwork(ip net.IP) (myNet *net.IPNet, err error) {
myNetworksLock.Lock()
defer myNetworksLock.Unlock()
@ -164,16 +162,16 @@ func IsMyNet(ip net.IP) (yes bool, err error) {
if myNetworksNetworkChangedFlag.IsSet() {
err := refreshMyNetworks()
if err != nil {
return false, err
return nil, err
}
}
// Check if the IP address is in my networks.
for _, myNet := range myNetworks {
if myNet.Contains(ip) {
return true, nil
return myNet, nil
}
}
return false, nil
return nil, nil
}

View file

@ -55,7 +55,7 @@ func registerAPIEndpoints() error {
Read: api.PermitUser,
BelongsTo: module,
StructFunc: func(ar *api.Request) (i interface{}, err error) {
return getLocationFromTraceroute()
return getLocationFromTraceroute(&DeviceLocations{})
},
Name: "Get Approximate Internet Location via Traceroute",
Description: "Returns an approximation of where the device is on the Internet using a the traceroute technique.",

View file

@ -47,16 +47,16 @@ type DeviceLocations struct {
}
// Best returns the best (most accurate) device location.
func (dl *DeviceLocations) Best() *DeviceLocation {
if len(dl.All) > 0 {
return dl.All[0]
func (dls *DeviceLocations) Best() *DeviceLocation {
if len(dls.All) > 0 {
return dls.All[0]
}
return nil
}
// BestV4 returns the best (most accurate) IPv4 device location.
func (dl *DeviceLocations) BestV4() *DeviceLocation {
for _, loc := range dl.All {
func (dls *DeviceLocations) BestV4() *DeviceLocation {
for _, loc := range dls.All {
if loc.IPVersion == packet.IPv4 {
return loc
}
@ -65,8 +65,8 @@ func (dl *DeviceLocations) BestV4() *DeviceLocation {
}
// BestV6 returns the best (most accurate) IPv6 device location.
func (dl *DeviceLocations) BestV6() *DeviceLocation {
for _, loc := range dl.All {
func (dls *DeviceLocations) BestV6() *DeviceLocation {
for _, loc := range dls.All {
if loc.IPVersion == packet.IPv6 {
return loc
}
@ -74,11 +74,8 @@ func (dl *DeviceLocations) BestV6() *DeviceLocation {
return nil
}
func copyDeviceLocations() *DeviceLocations {
locationsLock.Lock()
defer locationsLock.Unlock()
// Create a copy of the locations, but not the entries.
// Copy creates a copy of the locations, but not the individual entries.
func (dls *DeviceLocations) Copy() *DeviceLocations {
cp := &DeviceLocations{
All: make([]*DeviceLocation, len(locations.All)),
}
@ -87,6 +84,32 @@ func copyDeviceLocations() *DeviceLocations {
return cp
}
// AddLocation adds a location to the list. If a location with the same IP
// already exists, it is only replaced when the new one is more accurate.
// The list is re-sorted by accuracy afterwards.
func (dls *DeviceLocations) AddLocation(dl *DeviceLocation) {
	if dls == nil {
		return
	}

	// Replace an existing entry with the same IP if the new one is better.
	found := false
	for idx := range dls.All {
		current := dls.All[idx]
		sameIP := (dl.IP == nil && current.IP == nil) || dl.IP.Equal(current.IP)
		if !sameIP {
			continue
		}
		found = true
		if dl.IsMoreAccurateThan(current) {
			// Replace
			dls.All[idx] = dl
			break
		}
	}
	if !found {
		dls.All = append(dls.All, dl)
	}

	// Sort locations.
	sort.Sort(sortLocationsByAccuracy(dls.All))
}
// DeviceLocation represents a single IP and metadata. It must not be changed
// once created.
type DeviceLocation struct {
@ -147,6 +170,12 @@ func (dl *DeviceLocation) String() string {
return "<none>"
case dl.Location == nil:
return dl.IP.String()
case dl.Source == SourceTimezone:
return fmt.Sprintf(
"TZ(%.0f/%.0f)",
dl.Location.Coordinates.Latitude,
dl.Location.Coordinates.Longitude,
)
default:
return fmt.Sprintf("%s (AS%d in %s)", dl.IP, dl.Location.AutonomousSystemNumber, dl.Location.Country.ISOCode)
}
@ -193,6 +222,14 @@ func (a sortLocationsByAccuracy) Less(i, j int) bool { return !a[j].IsMoreAccura
// SetInternetLocation provides the location management system with a possible Internet location.
func SetInternetLocation(ip net.IP, source DeviceLocationSource) (dl *DeviceLocation, ok bool) {
locationsLock.Lock()
defer locationsLock.Unlock()
return locations.AddIP(ip, source)
}
// AddIP adds a new location based on the given IP.
func (dls *DeviceLocations) AddIP(ip net.IP, source DeviceLocationSource) (dl *DeviceLocation, ok bool) {
// Check if IP is global.
if netutils.GetIPScope(ip) != netutils.Global {
return nil, false
@ -216,40 +253,16 @@ func SetInternetLocation(ip net.IP, source DeviceLocationSource) (dl *DeviceLoca
log.Warningf("netenv: failed to get geolocation data of %s (from %s): %s", ip, source, err)
return nil, false
}
// Only use location if there is data for it.
if geoLoc.Country.ISOCode == "" {
return nil, false
}
loc.Location = geoLoc
addLocation(loc)
dls.AddLocation(loc)
return loc, true
}
// addLocation adds a device location to the package-level list, replacing an
// existing entry for the same IP only when the new one is more accurate, and
// keeps the list sorted by accuracy.
func addLocation(dl *DeviceLocation) {
	if dl == nil {
		return
	}

	locationsLock.Lock()
	defer locationsLock.Unlock()

	// Add to locations, if better.
	matched := false
	for idx := range locations.All {
		entry := locations.All[idx]
		if (dl.IP == nil && entry.IP == nil) || dl.IP.Equal(entry.IP) {
			matched = true
			if dl.IsMoreAccurateThan(entry) {
				// Replace
				locations.All[idx] = dl
				break
			}
		}
	}
	if !matched {
		locations.All = append(locations.All, dl)
	}

	// Sort locations.
	sort.Sort(sortLocationsByAccuracy(locations.All))
}
// GetApproximateInternetLocation returns the approximate Internet location.
// Deprecated: Please use GetInternetLocation instead.
func GetApproximateInternetLocation() (net.IP, error) {
@ -267,23 +280,21 @@ func GetInternetLocation() (deviceLocations *DeviceLocations, ok bool) {
// Check if the network changed, if not, return cache.
if !locationNetworkChangedFlag.IsSet() {
return copyDeviceLocations(), true
locationsLock.Lock()
defer locationsLock.Unlock()
return locations.Copy(), true
}
locationNetworkChangedFlag.Refresh()
// Get all assigned addresses.
v4s, v6s, err := GetAssignedAddresses()
if err != nil {
log.Warningf("netenv: failed to get assigned addresses for device location: %s", err)
return nil, false
}
// Create new location list.
dls := &DeviceLocations{}
// Check interfaces for global addresses.
v4ok, v6ok := getLocationFromInterfaces()
v4ok, v6ok := getLocationFromInterfaces(dls)
// Try other methods for missing locations.
if len(v4s) > 0 && !v4ok {
_, err = getLocationFromTraceroute()
if !v4ok {
_, err := getLocationFromTraceroute(dls)
if err != nil {
log.Warningf("netenv: failed to get IPv4 device location from traceroute: %s", err)
} else {
@ -292,35 +303,43 @@ func GetInternetLocation() (deviceLocations *DeviceLocations, ok bool) {
// Get location from timezone as final fallback.
if !v4ok {
getLocationFromTimezone(packet.IPv4)
getLocationFromTimezone(dls, packet.IPv4)
}
}
if len(v6s) > 0 && !v6ok {
if !v6ok && IPv6Enabled() {
// TODO: Find more ways to get IPv6 device location
// Get location from timezone as final fallback.
getLocationFromTimezone(packet.IPv6)
getLocationFromTimezone(dls, packet.IPv6)
}
// As a last guard, make sure there is at least one location in the list.
if len(dls.All) == 0 {
getLocationFromTimezone(dls, packet.IPv4)
}
// Set new locations.
locationsLock.Lock()
defer locationsLock.Unlock()
locations = dls
// Return gathered locations.
cp := copyDeviceLocations()
return cp, true
return locations.Copy(), true
}
func getLocationFromInterfaces() (v4ok, v6ok bool) {
func getLocationFromInterfaces(dls *DeviceLocations) (v4ok, v6ok bool) {
globalIPv4, globalIPv6, err := GetAssignedGlobalAddresses()
if err != nil {
log.Warningf("netenv: location: failed to get assigned global addresses: %s", err)
return false, false
}
for _, ip := range globalIPv4 {
if _, ok := SetInternetLocation(ip, SourceInterface); ok {
if _, ok := dls.AddIP(ip, SourceInterface); ok {
v4ok = true
}
}
for _, ip := range globalIPv6 {
if _, ok := SetInternetLocation(ip, SourceInterface); ok {
if _, ok := dls.AddIP(ip, SourceInterface); ok {
v6ok = true
}
}
@ -338,7 +357,7 @@ func getLocationFromUPnP() (ok bool) {
}
*/
func getLocationFromTraceroute() (dl *DeviceLocation, err error) {
func getLocationFromTraceroute(dls *DeviceLocations) (dl *DeviceLocation, err error) {
// Create connection.
conn, err := net.ListenPacket("ip4:icmp", "")
if err != nil {
@ -459,7 +478,7 @@ nextHop:
// We have received a valid time exceeded error.
// If message came from a global unicast, us it!
if netutils.GetIPScope(remoteIP) == netutils.Global {
dl, ok := SetInternetLocation(remoteIP, SourceTraceroute)
dl, ok := dls.AddIP(remoteIP, SourceTraceroute)
if !ok {
return nil, errors.New("invalid IP address")
}
@ -505,7 +524,7 @@ func recvICMP(currentHop int, icmpPacketsViaFirewall chan packet.Packet) (
}
}
func getLocationFromTimezone(ipVersion packet.IPVersion) (ok bool) { //nolint:unparam // This is documentation.
func getLocationFromTimezone(dls *DeviceLocations, ipVersion packet.IPVersion) {
// Create base struct.
tzLoc := &DeviceLocation{
IPVersion: ipVersion,
@ -520,6 +539,5 @@ func getLocationFromTimezone(ipVersion packet.IPVersion) (ok bool) { //nolint:un
tzLoc.Location.Coordinates.Latitude = 48
tzLoc.Location.Coordinates.Longitude = float64(offsetSeconds) / 43200 * 180
addLocation(tzLoc)
return true
dls.AddLocation(tzLoc)
}

View file

@ -1,6 +1,9 @@
package netenv
import (
"github.com/tevino/abool"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
)
@ -20,6 +23,8 @@ func init() {
}
func prep() error {
checkForIPv6Stack()
if err := registerAPIEndpoints(); err != nil {
return err
}
@ -46,3 +51,22 @@ func start() error {
return nil
}
// ipv6Enabled caches whether an active IPv6 stack was detected at startup.
// It defaults to true until checkForIPv6Stack has run.
var ipv6Enabled = abool.NewBool(true)

// IPv6Enabled returns whether the device has an active IPv6 stack.
// This is only checked once on startup in order to maintain consistency.
func IPv6Enabled() bool {
	return ipv6Enabled.IsSet()
}
// checkForIPv6Stack probes the assigned addresses once and records whether
// any IPv6 address is present. The result is served by IPv6Enabled.
func checkForIPv6Stack() {
	_, v6Addrs, err := GetAssignedAddresses()
	if err != nil {
		log.Warningf("netenv: failed to get assigned addresses to check for ipv6 stack: %s", err)
		return
	}

	// Set IPv6 as enabled if any IPv6 addresses are found.
	ipv6Enabled.SetTo(len(v6Addrs) > 0)
}

View file

@ -37,13 +37,12 @@ var (
PortalTestIP = net.IPv4(192, 0, 2, 1)
PortalTestURL = fmt.Sprintf("http://%s/", PortalTestIP)
DNSTestDomain = "one.one.one.one."
DNSTestExpectedIP = net.IPv4(1, 1, 1, 1)
DNSFallbackTestDomain = "dns-check.safing.io."
DNSFallbackTestExpectedIP = net.IPv4(0, 65, 67, 75) // Ascii: \0ACK
DNSTestDomain = "online-check.safing.io."
DNSTestExpectedIP = net.IPv4(0, 65, 67, 75) // Ascii: \0ACK
DNSTestQueryFunc func(ctx context.Context, fdqn string) (ips []net.IP, ok bool, err error)
ConnectedToSPN = abool.New()
ConnectedToDNS = abool.New()
// SpecialCaptivePortalDomain is the domain name used to point to the detected captive portal IP
// or the captive portal test IP. The default value should be overridden by the resolver package,
@ -53,8 +52,6 @@ var (
// ConnectivityDomains holds all connectivity domains. This slice must not be modified.
ConnectivityDomains = []string{
SpecialCaptivePortalDomain,
DNSTestDomain, // Internal DNS Check
DNSFallbackTestDomain, // Internal DNS Check
// Windows
"dns.msftncsi.com.", // DNS Check
@ -380,20 +377,20 @@ func monitorOnlineStatus(ctx context.Context) error {
func getDynamicStatusTrigger() <-chan time.Time {
switch GetOnlineStatus() {
case StatusOffline:
// Will be triggered by network change anyway.
return time.After(20 * time.Second)
// Will also be triggered by network change.
return time.After(10 * time.Second)
case StatusLimited, StatusPortal:
// Change will not be detected otherwise, but impact is minor.
return time.After(5 * time.Second)
case StatusSemiOnline:
// Very small impact.
return time.After(20 * time.Second)
return time.After(60 * time.Second)
case StatusOnline:
// Don't check until resolver reports problems.
return nil
case StatusUnknown:
return time.After(5 * time.Second)
default: // other unknown status
fallthrough
default:
return time.After(5 * time.Minute)
}
}
@ -407,13 +404,18 @@ func checkOnlineStatus(ctx context.Context) {
return StatusUnknown
}*/
// 0) check if connected to SPN
// 0) check if connected to SPN and/or DNS.
if ConnectedToSPN.IsSet() {
updateOnlineStatus(StatusOnline, nil, "connected to SPN")
return
}
if ConnectedToDNS.IsSet() {
updateOnlineStatus(StatusOnline, nil, "connected to DNS")
return
}
// 1) check for addresses
ipv4, ipv6, err := GetAssignedAddresses()
@ -508,34 +510,28 @@ func checkOnlineStatus(ctx context.Context) {
// 3) resolve a query
// Check with primary dns check domain.
ips, err := net.LookupIP(DNSTestDomain)
if err != nil {
log.Warningf("netenv: dns check query failed: %s", err)
} else {
// check for expected response
for _, ip := range ips {
if ip.Equal(DNSTestExpectedIP) {
updateOnlineStatus(StatusOnline, nil, "all checks passed")
return
}
}
}
// If that did not work, check with fallback dns check domain.
ips, err = net.LookupIP(DNSFallbackTestDomain)
if err != nil {
log.Warningf("netenv: dns fallback check query failed: %s", err)
updateOnlineStatus(StatusLimited, nil, "dns fallback check query failed")
// Check if we can resolve the dns check domain.
if DNSTestQueryFunc == nil {
updateOnlineStatus(StatusOnline, nil, "all checks passed, dns query check disabled")
return
}
// check for expected response
for _, ip := range ips {
if ip.Equal(DNSFallbackTestExpectedIP) {
updateOnlineStatus(StatusOnline, nil, "all checks passed")
return
}
ips, ok, err := DNSTestQueryFunc(ctx, DNSTestDomain)
switch {
case ok && err != nil:
updateOnlineStatus(StatusOnline, nil, fmt.Sprintf(
"all checks passed, acceptable result for dns query check: %s",
err,
))
case ok && len(ips) >= 1 && ips[0].Equal(DNSTestExpectedIP):
updateOnlineStatus(StatusOnline, nil, "all checks passed")
case ok && len(ips) >= 1:
log.Warningf("netenv: dns query check response mismatched: got %s", ips[0])
updateOnlineStatus(StatusOnline, nil, "all checks passed, dns query check response mismatched")
case ok:
log.Warningf("netenv: dns query check response mismatched: empty response")
updateOnlineStatus(StatusOnline, nil, "all checks passed, dns query check response was empty")
default:
log.Warningf("netenv: dns query check failed: %s", err)
updateOnlineStatus(StatusOffline, nil, "dns query check failed")
}
// unexpected response
updateOnlineStatus(StatusSemiOnline, nil, "dns check query response mismatched")
}

140
netquery/chart_handler.go Normal file
View file

@ -0,0 +1,140 @@
package netquery
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"github.com/safing/portmaster/netquery/orm"
)
// ChartHandler implements http.Handler and serves the active-connections
// chart endpoint backed by the SQLite connection database.
type ChartHandler struct {
	Database *Database
}
// ServeHTTP implements http.Handler. It parses the chart query payload
// from req, generates the chart SQL from it, executes that SQL against
// the underlying SQLite database and writes the resulting rows — plus
// the generated query and parameters for debugging — as indented JSON.
func (ch *ChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	requestPayload, err := ch.parseRequest(req)
	if err != nil {
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}

	query, paramMap, err := requestPayload.generateSQL(req.Context(), ch.Database.Schema)
	if err != nil {
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}

	// actually execute the query against the database and collect the result
	var result []map[string]interface{}
	if err := ch.Database.Execute(
		req.Context(),
		query,
		orm.WithNamedArgs(paramMap),
		orm.WithResult(&result),
		orm.WithSchema(*ch.Database.Schema),
	); err != nil {
		http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
		return
	}

	// send the HTTP status code
	resp.WriteHeader(http.StatusOK)

	// prepare the result encoder.
	enc := json.NewEncoder(resp)
	enc.SetEscapeHTML(false)
	enc.SetIndent("", " ")

	// The status header has already been written above, so an encoding
	// or transport error can no longer be reported to the client;
	// deliberately ignore it.
	_ = enc.Encode(map[string]interface{}{
		"results": result,
		"query":   query,
		"params":  paramMap,
	})
}
// parseRequest extracts the chart query payload from req. For POST and
// PUT requests the payload is read from the request body; for GET
// requests it is expected JSON-encoded in the "q" URL query parameter.
// An empty payload is valid and yields the zero value payload.
// Payloads containing unknown fields are rejected.
func (ch *ChartHandler) parseRequest(req *http.Request) (*QueryActiveConnectionChartPayload, error) {
	var body io.Reader

	switch req.Method {
	case http.MethodPost, http.MethodPut:
		body = req.Body
	case http.MethodGet:
		body = strings.NewReader(req.URL.Query().Get("q"))
	default:
		return nil, fmt.Errorf("invalid HTTP method")
	}

	var requestPayload QueryActiveConnectionChartPayload

	blob, err := ioutil.ReadAll(body)
	if err != nil {
		return nil, fmt.Errorf("failed to read body: %w", err)
	}

	body = bytes.NewReader(blob)

	dec := json.NewDecoder(body)
	dec.DisallowUnknownFields()

	// Decode through the configured decoder (not json.Unmarshal) so
	// DisallowUnknownFields actually takes effect. io.EOF on an empty
	// body is fine and leaves the zero value payload.
	if err := dec.Decode(&requestPayload); err != nil && !errors.Is(err, io.EOF) {
		return nil, fmt.Errorf("invalid query: %w", err)
	}

	return &requestPayload, nil
}
// generateSQL builds the chart SQL statement and its named parameters
// from the request payload. The statement enumerates the unix seconds of
// the last 600 seconds (recursive CTE), joins connections that were
// active at each second and groups the result into 10-second buckets,
// summing connections whose verdict is in (2, 5, 6) as "value" and all
// others as "countBlocked".
// NOTE(review): verdicts 2, 5 and 6 are presumably the accept/reroute
// verdicts of the network package — confirm against network.Verdict.
func (req *QueryActiveConnectionChartPayload) generateSQL(ctx context.Context, schema *orm.TableSchema) (string, map[string]interface{}, error) {
	template := `
WITH RECURSIVE epoch(x) AS (
	SELECT strftime('%%s')-600
	UNION ALL
	SELECT x+1 FROM epoch WHERE x+1 < strftime('%%s')+0
)
SELECT x as timestamp, SUM(verdict IN (2, 5, 6)) AS value, SUM(verdict NOT IN (2, 5, 6)) as countBlocked FROM epoch
JOIN connections
	ON strftime('%%s', connections.started)+0 <= timestamp+0 AND (connections.ended IS NULL OR strftime('%%s', connections.ended)+0 >= timestamp+0)
%s
GROUP BY round(timestamp/10, 0)*10;`

	// Build the WHERE clause from the structured query part.
	clause, params, err := req.Query.toSQLWhereClause(ctx, "", schema, orm.DefaultEncodeConfig)
	if err != nil {
		return "", nil, err
	}

	if params == nil {
		params = make(map[string]interface{})
	}

	// AND the free-text search condition (if any) onto the structured
	// clause and merge its named parameters into params.
	if req.TextSearch != nil {
		textSearch, textParams, err := req.TextSearch.toSQLConditionClause(ctx, schema, "", orm.DefaultEncodeConfig)
		if err != nil {
			return "", nil, err
		}

		if textSearch != "" {
			if clause != "" {
				clause += " AND "
			}

			clause += textSearch

			for key, val := range textParams {
				params[key] = val
			}
		}
	}

	// Without any filter, splice an empty string into the template.
	if clause == "" {
		return fmt.Sprintf(template, ""), map[string]interface{}{}, nil
	}

	return fmt.Sprintf(template, "WHERE ( "+clause+")"), params, nil
}

315
netquery/database.go Normal file
View file

@ -0,0 +1,315 @@
package netquery
import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"sync"
"time"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/netquery/orm"
"github.com/safing/portmaster/network"
"github.com/safing/portmaster/network/netutils"
"github.com/safing/portmaster/network/packet"
"zombiezen.com/go/sqlite"
"zombiezen.com/go/sqlite/sqlitex"
)
// InMemory is the "file path" to open a new in-memory database.
const InMemory = ":memory:"

// Available connection types as their string representation.
// These are the values stored in the "type" column of the connections table.
const (
	ConnTypeDNS = "dns"
	ConnTypeIP  = "ip"
)

// ConnectionTypeToString is a lookup map to get the string representation
// of a network.ConnectionType as used by this package.
var ConnectionTypeToString = map[network.ConnectionType]string{
	network.DNSRequest:   ConnTypeDNS,
	network.IPConnection: ConnTypeIP,
}
type (
	// Database represents a SQLite3 backed connection database.
	// Its use is tailored for persistence and querying of network.Connection.
	// Access to the underlying SQLite database is synchronized.
	//
	// TODO(ppacher): somehow I'm receiving SIGBUS or SIGSEGV when not doing
	// synchronization in *Database. Check what exactly sqlite.OpenFullMutex, etc..
	// are actually supposed to do.
	//
	Database struct {
		Schema *orm.TableSchema

		l    sync.Mutex
		conn *sqlite.Conn
	}

	// Conn is a network connection that is stored in a SQLite database and accepted
	// by the *Database type of this package. This also defines, using the ./orm package,
	// the table schema and the model that is exposed via the runtime database as well as
	// the query API.
	//
	// Use ConvertConnection from this package to convert a network.Connection to this
	// representation.
	Conn struct {
		// ID is a device-unique identifier for the connection. It is built
		// from network.Connection by hashing the connection ID and the start
		// time. We cannot just use the network.Connection.ID because it is only unique
		// as long as the connection is still active and might be, although unlikely,
		// reused afterwards.
		ID              string            `sqlite:"id,primary"`
		ProfileID       string            `sqlite:"profile"`
		Path            string            `sqlite:"path"`
		Type            string            `sqlite:"type,varchar(8)"`
		External        bool              `sqlite:"external"`
		IPVersion       packet.IPVersion  `sqlite:"ip_version"`
		IPProtocol      packet.IPProtocol `sqlite:"ip_protocol"`
		LocalIP         string            `sqlite:"local_ip"`
		LocalPort       uint16            `sqlite:"local_port"`
		RemoteIP        string            `sqlite:"remote_ip"`
		RemotePort      uint16            `sqlite:"remote_port"`
		Domain          string            `sqlite:"domain"`
		Country         string            `sqlite:"country,varchar(2)"`
		ASN             uint              `sqlite:"asn"`
		ASOwner         string            `sqlite:"as_owner"`
		Latitude        float64           `sqlite:"latitude"`
		Longitude       float64           `sqlite:"longitude"`
		Scope           netutils.IPScope  `sqlite:"scope"`
		Verdict         network.Verdict   `sqlite:"verdict"`
		Started         time.Time         `sqlite:"started,text,time"`
		Ended           *time.Time        `sqlite:"ended,text,time"`
		Tunneled        bool              `sqlite:"tunneled"`
		Encrypted       bool              `sqlite:"encrypted"`
		Internal        bool              `sqlite:"internal"`
		Direction       string            `sqlite:"direction"`
		ExtraData       json.RawMessage   `sqlite:"extra_data"`
		Allowed         *bool             `sqlite:"allowed"`
		ProfileRevision int               `sqlite:"profile_revision"`
		ExitNode        *string           `sqlite:"exit_node"`

		// FIXME(ppacher): support "NOT" in search query to get rid of the following helper fields
		SPNUsed bool `sqlite:"spn_used"` // could use "exit_node IS NOT NULL" or "exit IS NULL"
		Active  bool `sqlite:"active"`   // could use "ended IS NOT NULL" or "ended IS NULL"

		// FIXME(ppacher): we need to profile here for "suggestion" support. It would be better to keep a table of profiles in sqlite and use joins here
		ProfileName string `sqlite:"profile_name"`
	}
)
// New opens a new database at path. The database is opened with Full-Mutex, Write-Ahead-Log (WAL)
// and Shared-Cache enabled.
//
// TODO(ppacher): check which sqlite "open flags" provide the best performance and don't cause
// SIGBUS/SIGSEGV when used with out a dedicated mutex in *Database.
//
func New(path string) (*Database, error) {
	conn, err := sqlite.OpenConn(
		path,
		sqlite.OpenCreate,
		sqlite.OpenReadWrite,
		sqlite.OpenFullMutex,
		sqlite.OpenWAL,
		sqlite.OpenSharedCache,
	)
	if err != nil {
		return nil, fmt.Errorf("failed to open sqlite at %s: %w", path, err)
	}

	// Derive the connections table schema from the Conn model.
	tblSchema, err := orm.GenerateTableSchema("connections", Conn{})
	if err != nil {
		return nil, err
	}

	db := &Database{
		Schema: tblSchema,
		conn:   conn,
	}

	return db, nil
}
// NewInMemory is like New but creates a new in-memory database and
// automatically applies the connection table schema.
func NewInMemory() (*Database, error) {
	db, err := New(InMemory)
	if err != nil {
		return nil, err
	}

	// An in-memory database always starts out empty, so applying the
	// schema here should never fail in practice.
	if migrateErr := db.ApplyMigrations(); migrateErr != nil {
		return nil, fmt.Errorf("failed to prepare database: %w", migrateErr)
	}

	return db, nil
}
// ApplyMigrations applies any table and data migrations that are needed
// to bring db up-to-date with the built-in schema.
// TODO(ppacher): right now this only applies the current schema and ignores
// any data-migrations. Once the history module is implemented this should
// become/use a full migration system -- use zombiezen.com/go/sqlite/sqlitemigration
func (db *Database) ApplyMigrations() error {
	// Build the CREATE TABLE statement from the inferred schema and run it.
	createSQL := db.Schema.CreateStatement(false)

	if err := sqlitex.ExecuteTransient(db.conn, createSQL, nil); err != nil {
		return fmt.Errorf("failed to create schema: %w", err)
	}

	return nil
}
// Execute executes a custom SQL query against the SQLite database used by db.
// It uses orm.RunQuery() under the hood so please refer to the orm package for
// more information about available options.
func (db *Database) Execute(ctx context.Context, sql string, args ...orm.QueryOption) error {
	// Serialize all access to the shared sqlite connection.
	db.l.Lock()
	defer db.l.Unlock()
	return orm.RunQuery(ctx, db.conn, sql, args...)
}
// CountRows returns the number of rows stored in the database.
func (db *Database) CountRows(ctx context.Context) (int, error) {
	var rows []struct {
		Count int `sqlite:"count"`
	}

	err := db.Execute(ctx, "SELECT COUNT(*) AS count FROM connections", orm.WithResult(&rows))
	if err != nil {
		return 0, fmt.Errorf("failed to perform query: %w", err)
	}

	if len(rows) != 1 {
		return 0, fmt.Errorf("unexpected number of rows returned, expected 1 got %d", len(rows))
	}

	return rows[0].Count, nil
}
// Cleanup removes all connections that have ended before threshold.
//
// NOTE(ppacher): there is no easy way to get the number of removed
// rows other than counting them in a first step. Though, that's
// probably not worth the cycles...
// NOTE(review): the count and the delete run as two separate Execute
// calls, so rows ending in between can make the returned count
// inexact — confirm that is acceptable for callers.
func (db *Database) Cleanup(ctx context.Context, threshold time.Time) (int, error) {
	where := `WHERE ended IS NOT NULL
			AND datetime(ended) < datetime(:threshold)`
	sql := "DELETE FROM connections " + where + ";"

	args := orm.WithNamedArgs(map[string]interface{}{
		":threshold": threshold.UTC().Format(orm.SqliteTimeFormat),
	})

	// First, count the rows that are about to be removed.
	var result []struct {
		Count int `sqlite:"count"`
	}
	if err := db.Execute(
		ctx,
		"SELECT COUNT(*) AS count FROM connections "+where,
		args,
		orm.WithTransient(),
		orm.WithResult(&result),
	); err != nil {
		return 0, fmt.Errorf("failed to perform query: %w", err)
	}
	if len(result) != 1 {
		return 0, fmt.Errorf("unexpected number of rows, expected 1 got %d", len(result))
	}

	// Then actually delete them.
	err := db.Execute(ctx, sql, args)
	if err != nil {
		return 0, err
	}
	return result[0].Count, nil
}
// dumpTo is a simple helper method that dumps all rows stored in the SQLite database
// as JSON to w.
// Any error aborts dumping rows and is returned.
func (db *Database) dumpTo(ctx context.Context, w io.Writer) error {
	db.l.Lock()
	defer db.l.Unlock()

	// Collect every row, decoded into the Conn model.
	var all []Conn
	err := sqlitex.ExecuteTransient(db.conn, "SELECT * FROM connections", &sqlitex.ExecOptions{
		ResultFunc: func(stmt *sqlite.Stmt) error {
			var row Conn
			if decodeErr := orm.DecodeStmt(ctx, db.Schema, stmt, &row, orm.DefaultDecodeConfig); decodeErr != nil {
				return decodeErr
			}
			all = append(all, row)
			return nil
		},
	})
	if err != nil {
		return err
	}

	enc := json.NewEncoder(w)
	enc.SetIndent("", " ")
	return enc.Encode(all)
}
// Save inserts the connection conn into the SQLite database. If conn
// already exists the table row is updated instead (upsert on the id column).
func (db *Database) Save(ctx context.Context, conn Conn) error {
	connMap, err := orm.ToParamMap(ctx, conn, "", orm.DefaultEncodeConfig)
	if err != nil {
		return fmt.Errorf("failed to encode connection for SQL: %w", err)
	}

	// Build the column list, the :name placeholders, the named argument
	// map and the "col = :col" assignments for the upsert in one pass.
	cols := make([]string, 0, len(connMap))
	binds := make([]string, 0, len(connMap))
	namedArgs := make(map[string]interface{}, len(connMap))
	assignments := make([]string, 0, len(connMap))

	for col, val := range connMap {
		cols = append(cols, col)
		binds = append(binds, ":"+col)
		namedArgs[":"+col] = val
		assignments = append(assignments, fmt.Sprintf("%s = :%s", col, col))
	}

	db.l.Lock()
	defer db.l.Unlock()

	// TODO(ppacher): make sure this one can be cached to speed up inserting
	// and save some CPU cycles for the user
	sql := fmt.Sprintf(
		`INSERT INTO connections (%s)
			VALUES(%s)
			ON CONFLICT(id) DO UPDATE SET
			%s
		`,
		strings.Join(cols, ", "),
		strings.Join(binds, ", "),
		strings.Join(assignments, ", "),
	)

	if err := sqlitex.ExecuteTransient(db.conn, sql, &sqlitex.ExecOptions{
		Named: namedArgs,
		ResultFunc: func(stmt *sqlite.Stmt) error {
			log.Errorf("netquery: got result statement with %d columns", stmt.ColumnCount())
			return nil
		},
	}); err != nil {
		log.Errorf("netquery: failed to execute:\n\t%q\n\treturned error was: %s\n\tparameters: %+v", sql, err, namedArgs)
		return err
	}

	return nil
}
// Close closes the underlying database connection. db must not be
// used after Close() has returned.
func (db *Database) Close() error {
	return db.conn.Close()
}

251
netquery/manager.go Normal file
View file

@ -0,0 +1,251 @@
package netquery
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"time"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/network"
)
type (
	// ConnectionStore describes the interface that is used by Manager
	// to save new or updated connection objects.
	// It is implemented by the *Database type of this package.
	ConnectionStore interface {
		// Save is called to persist the new or updated connection. If required,
		// it's up to the implementation to figure out if the operation is an
		// insert or an update.
		// The ID of Conn is unique and can be trusted to never collide with other
		// connections of the same device.
		Save(context.Context, Conn) error
	}

	// Manager handles new and updated network.Connections feeds and persists them
	// at a connection store.
	// Manager also registers itself as a runtime database and pushes updates to
	// connections using the local format.
	// Users should use this update feed rather than the deprecated "network:" database.
	Manager struct {
		store      ConnectionStore
		push       runtime.PushFunc
		runtimeReg *runtime.Registry
		pushPrefix string
	}
)
// NewManager returns a new connection manager that persists all newly created or
// updated connections at store.
func NewManager(store ConnectionStore, pushPrefix string, reg *runtime.Registry) (*Manager, error) {
	m := &Manager{
		store:      store,
		runtimeReg: reg,
		pushPrefix: pushPrefix,
	}

	// Register the manager as a runtime value so updates can be pushed
	// under pushPrefix.
	pushFunc, err := reg.Register(pushPrefix, runtime.SimpleValueGetterFunc(m.runtimeGet))
	if err != nil {
		return nil, err
	}
	m.push = pushFunc

	return m, nil
}
// runtimeGet implements the runtime database getter for the manager's
// push prefix. Querying stored connections through the runtime database
// is not supported yet, so it always returns an empty result.
func (mng *Manager) runtimeGet(keyOrPrefix string) ([]record.Record, error) {
	// TODO(ppacher):
	//		we don't yet support querying using the runtime database here ...
	//		consider exposing connection from the database at least by ID.
	//
	// NOTE(ppacher):
	//		for debugging purposes use RuntimeQueryRunner to execute plain
	//		SQL queries against the database using portbase/database/runtime.
	return nil, nil
}
// HandleFeed starts reading new and updated connections from feed and persists them
// in the configured ConnectionStore. HandleFeed blocks until either ctx is cancelled
// or feed is closed.
// Any errors encountered when processing new or updated connections are logged but
// otherwise ignored.
// HandleFeed handles and persists updates one after each other! Depending on the system
// load the user might want to use a buffered channel for feed.
func (mng *Manager) HandleFeed(ctx context.Context, feed <-chan *network.Connection) {
	for {
		select {
		case <-ctx.Done():
			return

		case conn, more := <-feed:
			if !more {
				return
			}

			model, err := convertConnection(conn)
			if err != nil {
				log.Errorf("netquery: failed to convert connection %s to sqlite model: %s", conn.ID, err)
				continue
			}

			log.Tracef("netquery: updating connection %s", conn.ID)

			if err := mng.store.Save(ctx, *model); err != nil {
				log.Errorf("netquery: failed to save connection %s in sqlite database: %s", conn.ID, err)
				continue
			}

			// Clone the record metadata from the connection into the
			// new model so the portbase/database layer can handle
			// NEW/UPDATE correctly.
			meta := conn.Meta().Duplicate()

			// Push an update for the connection.
			if err := mng.pushConnUpdate(ctx, *meta, *model); err != nil {
				log.Errorf("netquery: failed to push update for conn %s via database system: %s", conn.ID, err)
			}
		}
	}
}
// pushConnUpdate wraps conn as a JSON record under the manager's push
// prefix and publishes it via the runtime database.
func (mng *Manager) pushConnUpdate(ctx context.Context, meta record.Meta, conn Conn) error {
	payload, err := json.Marshal(conn)
	if err != nil {
		return fmt.Errorf("failed to marshal connection: %w", err)
	}

	recordKey := fmt.Sprintf("%s:%s%s", mng.runtimeReg.DatabaseName(), mng.pushPrefix, conn.ID)
	wrapper, err := record.NewWrapper(recordKey, &meta, dsd.JSON, payload)
	if err != nil {
		return fmt.Errorf("failed to create record wrapper: %w", err)
	}

	mng.push(wrapper)
	return nil
}
// convertConnection converts conn to the local representation used
// to persist the information in SQLite. convertConnection attempts
// to lock conn and may thus block for some time.
func convertConnection(conn *network.Connection) (*Conn, error) {
	conn.Lock()
	defer conn.Unlock()

	direction := "outbound"
	if conn.Inbound {
		direction = "inbound"
	}

	c := Conn{
		ID:              genConnID(conn),
		External:        conn.External,
		IPVersion:       conn.IPVersion,
		IPProtocol:      conn.IPProtocol,
		LocalIP:         conn.LocalIP.String(),
		LocalPort:       conn.LocalPort,
		Verdict:         conn.Verdict,
		Started:         time.Unix(conn.Started, 0),
		Tunneled:        conn.Tunneled,
		Encrypted:       conn.Encrypted,
		Internal:        conn.Internal,
		Direction:       direction,
		Type:            ConnectionTypeToString[conn.Type],
		ProfileID:       conn.ProcessContext.Source + "/" + conn.ProcessContext.Profile,
		Path:            conn.ProcessContext.BinaryPath,
		ProfileRevision: int(conn.ProfileRevisionCounter),
		ProfileName:     conn.ProcessContext.ProfileName,
	}
	// Note: c.Type is set via ConnectionTypeToString above; a previous
	// duplicate switch on conn.Type assigned the exact same values and
	// has been removed.

	// Map the verdict onto the tri-state Allowed column:
	// accepted/rerouted => true, undecided/undeterminable => NULL (nil),
	// everything else => false.
	switch conn.Verdict {
	case network.VerdictAccept, network.VerdictRerouteToNameserver, network.VerdictRerouteToTunnel:
		accepted := true
		c.Allowed = &accepted
	case network.VerdictUndecided, network.VerdictUndeterminable:
		c.Allowed = nil
	default:
		allowed := false
		c.Allowed = &allowed
	}

	if conn.Ended > 0 {
		ended := time.Unix(conn.Ended, 0)
		c.Ended = &ended
		c.Active = false
	} else {
		c.Active = true
	}

	// Collect additional, schema-less information for the extra_data
	// JSON column.
	extraData := map[string]interface{}{
		"pid": conn.ProcessContext.PID,
	}
	if conn.TunnelContext != nil {
		extraData["tunnel"] = conn.TunnelContext
		exitNode := conn.TunnelContext.GetExitNodeID()
		c.ExitNode = &exitNode
		c.SPNUsed = true
	}

	if conn.DNSContext != nil {
		extraData["dns"] = conn.DNSContext
	}

	// TODO(ppacher): enable when TLS inspection is merged
	// if conn.TLSContext != nil {
	// 	extraData["tls"] = conn.TLSContext
	// }

	if conn.Entity != nil {
		extraData["cname"] = conn.Entity.CNAME
		extraData["blockedByLists"] = conn.Entity.BlockedByLists
		extraData["blockedEntities"] = conn.Entity.BlockedEntities
		extraData["reason"] = conn.Reason

		c.RemoteIP = conn.Entity.IP.String()
		c.RemotePort = conn.Entity.Port
		c.Domain = conn.Entity.Domain
		c.Country = conn.Entity.Country
		c.ASN = conn.Entity.ASN
		c.ASOwner = conn.Entity.ASOrg
		c.Scope = conn.Entity.IPScope
		if conn.Entity.Coordinates != nil {
			c.Latitude = conn.Entity.Coordinates.Latitude
			c.Longitude = conn.Entity.Coordinates.Longitude
		}
	}

	// pre-compute the JSON blob for the extra data column
	// and assign it.
	extraDataBlob, err := json.Marshal(extraData)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal extra data: %w", err)
	}
	c.ExtraData = extraDataBlob

	return &c, nil
}
// genConnID derives a stable, device-unique identifier for conn by
// hashing its (potentially reused) connection ID together with its
// start time.
func genConnID(conn *network.Connection) string {
	seed := conn.ID + "-" + time.Unix(conn.Started, 0).String()
	sum := sha256.Sum256([]byte(seed))
	return hex.EncodeToString(sum[:])
}

165
netquery/module_api.go Normal file
View file

@ -0,0 +1,165 @@
package netquery
import (
"context"
"fmt"
"time"
"github.com/safing/portbase/api"
"github.com/safing/portbase/config"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/network"
)
// Module wires the in-memory SQLite connection store, the connection
// update manager and the HTTP API endpoints of the netquery package
// together.
type Module struct {
	*modules.Module

	db       *database.Interface       // interface to the portbase database system
	sqlStore *Database                 // in-memory SQLite connection store
	mng      *Manager                  // persists and pushes connection updates
	feed     chan *network.Connection  // buffered feed of new/updated connections
}
// init registers the netquery module with its dependencies so it is
// started after the api, network and database modules.
func init() {
	mod := new(Module)
	mod.Module = modules.Register(
		"netquery",
		mod.Prepare,
		mod.Start,
		mod.Stop,
		"api",
		"network",
		"database",
	)
}
// Prepare sets up the database interfaces, the in-memory SQLite store,
// the update manager and the connection feed channel, and registers the
// query and chart API endpoints.
func (mod *Module) Prepare() error {
	var err error

	mod.db = database.NewInterface(&database.Options{
		Local:    true,
		Internal: true,
	})

	mod.sqlStore, err = NewInMemory()
	if err != nil {
		return fmt.Errorf("failed to create in-memory database: %w", err)
	}

	mod.mng, err = NewManager(mod.sqlStore, "netquery/data/", runtime.DefaultRegistry)
	if err != nil {
		return fmt.Errorf("failed to create manager: %w", err)
	}

	mod.feed = make(chan *network.Connection, 1000)

	queryHandler := &QueryHandler{
		Database:  mod.sqlStore,
		IsDevMode: config.Concurrent.GetAsBool(config.CfgDevModeKey, false),
	}

	chartHandler := &ChartHandler{
		Database: mod.sqlStore,
	}

	// FIXME(ppacher): use appropriate permissions for this
	if err := api.RegisterEndpoint(api.Endpoint{
		Path:        "netquery/query",
		MimeType:    "application/json",
		Read:        api.PermitUser,
		Write:       api.PermitUser,
		BelongsTo:   mod.Module,
		HandlerFunc: queryHandler.ServeHTTP,
		Name:        "Query Connections",
		Description: "Query the in-memory sqlite connection database.",
	}); err != nil {
		return fmt.Errorf("failed to register API endpoint: %w", err)
	}

	if err := api.RegisterEndpoint(api.Endpoint{
		Path:        "netquery/charts/connection-active",
		MimeType:    "application/json",
		Read:        api.PermitUser,
		Write:       api.PermitUser,
		BelongsTo:   mod.Module,
		HandlerFunc: chartHandler.ServeHTTP,
		Name:        "Active Connections Chart",
		Description: "Query the in-memory sqlite connection database and return a chart of active connections.",
	}); err != nil {
		return fmt.Errorf("failed to register API endpoint: %w", err)
	}

	return nil
}
// Start launches the service workers: a feeder that subscribes to the
// "network:" database tree and forwards connection records to the feed
// channel, a persister that writes them into the SQLite store, and a
// cleaner that periodically removes rows for long-ended connections.
func (mod *Module) Start() error {
	mod.StartServiceWorker("netquery-feeder", time.Second, func(ctx context.Context) error {
		sub, err := mod.db.Subscribe(query.New("network:"))
		if err != nil {
			return fmt.Errorf("failed to subscribe to network tree: %w", err)
		}
		defer sub.Cancel()

		for {
			select {
			case <-ctx.Done():
				return nil
			case rec, ok := <-sub.Feed:
				if !ok {
					return nil
				}

				conn, ok := rec.(*network.Connection)
				if !ok {
					// This is fine as we also receive process updates on
					// this channel.
					continue
				}

				// Guard the send with ctx so a full feed channel cannot
				// block shutdown forever.
				select {
				case mod.feed <- conn:
				case <-ctx.Done():
					return nil
				}
			}
		}
	})

	mod.StartServiceWorker("netquery-persister", time.Second, func(ctx context.Context) error {
		mod.mng.HandleFeed(ctx, mod.feed)
		return nil
	})

	mod.StartServiceWorker("netquery-row-cleaner", time.Second, func(ctx context.Context) error {
		// Use one ticker for the whole loop instead of allocating a new
		// timer via time.After on every iteration.
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()

		for {
			select {
			case <-ctx.Done():
				return nil
			case <-ticker.C:
				threshold := time.Now().Add(-network.DeleteConnsAfterEndedThreshold)
				count, err := mod.sqlStore.Cleanup(ctx, threshold)
				if err != nil {
					log.Errorf("netquery: failed to count number of rows in memory: %s", err)
				} else {
					log.Tracef("netquery: successfully removed %d old rows that ended before %s", count, threshold)
				}
			}
		}
	})

	// for debugging, we provide a simple direct SQL query interface using
	// the runtime database
	// FIXME: Expose only in dev mode.
	_, err := NewRuntimeQueryRunner(mod.sqlStore, "netquery/query/", runtime.DefaultRegistry)
	if err != nil {
		return fmt.Errorf("failed to set up runtime SQL query runner: %w", err)
	}

	return nil
}
// Stop closes the connection feed channel, signalling the persister
// worker to exit.
// NOTE(review): closing feed here assumes the netquery-feeder worker has
// already stopped sending when Stop runs (a send on a closed channel
// panics) — confirm the module framework stops workers before Stop.
func (mod *Module) Stop() error {
	close(mod.feed)
	return nil
}

482
netquery/orm/decoder.go Normal file
View file

@ -0,0 +1,482 @@
package orm
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"log"
"reflect"
"strings"
"time"
"zombiezen.com/go/sqlite"
)
// Sentinel errors returned when encoding or decoding with orm.
var (
	errStructExpected        = errors.New("encode: can only encode structs to maps")
	errStructPointerExpected = errors.New("decode: result must be pointer to a struct type or map[string]interface{}")
	errUnexpectedColumnType  = errors.New("decode: unexpected column type")
)

// constants used when transforming data to and from sqlite.
var (
	// SqliteTimeFormat defines the string representation that is
	// expected by SQLite DATETIME functions.
	// Note that SQLite itself does not include support for a DATETIME
	// column type. Instead, dates and times are stored either as INTEGER,
	// TEXT or REAL.
	// This package provides support for time.Time being stored as TEXT (using a
	// preconfigured timezone; UTC by default) or as INTEGER (the user can choose between
	// unixepoch and unixnano-epoch where the nano variant is not officially supported by
	// SQLITE).
	SqliteTimeFormat = "2006-01-02 15:04:05"
)
type (
	// Stmt describes the interface that must be implemented in order to
	// be decodable to a struct type using DecodeStmt. This interface is implemented
	// by *sqlite.Stmt.
	Stmt interface {
		ColumnCount() int
		ColumnName(int) string
		ColumnType(int) sqlite.ColumnType
		ColumnText(int) string
		ColumnBool(int) bool
		ColumnFloat(int) float64
		ColumnInt(int) int
		ColumnReader(int) *bytes.Reader
	}

	// DecodeFunc is called for each non-basic type during decoding.
	// It returns the decoded value, whether the hook handled the column,
	// and an error, if any.
	DecodeFunc func(colIdx int, colDef *ColumnDef, stmt Stmt, fieldDef reflect.StructField, outval reflect.Value) (interface{}, bool, error)

	// DecodeConfig configures how DecodeStmt decodes result rows.
	DecodeConfig struct {
		// DecodeHooks are tried in order for each column before falling
		// back to basic type decoding.
		DecodeHooks []DecodeFunc
	}
)
// DecodeStmt decodes the current result row loaded in Stmt into the struct or map type result.
// Decoding hooks configured in cfg are executed before trying to decode basic types and may
// be specified to provide support for special types.
// See DatetimeDecoder() for an example of a DecodeHook that handles graceful time.Time conversion.
//
func DecodeStmt(ctx context.Context, schema *TableSchema, stmt Stmt, result interface{}, cfg DecodeConfig) error {
	// make sure we got something to decode into ...
	if result == nil {
		return fmt.Errorf("%w, got %T", errStructPointerExpected, result)
	}

	// fast path for decoding into a map
	if mp, ok := result.(*map[string]interface{}); ok {
		return decodeIntoMap(ctx, schema, stmt, mp, cfg)
	}

	// make sure we got a pointer in result
	if reflect.TypeOf(result).Kind() != reflect.Ptr {
		return fmt.Errorf("%w, got %T", errStructPointerExpected, result)
	}

	// make sure it's a pointer to a struct type
	t := reflect.ValueOf(result).Elem().Type()
	if t.Kind() != reflect.Struct {
		return fmt.Errorf("%w, got %T", errStructPointerExpected, result)
	}

	// if result is a nil pointer make sure to allocate some space
	// for the resulting struct
	// NOTE(review): if result were a nil pointer, the Elem().Type() call
	// above would already have panicked, and Set on a value obtained via
	// reflect.ValueOf is not addressable — this branch looks unreachable;
	// confirm and consider removing.
	resultValue := reflect.ValueOf(result)
	if resultValue.IsNil() {
		resultValue.Set(
			reflect.New(t),
		)
	}

	// we need access to the struct directly and not to the
	// pointer.
	target := reflect.Indirect(resultValue)

	// create a lookup map from column name (field name or sqlite:"" tag)
	// to the Go struct field name
	lm := make(map[string]string)
	for i := 0; i < target.NumField(); i++ {
		fieldType := t.Field(i)

		// skip unexported fields
		if !fieldType.IsExported() {
			continue
		}

		lm[sqlColumnName(fieldType)] = fieldType.Name
	}

	// iterate over all columns and assign them to the correct
	// fields
	for i := 0; i < stmt.ColumnCount(); i++ {
		colName := stmt.ColumnName(i)
		fieldName, ok := lm[colName]
		if !ok {
			// there's no target field for this column
			// so we can skip it
			continue
		}

		fieldType, _ := t.FieldByName(fieldName)
		value := target.FieldByName(fieldName)

		colType := stmt.ColumnType(i)

		// if the column is reported as NULL we keep
		// the field as it is.
		// TODO(ppacher): should we set it to nil here?
		if colType == sqlite.TypeNull {
			continue
		}

		// if value is a nil pointer we need to allocate some memory
		// first, then point value at the dereferenced storage so the
		// decoded column can be assigned through the pointer.
		if getKind(value) == reflect.Ptr && value.IsNil() {
			storage := reflect.New(fieldType.Type.Elem())
			value.Set(storage)

			// make sure value actually points the
			// dereferenced target storage
			value = storage.Elem()
		}

		colDef := schema.GetColumnDef(colName)

		// execute all decode hooks but make sure we use decodeBasic() as the
		// last one.
		columnValue, err := runDecodeHooks(
			i,
			colDef,
			stmt,
			fieldType,
			value,
			append(cfg.DecodeHooks, decodeBasic()),
		)
		if err != nil {
			return err
		}

		// if we don't have a converted value now the column could not
		// be decoded by any hook or the basic decoder — fail.
		if columnValue == nil {
			return fmt.Errorf("cannot decode column %d (type=%s)", i, colType)
		}

		//log.Printf("valueTypeName: %s fieldName = %s value-orig = %s value = %s (%v) newValue = %s", value.Type().String(), fieldName, target.FieldByName(fieldName).Type(), value.Type(), value, columnValue)

		// convert it to the target type if conversion is possible
		newValue := reflect.ValueOf(columnValue)
		if newValue.Type().ConvertibleTo(value.Type()) {
			newValue = newValue.Convert(value.Type())
		}

		// assign the new value to the struct field.
		value.Set(newValue)
	}

	return nil
}
// DatetimeDecoder returns a DecodeFunc that decodes sqlite INTEGER or TEXT
// storage classes into time.Time. INTEGER values are interpreted as Unix
// epoch seconds, or as nanoseconds when the struct field tag contains
// "unixnano". TEXT values are parsed using SqliteTimeFormat and interpreted
// in the timezone loc. NULL columns decode to the zero time.
//
// FIXME(ppacher): update comment about loc parameter and TEXT storage class parsing
func DatetimeDecoder(loc *time.Location) DecodeFunc {
	return func(colIdx int, colDef *ColumnDef, stmt Stmt, fieldDef reflect.StructField, outval reflect.Value) (interface{}, bool, error) {
		// If the column definition is available we use the target Go type
		// from there; otherwise fall back to the output value's type.
		outType := outval.Type()
		if colDef != nil {
			outType = colDef.GoType
		}

		// We only care about "time.Time" targets here; everything else is
		// left for the other decode hooks.
		if outType.String() != "time.Time" || (colDef != nil && !colDef.IsTime) {
			// TODO(review): leftover debug logging; removing it requires
			// also dropping the "log" import at the top of this file.
			log.Printf("not decoding %s %v", outType, colDef)
			return nil, false, nil
		}

		switch stmt.ColumnType(colIdx) {
		case sqlite.TypeInteger:
			// Stored as unix-epoch. If "unixnano" is set in the struct field
			// tag we parse it with nano-second resolution.
			// TODO(ppacher): actually split the tag value at "," and search
			// the slice for "unixnano"
			if strings.Contains(fieldDef.Tag.Get("sqlite"), ",unixnano") {
				return time.Unix(0, int64(stmt.ColumnInt(colIdx))), true, nil
			}

			return time.Unix(int64(stmt.ColumnInt(colIdx)), 0), true, nil

		case sqlite.TypeText:
			// Stored as ISO8601 but without any timezone information, so we
			// always interpret it in loc.
			t, err := time.ParseInLocation(SqliteTimeFormat, stmt.ColumnText(colIdx), loc)
			if err != nil {
				return nil, false, fmt.Errorf("failed to parse %q in %s: %w", stmt.ColumnText(colIdx), fieldDef.Name, err)
			}

			return t, true, nil

		case sqlite.TypeFloat:
			// Stored as Julian day numbers; not implemented.
			return nil, false, fmt.Errorf("REAL storage type not supported for time.Time")

		case sqlite.TypeNull:
			return nil, true, nil

		default:
			return nil, false, fmt.Errorf("unsupported storage type for time.Time: %s", stmt.ColumnType(colIdx))
		}
	}
}
// decodeIntoMap decodes every column of stmt into the map mp, allocating
// the map first if it is nil. Each column value is produced by running the
// configured decode hooks with decodeBasic() as the final fallback.
func decodeIntoMap(ctx context.Context, schema *TableSchema, stmt Stmt, mp *map[string]interface{}, cfg DecodeConfig) error {
	if *mp == nil {
		*mp = make(map[string]interface{})
	}

	// decodeBasic() always runs last so custom hooks take precedence.
	hooks := append(cfg.DecodeHooks, decodeBasic())

	for colIdx := 0; colIdx < stmt.ColumnCount(); colIdx++ {
		name := stmt.ColumnName(colIdx)
		colDef := schema.GetColumnDef(name)

		// Without a column definition the target is a plain interface{};
		// with one we decode into the Go type recorded in the schema.
		var receiver interface{}
		outVal := reflect.ValueOf(&receiver).Elem()
		field := reflect.StructField{}
		if colDef != nil {
			outVal = reflect.New(colDef.GoType).Elem()
			field = reflect.StructField{
				Type: colDef.GoType,
			}
		}

		decoded, err := runDecodeHooks(colIdx, colDef, stmt, field, outVal, hooks)
		if err != nil {
			return fmt.Errorf("failed to decode column %s: %w", name, err)
		}

		(*mp)[name] = decoded
	}

	return nil
}
// decodeBasic returns a DecodeFunc that decodes the basic sqlite storage
// classes (TEXT, INTEGER, FLOAT, BLOB and NULL) into their matching Go
// types. It is always appended as the last decode hook so custom hooks
// take precedence.
//
// Fixed: removed a leftover debug log.Printf that fired for every single
// decoded column.
func decodeBasic() DecodeFunc {
	return func(colIdx int, colDef *ColumnDef, stmt Stmt, fieldDef reflect.StructField, outval reflect.Value) (result interface{}, handled bool, err error) {
		valueKind := getKind(outval)
		colType := stmt.ColumnType(colIdx)
		colName := stmt.ColumnName(colIdx)
		errInvalidType := fmt.Errorf("%w %s for column %s with field type %s", errUnexpectedColumnType, colType.String(), colName, outval.Type())

		// if we have the column definition available we
		// use the target go type from there.
		if colDef != nil {
			valueKind = normalizeKind(colDef.GoType.Kind())

			// if we have a column definition we try to convert the value to
			// the actual Go-type that was used in the model.
			// this is useful, for example, to ensure a []byte{} is always decoded into json.RawMessage
			// or that type aliases like (type myInt int) are decoded into myInt instead of int
			defer func() {
				if handled {
					t := reflect.New(colDef.GoType).Elem()

					if result == nil || reflect.ValueOf(result).IsZero() {
						return
					}

					if reflect.ValueOf(result).Type().ConvertibleTo(colDef.GoType) {
						result = reflect.ValueOf(result).Convert(colDef.GoType).Interface()
					}

					t.Set(reflect.ValueOf(result))
					result = t.Interface()
				}
			}()
		}

		// NULL columns: nullable targets stay nil, non-nullable targets are
		// reset to their zero value.
		if colType == sqlite.TypeNull {
			if colDef != nil && colDef.Nullable {
				return nil, true, nil
			}

			if colDef != nil && !colDef.Nullable {
				return reflect.New(colDef.GoType).Elem().Interface(), true, nil
			}

			if outval.Kind() == reflect.Ptr {
				return nil, true, nil
			}
		}

		switch valueKind {
		case reflect.String:
			if colType != sqlite.TypeText {
				return nil, false, errInvalidType
			}
			return stmt.ColumnText(colIdx), true, nil

		case reflect.Bool:
			// sqlite does not have a BOOL type, it rather stores a 1/0 in a column
			// with INTEGER affinity.
			if colType != sqlite.TypeInteger {
				return nil, false, errInvalidType
			}
			return stmt.ColumnBool(colIdx), true, nil

		case reflect.Float64:
			if colType != sqlite.TypeFloat {
				return nil, false, errInvalidType
			}
			return stmt.ColumnFloat(colIdx), true, nil

		case reflect.Int, reflect.Uint: // getKind() normalizes all ints to reflect.Int/Uint because sqlite doesn't really care ...
			if colType != sqlite.TypeInteger {
				return nil, false, errInvalidType
			}
			return stmt.ColumnInt(colIdx), true, nil

		case reflect.Slice:
			if outval.Type().Elem().Kind() != reflect.Uint8 {
				return nil, false, fmt.Errorf("slices other than []byte for BLOB are not supported")
			}
			if colType != sqlite.TypeBlob {
				return nil, false, errInvalidType
			}
			columnValue, err := io.ReadAll(stmt.ColumnReader(colIdx))
			if err != nil {
				return nil, false, fmt.Errorf("failed to read blob for column %s: %w", fieldDef.Name, err)
			}
			return columnValue, true, nil

		case reflect.Interface:
			// for interface{} targets the Go type is derived from the sqlite
			// storage class of the column.
			var (
				t reflect.Type
				x interface{}
			)

			switch colType {
			case sqlite.TypeBlob:
				t = reflect.TypeOf([]byte{})
				columnValue, err := io.ReadAll(stmt.ColumnReader(colIdx))
				if err != nil {
					return nil, false, fmt.Errorf("failed to read blob for column %s: %w", fieldDef.Name, err)
				}
				x = columnValue

			case sqlite.TypeFloat:
				t = reflect.TypeOf(float64(0))
				x = stmt.ColumnFloat(colIdx)

			case sqlite.TypeInteger:
				t = reflect.TypeOf(int(0))
				x = stmt.ColumnInt(colIdx)

			case sqlite.TypeText:
				t = reflect.TypeOf(string(""))
				x = stmt.ColumnText(colIdx)

			case sqlite.TypeNull:
				t = nil
				x = nil

			default:
				return nil, false, fmt.Errorf("unsupported column type %s", colType)
			}

			if t == nil {
				return nil, true, nil
			}

			target := reflect.New(t).Elem()
			target.Set(reflect.ValueOf(x))

			return target.Interface(), true, nil

		default:
			return nil, false, fmt.Errorf("cannot decode into %s", valueKind)
		}
	}
}
func sqlColumnName(fieldType reflect.StructField) string {
tagValue, hasTag := fieldType.Tag.Lookup("sqlite")
if !hasTag {
return fieldType.Name
}
parts := strings.Split(tagValue, ",")
if parts[0] != "" {
return parts[0]
}
return fieldType.Name
}
// runDecodeHooks feeds the column at index colIdx through hooks until the
// first hook reports that it handled the value; that hook's result wins and
// the remaining hooks are not executed. If a hook fails, its error is
// returned to the caller immediately. When no hook feels responsible,
// (nil, nil) is returned.
func runDecodeHooks(colIdx int, colDef *ColumnDef, stmt Stmt, fieldDef reflect.StructField, outval reflect.Value, hooks []DecodeFunc) (interface{}, error) {
	for _, hook := range hooks {
		value, handled, err := hook(colIdx, colDef, stmt, fieldDef, outval)
		if err != nil {
			return value, err
		}

		if handled {
			return value, nil
		}
	}

	return nil, nil
}
// getKind returns the reflect.Kind of val with all Int, Uint and Float
// variants normalized to their base kind (see normalizeKind).
func getKind(val reflect.Value) reflect.Kind {
	return normalizeKind(val.Kind())
}
func normalizeKind(kind reflect.Kind) reflect.Kind {
switch {
case kind >= reflect.Int && kind <= reflect.Int64:
return reflect.Int
case kind >= reflect.Uint && kind <= reflect.Uint64:
return reflect.Uint
case kind >= reflect.Float32 && kind <= reflect.Float64:
return reflect.Float64
default:
return kind
}
}
// DefaultDecodeConfig holds the default decoding configuration. It decodes
// sqlite INTEGER/TEXT time columns into time.Time using the UTC timezone.
var DefaultDecodeConfig = DecodeConfig{
	DecodeHooks: []DecodeFunc{
		DatetimeDecoder(time.UTC),
	},
}

View file

@ -0,0 +1,573 @@
package orm
import (
"bytes"
"context"
"encoding/json"
"log"
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
"zombiezen.com/go/sqlite"
)
// testStmt is a static, in-memory implementation of the Stmt interface.
// It feeds the decoder deterministic rows in tests; columns, values and
// types are index-aligned.
type testStmt struct {
	columns []string            // column names
	values  []interface{}       // raw column values, asserted to the requested type
	types   []sqlite.ColumnType // sqlite storage class per column
}

func (ts testStmt) ColumnCount() int                    { return len(ts.columns) }
func (ts testStmt) ColumnName(i int) string             { return ts.columns[i] }
func (ts testStmt) ColumnBool(i int) bool               { return ts.values[i].(bool) }
func (ts testStmt) ColumnText(i int) string             { return ts.values[i].(string) }
func (ts testStmt) ColumnFloat(i int) float64           { return ts.values[i].(float64) }
func (ts testStmt) ColumnInt(i int) int                 { return ts.values[i].(int) }
func (ts testStmt) ColumnReader(i int) *bytes.Reader    { return bytes.NewReader(ts.values[i].([]byte)) }
func (ts testStmt) ColumnType(i int) sqlite.ColumnType  { return ts.types[i] }

// compile time check
var _ Stmt = new(testStmt)
// exampleFieldTypes covers decoding into plain basic field types.
type exampleFieldTypes struct {
	S string
	I int
	F float64
	B bool
}

// examplePointerTypes covers decoding into pointers to basic types.
type examplePointerTypes struct {
	S *string
	I *int
	F *float64
	B *bool
}

// exampleStructTags covers column renaming via the sqlite struct tag.
type exampleStructTags struct {
	S string `sqlite:"col_string"`
	I int    `sqlite:"col_int"`
}

// exampleIntConv covers conversion into differently sized integer types.
type exampleIntConv struct {
	I8  int8
	I16 int16
	I32 int32
	I64 int64
	I   int
}

// exampleBlobTypes covers decoding BLOB columns into []byte.
type exampleBlobTypes struct {
	B []byte
}

// exampleJSONRawTypes covers decoding BLOB columns into json.RawMessage.
type exampleJSONRawTypes struct {
	B json.RawMessage
}

// exampleTimeTypes covers decoding into time.Time values and pointers.
type exampleTimeTypes struct {
	T  time.Time
	TP *time.Time
}

// exampleInterface covers decoding into empty interface targets.
type exampleInterface struct {
	I  interface{}
	IP *interface{}
}
// Equal reports whether other is an *exampleTimeTypes whose T is equal to
// ett.T and whose TP is either nil on both sides or points to an equal time.
func (ett *exampleTimeTypes) Equal(other interface{}) bool {
	oett, ok := other.(*exampleTimeTypes)
	if !ok {
		return false
	}

	// The parentheses around the TP comparison matter: && binds tighter
	// than ||, so without them "both TP nil" would make Equal return true
	// even when the T values differ.
	return ett.T.Equal(oett.T) &&
		((ett.TP != nil && oett.TP != nil && ett.TP.Equal(*oett.TP)) || (ett.TP == nil && oett.TP == nil))
}
// myInt is a user-declared integer type used to test decoding into
// non-builtin types via the column GoType hint.
type myInt int

// exampleTimeNano covers decoding INTEGER columns with nanosecond
// resolution via the ",unixnano" struct tag.
type exampleTimeNano struct {
	T time.Time `sqlite:",unixnano"`
}

// Equal reports whether other is an *exampleTimeNano with an equal T.
func (etn *exampleTimeNano) Equal(other interface{}) bool {
	oetn, ok := other.(*exampleTimeNano)
	if !ok {
		return false
	}

	return etn.T.Equal(oetn.T)
}
// Test_Decoder exercises DecodeStmt against a table of statically defined
// result rows (testStmt), optional column definitions and expected outputs.
func Test_Decoder(t *testing.T) {
	ctx := context.TODO()
	refTime := time.Date(2022, time.February, 15, 9, 51, 00, 00, time.UTC)

	cases := []struct {
		Desc      string
		Stmt      testStmt
		ColumnDef []ColumnDef
		// Result is the target value passed to DecodeStmt.
		Result interface{}
		// Expected is the expected decoded value. nil means an error is
		// expected; a func() interface{} is invoked to build the value.
		Expected interface{}
	}{
		{
			"Decoding into nil is not allowed",
			testStmt{
				columns: nil,
				values:  nil,
				types:   nil,
			},
			nil,
			nil,
			nil,
		},
		{
			"Decoding into basic types",
			testStmt{
				columns: []string{"S", "I", "F", "B"},
				types: []sqlite.ColumnType{
					sqlite.TypeText,
					sqlite.TypeInteger,
					sqlite.TypeFloat,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					"string value",
					1,
					1.2,
					true,
				},
			},
			nil,
			&exampleFieldTypes{},
			&exampleFieldTypes{
				S: "string value",
				I: 1,
				F: 1.2,
				B: true,
			},
		},
		{
			"Decoding into basic types with different order",
			testStmt{
				columns: []string{"I", "S", "B", "F"},
				types: []sqlite.ColumnType{
					sqlite.TypeInteger,
					sqlite.TypeText,
					sqlite.TypeInteger,
					sqlite.TypeFloat,
				},
				values: []interface{}{
					1,
					"string value",
					true,
					1.2,
				},
			},
			nil,
			&exampleFieldTypes{},
			&exampleFieldTypes{
				S: "string value",
				I: 1,
				F: 1.2,
				B: true,
			},
		},
		{
			"Decoding into basic types with missing values",
			testStmt{
				columns: []string{"F", "B"},
				types: []sqlite.ColumnType{
					sqlite.TypeFloat,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					1.2,
					true,
				},
			},
			nil,
			&exampleFieldTypes{},
			&exampleFieldTypes{
				F: 1.2,
				B: true,
			},
		},
		{
			"Decoding into pointer types",
			testStmt{
				columns: []string{"S", "I", "F", "B"},
				types: []sqlite.ColumnType{
					sqlite.TypeText,
					sqlite.TypeInteger,
					sqlite.TypeFloat,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					"string value",
					1,
					1.2,
					true,
				},
			},
			nil,
			&examplePointerTypes{},
			func() interface{} {
				s := "string value"
				i := 1
				f := 1.2
				b := true
				return &examplePointerTypes{
					S: &s,
					I: &i,
					F: &f,
					B: &b,
				}
			},
		},
		{
			"Decoding into pointer types with missing values",
			testStmt{
				columns: []string{"S", "B"},
				types: []sqlite.ColumnType{
					sqlite.TypeText,
					sqlite.TypeInteger,
					sqlite.TypeFloat,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					"string value",
					true,
				},
			},
			nil,
			&examplePointerTypes{},
			func() interface{} {
				s := "string value"
				b := true
				return &examplePointerTypes{
					S: &s,
					B: &b,
				}
			},
		},
		{
			"Decoding into fields with struct tags",
			testStmt{
				columns: []string{"col_string", "col_int"},
				types: []sqlite.ColumnType{
					sqlite.TypeText,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					"string value",
					1,
				},
			},
			nil,
			&exampleStructTags{},
			&exampleStructTags{
				S: "string value",
				I: 1,
			},
		},
		{
			"Decoding into correct int type",
			testStmt{
				columns: []string{"I8", "I16", "I32", "I64", "I"},
				types: []sqlite.ColumnType{
					sqlite.TypeInteger,
					sqlite.TypeInteger,
					sqlite.TypeInteger,
					sqlite.TypeInteger,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					1,
					1,
					1,
					1,
					1,
				},
			},
			nil,
			&exampleIntConv{},
			&exampleIntConv{
				1, 1, 1, 1, 1,
			},
		},
		{
			"Handling NULL values for basic types",
			testStmt{
				columns: []string{"S", "I", "F"},
				types: []sqlite.ColumnType{
					sqlite.TypeNull,
					sqlite.TypeNull,
					sqlite.TypeFloat,
				},
				values: []interface{}{
					// we use nil here but actually that does not matter
					nil,
					nil,
					1.0,
				},
			},
			nil,
			&exampleFieldTypes{},
			&exampleFieldTypes{
				F: 1.0,
			},
		},
		{
			"Handling NULL values for pointer types",
			testStmt{
				columns: []string{"S", "I", "F"},
				types: []sqlite.ColumnType{
					sqlite.TypeNull,
					sqlite.TypeNull,
					sqlite.TypeFloat,
				},
				values: []interface{}{
					// we use nil here but actually that does not matter
					nil,
					nil,
					1.0,
				},
			},
			nil,
			&examplePointerTypes{},
			func() interface{} {
				f := 1.0
				return &examplePointerTypes{F: &f}
			},
		},
		{
			"Handling blob types",
			testStmt{
				columns: []string{"B"},
				types: []sqlite.ColumnType{
					sqlite.TypeBlob,
				},
				values: []interface{}{
					([]byte)("hello world"),
				},
			},
			nil,
			&exampleBlobTypes{},
			&exampleBlobTypes{
				B: ([]byte)("hello world"),
			},
		},
		{
			"Handling blob types as json.RawMessage",
			testStmt{
				columns: []string{"B"},
				types: []sqlite.ColumnType{
					sqlite.TypeBlob,
				},
				values: []interface{}{
					([]byte)("hello world"),
				},
			},
			nil,
			&exampleJSONRawTypes{},
			&exampleJSONRawTypes{
				B: (json.RawMessage)("hello world"),
			},
		},
		{
			"Handling time.Time and pointers to it",
			testStmt{
				columns: []string{"T", "TP"},
				types: []sqlite.ColumnType{
					sqlite.TypeInteger,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					int(refTime.Unix()),
					int(refTime.Unix()),
				},
			},
			nil,
			&exampleTimeTypes{},
			&exampleTimeTypes{
				T:  refTime,
				TP: &refTime,
			},
		},
		{
			"Handling time.Time in nano-second resolution (struct tags)",
			testStmt{
				columns: []string{"T", "TP"},
				types: []sqlite.ColumnType{
					sqlite.TypeInteger,
					sqlite.TypeInteger,
				},
				values: []interface{}{
					int(refTime.UnixNano()),
					int(refTime.UnixNano()),
				},
			},
			nil,
			&exampleTimeNano{},
			&exampleTimeNano{
				T: refTime,
			},
		},
		{
			"Decoding into interface",
			testStmt{
				columns: []string{"I", "IP"},
				types: []sqlite.ColumnType{
					sqlite.TypeText,
					sqlite.TypeText,
				},
				values: []interface{}{
					"value1",
					"value2",
				},
			},
			nil,
			&exampleInterface{},
			func() interface{} {
				var x interface{}
				x = "value2"
				return &exampleInterface{
					I:  "value1",
					IP: &x,
				}
			},
		},
		{
			"Decoding into map[string]interface{}",
			testStmt{
				columns: []string{"I", "F", "S", "B"},
				types: []sqlite.ColumnType{
					sqlite.TypeInteger,
					sqlite.TypeFloat,
					sqlite.TypeText,
					sqlite.TypeBlob,
				},
				values: []interface{}{
					1,
					1.1,
					"string value",
					[]byte("blob value"),
				},
			},
			nil,
			new(map[string]interface{}),
			&map[string]interface{}{
				"I": 1,
				"F": 1.1,
				"S": "string value",
				"B": []byte("blob value"),
			},
		},
		{
			"Decoding using type-hints",
			testStmt{
				columns: []string{"B", "T"},
				types: []sqlite.ColumnType{
					sqlite.TypeInteger,
					sqlite.TypeText,
				},
				values: []interface{}{
					true,
					refTime.Format(SqliteTimeFormat),
				},
			},
			[]ColumnDef{
				{
					Name:   "B",
					Type:   sqlite.TypeInteger,
					GoType: reflect.TypeOf(true),
				},
				{
					Name:   "T",
					Type:   sqlite.TypeText,
					GoType: reflect.TypeOf(time.Time{}),
					IsTime: true,
				},
			},
			new(map[string]interface{}),
			&map[string]interface{}{
				"B": true,
				"T": refTime,
			},
		},
		{
			"Decoding into type aliases",
			testStmt{
				columns: []string{"B"},
				types: []sqlite.ColumnType{
					sqlite.TypeBlob,
				},
				values: []interface{}{
					[]byte(`{"foo": "bar}`),
				},
			},
			[]ColumnDef{
				{
					Name:   "B",
					Type:   sqlite.TypeBlob,
					GoType: reflect.TypeOf(json.RawMessage(`{"foo": "bar}`)),
				},
			},
			new(map[string]interface{}),
			&map[string]interface{}{
				"B": json.RawMessage(`{"foo": "bar}`),
			},
		},
		{
			"Decoding into type aliases #2",
			testStmt{
				columns: []string{"I"},
				types:   []sqlite.ColumnType{sqlite.TypeInteger},
				values: []interface{}{
					10,
				},
			},
			[]ColumnDef{
				{
					Name:   "I",
					Type:   sqlite.TypeInteger,
					GoType: reflect.TypeOf(myInt(0)),
				},
			},
			new(map[string]interface{}),
			&map[string]interface{}{
				"I": myInt(10),
			},
		},
	}

	for idx := range cases {
		c := cases[idx]
		t.Run(c.Desc, func(t *testing.T) {
			//t.Parallel()
			log.Println(c.Desc)

			err := DecodeStmt(ctx, &TableSchema{Columns: c.ColumnDef}, c.Stmt, c.Result, DefaultDecodeConfig)
			// An Expected of type func() interface{} lazily constructs the
			// expected value (needed for pointer fields).
			if fn, ok := c.Expected.(func() interface{}); ok {
				c.Expected = fn()
			}
			if c.Expected == nil {
				assert.Error(t, err, c.Desc)
			} else {
				assert.NoError(t, err, c.Desc)

				// Prefer a custom Equal method when the expected value has
				// one (time values need Equal instead of ==).
				if equaler, ok := c.Expected.(interface{ Equal(x interface{}) bool }); ok {
					assert.True(t, equaler.Equal(c.Result))
				} else {
					assert.Equal(t, c.Expected, c.Result)
				}
			}
		})
	}
}

232
netquery/orm/encoder.go Normal file
View file

@ -0,0 +1,232 @@
package orm
import (
"context"
"fmt"
"reflect"
"time"
"zombiezen.com/go/sqlite"
)
type (
	// EncodeFunc is a hook that may encode val (of type valType) into a
	// sqlite compatible value for column col. It returns the encoded value,
	// whether it handled the value, and an error.
	EncodeFunc func(col *ColumnDef, valType reflect.Type, val reflect.Value) (interface{}, bool, error)

	// EncodeConfig holds the encode hooks to run when encoding values.
	EncodeConfig struct {
		EncodeHooks []EncodeFunc
	}
)
// ToParamMap returns a map that contains the sqlite compatible value of
// each struct field of r using the sqlite column name as a map key. It
// either uses the name of the exported struct field or the value of the
// "sqlite" tag. Keys are prefixed with keyPrefix.
func ToParamMap(ctx context.Context, r interface{}, keyPrefix string, cfg EncodeConfig) (map[string]interface{}, error) {
	// Only structs (or pointers to structs) can be turned into a parameter map.
	structVal := reflect.Indirect(reflect.ValueOf(r))
	if structVal.Kind() != reflect.Struct {
		return nil, fmt.Errorf("%w, got %T", errStructExpected, r)
	}

	// encodeBasic() always runs last so custom hooks take precedence.
	hooks := append(cfg.EncodeHooks, encodeBasic())
	params := make(map[string]interface{}, structVal.NumField())

	for idx := 0; idx < structVal.NumField(); idx++ {
		field := structVal.Type().Field(idx)
		fieldVal := structVal.Field(idx)

		// Unexported fields are not part of the column mapping.
		if !field.IsExported() {
			continue
		}

		colDef, err := getColumnDef(field)
		if err != nil {
			return nil, fmt.Errorf("failed to get column definition for %s: %w", field.Name, err)
		}

		encoded, handled, err := runEncodeHooks(colDef, field.Type, fieldVal, hooks)
		if err != nil {
			return nil, fmt.Errorf("failed to run encode hooks: %w", err)
		}

		// No hook handled the value: fall back to the dereferenced raw value.
		if !handled {
			if indirect := reflect.Indirect(fieldVal); indirect.IsValid() {
				encoded = indirect.Interface()
			}
		}

		params[keyPrefix+sqlColumnName(field)] = encoded
	}

	return params, nil
}
// EncodeValue encodes a single value val into its sqlite compatible
// representation, using colDef as a type hint. The encode hooks of cfg run
// first with encodeBasic() always appended as the final fallback; when no
// hook handles the value, the dereferenced raw value is returned.
func EncodeValue(ctx context.Context, colDef *ColumnDef, val interface{}, cfg EncodeConfig) (interface{}, error) {
	valOf := reflect.ValueOf(val)
	typeOf := reflect.TypeOf(val)

	encoded, handled, err := runEncodeHooks(
		colDef,
		typeOf,
		valOf,
		append(
			cfg.EncodeHooks,
			encodeBasic(),
		),
	)
	if err != nil {
		return nil, fmt.Errorf("failed to run encode hooks: %w", err)
	}

	if !handled {
		if indirect := reflect.Indirect(valOf); indirect.IsValid() {
			encoded = indirect.Interface()
		}
	}

	return encoded, nil
}
// encodeBasic returns an EncodeFunc that passes basic Go values (strings,
// booleans, floats, integers and []byte) through unchanged; the sqlite
// package performs the final conversion. Nil pointers are encoded as NULL
// for nullable columns and as the element type's zero value otherwise.
func encodeBasic() EncodeFunc {
	return func(col *ColumnDef, valType reflect.Type, val reflect.Value) (interface{}, bool, error) {
		kind := valType.Kind()

		if kind == reflect.Ptr {
			valType = valType.Elem()
			kind = valType.Kind()

			if val.IsNil() {
				if col.Nullable {
					return nil, true, nil
				}
				// The column is NOT NULL, so encode the zero value instead
				// of nil.
				return reflect.New(valType).Elem().Interface(), true, nil
			}

			val = val.Elem()
		}

		switch normalizeKind(kind) {
		case reflect.String,
			reflect.Float64,
			reflect.Bool,
			reflect.Int,
			reflect.Uint:
			// the sqlite package converts these types on its own.
			return val.Interface(), true, nil

		case reflect.Slice:
			if valType.Elem().Kind() == reflect.Uint8 {
				// []byte maps directly to a BLOB column.
				return val.Interface(), true, nil
			}
			fallthrough

		default:
			return nil, false, fmt.Errorf("cannot convert value of kind %s for use in SQLite", kind)
		}
	}
}
// DatetimeEncoder returns an EncodeFunc that encodes time.Time values (and
// RFC3339 strings for columns flagged as IsTime) into the representation
// requested by the column type: Unix or UnixNano epoch for INTEGER columns
// and SqliteTimeFormat rendered in loc for TEXT columns. The zero time and
// nil pointers are encoded as NULL.
func DatetimeEncoder(loc *time.Location) EncodeFunc {
	return func(colDef *ColumnDef, valType reflect.Type, val reflect.Value) (interface{}, bool, error) {
		// Dereference pointers so we inspect the actual value. Note that
		// the Kind() checks below still use the original valType.
		typeName := valType.String()
		if valType.Kind() == reflect.Ptr {
			typeName = valType.Elem().String()
			val = reflect.Indirect(val)
		}

		var t time.Time
		switch {
		case typeName == "time.Time":
			// handle the zero time (and nil pointers) as NULL.
			if !val.IsValid() || val.IsZero() {
				return nil, true, nil
			}

			tv, ok := val.Interface().(time.Time)
			if !ok {
				return nil, false, fmt.Errorf("cannot convert reflect value to time.Time")
			}
			t = tv

		case valType.Kind() == reflect.String && colDef.IsTime:
			parsed, err := time.Parse(time.RFC3339, val.String())
			if err != nil {
				return nil, false, fmt.Errorf("failed to parse time as RFC3339: %w", err)
			}
			t = parsed

		default:
			// not a time value; let the other hooks handle it.
			return nil, false, nil
		}

		switch colDef.Type {
		case sqlite.TypeInteger:
			if colDef.UnixNano {
				return t.UnixNano(), true, nil
			}
			return t.Unix(), true, nil

		case sqlite.TypeText:
			return t.In(loc).Format(SqliteTimeFormat), true, nil
		}

		return nil, false, fmt.Errorf("cannot store time.Time in %s", colDef.Type)
	}
}
// runEncodeHooks feeds val through hooks until the first one reports that
// it encoded the value; that result is returned with handled=true. A nil
// valType means val was an untyped nil: nullable columns encode it as NULL
// while NOT NULL columns receive the zero value of their storage class.
func runEncodeHooks(colDef *ColumnDef, valType reflect.Type, val reflect.Value, hooks []EncodeFunc) (interface{}, bool, error) {
	if valType == nil {
		if colDef.Nullable {
			return nil, true, nil
		}

		switch colDef.Type {
		case sqlite.TypeBlob:
			return []byte{}, true, nil
		case sqlite.TypeFloat:
			return 0.0, true, nil
		case sqlite.TypeText:
			return "", true, nil
		case sqlite.TypeInteger:
			return 0, true, nil
		default:
			return nil, false, fmt.Errorf("unsupported sqlite data type: %s", colDef.Type)
		}
	}

	for _, hook := range hooks {
		encoded, handled, err := hook(colDef, valType, val)
		if err != nil {
			return encoded, false, err
		}

		if handled {
			return encoded, true, nil
		}
	}

	return nil, false, nil
}
// DefaultEncodeConfig holds the default encoding configuration. It encodes
// time.Time values using the UTC timezone.
var DefaultEncodeConfig = EncodeConfig{
	EncodeHooks: []EncodeFunc{
		DatetimeEncoder(time.UTC),
	},
}

View file

@ -0,0 +1,260 @@
package orm
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/assert"
"zombiezen.com/go/sqlite"
)
// Test_EncodeAsMap exercises ToParamMap with a table of input structs and
// the parameter maps they are expected to encode into.
func Test_EncodeAsMap(t *testing.T) {
	ctx := context.TODO()
	refTime := time.Date(2022, time.February, 15, 9, 51, 00, 00, time.UTC)

	cases := []struct {
		Desc     string
		Input    interface{}
		Expected map[string]interface{}
	}{
		{
			"Encode basic types",
			struct {
				I int
				F float64
				S string
				B []byte
			}{
				I: 1,
				F: 1.2,
				S: "string",
				B: ([]byte)("bytes"),
			},
			map[string]interface{}{
				"I": 1,
				"F": 1.2,
				"S": "string",
				"B": ([]byte)("bytes"),
			},
		},
		{
			"Encode using struct tags",
			struct {
				I int    `sqlite:"col_int"`
				S string `sqlite:"col_string"`
			}{
				I: 1,
				S: "string value",
			},
			map[string]interface{}{
				"col_int":    1,
				"col_string": "string value",
			},
		},
		{
			"Ignore Private fields",
			struct {
				I int
				s string
			}{
				I: 1,
				s: "string value",
			},
			map[string]interface{}{
				"I": 1,
			},
		},
		{
			"Handle Pointers",
			struct {
				I *int
				S *string
			}{
				I: new(int),
			},
			map[string]interface{}{
				"I": 0,
				"S": nil,
			},
		},
		{
			"Handle time.Time types",
			struct {
				TinInt    time.Time `sqlite:",integer,unixnano"`
				TinString time.Time `sqlite:",text"`
			}{
				TinInt:    refTime,
				TinString: refTime,
			},
			map[string]interface{}{
				"TinInt":    refTime.UnixNano(),
				"TinString": refTime.Format(SqliteTimeFormat),
			},
		},
		{
			"Handle time.Time pointer types",
			struct {
				TinInt    *time.Time `sqlite:",integer,unixnano"`
				TinString *time.Time `sqlite:",text"`
				Tnil1     *time.Time `sqlite:",text"`
				Tnil2     *time.Time `sqlite:",text"`
			}{
				TinInt:    &refTime,
				TinString: &refTime,
				Tnil1:     nil,
				Tnil2:     (*time.Time)(nil),
			},
			map[string]interface{}{
				"TinInt":    refTime.UnixNano(),
				"TinString": refTime.Format(SqliteTimeFormat),
				"Tnil1":     nil,
				"Tnil2":     nil,
			},
		},
	}

	for idx := range cases {
		c := cases[idx]
		t.Run(c.Desc, func(t *testing.T) {
			// t.Parallel()

			res, err := ToParamMap(ctx, c.Input, "", DefaultEncodeConfig)
			assert.NoError(t, err)
			assert.Equal(t, c.Expected, res)
		})
	}
}
// Test_EncodeValue exercises EncodeValue with a table of column definitions
// and single input values, checking the encoded sqlite representation.
func Test_EncodeValue(t *testing.T) {
	ctx := context.TODO()
	refTime := time.Date(2022, time.February, 15, 9, 51, 00, 00, time.UTC)

	cases := []struct {
		Desc   string
		Column ColumnDef
		Input  interface{}
		Output interface{}
	}{
		{
			"Special value time.Time as text",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeText,
			},
			refTime,
			refTime.Format(SqliteTimeFormat),
		},
		{
			"Special value time.Time as unix-epoch",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeInteger,
			},
			refTime,
			refTime.Unix(),
		},
		{
			"Special value time.Time as unixnano-epoch",
			ColumnDef{
				IsTime:   true,
				Type:     sqlite.TypeInteger,
				UnixNano: true,
			},
			refTime,
			refTime.UnixNano(),
		},
		{
			"Special value zero time",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeText,
			},
			time.Time{},
			nil,
		},
		{
			"Special value zero time pointer",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeText,
			},
			new(time.Time),
			nil,
		},
		{
			"Special value *time.Time as text",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeText,
			},
			&refTime,
			refTime.Format(SqliteTimeFormat),
		},
		{
			"Special value untyped nil",
			ColumnDef{
				Nullable: true,
				IsTime:   true,
				Type:     sqlite.TypeText,
			},
			nil,
			nil,
		},
		{
			"Special value typed nil",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeText,
			},
			(*time.Time)(nil),
			nil,
		},
		{
			"Time formated as string",
			ColumnDef{
				IsTime: true,
				Type:   sqlite.TypeText,
			},
			refTime.In(time.Local).Format(time.RFC3339),
			refTime.Format(SqliteTimeFormat),
		},
		{
			"Nullable integer",
			ColumnDef{
				Type:     sqlite.TypeInteger,
				Nullable: true,
			},
			nil,
			nil,
		},
		{
			"Not-Null integer",
			ColumnDef{
				Name: "test",
				Type: sqlite.TypeInteger,
			},
			nil,
			0,
		},
		{
			"Not-Null string",
			ColumnDef{
				Type: sqlite.TypeText,
			},
			nil,
			"",
		},
	}

	for idx := range cases {
		c := cases[idx]
		t.Run(c.Desc, func(t *testing.T) {
			//t.Parallel()

			res, err := EncodeValue(ctx, &c.Column, c.Input, DefaultEncodeConfig)
			assert.NoError(t, err)
			assert.Equal(t, c.Output, res)
		})
	}
}

View file

@ -0,0 +1,171 @@
package orm
import (
"context"
"fmt"
"reflect"
"zombiezen.com/go/sqlite"
"zombiezen.com/go/sqlite/sqlitex"
)
type (
	// QueryOption can be specified at RunQuery to alter the behavior
	// of the executed query.
	QueryOption func(opts *queryOpts)

	// queryOpts collects the settings applied by the QueryOption modifiers
	// before the query is executed.
	queryOpts struct {
		Transient    bool                   // do not cache the prepared statement
		Args         []interface{}          // positional query arguments
		NamedArgs    map[string]interface{} // named query arguments (':', '@' or '$' prefixed)
		Result       interface{}            // pointer to the result slice
		DecodeConfig DecodeConfig           // decode configuration for result rows
		Schema       TableSchema            // optional schema used to look up column definitions
	}
)
// WithTransient marks the query as transient. A transient query's prepared
// statement will not be cached for later re-use.
func WithTransient() QueryOption {
	return func(qo *queryOpts) {
		qo.Transient = true
	}
}
// WithArgs sets the positional arguments for the query. Arguments are
// applied in order.
//
// See SQL Language Expression documentation of SQLite for
// details: https://sqlite.org/lang_expr.html
func WithArgs(args ...interface{}) QueryOption {
	return func(qo *queryOpts) {
		qo.Args = args
	}
}
// WithNamedArgs sets named arguments for the query. The query must use
// named argument placeholders which, according to the SQLite spec, start
// with ':', '@' or '$'.
//
// See SQL Language Expression documentation of SQLite for
// details: https://sqlite.org/lang_expr.html
func WithNamedArgs(args map[string]interface{}) QueryOption {
	return func(qo *queryOpts) {
		qo.NamedArgs = args
	}
}
// WithSchema sets the table schema whose column definitions are used while
// decoding result rows.
func WithSchema(tbl TableSchema) QueryOption {
	return func(opts *queryOpts) {
		opts.Schema = tbl
	}
}
// WithResult sets the result receiver. result is expected to be a pointer
// to a slice of struct or map types.
//
// Each row is decoded into a new slice element via DecodeStmt, so special
// values like time.Time are supported. See DecodeStmt() and
// WithDecodeConfig() for more information.
func WithResult(result interface{}) QueryOption {
	return func(qo *queryOpts) {
		qo.Result = result
	}
}
// WithDecodeConfig sets the DecodeConfig used when DecodeStmt decodes each
// row into the result slice. If not specified, DefaultDecodeConfig is used.
func WithDecodeConfig(cfg DecodeConfig) QueryOption {
	return func(qo *queryOpts) {
		qo.DecodeConfig = cfg
	}
}
// RunQuery executes the query stored in sql against the database opened in
// conn. Please refer to the documentation of QueryOption, especially WithResult()
// for more information on how to retrieve the resulting rows.
//
// Example:
//
//	var result []struct{
//		Count int `sqlite:"rowCount"`
//	}
//
//	err := RunQuery(ctx, conn, "SELECT COUNT(*) AS rowCount FROM table", WithResult(&result))
//	fmt.Println(result[0].Count)
//
func RunQuery(ctx context.Context, conn *sqlite.Conn, sql string, modifiers ...QueryOption) error {
	args := queryOpts{
		DecodeConfig: DefaultDecodeConfig,
	}
	for _, fn := range modifiers {
		fn(&args)
	}

	opts := &sqlitex.ExecOptions{
		Args:  args.Args,
		Named: args.NamedArgs,
	}

	var (
		sliceVal    reflect.Value
		valElemType reflect.Type
	)
	if args.Result != nil {
		target := args.Result
		outVal := reflect.ValueOf(target)
		if outVal.Kind() != reflect.Ptr {
			return fmt.Errorf("target must be a pointer, got %T", target)
		}

		sliceVal = reflect.Indirect(outVal)
		// Validate the pointed-to value BEFORE calling IsNil: Indirect on a
		// nil pointer yields an invalid value (Set would panic) and IsNil
		// panics for non-nilable kinds such as a *int target.
		if !sliceVal.IsValid() {
			return fmt.Errorf("target must be a non-nil pointer to a slice, got %T", target)
		}
		if sliceVal.Kind() != reflect.Slice {
			return fmt.Errorf("target must be a pointer to a slice, got %T", target)
		}
		if sliceVal.IsNil() {
			// start from an empty (zero) slice value.
			sliceVal.Set(reflect.Zero(sliceVal.Type()))
		}

		valElemType = sliceVal.Type().Elem()

		// Decode every result row into a freshly allocated slice element.
		opts.ResultFunc = func(stmt *sqlite.Stmt) error {
			currentField := reflect.New(valElemType)

			if err := DecodeStmt(ctx, &args.Schema, stmt, currentField.Interface(), args.DecodeConfig); err != nil {
				return err
			}

			sliceVal = reflect.Append(sliceVal, reflect.Indirect(currentField))

			return nil
		}
	}

	var err error
	if args.Transient {
		err = sqlitex.ExecuteTransient(conn, sql, opts)
	} else {
		err = sqlitex.Execute(conn, sql, opts)
	}
	if err != nil {
		return err
	}

	if args.Result != nil {
		// write the (re-allocated) slice back to the caller's pointer.
		reflect.Indirect(reflect.ValueOf(args.Result)).Set(sliceVal)
	}

	return nil
}

View file

@ -0,0 +1,235 @@
package orm
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"zombiezen.com/go/sqlite"
)
var (
	// errSkipStructField is used by getColumnDef to signal that a struct
	// field (tagged with sqlite:"-") must not be mapped to a column.
	errSkipStructField = errors.New("struct field should be skipped")
)
// Keywords understood in the sqlite:"" struct field tag. The first tag
// element is the column name; the remaining comma-separated elements may be
// any of these modifiers or type names. Declared as constants since they
// are never reassigned.
const (
	TagUnixNano          = "unixnano"
	TagPrimaryKey        = "primary"
	TagAutoIncrement     = "autoincrement"
	TagTime              = "time"
	TagNotNull           = "not-null"
	TagNullable          = "nullable"
	TagTypeInt           = "integer"
	TagTypeText          = "text"
	TagTypePrefixVarchar = "varchar"
	TagTypeBlob          = "blob"
	TagTypeFloat         = "float"
)
// sqlTypeMap maps sqlite storage classes to the type keywords used in
// CREATE TABLE statements (see ColumnDef.AsSQL).
var sqlTypeMap = map[sqlite.ColumnType]string{
	sqlite.TypeBlob:    "BLOB",
	sqlite.TypeFloat:   "REAL",
	sqlite.TypeInteger: "INTEGER",
	sqlite.TypeText:    "TEXT",
}
type (
	// TableSchema describes a sqlite table by its name and columns.
	TableSchema struct {
		Name    string
		Columns []ColumnDef
	}

	// ColumnDef describes a single column of a table: its sqlite storage
	// class, the Go type used in the model and any column modifiers.
	ColumnDef struct {
		Name          string            // column name
		Nullable      bool              // column may store NULL values
		Type          sqlite.ColumnType // sqlite storage class
		GoType        reflect.Type      // Go type used in the model
		Length        int               // VARCHAR length for TEXT columns; 0 means unbounded
		PrimaryKey    bool
		AutoIncrement bool
		UnixNano      bool // INTEGER time columns hold nanoseconds instead of seconds
		IsTime        bool // column stores a time.Time value
	}
)
// GetColumnDef returns the definition of the column named name, or nil when
// the schema has no such column. The returned pointer refers to a copy, so
// callers cannot mutate the schema through it.
func (ts TableSchema) GetColumnDef(name string) *ColumnDef {
	for idx := range ts.Columns {
		if ts.Columns[idx].Name == name {
			def := ts.Columns[idx]
			return &def
		}
	}

	return nil
}
// CreateStatement returns the CREATE TABLE SQL statement for the schema.
// When ifNotExists is true, IF NOT EXISTS is added so the statement can be
// executed against a database that already contains the table.
func (ts TableSchema) CreateStatement(ifNotExists bool) string {
	sql := "CREATE TABLE"
	if ifNotExists {
		sql += " IF NOT EXISTS"
	}

	// Render each column once and join them, avoiding repeated string
	// concatenation inside the loop.
	cols := make([]string, len(ts.Columns))
	for idx, col := range ts.Columns {
		cols[idx] = col.AsSQL()
	}

	return sql + " " + ts.Name + " ( " + strings.Join(cols, ", ") + " );"
}
// AsSQL renders the column definition as it appears inside a CREATE TABLE
// statement: name, type (VARCHAR(n) for bounded TEXT columns) and the
// PRIMARY KEY / AUTOINCREMENT / NOT NULL modifiers.
func (def ColumnDef) AsSQL() string {
	var sb strings.Builder

	sb.WriteString(def.Name)
	sb.WriteString(" ")

	if def.Type == sqlite.TypeText && def.Length > 0 {
		fmt.Fprintf(&sb, "VARCHAR(%d)", def.Length)
	} else {
		sb.WriteString(sqlTypeMap[def.Type])
	}

	if def.PrimaryKey {
		sb.WriteString(" PRIMARY KEY")
	}
	if def.AutoIncrement {
		sb.WriteString(" AUTOINCREMENT")
	}
	if !def.Nullable {
		sb.WriteString(" NOT NULL")
	}

	return sb.String()
}
// GenerateTableSchema builds a TableSchema named name from the struct type
// of d (or the struct a pointer d refers to). Unexported fields are ignored
// and fields tagged with sqlite:"-" are skipped.
func GenerateTableSchema(name string, d interface{}) (*TableSchema, error) {
	val := reflect.Indirect(reflect.ValueOf(d))
	if val.Kind() != reflect.Struct {
		return nil, fmt.Errorf("%w, got %T", errStructExpected, d)
	}

	ts := &TableSchema{
		Name: name,
	}
	structType := val.Type()

	for idx := 0; idx < structType.NumField(); idx++ {
		field := structType.Field(idx)
		if !field.IsExported() {
			continue
		}

		def, err := getColumnDef(field)
		if err != nil {
			if errors.Is(err, errSkipStructField) {
				// field is tagged with sqlite:"-".
				continue
			}
			return nil, fmt.Errorf("struct field %s: %w", field.Name, err)
		}

		ts.Columns = append(ts.Columns, *def)
	}

	return ts, nil
}
// getColumnDef derives a ColumnDef from a struct field: pointer fields are
// nullable and are dereferenced to find the Go type, the sqlite storage
// class is inferred from the (normalized) kind, and any sqlite:"" tag is
// applied on top.
func getColumnDef(fieldType reflect.StructField) (*ColumnDef, error) {
	ft := fieldType.Type
	nullable := false
	if ft.Kind() == reflect.Ptr {
		ft = ft.Elem()
		nullable = true
	}

	def := &ColumnDef{
		Name:     fieldType.Name,
		Nullable: nullable,
		GoType:   ft,
	}

	// NOTE(review): kinds not listed below (e.g. unsigned integers and
	// structs such as time.Time) leave Type unset until a struct tag
	// provides it — confirm this is intended.
	switch normalizeKind(ft.Kind()) {
	case reflect.Int:
		def.Type = sqlite.TypeInteger
	case reflect.Float64:
		def.Type = sqlite.TypeFloat
	case reflect.String:
		def.Type = sqlite.TypeText
	case reflect.Slice:
		// only []byte/[]uint8 is supported
		if ft.Elem().Kind() != reflect.Uint8 {
			return nil, fmt.Errorf("slices of type %s is not supported", ft.Elem())
		}
		def.Type = sqlite.TypeBlob
	}

	if err := applyStructFieldTag(fieldType, def); err != nil {
		return nil, err
	}

	return def, nil
}
// applyStructFieldTag parses the sqlite:"" struct field tag and updates the column
// definition def accordingly.
//
// The first comma-separated value overrides the column name ("-" skips
// the field entirely); subsequent values are modifier or type keywords
// (see the Tag* constants).
func applyStructFieldTag(fieldType reflect.StructField, def *ColumnDef) error {
	parts := strings.Split(fieldType.Tag.Get("sqlite"), ",")

	// first part: column name override, or "-" to skip the field.
	if len(parts) > 0 && parts[0] != "" {
		if parts[0] == "-" {
			return errSkipStructField
		}
		def.Name = parts[0]
	}

	if len(parts) > 1 {
		for _, k := range parts[1:] {
			switch k {
			// column modifiers
			case TagPrimaryKey:
				def.PrimaryKey = true
			case TagAutoIncrement:
				def.AutoIncrement = true
			case TagNotNull:
				def.Nullable = false
			case TagNullable:
				def.Nullable = true
			case TagUnixNano:
				def.UnixNano = true
			case TagTime:
				def.IsTime = true

			// basic column types
			case TagTypeInt:
				def.Type = sqlite.TypeInteger
			case TagTypeText:
				def.Type = sqlite.TypeText
			case TagTypeFloat:
				def.Type = sqlite.TypeFloat
			case TagTypeBlob:
				def.Type = sqlite.TypeBlob

			// advanced column types
			default:
				// varchar(N): text column with an explicit length.
				// NOTE(review): keywords that match none of the cases are
				// silently ignored — confirm this is intended.
				if strings.HasPrefix(k, TagTypePrefixVarchar) {
					lenStr := strings.TrimSuffix(strings.TrimPrefix(k, TagTypePrefixVarchar+"("), ")")
					length, err := strconv.ParseInt(lenStr, 10, 0)
					if err != nil {
						return fmt.Errorf("failed to parse varchar length %q: %w", lenStr, err)
					}
					def.Type = sqlite.TypeText
					def.Length = int(length)
				}
			}
		}
	}

	return nil
}

View file

@ -0,0 +1,41 @@
package orm
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_SchemaBuilder verifies that GenerateTableSchema derives the
// expected CREATE TABLE statement from annotated struct types.
func Test_SchemaBuilder(t *testing.T) {
	cases := []struct {
		Name        string
		Model       interface{}
		ExpectedSQL string
	}{
		{
			"Simple",
			struct {
				ID    int         `sqlite:"id,primary,autoincrement"`
				Text  string      `sqlite:"text,nullable"`
				Int   *int        `sqlite:",not-null"`
				Float interface{} `sqlite:",float,nullable"`
			}{},
			`CREATE TABLE Simple ( id INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL, text TEXT, Int INTEGER NOT NULL, Float REAL );`,
		},
		{
			"Varchar",
			struct {
				S string `sqlite:",varchar(10)"`
			}{},
			`CREATE TABLE Varchar ( S VARCHAR(10) NOT NULL );`,
		},
	}

	for idx := range cases {
		c := cases[idx]
		// Run each case as a named sub-test (consistent with the other
		// table-driven tests in this repository) so a failure reports the
		// offending case.
		t.Run(c.Name, func(t *testing.T) {
			res, err := GenerateTableSchema(c.Name, c.Model)
			// Guard against a nil-pointer panic on res when schema
			// generation fails.
			if !assert.NoError(t, err) {
				return
			}
			assert.Equal(t, c.ExpectedSQL, res.CreateStatement(false))
		})
	}
}

556
netquery/query.go Normal file
View file

@ -0,0 +1,556 @@
package netquery
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"sort"
"strings"
"github.com/hashicorp/go-multierror"
"github.com/safing/portmaster/netquery/orm"
"zombiezen.com/go/sqlite"
)
type (
	// Query maps a column name to one or more matchers. Matchers of the
	// same column are OR-ed, different columns are AND-ed together (see
	// toSQLWhereClause).
	Query map[string][]Matcher

	// MatchType describes an operator by its SQL representation.
	// NOTE(review): appears unused within this file — confirm external
	// users before removing.
	MatchType interface {
		Operator() string
	}

	// Equal is an alias for an arbitrary JSON value.
	// NOTE(review): this package-level type appears unused here — the
	// Matcher.Equal field below uses a plain interface{}; confirm.
	Equal interface{}

	// Matcher holds the supported per-column conditions. At least one of
	// the fields must be set (enforced by Validate).
	Matcher struct {
		Equal    interface{}   `json:"$eq,omitempty"`
		NotEqual interface{}   `json:"$ne,omitempty"`
		In       []interface{} `json:"$in,omitempty"`
		NotIn    []interface{} `json:"$notIn,omitempty"`
		Like     string        `json:"$like,omitempty"`
	}

	// Count describes a $count aggregation of a select.
	Count struct {
		As       string `json:"as"`
		Field    string `json:"field"`
		Distinct bool   `json:"distinct"`
	}

	// Sum describes a $sum aggregation: rows matching Condition are
	// summed and exposed under the alias As.
	Sum struct {
		Condition Query  `json:"condition"`
		As        string `json:"as"`
		Distinct  bool   `json:"distinct"`
	}

	// Select describes one entry of the "select" part of a query.
	// NOTE: whenever adding support for new operators make sure
	// to update UnmarshalJSON as well.
	Select struct {
		Field    string  `json:"field"`
		Count    *Count  `json:"$count,omitempty"`
		Sum      *Sum    `json:"$sum,omitempty"`
		Distinct *string `json:"$distinct"`
	}

	// Selects is a list of Select with lenient JSON decoding.
	Selects []Select

	// TextSearch performs a LIKE-based search over the given text
	// columns.
	TextSearch struct {
		Fields []string `json:"fields"`
		Value  string   `json:"value"`
	}

	// QueryRequestPayload is the full request payload accepted by the
	// query handler.
	QueryRequestPayload struct {
		Select     Selects     `json:"select"`
		Query      Query       `json:"query"`
		OrderBy    OrderBys    `json:"orderBy"`
		GroupBy    []string    `json:"groupBy"`
		TextSearch *TextSearch `json:"textSearch"`
		Pagination

		// internal state accumulated while generating the SQL statement
		selectedFields    []string
		whitelistedFields []string
		paramMap          map[string]interface{}
	}

	// QueryActiveConnectionChartPayload is the request payload for the
	// active-connection chart endpoint.
	QueryActiveConnectionChartPayload struct {
		Query      Query       `json:"query"`
		TextSearch *TextSearch `json:"textSearch"`
	}

	// OrderBy describes a single ORDER BY column and direction.
	OrderBy struct {
		Field string `json:"field"`
		Desc  bool   `json:"desc"`
	}

	// OrderBys is a list of OrderBy with lenient JSON decoding.
	OrderBys []OrderBy

	// Pagination limits the result set via LIMIT/OFFSET.
	Pagination struct {
		PageSize int `json:"pageSize"`
		Page     int `json:"page"`
	}
)
// UnmarshalJSON implements a lenient json.Unmarshaler for Query. For
// each column the value may be a single JSON primitive (treated as an
// $eq condition), a matcher object, or an array mixing both forms.
func (query *Query) UnmarshalJSON(blob []byte) error {
	if *query == nil {
		*query = make(Query)
	}

	var model map[string]json.RawMessage
	if err := json.Unmarshal(blob, &model); err != nil {
		return err
	}

	for columnName, rawColumnQuery := range model {
		if len(rawColumnQuery) == 0 {
			continue
		}

		// dispatch on the first byte of the raw JSON value
		switch rawColumnQuery[0] {
		case '{':
			// a single matcher object
			m, err := parseMatcher(rawColumnQuery)
			if err != nil {
				return err
			}
			(*query)[columnName] = []Matcher{*m}
		case '[':
			// an array of matchers and/or primitives
			var rawMatchers []json.RawMessage
			if err := json.Unmarshal(rawColumnQuery, &rawMatchers); err != nil {
				return err
			}
			(*query)[columnName] = make([]Matcher, len(rawMatchers))
			for idx, val := range rawMatchers {
				// this should not happen
				if len(val) == 0 {
					continue
				}
				// if val starts with a { we have a matcher definition
				if val[0] == '{' {
					m, err := parseMatcher(val)
					if err != nil {
						return err
					}
					(*query)[columnName][idx] = *m
					continue
				} else if val[0] == '[' {
					// nested arrays are not supported
					return fmt.Errorf("invalid token [ in query for column %s", columnName)
				}
				// val is a dedicated JSON primitive and not an object or array
				// so we treat that as an EQUAL condition.
				var x interface{}
				if err := json.Unmarshal(val, &x); err != nil {
					return err
				}
				(*query)[columnName][idx] = Matcher{
					Equal: x,
				}
			}
		default:
			// value is a JSON primitive and not an object or array
			// so we treat that as an EQUAL condition.
			var x interface{}
			if err := json.Unmarshal(rawColumnQuery, &x); err != nil {
				return err
			}
			(*query)[columnName] = []Matcher{
				{Equal: x},
			}
		}
	}

	return nil
}
// TODO(ppacher): right now we only support LIMIT and OFFSET for pagination but that
// has an issue that loading the same page twice might yield different results due to
// new records shifting the result slice. To overcome this, return a "PageToken" to the
// user that includes the time the initial query was created so paginated queries can
// ensure new records don't end up in the result set.

// toSQLLimitOffsetClause renders the LIMIT/OFFSET clause for page.
func (page *Pagination) toSQLLimitOffsetClause() string {
	// Default and cap the page size to at most 100 items per page to
	// avoid out-of-memory conditions when loading thousands of results
	// at once.
	pageSize := page.PageSize
	if pageSize <= 0 || pageSize > 100 {
		pageSize = 100
	}

	clause := fmt.Sprintf("LIMIT %d", pageSize)
	if page.Page > 0 {
		clause = fmt.Sprintf("%s OFFSET %d", clause, page.Page*pageSize)
	}

	return clause
}
// parseMatcher unmarshals a single matcher definition from raw and
// validates that it contains at least one condition.
func parseMatcher(raw json.RawMessage) (*Matcher, error) {
	var m Matcher
	if err := json.Unmarshal(raw, &m); err != nil {
		return nil, err
	}

	// Wrap with %w (was %s) so callers can still inspect the underlying
	// validation error with errors.Is/As.
	if err := m.Validate(); err != nil {
		return nil, fmt.Errorf("invalid query matcher: %w", err)
	}

	// TODO(review): debug leftover — this logs every parsed matcher via
	// the stdlib logger on each request; consider removing it or moving
	// it to the project logger at debug level.
	log.Printf("parsed matcher %s: %+v", string(raw), m)

	return &m, nil
}
// Validate ensures that the matcher specifies at least one condition.
func (match Matcher) Validate() error {
	conditions := 0
	for _, isSet := range []bool{
		match.Equal != nil,
		match.NotEqual != nil,
		match.In != nil,
		match.NotIn != nil,
		match.Like != "",
	} {
		if isSet {
			conditions++
		}
	}

	if conditions == 0 {
		return fmt.Errorf("no conditions specified")
	}

	return nil
}
// toSQLConditionClause renders the text search as an OR-ed set of LIKE
// conditions over the configured fields. It returns the clause and the
// named parameter holding the LIKE pattern; suffix keeps parameter
// names unique across clauses. Only text columns of schema may be
// searched.
func (text TextSearch) toSQLConditionClause(ctx context.Context, schema *orm.TableSchema, suffix string, encoderConfig orm.EncodeConfig) (string, map[string]interface{}, error) {
	var (
		queryParts []string
		params     = make(map[string]interface{})
	)

	// one shared named parameter holds the LIKE pattern for all fields
	key := fmt.Sprintf(":t%s", suffix)
	params[key] = fmt.Sprintf("%%%s%%", text.Value)

	for _, field := range text.Fields {
		colDef := schema.GetColumnDef(field)
		if colDef == nil {
			// BUG FIX: this error path previously dereferenced the nil
			// colDef (colDef.Name) and panicked; report the requested
			// field name instead.
			return "", nil, fmt.Errorf("column %s is not allowed in text-search", field)
		}

		if colDef.Type != sqlite.TypeText {
			return "", nil, fmt.Errorf("type of column %s cannot be used in text-search", colDef.Name)
		}

		queryParts = append(queryParts, fmt.Sprintf("%s LIKE %s", colDef.Name, key))
	}

	if len(queryParts) == 0 {
		return "", nil, nil
	}

	return "( " + strings.Join(queryParts, " OR ") + " )", params, nil
}
// toSQLConditionClause renders all set conditions of match for the
// column colDef into one SQL condition, joining multiple conditions
// with conjunction. It returns the clause and the named parameters it
// references; suffix is appended to parameter names to keep them unique
// across matchers of the same column.
func (match Matcher) toSQLConditionClause(ctx context.Context, suffix string, conjunction string, colDef orm.ColumnDef, encoderConfig orm.EncodeConfig) (string, map[string]interface{}, error) {
	var (
		queryParts []string
		params     = make(map[string]interface{})
		errs       = new(multierror.Error)
		key        = fmt.Sprintf("%s%s", colDef.Name, suffix)
	)

	// add renders one operator. list switches between the scalar form
	// "col OP :p" and the list form "col OP ( :p0, :p1, ... )"; encoding
	// failures are collected in errs rather than aborting.
	add := func(operator, suffix string, list bool, values ...interface{}) {
		var placeholder []string
		for idx, value := range values {
			encodedValue, err := orm.EncodeValue(ctx, &colDef, value, encoderConfig)
			if err != nil {
				errs.Errors = append(errs.Errors,
					fmt.Errorf("failed to encode %v for column %s: %w", value, colDef.Name, err),
				)
				return
			}
			uniqKey := fmt.Sprintf(":%s%s%d", key, suffix, idx)
			placeholder = append(placeholder, uniqKey)
			params[uniqKey] = encodedValue
		}
		if len(placeholder) == 1 && !list {
			queryParts = append(queryParts, fmt.Sprintf("%s %s %s", colDef.Name, operator, placeholder[0]))
		} else {
			queryParts = append(queryParts, fmt.Sprintf("%s %s ( %s )", colDef.Name, operator, strings.Join(placeholder, ", ")))
		}
	}

	if match.Equal != nil {
		add("=", "eq", false, match.Equal)
	}
	if match.NotEqual != nil {
		add("!=", "ne", false, match.NotEqual)
	}
	if match.In != nil {
		add("IN", "in", true, match.In...)
	}
	if match.NotIn != nil {
		add("NOT IN", "notin", true, match.NotIn...)
	}
	if match.Like != "" {
		add("LIKE", "like", false, match.Like)
	}

	if len(queryParts) == 0 {
		// this is an empty matcher without a single condition.
		// we convert that to a no-op TRUE value
		return "( 1 = 1 )", nil, errs.ErrorOrNil()
	}

	if len(queryParts) == 1 {
		return queryParts[0], params, errs.ErrorOrNil()
	}

	return "( " + strings.Join(queryParts, " "+conjunction+" ") + " )", params, errs.ErrorOrNil()
}
// toSQLWhereClause renders query into a SQL WHERE clause against the
// table schema m. Matchers of the same column are OR-ed, distinct
// columns are AND-ed. It returns the clause (without the WHERE keyword)
// and the named parameters used in it; column names are validated
// against the schema and collected errors are returned as a multierror.
func (query Query) toSQLWhereClause(ctx context.Context, suffix string, m *orm.TableSchema, encoderConfig orm.EncodeConfig) (string, map[string]interface{}, error) {
	if len(query) == 0 {
		return "", nil, nil
	}

	// create a lookup map to validate column names
	lm := make(map[string]orm.ColumnDef, len(m.Columns))
	for _, col := range m.Columns {
		lm[col.Name] = col
	}

	paramMap := make(map[string]interface{})
	columnStmts := make([]string, 0, len(query))

	// get all keys and sort them so we get a stable output
	queryKeys := make([]string, 0, len(query))
	for column := range query {
		queryKeys = append(queryKeys, column)
	}
	sort.Strings(queryKeys)

	// actually create the WHERE clause parts for each
	// column in query.
	errs := new(multierror.Error)
	for _, column := range queryKeys {
		values := query[column]
		colDef, ok := lm[column]
		if !ok {
			errs.Errors = append(errs.Errors, fmt.Errorf("column %s is not allowed", column))
			continue
		}

		queryParts := make([]string, len(values))
		for idx, val := range values {
			matcherQuery, params, err := val.toSQLConditionClause(ctx, fmt.Sprintf("%s%d", suffix, idx), "AND", colDef, encoderConfig)
			if err != nil {
				errs.Errors = append(errs.Errors,
					fmt.Errorf("invalid matcher at index %d for column %s: %w", idx, colDef.Name, err),
				)
				continue
			}

			// merge parameters up into the superior parameter map
			for key, val := range params {
				if _, ok := paramMap[key]; ok {
					// this is solely a developer mistake when implementing a
					// matcher so no forgiving ...
					panic("sqlite parameter collision")
				}
				paramMap[key] = val
			}

			queryParts[idx] = matcherQuery
		}

		columnStmts = append(columnStmts,
			fmt.Sprintf("( %s )", strings.Join(queryParts, " OR ")),
		)
	}

	whereClause := strings.Join(columnStmts, " AND ")

	return whereClause, paramMap, errs.ErrorOrNil()
}
// UnmarshalJSON implements a lenient json.Unmarshaler for Selects. It
// accepts an array of selects, a single select object, or a plain field
// name.
func (sel *Selects) UnmarshalJSON(blob []byte) error {
	if len(blob) == 0 {
		return io.ErrUnexpectedEOF
	}

	// if we are looking at a slice directly decode into
	// a []Select
	if blob[0] == '[' {
		var result []Select
		if err := json.Unmarshal(blob, &result); err != nil {
			return err
		}

		(*sel) = result

		return nil
	}

	// if it's an object decode into a single select
	if blob[0] == '{' {
		var result Select
		if err := json.Unmarshal(blob, &result); err != nil {
			return err
		}

		*sel = []Select{result}

		return nil
	}

	// otherwise this is just the field name
	var field string
	if err := json.Unmarshal(blob, &field); err != nil {
		return err
	}

	// BUG FIX: the decoded field name was previously discarded, leaving
	// *sel empty; store it as a plain field select like the other
	// Unmarshal helpers in this file do.
	*sel = []Select{{Field: field}}

	return nil
}
// UnmarshalJSON implements a lenient json.Unmarshaler for Select. It
// accepts either a full select object or a plain field name.
func (sel *Select) UnmarshalJSON(blob []byte) error {
	if len(blob) == 0 {
		return io.ErrUnexpectedEOF
	}

	// if we have an object at hand decode the select
	// directly
	if blob[0] == '{' {
		var res struct {
			Field    string  `json:"field"`
			Count    *Count  `json:"$count"`
			Sum      *Sum    `json:"$sum"`
			Distinct *string `json:"$distinct"`
		}

		if err := json.Unmarshal(blob, &res); err != nil {
			return err
		}

		sel.Count = res.Count
		sel.Field = res.Field
		sel.Distinct = res.Distinct
		sel.Sum = res.Sum

		// alias names end up verbatim in the generated SELECT clause, so
		// restrict them to letters only.
		if sel.Count != nil && sel.Count.As != "" {
			if !charOnlyRegexp.MatchString(sel.Count.As) {
				return fmt.Errorf("invalid characters in $count.as, value must match [a-zA-Z]+")
			}
		}
		// CONSISTENCY/SECURITY FIX: $sum.as was not validated at all even
		// though it is interpolated into SQL just like $count.as.
		if sel.Sum != nil && sel.Sum.As != "" {
			if !charOnlyRegexp.MatchString(sel.Sum.As) {
				return fmt.Errorf("invalid characters in $sum.as, value must match [a-zA-Z]+")
			}
		}

		return nil
	}

	// otherwise the select is just a plain field name
	var x string
	if err := json.Unmarshal(blob, &x); err != nil {
		return err
	}

	sel.Field = x

	return nil
}
// UnmarshalJSON implements a lenient json.Unmarshaler for OrderBys. It
// accepts an array of order-by objects, a single object, or a plain
// field name (sorted ascending).
func (orderBys *OrderBys) UnmarshalJSON(blob []byte) error {
	if len(blob) == 0 {
		return io.ErrUnexpectedEOF
	}

	switch blob[0] {
	case '[':
		// a list of order-by definitions
		var list []OrderBy
		if err := json.Unmarshal(blob, &list); err != nil {
			return err
		}
		*orderBys = list
		return nil

	case '{':
		// a single order-by definition
		var single OrderBy
		if err := json.Unmarshal(blob, &single); err != nil {
			return err
		}
		*orderBys = []OrderBy{single}
		return nil

	default:
		// just a field name; default to ascending order
		var field string
		if err := json.Unmarshal(blob, &field); err != nil {
			return err
		}
		*orderBys = []OrderBy{
			{
				Field: field,
				Desc:  false,
			},
		}
		return nil
	}
}
// UnmarshalJSON implements a lenient json.Unmarshaler for OrderBy. It
// accepts either an object with "field"/"desc" or a plain field name
// (sorted ascending).
func (orderBy *OrderBy) UnmarshalJSON(blob []byte) error {
	if len(blob) == 0 {
		return io.ErrUnexpectedEOF
	}

	if blob[0] != '{' {
		// plain string: field name, ascending order
		var field string
		if err := json.Unmarshal(blob, &field); err != nil {
			return err
		}
		orderBy.Field = field
		orderBy.Desc = false
		return nil
	}

	var decoded struct {
		Field string `json:"field"`
		Desc  bool   `json:"desc"`
	}
	if err := json.Unmarshal(blob, &decoded); err != nil {
		return err
	}
	orderBy.Desc = decoded.Desc
	orderBy.Field = decoded.Field
	return nil
}

356
netquery/query_handler.go Normal file
View file

@ -0,0 +1,356 @@
package netquery
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"regexp"
"strings"
"time"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/netquery/orm"
)
var (
	// charOnlyRegexp validates user-supplied identifiers (such as the
	// "as" alias of $count/$sum) before they are interpolated into SQL:
	// the whole value must consist of letters only.
	//
	// BUG FIX: the pattern was previously unanchored ("[a-zA-Z]+"), so
	// MatchString accepted any value merely CONTAINING a letter (e.g.
	// `x"; DROP TABLE`). Anchoring it makes it match the entire string,
	// as the accompanying error messages already claim.
	charOnlyRegexp = regexp.MustCompile(`^[a-zA-Z]+$`)
)
type (
	// QueryHandler implements http.Handler and allows to perform SQL
	// query and aggregate functions on Database.
	QueryHandler struct {
		// IsDevMode reports whether dev mode is active; dev-mode
		// responses additionally carry the generated SQL, its parameters
		// and timing diagnostics.
		IsDevMode func() bool
		// Database is the connection database queries are executed on.
		Database *Database
	}
)
// ServeHTTP implements http.Handler. It parses the query payload from
// req, translates it into a single SQL statement, executes it against
// the database and streams the resulting rows as JSON.
func (qh *QueryHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
	start := time.Now()

	requestPayload, err := qh.parseRequest(req)
	if err != nil {
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}
	queryParsed := time.Since(start)

	query, paramMap, err := requestPayload.generateSQL(req.Context(), qh.Database.Schema)
	if err != nil {
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}
	sqlQueryBuilt := time.Since(start)

	// actually execute the query against the database and collect the result
	var result []map[string]interface{}
	if err := qh.Database.Execute(
		req.Context(),
		query,
		orm.WithNamedArgs(paramMap),
		orm.WithResult(&result),
		orm.WithSchema(*qh.Database.Schema),
	); err != nil {
		http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
		return
	}
	sqlQueryFinished := time.Since(start)

	// send the HTTP status code
	// NOTE(review): once the header is written, failures below can no
	// longer change the status code.
	resp.WriteHeader(http.StatusOK)

	// prepare the result encoder.
	enc := json.NewEncoder(resp)
	enc.SetEscapeHTML(false)
	enc.SetIndent("", " ")

	// prepare the result body that, in dev mode, contains
	// some diagnostics data about the query
	var resultBody map[string]interface{}
	if qh.IsDevMode() {
		resultBody = map[string]interface{}{
			"sql_prep_stmt": query,
			"sql_params":    paramMap,
			"query":         requestPayload.Query,
			"orderBy":       requestPayload.OrderBy,
			"groupBy":       requestPayload.GroupBy,
			"selects":       requestPayload.Select,
			"times": map[string]interface{}{
				"start_time":           start,
				"query_parsed_after":   queryParsed.String(),
				"query_built_after":    sqlQueryBuilt.String(),
				"query_executed_after": sqlQueryFinished.String(),
			},
		}
	} else {
		resultBody = make(map[string]interface{})
	}
	resultBody["results"] = result

	// and finally stream the response
	if err := enc.Encode(resultBody); err != nil {
		// we failed to encode the JSON body to resp so we likely either already sent a
		// few bytes or the pipe was already closed. In either case, trying to send the
		// error using http.Error() is non-sense. We just log it out here and that's all
		// we can do.
		log.Errorf("failed to encode JSON response: %s", err)
		return
	}
}
// parseRequest decodes the query payload from req. POST/PUT requests
// carry the payload as JSON request body; GET requests carry it in the
// "q" URL parameter. All other methods are rejected. An empty payload
// yields an empty (match-everything) request.
func (qh *QueryHandler) parseRequest(req *http.Request) (*QueryRequestPayload, error) {
	var body io.Reader

	switch req.Method {
	case http.MethodPost, http.MethodPut:
		body = req.Body
	case http.MethodGet:
		body = strings.NewReader(req.URL.Query().Get("q"))
	default:
		return nil, fmt.Errorf("invalid HTTP method")
	}

	blob, err := ioutil.ReadAll(body)
	if err != nil {
		// BUG FIX: was fmt.Errorf("failed to read body" + err.Error()),
		// which concatenated the error into the format string.
		return nil, fmt.Errorf("failed to read body: %w", err)
	}

	// BUG FIX: decode through the configured decoder instead of
	// json.Unmarshal so that DisallowUnknownFields actually takes effect
	// and an empty payload surfaces as io.EOF (json.Unmarshal never
	// returns io.EOF, so the check below could never trigger before).
	dec := json.NewDecoder(bytes.NewReader(blob))
	dec.DisallowUnknownFields()

	var requestPayload QueryRequestPayload
	if err := dec.Decode(&requestPayload); err != nil && !errors.Is(err, io.EOF) {
		return nil, fmt.Errorf("invalid query: %w", err)
	}

	return &requestPayload, nil
}
// generateSQL builds the complete SELECT statement for the request
// payload against schema and returns it together with the named
// parameter map required to execute it.
func (req *QueryRequestPayload) generateSQL(ctx context.Context, schema *orm.TableSchema) (string, map[string]interface{}, error) {
	if err := req.prepareSelectedFields(ctx, schema); err != nil {
		// FIX: error message previously read "perparing".
		return "", nil, fmt.Errorf("preparing selected fields: %w", err)
	}

	// build the SQL where clause from the payload query
	whereClause, paramMap, err := req.Query.toSQLWhereClause(
		ctx,
		"",
		schema,
		orm.DefaultEncodeConfig,
	)
	if err != nil {
		return "", nil, fmt.Errorf("generating where clause: %w", err)
	}

	// merge the where-clause parameters into the shared parameter map
	if req.paramMap == nil {
		req.paramMap = make(map[string]interface{})
	}
	for key, val := range paramMap {
		req.paramMap[key] = val
	}

	// append the optional text-search clause with AND
	if req.TextSearch != nil {
		textClause, textParams, err := req.TextSearch.toSQLConditionClause(ctx, schema, "", orm.DefaultEncodeConfig)
		if err != nil {
			return "", nil, fmt.Errorf("generating text-search clause: %w", err)
		}

		if textClause != "" {
			if whereClause != "" {
				whereClause += " AND "
			}
			whereClause += textClause

			for key, val := range textParams {
				req.paramMap[key] = val
			}
		}
	}

	groupByClause, err := req.generateGroupByClause(schema)
	if err != nil {
		return "", nil, fmt.Errorf("generating group-by clause: %w", err)
	}

	orderByClause, err := req.generateOrderByClause(schema)
	if err != nil {
		return "", nil, fmt.Errorf("generating order-by clause: %w", err)
	}

	// NOTE(review): the table name is hard-coded to "connections" here.
	selectClause := req.generateSelectClause()
	query := `SELECT ` + selectClause + ` FROM connections`
	if whereClause != "" {
		query += " WHERE " + whereClause
	}
	query += " " + groupByClause + " " + orderByClause + " " + req.Pagination.toSQLLimitOffsetClause()

	return strings.TrimSpace(query), req.paramMap, nil
}
// prepareSelectedFields validates all entries of the request's "select"
// part against schema and builds the SQL select expressions
// (req.selectedFields) plus the aliases that group-by/order-by may
// reference afterwards (req.whitelistedFields).
func (req *QueryRequestPayload) prepareSelectedFields(ctx context.Context, schema *orm.TableSchema) error {
	for idx, s := range req.Select {
		var field string

		// determine which column name this select refers to
		switch {
		case s.Count != nil:
			field = s.Count.Field
		case s.Distinct != nil:
			field = *s.Distinct
		case s.Sum != nil:
			// field is not used in case of $sum
			field = "*"
		default:
			field = s.Field
		}

		// "*" is only allowed for $count/$sum; anything else must name a
		// real column or a previously created alias.
		colName := "*"
		if field != "*" || (s.Count == nil && s.Sum == nil) {
			var err error
			colName, err = req.validateColumnName(schema, field)
			if err != nil {
				return err
			}
		}

		switch {
		case s.Count != nil:
			as := s.Count.As
			if as == "" {
				as = fmt.Sprintf("%s_count", colName)
			}
			distinct := ""
			if s.Count.Distinct {
				distinct = "DISTINCT "
			}
			req.selectedFields = append(
				req.selectedFields,
				fmt.Sprintf("COUNT(%s%s) AS %s", distinct, colName, as),
			)
			req.whitelistedFields = append(req.whitelistedFields, as)

		case s.Sum != nil:
			if s.Sum.As == "" {
				return fmt.Errorf("missing 'as' for $sum")
			}

			clause, params, err := s.Sum.Condition.toSQLWhereClause(ctx, fmt.Sprintf("sel%d", idx), schema, orm.DefaultEncodeConfig)
			if err != nil {
				return fmt.Errorf("in $sum: %w", err)
			}

			// BUG FIX: merge the condition parameters into the shared
			// parameter map instead of replacing it wholesale — the old
			// assignment (req.paramMap = params) silently dropped the
			// parameters of any earlier $sum select and any pre-existing
			// entries.
			if req.paramMap == nil {
				req.paramMap = make(map[string]interface{})
			}
			for key, val := range params {
				req.paramMap[key] = val
			}

			req.selectedFields = append(
				req.selectedFields,
				fmt.Sprintf("SUM(%s) AS %s", clause, s.Sum.As),
			)
			req.whitelistedFields = append(req.whitelistedFields, s.Sum.As)

		case s.Distinct != nil:
			req.selectedFields = append(req.selectedFields, fmt.Sprintf("DISTINCT %s", colName))
			req.whitelistedFields = append(req.whitelistedFields, colName)

		default:
			req.selectedFields = append(req.selectedFields, colName)
		}
	}

	return nil
}
// generateGroupByClause renders the GROUP BY clause for req and, when
// no fields were explicitly selected, defaults the selection to the
// group-by columns.
func (req *QueryRequestPayload) generateGroupByClause(schema *orm.TableSchema) (string, error) {
	if len(req.GroupBy) == 0 {
		return "", nil
	}

	groupBys := make([]string, 0, len(req.GroupBy))
	for _, name := range req.GroupBy {
		colName, err := req.validateColumnName(schema, name)
		if err != nil {
			return "", err
		}
		groupBys = append(groupBys, colName)
	}

	// if there are no explicitly selected fields we default to the
	// group-by columns as that's what's expected most of the time anyway...
	if len(req.selectedFields) == 0 {
		req.selectedFields = append(req.selectedFields, groupBys...)
	}

	return "GROUP BY " + strings.Join(groupBys, ", "), nil
}
// generateSelectClause returns the comma-joined select expressions, or
// "*" when no fields were explicitly selected.
func (req *QueryRequestPayload) generateSelectClause() string {
	if len(req.selectedFields) == 0 {
		return "*"
	}
	return strings.Join(req.selectedFields, ", ")
}
// generateOrderByClause renders the ORDER BY clause for req, validating
// each sort field against schema (or a request-created alias).
func (req *QueryRequestPayload) generateOrderByClause(schema *orm.TableSchema) (string, error) {
	if len(req.OrderBy) == 0 {
		return "", nil
	}

	clauses := make([]string, 0, len(req.OrderBy))
	for _, sort := range req.OrderBy {
		colName, err := req.validateColumnName(schema, sort.Field)
		if err != nil {
			return "", err
		}

		direction := "ASC"
		if sort.Desc {
			direction = "DESC"
		}
		clauses = append(clauses, colName+" "+direction)
	}

	return "ORDER BY " + strings.Join(clauses, ", "), nil
}
// validateColumnName resolves field to a name that may safely appear in
// the generated SQL: either a column of schema or an alias/expression
// explicitly created by this request.
func (req *QueryRequestPayload) validateColumnName(schema *orm.TableSchema, field string) (string, error) {
	if colDef := schema.GetColumnDef(field); colDef != nil {
		return colDef.Name, nil
	}

	// aliases and select expressions created earlier in this request are
	// allowed as well.
	for _, allowed := range [][]string{req.whitelistedFields, req.selectedFields} {
		for _, name := range allowed {
			if name == field {
				return field, nil
			}
		}
	}

	return "", fmt.Errorf("column name %q not allowed", field)
}
// Compile-time check: *QueryHandler must implement http.Handler.
var _ http.Handler = new(QueryHandler)

244
netquery/query_test.go Normal file
View file

@ -0,0 +1,244 @@
package netquery
import (
"context"
"encoding/json"
"fmt"
"testing"
"time"
"github.com/safing/portmaster/netquery/orm"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// Test_UnmarshalQuery verifies the lenient JSON decoding of Query:
// plain primitives become $eq matchers, and arrays may mix primitives
// with matcher objects.
func Test_UnmarshalQuery(t *testing.T) {
	var cases = []struct {
		Name     string
		Input    string
		Expected Query
		Error    error
	}{
		{
			"Parse a simple query",
			`{ "domain": ["example.com", "example.at"] }`,
			Query{
				"domain": []Matcher{
					{
						Equal: "example.com",
					},
					{
						Equal: "example.at",
					},
				},
			},
			nil,
		},
		{
			"Parse a more complex query",
			`
			{
				"domain": [
					{
						"$in": [
							"example.at",
							"example.com"
						]
					},
					{
						"$like": "microsoft.%"
					}
				],
				"path": [
					"/bin/ping",
					{
						"$notin": [
							"/sbin/ping",
							"/usr/sbin/ping"
						]
					}
				]
			}
			`,
			Query{
				"domain": []Matcher{
					{
						In: []interface{}{
							"example.at",
							"example.com",
						},
					},
					{
						Like: "microsoft.%",
					},
				},
				"path": []Matcher{
					{
						Equal: "/bin/ping",
					},
					{
						NotIn: []interface{}{
							"/sbin/ping",
							"/usr/sbin/ping",
						},
					},
				},
			},
			nil,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			var q Query
			err := json.Unmarshal([]byte(c.Input), &q)
			if c.Error != nil {
				// expected-error case: compare the message text
				if assert.Error(t, err) {
					assert.Equal(t, c.Error.Error(), err.Error())
				}
			} else {
				assert.NoError(t, err)
				assert.Equal(t, c.Expected, q)
			}
		})
	}
}
// Test_QueryBuilder verifies that Query.toSQLWhereClause renders the
// expected WHERE clause and named-parameter map against the schema
// derived from the Conn model. Field meanings: N = case name, Q =
// input query, R = expected SQL, P = expected parameters, E = expected
// error.
func Test_QueryBuilder(t *testing.T) {
	now := time.Now()

	var cases = []struct {
		N string
		Q Query
		R string
		P map[string]interface{}
		E error
	}{
		{
			"No filter",
			nil,
			"",
			nil,
			nil,
		},
		{
			"Simple, one-column filter",
			Query{"domain": []Matcher{
				{
					Equal: "example.com",
				},
				{
					Equal: "example.at",
				},
			}},
			"( domain = :domain0eq0 OR domain = :domain1eq0 )",
			map[string]interface{}{
				":domain0eq0": "example.com",
				":domain1eq0": "example.at",
			},
			nil,
		},
		{
			"Two column filter",
			Query{
				"domain": []Matcher{
					{
						Equal: "example.com",
					},
				},
				"path": []Matcher{
					{
						Equal: "/bin/curl",
					},
					{
						Equal: "/bin/ping",
					},
				},
			},
			"( domain = :domain0eq0 ) AND ( path = :path0eq0 OR path = :path1eq0 )",
			map[string]interface{}{
				":domain0eq0": "example.com",
				":path0eq0":   "/bin/curl",
				":path1eq0":   "/bin/ping",
			},
			nil,
		},
		{
			// time values are normalized to UTC in the sqlite time format
			"Time based filter",
			Query{
				"started": []Matcher{
					{
						Equal: now.Format(time.RFC3339),
					},
				},
			},
			"( started = :started0eq0 )",
			map[string]interface{}{
				":started0eq0": now.In(time.UTC).Format(orm.SqliteTimeFormat),
			},
			nil,
		},
		{
			// unknown columns are rejected with a multierror message
			"Invalid column access",
			Query{
				"forbiddenField": []Matcher{{}},
			},
			"",
			nil,
			fmt.Errorf("1 error occurred:\n\t* column forbiddenField is not allowed\n\n"),
		},
		{
			"Complex example",
			Query{
				"domain": []Matcher{
					{
						In: []interface{}{"example.at", "example.com"},
					},
					{
						Like: "microsoft.%",
					},
				},
				"path": []Matcher{
					{
						NotIn: []interface{}{
							"/bin/ping",
							"/sbin/ping",
							"/usr/bin/ping",
						},
					},
				},
			},
			"( domain IN ( :domain0in0, :domain0in1 ) OR domain LIKE :domain1like0 ) AND ( path NOT IN ( :path0notin0, :path0notin1, :path0notin2 ) )",
			map[string]interface{}{
				":domain0in0":   "example.at",
				":domain0in1":   "example.com",
				":domain1like0": "microsoft.%",
				":path0notin0":  "/bin/ping",
				":path0notin1":  "/sbin/ping",
				":path0notin2":  "/usr/bin/ping",
			},
			nil,
		},
	}

	tbl, err := orm.GenerateTableSchema("connections", Conn{})
	require.NoError(t, err)

	for idx, c := range cases {
		t.Run(c.N, func(t *testing.T) {
			//t.Parallel()

			str, params, err := c.Q.toSQLWhereClause(context.TODO(), "", tbl, orm.DefaultEncodeConfig)
			if c.E != nil {
				if assert.Error(t, err) {
					assert.Equal(t, c.E.Error(), err.Error(), "test case %d", idx)
				}
			} else {
				assert.NoError(t, err, "test case %d", idx)
				assert.Equal(t, c.P, params, "test case %d", idx)
				assert.Equal(t, c.R, str, "test case %d", idx)
			}
		})
	}
}

View file

@ -0,0 +1,77 @@
package netquery
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/netquery/orm"
)
// RuntimeQueryRunner provides a simple interface for the runtime database
// that allows direct SQL queries to be performed against db.
// Each resulting row of that query is marshaled as map[string]interface{}
// and returned as a single record to the caller.
//
// Using portbase/database#Query is not possible because portbase/database will
// complain about the SQL query being invalid. To work around that issue,
// RuntimeQueryRunner uses a 'GET key' request where the SQL query is embedded into
// the record key.
type RuntimeQueryRunner struct {
	db        *Database         // database the SQL queries are executed on
	reg       *runtime.Registry // runtime registry this runner is registered at
	keyPrefix string            // registered key prefix; stripped off incoming keys
}
// NewRuntimeQueryRunner returns a new runtime SQL query runner that parses
// and serves SQL queries from GET <prefix>/<plain sql query> requests.
func NewRuntimeQueryRunner(db *Database, prefix string, reg *runtime.Registry) (*RuntimeQueryRunner, error) {
	runner := &RuntimeQueryRunner{
		db:        db,
		reg:       reg,
		keyPrefix: prefix,
	}

	// register the runner as a simple value getter under prefix so GET
	// requests are routed to runner.get.
	if _, err := reg.Register(prefix, runtime.SimpleValueGetterFunc(runner.get)); err != nil {
		return nil, fmt.Errorf("failed to register runtime value provider: %w", err)
	}

	return runner, nil
}
// get serves a single runtime GET request: everything in keyOrPrefix
// after the registered key prefix is taken as a SQL query and executed
// verbatim against the database. The rows are wrapped in a single JSON
// record under a "result" key.
//
// NOTE(review): the query string is executed without any validation;
// presumably this endpoint is only reachable by trusted/local API
// clients — confirm before exposing it more widely.
func (runner *RuntimeQueryRunner) get(keyOrPrefix string) ([]record.Record, error) {
	query := strings.TrimPrefix(
		keyOrPrefix,
		runner.keyPrefix,
	)

	log.Infof("netquery: executing custom SQL query: %q", query)

	var result []map[string]interface{}
	if err := runner.db.Execute(context.Background(), query, orm.WithResult(&result)); err != nil {
		return nil, fmt.Errorf("failed to perform query %q: %w", query, err)
	}

	// we need to wrap the result slice into a map as portbase/database attempts
	// to inject a _meta field.
	blob, err := json.Marshal(map[string]interface{}{
		"result": result,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to marshal result: %w", err)
	}

	// construct a new record wrapper that uses the already prepared JSON blob.
	key := fmt.Sprintf("%s:%s", runner.reg.DatabaseName(), keyOrPrefix)
	wrapper, err := record.NewWrapper(key, new(record.Meta), dsd.JSON, blob)
	if err != nil {
		return nil, fmt.Errorf("failed to create record wrapper: %w", err)
	}

	return []record.Record{wrapper}, nil
}

View file

@ -12,7 +12,7 @@ import (
const (
cleanerTickDuration = 5 * time.Second
deleteConnsAfterEndedThreshold = 10 * time.Minute
DeleteConnsAfterEndedThreshold = 10 * time.Minute
)
func connectionCleaner(ctx context.Context) error {
@ -41,7 +41,7 @@ func cleanConnections() (activePIDs map[int]struct{}) {
_ = module.RunMediumPriorityMicroTask(&name, func(ctx context.Context) error {
now := time.Now().UTC()
nowUnix := now.Unix()
deleteOlderThan := now.Add(-deleteConnsAfterEndedThreshold).Unix()
deleteOlderThan := now.Add(-DeleteConnsAfterEndedThreshold).Unix()
// network connections
for _, conn := range conns.clone() {

View file

@ -149,7 +149,10 @@ type Connection struct { //nolint:maligned // TODO: fix alignment
DNSContext *resolver.DNSRequestContext
// TunnelContext holds additional information about the tunnel that this
// connection is using.
TunnelContext interface{}
TunnelContext interface {
GetExitNodeID() string
}
// Internal is set to true if the connection is attributed as an
// Portmaster internal connection. Internal may be set at different
// points and access to it must be guarded by the connection lock.
@ -580,10 +583,7 @@ func (conn *Connection) SetFirewallHandler(handler FirewallHandler) {
conn.pktQueue = make(chan packet.Packet, 1000)
// start handling
module.StartWorker("packet handler", func(ctx context.Context) error {
conn.packetHandler()
return nil
})
module.StartWorker("packet handler", conn.packetHandlerWorker)
}
conn.firewallHandler = handler
}
@ -608,35 +608,46 @@ func (conn *Connection) HandlePacket(pkt packet.Packet) {
}
}
// packetHandler sequentially handles queued packets.
func (conn *Connection) packetHandler() {
for pkt := range conn.pktQueue {
if pkt == nil {
return
// packetHandlerWorker sequentially handles queued packets.
func (conn *Connection) packetHandlerWorker(ctx context.Context) error {
for {
select {
case pkt := <-conn.pktQueue:
if pkt == nil {
return nil
}
packetHandlerHandleConn(conn, pkt)
case <-ctx.Done():
conn.Lock()
defer conn.Unlock()
conn.firewallHandler = nil
return nil
}
// get handler
conn.Lock()
}
}
// execute handler or verdict
if conn.firewallHandler != nil {
conn.firewallHandler(conn, pkt)
} else {
defaultFirewallHandler(conn, pkt)
}
func packetHandlerHandleConn(conn *Connection, pkt packet.Packet) {
conn.Lock()
defer conn.Unlock()
// log verdict
log.Tracer(pkt.Ctx()).Infof("filter: connection %s %s: %s", conn, conn.Verdict.Verb(), conn.Reason.Msg)
// submit trace logs
log.Tracer(pkt.Ctx()).Submit()
// Handle packet with appropriate handler.
if conn.firewallHandler != nil {
conn.firewallHandler(conn, pkt)
} else {
defaultFirewallHandler(conn, pkt)
}
// save does not touch any changing data
// must not be locked, will deadlock with cleaner functions
if conn.saveWhenFinished {
conn.saveWhenFinished = false
conn.Save()
}
// Log verdict.
log.Tracer(pkt.Ctx()).Infof("filter: connection %s %s: %s", conn, conn.Verdict.Verb(), conn.Reason.Msg)
// Submit trace logs.
log.Tracer(pkt.Ctx()).Submit()
conn.Unlock()
// Save() itself does not touch any changing data.
// Must not be locked - would deadlock with cleaner functions.
if conn.saveWhenFinished {
conn.saveWhenFinished = false
conn.Save()
}
}

View file

@ -1,4 +1,4 @@
// +build windows
//go:build windows
package iphelper

View file

@ -1,4 +1,4 @@
// +build windows
//go:build windows
package iphelper

View file

@ -1,4 +1,4 @@
// +build windows
//go:build windows
package iphelper
@ -10,6 +10,7 @@ import (
"sync"
"unsafe"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/network/socket"
"golang.org/x/sys/windows"
@ -28,7 +29,7 @@ const (
type iphelperTCPTable struct {
// docs: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366921(v=vs.85).aspx
numEntries uint32
table [4096]iphelperTCPRow
table [maxStateTableEntries]iphelperTCPRow
}
type iphelperTCPRow struct {
@ -44,7 +45,7 @@ type iphelperTCPRow struct {
type iphelperTCP6Table struct {
// docs: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366905(v=vs.85).aspx
numEntries uint32
table [4096]iphelperTCP6Row
table [maxStateTableEntries]iphelperTCP6Row
}
type iphelperTCP6Row struct {
@ -62,7 +63,7 @@ type iphelperTCP6Row struct {
type iphelperUDPTable struct {
// docs: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366932(v=vs.85).aspx
numEntries uint32
table [4096]iphelperUDPRow
table [maxStateTableEntries]iphelperUDPRow
}
type iphelperUDPRow struct {
@ -75,7 +76,7 @@ type iphelperUDPRow struct {
type iphelperUDP6Table struct {
// docs: https://msdn.microsoft.com/en-us/library/windows/desktop/aa366925(v=vs.85).aspx
numEntries uint32
table [4096]iphelperUDP6Row
table [maxStateTableEntries]iphelperUDP6Row
}
type iphelperUDP6Row struct {
@ -125,6 +126,11 @@ const (
// maxBufSize is the maximum size we will allocate for responses. This was
// previously set at 65k, which was too little for some production cases.
maxBufSize = 1048576 // 2^20B, 1MB
// maxStateTableEntries is the maximum supported amount of entries of the
// state tables.
// This is never allocated, but just casted to from an unsafe pointer.
maxStateTableEntries = 65535
)
var (
@ -261,7 +267,14 @@ func (ipHelper *IPHelper) getTable(ipVersion, protocol uint8) (connections []*so
case protocol == TCP && ipVersion == IPv4:
tcpTable := (*iphelperTCPTable)(unsafe.Pointer(&buf[0]))
table := tcpTable.table[:tcpTable.numEntries]
// Check if we got more entries than supported.
tableEntries := tcpTable.numEntries
if tableEntries > maxStateTableEntries {
tableEntries = maxStateTableEntries
log.Warningf("network/iphelper: received TCPv4 table with more entries than supported: %d/%d", tcpTable.numEntries, maxStateTableEntries)
}
// Cap table to actual entries.
table := tcpTable.table[:tableEntries]
for _, row := range table {
if row.state == iphelperTCPStateListen {
@ -290,7 +303,14 @@ func (ipHelper *IPHelper) getTable(ipVersion, protocol uint8) (connections []*so
case protocol == TCP && ipVersion == IPv6:
tcpTable := (*iphelperTCP6Table)(unsafe.Pointer(&buf[0]))
table := tcpTable.table[:tcpTable.numEntries]
// Check if we got more entries than supported.
tableEntries := tcpTable.numEntries
if tableEntries > maxStateTableEntries {
tableEntries = maxStateTableEntries
log.Warningf("network/iphelper: received TCPv6 table with more entries than supported: %d/%d", tcpTable.numEntries, maxStateTableEntries)
}
// Cap table to actual entries.
table := tcpTable.table[:tableEntries]
for _, row := range table {
if row.state == iphelperTCPStateListen {
@ -319,7 +339,14 @@ func (ipHelper *IPHelper) getTable(ipVersion, protocol uint8) (connections []*so
case protocol == UDP && ipVersion == IPv4:
udpTable := (*iphelperUDPTable)(unsafe.Pointer(&buf[0]))
table := udpTable.table[:udpTable.numEntries]
// Check if we got more entries than supported.
tableEntries := udpTable.numEntries
if tableEntries > maxStateTableEntries {
tableEntries = maxStateTableEntries
log.Warningf("network/iphelper: received UDPv4 table with more entries than supported: %d/%d", udpTable.numEntries, maxStateTableEntries)
}
// Cap table to actual entries.
table := udpTable.table[:tableEntries]
for _, row := range table {
binds = append(binds, &socket.BindInfo{
@ -334,7 +361,14 @@ func (ipHelper *IPHelper) getTable(ipVersion, protocol uint8) (connections []*so
case protocol == UDP && ipVersion == IPv6:
udpTable := (*iphelperUDP6Table)(unsafe.Pointer(&buf[0]))
table := udpTable.table[:udpTable.numEntries]
// Check if we got more entries than supported.
tableEntries := udpTable.numEntries
if tableEntries > maxStateTableEntries {
tableEntries = maxStateTableEntries
log.Warningf("network/iphelper: received UDPv6 table with more entries than supported: %d/%d", udpTable.numEntries, maxStateTableEntries)
}
// Cap table to actual entries.
table := udpTable.table[:tableEntries]
for _, row := range table {
binds = append(binds, &socket.BindInfo{

View file

@ -1,4 +1,4 @@
// +build windows
//go:build windows
package iphelper

43
network/multicast.go Normal file
View file

@ -0,0 +1,43 @@
package network
import (
"net"
"github.com/safing/portmaster/network/netutils"
)
// GetMulticastRequestConn searches for and returns the requesting connection
// of a possible multicast/broadcast response.
func GetMulticastRequestConn(responseConn *Connection, responseFromNet *net.IPNet) *Connection {
	// Calculate the broadcast address the query would have gone to.
	responseNetBroadcastIP := netutils.GetBroadcastAddress(responseFromNet.IP, responseFromNet.Mask)

	// Search all known connections for a matching outgoing request.
	for _, candidate := range conns.clone() {
		// Skip candidates that cannot be the requesting connection:
		// incoming or already ended connections, as well as any mismatch in
		// protocol, local port, local IP or owning process.
		if candidate.Inbound ||
			candidate.Ended != 0 ||
			candidate.Entity.Protocol != responseConn.Entity.Protocol ||
			candidate.LocalPort != responseConn.LocalPort ||
			!candidate.LocalIP.Equal(responseConn.LocalIP) ||
			!candidate.Process().Equal(responseConn.Process()) {
			continue
		}

		// We found a (possibly routed) multicast request that matches the response!
		if candidate.Entity.IPScope == netutils.LocalMulticast &&
			(responseConn.Entity.IPScope == netutils.LinkLocal ||
				responseConn.Entity.IPScope == netutils.SiteLocal) {
			return candidate
		}

		// We found a (link local) broadcast request that matches the response!
		if candidate.Entity.IP.Equal(responseNetBroadcastIP) &&
			responseFromNet.Contains(candidate.LocalIP) {
			return candidate
		}
	}

	// No matching request found.
	return nil
}

View file

@ -4,21 +4,38 @@ import (
"fmt"
"net"
"regexp"
"strings"
"github.com/miekg/dns"
)
var cleanDomainRegex = regexp.MustCompile(
`^` + // match beginning
`(` + // start subdomain group
`(xn--)?` + // idn prefix
`[a-z0-9_-]{1,63}` + // main chunk
`\.` + // ending with a dot
`)*` + // end subdomain group, allow any number of subdomains
`(xn--)?` + // TLD idn prefix
`[a-z0-9_-]{2,63}` + // TLD main chunk with at least two characters
`\.` + // ending with a dot
`$`, // match end
var (
	// cleanDomainRegex matches a conservatively well-formed FQDN:
	// any number of subdomain labels followed by a TLD, each with an
	// optional IDN ("xn--") prefix, all ending with a dot.
	cleanDomainRegex = regexp.MustCompile(
		`^` + // match beginning
			`(` + // start subdomain group
			`(xn--)?` + // idn prefix
			`[a-z0-9_-]{1,63}` + // main chunk, max label length per DNS is 63
			`\.` + // ending with a dot
			`)*` + // end subdomain group, allow any number of subdomains
			`(xn--)?` + // TLD idn prefix
			`[a-z0-9_-]{2,63}` + // TLD main chunk with at least two characters
			`\.` + // ending with a dot
			`$`, // match end
	)

	// dnsSDDomainRegex is a lot more lax to better suit the allowed characters in DNS-SD.
	// Not all characters have been allowed - some special characters were
	// removed to reduce the general attack surface.
	dnsSDDomainRegex = regexp.MustCompile(
		// Start of charset selection.
		`^[` +
			// Printable ASCII (character code 32-127), excluding some special characters.
			// NOTE(review): `\a` in this character class is the regex escape for the
			// bell character (0x07), and `\a-z` forms a range from 0x07 to 'z' —
			// a literal backtick between `_` and `a-z` may have been intended here;
			// verify against the upstream source.
			` !#$%&()*+,\-\./0-9:;=?@A-Z[\\\]^_\a-z{|}~` +
			// Only latin characters from extended ASCII (character code 128-255).
			`ŠŒŽšœžŸ¡¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ` +
			// End of charset selection.
			`]*$`,
	)
)
// IsValidFqdn returns whether the given string is a valid fqdn.
@ -33,15 +50,18 @@ func IsValidFqdn(fqdn string) bool {
return false
}
// check with regex
if !cleanDomainRegex.MatchString(fqdn) {
// IsFqdn checks if a domain name is fully qualified.
if !dns.IsFqdn(fqdn) {
return false
}
// check with miegk/dns
// Use special check for .local domains to support DNS-SD.
if strings.HasSuffix(fqdn, ".local.") {
return dnsSDDomainRegex.MatchString(fqdn)
}
// IsFqdn checks if a domain name is fully qualified.
if !dns.IsFqdn(fqdn) {
// check with regex
if !cleanDomainRegex.MatchString(fqdn) {
return false
}

View file

@ -28,30 +28,47 @@ func GetIPScope(ip net.IP) IPScope { //nolint:gocognit
if ip4 := ip.To4(); ip4 != nil {
// IPv4
switch {
case ip4[0] == 0:
// 0.0.0.0/8
return Invalid
case ip4[0] == 127:
// 127.0.0.0/8
// 127.0.0.0/8 (RFC1918)
return HostLocal
case ip4[0] == 169 && ip4[1] == 254:
// 169.254.0.0/16
// 169.254.0.0/16 (RFC3927)
return LinkLocal
case ip4[0] == 10:
// 10.0.0.0/8
// 10.0.0.0/8 (RFC1918)
return SiteLocal
case ip4[0] == 172 && ip4[1]&0xf0 == 16:
// 172.16.0.0/12
case ip4[0] == 100 && ip4[1]&0b11000000 == 64:
// 100.64.0.0/10 (RFC6598)
return SiteLocal
case ip4[0] == 172 && ip4[1]&0b11110000 == 16:
// 172.16.0.0/12 (RFC1918)
return SiteLocal
case ip4[0] == 192 && ip4[1] == 0 && ip4[2] == 2:
// 192.0.2.0/24 (TEST-NET-1, RFC5737)
return Invalid
case ip4[0] == 192 && ip4[1] == 168:
// 192.168.0.0/16
// 192.168.0.0/16 (RFC1918)
return SiteLocal
case ip4[0] == 198 && ip4[1] == 51 && ip4[2] == 100:
// 198.51.100.0/24 (TEST-NET-2, RFC5737)
return Invalid
case ip4[0] == 203 && ip4[1] == 0 && ip4[2] == 113:
// 203.0.113.0/24 (TEST-NET-3, RFC5737)
return Invalid
case ip4[0] == 224:
// 224.0.0.0/8
// 224.0.0.0/8 (RFC5771)
return LocalMulticast
case ip4[0] == 233 && ip4[1] == 252 && ip4[2] == 0:
// 233.252.0.0/24 (MCAST-TEST-NET; RFC5771, RFC6676)
return Invalid
case ip4[0] >= 225 && ip4[0] <= 238:
// 225.0.0.0/8 - 238.0.0.0/8
// 225.0.0.0/8 - 238.0.0.0/8 (RFC5771)
return GlobalMulticast
case ip4[0] == 239:
// 239.0.0.0/8
// RFC2365 - https://tools.ietf.org/html/rfc2365
// 239.0.0.0/8 (RFC2365)
return LocalMulticast
case ip4[0] == 255 && ip4[1] == 255 && ip4[2] == 255 && ip4[3] == 255:
// 255.255.255.255/32
@ -65,6 +82,8 @@ func GetIPScope(ip net.IP) IPScope { //nolint:gocognit
} else if len(ip) == net.IPv6len {
// IPv6
switch {
case ip.Equal(net.IPv6zero):
return Invalid
case ip.Equal(net.IPv6loopback):
return HostLocal
case ip[0]&0xfe == 0xfc:
@ -110,3 +129,29 @@ func (scope IPScope) IsGlobal() bool {
return false
}
}
// GetBroadcastAddress returns the broadcast address of the given IP and network mask.
// If a mixed IPv4/IPv6 input is given, it returns nil.
func GetBroadcastAddress(ip net.IP, netMask net.IPMask) net.IP {
// Convert to standard v4.
if ip4 := ip.To4(); ip4 != nil {
ip = ip4
}
mask := net.IP(netMask)
if ip4Mask := mask.To4(); ip4Mask != nil {
mask = ip4Mask
}
// Check for mixed v4/v6 input.
if len(ip) != len(mask) {
return nil
}
// Merge to broadcast address
n := len(ip)
broadcastAddress := make(net.IP, n)
for i := 0; i < n; i++ {
broadcastAddress[i] = ip[i] | ^mask[i]
}
return broadcastAddress
}

View file

@ -60,7 +60,7 @@ func Lookup(pktInfo *packet.Info, fast bool) (pid int, inbound bool, err error)
return udp6Table.lookup(pktInfo, fast)
default:
return socket.UndefinedProcessID, false, errors.New("unsupported protocol for finding process")
return socket.UndefinedProcessID, pktInfo.Inbound, errors.New("unsupported protocol for finding process")
}
}

View file

@ -67,6 +67,32 @@ func (p *Process) Profile() *profile.LayeredProfile {
return p.profile
}
// IsIdentified returns whether the process has been identified or if it
// represents some kind of unidentified process.
func (p *Process) IsIdentified() bool {
	// A nil process is never identified.
	if p == nil {
		return false
	}

	// The special PIDs mark processes that could not be attributed.
	switch p.Pid {
	case UndefinedProcessID, UnidentifiedProcessID, UnsolicitedProcessID:
		return false
	default:
		return true
	}
}
// Equal returns if the two processes are both identified and have the same PID.
func (p *Process) Equal(other *Process) bool {
	// Unidentified processes never compare equal, not even to themselves.
	if !p.IsIdentified() || !other.IsIdentified() {
		return false
	}
	return p.Pid == other.Pid
}
// IsSystemResolver is a shortcut to check if the process is or belongs to the
// system resolver and needs special handling.
func (p *Process) IsSystemResolver() bool {

View file

@ -8,6 +8,7 @@ import (
"golang.org/x/sync/singleflight"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/network/socket"
"github.com/safing/portmaster/profile"
)
@ -28,6 +29,13 @@ const (
NetworkHostProcessID = -255
)
func init() {
	// Check required matching values.
	// The process and socket packages each declare their own special process
	// ID constants; this guards against the values drifting apart, since they
	// appear to be used interchangeably across package boundaries.
	if UndefinedProcessID != socket.UndefinedProcessID {
		panic("UndefinedProcessID does not match socket.UndefinedProcessID")
	}
}
var (
// unidentifiedProcess is used for non-attributed outgoing connections.
unidentifiedProcess = &Process{

View file

@ -258,6 +258,12 @@ Examples: "192.168.0.1 TCP/HTTP", "LAN UDP/50000-55000", "example.com */HTTPS",
Important: DNS Requests are only matched against domain and filter list rules, all others require an IP address and are checked only with the following IP connection.
`, `"`, "`")
// rulesVerdictNames defines the verdicts names to be used for filter rules.
rulesVerdictNames := map[string]string{
"-": "Block", // Default.
"+": "Allow",
}
// Endpoint Filter List
err = config.Register(&config.Option{
Name: "Outgoing Rules",
@ -268,10 +274,11 @@ Important: DNS Requests are only matched against domain and filter list rules, a
OptType: config.OptTypeStringArray,
DefaultValue: []string{},
Annotations: config.Annotations{
config.StackableAnnotation: true,
config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
config.DisplayOrderAnnotation: cfgOptionEndpointsOrder,
config.CategoryAnnotation: "Rules",
config.StackableAnnotation: true,
config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
config.DisplayOrderAnnotation: cfgOptionEndpointsOrder,
config.CategoryAnnotation: "Rules",
endpoints.EndpointListVerdictNamesAnnotation: rulesVerdictNames,
},
ValidationRegex: endpoints.ListEntryValidationRegex,
ValidationFunc: endpoints.ValidateEndpointListConfigOption,
@ -283,6 +290,7 @@ Important: DNS Requests are only matched against domain and filter list rules, a
cfgStringArrayOptions[CfgOptionEndpointsKey] = cfgOptionEndpoints
// Service Endpoint Filter List
defaultIncomingRulesValue := []string{"+ Localhost"}
err = config.Register(&config.Option{
Name: "Incoming Rules",
Key: CfgOptionServiceEndpointsKey,
@ -290,13 +298,14 @@ Important: DNS Requests are only matched against domain and filter list rules, a
Help: rulesHelp,
Sensitive: true,
OptType: config.OptTypeStringArray,
DefaultValue: []string{"+ Localhost"},
DefaultValue: defaultIncomingRulesValue,
ExpertiseLevel: config.ExpertiseLevelExpert,
Annotations: config.Annotations{
config.StackableAnnotation: true,
config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
config.DisplayOrderAnnotation: cfgOptionServiceEndpointsOrder,
config.CategoryAnnotation: "Rules",
config.StackableAnnotation: true,
config.DisplayHintAnnotation: endpoints.DisplayHintEndpointList,
config.DisplayOrderAnnotation: cfgOptionServiceEndpointsOrder,
config.CategoryAnnotation: "Rules",
endpoints.EndpointListVerdictNamesAnnotation: rulesVerdictNames,
config.QuickSettingsAnnotation: []config.QuickSetting{
{
Name: "SSH",
@ -313,6 +322,16 @@ Important: DNS Requests are only matched against domain and filter list rules, a
Action: config.QuickMergeTop,
Value: []string{"+ * */3389"},
},
{
Name: "Allow all from LAN",
Action: config.QuickMergeTop,
Value: []string{"+ LAN"},
},
{
Name: "Allow all from Internet",
Action: config.QuickMergeTop,
Value: []string{"+ Internet"},
},
},
},
ValidationRegex: endpoints.ListEntryValidationRegex,
@ -321,38 +340,17 @@ Important: DNS Requests are only matched against domain and filter list rules, a
if err != nil {
return err
}
cfgOptionServiceEndpoints = config.Concurrent.GetAsStringArray(CfgOptionServiceEndpointsKey, []string{})
cfgOptionServiceEndpoints = config.Concurrent.GetAsStringArray(CfgOptionServiceEndpointsKey, defaultIncomingRulesValue)
cfgStringArrayOptions[CfgOptionServiceEndpointsKey] = cfgOptionServiceEndpoints
filterListsHelp := strings.ReplaceAll(`Filter lists contain domains and IP addresses that are known to be used adversarial. The data is collected from many public sources and put into the following categories. In order to active a category, add it's "ID" to the list.
**Ads & Trackers** - ID: "TRAC"
Services that track and profile people online, including as ads, analytics and telemetry.
**Malware** - ID: "MAL"
Services that are (ab)used for attacking devices through technical means.
**Deception** - ID: "DECEP"
Services that trick humans into thinking the service is genuine, while it is not, including phishing, fake news and fraud.
**Bad Stuff (Mixed)** - ID: "BAD"
Miscellaneous services that are believed to be harmful to security or privacy, but their exact use is unknown, not categorized, or lists have mixed categories.
**NSFW** - ID: "NSFW"
Services that are generally not accepted in work environments, including pornography, violence and gambling.
The lists are automatically updated every hour using incremental updates.
[See here](https://github.com/safing/intel-data) for more detail about these lists, their sources and how to help to improve them.
`, `"`, "`")
// Filter list IDs
defaultFilterListsValue := []string{"TRAC", "MAL", "BAD"}
err = config.Register(&config.Option{
Name: "Filter Lists",
Key: CfgOptionFilterListsKey,
Description: "Block connections that match enabled filter lists.",
Help: filterListsHelp,
OptType: config.OptTypeStringArray,
DefaultValue: []string{"TRAC", "MAL", "BAD"},
DefaultValue: defaultFilterListsValue,
Annotations: config.Annotations{
config.DisplayHintAnnotation: "filter list",
config.DisplayOrderAnnotation: cfgOptionFilterListsOrder,
@ -363,7 +361,7 @@ The lists are automatically updated every hour using incremental updates.
if err != nil {
return err
}
cfgOptionFilterLists = config.Concurrent.GetAsStringArray(CfgOptionFilterListsKey, []string{})
cfgOptionFilterLists = config.Concurrent.GetAsStringArray(CfgOptionFilterListsKey, defaultFilterListsValue)
cfgStringArrayOptions[CfgOptionFilterListsKey] = cfgOptionFilterLists
// Include CNAMEs
@ -575,7 +573,9 @@ The lists are automatically updated every hour using incremental updates.
err = config.Register(&config.Option{
Name: "Block Bypassing",
Key: CfgOptionPreventBypassingKey,
Description: `Prevent apps from bypassing the privacy filter.
Description: `Prevent apps from bypassing Portmaster's privacy protections.
If Block Bypassing is disabled, Portmaster can no longer protect you or filter connections from the affected applications.
Current Features:
- Disable Firefox' internal DNS-over-HTTPs resolver
- Block direct access to public DNS resolvers

View file

@ -208,6 +208,14 @@ func parseEndpoint(value string) (endpoint Endpoint, err error) { //nolint:gocog
return nil, fmt.Errorf(`invalid endpoint definition: "%s"`, value)
}
// Remove comment.
for i, field := range fields {
if strings.HasPrefix(field, "#") {
fields = fields[:i]
break
}
}
// any
if endpoint, err = parseTypeAny(fields); endpoint != nil || err != nil {
return

View file

@ -62,11 +62,13 @@ entriesLoop:
// ListEntryValidationRegex is a regex to bullshit check endpoint list entries.
var ListEntryValidationRegex = strings.Join([]string{
`^(\+|\-) `, // Rule verdict.
`(! +)?`, // Invert matching.
`[A-z0-9\.:\-*/]+`, // Entity matching.
`( `, // Start of optional matching.
`[A-z0-9*]+`, // Protocol matching.
`(/[A-z0-9]+(\-[A-z0-9]+)?)?`, // Port and port range matching.
`)?$`, // End of optional matching.
`)?`, // End of optional matching.
`( +#.*)?`, // Optional comment.
}, "")
// ValidateEndpointListConfigOption validates the given value.
@ -88,7 +90,32 @@ func (e Endpoints) IsSet() bool {
// Match checks whether the given entity matches any of the endpoint definitions in the list.
func (e Endpoints) Match(ctx context.Context, entity *intel.Entity) (result EPResult, reason Reason) {
for _, entry := range e {
if entry != nil {
if entry == nil {
continue
}
if result, reason = entry.Matches(ctx, entity); result != NoMatch {
return
}
}
return NoMatch, nil
}
// MatchMulti checks whether the given entities match any of the endpoint
// definitions in the list. Every rule is evaluated against all given entities
// and only if no match was registered, the next rule is evaluated.
func (e Endpoints) MatchMulti(ctx context.Context, entities ...*intel.Entity) (result EPResult, reason Reason) {
for _, entry := range e {
if entry == nil {
continue
}
for _, entity := range entities {
if entity == nil {
continue
}
if result, reason = entry.Matches(ctx, entity); result != NoMatch {
return
}

View file

@ -6,8 +6,6 @@ import (
"github.com/safing/portbase/database/migration"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
// Dependency.
_ "github.com/safing/portmaster/core/base"
"github.com/safing/portmaster/updates"
)

View file

@ -12,7 +12,7 @@ func registerAPI() error {
Path: "dns/clear",
Write: api.PermitUser,
BelongsTo: module,
ActionFunc: clearNameCache,
ActionFunc: clearNameCacheHandler,
Name: "Clear cached DNS records",
Description: "Deletes all saved DNS records from the database.",
}); err != nil {

View file

@ -31,7 +31,7 @@ var (
// Cloudflare (encrypted DNS, with malware protection)
`dot://1.1.1.2:853?verify=cloudflare-dns.com&name=Cloudflare&blockedif=zeroip`,
// `dot://1.0.0.2:853?verify=cloudflare-dns.com&name=Cloudflare&blockedif=zeroip`,
`dot://1.0.0.2:853?verify=cloudflare-dns.com&name=Cloudflare&blockedif=zeroip`,
// AdGuard (encrypted DNS, default flavor)
// `dot://94.140.14.14:853?verify=dns.adguard.com&name=AdGuard&blockedif=zeroip`,
@ -107,8 +107,9 @@ The format is: "protocol://ip:port?parameter=value&parameter=value"
- "search": specify prioritized domains/TLDs for this resolver (delimited by ",")
- "search-only": use this resolver for domains in the "search" parameter only (no value)
`, `"`, "`"),
Sensitive: true,
OptType: config.OptTypeStringArray,
ExpertiseLevel: config.ExpertiseLevelExpert,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelStable,
DefaultValue: defaultNameServers,
ValidationRegex: fmt.Sprintf("^(%s|%s|%s|%s|%s|%s)://.*", ServerTypeDoT, ServerTypeDoH, ServerTypeDNS, ServerTypeTCP, HTTPSProtocol, TLSProtocol),
@ -118,6 +119,14 @@ The format is: "protocol://ip:port?parameter=value&parameter=value"
config.DisplayOrderAnnotation: cfgOptionNameServersOrder,
config.CategoryAnnotation: "Servers",
config.QuickSettingsAnnotation: []config.QuickSetting{
{
Name: "Cloudflare (with Malware Filter)",
Action: config.QuickReplace,
Value: []string{
"dot://cloudflare-dns.com?ip=1.1.1.2&name=Cloudflare&blockedif=zeroip",
"dot://cloudflare-dns.com?ip=1.0.0.2&name=Cloudflare&blockedif=zeroip",
},
},
{
Name: "Quad9",
Action: config.QuickReplace,
@ -141,14 +150,6 @@ The format is: "protocol://ip:port?parameter=value&parameter=value"
"dot://dot1.applied-privacy.net?ip=94.130.106.88&name=AppliedPrivacy",
},
},
{
Name: "Cloudflare (with Malware Filter)",
Action: config.QuickReplace,
Value: []string{
"dot://cloudflare-dns.com?ip=1.1.1.2&name=Cloudflare&blockedif=zeroip",
"dot://cloudflare-dns.com?ip=1.0.0.2&name=Cloudflare&blockedif=zeroip",
},
},
},
"self:detail:internalSpecialUseDomains": internalSpecialUseDomains,
"self:detail:connectivityDomains": netenv.ConnectivityDomains,
@ -222,9 +223,9 @@ The format is: "protocol://ip:port?parameter=value&parameter=value"
noMulticastDNS = status.SecurityLevelOption(CfgOptionNoMulticastDNSKey)
err = config.Register(&config.Option{
Name: "Enforce Secure DNS",
Name: "Use Secure Protocols Only",
Key: CfgOptionNoInsecureProtocolsKey,
Description: "Never resolve using insecure protocols, ie. plain DNS.",
Description: "Never resolve using insecure protocols, ie. plain DNS. This may break certain local DNS services, which always use plain DNS.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,

View file

@ -1,6 +1,7 @@
package resolver
import (
"context"
"errors"
"fmt"
"sync"
@ -129,13 +130,18 @@ func (nameRecord *NameRecord) Save() error {
return recordDatabase.PutNew(nameRecord)
}
// clearNameCache clears all dns caches from the database.
func clearNameCache(ar *api.Request) (msg string, err error) {
// clearNameCacheHandler is an API handler that clears all dns caches from the database.
// It logs the user-triggered action and delegates the actual work to
// clearNameCache, using the API request's context.
func clearNameCacheHandler(ar *api.Request) (msg string, err error) {
	log.Info("resolver: user requested dns cache clearing via action")
	return clearNameCache(ar.Context())
}
// clearNameCache clears all dns caches from the database.
func clearNameCache(ctx context.Context) (msg string, err error) {
recordDatabase.FlushCache()
recordDatabase.ClearCache()
n, err := recordDatabase.Purge(ar.Context(), query.New(nameRecordsKeyPrefix))
n, err := recordDatabase.Purge(ctx, query.New(nameRecordsKeyPrefix))
if err != nil {
return "", err
}

View file

@ -4,10 +4,13 @@ import (
"context"
"errors"
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/miekg/dns"
"golang.org/x/net/publicsuffix"
"github.com/safing/portbase/database"
"github.com/safing/portbase/log"
@ -89,6 +92,11 @@ type Query struct {
IgnoreFailing bool
LocalResolversOnly bool
// ICANNSpace signifies if the domain is within ICANN managed domain space.
ICANNSpace bool
// Domain root is the effective TLD +1.
DomainRoot string
// internal
dotPrefixedFQDN string
}
@ -98,6 +106,41 @@ func (q *Query) ID() string {
return q.FQDN + q.QType.String()
}
// InitPublicSuffixData initializes the public suffix data of the query:
// it sets ICANNSpace (whether the domain lies within ICANN-managed domain
// space) and DomainRoot (the effective TLD plus one label).
func (q *Query) InitPublicSuffixData() {
	// Get public suffix and derive if domain is in ICANN space.
	// Multi-label suffixes (containing a dot) are treated as being within
	// ICANN space as well, even when not flagged as ICANN-managed.
	suffix, icann := publicsuffix.PublicSuffix(strings.TrimSuffix(q.FQDN, "."))
	if icann || strings.Contains(suffix, ".") {
		q.ICANNSpace = true
	}

	// Override some cases.
	switch suffix {
	case "example":
		q.ICANNSpace = true // Defined by ICANN.
	case "invalid":
		q.ICANNSpace = true // Defined by ICANN.
	case "local":
		q.ICANNSpace = true // Defined by ICANN.
	case "localhost":
		q.ICANNSpace = true // Defined by ICANN.
	case "onion":
		q.ICANNSpace = false // Defined by ICANN, but special.
	case "test":
		q.ICANNSpace = true // Defined by ICANN.
	}

	// Add suffix to adhere to FQDN format.
	suffix += "."

	switch {
	case len(q.FQDN) == len(suffix):
		// We are at or below the domain root, reset.
		q.DomainRoot = ""
	case len(q.FQDN) > len(suffix):
		// Find the dot preceding the label directly before the suffix;
		// LastIndex returns -1 when there is none, so +1 yields either the
		// start of that label or the beginning of the FQDN.
		domainRootStart := strings.LastIndex(q.FQDN[:len(q.FQDN)-len(suffix)-1], ".") + 1
		q.DomainRoot = q.FQDN[domainRootStart:]
	}
}
// check runs sanity checks and does some initialization. Returns whether the query passed the basic checks.
func (q *Query) check() (ok bool) {
if q.FQDN == "" {
@ -318,8 +361,8 @@ func resolveAndCache(ctx context.Context, q *Query, oldCache *RRCache) (rrCache
}
// check if we are online
if primarySource != ServerSourceEnv && netenv.GetOnlineStatus() == netenv.StatusOffline {
if !netenv.IsConnectivityDomain(q.FQDN) {
if netenv.GetOnlineStatus() == netenv.StatusOffline && primarySource != ServerSourceEnv {
if q.FQDN != netenv.DNSTestDomain && !netenv.IsConnectivityDomain(q.FQDN) {
// we are offline and this is not an online check query
return oldCache, ErrOffline
}
@ -358,6 +401,7 @@ resolveLoop:
// some resolvers might also block
return nil, err
case netenv.GetOnlineStatus() == netenv.StatusOffline &&
q.FQDN != netenv.DNSTestDomain &&
!netenv.IsConnectivityDomain(q.FQDN):
// we are offline and this is not an online check query
return oldCache, ErrOffline
@ -478,3 +522,45 @@ func shouldResetCache(q *Query) (reset bool) {
return false
}
func init() {
	// Provide netenv with a resolver-backed DNS test query function —
	// presumably used for connectivity/online-status probing; see netenv.
	netenv.DNSTestQueryFunc = testConnectivity
}
// testConnectivity tests if resolving a query succeeds and returns whether
// the query itself succeeded, separate from interpreting the result.
func testConnectivity(ctx context.Context, fqdn string) (ips []net.IP, ok bool, err error) {
	q := &Query{
		FQDN:      fqdn,
		QType:     dns.Type(dns.TypeA),
		NoCaching: true,
	}
	if !q.check() {
		return nil, false, ErrInvalid
	}

	rrCache, err := resolveAndCache(ctx, q, nil)

	// Classify resolution errors first: some errors mean the query itself
	// went through, so they still count as a successful connectivity check.
	switch {
	case errors.Is(err, ErrNotFound),
		errors.Is(err, ErrBlocked),
		errors.Is(err, ErrNoCompliance):
		return nil, true, err
	case err != nil:
		// Any other error means the query failed on a technical level.
		return nil, false, err
	}

	// The query succeeded; interpret the DNS response code.
	switch rrCache.RCode {
	case dns.RcodeNameError:
		return nil, true, ErrNotFound
	case dns.RcodeRefused:
		return nil, true, errors.New("refused")
	default:
		if records := rrCache.ExportAllARecords(); len(records) > 0 {
			return records, true, nil
		}
		return nil, true, ErrNotFound
	}
}

View file

@ -128,7 +128,7 @@ func (er *envResolverConn) makeRRCache(q *Query, answers []dns.RR) *RRCache {
// Disable caching, as the env always has the raw data available.
q.NoCaching = true
return &RRCache{
rrCache := &RRCache{
Domain: q.FQDN,
Question: q.QType,
RCode: dns.RcodeSuccess,
@ -136,6 +136,10 @@ func (er *envResolverConn) makeRRCache(q *Query, answers []dns.RR) *RRCache {
Extra: []dns.RR{internalSpecialUseComment}, // Always add comment about this TLD.
Resolver: envResolver.Info.Copy(),
}
if len(rrCache.Answer) == 0 {
rrCache.RCode = dns.RcodeNameError
}
return rrCache
}
func (er *envResolverConn) ReportFailure() {}
@ -145,3 +149,8 @@ func (er *envResolverConn) IsFailing() bool {
}
func (er *envResolverConn) ResetFailure() {}
// QueryPortmasterEnv queries the environment resolver directly.
// It bypasses the regular resolution flow and forwards the query straight
// to the package-level envResolver's connection.
func QueryPortmasterEnv(ctx context.Context, q *Query) (*RRCache, error) {
	return envResolver.Conn.Query(ctx, q)
}

View file

@ -12,6 +12,7 @@ import (
"github.com/miekg/dns"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/netenv"
"github.com/safing/portmaster/network/netutils"
)
@ -91,19 +92,6 @@ func listenToMDNS(ctx context.Context) error {
}()
}
multicast6Conn, err = net.ListenMulticastUDP("udp6", nil, &net.UDPAddr{IP: net.IP([]byte{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb}), Port: 5353})
if err != nil {
// TODO: retry after some time
log.Warningf("intel(mdns): failed to create udp6 listen multicast socket: %s", err)
} else {
module.StartServiceWorker("mdns udp6 multicast listener", 0, func(ctx context.Context) error {
return listenForDNSPackets(ctx, multicast6Conn, messages)
})
defer func() {
_ = multicast6Conn.Close()
}()
}
unicast4Conn, err = net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
// TODO: retry after some time
@ -117,17 +105,34 @@ func listenToMDNS(ctx context.Context) error {
}()
}
unicast6Conn, err = net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0})
if err != nil {
// TODO: retry after some time
log.Warningf("intel(mdns): failed to create udp6 listen socket: %s", err)
if netenv.IPv6Enabled() {
multicast6Conn, err = net.ListenMulticastUDP("udp6", nil, &net.UDPAddr{IP: net.IP([]byte{0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb}), Port: 5353})
if err != nil {
// TODO: retry after some time
log.Warningf("intel(mdns): failed to create udp6 listen multicast socket: %s", err)
} else {
module.StartServiceWorker("mdns udp6 multicast listener", 0, func(ctx context.Context) error {
return listenForDNSPackets(ctx, multicast6Conn, messages)
})
defer func() {
_ = multicast6Conn.Close()
}()
}
unicast6Conn, err = net.ListenUDP("udp6", &net.UDPAddr{IP: net.IPv6zero, Port: 0})
if err != nil {
// TODO: retry after some time
log.Warningf("intel(mdns): failed to create udp6 listen socket: %s", err)
} else {
module.StartServiceWorker("mdns udp6 unicast listener", 0, func(ctx context.Context) error {
return listenForDNSPackets(ctx, unicast6Conn, messages)
})
defer func() {
_ = unicast6Conn.Close()
}()
}
} else {
module.StartServiceWorker("mdns udp6 unicast listener", 0, func(ctx context.Context) error {
return listenForDNSPackets(ctx, unicast6Conn, messages)
})
defer func() {
_ = unicast6Conn.Close()
}()
log.Warningf("resolver: no IPv6 stack detected, disabling IPv6 mDNS resolver")
}
// start message handler

View file

@ -236,8 +236,8 @@ func (brc *BasicResolverConn) init() {
// ReportFailure reports that an error occurred with this resolver.
func (brc *BasicResolverConn) ReportFailure() {
// Don't mark resolver as failed if we are offline.
if !netenv.Online() {
// don't mark failed if we are offline
return
}
@ -254,6 +254,11 @@ func (brc *BasicResolverConn) ReportFailure() {
// the fail.
brc.networkChangedFlag.Refresh()
}
// Report to netenv that a configured server failed.
if brc.resolver.Info.Source == ServerSourceConfigured {
netenv.ConnectedToDNS.UnSet()
}
}
// IsFailing returns if this resolver is currently failing.
@ -285,4 +290,9 @@ func (brc *BasicResolverConn) ResetFailure() {
defer brc.failLock.Unlock()
brc.fails = 0
}
// Report to netenv that a configured server succeeded.
if brc.resolver.Info.Source == ServerSourceConfigured {
netenv.ConnectedToDNS.Set()
}
}

View file

@ -8,6 +8,7 @@ import (
"time"
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
"github.com/safing/portbase/log"
)
@ -103,3 +104,57 @@ func TestBulkResolving(t *testing.T) {
t.Logf("total time taken: %s", time.Since(started))
}
// TestPublicSuffix checks the derived domain root and ICANN-space flag
// for a broad set of FQDNs: ICANN TLDs, private suffixes, wildcard
// rules, reverse-DNS zones and special-use edge case domains.
func TestPublicSuffix(t *testing.T) {
	t.Parallel()

	cases := []struct {
		fqdn       string
		domainRoot string
		icannSpace bool
	}{
		{"co.uk.", "", true},
		{"amazon.co.uk.", "amazon.co.uk.", true},
		{"books.amazon.co.uk.", "amazon.co.uk.", true},
		{"www.books.amazon.co.uk.", "amazon.co.uk.", true},
		{"com.", "", true},
		{"amazon.com.", "amazon.com.", true},
		{"example0.debian.net.", "example0.debian.net.", true},
		{"example1.debian.org.", "debian.org.", true},
		{"golang.dev.", "golang.dev.", true},
		{"golang.net.", "golang.net.", true},
		{"play.golang.org.", "golang.org.", true},
		{"gophers.in.space.museum.", "in.space.museum.", true},
		{"0emm.com.", "0emm.com.", true},
		{"a.0emm.com.", "", true},
		{"b.c.d.0emm.com.", "c.d.0emm.com.", true},
		{"org.", "", true},
		{"foo.org.", "foo.org.", true},
		{"foo.co.uk.", "foo.co.uk.", true},
		{"foo.dyndns.org.", "foo.dyndns.org.", true},
		{"foo.blogspot.co.uk.", "foo.blogspot.co.uk.", true},
		{"there.is.no.such-tld.", "no.such-tld.", false},
		{"www.some.bit.", "some.bit.", false},
		{"cromulent.", "", false},
		{"arpa.", "", true},
		{"in-addr.arpa.", "", true},
		{"1.in-addr.arpa.", "1.in-addr.arpa.", true},
		{"ip6.arpa.", "", true},
		{"1.ip6.arpa.", "1.ip6.arpa.", true},
		{"www.some.arpa.", "some.arpa.", true},
		{"www.some.home.arpa.", "home.arpa.", true},
		{".", "", false},
		{"", "", false},
		// Edge case domains.
		{"www.some.example.", "some.example.", true},
		{"www.some.invalid.", "some.invalid.", true},
		{"www.some.local.", "some.local.", true},
		{"www.some.localhost.", "some.localhost.", true},
		{"www.some.onion.", "some.onion.", false},
		{"www.some.test.", "some.test.", true},
	}

	for _, tc := range cases {
		testSuffix(t, tc.fqdn, tc.domainRoot, tc.icannSpace)
	}
}
// testSuffix initializes the public suffix data of a query for the
// given FQDN and asserts that the derived domain root and ICANN-space
// flag match the expected values.
func testSuffix(t *testing.T, fqdn, domainRoot string, icannSpace bool) {
	t.Helper()
	q := &Query{FQDN: fqdn}
	q.InitPublicSuffixData()
	assert.Equal(t, domainRoot, q.DomainRoot)
	assert.Equal(t, icannSpace, q.ICANNSpace)
}

View file

@ -1,6 +1,7 @@
package resolver
import (
"context"
"fmt"
"net"
"net/url"
@ -37,13 +38,14 @@ const (
)
var (
globalResolvers []*Resolver // all (global) resolvers
localResolvers []*Resolver // all resolvers that are in site-local or link-local IP ranges
systemResolvers []*Resolver // all resolvers that were assigned by the system
localScopes []*Scope // list of scopes with a list of local resolvers that can resolve the scope
activeResolvers map[string]*Resolver // lookup map of all resolvers
resolverInitDomains map[string]struct{} // a set with all domains of the dns resolvers
resolversLock sync.RWMutex
globalResolvers []*Resolver // all (global) resolvers
localResolvers []*Resolver // all resolvers that are in site-local or link-local IP ranges
systemResolvers []*Resolver // all resolvers that were assigned by the system
localScopes []*Scope // list of scopes with a list of local resolvers that can resolve the scope
activeResolvers map[string]*Resolver // lookup map of all resolvers
resolverInitDomains map[string]struct{} // a set with all domains of the dns resolvers
resolversLock sync.RWMutex
)
func indexOfScope(domain string, list []*Scope) int {
@ -364,8 +366,20 @@ func loadResolvers() {
// Resolve module error about missing resolvers.
module.Resolve(missingResolversErrorID)
// Check if settings were changed and clear name cache when they did.
newResolverConfig := configuredNameServers()
if len(currentResolverConfig) > 0 &&
!utils.StringSliceEqual(currentResolverConfig, newResolverConfig) {
module.StartWorker("clear dns cache", func(ctx context.Context) error {
log.Info("resolver: clearing dns cache due to changed resolver config")
_, err := clearNameCache(ctx)
return err
})
}
currentResolverConfig = newResolverConfig
newResolvers := append(
getConfiguredResolvers(configuredNameServers()),
getConfiguredResolvers(newResolverConfig),
getSystemResolvers()...,
)

View file

@ -2,11 +2,8 @@ package updates
import (
"context"
"errors"
"sync"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/info"
"github.com/safing/portbase/log"
@ -14,48 +11,95 @@ import (
"github.com/safing/portmaster/updates/helper"
)
// Database key for update information.
const (
	// versionsDBKey is the database key for update version information.
	versionsDBKey = "core:status/versions"

	// simpleVersionsDBKey is the database key for simple update version information.
	simpleVersionsDBKey = "core:status/simple-versions"
)
var (
versionExport *versions
versionExportDB = database.NewInterface(&database.Options{
Local: true,
Internal: true,
})
versionExportHook *database.RegisteredHook
)
// versions holds updates status information.
type versions struct {
// Versions holds update versions and status information.
type Versions struct {
record.Base
lock sync.Mutex
sync.Mutex
Core *info.Info
Resources map[string]*updater.Resource
Channel string
Beta bool
Staging bool
}
internalSave bool
// SimpleVersions holds simplified update versions and status information.
type SimpleVersions struct {
	record.Base
	sync.Mutex

	// Build holds the version information of the running binary.
	Build *info.Info
	// Resources maps resource IDs to their simplified version information.
	Resources map[string]*SimplifiedResourceVersion
	// Channel is the release channel that was active at start.
	Channel string
}
// SimplifiedResourceVersion holds version information about one resource.
type SimplifiedResourceVersion struct {
	// Version is the version number of the active or selected resource version.
	Version string
}
// GetVersions returns the update versions and status information.
// Resources must be locked when accessed.
func GetVersions() *Versions {
	// Assemble base info first, then derive the channel flags from the
	// release channel that was active at start.
	v := &Versions{
		Core:      info.GetInfo(),
		Resources: registry.Export(),
		Channel:   initialReleaseChannel,
	}
	v.Beta = initialReleaseChannel == helper.ReleaseChannelBeta
	v.Staging = initialReleaseChannel == helper.ReleaseChannelStaging
	return v
}
// GetSimpleVersions returns the simplified update versions and status information.
func GetSimpleVersions() *SimpleVersions {
	// Fill in the base information.
	simple := &SimpleVersions{
		Build:     info.GetInfo(),
		Resources: make(map[string]*SimplifiedResourceVersion),
		Channel:   initialReleaseChannel,
	}

	// Collect the version of every exported resource.
	for key, res := range registry.Export() {
		// Use a closure so the deferred unlock fires per iteration.
		func() {
			res.Lock()
			defer res.Unlock()

			// Prefer the currently active version, fall back to the
			// selected one.
			version := res.ActiveVersion
			if version == nil {
				version = res.SelectedVersion
			}
			if version == nil {
				return
			}
			simple.Resources[key] = &SimplifiedResourceVersion{
				Version: version.VersionNumber,
			}
		}()
	}

	return simple
}
func initVersionExport() (err error) {
// init export struct
versionExport = &versions{
internalSave: true,
Channel: initialReleaseChannel,
Beta: initialReleaseChannel == helper.ReleaseChannelBeta,
Staging: initialReleaseChannel == helper.ReleaseChannelStaging,
if err := GetVersions().save(); err != nil {
log.Warningf("updates: failed to export version information: %s", err)
}
versionExport.SetKey(versionsDBKey)
// attach hook to database
versionExportHook, err = database.RegisterHook(query.New(versionsDBKey), &exportHook{})
if err != nil {
return err
if err := GetSimpleVersions().save(); err != nil {
log.Warningf("updates: failed to export version information: %s", err)
}
return module.RegisterEventHook(
@ -66,71 +110,24 @@ func initVersionExport() (err error) {
)
}
func stopVersionExport() error {
return versionExportHook.Cancel()
// save writes the versions record to the database, assigning its
// database key first if it has not been set yet.
func (v *Versions) save() error {
	if !v.KeyIsSet() {
		v.SetKey(versionsDBKey)
	}
	return db.Put(v)
}
// save writes the simplified versions record to the database, assigning
// its database key first if it has not been set yet.
func (v *SimpleVersions) save() error {
	if !v.KeyIsSet() {
		v.SetKey(simpleVersionsDBKey)
	}
	return db.Put(v)
}
// export is an event hook.
func export(_ context.Context, _ interface{}) error {
// populate
versionExport.lock.Lock()
versionExport.Core = info.GetInfo()
versionExport.Resources = registry.Export()
versionExport.lock.Unlock()
// save
err := versionExportDB.Put(versionExport)
if err != nil {
log.Warningf("updates: failed to export versions: %s", err)
if err := GetVersions().save(); err != nil {
return err
}
return nil
}
// Lock locks the versionExport and all associated resources.
func (v *versions) Lock() {
// lock self
v.lock.Lock()
// lock all resources
for _, res := range v.Resources {
res.Lock()
}
}
// Lock unlocks the versionExport and all associated resources.
func (v *versions) Unlock() {
// unlock all resources
for _, res := range v.Resources {
res.Unlock()
}
// unlock self
v.lock.Unlock()
}
type exportHook struct {
database.HookBase
}
// UsesPrePut implements the Hook interface.
func (eh *exportHook) UsesPrePut() bool {
return true
}
var errInternalRecord = errors.New("may not modify internal record")
// PrePut implements the Hook interface.
func (eh *exportHook) PrePut(r record.Record) (record.Record, error) {
if r.IsWrapped() {
return nil, errInternalRecord
}
ve, ok := r.(*versions)
if !ok {
return nil, errInternalRecord
}
if !ve.internalSave {
return nil, errInternalRecord
}
return r, nil
return GetSimpleVersions().save()
}

View file

@ -1,6 +1,7 @@
package helper
import (
"errors"
"fmt"
"os"
"path/filepath"
@ -34,6 +35,9 @@ func EnsureChromeSandboxPermissions(reg *updater.ResourceRegistry) error {
var err error
pmElectronUpdate, err = reg.GetFile(identifier)
if err != nil {
if errors.Is(err, updater.ErrNotAvailableLocally) {
return nil
}
return fmt.Errorf("failed to get file: %w", err)
}

View file

@ -33,6 +33,12 @@ func SetIndexes(registry *updater.ResourceRegistry, releaseChannel string, delet
// Reset indexes before adding them (again).
registry.ResetIndexes()
// Add the intel index first, in order to be able to override it with the
// other indexes when needed.
registry.AddIndex(updater.Index{
Path: "all/intel/intel.json",
})
// Always add the stable index as a base.
registry.AddIndex(updater.Index{
Path: ReleaseChannelStable + ".json",
@ -85,13 +91,6 @@ func SetIndexes(registry *updater.ResourceRegistry, releaseChannel string, delet
}
}
// Add the intel index last, as it updates the fastest and should not be
// crippled by other faulty indexes. It can only specify versions for its
// scope anyway.
registry.AddIndex(updater.Index{
Path: "all/intel/intel.json",
})
// Set pre-release usage.
registry.SetUsePreReleases(usePreReleases)

View file

@ -7,6 +7,7 @@ import (
"runtime"
"time"
"github.com/safing/portbase/database"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
@ -48,6 +49,11 @@ var (
updateASAP bool
disableTaskSchedule bool
db = database.NewInterface(&database.Options{
Local: true,
Internal: true,
})
// UserAgent is an HTTP User-Agent that is used to add
// more context to requests made by the registry when
// fetching resources from the update server.
@ -55,6 +61,8 @@ var (
)
const (
updatesDirName = "updates"
updateFailed = "updates:failed"
updateSuccess = "updates:success"
)
@ -108,7 +116,7 @@ func start() error {
registry.UserAgent = userAgentFromFlag
}
// initialize
err := registry.Initialize(dataroot.Root().ChildDir("updates", 0o0755))
err := registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
if err != nil {
return err
}
@ -269,10 +277,13 @@ func checkForUpdates(ctx context.Context) (err error) {
func stop() error {
if registry != nil {
return registry.Cleanup()
err := registry.Cleanup()
if err != nil {
log.Warningf("updates: failed to clean up registry: %s", err)
}
}
return stopVersionExport()
return nil
}
// RootPath returns the root path used for storing updates.

View file

@ -230,7 +230,10 @@ func warnOnIncorrectParentPath() {
return
}
if parentName != expectedFileName {
log.Warningf("updates: parent process does not seem to be portmaster-start, name is %s", parentName)
// Only warn about this if not in dev mode.
if !devMode() {
log.Warningf("updates: parent process does not seem to be portmaster-start, name is %s", parentName)
}
// TODO(ppacher): once we released a new installer and folks had time
// to update we should send a module warning/hint to the