Mirror of https://github.com/safing/portmaster (synced 2025-09-02 02:29:12 +00:00)

Commit: f630df0b1f
Parent: f96f8d8d6e
Implemented peer review comments

8 changed files with 90 additions and 35 deletions

@@ -10,6 +10,7 @@ import (
 	"github.com/safing/portbase/log"
 	"github.com/safing/portmaster/intel/filterlist"
 	"github.com/safing/portmaster/intel/geoip"
+	"github.com/safing/portmaster/network/netutils"
 	"github.com/safing/portmaster/status"
 )

@@ -303,15 +304,12 @@ func (e *Entity) getIPLists() {
 	if ip == nil {
 		return
 	}
-	// abort if it's not a global unicast (not that IPv6 link local unicasts are treated
-	// as global)
-	if !ip.IsGlobalUnicast() {
-		return
-	}
-	// ingore linc local unicasts as well (not done by IsGlobalUnicast above).
-	if ip.IsLinkLocalUnicast() {
+	// only load lists for IP addresses that are classified as global.
+	if netutils.ClassifyIP(ip) != netutils.Global {
 		return
 	}

 	log.Debugf("intel: loading IP list for %s", ip)
 	e.loadIPListOnce.Do(func() {
 		list, err := filterlist.LookupIP(ip)

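The hunk above replaces the stacked IsGlobalUnicast/IsLinkLocalUnicast checks with a single scope comparison against netutils.ClassifyIP. As a rough illustration of why one classification helper is easier to get right, here is a minimal, hypothetical classifier built only on the standard net package; it is a sketch, not the actual netutils implementation, and the scope names and the use of IsPrivate (Go 1.17+) are assumptions.

package main

import (
	"fmt"
	"net"
)

type scope int

const (
	scopeInvalid scope = iota
	scopeLoopback
	scopeLinkLocal
	scopeMulticast
	scopeSiteLocal
	scopeGlobal
)

// classifyIP sorts an address into exactly one scope, so callers only need a
// single comparison instead of combining several net.IP predicates.
func classifyIP(ip net.IP) scope {
	switch {
	case ip == nil:
		return scopeInvalid
	case ip.IsLoopback():
		return scopeLoopback
	case ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast():
		return scopeLinkLocal
	case ip.IsMulticast():
		return scopeMulticast
	case ip.IsPrivate(): // RFC 1918 and fc00::/7 ranges
		return scopeSiteLocal
	case ip.IsGlobalUnicast():
		return scopeGlobal
	default:
		return scopeInvalid
	}
}

func main() {
	for _, s := range []string{"1.1.1.1", "192.168.0.10", "fe80::1"} {
		fmt.Printf("%-14s -> scope %d\n", s, classifyIP(net.ParseIP(s)))
	}
}
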
@@ -2,6 +2,7 @@ package filterlist

 import (
 	"context"
+	"fmt"
 	"os"
 	"sort"
 	"strings"

@@ -90,12 +91,33 @@ func processListFile(ctx context.Context, filter *scopedBloom, file *updater.Fil

 	g, ctx := errgroup.WithContext(ctx)

-	g.Go(func() error {
+	// startSafe runs fn inside the error group but wrapped
+	// in recovered function.
+	startSafe := func(fn func() error) {
+		g.Go(func() (err error) {
+			defer func() {
+				if x := recover(); x != nil {
+					if e, ok := x.(error); ok {
+						err = e
+					} else {
+						err = fmt.Errorf("%v", x)
+					}
+				}
+			}()
+
+			err = fn()
+			return err
+		})
+	}
+
+	startSafe(func() (err error) {
 		defer close(values)
-		return decodeFile(ctx, f, values)
+		err = decodeFile(ctx, f, values)
+		return
 	})

-	g.Go(func() error {
+	startSafe(func() error {
 		defer close(records)
 		for entry := range values {
 			if err := processEntry(ctx, filter, entry, records); err != nil {

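The startSafe helper added above turns a panic inside an errgroup goroutine into a returned error instead of taking down the whole process. Below is a minimal, self-contained sketch of the same pattern using golang.org/x/sync/errgroup directly; the names are illustrative and not Portmaster's own code.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// startSafe runs fn in the group and recovers a panic into the returned error.
func startSafe(g *errgroup.Group, fn func() error) {
	g.Go(func() (err error) {
		defer func() {
			if x := recover(); x != nil {
				if e, ok := x.(error); ok {
					err = e
					return
				}
				err = fmt.Errorf("%v", x)
			}
		}()
		return fn()
	})
}

func main() {
	var g errgroup.Group
	startSafe(&g, func() error { panic(errors.New("decoder blew up")) })

	// Wait returns the recovered panic as a normal error.
	fmt.Println(g.Wait())
}
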
@@ -139,7 +161,7 @@ func processListFile(ctx context.Context, filter *scopedBloom, file *updater.Fil
 		return batchPut(nil)
 	}
 	startBatch = func() {
-		g.Go(processBatch)
+		startSafe(processBatch)
 	}

 	startBatch()

@@ -191,6 +191,8 @@ func updateListIndex() error {
 	return nil
 }

+// ResolveListIDs resolves a slice of source or category IDs into
+// a slice of distinct source IDs.
 func ResolveListIDs(ids []string) ([]string, error) {
 	index, err := getListIndexFromCache()

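Per the new doc comment, ResolveListIDs expands a mix of source and category IDs into a distinct set of source IDs. A hypothetical sketch of that kind of expansion follows; the category index below is invented for illustration, while the real function reads the cached list index.

package main

import (
	"fmt"
	"sort"
)

// resolveListIDs expands category IDs into their member source IDs and
// de-duplicates the result.
func resolveListIDs(ids []string, categories map[string][]string) []string {
	seen := make(map[string]struct{})
	for _, id := range ids {
		if sources, isCategory := categories[id]; isCategory {
			for _, s := range sources {
				seen[s] = struct{}{}
			}
			continue
		}
		seen[id] = struct{}{}
	}

	out := make([]string, 0, len(seen))
	for s := range seen {
		out = append(out, s)
	}
	sort.Strings(out)
	return out
}

func main() {
	categories := map[string][]string{"TRACKERS": {"TRAC1", "TRAC2"}}
	fmt.Println(resolveListIDs([]string{"TRACKERS", "MAL", "TRAC1"}, categories))
	// prints [MAL TRAC1 TRAC2]
}
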
@@ -1,17 +1,25 @@
 package filterlist

+import "strings"
+
 // LookupMap is a helper type for matching a list of endpoint sources
 // against a map.
 type LookupMap map[string]struct{}

-// Match returns Denied if a source in `list` is part of lm.
+// Match checks if a source in `list` is part of lm.
+// Matches are joined to string and returned.
 // If nothing is found, an empty string is returned.
 func (lm LookupMap) Match(list []string) string {
+	matches := make([]string, 0, len(list))
 	for _, l := range list {
 		if _, ok := lm[l]; ok {
-			return l
+			matches = append(matches, l)
 		}
 	}

-	return ""
+	if len(matches) == 0 {
+		return ""
+	}
+
+	return strings.Join(matches, ", ")
 }

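Match previously returned only the first source ID found in the map; it now collects every match and joins them with ", ", so a block reason can name all lists that flagged an entity. A small usage sketch of the new behavior, with made-up list IDs:

package main

import (
	"fmt"
	"strings"
)

type LookupMap map[string]struct{}

// Match returns all entries of list present in lm, joined by ", ".
func (lm LookupMap) Match(list []string) string {
	matches := make([]string, 0, len(list))
	for _, l := range list {
		if _, ok := lm[l]; ok {
			matches = append(matches, l)
		}
	}
	if len(matches) == 0 {
		return ""
	}
	return strings.Join(matches, ", ")
}

func main() {
	entityLists := LookupMap{"MAL": {}, "TRAC": {}}

	// the old behavior would have reported just "TRAC"; now both matches show up.
	fmt.Println(entityLists.Match([]string{"TRAC", "ADS", "MAL"})) // TRAC, MAL
}
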
@@ -101,7 +101,7 @@ func performUpdate(ctx context.Context) error {
 	// been updated now. Once we are done, start a worker
 	// for that purpose.
 	if cleanupRequired {
-		defer module.StartWorker("filterlist:cleanup", removeObsoleteFilterEntries)
+		defer module.StartWorker("filterlist:cleanup", removeAllObsoleteFilterEntries)
 	}

 	// try to save the highest version of our files.

@@ -113,7 +113,20 @@ func performUpdate(ctx context.Context) error {
 	return nil
 }

-func removeObsoleteFilterEntries(_ context.Context) error {
+func removeAllObsoleteFilterEntries(_ context.Context) error {
+	for {
+		done, err := removeObsoleteFilterEntries(1000)
+		if err != nil {
+			return err
+		}
+
+		if done {
+			return nil
+		}
+	}
+}
+
+func removeObsoleteFilterEntries(batchSize int) (bool, error) {
 	log.Infof("intel/filterlists: cleanup task started, removing obsolete filter list entries ...")

 	iter, err := cache.Query(

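removeAllObsoleteFilterEntries now drains stale entries in fixed-size batches until a pass removes fewer records than the batch size. The loop shape in isolation looks like the sketch below, where the deleteBatch callback is a stand-in for the real removeObsoleteFilterEntries:

package main

import "fmt"

// cleanupAll keeps deleting batches until the store reports it is done.
func cleanupAll(deleteBatch func(batchSize int) (done bool, err error)) error {
	for {
		done, err := deleteBatch(1000)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
	}
}

func main() {
	remaining := 2500
	err := cleanupAll(func(batchSize int) (bool, error) {
		n := batchSize
		if remaining < n {
			n = remaining
		}
		remaining -= n
		fmt.Printf("removed %d entries, %d left\n", n, remaining)
		// fewer removals than the batch size means nothing is left.
		return n < batchSize, nil
	})
	fmt.Println("cleanup finished, err =", err)
}
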
@@ -124,20 +137,33 @@ func removeObsoleteFilterEntries(_ context.Context) error {
 		),
 	)
 	if err != nil {
-		return err
+		return false, err
 	}

+	keys := make([]string, 0, batchSize)
+
 	var cnt int
 	for r := range iter.Next {
 		cnt++
-		r.Meta().Delete()
-		if err := cache.Put(r); err != nil {
-			log.Errorf("intel/filterlists: failed to remove stale cache entry %q: %s", r.Key(), err)
+		keys = append(keys, r.Key())
+
+		if cnt == batchSize {
+			break
+		}
+	}
+	iter.Cancel()
+
+	for _, key := range keys {
+		if err := cache.Delete(key); err != nil {
+			log.Errorf("intel/filterlists: failed to remove stale cache entry %q: %s", key, err)
 		}
 	}

 	log.Debugf("intel/filterlists: successfully removed %d obsolete entries", cnt)
-	return nil
+
+	// if we removed less entries that the batch size we
+	// are done and no more entries exist
+	return cnt < batchSize, nil
 }

 // getUpgradableFiles returns a slice of filterlist files

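Inside a batch, records are no longer mutated while the query iterator is still running: keys are collected first, the iterator is cancelled, and only then are the entries deleted. A generic sketch of that collect-cancel-delete pattern with a hypothetical iterator and store (not the actual cache database API):

package main

import "fmt"

type record struct{ key string }

// removeBatch collects at most batchSize keys, stops iterating, and deletes
// them afterwards so the store is never modified mid-iteration. It reports
// whether this pass removed everything that was left.
func removeBatch(iterate func(yield func(record) bool), del func(key string) error, batchSize int) (bool, error) {
	keys := make([]string, 0, batchSize)
	cnt := 0
	iterate(func(r record) bool {
		cnt++
		keys = append(keys, r.key)
		return cnt < batchSize // returning false cancels the iteration
	})

	for _, key := range keys {
		if err := del(key); err != nil {
			fmt.Printf("failed to remove stale entry %q: %s\n", key, err)
		}
	}

	return cnt < batchSize, nil
}

func main() {
	stale := map[string]struct{}{"a": {}, "b": {}, "c": {}}
	iterate := func(yield func(record) bool) {
		for k := range stale {
			if !yield(record{key: k}) {
				return
			}
		}
	}
	del := func(k string) error { delete(stale, k); return nil }

	done, _ := removeBatch(iterate, del, 2)
	fmt.Println("done:", done, "remaining:", len(stale))
}
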
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"sync"

+	"github.com/safing/portmaster/intel/filterlist"
 	"github.com/safing/portmaster/profile/endpoints"
 )

@@ -14,6 +15,7 @@ var (
 	cfgDefaultAction    uint8
 	cfgEndpoints        endpoints.Endpoints
 	cfgServiceEndpoints endpoints.Endpoints
+	cfgFilterLists      []string
 )

 func registerConfigUpdater() error {

@@ -60,6 +62,12 @@ func updateGlobalConfigProfile(ctx context.Context, data interface{}) error {
 		lastErr = err
 	}

+	list = cfgOptionFilterLists()
+	cfgFilterLists, err = filterlist.ResolveListIDs(list)
+	if err != nil {
+		lastErr = err
+	}
+
 	// build global profile for reference
 	profile := &Profile{
 		ID: "config",

@@ -63,6 +63,7 @@ func registerConfiguration() error {
 		Description:     `The default filter action when nothing else permits or blocks a connection.`,
 		OptType:         config.OptTypeString,
 		DefaultValue:    "permit",
+		ExternalOptType: "string list",
 		ValidationRegex: "^(permit|ask|block)$",
 	})
 	if err != nil {

@@ -6,7 +6,6 @@ import (

 	"github.com/safing/portbase/log"

-	"github.com/safing/portmaster/intel/filterlist"
 	"github.com/safing/portmaster/status"

 	"github.com/tevino/abool"

@@ -228,8 +227,8 @@ func (lp *LayeredProfile) MatchFilterLists(entity *intel.Entity) (result endpoin

 	log.Errorf("number of layers: %d", len(lp.layers))
 	for _, layer := range lp.layers {
-		if id := lookupMap.Match(layer.filterListIDs); id != "" {
-			return endpoints.Denied, id
+		if reason := lookupMap.Match(layer.filterListIDs); reason != "" {
+			return endpoints.Denied, reason
 		}

 		// only check the first layer that has filter list

@@ -239,19 +238,10 @@ func (lp *LayeredProfile) MatchFilterLists(entity *intel.Entity) (result endpoin
 		}
 	}

-	// TODO(ppacher): re-resolving global list IDs is a bit overkill,
-	// add some caching here.
 	cfgLock.RLock()
 	defer cfgLock.RUnlock()

-	globalIds, err := filterlist.ResolveListIDs(cfgOptionFilterLists())
-	if err != nil {
-		log.Errorf("filter: failed to get global filter list IDs: %s", err)
-		return endpoints.NoMatch, ""
-	}
-
-	if id := lookupMap.Match(globalIds); id != "" {
-		return endpoints.Denied, id
+	if reason := lookupMap.Match(cfgFilterLists); reason != "" {
+		return endpoints.Denied, reason
 	}

 	return endpoints.NoMatch, ""
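Taken together with the config-update hunk above, this resolves the TODO about re-resolving the global list IDs on every match: the IDs are now resolved once when the configuration changes and cached in cfgFilterLists behind cfgLock. A compact sketch of that resolve-once, read-many pattern follows; the names and the resolver are illustrative stand-ins, not the actual Portmaster symbols.

package main

import (
	"fmt"
	"sync"
)

var (
	cfgLock        sync.RWMutex
	cfgFilterLists []string
)

// updateConfig resolves the selected lists once and caches the result.
func updateConfig(selected []string, resolve func([]string) ([]string, error)) error {
	resolved, err := resolve(selected)
	if err != nil {
		return err
	}
	cfgLock.Lock()
	cfgFilterLists = resolved
	cfgLock.Unlock()
	return nil
}

// matchFilterLists only takes a read lock; no resolving happens per call.
func matchFilterLists(lookup map[string]struct{}) string {
	cfgLock.RLock()
	defer cfgLock.RUnlock()
	for _, id := range cfgFilterLists {
		if _, ok := lookup[id]; ok {
			return id
		}
	}
	return ""
}

func main() {
	resolve := func(ids []string) ([]string, error) { return []string{"TRAC1", "TRAC2", "MAL"}, nil }
	_ = updateConfig([]string{"TRACKERS", "MAL"}, resolve)
	fmt.Println(matchFilterLists(map[string]struct{}{"TRAC2": {}}))
}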
|