Implemented peer review comments

This commit is contained in:
Patrick Pacher 2020-04-14 11:14:04 +02:00
parent f96f8d8d6e
commit f630df0b1f
No known key found for this signature in database
GPG key ID: E8CD2DA160925A6D
8 changed files with 90 additions and 35 deletions

View file

@ -10,6 +10,7 @@ import (
"github.com/safing/portbase/log"
"github.com/safing/portmaster/intel/filterlist"
"github.com/safing/portmaster/intel/geoip"
"github.com/safing/portmaster/network/netutils"
"github.com/safing/portmaster/status"
)
@ -303,15 +304,12 @@ func (e *Entity) getIPLists() {
if ip == nil {
return
}
// abort if it's not a global unicast (not that IPv6 link local unicasts are treated
// as global)
if !ip.IsGlobalUnicast() {
return
}
// ingore linc local unicasts as well (not done by IsGlobalUnicast above).
if ip.IsLinkLocalUnicast() {
// only load lists for IP addresses that are classified as global.
if netutils.ClassifyIP(ip) != netutils.Global {
return
}
log.Debugf("intel: loading IP list for %s", ip)
e.loadIPListOnce.Do(func() {
list, err := filterlist.LookupIP(ip)

View file

@ -2,6 +2,7 @@ package filterlist
import (
"context"
"fmt"
"os"
"sort"
"strings"
@ -90,12 +91,33 @@ func processListFile(ctx context.Context, filter *scopedBloom, file *updater.Fil
g, ctx := errgroup.WithContext(ctx)
g.Go(func() error {
// startSafe runs fn inside the error group, wrapped
// in a function that recovers from panics and converts them to errors.
startSafe := func(fn func() error) {
g.Go(func() (err error) {
defer func() {
if x := recover(); x != nil {
if e, ok := x.(error); ok {
err = e
} else {
err = fmt.Errorf("%v", x)
}
}
}()
err = fn()
return err
})
}
startSafe(func() (err error) {
defer close(values)
return decodeFile(ctx, f, values)
err = decodeFile(ctx, f, values)
return
})
g.Go(func() error {
startSafe(func() error {
defer close(records)
for entry := range values {
if err := processEntry(ctx, filter, entry, records); err != nil {
@ -139,7 +161,7 @@ func processListFile(ctx context.Context, filter *scopedBloom, file *updater.Fil
return batchPut(nil)
}
startBatch = func() {
g.Go(processBatch)
startSafe(processBatch)
}
startBatch()

View file

@ -191,6 +191,8 @@ func updateListIndex() error {
return nil
}
// ResolveListIDs resolves a slice of source or category IDs into
// a slice of distinct source IDs.
func ResolveListIDs(ids []string) ([]string, error) {
index, err := getListIndexFromCache()

View file

@ -1,17 +1,25 @@
package filterlist
import "strings"
// LookupMap is a helper type for matching a list of endpoint sources
// against a map.
type LookupMap map[string]struct{}

// Match checks if any source in list is part of lm.
// All matches are joined into a single comma-separated string
// and returned. If nothing matches, an empty string is returned.
func (lm LookupMap) Match(list []string) string {
	// Pre-size for the worst case where every entry matches.
	matches := make([]string, 0, len(list))
	for _, l := range list {
		if _, ok := lm[l]; ok {
			matches = append(matches, l)
		}
	}
	if len(matches) == 0 {
		return ""
	}
	return strings.Join(matches, ", ")
}

View file

@ -101,7 +101,7 @@ func performUpdate(ctx context.Context) error {
// been updated now. Once we are done, start a worker
// for that purpose.
if cleanupRequired {
defer module.StartWorker("filterlist:cleanup", removeObsoleteFilterEntries)
defer module.StartWorker("filterlist:cleanup", removeAllObsoleteFilterEntries)
}
// try to save the highest version of our files.
@ -113,7 +113,20 @@ func performUpdate(ctx context.Context) error {
return nil
}
func removeObsoleteFilterEntries(_ context.Context) error {
func removeAllObsoleteFilterEntries(_ context.Context) error {
for {
done, err := removeObsoleteFilterEntries(1000)
if err != nil {
return err
}
if done {
return nil
}
}
}
func removeObsoleteFilterEntries(batchSize int) (bool, error) {
log.Infof("intel/filterlists: cleanup task started, removing obsolete filter list entries ...")
iter, err := cache.Query(
@ -124,20 +137,33 @@ func removeObsoleteFilterEntries(_ context.Context) error {
),
)
if err != nil {
return err
return false, err
}
keys := make([]string, 0, batchSize)
var cnt int
for r := range iter.Next {
cnt++
r.Meta().Delete()
if err := cache.Put(r); err != nil {
log.Errorf("intel/filterlists: failed to remove stale cache entry %q: %s", r.Key(), err)
keys = append(keys, r.Key())
if cnt == batchSize {
break
}
}
iter.Cancel()
for _, key := range keys {
if err := cache.Delete(key); err != nil {
log.Errorf("intel/filterlists: failed to remove stale cache entry %q: %s", key, err)
}
}
log.Debugf("intel/filterlists: successfully removed %d obsolete entries", cnt)
return nil
// If we removed fewer entries than the batch size, we are
// done and no more obsolete entries exist.
return cnt < batchSize, nil
}
// getUpgradableFiles returns a slice of filterlist files

View file

@ -5,6 +5,7 @@ import (
"fmt"
"sync"
"github.com/safing/portmaster/intel/filterlist"
"github.com/safing/portmaster/profile/endpoints"
)
@ -14,6 +15,7 @@ var (
cfgDefaultAction uint8
cfgEndpoints endpoints.Endpoints
cfgServiceEndpoints endpoints.Endpoints
cfgFilterLists []string
)
func registerConfigUpdater() error {
@ -60,6 +62,12 @@ func updateGlobalConfigProfile(ctx context.Context, data interface{}) error {
lastErr = err
}
list = cfgOptionFilterLists()
cfgFilterLists, err = filterlist.ResolveListIDs(list)
if err != nil {
lastErr = err
}
// build global profile for reference
profile := &Profile{
ID: "config",

View file

@ -63,6 +63,7 @@ func registerConfiguration() error {
Description: `The default filter action when nothing else permits or blocks a connection.`,
OptType: config.OptTypeString,
DefaultValue: "permit",
ExternalOptType: "string list",
ValidationRegex: "^(permit|ask|block)$",
})
if err != nil {

View file

@ -6,7 +6,6 @@ import (
"github.com/safing/portbase/log"
"github.com/safing/portmaster/intel/filterlist"
"github.com/safing/portmaster/status"
"github.com/tevino/abool"
@ -228,8 +227,8 @@ func (lp *LayeredProfile) MatchFilterLists(entity *intel.Entity) (result endpoin
log.Errorf("number of layers: %d", len(lp.layers))
for _, layer := range lp.layers {
if id := lookupMap.Match(layer.filterListIDs); id != "" {
return endpoints.Denied, id
if reason := lookupMap.Match(layer.filterListIDs); reason != "" {
return endpoints.Denied, reason
}
// only check the first layer that has filter list
@ -239,19 +238,10 @@ func (lp *LayeredProfile) MatchFilterLists(entity *intel.Entity) (result endpoin
}
}
// TODO(ppacher): re-resolving global list IDs is a bit overkill,
// add some caching here.
cfgLock.RLock()
defer cfgLock.RUnlock()
globalIds, err := filterlist.ResolveListIDs(cfgOptionFilterLists())
if err != nil {
log.Errorf("filter: failed to get global filter list IDs: %s", err)
return endpoints.NoMatch, ""
}
if id := lookupMap.Match(globalIds); id != "" {
return endpoints.Denied, id
if reason := lookupMap.Match(cfgFilterLists); reason != "" {
return endpoints.Denied, reason
}
return endpoints.NoMatch, ""