Mirror of https://github.com/safing/portmaster (synced 2025-09-01 18:19:12 +00:00)

Rename filterlist to filterlists
This commit is contained in:
parent f630df0b1f
commit e77d971259
17 changed files with 41 additions and 41 deletions

@@ -8,7 +8,7 @@ import (
 	"sync"

 	"github.com/safing/portbase/log"
-	"github.com/safing/portmaster/intel/filterlist"
+	"github.com/safing/portmaster/intel/filterlists"
 	"github.com/safing/portmaster/intel/geoip"
 	"github.com/safing/portmaster/network/netutils"
 	"github.com/safing/portmaster/status"
@@ -35,7 +35,7 @@ type Entity struct {
 	fetchLocationOnce sync.Once

 	Lists    []string
-	ListsMap filterlist.LookupMap
+	ListsMap filterlists.LookupMap

 	// we only load each data above at most once
 	loadDomainListOnce sync.Once
@@ -231,7 +231,7 @@ func (e *Entity) getDomainLists() {

 	e.loadDomainListOnce.Do(func() {
 		log.Debugf("intel: loading domain list for %s", domain)
-		list, err := filterlist.LookupDomain(domain)
+		list, err := filterlists.LookupDomain(domain)
 		if err != nil {
 			log.Errorf("intel: failed to get domain blocklists for %s: %s", domain, err)
 			e.loadDomainListOnce = sync.Once{}
@@ -255,7 +255,7 @@ func (e *Entity) getASNLists() {

 	log.Debugf("intel: loading ASN list for %d", asn)
 	e.loadAsnListOnce.Do(func() {
-		list, err := filterlist.LookupASNString(fmt.Sprintf("%d", asn))
+		list, err := filterlists.LookupASNString(fmt.Sprintf("%d", asn))
 		if err != nil {
 			log.Errorf("intel: failed to get ASN blocklist for %d: %s", asn, err)
 			e.loadAsnListOnce = sync.Once{}
@@ -279,7 +279,7 @@ func (e *Entity) getCountryLists() {

 	log.Debugf("intel: loading country list for %s", country)
 	e.loadCoutryListOnce.Do(func() {
-		list, err := filterlist.LookupCountry(country)
+		list, err := filterlists.LookupCountry(country)
 		if err != nil {
 			log.Errorf("intel: failed to load country blocklist for %s: %s", country, err)
 			e.loadCoutryListOnce = sync.Once{}
@@ -312,7 +312,7 @@ func (e *Entity) getIPLists() {

 	log.Debugf("intel: loading IP list for %s", ip)
 	e.loadIPListOnce.Do(func() {
-		list, err := filterlist.LookupIP(ip)
+		list, err := filterlists.LookupIP(ip)

 		if err != nil {
 			log.Errorf("intel: failed to get IP blocklist for %s: %s", ip.String(), err)
@@ -335,7 +335,7 @@ func (e *Entity) GetLists() ([]string, bool) {
 }

 // GetListsMap is like GetLists but returns a lookup map for list IDs.
-func (e *Entity) GetListsMap() (filterlist.LookupMap, bool) {
+func (e *Entity) GetListsMap() (filterlists.LookupMap, bool) {
 	e.getLists()

 	if e.ListsMap == nil {
@@ -361,8 +361,8 @@ func mergeStringList(a, b []string) []string {
 	return res
 }

-func buildLookupMap(l []string) filterlist.LookupMap {
-	m := make(filterlist.LookupMap, len(l))
+func buildLookupMap(l []string) filterlists.LookupMap {
+	m := make(filterlists.LookupMap, len(l))

 	for _, s := range l {
 		m[s] = struct{}{}

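The buildLookupMap hunk above implies that filterlists.LookupMap is a plain string set, i.e. a map keyed by list ID with empty-struct values. A minimal standalone sketch of that pattern, using a local type and placeholder list IDs (an assumption, not the package's own definition):

package main

import "fmt"

// lookupMap mirrors the shape implied by buildLookupMap: a set of list IDs
// keyed by ID, with empty structs as values so membership checks are cheap.
type lookupMap map[string]struct{}

func buildLookupMap(ids []string) lookupMap {
	m := make(lookupMap, len(ids))
	for _, s := range ids {
		m[s] = struct{}{}
	}
	return m
}

func main() {
	m := buildLookupMap([]string{"TRAC", "MAL"}) // placeholder list IDs
	_, onList := m["MAL"]
	fmt.Println(onList) // true
}

GetListsMap returns such a map alongside an ok flag, so callers can test list membership with a single map lookup instead of scanning the Lists slice.
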
@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"encoding/hex"
@@ -59,7 +59,7 @@ func (bf *scopedBloom) getBloomForType(entityType string) (*ring.Ring, error) {
 	case "country":
 		r = bf.country
 	default:
-		return nil, fmt.Errorf("unsupported filterlist entity type %q", entityType)
+		return nil, fmt.Errorf("unsupported filterlists entity type %q", entityType)
 	}

 	return r, nil

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"fmt"

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"context"
@@ -78,7 +78,7 @@ func isLoaded() bool {
 }

 // processListFile opens the latest version of file and decodes it's DSDL
-// content. It calls processEntry for each decoded filterlist entry.
+// content. It calls processEntry for each decoded filterlists entry.
 func processListFile(ctx context.Context, filter *scopedBloom, file *updater.File) error {
 	f, err := os.Open(file.Path())
 	if err != nil {

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"compress/gzip"
@@ -17,7 +17,7 @@ type listEntry struct {
 	Type string `json:"type"`
 }

-// decodeFile decodes a DSDL filterlist file and sends decoded entities to
+// decodeFile decodes a DSDL filterlists file and sends decoded entities to
 // ch. It blocks until all list entries have been consumed or ctx is cancelled.
 func decodeFile(ctx context.Context, r io.Reader, ch chan<- *listEntry) error {
 	compressed, format, err := parseHeader(r)

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"fmt"
@@ -37,7 +37,7 @@ type Category struct {
 	Description string `json:"description,omitempty"`
 }

-// Source defines an external filterlist source.
+// Source defines an external filterlists source.
 type Source struct {
 	// ID is a unique ID for the source. Entities always reference the
 	// sources they have been observed in using this ID. Refer to the
@@ -56,7 +56,7 @@ type Source struct {
 	// to the Type definition for more information and well-known types.
 	Type string `json:"type"`

-	// URL points to the filterlist file.
+	// URL points to the filterlists file.
 	URL string `json:"url"`

 	// Category holds the unique ID of a category the source belongs to. Since

@@ -1,15 +1,15 @@
-package filterlist
+package filterlists

 const (
 	cacheDBPrefix = "cache:intel/filterlists"

 	// filterListCacheVersionKey is used to store the highest version
-	// of a filterlist file (base, intermediate or urgent) in the
+	// of a filterlists file (base, intermediate or urgent) in the
 	// cache database. It's used to decide if the cache database and
 	// bloomfilters need to be resetted and rebuilt.
 	filterListCacheVersionKey = cacheDBPrefix + "/version"

-	// filterListIndexKey is used to store the filterlist index.
+	// filterListIndexKey is used to store the filterlists index.
 	filterListIndexKey = cacheDBPrefix + "/index"

 	// filterListKeyPrefix is the prefix inside that cache database

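These constants concatenate into the concrete cache database keys. A minimal sketch that only spells out the resulting strings, using the values from the hunk above:

package main

import "fmt"

// Values copied from the keys hunk above; the prefix already uses the
// "filterlists" spelling, so only the package name and comments are renamed.
const (
	cacheDBPrefix             = "cache:intel/filterlists"
	filterListCacheVersionKey = cacheDBPrefix + "/version"
	filterListIndexKey        = cacheDBPrefix + "/index"
)

func main() {
	fmt.Println(filterListCacheVersionKey) // cache:intel/filterlists/version
	fmt.Println(filterListIndexKey)        // cache:intel/filterlists/index
}

Since the prefix already carried the plural spelling, the rename does not change the keys stored in the cache database.
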
@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"errors"
@@ -18,7 +18,7 @@ import (
 func lookupBlockLists(entity, value string) ([]string, error) {
 	key := makeListCacheKey(entity, value)
 	if !isLoaded() {
-		log.Warningf("intel/filterlist: not searching for %s because filterlists not loaded", key)
+		log.Warningf("intel/filterlists: not searching for %s because filterlists not loaded", key)
 		// filterLists have not yet been loaded so
 		// there's no point querying into the cache
 		// database.
@@ -32,7 +32,7 @@ func lookupBlockLists(entity, value string) ([]string, error) {
 		return nil, nil
 	}

-	log.Debugf("intel/filterlist: searching for entries with %s", key)
+	log.Debugf("intel/filterlists: searching for entries with %s", key)
 	entry, err := getEntityRecordByKey(key)
 	if err != nil {
 		if err == database.ErrNotFound {

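The exported lookup helpers keep their names and only move to the renamed import path. A hypothetical caller sketch, assuming LookupDomain returns ([]string, error) as the call sites in the entity.go hunks suggest; the domain is a placeholder:

package main

import (
	"fmt"

	"github.com/safing/portmaster/intel/filterlists"
)

func main() {
	// Returns the IDs of the filter lists the domain appears on, or an error
	// if the lists are not loaded or the cache lookup fails (assumption).
	lists, err := filterlists.LookupDomain("example.com.")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("domain appears on lists:", lists)
}
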
@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import "strings"

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 /*

@@ -63,7 +63,7 @@ func TestLookupDomain(t *testing.T) {
 }

 // testMarkNotLoaded ensures that functions believe
-// filterlist are not yet loaded. It returns a
+// filterlists are not yet loaded. It returns a
 // func that restores the previous state.
 func testMarkNotLoaded() func() {
 	if isLoaded() {

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"context"
@@ -23,7 +23,7 @@ var (
 )

 func init() {
-	module = modules.Register("filterlist", prep, start, nil, "core", "netenv")
+	module = modules.Register("filterlists", prep, start, nil, "core", "netenv")
 }

 func prep() error {

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 /*
 func TestMain(m *testing.M) {

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"fmt"

@@ -1,4 +1,4 @@
-package filterlist
+package filterlists

 import (
 	"context"
@@ -97,11 +97,11 @@ func performUpdate(ctx context.Context) error {
 	}

 	// if we processed the base file we need to perform
-	// some cleanup on filterlist entities that have not
+	// some cleanup on filterlists entities that have not
 	// been updated now. Once we are done, start a worker
 	// for that purpose.
 	if cleanupRequired {
-		defer module.StartWorker("filterlist:cleanup", removeAllObsoleteFilterEntries)
+		defer module.StartWorker("filterlists:cleanup", removeAllObsoleteFilterEntries)
 	}

 	// try to save the highest version of our files.
@@ -166,7 +166,7 @@ func removeObsoleteFilterEntries(batchSize int) (bool, error) {
 	return cnt < batchSize, nil
 }

-// getUpgradableFiles returns a slice of filterlist files
+// getUpgradableFiles returns a slice of filterlists files
 // that should be updated. The files MUST be updated and
 // processed in the returned order!
 func getUpgradableFiles() ([]*updater.File, error) {

@@ -10,5 +10,5 @@ var (
 )

 func init() {
-	Module = modules.Register("intel", nil, nil, nil, "geoip", "filterlist")
+	Module = modules.Register("intel", nil, nil, nil, "geoip", "filterlists")
 }

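The string passed to modules.Register doubles as the name other modules list as a dependency, which is why the rename touches both registration sites: the filterlists module registers itself under the new name, and the intel module now depends on "filterlists" instead of "filterlist". A toy sketch of that naming relationship (not the portbase modules API, just an illustration):

package main

import "fmt"

// module models only the naming relationship visible in the two Register
// calls above: a module registers under a name, and dependents reference
// that exact string.
type module struct {
	name string
	deps []string
}

var registry = map[string]*module{}

func register(name string, deps ...string) *module {
	m := &module{name: name, deps: deps}
	registry[name] = m
	return m
}

func main() {
	register("core")
	register("netenv")
	register("geoip")
	register("filterlists", "core", "netenv")
	intel := register("intel", "geoip", "filterlists")

	for _, dep := range intel.deps {
		if _, ok := registry[dep]; !ok {
			fmt.Println("unresolved dependency:", dep) // would fire if the two names drifted apart
			return
		}
	}
	fmt.Println("intel dependencies resolve:", intel.deps)
}
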
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"sync"

-	"github.com/safing/portmaster/intel/filterlist"
+	"github.com/safing/portmaster/intel/filterlists"
 	"github.com/safing/portmaster/profile/endpoints"
 )

@@ -63,7 +63,7 @@ func updateGlobalConfigProfile(ctx context.Context, data interface{}) error {
 	}

 	list = cfgOptionFilterLists()
-	cfgFilterLists, err = filterlist.ResolveListIDs(list)
+	cfgFilterLists, err = filterlists.ResolveListIDs(list)
 	if err != nil {
 		lastErr = err
 	}

@@ -14,7 +14,7 @@ import (

 	"github.com/safing/portbase/config"
 	"github.com/safing/portbase/database/record"
-	"github.com/safing/portmaster/intel/filterlist"
+	"github.com/safing/portmaster/intel/filterlists"
 	"github.com/safing/portmaster/profile/endpoints"
 )

@@ -144,7 +144,7 @@ func (profile *Profile) parseConfig() error {

 	list, ok = profile.configPerspective.GetAsStringArray(CfgOptionFilterListKey)
 	if ok {
-		profile.filterListIDs, err = filterlist.ResolveListIDs(list)
+		profile.filterListIDs, err = filterlists.ResolveListIDs(list)
 		if err != nil {
 			lastErr = err
 		}

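Both profile call sites resolve user-configured list IDs through the renamed package. A hypothetical usage sketch, assuming ResolveListIDs takes and returns a []string (plus an error) as the two call sites above suggest; the IDs are placeholders:

package main

import (
	"fmt"

	"github.com/safing/portmaster/intel/filterlists"
)

func main() {
	// Resolve configured filter list identifiers into the concrete list IDs
	// used for lookups (assumed behavior; signature inferred from the diff).
	ids, err := filterlists.ResolveListIDs([]string{"TRAC", "MAL"})
	if err != nil {
		fmt.Println("could not resolve list IDs:", err)
		return
	}
	fmt.Println("resolved list IDs:", ids)
}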