mirror of https://github.com/safing/portmaster
synced 2025-09-01 18:19:12 +00:00
Merge pull request #1347 from safing/feature/async-failing-resolver-checking
Check failing resolvers asynchronously
commit a1b8191b23
9 changed files with 317 additions and 192 deletions
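For orientation, the core of the change: instead of backing off for a fixed duration after a burst of errors, a resolver now counts failures inside an observation window and, once marked as failing, is re-tested asynchronously by a background task (periodically and on network changes). With the values introduced in resolver/failing.go below, FailThreshold = 5 and FailObserveDuration = 5 * 10s = 50s, so a resolver is marked as failing once more than five errors are reported within a 50-second window. The following is a condensed, self-contained sketch of just that bookkeeping; apart from the threshold and window taken from the diff, the names and the simplified lock-free structure are illustrative and not the actual implementation:

package main

import (
	"fmt"
	"time"
)

// Values mirroring resolver/failing.go below.
const failThreshold = 5

var failObserveDuration = time.Duration(failThreshold) * 10 * time.Second // 50s

// failTracker is a simplified stand-in for BasicResolverConn's failure fields.
type failTracker struct {
	fails   int
	started time.Time
	failing bool
}

// reportFailure mirrors ReportFailure: count errors within the observation
// window and mark the tracker as failing once the threshold is exceeded.
func (t *failTracker) reportFailure(now time.Time) {
	if t.failing {
		return
	}
	// Outside the observation window (or first failure): start a new window.
	if now.Sub(t.started) > failObserveDuration {
		t.fails = 1
		t.started = now
		return
	}
	t.fails++
	if t.fails > failThreshold {
		t.failing = true
	}
}

func main() {
	t := &failTracker{}
	start := time.Now()
	// Six failures within one window trip the flag: the first starts the
	// window, the next five push the count past the threshold.
	for i := 0; i < 6; i++ {
		t.reportFailure(start.Add(time.Duration(i) * time.Second))
	}
	fmt.Println("failing:", t.failing) // true
}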
@@ -63,7 +63,7 @@ var (
},
},
}
manualDNSSetupRequiredMessage = "You have disabled Seamless DNS Integration. As a result, Portmaster can no longer protect you or filter connections reliably. To fix this, you have to manually configure %s as the DNS Server in your system and in any conflicting application. This message will disappear 10 seconds after correct configuration."
manualDNSSetupRequiredMessage = "You have disabled Seamless DNS Integration. As a result, Portmaster can no longer protect you or filter connections reliably. To fix this, you have to manually configure %s as the DNS Server in your system and in any conflicting application. This message will disappear some time after correct configuration."

secureDNSBypassIssue = &appIssue{
id: "compat:secure-dns-bypass-%s",
@@ -645,16 +645,17 @@ Pro Tip: You can use "#" to add a comment to a rule.
// Bypass prevention
err = config.Register(&config.Option{
Name: "Block Bypassing",
Name: "Block Secure DNS Bypassing",
Key: CfgOptionPreventBypassingKey,
Description: `Prevent apps from bypassing Portmaster's privacy protections.
If Block Bypassing is disabled, Portmaster can no longer protect you or filter connections from the affected applications.
Description: `Prevent apps from bypassing Portmaster's Secure DNS resolver.
If disabled, Portmaster might have reduced information to correctly enforce rules and filter lists.
Important: Portmaster's firewall itself cannot be bypassed.

Current Features:
- Disable Firefox' internal DNS-over-HTTPs resolver
- Block direct access to public DNS resolvers

Please note that if you are using the system resolver, bypass attempts might be additionally blocked there too.`,
Please note that DNS bypass attempts might be additionally blocked in the system resolver too.`,
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelUser,
ReleaseLevel: config.ReleaseLevelStable,
@@ -178,9 +178,9 @@ When referring to the DNS server using a domain name, as with DoH, it is highly
configuredNameServers = config.Concurrent.GetAsStringArray(CfgOptionNameServersKey, defaultNameServers)

err = config.Register(&config.Option{
Name: "Ignore Failing DNS Servers Duration",
Name: "Retry Failing DNS Servers",
Key: CfgOptionNameserverRetryRateKey,
Description: "Duration in seconds how long a failing DNS server should not be retried.",
Description: "Duration in seconds how often a failing DNS server should be retried. This is done continuously in the background.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelStable,
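For context on this setting: checkFailingResolvers (added in resolver/failing.go below) reschedules itself nameserverRetryRate() seconds into the future, so this option directly controls how often the background re-check runs. A minimal, self-contained sketch of that scheduling arithmetic; the stubbed getter and the 600-second value are illustrative assumptions and not taken from this diff:

package main

import (
	"fmt"
	"time"
)

// Stub for the package-level getter bound to CfgOptionNameserverRetryRateKey;
// the 600-second fallback is an illustrative assumption.
func nameserverRetryRate() int64 { return 600 }

func main() {
	// Mirrors the rescheduling line in checkFailingResolvers:
	//   task.Schedule(time.Now().Add(time.Duration(nameserverRetryRate()) * time.Second))
	next := time.Now().Add(time.Duration(nameserverRetryRate()) * time.Second)
	fmt.Println("next failing-resolver check at:", next.Format(time.RFC3339))
}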
125 resolver/failing.go (new file)
@@ -0,0 +1,125 @@
package resolver

import (
	"context"
	"time"

	"github.com/safing/portbase/log"
	"github.com/safing/portbase/modules"
	"github.com/safing/portmaster/netenv"
)

var (
	// FailThreshold is the amount of errors a resolver must experience in order to be regarded as failed.
	FailThreshold = 5

	// FailObserveDuration is the duration in which failures are counted in order to mark a resolver as failed.
	FailObserveDuration = time.Duration(FailThreshold) * 10 * time.Second
)

// IsFailing returns if this resolver is currently failing.
func (brc *BasicResolverConn) IsFailing() bool {
	return brc.failing.IsSet()
}

// ReportFailure reports that an error occurred with this resolver.
func (brc *BasicResolverConn) ReportFailure() {
	// Don't mark resolver as failed if we are offline.
	if !netenv.Online() {
		return
	}

	// Ignore report when we are already failing.
	if brc.IsFailing() {
		return
	}

	brc.failLock.Lock()
	defer brc.failLock.Unlock()

	// Check if we are within the observation period.
	if time.Since(brc.failingStarted) > FailObserveDuration {
		brc.fails = 1
		brc.failingStarted = time.Now()
		return
	}

	// Increase and check if we need to set to failing.
	brc.fails++
	if brc.fails > FailThreshold {
		brc.failing.Set()
	}

	// Report to netenv that a configured server failed.
	if brc.resolver.Info.Source == ServerSourceConfigured {
		netenv.ConnectedToDNS.UnSet()
	}
}

// ResetFailure resets the failure status.
func (brc *BasicResolverConn) ResetFailure() {
	if brc.failing.SetToIf(true, false) {
		brc.failLock.Lock()
		defer brc.failLock.Unlock()
		brc.fails = 0
		brc.failingStarted = time.Time{}
	}

	// Report to netenv that a configured server succeeded.
	if brc.resolver.Info.Source == ServerSourceConfigured {
		netenv.ConnectedToDNS.Set()
	}
}

func checkFailingResolvers(ctx context.Context, task *modules.Task) error {
	var resolvers []*Resolver

	// Make a copy of the resolver list.
	func() {
		resolversLock.Lock()
		defer resolversLock.Unlock()

		resolvers = make([]*Resolver, len(globalResolvers))
		copy(resolvers, globalResolvers)
	}()

	// Start logging.
	ctx, tracer := log.AddTracer(ctx)
	tracer.Debugf("resolver: checking failed resolvers")
	defer tracer.Submit()

	// Go through all resolvers and check if they are reachable again.
	for i, resolver := range resolvers {
		// Skip resolvers that are not failing.
		if !resolver.Conn.IsFailing() {
			continue
		}

		tracer.Tracef("resolver: testing failed resolver [%d/%d] %s", i+1, len(resolvers), resolver)

		// Test if we can resolve via this resolver.
		ips, _, err := testConnectivity(ctx, netenv.DNSTestDomain, resolver)
		switch {
		case err != nil:
			tracer.Debugf("resolver: failed resolver %s is still failing: %s", resolver, err)
		case len(ips) == 0 || !ips[0].Equal(netenv.DNSTestExpectedIP):
			tracer.Debugf("resolver: failed resolver %s received unexpected A records: %s", resolver, ips)
		default:
			// Resolver test successful.
			tracer.Infof("resolver: check successful, resolver %s is available again", resolver)
			resolver.Conn.ResetFailure()
		}

		// Check if context was canceled.
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}

	// Set next execution time.
	if task != nil {
		task.Schedule(time.Now().Add(time.Duration(nameserverRetryRate()) * time.Second))
	}

	return nil
}
@@ -27,7 +27,9 @@ func init() {
func prep() error {
// Set DNS test connectivity function for the online status check
netenv.DNSTestQueryFunc = testConnectivity
netenv.DNSTestQueryFunc = func(ctx context.Context, fdqn string) (ips []net.IP, ok bool, err error) {
return testConnectivity(ctx, fdqn, nil)
}

intel.SetReverseResolver(ResolveIPAndValidate)
@@ -96,6 +98,21 @@ func start() error {
return err
}

// Check failing resolvers regularly and when the network changes.
checkFailingResolversTask := module.NewTask("check failing resolvers", checkFailingResolvers).Repeat(1 * time.Minute)
err = module.RegisterEventHook(
"netenv",
netenv.NetworkChangedEvent,
"check failing resolvers",
func(_ context.Context, _ any) error {
checkFailingResolversTask.StartASAP()
return nil
},
)
if err != nil {
return err
}

module.NewTask("suggest using stale cache", suggestUsingStaleCacheTask).Repeat(2 * time.Minute)

module.StartServiceWorker(
@@ -130,9 +147,11 @@ var (
failingResolverNotification *notifications.Notification
failingResolverNotificationSet = abool.New()
failingResolverNotificationLock sync.Mutex

failingResolverErrorID = "resolver:all-configured-resolvers-failed"
)

func notifyAboutFailingResolvers(err error) {
func notifyAboutFailingResolvers() {
failingResolverNotificationLock.Lock()
defer failingResolverNotificationLock.Unlock()
failingResolverNotificationSet.Set()
@@ -144,19 +163,32 @@ func notifyAboutFailingResolvers(err error) {
// Create new notification.
n := &notifications.Notification{
EventID: "resolver:all-configured-resolvers-failed",
Type: notifications.Error,
Title: "Detected DNS Compatibility Issue",
Message: "Portmaster detected that something is interfering with its Secure DNS resolver. This could be a firewall or another secure DNS resolver software. Please check if you are running incompatible [software](https://docs.safing.io/portmaster/install/status/software-compatibility). Otherwise, please report the issue via [GitHub](https://github.com/safing/portmaster/issues) or send a mail to [support@safing.io](mailto:support@safing.io) so we can help you out.",
EventID: failingResolverErrorID,
Type: notifications.Error,
Title: "Configured DNS Servers Failing",
Message: `All configured DNS servers in Portmaster are failing.

You might not be able to connect to these servers, or all of these servers are offline.
Choosing different DNS servers might fix this problem.

While the issue persists, Portmaster will use the DNS servers from your system or network, if permitted by configuration.

Alternatively, there might be something on your device that is interfering with Portmaster. This could be a firewall or another secure DNS resolver software. If that is your suspicion, please [check if you are running incompatible software here](https://docs.safing.io/portmaster/install/status/software-compatibility).

This notification will go away when Portmaster detects a working configured DNS server.`,
ShowOnSystem: true,
AvailableActions: []*notifications.Action{{
Text: "Change DNS Servers",
Type: notifications.ActionTypeOpenSetting,
Payload: &notifications.ActionTypeOpenSettingPayload{
Key: CfgOptionNameServersKey,
},
}},
}
notifications.Notify(n)

failingResolverNotification = n
n.AttachToModule(module)

// Report the raw error as module error.
module.NewErrorMessage("resolving", err).Report()
}

func resetFailingResolversNotification() {
@@ -167,10 +199,14 @@ func resetFailingResolversNotification() {
failingResolverNotificationLock.Lock()
defer failingResolverNotificationLock.Unlock()

// Remove the notification.
if failingResolverNotification != nil {
failingResolverNotification.Delete()
failingResolverNotification = nil
}

// Additionally, resolve the module error, if not done through the notification.
module.Resolve(failingResolverErrorID)
}

// AddToDebugInfo adds the system status to the given debug.Info.
@@ -400,116 +400,102 @@ func resolveAndCache(ctx context.Context, q *Query, oldCache *RRCache) (rrCache
log.Tracer(ctx).Debugf("resolver: allowing online status test domain %s to resolve even though offline", q.FQDN)
}

// Report when all configured resolvers are failing.
var failureReported bool
defer func() {
if failureReported &&
netenv.Online() &&
primarySource == ServerSourceConfigured &&
allConfiguredResolversAreFailing() {
notifyAboutFailingResolvers()
}
}()

// start resolving
for _, resolver := range resolvers {
if module.IsStopping() {
return nil, ErrShuttingDown
}

var i int
// once with skipping recently failed resolvers, once without
resolveLoop:
for i = 0; i < 2; i++ {
for _, resolver := range resolvers {
if module.IsStopping() {
return nil, ErrShuttingDown
}
// Skip failing resolvers.
if resolver.Conn.IsFailing() {
log.Tracer(ctx).Tracef("resolver: skipping resolver %s, because it is failing", resolver)
continue
}

// check if resolver failed recently (on first run)
if i == 0 && resolver.Conn.IsFailing() {
log.Tracer(ctx).Tracef("resolver: skipping resolver %s, because it failed recently", resolver)
continue
}

// resolve
log.Tracer(ctx).Tracef("resolver: sending query for %s to %s", q.ID(), resolver.Info.ID())
rrCache, err = resolver.Conn.Query(ctx, q)
if err != nil {
switch {
case errors.Is(err, ErrNotFound):
// NXDomain, or similar
if tryAll {
continue
}
return nil, err
case errors.Is(err, ErrBlocked):
// some resolvers might also block
return nil, err
case netenv.GetOnlineStatus() == netenv.StatusOffline &&
q.FQDN != netenv.DNSTestDomain &&
!netenv.IsConnectivityDomain(q.FQDN):
// we are offline and this is not an online check query
return oldCache, ErrOffline
case errors.Is(err, ErrContinue):
continue
case errors.Is(err, ErrTimeout):
resolver.Conn.ReportFailure()
log.Tracer(ctx).Debugf("resolver: query to %s timed out", resolver.Info.ID())
continue
case errors.Is(err, context.Canceled):
return nil, err
case errors.Is(err, context.DeadlineExceeded):
return nil, err
case errors.Is(err, ErrShuttingDown):
return nil, err
default:
resolver.Conn.ReportFailure()
log.Tracer(ctx).Warningf("resolver: query to %s failed: %s", resolver.Info.ID(), err)
// resolve
log.Tracer(ctx).Tracef("resolver: sending query for %s to %s", q.ID(), resolver.Info.ID())
rrCache, err = resolver.Conn.Query(ctx, q)
if err != nil {
switch {
case errors.Is(err, ErrNotFound):
// NXDomain, or similar
if tryAll {
continue
}
}
if rrCache == nil {
// Defensive: This should normally not happen.
return nil, err
case errors.Is(err, ErrBlocked):
// some resolvers might also block
return nil, err
case netenv.GetOnlineStatus() == netenv.StatusOffline &&
q.FQDN != netenv.DNSTestDomain &&
!netenv.IsConnectivityDomain(q.FQDN):
// we are offline and this is not an online check query
return oldCache, ErrOffline
case errors.Is(err, ErrContinue):
continue
case errors.Is(err, ErrTimeout):
resolver.Conn.ReportFailure()
failureReported = true
log.Tracer(ctx).Debugf("resolver: query to %s timed out", resolver.Info.ID())
continue
case errors.Is(err, context.Canceled):
return nil, err
case errors.Is(err, context.DeadlineExceeded):
return nil, err
case errors.Is(err, ErrShuttingDown):
return nil, err
default:
resolver.Conn.ReportFailure()
failureReported = true
log.Tracer(ctx).Warningf("resolver: query to %s failed: %s", resolver.Info.ID(), err)
continue
}
// Check if request succeeded and whether we should try another resolver.
if rrCache.RCode != dns.RcodeSuccess && tryAll {
continue
}

// Report a successful connection.
resolver.Conn.ResetFailure()
// Reset failing resolvers notification, if querying in global scope.
if primarySource == ServerSourceConfigured {
resetFailingResolversNotification()
}

break resolveLoop
}
if rrCache == nil {
// Defensive: This should normally not happen.
continue
}
// Check if request succeeded and whether we should try another resolver.
if rrCache.RCode != dns.RcodeSuccess && tryAll {
continue
}

// Report a successful connection.
resolver.Conn.ResetFailure()
// Reset failing resolvers notification, if querying in global scope.
if primarySource == ServerSourceConfigured && !allConfiguredResolversAreFailing() {
resetFailingResolversNotification()
}

break
}

// Post-process errors
if err != nil {
// tried all resolvers, possibly twice
if i > 1 {
err = fmt.Errorf("all %d query-compliant resolvers failed, last error: %w", len(resolvers), err)

if primarySource == ServerSourceConfigured &&
netenv.Online() && CompatSelfCheckIsFailing() {
notifyAboutFailingResolvers(err)
} else {
resetFailingResolversNotification()
}
}
} else if rrCache == nil /* defensive */ {
// Validate return values.
if err == nil && rrCache == nil {
err = ErrNotFound
}

// Check if we want to use an older cache instead.
if oldCache != nil {
oldCache.IsBackup = true

switch {
case err != nil:
// There was an error during resolving, return the old cache entry instead.
// Handle error.
if err != nil {
// Check if we can return an older cache instead of the error.
if oldCache != nil {
oldCache.IsBackup = true
log.Tracer(ctx).Debugf("resolver: serving backup cache of %s because query failed: %s", q.ID(), err)
return oldCache, nil
case !rrCache.Cacheable():
// The new result is NXDomain, return the old cache entry instead.
log.Tracer(ctx).Debugf("resolver: serving backup cache of %s because fresh response is NXDomain", q.ID())
return oldCache, nil
}
}

// Return error, if there is one.
if err != nil {
return nil, err
return nil, fmt.Errorf("all %d query-compliant resolvers failed, last error: %w", len(resolvers), err)
}

// Adjust TTLs.
@@ -556,7 +542,8 @@ func shouldResetCache(q *Query) (reset bool) {

// testConnectivity tests if resolving a query succeeds and returns whether the
// query itself succeeded, separate from interpreting the result.
func testConnectivity(ctx context.Context, fdqn string) (ips []net.IP, ok bool, err error) {
// Provide a resolver to use or automatically select one if nil.
func testConnectivity(ctx context.Context, fdqn string, resolver *Resolver) (ips []net.IP, ok bool, err error) {
q := &Query{
FQDN: fdqn,
QType: dns.Type(dns.TypeA),
@@ -566,7 +553,15 @@ func testConnectivity(ctx context.Context, fdqn string) (ips []net.IP, ok bool,
return nil, false, ErrInvalid
}

rrCache, err := resolveAndCache(ctx, q, nil)
// Resolve with given resolver or auto-select.
var rrCache *RRCache
if resolver != nil {
rrCache, err = resolver.Conn.Query(ctx, q)
} else {
rrCache, err = resolveAndCache(ctx, q, nil)
}

// Enhance results.
switch {
case err == nil:
switch rrCache.RCode {
@@ -36,9 +36,6 @@ const (
TLSProtocol = "tls"
)

// FailThreshold is amount of errors a resolvers must experience in order to be regarded as failed.
var FailThreshold = 20

// Resolver holds information about an active resolver.
type Resolver struct {
// Server config url (and ID)
@@ -221,10 +218,10 @@ type BasicResolverConn struct {
resolver *Resolver

failing *abool.AtomicBool
failingUntil time.Time
fails int
failLock sync.Mutex
failing *abool.AtomicBool
failingStarted time.Time
fails int
failLock sync.Mutex

networkChangedFlag *utils.Flag
}
@@ -234,66 +231,3 @@ func (brc *BasicResolverConn) init() {
brc.failing = abool.New()
brc.networkChangedFlag = netenv.GetNetworkChangedFlag()
}

// ReportFailure reports that an error occurred with this resolver.
func (brc *BasicResolverConn) ReportFailure() {
// Don't mark resolver as failed if we are offline.
if !netenv.Online() {
return
}

brc.failLock.Lock()
defer brc.failLock.Unlock()

brc.fails++
if brc.fails > FailThreshold {
brc.failing.Set()
brc.failingUntil = time.Now().Add(time.Duration(nameserverRetryRate()) * time.Second)
brc.fails = 0

// Refresh the network changed flag in order to only regard changes after
// the fail.
brc.networkChangedFlag.Refresh()
}

// Report to netenv that a configured server failed.
if brc.resolver.Info.Source == ServerSourceConfigured {
netenv.ConnectedToDNS.UnSet()
}
}

// IsFailing returns if this resolver is currently failing.
func (brc *BasicResolverConn) IsFailing() bool {
// Check if not failing.
if !brc.failing.IsSet() {
return false
}

brc.failLock.Lock()
defer brc.failLock.Unlock()

// Reset failure status if the network changed since the last query.
if brc.networkChangedFlag.IsSet() {
brc.networkChangedFlag.Refresh()
brc.fails = 0
brc.failing.UnSet()
return false
}

// Check if we are still within the failing duration.
return time.Now().Before(brc.failingUntil)
}

// ResetFailure resets the failure status.
func (brc *BasicResolverConn) ResetFailure() {
if brc.failing.SetToIf(true, false) {
brc.failLock.Lock()
defer brc.failLock.Unlock()
brc.fails = 0
}

// Report to netenv that a configured server succeeded.
if brc.resolver.Info.Source == ServerSourceConfigured {
netenv.ConnectedToDNS.Set()
}
}
@@ -358,6 +358,12 @@ func getSystemResolvers() (resolvers []*Resolver) {
const missingResolversErrorID = "missing-resolvers"

func loadResolvers() {
defer func() {
if !allConfiguredResolversAreFailing() {
resetFailingResolversNotification()
}
}()

// TODO: what happens when a lot of processes want to reload at once? we do not need to run this multiple times in a short time frame.
resolversLock.Lock()
defer resolversLock.Unlock()
@@ -383,21 +389,21 @@ func loadResolvers() {
)

if len(newResolvers) == 0 {
log.Warning("resolver: no (valid) dns server found in config or system, falling back to global defaults")
module.Warning(
missingResolversErrorID,
"Using Factory Default DNS Servers",
"The Portmaster could not find any (valid) DNS servers in the settings or system. In order to prevent being disconnected, the factory defaults are being used instead.",
)

// load defaults directly, overriding config system
newResolvers = getConfiguredResolvers(defaultNameServers)
if len(newResolvers) == 0 {
if len(newResolvers) > 0 {
log.Warning("resolver: no (valid) dns server found in config or system, falling back to global defaults")
module.Warning(
missingResolversErrorID,
"Using Factory Default DNS Servers",
"The Portmaster could not find any (valid) DNS servers in the settings or system. In order to prevent being disconnected, the factory defaults are being used instead. If you just switched your network, this should be resolved shortly.",
)
} else {
log.Critical("resolver: no (valid) dns server found in config, system or global defaults")
module.Error(
missingResolversErrorID,
"No DNS Server Configured",
"The Portmaster could not find any (valid) DNS servers in the settings or system. You will experience severe connectivity problems until resolved.",
"No DNS Servers Configured",
"The Portmaster could not find any (valid) DNS servers in the settings or system. You will experience severe connectivity problems until resolved. If you just switched your network, this should be resolved shortly.",
)
}
}
@@ -585,3 +591,25 @@ func ForceResolverReconnect(ctx context.Context) {
}
tracer.Info("resolver: all active resolvers were forced to reconnect")
}

// allConfiguredResolversAreFailing reports whether all configured resolvers are failing.
// Return false if there are no configured resolvers.
func allConfiguredResolversAreFailing() bool {
resolversLock.RLock()
defer resolversLock.RUnlock()

// If there are no configured resolvers, return as not failing.
if len(currentResolverConfig) == 0 {
return false
}

// Return as not failing, if we can find any non-failing configured resolver.
for _, resolver := range globalResolvers {
if !resolver.Conn.IsFailing() && resolver.Info.Source == ServerSourceConfigured {
// We found a non-failing configured resolver.
return false
}
}

return true
}
@@ -235,7 +235,13 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
if downloadAll {
forceDownload.Set()
}
updateTask.StartASAP()

// If index check is forced, start quicker.
if forceIndexCheck {
updateTask.StartASAP()
} else {
updateTask.Queue()
}
}

log.Debugf("updates: triggering update to run as soon as possible")