package resolver

import (
	"context"
	"errors"
	"net"
	"time"

	"github.com/miekg/dns"

	"github.com/safing/portmaster/base/log"
	"github.com/safing/portmaster/service/netenv"
)

var (
	defaultClientTTL      = 5 * time.Minute
	defaultRequestTimeout = 3 * time.Second // dns query
	defaultConnectTimeout = 5 * time.Second // tcp/tls
	maxRequestTimeout     = 5 * time.Second
)

// PlainResolver is a resolver using plain DNS.
type PlainResolver struct {
	BasicResolverConn
}

// NewPlainResolver returns a new PlainResolver.
func NewPlainResolver(resolver *Resolver) *PlainResolver {
	newResolver := &PlainResolver{
		BasicResolverConn: BasicResolverConn{
			resolver: resolver,
		},
	}
	newResolver.BasicResolverConn.init()
	return newResolver
}

// Query executes the given query against the resolver.
func (pr *PlainResolver) Query(ctx context.Context, q *Query) (*RRCache, error) {
	queryStarted := time.Now()

	// create query
	dnsQuery := new(dns.Msg)
	dnsQuery.SetQuestion(q.FQDN, uint16(q.QType))

	// get timeout from the context, capped at the default request timeout
	var timeout time.Duration
	if deadline, ok := ctx.Deadline(); ok {
		timeout = time.Until(deadline)
	}
	if timeout > defaultRequestTimeout {
		timeout = defaultRequestTimeout
	}

	// create client
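	// UDPSize is the minimum receive buffer for UDP replies; Timeout, when
	// non-zero, is a cumulative limit for dial, write and read. The custom
	// Dialer applies the same timeout to connection setup and uses
	// getLocalAddr (a helper defined elsewhere in this package) to pin the
	// local address of the outgoing socket.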
	dnsClient := &dns.Client{
		UDPSize: 1024,
		Timeout: timeout,
		Dialer: &net.Dialer{
			Timeout:   timeout,
			LocalAddr: getLocalAddr("udp"),
		},
	}

	// query server
	reply, rtt, err := dnsClient.Exchange(dnsQuery, pr.resolver.ServerAddress)
	log.Tracer(ctx).Tracef("resolver: query took %s", rtt)
	// error handling
	if err != nil {
		// Hint network environment at failed connection if err is not a timeout.
		var nErr net.Error
		if errors.As(err, &nErr) && !nErr.Timeout() {
			netenv.ReportFailedConnection()
		}

		return nil, err
	}

	// check if blocked
	if pr.resolver.IsBlockedUpstream(reply) {
		return nil, &BlockedUpstreamError{pr.resolver.Info.DescriptiveName()}
	}

	// Hint network environment at successful connection.
	netenv.ReportSuccessfulConnection()

	// Report request duration for metrics.
	reportRequestDuration(queryStarted, pr.resolver)

	newRecord := &RRCache{
		Domain:   q.FQDN,
		Question: q.QType,
		RCode:    reply.Rcode,
		Answer:   reply.Answer,
		Ns:       reply.Ns,
		Extra:    reply.Extra,
		Resolver: pr.resolver.Info.Copy(),
	}

	// TODO: check if reply.Answer is valid
	return newRecord, nil
}

// ForceReconnect forces the resolver to re-establish the connection to the server.
// Does nothing for PlainResolver, as every request uses its own connection.
func (pr *PlainResolver) ForceReconnect(_ context.Context) {}
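
// queryIPv4 is an illustrative usage sketch rather than part of the upstream
// file: it runs a single A-record lookup through a PlainResolver with a bounded
// deadline. It assumes Query.QType is a dns.Type (it is converted to uint16 in
// Query above); everything else it uses already appears in this file or in the
// miekg/dns package, and the function name and shape are hypothetical.
func queryIPv4(ctx context.Context, upstream *Resolver, domain string) ([]dns.RR, error) {
	// Bound the whole lookup; Query additionally caps the per-request timeout
	// at defaultRequestTimeout.
	ctx, cancel := context.WithTimeout(ctx, maxRequestTimeout)
	defer cancel()

	pr := NewPlainResolver(upstream)
	rrCache, err := pr.Query(ctx, &Query{
		FQDN:  dns.Fqdn(domain),
		QType: dns.Type(dns.TypeA),
	})
	if err != nil {
		return nil, err
	}
	return rrCache.Answer, nil
}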