Fix linting errors

Patrick Pacher 2024-03-27 16:17:58 +01:00
parent 653a365bce
commit 61176af14e
48 changed files with 167 additions and 153 deletions

View file

@ -38,6 +38,9 @@ linters:
- whitespace - whitespace
- wrapcheck - wrapcheck
- wsl - wsl
- perfsprint # TODO(ppacher): we should re-enanble this one to avoid costly fmt.* calls in the hot-path
- testifylint
- gomoddirectives
linters-settings: linters-settings:
revive: revive:

View file

@ -9,8 +9,9 @@ import (
"image" "image"
"image/png" "image/png"
"github.com/safing/portbase/log"
"golang.org/x/image/draw" "golang.org/x/image/draw"
"github.com/safing/portbase/log"
) )
// Colored Icon IDs. // Colored Icon IDs.
@ -35,7 +36,7 @@ var (
//go:embed data/icons/pm_light_blue_512.png //go:embed data/icons/pm_light_blue_512.png
BluePNG []byte BluePNG []byte
// ColoredIcons holds all the icons as .PNGs // ColoredIcons holds all the icons as .PNGs.
ColoredIcons [4][]byte ColoredIcons [4][]byte
) )

View file

@ -2,7 +2,7 @@ package main
import ( import (
"fmt" "fmt"
"io/ioutil" "io"
"net/http" "net/http"
"net/http/cookiejar" "net/http/cookiejar"
"strings" "strings"
@ -16,9 +16,7 @@ const (
apiShutdownEndpoint = "core/shutdown" apiShutdownEndpoint = "core/shutdown"
) )
var ( var httpAPIClient *http.Client
httpApiClient *http.Client
)
func init() { func init() {
// Make cookie jar. // Make cookie jar.
@ -29,22 +27,22 @@ func init() {
} }
// Create client. // Create client.
httpApiClient = &http.Client{ httpAPIClient = &http.Client{
Jar: jar, Jar: jar,
Timeout: 3 * time.Second, Timeout: 3 * time.Second,
} }
} }
func httpApiAction(endpoint string) (response string, err error) { func httpAPIAction(endpoint string) (response string, err error) {
// Make action request. // Make action request.
resp, err := httpApiClient.Post(apiBaseURL+endpoint, "", nil) resp, err := httpAPIClient.Post(apiBaseURL+endpoint, "", nil)
if err != nil { if err != nil {
return "", fmt.Errorf("request failed: %w", err) return "", fmt.Errorf("request failed: %w", err)
} }
// Read the response body. // Read the response body.
defer resp.Body.Close() defer func() { _ = resp.Body.Close() }()
respData, err := ioutil.ReadAll(resp.Body) respData, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
return "", fmt.Errorf("failed to read data: %w", err) return "", fmt.Errorf("failed to read data: %w", err)
} }
@ -60,6 +58,6 @@ func httpApiAction(endpoint string) (response string, err error) {
// TriggerShutdown triggers a shutdown via the APi. // TriggerShutdown triggers a shutdown via the APi.
func TriggerShutdown() error { func TriggerShutdown() error {
_, err := httpApiAction(apiShutdownEndpoint) _, err := httpAPIAction(apiShutdownEndpoint)
return err return err
} }
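
For reference, a minimal sketch of the pattern this hunk converges on: io.ReadAll replaces the long-deprecated ioutil.ReadAll, and the body is closed in a deferred closure whose error is discarded explicitly so errcheck sees a deliberate choice. The URL handling and client settings below are placeholders, not the notifier's actual configuration:

package example

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func fetch(url string) (string, error) {
	client := &http.Client{Timeout: 3 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return "", fmt.Errorf("request failed: %w", err)
	}
	// Discarding the Close error explicitly documents that it is tolerated here.
	defer func() { _ = resp.Body.Close() }()

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("failed to read data: %w", err)
	}
	return string(data), nil
}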

View file

@ -18,7 +18,7 @@ func ensureAppIcon() (location string, err error) {
if appIconPath == "" { if appIconPath == "" {
appIconPath = filepath.Join(dataDir, "exec", "portmaster.png") appIconPath = filepath.Join(dataDir, "exec", "portmaster.png")
} }
err = os.WriteFile(appIconPath, icons.PNG, 0o0644) err = os.WriteFile(appIconPath, icons.PNG, 0o0644) // nolint:gosec
}) })
return appIconPath, err return appIconPath, err
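
The added directive silences gosec's G306 finding, which flags file permissions broader than 0o600; an application icon is meant to be world-readable, so 0o644 is intentional. A hedged sketch of the same pattern (path and logging are placeholders); note that current golangci-lint documentation prefers the directive without a space, i.e. //nolint:gosec:

package example

import (
	"log"
	"os"
)

func writeIcon(path string, iconBytes []byte) {
	// gosec (G306) wants 0o600 or tighter for written files; world-readable is
	// deliberate for a desktop icon, so the finding is suppressed inline.
	if err := os.WriteFile(path, iconBytes, 0o644); err != nil { //nolint:gosec
		log.Printf("failed to write icon: %s", err)
	}
}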

View file

@ -52,6 +52,8 @@ var (
} }
) )
const query = "query "
func init() { func init() {
flag.StringVar(&dataDir, "data", "", "set data directory") flag.StringVar(&dataDir, "data", "", "set data directory")
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before of shutting down") flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before of shutting down")
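
The new query constant exists because the literal "query " is prepended to Qsub subscriptions in several files (see the spn status and subsystems hunks below); this is the kind of repeated string literal goconst asks to name once. A small illustration with placeholder keys:

package example

const query = "query "

const (
	spnStatusKey  = "runtime:spn/status"
	subsystemsKey = "runtime:subsystems/"
)

// Building the subscription strings from the shared constant avoids retyping
// the prefix and keeps the duplication check quiet.
var (
	statusQuery     = query + spnStatusKey
	subsystemsQuery = query + subsystemsKey
)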

View file

@ -14,7 +14,7 @@ type Notification struct {
systemID NotificationID systemID NotificationID
} }
// IsSupported returns whether the action is supported on this system. // IsSupportedAction returns whether the action is supported on this system.
func IsSupportedAction(a pbnotify.Action) bool { func IsSupportedAction(a pbnotify.Action) bool {
switch a.Type { switch a.Type {
case pbnotify.ActionTypeNone: case pbnotify.ActionTypeNone:
@ -26,11 +26,10 @@ func IsSupportedAction(a pbnotify.Action) bool {
// SelectAction sends an action back to the portmaster. // SelectAction sends an action back to the portmaster.
func (n *Notification) SelectAction(action string) { func (n *Notification) SelectAction(action string) {
new := &pbnotify.Notification{ upd := &pbnotify.Notification{
EventID: n.EventID, EventID: n.EventID,
SelectedActionID: action, SelectedActionID: action,
} }
// FIXME: check response _ = apiClient.Update(fmt.Sprintf("%s%s", dbNotifBasePath, upd.EventID), upd, nil)
apiClient.Update(fmt.Sprintf("%s%s", dbNotifBasePath, new.EventID), new, nil)
} }
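
Two findings meet in this hunk: the variable new shadowed the predeclared builtin, and the previously ignored return value of apiClient.Update is now discarded explicitly instead of hiding behind a FIXME. A rough sketch of the shape, using a stand-in client interface rather than the portbase API client:

package example

import "fmt"

type updater interface {
	Update(key string, value any, result any) error
}

func selectAction(client updater, basePath, eventID, action string) {
	// "upd" instead of "new": the builtin new() stays usable and the shadowing
	// warning goes away.
	upd := map[string]string{"EventID": eventID, "SelectedActionID": action}
	// Explicitly discarding the error documents that failure is tolerated here.
	_ = client.Update(fmt.Sprintf("%s%s", basePath, eventID), upd, nil)
}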

View file

@ -9,7 +9,6 @@ import (
"github.com/safing/portbase/api/client" "github.com/safing/portbase/api/client"
"github.com/safing/portbase/formats/dsd" "github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log" "github.com/safing/portbase/log"
pbnotify "github.com/safing/portbase/notifications" pbnotify "github.com/safing/portbase/notifications"
) )

View file

@ -2,9 +2,11 @@ package main
import ( import (
"context" "context"
"errors"
"sync" "sync"
notify "github.com/dhaavi/go-notify" notify "github.com/dhaavi/go-notify"
"github.com/safing/portbase/log" "github.com/safing/portbase/log"
) )
@ -45,7 +47,12 @@ listenForNotifications:
continue listenForNotifications continue listenForNotifications
} }
notification := n.(*Notification) notification, ok := n.(*Notification)
if !ok {
log.Errorf("received invalid notification type %T", n)
continue listenForNotifications
}
log.Tracef("notify: received signal: %+v", sig) log.Tracef("notify: received signal: %+v", sig)
if sig.ActionKey != "" { if sig.ActionKey != "" {
@ -62,7 +69,6 @@ listenForNotifications:
} }
} }
} }
} }
func actionListener() { func actionListener() {
@ -71,7 +77,7 @@ func actionListener() {
go handleActions(mainCtx, actions) go handleActions(mainCtx, actions)
err := notify.SignalNotify(mainCtx, actions) err := notify.SignalNotify(mainCtx, actions)
if err != nil && err != context.Canceled { if err != nil && errors.Is(err, context.Canceled) {
log.Errorf("notify: signal listener failed: %s", err) log.Errorf("notify: signal listener failed: %s", err)
} }
} }
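
Two patterns are applied above: the unchecked type assertion becomes the comma-ok form (what forcetypeassert flags), and the direct error comparison becomes errors.Is. A minimal, self-contained sketch of both; note that when the intent is to log everything except cancellation, the usual guard is the negated form !errors.Is(err, context.Canceled):

package example

import (
	"context"
	"errors"
	"log"
)

type notification struct{ EventID string }

func handle(n interface{}) {
	// Comma-ok assertion: a wrong concrete type is reported instead of panicking.
	msg, ok := n.(*notification)
	if !ok {
		log.Printf("received invalid notification type %T", n)
		return
	}
	log.Printf("handling %s", msg.EventID)
}

func logIfFailed(err error) {
	// errors.Is also matches wrapped errors, unlike err == context.Canceled.
	if err != nil && !errors.Is(err, context.Canceled) {
		log.Printf("signal listener failed: %s", err)
	}
}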

View file

@ -4,10 +4,11 @@ import (
"sync" "sync"
"time" "time"
"github.com/tevino/abool"
"github.com/safing/portbase/api/client" "github.com/safing/portbase/api/client"
"github.com/safing/portbase/formats/dsd" "github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log" "github.com/safing/portbase/log"
"github.com/tevino/abool"
) )
const ( const (
@ -48,10 +49,10 @@ func updateSPNStatus(s *SPNStatus) {
} }
func spnStatusClient() { func spnStatusClient() {
moduleQueryOp := apiClient.Qsub("query "+spnModuleKey, handleSPNModuleUpdate) moduleQueryOp := apiClient.Qsub(query+spnModuleKey, handleSPNModuleUpdate)
moduleQueryOp.EnableResuscitation() moduleQueryOp.EnableResuscitation()
statusQueryOp := apiClient.Qsub("query "+spnStatusKey, handleSPNStatusUpdate) statusQueryOp := apiClient.Qsub(query+spnStatusKey, handleSPNStatusUpdate)
statusQueryOp.EnableResuscitation() statusQueryOp.EnableResuscitation()
} }

View file

@ -1,7 +1,6 @@
package main package main
import ( import (
"fmt"
"sync" "sync"
"github.com/safing/portbase/api/client" "github.com/safing/portbase/api/client"
@ -14,7 +13,7 @@ const (
// Module Failure Status Values // Module Failure Status Values
// FailureNone = 0 // unused // FailureNone = 0 // unused
// FailureHint = 1 // unused // FailureHint = 1 // unused.
FailureWarning = 2 FailureWarning = 2
FailureError = 3 FailureError = 3
) )
@ -92,7 +91,7 @@ func clearSubsystems() {
} }
func subsystemsClient() { func subsystemsClient() {
subsystemsOp := apiClient.Qsub(fmt.Sprintf("query %s", subsystemsKeySpace), handleSubsystem) subsystemsOp := apiClient.Qsub("query "+subsystemsKeySpace, handleSubsystem)
subsystemsOp.EnableResuscitation() subsystemsOp.EnableResuscitation()
} }

View file

@ -102,7 +102,6 @@ func onReady() {
} }
func onExit() { func onExit() {
} }
func triggerTrayUpdate() { func triggerTrayUpdate() {
@ -172,7 +171,7 @@ func updateTray() {
// Set SPN status if changed. // Set SPN status if changed.
if spnStatus != nil && activeSPNStatus != spnStatus.Status { if spnStatus != nil && activeSPNStatus != spnStatus.Status {
activeSPNStatus = spnStatus.Status activeSPNStatus = spnStatus.Status
menuItemSPNStatus.SetTitle("SPN: " + strings.Title(activeSPNStatus)) menuItemSPNStatus.SetTitle("SPN: " + strings.Title(activeSPNStatus)) // nolint:staticcheck
} }
// Set SPN switch if changed. // Set SPN switch if changed.
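
strings.Title has been deprecated since Go 1.18 because it does not handle Unicode word boundaries correctly; the nolint keeps it for these simple ASCII status strings. If it is ever replaced, the usual substitute is golang.org/x/text — a hedged sketch (it adds a dependency, so only worth it if real casing rules matter):

package example

import (
	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func titleCase(status string) string {
	// cases.Title is Unicode-aware; strings.Title is deprecated since Go 1.18.
	return cases.Title(language.English).String(status)
}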

View file

@ -79,7 +79,7 @@ func createInstanceLock(lockFilePath string) error {
// create lock file // create lock file
// TODO: Investigate required permissions. // TODO: Investigate required permissions.
err = os.WriteFile(lockFilePath, []byte(fmt.Sprintf("%d", os.Getpid())), 0o0666) //nolint:gosec err = os.WriteFile(lockFilePath, []byte(strconv.Itoa(os.Getpid())), 0o0666) //nolint:gosec
if err != nil { if err != nil {
return err return err
} }

View file

@ -3,6 +3,7 @@ package core
import ( import (
"context" "context"
"encoding/hex" "encoding/hex"
"errors"
"fmt" "fmt"
"net/http" "net/http"
"net/url" "net/url"
@ -23,6 +24,8 @@ import (
"github.com/safing/portmaster/spn/captain" "github.com/safing/portmaster/spn/captain"
) )
var errInvalidReadPermission = errors.New("invalid read permission")
func registerAPIEndpoints() error { func registerAPIEndpoints() error {
if err := api.RegisterEndpoint(api.Endpoint{ if err := api.RegisterEndpoint(api.Endpoint{
Path: "core/shutdown", Path: "core/shutdown",
@ -207,10 +210,10 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
// convert the requested read and write permissions to their api.Permission // convert the requested read and write permissions to their api.Permission
// value. This ensures only "user" or "admin" permissions can be requested. // value. This ensures only "user" or "admin" permissions can be requested.
if getSavePermission(readPermStr) <= api.NotSupported { if getSavePermission(readPermStr) <= api.NotSupported {
return nil, fmt.Errorf("invalid read permission") return nil, errInvalidReadPermission
} }
if getSavePermission(writePermStr) <= api.NotSupported { if getSavePermission(writePermStr) <= api.NotSupported {
return nil, fmt.Errorf("invalid read permission") return nil, errInvalidReadPermission
} }
proc, err := process.GetProcessByRequestOrigin(ar) proc, err := process.GetProcessByRequestOrigin(ar)
@ -281,7 +284,7 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
select { select {
case key := <-ch: case key := <-ch:
if len(key) == 0 { if len(key) == 0 {
return nil, fmt.Errorf("access denied") return nil, errors.New("access denied")
} }
return map[string]interface{}{ return map[string]interface{}{
@ -289,6 +292,6 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
"validUntil": validUntil, "validUntil": validUntil,
}, nil }, nil
case <-ar.Context().Done(): case <-ar.Context().Done():
return nil, fmt.Errorf("timeout") return nil, errors.New("timeout")
} }
} }
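
This hunk replaces fmt.Errorf calls that had no format verbs with errors.New and lifts the repeated "invalid read permission" message into a package-level sentinel, which callers could then match with errors.Is. A compact sketch of the pattern, with an invented checkPerm helper:

package main

import (
	"errors"
	"fmt"
)

// A sentinel error can be compared with errors.Is by callers; fmt.Errorf is
// only needed when something is interpolated or wrapped.
var errInvalidReadPermission = errors.New("invalid read permission")

func checkPerm(level int) error {
	if level <= 0 {
		return errInvalidReadPermission
	}
	return nil
}

func main() {
	if err := checkPerm(0); errors.Is(err, errInvalidReadPermission) {
		fmt.Println("read permission rejected")
	}
}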

View file

@ -4,6 +4,7 @@ package nfq
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"fmt" "fmt"
ct "github.com/florianl/go-conntrack" ct "github.com/florianl/go-conntrack"
@ -35,7 +36,7 @@ func TeardownNFCT() {
// DeleteAllMarkedConnection deletes all marked entries from the conntrack table. // DeleteAllMarkedConnection deletes all marked entries from the conntrack table.
func DeleteAllMarkedConnection() error { func DeleteAllMarkedConnection() error {
if nfct == nil { if nfct == nil {
return fmt.Errorf("nfq: nfct not initialized") return errors.New("nfq: nfct not initialized")
} }
// Delete all ipv4 marked connections // Delete all ipv4 marked connections
@ -87,7 +88,7 @@ func deleteMarkedConnections(nfct *ct.Nfct, f ct.Family) (deleted int) {
// DeleteMarkedConnection removes a specific connection from the conntrack table. // DeleteMarkedConnection removes a specific connection from the conntrack table.
func DeleteMarkedConnection(conn *network.Connection) error { func DeleteMarkedConnection(conn *network.Connection) error {
if nfct == nil { if nfct == nil {
return fmt.Errorf("nfq: nfct not initialized") return errors.New("nfq: nfct not initialized")
} }
con := ct.Con{ con := ct.Con{

View file

@ -612,18 +612,6 @@ func issueVerdict(conn *network.Connection, pkt packet.Packet, verdict network.V
} }
} }
// verdictRating rates the privacy and security aspect of verdicts from worst to best.
var verdictRating = []network.Verdict{
network.VerdictAccept, // Connection allowed in the open.
network.VerdictRerouteToTunnel, // Connection allowed, but protected.
network.VerdictRerouteToNameserver, // Connection allowed, but resolved via Portmaster.
network.VerdictBlock, // Connection blocked, with feedback.
network.VerdictDrop, // Connection blocked, without feedback.
network.VerdictFailed,
network.VerdictUndeterminable,
network.VerdictUndecided,
}
// func tunnelHandler(pkt packet.Packet) { // func tunnelHandler(pkt packet.Packet) {
// tunnelInfo := GetTunnelInfo(pkt.Info().Dst) // tunnelInfo := GetTunnelInfo(pkt.Info().Dst)
// if tunnelInfo == nil { // if tunnelInfo == nil {

View file

@ -2,9 +2,9 @@ package intel
import ( import (
"context" "context"
"fmt"
"net" "net"
"sort" "sort"
"strconv"
"strings" "strings"
"sync" "sync"
@ -433,7 +433,7 @@ func (e *Entity) getASNLists(ctx context.Context) {
} }
e.loadAsnListOnce.Do(func() { e.loadAsnListOnce.Do(func() {
asnStr := fmt.Sprintf("%d", asn) asnStr := strconv.FormatUint(uint64(asn), 10)
list, err := filterlists.LookupASNString(asnStr) list, err := filterlists.LookupASNString(asnStr)
if err != nil { if err != nil {
log.Tracer(ctx).Errorf("intel: failed to get ASN blocklist for %d: %s", asn, err) log.Tracer(ctx).Errorf("intel: failed to get ASN blocklist for %d: %s", asn, err)

View file

@ -103,18 +103,19 @@ func parseHeader(r io.Reader) (compressed bool, format byte, err error) {
if _, err = r.Read(listHeader[:]); err != nil { if _, err = r.Read(listHeader[:]); err != nil {
// if we have an error here we can safely abort because // if we have an error here we can safely abort because
// the file must be broken // the file must be broken
return return compressed, format, err
} }
if listHeader[0] != dsd.LIST { if listHeader[0] != dsd.LIST {
err = fmt.Errorf("unexpected file type: %d (%c), expected dsd list", listHeader[0], listHeader[0]) err = fmt.Errorf("unexpected file type: %d (%c), expected dsd list", listHeader[0], listHeader[0])
return
return compressed, format, err
} }
var compression [1]byte var compression [1]byte
if _, err = r.Read(compression[:]); err != nil { if _, err = r.Read(compression[:]); err != nil {
// same here, a DSDL file must have at least 2 bytes header // same here, a DSDL file must have at least 2 bytes header
return return compressed, format, err
} }
if compression[0] == dsd.GZIP { if compression[0] == dsd.GZIP {
@ -122,15 +123,16 @@ func parseHeader(r io.Reader) (compressed bool, format byte, err error) {
var formatSlice [1]byte var formatSlice [1]byte
if _, err = r.Read(formatSlice[:]); err != nil { if _, err = r.Read(formatSlice[:]); err != nil {
return return compressed, format, err
} }
format = formatSlice[0] format = formatSlice[0]
return return compressed, format, err
} }
format = compression[0] format = compression[0]
return // nolint:nakedret
return compressed, format, err
} }
// byteReader extends an io.Reader to implement the ByteReader interface. // byteReader extends an io.Reader to implement the ByteReader interface.
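
nakedret flags bare return statements in longer functions with named results, since it is easy to lose track of what is actually returned; the hunk above spells the results out on every exit path. A small sketch of the idea, using a toy two-byte header in place of the DSD list header:

package example

import (
	"errors"
	"io"
)

// parseToyHeader reads a type byte that must be 'L' and a format byte.
// Returning the named results explicitly keeps every exit path readable,
// which is what nakedret is after.
func parseToyHeader(r io.Reader) (format byte, err error) {
	var buf [2]byte
	if _, err = io.ReadFull(r, buf[:]); err != nil {
		return format, err // instead of a naked "return"
	}
	if buf[0] != 'L' {
		err = errors.New("unexpected file type")
		return format, err
	}
	format = buf[1]
	return format, err
}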

View file

@ -1,7 +1,7 @@
package geoip package geoip
import ( import (
"fmt" "errors"
"net" "net"
"github.com/oschwald/maxminddb-golang" "github.com/oschwald/maxminddb-golang"
@ -16,7 +16,7 @@ func getReader(ip net.IP) *maxminddb.Reader {
func GetLocation(ip net.IP) (*Location, error) { func GetLocation(ip net.IP) (*Location, error) {
db := getReader(ip) db := getReader(ip)
if db == nil { if db == nil {
return nil, fmt.Errorf("geoip database not available") return nil, errors.New("geoip database not available")
} }
record := &Location{} record := &Location{}
if err := db.Lookup(ip, record); err != nil { if err := db.Lookup(ip, record); err != nil {

View file

@ -191,10 +191,8 @@ func handleListenError(err error, ip net.IP, port uint16, primaryListener bool)
EventID: eventIDConflictingService + secondaryEventIDSuffix, EventID: eventIDConflictingService + secondaryEventIDSuffix,
Type: notifications.Error, Type: notifications.Error,
Title: "Conflicting DNS Software", Title: "Conflicting DNS Software",
Message: fmt.Sprintf( Message: "Restart Portmaster after you have deactivated or properly configured the conflicting software: " +
"Restart Portmaster after you have deactivated or properly configured the conflicting software: %s",
cfDescription, cfDescription,
),
ShowOnSystem: true, ShowOnSystem: true,
AvailableActions: []*notifications.Action{ AvailableActions: []*notifications.Action{
{ {

View file

@ -21,6 +21,8 @@ import (
var hostname string var hostname string
const internalError = "internal error: "
func handleRequestAsWorker(w dns.ResponseWriter, query *dns.Msg) { func handleRequestAsWorker(w dns.ResponseWriter, query *dns.Msg) {
err := module.RunWorker("handle dns request", func(ctx context.Context) error { err := module.RunWorker("handle dns request", func(ctx context.Context) error {
return handleRequest(ctx, w, query) return handleRequest(ctx, w, query)
@ -130,7 +132,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Tracef("nameserver: delaying failing lookup until end of fail duration for %s", remainingFailingDuration.Round(time.Millisecond)) tracer.Tracef("nameserver: delaying failing lookup until end of fail duration for %s", remainingFailingDuration.Round(time.Millisecond))
time.Sleep(remainingFailingDuration) time.Sleep(remainingFailingDuration)
return reply(nsutil.ServerFailure( return reply(nsutil.ServerFailure(
"internal error: "+failingErr.Error(), internalError+failingErr.Error(),
"delayed failing query to mitigate request flooding", "delayed failing query to mitigate request flooding",
)) ))
} }
@ -138,7 +140,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Tracef("nameserver: delaying failing lookup for %s", failingDelay.Round(time.Millisecond)) tracer.Tracef("nameserver: delaying failing lookup for %s", failingDelay.Round(time.Millisecond))
time.Sleep(failingDelay) time.Sleep(failingDelay)
return reply(nsutil.ServerFailure( return reply(nsutil.ServerFailure(
"internal error: "+failingErr.Error(), internalError+failingErr.Error(),
"delayed failing query to mitigate request flooding", "delayed failing query to mitigate request flooding",
fmt.Sprintf("error is cached for another %s", remainingFailingDuration.Round(time.Millisecond)), fmt.Sprintf("error is cached for another %s", remainingFailingDuration.Round(time.Millisecond)),
)) ))
@ -148,7 +150,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
local, err := netenv.IsMyIP(remoteAddr.IP) local, err := netenv.IsMyIP(remoteAddr.IP)
if err != nil { if err != nil {
tracer.Warningf("nameserver: failed to check if request for %s is local: %s", q.ID(), err) tracer.Warningf("nameserver: failed to check if request for %s is local: %s", q.ID(), err)
return reply(nsutil.ServerFailure("internal error: failed to check if request is local")) return reply(nsutil.ServerFailure(internalError + " failed to check if request is local"))
} }
// Create connection ID for dns request. // Create connection ID for dns request.
@ -170,7 +172,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
conn, err = network.NewConnectionFromExternalDNSRequest(ctx, q.FQDN, nil, connID, remoteAddr.IP) conn, err = network.NewConnectionFromExternalDNSRequest(ctx, q.FQDN, nil, connID, remoteAddr.IP)
if err != nil { if err != nil {
tracer.Warningf("nameserver: failed to get host/profile for request for %s%s: %s", q.FQDN, q.QType, err) tracer.Warningf("nameserver: failed to get host/profile for request for %s%s: %s", q.FQDN, q.QType, err)
return reply(nsutil.ServerFailure("internal error: failed to get profile")) return reply(nsutil.ServerFailure(internalError + "failed to get profile"))
} }
default: default:
@ -210,7 +212,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case network.VerdictUndecided, network.VerdictAccept: case network.VerdictUndecided, network.VerdictAccept:
// Check if we have a response. // Check if we have a response.
if rrCache == nil { if rrCache == nil {
conn.Failed("internal error: no reply", "") conn.Failed(internalError+"no reply", "")
return return
} }
@ -293,7 +295,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Warningf("nameserver: failed to resolve %s: %s", q.ID(), err) tracer.Warningf("nameserver: failed to resolve %s: %s", q.ID(), err)
conn.Failed(fmt.Sprintf("query failed: %s", err), "") conn.Failed(fmt.Sprintf("query failed: %s", err), "")
addFailingQuery(q, err) addFailingQuery(q, err)
return reply(nsutil.ServerFailure("internal error: " + err.Error())) return reply(nsutil.ServerFailure(internalError + err.Error()))
} }
} }
// Handle special cases. // Handle special cases.
@ -301,7 +303,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case rrCache == nil: case rrCache == nil:
tracer.Warning("nameserver: received successful, but empty reply from resolver") tracer.Warning("nameserver: received successful, but empty reply from resolver")
addFailingQuery(q, errors.New("emptry reply from resolver")) addFailingQuery(q, errors.New("emptry reply from resolver"))
return reply(nsutil.ServerFailure("internal error: empty reply")) return reply(nsutil.ServerFailure(internalError + "empty reply"))
case rrCache.RCode == dns.RcodeNameError: case rrCache.RCode == dns.RcodeNameError:
// Try alternatives domain names for unofficial domain spaces. // Try alternatives domain names for unofficial domain spaces.
altRRCache := checkAlternativeCaches(ctx, q) altRRCache := checkAlternativeCaches(ctx, q)

View file

@ -42,7 +42,7 @@ func (ch *ActiveChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.Requ
orm.WithResult(&result), orm.WithResult(&result),
orm.WithSchema(*ch.Database.Schema), orm.WithSchema(*ch.Database.Schema),
); err != nil { ); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError) http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return return
} }
@ -77,7 +77,7 @@ func (ch *ActiveChartHandler) parseRequest(req *http.Request) (*QueryActiveConne
var requestPayload QueryActiveConnectionChartPayload var requestPayload QueryActiveConnectionChartPayload
blob, err := io.ReadAll(body) blob, err := io.ReadAll(body)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error()) return nil, fmt.Errorf("failed to read body: %w", err)
} }
body = bytes.NewReader(blob) body = bytes.NewReader(blob)
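
Besides switching to the shared failedQuery constant, the important fix above is the read-body error: fmt.Errorf("failed to read body" + err.Error()) treats the concatenated text as a format string (which can misbehave if the error contains % verbs) and drops the error chain, while %w wraps the cause. A minimal sketch:

package example

import (
	"fmt"
	"io"
)

func readBody(body io.Reader) ([]byte, error) {
	blob, err := io.ReadAll(body)
	if err != nil {
		// %w keeps err in the chain so callers can still match it with
		// errors.Is / errors.As; string concatenation throws that away.
		return nil, fmt.Errorf("failed to read body: %w", err)
	}
	return blob, nil
}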

View file

@ -49,7 +49,7 @@ func (ch *BandwidthChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.R
orm.WithResult(&result), orm.WithResult(&result),
orm.WithSchema(*ch.Database.Schema), orm.WithSchema(*ch.Database.Schema),
); err != nil { ); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError) http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return return
} }
@ -84,7 +84,7 @@ func (ch *BandwidthChartHandler) parseRequest(req *http.Request) (*BandwidthChar
var requestPayload BandwidthChartRequest var requestPayload BandwidthChartRequest
blob, err := io.ReadAll(body) blob, err := io.ReadAll(body)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error()) return nil, fmt.Errorf("failed to read body: %w", err)
} }
body = bytes.NewReader(blob) body = bytes.NewReader(blob)

View file

@ -23,18 +23,18 @@ type (
// insert or an update. // insert or an update.
// The ID of Conn is unique and can be trusted to never collide with other // The ID of Conn is unique and can be trusted to never collide with other
// connections of the save device. // connections of the save device.
Save(context.Context, Conn, bool) error Save(ctx context.Context, conn Conn, history bool) error
// MarkAllHistoryConnectionsEnded marks all active connections in the history // MarkAllHistoryConnectionsEnded marks all active connections in the history
// database as ended NOW. // database as ended NOW.
MarkAllHistoryConnectionsEnded(context.Context) error MarkAllHistoryConnectionsEnded(ctx context.Context) error
// RemoveAllHistoryData removes all connections from the history database. // RemoveAllHistoryData removes all connections from the history database.
RemoveAllHistoryData(context.Context) error RemoveAllHistoryData(ctx context.Context) error
// RemoveHistoryForProfile removes all connections from the history database. // RemoveHistoryForProfile removes all connections from the history database.
// for a given profile ID (source/id) // for a given profile ID (source/id)
RemoveHistoryForProfile(context.Context, string) error RemoveHistoryForProfile(ctx context.Context, profile string) error
// UpdateBandwidth updates bandwidth data for the connection and optionally also writes // UpdateBandwidth updates bandwidth data for the connection and optionally also writes
// the bandwidth data to the history database. // the bandwidth data to the history database.
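
Naming the parameters of interface methods (the check inamedparam performs) makes the contract self-documenting, especially for bare booleans. A tiny sketch of the pattern with invented types:

package example

import "context"

type Conn struct{ ID string }

// With names, the meaning of the bool is visible in the interface itself,
// instead of just "Save(context.Context, Conn, bool)".
type ConnStore interface {
	Save(ctx context.Context, conn Conn, history bool) error
	RemoveHistoryForProfile(ctx context.Context, profile string) error
}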

View file

@ -41,13 +41,13 @@ type (
// by *sqlite.Stmt. // by *sqlite.Stmt.
Stmt interface { Stmt interface {
ColumnCount() int ColumnCount() int
ColumnName(int) string ColumnName(col int) string
ColumnType(int) sqlite.ColumnType ColumnType(col int) sqlite.ColumnType
ColumnText(int) string ColumnText(col int) string
ColumnBool(int) bool ColumnBool(col int) bool
ColumnFloat(int) float64 ColumnFloat(col int) float64
ColumnInt(int) int ColumnInt(col int) int
ColumnReader(int) *bytes.Reader ColumnReader(col int) *bytes.Reader
} }
// DecodeFunc is called for each non-basic type during decoding. // DecodeFunc is called for each non-basic type during decoding.
@ -230,7 +230,7 @@ func DatetimeDecoder(loc *time.Location) DecodeFunc {
case sqlite.TypeFloat: case sqlite.TypeFloat:
// stored as Julian day numbers // stored as Julian day numbers
return nil, false, fmt.Errorf("REAL storage type not support for time.Time") return nil, false, errors.New("REAL storage type not support for time.Time")
case sqlite.TypeNull: case sqlite.TypeNull:
return nil, true, nil return nil, true, nil
@ -359,7 +359,7 @@ func decodeBasic() DecodeFunc {
case reflect.Slice: case reflect.Slice:
if outval.Type().Elem().Kind() != reflect.Uint8 { if outval.Type().Elem().Kind() != reflect.Uint8 {
return nil, false, fmt.Errorf("slices other than []byte for BLOB are not supported") return nil, false, errors.New("slices other than []byte for BLOB are not supported")
} }
if colType != sqlite.TypeBlob { if colType != sqlite.TypeBlob {

View file

@ -2,6 +2,7 @@ package orm
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"time" "time"
@ -171,7 +172,7 @@ func DatetimeEncoder(loc *time.Location) EncodeFunc {
valInterface := val.Interface() valInterface := val.Interface()
t, ok = valInterface.(time.Time) t, ok = valInterface.(time.Time)
if !ok { if !ok {
return nil, false, fmt.Errorf("cannot convert reflect value to time.Time") return nil, false, errors.New("cannot convert reflect value to time.Time")
} }
case valType.Kind() == reflect.String && colDef.IsTime: case valType.Kind() == reflect.String && colDef.IsTime:

View file

@ -6,6 +6,7 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"zombiezen.com/go/sqlite" "zombiezen.com/go/sqlite"
) )
@ -120,7 +121,7 @@ func TestEncodeAsMap(t *testing.T) { //nolint:tparallel
c := cases[idx] c := cases[idx]
t.Run(c.Desc, func(t *testing.T) { t.Run(c.Desc, func(t *testing.T) {
res, err := ToParamMap(ctx, c.Input, "", DefaultEncodeConfig, nil) res, err := ToParamMap(ctx, c.Input, "", DefaultEncodeConfig, nil)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, c.Expected, res) assert.Equal(t, c.Expected, res)
}) })
} }
@ -253,7 +254,7 @@ func TestEncodeValue(t *testing.T) { //nolint:tparallel
c := cases[idx] c := cases[idx]
t.Run(c.Desc, func(t *testing.T) { t.Run(c.Desc, func(t *testing.T) {
res, err := EncodeValue(ctx, &c.Column, c.Input, DefaultEncodeConfig) res, err := EncodeValue(ctx, &c.Column, c.Input, DefaultEncodeConfig)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, c.Output, res) assert.Equal(t, c.Output, res)
}) })
} }
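
Switching assert.NoError to require.NoError changes failure behaviour: require stops the test immediately, so the Equal check that follows never runs against a meaningless value when the call under test already failed. A minimal sketch of the convention, with a placeholder someFunc:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func someFunc() (string, error) { return "expected", nil }

func TestSketch(t *testing.T) {
	got, err := someFunc()
	// require aborts the test here on error; assert would continue and the
	// comparison below would run against garbage.
	require.NoError(t, err)
	assert.Equal(t, "expected", got)
}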

View file

@ -274,7 +274,7 @@ func applyStructFieldTag(fieldType reflect.StructField, def *ColumnDef) error {
case sqlite.TypeText: case sqlite.TypeText:
def.Default = defaultValue def.Default = defaultValue
case sqlite.TypeBlob: case sqlite.TypeBlob:
return fmt.Errorf("default values for TypeBlob not yet supported") return errors.New("default values for TypeBlob not yet supported")
default: default:
return fmt.Errorf("failed to apply default value for unknown sqlite column type %s", def.Type) return fmt.Errorf("failed to apply default value for unknown sqlite column type %s", def.Type)
} }

View file

@ -4,6 +4,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestSchemaBuilder(t *testing.T) { func TestSchemaBuilder(t *testing.T) {
@ -37,7 +38,7 @@ func TestSchemaBuilder(t *testing.T) {
c := cases[idx] c := cases[idx]
res, err := GenerateTableSchema(c.Name, c.Model) res, err := GenerateTableSchema(c.Name, c.Model)
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, c.ExpectedSQL, res.CreateStatement("main", false)) assert.Equal(t, c.ExpectedSQL, res.CreateStatement("main", false))
} }
} }

View file

@ -19,6 +19,8 @@ import (
var charOnlyRegexp = regexp.MustCompile("[a-zA-Z]+") var charOnlyRegexp = regexp.MustCompile("[a-zA-Z]+")
const failedQuery = "Failed to execute query: "
type ( type (
// QueryHandler implements http.Handler and allows to perform SQL // QueryHandler implements http.Handler and allows to perform SQL
@ -78,7 +80,7 @@ func (qh *QueryHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
orm.WithResult(&result), orm.WithResult(&result),
orm.WithSchema(*qh.Database.Schema), orm.WithSchema(*qh.Database.Schema),
); err != nil { ); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError) http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return return
} }
@ -230,7 +232,7 @@ func parseQueryRequestPayload[T any](req *http.Request) (*T, error) { //nolint:d
blob, err := io.ReadAll(body) blob, err := io.ReadAll(body)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error()) return nil, fmt.Errorf("failed to read body: %w", err)
} }
body = bytes.NewReader(blob) body = bytes.NewReader(blob)

View file

@ -102,7 +102,7 @@ func TestUnmarshalQuery(t *testing.T) { //nolint:tparallel
assert.Equal(t, c.Error.Error(), err.Error()) assert.Equal(t, c.Error.Error(), err.Error())
} }
} else { } else {
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, c.Expected, q) assert.Equal(t, c.Expected, q)
} }
}) })
@ -241,7 +241,7 @@ func TestQueryBuilder(t *testing.T) { //nolint:tparallel
assert.Equal(t, c.E.Error(), err.Error(), "test case %d", cID) assert.Equal(t, c.E.Error(), err.Error(), "test case %d", cID)
} }
} else { } else {
assert.NoError(t, err, "test case %d", cID) require.NoError(t, err, "test case %d", cID)
assert.Equal(t, c.P, params, "test case %d", cID) assert.Equal(t, c.P, params, "test case %d", cID)
assert.Equal(t, c.R, str, "test case %d", cID) assert.Equal(t, c.R, str, "test case %d", cID)
} }

View file

@ -136,11 +136,11 @@ func AddNetworkDebugData(di *debug.Info, profile, where string) {
// Collect matching connections. // Collect matching connections.
var ( //nolint:prealloc // We don't know the size. var ( //nolint:prealloc // We don't know the size.
debugConns []*Connection debugConns []*Connection
accepted int accepted int
total int total int
) )
for maybeConn := range it.Next { for maybeConn := range it.Next {
// Switch to correct type. // Switch to correct type.
conn, ok := maybeConn.(*Connection) conn, ok := maybeConn.(*Connection)

View file

@ -751,12 +751,14 @@ func (conn *Connection) SaveWhenFinished() {
func (conn *Connection) Save() { func (conn *Connection) Save() {
conn.UpdateMeta() conn.UpdateMeta()
// nolint:exhaustive
switch conn.Verdict { switch conn.Verdict {
case VerdictAccept, VerdictRerouteToNameserver: case VerdictAccept, VerdictRerouteToNameserver:
conn.ConnectionEstablished = true conn.ConnectionEstablished = true
case VerdictRerouteToTunnel: case VerdictRerouteToTunnel:
// this is already handled when the connection tunnel has been // this is already handled when the connection tunnel has been
// established. // established.
default:
} }
// Do not save/update until data is complete. // Do not save/update until data is complete.

View file

@ -4,6 +4,7 @@ import (
"context" "context"
"fmt" "fmt"
"net" "net"
"strconv"
"github.com/google/gopacket" "github.com/google/gopacket"
) )
@ -207,9 +208,9 @@ func (pkt *Base) FmtRemoteIP() string {
func (pkt *Base) FmtRemotePort() string { func (pkt *Base) FmtRemotePort() string {
if pkt.info.SrcPort != 0 { if pkt.info.SrcPort != 0 {
if pkt.info.Inbound { if pkt.info.Inbound {
return fmt.Sprintf("%d", pkt.info.SrcPort) return strconv.FormatUint(uint64(pkt.info.SrcPort), 10)
} }
return fmt.Sprintf("%d", pkt.info.DstPort) return strconv.FormatUint(uint64(pkt.info.DstPort), 10)
} }
return "-" return "-"
} }
@ -235,10 +236,10 @@ type Packet interface {
ExpectInfo() bool ExpectInfo() bool
// Info. // Info.
SetCtx(context.Context) SetCtx(ctx context.Context)
Ctx() context.Context Ctx() context.Context
Info() *Info Info() *Info
SetPacketInfo(Info) SetPacketInfo(info Info)
IsInbound() bool IsInbound() bool
IsOutbound() bool IsOutbound() bool
SetInbound() SetInbound()
@ -253,8 +254,8 @@ type Packet interface {
Payload() []byte Payload() []byte
// Matching. // Matching.
MatchesAddress(bool, IPProtocol, *net.IPNet, uint16) bool MatchesAddress(remote bool, protocol IPProtocol, network *net.IPNet, port uint16) bool
MatchesIP(bool, *net.IPNet) bool MatchesIP(endpoint bool, network *net.IPNet) bool
// Formatting. // Formatting.
String() string String() string

View file

@ -44,7 +44,7 @@ type Address struct {
// Info is a generic interface to both ConnectionInfo and BindInfo. // Info is a generic interface to both ConnectionInfo and BindInfo.
type Info interface { type Info interface {
GetPID() int GetPID() int
SetPID(int) SetPID(pid int)
GetUID() int GetUID() int
GetUIDandInode() (int, int) GetUIDandInode() (int, int)
} }

View file

@ -2,7 +2,6 @@ package process
import ( import (
"errors" "errors"
"fmt"
"net/http" "net/http"
"strconv" "strconv"
@ -70,7 +69,7 @@ func handleGetProcessesByProfile(ar *api.Request) (any, error) {
source := ar.URLVars["source"] source := ar.URLVars["source"]
id := ar.URLVars["id"] id := ar.URLVars["id"]
if id == "" || source == "" { if id == "" || source == "" {
return nil, api.ErrorWithStatus(fmt.Errorf("missing profile source/id"), http.StatusBadRequest) return nil, api.ErrorWithStatus(errors.New("missing profile source/id"), http.StatusBadRequest)
} }
result := GetProcessesWithProfile(ar.Context(), profile.ProfileSource(source), id, true) result := GetProcessesWithProfile(ar.Context(), profile.ProfileSource(source), id, true)

View file

@ -72,7 +72,8 @@ func GetProcessesWithProfile(ctx context.Context, profileSource profile.ProfileS
slices.SortFunc[[]*Process, *Process](procs, func(a, b *Process) int { slices.SortFunc[[]*Process, *Process](procs, func(a, b *Process) int {
return strings.Compare(a.processKey, b.processKey) return strings.Compare(a.processKey, b.processKey)
}) })
slices.CompactFunc[[]*Process, *Process](procs, func(a, b *Process) bool {
procs = slices.CompactFunc[[]*Process, *Process](procs, func(a, b *Process) bool {
return a.processKey == b.processKey return a.processKey == b.processKey
}) })
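
This one is more than a style fix: slices.CompactFunc returns the compacted slice, and the previous code dropped that return value, so the caller kept the old length and the stale trailing elements. A minimal sketch with plain strings standing in for *Process:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	procs := []string{"a", "a", "b", "b", "c"}
	slices.SortFunc(procs, strings.Compare)
	// CompactFunc returns the de-duplicated slice; without the reassignment
	// the caller still sees the original length.
	procs = slices.CompactFunc(procs, func(a, b string) bool { return a == b })
	fmt.Println(procs) // [a b c]
}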

View file

@ -40,6 +40,6 @@ func AddToDebugInfo(di *debug.Info) {
fmt.Sprintf("Status: %s", netenv.GetOnlineStatus()), fmt.Sprintf("Status: %s", netenv.GetOnlineStatus()),
debug.UseCodeSection|debug.AddContentLineBreaks, debug.UseCodeSection|debug.AddContentLineBreaks,
fmt.Sprintf("OnlineStatus: %s", netenv.GetOnlineStatus()), fmt.Sprintf("OnlineStatus: %s", netenv.GetOnlineStatus()),
fmt.Sprintf("CaptivePortal: %s", netenv.GetCaptivePortal().URL), "CaptivePortal: "+netenv.GetCaptivePortal().URL,
) )
} }

View file

@ -25,6 +25,8 @@ const (
ReleaseChannelSupport = "support" ReleaseChannelSupport = "support"
) )
const jsonSuffix = ".json"
// SetIndexes sets the update registry indexes and also configures the registry // SetIndexes sets the update registry indexes and also configures the registry
// to use pre-releases based on the channel. // to use pre-releases based on the channel.
func SetIndexes( func SetIndexes(
@ -51,12 +53,12 @@ func SetIndexes(
// Always add the stable index as a base. // Always add the stable index as a base.
registry.AddIndex(updater.Index{ registry.AddIndex(updater.Index{
Path: ReleaseChannelStable + ".json", Path: ReleaseChannelStable + jsonSuffix,
AutoDownload: autoDownload, AutoDownload: autoDownload,
}) })
// Add beta index if in beta or staging channel. // Add beta index if in beta or staging channel.
indexPath := ReleaseChannelBeta + ".json" indexPath := ReleaseChannelBeta + jsonSuffix
if releaseChannel == ReleaseChannelBeta || if releaseChannel == ReleaseChannelBeta ||
releaseChannel == ReleaseChannelStaging || releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) { (releaseChannel == "" && indexExists(registry, indexPath)) {
@ -74,7 +76,7 @@ func SetIndexes(
} }
// Add staging index if in staging channel. // Add staging index if in staging channel.
indexPath = ReleaseChannelStaging + ".json" indexPath = ReleaseChannelStaging + jsonSuffix
if releaseChannel == ReleaseChannelStaging || if releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) { (releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{ registry.AddIndex(updater.Index{
@ -91,7 +93,7 @@ func SetIndexes(
} }
// Add support index if in support channel. // Add support index if in support channel.
indexPath = ReleaseChannelSupport + ".json" indexPath = ReleaseChannelSupport + jsonSuffix
if releaseChannel == ReleaseChannelSupport || if releaseChannel == ReleaseChannelSupport ||
(releaseChannel == "" && indexExists(registry, indexPath)) { (releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{ registry.AddIndex(updater.Index{

View file

@ -226,7 +226,7 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
updateASAP = true updateASAP = true
case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates(): case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
return fmt.Errorf("automatic updating is disabled") return errors.New("automatic updating is disabled")
default: default:
if forceIndexCheck { if forceIndexCheck {
@ -254,7 +254,7 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
func DisableUpdateSchedule() error { func DisableUpdateSchedule() error {
switch module.Status() { switch module.Status() {
case modules.StatusStarting, modules.StatusOnline, modules.StatusStopping: case modules.StatusStarting, modules.StatusOnline, modules.StatusStopping:
return fmt.Errorf("module already online") return errors.New("module already online")
} }
disableTaskSchedule = true disableTaskSchedule = true

View file

@ -1,6 +1,7 @@
package access package access
import ( import (
"errors"
"fmt" "fmt"
"net/http" "net/http"
@ -86,7 +87,7 @@ func registerAPIEndpoints() error {
DataFunc: func(ar *api.Request) (data []byte, err error) { DataFunc: func(ar *api.Request) (data []byte, err error) {
featureID, ok := ar.URLVars["id"] featureID, ok := ar.URLVars["id"]
if !ok { if !ok {
return nil, fmt.Errorf("invalid feature id") return nil, errors.New("invalid feature id")
} }
for _, feature := range features { for _, feature := range features {
@ -95,7 +96,7 @@ func registerAPIEndpoints() error {
} }
} }
return nil, fmt.Errorf("feature id not found") return nil, errors.New("feature id not found")
}, },
}); err != nil { }); err != nil {
return err return err

View file

@ -128,7 +128,7 @@ findCandidates:
if err != nil { if err != nil {
return fmt.Errorf("failed to connect to a new home hub - tried %d hubs: %w", tries+1, err) return fmt.Errorf("failed to connect to a new home hub - tried %d hubs: %w", tries+1, err)
} }
return fmt.Errorf("no home hub candidates available") return errors.New("no home hub candidates available")
} }
func connectToHomeHub(ctx context.Context, dst *hub.Hub) error { func connectToHomeHub(ctx context.Context, dst *hub.Hub) error {
@ -200,7 +200,7 @@ func connectToHomeHub(ctx context.Context, dst *hub.Hub) error {
// Set new home on map. // Set new home on map.
ok := navigator.Main.SetHome(dst.ID, homeTerminal) ok := navigator.Main.SetHome(dst.ID, homeTerminal)
if !ok { if !ok {
return fmt.Errorf("failed to set home hub on map") return errors.New("failed to set home hub on map")
} }
// Assign crane to home hub in order to query it later. // Assign crane to home hub in order to query it later.

View file

@ -82,7 +82,7 @@ func (t *Tunnel) connectWorker(ctx context.Context) (err error) {
// TODO: Clean this up. // TODO: Clean this up.
t.connInfo.Lock() t.connInfo.Lock()
defer t.connInfo.Unlock() defer t.connInfo.Unlock()
t.connInfo.Failed(fmt.Sprintf("SPN failed to establish route: %s", err), "") t.connInfo.Failed("SPN failed to establish route: "+err.Error(), "")
t.connInfo.Save() t.connInfo.Save()
tracer.Warningf("spn/crew: failed to establish route for %s: %s", t.connInfo, err) tracer.Warningf("spn/crew: failed to establish route for %s: %s", t.connInfo, err)
@ -97,7 +97,7 @@ func (t *Tunnel) connectWorker(ctx context.Context) (err error) {
t.connInfo.Lock() t.connInfo.Lock()
defer t.connInfo.Unlock() defer t.connInfo.Unlock()
t.connInfo.Failed(fmt.Sprintf("SPN failed to initialize data tunnel (connect op): %s", tErr.Error()), "") t.connInfo.Failed("SPN failed to initialize data tunnel (connect op): "+tErr.Error(), "")
t.connInfo.Save() t.connInfo.Save()
// TODO: try with another route? // TODO: try with another route?

View file

@ -5,7 +5,7 @@ import (
"net" "net"
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require"
) )
func TestCheckStringFormat(t *testing.T) { func TestCheckStringFormat(t *testing.T) {
@ -48,9 +48,9 @@ func TestCheckStringFormat(t *testing.T) {
for testCharacter, isPermitted := range testSet { for testCharacter, isPermitted := range testSet {
if isPermitted { if isPermitted {
assert.NoError(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3)) require.NoError(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
} else { } else {
assert.Error(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3)) require.Error(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
} }
} }
} }
@ -59,22 +59,22 @@ func TestCheckIPFormat(t *testing.T) {
t.Parallel() t.Parallel()
// IPv4 // IPv4
assert.NoError(t, checkIPFormat("test IP 1.1.1.1", net.IPv4(1, 1, 1, 1))) require.NoError(t, checkIPFormat("test IP 1.1.1.1", net.IPv4(1, 1, 1, 1)))
assert.NoError(t, checkIPFormat("test IP 192.168.1.1", net.IPv4(192, 168, 1, 1))) require.NoError(t, checkIPFormat("test IP 192.168.1.1", net.IPv4(192, 168, 1, 1)))
assert.Error(t, checkIPFormat("test IP 255.0.0.1", net.IPv4(255, 0, 0, 1))) require.Error(t, checkIPFormat("test IP 255.0.0.1", net.IPv4(255, 0, 0, 1)))
// IPv6 // IPv6
assert.NoError(t, checkIPFormat("test IP ::1", net.ParseIP("::1"))) require.NoError(t, checkIPFormat("test IP ::1", net.ParseIP("::1")))
assert.NoError(t, checkIPFormat("test IP 2606:4700:4700::1111", net.ParseIP("2606:4700:4700::1111"))) require.NoError(t, checkIPFormat("test IP 2606:4700:4700::1111", net.ParseIP("2606:4700:4700::1111")))
// Invalid // Invalid
assert.Error(t, checkIPFormat("test IP with length 3", net.IP([]byte{0, 0, 0}))) require.Error(t, checkIPFormat("test IP with length 3", net.IP([]byte{0, 0, 0})))
assert.Error(t, checkIPFormat("test IP with length 5", net.IP([]byte{0, 0, 0, 0, 0}))) require.Error(t, checkIPFormat("test IP with length 5", net.IP([]byte{0, 0, 0, 0, 0})))
assert.Error(t, checkIPFormat( require.Error(t, checkIPFormat(
"test IP with length 15", "test IP with length 15",
net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
)) ))
assert.Error(t, checkIPFormat( require.Error(t, checkIPFormat(
"test IP with length 17", "test IP with length 17",
net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
)) ))

View file

@ -4,6 +4,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func parseT(t *testing.T, definition string) *Transport { func parseT(t *testing.T, definition string) *Transport {
@ -140,8 +141,8 @@ func TestTransportParsing(t *testing.T) {
// test invalid // test invalid
assert.NotEqual(t, parseTError("spn"), nil, "should fail") require.Error(t, parseTError("spn"), "should fail")
assert.NotEqual(t, parseTError("spn:"), nil, "should fail") require.Error(t, parseTError("spn:"), "should fail")
assert.NotEqual(t, parseTError("spn:0"), nil, "should fail") require.Error(t, parseTError("spn:0"), "should fail")
assert.NotEqual(t, parseTError("spn:65536"), nil, "should fail") require.Error(t, parseTError("spn:65536"), "should fail")
} }

View file

@ -210,9 +210,9 @@ func (m *Map) optimizeForSatelliteConnectivity(result *OptimizationResult) {
// Add to suggested pins. // Add to suggested pins.
if len(region.regardedPins) <= region.satelliteMinLanes { if len(region.regardedPins) <= region.satelliteMinLanes {
result.addSuggested(fmt.Sprintf("best to region %s", region.ID), region.regardedPins...) result.addSuggested("best to region "+region.ID, region.regardedPins...)
} else { } else {
result.addSuggested(fmt.Sprintf("best to region %s", region.ID), region.regardedPins[:region.satelliteMinLanes]...) result.addSuggested("best to region "+region.ID, region.regardedPins[:region.satelliteMinLanes]...)
} }
} }
} }

View file

@ -622,7 +622,7 @@ func (m *Map) updateQuickSettingExcludeCountryList(ctx context.Context, configKe
for _, country := range countryList { for _, country := range countryList {
quickSettings = append(quickSettings, config.QuickSetting{ quickSettings = append(quickSettings, config.QuickSetting{
Name: fmt.Sprintf("Exclude %s (%s)", country.Name, country.Code), Name: fmt.Sprintf("Exclude %s (%s)", country.Name, country.Code),
Value: []string{fmt.Sprintf("- %s", country.Code)}, Value: []string{"- " + country.Code},
Action: config.QuickMergeTop, Action: config.QuickMergeTop,
}) })
} }
@ -700,7 +700,7 @@ func (m *Map) updateSelectRuleCountryList(ctx context.Context, configKey string,
selections = append(selections, selectCountry{ selections = append(selections, selectCountry{
QuickSetting: config.QuickSetting{ QuickSetting: config.QuickSetting{
Name: fmt.Sprintf("%s (%s)", country.Name, country.Code), Name: fmt.Sprintf("%s (%s)", country.Name, country.Code),
Value: []string{fmt.Sprintf("+ %s", country.Code), "- *"}, Value: []string{"+ " + country.Code, "- *"},
Action: config.QuickReplace, Action: config.QuickReplace,
}, },
FlagID: country.Code, FlagID: country.Code,
@ -712,7 +712,7 @@ func (m *Map) updateSelectRuleCountryList(ctx context.Context, configKey string,
selections = append(selections, selectCountry{ selections = append(selections, selectCountry{
QuickSetting: config.QuickSetting{ QuickSetting: config.QuickSetting{
Name: fmt.Sprintf("%s (C:%s)", continent.Name, continent.Code), Name: fmt.Sprintf("%s (C:%s)", continent.Name, continent.Code),
Value: []string{fmt.Sprintf("+ C:%s", continent.Code), "- *"}, Value: []string{"+ C:" + continent.Code, "- *"},
Action: config.QuickReplace, Action: config.QuickReplace,
}, },
}) })

View file

@ -4,6 +4,7 @@ import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestSharedHTTP(t *testing.T) { //nolint:paralleltest // Test checks global state. func TestSharedHTTP(t *testing.T) { //nolint:paralleltest // Test checks global state.
@ -11,23 +12,23 @@ func TestSharedHTTP(t *testing.T) { //nolint:paralleltest // Test checks global
// Register multiple handlers. // Register multiple handlers.
err := addHTTPHandler(testPort, "", ServeInfoPage) err := addHTTPHandler(testPort, "", ServeInfoPage)
assert.NoError(t, err, "should be able to share http listener") require.NoError(t, err, "should be able to share http listener")
err = addHTTPHandler(testPort, "/test", ServeInfoPage) err = addHTTPHandler(testPort, "/test", ServeInfoPage)
assert.NoError(t, err, "should be able to share http listener") require.NoError(t, err, "should be able to share http listener")
err = addHTTPHandler(testPort, "/test2", ServeInfoPage) err = addHTTPHandler(testPort, "/test2", ServeInfoPage)
assert.NoError(t, err, "should be able to share http listener") require.NoError(t, err, "should be able to share http listener")
err = addHTTPHandler(testPort, "/", ServeInfoPage) err = addHTTPHandler(testPort, "/", ServeInfoPage)
assert.Error(t, err, "should fail to register path twice") require.Error(t, err, "should fail to register path twice")
// Unregister // Unregister
assert.NoError(t, removeHTTPHandler(testPort, "")) require.NoError(t, removeHTTPHandler(testPort, ""))
assert.NoError(t, removeHTTPHandler(testPort, "/test")) require.NoError(t, removeHTTPHandler(testPort, "/test"))
assert.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error require.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
assert.NoError(t, removeHTTPHandler(testPort, "/test2")) require.NoError(t, removeHTTPHandler(testPort, "/test2"))
assert.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error require.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
// Check if all handlers are gone again. // Check if all handlers are gone again.
sharedHTTPServersLock.Lock() sharedHTTPServersLock.Lock()
defer sharedHTTPServersLock.Unlock() defer sharedHTTPServersLock.Unlock()
assert.Equal(t, 0, len(sharedHTTPServers), "shared http handlers should be back to zero") assert.Empty(t, sharedHTTPServers, "shared http handlers should be back to zero")
} }

View file

@ -47,7 +47,7 @@ func StartSluice(network, address string) {
// Start service worker. // Start service worker.
module.StartServiceWorker( module.StartServiceWorker(
fmt.Sprintf("%s sluice listener", s.network), s.network+" sluice listener",
10*time.Second, 10*time.Second,
s.listenHandler, s.listenHandler,
) )