Merge pull request from safing/fix/linter

Fix linting errors
Daniel Hååvi 2024-03-27 16:21:40 +01:00 committed by GitHub
commit a268341c52
GPG key ID: B5690EEEBB952194
48 changed files with 167 additions and 153 deletions

View file

@ -38,6 +38,9 @@ linters:
- whitespace
- wrapcheck
- wsl
- perfsprint # TODO(ppacher): we should re-enanble this one to avoid costly fmt.* calls in the hot-path
- testifylint
- gomoddirectives
linters-settings:
revive:
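For context on the perfsprint TODO above: that linter flags fmt calls that only format a single value, since they allocate and go through the fmt machinery. A minimal sketch of the substitution it asks for (illustrative only, not taken from this commit):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	port := 853
	// perfsprint would flag this: fmt.Sprintf for a single integer.
	viaFmt := fmt.Sprintf("port %d", port)
	// Cheaper form used elsewhere in this commit: strconv plus concatenation.
	viaStrconv := "port " + strconv.Itoa(port)
	fmt.Println(viaFmt == viaStrconv) // true
}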

View file

@ -9,8 +9,9 @@ import (
"image"
"image/png"
"github.com/safing/portbase/log"
"golang.org/x/image/draw"
"github.com/safing/portbase/log"
)
// Colored Icon IDs.
@ -35,7 +36,7 @@ var (
//go:embed data/icons/pm_light_blue_512.png
BluePNG []byte
// ColoredIcons holds all the icons as .PNGs
// ColoredIcons holds all the icons as .PNGs.
ColoredIcons [4][]byte
)

View file

@ -2,7 +2,7 @@ package main
import (
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/cookiejar"
"strings"
@ -16,9 +16,7 @@ const (
apiShutdownEndpoint = "core/shutdown"
)
var (
httpApiClient *http.Client
)
var httpAPIClient *http.Client
func init() {
// Make cookie jar.
@ -29,22 +27,22 @@ func init() {
}
// Create client.
httpApiClient = &http.Client{
httpAPIClient = &http.Client{
Jar: jar,
Timeout: 3 * time.Second,
}
}
func httpApiAction(endpoint string) (response string, err error) {
func httpAPIAction(endpoint string) (response string, err error) {
// Make action request.
resp, err := httpApiClient.Post(apiBaseURL+endpoint, "", nil)
resp, err := httpAPIClient.Post(apiBaseURL+endpoint, "", nil)
if err != nil {
return "", fmt.Errorf("request failed: %w", err)
}
// Read the response body.
defer resp.Body.Close()
respData, err := ioutil.ReadAll(resp.Body)
defer func() { _ = resp.Body.Close() }()
respData, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("failed to read data: %w", err)
}
@ -60,6 +58,6 @@ func httpApiAction(endpoint string) (response string, err error) {
// TriggerShutdown triggers a shutdown via the APi.
func TriggerShutdown() error {
_, err := httpApiAction(apiShutdownEndpoint)
_, err := httpAPIAction(apiShutdownEndpoint)
return err
}
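The hunk above bundles three recurring fixes: the initialism rename (httpApiClient to httpAPIClient), the move from the deprecated io/ioutil package to io (deprecated since Go 1.16), and an explicitly discarded error on the deferred Body.Close. A hedged, self-contained sketch of the resulting pattern; the function name and URL below are illustrative, not the project's:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func postAction(url string) (string, error) {
	resp, err := http.Post(url, "", nil)
	if err != nil {
		return "", fmt.Errorf("request failed: %w", err)
	}
	// Discard the Close error explicitly so errcheck is satisfied.
	defer func() { _ = resp.Body.Close() }()

	data, err := io.ReadAll(resp.Body) // io.ReadAll replaces ioutil.ReadAll
	if err != nil {
		return "", fmt.Errorf("failed to read data: %w", err)
	}
	return string(data), nil
}

func main() {
	if body, err := postAction("http://127.0.0.1:8080/action"); err == nil {
		fmt.Println(body)
	}
}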

View file

@ -18,7 +18,7 @@ func ensureAppIcon() (location string, err error) {
if appIconPath == "" {
appIconPath = filepath.Join(dataDir, "exec", "portmaster.png")
}
err = os.WriteFile(appIconPath, icons.PNG, 0o0644)
err = os.WriteFile(appIconPath, icons.PNG, 0o0644) // nolint:gosec
})
return appIconPath, err

View file

@ -52,6 +52,8 @@ var (
}
)
const query = "query "
func init() {
flag.StringVar(&dataDir, "data", "", "set data directory")
flag.BoolVar(&printStackOnExit, "print-stack-on-exit", false, "prints the stack before of shutting down")

View file

@ -14,7 +14,7 @@ type Notification struct {
systemID NotificationID
}
// IsSupported returns whether the action is supported on this system.
// IsSupportedAction returns whether the action is supported on this system.
func IsSupportedAction(a pbnotify.Action) bool {
switch a.Type {
case pbnotify.ActionTypeNone:
@ -26,11 +26,10 @@ func IsSupportedAction(a pbnotify.Action) bool {
// SelectAction sends an action back to the portmaster.
func (n *Notification) SelectAction(action string) {
new := &pbnotify.Notification{
upd := &pbnotify.Notification{
EventID: n.EventID,
SelectedActionID: action,
}
// FIXME: check response
apiClient.Update(fmt.Sprintf("%s%s", dbNotifBasePath, new.EventID), new, nil)
_ = apiClient.Update(fmt.Sprintf("%s%s", dbNotifBasePath, upd.EventID), upd, nil)
}

View file

@ -9,7 +9,6 @@ import (
"github.com/safing/portbase/api/client"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log"
pbnotify "github.com/safing/portbase/notifications"
)

View file

@ -2,9 +2,11 @@ package main
import (
"context"
"errors"
"sync"
notify "github.com/dhaavi/go-notify"
"github.com/safing/portbase/log"
)
@ -45,7 +47,12 @@ listenForNotifications:
continue listenForNotifications
}
notification := n.(*Notification)
notification, ok := n.(*Notification)
if !ok {
log.Errorf("received invalid notification type %T", n)
continue listenForNotifications
}
log.Tracef("notify: received signal: %+v", sig)
if sig.ActionKey != "" {
@ -62,7 +69,6 @@ listenForNotifications:
}
}
}
}
func actionListener() {
@ -71,7 +77,7 @@ func actionListener() {
go handleActions(mainCtx, actions)
err := notify.SignalNotify(mainCtx, actions)
if err != nil && err != context.Canceled {
if err != nil && errors.Is(err, context.Canceled) {
log.Errorf("notify: signal listener failed: %s", err)
}
}
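Two things in this file are worth a note. The checked type assertion replaces a forcetypeassert finding. The errors.Is rewrite, however, looks like it inverts the original intent: the old condition logged every error except context.Canceled, while the new one logs only when the error is a cancellation. Preserving the intent would need a negation, roughly as in this sketch (illustrative code, not from the diff):

package main

import (
	"context"
	"errors"
	"fmt"
)

type Notification struct{ EventID string }

func handle(v interface{}) {
	// Checked assertion instead of a bare v.(*Notification).
	n, ok := v.(*Notification)
	if !ok {
		fmt.Printf("received invalid notification type %T\n", v)
		return
	}
	fmt.Println("handling", n.EventID)
}

func logListenerError(err error) {
	// Original intent: ignore cancellation, report everything else.
	if err != nil && !errors.Is(err, context.Canceled) {
		fmt.Println("notify: signal listener failed:", err)
	}
}

func main() {
	handle(&Notification{EventID: "42"})
	handle("not a notification")
	logListenerError(context.Canceled)         // silently ignored
	logListenerError(errors.New("bus closed")) // logged
}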

View file

@ -4,10 +4,11 @@ import (
"sync"
"time"
"github.com/tevino/abool"
"github.com/safing/portbase/api/client"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/log"
"github.com/tevino/abool"
)
const (
@ -48,10 +49,10 @@ func updateSPNStatus(s *SPNStatus) {
}
func spnStatusClient() {
moduleQueryOp := apiClient.Qsub("query "+spnModuleKey, handleSPNModuleUpdate)
moduleQueryOp := apiClient.Qsub(query+spnModuleKey, handleSPNModuleUpdate)
moduleQueryOp.EnableResuscitation()
statusQueryOp := apiClient.Qsub("query "+spnStatusKey, handleSPNStatusUpdate)
statusQueryOp := apiClient.Qsub(query+spnStatusKey, handleSPNStatusUpdate)
statusQueryOp.EnableResuscitation()
}
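The query constant introduced earlier (const query = "query ") replaces the repeated "query " literal here, a typical goconst-style cleanup; plain concatenation also avoids the fmt.Sprintf call that perfsprint would flag. A minimal sketch of the shape (names are illustrative):

package example

const query = "query "

// subscribeKey builds the subscription query string without fmt.Sprintf.
func subscribeKey(key string) string {
	return query + key // was: fmt.Sprintf("query %s", key)
}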

View file

@ -1,7 +1,6 @@
package main
import (
"fmt"
"sync"
"github.com/safing/portbase/api/client"
@ -14,7 +13,7 @@ const (
// Module Failure Status Values
// FailureNone = 0 // unused
// FailureHint = 1 // unused
// FailureHint = 1 // unused.
FailureWarning = 2
FailureError = 3
)
@ -92,7 +91,7 @@ func clearSubsystems() {
}
func subsystemsClient() {
subsystemsOp := apiClient.Qsub(fmt.Sprintf("query %s", subsystemsKeySpace), handleSubsystem)
subsystemsOp := apiClient.Qsub("query "+subsystemsKeySpace, handleSubsystem)
subsystemsOp.EnableResuscitation()
}

View file

@ -102,7 +102,6 @@ func onReady() {
}
func onExit() {
}
func triggerTrayUpdate() {
@ -172,7 +171,7 @@ func updateTray() {
// Set SPN status if changed.
if spnStatus != nil && activeSPNStatus != spnStatus.Status {
activeSPNStatus = spnStatus.Status
menuItemSPNStatus.SetTitle("SPN: " + strings.Title(activeSPNStatus))
menuItemSPNStatus.SetTitle("SPN: " + strings.Title(activeSPNStatus)) // nolint:staticcheck
}
// Set SPN switch if changed.
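The nolint:staticcheck marker is needed because strings.Title is deprecated (it mishandles Unicode word boundaries). If pulling in golang.org/x/text is acceptable, the suggested replacement looks roughly like this (a sketch, assuming that dependency is available):

package main

import (
	"fmt"

	"golang.org/x/text/cases"
	"golang.org/x/text/language"
)

func main() {
	status := "connected"
	fmt.Println("SPN: " + cases.Title(language.English).String(status)) // "SPN: Connected"
}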

View file

@ -79,7 +79,7 @@ func createInstanceLock(lockFilePath string) error {
// create lock file
// TODO: Investigate required permissions.
err = os.WriteFile(lockFilePath, []byte(fmt.Sprintf("%d", os.Getpid())), 0o0666) //nolint:gosec
err = os.WriteFile(lockFilePath, []byte(strconv.Itoa(os.Getpid())), 0o0666) //nolint:gosec
if err != nil {
return err
}

View file

@ -3,6 +3,7 @@ package core
import (
"context"
"encoding/hex"
"errors"
"fmt"
"net/http"
"net/url"
@ -23,6 +24,8 @@ import (
"github.com/safing/portmaster/spn/captain"
)
var errInvalidReadPermission = errors.New("invalid read permission")
func registerAPIEndpoints() error {
if err := api.RegisterEndpoint(api.Endpoint{
Path: "core/shutdown",
@ -207,10 +210,10 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
// convert the requested read and write permissions to their api.Permission
// value. This ensures only "user" or "admin" permissions can be requested.
if getSavePermission(readPermStr) <= api.NotSupported {
return nil, fmt.Errorf("invalid read permission")
return nil, errInvalidReadPermission
}
if getSavePermission(writePermStr) <= api.NotSupported {
return nil, fmt.Errorf("invalid read permission")
return nil, errInvalidReadPermission
}
proc, err := process.GetProcessByRequestOrigin(ar)
@ -281,7 +284,7 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
select {
case key := <-ch:
if len(key) == 0 {
return nil, fmt.Errorf("access denied")
return nil, errors.New("access denied")
}
return map[string]interface{}{
@ -289,6 +292,6 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
"validUntil": validUntil,
}, nil
case <-ar.Context().Done():
return nil, fmt.Errorf("timeout")
return nil, errors.New("timeout")
}
}
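This file shows the usual split after such a lint pass: constant messages move to errors.New or a package-level sentinel (which also papers over the copy-pasted "invalid read permission" text on the write check), while fmt.Errorf stays for wrapping with %w. A hedged sketch of the pattern:

package main

import (
	"errors"
	"fmt"
)

// Package-level sentinel: constant message, comparable with errors.Is.
var errInvalidReadPermission = errors.New("invalid read permission")

func checkPermission(perm string) error {
	if perm == "" {
		return errInvalidReadPermission
	}
	return nil
}

func main() {
	err := fmt.Errorf("authorize app: %w", checkPermission(""))
	fmt.Println(errors.Is(err, errInvalidReadPermission)) // true
}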

View file

@ -4,6 +4,7 @@ package nfq
import (
"encoding/binary"
"errors"
"fmt"
ct "github.com/florianl/go-conntrack"
@ -35,7 +36,7 @@ func TeardownNFCT() {
// DeleteAllMarkedConnection deletes all marked entries from the conntrack table.
func DeleteAllMarkedConnection() error {
if nfct == nil {
return fmt.Errorf("nfq: nfct not initialized")
return errors.New("nfq: nfct not initialized")
}
// Delete all ipv4 marked connections
@ -87,7 +88,7 @@ func deleteMarkedConnections(nfct *ct.Nfct, f ct.Family) (deleted int) {
// DeleteMarkedConnection removes a specific connection from the conntrack table.
func DeleteMarkedConnection(conn *network.Connection) error {
if nfct == nil {
return fmt.Errorf("nfq: nfct not initialized")
return errors.New("nfq: nfct not initialized")
}
con := ct.Con{

View file

@ -612,18 +612,6 @@ func issueVerdict(conn *network.Connection, pkt packet.Packet, verdict network.V
}
}
// verdictRating rates the privacy and security aspect of verdicts from worst to best.
var verdictRating = []network.Verdict{
network.VerdictAccept, // Connection allowed in the open.
network.VerdictRerouteToTunnel, // Connection allowed, but protected.
network.VerdictRerouteToNameserver, // Connection allowed, but resolved via Portmaster.
network.VerdictBlock, // Connection blocked, with feedback.
network.VerdictDrop, // Connection blocked, without feedback.
network.VerdictFailed,
network.VerdictUndeterminable,
network.VerdictUndecided,
}
// func tunnelHandler(pkt packet.Packet) {
// tunnelInfo := GetTunnelInfo(pkt.Info().Dst)
// if tunnelInfo == nil {

View file

@ -2,9 +2,9 @@ package intel
import (
"context"
"fmt"
"net"
"sort"
"strconv"
"strings"
"sync"
@ -433,7 +433,7 @@ func (e *Entity) getASNLists(ctx context.Context) {
}
e.loadAsnListOnce.Do(func() {
asnStr := fmt.Sprintf("%d", asn)
asnStr := strconv.FormatUint(uint64(asn), 10)
list, err := filterlists.LookupASNString(asnStr)
if err != nil {
log.Tracer(ctx).Errorf("intel: failed to get ASN blocklist for %d: %s", asn, err)

View file

@ -103,18 +103,19 @@ func parseHeader(r io.Reader) (compressed bool, format byte, err error) {
if _, err = r.Read(listHeader[:]); err != nil {
// if we have an error here we can safely abort because
// the file must be broken
return
return compressed, format, err
}
if listHeader[0] != dsd.LIST {
err = fmt.Errorf("unexpected file type: %d (%c), expected dsd list", listHeader[0], listHeader[0])
return
return compressed, format, err
}
var compression [1]byte
if _, err = r.Read(compression[:]); err != nil {
// same here, a DSDL file must have at least 2 bytes header
return
return compressed, format, err
}
if compression[0] == dsd.GZIP {
@ -122,15 +123,16 @@ func parseHeader(r io.Reader) (compressed bool, format byte, err error) {
var formatSlice [1]byte
if _, err = r.Read(formatSlice[:]); err != nil {
return
return compressed, format, err
}
format = formatSlice[0]
return
return compressed, format, err
}
format = compression[0]
return // nolint:nakedret
return compressed, format, err
}
// byteReader extends an io.Reader to implement the ByteReader interface.
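The parseHeader changes spell out the named return values instead of relying on naked returns (and drop the old nolint:nakedret). A minimal illustration of the before/after shape, not the project's actual header format:

package main

import (
	"errors"
	"fmt"
)

// parseHeader returns its named results explicitly on every path.
func parseHeader(b []byte) (compressed bool, format byte, err error) {
	if len(b) < 2 {
		err = errors.New("header too short")
		return compressed, format, err // explicit, not a bare `return`
	}
	compressed = b[0] == 0x1f
	format = b[1]
	return compressed, format, err
}

func main() {
	fmt.Println(parseHeader([]byte{0x1f, 0x07}))
}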

View file

@ -1,7 +1,7 @@
package geoip
import (
"fmt"
"errors"
"net"
"github.com/oschwald/maxminddb-golang"
@ -16,7 +16,7 @@ func getReader(ip net.IP) *maxminddb.Reader {
func GetLocation(ip net.IP) (*Location, error) {
db := getReader(ip)
if db == nil {
return nil, fmt.Errorf("geoip database not available")
return nil, errors.New("geoip database not available")
}
record := &Location{}
if err := db.Lookup(ip, record); err != nil {

View file

@ -191,10 +191,8 @@ func handleListenError(err error, ip net.IP, port uint16, primaryListener bool)
EventID: eventIDConflictingService + secondaryEventIDSuffix,
Type: notifications.Error,
Title: "Conflicting DNS Software",
Message: fmt.Sprintf(
"Restart Portmaster after you have deactivated or properly configured the conflicting software: %s",
Message: "Restart Portmaster after you have deactivated or properly configured the conflicting software: " +
cfDescription,
),
ShowOnSystem: true,
AvailableActions: []*notifications.Action{
{

View file

@ -21,6 +21,8 @@ import (
var hostname string
const internalError = "internal error: "
func handleRequestAsWorker(w dns.ResponseWriter, query *dns.Msg) {
err := module.RunWorker("handle dns request", func(ctx context.Context) error {
return handleRequest(ctx, w, query)
@ -130,7 +132,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Tracef("nameserver: delaying failing lookup until end of fail duration for %s", remainingFailingDuration.Round(time.Millisecond))
time.Sleep(remainingFailingDuration)
return reply(nsutil.ServerFailure(
"internal error: "+failingErr.Error(),
internalError+failingErr.Error(),
"delayed failing query to mitigate request flooding",
))
}
@ -138,7 +140,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Tracef("nameserver: delaying failing lookup for %s", failingDelay.Round(time.Millisecond))
time.Sleep(failingDelay)
return reply(nsutil.ServerFailure(
"internal error: "+failingErr.Error(),
internalError+failingErr.Error(),
"delayed failing query to mitigate request flooding",
fmt.Sprintf("error is cached for another %s", remainingFailingDuration.Round(time.Millisecond)),
))
@ -148,7 +150,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
local, err := netenv.IsMyIP(remoteAddr.IP)
if err != nil {
tracer.Warningf("nameserver: failed to check if request for %s is local: %s", q.ID(), err)
return reply(nsutil.ServerFailure("internal error: failed to check if request is local"))
return reply(nsutil.ServerFailure(internalError + " failed to check if request is local"))
}
// Create connection ID for dns request.
@ -170,7 +172,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
conn, err = network.NewConnectionFromExternalDNSRequest(ctx, q.FQDN, nil, connID, remoteAddr.IP)
if err != nil {
tracer.Warningf("nameserver: failed to get host/profile for request for %s%s: %s", q.FQDN, q.QType, err)
return reply(nsutil.ServerFailure("internal error: failed to get profile"))
return reply(nsutil.ServerFailure(internalError + "failed to get profile"))
}
default:
@ -210,7 +212,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case network.VerdictUndecided, network.VerdictAccept:
// Check if we have a response.
if rrCache == nil {
conn.Failed("internal error: no reply", "")
conn.Failed(internalError+"no reply", "")
return
}
@ -293,7 +295,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Warningf("nameserver: failed to resolve %s: %s", q.ID(), err)
conn.Failed(fmt.Sprintf("query failed: %s", err), "")
addFailingQuery(q, err)
return reply(nsutil.ServerFailure("internal error: " + err.Error()))
return reply(nsutil.ServerFailure(internalError + err.Error()))
}
}
// Handle special cases.
@ -301,7 +303,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case rrCache == nil:
tracer.Warning("nameserver: received successful, but empty reply from resolver")
addFailingQuery(q, errors.New("emptry reply from resolver"))
return reply(nsutil.ServerFailure("internal error: empty reply"))
return reply(nsutil.ServerFailure(internalError + "empty reply"))
case rrCache.RCode == dns.RcodeNameError:
// Try alternatives domain names for unofficial domain spaces.
altRRCache := checkAlternativeCaches(ctx, q)

View file

@ -42,7 +42,7 @@ func (ch *ActiveChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.Requ
orm.WithResult(&result),
orm.WithSchema(*ch.Database.Schema),
); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return
}
@ -77,7 +77,7 @@ func (ch *ActiveChartHandler) parseRequest(req *http.Request) (*QueryActiveConne
var requestPayload QueryActiveConnectionChartPayload
blob, err := io.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error())
return nil, fmt.Errorf("failed to read body: %w", err)
}
body = bytes.NewReader(blob)

View file

@ -49,7 +49,7 @@ func (ch *BandwidthChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.R
orm.WithResult(&result),
orm.WithSchema(*ch.Database.Schema),
); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return
}
@ -84,7 +84,7 @@ func (ch *BandwidthChartHandler) parseRequest(req *http.Request) (*BandwidthChar
var requestPayload BandwidthChartRequest
blob, err := io.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error())
return nil, fmt.Errorf("failed to read body: %w", err)
}
body = bytes.NewReader(blob)

View file

@ -23,18 +23,18 @@ type (
// insert or an update.
// The ID of Conn is unique and can be trusted to never collide with other
// connections of the save device.
Save(context.Context, Conn, bool) error
Save(ctx context.Context, conn Conn, history bool) error
// MarkAllHistoryConnectionsEnded marks all active connections in the history
// database as ended NOW.
MarkAllHistoryConnectionsEnded(context.Context) error
MarkAllHistoryConnectionsEnded(ctx context.Context) error
// RemoveAllHistoryData removes all connections from the history database.
RemoveAllHistoryData(context.Context) error
RemoveAllHistoryData(ctx context.Context) error
// RemoveHistoryForProfile removes all connections from the history database.
// for a given profile ID (source/id)
RemoveHistoryForProfile(context.Context, string) error
RemoveHistoryForProfile(ctx context.Context, profile string) error
// UpdateBandwidth updates bandwidth data for the connection and optionally also writes
// the bandwidth data to the history database.

View file

@ -41,13 +41,13 @@ type (
// by *sqlite.Stmt.
Stmt interface {
ColumnCount() int
ColumnName(int) string
ColumnType(int) sqlite.ColumnType
ColumnText(int) string
ColumnBool(int) bool
ColumnFloat(int) float64
ColumnInt(int) int
ColumnReader(int) *bytes.Reader
ColumnName(col int) string
ColumnType(col int) sqlite.ColumnType
ColumnText(col int) string
ColumnBool(col int) bool
ColumnFloat(col int) float64
ColumnInt(col int) int
ColumnReader(col int) *bytes.Reader
}
// DecodeFunc is called for each non-basic type during decoding.
@ -230,7 +230,7 @@ func DatetimeDecoder(loc *time.Location) DecodeFunc {
case sqlite.TypeFloat:
// stored as Julian day numbers
return nil, false, fmt.Errorf("REAL storage type not support for time.Time")
return nil, false, errors.New("REAL storage type not support for time.Time")
case sqlite.TypeNull:
return nil, true, nil
@ -359,7 +359,7 @@ func decodeBasic() DecodeFunc {
case reflect.Slice:
if outval.Type().Elem().Kind() != reflect.Uint8 {
return nil, false, fmt.Errorf("slices other than []byte for BLOB are not supported")
return nil, false, errors.New("slices other than []byte for BLOB are not supported")
}
if colType != sqlite.TypeBlob {

View file

@ -2,6 +2,7 @@ package orm
import (
"context"
"errors"
"fmt"
"reflect"
"time"
@ -171,7 +172,7 @@ func DatetimeEncoder(loc *time.Location) EncodeFunc {
valInterface := val.Interface()
t, ok = valInterface.(time.Time)
if !ok {
return nil, false, fmt.Errorf("cannot convert reflect value to time.Time")
return nil, false, errors.New("cannot convert reflect value to time.Time")
}
case valType.Kind() == reflect.String && colDef.IsTime:

View file

@ -6,6 +6,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"zombiezen.com/go/sqlite"
)
@ -120,7 +121,7 @@ func TestEncodeAsMap(t *testing.T) { //nolint:tparallel
c := cases[idx]
t.Run(c.Desc, func(t *testing.T) {
res, err := ToParamMap(ctx, c.Input, "", DefaultEncodeConfig, nil)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.Expected, res)
})
}
@ -253,7 +254,7 @@ func TestEncodeValue(t *testing.T) { //nolint:tparallel
c := cases[idx]
t.Run(c.Desc, func(t *testing.T) {
res, err := EncodeValue(ctx, &c.Column, c.Input, DefaultEncodeConfig)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.Output, res)
})
}
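The assert-to-require switches in the tests follow the usual testifylint guidance: require aborts the test when a precondition fails, so later assertions never run against bad state, while assert is kept for plain value comparisons. A compact sketch (decode is a hypothetical helper used only here):

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// decode is a hypothetical helper used only for this illustration.
func decode(s string) (int, error) { return strconv.Atoi(s) }

func TestDecode(t *testing.T) {
	got, err := decode("42")
	require.NoError(t, err)  // stop here if the setup step failed
	assert.Equal(t, 42, got) // value comparison can stay on assert
}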

View file

@ -274,7 +274,7 @@ func applyStructFieldTag(fieldType reflect.StructField, def *ColumnDef) error {
case sqlite.TypeText:
def.Default = defaultValue
case sqlite.TypeBlob:
return fmt.Errorf("default values for TypeBlob not yet supported")
return errors.New("default values for TypeBlob not yet supported")
default:
return fmt.Errorf("failed to apply default value for unknown sqlite column type %s", def.Type)
}

View file

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSchemaBuilder(t *testing.T) {
@ -37,7 +38,7 @@ func TestSchemaBuilder(t *testing.T) {
c := cases[idx]
res, err := GenerateTableSchema(c.Name, c.Model)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.ExpectedSQL, res.CreateStatement("main", false))
}
}

View file

@ -19,6 +19,8 @@ import (
var charOnlyRegexp = regexp.MustCompile("[a-zA-Z]+")
const failedQuery = "Failed to execute query: "
type (
// QueryHandler implements http.Handler and allows to perform SQL
@ -78,7 +80,7 @@ func (qh *QueryHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
orm.WithResult(&result),
orm.WithSchema(*qh.Database.Schema),
); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return
}
@ -230,7 +232,7 @@ func parseQueryRequestPayload[T any](req *http.Request) (*T, error) { //nolint:d
blob, err := io.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error())
return nil, fmt.Errorf("failed to read body: %w", err)
}
body = bytes.NewReader(blob)

View file

@ -102,7 +102,7 @@ func TestUnmarshalQuery(t *testing.T) { //nolint:tparallel
assert.Equal(t, c.Error.Error(), err.Error())
}
} else {
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.Expected, q)
}
})
@ -241,7 +241,7 @@ func TestQueryBuilder(t *testing.T) { //nolint:tparallel
assert.Equal(t, c.E.Error(), err.Error(), "test case %d", cID)
}
} else {
assert.NoError(t, err, "test case %d", cID)
require.NoError(t, err, "test case %d", cID)
assert.Equal(t, c.P, params, "test case %d", cID)
assert.Equal(t, c.R, str, "test case %d", cID)
}

View file

@ -136,11 +136,11 @@ func AddNetworkDebugData(di *debug.Info, profile, where string) {
// Collect matching connections.
var ( //nolint:prealloc // We don't know the size.
debugConns []*Connection
accepted int
total int
debugConns []*Connection
accepted int
total int
)
for maybeConn := range it.Next {
// Switch to correct type.
conn, ok := maybeConn.(*Connection)

View file

@ -751,12 +751,14 @@ func (conn *Connection) SaveWhenFinished() {
func (conn *Connection) Save() {
conn.UpdateMeta()
// nolint:exhaustive
switch conn.Verdict {
case VerdictAccept, VerdictRerouteToNameserver:
conn.ConnectionEstablished = true
case VerdictRerouteToTunnel:
// this is already handled when the connection tunnel has been
// established.
default:
}
// Do not save/update until data is complete.

View file

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/google/gopacket"
)
@ -207,9 +208,9 @@ func (pkt *Base) FmtRemoteIP() string {
func (pkt *Base) FmtRemotePort() string {
if pkt.info.SrcPort != 0 {
if pkt.info.Inbound {
return fmt.Sprintf("%d", pkt.info.SrcPort)
return strconv.FormatUint(uint64(pkt.info.SrcPort), 10)
}
return fmt.Sprintf("%d", pkt.info.DstPort)
return strconv.FormatUint(uint64(pkt.info.DstPort), 10)
}
return "-"
}
@ -235,10 +236,10 @@ type Packet interface {
ExpectInfo() bool
// Info.
SetCtx(context.Context)
SetCtx(ctx context.Context)
Ctx() context.Context
Info() *Info
SetPacketInfo(Info)
SetPacketInfo(info Info)
IsInbound() bool
IsOutbound() bool
SetInbound()
@ -253,8 +254,8 @@ type Packet interface {
Payload() []byte
// Matching.
MatchesAddress(bool, IPProtocol, *net.IPNet, uint16) bool
MatchesIP(bool, *net.IPNet) bool
MatchesAddress(remote bool, protocol IPProtocol, network *net.IPNet, port uint16) bool
MatchesIP(endpoint bool, network *net.IPNet) bool
// Formatting.
String() string

View file

@ -44,7 +44,7 @@ type Address struct {
// Info is a generic interface to both ConnectionInfo and BindInfo.
type Info interface {
GetPID() int
SetPID(int)
SetPID(pid int)
GetUID() int
GetUIDandInode() (int, int)
}

View file

@ -2,7 +2,6 @@ package process
import (
"errors"
"fmt"
"net/http"
"strconv"
@ -70,7 +69,7 @@ func handleGetProcessesByProfile(ar *api.Request) (any, error) {
source := ar.URLVars["source"]
id := ar.URLVars["id"]
if id == "" || source == "" {
return nil, api.ErrorWithStatus(fmt.Errorf("missing profile source/id"), http.StatusBadRequest)
return nil, api.ErrorWithStatus(errors.New("missing profile source/id"), http.StatusBadRequest)
}
result := GetProcessesWithProfile(ar.Context(), profile.ProfileSource(source), id, true)

View file

@ -72,7 +72,8 @@ func GetProcessesWithProfile(ctx context.Context, profileSource profile.ProfileS
slices.SortFunc[[]*Process, *Process](procs, func(a, b *Process) int {
return strings.Compare(a.processKey, b.processKey)
})
slices.CompactFunc[[]*Process, *Process](procs, func(a, b *Process) bool {
procs = slices.CompactFunc[[]*Process, *Process](procs, func(a, b *Process) bool {
return a.processKey == b.processKey
})
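This one is more than cosmetic: slices.CompactFunc returns the compacted slice rather than shrinking its argument in place, so the original call discarded its result. Assigning it back is the actual fix, roughly:

package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	procs := []string{"a", "a", "b", "c", "c"}
	slices.SortFunc(procs, strings.Compare)
	// Without the assignment the deduplicated result would be thrown away.
	procs = slices.CompactFunc(procs, func(a, b string) bool { return a == b })
	fmt.Println(procs) // [a b c]
}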

View file

@ -40,6 +40,6 @@ func AddToDebugInfo(di *debug.Info) {
fmt.Sprintf("Status: %s", netenv.GetOnlineStatus()),
debug.UseCodeSection|debug.AddContentLineBreaks,
fmt.Sprintf("OnlineStatus: %s", netenv.GetOnlineStatus()),
fmt.Sprintf("CaptivePortal: %s", netenv.GetCaptivePortal().URL),
"CaptivePortal: "+netenv.GetCaptivePortal().URL,
)
}

View file

@ -25,6 +25,8 @@ const (
ReleaseChannelSupport = "support"
)
const jsonSuffix = ".json"
// SetIndexes sets the update registry indexes and also configures the registry
// to use pre-releases based on the channel.
func SetIndexes(
@ -51,12 +53,12 @@ func SetIndexes(
// Always add the stable index as a base.
registry.AddIndex(updater.Index{
Path: ReleaseChannelStable + ".json",
Path: ReleaseChannelStable + jsonSuffix,
AutoDownload: autoDownload,
})
// Add beta index if in beta or staging channel.
indexPath := ReleaseChannelBeta + ".json"
indexPath := ReleaseChannelBeta + jsonSuffix
if releaseChannel == ReleaseChannelBeta ||
releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
@ -74,7 +76,7 @@ func SetIndexes(
}
// Add staging index if in staging channel.
indexPath = ReleaseChannelStaging + ".json"
indexPath = ReleaseChannelStaging + jsonSuffix
if releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
@ -91,7 +93,7 @@ func SetIndexes(
}
// Add support index if in support channel.
indexPath = ReleaseChannelSupport + ".json"
indexPath = ReleaseChannelSupport + jsonSuffix
if releaseChannel == ReleaseChannelSupport ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{

View file

@ -226,7 +226,7 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
updateASAP = true
case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
return fmt.Errorf("automatic updating is disabled")
return errors.New("automatic updating is disabled")
default:
if forceIndexCheck {
@ -254,7 +254,7 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
func DisableUpdateSchedule() error {
switch module.Status() {
case modules.StatusStarting, modules.StatusOnline, modules.StatusStopping:
return fmt.Errorf("module already online")
return errors.New("module already online")
}
disableTaskSchedule = true

View file

@ -1,6 +1,7 @@
package access
import (
"errors"
"fmt"
"net/http"
@ -86,7 +87,7 @@ func registerAPIEndpoints() error {
DataFunc: func(ar *api.Request) (data []byte, err error) {
featureID, ok := ar.URLVars["id"]
if !ok {
return nil, fmt.Errorf("invalid feature id")
return nil, errors.New("invalid feature id")
}
for _, feature := range features {
@ -95,7 +96,7 @@ func registerAPIEndpoints() error {
}
}
return nil, fmt.Errorf("feature id not found")
return nil, errors.New("feature id not found")
},
}); err != nil {
return err

View file

@ -128,7 +128,7 @@ findCandidates:
if err != nil {
return fmt.Errorf("failed to connect to a new home hub - tried %d hubs: %w", tries+1, err)
}
return fmt.Errorf("no home hub candidates available")
return errors.New("no home hub candidates available")
}
func connectToHomeHub(ctx context.Context, dst *hub.Hub) error {
@ -200,7 +200,7 @@ func connectToHomeHub(ctx context.Context, dst *hub.Hub) error {
// Set new home on map.
ok := navigator.Main.SetHome(dst.ID, homeTerminal)
if !ok {
return fmt.Errorf("failed to set home hub on map")
return errors.New("failed to set home hub on map")
}
// Assign crane to home hub in order to query it later.

View file

@ -82,7 +82,7 @@ func (t *Tunnel) connectWorker(ctx context.Context) (err error) {
// TODO: Clean this up.
t.connInfo.Lock()
defer t.connInfo.Unlock()
t.connInfo.Failed(fmt.Sprintf("SPN failed to establish route: %s", err), "")
t.connInfo.Failed("SPN failed to establish route: "+err.Error(), "")
t.connInfo.Save()
tracer.Warningf("spn/crew: failed to establish route for %s: %s", t.connInfo, err)
@ -97,7 +97,7 @@ func (t *Tunnel) connectWorker(ctx context.Context) (err error) {
t.connInfo.Lock()
defer t.connInfo.Unlock()
t.connInfo.Failed(fmt.Sprintf("SPN failed to initialize data tunnel (connect op): %s", tErr.Error()), "")
t.connInfo.Failed("SPN failed to initialize data tunnel (connect op): "+tErr.Error(), "")
t.connInfo.Save()
// TODO: try with another route?

View file

@ -5,7 +5,7 @@ import (
"net"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestCheckStringFormat(t *testing.T) {
@ -48,9 +48,9 @@ func TestCheckStringFormat(t *testing.T) {
for testCharacter, isPermitted := range testSet {
if isPermitted {
assert.NoError(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
require.NoError(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
} else {
assert.Error(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
require.Error(t, checkStringFormat(fmt.Sprintf("test character %q", testCharacter), testCharacter, 3))
}
}
}
@ -59,22 +59,22 @@ func TestCheckIPFormat(t *testing.T) {
t.Parallel()
// IPv4
assert.NoError(t, checkIPFormat("test IP 1.1.1.1", net.IPv4(1, 1, 1, 1)))
assert.NoError(t, checkIPFormat("test IP 192.168.1.1", net.IPv4(192, 168, 1, 1)))
assert.Error(t, checkIPFormat("test IP 255.0.0.1", net.IPv4(255, 0, 0, 1)))
require.NoError(t, checkIPFormat("test IP 1.1.1.1", net.IPv4(1, 1, 1, 1)))
require.NoError(t, checkIPFormat("test IP 192.168.1.1", net.IPv4(192, 168, 1, 1)))
require.Error(t, checkIPFormat("test IP 255.0.0.1", net.IPv4(255, 0, 0, 1)))
// IPv6
assert.NoError(t, checkIPFormat("test IP ::1", net.ParseIP("::1")))
assert.NoError(t, checkIPFormat("test IP 2606:4700:4700::1111", net.ParseIP("2606:4700:4700::1111")))
require.NoError(t, checkIPFormat("test IP ::1", net.ParseIP("::1")))
require.NoError(t, checkIPFormat("test IP 2606:4700:4700::1111", net.ParseIP("2606:4700:4700::1111")))
// Invalid
assert.Error(t, checkIPFormat("test IP with length 3", net.IP([]byte{0, 0, 0})))
assert.Error(t, checkIPFormat("test IP with length 5", net.IP([]byte{0, 0, 0, 0, 0})))
assert.Error(t, checkIPFormat(
require.Error(t, checkIPFormat("test IP with length 3", net.IP([]byte{0, 0, 0})))
require.Error(t, checkIPFormat("test IP with length 5", net.IP([]byte{0, 0, 0, 0, 0})))
require.Error(t, checkIPFormat(
"test IP with length 15",
net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
))
assert.Error(t, checkIPFormat(
require.Error(t, checkIPFormat(
"test IP with length 17",
net.IP([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
))

View file

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func parseT(t *testing.T, definition string) *Transport {
@ -140,8 +141,8 @@ func TestTransportParsing(t *testing.T) {
// test invalid
assert.NotEqual(t, parseTError("spn"), nil, "should fail")
assert.NotEqual(t, parseTError("spn:"), nil, "should fail")
assert.NotEqual(t, parseTError("spn:0"), nil, "should fail")
assert.NotEqual(t, parseTError("spn:65536"), nil, "should fail")
require.Error(t, parseTError("spn"), "should fail")
require.Error(t, parseTError("spn:"), "should fail")
require.Error(t, parseTError("spn:0"), "should fail")
require.Error(t, parseTError("spn:65536"), "should fail")
}

View file

@ -210,9 +210,9 @@ func (m *Map) optimizeForSatelliteConnectivity(result *OptimizationResult) {
// Add to suggested pins.
if len(region.regardedPins) <= region.satelliteMinLanes {
result.addSuggested(fmt.Sprintf("best to region %s", region.ID), region.regardedPins...)
result.addSuggested("best to region "+region.ID, region.regardedPins...)
} else {
result.addSuggested(fmt.Sprintf("best to region %s", region.ID), region.regardedPins[:region.satelliteMinLanes]...)
result.addSuggested("best to region "+region.ID, region.regardedPins[:region.satelliteMinLanes]...)
}
}
}

View file

@ -622,7 +622,7 @@ func (m *Map) updateQuickSettingExcludeCountryList(ctx context.Context, configKe
for _, country := range countryList {
quickSettings = append(quickSettings, config.QuickSetting{
Name: fmt.Sprintf("Exclude %s (%s)", country.Name, country.Code),
Value: []string{fmt.Sprintf("- %s", country.Code)},
Value: []string{"- " + country.Code},
Action: config.QuickMergeTop,
})
}
@ -700,7 +700,7 @@ func (m *Map) updateSelectRuleCountryList(ctx context.Context, configKey string,
selections = append(selections, selectCountry{
QuickSetting: config.QuickSetting{
Name: fmt.Sprintf("%s (%s)", country.Name, country.Code),
Value: []string{fmt.Sprintf("+ %s", country.Code), "- *"},
Value: []string{"+ " + country.Code, "- *"},
Action: config.QuickReplace,
},
FlagID: country.Code,
@ -712,7 +712,7 @@ func (m *Map) updateSelectRuleCountryList(ctx context.Context, configKey string,
selections = append(selections, selectCountry{
QuickSetting: config.QuickSetting{
Name: fmt.Sprintf("%s (C:%s)", continent.Name, continent.Code),
Value: []string{fmt.Sprintf("+ C:%s", continent.Code), "- *"},
Value: []string{"+ C:" + continent.Code, "- *"},
Action: config.QuickReplace,
},
})

View file

@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSharedHTTP(t *testing.T) { //nolint:paralleltest // Test checks global state.
@ -11,23 +12,23 @@ func TestSharedHTTP(t *testing.T) { //nolint:paralleltest // Test checks global
// Register multiple handlers.
err := addHTTPHandler(testPort, "", ServeInfoPage)
assert.NoError(t, err, "should be able to share http listener")
require.NoError(t, err, "should be able to share http listener")
err = addHTTPHandler(testPort, "/test", ServeInfoPage)
assert.NoError(t, err, "should be able to share http listener")
require.NoError(t, err, "should be able to share http listener")
err = addHTTPHandler(testPort, "/test2", ServeInfoPage)
assert.NoError(t, err, "should be able to share http listener")
require.NoError(t, err, "should be able to share http listener")
err = addHTTPHandler(testPort, "/", ServeInfoPage)
assert.Error(t, err, "should fail to register path twice")
require.Error(t, err, "should fail to register path twice")
// Unregister
assert.NoError(t, removeHTTPHandler(testPort, ""))
assert.NoError(t, removeHTTPHandler(testPort, "/test"))
assert.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
assert.NoError(t, removeHTTPHandler(testPort, "/test2"))
assert.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
require.NoError(t, removeHTTPHandler(testPort, ""))
require.NoError(t, removeHTTPHandler(testPort, "/test"))
require.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
require.NoError(t, removeHTTPHandler(testPort, "/test2"))
require.NoError(t, removeHTTPHandler(testPort, "/not-registered")) // removing unregistered handler does not error
// Check if all handlers are gone again.
sharedHTTPServersLock.Lock()
defer sharedHTTPServersLock.Unlock()
assert.Equal(t, 0, len(sharedHTTPServers), "shared http handlers should be back to zero")
assert.Empty(t, sharedHTTPServers, "shared http handlers should be back to zero")
}

View file

@ -47,7 +47,7 @@ func StartSluice(network, address string) {
// Start service worker.
module.StartServiceWorker(
fmt.Sprintf("%s sluice listener", s.network),
s.network+" sluice listener",
10*time.Second,
s.listenHandler,
)