Fix tests and linter warnings

Daniel 2022-02-01 13:12:46 +01:00
parent 7d2cd6c15d
commit f59ad0357a
162 changed files with 668 additions and 696 deletions

View file

@ -37,6 +37,7 @@ type endpointBridgeStorage struct {
storage.InjectBase
}
// EndpointBridgeRequest holds a bridged request API request.
type EndpointBridgeRequest struct {
record.Base
sync.Mutex
@ -48,6 +49,7 @@ type EndpointBridgeRequest struct {
MimeType string
}
// EndpointBridgeResponse holds a bridged request API response.
type EndpointBridgeResponse struct {
record.Base
sync.Mutex

View file

@ -13,9 +13,8 @@ import (
"github.com/tevino/abool"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/rng"
)
@ -147,7 +146,7 @@ func authenticateRequest(w http.ResponseWriter, r *http.Request, targetHandler h
}
// Check if we need to do any authentication at all.
switch requiredPermission {
switch requiredPermission { //nolint:exhaustive
case NotFound:
// Not found.
tracer.Trace("api: authenticated handler reported: not found")
@ -543,6 +542,8 @@ func (p Permission) Role() string {
return "Admin"
case PermitSelf:
return "Self"
case Dynamic, NotFound, NotSupported:
return "Invalid"
default:
return "Invalid"
}
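
The switch above satisfies the exhaustive linter by naming the remaining Permission constants explicitly before default, while the other switch in this file is annotated with //nolint:exhaustive instead. A minimal, self-contained sketch of the explicit-case approach; the Permission type and constants below are illustrative stand-ins, not the api package's real definitions:

package main

import "fmt"

// Permission is an illustrative stand-in for an enum-like type checked by
// the exhaustive linter.
type Permission uint8

const (
	NotSupported Permission = iota
	PermitAnyone
	PermitUser
	PermitAdmin
)

// roleName names every constant explicitly, so no //nolint:exhaustive
// comment is needed.
func roleName(p Permission) string {
	switch p {
	case PermitAnyone:
		return "Anyone"
	case PermitUser:
		return "User"
	case PermitAdmin:
		return "Admin"
	case NotSupported:
		return "Invalid"
	default:
		return "Invalid"
	}
}

func main() {
	fmt.Println(roleName(PermitUser)) // User
}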

View file

@ -9,9 +9,7 @@ import (
"github.com/stretchr/testify/assert"
)
var (
testToken = new(AuthToken)
)
var testToken = new(AuthToken)
func testAuthenticator(r *http.Request, s *http.Server) (*AuthToken, error) {
switch {
@ -65,7 +63,9 @@ func init() {
}
}
func TestPermissions(t *testing.T) { //nolint:gocognit
func TestPermissions(t *testing.T) {
t.Parallel()
testHandler := &mainHandler{
mux: mainMux,
}
@ -99,10 +99,11 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
http.MethodHead,
http.MethodPost,
http.MethodPut,
http.MethodDelete,
} {
// Set request permission for test requests.
reading := method == http.MethodGet
_, reading, _ := getEffectiveMethod(&http.Request{Method: method})
if reading {
testToken.Read = requestPerm
testToken.Write = NotSupported
@ -147,7 +148,6 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
}
if expectSuccess {
// Test for success.
if !assert.HTTPBodyContains(
t,
@ -164,9 +164,7 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
handlerPerm, handlerPerm,
)
}
} else {
// Test for error.
if !assert.HTTPError(t,
testHandler.ServeHTTP,
@ -181,7 +179,6 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
handlerPerm, handlerPerm,
)
}
}
}
}
@ -189,6 +186,8 @@ func TestPermissions(t *testing.T) { //nolint:gocognit
}
func TestPermissionDefinitions(t *testing.T) {
t.Parallel()
if NotSupported != 0 {
t.Fatalf("NotSupported must be zero, was %v", NotSupported)
}
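
The t.Parallel() calls added as the first statement of these tests are what the paralleltest linter expects; parallel tests within a package run concurrently with each other once sequential setup is done. A minimal sketch of the pattern, with an illustrative test body:

package example

import "testing"

func TestSomething(t *testing.T) {
	t.Parallel() // mark the test as safe to run alongside other parallel tests

	if got := 1 + 1; got != 2 {
		t.Fatalf("got %d, want 2", got)
	}
}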

View file

@ -5,9 +5,9 @@ import (
"sync"
"time"
"github.com/safing/portbase/log"
"github.com/tevino/abool"
"github.com/safing/portbase/log"
)
const (

View file

@ -25,6 +25,4 @@ const (
apiSeperator = "|"
)
var (
apiSeperatorBytes = []byte(apiSeperator)
)
var apiSeperatorBytes = []byte(apiSeperator)

View file

@ -4,15 +4,14 @@ import (
"bytes"
"errors"
"github.com/tevino/abool"
"github.com/safing/portbase/container"
"github.com/safing/portbase/formats/dsd"
"github.com/tevino/abool"
)
// Client errors.
var (
ErrMalformedMessage = errors.New("malformed message")
)
// ErrMalformedMessage is returned when a malformed message was encountered.
var ErrMalformedMessage = errors.New("malformed message")
// Message is an API message.
type Message struct {

View file

@ -4,10 +4,10 @@ import (
"fmt"
"sync"
"github.com/safing/portbase/log"
"github.com/gorilla/websocket"
"github.com/tevino/abool"
"github.com/gorilla/websocket"
"github.com/safing/portbase/log"
)
type wsState struct {

View file

@ -8,20 +8,18 @@ import (
"net/http"
"sync"
"github.com/tidwall/sjson"
"github.com/safing/portbase/database/iterator"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/formats/varint"
"github.com/gorilla/websocket"
"github.com/tevino/abool"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
"github.com/safing/portbase/container"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/iterator"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/formats/varint"
"github.com/safing/portbase/log"
)
@ -75,7 +73,6 @@ func allowAnyOrigin(r *http.Request) bool {
}
func startDatabaseAPI(w http.ResponseWriter, r *http.Request) {
upgrader := websocket.Upgrader{
CheckOrigin: allowAnyOrigin,
ReadBufferSize: 1024,
@ -89,7 +86,7 @@ func startDatabaseAPI(w http.ResponseWriter, r *http.Request) {
return
}
new := &DatabaseAPI{
newDBAPI := &DatabaseAPI{
conn: wsConn,
sendQueue: make(chan []byte, 100),
queries: make(map[string]*iterator.Iterator),
@ -99,14 +96,13 @@ func startDatabaseAPI(w http.ResponseWriter, r *http.Request) {
db: database.NewInterface(nil),
}
module.StartWorker("database api handler", new.handler)
module.StartWorker("database api writer", new.writer)
module.StartWorker("database api handler", newDBAPI.handler)
module.StartWorker("database api writer", newDBAPI.writer)
log.Tracer(r.Context()).Infof("api request: init websocket %s %s", r.RemoteAddr, r.RequestURI)
}
func (api *DatabaseAPI) handler(context.Context) error {
// 123|get|<key>
// 123|ok|<key>|<data>
// 123|error|<message>
@ -429,12 +425,12 @@ func (api *DatabaseAPI) processSub(opID []byte, sub *database.Subscription) {
// TODO: use upd, new and delete msgTypes
r.Lock()
isDeleted := r.Meta().IsDeleted()
new := r.Meta().Created == r.Meta().Modified
isNew := r.Meta().Created == r.Meta().Modified
r.Unlock()
switch {
case isDeleted:
api.send(opID, dbMsgTypeDel, r.Key(), nil)
case new:
case isNew:
api.send(opID, dbMsgTypeNew, r.Key(), data)
default:
api.send(opID, dbMsgTypeUpd, r.Key(), data)
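
Renaming locals such as new to newDBAPI or isNew avoids shadowing Go's predeclared identifier new, which linters such as predeclared flag. A tiny illustrative sketch of the shadowing problem, using made-up types rather than the api package's own:

package example

type conn struct{ queue chan []byte }

func build() *conn {
	// `new := &conn{...}` would shadow the builtin new() in this scope
	// and trip the linter; a descriptive name avoids both problems.
	newConn := &conn{queue: make(chan []byte, 100)}
	_ = new(int) // the builtin remains usable because it was not shadowed
	return newConn
}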

View file

@ -21,7 +21,7 @@ import (
// Endpoint describes an API Endpoint.
// Path and at least one permission are required.
// As is exactly one function.
type Endpoint struct {
type Endpoint struct { //nolint:maligned
// Path describes the URL path of the endpoint.
Path string

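The //nolint:maligned annotation accepts that Endpoint's fields are ordered for readability rather than minimal padding; the maligned check flags structs whose field order wastes alignment padding. An illustrative comparison, independent of the real Endpoint struct:

package main

import (
	"fmt"
	"unsafe"
)

type readable struct { // 24 bytes on 64-bit: each bool is followed by 7 bytes of padding
	a bool
	n int64
	b bool
}

type packed struct { // 16 bytes: the two bools share the trailing padded word
	n int64
	a bool
	b bool
}

func main() {
	fmt.Println(unsafe.Sizeof(readable{}), unsafe.Sizeof(packed{})) // 24 16
}
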
View file

@ -5,8 +5,9 @@ import (
"sync"
"testing"
"github.com/safing/portbase/database/record"
"github.com/stretchr/testify/assert"
"github.com/safing/portbase/database/record"
)
const (
@ -21,6 +22,8 @@ type actionTestRecord struct {
}
func TestEndpoints(t *testing.T) {
t.Parallel()
testHandler := &mainHandler{
mux: mainMux,
}
@ -113,6 +116,8 @@ func TestEndpoints(t *testing.T) {
}
func TestActionRegistration(t *testing.T) {
t.Parallel()
assert.Error(t, RegisterEndpoint(Endpoint{}))
assert.Error(t, RegisterEndpoint(Endpoint{

View file

@ -6,11 +6,10 @@ import (
"os"
"testing"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/modules"
// API depends on the database for the database api.
_ "github.com/safing/portbase/database/dbmodule"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/modules"
)
func init() {
@ -28,7 +27,7 @@ func TestMain(m *testing.M) {
os.Exit(1)
}
// initialize data dir
err = dataroot.Initialize(tmpDir, 0755)
err = dataroot.Initialize(tmpDir, 0o0755)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to initialize data root: %s\n", err)
os.Exit(1)

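The file-mode literals change from the legacy leading-zero form to the 0o prefix introduced in Go 1.13; both spellings denote the same octal value, the prefix just makes the base explicit. A quick, purely illustrative check:

package main

import "fmt"

func main() {
	// Legacy and prefixed octal literals are identical values.
	fmt.Println(0755 == 0o0755, 0600 == 0o0600) // true true
	fmt.Printf("%04o %04o\n", 0o0755, 0o0600)   // 0755 0600
}
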
View file

@ -6,6 +6,7 @@ import (
"github.com/safing/portbase/modules"
)
// ModuleHandler specifies the interface for API endpoints that are bound to a module.
type ModuleHandler interface {
BelongsTo() *modules.Module
}

View file

@ -5,6 +5,7 @@ import (
"net/http"
"github.com/gorilla/mux"
"github.com/safing/portbase/log"
)
@ -32,9 +33,7 @@ type Request struct {
// apiRequestContextKey is a key used for the context key/value storage.
type apiRequestContextKey struct{}
var (
requestContextKey = apiRequestContextKey{}
)
var requestContextKey = apiRequestContextKey{}
// GetAPIRequest returns the API Request of the given http request.
func GetAPIRequest(r *http.Request) *Request {

View file

@ -10,18 +10,17 @@ import (
"sync"
"time"
"github.com/safing/portbase/utils"
"github.com/gorilla/mux"
"github.com/safing/portbase/log"
"github.com/safing/portbase/utils"
)
var (
// gorilla mux
// mainMux is the main mux router.
mainMux = mux.NewRouter()
// main server and lock
// server is the main server.
server = &http.Server{}
handlerLock sync.RWMutex

View file

@ -102,8 +102,8 @@ func registerBasicOptions() error {
})
}
func loadLogLevel() {
setDefaultConfigOption(CfgLogLevel, log.GetLogLevel().Name(), false)
func loadLogLevel() error {
return setDefaultConfigOption(CfgLogLevel, log.GetLogLevel().Name(), false)
}
func setLogLevel(ctx context.Context, data interface{}) error {

View file

@ -13,9 +13,7 @@ import (
"github.com/safing/portbase/log"
)
var (
dbController *database.Controller
)
var dbController *database.Controller
// StorageInterface provides a storage.Interface to the configuration manager.
type StorageInterface struct {
@ -67,6 +65,8 @@ func (s *StorageInterface) Put(r record.Record) (record.Record, error) {
value, ok = acc.GetInt("Value")
case OptTypeBool:
value, ok = acc.GetBool("Value")
case optTypeAny:
ok = false
}
if !ok {
return nil, errors.New("received invalid value in \"Value\"")

View file

@ -1,5 +1,3 @@
// Package config ... (linter fix)
//nolint:dupl
package config
import (

View file

@ -4,10 +4,8 @@ import "sync"
type safe struct{}
var (
// Concurrent makes concurrency safe get methods available.
Concurrent = &safe{}
)
// Concurrent makes concurrency safe get methods available.
var Concurrent = &safe{}
// GetAsString returns a function that returns the wanted string with high performance.
func (cs *safe) GetAsString(name string, fallback string) StringOption {

View file

@ -26,6 +26,8 @@ func parseAndReplaceDefaultConfig(jsonData string) error {
}
func quickRegister(t *testing.T, key string, optType OptionType, defaultValue interface{}) {
t.Helper()
err := Register(&Option{
Name: key,
Key: key,
@ -40,7 +42,7 @@ func quickRegister(t *testing.T, key string, optType OptionType, defaultValue in
}
}
func TestGet(t *testing.T) { //nolint:gocognit
func TestGet(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -181,7 +183,7 @@ func TestGet(t *testing.T) { //nolint:gocognit
}
}
func TestReleaseLevel(t *testing.T) {
func TestReleaseLevel(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
registerReleaseLevelOption()

View file

@ -54,9 +54,12 @@ func start() error {
configFilePath = filepath.Join(dataRoot.Path, "config.json")
// Load log level from log package after it started.
loadLogLevel()
err := loadLogLevel()
if err != nil {
return err
}
err := registerAsDatabase()
err = registerAsDatabase()
if err != nil && !os.IsNotExist(err) {
return err
}

View file

@ -9,9 +9,7 @@ import (
"github.com/safing/portbase/log"
)
var (
configFilePath string
)
var configFilePath string
func loadConfig() error {
// check if persistence is configured
@ -69,7 +67,7 @@ func saveConfig() error {
}
// write file
return ioutil.WriteFile(configFilePath, data, 0600)
return ioutil.WriteFile(configFilePath, data, 0o0600)
}
// JSONToMap parses and flattens a hierarchical json object.

View file

@ -36,6 +36,7 @@ var (
)
func TestJSONMapConversion(t *testing.T) {
t.Parallel()
// convert to json
j, err := MapToJSON(mapData)
@ -67,6 +68,8 @@ func TestJSONMapConversion(t *testing.T) {
}
func TestConfigCleaning(t *testing.T) {
t.Parallel()
// load
configFlat, err := JSONToMap(jsonBytes)
if err != nil {

View file

@ -55,7 +55,7 @@ optionsLoop:
if firstErr != nil {
if errCnt > 0 {
return perspective, fmt.Errorf("encountered %d errors, first was: %s", errCnt, firstErr)
return perspective, fmt.Errorf("encountered %d errors, first was: %w", errCnt, firstErr)
}
return perspective, firstErr
}
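
Switching the fmt.Errorf verbs from %s to %w wraps the underlying error instead of flattening it to text, so callers can still match it with errors.Is or errors.As. A minimal sketch with a made-up sentinel error, not the config package's own:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

func load() error {
	// %w keeps errNotFound in the error chain; %s would only keep its text.
	return fmt.Errorf("loading failed: %w", errNotFound)
}

func main() {
	fmt.Println(errors.Is(load(), errNotFound)) // true
}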

View file

@ -88,13 +88,13 @@ func Register(option *Option) error {
if option.ValidationRegex != "" {
option.compiledRegex, err = regexp.Compile(option.ValidationRegex)
if err != nil {
return fmt.Errorf("config: could not compile option.ValidationRegex: %s", err)
return fmt.Errorf("config: could not compile option.ValidationRegex: %w", err)
}
}
option.activeFallbackValue, err = validateValue(option, option.DefaultValue)
if err != nil {
return fmt.Errorf("config: invalid default value: %s", err)
return fmt.Errorf("config: invalid default value: %w", err)
}
optionsLock.Lock()

View file

@ -4,7 +4,7 @@ import (
"testing"
)
func TestRegistry(t *testing.T) {
func TestRegistry(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -46,5 +46,4 @@ func TestRegistry(t *testing.T) {
}); err == nil {
t.Error("should fail")
}
}

View file

@ -1,5 +1,3 @@
// Package config ... (linter fix)
//nolint:dupl
package config
import (
@ -12,7 +10,7 @@ import (
// configuration setting.
type ReleaseLevel uint8
// Release Level constants
// Release Level constants.
const (
ReleaseLevelStable ReleaseLevel = 0
ReleaseLevelBeta ReleaseLevel = 1

View file

@ -74,7 +74,7 @@ func replaceConfig(newValues map[string]interface{}) error {
if firstErr != nil {
if errCnt > 0 {
return fmt.Errorf("encountered %d errors, first was: %s", errCnt, firstErr)
return fmt.Errorf("encountered %d errors, first was: %w", errCnt, firstErr)
}
return firstErr
}
@ -117,7 +117,7 @@ func replaceDefaultConfig(newValues map[string]interface{}) error {
if firstErr != nil {
if errCnt > 0 {
return fmt.Errorf("encountered %d errors, first was: %s", errCnt, firstErr)
return fmt.Errorf("encountered %d errors, first was: %w", errCnt, firstErr)
}
return firstErr
}

View file

@ -3,7 +3,7 @@ package config
import "testing"
func TestLayersGetters(t *testing.T) {
func TestLayersGetters(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -77,10 +77,9 @@ func TestLayersGetters(t *testing.T) {
if notBool() {
t.Error("expected fallback value: false")
}
}
func TestLayersSetters(t *testing.T) {
func TestLayersSetters(t *testing.T) { //nolint:paralleltest
// reset
options = make(map[string]*Option)
@ -191,5 +190,4 @@ func TestLayersSetters(t *testing.T) {
if err := SetDefaultConfigOption("invalid_delete", nil); err == nil {
t.Error("should fail")
}
}

View file

@ -24,6 +24,8 @@ func (vc *valueCache) getData(opt *Option) interface{} {
return vc.stringVal
case OptTypeStringArray:
return vc.stringArrayVal
case optTypeAny:
return nil
default:
return nil
}
@ -90,7 +92,6 @@ func validateValue(option *Option, value interface{}) (*valueCache, error) { //n
s, ok := entry.(string)
if !ok {
return nil, fmt.Errorf("validation of option %s failed: element %+v at index %d is not a string", option.Key, entry, pos)
}
vConverted[pos] = s
}

View file

@ -7,7 +7,7 @@ import (
"github.com/safing/portbase/formats/varint"
)
// Container is a []byte slice on steroids, allowing for quick data appending, prepending and fetching as well as transparent error transportation. (Error transportation requires use of varints for data)
// Container is a []byte slice on steroids, allowing for quick data appending, prepending and fetching.
type Container struct {
compartments [][]byte
offset int
@ -145,12 +145,12 @@ func (c *Container) GetAll() []byte {
// GetAsContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS consumed.
func (c *Container) GetAsContainer(n int) (*Container, error) {
new := c.gatherAsContainer(n)
if new == nil {
newC := c.gatherAsContainer(n)
if newC == nil {
return nil, errors.New("container: not enough data to return")
}
c.skip(n)
return new, nil
return newC, nil
}
// GetMax returns as much as possible, but the given amount of bytes at maximum. Data MAY be copied and IS consumed.
@ -211,17 +211,13 @@ func (c *Container) renewCompartments() {
}
func (c *Container) carbonCopy() *Container {
new := &Container{
newC := &Container{
compartments: make([][]byte, len(c.compartments)),
offset: c.offset,
err: c.err,
}
for i := 0; i < len(c.compartments); i++ {
new.compartments[i] = c.compartments[i]
}
// TODO: investigate why copy fails to correctly duplicate [][]byte
// copy(new.compartments, c.compartments)
return new
copy(newC.compartments, c.compartments)
return newC
}
func (c *Container) checkOffset() {
@ -300,7 +296,7 @@ func (c *Container) gather(n int) []byte {
return slice[:n]
}
func (c *Container) gatherAsContainer(n int) (new *Container) {
func (c *Container) gatherAsContainer(n int) (newC *Container) {
// Check requested length.
if n < 0 {
return nil
@ -308,20 +304,20 @@ func (c *Container) gatherAsContainer(n int) (new *Container) {
return &Container{}
}
new = &Container{}
newC = &Container{}
for i := c.offset; i < len(c.compartments); i++ {
if n >= len(c.compartments[i]) {
new.compartments = append(new.compartments, c.compartments[i])
newC.compartments = append(newC.compartments, c.compartments[i])
n -= len(c.compartments[i])
} else {
new.compartments = append(new.compartments, c.compartments[i][:n])
newC.compartments = append(newC.compartments, c.compartments[i][:n])
n = 0
}
}
if n > 0 {
return nil
}
return new
return newC
}
func (c *Container) skip(n int) {

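carbonCopy now relies on the builtin copy instead of a hand-written loop; for a [][]byte the builtin copies the inner slice headers just as the loop did, so the compartments still share their backing byte arrays. A small illustrative sketch of that shallow-copy behaviour:

package main

import "fmt"

func main() {
	src := [][]byte{[]byte("abc"), []byte("def")}
	dst := make([][]byte, len(src))
	copy(dst, src) // copies the slice headers only; byte data is shared

	dst[0][0] = 'X'
	fmt.Println(string(src[0]), string(dst[0])) // Xbc Xbc
}
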
View file

@ -23,6 +23,7 @@ var (
)
func TestContainerDataHandling(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
c1c := c1.carbonCopy()
@ -74,6 +75,8 @@ func TestContainerDataHandling(t *testing.T) {
}
func compareMany(t *testing.T, reference []byte, other ...[]byte) {
t.Helper()
for i, cmp := range other {
if !bytes.Equal(reference, cmp) {
t.Errorf("sample %d does not match reference: sample is '%s'", i+1, string(cmp))
@ -82,6 +85,8 @@ func compareMany(t *testing.T, reference []byte, other ...[]byte) {
}
func TestDataFetching(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
data := c1.GetMax(1)
if string(data[0]) != "T" {
@ -100,6 +105,8 @@ func TestDataFetching(t *testing.T) {
}
func TestBlocks(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
c1.PrependLength()
@ -137,10 +144,10 @@ func TestBlocks(t *testing.T) {
if n4 != 43 {
t.Errorf("n should be 43, was %d", n4)
}
}
func TestContainerBlockHandling(t *testing.T) {
t.Parallel()
c1 := New(utils.DuplicateBytes(testData))
c1.PrependLength()
@ -185,6 +192,8 @@ func TestContainerBlockHandling(t *testing.T) {
}
func TestContainerMisc(t *testing.T) {
t.Parallel()
c1 := New()
d1 := c1.CompileData()
if len(d1) > 0 {
@ -193,5 +202,7 @@ func TestContainerMisc(t *testing.T) {
}
func TestDeprecated(t *testing.T) {
t.Parallel()
NewContainer(utils.DuplicateBytes(testData))
}

View file

@ -27,11 +27,11 @@ func (ja *JSONBytesAccessor) Set(key string, value interface{}) error {
}
}
new, err := sjson.SetBytes(*ja.json, key, value)
newJSON, err := sjson.SetBytes(*ja.json, key, value)
if err != nil {
return err
}
*ja.json = new
*ja.json = newJSON
return nil
}
@ -60,15 +60,15 @@ func (ja *JSONBytesAccessor) GetStringArray(key string) (value []string, ok bool
return nil, false
}
slice := result.Array()
new := make([]string, len(slice))
sliceCopy := make([]string, len(slice))
for i, res := range slice {
if res.Type == gjson.String {
new[i] = res.String()
sliceCopy[i] = res.String()
} else {
return nil, false
}
}
return new, true
return sliceCopy, true
}
// GetInt returns the int found by the given json key and whether it could be successfully extracted.

View file

@ -29,11 +29,11 @@ func (ja *JSONAccessor) Set(key string, value interface{}) error {
}
}
new, err := sjson.Set(*ja.json, key, value)
newJSON, err := sjson.Set(*ja.json, key, value)
if err != nil {
return err
}
*ja.json = new
*ja.json = newJSON
return nil
}
@ -84,15 +84,15 @@ func (ja *JSONAccessor) GetStringArray(key string) (value []string, ok bool) {
return nil, false
}
slice := result.Array()
new := make([]string, len(slice))
sliceCopy := make([]string, len(slice))
for i, res := range slice {
if res.Type == gjson.String {
new[i] = res.String()
sliceCopy[i] = res.String()
} else {
return nil, false
}
}
return new, true
return sliceCopy, true
}
// GetInt returns the int found by the given json key and whether it could be successfully extracted.

View file

@ -37,12 +37,12 @@ func (sa *StructAccessor) Set(key string, value interface{}) error {
}
// handle special cases
switch field.Kind() {
switch field.Kind() { // nolint:exhaustive
// ints
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var newInt int64
switch newVal.Kind() {
switch newVal.Kind() { // nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
newInt = newVal.Int()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -58,7 +58,7 @@ func (sa *StructAccessor) Set(key string, value interface{}) error {
// uints
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
var newUint uint64
switch newVal.Kind() {
switch newVal.Kind() { // nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
newUint = uint64(newVal.Int())
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -73,7 +73,7 @@ func (sa *StructAccessor) Set(key string, value interface{}) error {
// floats
case reflect.Float32, reflect.Float64:
switch newVal.Kind() {
switch newVal.Kind() { // nolint:exhaustive
case reflect.Float32, reflect.Float64:
field.SetFloat(newVal.Float())
default:
@ -124,7 +124,7 @@ func (sa *StructAccessor) GetInt(key string) (value int64, ok bool) {
if !field.IsValid() {
return 0, false
}
switch field.Kind() {
switch field.Kind() { // nolint:exhaustive
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return field.Int(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -140,7 +140,7 @@ func (sa *StructAccessor) GetFloat(key string) (value float64, ok bool) {
if !field.IsValid() {
return 0, false
}
switch field.Kind() {
switch field.Kind() { // nolint:exhaustive
case reflect.Float32, reflect.Float64:
return field.Float(), true
default:

View file

@ -49,6 +49,8 @@ var (
)
func testGetString(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue string) {
t.Helper()
v, ok := acc.GetString(key)
switch {
case !ok && shouldSucceed:
@ -62,6 +64,8 @@ func testGetString(t *testing.T, acc Accessor, key string, shouldSucceed bool, e
}
func testGetStringArray(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue []string) {
t.Helper()
v, ok := acc.GetStringArray(key)
switch {
case !ok && shouldSucceed:
@ -75,6 +79,8 @@ func testGetStringArray(t *testing.T, acc Accessor, key string, shouldSucceed bo
}
func testGetInt(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue int64) {
t.Helper()
v, ok := acc.GetInt(key)
switch {
case !ok && shouldSucceed:
@ -88,6 +94,8 @@ func testGetInt(t *testing.T, acc Accessor, key string, shouldSucceed bool, expe
}
func testGetFloat(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue float64) {
t.Helper()
v, ok := acc.GetFloat(key)
switch {
case !ok && shouldSucceed:
@ -101,6 +109,8 @@ func testGetFloat(t *testing.T, acc Accessor, key string, shouldSucceed bool, ex
}
func testGetBool(t *testing.T, acc Accessor, key string, shouldSucceed bool, expectedValue bool) {
t.Helper()
v, ok := acc.GetBool(key)
switch {
case !ok && shouldSucceed:
@ -114,6 +124,8 @@ func testGetBool(t *testing.T, acc Accessor, key string, shouldSucceed bool, exp
}
func testExists(t *testing.T, acc Accessor, key string, shouldSucceed bool) {
t.Helper()
ok := acc.Exists(key)
switch {
case !ok && shouldSucceed:
@ -124,6 +136,8 @@ func testExists(t *testing.T, acc Accessor, key string, shouldSucceed bool) {
}
func testSet(t *testing.T, acc Accessor, key string, shouldSucceed bool, valueToSet interface{}) {
t.Helper()
err := acc.Set(key, valueToSet)
switch {
case err != nil && shouldSucceed:
@ -134,8 +148,9 @@ func testSet(t *testing.T, acc Accessor, key string, shouldSucceed bool, valueTo
}
func TestAccessor(t *testing.T) {
t.Parallel()
// Test interface compliance
// Test interface compliance.
accs := []Accessor{
NewJSONAccessor(&testJSON),
NewJSONBytesAccessor(&testJSONBytes),
@ -273,5 +288,4 @@ func TestAccessor(t *testing.T) {
for _, acc := range accs {
testExists(t, acc, "X", false)
}
}
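
The t.Helper() calls added to these assertion helpers (the thelper linter's concern) make failures point at the calling test's line instead of a line inside the helper. A minimal sketch of the pattern, with illustrative names:

package example

import "testing"

func assertEqual(t *testing.T, got, want int) {
	t.Helper() // report failures at the caller's line, not here

	if got != want {
		t.Errorf("got %d, want %d", got, want)
	}
}

func TestSum(t *testing.T) {
	t.Parallel()

	assertEqual(t, 2+2, 4)
}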

View file

@ -15,12 +15,10 @@ type Example struct {
Score int
}
var (
exampleDB = NewInterface(&Options{
Internal: true,
Local: true,
})
)
var exampleDB = NewInterface(&Options{
Internal: true,
Local: true,
})
// GetExample gets an Example from the database.
func GetExample(key string) (*Example, error) {
@ -32,20 +30,20 @@ func GetExample(key string) (*Example, error) {
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
new := &Example{}
err = record.Unwrap(r, new)
newExample := &Example{}
err = record.Unwrap(r, newExample)
if err != nil {
return nil, err
}
return new, nil
return newExample, nil
}
// or adjust type
new, ok := r.(*Example)
newExample, ok := r.(*Example)
if !ok {
return nil, fmt.Errorf("record not of type *Example, but %T", r)
}
return new, nil
return newExample, nil
}
func (e *Example) Save() error {
@ -58,10 +56,10 @@ func (e *Example) SaveAs(key string) error {
}
func NewExample(key, name string, score int) *Example {
new := &Example{
newExample := &Example{
Name: name,
Score: score,
}
new.SetKey(key)
return new
newExample.SetKey(key)
return newExample
}

View file

@ -78,7 +78,7 @@ func (c *Controller) Get(key string) (record.Record, error) {
return r, nil
}
// Get returns the metadata of the record with the given key.
// GetMeta returns the metadata of the record with the given key.
func (c *Controller) GetMeta(key string) (*record.Meta, error) {
if shuttingDown.IsSet() {
return nil, ErrShuttingDown

View file

@ -8,6 +8,7 @@ import (
"github.com/safing/portbase/database/storage"
)
// StorageTypeInjected is the type of injected databases.
const StorageTypeInjected = "injected"
var (
@ -38,7 +39,7 @@ func getController(name string) (*Controller, error) {
// get db registration
registeredDB, err := getDatabase(name)
if err != nil {
return nil, fmt.Errorf(`could not start database %s: %s`, name, err)
return nil, fmt.Errorf("could not start database %s: %w", name, err)
}
// Check if database is injected.
@ -49,13 +50,13 @@ func getController(name string) (*Controller, error) {
// get location
dbLocation, err := getLocation(name, registeredDB.StorageType)
if err != nil {
return nil, fmt.Errorf(`could not start database %s (type %s): %s`, name, registeredDB.StorageType, err)
return nil, fmt.Errorf("could not start database %s (type %s): %w", name, registeredDB.StorageType, err)
}
// start database
storageInt, err := storage.StartDatabase(name, registeredDB.StorageType, dbLocation)
if err != nil {
return nil, fmt.Errorf(`could not start database %s (type %s): %s`, name, registeredDB.StorageType, err)
return nil, fmt.Errorf("could not start database %s (type %s): %w", name, registeredDB.StorageType, err)
}
controller = newController(registeredDB, storageInt, registeredDB.ShadowDelete)

View file

@ -4,7 +4,7 @@ import (
"time"
)
// Database holds information about registered databases
// Database holds information about a registered database.
type Database struct {
Name string
Description string

View file

@ -2,6 +2,7 @@ package database
import (
"context"
"errors"
"fmt"
"io/ioutil"
"log"
@ -11,11 +12,9 @@ import (
"testing"
"time"
"github.com/safing/portbase/database/storage"
q "github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/database/storage"
_ "github.com/safing/portbase/database/storage/badger"
_ "github.com/safing/portbase/database/storage/bbolt"
_ "github.com/safing/portbase/database/storage/fstree"
@ -46,7 +45,7 @@ func makeKey(dbName, key string) string {
return fmt.Sprintf("%s:%s", dbName, key)
}
func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolint:gocognit,gocyclo
func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolint:gocognit,gocyclo,thelper
t.Run(fmt.Sprintf("TestStorage_%s_%v", storageType, shadowDelete), func(t *testing.T) {
dbName := fmt.Sprintf("testing-%s-%v", storageType, shadowDelete)
fmt.Println(dbName)
@ -180,7 +179,7 @@ func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolin
// check status individually
_, err = dbController.storage.Get("A")
if err != storage.ErrNotFound {
if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("A should be deleted and purged, err=%s", err)
}
B1, err := dbController.storage.Get("B")
@ -208,13 +207,13 @@ func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolin
B2, err := dbController.storage.Get("B")
if err == nil {
t.Errorf("B should be deleted and purged, meta: %+v", B2.Meta())
} else if err != storage.ErrNotFound {
} else if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("B should be deleted and purged, err=%s", err)
}
C2, err := dbController.storage.Get("C")
if err == nil {
t.Errorf("C should be deleted and purged, meta: %+v", C2.Meta())
} else if err != storage.ErrNotFound {
} else if !errors.Is(err, storage.ErrNotFound) {
t.Errorf("C should be deleted and purged, err=%s", err)
}
@ -233,11 +232,11 @@ func testDatabase(t *testing.T, storageType string, shadowDelete bool) { //nolin
if err != nil {
t.Fatal(err)
}
})
}
func TestDatabaseSystem(t *testing.T) {
func TestDatabaseSystem(t *testing.T) { //nolint:tparallel
t.Parallel()
// panic after 10 seconds, to check for locks
finished := make(chan struct{})
@ -282,6 +281,8 @@ func TestDatabaseSystem(t *testing.T) {
}
func countRecords(t *testing.T, db *Interface, query *q.Query) int {
t.Helper()
_, err := query.Check()
if err != nil {
t.Fatal(err)

View file

@ -4,7 +4,7 @@ import (
"errors"
)
// Errors
// Errors.
var (
ErrNotFound = errors.New("database entry not found")
ErrPermissionDenied = errors.New("access to database record denied")

View file

@ -5,8 +5,7 @@ import (
)
// HookBase implements the Hook interface and provides dummy functions to reduce boilerplate.
type HookBase struct {
}
type HookBase struct{}
// UsesPreGet implements the Hook interface and returns false.
func (b *HookBase) UsesPreGet() bool {

View file

@ -120,19 +120,19 @@ func NewInterface(opts *Options) *Interface {
opts = &Options{}
}
new := &Interface{
newIface := &Interface{
options: opts,
}
if opts.CacheSize > 0 {
cacheBuilder := gcache.New(opts.CacheSize).ARC()
if opts.DelayCachedWrites != "" {
cacheBuilder.EvictedFunc(new.cacheEvictHandler)
new.writeCache = make(map[string]record.Record, opts.CacheSize/2)
new.triggerCacheWrite = make(chan struct{})
cacheBuilder.EvictedFunc(newIface.cacheEvictHandler)
newIface.writeCache = make(map[string]record.Record, opts.CacheSize/2)
newIface.triggerCacheWrite = make(chan struct{})
}
new.cache = cacheBuilder.Build()
newIface.cache = cacheBuilder.Build()
}
return new
return newIface
}
// Exists return whether a record with the given key exists.
@ -157,7 +157,7 @@ func (i *Interface) Get(key string) (record.Record, error) {
return r, err
}
func (i *Interface) getRecord(dbName string, dbKey string, mustBeWriteable bool) (r record.Record, db *Controller, err error) {
func (i *Interface) getRecord(dbName string, dbKey string, mustBeWriteable bool) (r record.Record, db *Controller, err error) { //nolint:unparam
if dbName == "" {
dbName, dbKey = record.ParseKey(dbKey)
}
@ -201,7 +201,7 @@ func (i *Interface) getRecord(dbName string, dbKey string, mustBeWriteable bool)
return r, db, nil
}
func (i *Interface) getMeta(dbName string, dbKey string, mustBeWriteable bool) (m *record.Meta, db *Controller, err error) {
func (i *Interface) getMeta(dbName string, dbKey string, mustBeWriteable bool) (m *record.Meta, db *Controller, err error) { //nolint:unparam
if dbName == "" {
dbName, dbKey = record.ParseKey(dbKey)
}
@ -258,7 +258,7 @@ func (i *Interface) InsertValue(key string, attribute string, value interface{})
err = acc.Set(attribute, value)
if err != nil {
return fmt.Errorf("failed to set value with %s: %s", acc.Type(), err)
return fmt.Errorf("failed to set value with %s: %w", acc.Type(), err)
}
i.options.Apply(r)
@ -271,7 +271,7 @@ func (i *Interface) Put(r record.Record) (err error) {
var db *Controller
if !i.options.HasAllPermissions() {
_, db, err = i.getMeta(r.DatabaseName(), r.DatabaseKey(), true)
if err != nil && err != ErrNotFound {
if err != nil && !errors.Is(err, ErrNotFound) {
return err
}
} else {
@ -309,7 +309,7 @@ func (i *Interface) PutNew(r record.Record) (err error) {
var db *Controller
if !i.options.HasAllPermissions() {
_, db, err = i.getMeta(r.DatabaseName(), r.DatabaseKey(), true)
if err != nil && err != ErrNotFound {
if err != nil && !errors.Is(err, ErrNotFound) {
return err
}
} else {
@ -344,11 +344,13 @@ func (i *Interface) PutNew(r record.Record) (err error) {
return db.Put(r)
}
// PutMany stores many records in the database. Warning: This is nearly a direct database access and omits many things:
// PutMany stores many records in the database.
// Warning: This is nearly a direct database access and omits many things:
// - Record locking
// - Hooks
// - Subscriptions
// - Caching
// Use with care.
func (i *Interface) PutMany(dbName string) (put func(record.Record) error) {
interfaceBatch := make(chan record.Record, 100)
@ -519,6 +521,8 @@ func (i *Interface) Delete(key string) error {
}
// Query executes the given query on the database.
// Will not see data that is in the write cache, waiting to be written.
// Use with care with caching.
func (i *Interface) Query(q *query.Query) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
@ -530,7 +534,7 @@ func (i *Interface) Query(q *query.Query) (*iterator.Iterator, error) {
return nil, err
}
// FIXME:
// TODO: Finish caching system integration.
// Flush the cache before we query the database.
// i.FlushCache()

View file

@ -57,7 +57,6 @@ func (i *Interface) DelayedCacheWriter(ctx context.Context) error {
// of a total crash.
i.flushWriteCache(0)
}
}
}

View file

@ -8,7 +8,7 @@ import (
"testing"
)
func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo
func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo,thelper
b.Run(fmt.Sprintf("CacheWriting_%s_%d_%d_%v", storageType, cacheSize, sampleSize, delayWrites), func(b *testing.B) {
// Setup Benchmark.
@ -66,11 +66,10 @@ func benchmarkCacheWriting(b *testing.B, storageType string, cacheSize int, samp
// End cache writer and wait
cancelCtx()
wg.Wait()
})
}
func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo
func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sampleSize int, delayWrites bool) { //nolint:gocognit,gocyclo,thelper
b.Run(fmt.Sprintf("CacheReadWrite_%s_%d_%d_%v", storageType, cacheSize, sampleSize, delayWrites), func(b *testing.B) {
// Setup Benchmark.
@ -135,7 +134,6 @@ func benchmarkCacheReadWrite(b *testing.B, storageType string, cacheSize int, sa
// End cache writer and wait
cancelCtx()
wg.Wait()
})
}

View file

@ -5,8 +5,9 @@ import (
"fmt"
"path/filepath"
"github.com/safing/portbase/utils"
"github.com/tevino/abool"
"github.com/safing/portbase/utils"
)
const (
@ -25,7 +26,7 @@ var (
// InitializeWithPath initializes the database at the specified location using a path.
func InitializeWithPath(dirPath string) error {
return Initialize(utils.NewDirStructure(dirPath, 0755))
return Initialize(utils.NewDirStructure(dirPath, 0o0755))
}
// Initialize initializes the database at the specified location using a dir structure.
@ -34,16 +35,16 @@ func Initialize(dirStructureRoot *utils.DirStructure) error {
rootStructure = dirStructureRoot
// ensure root and databases dirs
databasesStructure = rootStructure.ChildDir(databasesSubDir, 0700)
databasesStructure = rootStructure.ChildDir(databasesSubDir, 0o0700)
err := databasesStructure.Ensure()
if err != nil {
return fmt.Errorf("could not create/open database directory (%s): %s", rootStructure.Path, err)
return fmt.Errorf("could not create/open database directory (%s): %w", rootStructure.Path, err)
}
if registryPersistence.IsSet() {
err = loadRegistry()
if err != nil {
return fmt.Errorf("could not load database registry (%s): %s", filepath.Join(rootStructure.Path, registryFileName), err)
return fmt.Errorf("could not load database registry (%s): %w", filepath.Join(rootStructure.Path, registryFileName), err)
}
}
@ -74,11 +75,11 @@ func Shutdown() (err error) {
// getLocation returns the storage location for the given name and type.
func getLocation(name, storageType string) (string, error) {
location := databasesStructure.ChildDir(name, 0700).ChildDir(storageType, 0700)
location := databasesStructure.ChildDir(name, 0o0700).ChildDir(storageType, 0o0700)
// check location
err := location.Ensure()
if err != nil {
return "", fmt.Errorf(`failed to create/check database dir "%s": %s`, location.Path, err)
return "", fmt.Errorf(`failed to create/check database dir "%s": %w`, location.Path, err)
}
return location.Path, nil
}

View file

@ -2,12 +2,14 @@ package migration
import "errors"
// DiagnosticStep describes one migration step in the Diagnostics.
type DiagnosticStep struct {
Version string
Description string
}
type Diagnostics struct {
// Diagnostics holds a detailed error report about a failed migration.
type Diagnostics struct { //nolint:errname
// Message holds a human readable message of the encountered
// error.
Message string
@ -45,9 +47,9 @@ func (err *Diagnostics) Error() string {
return msg
}
// Unwrap returns the actual error that happend when executing
// Unwrap returns the actual error that happened when executing
// a migration. It implements the interface required by the stdlib
// errors package to support errors.Is and errors.As
// errors package to support errors.Is() and errors.As().
func (err *Diagnostics) Unwrap() error {
if u := errors.Unwrap(err.Wrapped); u != nil {
return u

View file

@ -9,6 +9,7 @@ import (
"time"
"github.com/hashicorp/go-version"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/formats/dsd"
@ -37,6 +38,7 @@ type Migration struct {
MigrateFunc MigrateFunc
}
// Registry holds a migration stack.
type Registry struct {
key string
@ -200,7 +202,7 @@ func (reg *Registry) getExecutionPlan(startOfMigration *version.Version) ([]Migr
}
// prepare our diagnostics and the execution plan
var execPlan []Migration
execPlan := make([]Migration, 0, len(versions))
for _, ver := range versions {
// skip a migration that has already been applied.
if startOfMigration != nil && startOfMigration.GreaterThanOrEqual(ver) {

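Building execPlan with make([]Migration, 0, len(versions)) preallocates capacity for the most entries the loop can append, which is what the prealloc linter typically suggests, instead of growing a nil slice append by append. The same idea with illustrative values:

package main

import "fmt"

func main() {
	versions := []string{"0.9.0", "1.0.0", "1.1.0"}
	applied := "0.9.0"

	// Capacity is known up front, so allocate it once instead of letting
	// append grow the slice step by step.
	plan := make([]string, 0, len(versions))
	for _, v := range versions {
		if v == applied { // skip what has already been applied
			continue
		}
		plan = append(plan, v)
	}
	fmt.Println(plan) // [1.0.0 1.1.0]
}
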
View file

@ -15,7 +15,6 @@ type boolCondition struct {
}
func newBoolCondition(key string, operator uint8, value interface{}) *boolCondition {
var parsedValue bool
switch v := value.(type) {

View file

@ -15,7 +15,6 @@ type floatCondition struct {
}
func newFloatCondition(key string, operator uint8, value interface{}) *floatCondition {
var parsedValue float64
switch v := value.(type) {

View file

@ -15,7 +15,6 @@ type intCondition struct {
}
func newIntCondition(key string, operator uint8, value interface{}) *intCondition {
var parsedValue int64
switch v := value.(type) {

View file

@ -15,7 +15,6 @@ type stringSliceCondition struct {
}
func newStringSliceCondition(key string, operator uint8, value interface{}) *stringSliceCondition {
switch v := value.(type) {
case string:
parsedValue := strings.Split(v, ",")
@ -42,7 +41,6 @@ func newStringSliceCondition(key string, operator uint8, value interface{}) *str
operator: errorPresent,
}
}
}
func (c *stringSliceCondition) complies(acc accessor.Accessor) bool {

View file

@ -13,7 +13,7 @@ type Condition interface {
string() string
}
// Operators
// Operators.
const (
Equals uint8 = iota // int
GreaterThan // int

View file

@ -3,6 +3,8 @@ package query
import "testing"
func testSuccess(t *testing.T, c Condition) {
t.Helper()
err := c.check()
if err != nil {
t.Errorf("failed: %s", err)
@ -10,6 +12,8 @@ func testSuccess(t *testing.T, c Condition) {
}
func TestInterfaces(t *testing.T) {
t.Parallel()
testSuccess(t, newIntCondition("banana", Equals, uint(1)))
testSuccess(t, newIntCondition("banana", Equals, uint8(1)))
testSuccess(t, newIntCondition("banana", Equals, uint16(1)))
@ -41,6 +45,8 @@ func TestInterfaces(t *testing.T) {
}
func testCondError(t *testing.T, c Condition) {
t.Helper()
err := c.check()
if err == nil {
t.Error("should fail")
@ -48,6 +54,8 @@ func testCondError(t *testing.T, c Condition) {
}
func TestConditionErrors(t *testing.T) {
t.Parallel()
// test invalid value types
testCondError(t, newBoolCondition("banana", Is, 1))
testCondError(t, newFloatCondition("banana", FloatEquals, true))
@ -68,6 +76,8 @@ func TestConditionErrors(t *testing.T) {
}
func TestWhere(t *testing.T) {
t.Parallel()
c := Where("", 254, nil)
err := c.check()
if err == nil {

View file

@ -3,6 +3,8 @@ package query
import "testing"
func TestGetOpName(t *testing.T) {
t.Parallel()
if getOpName(254) != "[unknown]" {
t.Error("unexpected output")
}

View file

@ -121,7 +121,6 @@ func ParseQuery(query string) (*Query, error) {
}
func extractSnippets(text string) (snippets []*snippet, err error) {
skip := false
start := -1
inParenthesis := false
@ -193,16 +192,17 @@ func extractSnippets(text string) (snippets []*snippet, err error) {
}
return snippets, nil
}
//nolint:gocognit
func parseAndOr(getSnippet func() (*snippet, error), remainingSnippets func() int, rootCondition bool) (Condition, error) {
var isOr = false
var typeSet = false
var wrapInNot = false
var expectingMore = true
var conditions []Condition
var (
isOr = false
typeSet = false
wrapInNot = false
expectingMore = true
conditions []Condition
)
for {
if !expectingMore && rootCondition && remainingSnippets() == 0 {
@ -331,21 +331,19 @@ func parseCondition(firstSnippet *snippet, getSnippet func() (*snippet, error))
return Where(firstSnippet.text, operator, value.text), nil
}
var (
escapeReplacer = regexp.MustCompile(`\\([^\\])`)
)
var escapeReplacer = regexp.MustCompile(`\\([^\\])`)
// prepToken removes surrounding parenthesis and escape characters.
func prepToken(text string) string {
return escapeReplacer.ReplaceAllString(strings.Trim(text, "\""), "$1")
}
// escapeString correctly escapes a snippet for printing
// escapeString correctly escapes a snippet for printing.
func escapeString(token string) string {
// check if token contains characters that need to be escaped
if strings.ContainsAny(token, "()\"\\\t\r\n ") {
// put the token in parenthesis and only escape \ and "
return fmt.Sprintf("\"%s\"", strings.Replace(token, "\"", "\\\"", -1))
return fmt.Sprintf("\"%s\"", strings.ReplaceAll(token, "\"", "\\\""))
}
return token
}
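
strings.ReplaceAll(s, old, new) is shorthand for strings.Replace(s, old, new, -1), so the escaping behaviour is unchanged. A quick illustrative check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	token := `say "hi"`
	a := strings.ReplaceAll(token, `"`, `\"`)
	b := strings.Replace(token, `"`, `\"`, -1) // -1 means replace every occurrence
	fmt.Println(a == b, a)                     // true say \"hi\"
}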

View file

@ -8,6 +8,8 @@ import (
)
func TestExtractSnippets(t *testing.T) {
t.Parallel()
text1 := `query test: where ( "bananas" > 100 and monkeys.# <= "12")or(coconuts < 10 "and" area > 50) or name sameas Julian or name matches ^King\ `
result1 := []*snippet{
{text: "query", globalPosition: 1},
@ -58,6 +60,8 @@ func TestExtractSnippets(t *testing.T) {
}
func testParsing(t *testing.T, queryText string, expectedResult *Query) {
t.Helper()
_, err := expectedResult.Check()
if err != nil {
t.Errorf("failed to create query: %s", err)
@ -84,6 +88,8 @@ func testParsing(t *testing.T, queryText string, expectedResult *Query) {
}
func TestParseQuery(t *testing.T) {
t.Parallel()
text1 := `query test: where (bananas > 100 and monkeys.# <= 12) or not (coconuts < 10 and area not > 50) or name sameas Julian or name matches "^King " orderby name limit 10 offset 20`
result1 := New("test:").Where(Or(
And(
@ -131,6 +137,8 @@ func TestParseQuery(t *testing.T) {
}
func testParseError(t *testing.T, queryText string, expectedErrorString string) {
t.Helper()
_, err := ParseQuery(queryText)
if err == nil {
t.Errorf("should fail to parse: %s", queryText)
@ -142,6 +150,8 @@ func testParseError(t *testing.T, queryText string, expectedErrorString string)
}
func TestParseErrors(t *testing.T) {
t.Parallel()
// syntax
testParseError(t, `query`, `unexpected end at position 5`)
testParseError(t, `query test: where`, `unexpected end at position 17`)

View file

@ -8,9 +8,8 @@ import (
"github.com/safing/portbase/formats/dsd"
)
var (
// copied from https://github.com/tidwall/gjson/blob/master/gjson_test.go
testJSON = `{"age":100, "name":{"here":"B\\\"R"},
// copied from https://github.com/tidwall/gjson/blob/master/gjson_test.go
var testJSON = `{"age":100, "name":{"here":"B\\\"R"},
"noop":{"what is a wren?":"a bird"},
"happy":true,"immortal":false,
"items":[1,2,3,{"tags":[1,2,3],"points":[[1,2],[3,4]]},4,5,6,7],
@ -46,11 +45,11 @@ var (
"lastly":{"yay":"final"},
"temperature": 120.413
}`
)
func testQuery(t *testing.T, r record.Record, shouldMatch bool, condition Condition) {
q := New("test:").Where(condition).MustBeValid()
t.Helper()
q := New("test:").Where(condition).MustBeValid()
// fmt.Printf("%s\n", q.Print())
matched := q.Matches(r)
@ -63,6 +62,7 @@ func testQuery(t *testing.T, r record.Record, shouldMatch bool, condition Condit
}
func TestQuery(t *testing.T) {
t.Parallel()
// if !gjson.Valid(testJSON) {
// t.Fatal("test json is invalid")
@ -110,5 +110,4 @@ func TestQuery(t *testing.T) {
testQuery(t, r, true, Where("happy", Exists, nil))
testQuery(t, r, true, Where("created", Matches, "^2014-[0-9]{2}-[0-9]{2}T"))
}

View file

@ -3,11 +3,11 @@ package record
import "testing"
func TestBaseRecord(t *testing.T) {
t.Parallel()
// check model interface compliance
var m Record
b := &TestRecord{}
m = b
_ = m
}

View file

@ -24,22 +24,16 @@ import (
"github.com/safing/portbase/container"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/formats/varint"
// Colfer
// "github.com/safing/portbase/database/model/model"
// XDR
// xdr2 "github.com/davecgh/go-xdr/xdr2"
)
var (
testMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
)
var testMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
func BenchmarkAllocateBytes(b *testing.B) {
for i := 0; i < b.N; i++ {
@ -49,8 +43,8 @@ func BenchmarkAllocateBytes(b *testing.B) {
func BenchmarkAllocateStruct1(b *testing.B) {
for i := 0; i < b.N; i++ {
var new Meta
_ = new
var newMeta Meta
_ = newMeta
}
}
@ -61,7 +55,6 @@ func BenchmarkAllocateStruct2(b *testing.B) {
}
func BenchmarkMetaSerializeContainer(b *testing.B) {
// Start benchmark
for i := 0; i < b.N; i++ {
c := container.New()
@ -80,11 +73,9 @@ func BenchmarkMetaSerializeContainer(b *testing.B) {
c.AppendNumber(0)
}
}
}
func BenchmarkMetaUnserializeContainer(b *testing.B) {
// Setup
c := container.New()
c.AppendNumber(uint64(testMeta.Created))
@ -157,11 +148,9 @@ func BenchmarkMetaUnserializeContainer(b *testing.B) {
return
}
}
}
func BenchmarkMetaSerializeVarInt(b *testing.B) {
// Start benchmark
for i := 0; i < b.N; i++ {
encoded := make([]byte, 33)
@ -197,13 +186,10 @@ func BenchmarkMetaSerializeVarInt(b *testing.B) {
default:
encoded[offset] = 0
}
offset++
}
}
func BenchmarkMetaUnserializeVarInt(b *testing.B) {
// Setup
encoded := make([]byte, 33)
offset := 0
@ -295,106 +281,9 @@ func BenchmarkMetaUnserializeVarInt(b *testing.B) {
return
}
}
}
// func BenchmarkMetaSerializeWithXDR2(b *testing.B) {
//
// // Setup
// var w bytes.Buffer
//
// // Reset timer for precise results
// b.ResetTimer()
//
// // Start benchmark
// for i := 0; i < b.N; i++ {
// w.Reset()
// _, err := xdr2.Marshal(&w, testMeta)
// if err != nil {
// b.Errorf("failed to serialize with xdr2: %s", err)
// return
// }
// }
//
// }
// func BenchmarkMetaUnserializeWithXDR2(b *testing.B) {
//
// // Setup
// var w bytes.Buffer
// _, err := xdr2.Marshal(&w, testMeta)
// if err != nil {
// b.Errorf("failed to serialize with xdr2: %s", err)
// }
// encodedData := w.Bytes()
//
// // Reset timer for precise results
// b.ResetTimer()
//
// // Start benchmark
// for i := 0; i < b.N; i++ {
// var newMeta Meta
// _, err := xdr2.Unmarshal(bytes.NewReader(encodedData), &newMeta)
// if err != nil {
// b.Errorf("failed to unserialize with xdr2: %s", err)
// return
// }
// }
//
// }
// func BenchmarkMetaSerializeWithColfer(b *testing.B) {
//
// testColf := &model.Course{
// Created: time.Now().Unix(),
// Modified: time.Now().Unix(),
// Expires: time.Now().Unix(),
// Deleted: time.Now().Unix(),
// Secret: true,
// Cronjewel: true,
// }
//
// // Setup
// for i := 0; i < b.N; i++ {
// _, err := testColf.MarshalBinary()
// if err != nil {
// b.Errorf("failed to serialize with colfer: %s", err)
// return
// }
// }
//
// }
// func BenchmarkMetaUnserializeWithColfer(b *testing.B) {
//
// testColf := &model.Course{
// Created: time.Now().Unix(),
// Modified: time.Now().Unix(),
// Expires: time.Now().Unix(),
// Deleted: time.Now().Unix(),
// Secret: true,
// Cronjewel: true,
// }
// encodedData, err := testColf.MarshalBinary()
// if err != nil {
// b.Errorf("failed to serialize with colfer: %s", err)
// return
// }
//
// // Setup
// for i := 0; i < b.N; i++ {
// var testUnColf model.Course
// err := testUnColf.UnmarshalBinary(encodedData)
// if err != nil {
// b.Errorf("failed to unserialize with colfer: %s", err)
// return
// }
// }
//
// }
func BenchmarkMetaSerializeWithCodegen(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := testMeta.GenCodeMarshal(nil)
if err != nil {
@ -402,11 +291,9 @@ func BenchmarkMetaSerializeWithCodegen(b *testing.B) {
return
}
}
}
func BenchmarkMetaUnserializeWithCodegen(b *testing.B) {
// Setup
encodedData, err := testMeta.GenCodeMarshal(nil)
if err != nil {
@ -426,11 +313,9 @@ func BenchmarkMetaUnserializeWithCodegen(b *testing.B) {
return
}
}
}
func BenchmarkMetaSerializeWithDSDJSON(b *testing.B) {
for i := 0; i < b.N; i++ {
_, err := dsd.Dump(testMeta, dsd.JSON)
if err != nil {
@ -438,11 +323,9 @@ func BenchmarkMetaSerializeWithDSDJSON(b *testing.B) {
return
}
}
}
func BenchmarkMetaUnserializeWithDSDJSON(b *testing.B) {
// Setup
encodedData, err := dsd.Dump(testMeta, dsd.JSON)
if err != nil {
@ -462,5 +345,4 @@ func BenchmarkMetaUnserializeWithDSDJSON(b *testing.B) {
return
}
}
}

View file

@ -13,7 +13,7 @@ var (
_ = time.Now()
)
// GenCodeSize returns the size of the gencode marshalled byte slice
// GenCodeSize returns the size of the gencode marshalled byte slice.
func (m *Meta) GenCodeSize() (s int) {
s += 34
return
@ -133,24 +133,16 @@ func (m *Meta) GenCodeUnmarshal(buf []byte) (uint64, error) {
i := uint64(0)
{
m.Created = 0 | (int64(buf[0+0]) << 0) | (int64(buf[1+0]) << 8) | (int64(buf[2+0]) << 16) | (int64(buf[3+0]) << 24) | (int64(buf[4+0]) << 32) | (int64(buf[5+0]) << 40) | (int64(buf[6+0]) << 48) | (int64(buf[7+0]) << 56)
}
{
m.Modified = 0 | (int64(buf[0+8]) << 0) | (int64(buf[1+8]) << 8) | (int64(buf[2+8]) << 16) | (int64(buf[3+8]) << 24) | (int64(buf[4+8]) << 32) | (int64(buf[5+8]) << 40) | (int64(buf[6+8]) << 48) | (int64(buf[7+8]) << 56)
}
{
m.Expires = 0 | (int64(buf[0+16]) << 0) | (int64(buf[1+16]) << 8) | (int64(buf[2+16]) << 16) | (int64(buf[3+16]) << 24) | (int64(buf[4+16]) << 32) | (int64(buf[5+16]) << 40) | (int64(buf[6+16]) << 48) | (int64(buf[7+16]) << 56)
}
{
m.Deleted = 0 | (int64(buf[0+24]) << 0) | (int64(buf[1+24]) << 8) | (int64(buf[2+24]) << 16) | (int64(buf[3+24]) << 24) | (int64(buf[4+24]) << 32) | (int64(buf[5+24]) << 40) | (int64(buf[6+24]) << 48) | (int64(buf[7+24]) << 56)
}
{
m.secret = buf[32] == 1

View file

@ -6,30 +6,30 @@ import (
"time"
)
var (
genCodeTestMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
)
var genCodeTestMeta = &Meta{
Created: time.Now().Unix(),
Modified: time.Now().Unix(),
Expires: time.Now().Unix(),
Deleted: time.Now().Unix(),
secret: true,
cronjewel: true,
}
func TestGenCode(t *testing.T) {
t.Parallel()
encoded, err := genCodeTestMeta.GenCodeMarshal(nil)
if err != nil {
t.Fatal(err)
}
new := &Meta{}
_, err = new.GenCodeUnmarshal(encoded)
newMeta := &Meta{}
_, err = newMeta.GenCodeUnmarshal(encoded)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(genCodeTestMeta, new) {
t.Errorf("objects are not equal, got: %v", new)
if !reflect.DeepEqual(genCodeTestMeta, newMeta) {
t.Errorf("objects are not equal, got: %v", newMeta)
}
}

View file

@ -2,7 +2,7 @@ package record
import "time"
// Meta holds
// Meta holds metadata about the record.
type Meta struct {
Created int64
Modified int64

View file

@ -32,21 +32,21 @@ func NewRawWrapper(database, key string, data []byte) (*Wrapper, error) {
metaSection, n, err := varint.GetNextBlock(data[offset:])
if err != nil {
return nil, fmt.Errorf("could not get meta section: %s", err)
return nil, fmt.Errorf("could not get meta section: %w", err)
}
offset += n
newMeta := &Meta{}
_, err = dsd.Load(metaSection, newMeta)
if err != nil {
return nil, fmt.Errorf("could not unmarshal meta section: %s", err)
return nil, fmt.Errorf("could not unmarshal meta section: %w", err)
}
var format uint8 = dsd.RAW
if !newMeta.IsDeleted() {
format, n, err = varint.Unpack8(data[offset:])
if err != nil {
return nil, fmt.Errorf("could not get dsd format: %s", err)
return nil, fmt.Errorf("could not get dsd format: %w", err)
}
offset += n
}
@ -79,7 +79,7 @@ func NewWrapper(key string, meta *Meta, format uint8, data []byte) (*Wrapper, er
}, nil
}
// Marshal marshals the object, without the database key or metadata
// Marshal marshals the object, without the database key or metadata.
func (w *Wrapper) Marshal(r Record, format uint8) ([]byte, error) {
if w.Meta() == nil {
return nil, errors.New("missing meta")
@ -134,19 +134,19 @@ func (w *Wrapper) IsWrapped() bool {
}
// Unwrap unwraps data into a record.
func Unwrap(wrapped, new Record) error {
func Unwrap(wrapped, r Record) error {
wrapper, ok := wrapped.(*Wrapper)
if !ok {
return fmt.Errorf("cannot unwrap %T", wrapped)
}
err := dsd.LoadAsFormat(wrapper.Data, wrapper.Format, new)
err := dsd.LoadAsFormat(wrapper.Data, wrapper.Format, r)
if err != nil {
return fmt.Errorf("failed to unwrap %T: %s", new, err)
return fmt.Errorf("failed to unwrap %T: %w", r, err)
}
new.SetKey(wrapped.Key())
new.SetMeta(wrapped.Meta())
r.SetKey(wrapped.Key())
r.SetMeta(wrapped.Meta())
return nil
}

View file

@ -8,6 +8,7 @@ import (
)
func TestWrapper(t *testing.T) {
t.Parallel()
// check model interface compliance
var m Record

View file

@ -32,7 +32,7 @@ var (
// If the database is already registered, only
// the description and the primary API will be
// updated and the effective object will be returned.
func Register(new *Database) (*Database, error) {
func Register(db *Database) (*Database, error) {
if !initialized.IsSet() {
return nil, errors.New("database not initialized")
}
@ -40,31 +40,31 @@ func Register(new *Database) (*Database, error) {
registryLock.Lock()
defer registryLock.Unlock()
registeredDB, ok := registry[new.Name]
registeredDB, ok := registry[db.Name]
save := false
if ok {
// update database
if registeredDB.Description != new.Description {
registeredDB.Description = new.Description
if registeredDB.Description != db.Description {
registeredDB.Description = db.Description
save = true
}
if registeredDB.ShadowDelete != new.ShadowDelete {
registeredDB.ShadowDelete = new.ShadowDelete
if registeredDB.ShadowDelete != db.ShadowDelete {
registeredDB.ShadowDelete = db.ShadowDelete
save = true
}
} else {
// register new database
if !nameConstraint.MatchString(new.Name) {
if !nameConstraint.MatchString(db.Name) {
return nil, errors.New("database name must only contain alphanumeric and `_-` characters and must be at least 3 characters long")
}
now := time.Now().Round(time.Second)
new.Registered = now
new.LastUpdated = now
new.LastLoaded = time.Time{}
db.Registered = now
db.LastUpdated = now
db.LastLoaded = time.Time{}
registry[new.Name] = new
registry[db.Name] = db
save = true
}
@ -124,14 +124,14 @@ func loadRegistry() error {
}
// parse
new := make(map[string]*Database)
err = json.Unmarshal(data, &new)
databases := make(map[string]*Database)
err = json.Unmarshal(data, &databases)
if err != nil {
return err
}
// set
registry = new
registry = databases
return nil
}
@ -150,7 +150,7 @@ func saveRegistry(lock bool) error {
// write file
// TODO: write atomically (best effort)
filePath := path.Join(rootStructure.Path, registryFileName)
return ioutil.WriteFile(filePath, data, 0600)
return ioutil.WriteFile(filePath, data, 0o0600)
}
func registryWriter() {

View file

@ -30,7 +30,7 @@ func NewBadger(name, location string) (storage.Interface, error) {
opts := badger.DefaultOptions(location)
db, err := badger.Open(opts)
if err == badger.ErrTruncateNeeded {
if errors.Is(err, badger.ErrTruncateNeeded) {
// clean up after crash
log.Warningf("database/storage: truncating corrupted value log of badger database %s: this may cause data loss", name)
opts.Truncate = true
@ -54,7 +54,7 @@ func (b *Badger) Get(key string) (record.Record, error) {
var err error
item, err = txn.Get([]byte(key))
if err != nil {
if err == badger.ErrKeyNotFound {
if errors.Is(err, badger.ErrKeyNotFound) {
return storage.ErrNotFound
}
return err
@ -114,7 +114,7 @@ func (b *Badger) Put(r record.Record) (record.Record, error) {
func (b *Badger) Delete(key string) error {
return b.db.Update(func(txn *badger.Txn) error {
err := txn.Delete([]byte(key))
if err != nil && err != badger.ErrKeyNotFound {
if err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
return err
}
return nil
@ -125,7 +125,7 @@ func (b *Badger) Delete(key string) error {
func (b *Badger) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
queryIter := iterator.New()
@ -169,17 +169,17 @@ func (b *Badger) queryExecutor(queryIter *iterator.Iterator, q *query.Query, loc
if err != nil {
return err
}
new, err := record.NewRawWrapper(b.name, r.DatabaseKey(), copiedData)
newWrapper, err := record.NewRawWrapper(b.name, r.DatabaseKey(), copiedData)
if err != nil {
return err
}
select {
case <-queryIter.Done:
return nil
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
default:
select {
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
case <-queryIter.Done:
return nil
case <-time.After(1 * time.Minute):
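The == comparisons against badger's sentinel errors are replaced with errors.Is, which also matches errors that arrive wrapped. A rough illustration of the difference (the sentinel below is a stand-in, not the real badger value):

package main

import (
	"errors"
	"fmt"
)

// errKeyNotFound stands in for a library sentinel error.
var errKeyNotFound = errors.New("key not found")

func get() error {
	// Libraries and callers may return the sentinel wrapped in context.
	return fmt.Errorf("txn: %w", errKeyNotFound)
}

func main() {
	err := get()
	fmt.Println(err == errKeyNotFound)          // false: == only matches the exact value
	fmt.Println(errors.Is(err, errKeyNotFound)) // true: errors.Is walks the wrap chain
}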

View file

@ -1,4 +1,3 @@
//nolint:unparam,maligned
package badger
import (
@ -20,7 +19,7 @@ var (
_ storage.Maintainer = &Badger{}
)
type TestRecord struct {
type TestRecord struct { //nolint:maligned
record.Base
sync.Mutex
S string
@ -40,6 +39,8 @@ type TestRecord struct {
}
func TestBadger(t *testing.T) {
t.Parallel()
testDir, err := ioutil.TempDir("", "testing-")
if err != nil {
t.Fatal(err)

View file

@ -16,9 +16,7 @@ import (
"github.com/safing/portbase/database/storage"
)
var (
bucketName = []byte{0}
)
var bucketName = []byte{0}
// BBolt database made pluggable for portbase.
type BBolt struct {
@ -39,10 +37,10 @@ func NewBBolt(name, location string) (storage.Interface, error) {
}
// Open/Create database, retry if there is a timeout.
db, err := bbolt.Open(dbFile, 0600, dbOptions)
db, err := bbolt.Open(dbFile, 0o0600, dbOptions)
for i := 0; i < 5 && err != nil; i++ {
// Try again if there is an error.
db, err = bbolt.Open(dbFile, 0600, dbOptions)
db, err = bbolt.Open(dbFile, 0o0600, dbOptions)
}
if err != nil {
return nil, err
@ -89,7 +87,6 @@ func (b *BBolt) Get(key string) (record.Record, error) {
}
return nil
})
if err != nil {
return nil, err
}
@ -188,7 +185,7 @@ func (b *BBolt) Delete(key string) error {
func (b *BBolt) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
queryIter := iterator.New()
@ -235,19 +232,19 @@ func (b *BBolt) queryExecutor(queryIter *iterator.Iterator, q *query.Query, loca
duplicate := make([]byte, len(value))
copy(duplicate, value)
new, err := record.NewRawWrapper(b.name, iterWrapper.DatabaseKey(), duplicate)
newWrapper, err := record.NewRawWrapper(b.name, iterWrapper.DatabaseKey(), duplicate)
if err != nil {
return err
}
select {
case <-queryIter.Done:
return nil
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
default:
select {
case <-queryIter.Done:
return nil
case queryIter.Next <- new:
case queryIter.Next <- newWrapper:
case <-time.After(1 * time.Second):
return errors.New("query timeout")
}
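The 0600 → 0o0600 rewrite adopts Go 1.13's explicit octal prefix, which newer formatters and linters tend to prefer; the permission value itself is unchanged. A tiny, purely illustrative check:

package main

import "fmt"

func main() {
	// Same value, the 0o prefix just makes the octal base explicit.
	fmt.Println(0600 == 0o600, 0o600 == 0o0600) // true true
	fmt.Printf("%o\n", 0o0600)                  // 600
}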

View file

@ -1,4 +1,3 @@
//nolint:unparam,maligned
package bbolt
import (
@ -22,7 +21,7 @@ var (
_ storage.Purger = &BBolt{}
)
type TestRecord struct {
type TestRecord struct { //nolint:maligned
record.Base
sync.Mutex
S string
@ -42,6 +41,8 @@ type TestRecord struct {
}
func TestBBolt(t *testing.T) {
t.Parallel()
testDir, err := ioutil.TempDir("", "testing-")
if err != nil {
t.Fatal(err)

View file

@ -2,7 +2,7 @@ package storage
import "errors"
// Errors for storages
// Errors for storages.
var (
ErrNotFound = errors.New("storage entry not found")
)

View file

@ -23,8 +23,8 @@ import (
)
const (
defaultFileMode = os.FileMode(int(0644))
defaultDirMode = os.FileMode(int(0755))
defaultFileMode = os.FileMode(0o0644)
defaultDirMode = os.FileMode(0o0755)
onWindows = runtime.GOOS == "windows"
)
@ -42,7 +42,7 @@ func init() {
func NewFSTree(name, location string) (storage.Interface, error) {
basePath, err := filepath.Abs(location)
if err != nil {
return nil, fmt.Errorf("fstree: failed to validate path %s: %s", location, err)
return nil, fmt.Errorf("fstree: failed to validate path %s: %w", location, err)
}
file, err := os.Stat(basePath)
@ -50,10 +50,10 @@ func NewFSTree(name, location string) (storage.Interface, error) {
if os.IsNotExist(err) {
err = os.MkdirAll(basePath, defaultDirMode)
if err != nil {
return nil, fmt.Errorf("fstree: failed to create directory %s: %s", basePath, err)
return nil, fmt.Errorf("fstree: failed to create directory %s: %w", basePath, err)
}
} else {
return nil, fmt.Errorf("fstree: failed to stat path %s: %s", basePath, err)
return nil, fmt.Errorf("fstree: failed to stat path %s: %w", basePath, err)
}
} else {
if !file.IsDir() {
@ -93,7 +93,7 @@ func (fst *FSTree) Get(key string) (record.Record, error) {
if os.IsNotExist(err) {
return nil, storage.ErrNotFound
}
return nil, fmt.Errorf("fstree: failed to read file %s: %s", dstPath, err)
return nil, fmt.Errorf("fstree: failed to read file %s: %w", dstPath, err)
}
r, err := record.NewRawWrapper(fst.name, key, data)
@ -132,11 +132,11 @@ func (fst *FSTree) Put(r record.Record) (record.Record, error) {
// create dir and try again
err = os.MkdirAll(filepath.Dir(dstPath), defaultDirMode)
if err != nil {
return nil, fmt.Errorf("fstree: failed to create directory %s: %s", filepath.Dir(dstPath), err)
return nil, fmt.Errorf("fstree: failed to create directory %s: %w", filepath.Dir(dstPath), err)
}
err = writeFile(dstPath, data, defaultFileMode)
if err != nil {
return nil, fmt.Errorf("fstree: could not write file %s: %s", dstPath, err)
return nil, fmt.Errorf("fstree: could not write file %s: %w", dstPath, err)
}
}
@ -153,7 +153,7 @@ func (fst *FSTree) Delete(key string) error {
// remove entry
err = os.Remove(dstPath)
if err != nil {
return fmt.Errorf("fstree: could not delete %s: %s", dstPath, err)
return fmt.Errorf("fstree: could not delete %s: %w", dstPath, err)
}
return nil
@ -163,7 +163,7 @@ func (fst *FSTree) Delete(key string) error {
func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
walkPrefix, err := fst.buildFilePath(q.DatabaseKeyPrefix(), false)
@ -180,7 +180,7 @@ func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterat
case os.IsNotExist(err):
walkRoot = filepath.Dir(walkPrefix)
default: // err != nil
return nil, fmt.Errorf("fstree: could not stat query root %s: %s", walkPrefix, err)
return nil, fmt.Errorf("fstree: could not stat query root %s: %w", walkPrefix, err)
}
queryIter := iterator.New()
@ -191,10 +191,8 @@ func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterat
func (fst *FSTree) queryExecutor(walkRoot string, queryIter *iterator.Iterator, q *query.Query, local, internal bool) {
err := filepath.Walk(walkRoot, func(path string, info os.FileInfo, err error) error {
// check for error
if err != nil {
return fmt.Errorf("fstree: error in walking fs: %s", err)
return fmt.Errorf("fstree: error in walking fs: %w", err)
}
if info.IsDir() {
@ -217,17 +215,17 @@ func (fst *FSTree) queryExecutor(walkRoot string, queryIter *iterator.Iterator,
if os.IsNotExist(err) {
return nil
}
return fmt.Errorf("fstree: failed to read file %s: %s", path, err)
return fmt.Errorf("fstree: failed to read file %s: %w", path, err)
}
// parse
key, err := filepath.Rel(fst.basePath, path)
if err != nil {
return fmt.Errorf("fstree: failed to extract key from filepath %s: %s", path, err)
return fmt.Errorf("fstree: failed to extract key from filepath %s: %w", path, err)
}
r, err := record.NewRawWrapper(fst.name, key, data)
if err != nil {
return fmt.Errorf("fstree: failed to load file %s: %s", path, err)
return fmt.Errorf("fstree: failed to load file %s: %w", path, err)
}
if !r.Meta().CheckValidity() {

View file

@ -2,7 +2,5 @@ package fstree
import "github.com/safing/portbase/database/storage"
var (
// Compile time interface checks.
_ storage.Interface = &FSTree{}
)
// Compile time interface checks.
var _ storage.Interface = &FSTree{}
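Collapsing the single-element var block to one line is a formatting cleanup; the statement itself is the usual compile-time interface check. A generic sketch of that idiom, with invented types:

package main

import "fmt"

type Greeter interface{ Greet() string }

type English struct{}

func (English) Greet() string { return "hello" }

// Compile-time check: the build fails if English stops satisfying Greeter.
var _ Greeter = English{}

func main() { fmt.Println(English{}.Greet()) }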

View file

@ -113,7 +113,7 @@ func (hm *HashMap) Delete(key string) error {
func (hm *HashMap) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
_, err := q.Check()
if err != nil {
return nil, fmt.Errorf("invalid query: %s", err)
return nil, fmt.Errorf("invalid query: %w", err)
}
queryIter := iterator.New()

View file

@ -1,4 +1,3 @@
//nolint:unparam,maligned
package hashmap
import (
@ -6,10 +5,9 @@ import (
"sync"
"testing"
"github.com/safing/portbase/database/storage"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/database/storage"
)
var (
@ -18,7 +16,7 @@ var (
_ storage.Batcher = &HashMap{}
)
type TestRecord struct {
type TestRecord struct { //nolint:maligned
record.Base
sync.Mutex
S string
@ -38,6 +36,8 @@ type TestRecord struct {
}
func TestHashMap(t *testing.T) {
t.Parallel()
// start
db, err := NewHashMap("test", "")
if err != nil {

View file

@ -10,15 +10,13 @@ import (
"github.com/safing/portbase/database/record"
)
var (
// ErrNotImplemented is returned when a function is not implemented by a storage.
ErrNotImplemented = errors.New("not implemented")
)
// ErrNotImplemented is returned when a function is not implemented by a storage.
var ErrNotImplemented = errors.New("not implemented")
// InjectBase is a dummy base structure to reduce boilerplate code for injected storage interfaces.
type InjectBase struct{}
// Compile time interface check
// Compile time interface check.
var _ Interface = &InjectBase{}
// Get returns a database record.

View file

@ -26,7 +26,7 @@ type Interface interface {
MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error
}
// Maintainer defines the database storage API for backends that support optimized fetching of only the metadata.
// MetaHandler defines the database storage API for backends that support optimized fetching of only the metadata.
type MetaHandler interface {
GetMeta(key string) (*record.Meta, error)
}

View file

@ -17,7 +17,7 @@ type Sinkhole struct {
}
var (
// Compile time interface check
// Compile time interface checks.
_ storage.Interface = &Sinkhole{}
_ storage.Maintainer = &Sinkhole{}
_ storage.Batcher = &Sinkhole{}

View file

@ -7,11 +7,9 @@ import (
"github.com/safing/portbase/utils"
)
var (
root *utils.DirStructure
)
var root *utils.DirStructure
// Initialize initializes the data root directory
// Initialize initializes the data root directory.
func Initialize(rootDir string, perm os.FileMode) error {
if root != nil {
return errors.New("already initialized")

View file

@ -2,6 +2,7 @@ package dsd
import "errors"
// Errors.
var (
ErrIncompatibleFormat = errors.New("dsd: format is incompatible with operation")
ErrIsRaw = errors.New("dsd: given data is in raw format")
@ -26,6 +27,7 @@ const (
LIST = 76 // L
)
// Default Formats.
var (
DefaultSerializationFormat uint8 = JSON
DefaultCompressionFormat uint8 = GZIP

View file

@ -10,6 +10,7 @@ import (
"net/http"
)
// HTTP Related Errors.
var (
ErrMissingBody = errors.New("dsd: missing http body")
ErrMissingContentType = errors.New("dsd: missing http content type")
@ -120,6 +121,7 @@ func DumpToHTTPResponse(w http.ResponseWriter, r *http.Request, t interface{}) e
return nil
}
// Format and MimeType mappings.
var (
FormatToMimeType = map[uint8]string{
JSON: "application/json; charset=utf-8",

View file

@ -7,10 +7,9 @@ import (
)
func TestConversion(t *testing.T) {
t.Parallel()
// t.Run("Basic Static Encoding and Decoding", func(t *testing.T) { ... }
var subjects = []struct {
subjects := []struct {
intType uint8
bytes []byte
integer uint64
@ -100,14 +99,12 @@ func TestConversion(t *testing.T) {
}
}
}
func TestFails(t *testing.T) {
t.Parallel()
// t.Run("Basic Static Encoding and Decoding", func(t *testing.T) { ... }
var subjects = []struct {
subjects := []struct {
intType uint8
bytes []byte
}{
@ -141,5 +138,4 @@ func TestFails(t *testing.T) {
}
}
}

View file

@ -8,9 +8,7 @@ import (
"github.com/safing/portbase/modules"
)
var (
showVersion bool
)
var showVersion bool
func init() {
modules.Register("info", prep, nil, nil)

View file

@ -76,7 +76,7 @@ func FullVersion() string {
}
s += fmt.Sprintf("\ncommit %s\n", commit)
s += fmt.Sprintf("built with %s (%s) %s/%s\n", runtime.Version(), runtime.Compiler, runtime.GOOS, runtime.GOARCH)
s += fmt.Sprintf(" using options %s\n", strings.Replace(buildOptions, "§", " ", -1))
s += fmt.Sprintf(" using options %s\n", strings.ReplaceAll(buildOptions, "§", " "))
s += fmt.Sprintf(" by %s@%s\n", buildUser, buildHost)
s += fmt.Sprintf(" on %s\n", buildDate)
s += fmt.Sprintf("\nLicensed under the %s license.\nThe source code is available here: %s", license, buildSource)
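strings.Replace with n = -1 becomes strings.ReplaceAll, which some linters suggest and which reads more clearly; the behaviour is identical. A quick illustrative check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	opts := "a§b§c"
	// ReplaceAll is equivalent to Replace with n = -1.
	fmt.Println(strings.ReplaceAll(opts, "§", " "))                                        // "a b c"
	fmt.Println(strings.Replace(opts, "§", " ", -1) == strings.ReplaceAll(opts, "§", " ")) // true
}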

View file

@ -1,4 +1,4 @@
// +build !windows
//go:build !windows
package log
@ -8,14 +8,16 @@ const (
)
const (
// colorBlack = "\033[30m"
colorRed = "\033[31m"
// colorGreen = "\033[32m"
colorRed = "\033[31m"
colorYellow = "\033[33m"
colorBlue = "\033[34m"
colorMagenta = "\033[35m"
colorCyan = "\033[36m"
// colorWhite = "\033[37m"
// Saved for later:
// colorBlack = "\033[30m" //.
// colorGreen = "\033[32m" //.
// colorWhite = "\033[37m" //.
)
func (s Severity) color() string {
@ -30,6 +32,8 @@ func (s Severity) color() string {
return colorRed
case CriticalLevel:
return colorMagenta
case TraceLevel:
return ""
default:
return ""
}

View file

@ -15,7 +15,6 @@ var (
)
func log(level Severity, msg string, tracer *ContextTracer) {
if !started.IsSet() {
// a bit resource intense, but keeps logs before logging started.
// TODO: create option to disable logging

View file

@ -88,7 +88,7 @@ func (ll *logLine) Equal(ol *logLine) bool {
return true
}
// Log Levels
// Log Levels.
const (
TraceLevel Severity = 1
DebugLevel Severity = 2
@ -185,7 +185,6 @@ func ParseLevel(level string) Severity {
// Start starts the logging system. Must be called in order to see logs.
func Start() (err error) {
if !initializing.SetToIf(false, true) {
return nil
}

View file

@ -13,8 +13,8 @@ func init() {
}
}
// test waiting
func TestLogging(t *testing.T) {
t.Parallel()
// skip
if testing.Short() {
@ -61,5 +61,4 @@ func TestLogging(t *testing.T) {
// do not really shut down, we may need logging for other tests
// ShutdownLogging()
}
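t.Parallel() is added across the test files so independent tests within a package can run concurrently and the paralleltest linter is satisfied; tests that would interfere with each other are instead marked with a nolint comment. A minimal sketch of the pattern (test name and assertion are invented):

package example

import "testing"

func TestSomething(t *testing.T) {
	t.Parallel() // opt this test into running alongside other parallel tests

	if got, want := 1+1, 2; got != want {
		t.Errorf("got %d, want %d", got, want)
	}
}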

View file

@ -143,13 +143,11 @@ StackTrace:
}()
var currentLine *logLine
var nextLine *logLine
var duplicates uint64
for {
// reset
currentLine = nil
nextLine = nil
duplicates = 0
// wait until logs need to be processed
@ -175,7 +173,7 @@ StackTrace:
writeLoop:
for {
select {
case nextLine = <-logBuffer:
case nextLine := <-logBuffer:
// first line we process, just assign to currentLine
if currentLine == nil {
currentLine = nextLine
@ -209,10 +207,6 @@ StackTrace:
// add to unexpected logs
addUnexpectedLogs(currentLine)
}
// reset state
currentLine = nil //nolint:ineffassign
nextLine = nil
duplicates = 0 //nolint:ineffassign
// back down a little
select {
@ -281,13 +275,13 @@ func GetLastUnexpectedLogs() []string {
defer lastUnexpectedLogsLock.Unlock()
// Make a copy and return.
len := len(lastUnexpectedLogs)
logsLen := len(lastUnexpectedLogs)
start := lastUnexpectedLogsIndex
logsCopy := make([]string, 0, len)
logsCopy := make([]string, 0, logsLen)
// Loop from mid-to-mid.
for i := start; i < start+len; i++ {
if lastUnexpectedLogs[i%len] != "" {
logsCopy = append(logsCopy, lastUnexpectedLogs[i%len])
for i := start; i < start+logsLen; i++ {
if lastUnexpectedLogs[i%logsLen] != "" {
logsCopy = append(logsCopy, lastUnexpectedLogs[i%logsLen])
}
}
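Renaming the local len to logsLen avoids shadowing the predeclared len function, which is legal but confusing and flagged by linters. A small, self-contained version of the mid-to-mid ring-buffer copy with illustrative data:

package main

import "fmt"

func main() {
	ring := []string{"d", "e", "", "a", "b", "c"} // ring buffer; next write index is 2
	start := 2

	logsLen := len(ring) // naming this variable "len" would shadow the builtin
	out := make([]string, 0, logsLen)
	for i := start; i < start+logsLen; i++ {
		if ring[i%logsLen] != "" {
			out = append(out, ring[i%logsLen])
		}
	}
	fmt.Println(out) // [a b c d e]
}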

View file

@ -19,9 +19,7 @@ type ContextTracer struct {
logs []*logLine
}
var (
key = ContextTracerKey{}
)
var key = ContextTracerKey{}
// AddTracer adds a ContextTracer to the returned Context. Will return a nil ContextTracer if logging level is not set to trace. Will return a nil ContextTracer if one already exists. Will return a nil ContextTracer in case of an error. Will return a nil context if nil.
func AddTracer(ctx context.Context) (context.Context, *ContextTracer) {

View file

@ -7,6 +7,8 @@ import (
)
func TestContextTracer(t *testing.T) {
t.Parallel()
// skip
if testing.Short() {
t.Skip()

View file

@ -111,6 +111,7 @@ func getLoadAvg() *load.AvgStat {
return loadAvg
}
// LoadAvg1 returns the 1-minute average system load.
func LoadAvg1() (loadAvg float64, ok bool) {
if stat := getLoadAvg(); stat != nil {
return stat.Load1 / float64(runtime.NumCPU()), true
@ -118,6 +119,7 @@ func LoadAvg1() (loadAvg float64, ok bool) {
return 0, false
}
// LoadAvg5 returns the 5-minute average system load.
func LoadAvg5() (loadAvg float64, ok bool) {
if stat := getLoadAvg(); stat != nil {
return stat.Load5 / float64(runtime.NumCPU()), true
@ -125,6 +127,7 @@ func LoadAvg5() (loadAvg float64, ok bool) {
return 0, false
}
// LoadAvg15 returns the 15-minute average system load.
func LoadAvg15() (loadAvg float64, ok bool) {
if stat := getLoadAvg(); stat != nil {
return stat.Load15 / float64(runtime.NumCPU()), true
@ -159,6 +162,7 @@ func getMemStat() *mem.VirtualMemoryStat {
return memStat
}
// MemTotal returns the total system memory.
func MemTotal() (total uint64, ok bool) {
if stat := getMemStat(); stat != nil {
return stat.Total, true
@ -166,6 +170,7 @@ func MemTotal() (total uint64, ok bool) {
return 0, false
}
// MemUsed returns the used system memory.
func MemUsed() (used uint64, ok bool) {
if stat := getMemStat(); stat != nil {
return stat.Used, true
@ -173,6 +178,7 @@ func MemUsed() (used uint64, ok bool) {
return 0, false
}
// MemAvailable returns the available system memory.
func MemAvailable() (available uint64, ok bool) {
if stat := getMemStat(); stat != nil {
return stat.Available, true
@ -180,6 +186,7 @@ func MemAvailable() (available uint64, ok bool) {
return 0, false
}
// MemUsedPercent returns the percent of used system memory.
func MemUsedPercent() (usedPercent float64, ok bool) {
if stat := getMemStat(); stat != nil {
return stat.UsedPercent, true
@ -223,6 +230,7 @@ func getDiskStat() *disk.UsageStat {
return diskStat
}
// DiskTotal returns the total disk space (from the program's data root).
func DiskTotal() (total uint64, ok bool) {
if stat := getDiskStat(); stat != nil {
return stat.Total, true
@ -230,6 +238,7 @@ func DiskTotal() (total uint64, ok bool) {
return 0, false
}
// DiskUsed returns the used disk space (from the program's data root).
func DiskUsed() (used uint64, ok bool) {
if stat := getDiskStat(); stat != nil {
return stat.Used, true
@ -237,6 +246,7 @@ func DiskUsed() (used uint64, ok bool) {
return 0, false
}
// DiskFree returns the available disk space (from the program's data root).
func DiskFree() (free uint64, ok bool) {
if stat := getDiskStat(); stat != nil {
return stat.Free, true
@ -244,6 +254,7 @@ func DiskFree() (free uint64, ok bool) {
return 0, false
}
// DiskUsedPercent returns the percent of used disk space (from the program's data root).
func DiskUsedPercent() (usedPercent float64, ok bool) {
if stat := getDiskStat(); stat != nil {
return stat.UsedPercent, true

View file

@ -1,8 +1,6 @@
package modules
var (
cmdLineOperation func() error
)
var cmdLineOperation func() error
// SetCmdLineOperation sets a command line operation to be executed instead of starting the system. This is useful when functions need all modules to be prepared for a special operation.
func SetCmdLineOperation(fn func() error) {

View file

@ -11,7 +11,7 @@ import (
var (
errorReportingChannel chan *ModuleError
reportToStdErr = true
lastReportedError *ModuleError
lastReportedError *ModuleError //nolint:errname
reportingLock sync.Mutex
)
@ -120,7 +120,7 @@ func (me *ModuleError) Report() {
// IsPanic returns whether the given error is a wrapped panic by the modules package and additionally returns it, if true.
func IsPanic(err error) (bool, *ModuleError) {
switch val := err.(type) {
switch val := err.(type) { //nolint:errorlint // TODO: improve
case *ModuleError:
return true, val
default:
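The //nolint:errorlint with a TODO hints that the eventual fix is errors.As, which also matches errors hidden behind %w wrapping, unlike a plain type switch. A rough sketch of that alternative, using a simplified stand-in for the package's error type:

package main

import (
	"errors"
	"fmt"
)

// ModuleError is a simplified stand-in for illustration only.
type ModuleError struct{ Msg string }

func (e *ModuleError) Error() string { return e.Msg }

func main() {
	err := fmt.Errorf("wrapped: %w", &ModuleError{Msg: "boom"})

	// A type switch on err misses wrapped values; errors.As unwraps the chain.
	var me *ModuleError
	if errors.As(err, &me) {
		fmt.Println("found:", me.Msg) // found: boom
	}
}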

View file

@ -5,12 +5,13 @@ import (
"errors"
"fmt"
"github.com/safing/portbase/log"
"github.com/tevino/abool"
"github.com/safing/portbase/log"
)
type eventHooks struct {
// hooks holds all registed hooks for the event.
// hooks holds all registered hooks for the event.
hooks []*eventHook
// internal signifies that the event and its data may not be exposed and may
@ -195,7 +196,8 @@ var (
eventSubscriptionFuncReady = abool.NewBool(false)
)
// SetEventSubscriptionFunc
// SetEventSubscriptionFunc sets a function that is called for every event.
// This enables the runtime package to expose events.
func SetEventSubscriptionFunc(fn func(moduleName, eventName string, internal bool, data interface{})) bool {
if eventSubscriptionFuncEnabled.SetToIf(false, true) {
eventSubscriptionFunc = fn

View file

@ -1,15 +1,13 @@
package modules
var (
exitStatusCode int
)
var exitStatusCode int
// SetExitStatusCode sets the exit code that the program shall return to the host after shutdown.
func SetExitStatusCode(n int) {
exitStatusCode = n
}
// GetExitStatusCode waits for the shutdown to complete and then returns the exit code
// GetExitStatusCode waits for the shutdown to complete and then returns the previously set exit code.
func GetExitStatusCode() int {
<-shutdownCompleteSignal
return exitStatusCode

View file

@ -3,8 +3,9 @@ package modules
import (
"context"
"github.com/safing/portbase/log"
"github.com/tevino/abool"
"github.com/safing/portbase/log"
)
var (

View file

@ -5,7 +5,6 @@ import (
)
func testModuleMgmt(t *testing.T) {
// enable module management
EnableModuleManagement(nil)

View file

@ -5,8 +5,9 @@ import (
"sync/atomic"
"time"
"github.com/safing/portbase/log"
"github.com/tevino/abool"
"github.com/safing/portbase/log"
)
// TODO: getting some errors when in nanosecond precision for tests:
@ -159,9 +160,7 @@ func (m *Module) runMicroTask(name *string, fn func(context.Context) error) (err
return //nolint:nakedret // need to use named return val in order to change in defer
}
var (
microTaskSchedulerStarted = abool.NewBool(false)
)
var microTaskSchedulerStarted = abool.NewBool(false)
func microTaskScheduler() {
// only ever start once

View file

@ -18,8 +18,7 @@ func init() {
go microTaskScheduler()
}
// test waiting
func TestMicroTaskWaiting(t *testing.T) {
func TestMicroTaskWaiting(t *testing.T) { //nolint:paralleltest // Too much interference expected.
// skip
if testing.Short() {
@ -108,17 +107,20 @@ func TestMicroTaskWaiting(t *testing.T) {
if completeOutput != mtwExpectedOutput {
t.Errorf("MicroTask waiting test failed, expected sequence %s, got %s", mtwExpectedOutput, completeOutput)
}
}
// test ordering
// Test Microtask ordering.
// globals
var mtoWaitGroup sync.WaitGroup
var mtoOutputChannel chan string
var mtoWaitCh chan struct{}
// Microtask test globals.
var (
mtoWaitGroup sync.WaitGroup
mtoOutputChannel chan string
mtoWaitCh chan struct{}
)
// Microtask test functions.
// functions
func mediumPrioTaskTester() {
defer mtoWaitGroup.Done()
<-mtoWaitCh
@ -139,8 +141,7 @@ func lowPrioTaskTester() {
})
}
// test
func TestMicroTaskOrdering(t *testing.T) {
func TestMicroTaskOrdering(t *testing.T) { //nolint:paralleltest // Too much interference expected.
// skip
if testing.Short() {
@ -204,5 +205,4 @@ func TestMicroTaskOrdering(t *testing.T) {
if !strings.Contains(completeOutput, "11111") || !strings.Contains(completeOutput, "22222") {
t.Errorf("MicroTask ordering test failed, output was %s. This happens occasionally, please run the test multiple times to verify", completeOutput)
}
}

Some files were not shown because too many files have changed in this diff.