Initial commit after restructure

This commit is contained in:
Daniel 2018-08-13 14:05:58 +02:00
commit 96ec15b39b
70 changed files with 6945 additions and 0 deletions

76
api/actions.go Normal file
View file

@ -0,0 +1,76 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"fmt"
_ "github.com/Safing/safing-core/configuration"
"github.com/Safing/safing-core/database"
"github.com/Safing/safing-core/formats/dsd"
"github.com/ipfs/go-datastore"
)
// Get queries the database for the given key and streams every matching
// record to the session as a "current|<key>|<json>" message. On query
// failure or an empty result set, an error message is sent instead.
func Get(session *Session, key string) {
	iterator, err := database.EasyQueryIterator(key)
	if err != nil {
		handleError(session, fmt.Sprintf("error|500|could not query: %s", err))
		return
	}
	gotResults := false
	for {
		obj, ok := iterator.NextSync()
		if !ok {
			break
		}
		gotResults = true
		dump, dumpErr := database.DumpModel(obj.Value, dsd.JSON)
		if dumpErr != nil {
			handleError(session, fmt.Sprintf("error|500|dump failed: %s", dumpErr))
			continue
		}
		session.send <- []byte(fmt.Sprintf("current|%s|%s", obj.Key, string(dump)))
	}
	if !gotResults {
		handleError(session, "error|400|no results: "+key)
	}
}
// Subscribe registers the session for change notifications on key and
// immediately delivers the current records via Get.
func Subscribe(session *Session, key string) {
	session.Subscribe(key)
	Get(session, key)
}

// Unsubscribe removes the session's subscription for key.
func Unsubscribe(session *Session, key string) {
	session.Unsubscribe(key)
}
// Save wraps the raw data for the given key and either creates a new
// database record (create == true) or updates an existing one. Failures
// are reported to the session as error messages.
func Save(session *Session, key string, create bool, data []byte) {
	dbKey := datastore.NewKey(key)
	model, err := database.NewWrapper(&dbKey, data)
	if err != nil {
		handleError(session, fmt.Sprintf("error|500|failed to wrap object: %s", err))
		return
	}
	if create {
		err = database.Create(dbKey, model)
	} else {
		err = database.Update(dbKey, model)
	}
	if err != nil {
		handleError(session, fmt.Sprintf("error|500|failed to save to database: %s", err))
	}
}
// Delete removes the record stored under key from the database and
// reports failures to the session.
func Delete(session *Session, key string) {
	if err := database.Delete(datastore.NewKey(key)); err != nil {
		handleError(session, fmt.Sprintf("error|500|failed to delete from database: %s", err))
	}
}

31
api/api.go Normal file
View file

@ -0,0 +1,31 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"net/http"
"github.com/Safing/safing-core/log"
"github.com/Safing/safing-core/modules"
)
var (
	// apiModule is the module handle registered in Start.
	apiModule *modules.Module
	// apiAddress is the listen address of the API HTTP server.
	// NOTE(review): port 18 is a privileged low port (requires root on
	// most systems) — confirm this is intended.
	apiAddress = ":18"
)
// Start registers the api module, serves the API in a background
// goroutine and blocks until the module is told to stop, then reports
// shutdown completion.
// NOTE(review): the listener goroutine is not shut down here; it keeps
// running until process exit.
func Start() {
	apiModule = modules.Register("Api", 32)
	go run()
	<-apiModule.Stop
	apiModule.StopComplete()
}
// run builds the HTTP router and listens on apiAddress until the
// listener fails; the failure is logged as an error.
func run() {
	router := NewRouter()
	log.Infof("api: starting to listen on %s", apiAddress)
	err := http.ListenAndServe(apiAddress, router)
	log.Errorf("api: listener failed: %s", err)
}

120
api/handlers.go Normal file
View file

@ -0,0 +1,120 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"bytes"
"fmt"
"github.com/Safing/safing-core/log"
"net/http"
"github.com/gorilla/websocket"
)
// allowAnyOrigin is the websocket upgrader's CheckOrigin hook. It
// accepts every origin, i.e. cross-origin websocket connections are NOT
// restricted. NOTE(review): this disables origin-based CSRF protection
// for the API — confirm this is intended.
func allowAnyOrigin(r *http.Request) bool {
	return true
}
// apiVersionOneHandler upgrades an incoming HTTP request to a websocket
// connection, establishes or resumes a Session (first message must be
// "start" or "resume|<id>") and then serves API requests of the form
// "action|key[|data]" until the connection is closed.
func apiVersionOneHandler(w http.ResponseWriter, r *http.Request) {
	upgrader := websocket.Upgrader{
		CheckOrigin:     allowAnyOrigin,
		ReadBufferSize:  1024,
		WriteBufferSize: 65536,
	}

	wsConn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Errorf("upgrade to websocket failed: %s\n", err)
		return
	}

	// new or resume session?
	var session *Session
	_, msg, err := wsConn.ReadMessage()
	if err != nil {
		wsConn.Close()
		return
	}
	parts := bytes.SplitN(msg, []byte("|"), 2)
	switch string(parts[0]) {
	case "start":
		session = NewSession(wsConn)
	case "resume":
		if len(parts) > 1 {
			session, err = ResumeSession(string(parts[1]), wsConn)
			if err != nil {
				// ResumeSession falls back to a fresh session on failure,
				// so session is valid even when err != nil.
				handleError(session, fmt.Sprintf("error|500|created new session, restoring failed: %s", err))
			}
		} else {
			session = NewSession(wsConn)
		}
	default:
		wsConn.Close()
		return
	}
	defer session.Deactivate()

	// start handling requests
	for {
		_, msg, err := wsConn.ReadMessage()
		if err != nil {
			if !websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {
				log.Warningf("api: read error: %s", err)
			}
			return
		}

		log.Tracef("api: got request %s", string(msg))

		splitParams := bytes.SplitN(msg, []byte("|"), 3)
		if len(splitParams) < 2 {
			// BUGFIX: without this continue, splitParams[1] below would
			// index out of range and panic the handler goroutine.
			handleError(session, "error|400|too few params")
			continue
		}
		action, key := string(splitParams[0]), string(splitParams[1])

		switch action {
		case "get":
			Get(session, key)
		case "subscribe":
			Subscribe(session, key)
		case "unsubscribe":
			Unsubscribe(session, key)
		case "create":
			if len(splitParams) < 3 {
				// BUGFIX: continue, or splitParams[2] below panics.
				handleError(session, "error|400|invalid action: cannot create without data")
				continue
			}
			Save(session, key, true, splitParams[2])
		case "update":
			if len(splitParams) < 3 {
				// BUGFIX: continue, or splitParams[2] below panics.
				handleError(session, "error|400|invalid action: cannot update without data")
				continue
			}
			Save(session, key, false, splitParams[2])
		case "delete":
			Delete(session, key)
		default:
			handleError(session, "error|400|invalid action: "+action)
		}
	}
}
// handleError logs the given API error message and forwards it verbatim
// to the client session.
func handleError(session *Session, message string) {
	// BUGFIX: pass the message as an argument, not as the format string,
	// so '%' characters in keys or wrapped error texts are not
	// misinterpreted as format verbs.
	log.Warningf("api: %s", message)
	session.send <- []byte(message)
}

26
api/logger.go Normal file
View file

@ -0,0 +1,26 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"net/http"
"time"
"github.com/Safing/safing-core/log"
)
// Logger wraps inner so that every handled request is logged with its
// method, request URI, route name and handling duration.
func Logger(inner http.Handler, name string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		begin := time.Now()
		inner.ServeHTTP(w, r)
		log.Infof("%s\t%s\t%s\t%s", r.Method, r.RequestURI, name, time.Since(begin))
	})
}

28
api/router.go Normal file
View file

@ -0,0 +1,28 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"net/http"
"github.com/gorilla/mux"
)
// NewRouter assembles the mux router from the static route table,
// wrapping every handler with request logging.
func NewRouter() *mux.Router {
	router := mux.NewRouter().StrictSlash(true)
	for _, route := range routes {
		wrapped := Logger(route.Handler, route.Name)
		router.
			Methods(route.Method).
			PathPrefix(route.Path).
			Name(route.Name).
			Handler(wrapped)
	}
	return router
}

31
api/routes.go Normal file
View file

@ -0,0 +1,31 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"net/http"
)
// Route describes a single HTTP route served by the api module.
type Route struct {
	Name    string       // route name, also used for request logging
	Method  string       // HTTP method to match
	Path    string       // path prefix to match
	Handler http.Handler // handler that serves the route
}

// Routes is a list of Route definitions.
type Routes []Route

// routes is the static route table consumed by NewRouter.
var routes = Routes{
	Route{
		"Index",
		"GET",
		"/test",
		// serves the static test page from the api/test directory
		http.StripPrefix("/test", http.FileServer(http.Dir("api/test"))),
	},
	Route{
		"Websockets",
		"GET",
		"/api/v1",
		http.HandlerFunc(apiVersionOneHandler),
	},
}

145
api/session.go Normal file
View file

@ -0,0 +1,145 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"fmt"
"strings"
"time"
"github.com/gorilla/websocket"
"github.com/ipfs/go-datastore"
uuid "github.com/satori/go.uuid"
"github.com/Safing/safing-core/database"
"github.com/Safing/safing-core/log"
)
// Session holds data for an api session.
type Session struct {
	database.Base

	ID string // random hex id (uuid v4 without dashes), sent to the client for resuming

	wsConn *websocket.Conn // active websocket connection; nil while deactivated

	Expires       int64    // unix timestamp, refreshed to now+10min on every save
	Subscriptions []string // persisted copy of the subscription keys

	subscription *database.Subscription // live subscription feeding Writer; not persisted
	send         chan []byte            // direct messages to be written to the client; not persisted
}
// sessionModel is a nil placeholder; only use this as a parameter for
// database.EnsureModel-like functions.
var sessionModel *Session

// init registers the Session type with the database so stored objects
// can be decoded back into *Session.
func init() {
	database.RegisterModel(sessionModel, func() database.Model { return new(Session) })
}
// NewSession creates a new session, persists it, announces the id to the
// client as the first message ("session|<id>") and starts the writer
// goroutine.
func NewSession(wsConn *websocket.Conn) *Session {
	session := &Session{
		ID:           strings.Replace(uuid.NewV4().String(), "-", "", -1),
		subscription: database.NewSubscription(),
		send:         make(chan []byte, 1024),
	}
	session.wsConn = wsConn
	// BUGFIX: do not silently ignore persistence errors. The session still
	// works in memory, but resuming it later would fail, so at least log.
	if err := session.CreateWithID(); err != nil {
		log.Warningf("api: could not persist new session %s: %s", session.ID, err)
	}
	log.Tracef("api: created new session: %s", session.ID)
	session.send <- []byte("session|" + session.ID)
	go session.Writer()
	return session
}
// ResumeSession resumes an existing session: it reattaches the given
// websocket connection (closing a possibly still-open previous one),
// saves the session and restarts the writer. If the session cannot be
// loaded, a brand-new session is returned together with a non-nil
// error — callers must treat the returned session as valid even when
// err != nil.
func ResumeSession(id string, wsConn *websocket.Conn) (*Session, error) {
	session, err := GetSession(id)
	if err == nil {
		if session.wsConn != nil {
			session.wsConn.Close()
		}
		session.wsConn = wsConn
		session.Save()
		log.Tracef("api: resumed session %s", session.ID)
		go session.Writer()
		return session, nil
	}
	return NewSession(wsConn), fmt.Errorf("api: failed to restore session %s, creating new", id)
}
// Deactivate closes down a session, making it ready to be resumed: the
// websocket connection is closed, the live subscription is destroyed
// (the persisted Subscriptions list is kept for a later resume) and the
// session is saved.
func (m *Session) Deactivate() {
	m.wsConn.Close()
	m.wsConn = nil
	m.subscription.Destroy()
	m.subscription = nil
	m.Save()
}
// Subscribe subscribes to a database key and saves the new subscription
// table if the session was already persisted.
func (m *Session) Subscribe(key string) {
	m.subscription.Subscribe(key)
	m.Subscriptions = *m.subscription.Subscriptions()
	if m.GetKey() == nil {
		return
	}
	m.Save()
}

// Unsubscribe unsubscribes from a database key and saves the new
// subscription table if the session was already persisted.
func (m *Session) Unsubscribe(key string) {
	m.subscription.Unsubscribe(key)
	m.Subscriptions = *m.subscription.Subscriptions()
	if m.GetKey() == nil {
		return
	}
	m.Save()
}
// CreateWithID saves the Session under its own ID in the default
// namespace, refreshing the expiry timestamp.
func (m *Session) CreateWithID() error {
	m.Expires = time.Now().Add(10 * time.Minute).Unix()
	return m.CreateObject(&database.ApiSessions, m.ID, m)
}

// Create saves the Session under the provided name in the default
// namespace, refreshing the expiry timestamp.
func (m *Session) Create(name string) error {
	m.Expires = time.Now().Add(10 * time.Minute).Unix()
	return m.CreateObject(&database.ApiSessions, name, m)
}

// CreateInNamespace saves the Session under the provided name in the
// provided namespace, refreshing the expiry timestamp.
func (m *Session) CreateInNamespace(namespace *datastore.Key, name string) error {
	m.Expires = time.Now().Add(10 * time.Minute).Unix()
	return m.CreateObject(namespace, name, m)
}

// Save persists the Session, refreshing the expiry timestamp.
func (m *Session) Save() error {
	m.Expires = time.Now().Add(10 * time.Minute).Unix()
	return m.SaveObject(m)
}
// GetSession fetches the Session stored under name in the default
// namespace.
func GetSession(name string) (*Session, error) {
	return GetSessionFromNamespace(&database.ApiSessions, name)
}
// GetSessionFromNamespace fetches Session with the provided name from
// the provided namespace. Sessions loaded from the database have no live
// subscription or send channel (those fields are not persisted), so both
// are re-created here before the session is returned.
func GetSessionFromNamespace(namespace *datastore.Key, name string) (*Session, error) {
	object, err := database.GetAndEnsureModel(namespace, name, sessionModel)
	if err != nil {
		return nil, err
	}
	model, ok := object.(*Session)
	if !ok {
		return nil, database.NewMismatchError(object, sessionModel)
	}
	if model.subscription == nil {
		model.subscription = database.NewSubscription()
		for _, entry := range model.Subscriptions {
			model.subscription.Subscribe(entry)
		}
	}
	// BUGFIX: the original checked "model.send != nil" and therefore never
	// created the channel on freshly loaded sessions, leaving send nil and
	// blocking every write to it forever.
	if model.send == nil {
		model.send = make(chan []byte, 1024)
	}
	return model, nil
}

45
api/test/index.html Normal file
View file

@ -0,0 +1,45 @@
<!DOCTYPE html>
<html>
<head>
<title></title>
<!-- <script src="https://cdn.jsdelivr.net/sockjs/1/sockjs.min.js"></script> -->
</head>
<body>
<script type="text/javascript">
var ws = new WebSocket('ws://localhost:18/api/v1');
ws.onopen = function () {
console.log('open');
};
ws.onerror = function (error) {
console.log('error');
console.log(error);
};
ws.onmessage = function (e) {
reader = new FileReader()
reader.onload = function(e) {
console.log(e.target.result)
}
reader.readAsText(e.data)
};
// var sock = new SockJS("http://localhost:8080/api/v1");
//
// sock.onopen = function() {
// console.log('open');
// };
//
// sock.onmessage = function(e) {
// console.log('message received: ', e.data);
// };
//
// sock.onclose = function(e) {
// console.log('close', e);
// };
</script>
yeeee
</body>
</html>

88
api/writer.go Normal file
View file

@ -0,0 +1,88 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package api
import (
"fmt"
"github.com/Safing/safing-core/database"
"github.com/Safing/safing-core/formats/dsd"
"github.com/Safing/safing-core/log"
"github.com/gorilla/websocket"
"github.com/ipfs/go-datastore"
)
// Writer writes messages to the client.
//
// It multiplexes the session's direct send channel with the subscription
// feeds (Created/Updated/Deleted) and pushes each resulting message over
// the websocket connection. Direct writes are prioritized. The writer
// stops when a feeding channel is closed (nil message) or a websocket
// write fails.
func (m *Session) Writer() {
	wsConn := m.wsConn
	defer wsConn.Close()
	sub := m.subscription

	var model database.Model
	var key *datastore.Key
	var msg []byte
	msgCreated := true

writeLoop:
	for {
		model = nil
		key = nil
		msg = nil

		select {
		// prioritize direct writes
		case msg = <-m.send:
		default:
			select {
			case msg = <-m.send:
			case model = <-sub.Created:
				msgCreated = true
			case model = <-sub.Updated:
				msgCreated = false
			case key = <-sub.Deleted:
			}
		}

		if model != nil {
			data, err := database.DumpModel(model, dsd.JSON)
			if err != nil {
				log.Warningf("api: could not dump model: %s", err)
				continue writeLoop
			}
			if msgCreated {
				msg = append([]byte(fmt.Sprintf("created|%s|", model.GetKey().String())), data...)
			} else {
				msg = append([]byte(fmt.Sprintf("updated|%s|", model.GetKey().String())), data...)
			}
		} else if key != nil {
			// BUGFIX: the original wrapped this in a one-argument append(),
			// which is a no-op (and flagged by go vet).
			msg = []byte(fmt.Sprintf("deleted|%s", key.String()))
		}

		// exit if we got nil: a feeding channel was closed
		if msg == nil {
			log.Debugf("api: a sending channel was closed, stopping writer")
			return
		}

		if err := wsConn.WriteMessage(websocket.BinaryMessage, msg); err != nil {
			log.Warningf("api: write error: %s", err)
			return
		}
	}
}

View file

@ -0,0 +1,96 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package configuration
import (
"sync/atomic"
"github.com/Safing/safing-core/database"
datastore "github.com/ipfs/go-datastore"
)
// SecurityLevelBoolean is an option that activates at (and above) a
// configured security level: 0 = off, 1-3 = minimum level at which the
// option is considered set.
type SecurityLevelBoolean int8

// IsSet reports whether the option is active at the current global
// security level.
func (slb SecurityLevelBoolean) IsSet() bool {
	return int8(atomic.LoadInt32(securityLevel)) >= int8(slb)
}

// IsSetWithLevel reports whether the option is active at either the
// given custom security level or the current global one.
func (slb SecurityLevelBoolean) IsSetWithLevel(customSecurityLevel int8) bool {
	if customSecurityLevel >= int8(slb) {
		return true
	}
	return int8(atomic.LoadInt32(securityLevel)) >= int8(slb)
}

// Level returns the configured activation level.
func (slb SecurityLevelBoolean) Level() int8 {
	return int8(slb)
}
// Configuration holds the complete user configuration. SecurityLevelBoolean
// fields activate at the stated security level; updateConfig validates
// every field and substitutes defaults for invalid values.
//
// BUGFIX: the struct tags were malformed — `json:",omitempty bson:",omitempty"`
// puts the bson part inside the json tag string, so omitempty was never
// applied. They are now split into separate json and bson tags, matching
// the (correct) tags on SystemStatus.
type Configuration struct {
	database.Base

	// Security Config
	EnforceCT                       SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Hardfail on Certificate Transparency
	EnforceRevocation               SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Hardfail on Certificate Revocation
	DenyInsecureTLS                 SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Block TLS connections, that use insecure TLS versions, cipher suites, ...
	DenyTLSWithoutSNI               SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Block TLS connections that do not use SNI, connections without SNI cannot be verified as well as connections with SNI.
	DoNotUseAssignedDNS             SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Do not use DNS Servers assigned by DHCP
	DoNotUseMDNS                    SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Do not use mDNS
	DoNotForwardSpecialDomains      SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Do not resolve special domains with assigned DNS Servers
	AlwaysPromptAtNewProfile        SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Always prompt user to review new profiles
	DenyNetworkUntilProfileApproved SecurityLevelBoolean `json:",omitempty" bson:",omitempty"` // Deny network communication until a new profile is actively approved by the user

	// Generic Config
	CompetenceLevel   int8 `json:",omitempty" bson:",omitempty"` // Select CompetenceLevel
	Beta              bool `json:",omitempty" bson:",omitempty"` // Take part in Beta
	PermanentVerdicts bool `json:",omitempty" bson:",omitempty"` // As soon as work on a link is finished, leave it to the system for performance and stability

	DNSServers []string `json:",omitempty" bson:",omitempty"` // DNS Servers to use for name resolution. Please refer to the user guide for further help.
	// regex: ^(DoH|DNS|TDNS)\|[A-Za-z0-9\.:\[\]]+(\|[A-Za-z0-9\.:]+)?$
	DNSServerRetryRate int64    `json:",omitempty" bson:",omitempty"` // Amount of seconds to wait until failing DNS Servers may be retried.
	CountryBlacklist   []string `json:",omitempty" bson:",omitempty"` // Do not connect to servers in these countries
	ASBlacklist        []uint32 `json:",omitempty" bson:",omitempty"` // Do not connect to server in these AS

	LocalPort17Node  bool `json:",omitempty" bson:",omitempty"` // Serve as local Port17 Node
	PublicPort17Node bool `json:",omitempty" bson:",omitempty"` // Serve as public Port17 Node
}
var (
	// configurationModel is a nil placeholder; only use this as parameter
	// for database.EnsureModel-like functions.
	configurationModel *Configuration
	// instance names under which the active and the default configuration
	// are stored.
	configurationInstanceName        = "config"
	defaultConfigurationInstanceName = "default"
)
// initConfigurationModel registers the Configuration type with the
// database so stored objects can be decoded back into *Configuration.
func initConfigurationModel() {
	database.RegisterModel(configurationModel, func() database.Model { return new(Configuration) })
}
// Create saves the Configuration under the provided name in the default
// namespace.
func (m *Configuration) Create(name string) error {
	return m.CreateObject(&database.Me, name, m)
}

// CreateInNamespace saves the Configuration under the provided name in
// the provided namespace.
func (m *Configuration) CreateInNamespace(namespace *datastore.Key, name string) error {
	return m.CreateObject(namespace, name, m)
}

// Save persists the Configuration.
func (m *Configuration) Save() error {
	return m.SaveObject(m)
}

// GetConfiguration fetches the Configuration stored under name in the
// default namespace.
func GetConfiguration(name string) (*Configuration, error) {
	return GetConfigurationFromNamespace(&database.Me, name)
}
// GetConfigurationFromNamespace fetches the Configuration stored under
// name in the provided namespace.
func GetConfigurationFromNamespace(namespace *datastore.Key, name string) (*Configuration, error) {
	object, err := database.GetAndEnsureModel(namespace, name, configurationModel)
	if err != nil {
		return nil, err
	}
	config, ok := object.(*Configuration)
	if !ok {
		return nil, database.NewMismatchError(object, configurationModel)
	}
	return config, nil
}

238
configuration/core.go Normal file
View file

@ -0,0 +1,238 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package configuration
import (
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/Safing/safing-core/database"
"github.com/Safing/safing-core/log"
"github.com/Safing/safing-core/modules"
)
// think about:
// config changes validation (e.g. if on in secure mode, must be on in fortress mode)
// config switches
// small codebase
// nice api
// be static as much as possible
// Security levels (ascending strictness), user competence levels and
// component status values used throughout the configuration package.
const (
	SecurityLevelOff      int8 = 0
	SecurityLevelDynamic  int8 = 1
	SecurityLevelSecure   int8 = 2
	SecurityLevelFortress int8 = 3

	CompetenceLevelNone      int8 = 0
	CompetenceLevelBasic     int8 = 1
	CompetenceLevelPowerUser int8 = 2
	CompetenceLevelExpert    int8 = 3

	StatusOk      int8 = 0
	StatusWarning int8 = 1
	StatusError   int8 = 2
)

var (
	// configurationModule is the module handle registered in init.
	configurationModule *modules.Module
	// lastChange holds the unix-nano timestamp of the last config update
	// (accessed atomically).
	lastChange *int64
	// securityLevel holds the currently active security level (accessed
	// atomically).
	securityLevel *int32
	// lock guards currentConfig.
	lock sync.RWMutex
	// status is the loaded system status; currentConfig the validated
	// active configuration.
	status        *SystemStatus
	currentConfig *Configuration
)
// init registers the configuration module, loads (or creates) the stored
// configuration and system status, publishes the initial configuration
// via updateConfig and starts the database change listeners.
func init() {
	configurationModule = modules.Register("Configuration", 128)
	initDefaultConfig()
	initSystemStatusModel()
	initConfigurationModel()
	// seed the atomics used for change detection and the security level
	lastChangeValue := time.Now().Unix()
	lastChange = &lastChangeValue
	var securityLevelValue int32
	securityLevel = &securityLevelValue
	var err error
	config, err := GetConfiguration(configurationInstanceName)
	if err != nil {
		// fall back to a copy of the defaults and try to persist it
		log.Warningf("configuration: could not load configuration: %s", err)
		loadedConfig := defaultConfig
		config = &loadedConfig
		err = config.Create(configurationInstanceName)
		if err != nil {
			log.Warningf("configuration: could not save new configuration: %s", err)
		}
	}
	status, err = GetSystemStatus()
	if err != nil {
		// fall back to a fresh status at the dynamic security level
		log.Warningf("configuration: could not load status: %s", err)
		status = &SystemStatus{
			CurrentSecurityLevel:  1,
			SelectedSecurityLevel: 1,
		}
		err = status.Create()
		if err != nil {
			log.Warningf("configuration: could not save new status: %s", err)
		}
	}
	log.Infof("configuration: initial security level is [%s]", status.FmtSecurityLevel())
	// NOTE(review): the securityLevel atomic is NOT initialized from the
	// loaded status here (line commented out) — it stays 0 until the first
	// status change event. Confirm this is intended.
	// atomic.StoreInt32(securityLevel, int32(status.CurrentSecurityLevel))
	updateConfig(config)
	go configChangeListener()
	go statusChangeListener()
}
// configChangeListener watches the database for creations/updates of the
// stored configuration and republishes them via updateConfig. It exits
// when the configuration module is stopped.
func configChangeListener() {
	sub := database.NewSubscription()
	sub.Subscribe(fmt.Sprintf("%s/Configuration:%s", database.Me.String(), configurationInstanceName))
	for {
		var receivedModel database.Model
		select {
		case <-configurationModule.Stop:
			configurationModule.StopComplete()
			return
		case receivedModel = <-sub.Updated:
		case receivedModel = <-sub.Created:
		}
		config, ok := database.SilentEnsureModel(receivedModel, configurationModel).(*Configuration)
		if !ok {
			log.Warning("configuration: received config update, but was not of type *Configuration")
			continue
		}
		updateConfig(config)
	}
}
func updateConfig(update *Configuration) {
new := &Configuration{}
if update.EnforceCT > 0 && update.EnforceCT < 4 {
new.EnforceCT = update.EnforceCT
} else {
new.EnforceCT = defaultConfig.EnforceCT
}
if update.EnforceRevocation > 0 && update.EnforceRevocation < 4 {
new.EnforceRevocation = update.EnforceRevocation
} else {
new.EnforceRevocation = defaultConfig.EnforceRevocation
}
if update.DenyInsecureTLS > 0 && update.DenyInsecureTLS < 4 {
new.DenyInsecureTLS = update.DenyInsecureTLS
} else {
new.DenyInsecureTLS = defaultConfig.DenyInsecureTLS
}
if update.DenyTLSWithoutSNI > 0 && update.DenyTLSWithoutSNI < 4 {
new.DenyTLSWithoutSNI = update.DenyTLSWithoutSNI
} else {
new.DenyTLSWithoutSNI = defaultConfig.DenyTLSWithoutSNI
}
if update.DoNotUseAssignedDNS > 0 && update.DoNotUseAssignedDNS < 4 {
new.DoNotUseAssignedDNS = update.DoNotUseAssignedDNS
} else {
new.DoNotUseAssignedDNS = defaultConfig.DoNotUseAssignedDNS
}
if update.DoNotUseMDNS > 0 && update.DoNotUseMDNS < 4 {
new.DoNotUseMDNS = update.DoNotUseMDNS
} else {
new.DoNotUseMDNS = defaultConfig.DoNotUseMDNS
}
if update.DoNotForwardSpecialDomains > 0 && update.DoNotForwardSpecialDomains < 4 {
new.DoNotForwardSpecialDomains = update.DoNotForwardSpecialDomains
} else {
new.DoNotForwardSpecialDomains = defaultConfig.DoNotForwardSpecialDomains
}
if update.AlwaysPromptAtNewProfile > 0 && update.AlwaysPromptAtNewProfile < 4 {
new.AlwaysPromptAtNewProfile = update.AlwaysPromptAtNewProfile
} else {
new.AlwaysPromptAtNewProfile = defaultConfig.AlwaysPromptAtNewProfile
}
if update.DenyNetworkUntilProfileApproved > 0 && update.DenyNetworkUntilProfileApproved < 4 {
new.DenyNetworkUntilProfileApproved = update.DenyNetworkUntilProfileApproved
} else {
new.DenyNetworkUntilProfileApproved = defaultConfig.DenyNetworkUntilProfileApproved
}
// generic configuration
if update.CompetenceLevel >= 0 && update.CompetenceLevel <= 3 {
new.CompetenceLevel = update.CompetenceLevel
} else {
new.CompetenceLevel = 3
// TODO: maybe notify user?
}
if len(update.DNSServers) != 0 {
new.DNSServers = update.DNSServers
} else {
new.DNSServers = defaultConfig.DNSServers
}
if update.DNSServerRetryRate != 0 {
new.DNSServerRetryRate = update.DNSServerRetryRate
} else {
new.DNSServerRetryRate = defaultConfig.DNSServerRetryRate
}
if len(update.CountryBlacklist) != 0 {
new.CountryBlacklist = update.CountryBlacklist
} else {
new.CountryBlacklist = defaultConfig.CountryBlacklist
}
if len(update.ASBlacklist) != 0 {
new.ASBlacklist = update.ASBlacklist
} else {
new.ASBlacklist = defaultConfig.ASBlacklist
}
lock.Lock()
defer lock.Unlock()
// set new config and update timestamp
currentConfig = new
atomic.StoreInt64(lastChange, time.Now().UnixNano())
// update status with new values
// status.CurrentSecurityLevel = currentConfig.SecurityLevel
// status.Save()
// update atomic securityLevel
// atomic.StoreInt32(securityLevel, int32(currentConfig.SecurityLevel))
}
// statusChangeListener watches the database for creations/updates of the
// stored system status and mirrors the current security level into the
// securityLevel atomic. It exits when the configuration module is
// stopped.
func statusChangeListener() {
	sub := database.NewSubscription()
	sub.Subscribe(fmt.Sprintf("%s/SystemStatus:%s", database.Me.String(), systemStatusInstanceName))
	for {
		var receivedModel database.Model
		select {
		case <-configurationModule.Stop:
			configurationModule.StopComplete()
			return
		case receivedModel = <-sub.Updated:
		case receivedModel = <-sub.Created:
		}
		// shadows the package-level status on purpose; only the security
		// level is propagated
		status, ok := database.SilentEnsureModel(receivedModel, systemStatusModel).(*SystemStatus)
		if !ok {
			log.Warning("configuration: received system status update, but was not of type *SystemStatus")
			continue
		}
		atomic.StoreInt32(securityLevel, int32(status.CurrentSecurityLevel))
	}
}

View file

@ -0,0 +1,23 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package configuration
import (
"fmt"
"testing"
"time"
)
// TestConfiguration smoke-tests the configuration Interface: fetch a
// snapshot, call Changed/Save repeatedly with short pauses in between.
// NOTE(review): this test asserts nothing — it only exercises the code
// paths; consider checking the boolean results of Changed().
func TestConfiguration(t *testing.T) {
	config1 := Get()
	fmt.Printf("%v", config1)
	time.Sleep(1 * time.Millisecond)
	config1.Changed()
	time.Sleep(1 * time.Millisecond)
	config1.Save()
	time.Sleep(1 * time.Millisecond)
	config1.Changed()
	time.Sleep(1 * time.Millisecond)
}

46
configuration/defaults.go Normal file
View file

@ -0,0 +1,46 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package configuration
import (
"github.com/Safing/safing-core/log"
)
var (
	// defaultConfig is the built-in fallback configuration; updateConfig
	// substitutes its values for any invalid or unset field.
	defaultConfig Configuration
)

// initDefaultConfig builds the default configuration and persists it
// under the "default" instance name.
func initDefaultConfig() {
	defaultConfig = Configuration{
		// based on security level
		EnforceCT:                       3,
		EnforceRevocation:               3,
		DenyInsecureTLS:                 2,
		DenyTLSWithoutSNI:               2,
		DoNotUseAssignedDNS:             3,
		DoNotUseMDNS:                    2,
		DoNotForwardSpecialDomains:      2,
		AlwaysPromptAtNewProfile:        3,
		DenyNetworkUntilProfileApproved: 3,

		// generic configuration
		CompetenceLevel:   0,
		PermanentVerdicts: true,

		// Possible values: DNS, DoH (DNS over HTTPS - using Google's syntax: https://developers.google.com/speed/public-dns/docs/dns-over-https)
		// DNSServers: []string{"DoH|dns.google.com:443|df:www.google.com"},
		DNSServers: []string{"DNS|1.1.1.1:53", "DNS|1.0.0.1:53", "DNS|[2606:4700:4700::1111]:53", "DNS|[2606:4700:4700::1001]:53", "DNS|8.8.8.8:53", "DNS|8.8.4.4:53", "DNS|[2001:4860:4860::8888]:53", "DNS|[2001:4860:4860::8844]:53", "DNS|208.67.222.222:53", "DNS|208.67.220.220:53"},
		// DNSServers: []string{"DNS|[2001:4860:4860::8888]:53", "DNS|[2001:4860:4860::8844]:53"},
		// DNSServers: []string{"DoH|dns.google.com:443|df:www.google.com", "DNS|8.8.8.8:53", "DNS|8.8.4.4:53", "DNS|172.30.30.1:53", "DNS|172.20.30.2:53"},
		// DNSServers: []string{"DNS|208.67.222.222:53", "DNS|208.67.220.220:53", "DNS|8.8.8.8:53", "DNS|8.8.4.4:53"},

		// Amount of seconds to wait until failing DNS Servers may be retried.
		DNSServerRetryRate: 120,

		// CountryBlacklist []string
		// ASBlacklist []uint32

		LocalPort17Node:  false,
		PublicPort17Node: true,
	}
	err := defaultConfig.Create(defaultConfigurationInstanceName)
	if err != nil {
		log.Warningf("configuration: could not save default configuration: %s", err)
	}
}

View file

@ -0,0 +1,50 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package configuration
import (
"sync"
"sync/atomic"
)
// Interface is a consumer-side snapshot of the global configuration: it
// embeds the Configuration that was active when Get was called and can
// cheaply re-sync itself via Changed.
type Interface struct {
	*Configuration
	LastChange int64        // value of the global lastChange at snapshot time
	ConfigLock sync.RWMutex // guards swaps of the embedded Configuration by Changed
}
// Get returns a new configuration Interface holding the currently active
// configuration and its change timestamp.
func Get() *Interface {
	lock.RLock()
	defer lock.RUnlock()

	iface := &Interface{
		Configuration: currentConfig,
		LastChange:    atomic.LoadInt64(lastChange),
	}
	return iface
}
// RLock read-locks the Interface for safe access to its Configuration.
func (lc *Interface) RLock() {
	lc.ConfigLock.RLock()
}

// RUnlock releases the read lock acquired via RLock.
func (lc *Interface) RUnlock() {
	lc.ConfigLock.RUnlock()
}

// Changed re-syncs the Interface with the global configuration if it was
// updated since the snapshot was taken, returning true if the embedded
// Configuration was swapped.
func (lc *Interface) Changed() bool {
	lastGlobalChange := atomic.LoadInt64(lastChange)
	if lc.LastChange != lastGlobalChange {
		// swap in the new global config while holding both the snapshot's
		// write lock and the global read lock
		lc.ConfigLock.Lock()
		lock.RLock()
		lc.Configuration = currentConfig
		lc.LastChange = lastGlobalChange
		lock.RUnlock()
		lc.ConfigLock.Unlock()
		return true
	}
	return false
}

// SecurityLevel returns the currently active global security level.
func (lc *Interface) SecurityLevel() int8 {
	return int8(atomic.LoadInt32(securityLevel))
}

86
configuration/status.go Normal file
View file

@ -0,0 +1,86 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package configuration
import (
"github.com/Safing/safing-core/database"
datastore "github.com/ipfs/go-datastore"
)
// SystemStatus saves basic information about the current system status.
type SystemStatus struct {
	database.Base

	CurrentSecurityLevel  int8 // security level currently in effect
	SelectedSecurityLevel int8 // security level chosen by the user

	ThreatLevel  int8   `json:",omitempty" bson:",omitempty"` // assessed threat level
	ThreatReason string `json:",omitempty" bson:",omitempty"` // human-readable reason for ThreatLevel

	PortmasterStatus    int8   `json:",omitempty" bson:",omitempty"` // Status* value for the Portmaster component
	PortmasterStatusMsg string `json:",omitempty" bson:",omitempty"` // human-readable status detail
	Port17Status        int8   `json:",omitempty" bson:",omitempty"` // Status* value for the Port17 component
	Port17StatusMsg     string `json:",omitempty" bson:",omitempty"` // human-readable status detail
}
var (
	// systemStatusModel is a nil placeholder; only use this as parameter
	// for database.EnsureModel-like functions.
	systemStatusModel *SystemStatus
	// systemStatusInstanceName is the fixed name the status is stored under.
	systemStatusInstanceName = "status"
)

// initSystemStatusModel registers the SystemStatus type with the
// database so stored objects can be decoded back into *SystemStatus.
func initSystemStatusModel() {
	database.RegisterModel(systemStatusModel, func() database.Model { return new(SystemStatus) })
}
// Create saves the SystemStatus under the fixed instance name in the
// default namespace.
func (m *SystemStatus) Create() error {
	return m.CreateObject(&database.Me, systemStatusInstanceName, m)
}

// CreateInNamespace saves the SystemStatus under the fixed instance name
// in the provided namespace.
func (m *SystemStatus) CreateInNamespace(namespace *datastore.Key) error {
	return m.CreateObject(namespace, systemStatusInstanceName, m)
}

// Save persists the SystemStatus.
func (m *SystemStatus) Save() error {
	return m.SaveObject(m)
}
// FmtSecurityLevel returns the current security level as a string. A
// trailing "*" marks a mismatch between the current and the selected
// security level.
func (m *SystemStatus) FmtSecurityLevel() string {
	var name string
	switch m.CurrentSecurityLevel {
	case SecurityLevelOff:
		name = "Off"
	case SecurityLevelDynamic:
		name = "Dynamic"
	case SecurityLevelSecure:
		name = "Secure"
	case SecurityLevelFortress:
		name = "Fortress"
	}
	if m.CurrentSecurityLevel == m.SelectedSecurityLevel {
		return name
	}
	return name + "*"
}
// GetSystemStatus fetches the SystemStatus from the default namespace.
func GetSystemStatus() (*SystemStatus, error) {
	return GetSystemStatusFromNamespace(&database.Me)
}

// GetSystemStatusFromNamespace fetches the SystemStatus from the
// provided namespace.
func GetSystemStatusFromNamespace(namespace *datastore.Key) (*SystemStatus, error) {
	object, err := database.GetAndEnsureModel(namespace, systemStatusInstanceName, systemStatusModel)
	if err != nil {
		return nil, err
	}
	status, ok := object.(*SystemStatus)
	if !ok {
		return nil, database.NewMismatchError(object, systemStatusModel)
	}
	return status, nil
}

290
container/container.go Normal file
View file

@ -0,0 +1,290 @@
package container
import (
"errors"
"github.com/Safing/safing-core/formats/varint"
)
// Container is []byte array on steroids, allowing for quick data appending, prepending and fetching as well as transparent error transportation. (Error transportation requires use of varints for data)
type Container struct {
	compartments [][]byte // data chunks in order; indices below offset are consumed
	offset       int      // index of the first unconsumed compartment
	err          error    // transported error, if any
}
// Data Handling

// NewContainer creates a new container with an optional initial []byte
// slice. Data will NOT be copied.
func NewContainer(data ...[]byte) *Container {
	c := &Container{}
	c.compartments = data
	return c
}

// New creates a new container with an optional initial []byte slice.
// Data will NOT be copied.
func New(data ...[]byte) *Container {
	return NewContainer(data...)
}
// Prepend prepends data. Data will NOT be copied.
func (c *Container) Prepend(data []byte) {
	// make room at the front if no consumed slot is free to reuse
	// (renewCompartments is defined elsewhere in this file; presumably it
	// re-allocates with front padding — confirm)
	if c.offset < 1 {
		c.renewCompartments()
	}
	c.offset--
	c.compartments[c.offset] = data
}
// Append appends the given data. Data will NOT be copied.
func (c *Container) Append(data []byte) {
	c.compartments = append(c.compartments, data)
}

// AppendNumber appends a number (varint encoded).
func (c *Container) AppendNumber(n uint64) {
	c.Append(varint.Pack64(n))
}

// AppendAsBlock appends the length of the data and the data itself.
// Data will NOT be copied.
func (c *Container) AppendAsBlock(data []byte) {
	c.AppendNumber(uint64(len(data)))
	c.Append(data)
}
// Length returns the total number of bytes currently held by the container.
func (c *Container) Length() (length int) {
	for _, compartment := range c.compartments[c.offset:] {
		length += len(compartment)
	}
	return
}
// Replace replaces all held data with a new data slice. Data will NOT be copied.
// The offset is intentionally left untouched; compartments is rebuilt from scratch.
func (c *Container) Replace(data []byte) {
	c.compartments = [][]byte{data}
}
// CompileData concatenates all bytes held by the container into one []byte
// slice, stores it as the single remaining compartment and returns it.
// Data will NOT be copied again on subsequent calls and is NOT consumed.
func (c *Container) CompileData() []byte {
	if len(c.compartments) != 1 {
		flat := make([]byte, c.Length())
		pos := 0
		for i := c.offset; i < len(c.compartments); i++ {
			pos += copy(flat[pos:], c.compartments[i])
		}
		c.compartments = [][]byte{flat}
		c.offset = 0
	}
	return c.compartments[0]
}
// Get returns exactly n bytes, or an error if not enough data is held.
// Data MAY be copied and IS consumed.
func (c *Container) Get(n int) ([]byte, error) {
	data := c.gather(n)
	if len(data) < n {
		return nil, errors.New("container: not enough data to return")
	}
	c.skip(len(data))
	return data, nil
}

// GetMax returns as much as possible, but at most n bytes.
// Data MAY be copied and IS consumed.
func (c *Container) GetMax(n int) []byte {
	data := c.gather(n)
	c.skip(len(data))
	return data
}
// WriteToSlice copies data to the given slice until it is full, or the container is empty.
// It returns the bytes written and whether the container is now empty.
// Data IS copied and IS consumed.
func (c *Container) WriteToSlice(slice []byte) (n int, containerEmptied bool) {
	for i := c.offset; i < len(c.compartments); i++ {
		copy(slice, c.compartments[i])
		if len(slice) < len(c.compartments[i]) {
			// destination full: only part of this compartment was copied,
			// keep the remainder for the next call
			n += len(slice)
			c.compartments[i] = c.compartments[i][len(slice):]
			c.checkOffset()
			return n, false
		}
		// whole compartment copied; release it and advance the offset
		n += len(c.compartments[i])
		slice = slice[len(c.compartments[i]):]
		c.compartments[i] = nil
		c.offset = i + 1
	}
	c.checkOffset()
	return n, true
}
// Clean compacts the container once more than 100 consumed compartments have
// accumulated in front of the offset.
func (c *Container) Clean() {
	if c.offset > 100 {
		c.renewCompartments()
	}
}

// renewCompartments reallocates the compartment slice, dropping consumed
// entries and reserving free slots in front of the offset for future prepends.
// NOTE(review): live data is copied to index 5 but offset is set to 4, leaving
// a nil compartment at index 4. This is harmless (zero-length entries are
// skipped everywhere) but looks unintended - confirm whether offset should be 5.
func (c *Container) renewCompartments() {
	baseLength := len(c.compartments) - c.offset + 5
	newCompartments := make([][]byte, baseLength, baseLength+5)
	copy(newCompartments[5:], c.compartments[c.offset:])
	c.compartments = newCompartments
	c.offset = 4
}
// carbonCopy returns a shallow duplicate of the container: the compartment
// slice headers are duplicated, the underlying byte data is shared.
// The local variable was renamed from `new` to `dup` - shadowing the builtin
// `new` is flagged by linters and hurts readability.
func (c *Container) carbonCopy() *Container {
	dup := &Container{
		compartments: make([][]byte, len(c.compartments)),
		offset:       c.offset,
		err:          c.err,
	}
	for i := 0; i < len(c.compartments); i++ {
		dup.compartments[i] = c.compartments[i]
	}
	// TODO: investigate why copy fails to correctly duplicate [][]byte
	// copy(dup.compartments, c.compartments)
	return dup
}
// checkOffset keeps the offset inside the compartment slice after consumption.
// When everything has been consumed (offset would point past the end), the
// offset is moved back to the middle of the slice.
// NOTE(review): pointing at consumed (nil) compartments is harmless since they
// have zero length, but the len/2 target looks arbitrary - confirm intent.
func (c *Container) checkOffset() {
	if c.offset >= len(c.compartments) {
		c.offset = len(c.compartments) / 2
	}
}
// Error Handling
// SetError sets an error and replaces all held data with the wire encoding of
// that error: a 0x00 marker byte followed by the error message.
func (c *Container) SetError(err error) {
	c.err = err
	c.Replace(append([]byte{0x00}, []byte(err.Error())...))
}
// CheckError checks if the data at the current offset starts with the error
// marker (0x00). If so, the remaining data is parsed as an error message and
// all data is dropped. Calling it on an empty container is a no-op (the
// original code indexed c.compartments[c.offset] unconditionally, which
// panics for a container created with NewContainer() and no data).
func (c *Container) CheckError() {
	if c.offset >= len(c.compartments) {
		// nothing to inspect
		return
	}
	if len(c.compartments[c.offset]) > 0 && c.compartments[c.offset][0] == 0x00 {
		c.compartments[c.offset] = c.compartments[c.offset][1:]
		c.err = errors.New(string(c.CompileData()))
		c.compartments = nil
	}
}
// HasError returns whether or not the container is holding an error.
func (c *Container) HasError() bool {
	// simplified from `if c.err != nil { return true }; return false`
	return c.err != nil
}

// Error returns the held error, or nil if there is none.
func (c *Container) Error() error {
	return c.err
}

// ErrString returns the message of the held error.
// Only call after HasError() reported true - c.err is dereferenced here.
// (The doc comment previously duplicated Error's; fixed.)
func (c *Container) ErrString() string {
	return c.err.Error()
}
// Block Handling
// PrependLength prepends the current full length of all bytes in the container, varint encoded.
func (c *Container) PrependLength() {
	c.Prepend(varint.Pack64(uint64(c.Length())))
}
// gather returns up to n bytes from the front of the container WITHOUT
// consuming them. If the first compartment holds enough data, a subslice of it
// is returned directly (no copy); otherwise data is copied from consecutive
// compartments into a fresh buffer, which may be shorter than n if the
// container runs out of data.
func (c *Container) gather(n int) []byte {
	// check if first slice holds enough data
	if len(c.compartments[c.offset]) >= n {
		return c.compartments[c.offset][:n]
	}
	// start gathering data
	slice := make([]byte, n)
	copySlice := slice
	n = 0 // repurposed as the running count of bytes gathered
	for i := c.offset; i < len(c.compartments); i++ {
		copy(copySlice, c.compartments[i])
		if len(copySlice) <= len(c.compartments[i]) {
			// destination filled by this compartment
			n += len(copySlice)
			return slice[:n]
		}
		n += len(c.compartments[i])
		copySlice = copySlice[len(c.compartments[i]):]
	}
	// ran out of data before filling the buffer
	return slice[:n]
}
// skip consumes n bytes from the front of the container, releasing fully
// consumed compartments and trimming a partially consumed one.
func (c *Container) skip(n int) {
	for i := c.offset; i < len(c.compartments); i++ {
		if len(c.compartments[i]) <= n {
			// whole compartment consumed
			n -= len(c.compartments[i])
			c.offset = i + 1
			c.compartments[i] = nil
			if n == 0 {
				c.checkOffset()
				return
			}
		} else {
			// partially consumed: keep the remainder
			c.compartments[i] = c.compartments[i][n:]
			c.checkOffset()
			return
		}
	}
	c.checkOffset()
}
// GetNextBlock returns the next block of data: a varint length prefix followed
// by that many bytes (the counterpart of AppendAsBlock). Data MAY be copied
// and IS consumed.
func (c *Container) GetNextBlock() ([]byte, error) {
	blockSize, err := c.GetNextN64()
	if err != nil {
		return nil, err
	}
	return c.Get(int(blockSize))
}
// GetNextN8 parses and consumes a varint of type uint8.
// gather(2): the maximum encoded length of a uint8 varint.
func (c *Container) GetNextN8() (uint8, error) {
	buf := c.gather(2)
	num, n, err := varint.Unpack8(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

// GetNextN16 parses and consumes a varint of type uint16 (max 3 encoded bytes).
func (c *Container) GetNextN16() (uint16, error) {
	buf := c.gather(3)
	num, n, err := varint.Unpack16(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

// GetNextN32 parses and consumes a varint of type uint32 (max 5 encoded bytes).
func (c *Container) GetNextN32() (uint32, error) {
	buf := c.gather(5)
	num, n, err := varint.Unpack32(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

// GetNextN64 parses and consumes a varint of type uint64 (max 9 encoded bytes).
func (c *Container) GetNextN64() (uint64, error) {
	buf := c.gather(9)
	num, n, err := varint.Unpack64(buf)
	if err != nil {
		return 0, err
	}
	c.skip(n)
	return num, nil
}

162
container/container_test.go Normal file
View file

@ -0,0 +1,162 @@
package container
import (
"bytes"
"errors"
"testing"
"github.com/Safing/safing-core/utils"
)
var (
	// testData is the reference payload used throughout the container tests.
	testData = []byte("The quick brown fox jumps over the lazy dog")
	// testDataSplitted is testData cut into nine chunks of increasing size,
	// used to exercise multi-compartment behavior.
	testDataSplitted = [][]byte{
		[]byte("T"),
		[]byte("he"),
		[]byte(" qu"),
		[]byte("ick "),
		[]byte("brown"),
		[]byte(" fox j"),
		[]byte("umps ov"),
		[]byte("er the l"),
		[]byte("azy dog"),
	}
)
// TestContainerDataHandling builds the same payload through many different
// paths (append, prepend, byte-wise draining, Replace, pre-split compartments,
// Clean after mass prepends) and checks they all compile to testData.
func TestContainerDataHandling(t *testing.T) {
	c1 := NewContainer(utils.DuplicateBytes(testData))
	c1c := c1.carbonCopy()
	// c2: rebuilt from c1 one byte at a time
	c2 := NewContainer()
	for i := 0; i < len(testData); i++ {
		oneByte := make([]byte, 1)
		c1c.WriteToSlice(oneByte)
		c2.Append(oneByte)
	}
	c2c := c2.carbonCopy()
	// c3: rebuilt from c2 via prepends in reverse order
	c3 := NewContainer()
	for i := len(c2c.compartments) - 1; i >= c2c.offset; i-- {
		c3.Prepend(c2c.compartments[i])
	}
	c3c := c3.carbonCopy()
	// d4: drained into an oversized slice in one call
	d4 := make([]byte, len(testData)*2)
	n, _ := c3c.WriteToSlice(d4)
	d4 = d4[:n]
	c3c = c3.carbonCopy()
	// d5: drained one byte at a time
	d5 := make([]byte, len(testData))
	for i := 0; i < len(testData); i++ {
		c3c.WriteToSlice(d5[i : i+1])
	}
	c6 := NewContainer()
	c6.Replace(testData)
	c7 := NewContainer(testDataSplitted[0])
	for i := 1; i < len(testDataSplitted); i++ {
		c7.Append(testDataSplitted[i])
	}
	// c8: force compartment renewal via many nil prepends, then compact
	c8 := NewContainer(testDataSplitted...)
	for i := 0; i < 110; i++ {
		c8.Prepend(nil)
	}
	c8.Clean()
	compareMany(t, testData, c1.CompileData(), c2.CompileData(), c3.CompileData(), d4, d5, c6.CompileData(), c7.CompileData(), c8.CompileData())
}
// compareMany asserts that every sample equals the reference byte sequence,
// reporting each mismatch with its 1-based sample index.
func compareMany(t *testing.T, reference []byte, other ...[]byte) {
	for i, sample := range other {
		if bytes.Equal(reference, sample) {
			continue
		}
		t.Errorf("sample %d does not match reference: sample is '%s'", i+1, string(sample))
	}
}
// TestContainerErrorHandling checks both error paths: setting an error
// directly (SetError) and detecting the 0x00 error marker in data (CheckError).
func TestContainerErrorHandling(t *testing.T) {
	c1 := NewContainer(nil)
	if c1.HasError() {
		t.Error("should not have error")
	}
	c1.SetError(errors.New("test error"))
	if !c1.HasError() {
		t.Error("should have error")
	}
	// c2 holds wire-encoded error data: marker byte 0x00 plus the message
	c2 := NewContainer(append([]byte{0}, []byte("test error")...))
	if c2.HasError() {
		t.Error("should not have error")
	}
	c2.CheckError()
	if !c2.HasError() {
		t.Error("should have error")
	}
	if c2.Error().Error() != "test error" {
		t.Errorf("error message mismatch, was %s", c2.Error())
	}
}
// TestContainerBlockHandling round-trips length-prefixed blocks through
// PrependLength/AppendAsBlock and GetNextBlock, across single-compartment,
// byte-wise rebuilt, and pre-split containers.
func TestContainerBlockHandling(t *testing.T) {
	// c1 holds two blocks: [len|testData][len|testData]
	c1 := NewContainer(utils.DuplicateBytes(testData))
	c1.PrependLength()
	c1.AppendAsBlock(testData)
	c1c := c1.carbonCopy()
	// c2: same bytes, rebuilt one compartment per byte
	c2 := NewContainer(nil)
	for i := 0; i < c1.Length(); i++ {
		oneByte := make([]byte, 1)
		c1c.WriteToSlice(oneByte)
		c2.Append(oneByte)
	}
	// c3: one block assembled from pre-split chunks
	c3 := NewContainer(testDataSplitted[0])
	for i := 1; i < len(testDataSplitted); i++ {
		c3.Append(testDataSplitted[i])
	}
	c3.PrependLength()
	d1, err := c1.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d2, err := c1.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d3, err := c2.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d4, err := c2.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d5, err := c3.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	compareMany(t, testData, d1, d2, d3, d4, d5)
}
// TestContainerMisc checks that compiling an empty container yields no data.
func TestContainerMisc(t *testing.T) {
	c1 := NewContainer()
	d1 := c1.CompileData()
	if len(d1) > 0 {
		t.Fatalf("empty container should not hold any data")
	}
}

147
crypto/hash/algorithm.go Normal file
View file

@ -0,0 +1,147 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package hash
import (
"crypto/sha256"
"crypto/sha512"
"hash"
"golang.org/x/crypto/sha3"
)
// Algorithm identifies a supported hash algorithm.
type Algorithm uint8

// Supported algorithms. Values start at 1 so the zero value is invalid.
const (
	SHA2_224 Algorithm = 1 + iota
	SHA2_256
	SHA2_512_224
	SHA2_512_256
	SHA2_384
	SHA2_512
	SHA3_224
	SHA3_256
	SHA3_384
	SHA3_512
	BLAKE2S_256
	BLAKE2B_256
	BLAKE2B_384
	BLAKE2B_512
)
var (
	// attributes maps each algorithm to {block size, output size, security strength} in bytes.
	// The redundant []uint8 element types were removed (gofmt -s).
	attributes = map[Algorithm][]uint8{
		// block size, output size, security strength - in bytes
		SHA2_224:     {64, 28, 14},
		SHA2_256:     {64, 32, 16},
		SHA2_512_224: {128, 28, 14},
		SHA2_512_256: {128, 32, 16},
		SHA2_384:     {128, 48, 24},
		SHA2_512:     {128, 64, 32},
		SHA3_224:     {144, 28, 14},
		SHA3_256:     {136, 32, 16},
		SHA3_384:     {104, 48, 24},
		SHA3_512:     {72, 64, 32},
		BLAKE2S_256:  {64, 32, 16},
		BLAKE2B_256:  {128, 32, 16},
		BLAKE2B_384:  {128, 48, 24},
		BLAKE2B_512:  {128, 64, 32},
	}
	// functions maps each algorithm to its hasher constructor.
	functions = map[Algorithm]func() hash.Hash{
		SHA2_224:     sha256.New224,
		SHA2_256:     sha256.New,
		SHA2_512_224: sha512.New512_224,
		SHA2_512_256: sha512.New512_256,
		SHA2_384:     sha512.New384,
		SHA2_512:     sha512.New,
		SHA3_224:     sha3.New224,
		SHA3_256:     sha3.New256,
		SHA3_384:     sha3.New384,
		SHA3_512:     sha3.New512,
		BLAKE2S_256:  NewBlake2s256,
		BLAKE2B_256:  NewBlake2b256,
		BLAKE2B_384:  NewBlake2b384,
		BLAKE2B_512:  NewBlake2b512,
	}
	// orderedByRecommendation lists algorithms from most to least preferred.
	// just ordered by strength and establishment, no research conducted yet.
	orderedByRecommendation = []Algorithm{
		SHA3_512,     // {72, 64, 32}
		SHA2_512,     // {128, 64, 32}
		BLAKE2B_512,  // {128, 64, 32}
		SHA3_384,     // {104, 48, 24}
		SHA2_384,     // {128, 48, 24}
		BLAKE2B_384,  // {128, 48, 24}
		SHA3_256,     // {136, 32, 16}
		SHA2_512_256, // {128, 32, 16}
		SHA2_256,     // {64, 32, 16}
		BLAKE2B_256,  // {128, 32, 16}
		BLAKE2S_256,  // {64, 32, 16}
		SHA3_224,     // {144, 28, 14}
		SHA2_512_224, // {128, 28, 14}
		SHA2_224,     // {64, 28, 14}
	}
	// names maps each algorithm to its human-readable name.
	names = map[Algorithm]string{
		SHA2_224:     "SHA2-224",
		SHA2_256:     "SHA2-256",
		SHA2_512_224: "SHA2-512/224",
		SHA2_512_256: "SHA2-512/256",
		SHA2_384:     "SHA2-384",
		SHA2_512:     "SHA2-512",
		SHA3_224:     "SHA3-224",
		SHA3_256:     "SHA3-256",
		SHA3_384:     "SHA3-384",
		SHA3_512:     "SHA3-512",
		BLAKE2S_256:  "Blake2s-256",
		BLAKE2B_256:  "Blake2b-256",
		BLAKE2B_384:  "Blake2b-384",
		BLAKE2B_512:  "Blake2b-512",
	}
)
// BlockSize returns the algorithm's block size in bytes, or 0 if unknown.
func (a Algorithm) BlockSize() uint8 {
	if att, ok := attributes[a]; ok {
		return att[0]
	}
	return 0
}

// Size returns the algorithm's output size in bytes, or 0 if unknown.
func (a Algorithm) Size() uint8 {
	if att, ok := attributes[a]; ok {
		return att[1]
	}
	return 0
}

// SecurityStrength returns the algorithm's security strength in bytes, or 0 if unknown.
func (a Algorithm) SecurityStrength() uint8 {
	if att, ok := attributes[a]; ok {
		return att[2]
	}
	return 0
}
// String implements fmt.Stringer and is an alias for Name.
func (a Algorithm) String() string {
	return a.Name()
}

// Name returns the human-readable algorithm name, or "" if unknown.
func (a Algorithm) Name() string {
	if name, ok := names[a]; ok {
		return name
	}
	return ""
}
// New returns a fresh hash.Hash for the algorithm, or nil if unknown.
func (a Algorithm) New() hash.Hash {
	if fn, ok := functions[a]; ok {
		return fn()
	}
	return nil
}

View file

@ -0,0 +1,56 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package hash
import "testing"
// TestAttributes checks that every registered algorithm has a name, a
// constructor and consistent attribute values, and that an unknown algorithm
// ID yields the documented zero values.
func TestAttributes(t *testing.T) {
	for alg, att := range attributes {
		name, ok := names[alg]
		if !ok {
			t.Errorf("hash test: name missing for Algorithm ID %d", alg)
		}
		_ = alg.String()
		_, ok = functions[alg]
		if !ok {
			t.Errorf("hash test: function missing for Algorithm %s", name)
		}
		hash := alg.New()
		if len(att) != 3 {
			t.Errorf("hash test: Algorithm %s does not have exactly 3 attributes", name)
		}
		// attribute table must agree with the actual hasher implementation
		if hash.BlockSize() != int(alg.BlockSize()) {
			t.Errorf("hash test: block size mismatch at Algorithm %s", name)
		}
		if hash.Size() != int(alg.Size()) {
			t.Errorf("hash test: size mismatch at Algorithm %s", name)
		}
		// convention in the table: strength == output size / 2
		if alg.Size()/2 != alg.SecurityStrength() {
			t.Errorf("hash test: possible strength error at Algorithm %s", name)
		}
	}
	// unknown algorithm ID: all accessors must return zero values / nil
	noAlg := Algorithm(255)
	if noAlg.String() != "" {
		t.Error("hash test: invalid Algorithm error")
	}
	if noAlg.BlockSize() != 0 {
		t.Error("hash test: invalid Algorithm error")
	}
	if noAlg.Size() != 0 {
		t.Error("hash test: invalid Algorithm error")
	}
	if noAlg.SecurityStrength() != 0 {
		t.Error("hash test: invalid Algorithm error")
	}
	if noAlg.New() != nil {
		t.Error("hash test: invalid Algorithm error")
	}
}

133
crypto/hash/hash.go Normal file
View file

@ -0,0 +1,133 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package hash
import (
"bytes"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
"io"
"github.com/Safing/safing-core/formats/varint"
)
// Hash is a digest tagged with the algorithm that produced it.
type Hash struct {
	Algorithm Algorithm // the algorithm used to compute Sum
	Sum       []byte    // the raw digest
}
// FromBytes parses a serialized hash: a varint-packed algorithm ID followed by
// the raw sum. It returns the parsed hash and the number of bytes consumed.
// Fixes: the read count was always returned as 0; the parameter `bytes`
// shadowed the bytes package; errors.New(fmt.Sprintf(...)) replaced by fmt.Errorf.
func FromBytes(data []byte) (*Hash, int, error) {
	hash := &Hash{}
	alg, read, err := varint.Unpack8(data)
	if err != nil {
		return nil, 0, fmt.Errorf("hash: failed to parse: %s", err)
	}
	hash.Algorithm = Algorithm(alg)
	// TODO: check if the sum length matches hash.Algorithm.Size()
	hash.Sum = data[read:]
	// the whole input is consumed: varint header plus remaining sum
	return hash, len(data), nil
}
// Bytes serializes the hash as: varint-packed algorithm ID followed by the raw sum.
func (h *Hash) Bytes() []byte {
	return append(varint.Pack8(uint8(h.Algorithm)), h.Sum...)
}
// FromSafe64 parses a hash from its unpadded URL-safe base64 representation.
// Fixes: errors.New(fmt.Sprintf(...)) replaced by fmt.Errorf (staticcheck
// S1028); local `bytes` renamed to avoid shadowing the bytes package.
func FromSafe64(s string) (*Hash, error) {
	raw, err := base64.RawURLEncoding.DecodeString(s)
	if err != nil {
		return nil, fmt.Errorf("hash: failed to parse: %s", err)
	}
	hash, _, err := FromBytes(raw)
	return hash, err
}

// Safe64 returns the hash encoded as unpadded URL-safe base64.
func (h *Hash) Safe64() string {
	return base64.RawURLEncoding.EncodeToString(h.Bytes())
}
// FromHex parses a hash from its hex representation.
// Fixes: errors.New(fmt.Sprintf(...)) replaced by fmt.Errorf (staticcheck
// S1028); local `bytes` renamed to avoid shadowing the bytes package.
func FromHex(s string) (*Hash, error) {
	raw, err := hex.DecodeString(s)
	if err != nil {
		return nil, fmt.Errorf("hash: failed to parse: %s", err)
	}
	hash, _, err := FromBytes(raw)
	return hash, err
}

// Hex returns the hash encoded as a hex string.
func (h *Hash) Hex() string {
	return hex.EncodeToString(h.Bytes())
}
// Equal reports whether both hashes use the same algorithm and carry the same sum.
func (h *Hash) Equal(other *Hash) bool {
	return h.Algorithm == other.Algorithm && bytes.Equal(h.Sum, other.Sum)
}
// Sum hashes data with the given algorithm.
func Sum(data []byte, alg Algorithm) *Hash {
	hasher := alg.New()
	hasher.Write(data)
	return &Hash{
		Algorithm: alg,
		Sum:       hasher.Sum(nil),
	}
}

// SumString hashes the string data with the given algorithm.
func SumString(data string, alg Algorithm) *Hash {
	hasher := alg.New()
	io.WriteString(hasher, data)
	return &Hash{
		Algorithm: alg,
		Sum:       hasher.Sum(nil),
	}
}
// SumReader hashes all data read from reader with the given algorithm.
// The error, if any, comes from reading, not from hashing.
func SumReader(reader io.Reader, alg Algorithm) (*Hash, error) {
	hasher := alg.New()
	_, err := io.Copy(hasher, reader)
	if err != nil {
		return nil, err
	}
	return &Hash{
		Algorithm: alg,
		Sum:       hasher.Sum(nil),
	}, nil
}
// SumAndCompare hashes data with other's algorithm and reports whether the
// result matches other; the freshly computed hash is returned as well.
func SumAndCompare(data []byte, other Hash) (bool, *Hash) {
	computed := Sum(data, other.Algorithm)
	match := other.Equal(computed)
	return match, computed
}

// SumReaderAndCompare hashes the reader's data with other's algorithm and
// reports whether the result matches other; the computed hash is returned too.
func SumReaderAndCompare(reader io.Reader, other Hash) (bool, *Hash, error) {
	computed, err := SumReader(reader, other.Algorithm)
	if err != nil {
		return false, nil, err
	}
	match := other.Equal(computed)
	return match, computed, nil
}
// RecommendedAlg returns the weakest algorithm from orderedByRecommendation
// that still provides at least strengthInBits of security. Requesting 0 bits
// maps the requirement to 0xFF bytes, which no algorithm meets, so the
// strongest (first) algorithm is returned.
func RecommendedAlg(strengthInBits uint16) Algorithm {
	strengthInBytes := uint8(strengthInBits / 8)
	if strengthInBits%8 != 0 {
		// round partial bytes up
		strengthInBytes++
	}
	if strengthInBytes == 0 {
		strengthInBytes = uint8(0xFF)
	}
	chosenAlg := orderedByRecommendation[0]
	for _, alg := range orderedByRecommendation {
		strength := alg.SecurityStrength()
		if strength < strengthInBytes {
			// list is ordered strongest-first: nothing weaker will qualify
			break
		}
		chosenAlg = alg
		if strength == strengthInBytes {
			// exact match - cannot do better (weaker-but-sufficient)
			break
		}
	}
	return chosenAlg
}

84
crypto/hash/hash_test.go Normal file
View file

@ -0,0 +1,84 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package hash
import (
"bytes"
"testing"
)
var (
	// reference inputs with published digests for the tested algorithms
	testEmpty = []byte("")
	testFox   = []byte("The quick brown fox jumps over the lazy dog")
)
// testAlgorithm verifies Sum, SumString and SumReader against known hex sums.
// The first byte of Hash.Hex() is the varint-packed algorithm ID, hence the
// [2:] slicing (two hex characters) before comparing with the raw digest.
// Fix: removed the dead `if err != nil` checks after Sum and SumString -
// neither returns an error, so `err` was never set there.
func testAlgorithm(t *testing.T, alg Algorithm, emptyHex, foxHex string) {
	// Sum
	hash := Sum(testEmpty, alg)
	if hash.Hex()[2:] != emptyHex {
		t.Errorf("test Sum %s (empty): hex sum mismatch, expected %s, got %s", alg.String(), emptyHex, hash.Hex())
	}
	hash = Sum(testFox, alg)
	if hash.Hex()[2:] != foxHex {
		t.Errorf("test Sum %s (fox): hex sum mismatch, expected %s, got %s", alg.String(), foxHex, hash.Hex())
	}

	// SumString
	hash = SumString(string(testEmpty), alg)
	if hash.Hex()[2:] != emptyHex {
		t.Errorf("test SumString %s (empty): hex sum mismatch, expected %s, got %s", alg.String(), emptyHex, hash.Hex())
	}
	hash = SumString(string(testFox), alg)
	if hash.Hex()[2:] != foxHex {
		t.Errorf("test SumString %s (fox): hex sum mismatch, expected %s, got %s", alg.String(), foxHex, hash.Hex())
	}

	// SumReader - the only variant that can actually return an error
	hash, err := SumReader(bytes.NewReader(testEmpty), alg)
	if err != nil {
		t.Errorf("test SumReader %s (empty): error occured: %s", alg.String(), err)
	}
	if hash.Hex()[2:] != emptyHex {
		t.Errorf("test SumReader %s (empty): hex sum mismatch, expected %s, got %s", alg.String(), emptyHex, hash.Hex())
	}
	hash, err = SumReader(bytes.NewReader(testFox), alg)
	if err != nil {
		t.Errorf("test SumReader %s (fox): error occured: %s", alg.String(), err)
	}
	if hash.Hex()[2:] != foxHex {
		t.Errorf("test SumReader %s (fox): hex sum mismatch, expected %s, got %s", alg.String(), foxHex, hash.Hex())
	}
}
// TestHash checks SHA2-512 and SHA3-512 against their published digests for
// the empty string and the fox pangram.
func TestHash(t *testing.T) {
	testAlgorithm(t, SHA2_512,
		"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e",
		"07e547d9586f6a73f73fbac0435ed76951218fb7d0c8d788a309d785436bbb642e93a252a954f23912547d1e8a3b5ed6e1bfd7097821233fa0538f3db854fee6",
	)
	testAlgorithm(t, SHA3_512,
		"a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26",
		"01dedd5de4ef14642445ba5f5b97c15e47b9ad931326e4b0727cd94cefc44fff23f07bf543139939b49128caf436dc1bdee54fcb24023a08d9403f9b4bf0d450",
	)
}

30
crypto/hash/proxies.go Normal file
View file

@ -0,0 +1,30 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package hash
import (
"hash"
"golang.org/x/crypto/blake2b"
"golang.org/x/crypto/blake2s"
)
// NewBlake2s256 returns an unkeyed Blake2s-256 hasher.
// The error is ignored: blake2s/blake2b constructors only fail for invalid keys, and nil is always valid.
func NewBlake2s256() hash.Hash {
	h, _ := blake2s.New256(nil)
	return h
}

// NewBlake2b256 returns an unkeyed Blake2b-256 hasher.
func NewBlake2b256() hash.Hash {
	h, _ := blake2b.New256(nil)
	return h
}

// NewBlake2b384 returns an unkeyed Blake2b-384 hasher.
func NewBlake2b384() hash.Hash {
	h, _ := blake2b.New384(nil)
	return h
}

// NewBlake2b512 returns an unkeyed Blake2b-512 hasher.
func NewBlake2b512() hash.Hash {
	h, _ := blake2b.New512(nil)
	return h
}

37
crypto/random/random.go Normal file
View file

@ -0,0 +1,37 @@
package random
import (
"crypto/rand"
"fmt"
"io"
"math/big"
)
// just (mostly) a proxy for now, awesome stuff comes later
// Int proxies crypto/rand.Int: a uniform random value in [0, max).
func Int(randSrc io.Reader, max *big.Int) (n *big.Int, err error) {
	return rand.Int(randSrc, max)
}

// Prime proxies crypto/rand.Prime: a random prime of the given bit length.
func Prime(randSrc io.Reader, bits int) (p *big.Int, err error) {
	return rand.Prime(randSrc, bits)
}
// Read fills b with cryptographically secure random bytes (proxies crypto/rand.Read).
func Read(b []byte) (n int, err error) {
	return rand.Read(b)
}

// Bytes returns size cryptographically secure random bytes.
// The parameter was renamed from `len`, which shadowed the builtin.
func Bytes(size int) ([]byte, error) {
	r := make([]byte, size)
	_, err := Read(r)
	if err != nil {
		return nil, fmt.Errorf("failed to get random data: %s", err)
	}
	return r, nil
}
// Reader is an io.Reader that yields cryptographically secure random bytes.
type Reader struct{}

// Read fills b with random bytes via the package-level Read.
func (r Reader) Read(b []byte) (n int, err error) {
	return Read(b)
}

63
database/base.go Normal file
View file

@ -0,0 +1,63 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"errors"
"strings"
"github.com/Safing/safing-core/database/dbutils"
"github.com/ipfs/go-datastore"
uuid "github.com/satori/go.uuid"
)
// Base provides the common database-key and metadata plumbing that models
// embed to satisfy most of the Model interface.
type Base struct {
	dbKey *datastore.Key // nil until the object is created/saved
	meta  *dbutils.Meta
}

// SetKey sets the object's database key.
func (m *Base) SetKey(key *datastore.Key) {
	m.dbKey = key
}

// GetKey returns the object's database key (nil if never saved).
func (m *Base) GetKey() *datastore.Key {
	return m.dbKey
}

// FmtKey returns the object's database key as a string.
func (m *Base) FmtKey() string {
	return m.dbKey.String()
}

// Meta returns the object's metadata.
func (m *Base) Meta() *dbutils.Meta {
	return m.meta
}
// CreateObject persists model under namespace/<type>:<name>. When name is
// empty, a random instance name (UUIDv4 with dashes stripped) is generated.
// The two previously duplicated branches were collapsed into one NewInstance call.
func (m *Base) CreateObject(namespace *datastore.Key, name string, model Model) error {
	instanceName := name
	if instanceName == "" {
		instanceName = strings.Replace(uuid.NewV4().String(), "-", "", -1)
	}
	newKey := NewInstance(namespace.ChildString(getTypeName(model)), instanceName)
	m.dbKey = &newKey
	return Create(*m.dbKey, model)
}
// SaveObject updates an already-created object in the database.
func (m *Base) SaveObject(model Model) error {
	if m.dbKey == nil {
		return errors.New("cannot save new object, use Create() instead")
	}
	return Update(*m.dbKey, model)
}

// Delete removes the object from the database.
// Fix: error message previously read "cannot delete object unsaved object".
func (m *Base) Delete() error {
	if m.dbKey == nil {
		return errors.New("cannot delete unsaved object")
	}
	return Delete(*m.dbKey)
}
// NewInstance builds an instance key by joining the base key and the instance
// name with a colon.
func NewInstance(k datastore.Key, s string) datastore.Key {
	joined := k.String() + ":" + s
	return datastore.NewKey(joined)
}

151
database/database.go Normal file
View file

@ -0,0 +1,151 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"errors"
"fmt"
"os"
"path"
"strings"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
mount "github.com/ipfs/go-datastore/syncmount"
"github.com/Safing/safing-core/database/dbutils"
"github.com/Safing/safing-core/database/ds/channelshim"
"github.com/Safing/safing-core/database/ds/leveldb"
"github.com/Safing/safing-core/log"
"github.com/Safing/safing-core/meta"
)
// TODO: do not let other modules panic, even if database module crashes.
// db is the process-wide datastore, initialized in init().
var db ds.Datastore

// ErrNotFound is returned when a database entry does not exist.
var ErrNotFound = errors.New("database: entry could not be found")
// init selects the datastore backend: an in-memory map for test binaries,
// otherwise a leveldb store mounted at "/" with an in-memory overlay mounted
// at "/Run" for volatile entries. Everything is wrapped in the channelshim
// for thread-safety.
// Fix: the leveldb failure message previously said "simplefs" (copy-paste).
func init() {
	if strings.HasSuffix(os.Args[0], ".test") {
		// testing setup
		log.Warning("===== DATABASE RUNNING IN TEST MODE =====")
		db = channelshim.NewChanneledDatastore(ds.NewMapDatastore())
		return
	}

	// sfsDB, err := simplefs.NewDatastore(meta.DatabaseDir())
	// if err != nil {
	// 	fmt.Fprintf(os.Stderr, "FATAL ERROR: could not init simplefs database: %s\n", err)
	// 	os.Exit(1)
	// }
	ldb, err := leveldb.NewDatastore(path.Join(meta.DatabaseDir(), "leveldb"), &leveldb.Options{})
	if err != nil {
		fmt.Fprintf(os.Stderr, "FATAL ERROR: could not init leveldb database: %s\n", err)
		os.Exit(1)
	}
	mapDB := ds.NewMapDatastore()
	db = channelshim.NewChanneledDatastore(mount.New([]mount.Mount{
		{
			Prefix:    ds.NewKey("/Run"),
			Datastore: mapDB,
		},
		{
			Prefix:    ds.NewKey("/"),
			Datastore: ldb,
		},
	}))
}
// func Batch() (ds.Batch, error) {
// return db.Batch()
// }
// func Close() error {
// return db.Close()
// }
// Get fetches the entry at key and returns it as a Model.
// A missing key is reported as the package-level ErrNotFound.
func Get(key *ds.Key) (Model, error) {
	data, err := db.Get(*key)
	if err == ds.ErrNotFound {
		return nil, ErrNotFound
	}
	if err != nil {
		return nil, err
	}
	model, ok := data.(Model)
	if !ok {
		return nil, errors.New("database did not return model")
	}
	return model, nil
}
// GetAndEnsureModel fetches the entry at namespace/<type>:<name>, ensures it
// matches the given model's type and attaches the key to the returned model.
func GetAndEnsureModel(namespace *ds.Key, name string, model Model) (Model, error) {
	newKey := namespace.ChildString(getTypeName(model)).Instance(name)
	data, err := Get(&newKey)
	if err != nil {
		return nil, err
	}
	newModel, err := EnsureModel(data, model)
	if err != nil {
		return nil, err
	}
	newModel.SetKey(&newKey)
	return newModel, nil
}
// Has reports whether an entry exists at key.
func Has(key ds.Key) (exists bool, err error) {
	return db.Has(key)
}

// Create stores a new entry and notifies create-subscribers first.
func Create(key ds.Key, model Model) (err error) {
	handleCreateSubscriptions(model)
	err = db.Put(key, model)
	if err != nil {
		log.Tracef("database: failed to create entry %s: %s", key, err)
	}
	return err
}

// Update overwrites an entry and notifies update-subscribers first.
func Update(key ds.Key, model Model) (err error) {
	handleUpdateSubscriptions(model)
	err = db.Put(key, model)
	if err != nil {
		log.Tracef("database: failed to update entry %s: %s", key, err)
	}
	return err
}

// Delete removes an entry and notifies delete-subscribers first.
func Delete(key ds.Key) (err error) {
	handleDeleteSubscriptions(&key)
	return db.Delete(key)
}

// Query runs a datastore query against the database.
func Query(q dsq.Query) (dsq.Results, error) {
	return db.Query(q)
}
// RawGet fetches the entry at key and returns it as a *dbutils.Wrapper,
// bypassing model handling.
func RawGet(key ds.Key) (*dbutils.Wrapper, error) {
	data, err := db.Get(key)
	if err != nil {
		return nil, err
	}
	if wrapped, ok := data.(*dbutils.Wrapper); ok {
		return wrapped, nil
	}
	return nil, errors.New("returned data is not a wrapper")
}

// RawPut stores value at key without any subscription handling.
func RawPut(key ds.Key, value interface{}) error {
	return db.Put(key, value)
}

12
database/dbutils/meta.go Normal file
View file

@ -0,0 +1,12 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package dbutils
// Meta holds per-entry database metadata. Timestamps are unix seconds.
type Meta struct {
	Created  int64 `json:"c,omitempty" bson:"c,omitempty"`
	Modified int64 `json:"m,omitempty" bson:"m,omitempty"`
	Expires  int64 `json:"e,omitempty" bson:"e,omitempty"`
	Deleted  int64 `json:"d,omitempty" bson:"d,omitempty"`
	Secret   bool  `json:"s,omitempty" bson:"s,omitempty"` // secrets must not be sent to clients, only synced between cores
	// NOTE(review): field is spelled "Cronjewel" but the comment says "crownjewels" - probable typo; renaming would break serialized data, so it stays.
	Cronjewel bool `json:"j,omitempty" bson:"j,omitempty"` // crownjewels must never leave the instance
}

View file

@ -0,0 +1,67 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
/*
Package dbutils provides important functions for datastore backends without creating an import loop.
*/
package dbutils
import (
"errors"
"fmt"
"github.com/ipfs/go-datastore"
"github.com/Safing/safing-core/formats/dsd"
"github.com/Safing/safing-core/formats/varint"
)
// Wrapper carries already-serialized entry data together with its database key
// and the dsd format identifier parsed from the data's varint header.
type Wrapper struct {
	dbKey  *datastore.Key
	meta   *Meta
	Format uint8  // dsd serialization format of Data
	Data   []byte // raw serialized entry, including the format header
}
// NewWrapper wraps already-serialized data; the first byte(s) must be the
// varint-packed dsd format identifier.
// Fixes: guards against empty input (the original carried a comment noting an
// index-out-of-range panic from varint.Unpack8 here); local `new` renamed to
// avoid shadowing the builtin.
func NewWrapper(key *datastore.Key, data []byte) (*Wrapper, error) {
	if len(data) == 0 {
		return nil, errors.New("database: cannot wrap empty data")
	}
	format, _, err := varint.Unpack8(data)
	if err != nil {
		return nil, fmt.Errorf("database: could not get dsd format: %s", err)
	}
	wrapper := &Wrapper{
		Format: format,
		Data:   data,
	}
	wrapper.SetKey(key)
	return wrapper, nil
}
// SetKey sets the wrapper's database key.
func (w *Wrapper) SetKey(key *datastore.Key) {
	w.dbKey = key
}

// GetKey returns the wrapper's database key.
func (w *Wrapper) GetKey() *datastore.Key {
	return w.dbKey
}

// FmtKey returns the wrapper's database key as a string.
func (w *Wrapper) FmtKey() string {
	return w.dbKey.String()
}
// DumpModel serializes uncertain. An already-wrapped object returns its raw
// data directly (the requested format must match unless storageType is
// dsd.AUTO); anything else is dumped via dsd.
func DumpModel(uncertain interface{}, storageType uint8) ([]byte, error) {
	if wrapped, ok := uncertain.(*Wrapper); ok {
		if storageType != dsd.AUTO && storageType != wrapped.Format {
			return nil, errors.New("could not dump model, wrapped object format mismatch")
		}
		return wrapped.Data, nil
	}
	return dsd.Dump(uncertain, storageType)
}

100
database/doc.go Normal file
View file

@ -0,0 +1,100 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
/*
Package database provides a universal interface for interacting with the database.
The Lazy Database
The database system can handle Go structs as well as serialized data by the dsd package.
While data is in transit within the system, it does not know which form it currently has. Only when it reaches its destination, it must ensure that it is either of a certain type or dump it.
Internals
The database system uses the Model interface to transparently handle all types of structs that get saved in the database. Structs include Base struct to fulfill most parts of the Model interface.
Boilerplate Code
Receiving model, using as struct:
// At some point, declare a pointer to your model.
// This is only used to identify the model, so you can reuse it safely for this purpose
var cowModel *Cow // only use this as parameter for database.EnsureModel-like functions
receivedModel := <- models // chan database.Model
cow, ok := database.SilentEnsureModel(receivedModel, cowModel).(*Cow)
if !ok {
panic("received model does not match expected model")
}
// more verbose, in case you need better error handling
receivedModel := <- models // chan database.Model
genericModel, err := database.EnsureModel(receivedModel, cowModel)
if err != nil {
panic(err)
}
cow, ok := genericModel.(*Cow)
if !ok {
panic("received model does not match expected model")
}
Receiving a model, dumping:
// receivedModel <- chan database.Model
bytes, err := database.DumpModel(receivedModel, dsd.JSON) // or other dsd format
if err != nil {
panic(err)
}
Model definition:
// Cow makes moo.
type Cow struct {
database.Base
// Fields...
}
var cowModel *Cow // only use this as parameter for database.EnsureModel-like functions
func init() {
database.RegisterModel(cowModel, func() database.Model { return new(Cow) })
}
// this is all you need, but you might find the following code helpful:
var cowNamespace = datastore.NewKey("/Cow")
// Create saves Cow with the provided name in the default namespace.
func (m *Cow) Create(name string) error {
return m.CreateObject(&cowNamespace, name, m)
}
// CreateInNamespace saves Cow with the provided name in the provided namespace.
func (m *Cow) CreateInNamespace(namespace *datastore.Key, name string) error {
return m.CreateObject(namespace, name, m)
}
// Save saves Cow.
func (m *Cow) Save() error {
return m.SaveObject(m)
}
// GetCow fetches Cow with the provided name from the default namespace.
func GetCow(name string) (*Cow, error) {
return GetCowFromNamespace(&cowNamespace, name)
}
// GetCowFromNamespace fetches Cow with the provided name from the provided namespace.
func GetCowFromNamespace(namespace *datastore.Key, name string) (*Cow, error) {
object, err := database.GetAndEnsureModel(namespace, name, cowModel)
if err != nil {
return nil, err
}
model, ok := object.(*Cow)
if !ok {
return nil, database.NewMismatchError(object, cowModel)
}
return model, nil
}
*/
package database

View file

@ -0,0 +1,4 @@
from: https://github.com/ipfs/go-datastore/blob/master/basic_ds.go
commit: https://github.com/ipfs/go-datastore/commit/545f59008f75bdb6b28abafd8391d7d7d19422be
original files imported:
- basic_ds.go sha256:f6bd8d26e3511539358b1712c1f1359b9da6425f2d28c8305f4017e61688fe4d

View file

@ -0,0 +1,21 @@
The MIT License
Copyright (c) 2016 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -0,0 +1,288 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package channelshim
import (
"errors"
"io"
"time"
datastore "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
"github.com/tevino/abool"
)
// ErrDatastoreClosed is returned for any request made after Close succeeded.
var ErrDatastoreClosed = errors.New("datastore: this instance was closed")

// ChanneledDatastore makes a datastore thread-safe by funneling every
// operation through request/reply channel pairs serviced by one goroutine.
type ChanneledDatastore struct {
	child    datastore.Datastore      // the wrapped datastore
	setChild chan datastore.Datastore // delivers the child when not set at construction

	putRequest    chan *VKeyValue
	putReply      chan *error
	getRequest    chan *datastore.Key
	getReply      chan *VValueErr
	hasRequest    chan *datastore.Key
	hasReply      chan *VExistsErr
	deleteRequest chan *datastore.Key
	deleteReply   chan *error
	queryRequest  chan *dsq.Query
	queryReply    chan *VResultsErr
	batchRequest  chan interface{} // nothing actually
	batchReply    chan *VBatchErr
	closeRequest  chan interface{} // nothing actually
	closeReply    chan *error
	closedFlag    *abool.AtomicBool
}

// VKeyValue is the payload of a put request.
type VKeyValue struct {
	Key   datastore.Key
	Value interface{}
}

// VValueErr is the reply of a get request.
type VValueErr struct {
	Value interface{}
	Err   error
}

// VExistsErr is the reply of a has request.
type VExistsErr struct {
	Exists bool
	Err    error
}

// VResultsErr is the reply of a query request.
type VResultsErr struct {
	Results dsq.Results
	Err     error
}

// VBatchErr is the reply of a batch request.
type VBatchErr struct {
	Batch datastore.Batch
	Err   error
}
// run is the single worker goroutine that serializes all datastore
// operations. It services one request at a time, which is what makes the
// wrapper thread-safe. It exits only after a successful Close, handing
// over to stop() to drain late requests.
func (cds *ChanneledDatastore) run() {
	// wait for the child to be injected via SetChild if it was not
	// provided at construction time
	if cds.child == nil {
		cds.child = <-cds.setChild
	}
	for {
		select {
		case v := <-cds.putRequest:
			cds.put(v)
		case v := <-cds.getRequest:
			cds.get(v)
		case v := <-cds.hasRequest:
			cds.has(v)
		case v := <-cds.deleteRequest:
			cds.delete(v)
		case v := <-cds.queryRequest:
			cds.query(v)
		case <-cds.batchRequest:
			cds.batch()
		case <-cds.closeRequest:
			err := cds.close()
			if err == nil {
				// FIX: closedFlag was never set anywhere, so the
				// IsSet() fast path in the public methods could never
				// trigger. Mark the instance closed before replying so
				// new callers fail fast with ErrDatastoreClosed.
				cds.closedFlag.Set()
				cds.closeReply <- &err
				// drain requests from goroutines that raced past the
				// closedFlag check, after run() returns
				defer cds.stop()
				return
			}
			// close failed: keep running so the datastore stays usable
			cds.closeReply <- &err
		}
	}
}
// stop drains requests from goroutines that were already committed to a
// channel send when the datastore closed, answering each with
// ErrDatastoreClosed. After one minute of inactivity it closes all
// channels and returns.
func (cds *ChanneledDatastore) stop() {
	for {
		select {
		case <-cds.putRequest:
			cds.putReply <- &ErrDatastoreClosed
		case <-cds.getRequest:
			cds.getReply <- &VValueErr{nil, ErrDatastoreClosed}
		case <-cds.hasRequest:
			cds.hasReply <- &VExistsErr{false, ErrDatastoreClosed}
		case <-cds.deleteRequest:
			cds.deleteReply <- &ErrDatastoreClosed
		case <-cds.queryRequest:
			cds.queryReply <- &VResultsErr{nil, ErrDatastoreClosed}
		case <-cds.batchRequest:
			cds.batchReply <- &VBatchErr{nil, ErrDatastoreClosed}
		case <-cds.closeRequest:
			cds.closeReply <- &ErrDatastoreClosed
		case <-time.After(1 * time.Minute):
			// TODO: theoretically a race condition, as some goroutines _could_ still be stuck in front of the request channel
			// A send on any of these channels after this point panics.
			close(cds.putRequest)
			close(cds.putReply)
			close(cds.getRequest)
			close(cds.getReply)
			close(cds.hasRequest)
			close(cds.hasReply)
			close(cds.deleteRequest)
			close(cds.deleteReply)
			close(cds.queryRequest)
			close(cds.queryReply)
			close(cds.batchRequest)
			close(cds.batchReply)
			close(cds.closeRequest)
			close(cds.closeReply)
			return
		}
	}
}
// NewChanneledDatastore constructs a datastore accessed through channels.
func NewChanneledDatastore(ds datastore.Datastore) *ChanneledDatastore {
cds := &ChanneledDatastore{child: ds}
cds.setChild = make(chan datastore.Datastore)
cds.putRequest = make(chan *VKeyValue)
cds.putReply = make(chan *error)
cds.getRequest = make(chan *datastore.Key)
cds.getReply = make(chan *VValueErr)
cds.hasRequest = make(chan *datastore.Key)
cds.hasReply = make(chan *VExistsErr)
cds.deleteRequest = make(chan *datastore.Key)
cds.deleteReply = make(chan *error)
cds.queryRequest = make(chan *dsq.Query)
cds.queryReply = make(chan *VResultsErr)
cds.batchRequest = make(chan interface{})
cds.batchReply = make(chan *VBatchErr)
cds.closeRequest = make(chan interface{})
cds.closeReply = make(chan *error)
cds.closedFlag = abool.NewBool(false)
go cds.run()
return cds
}
// SetChild sets the child of the datastore, if not yet set.
// It only succeeds while run() is blocked waiting on the setChild
// channel (i.e. the datastore was constructed with a nil child).
func (cds *ChanneledDatastore) SetChild(ds datastore.Datastore) error {
	select {
	case cds.setChild <- ds:
	default:
		// nobody is receiving: run() already has a child
		return errors.New("channelshim: child already set")
	}
	return nil
}

// Children implements Shim
func (cds *ChanneledDatastore) Children() []datastore.Datastore {
	return []datastore.Datastore{cds.child}
}
// Put implements Datastore.Put. The request/reply pair below must stay in
// exactly this order: run() guarantees one reply per request.
func (cds *ChanneledDatastore) Put(key datastore.Key, value interface{}) error {
	if cds.closedFlag.IsSet() {
		return ErrDatastoreClosed
	}
	cds.putRequest <- &VKeyValue{key, value}
	err := <-cds.putReply
	return *err
}

// put executes Put on the child; only called from run().
func (cds *ChanneledDatastore) put(v *VKeyValue) {
	err := cds.child.Put(v.Key, v.Value)
	cds.putReply <- &err
}

// Get implements Datastore.Get
func (cds *ChanneledDatastore) Get(key datastore.Key) (value interface{}, err error) {
	if cds.closedFlag.IsSet() {
		return nil, ErrDatastoreClosed
	}
	cds.getRequest <- &key
	v := <-cds.getReply
	return v.Value, v.Err
}

// get executes Get on the child; only called from run().
func (cds *ChanneledDatastore) get(key *datastore.Key) {
	value, err := cds.child.Get(*key)
	cds.getReply <- &VValueErr{value, err}
}

// Has implements Datastore.Has
func (cds *ChanneledDatastore) Has(key datastore.Key) (exists bool, err error) {
	if cds.closedFlag.IsSet() {
		return false, ErrDatastoreClosed
	}
	cds.hasRequest <- &key
	v := <-cds.hasReply
	return v.Exists, v.Err
}

// has executes Has on the child; only called from run().
func (cds *ChanneledDatastore) has(key *datastore.Key) {
	exists, err := cds.child.Has(*key)
	cds.hasReply <- &VExistsErr{exists, err}
}

// Delete implements Datastore.Delete
func (cds *ChanneledDatastore) Delete(key datastore.Key) error {
	if cds.closedFlag.IsSet() {
		return ErrDatastoreClosed
	}
	cds.deleteRequest <- &key
	err := <-cds.deleteReply
	return *err
}

// delete executes Delete on the child; only called from run().
func (cds *ChanneledDatastore) delete(key *datastore.Key) {
	err := cds.child.Delete(*key)
	cds.deleteReply <- &err
}

// Query implements Datastore.Query. Note that the returned Results are
// produced by the child and iterating them is NOT serialized by this shim.
func (cds *ChanneledDatastore) Query(q dsq.Query) (dsq.Results, error) {
	if cds.closedFlag.IsSet() {
		return nil, ErrDatastoreClosed
	}
	cds.queryRequest <- &q
	v := <-cds.queryReply
	return v.Results, v.Err
}

// query executes Query on the child; only called from run().
func (cds *ChanneledDatastore) query(q *dsq.Query) {
	results, err := cds.child.Query(*q)
	cds.queryReply <- &VResultsErr{results, err}
}

// Batch implements Datastore.Batch (doc comment fixed: previously said "Query").
func (cds *ChanneledDatastore) Batch() (datastore.Batch, error) {
	if cds.closedFlag.IsSet() {
		return nil, ErrDatastoreClosed
	}
	cds.batchRequest <- nil
	v := <-cds.batchReply
	return v.Batch, v.Err
}

// batch executes Batch on the child if it supports batching; only called
// from run().
func (cds *ChanneledDatastore) batch() {
	if bds, ok := cds.child.(datastore.Batching); ok {
		batch, err := bds.Batch()
		cds.batchReply <- &VBatchErr{batch, err}
	} else {
		cds.batchReply <- &VBatchErr{nil, datastore.ErrBatchUnsupported}
	}
}

// Close closes the child Datastore (if it is an io.Closer) and shuts down
// this shim (doc comment fixed: previously said "Query").
func (cds *ChanneledDatastore) Close() error {
	if cds.closedFlag.IsSet() {
		return ErrDatastoreClosed
	}
	cds.closeRequest <- nil
	err := <-cds.closeReply
	return *err
}

// close closes the child if it supports closing; only called from run().
// NOTE(review): the type-switched variable shadows the receiver here.
func (cds *ChanneledDatastore) close() error {
	if cds, ok := cds.child.(io.Closer); ok {
		return cds.Close()
	}
	return nil
}

// IsThreadSafe marks this datastore as thread-safe (datastore.ThreadSafeDatastore marker).
func (cds *ChanneledDatastore) IsThreadSafe() {
}

View file

@ -0,0 +1,64 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package channelshim
import (
"io"
"sync"
"testing"
datastore "github.com/ipfs/go-datastore"
"github.com/ipfs/go-datastore/query"
)
// shared test fixtures for the concurrency test below
var cds datastore.Datastore
var key datastore.Key
var q query.Query
var wg sync.WaitGroup

// testFunctions exercises every datastore operation once; errors are
// intentionally ignored, the test only checks for races/deadlocks.
// NOTE(review): wg.Add(1) runs inside the spawned goroutine, so wg.Wait()
// in the test can return before all goroutines have registered — the Add
// should happen at the spawn site. Fixing this requires changing both this
// helper and its callers together.
func testFunctions(testClose bool) {
	wg.Add(1)
	defer wg.Done()
	cds.Put(key, "value")
	cds.Get(key)
	cds.Has(key)
	cds.Delete(key)
	cds.Query(q)
	if batchingDS, ok := cds.(datastore.Batching); ok {
		batchingDS.Batch()
	}
	if testClose {
		if closingDS, ok := cds.(io.Closer); ok {
			closingDS.Close()
		}
	}
}
// TestChanneledDatastore stresses the shim with concurrent operations,
// then with concurrent operations racing a Close, then with operations
// after Close. It relies on the race detector rather than assertions.
// NOTE(review): because testFunctions calls wg.Add inside the goroutine,
// each wg.Wait() below may return early — see note on testFunctions.
func TestChanneledDatastore(t *testing.T) {
	cds = NewChanneledDatastore(datastore.NewNullDatastore())
	key = datastore.RandomKey()
	// test normal concurrency-safe operation
	for i := 0; i < 100; i++ {
		go testFunctions(false)
	}
	wg.Wait()
	// test shutdown procedure
	for i := 0; i < 50; i++ {
		go testFunctions(false)
	}
	for i := 0; i < 50; i++ {
		go testFunctions(true)
	}
	wg.Wait()
	// test closed functions, just to be sure
	go testFunctions(true)
	wg.Wait()
}

View file

@ -0,0 +1,5 @@
from: https://github.com/ipfs/go-ds-leveldb
commit: https://github.com/ipfs/go-ds-leveldb/commit/d2d7b2c585634fcf7edb0de3602d060de85616a5
original files imported:
- datastore.go sha256:03994e4ddc33e4b9f13a637c95753b808eb02d97a80b8f7539ef9f0498567ce1
- ds_test.go sha256:3f49b4c7769e8a69ba058411f763f7425128769fab25b58650368581dc303a7c

View file

@ -0,0 +1,21 @@
The MIT License
Copyright (c) 2016 Jeromy Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View file

@ -0,0 +1,250 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package leveldb
import (
"fmt"
"github.com/Safing/safing-core/database/dbutils"
"github.com/Safing/safing-core/formats/dsd"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
"github.com/jbenet/goprocess"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
)
// datastore is a go-datastore backed by a goleveldb database.
type datastore struct {
	DB *leveldb.DB
}

// Options is an alias of goleveldb's opt.Options so callers do not need to
// import goleveldb directly.
type Options opt.Options

// NewDatastore opens (or creates) a leveldb database at path. A nil opts
// uses goleveldb's defaults.
func NewDatastore(path string, opts *Options) (*datastore, error) {
	var nopts opt.Options
	if opts != nil {
		nopts = opt.Options(*opts)
	}
	db, err := leveldb.OpenFile(path, &nopts)
	if err != nil {
		return nil, err
	}
	return &datastore{
		DB: db,
	}, nil
}
// Put serializes value with dbutils.DumpModel (dsd auto format) and stores
// the resulting bytes under key. (Earlier upstream comment claimed
// "Returns ErrInvalidType if value is not of type []byte" — that no longer
// applies; any dumpable model is accepted and dump errors are returned.)
//
// Note: using sync = false.
// see http://godoc.org/github.com/syndtr/goleveldb/leveldb/opt#WriteOptions
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
	bytes, err := dbutils.DumpModel(value, dsd.AUTO) // or other dsd format
	if err != nil {
		return err
	}
	return d.DB.Put(key.Bytes(), bytes, nil)
}
// Get reads the raw bytes for key and returns them wrapped in a
// dbutils.Wrapper. Returns ds.ErrNotFound if the key does not exist.
func (d *datastore) Get(key ds.Key) (interface{}, error) {
	data, err := d.DB.Get(key.Bytes(), nil)
	if err != nil {
		if err == leveldb.ErrNotFound {
			return nil, ds.ErrNotFound
		}
		return nil, err
	}
	model, err := dbutils.NewWrapper(&key, data)
	if err != nil {
		return nil, err
	}
	return model, nil
}

// Has reports whether key exists in the database.
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
	return d.DB.Has(key.Bytes(), nil)
}
// Delete removes key from the database, returning ds.ErrNotFound if it
// does not exist.
//
// leveldb Delete will not return an error if the key doesn't
// exist (see https://github.com/syndtr/goleveldb/issues/109),
// so check that the key exists first and if not return an error.
func (d *datastore) Delete(key ds.Key) (err error) {
	exists, err := d.DB.Has(key.Bytes(), nil)
	// FIX: the error must be checked before exists. Previously `!exists`
	// was tested first, so a real Has error (exists==false then) was
	// silently reported as ds.ErrNotFound.
	if err != nil {
		return err
	}
	if !exists {
		return ds.ErrNotFound
	}
	return d.DB.Delete(key.Bytes(), nil)
}
// Query implements Datastore.Query by delegating to QueryNew.
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
	return d.QueryNew(q)
}

// QueryNew serves simple prefix-only queries directly from a leveldb
// iterator; anything with filters, orders, limit or offset falls back to
// the original goprocess-based implementation.
func (d *datastore) QueryNew(q dsq.Query) (dsq.Results, error) {
	if len(q.Filters) > 0 ||
		len(q.Orders) > 0 ||
		q.Limit > 0 ||
		q.Offset > 0 {
		return d.QueryOrig(q)
	}
	var rnge *util.Range
	if q.Prefix != "" {
		rnge = util.BytesPrefix([]byte(q.Prefix))
	}
	i := d.DB.NewIterator(rnge, nil)
	return dsq.ResultsFromIterator(q, dsq.Iterator{
		Next: func() (dsq.Result, bool) {
			ok := i.Next()
			if !ok {
				return dsq.Result{}, false
			}
			k := string(i.Key())
			e := dsq.Entry{Key: k}
			if !q.KeysOnly {
				// copy the value: the iterator's slice is only valid
				// until the next call to Next
				buf := make([]byte, len(i.Value()))
				copy(buf, i.Value())
				newKey := ds.RawKey(k)
				wrapper, err := dbutils.NewWrapper(&newKey, buf)
				if err != nil {
					return dsq.Result{Error: fmt.Errorf("failed to create wrapper for %s: %s", k, err)}, false
				}
				e.Value = wrapper
			}
			return dsq.Result{Entry: e}, true
		},
		Close: func() error {
			i.Release()
			return nil
		},
	}), nil
}
// QueryOrig runs a query in a goprocess-managed worker (see runQuery) and
// applies filters and orders naively on the result stream.
func (d *datastore) QueryOrig(q dsq.Query) (dsq.Results, error) {
	// we can use multiple iterators concurrently. see:
	// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator
	// advance the iterator only if the reader reads
	//
	// run query in own sub-process tied to Results.Process(), so that
	// it waits for us to finish AND so that clients can signal to us
	// that resources should be reclaimed.
	qrb := dsq.NewResultBuilder(q)
	qrb.Process.Go(func(worker goprocess.Process) {
		d.runQuery(worker, qrb)
	})
	// go wait on the worker (without signaling close)
	go qrb.Process.CloseAfterChildren()
	// Now, apply remaining things (filters, order)
	qr := qrb.Results()
	for _, f := range q.Filters {
		qr = dsq.NaiveFilter(qr, f)
	}
	for _, o := range q.Orders {
		qr = dsq.NaiveOrder(qr, o)
	}
	return qr, nil
}
// runQuery iterates the leveldb range for the query and streams entries
// into qrb.Output until exhausted, the limit is reached, or the client
// signals close via the worker process.
func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {
	var rnge *util.Range
	if qrb.Query.Prefix != "" {
		rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
	}
	i := d.DB.NewIterator(rnge, nil)
	defer i.Release()

	// advance iterator for offset
	if qrb.Query.Offset > 0 {
		for j := 0; j < qrb.Query.Offset; j++ {
			i.Next()
		}
	}

	// iterate, and handle limit, too
iteration:
	for sent := 0; i.Next(); sent++ {
		// end early if we hit the limit
		if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
			break
		}
		k := string(i.Key())
		e := dsq.Entry{Key: k}
		if !qrb.Query.KeysOnly {
			// copy the value: the iterator's slice is only valid until
			// the next call to Next
			buf := make([]byte, len(i.Value()))
			copy(buf, i.Value())
			newKey := ds.RawKey(k)
			wrapper, err := dbutils.NewWrapper(&newKey, buf)
			if err != nil {
				// FIX: previously this returned silently, truncating the
				// result stream without any indication; report the error.
				select {
				case qrb.Output <- dsq.Result{Error: err}:
				case <-worker.Closing():
				}
				return
			}
			e.Value = wrapper
		}
		select {
		case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
		case <-worker.Closing(): // client told us to end early.
			// FIX: a plain `break` here only exited the select statement,
			// so iteration continued after the client asked us to stop.
			break iteration
		}
	}

	if err := i.Error(); err != nil {
		select {
		case qrb.Output <- dsq.Result{Error: err}: // client read our error
		case <-worker.Closing(): // client told us to end.
			return
		}
	}
}
// LevelDB needs to be closed.
func (d *datastore) Close() (err error) {
	return d.DB.Close()
}

// IsThreadSafe marks this datastore as thread-safe.
func (d *datastore) IsThreadSafe() {}

// leveldbBatch collects writes and commits them in one leveldb batch.
type leveldbBatch struct {
	b  *leveldb.Batch
	db *leveldb.DB
}

// Batch implements Datastore.Batch.
func (d *datastore) Batch() (ds.Batch, error) {
	return &leveldbBatch{
		b:  new(leveldb.Batch),
		db: d.DB,
	}, nil
}
// Put queues a serialized value for the batch.
// NOTE(review): this uses dsd.Dump directly while datastore.Put uses
// dbutils.DumpModel — presumably equivalent on-disk, but confirm.
func (b *leveldbBatch) Put(key ds.Key, value interface{}) error {
	val, err := dsd.Dump(value, dsd.AUTO)
	if err != nil {
		// return ds.ErrInvalidType
		return err
	}
	b.b.Put(key.Bytes(), val)
	return nil
}

// Commit writes all queued operations atomically.
func (b *leveldbBatch) Commit() error {
	return b.db.Write(b.b, nil)
}

// Delete queues a deletion for the batch.
func (b *leveldbBatch) Delete(key ds.Key) error {
	b.b.Delete(key.Bytes())
	return nil
}

View file

@ -0,0 +1,159 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package leveldb
import (
"io/ioutil"
"os"
"testing"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
)
// testcases maps datastore keys to the values stored under them.
// NOTE(review): "/a/b/d" deliberately(?) stores "a/b/d" with slashes,
// unlike the other entries — confirm this asymmetry is intended.
var testcases = map[string]string{
	"/a":     "a",
	"/a/b":   "ab",
	"/a/b/c": "abc",
	"/a/b/d": "a/b/d",
	"/a/c":   "ac",
	"/a/d":   "ad",
	"/e":     "e",
	"/f":     "f",
}
// returns datastore, and a function to call on exit.
// (this garbage collects). So:
//
// d, close := newDS(t)
// defer close()
// returns datastore, and a function to call on exit.
// (this garbage collects). So:
//
//	d, close := newDS(t)
//	defer close()
func newDS(t *testing.T) (*datastore, func()) {
	path, err := ioutil.TempDir("/tmp", "testing_leveldb_")
	if err != nil {
		t.Fatal(err)
	}
	d, err := NewDatastore(path, nil)
	if err != nil {
		t.Fatal(err)
	}
	// cleanup: remove the temp dir and close the database
	return d, func() {
		os.RemoveAll(path)
		d.Close()
	}
}
// addTestCases stores all testcases and reads them back, comparing values.
// NOTE(review): the read path asserts the Get result is *[]byte — this
// relies on dbutils.NewWrapper yielding raw bytes for these values; verify.
func addTestCases(t *testing.T, d *datastore, testcases map[string]string) {
	for k, v := range testcases {
		dsk := ds.NewKey(k)
		if err := d.Put(dsk, []byte(v)); err != nil {
			t.Fatal(err)
		}
	}
	for k, v := range testcases {
		dsk := ds.NewKey(k)
		v2, err := d.Get(dsk)
		if err != nil {
			t.Fatal(err)
		}
		v2b := v2.(*[]byte)
		if string(*v2b) != v {
			t.Errorf("%s values differ: %s != %s", k, v, v2)
		}
	}
}
// TestQuery checks prefix queries and offset/limit handling against the
// fixed testcases. Offset/limit results depend on leveldb's sorted key
// iteration order.
func TestQuery(t *testing.T) {
	d, close := newDS(t)
	defer close()
	addTestCases(t, d, testcases)
	rs, err := d.Query(dsq.Query{Prefix: "/a/"})
	if err != nil {
		t.Fatal(err)
	}
	expectMatches(t, []string{
		"/a/b",
		"/a/b/c",
		"/a/b/d",
		"/a/c",
		"/a/d",
	}, rs)
	// test offset and limit
	rs, err = d.Query(dsq.Query{Prefix: "/a/", Offset: 2, Limit: 2})
	if err != nil {
		t.Fatal(err)
	}
	expectMatches(t, []string{
		"/a/b/d",
		"/a/c",
	}, rs)
}
// TestQueryRespectsProcess is currently a stub: it only seeds data and
// never issues a query or closes one early, so the goprocess cancellation
// path in runQuery is NOT exercised. TODO: add the actual assertions.
func TestQueryRespectsProcess(t *testing.T) {
	d, close := newDS(t)
	defer close()
	addTestCases(t, d, testcases)
}
// expectMatches drains actualR and verifies that the result keys are
// exactly the expected set: same count, and every expected key present.
func expectMatches(t *testing.T, expect []string, actualR dsq.Results) {
	actual, err := actualR.Rest()
	if err != nil {
		t.Error(err)
	}
	if len(actual) != len(expect) {
		t.Error("not enough", expect, actual)
	}
	for _, k := range expect {
		found := false
		for _, e := range actual {
			if e.Key == k {
				found = true
			}
		}
		if !found {
			t.Error(k, "not found")
		}
	}
}
// TestBatching writes all testcases through a batch, commits, and then
// reads each back individually to confirm the batch was applied.
func TestBatching(t *testing.T) {
	d, done := newDS(t)
	defer done()
	b, err := d.Batch()
	if err != nil {
		t.Fatal(err)
	}
	for k, v := range testcases {
		err := b.Put(ds.NewKey(k), []byte(v))
		if err != nil {
			t.Fatal(err)
		}
	}
	err = b.Commit()
	if err != nil {
		t.Fatal(err)
	}
	for k, v := range testcases {
		val, err := d.Get(ds.NewKey(k))
		if err != nil {
			t.Fatal(err)
		}
		// see note on addTestCases about the *[]byte assertion
		if v != string(*val.(*[]byte)) {
			t.Fatal("got wrong data!")
		}
	}
}

View file

@ -0,0 +1,428 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
/*
Package simplefs provides a dead simple file-based datastore backend.
It is primarily meant for easy testing or storing big files that can easily be accessed directly, without the datastore.
/path/path/type:key.json
/path/path/type:key/type:key
*/
package simplefs
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
"github.com/tevino/abool"
"github.com/Safing/safing-core/database/dbutils"
"github.com/Safing/safing-core/formats/dsd"
"github.com/Safing/safing-core/log"
)
const (
	// SIMPLEFS_TAG is written to the .simplefs marker file so we can
	// recognize (and refuse to clobber) directories we did not create.
	SIMPLEFS_TAG     = "330adcf3924003a59ae93289bc2cbb236391588f"
	DEFAULT_FILEMODE = os.FileMode(int(0644))
	DEFAULT_DIRMODE  = os.FileMode(int(0755))
)

// datastore stores each entry as a ".dsd" file below basePath; child keys
// become subdirectories (see the package comment for the layout).
type datastore struct {
	basePath    string
	basePathLen int // cached len(basePath) for fast prefix stripping
}
// NewDatastore opens a simplefs datastore at path. A fresh directory is
// created and tagged with a .simplefs marker file; an existing directory
// is only accepted if it carries a valid marker.
func NewDatastore(path string) (*datastore, error) {
	basePath, err := filepath.Abs(path)
	if err != nil {
		return nil, fmt.Errorf("failed to validate path %s: %s", path, err)
	}
	tagfile := filepath.Join(basePath, ".simplefs")
	file, err := os.Stat(basePath)
	if err != nil {
		if os.IsNotExist(err) {
			// directory does not exist yet: create and tag it
			err = os.MkdirAll(basePath, DEFAULT_DIRMODE)
			if err != nil {
				return nil, fmt.Errorf("failed to create directory: %s", err)
			}
			err = ioutil.WriteFile(tagfile, []byte(SIMPLEFS_TAG), DEFAULT_FILEMODE)
			if err != nil {
				return nil, fmt.Errorf("failed to create tag file (%s): %s", tagfile, err)
			}
		} else {
			return nil, fmt.Errorf("failed to stat path: %s", err)
		}
	} else {
		if !file.IsDir() {
			return nil, fmt.Errorf("provided path (%s) is a file", basePath)
		}
		// check if valid simplefs storage dir
		content, err := ioutil.ReadFile(tagfile)
		if err != nil {
			return nil, fmt.Errorf("could not read tag file (%s): %s", tagfile, err)
		}
		if string(content) != SIMPLEFS_TAG {
			return nil, fmt.Errorf("invalid tag file (%s)", tagfile)
		}
	}
	log.Infof("simplefs: opened database at %s", basePath)
	return &datastore{
		basePath:    basePath,
		basePathLen: len(basePath),
	}, nil
}
// buildPath maps a datastore key (e.g. "/a/b") to its on-disk file path
// below basePath, appending the ".dsd" suffix. It rejects keys that are
// too short or that would resolve outside of basePath after cleaning.
func (d *datastore) buildPath(path string) (string, error) {
	if len(path) < 2 {
		return "", fmt.Errorf("key too short: %s", path)
	}
	// strip the leading "/" of the key, anchor it under basePath and
	// normalize away any "." / ".." segments
	fullPath := filepath.Clean(filepath.Join(d.basePath, path[1:])) + ".dsd"
	if !strings.HasPrefix(fullPath, d.basePath) {
		return "", fmt.Errorf("key integrity check failed, compiled path is %s", fullPath)
	}
	return fullPath, nil
}
// Put serializes value and writes it to the key's file. The parent
// directory is created lazily: we optimistically write first and only
// MkdirAll + retry when that fails (the common case has the dir already).
func (d *datastore) Put(key ds.Key, value interface{}) (err error) {
	objPath, err := d.buildPath(key.String())
	if err != nil {
		return err
	}
	bytes, err := dbutils.DumpModel(value, dsd.AUTO) // or other dsd format
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(objPath, bytes, DEFAULT_FILEMODE)
	if err != nil {
		// create dir and try again
		err = os.MkdirAll(filepath.Dir(objPath), DEFAULT_DIRMODE)
		if err != nil {
			return fmt.Errorf("failed to create directory %s: %s", filepath.Dir(objPath), err)
		}
		err = ioutil.WriteFile(objPath, bytes, DEFAULT_FILEMODE)
		if err != nil {
			return fmt.Errorf("could not write file %s: %s", objPath, err)
		}
	}
	return nil
}
// Get reads the key's file and returns its contents in a dbutils.Wrapper.
func (d *datastore) Get(key ds.Key) (interface{}, error) {
	objPath, err := d.buildPath(key.String())
	if err != nil {
		return nil, err
	}
	data, err := ioutil.ReadFile(objPath)
	if err != nil {
		// TODO: distinguish between error and inexistance
		return nil, ds.ErrNotFound
	}
	model, err := dbutils.NewWrapper(&key, data)
	if err != nil {
		return nil, err
	}
	return model, nil
}

// Has reports whether the key's file exists on disk.
func (d *datastore) Has(key ds.Key) (exists bool, err error) {
	objPath, err := d.buildPath(key.String())
	if err != nil {
		return false, err
	}
	_, err = os.Stat(objPath)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, fmt.Errorf("failed to stat path %s: %s", objPath, err)
	}
	return true, nil
}
// Delete removes the key's file and the directory holding its children
// (the path without the ".dsd" suffix), if any.
func (d *datastore) Delete(key ds.Key) (err error) {
	objPath, err := d.buildPath(key.String())
	if err != nil {
		return err
	}
	// remove entry
	err = os.Remove(objPath)
	if err != nil {
		return fmt.Errorf("could not delete (all) in path %s: %s", objPath, err)
	}
	// remove children (same path minus the ".dsd" suffix)
	err = os.RemoveAll(objPath[:len(objPath)-4])
	if err != nil {
		// FIX: this message was a copy-paste duplicate of the one above,
		// making it impossible to tell which removal step failed.
		return fmt.Errorf("could not delete children of path %s: %s", objPath, err)
	}
	return nil
}
// Query walks the filesystem below the query prefix in a background
// goroutine and streams matching keys through the files channel into a
// dsq iterator. Values are loaded lazily in Next (only if !KeysOnly).
// Ordering is not supported. The walker is stopped either by closing the
// results (stopWalking) or after delivering Limit entries.
func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
	if len(q.Orders) > 0 {
		return nil, fmt.Errorf("simplefs: no support for ordering queries yet")
	}
	// log.Tracef("new query: %s", q.Prefix)
	// log.Tracef("simplefs: new query with prefix %s", q.Prefix)
	walkPath, err := d.buildPath(q.Prefix)
	if err != nil {
		return nil, err
	}
	// walk the containing directory (drop everything after the last separator)
	walkPath = walkPath[:strings.LastIndex(walkPath, string(os.PathSeparator))]
	files := make(chan *dsq.Entry)
	stopWalkingFlag := abool.NewBool(false)
	stopWalking := make(chan interface{})
	counter := 0
	go func() {
		err := filepath.Walk(walkPath, func(path string, info os.FileInfo, err error) error {
			// log.Tracef("walking: %s", path)
			if err != nil {
				return fmt.Errorf("simplfs: error in query: %s", err)
			}
			// skip directories
			if info.IsDir() {
				return nil
			}
			// check if we are still were we should be
			if !strings.HasPrefix(path, d.basePath) {
				log.Criticalf("simplfs: query jailbreaked: %s", path)
				return errors.New("jailbreaked")
			}
			// strip basePath to get back the key space
			path = path[d.basePathLen:]
			// check if there is enough space to remove ".dsd"
			if len(path) < 6 {
				return nil
			}
			path = path[:len(path)-4]
			// check if we still match prefix
			if !strings.HasPrefix(path, q.Prefix) {
				return nil
			}
			entry := dsq.Entry{
				Key: path,
				// TODO: phew, do we really want to load every single file we might not need? use nil for now.
				Value: nil,
			}
			for _, filter := range q.Filters {
				if !filter.Filter(entry) {
					return nil
				}
			}
			// yay, entry matches!
			counter++
			if q.Offset > counter {
				return nil
			}
			select {
			case files <- &entry:
			case <-stopWalking:
				// consumer closed the results; abort the walk
				return errors.New("finished")
			}
			if q.Limit != 0 && q.Limit <= counter {
				return errors.New("finished")
			}
			return nil
		})
		if err != nil {
			log.Warningf("simplefs: filewalker for query failed: %s", err)
		}
		// closing files makes Next() below return "done"
		close(files)
	}()
	return dsq.ResultsFromIterator(q, dsq.Iterator{
		Next: func() (dsq.Result, bool) {
			select {
			case entry := <-files:
				// nil entry means the channel was closed: walk finished
				if entry == nil {
					return dsq.Result{}, false
				}
				// log.Tracef("processing: %s", entry.Key)
				if !q.KeysOnly {
					// lazily load the value for this entry
					objPath, err := d.buildPath(entry.Key)
					if err != nil {
						return dsq.Result{Error: err}, false
					}
					data, err := ioutil.ReadFile(objPath)
					if err != nil {
						return dsq.Result{Error: fmt.Errorf("error reading file %s: %s", entry.Key, err)}, false
					}
					newKey := ds.RawKey(entry.Key)
					wrapper, err := dbutils.NewWrapper(&newKey, data)
					if err != nil {
						return dsq.Result{Error: fmt.Errorf("failed to create wrapper for %s: %s", entry.Key, err)}, false
					}
					entry.Value = wrapper
				}
				return dsq.Result{Entry: *entry, Error: nil}, true
			case <-time.After(10 * time.Second):
				return dsq.Result{Error: errors.New("filesystem timeout")}, false
			}
		},
		Close: func() error {
			// idempotent: only the first Close closes the stop channel
			if stopWalkingFlag.SetToIf(false, true) {
				close(stopWalking)
			}
			return nil
		},
	}), nil
}
//
// func (d *datastore) Query(q dsq.Query) (dsq.Results, error) {
// return d.QueryNew(q)
// }
//
// func (d *datastore) QueryNew(q dsq.Query) (dsq.Results, error) {
// if len(q.Filters) > 0 ||
// len(q.Orders) > 0 ||
// q.Limit > 0 ||
// q.Offset > 0 {
// return d.QueryOrig(q)
// }
// var rnge *util.Range
// if q.Prefix != "" {
// rnge = util.BytesPrefix([]byte(q.Prefix))
// }
// i := d.DB.NewIterator(rnge, nil)
// return dsq.ResultsFromIterator(q, dsq.Iterator{
// Next: func() (dsq.Result, bool) {
// ok := i.Next()
// if !ok {
// return dsq.Result{}, false
// }
// k := string(i.Key())
// e := dsq.Entry{Key: k}
//
// if !q.KeysOnly {
// buf := make([]byte, len(i.Value()))
// copy(buf, i.Value())
// e.Value = buf
// }
// return dsq.Result{Entry: e}, true
// },
// Close: func() error {
// i.Release()
// return nil
// },
// }), nil
// }
//
// func (d *datastore) QueryOrig(q dsq.Query) (dsq.Results, error) {
// // we can use multiple iterators concurrently. see:
// // https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator
// // advance the iterator only if the reader reads
// //
// // run query in own sub-process tied to Results.Process(), so that
// // it waits for us to finish AND so that clients can signal to us
// // that resources should be reclaimed.
// qrb := dsq.NewResultBuilder(q)
// qrb.Process.Go(func(worker goprocess.Process) {
// d.runQuery(worker, qrb)
// })
//
// // go wait on the worker (without signaling close)
// go qrb.Process.CloseAfterChildren()
//
// // Now, apply remaining things (filters, order)
// qr := qrb.Results()
// for _, f := range q.Filters {
// qr = dsq.NaiveFilter(qr, f)
// }
// for _, o := range q.Orders {
// qr = dsq.NaiveOrder(qr, o)
// }
// return qr, nil
// }
//
// func (d *datastore) runQuery(worker goprocess.Process, qrb *dsq.ResultBuilder) {
//
// var rnge *util.Range
// if qrb.Query.Prefix != "" {
// rnge = util.BytesPrefix([]byte(qrb.Query.Prefix))
// }
// i := d.DB.NewIterator(rnge, nil)
// defer i.Release()
//
// // advance iterator for offset
// if qrb.Query.Offset > 0 {
// for j := 0; j < qrb.Query.Offset; j++ {
// i.Next()
// }
// }
//
// // iterate, and handle limit, too
// for sent := 0; i.Next(); sent++ {
// // end early if we hit the limit
// if qrb.Query.Limit > 0 && sent >= qrb.Query.Limit {
// break
// }
//
// k := string(i.Key())
// e := dsq.Entry{Key: k}
//
// if !qrb.Query.KeysOnly {
// buf := make([]byte, len(i.Value()))
// copy(buf, i.Value())
// e.Value = buf
// }
//
// select {
// case qrb.Output <- dsq.Result{Entry: e}: // we sent it out
// case <-worker.Closing(): // client told us to end early.
// break
// }
// }
//
// if err := i.Error(); err != nil {
// select {
// case qrb.Output <- dsq.Result{Error: err}: // client read our error
// case <-worker.Closing(): // client told us to end.
// return
// }
// }
// }
// Close is a no-op: simplefs holds no open handles between operations.
func (d *datastore) Close() (err error) {
	return nil
}

// IsThreadSafe marks this datastore as thread-safe.
func (d *datastore) IsThreadSafe() {}

84
database/easyquery.go Normal file
View file

@ -0,0 +1,84 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"errors"
"fmt"
"strings"
dsq "github.com/ipfs/go-datastore/query"
)
// FilterMaxDepth accepts entries whose key has at most MaxDepth "/"
// separators, i.e. limits how deep below the prefix results may be.
type FilterMaxDepth struct {
	MaxDepth int
}

func (f FilterMaxDepth) Filter(entry dsq.Entry) bool {
	return strings.Count(entry.Key, "/") <= f.MaxDepth
}

// FilterKeyLength accepts only entries whose key has exactly Length bytes
// (used to match a single fully-qualified key).
type FilterKeyLength struct {
	Length int
}

func (f FilterKeyLength) Filter(entry dsq.Entry) bool {
	return len(entry.Key) == f.Length
}
// EasyQueryIterator translates a subscription key into a datastore query
// and returns its result iterator. The key's last segment selects the
// mode: empty ("/a/") → all children; leading "*" → children up to a
// depth derived from the number of asterisks; containing ":" → exactly
// that instance; otherwise → all instances of that type at this location.
func EasyQueryIterator(subscriptionKey string) (dsq.Results, error) {
	query := dsq.Query{}
	namespaces := strings.Split(subscriptionKey, "/")[1:]
	lastSpace := ""
	if len(namespaces) != 0 {
		lastSpace = namespaces[len(namespaces)-1]
	}
	switch {
	case lastSpace == "":
		// get all children
		query.Prefix = subscriptionKey
	case strings.HasPrefix(lastSpace, "*"):
		// get children to defined depth
		query.Prefix = strings.Trim(subscriptionKey, "*")
		query.Filters = []dsq.Filter{
			FilterMaxDepth{len(lastSpace) + len(namespaces) - 1},
		}
	case strings.Contains(lastSpace, ":"):
		// get exactly this one instance
		query.Prefix = subscriptionKey
		query.Filters = []dsq.Filter{
			FilterKeyLength{len(query.Prefix)},
		}
	default:
		// get only from this location and this type
		query.Prefix = subscriptionKey + ":"
		query.Filters = []dsq.Filter{
			FilterMaxDepth{len(namespaces)},
		}
	}
	// log.Tracef("easyquery: %s has prefix %s", subscriptionKey, query.Prefix)
	results, err := db.Query(query)
	if err != nil {
		// FIX: was errors.New(fmt.Sprintf(...)) — use fmt.Errorf directly
		return nil, fmt.Errorf("easyquery: %s", err)
	}
	return results, nil
}
// EasyQuery runs EasyQueryIterator and collects all entries into a slice.
func EasyQuery(subscriptionKey string) (*[]dsq.Entry, error) {
	results, err := EasyQueryIterator(subscriptionKey)
	if err != nil {
		return nil, err
	}
	entries, err := results.Rest()
	if err != nil {
		return nil, errors.New(fmt.Sprintf("easyquery: %s", err))
	}
	return &entries, nil
}

View file

@ -0,0 +1,68 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"testing"
datastore "github.com/ipfs/go-datastore"
)
// testQuery runs an EasyQuery and verifies the returned key names are
// exactly the expected multiset (order-independent).
func testQuery(t *testing.T, queryString string, expecting []string) {
	entries, err := EasyQuery(queryString)
	if err != nil {
		t.Errorf("error in query %s: %s", queryString, err)
		// FIX: previously execution fell through here and dereferenced
		// the nil entries pointer below, panicking the test run.
		return
	}
	keys := datastore.EntryKeys(*entries)
	// FIX: the old check matched each result against any expected name,
	// so duplicated results could pass as long as the totals matched.
	// Count expected names and consume them as results come in.
	remaining := make(map[string]int, len(expecting))
	for _, name := range expecting {
		remaining[name]++
	}
	ok := len(keys) == len(expecting)
	if ok {
		for _, key := range keys {
			if remaining[key.Name()] == 0 {
				ok = false
				break
			}
			remaining[key.Name()]--
		}
	}
	if !ok {
		t.Errorf("Query %s got %s, expected %s", queryString, keys, expecting)
	}
}
// TestEasyQuery seeds models at several depths and type names, then
// exercises each subscription-key mode of EasyQueryIterator: type query,
// instance query, all-children ("/") and depth-limited ("**") queries.
func TestEasyQuery(t *testing.T) {
	// setup test data
	(&(TestingModel{})).CreateInNamespace("EasyQuery", "1")
	(&(TestingModel{})).CreateInNamespace("EasyQuery", "2")
	(&(TestingModel{})).CreateInNamespace("EasyQuery", "3")
	(&(TestingModel{})).CreateInNamespace("EasyQuery/A", "4")
	(&(TestingModel{})).CreateInNamespace("EasyQuery/A/B", "5")
	(&(TestingModel{})).CreateInNamespace("EasyQuery/A/B/C", "6")
	(&(TestingModel{})).CreateInNamespace("EasyQuery/A/B/C/D", "7")
	(&(TestingModel{})).CreateWithTypeName("EasyQuery", "ConfigModel", "X")
	(&(TestingModel{})).CreateWithTypeName("EasyQuery", "ConfigModel", "Y")
	(&(TestingModel{})).CreateWithTypeName("EasyQuery/A", "ConfigModel", "Z")
	testQuery(t, "/Tests/EasyQuery/TestingModel", []string{"1", "2", "3"})
	testQuery(t, "/Tests/EasyQuery/TestingModel:1", []string{"1"})
	testQuery(t, "/Tests/EasyQuery/ConfigModel", []string{"X", "Y"})
	testQuery(t, "/Tests/EasyQuery/ConfigModel:Y", []string{"Y"})
	testQuery(t, "/Tests/EasyQuery/A/", []string{"Z", "4", "5", "6", "7"})
	testQuery(t, "/Tests/EasyQuery/A/B/**", []string{"5", "6"})
}

90
database/model.go Normal file
View file

@ -0,0 +1,90 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"fmt"
"strings"
"sync"
"github.com/ipfs/go-datastore"
"github.com/Safing/safing-core/database/dbutils"
"github.com/Safing/safing-core/formats/dsd"
)
// Model is the minimal interface every database model must implement:
// access to its datastore key. The commented methods below document the
// conventional (but not enforced) wider model API.
type Model interface {
	SetKey(*datastore.Key)
	GetKey() *datastore.Key
	FmtKey() string
	// Type() string
	// DefaultNamespace() datastore.Key
	// Create(string) error
	// CreateInLocation(datastore.Key, string) error
	// CreateObject(*datastore.Key, string, Model) error
	// Save() error
	// Delete() error
	// CastError(interface{}, interface{}) error
}
// getTypeName returns the bare type name of model, i.e. the %T
// representation with any package qualifier stripped ("pkg.Foo" → "Foo").
func getTypeName(model interface{}) string {
	typeString := fmt.Sprintf("%T", model)
	if idx := strings.LastIndex(typeString, "."); idx >= 0 {
		return typeString[idx+1:]
	}
	// unqualified type (builtin etc.): return as-is
	return typeString
}
// TypeAssertError returns an error describing a failed type assertion of
// a database object to the caller's expected model type.
func TypeAssertError(model Model, object interface{}) error {
	return fmt.Errorf("database: could not assert %s to type %T (is type %T)", model.FmtKey(), model, object)
}

// Model Registration

var (
	// registeredModels maps the %T string of a model to its constructor.
	registeredModels     = make(map[string]func() Model)
	registeredModelsLock sync.RWMutex
)
// RegisterModel registers a constructor for the model's concrete type so
// NewModel can later create fresh instances of it.
func RegisterModel(model Model, constructor func() Model) {
	registeredModelsLock.Lock()
	defer registeredModelsLock.Unlock()
	registeredModels[fmt.Sprintf("%T", model)] = constructor
}

// NewModel creates a fresh instance of model's registered concrete type,
// or errors if the type was never registered via RegisterModel.
func NewModel(model Model) (Model, error) {
	registeredModelsLock.RLock()
	defer registeredModelsLock.RUnlock()
	constructor, ok := registeredModels[fmt.Sprintf("%T", model)]
	if !ok {
		return nil, fmt.Errorf("database: cannot create new %T, not registered", model)
	}
	return constructor(), nil
}
// EnsureModel returns a fully typed model for uncertain. Objects that are
// not wrapped (already concrete models) are returned as-is. Wrapped objects
// are decoded into a fresh instance of model's registered type, inherit the
// wrapper's key, and are returned.
//
// The model argument is only used to look up the registered constructor; it
// is never modified. (A previous no-op assignment to the parameter was
// removed — assigning to it had no effect on the caller.)
func EnsureModel(uncertain, model Model) (Model, error) {
	wrappedObj, ok := uncertain.(*dbutils.Wrapper)
	if !ok {
		// already a concrete model, nothing to unwrap
		return uncertain, nil
	}
	newModel, err := NewModel(model)
	if err != nil {
		return nil, err
	}
	_, err = dsd.Load(wrappedObj.Data, &newModel)
	if err != nil {
		return nil, fmt.Errorf("database: failed to unwrap %T: %s", model, err)
	}
	newModel.SetKey(wrappedObj.GetKey())
	return newModel, nil
}
// SilentEnsureModel behaves like EnsureModel but discards the error and
// returns nil instead, for callers that treat any failure as "not found".
func SilentEnsureModel(uncertain, model Model) Model {
	if obj, err := EnsureModel(uncertain, model); err == nil {
		return obj
	}
	return nil
}
// NewMismatchError builds the standard error reported when a database
// entry's concrete type differs from the model type the caller expected.
func NewMismatchError(got, expected interface{}) error {
	return fmt.Errorf(
		"database: entry (%T) does not match expected model (%T)",
		got, expected,
	)
}

108
database/model_test.go Normal file
View file

@ -0,0 +1,108 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"testing"
datastore "github.com/ipfs/go-datastore"
)
// TestingModel is a minimal Model implementation used by the database tests.
type TestingModel struct {
	Base
	// Name and Value carry arbitrary test payload to verify round trips.
	Name  string
	Value string
}

// testingModel is the nil prototype used for registry and type lookups.
var testingModel *TestingModel

// init registers TestingModel so NewModel/EnsureModel can construct it.
func init() {
	RegisterModel(testingModel, func() Model { return new(TestingModel) })
}
// Create stores the model directly in the Tests namespace under name.
func (m *TestingModel) Create(name string) error {
	return m.CreateObject(&Tests, name, m)
}
// CreateInNamespace creates the model under the given sub-namespace of the
// Tests namespace; an empty namespace creates it directly under Tests.
func (m *TestingModel) CreateInNamespace(namespace string, name string) error {
	// local identifier fixed: was misspelled "testsNamescace"
	testsNamespace := Tests.ChildString(namespace)
	return m.CreateObject(&testsNamespace, name, m)
}
// CreateWithTypeName creates the model under a fully custom key of the form
// /Tests/<namespace>/<typeName>:<name>, bypassing CreateObject so the type
// segment can differ from the Go type name.
func (m *TestingModel) CreateWithTypeName(namespace string, typeName string, name string) error {
	customNamespace := Tests.ChildString(namespace).ChildString(typeName).Instance(name)
	m.dbKey = &customNamespace
	// announce the creation to subscribers, as CreateObject would
	handleCreateSubscriptions(m)
	return Create(*m.dbKey, m)
}
// Save persists the current state of the model under its existing key.
func (m *TestingModel) Save() error {
	return m.SaveObject(m)
}

// GetTestingModel fetches the named TestingModel from the Tests namespace.
func GetTestingModel(name string) (*TestingModel, error) {
	return GetTestingModelFromNamespace(&Tests, name)
}
// GetTestingModelFromNamespace fetches the TestingModel with the given name
// from the given namespace and asserts it to its concrete type.
func GetTestingModelFromNamespace(namespace *datastore.Key, name string) (*TestingModel, error) {
	object, err := GetAndEnsureModel(namespace, name, testingModel)
	if err != nil {
		return nil, err
	}
	if model, ok := object.(*TestingModel); ok {
		return model, nil
	}
	return nil, NewMismatchError(object, testingModel)
}
// TestModel exercises the full model lifecycle: create, get, save, delete.
func TestModel(t *testing.T) {
	// create
	m := TestingModel{
		Name:  "a",
		Value: "b",
	}
	err := m.Create("")
	if err != nil {
		// Fatalf (was Errorf): without a key, every later step would cascade
		t.Fatalf("database test: could not create object: %s", err)
	}

	// get
	o, err := GetTestingModel(m.dbKey.Name())
	if err != nil {
		// Fatalf (was Errorf): o is nil on error, dereferencing it below
		// would panic the test binary
		t.Fatalf("database test: failed to get model: %s (%s)", err, m.dbKey.Name())
	}

	// check fetched object
	if o.Name != "a" || o.Value != "b" {
		t.Errorf("database test: values do not match: got Name=%s and Value=%s", o.Name, o.Value)
	}

	// save
	o.Value = "c"
	err = o.Save()
	if err != nil {
		t.Errorf("database test: could not save object: %s", err)
	}

	// delete
	err = o.Delete()
	if err != nil {
		t.Errorf("database test: could not delete object: %s", err)
	}
}

61
database/namespaces.go Normal file
View file

@ -0,0 +1,61 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import datastore "github.com/ipfs/go-datastore"
// All database keys live under one of the top-level namespaces declared
// below; they are initialized once and must not be mutated at runtime.
var (
	// Persistent data that is fetched or gathered, entries may be deleted
	Cache                 = datastore.NewKey("/Cache")
	DNSCache              = Cache.ChildString("Dns")
	IntelCache            = Cache.ChildString("Intel")
	FileInfoCache         = Cache.ChildString("FileInfo")
	ProfileCache          = Cache.ChildString("Profile")
	IPInfoCache           = Cache.ChildString("IPInfo")
	CertCache             = Cache.ChildString("Cert")
	CARevocationInfoCache = Cache.ChildString("CARevocationInfo")

	// Volatile, in-memory (recommended) namespace for storing runtime information, cleans itself
	Run                = datastore.NewKey("/Run")
	Processes          = Run.ChildString("Processes")
	OrphanedConnection = Run.ChildString("OrphanedConnections")
	OrphanedLink       = Run.ChildString("OrphanedLinks")
	Api                = Run.ChildString("Api")
	ApiSessions        = Api.ChildString("ApiSessions")

	// Namespace for current device, will be mounted into /Devices/[device]
	Me = datastore.NewKey("/Me")

	// Holds data of all Devices
	Devices = datastore.NewKey("/Devices")

	// Holds persistent data
	Data     = datastore.NewKey("/Data")
	Profiles = Data.ChildString("Profiles")

	// Holds data distributed by the System (coming from the Community and Devs)
	Dist         = datastore.NewKey("/Dist")
	DistProfiles = Dist.ChildString("Profiles")
	DistUpdates  = Dist.ChildString("Updates")

	// Holds data issued by company
	Company         = datastore.NewKey("/Company")
	CompanyProfiles = Company.ChildString("Profiles")
	CompanyUpdates  = Company.ChildString("Updates")

	// Server

	// The Authority namespace is used by authoritative servers (Safing or Company) to store data (Intel, Profiles, ...) to be served to clients
	// NOTE(review): the "Athoritative*" identifiers below misspell
	// "Authoritative"; they are exported, so renaming would break callers.
	Authority            = datastore.NewKey("/Authority")
	AthoritativeIntel    = Authority.ChildString("Intel")
	AthoritativeProfiles = Authority.ChildString("Profiles")

	// The Staging namespace is the same as the Authority namespace, but for rolling out new things first to a selected list of clients for testing
	AuthorityStaging            = datastore.NewKey("/Staging")
	AthoritativeStagingProfiles = AuthorityStaging.ChildString("Profiles")

	// Holds data of Apps
	Apps = datastore.NewKey("/Apps")

	// Test & Invalid namespace
	Tests   = datastore.NewKey("/Tests")
	Invalid = datastore.NewKey("/Invalid")
)

37
database/queries.go Normal file
View file

@ -0,0 +1,37 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"time"
"github.com/Safing/safing-core/formats/dsd"
"github.com/Safing/safing-core/log"
dsq "github.com/ipfs/go-datastore/query"
)
// init intentionally does nothing; the debug dumper below is disabled.
func init() {
	// go dumper()
}

// dumper periodically (every 10s) queries all entries under /Run/Process and
// logs them. Debug helper only; it is never started (see init above).
func dumper() {
	for {
		time.Sleep(10 * time.Second)
		result, err := db.Query(dsq.Query{Prefix: "/Run/Process"})
		if err != nil {
			log.Warningf("Query failed: %s", err)
			continue
		}
		log.Infof("Dumping all processes:")
		for model, ok := result.NextSync(); ok; model, ok = result.NextSync() {
			// NOTE(review): this dumps the whole query Result, not its Value —
			// other call sites (e.g. api.Get) dump obj.Value; confirm intent.
			bytes, err := dsd.Dump(model, dsd.AUTO)
			if err != nil {
				log.Warningf("Error dumping: %s", err)
				continue
			}
			log.Info(string(bytes))
		}
		log.Infof("END")
	}
}

272
database/subscriptions.go Normal file
View file

@ -0,0 +1,272 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"fmt"
"strings"
"sync"
"github.com/Safing/safing-core/modules"
"github.com/Safing/safing-core/taskmanager"
"github.com/ipfs/go-datastore"
"github.com/tevino/abool"
)
// subscriptionModule is this component's handle in the module framework.
var subscriptionModule *modules.Module

// Global subscription registry; subLock guards it and all Subscription maps.
var subscriptions []*Subscription
var subLock sync.Mutex

// Channels on which database operations are announced to the worker.
var databaseUpdate chan Model
var databaseCreate chan Model
var databaseDelete chan *datastore.Key

// Signals coordinating the background worker (see run and work below).
var workIsWaiting chan *struct{}
var workIsWaitingFlag *abool.AtomicBool
var forceProcessing chan *struct{}

// Subscription delivers create/update/delete events for database keys that
// match one of its registered patterns (see Subscribe for the forms).
type Subscription struct {
	typeAndLocation map[string]bool  // exact "/Path/Type" matches
	exactObject     map[string]bool  // exact "/Path/Type:name" matches
	children        map[string]uint8 // key prefix -> max depth (0xFF = unlimited)
	Created         chan Model
	Updated         chan Model
	Deleted         chan *datastore.Key
}
// NewSubscription allocates a Subscription with buffered event channels and
// registers it globally so database events are delivered to it.
func NewSubscription() *Subscription {
	sub := &Subscription{
		typeAndLocation: make(map[string]bool),
		exactObject:     make(map[string]bool),
		children:        make(map[string]uint8),
		Created:         make(chan Model, 128),
		Updated:         make(chan Model, 128),
		Deleted:         make(chan *datastore.Key, 128),
	}
	subLock.Lock()
	subscriptions = append(subscriptions, sub)
	subLock.Unlock()
	return sub
}
// Subscribe registers interest in database keys matching subKey. The form is
// chosen by the last path segment:
//   - trailing "/"            → all children, unlimited depth
//   - trailing "*" run        → children, depth limited by the number of stars
//   - segment containing ":"  → one exact object
//   - anything else           → a type at a location
func (sub *Subscription) Subscribe(subKey string) {
	subLock.Lock()
	defer subLock.Unlock()
	namespaces := strings.Split(subKey, "/")[1:]
	lastSpace := ""
	if len(namespaces) != 0 {
		lastSpace = namespaces[len(namespaces)-1]
	}
	switch {
	case lastSpace == "":
		// save key without leading "/"
		// save with depth 255 to get all
		sub.children[strings.Trim(subKey, "/")] = 0xFF
	case strings.HasPrefix(lastSpace, "*"):
		// save key without leading or trailing "/" or "*"
		// save full wanted depth - this makes comparison easier
		// (stars plus prefix namespace count; inverse in Subscriptions())
		sub.children[strings.Trim(subKey, "/*")] = uint8(len(lastSpace) + len(namespaces) - 1)
	case strings.Contains(lastSpace, ":"):
		sub.exactObject[subKey] = true
	default:
		sub.typeAndLocation[subKey] = true
	}
}
// Unsubscribe removes a subscription previously added with Subscribe. The
// subKey must be given in the same form as when subscribing, since it is
// normalized with the same rules to locate the stored entry.
func (sub *Subscription) Unsubscribe(subKey string) {
	subLock.Lock()
	defer subLock.Unlock()

	parts := strings.Split(subKey, "/")[1:]
	last := ""
	if len(parts) > 0 {
		last = parts[len(parts)-1]
	}

	switch {
	case last == "":
		// unlimited-depth children subscription
		delete(sub.children, strings.Trim(subKey, "/"))
	case strings.HasPrefix(last, "*"):
		// depth-limited children subscription
		delete(sub.children, strings.Trim(subKey, "/*"))
	case strings.Contains(last, ":"):
		// exact object subscription
		delete(sub.exactObject, subKey)
	default:
		// type-and-location subscription
		delete(sub.typeAndLocation, subKey)
	}
}
// Destroy removes the subscription from the global registry and closes its
// channels; receivers observe the closed channels as their termination
// signal. The subscription must not be used afterwards.
//
// Improvements: the subscription is identified by pointer identity instead
// of comparing channel values, and the slice removal is done inline rather
// than via a deferred closure (both run under subLock either way).
func (sub *Subscription) Destroy() {
	subLock.Lock()
	defer subLock.Unlock()
	for i, registered := range subscriptions {
		if registered == sub {
			// remove first, then close, all while holding the lock, so no
			// publisher can send on a closed channel
			subscriptions = append(subscriptions[:i], subscriptions[i+1:]...)
			close(sub.Created)
			close(sub.Updated)
			close(sub.Deleted)
			return
		}
	}
}
// Subscriptions renders all active subscriptions of this Subscription back
// into their string form (the inverse of Subscribe's parsing).
func (sub *Subscription) Subscriptions() *[]string {
	rendered := make([]string, 0, len(sub.exactObject)+len(sub.typeAndLocation)+len(sub.children))
	for key := range sub.exactObject {
		rendered = append(rendered, key)
	}
	for key := range sub.typeAndLocation {
		rendered = append(rendered, key)
	}
	for key, depth := range sub.children {
		if depth == 0xFF {
			// unlimited depth renders as a trailing slash
			rendered = append(rendered, "/"+key+"/")
		} else {
			// limited depth renders as one trailing star per level
			stars := int(depth) - len(strings.Split(key, "/"))
			rendered = append(rendered, "/"+key+"/"+strings.Repeat("*", stars))
		}
	}
	return &rendered
}
// String implements fmt.Stringer, listing all active subscription patterns.
func (sub *Subscription) String() string {
	return fmt.Sprintf("<Subscription [%s]>", strings.Join(*sub.Subscriptions(), " "))
}
// send forwards one event to the matching channel: a nil model signals a
// deletion, otherwise created selects between the Created and Updated feeds.
func (sub *Subscription) send(key *datastore.Key, model Model, created bool) {
	switch {
	case model == nil:
		sub.Deleted <- key
	case created:
		sub.Created <- model
	default:
		sub.Updated <- model
	}
}
// process delivers one database event to every matching subscription. It is
// called by the worker goroutine with the event's key, the model (nil for
// deletions) and whether the event was a creation.
func process(key *datastore.Key, model Model, created bool) {
	subLock.Lock()
	defer subLock.Unlock()
	stringRep := key.String()
	// "/Comedy/MontyPython/Actor:JohnCleese"
	typeAndLocation := key.Path().String()
	// "/Comedy/MontyPython/Actor"
	namespaces := key.Namespaces()
	// ["Comedy", "MontyPython", "Actor:JohnCleese"]
	depth := uint8(len(namespaces))
	// 3
subscriptionLoop:
	for _, sub := range subscriptions {
		// exact object match
		if _, ok := sub.exactObject[stringRep]; ok {
			sub.send(key, model, created)
			continue subscriptionLoop
		}
		// type-at-location match
		if _, ok := sub.typeAndLocation[typeAndLocation]; ok {
			sub.send(key, model, created)
			continue subscriptionLoop
		}
		// children match: try every prefix of the key against the stored
		// depth-limited prefixes
		for i := 0; i < len(namespaces); i++ {
			if subscribedDepth, ok := sub.children[strings.Join(namespaces[:i], "/")]; ok {
				if subscribedDepth >= depth {
					sub.send(key, model, created)
					continue subscriptionLoop
				}
			}
		}
	}
	// NOTE(review): sends happen while subLock is held; a full subscription
	// channel (buffer 128) would block all event processing — confirm that
	// consumers drain promptly.
}
// init wires the module, the global event channels, and starts the worker.
func init() {
	subscriptionModule = modules.Register("Database:Subscriptions", 128)
	subscriptions = make([]*Subscription, 0)
	subLock = sync.Mutex{}
	databaseUpdate = make(chan Model, 32)
	databaseCreate = make(chan Model, 32)
	databaseDelete = make(chan *datastore.Key, 32)
	// unbuffered: used for direct handoff between producers and the worker
	workIsWaiting = make(chan *struct{}, 0)
	workIsWaitingFlag = abool.NewBool(false)
	forceProcessing = make(chan *struct{}, 0)
	go run()
}
// run is the worker loop: it processes queued events whenever signalled via
// workIsWaiting and exits when the module is stopped.
func run() {
	for {
		select {
		case <-subscriptionModule.Stop:
			subscriptionModule.StopComplete()
			return
		case <-workIsWaiting:
			work()
		}
	}
}
// work drains all queued create/update/delete events and distributes them to
// subscribers. It first waits for a micro-task slot from the task manager,
// unless a producer forces immediate processing because a queue is full.
func work() {
	// allow a new wake-up signal to be raised once this run is done
	defer workIsWaitingFlag.UnSet()
	// wait for scheduling or a forced start
	select {
	case <-taskmanager.StartMediumPriorityMicroTask():
		defer taskmanager.EndMicroTask()
	case <-forceProcessing:
	}
	// drain all three queues without blocking; return when they are empty
	for {
		select {
		case model := <-databaseCreate:
			process(model.GetKey(), model, true)
		case model := <-databaseUpdate:
			process(model.GetKey(), model, false)
		case key := <-databaseDelete:
			process(key, nil, false)
		default:
			return
		}
	}
}
// handleCreateSubscriptions queues a created model for distribution. If the
// queue is full, the worker is forced to start before queueing again.
func handleCreateSubscriptions(model Model) {
	select {
	case databaseCreate <- model:
	default:
		// NOTE(review): this send blocks until work() selects on
		// forceProcessing — confirm the worker cannot be stuck elsewhere.
		forceProcessing <- nil
		databaseCreate <- model
	}
	// wake the worker at most once per processing round
	if workIsWaitingFlag.SetToIf(false, true) {
		workIsWaiting <- nil
	}
}

// handleUpdateSubscriptions queues an updated model for distribution; same
// overflow strategy as handleCreateSubscriptions.
func handleUpdateSubscriptions(model Model) {
	select {
	case databaseUpdate <- model:
	default:
		forceProcessing <- nil
		databaseUpdate <- model
	}
	if workIsWaitingFlag.SetToIf(false, true) {
		workIsWaiting <- nil
	}
}

// handleDeleteSubscriptions queues a deleted key for distribution; same
// overflow strategy as handleCreateSubscriptions.
func handleDeleteSubscriptions(key *datastore.Key) {
	select {
	case databaseDelete <- key:
	default:
		forceProcessing <- nil
		databaseDelete <- key
	}
	if workIsWaitingFlag.SetToIf(false, true) {
		workIsWaiting <- nil
	}
}

View file

@ -0,0 +1,103 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"strconv"
"strings"
"sync"
"testing"
)
// subTestWg synchronizes the test with its consumer goroutine.
var subTestWg sync.WaitGroup

// waitForSubs consumes created/updated events from sub and expects models
// named "sub1".."sub<highest>" to arrive in exactly that order. Models whose
// name does not start with "sub" are ignored (other tests may interfere).
func waitForSubs(t *testing.T, sub *Subscription, highest int) {
	defer subTestWg.Done()
	expecting := 1
	var subbedModel Model
forLoop:
	for {
		select {
		case subbedModel = <-sub.Created:
		case subbedModel = <-sub.Updated:
		}
		t.Logf("got model from subscription: %s", subbedModel.GetKey().String())
		if !strings.HasPrefix(subbedModel.GetKey().Name(), "sub") {
			// not a model that we use for testing, other tests might be interfering
			continue forLoop
		}
		number, err := strconv.Atoi(strings.TrimPrefix(subbedModel.GetKey().Name(), "sub"))
		if err != nil || number != expecting {
			t.Errorf("test subscription: got unexpected model %s, expected sub%d", subbedModel.GetKey().String(), expecting)
			continue forLoop
		}
		if number == highest {
			// all expected models seen
			return
		}
		expecting++
	}
}
// TestSubscriptions verifies the subscription forms (unlimited children,
// depth-limited children, exact object / type-at-location) by creating
// models and checking — via waitForSubs — that exactly the expected ones are
// delivered in order. Models named "subX" must NOT arrive.
func TestSubscriptions(t *testing.T) {
	// create subscription
	sub := NewSubscription()
	// FIRST TEST: unlimited-depth children subscription
	subTestWg.Add(1)
	go waitForSubs(t, sub, 3)
	sub.Subscribe("/Tests/")
	t.Log(sub.String())
	(&(TestingModel{})).CreateInNamespace("", "sub1")
	(&(TestingModel{})).CreateInNamespace("A", "sub2")
	(&(TestingModel{})).CreateInNamespace("A/B/C/D/E", "sub3")
	subTestWg.Wait()
	// SECOND TEST: depth-limited children subscription (four levels below A)
	subTestWg.Add(1)
	go waitForSubs(t, sub, 3)
	sub.Unsubscribe("/Tests/")
	sub.Subscribe("/Tests/A/****")
	t.Log(sub.String())
	(&(TestingModel{})).CreateInNamespace("", "subX")
	(&(TestingModel{})).CreateInNamespace("A", "sub1")
	(&(TestingModel{})).CreateInNamespace("A/B/C/D", "sub2")
	(&(TestingModel{})).CreateInNamespace("A/B/C/D/E", "subX")
	(&(TestingModel{})).CreateInNamespace("A", "sub3")
	subTestWg.Wait()
	// THIRD TEST: exact-object and type-at-location subscriptions
	subTestWg.Add(1)
	go waitForSubs(t, sub, 3)
	sub.Unsubscribe("/Tests/A/****")
	sub.Subscribe("/Tests/TestingModel:sub1")
	sub.Subscribe("/Tests/TestingModel:sub1/TestingModel")
	t.Log(sub.String())
	(&(TestingModel{})).CreateInNamespace("", "sub1")
	(&(TestingModel{})).CreateInNamespace("", "subX")
	(&(TestingModel{})).CreateInNamespace("TestingModel:sub1", "sub2")
	(&(TestingModel{})).CreateInNamespace("TestingModel:sub1/A", "subX")
	(&(TestingModel{})).CreateInNamespace("TestingModel:sub1", "sub3")
	subTestWg.Wait()
	// FINAL STUFF: events after Destroy must not be delivered anywhere
	model := &TestingModel{}
	model.CreateInNamespace("Invalid", "subX")
	model.Save()
	sub.Destroy()
	// time.Sleep(1 * time.Second)
	// pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
}

17
database/wrapper.go Normal file
View file

@ -0,0 +1,17 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"github.com/ipfs/go-datastore"
"github.com/Safing/safing-core/database/dbutils"
)
// NewWrapper wraps raw serialized data under the given key so it can travel
// through the database as a Model; see dbutils.Wrapper.
func NewWrapper(key *datastore.Key, data []byte) (*dbutils.Wrapper, error) {
	return dbutils.NewWrapper(key, data)
}

// DumpModel serializes uncertain (a model, or an already-wrapped object)
// into bytes using the given dsd storage type.
func DumpModel(uncertain interface{}, storageType uint8) ([]byte, error) {
	return dbutils.DumpModel(uncertain, storageType)
}

68
database/wrapper_test.go Normal file
View file

@ -0,0 +1,68 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package database
import (
"testing"
"github.com/Safing/safing-core/formats/dsd"
)
// TestWrapper checks that models survive a dump → wrap → unwrap round trip,
// via both the silent and the verbose unwrapping paths.
//
// Improvements: the first subject no longer shadows the builtin "new", and
// failures use t.Fatal/t.Fatalf instead of panicking the test binary.
func TestWrapper(t *testing.T) {
	// create models
	subject := &TestingModel{
		Name:  "a",
		Value: "b",
	}
	subjectTwo := &TestingModel{
		Name:  "c",
		Value: "d",
	}

	// dump
	bytes, err := DumpModel(subject, dsd.JSON)
	if err != nil {
		t.Fatalf("failed to dump model: %s", err)
	}
	bytesTwo, err := DumpModel(subjectTwo, dsd.JSON)
	if err != nil {
		t.Fatalf("failed to dump model: %s", err)
	}

	// wrap
	wrapped, err := NewWrapper(nil, bytes)
	if err != nil {
		t.Fatalf("failed to wrap data: %s", err)
	}
	wrappedTwo, err := NewWrapper(nil, bytesTwo)
	if err != nil {
		t.Fatalf("failed to wrap data: %s", err)
	}

	// model definition for unwrapping (nil prototype, used for type lookup)
	var model *TestingModel

	// silent unwrap
	myModel, ok := SilentEnsureModel(wrapped, model).(*TestingModel)
	if !ok {
		t.Fatal("received model does not match expected model")
	}
	if myModel.Name != "a" || myModel.Value != "b" {
		t.Fatal("model value mismatch")
	}

	// verbose unwrap
	genericModel, err := EnsureModel(wrappedTwo, model)
	if err != nil {
		t.Fatalf("failed to ensure model: %s", err)
	}
	myModelTwo, ok := genericModel.(*TestingModel)
	if !ok {
		t.Fatal("received model does not match expected model")
	}
	if myModelTwo.Name != "c" || myModelTwo.Value != "d" {
		t.Fatal("model value mismatch")
	}
}

123
formats/dsd/dsd.go Normal file
View file

@ -0,0 +1,123 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package dsd
// dynamic structured data
// check here for some benchmarks: https://github.com/alecthomas/go_serialization_benchmarks
import (
"encoding/json"
"errors"
"fmt"
"github.com/pkg/bson"
"github.com/Safing/safing-core/formats/varint"
)
// define types
// Each format constant doubles as the varint-encoded type prefix of a dsd
// byte stream; the values are the ASCII codes noted alongside.
const (
	AUTO   = 0
	STRING = 83 // S
	BYTES  = 88 // X
	JSON   = 74 // J
	BSON   = 66 // B
	// MSGP
)

// define errors
var errNoMoreSpace = errors.New("dsd: no more space left after reading dsd type")
var errUnknownType = errors.New("dsd: tried to unpack unknown type")         // NOTE(review): currently unreferenced in this file
var errNotImplemented = errors.New("dsd: this type is not yet implemented") // NOTE(review): currently unreferenced in this file
// Load decodes a dsd byte stream: the leading varint selects the storage
// format, the remainder is the payload. For STRING and BYTES the payload is
// returned directly; for JSON and BSON it is unmarshalled into t, which is
// then returned. Fails with errNoMoreSpace when the payload is missing.
func Load(data []byte, t interface{}) (interface{}, error) {
	if len(data) < 2 {
		return nil, errNoMoreSpace
	}
	format, read, err := varint.Unpack8(data)
	if err != nil {
		return nil, err
	}
	if len(data) <= read {
		// a format prefix without payload is useless
		return nil, errNoMoreSpace
	}
	switch format {
	case STRING:
		return string(data[read:]), nil
	case BYTES:
		r := data[read:]
		return &r, nil
	case JSON:
		err := json.Unmarshal(data[read:], t)
		if err != nil {
			return nil, err
		}
		return t, nil
	case BSON:
		err := bson.Unmarshal(data[read:], t)
		if err != nil {
			return nil, err
		}
		return t, nil
	// case MSGP:
	// 	err := t.UnmarshalMsg(data[read:])
	// 	if err != nil {
	// 		return nil, err
	// 	}
	// 	return t, nil
	default:
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028)
		return nil, fmt.Errorf("dsd: tried to load unknown type %d, data: %v", format, data)
	}
}
// Dump serializes t into a dsd byte stream: a varint-encoded format prefix
// followed by the payload. AUTO selects STRING for strings, BYTES for byte
// slices and JSON for everything else.
func Dump(t interface{}, format uint8) ([]byte, error) {
	if format == AUTO {
		switch t.(type) {
		case string:
			format = STRING
		case []byte:
			format = BYTES
		default:
			format = JSON
		}
	}

	// format is already a uint8; the previous redundant conversion was dropped
	f := varint.Pack8(format)
	var data []byte
	var err error
	switch format {
	case STRING:
		data = []byte(t.(string))
	case BYTES:
		data = t.([]byte)
	case JSON:
		// TODO: use SetEscapeHTML(false)
		data, err = json.Marshal(t)
		if err != nil {
			return nil, err
		}
	case BSON:
		data, err = bson.Marshal(t)
		if err != nil {
			return nil, err
		}
	// case MSGP:
	// 	data, err := t.MarshalMsg(nil)
	// 	if err != nil {
	// 		return nil, err
	// 	}
	default:
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) (staticcheck S1028)
		return nil, fmt.Errorf("dsd: tried to dump unknown type %d", format)
	}
	return append(f, data...), nil
}

214
formats/dsd/dsd_test.go Normal file
View file

@ -0,0 +1,214 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package dsd
import (
"bytes"
"reflect"
"testing"
)
//go:generate msgp

// SimpleTestStruct is a small round-trip subject for Dump/Load tests.
type SimpleTestStruct struct {
	S string
	B byte
}

// ComplexTestStruct covers every scalar width plus pointer, slice and map
// fields, to exercise the serialization formats thoroughly.
type ComplexTestStruct struct {
	I    int
	I8   int8
	I16  int16
	I32  int32
	I64  int64
	Ui   uint
	Ui8  uint8
	Ui16 uint16
	Ui32 uint32
	Ui64 uint64
	S    string
	Sp   *string
	Sa   []string
	Sap  *[]string
	B    byte
	Bp   *byte
	Ba   []byte
	Bap  *[]byte
	M    map[string]string
	Mp   *map[string]string
}
// TestConversion round-trips strings, byte slices and structs through Dump
// and Load and verifies the decoded values equal the originals.
//
// Improvements: the BYTES section's error labels previously said "(string)"
// (copy-paste), and the ~60 duplicated per-field comparisons are folded into
// one helper while keeping the precise per-field failure messages.
func TestConversion(t *testing.T) {
	// STRING
	d, err := Dump("abc", STRING)
	if err != nil {
		t.Fatalf("Dump error (string): %s", err)
	}
	s, err := Load(d, nil)
	if err != nil {
		t.Fatalf("Load error (string): %s", err)
	}
	ts := s.(string)
	if ts != "abc" {
		t.Errorf("Load (string): subject and loaded object are not equal (%v != %v)", ts, "abc")
	}

	// BYTES
	d, err = Dump([]byte("def"), BYTES)
	if err != nil {
		t.Fatalf("Dump error (bytes): %s", err)
	}
	b, err := Load(d, nil)
	if err != nil {
		t.Fatalf("Load error (bytes): %s", err)
	}
	tb := b.(*[]byte)
	if !bytes.Equal(*tb, []byte("def")) {
		t.Errorf("Load (bytes): subject and loaded object are not equal (%v != %v)", tb, []byte("def"))
	}

	// STRUCTS
	simpleSubject := SimpleTestStruct{
		"a",
		0x01,
	}
	bString := "b"
	var bBytes byte = 0x02
	complexSubject := ComplexTestStruct{
		-1,
		-2,
		-3,
		-4,
		-5,
		1,
		2,
		3,
		4,
		5,
		"a",
		&bString,
		[]string{"c", "d", "e"},
		&[]string{"f", "g", "h"},
		0x01,
		&bBytes,
		[]byte{0x03, 0x04, 0x05},
		&[]byte{0x05, 0x06, 0x07},
		map[string]string{
			"a": "b",
			"c": "d",
			"e": "f",
		},
		&map[string]string{
			"g": "h",
			"i": "j",
			"k": "l",
		},
	}

	// TODO: test all formats
	formats := []uint8{JSON}
	for _, format := range formats {
		// simple struct round trip
		b, err := Dump(&simpleSubject, format)
		if err != nil {
			t.Fatalf("Dump error (simple struct): %s", err)
		}
		o, err := Load(b, &SimpleTestStruct{})
		if err != nil {
			t.Fatalf("Load error (simple struct): %s", err)
		}
		if !reflect.DeepEqual(&simpleSubject, o) {
			t.Errorf("Load (simple struct): subject does not match loaded object")
			t.Errorf("Encoded: %v", string(b))
			t.Errorf("Compared: %v == %v", &simpleSubject, o)
		}

		// complex struct round trip
		b, err = Dump(&complexSubject, format)
		if err != nil {
			t.Fatalf("Dump error (complex struct): %s", err)
		}
		o, err = Load(b, &ComplexTestStruct{})
		if err != nil {
			t.Fatalf("Load error (complex struct): %s", err)
		}
		co := o.(*ComplexTestStruct)

		// field-by-field comparison keeps the precise per-field failure
		// messages; reflect.DeepEqual covers scalars, pointers, slices and
		// maps alike
		checkField := func(field string, expected, actual interface{}) {
			if !reflect.DeepEqual(expected, actual) {
				t.Errorf("Load (complex struct): struct.%s is not equal (%v != %v)", field, expected, actual)
			}
		}
		checkField("I", complexSubject.I, co.I)
		checkField("I8", complexSubject.I8, co.I8)
		checkField("I16", complexSubject.I16, co.I16)
		checkField("I32", complexSubject.I32, co.I32)
		checkField("I64", complexSubject.I64, co.I64)
		checkField("Ui", complexSubject.Ui, co.Ui)
		checkField("Ui8", complexSubject.Ui8, co.Ui8)
		checkField("Ui16", complexSubject.Ui16, co.Ui16)
		checkField("Ui32", complexSubject.Ui32, co.Ui32)
		checkField("Ui64", complexSubject.Ui64, co.Ui64)
		checkField("S", complexSubject.S, co.S)
		checkField("Sp", complexSubject.Sp, co.Sp)
		checkField("Sa", complexSubject.Sa, co.Sa)
		checkField("Sap", complexSubject.Sap, co.Sap)
		checkField("B", complexSubject.B, co.B)
		checkField("Bp", complexSubject.Bp, co.Bp)
		checkField("Ba", complexSubject.Ba, co.Ba)
		checkField("Bap", complexSubject.Bap, co.Bap)
		checkField("M", complexSubject.M, co.M)
		checkField("Mp", complexSubject.Mp, co.Mp)
	}
}

22
formats/varint/helpers.go Normal file
View file

@ -0,0 +1,22 @@
package varint
import "errors"
// PrependLength prepends the varint encoded length of the byte slice to itself.
// The result can be parsed back with GetNextBlock.
func PrependLength(data []byte) []byte {
	return append(Pack64(uint64(len(data))), data...)
}
// GetNextBlock extracts a varint-length-prefixed block from the beginning of
// data (the counterpart of PrependLength). It returns the block itself, the
// total number of bytes consumed (prefix plus payload), and an error if the
// slice is too short for the declared length.
func GetNextBlock(data []byte) ([]byte, int, error) {
	blockLen, prefixLen, err := Unpack64(data)
	if err != nil {
		return nil, 0, err
	}
	end := int(blockLen) + prefixLen
	if end > len(data) {
		return nil, 0, errors.New("varint: not enough data for given block length")
	}
	return data[prefixLen:end], end, nil
}

94
formats/varint/varint.go Normal file
View file

@ -0,0 +1,94 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package varint
import "errors"
import "encoding/binary"
// Pack8 packs a uint8 into a VarInt. Values below 128 fit into a single
// byte; larger values already carry the continuation bit in their high bit
// and are followed by the terminator byte 0x01.
func Pack8(n uint8) []byte {
	if n >= 128 {
		return []byte{n, 0x01}
	}
	return []byte{n}
}
// Pack16 packs a uint16 into a VarInt.
func Pack16(n uint16) []byte {
	buf := make([]byte, 3) // 16 bits need at most 3 varint bytes
	return buf[:binary.PutUvarint(buf, uint64(n))]
}
// Pack32 packs a uint32 into a VarInt.
func Pack32(n uint32) []byte {
	buf := make([]byte, 5) // 32 bits need at most 5 varint bytes
	return buf[:binary.PutUvarint(buf, uint64(n))]
}
// Pack64 packs a uint64 into a VarInt.
func Pack64(n uint64) []byte {
	buf := make([]byte, 10) // 64 bits need at most 10 varint bytes
	return buf[:binary.PutUvarint(buf, n)]
}
// Unpack8 unpacks a VarInt into a uint8. It returns the extracted int, how
// many bytes were used and an error.
//
// BUG FIX: the two-byte encoding previously reported 1 byte read although it
// consumed 2 bytes of the buffer, which would desynchronize any caller that
// advances by the returned count.
func Unpack8(blob []byte) (uint8, int, error) {
	if len(blob) < 1 {
		return 0, 0, errors.New("varint: buf has zero length")
	}
	if blob[0] < 128 {
		// single-byte encoding
		return blob[0], 1, nil
	}
	// two-byte encoding: the second byte must be exactly 0x01, otherwise the
	// encoded value does not fit into a uint8
	if len(blob) < 2 {
		return 0, 0, errors.New("varint: buf too small")
	}
	if blob[1] != 0x01 {
		return 0, 0, errors.New("varint: encoded integer greater than 255 (uint8)")
	}
	// values 128-255 encode as {n, 0x01}: n already carries the continuation
	// bit, so blob[0] is the value itself; two bytes were consumed
	return blob[0], 2, nil
}
// Unpack16 unpacks a VarInt into a uint16. It returns the extracted int, how
// many bytes were used and an error.
func Unpack16(blob []byte) (uint16, int, error) {
	value, read := binary.Uvarint(blob)
	switch {
	case read == 0:
		return 0, 0, errors.New("varint: buf too small")
	case read < 0:
		return 0, 0, errors.New("varint: encoded integer greater than 18446744073709551615 (uint64)")
	case value > 65535:
		return 0, 0, errors.New("varint: encoded integer greater than 65535 (uint16)")
	}
	return uint16(value), read, nil
}
// Unpack32 unpacks a VarInt into a uint32. It returns the extracted int, how
// many bytes were used and an error.
func Unpack32(blob []byte) (uint32, int, error) {
	value, read := binary.Uvarint(blob)
	switch {
	case read == 0:
		return 0, 0, errors.New("varint: buf too small")
	case read < 0:
		return 0, 0, errors.New("varint: encoded integer greater than 18446744073709551615 (uint64)")
	case value > 4294967295:
		return 0, 0, errors.New("varint: encoded integer greater than 4294967295 (uint32)")
	}
	return uint32(value), read, nil
}
// Unpack64 unpacks a VarInt into a uint64. It returns the extracted int, how
// many bytes were used and an error.
func Unpack64(blob []byte) (uint64, int, error) {
	value, read := binary.Uvarint(blob)
	if read == 0 {
		return 0, 0, errors.New("varint: buf too small")
	}
	if read < 0 {
		return 0, 0, errors.New("varint: encoded integer greater than 18446744073709551615 (uint64)")
	}
	return value, read, nil
}

View file

@ -0,0 +1,146 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package varint
import (
"bytes"
"testing"
)
// TestConversion drives Pack*/Unpack* through a table of known varint
// encoding pairs. intType records the smallest width that must handle the
// value; every wider width is exercised for the same entry.
func TestConversion(t *testing.T) {
	// t.Run("Basic Static Encoding and Decoding", func(t *testing.T) { ... }
	var subjects = []struct {
		intType uint8
		bytes   []byte
		integer uint64
	}{
		{8, []byte{0x00}, 0},
		{8, []byte{0x01}, 1},
		{8, []byte{0x7F}, 127},
		{8, []byte{0x80, 0x01}, 128},
		{8, []byte{0xFF, 0x01}, 255},
		{16, []byte{0x80, 0x02}, 256},
		{16, []byte{0xFF, 0x7F}, 16383},
		{16, []byte{0x80, 0x80, 0x01}, 16384},
		{16, []byte{0xFF, 0xFF, 0x03}, 65535},
		{32, []byte{0x80, 0x80, 0x04}, 65536},
		{32, []byte{0xFF, 0xFF, 0x7F}, 2097151},
		{32, []byte{0x80, 0x80, 0x80, 0x01}, 2097152},
		{32, []byte{0xFF, 0xFF, 0xFF, 0x07}, 16777215},
		{32, []byte{0x80, 0x80, 0x80, 0x08}, 16777216},
		{32, []byte{0xFF, 0xFF, 0xFF, 0x7F}, 268435455},
		{32, []byte{0x80, 0x80, 0x80, 0x80, 0x01}, 268435456},
		{32, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x0F}, 4294967295},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x10}, 4294967296},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 34359738367},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 34359738368},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1F}, 1099511627775},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x20}, 1099511627776},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 4398046511103},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 4398046511104},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x3F}, 281474976710655},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x40}, 281474976710656},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 562949953421311},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 562949953421312},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 72057594037927935},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 72057594037927936},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x7F}, 9223372036854775807},
		{64, []byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}, 9223372036854775808},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, 18446744073709551615},
	}
	for _, subject := range subjects {
		// the 64-bit round trip applies to every entry
		actualInteger, _, err := Unpack64(subject.bytes)
		if err != nil || actualInteger != subject.integer {
			t.Errorf("Unpack64 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
		}
		actualBytes := Pack64(subject.integer)
		if err != nil || !bytes.Equal(actualBytes, subject.bytes) {
			t.Errorf("Pack64 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
		}
		// narrower widths only for values that fit them
		if subject.intType <= 32 {
			actualInteger, _, err := Unpack32(subject.bytes)
			if err != nil || actualInteger != uint32(subject.integer) {
				t.Errorf("Unpack32 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
			}
			actualBytes := Pack32(uint32(subject.integer))
			if err != nil || !bytes.Equal(actualBytes, subject.bytes) {
				t.Errorf("Pack32 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
			}
		}
		if subject.intType <= 16 {
			actualInteger, _, err := Unpack16(subject.bytes)
			if err != nil || actualInteger != uint16(subject.integer) {
				t.Errorf("Unpack16 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
			}
			actualBytes := Pack16(uint16(subject.integer))
			if err != nil || !bytes.Equal(actualBytes, subject.bytes) {
				t.Errorf("Pack16 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
			}
		}
		if subject.intType <= 8 {
			actualInteger, _, err := Unpack8(subject.bytes)
			if err != nil || actualInteger != uint8(subject.integer) {
				t.Errorf("Unpack8 %d: expected %d, actual %d", subject.bytes, subject.integer, actualInteger)
			}
			actualBytes := Pack8(uint8(subject.integer))
			if err != nil || !bytes.Equal(actualBytes, subject.bytes) {
				t.Errorf("Pack8 %d: expected %d, actual %d", subject.integer, subject.bytes, actualBytes)
			}
		}
	}
}

// TestFails feeds truncated and overflowing encodings to the Unpack*
// functions and expects every applicable one of them to return an error.
func TestFails(t *testing.T) {
	// t.Run("Basic Static Encoding and Decoding", func(t *testing.T) { ... }
	var subjects = []struct {
		intType uint8
		bytes   []byte
	}{
		{32, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}},
		{64, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x02}},
		{64, []byte{0xFF}},
	}
	for _, subject := range subjects {
		if subject.intType == 64 {
			_, _, err := Unpack64(subject.bytes)
			if err == nil {
				t.Errorf("Unpack64 %d: expected error while unpacking.", subject.bytes)
			}
		}
		_, _, err := Unpack32(subject.bytes)
		if err == nil {
			t.Errorf("Unpack32 %d: expected error while unpacking.", subject.bytes)
		}
		_, _, err = Unpack16(subject.bytes)
		if err == nil {
			t.Errorf("Unpack16 %d: expected error while unpacking.", subject.bytes)
		}
		_, _, err = Unpack8(subject.bytes)
		if err == nil {
			t.Errorf("Unpack8 %d: expected error while unpacking.", subject.bytes)
		}
	}
}

58
log/formatting.go Normal file
View file

@ -0,0 +1,58 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
import "fmt"
// counter is a rolling per-line sequence number embedded in each
// formatted log line; it is only touched by formatLine.
var counter uint16

// maxCount is the value at which counter wraps back to 0 (keeps the
// printed counter at three digits).
const maxCount uint16 = 999
// String returns the fixed four-letter tag used for the severity in
// formatted log lines; unknown values map to "NONE".
func (s severity) String() string {
	tag, known := map[severity]string{
		TraceLevel:    "TRAC",
		DebugLevel:    "DEBU",
		InfoLevel:     "INFO",
		WarningLevel:  "WARN",
		ErrorLevel:    "ERRO",
		CriticalLevel: "CRIT",
	}[s]
	if !known {
		return "NONE"
	}
	return tag
}
func formatLine(line *logLine, useColor bool) string {
colorStart := ""
colorEnd := ""
if useColor {
colorStart = line.level.color()
colorEnd = endColor()
}
counter++
var fLine string
if line.line == 0 {
fLine = fmt.Sprintf("%s%s ? ▶ %s %03d%s %s", colorStart, line.time.Format("060102 15:04:05.000"), line.level.String(), counter, colorEnd, line.msg)
} else {
fLen := len(line.file)
fPartStart := fLen - 10
if fPartStart < 0 {
fPartStart = 0
}
fLine = fmt.Sprintf("%s%s %s:%03d ▶ %s %03d%s %s", colorStart, line.time.Format("060102 15:04:05.000"), line.file[fPartStart:], line.line, line.level.String(), counter, colorEnd, line.msg)
}
if counter >= maxCount {
counter = 0
}
return fLine
}

37
log/formatting_nix.go Normal file
View file

@ -0,0 +1,37 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
// color is currently unused: the constants below are untyped string
// constants and are returned directly from severity.color().
type color int

// ANSI terminal color escape sequences; unused colors are kept
// commented out for reference.
const (
	// colorBlack = "\033[30m"
	colorRed = "\033[31m"
	// colorGreen = "\033[32m"
	colorYellow = "\033[33m"
	colorBlue = "\033[34m"
	colorMagenta = "\033[35m"
	colorCyan = "\033[36m"
	// colorWhite = "\033[37m"
)
// color returns the ANSI color-start sequence for the severity; trace
// and unknown levels get no color (empty string).
func (s severity) color() string {
	return map[severity]string{
		DebugLevel:    colorCyan,
		InfoLevel:     colorBlue,
		WarningLevel:  colorYellow,
		ErrorLevel:    colorRed,
		CriticalLevel: colorMagenta,
	}[s]
}
// endColor returns the ANSI reset sequence that terminates any color
// started by severity.color.
func endColor() string {
	const ansiReset = "\033[0m"
	return ansiReset
}

163
log/input.go Normal file
View file

@ -0,0 +1,163 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
import (
"fmt"
"runtime"
"strings"
"sync/atomic"
"time"
)
// log_fastcheck cheaply decides whether a message at the given level
// could possibly be logged, letting callers skip expensive formatting.
// When per-file levels are active it always returns true, because the
// file-specific decision can only be made later in log(), once the
// caller's file is known.
func log_fastcheck(level severity) bool {
	if fileLevelsActive.IsSet() {
		return true
	}
	if uint32(level) < atomic.LoadUint32(logLevel) {
		return false
	}
	return true
}
// log is the common backend of all logging functions: it filters by
// level (globally or per file), captures the call site, and hands the
// line to the writer goroutine.
func log(level severity, msg string) {
	// check if level is enabled
	if !fileLevelsActive.IsSet() && uint32(level) < atomic.LoadUint32(logLevel) {
		return
	}
	// get time
	now := time.Now()
	// get file and line; skip=2 assumes log is always reached via one of
	// the exported wrappers (Trace, Debugf, ...).
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = ""
		line = 0
	} else {
		// strip the trailing ".go"
		if len(file) > 3 {
			file = file[:len(file)-3]
		} else {
			file = ""
		}
	}
	// check if level is enabled for file or generally
	// NOTE(review): fileLevels is read here without fileLevelsLock while
	// SetFileLevels may replace it concurrently — confirm this is safe.
	if fileLevelsActive.IsSet() {
		fileOnly := strings.Split(file, "/")
		sev, ok := fileLevels[fileOnly[len(fileOnly)-1]]
		if ok {
			if level < sev {
				return
			}
		} else {
			if uint32(level) < atomic.LoadUint32(logLevel) {
				return
			}
		}
	}
	// create log object
	log := &logLine{
		msg,
		level,
		now,
		file,
		line,
	}
	// send log to processing; if the buffer is full, ask the writer to
	// drain it first so this send cannot block forever
	select {
	case logBuffer <- log:
	default:
		forceEmptyingOfBuffer <- true
		logBuffer <- log
	}
	// wake up writer if necessary
	if logsWaitingFlag.SetToIf(false, true) {
		logsWaiting <- true
	}
}
// Tracef logs a formatted message at trace level.
func Tracef(things ...interface{}) {
	if !log_fastcheck(TraceLevel) {
		return
	}
	log(TraceLevel, fmt.Sprintf(things[0].(string), things[1:]...))
}

// Trace logs a message at trace level.
func Trace(msg string) {
	if !log_fastcheck(TraceLevel) {
		return
	}
	log(TraceLevel, msg)
}

// Debugf logs a formatted message at debug level.
func Debugf(things ...interface{}) {
	if !log_fastcheck(DebugLevel) {
		return
	}
	log(DebugLevel, fmt.Sprintf(things[0].(string), things[1:]...))
}

// Debug logs a message at debug level.
func Debug(msg string) {
	if !log_fastcheck(DebugLevel) {
		return
	}
	log(DebugLevel, msg)
}

// Infof logs a formatted message at info level.
func Infof(things ...interface{}) {
	if !log_fastcheck(InfoLevel) {
		return
	}
	log(InfoLevel, fmt.Sprintf(things[0].(string), things[1:]...))
}

// Info logs a message at info level.
func Info(msg string) {
	if !log_fastcheck(InfoLevel) {
		return
	}
	log(InfoLevel, msg)
}

// Warningf logs a formatted message at warning level.
func Warningf(things ...interface{}) {
	if !log_fastcheck(WarningLevel) {
		return
	}
	log(WarningLevel, fmt.Sprintf(things[0].(string), things[1:]...))
}

// Warning logs a message at warning level.
func Warning(msg string) {
	if !log_fastcheck(WarningLevel) {
		return
	}
	log(WarningLevel, msg)
}

// Errorf logs a formatted message at error level.
func Errorf(things ...interface{}) {
	if !log_fastcheck(ErrorLevel) {
		return
	}
	log(ErrorLevel, fmt.Sprintf(things[0].(string), things[1:]...))
}

// Error logs a message at error level.
func Error(msg string) {
	if !log_fastcheck(ErrorLevel) {
		return
	}
	log(ErrorLevel, msg)
}

// Criticalf logs a formatted message at critical level.
func Criticalf(things ...interface{}) {
	if !log_fastcheck(CriticalLevel) {
		return
	}
	log(CriticalLevel, fmt.Sprintf(things[0].(string), things[1:]...))
}

// Critical logs a message at critical level.
func Critical(msg string) {
	if !log_fastcheck(CriticalLevel) {
		return
	}
	log(CriticalLevel, msg)
}

// Testf prints a formatted message directly to stdout, bypassing the
// logging pipeline (for use in tests).
func Testf(things ...interface{}) {
	fmt.Printf(things[0].(string), things[1:]...)
}

// Test prints a message directly to stdout, bypassing the logging
// pipeline (for use in tests).
func Test(msg string) {
	fmt.Println(msg)
}

60
log/interface.go Normal file
View file

@ -0,0 +1,60 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
// Logger is the package-level LoggingInterface instance registered with
// the modules package (see init below).
var Logger *LoggingInterface

// LoggingInterface adapts the package-level logging functions to the
// modules.Logger interface, so the modules package can log without
// importing this package (avoids an import cycle).
type LoggingInterface struct {
}

// Tracef logs a formatted message at trace level.
func (*LoggingInterface) Tracef(things ...interface{}) {
	Tracef(things...)
}

// Trace logs a message at trace level.
func (*LoggingInterface) Trace(msg string) {
	Trace(msg)
}

// Debugf logs a formatted message at debug level.
func (*LoggingInterface) Debugf(things ...interface{}) {
	Debugf(things...)
}

// Debug logs a message at debug level.
func (*LoggingInterface) Debug(msg string) {
	Debug(msg)
}

// Infof logs a formatted message at info level.
func (*LoggingInterface) Infof(things ...interface{}) {
	Infof(things...)
}

// Info logs a message at info level.
func (*LoggingInterface) Info(msg string) {
	Info(msg)
}

// Warningf logs a formatted message at warning level.
func (*LoggingInterface) Warningf(things ...interface{}) {
	Warningf(things...)
}

// Warning logs a message at warning level.
func (*LoggingInterface) Warning(msg string) {
	Warning(msg)
}

// Errorf logs a formatted message at error level.
func (*LoggingInterface) Errorf(things ...interface{}) {
	Errorf(things...)
}

// Error logs a message at error level.
func (*LoggingInterface) Error(msg string) {
	Error(msg)
}

// Criticalf logs a formatted message at critical level.
func (*LoggingInterface) Criticalf(things ...interface{}) {
	Criticalf(things...)
}

// Critical logs a message at critical level.
func (*LoggingInterface) Critical(msg string) {
	Critical(msg)
}

// init creates the shared Logger instance.
func init() {
	Logger = &LoggingInterface{}
}

145
log/logging.go Normal file
View file

@ -0,0 +1,145 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
import (
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tevino/abool"
"github.com/Safing/safing-core/meta"
"github.com/Safing/safing-core/modules"
)
// concept
/*
- Logging function:
- check if file-based levelling enabled
- if yes, check if level is active on this file
- check if level is active
- send data to backend via big buffered channel
- Backend:
- wait until there is time for writing logs
- write logs
- configurable if logged to folder (buffer + rollingFileAppender) and/or console
- console: log everything above INFO to stderr
- Channel overbuffering protection:
- if buffer is full, trigger write
- Anti-Importing-Loop:
- everything imports logging
- logging is configured by main module and is supplied access to configuration and taskmanager
*/
// severity is the numeric log level of a single line.
type severity uint32

// logLine is one log message together with its metadata.
type logLine struct {
	msg   string    // the message to print
	level severity  // severity it was logged at
	time  time.Time // when it was logged
	file  string    // caller file (".go" suffix stripped); "" if unknown
	line  int       // caller line; 0 if unknown
}

// Available log levels, ascending in importance.
const (
	TraceLevel severity = 1
	DebugLevel severity = 2
	InfoLevel severity = 3
	WarningLevel severity = 4
	ErrorLevel severity = 5
	CriticalLevel severity = 6
)

var (
	module *modules.Module

	// buffered hand-off from log() to the writer goroutine
	logBuffer chan *logLine
	// signals the writer to drain logBuffer immediately (buffer full)
	forceEmptyingOfBuffer chan bool

	// global log level, accessed atomically via logLevel
	logLevelInt = uint32(3)
	logLevel = &logLevelInt

	// optional per-file log levels
	fileLevelsActive = abool.NewBool(false)
	fileLevels = make(map[string]severity)
	fileLevelsLock sync.Mutex

	// wake-up signal for the writer goroutine
	logsWaiting = make(chan bool, 1)
	logsWaitingFlag = abool.NewBool(false)
)
// SetFileLevels installs per-file log levels (keyed by bare file name
// without the ".go" suffix) and enables file-based filtering.
func SetFileLevels(levels map[string]severity) {
	fileLevelsLock.Lock()
	fileLevels = levels
	fileLevelsLock.Unlock()
	fileLevelsActive.Set()
}

// UnSetFileLevels disables file-based filtering again; the previously
// set map is kept but ignored.
func UnSetFileLevels() {
	fileLevelsActive.UnSet()
}

// SetLogLevel atomically sets the global log level.
func SetLogLevel(level severity) {
	atomic.StoreUint32(logLevel, uint32(level))
}
// ParseLevel converts a textual level name (case-insensitive) into its
// severity value; unknown names yield 0.
func ParseLevel(level string) severity {
	byName := map[string]severity{
		"trace":    TraceLevel,
		"debug":    DebugLevel,
		"info":     InfoLevel,
		"warning":  WarningLevel,
		"error":    ErrorLevel,
		"critical": CriticalLevel,
	}
	// a missing key returns the zero value, matching the old fallthrough
	return byName[strings.ToLower(level)]
}
// init registers the logging module, wires the Logger into the modules
// package, applies the configured log levels and starts the writer
// goroutine.
//
// Fixes: removed a dead, empty `var ()` declaration; the warning
// messages previously went through fmt.Printf without a trailing
// newline (and one without any format verbs, flagged by go vet).
func init() {
	module = modules.Register("Logging", 0)
	modules.RegisterLogger(Logger)

	logBuffer = make(chan *logLine, 8192)
	forceEmptyingOfBuffer = make(chan bool, 4)

	// apply the global log level from the configuration
	initialLogLevel := ParseLevel(meta.LogLevel())
	if initialLogLevel > 0 {
		atomic.StoreUint32(logLevel, uint32(initialLogLevel))
	} else {
		fmt.Println("WARNING: invalid log level, falling back to level info.")
	}

	// get and set file loglevels ("file1=debug,file2=trace")
	fileLogLevels := meta.FileLogLevels()
	if len(fileLogLevels) > 0 {
		newFileLevels := make(map[string]severity)
		for _, pair := range strings.Split(fileLogLevels, ",") {
			splitted := strings.Split(pair, "=")
			if len(splitted) != 2 {
				fmt.Printf("WARNING: invalid file log level \"%s\", ignoring\n", pair)
				continue
			}
			fileLevel := ParseLevel(splitted[1])
			if fileLevel == 0 {
				fmt.Printf("WARNING: invalid file log level \"%s\", ignoring\n", pair)
				continue
			}
			newFileLevels[splitted[0]] = fileLevel
		}
		SetFileLevels(newFileLevels)
	}

	go writer()
}

59
log/logging_test.go Normal file
View file

@ -0,0 +1,59 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
import (
"testing"
"time"
)
// test waiting
// TestLogging smoke-tests the logging pipeline: it exercises every
// level and wrapper and relies on the writer goroutine not to crash.
// It makes no assertions on the produced output.
func TestLogging(t *testing.T) {
	// skip
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// set levels (static random)
	SetLogLevel(WarningLevel)
	SetLogLevel(InfoLevel)
	SetLogLevel(ErrorLevel)
	SetLogLevel(DebugLevel)
	SetLogLevel(CriticalLevel)
	SetLogLevel(TraceLevel)
	// log
	Trace("Trace")
	Debug("Debug")
	Info("Info")
	Warning("Warning")
	Error("Error")
	Critical("Critical")
	// logf
	Tracef("Trace %s", "f")
	Debugf("Debug %s", "f")
	Infof("Info %s", "f")
	Warningf("Warning %s", "f")
	Errorf("Error %s", "f")
	Criticalf("Critical %s", "f")
	// play with levels: this Warning must be filtered out
	SetLogLevel(CriticalLevel)
	Warning("Warning")
	SetLogLevel(TraceLevel)
	// log invalid level (prints as "NONE")
	log(0xFF, "msg")
	// wait logs to be written
	time.Sleep(1 * time.Millisecond)
	// just for show
	UnSetFileLevels()
	// do not really shut down, we may need logging for other tests
	// ShutdownLogging()
}

71
log/output.go Normal file
View file

@ -0,0 +1,71 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package log
import (
"fmt"
"github.com/Safing/safing-core/taskmanager"
"time"
)
// writeLine prints a single formatted (and colored) log line to stdout.
// TODO: implement file logging and setting console/file logging
// TODO: use https://github.com/natefinch/lumberjack
func writeLine(line *logLine) {
	formatted := formatLine(line, true)
	fmt.Println(formatted)
}
// writer is the single goroutine that drains logBuffer and prints the
// lines. It writes only during low-priority task slots (or when the
// buffer is about to overflow) and flushes what it can on shutdown.
func writer() {
	var line *logLine
	startedTask := false
	for {
		// wait until logs need to be processed (or the module stops)
		select {
		case <-logsWaiting:
			logsWaitingFlag.UnSet()
		case <-module.Stop:
		}
		// wait for timeslot to log, or when buffer is full
		select {
		case <-taskmanager.StartVeryLowPriorityMicroTask():
			startedTask = true
		case <-forceEmptyingOfBuffer:
		case <-module.Stop:
			// drain remaining lines; after 10ms of silence, print a
			// final marker and confirm shutdown
			select {
			case line = <-logBuffer:
				writeLine(line)
			case <-time.After(10 * time.Millisecond):
				writeLine(&logLine{
					"===== LOGGING STOPPED =====",
					WarningLevel,
					time.Now(),
					"",
					0,
				})
				module.StopComplete()
				return
			}
		}
		// write all the logs!
	writeLoop:
		for {
			select {
			case line = <-logBuffer:
				writeLine(line)
			default:
				// buffer empty: release the microtask slot if we hold one
				if startedTask {
					taskmanager.EndMicroTask()
					startedTask = false
				}
				break writeLoop
			}
		}
	}
}

32
modules/logging.go Normal file
View file

@ -0,0 +1,32 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package modules
// logger is the registered logging backend; nil until RegisterLogger is
// called.
var logger Logger

// loggerRegistered receives once when a logger has been registered (see
// run() in modules.go, which waits on it).
var loggerRegistered chan struct{}

// Logger is the logging interface the modules package expects; it is
// implemented by the log package, which registers itself here to avoid
// an import cycle.
type Logger interface {
	Tracef(things ...interface{})
	Trace(msg string)
	Debugf(things ...interface{})
	Debug(msg string)
	Infof(things ...interface{})
	Info(msg string)
	Warningf(things ...interface{})
	Warning(msg string)
	Errorf(things ...interface{})
	Error(msg string)
	Criticalf(things ...interface{})
	Critical(msg string)
}

// RegisterLogger installs the logging backend. Only the first
// registration takes effect; later calls are silently ignored.
// NOTE(review): the nil-check is not synchronized — presumably only one
// logger ever registers (from the log package's init); confirm.
func RegisterLogger(newLogger Logger) {
	if logger == nil {
		logger = newLogger
		loggerRegistered <- struct{}{}
	}
}

// GetLogger returns the registered logger, or nil if none was set.
func GetLogger() Logger {
	return logger
}

147
modules/modules.go Normal file
View file

@ -0,0 +1,147 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package modules
import (
"container/list"
"os"
"time"
"github.com/tevino/abool"
)
// modules holds all registered modules, kept sorted by Order.
var modules *list.List

// addModule hands new modules from Register to the manager goroutine.
var addModule chan *Module

// GlobalShutdown is closed to initiate a full shutdown of all modules.
var GlobalShutdown chan struct{}

// loggingActive is true once a logger was registered (see run).
var loggingActive bool

// Module represents one registered subsystem and the channels used to
// coordinate its start and stop with the module manager.
type Module struct {
	Name string
	Order uint8

	// start coordination
	Start chan struct{}
	Active *abool.AtomicBool
	startComplete chan struct{}

	// stop coordination
	Stop chan struct{}
	Stopped *abool.AtomicBool
	stopComplete chan struct{}
}
// Register creates a new module with the given name and shutdown order
// and hands it to the manager goroutine for ordered insertion. Modules
// with a higher order are stopped first.
func Register(name string, order uint8) *Module {
	m := &Module{
		Name:          name,
		Order:         order,
		Start:         make(chan struct{}),
		Active:        abool.NewBool(true),
		startComplete: make(chan struct{}),
		Stop:          make(chan struct{}),
		Stopped:       abool.NewBool(false),
		stopComplete:  make(chan struct{}),
	}
	addModule <- m
	return m
}
// addToList inserts the module into the global list, keeping the list
// sorted by ascending Order (fullStop iterates from the back, so higher
// orders stop first).
// NOTE(review): the log message says "starting" although this function
// only registers the module — confirm intended wording.
func (module *Module) addToList() {
	if loggingActive {
		logger.Infof("Modules: starting %s", module.Name)
	}
	for e := modules.Back(); e != nil; e = e.Prev() {
		if module.Order > e.Value.(*Module).Order {
			modules.InsertAfter(module, e)
			return
		}
	}
	// smallest order so far (or empty list)
	modules.PushFront(module)
}
// stop signals the module to shut down and blocks until it confirms via
// StopComplete. The Stop channel is sent to in a loop because a module
// may consume more than one stop signal; a warning is logged every
// second while waiting.
func (module *Module) stop() {
	module.Active.UnSet()
	defer module.Stopped.Set()
	for {
		select {
		case module.Stop <- struct{}{}:
		case <-module.stopComplete:
			return
		case <-time.After(1 * time.Second):
			if loggingActive {
				logger.Warningf("Modules: waiting for %s to stop...", module.Name)
			}
		}
	}
}
// StopComplete is called by the module itself to confirm that it has
// finished shutting down; it unblocks a pending stop().
func (module *Module) StopComplete() {
	if loggingActive {
		logger.Warningf("Modules: stopped %s", module.Name)
	}
	module.stopComplete <- struct{}{}
}
// start signals the module to start and blocks until it confirms via
// StartComplete. Like stop, the Start channel is fed in a loop in case
// the module consumes multiple start signals.
func (module *Module) start() {
	module.Stopped.UnSet()
	defer module.Active.Set()
	for {
		select {
		case module.Start <- struct{}{}:
		case <-module.startComplete:
			return
		}
	}
}
// StartComplete is called by the module itself to confirm that it has
// finished starting; it unblocks a pending start().
func (module *Module) StartComplete() {
	if loggingActive {
		logger.Infof("Modules: starting %s", module.Name)
	}
	module.startComplete <- struct{}{}
}
// InitiateFullShutdown asks the module manager to stop all modules and
// exit the process (see run).
func InitiateFullShutdown() {
	close(GlobalShutdown)
}

// fullStop stops every still-active module, walking the list from the
// back so higher-order modules stop first.
func fullStop() {
	for e := modules.Back(); e != nil; e = e.Prev() {
		m := e.Value.(*Module)
		if m.Active.IsSet() {
			m.stop()
		}
	}
}
// run is the module manager goroutine: it waits (up to one second) for
// a logger to be registered, then accepts module registrations and the
// global shutdown signal.
func run() {
	// enable logging only if a logger shows up within one second
	select {
	case <-loggerRegistered:
		logger.Info("Modules: starting")
		loggingActive = true
	case <-time.After(1 * time.Second):
	}
	for {
		select {
		case <-GlobalShutdown:
			if loggingActive {
				logger.Warning("Modules: stopping")
			}
			fullStop()
			os.Exit(0)
		case m := <-addModule:
			m.addToList()
			// go m.start()
		}
	}
}
// init sets up the module registry state and starts the manager
// goroutine.
func init() {
	modules = list.New()
	addModule = make(chan *Module, 10)
	GlobalShutdown = make(chan struct{})
	loggerRegistered = make(chan struct{}, 1)
	loggingActive = false
	go run()
}

50
modules/modules_test.go Normal file
View file

@ -0,0 +1,50 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package modules
import (
"fmt"
"time"
)
// newTestModule registers a test module under the given name and spawns
// a goroutine that acknowledges its shutdown.
//
// BUGFIX: the module was previously registered under the hard-coded
// name "TestModule", ignoring the name parameter.
func newTestModule(name string, order uint8) {
	fmt.Printf("up %s\n", name)
	module := Register(name, order)
	go func() {
		<-module.Stop
		fmt.Printf("down %s\n", name)
		module.StopComplete()
	}()
}
// Example verifies that modules start in registration order and stop in
// reverse Order via the expected-output comparison below.
func Example() {
	// wait for logger registration timeout so run() proceeds silently
	time.Sleep(1010 * time.Millisecond)
	newTestModule("1", 1)
	newTestModule("4", 4)
	newTestModule("3", 3)
	newTestModule("2", 2)
	newTestModule("5", 5)
	InitiateFullShutdown()
	// give the shutdown a moment; os.Exit(0) ends the example
	time.Sleep(10 * time.Millisecond)
	// Output:
	// up 1
	// up 4
	// up 3
	// up 2
	// up 5
	// down 5
	// down 4
	// down 3
	// down 2
	// down 1
}

167
taskmanager/microtasks.go Normal file
View file

@ -0,0 +1,167 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package taskmanager
import (
"github.com/Safing/safing-core/modules"
"sync/atomic"
"time"
"github.com/tevino/abool"
)
// TODO: getting some errors when in nanosecond precision for tests:
// (1) panic: sync: WaitGroup is reused before previous Wait has returned - should theoretically not happen
// (2) sometimes there seems to some kind of race condition stuff, the test hangs and does not complete
var microTasksModule *modules.Module

// closedChannel is pre-closed and returned while shutting down so that
// callers waiting for clearance proceed immediately.
var closedChannel chan bool

// tasks counts currently running microtasks (accessed atomically).
var tasks *int32

// clearance channels; receiving from one grants permission to run
var mediumPriorityClearance chan bool
var lowPriorityClearance chan bool
var veryLowPriorityClearance chan bool

// signalling between task starters/finishers and the manager goroutine
var tasksDone chan bool
var tasksDoneFlag *abool.AtomicBool
var tasksWaiting chan bool
var tasksWaitingFlag *abool.AtomicBool
// StartMicroTask starts a new MicroTask. It will start immediately.
func StartMicroTask() {
	atomic.AddInt32(tasks, 1)
	tasksDoneFlag.UnSet()
}

// EndMicroTask MUST be always called when a MicroTask was previously started.
func EndMicroTask() {
	// if this was the last running task, signal the manager exactly once
	if atomic.AddInt32(tasks, -1) < 1 && tasksDoneFlag.SetToIf(false, true) {
		tasksDone <- true
	}
}
// newTaskIsWaiting signals the manager goroutine that a task is waiting
// for clearance.
func newTaskIsWaiting() {
	tasksWaiting <- true
}

// startPrioritizedMicroTask registers interest in running a microtask
// and returns the given clearance channel; during shutdown it returns a
// closed channel so callers proceed immediately. Shared implementation
// of the three Start*PriorityMicroTask functions, which previously
// triplicated this body.
func startPrioritizedMicroTask(clearance chan bool) chan bool {
	if !microTasksModule.Active.IsSet() {
		return closedChannel
	}
	if tasksWaitingFlag.SetToIf(false, true) {
		defer newTaskIsWaiting()
	}
	return clearance
}

// StartMediumPriorityMicroTask starts a new MicroTask (waiting its turn) if channel receives.
func StartMediumPriorityMicroTask() chan bool {
	return startPrioritizedMicroTask(mediumPriorityClearance)
}

// StartLowPriorityMicroTask starts a new MicroTask (waiting its turn) if channel receives.
func StartLowPriorityMicroTask() chan bool {
	return startPrioritizedMicroTask(lowPriorityClearance)
}

// StartVeryLowPriorityMicroTask starts a new MicroTask (waiting its turn) if channel receives.
func StartVeryLowPriorityMicroTask() chan bool {
	return startPrioritizedMicroTask(veryLowPriorityClearance)
}
// init sets up the microtask bookkeeping and starts the manager
// goroutine that hands out clearances in priority order.
func init() {
	microTasksModule = modules.Register("Taskmanager:MicroTasks", 3)
	closedChannel = make(chan bool, 0)
	close(closedChannel)
	var t int32 = 0
	tasks = &t
	mediumPriorityClearance = make(chan bool, 0)
	lowPriorityClearance = make(chan bool, 0)
	veryLowPriorityClearance = make(chan bool, 0)
	tasksDone = make(chan bool, 1)
	tasksDoneFlag = abool.NewBool(true)
	tasksWaiting = make(chan bool, 1)
	tasksWaitingFlag = abool.NewBool(false)
	timoutTimerDuration := 1 * time.Second
	// timoutTimer := time.NewTimer(timoutTimerDuration)
	go func() {
	microTaskManageLoop:
		for {
			// wait for an event to start new tasks
			if microTasksModule.Active.IsSet() {
				// reset timer
				// https://golang.org/pkg/time/#Timer.Reset
				// if !timoutTimer.Stop() {
				// <-timoutTimer.C
				// }
				// timoutTimer.Reset(timoutTimerDuration)
				// wait for event to start a new task
				select {
				case <-tasksWaiting:
					// only grant clearance when nothing is running
					if !tasksDoneFlag.IsSet() {
						continue microTaskManageLoop
					}
				case <-time.After(timoutTimerDuration):
				case <-tasksDone:
				case <-microTasksModule.Stop:
				}
			} else {
				// shutting down: execute tasks until none are waiting
				if !tasksWaitingFlag.IsSet() {
					// wait until tasks are finished
					if !tasksDoneFlag.IsSet() {
						<-tasksDone
					}
					// signal module completion
					microTasksModule.StopComplete()
					// exit
					return
				}
			}
			// start the next waiting task, highest priority first; if no
			// one is waiting, clear the waiting flag
			select {
			case mediumPriorityClearance <- true:
				StartMicroTask()
			default:
				select {
				case lowPriorityClearance <- true:
					StartMicroTask()
				default:
					select {
					case veryLowPriorityClearance <- true:
						StartMicroTask()
					default:
						tasksWaitingFlag.UnSet()
					}
				}
			}
		}
	}()
}

View file

@ -0,0 +1,180 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package taskmanager
import (
"strings"
"sync"
"testing"
"time"
)
// test waiting
// TestMicroTaskWaiting checks that a medium-priority task waits until
// the running high-priority task ends, by asserting the interleaving of
// outputs (timing-based, hence skipped in short mode).
func TestMicroTaskWaiting(t *testing.T) {
	// skip
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// init
	mtwWaitGroup := new(sync.WaitGroup)
	mtwOutputChannel := make(chan string, 100)
	mtwExpectedOutput := "123456"
	mtwSleepDuration := 10 * time.Millisecond
	// TEST
	mtwWaitGroup.Add(3)
	// High Priority - slot 1-5
	go func() {
		defer mtwWaitGroup.Done()
		StartMicroTask()
		mtwOutputChannel <- "1"
		time.Sleep(mtwSleepDuration * 5)
		mtwOutputChannel <- "2"
		EndMicroTask()
	}()
	time.Sleep(mtwSleepDuration * 2)
	// High Priority - slot 10-15
	go func() {
		defer mtwWaitGroup.Done()
		time.Sleep(mtwSleepDuration * 8)
		StartMicroTask()
		mtwOutputChannel <- "4"
		time.Sleep(mtwSleepDuration * 5)
		mtwOutputChannel <- "6"
		EndMicroTask()
	}()
	// Medium Priority - Waits at slot 3, should execute in slot 6-13
	go func() {
		defer mtwWaitGroup.Done()
		<-StartMediumPriorityMicroTask()
		mtwOutputChannel <- "3"
		time.Sleep(mtwSleepDuration * 7)
		mtwOutputChannel <- "5"
		EndMicroTask()
	}()
	// wait for test to finish
	mtwWaitGroup.Wait()
	// collect output (channel closed; "" marks exhaustion)
	close(mtwOutputChannel)
	completeOutput := ""
	for s := <-mtwOutputChannel; s != ""; s = <-mtwOutputChannel {
		completeOutput += s
	}
	// check if test succeeded
	if completeOutput != mtwExpectedOutput {
		t.Errorf("MicroTask waiting test failed, expected sequence %s, got %s", mtwExpectedOutput, completeOutput)
	}
}
// test ordering
// globals shared by the ordering test below
var mtoWaitGroup sync.WaitGroup
var mtoOutputChannel chan string
var mtoWaitCh chan bool // closed to release all testers at once

// mediumPrioTaskTester runs one medium-priority microtask that records
// "1" when it gets clearance.
func mediumPrioTaskTester() {
	defer mtoWaitGroup.Done()
	<-mtoWaitCh
	<-StartMediumPriorityMicroTask()
	mtoOutputChannel <- "1"
	time.Sleep(2 * time.Millisecond)
	EndMicroTask()
}

// lowPrioTaskTester runs one low-priority microtask that records "2".
func lowPrioTaskTester() {
	defer mtoWaitGroup.Done()
	<-mtoWaitCh
	<-StartLowPriorityMicroTask()
	mtoOutputChannel <- "2"
	time.Sleep(2 * time.Millisecond)
	EndMicroTask()
}

// veryLowPrioTaskTester runs one very-low-priority microtask that
// records "3".
func veryLowPrioTaskTester() {
	defer mtoWaitGroup.Done()
	<-mtoWaitCh
	<-StartVeryLowPriorityMicroTask()
	mtoOutputChannel <- "3"
	time.Sleep(2 * time.Millisecond)
	EndMicroTask()
}
// test
// TestMicroTaskOrdering releases 30 mixed-priority tasks at once and
// expects the priorities to be served in clusters (all "1"s before the
// "2"s before the "3"s). Timing-based and occasionally flaky, as the
// error message notes.
func TestMicroTaskOrdering(t *testing.T) {
	// skip
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// init
	mtoOutputChannel = make(chan string, 100)
	mtoWaitCh = make(chan bool, 0)
	// TEST
	mtoWaitGroup.Add(30)
	// kick off
	go mediumPrioTaskTester()
	go mediumPrioTaskTester()
	go lowPrioTaskTester()
	go lowPrioTaskTester()
	go veryLowPrioTaskTester()
	go veryLowPrioTaskTester()
	go lowPrioTaskTester()
	go veryLowPrioTaskTester()
	go mediumPrioTaskTester()
	go veryLowPrioTaskTester()
	go lowPrioTaskTester()
	go mediumPrioTaskTester()
	go veryLowPrioTaskTester()
	go mediumPrioTaskTester()
	go mediumPrioTaskTester()
	go lowPrioTaskTester()
	go mediumPrioTaskTester()
	go lowPrioTaskTester()
	go mediumPrioTaskTester()
	go veryLowPrioTaskTester()
	go veryLowPrioTaskTester()
	go lowPrioTaskTester()
	go mediumPrioTaskTester()
	go veryLowPrioTaskTester()
	go lowPrioTaskTester()
	go lowPrioTaskTester()
	go mediumPrioTaskTester()
	go veryLowPrioTaskTester()
	go lowPrioTaskTester()
	go veryLowPrioTaskTester()
	// wait for all goroutines to be ready
	time.Sleep(10 * time.Millisecond)
	// sync all goroutines
	close(mtoWaitCh)
	// wait for test to finish
	mtoWaitGroup.Wait()
	// collect output
	close(mtoOutputChannel)
	completeOutput := ""
	for s := <-mtoOutputChannel; s != ""; s = <-mtoOutputChannel {
		completeOutput += s
	}
	// check if test succeeded
	if !strings.Contains(completeOutput, "11111") || !strings.Contains(completeOutput, "22222") || !strings.Contains(completeOutput, "33333") {
		t.Errorf("MicroTask ordering test failed, output was %s. This happens occasionally, please run the test multiple times to verify", completeOutput)
	}
}

158
taskmanager/queuedtasks.go Normal file
View file

@ -0,0 +1,158 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package taskmanager
import (
"container/list"
"github.com/Safing/safing-core/modules"
"time"
"github.com/tevino/abool"
)
// Task is a unit of work that waits in a queue (or schedule) until the
// task manager grants it permission to run.
type Task struct {
	name string
	start chan bool // closed when the task may run
	started *abool.AtomicBool // set once started (or discarded)
	schedule *time.Time // optional execution time for scheduled tasks
}

// queue state, owned by the manager goroutine started in init
var taskQueue *list.List
var prioritizedTaskQueue *list.List
var addToQueue chan *Task
var addToPrioritizedQueue chan *Task
var addAsNextTask chan *Task
var finishedQueuedTask chan bool
var queuedTaskRunning *abool.AtomicBool
var getQueueLengthREQ chan bool
var getQueueLengthREP chan int
// newUnqeuedTask creates a task that is not yet attached to any queue.
// (The name keeps its historical typo; renaming would touch all callers.)
func newUnqeuedTask(name string) *Task {
	return &Task{
		name:     name,
		start:    make(chan bool),
		started:  abool.NewBool(false),
		schedule: nil,
	}
}
// NewQueuedTask creates a task and appends it to the normal queue.
func NewQueuedTask(name string) *Task {
	task := newUnqeuedTask(name)
	addToQueue <- task
	return task
}

// NewPrioritizedQueuedTask creates a task and appends it to the
// prioritized queue, which is always served before the normal queue.
func NewPrioritizedQueuedTask(name string) *Task {
	task := newUnqeuedTask(name)
	addToPrioritizedQueue <- task
	return task
}
// addToPrioritizedQueue re-queues the task with priority (used by the
// scheduler once a scheduled task becomes due).
func (t *Task) addToPrioritizedQueue() {
	addToPrioritizedQueue <- t
}

// WaitForStart returns the channel that is closed once the task may run.
func (t *Task) WaitForStart() chan bool {
	return t.start
}

// StartAnyway pushes the task to the very front of the prioritized queue.
func (t *Task) StartAnyway() {
	addAsNextTask <- t
}

// Done marks the task as finished. If it had already been started, the
// manager is told so it can fire the next task; if it never started,
// setting the flag makes the queue skip it later.
func (t *Task) Done() {
	alreadyStarted := !t.started.SetToIf(false, true)
	if alreadyStarted {
		finishedQueuedTask <- true
	}
}

// TotalQueuedTasks reports how many tasks are queued, counting a
// currently running one.
func TotalQueuedTasks() int {
	getQueueLengthREQ <- true
	return <-getQueueLengthREP
}

// checkQueueStatus fires the next task if none is currently running.
func checkQueueStatus() {
	if queuedTaskRunning.SetToIf(false, true) {
		finishedQueuedTask <- true
	}
}
// fireNextTask starts the next runnable task, preferring the
// prioritized queue. Tasks whose started flag is already set (Done was
// called before they ever ran) are discarded. If no task could be
// started, the queue is marked idle.
//
// BUGFIX: the previous implementation discarded the result of e.Next()
// and never reassigned e after Remove(e), so after discarding an
// already-started task it kept re-examining the same removed element —
// an infinite loop whenever more entries remained in the queue.
func fireNextTask() {
	for _, queue := range []*list.List{prioritizedTaskQueue, taskQueue} {
		for e := queue.Front(); e != nil; e = queue.Front() {
			queue.Remove(e)
			t := e.Value.(*Task)
			if t.started.SetToIf(false, true) {
				close(t.start)
				return
			}
		}
	}
	queuedTaskRunning.UnSet()
}
// init sets up the queue state and starts the manager goroutine that
// serializes all queue mutations.
func init() {
	module := modules.Register("Taskmanager:QueuedTasks", 3)
	taskQueue = list.New()
	prioritizedTaskQueue = list.New()
	addToQueue = make(chan *Task, 1)
	addToPrioritizedQueue = make(chan *Task, 1)
	addAsNextTask = make(chan *Task, 1)
	finishedQueuedTask = make(chan bool, 1)
	queuedTaskRunning = abool.NewBool(false)
	getQueueLengthREQ = make(chan bool, 1)
	getQueueLengthREP = make(chan int, 1)
	go func() {
		for {
			select {
			case <-module.Stop:
				// TODO: work off queue?
				module.StopComplete()
				return
			case <-getQueueLengthREQ:
				// TODO: maybe clean queues before replying
				// count the currently running task as well
				if queuedTaskRunning.IsSet() {
					getQueueLengthREP <- prioritizedTaskQueue.Len() + taskQueue.Len() + 1
				} else {
					getQueueLengthREP <- prioritizedTaskQueue.Len() + taskQueue.Len()
				}
			case t := <-addToQueue:
				taskQueue.PushBack(t)
				checkQueueStatus()
			case t := <-addToPrioritizedQueue:
				prioritizedTaskQueue.PushBack(t)
				checkQueueStatus()
			case t := <-addAsNextTask:
				prioritizedTaskQueue.PushFront(t)
				checkQueueStatus()
			case <-finishedQueuedTask:
				fireNextTask()
			}
		}
	}()
}

View file

@ -0,0 +1,112 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package taskmanager
import (
"sync"
"testing"
"time"
)
// test waiting
// globals
// globals shared by the queued-task test below
var qtWg sync.WaitGroup
var qtOutputChannel chan string
var qtSleepDuration time.Duration

// queuedTaskTester enqueues a normal task that records its label when
// it is allowed to run.
func queuedTaskTester(s string) {
	t := NewQueuedTask(s)
	go func() {
		<-t.WaitForStart()
		time.Sleep(qtSleepDuration * 2)
		qtOutputChannel <- s
		t.Done()
		qtWg.Done()
	}()
}

// prioritizedTastTester enqueues a prioritized task that records its
// label. (sic: "Tast" typo kept — renaming would break its callers.)
func prioritizedTastTester(s string) {
	t := NewPrioritizedQueuedTask(s)
	go func() {
		<-t.WaitForStart()
		time.Sleep(qtSleepDuration * 2)
		qtOutputChannel <- s
		t.Done()
		qtWg.Done()
	}()
}
// test
// TestQueuedTask interleaves normal and prioritized tasks and asserts
// both the execution order and the reported queue lengths.
// Timing-based, hence skipped in short mode.
func TestQueuedTask(t *testing.T) {
	// skip
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// init
	expectedOutput := "0123456789"
	qtSleepDuration = 10 * time.Millisecond
	qtOutputChannel = make(chan string, 100)
	qtWg.Add(10)
	// test queue length
	c := TotalQueuedTasks()
	if c != 0 {
		t.Errorf("Error in calculating Task Queue, expected 0, got %d", c)
	}
	// TEST
	queuedTaskTester("0")
	queuedTaskTester("1")
	queuedTaskTester("3")
	queuedTaskTester("4")
	queuedTaskTester("6")
	queuedTaskTester("7")
	queuedTaskTester("9")
	// test queue length
	c = TotalQueuedTasks()
	if c != 7 {
		t.Errorf("Error in calculating Task Queue, expected 7, got %d", c)
	}
	// inject prioritized tasks at the right moments
	time.Sleep(qtSleepDuration * 3)
	prioritizedTastTester("2")
	time.Sleep(qtSleepDuration * 6)
	prioritizedTastTester("5")
	time.Sleep(qtSleepDuration * 6)
	prioritizedTastTester("8")
	// test queue length
	c = TotalQueuedTasks()
	if c != 3 {
		t.Errorf("Error in calculating Task Queue, expected 3, got %d", c)
	}
	// time.Sleep(qtSleepDuration * 100)
	// panic("")
	// wait for test to finish
	qtWg.Wait()
	// test queue length
	c = TotalQueuedTasks()
	if c != 0 {
		t.Errorf("Error in calculating Task Queue, expected 0, got %d", c)
	}
	// collect output
	close(qtOutputChannel)
	completeOutput := ""
	for s := <-qtOutputChannel; s != ""; s = <-qtOutputChannel {
		completeOutput += s
	}
	// check if test succeeded
	if completeOutput != expectedOutput {
		t.Errorf("QueuedTask test failed, expected sequence %s, got %s", expectedOutput, completeOutput)
	}
}

View file

@ -0,0 +1,79 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package taskmanager
import (
"container/list"
"github.com/Safing/safing-core/modules"
"time"
)
// schedule state, owned by the manager goroutine started in init
var taskSchedule *list.List
var addToSchedule chan *Task

// waitForever never receives; used as the timer when the schedule is empty
var waitForever chan time.Time

var getScheduleLengthREQ chan bool
var getScheduleLengthREP chan int

// NewScheduledTask creates a task that becomes runnable (via the
// prioritized queue) at the given time.
func NewScheduledTask(name string, schedule time.Time) *Task {
	t := newUnqeuedTask(name)
	t.schedule = &schedule
	addToSchedule <- t
	return t
}

// TotalScheduledTasks reports how many tasks are waiting to run.
func TotalScheduledTasks() int {
	getScheduleLengthREQ <- true
	return <-getScheduleLengthREP
}
// addToSchedule inserts the task into the schedule list, which is kept
// sorted by ascending execution time (earliest at the front). Scans
// from the back since new tasks usually schedule latest.
func (t *Task) addToSchedule() {
	for e := taskSchedule.Back(); e != nil; e = e.Prev() {
		if t.schedule.After(*e.Value.(*Task).schedule) {
			taskSchedule.InsertAfter(t, e)
			return
		}
	}
	// earliest task so far (or empty schedule)
	taskSchedule.PushFront(t)
}
// waitUntilNextScheduledTask returns a channel that fires when the
// earliest scheduled task is due, or a channel that never fires when
// the schedule is empty.
func waitUntilNextScheduledTask() <-chan time.Time {
	if taskSchedule.Len() == 0 {
		return waitForever
	}
	// time.Until is the idiomatic form of schedule.Sub(time.Now())
	return time.After(time.Until(*taskSchedule.Front().Value.(*Task).schedule))
}
// init sets up the schedule state and starts the manager goroutine that
// moves due tasks into the prioritized queue.
func init() {
	module := modules.Register("Taskmanager:ScheduledTasks", 3)
	taskSchedule = list.New()
	addToSchedule = make(chan *Task, 1)
	waitForever = make(chan time.Time, 1)
	getScheduleLengthREQ = make(chan bool, 1)
	getScheduleLengthREP = make(chan int, 1)
	go func() {
		for {
			select {
			case <-module.Stop:
				module.StopComplete()
				return
			case <-getScheduleLengthREQ:
				// TODO: maybe clean queues before replying
				// NOTE(review): prioritizedTaskQueue is owned by the
				// queuedtasks manager goroutine — reading its Len here is
				// a data race, and looks like a copy-paste from the queued
				// length handler; confirm whether only taskSchedule.Len()
				// was intended.
				getScheduleLengthREP <- prioritizedTaskQueue.Len() + taskSchedule.Len()
			case t := <-addToSchedule:
				t.addToSchedule()
			case <-waitUntilNextScheduledTask():
				// earliest task is due: promote it to the prioritized queue
				e := taskSchedule.Front()
				t := e.Value.(*Task)
				t.addToPrioritizedQueue()
				taskSchedule.Remove(e)
			}
		}
	}()
}

View file

@ -0,0 +1,95 @@
// Copyright Safing ICS Technologies GmbH. Use of this source code is governed by the AGPL license that can be found in the LICENSE file.
package taskmanager
import (
"sync"
"testing"
"time"
)
// test waiting
// globals
// globals shared by the scheduled-task test below
var stWg sync.WaitGroup
var stOutputChannel chan string
var stSleepDuration time.Duration
var stWaitCh chan bool // closed to release all testers at once

// scheduledTaskTester schedules a task for the given time that records
// its label once it is allowed to run.
func scheduledTaskTester(s string, sched time.Time) {
	t := NewScheduledTask(s, sched)
	go func() {
		<-stWaitCh
		<-t.WaitForStart()
		time.Sleep(stSleepDuration)
		stOutputChannel <- s
		t.Done()
		stWg.Done()
	}()
}
// test
// TestScheduledTaskWaiting schedules tasks out of order and asserts
// they execute sorted by their scheduled time. Timing-based, hence
// skipped in short mode.
func TestScheduledTaskWaiting(t *testing.T) {
	// skip
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	// init
	expectedOutput := "0123456789"
	stSleepDuration = 10 * time.Millisecond
	stOutputChannel = make(chan string, 100)
	stWaitCh = make(chan bool, 0)
	// test queue length
	c := TotalScheduledTasks()
	if c != 0 {
		t.Errorf("Error in calculating Task Queue, expected 0, got %d", c)
	}
	stWg.Add(10)
	// TEST: schedule out of order
	scheduledTaskTester("4", time.Now().Add(stSleepDuration*4))
	scheduledTaskTester("0", time.Now().Add(stSleepDuration*1))
	scheduledTaskTester("8", time.Now().Add(stSleepDuration*8))
	scheduledTaskTester("1", time.Now().Add(stSleepDuration*2))
	scheduledTaskTester("7", time.Now().Add(stSleepDuration*7))
	// test queue length
	time.Sleep(1 * time.Millisecond)
	c = TotalScheduledTasks()
	if c != 5 {
		t.Errorf("Error in calculating Task Queue, expected 5, got %d", c)
	}
	scheduledTaskTester("9", time.Now().Add(stSleepDuration*9))
	scheduledTaskTester("3", time.Now().Add(stSleepDuration*3))
	scheduledTaskTester("2", time.Now().Add(stSleepDuration*2))
	scheduledTaskTester("6", time.Now().Add(stSleepDuration*6))
	scheduledTaskTester("5", time.Now().Add(stSleepDuration*5))
	// wait for test to finish
	close(stWaitCh)
	stWg.Wait()
	// test queue length
	c = TotalScheduledTasks()
	if c != 0 {
		t.Errorf("Error in calculating Task Queue, expected 0, got %d", c)
	}
	// collect output
	close(stOutputChannel)
	completeOutput := ""
	for s := <-stOutputChannel; s != ""; s = <-stOutputChannel {
		completeOutput += s
	}
	// check if test succeeded
	if completeOutput != expectedOutput {
		t.Errorf("ScheduledTask test failed, expected sequence %s, got %s", expectedOutput, completeOutput)
	}
}

44
utils/slices.go Normal file
View file

@ -0,0 +1,44 @@
package utils
// StringInSlice reports whether s is an element of a.
func StringInSlice(s string, a []string) bool {
	for i := range a {
		if a[i] == s {
			return true
		}
	}
	return false
}
// RemoveFromStringSlice returns a with the first occurrence of s removed.
// If s is not present, a is returned unchanged. The removal is performed in
// place, so the backing array of a is modified.
func RemoveFromStringSlice(a []string, s string) []string {
	for i, entry := range a {
		if entry == s {
			return append(a[:i], a[i+1:]...)
		}
	}
	return a
}
// DuplicateStrings returns an independent copy of a. The result is never nil,
// even when a is nil, matching the behavior of make+copy.
func DuplicateStrings(a []string) []string {
	return append(make([]string, 0, len(a)), a...)
}
// StringSliceEqual reports whether a and b have the same length and contain
// identical elements in the same order. A nil slice and an empty slice are
// considered equal.
func StringSliceEqual(a []string, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// DuplicateBytes returns an independent copy of a. The result is never nil,
// even when a is nil, matching the behavior of make+copy.
func DuplicateBytes(a []byte) []byte {
	return append(make([]byte, 0, len(a)), a...)
}

80
utils/slices_test.go Normal file
View file

@ -0,0 +1,80 @@
package utils
import (
"bytes"
"testing"
)
// Shared fixtures for the slice helper tests below.
var (
	stringTestSlice  = []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"} // baseline slice
	stringTestSlice2 = []string{"a", "x", "x", "x", "x", "x", "x", "x", "x", "j"} // same length, different middle
	stringTestSlice3 = []string{"a", "x"}                                         // different length
	byteTestSlice    = []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
)
// TestStringInSlice checks membership lookups for values that are present
// (first, middle, last) and for values that are absent.
func TestStringInSlice(t *testing.T) {
	for i, s := range []string{"a", "d", "j"} {
		if !StringInSlice(s, stringTestSlice) {
			t.Fatalf("string reported not in slice (%d), but it is", i+1)
		}
	}
	for i, s := range []string{"0", "x", "k"} {
		if StringInSlice(s, stringTestSlice) {
			t.Fatalf("string reported in slice (%d), but is not", i+1)
		}
	}
}
func TestRemoveFromStringSlice(t *testing.T) {
test1 := DuplicateStrings(stringTestSlice)
test1 = RemoveFromStringSlice(test1, "b")
if StringInSlice("b", test1) {
t.Fatal("string reported in slice, but was removed")
}
if len(test1) != len(stringTestSlice)-1 {
t.Fatalf("new string slice length not as expected: is %d, should be %d\nnew slice is %v", len(test1), len(stringTestSlice)-1, test1)
}
}
// TestDuplicateStrings checks that the copy matches the source and that
// mutating the copy does not affect the source.
func TestDuplicateStrings(t *testing.T) {
	dup := DuplicateStrings(stringTestSlice)
	if !StringSliceEqual(dup, stringTestSlice) {
		t.Fatal("copied string slice is not equal")
	}
	dup[0] = "x"
	if StringSliceEqual(dup, stringTestSlice) {
		t.Fatal("copied string slice is not a real copy")
	}
}
// TestStringSliceEqual checks equality of identical slices and inequality for
// both a same-length mismatch and a different-length mismatch.
func TestStringSliceEqual(t *testing.T) {
	if !StringSliceEqual(stringTestSlice, stringTestSlice) {
		t.Fatal("strings are equal, but are reported as not")
	}
	if StringSliceEqual(stringTestSlice, stringTestSlice2) {
		t.Fatal("strings are not equal (1), but are reported as equal")
	}
	if StringSliceEqual(stringTestSlice, stringTestSlice3) {
		// Fixed: this message previously duplicated "(1)", making the two
		// failure cases indistinguishable.
		t.Fatal("strings are not equal (2), but are reported as equal")
	}
}
// TestDuplicateBytes checks that the byte copy matches the source and that
// mutating the copy does not affect the source.
func TestDuplicateBytes(t *testing.T) {
	dup := DuplicateBytes(byteTestSlice)
	if !bytes.Equal(dup, byteTestSlice) {
		t.Fatal("copied bytes slice is not equal")
	}
	dup[0] = 0xff
	if bytes.Equal(dup, byteTestSlice) {
		t.Fatal("copied bytes slice is not a real copy")
	}
}