Merge pull request #35 from safing/feature/pre-alpha-finalizing

Pre alpha finalizing
This commit is contained in:
Daniel 2020-05-01 22:26:30 +02:00 committed by GitHub
commit d1236bbe34
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
17 changed files with 715 additions and 372 deletions

View file

@ -7,6 +7,11 @@ import (
"github.com/safing/portbase/log"
)
// Config Keys
const (
	// CfgDefaultListenAddressKey is the configuration key under which the
	// API listen address option is registered (used by registerConfig).
	CfgDefaultListenAddressKey = "api/listenAddress"
)
var (
listenAddressFlag string
listenAddressConfig config.StringOption
@ -35,8 +40,9 @@ func getDefaultListenAddress() string {
func registerConfig() error {
err := config.Register(&config.Option{
Name: "API Address",
Key: "api/listenAddress",
Key: CfgDefaultListenAddressKey,
Description: "Define on which IP and port the API should listen on.",
Order: 128,
OptType: config.OptTypeString,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelStable,

View file

@ -2,8 +2,8 @@ package config
import (
"encoding/json"
"fmt"
"io/ioutil"
"path"
"strings"
"github.com/safing/portbase/log"
@ -72,70 +72,131 @@ func JSONToMap(jsonData []byte) (map[string]interface{}, error) {
return nil, err
}
flatten(loaded, loaded, "")
return loaded, nil
return Flatten(loaded), nil
}
func flatten(rootMap, subMap map[string]interface{}, subKey string) {
// Flatten returns a flattened copy of the given hierarchical config.
func Flatten(config map[string]interface{}) (flattenedConfig map[string]interface{}) {
	// Collect every nested entry into a fresh single-level map;
	// the input map itself is not modified.
	flattenedConfig = map[string]interface{}{}
	flattenMap(flattenedConfig, config, "")
	return
}
func flattenMap(rootMap, subMap map[string]interface{}, subKey string) {
for key, entry := range subMap {
// get next level key
subbedKey := key
if subKey != "" {
subbedKey = fmt.Sprintf("%s/%s", subKey, key)
}
subbedKey := path.Join(subKey, key)
// check for next subMap
nextSub, ok := entry.(map[string]interface{})
if ok {
flatten(rootMap, nextSub, subbedKey)
delete(rootMap, key)
} else if subKey != "" {
flattenMap(rootMap, nextSub, subbedKey)
} else {
// only set if not on root level
rootMap[subbedKey] = entry
}
}
}
// MapToJSON expands a flattened map and returns it as json. The map is altered in the process.
func MapToJSON(values map[string]interface{}) ([]byte, error) {
expand(values)
return json.MarshalIndent(values, "", " ")
// MapToJSON expands a flattened map and returns it as json.
func MapToJSON(config map[string]interface{}) ([]byte, error) {
	// Expand first so the emitted JSON is hierarchical, then render
	// it with indentation for readability.
	expanded := Expand(config)
	return json.MarshalIndent(expanded, "", " ")
}
// expand expands a flattened map.
func expand(mapData map[string]interface{}) {
var newMaps []map[string]interface{}
for key, entry := range mapData {
// Expand returns a hierarchical copy of the given flattened config.
func Expand(flattenedConfig map[string]interface{}) (config map[string]interface{}) {
	// Build a fresh tree; the flattened input is left untouched.
	config = make(map[string]interface{})
	for flatKey, value := range flattenedConfig {
		PutValueIntoHierarchicalConfig(config, flatKey, value)
	}
	return config
}
// PutValueIntoHierarchicalConfig injects a configuration entry into an hierarchical config map. Conflicting entries will be replaced.
func PutValueIntoHierarchicalConfig(config map[string]interface{}, key string, value interface{}) {
	parts := strings.Split(key, "/")

	// Walk (and create as needed) the intermediate maps for every part
	// except the last one, which is the value key itself.
	current := config
	for _, part := range parts[:len(parts)-1] {
		next, isMap := current[part].(map[string]interface{})
		if !isMap {
			// entry is missing or conflicts with a non-map value:
			// replace it with a new sub map
			next = make(map[string]interface{})
			current[part] = next
		}
		current = next
	}

	// assign the value to the final key in the deepest sub map
	current[parts[len(parts)-1]] = value
}
// CleanFlattenedConfig removes all inexistent configuration options from the given flattened config map.
func CleanFlattenedConfig(flattenedConfig map[string]interface{}) {
	optionsLock.RLock()
	defer optionsLock.RUnlock()

	// drop every entry that does not correspond to a registered option
	for key := range flattenedConfig {
		if _, exists := options[key]; !exists {
			delete(flattenedConfig, key)
		}
	}
}
// CleanHierarchicalConfig removes all inexistent configuration options from the given hierarchical config map.
func CleanHierarchicalConfig(config map[string]interface{}) {
	optionsLock.RLock()
	defer optionsLock.RUnlock()
	// recursively prune the tree, starting at the root (empty key prefix)
	cleanSubMap(config, "")
}
func cleanSubMap(subMap map[string]interface{}, subKey string) (empty bool) {
var foundValid int
for key, value := range subMap {
value, ok := value.(map[string]interface{})
if ok {
// we found another section
isEmpty := cleanSubMap(value, path.Join(subKey, key))
if isEmpty {
delete(subMap, key)
} else {
foundValid++
}
continue
}
// we found an option value
if strings.Contains(key, "/") {
parts := strings.SplitN(key, "/", 2)
if len(parts) == 2 {
// get subMap
var subMap map[string]interface{}
v, ok := mapData[parts[0]]
if ok {
subMap, ok = v.(map[string]interface{})
if !ok {
subMap = make(map[string]interface{})
newMaps = append(newMaps, subMap)
mapData[parts[0]] = subMap
}
} else {
subMap = make(map[string]interface{})
newMaps = append(newMaps, subMap)
mapData[parts[0]] = subMap
}
// set entry
subMap[parts[1]] = entry
// delete entry from
delete(mapData, key)
delete(subMap, key)
} else {
_, ok := options[path.Join(subKey, key)]
if ok {
foundValid++
} else {
delete(subMap, key)
}
}
}
for _, entry := range newMaps {
expand(entry)
}
return foundValid == 0
}

View file

@ -2,12 +2,12 @@ package config
import (
"bytes"
"encoding/json"
"testing"
)
func TestJSONMapConversion(t *testing.T) {
jsonData := `{
var (
jsonData = `{
"a": "b",
"c": {
"d": "e",
@ -22,9 +22,9 @@ func TestJSONMapConversion(t *testing.T) {
},
"p": "q"
}`
jsonBytes := []byte(jsonData)
jsonBytes = []byte(jsonData)
mapData := map[string]interface{}{
mapData = map[string]interface{}{
"a": "b",
"p": "q",
"c/d": "e",
@ -33,32 +33,62 @@ func TestJSONMapConversion(t *testing.T) {
"c/h/k": "l",
"c/h/m/n": "o",
}
)
m, err := JSONToMap(jsonBytes)
if err != nil {
t.Fatal(err)
}
// TestJSONMapConversion checks that the package-level fixtures convert
// cleanly in both directions: map -> JSON, and JSON -> map -> JSON.
func TestJSONMapConversion(t *testing.T) {
	// convert to json
	j, err := MapToJSON(mapData)
	if err != nil {
		t.Fatal(err)
	}

	// check if to json matches
	if !bytes.Equal(jsonBytes, j) {
		t.Errorf("json does not match, got %s", j)
	}

	// convert to map
	m, err := JSONToMap(jsonBytes)
	if err != nil {
		t.Fatal(err)
	}

	// and back
	j2, err := MapToJSON(m)
	if err != nil {
		t.Fatal(err)
	}

	// check if double convert matches
	if !bytes.Equal(jsonBytes, j2) {
		// report j2 (the round-tripped output) — previously this printed
		// the wrong variable (j), hiding the actual mismatch.
		t.Errorf("json does not match, got %s", j2)
	}

	// fails for some reason
	// if !reflect.DeepEqual(mapData, m) {
	// 	t.Errorf("maps do not match, got %s", m)
	// }
}
// TestConfigCleaning verifies that both cleaning helpers remove every
// entry of the fixture config (assumes none of the fixture keys are
// registered as options in the test environment — TODO confirm).
func TestConfigCleaning(t *testing.T) {
	// load
	configFlat, err := JSONToMap(jsonBytes)
	if err != nil {
		t.Fatal(err)
	}
	// clean everything
	CleanFlattenedConfig(configFlat)
	if len(configFlat) != 0 {
		t.Errorf("should be empty: %+v", configFlat)
	}

	// load manually for hierarchical config
	configHier := make(map[string]interface{})
	err = json.Unmarshal(jsonBytes, &configHier)
	if err != nil {
		t.Fatal(err)
	}
	// clean everything
	CleanHierarchicalConfig(configHier)
	if len(configHier) != 0 {
		t.Errorf("should be empty: %+v", configHier)
	}
}

View file

@ -19,7 +19,7 @@ type perspectiveOption struct {
// NewPerspective parses the given config and returns it as a new perspective.
func NewPerspective(config map[string]interface{}) (*Perspective, error) {
// flatten config structure
flatten(config, config, "")
config = Flatten(config)
perspective := &Perspective{
config: make(map[string]*perspectiveOption),

View file

@ -67,7 +67,7 @@ func InjectDatabase(name string, storageInt storage.Interface) (*Controller, err
_, ok := controllers[name]
if ok {
return nil, errors.New(`database "%s" already loaded`)
return nil, fmt.Errorf(`database "%s" already loaded`, name)
}
registryLock.Lock()

View file

@ -1,6 +1,7 @@
package database
import (
"context"
"fmt"
"io/ioutil"
"log"
@ -162,7 +163,7 @@ func TestDatabaseSystem(t *testing.T) {
testDatabase(t, "fstree")
testDatabase(t, "hashmap")
err = MaintainRecordStates()
err = MaintainRecordStates(context.TODO())
if err != nil {
t.Fatal(err)
}

View file

@ -5,6 +5,7 @@ import (
"time"
"github.com/safing/portbase/database"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
)
@ -15,13 +16,16 @@ func startMaintenanceTasks() {
}
// maintainBasic runs basic maintenance on all database storages.
// NOTE(review): ctx is not passed through, so database.Maintain cannot be
// aborted on shutdown.
func maintainBasic(ctx context.Context, task *modules.Task) error {
	log.Infof("database: running Maintain")
	return database.Maintain()
}
// maintainThorough runs thorough maintenance on all database storages.
// NOTE(review): ctx is not passed through, so database.MaintainThorough
// cannot be aborted on shutdown.
func maintainThorough(ctx context.Context, task *modules.Task) error {
	log.Infof("database: running MaintainThorough")
	return database.MaintainThorough()
}
// maintainRecords runs record state lifecycle maintenance on all storages,
// passing the task context through so maintenance can be aborted on shutdown.
func maintainRecords(ctx context.Context, task *modules.Task) error {
	// The stale context-less call (return database.MaintainRecordStates())
	// was left-over diff residue before the log line, making the rest of
	// the function unreachable; the context-aware call below is the
	// intended body.
	log.Infof("database: running MaintainRecordStates")
	return database.MaintainRecordStates(ctx)
}

View file

@ -1,16 +1,21 @@
package database
import (
"context"
"time"
"github.com/tevino/abool"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
)
// Maintain runs the Maintain method on all storages.
func Maintain() (err error) {
controllers := duplicateControllers()
for _, c := range controllers {
// copy, as we might use the very long
all := duplicateControllers()
for _, c := range all {
err = c.Maintain()
if err != nil {
return
@ -21,7 +26,9 @@ func Maintain() (err error) {
// MaintainThorough runs the MaintainThorough method on all storages.
func MaintainThorough() (err error) {
// copy, as we might use the very long
all := duplicateControllers()
for _, c := range all {
err = c.MaintainThorough()
if err != nil {
@ -32,12 +39,32 @@ func MaintainThorough() (err error) {
}
// MaintainRecordStates runs record state lifecycle maintenance on all storages.
func MaintainRecordStates() error {
func MaintainRecordStates(ctx context.Context) error { //nolint:gocognit
// TODO: Put this in the storage interface to correctly maintain on all storages.
// Storages might check for deletion and expiry in the query interface and not return anything here.
// listen for ctx cancel
stop := abool.New()
doneCh := make(chan struct{}) // for goroutine cleanup
defer close(doneCh)
go func() {
select {
case <-ctx.Done():
case <-doneCh:
}
stop.Set()
}()
// copy, as we might use the very long
all := duplicateControllers()
now := time.Now().Unix()
thirtyDaysAgo := time.Now().Add(-30 * 24 * time.Hour).Unix()
for _, c := range all {
if stop.IsSet() {
return nil
}
if c.ReadOnly() || c.Injected() {
continue
@ -56,30 +83,52 @@ func MaintainRecordStates() error {
var toDelete []record.Record
var toExpire []record.Record
for r := range it.Next {
switch {
case r.Meta().Deleted < thirtyDaysAgo:
toDelete = append(toDelete, r)
case r.Meta().Expires < now:
toExpire = append(toExpire, r)
queryLoop:
for {
select {
case r := <-it.Next:
if r == nil {
break queryLoop
}
meta := r.Meta()
switch {
case meta.Deleted > 0 && meta.Deleted < thirtyDaysAgo:
toDelete = append(toDelete, r)
case meta.Expires > 0 && meta.Expires < now:
toExpire = append(toExpire, r)
}
case <-ctx.Done():
it.Cancel()
break queryLoop
}
}
if it.Err() != nil {
return err
}
if stop.IsSet() {
return nil
}
for _, r := range toDelete {
err := c.storage.Delete(r.DatabaseKey())
if err != nil {
return err
}
if stop.IsSet() {
return nil
}
}
for _, r := range toExpire {
r.Meta().Delete()
err := c.Put(r)
if err != nil {
return err
}
if stop.IsSet() {
return nil
}
}
}
@ -87,9 +136,10 @@ func MaintainRecordStates() error {
}
func duplicateControllers() (all []*Controller) {
controllersLock.Lock()
defer controllersLock.Unlock()
controllersLock.RLock()
defer controllersLock.RUnlock()
all = make([]*Controller, 0, len(controllers))
for _, c := range controllers {
all = append(all, c)
}

View file

@ -2,6 +2,7 @@ package modules
import (
"context"
"errors"
"fmt"
"github.com/safing/portbase/log"
@ -39,6 +40,38 @@ func (m *Module) processEventTrigger(event string, data interface{}) {
}
}
// InjectEvent triggers an event from a foreign module and executes all hook functions registered to that event.
func (m *Module) InjectEvent(sourceEventName, targetModuleName, targetEventName string, data interface{}) error {
	// guard: both the source module and the module system must be up
	if !m.OnlineSoon() {
		return errors.New("module not yet started")
	}
	if !modulesLocked.IsSet() {
		return errors.New("module system not yet started")
	}

	// resolve the target module
	target, ok := modules[targetModuleName]
	if !ok {
		return fmt.Errorf(`module "%s" does not exist`, targetModuleName)
	}

	target.eventHooksLock.RLock()
	defer target.eventHooksLock.RUnlock()

	// resolve the target event's hooks
	hooks, ok := target.eventHooks[targetEventName]
	if !ok {
		return fmt.Errorf(`module "%s" has no event named "%s"`, targetModuleName, targetEventName)
	}

	// fire hooks asynchronously, but only for modules that are online (soon)
	for _, h := range hooks {
		if h.hookingModule.OnlineSoon() {
			go m.runEventHook(h, sourceEventName, data)
		}
	}
	return nil
}
func (m *Module) runEventHook(hook *eventHook, event string, data interface{}) {
// check if source module is ready for handling
if m.Status() != StatusOnline {

View file

@ -18,12 +18,12 @@ type Task struct {
module *Module
taskFn func(context.Context, *Task) error
queued bool
canceled bool
executing bool
overtime bool // locked by scheduleLock
// these are populated at task creation
// ctx is canceled when task is shutdown -> all tasks become canceled
// ctx is canceled when module is shutdown -> all tasks become canceled
ctx context.Context
cancelCtx func()
@ -110,10 +110,9 @@ func (t *Task) prepForQueueing() (ok bool) {
return false
}
t.queued = true
if t.maxDelay != 0 {
t.executeAt = time.Now().Add(t.maxDelay)
t.addToSchedule()
t.addToSchedule(true)
}
return true
@ -129,11 +128,11 @@ func notifyQueue() {
// Queue queues the Task for execution.
func (t *Task) Queue() *Task {
t.lock.Lock()
defer t.lock.Unlock()
if !t.prepForQueueing() {
t.lock.Unlock()
return t
}
t.lock.Unlock()
if t.queueElement == nil {
queuesLock.Lock()
@ -148,11 +147,11 @@ func (t *Task) Queue() *Task {
// Prioritize puts the task in the prioritized queue.
func (t *Task) Prioritize() *Task {
t.lock.Lock()
defer t.lock.Unlock()
if !t.prepForQueueing() {
t.lock.Unlock()
return t
}
t.lock.Unlock()
if t.prioritizedQueueElement == nil {
queuesLock.Lock()
@ -167,11 +166,11 @@ func (t *Task) Prioritize() *Task {
// StartASAP schedules the task to be executed next.
func (t *Task) StartASAP() *Task {
t.lock.Lock()
defer t.lock.Unlock()
if !t.prepForQueueing() {
t.lock.Unlock()
return t
}
t.lock.Unlock()
queuesLock.Lock()
if t.prioritizedQueueElement == nil {
@ -188,17 +187,19 @@ func (t *Task) StartASAP() *Task {
// MaxDelay sets a maximum delay within the task should be executed from being queued. Scheduled tasks are queued when they are triggered. The default delay is 3 minutes.
func (t *Task) MaxDelay(maxDelay time.Duration) *Task {
t.lock.Lock()
defer t.lock.Unlock()
t.maxDelay = maxDelay
t.lock.Unlock()
return t
}
// Schedule schedules the task for execution at the given time.
func (t *Task) Schedule(executeAt time.Time) *Task {
t.lock.Lock()
defer t.lock.Unlock()
t.executeAt = executeAt
t.addToSchedule()
t.lock.Unlock()
t.addToSchedule(false)
return t
}
@ -210,22 +211,23 @@ func (t *Task) Repeat(interval time.Duration) *Task {
}
t.lock.Lock()
defer t.lock.Unlock()
t.repeat = interval
t.executeAt = time.Now().Add(t.repeat)
t.addToSchedule()
t.lock.Unlock()
t.addToSchedule(false)
return t
}
// Cancel cancels the current and any future execution of the Task. This is not reversible by any other functions.
func (t *Task) Cancel() {
t.lock.Lock()
defer t.lock.Unlock()
t.canceled = true
if t.cancelCtx != nil {
t.cancelCtx()
}
t.lock.Unlock()
}
func (t *Task) removeFromQueues() {
@ -234,31 +236,29 @@ func (t *Task) removeFromQueues() {
queuesLock.Lock()
taskQueue.Remove(t.queueElement)
queuesLock.Unlock()
t.lock.Lock()
t.queueElement = nil
t.lock.Unlock()
}
if t.prioritizedQueueElement != nil {
queuesLock.Lock()
prioritizedTaskQueue.Remove(t.prioritizedQueueElement)
queuesLock.Unlock()
t.lock.Lock()
t.prioritizedQueueElement = nil
t.lock.Unlock()
}
if t.scheduleListElement != nil {
scheduleLock.Lock()
taskSchedule.Remove(t.scheduleListElement)
t.overtime = false
scheduleLock.Unlock()
t.lock.Lock()
t.scheduleListElement = nil
t.lock.Unlock()
}
}
func (t *Task) runWithLocking() {
t.lock.Lock()
// we will not attempt execution, remove from queues
t.removeFromQueues()
// check if task is already executing
if t.executing {
t.lock.Unlock()
@ -266,21 +266,22 @@ func (t *Task) runWithLocking() {
}
// check if task is active
// - has not been cancelled
// - module is online (soon)
if !t.isActive() {
t.removeFromQueues()
t.lock.Unlock()
return
}
// check if module was stopped
select {
case <-t.ctx.Done(): // check if module is stopped
t.removeFromQueues()
case <-t.ctx.Done():
t.lock.Unlock()
return
default:
}
// enter executing state
t.executing = true
t.lock.Unlock()
@ -296,8 +297,9 @@ func (t *Task) runWithLocking() {
// wait
<-t.module.StartCompleted()
} else {
// abort, module will not come online
t.lock.Lock()
t.removeFromQueues()
t.executing = false
t.lock.Unlock()
return
}
@ -336,22 +338,23 @@ func (t *Task) executeWithLocking() {
atomic.AddInt32(t.module.taskCnt, -1)
t.module.waitGroup.Done()
// reset
t.lock.Lock()
// reset state
t.executing = false
t.queued = false
// repeat?
if t.isActive() && t.repeat != 0 {
t.executeAt = time.Now().Add(t.repeat)
t.addToSchedule()
t.addToSchedule(false)
}
t.lock.Unlock()
// notify that we finished
if t.cancelCtx != nil {
t.cancelCtx()
}
t.cancelCtx()
// refresh context
t.ctx, t.cancelCtx = context.WithCancel(t.module.Ctx)
t.lock.Unlock()
}()
// run
@ -361,13 +364,7 @@ func (t *Task) executeWithLocking() {
}
}
func (t *Task) getExecuteAtWithLocking() time.Time {
t.lock.Lock()
defer t.lock.Unlock()
return t.executeAt
}
func (t *Task) addToSchedule() {
func (t *Task) addToSchedule(overtime bool) {
if !t.isActive() {
return
}
@ -376,6 +373,11 @@ func (t *Task) addToSchedule() {
defer scheduleLock.Unlock()
// defer printTaskList(taskSchedule) // for debugging
if overtime {
// do not set to false
t.overtime = true
}
// notify scheduler
defer func() {
select {
@ -392,7 +394,7 @@ func (t *Task) addToSchedule() {
continue
}
// compare
if t.executeAt.Before(eVal.getExecuteAtWithLocking()) {
if t.executeAt.Before(eVal.executeAt) {
// insert/move task
if t.scheduleListElement == nil {
t.scheduleListElement = taskSchedule.InsertBefore(t, e)
@ -489,18 +491,28 @@ func taskScheduleHandler() {
return
case <-recalculateNextScheduledTask:
case <-waitUntilNextScheduledTask():
// get first task in schedule
scheduleLock.Lock()
// get first task in schedule
e := taskSchedule.Front()
scheduleLock.Unlock()
if e == nil {
scheduleLock.Unlock()
continue
}
t := e.Value.(*Task)
// process Task
if t.queued {
if t.overtime {
// already queued and maxDelay reached
t.overtime = false
scheduleLock.Unlock()
t.runWithLocking()
} else {
// place in front of prioritized queue
t.overtime = true
scheduleLock.Unlock()
t.StartASAP()
}
}
@ -513,10 +525,10 @@ func printTaskList(*list.List) { //nolint:unused,deadcode // for debugging, NOT
t, ok := e.Value.(*Task)
if ok {
fmt.Printf(
"%s:%s qu=%v ca=%v exec=%v at=%s rep=%s delay=%s\n",
"%s:%s over=%v canc=%v exec=%v exat=%s rep=%s delay=%s\n",
t.module.Name,
t.name,
t.queued,
t.overtime,
t.canceled,
t.executing,
t.executeAt,

View file

@ -168,3 +168,84 @@ func TestScheduledTaskWaiting(t *testing.T) {
}
}
// TestRequeueingTask exercises re-queueing and re-scheduling a task while
// it is already queued or executing. It relies on real timing (sleeps), so
// it is sensitive to scheduler load.
func TestRequeueingTask(t *testing.T) {
	blockWg := &sync.WaitGroup{}
	wg := &sync.WaitGroup{}

	// block task execution
	blockWg.Add(1) // mark done at beginning
	wg.Add(2)      // mark done at end
	block := qtModule.NewTask("TestRequeueingTask:block", func(ctx context.Context, t *Task) error {
		blockWg.Done()
		time.Sleep(100 * time.Millisecond)
		wg.Done()
		return nil
	}).StartASAP()

	// make sure first task has started
	blockWg.Wait()
	// fmt.Printf("%s: %+v\n", time.Now(), block)

	// schedule again while executing
	blockWg.Add(1) // mark done at beginning
	block.StartASAP()
	// fmt.Printf("%s: %+v\n", time.Now(), block)

	// test task: scheduled in the future, then re-queued in various ways
	wg.Add(1)
	task := qtModule.NewTask("TestRequeueingTask:test", func(ctx context.Context, t *Task) error {
		wg.Done()
		return nil
	}).Schedule(time.Now().Add(2 * time.Second))

	// reschedule through every queueing entry point
	task.Schedule(time.Now().Add(1 * time.Second))
	task.Queue()
	task.Prioritize()
	task.StartASAP()
	wg.Wait()
	time.Sleep(100 * time.Millisecond) // let tasks finalize execution

	// do it again

	// block task execution (while first block task is still running!)
	blockWg.Add(1) // mark done at beginning
	wg.Add(1)      // mark done at end
	block.StartASAP()
	blockWg.Wait()

	// reschedule
	wg.Add(1)
	task.Schedule(time.Now().Add(1 * time.Second))
	task.Queue()
	task.Prioritize()
	task.StartASAP()
	wg.Wait()
}
// TestQueueSuccession runs one task ten times in succession, with the task
// function re-queueing itself via Queue/StartASAP/Schedule.
func TestQueueSuccession(t *testing.T) {
	var cnt int
	wg := &sync.WaitGroup{}
	wg.Add(10)

	tt := qtModule.NewTask("TestRequeueingTask:test", func(ctx context.Context, task *Task) error {
		time.Sleep(10 * time.Millisecond)
		wg.Done()
		// NOTE(review): cnt is incremented after wg.Done() and without
		// synchronization; presumably executions never overlap here —
		// verify against the task scheduler's guarantees.
		cnt++
		fmt.Printf("completed succession %d\n", cnt)
		switch cnt {
		case 1, 4, 6:
			task.Queue()
		case 2, 5, 8:
			task.StartASAP()
		case 3, 7, 9:
			task.Schedule(time.Now().Add(10 * time.Millisecond))
		}
		return nil
	})
	// fmt.Printf("%+v\n", tt)
	tt.StartASAP()
	// time.Sleep(100 * time.Millisecond)
	// fmt.Printf("%+v\n", tt)
	wg.Wait()
}

View file

@ -3,6 +3,7 @@ package rng
import (
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"errors"
"fmt"
"sync"
@ -47,6 +48,15 @@ func start() error {
if rng == nil {
return errors.New("failed to initialize rng")
}
// explicitly add randomness
osEntropy := make([]byte, minFeedEntropy/8)
_, err := rand.Read(osEntropy)
if err != nil {
return fmt.Errorf("could not read entropy from os: %s", err)
}
rng.Reseed(osEntropy)
rngReady = true
// random source: OS

1
rng/test/.gitignore vendored
View file

@ -1,3 +1,4 @@
test
*.bin
*.out
*.txt

View file

@ -10,11 +10,9 @@ To test the quality of entropy, first generate random data with the test program
go build
./test tickfeeder > output.bin # just the additional entropy feed
./test tickfeeder tickfeeder.out 1 # just the additional entropy feed
# OR
./test fortuna > output.bin # the actual CSPRNG
ls -lah output.bin # check filesize: should be ~1MB
./test fortuna fortuna.out 10 # the actual CSPRNG with feeders
Then, run `dieharder`, a random number generator test tool:
@ -22,6 +20,135 @@ Then, run `dieharder`, a random number generator test tool:
Below you can find two test outputs of `dieharder`.
Please note that around 5 tests of `dieharder` normally fail. This is expected and even desired.
Also, the rng currently reseeds (ie. adds entropy) after 1MB or 10 minutes.
`dieharder` of two samples of 10MB of fortuna (with feeders) (`go version go1.14.2 linux/amd64` on 21.04.2020):
#=============================================================================#
# dieharder version 3.31.1 Copyright 2003 Robert G. Brown #
#=============================================================================#
rng_name | filename |rands/second|
mt19937| fortuna.out| 1.00e+08 |
#=============================================================================#
test_name |ntup| tsamples |psamples| p-value |Assessment
#=============================================================================#
diehard_birthdays| 0| 100| 100|0.69048981| PASSED | 2nd sample: PASSED
diehard_operm5| 0| 1000000| 100|0.76010702| PASSED | 2nd sample: PASSED
diehard_rank_32x32| 0| 40000| 100|0.86291558| PASSED | 2nd sample: PASSED
diehard_rank_6x8| 0| 100000| 100|0.63715647| PASSED | 2nd sample: PASSED
diehard_bitstream| 0| 2097152| 100|0.25389670| PASSED | 2nd sample: PASSED
diehard_opso| 0| 2097152| 100|0.70928590| PASSED | 2nd sample: PASSED
diehard_oqso| 0| 2097152| 100|0.75643141| PASSED | 2nd sample: PASSED
diehard_dna| 0| 2097152| 100|0.57096286| PASSED | 2nd sample: PASSED
diehard_count_1s_str| 0| 256000| 100|0.39650366| PASSED | 2nd sample: PASSED
diehard_count_1s_byt| 0| 256000| 100|0.26040557| PASSED | 2nd sample: PASSED
diehard_parking_lot| 0| 12000| 100|0.92327672| PASSED | 2nd sample: PASSED
diehard_2dsphere| 2| 8000| 100|0.86507605| PASSED | 2nd sample: PASSED
diehard_3dsphere| 3| 4000| 100|0.70845388| PASSED | 2nd sample: PASSED
diehard_squeeze| 0| 100000| 100|0.99744782| WEAK | 2nd sample: PASSED
diehard_sums| 0| 100| 100|0.27275938| PASSED | 2nd sample: PASSED
diehard_runs| 0| 100000| 100|0.27299936| PASSED | 2nd sample: PASSED
diehard_runs| 0| 100000| 100|0.42043270| PASSED | 2nd sample: PASSED
diehard_craps| 0| 200000| 100|0.91674884| PASSED | 2nd sample: PASSED
diehard_craps| 0| 200000| 100|0.77856237| PASSED | 2nd sample: PASSED
marsaglia_tsang_gcd| 0| 10000000| 100|0.77922797| PASSED | 2nd sample: PASSED
marsaglia_tsang_gcd| 0| 10000000| 100|0.94589532| PASSED | 2nd sample: PASSED
sts_monobit| 1| 100000| 100|0.99484549| PASSED | 2nd sample: PASSED
sts_runs| 2| 100000| 100|0.70036713| PASSED | 2nd sample: PASSED
sts_serial| 1| 100000| 100|0.79544015| PASSED | 2nd sample: PASSED
sts_serial| 2| 100000| 100|0.91473958| PASSED | 2nd sample: PASSED
sts_serial| 3| 100000| 100|0.66528037| PASSED | 2nd sample: PASSED
sts_serial| 3| 100000| 100|0.84028312| PASSED | 2nd sample: PASSED
sts_serial| 4| 100000| 100|0.82253130| PASSED | 2nd sample: PASSED
sts_serial| 4| 100000| 100|0.90695315| PASSED | 2nd sample: PASSED
sts_serial| 5| 100000| 100|0.55160515| PASSED | 2nd sample: PASSED
sts_serial| 5| 100000| 100|0.05256789| PASSED | 2nd sample: PASSED
sts_serial| 6| 100000| 100|0.25857850| PASSED | 2nd sample: PASSED
sts_serial| 6| 100000| 100|0.58661649| PASSED | 2nd sample: PASSED
sts_serial| 7| 100000| 100|0.46915559| PASSED | 2nd sample: PASSED
sts_serial| 7| 100000| 100|0.57273130| PASSED | 2nd sample: PASSED
sts_serial| 8| 100000| 100|0.99182961| PASSED | 2nd sample: PASSED
sts_serial| 8| 100000| 100|0.86913367| PASSED | 2nd sample: PASSED
sts_serial| 9| 100000| 100|0.19259756| PASSED | 2nd sample: PASSED
sts_serial| 9| 100000| 100|0.61225842| PASSED | 2nd sample: PASSED
sts_serial| 10| 100000| 100|0.40792308| PASSED | 2nd sample: PASSED
sts_serial| 10| 100000| 100|0.99930785| WEAK | 2nd sample: PASSED
sts_serial| 11| 100000| 100|0.07296973| PASSED | 2nd sample: PASSED
sts_serial| 11| 100000| 100|0.04906522| PASSED | 2nd sample: PASSED
sts_serial| 12| 100000| 100|0.66400927| PASSED | 2nd sample: PASSED
sts_serial| 12| 100000| 100|0.67947609| PASSED | 2nd sample: PASSED
sts_serial| 13| 100000| 100|0.20412325| PASSED | 2nd sample: PASSED
sts_serial| 13| 100000| 100|0.19781734| PASSED | 2nd sample: PASSED
sts_serial| 14| 100000| 100|0.08541533| PASSED | 2nd sample: PASSED
sts_serial| 14| 100000| 100|0.07438464| PASSED | 2nd sample: PASSED
sts_serial| 15| 100000| 100|0.04607276| PASSED | 2nd sample: PASSED
sts_serial| 15| 100000| 100|0.56460340| PASSED | 2nd sample: PASSED
sts_serial| 16| 100000| 100|0.40211405| PASSED | 2nd sample: PASSED
sts_serial| 16| 100000| 100|0.81369172| PASSED | 2nd sample: PASSED
rgb_bitdist| 1| 100000| 100|0.52317549| PASSED | 2nd sample: PASSED
rgb_bitdist| 2| 100000| 100|0.49819655| PASSED | 2nd sample: PASSED
rgb_bitdist| 3| 100000| 100|0.65830167| PASSED | 2nd sample: PASSED
rgb_bitdist| 4| 100000| 100|0.75278398| PASSED | 2nd sample: PASSED
rgb_bitdist| 5| 100000| 100|0.23537303| PASSED | 2nd sample: PASSED
rgb_bitdist| 6| 100000| 100|0.82461608| PASSED | 2nd sample: PASSED
rgb_bitdist| 7| 100000| 100|0.46944789| PASSED | 2nd sample: PASSED
rgb_bitdist| 8| 100000| 100|0.44371293| PASSED | 2nd sample: PASSED
rgb_bitdist| 9| 100000| 100|0.61647469| PASSED | 2nd sample: PASSED
rgb_bitdist| 10| 100000| 100|0.97623808| PASSED | 2nd sample: PASSED
rgb_bitdist| 11| 100000| 100|0.26037998| PASSED | 2nd sample: PASSED
rgb_bitdist| 12| 100000| 100|0.59217788| PASSED | 2nd sample: PASSED
rgb_minimum_distance| 2| 10000| 1000|0.19809129| PASSED | 2nd sample: PASSED
rgb_minimum_distance| 3| 10000| 1000|0.97363365| PASSED | 2nd sample: PASSED
rgb_minimum_distance| 4| 10000| 1000|0.62281709| PASSED | 2nd sample: PASSED
rgb_minimum_distance| 5| 10000| 1000|0.13655852| PASSED | 2nd sample: PASSED
rgb_permutations| 2| 100000| 100|0.33726465| PASSED | 2nd sample: PASSED
rgb_permutations| 3| 100000| 100|0.21992025| PASSED | 2nd sample: WEAK
rgb_permutations| 4| 100000| 100|0.27074573| PASSED | 2nd sample: PASSED
rgb_permutations| 5| 100000| 100|0.76925248| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 0| 1000000| 100|0.91881971| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 1| 1000000| 100|0.08282106| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 2| 1000000| 100|0.55991289| PASSED | 2nd sample: WEAK
rgb_lagged_sum| 3| 1000000| 100|0.94939920| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 4| 1000000| 100|0.21248759| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 5| 1000000| 100|0.99308883| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 6| 1000000| 100|0.83174944| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 7| 1000000| 100|0.49883983| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 8| 1000000| 100|0.99900807| WEAK | 2nd sample: PASSED
rgb_lagged_sum| 9| 1000000| 100|0.74164128| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 10| 1000000| 100|0.53367081| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 11| 1000000| 100|0.41808417| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 12| 1000000| 100|0.96082733| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 13| 1000000| 100|0.38208924| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 14| 1000000| 100|0.98335747| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 15| 1000000| 100|0.68708033| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 16| 1000000| 100|0.49715110| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 17| 1000000| 100|0.68418225| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 18| 1000000| 100|0.97255087| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 19| 1000000| 100|0.99556843| WEAK | 2nd sample: PASSED
rgb_lagged_sum| 20| 1000000| 100|0.50758123| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 21| 1000000| 100|0.98435826| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 22| 1000000| 100|0.15752743| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 23| 1000000| 100|0.98607886| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 24| 1000000| 100|0.86645723| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 25| 1000000| 100|0.87384758| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 26| 1000000| 100|0.98680940| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 27| 1000000| 100|0.56386729| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 28| 1000000| 100|0.16874165| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 29| 1000000| 100|0.10369211| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 30| 1000000| 100|0.91356341| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 31| 1000000| 100|0.42526940| PASSED | 2nd sample: PASSED
rgb_lagged_sum| 32| 1000000| 100|0.99939460| WEAK | 2nd sample: PASSED
rgb_kstest_test| 0| 10000| 1000|0.11414525| PASSED | 2nd sample: PASSED
dab_bytedistrib| 0| 51200000| 1|0.27693890| PASSED | 2nd sample: PASSED
dab_dct| 256| 50000| 1|0.15807123| PASSED | 2nd sample: PASSED
Preparing to run test 207. ntuple = 0
dab_filltree| 32| 15000000| 1|0.33275771| PASSED | 2nd sample: PASSED
dab_filltree| 32| 15000000| 1|0.15704033| PASSED | 2nd sample: PASSED
Preparing to run test 208. ntuple = 0
dab_filltree2| 0| 5000000| 1|0.85562670| PASSED | 2nd sample: PASSED
dab_filltree2| 1| 5000000| 1|0.35187836| PASSED | 2nd sample: PASSED
Preparing to run test 209. ntuple = 0
dab_monobit2| 12| 65000000| 1|0.03099468| PASSED | 2nd sample: PASSED
`dieharder` output of 22KB of contextswitch (`go version go1.10.3 linux/amd64` on 23.08.2018):
@ -150,131 +277,3 @@ Please note that around 5 tests of `dieharder` normally fail. This is expected a
dab_filltree2| 1| 5000000| 1|0.30118240| PASSED
Preparing to run test 209. ntuple = 0
dab_monobit2| 12| 65000000| 1|0.00209003| WEAK
`dieharder` of 1MB of fortuna (`go version go1.10.3 linux/amd64` on 23.08.2018):
#=============================================================================#
# dieharder version 3.31.1 Copyright 2003 Robert G. Brown #
#=============================================================================#
rng_name | filename |rands/second|
mt19937| output.bin| 8.44e+07 |
#=============================================================================#
test_name |ntup| tsamples |psamples| p-value |Assessment
#=============================================================================#
diehard_birthdays| 0| 100| 100|0.94302153| PASSED
diehard_operm5| 0| 1000000| 100|0.08378380| PASSED
diehard_rank_32x32| 0| 40000| 100|0.02062049| PASSED
diehard_rank_6x8| 0| 100000| 100|0.43787871| PASSED
diehard_bitstream| 0| 2097152| 100|0.15713023| PASSED
diehard_opso| 0| 2097152| 100|0.79331996| PASSED
diehard_oqso| 0| 2097152| 100|0.54138750| PASSED
diehard_dna| 0| 2097152| 100|0.06957205| PASSED
diehard_count_1s_str| 0| 256000| 100|0.21653644| PASSED
diehard_count_1s_byt| 0| 256000| 100|0.96539542| PASSED
diehard_parking_lot| 0| 12000| 100|0.21306362| PASSED
diehard_2dsphere| 2| 8000| 100|0.40750466| PASSED
diehard_3dsphere| 3| 4000| 100|0.99827314| WEAK
diehard_squeeze| 0| 100000| 100|0.70994607| PASSED
diehard_sums| 0| 100| 100|0.42729005| PASSED
diehard_runs| 0| 100000| 100|0.08118125| PASSED
diehard_runs| 0| 100000| 100|0.99226204| PASSED
diehard_craps| 0| 200000| 100|0.49803401| PASSED
diehard_craps| 0| 200000| 100|0.84011191| PASSED
marsaglia_tsang_gcd| 0| 10000000| 100|0.40135552| PASSED
marsaglia_tsang_gcd| 0| 10000000| 100|0.53311975| PASSED
sts_monobit| 1| 100000| 100|0.96903259| PASSED
sts_runs| 2| 100000| 100|0.55734041| PASSED
sts_serial| 1| 100000| 100|0.69041819| PASSED
sts_serial| 2| 100000| 100|0.61728694| PASSED
sts_serial| 3| 100000| 100|0.70299864| PASSED
sts_serial| 3| 100000| 100|0.36332027| PASSED
sts_serial| 4| 100000| 100|0.57627216| PASSED
sts_serial| 4| 100000| 100|0.95046929| PASSED
sts_serial| 5| 100000| 100|0.79824554| PASSED
sts_serial| 5| 100000| 100|0.62786166| PASSED
sts_serial| 6| 100000| 100|0.84103529| PASSED
sts_serial| 6| 100000| 100|0.89083859| PASSED
sts_serial| 7| 100000| 100|0.69686380| PASSED
sts_serial| 7| 100000| 100|0.79436099| PASSED
sts_serial| 8| 100000| 100|0.84082295| PASSED
sts_serial| 8| 100000| 100|0.95915719| PASSED
sts_serial| 9| 100000| 100|0.48200567| PASSED
sts_serial| 9| 100000| 100|0.10836112| PASSED
sts_serial| 10| 100000| 100|0.45470523| PASSED
sts_serial| 10| 100000| 100|0.97608829| PASSED
sts_serial| 11| 100000| 100|0.89344380| PASSED
sts_serial| 11| 100000| 100|0.31959825| PASSED
sts_serial| 12| 100000| 100|0.43415812| PASSED
sts_serial| 12| 100000| 100|0.27845148| PASSED
sts_serial| 13| 100000| 100|0.50590833| PASSED
sts_serial| 13| 100000| 100|0.39585514| PASSED
sts_serial| 14| 100000| 100|0.55566778| PASSED
sts_serial| 14| 100000| 100|0.57138798| PASSED
sts_serial| 15| 100000| 100|0.12315118| PASSED
sts_serial| 15| 100000| 100|0.41728831| PASSED
sts_serial| 16| 100000| 100|0.23202389| PASSED
sts_serial| 16| 100000| 100|0.84883373| PASSED
rgb_bitdist| 1| 100000| 100|0.45137388| PASSED
rgb_bitdist| 2| 100000| 100|0.93984739| PASSED
rgb_bitdist| 3| 100000| 100|0.85148557| PASSED
rgb_bitdist| 4| 100000| 100|0.77062397| PASSED
rgb_bitdist| 5| 100000| 100|0.79511260| PASSED
rgb_bitdist| 6| 100000| 100|0.86150140| PASSED
rgb_bitdist| 7| 100000| 100|0.98572979| PASSED
rgb_bitdist| 8| 100000| 100|0.73302973| PASSED
rgb_bitdist| 9| 100000| 100|0.39660028| PASSED
rgb_bitdist| 10| 100000| 100|0.13167592| PASSED
rgb_bitdist| 11| 100000| 100|0.87937846| PASSED
rgb_bitdist| 12| 100000| 100|0.80619403| PASSED
rgb_minimum_distance| 2| 10000| 1000|0.38189429| PASSED
rgb_minimum_distance| 3| 10000| 1000|0.21164619| PASSED
rgb_minimum_distance| 4| 10000| 1000|0.91875064| PASSED
rgb_minimum_distance| 5| 10000| 1000|0.27897081| PASSED
rgb_permutations| 2| 100000| 100|0.22927506| PASSED
rgb_permutations| 3| 100000| 100|0.80827585| PASSED
rgb_permutations| 4| 100000| 100|0.38750474| PASSED
rgb_permutations| 5| 100000| 100|0.18938169| PASSED
rgb_lagged_sum| 0| 1000000| 100|0.72234187| PASSED
rgb_lagged_sum| 1| 1000000| 100|0.28633796| PASSED
rgb_lagged_sum| 2| 1000000| 100|0.52961866| PASSED
rgb_lagged_sum| 3| 1000000| 100|0.99876080| WEAK
rgb_lagged_sum| 4| 1000000| 100|0.39603203| PASSED
rgb_lagged_sum| 5| 1000000| 100|0.01004618| PASSED
rgb_lagged_sum| 6| 1000000| 100|0.89539065| PASSED
rgb_lagged_sum| 7| 1000000| 100|0.55558774| PASSED
rgb_lagged_sum| 8| 1000000| 100|0.40063365| PASSED
rgb_lagged_sum| 9| 1000000| 100|0.30905028| PASSED
rgb_lagged_sum| 10| 1000000| 100|0.31161899| PASSED
rgb_lagged_sum| 11| 1000000| 100|0.76729775| PASSED
rgb_lagged_sum| 12| 1000000| 100|0.36416009| PASSED
rgb_lagged_sum| 13| 1000000| 100|0.21062168| PASSED
rgb_lagged_sum| 14| 1000000| 100|0.17580591| PASSED
rgb_lagged_sum| 15| 1000000| 100|0.54465457| PASSED
rgb_lagged_sum| 16| 1000000| 100|0.39394806| PASSED
rgb_lagged_sum| 17| 1000000| 100|0.81572681| PASSED
rgb_lagged_sum| 18| 1000000| 100|0.98821505| PASSED
rgb_lagged_sum| 19| 1000000| 100|0.86755786| PASSED
rgb_lagged_sum| 20| 1000000| 100|0.37832948| PASSED
rgb_lagged_sum| 21| 1000000| 100|0.52001140| PASSED
rgb_lagged_sum| 22| 1000000| 100|0.83595676| PASSED
rgb_lagged_sum| 23| 1000000| 100|0.22643336| PASSED
rgb_lagged_sum| 24| 1000000| 100|0.96475696| PASSED
rgb_lagged_sum| 25| 1000000| 100|0.49570837| PASSED
rgb_lagged_sum| 26| 1000000| 100|0.71327165| PASSED
rgb_lagged_sum| 27| 1000000| 100|0.07344404| PASSED
rgb_lagged_sum| 28| 1000000| 100|0.86374872| PASSED
rgb_lagged_sum| 29| 1000000| 100|0.24892548| PASSED
rgb_lagged_sum| 30| 1000000| 100|0.14314375| PASSED
rgb_lagged_sum| 31| 1000000| 100|0.27884009| PASSED
rgb_lagged_sum| 32| 1000000| 100|0.66637341| PASSED
rgb_kstest_test| 0| 10000| 1000|0.13954587| PASSED
dab_bytedistrib| 0| 51200000| 1|0.54278716| PASSED
dab_dct| 256| 50000| 1|0.71177390| PASSED
Preparing to run test 207. ntuple = 0
dab_filltree| 32| 15000000| 1|0.51006153| PASSED
dab_filltree| 32| 15000000| 1|0.91162889| PASSED
Preparing to run test 208. ntuple = 0
dab_filltree2| 0| 5000000| 1|0.15507188| PASSED
dab_filltree2| 1| 5000000| 1|0.16787382| PASSED
Preparing to run test 209. ntuple = 0
dab_monobit2| 12| 65000000| 1|0.28347219| PASSED

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
@ -10,12 +11,159 @@ import (
"io"
"os"
"runtime"
"strconv"
"time"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/rng"
"github.com/safing/portbase/run"
)
func noise() {
var (
module *modules.Module
outputFile *os.File
outputSize uint64 = 1000000
)
func init() {
module = modules.Register("main", prep, start, nil, "rng")
}
func main() {
runtime.GOMAXPROCS(1)
os.Exit(run.Run())
}
func prep() error {
if len(os.Args) < 3 {
fmt.Printf("usage: ./%s {fortuna|tickfeeder} <file> [output size in MB]", os.Args[0])
return modules.ErrCleanExit
}
switch os.Args[1] {
case "fortuna":
case "tickfeeder":
default:
return fmt.Errorf("usage: %s {fortuna|tickfeeder}", os.Args[0])
}
if len(os.Args) > 3 {
n, err := strconv.ParseUint(os.Args[3], 10, 64)
if err != nil {
return fmt.Errorf("failed to parse output size: %s", err)
}
outputSize = n * 1000000
}
var err error
outputFile, err = os.OpenFile(os.Args[2], os.O_CREATE|os.O_WRONLY, 0660)
if err != nil {
return fmt.Errorf("failed to open output file: %s", err)
}
return nil
}
//nolint:gocognit
func start() error {
// generates 1MB and writes to stdout
log.Infof("writing %dMB to stdout, a \".\" will be printed at every 1024 bytes.", outputSize/1000000)
switch os.Args[1] {
case "fortuna":
module.StartWorker("fortuna", fortuna)
case "tickfeeder":
module.StartWorker("noise", noise)
module.StartWorker("tickfeeder", tickfeeder)
default:
return fmt.Errorf("usage: ./%s {fortuna|tickfeeder}", os.Args[0])
}
return nil
}
func fortuna(_ context.Context) error {
var bytesWritten uint64
for {
if module.IsStopping() {
return nil
}
b, err := rng.Bytes(64)
if err != nil {
return err
}
_, err = outputFile.Write(b)
if err != nil {
return err
}
bytesWritten += 64
if bytesWritten%1024 == 0 {
os.Stderr.WriteString(".")
}
if bytesWritten%65536 == 0 {
fmt.Fprintf(os.Stderr, "\n%d bytes written\n", bytesWritten)
}
if bytesWritten >= outputSize {
os.Stderr.WriteString("\n")
break
}
}
go modules.Shutdown() //nolint:errcheck
return nil
}
func tickfeeder(ctx context.Context) error {
var bytesWritten uint64
var value int64
var pushes int
for {
if module.IsStopping() {
return nil
}
time.Sleep(10 * time.Nanosecond)
value = (value << 1) | (time.Now().UnixNano() % 2)
pushes++
if pushes >= 64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(value))
_, err := outputFile.Write(b)
if err != nil {
return err
}
bytesWritten += 8
if bytesWritten%1024 == 0 {
os.Stderr.WriteString(".")
}
if bytesWritten%65536 == 0 {
fmt.Fprintf(os.Stderr, "\n%d bytes written\n", bytesWritten)
}
pushes = 0
}
if bytesWritten >= outputSize {
os.Stderr.WriteString("\n")
break
}
}
go modules.Shutdown() //nolint:errcheck
return nil
}
func noise(ctx context.Context) error {
// do some aes ctr for noise
key, _ := hex.DecodeString("6368616e676520746869732070617373")
@ -33,93 +181,11 @@ func noise() {
stream := cipher.NewCTR(block, iv)
for {
stream.XORKeyStream(data, data)
select {
case <-ctx.Done():
return nil
default:
stream.XORKeyStream(data, data)
}
}
}
//nolint:gocognit
func main() {
// generates 1MB and writes to stdout
runtime.GOMAXPROCS(1)
if len(os.Args) < 2 {
fmt.Printf("usage: ./%s {fortuna|tickfeeder}\n", os.Args[0])
os.Exit(1)
}
os.Stderr.WriteString("writing 1MB to stdout, a \".\" will be printed at every 1024 bytes.\n")
var bytesWritten int
switch os.Args[1] {
case "fortuna":
err := rng.Start()
if err != nil {
panic(err)
}
for {
b, err := rng.Bytes(64)
if err != nil {
panic(err)
}
os.Stdout.Write(b)
bytesWritten += 64
if bytesWritten%1024 == 0 {
os.Stderr.WriteString(".")
}
if bytesWritten%65536 == 0 {
fmt.Fprintf(os.Stderr, "\n%d bytes written\n", bytesWritten)
}
if bytesWritten >= 1000000 {
os.Stderr.WriteString("\n")
break
}
}
os.Exit(0)
case "tickfeeder":
go noise()
var value int64
var pushes int
for {
time.Sleep(10 * time.Nanosecond)
value = (value << 1) | (time.Now().UnixNano() % 2)
pushes++
if pushes >= 64 {
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(value))
// fmt.Fprintf(os.Stderr, "write: %d\n", value)
os.Stdout.Write(b)
bytesWritten += 8
if bytesWritten%1024 == 0 {
os.Stderr.WriteString(".")
}
if bytesWritten%65536 == 0 {
fmt.Fprintf(os.Stderr, "\n%d bytes written\n", bytesWritten)
}
pushes = 0
}
if bytesWritten >= 1000000 {
os.Stderr.WriteString("\n")
break
}
}
os.Exit(0)
default:
fmt.Printf("usage: %s {fortuna|tickfeeder}\n", os.Args[0])
os.Exit(1)
}
}

31
test
View file

@ -4,7 +4,6 @@ warnings=0
errors=0
scripted=0
goUp="\\e[1A"
all=0
fullTestFlags="-short"
install=0
@ -99,7 +98,6 @@ while true; do
shift 1
;;
"all")
all=1
fullTestFlags=""
shift 1
;;
@ -119,10 +117,9 @@ if [[ $install -eq 1 ]]; then
echo "installing dependencies..."
echo "$ go get -u golang.org/x/lint/golint"
go get -u golang.org/x/lint/golint
if [[ $all -eq 1 ]]; then
echo "$ go get -u github.com/golangci/golangci-lint/cmd/golangci-lint"
go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
fi
# TODO: update golangci-lint version regularly
echo "$ curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.24.0"
curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.24.0
exit 0
fi
@ -141,16 +138,14 @@ if [[ $(which golint) == "" ]]; then
echo "or run: ./test install"
exit 1
fi
if [[ $all -eq 1 ]]; then
if [[ $(which golangci-lint) == "" ]]; then
echo "golangci-lint command not found"
echo "install locally with: go get -u github.com/golangci/golangci-lint/cmd/golangci-lint"
echo "or run: ./test install all"
echo ""
echo "hint: install for CI with: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin vX.Y.Z"
echo "don't forget to specify the version you want"
exit 1
fi
if [[ $(which golangci-lint) == "" ]]; then
echo "golangci-lint command not found"
echo "install with: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin vX.Y.Z"
echo "don't forget to specify the version you want"
echo "or run: ./test install"
echo ""
echo "alternatively, install the current dev version with: go get -u github.com/golangci/golangci-lint/cmd/golangci-lint"
exit 1
fi
# target selection
@ -179,9 +174,7 @@ for package in $packages; do
checkformat $package
run golint -set_exit_status -min_confidence 1.0 $package
run go vet $package
if [[ $all -eq 1 ]]; then
run golangci-lint run $GOPATH/src/$package
fi
run golangci-lint run $GOPATH/src/$package
run go test -cover $fullTestFlags $package
done

View file

@ -2,20 +2,18 @@ package osdetail
import (
"fmt"
"regexp"
"strings"
"sync"
"github.com/hashicorp/go-version"
versionCmp "github.com/hashicorp/go-version"
"github.com/shirou/gopsutil/host"
)
var (
versionRe = regexp.MustCompile(`[0-9\.]+`)
// versionRe = regexp.MustCompile(`[0-9\.]+`)
windowsNTVersion string
windowsNTVersionForCmp *versionCmp.Version
windowsNTVersionForCmp *version.Version
fetching sync.Mutex
fetched bool
@ -49,13 +47,13 @@ func WindowsNTVersion() (string, error) {
}
// IsAtLeastWindowsNTVersion returns whether the current WindowsNT version is at least the given version or newer.
func IsAtLeastWindowsNTVersion(version string) (bool, error) {
func IsAtLeastWindowsNTVersion(v string) (bool, error) {
_, err := WindowsNTVersion()
if err != nil {
return false, err
}
versionForCmp, err := versionCmp.NewVersion(version)
versionForCmp, err := version.NewVersion(v)
if err != nil {
return false, err
}
@ -74,9 +72,7 @@ func IsAtLeastWindowsNTVersionWithDefault(v string, defaultValue bool) bool {
// IsAtLeastWindowsVersion returns whether the current Windows version is at least the given version or newer.
func IsAtLeastWindowsVersion(v string) (bool, error) {
var (
NTVersion string
)
var NTVersion string
switch v {
case "7":
NTVersion = "6.1"