Remove config and use service workers for goroutines

Daniel 2020-04-16 13:13:40 +02:00
parent c58d6a0f30
commit 30a6948009
7 changed files with 125 additions and 179 deletions
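The theme of the change: every background goroutine in the rng package now runs as a module service worker with a `func(ctx context.Context) error` signature instead of a bare `go ...()` call guarded by a package-level `shutdownSignal` channel. Below is a minimal sketch of that worker shape, pieced together from the calls visible in this diff; `exampleWorker` is hypothetical, and reading the second argument of `StartServiceWorker` as a restart delay/backoff of zero is an assumption about portbase/modules, not something this diff shows.

```go
package rng

import (
	"context"
	"time"
)

// exampleWorker is a hypothetical worker in the shape this commit
// standardizes on: it receives a context and returns an error.
func exampleWorker(ctx context.Context) error {
	for {
		select {
		case <-ctx.Done():
			// module shutdown: return nil for a clean stop
			return nil
		case <-time.After(time.Second):
			// do one unit of periodic work here
		}
	}
}

// Started like the workers in this diff (name, 0, worker function):
//   module.StartServiceWorker("example worker", 0, exampleWorker)
```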

View file

@@ -1,17 +1,20 @@
package rng
import (
"context"
"encoding/binary"
"github.com/tevino/abool"
"github.com/safing/portbase/config"
"github.com/safing/portbase/container"
)
const (
minFeedEntropy = 256
)
var (
rngFeeder = make(chan []byte)
minFeedEntropy config.IntOption
)
// The Feeder is used to feed entropy to the RNG.
@@ -34,7 +37,7 @@ func NewFeeder() *Feeder {
needsEntropy: abool.NewBool(true),
buffer: container.New(),
}
go new.run()
module.StartServiceWorker("feeder", 0, new.run)
return new
}
@@ -87,7 +90,7 @@ func (f *Feeder) CloseFeeder() {
f.input <- nil
}
func (f *Feeder) run() {
func (f *Feeder) run(ctx context.Context) error {
defer f.needsEntropy.UnSet()
for {
@@ -97,23 +100,26 @@ func (f *Feeder) run() {
for {
select {
case newEntropy := <-f.input:
if newEntropy != nil {
// check if feed has been closed
if newEntropy == nil {
return nil
}
// append to buffer
f.buffer.Append(newEntropy.data)
f.entropy += int64(newEntropy.entropy)
if f.entropy >= minFeedEntropy() {
if f.entropy >= minFeedEntropy {
break gather
}
}
case <-shutdownSignal:
return
case <-ctx.Done():
return nil
}
}
// feed
f.needsEntropy.UnSet()
select {
case rngFeeder <- f.buffer.CompileData():
case <-shutdownSignal:
return
case <-ctx.Done():
return nil
}
f.buffer = container.New()
}
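Taken together, the new `run` loop gathers `*entropyData` items from `f.input` until at least `minFeedEntropy` bits have accumulated, then hands the compiled buffer to `rngFeeder`, and exits cleanly when the context is cancelled or a nil item signals `CloseFeeder`. A minimal sketch of an entropy source written against this API, mirroring how the reworked osFeeder submits data; `exampleSource` and its sample bytes are hypothetical.

```go
package rng

import "context"

// exampleSource is a hypothetical entropy source in the style this commit
// introduces: it feeds the RNG by sending *entropyData on feeder.input,
// exactly as the reworked osFeeder does.
func exampleSource(ctx context.Context) error {
	feeder := NewFeeder()
	defer feeder.CloseFeeder()

	sample := []byte{0x42, 0x13, 0x37, 0x99} // 4 bytes from some source

	select {
	case feeder.input <- &entropyData{
		data:    sample,
		entropy: 32, // claimed entropy in bits
	}:
	case <-ctx.Done():
		return nil
	}
	return nil
}
```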

View file

@@ -1,29 +1,27 @@
package rng
import (
"context"
"time"
)
var (
fullFeedDuration = 100 * time.Millisecond
)
func getFullFeedDuration() time.Duration {
// full feed every 5x time of reseedAfterSeconds
secsUntilFullFeed := reseedAfterSeconds() * 5
secsUntilFullFeed := reseedAfterSeconds * 5
// full feed at most once per minute
if secsUntilFullFeed < 60 {
secsUntilFullFeed = 60
// full feed at most once every ten minutes
if secsUntilFullFeed < 600 {
secsUntilFullFeed = 600
}
return time.Duration(secsUntilFullFeed * int64(time.Second))
return time.Duration(secsUntilFullFeed) * time.Second
}
func fullFeeder() {
func fullFeeder(ctx context.Context) error {
fullFeedDuration := getFullFeedDuration()
for {
select {
case <-time.After(fullFeedDuration):
@@ -39,11 +37,8 @@ func fullFeeder() {
}
rngLock.Unlock()
case <-shutdownSignal:
return
}
fullFeedDuration = getFullFeedDuration()
case <-ctx.Done():
return nil
}
}
}
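With the config option gone, `getFullFeedDuration` now works off the `reseedAfterSeconds = 600` constant introduced in the get.go hunk below. A minimal standalone sketch of the resulting arithmetic, under that assumption:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors getFullFeedDuration with the constant from this commit.
	const reseedAfterSeconds = 600 // from get.go

	secsUntilFullFeed := reseedAfterSeconds * 5 // 3000
	if secsUntilFullFeed < 600 {
		secsUntilFullFeed = 600 // floor: at most one full feed every ten minutes
	}
	fmt.Println(time.Duration(secsUntilFullFeed) * time.Second) // prints 50m0s
}
```

So a full feed happens every 50 minutes, and recalculating the duration after each feed (the removed line above) is no longer needed, since the value can no longer change at runtime.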

View file

@@ -6,19 +6,19 @@ import (
"io"
"math"
"time"
"github.com/safing/portbase/config"
)
const (
reseedAfterSeconds = 600 // ten minutes
reseedAfterBytes = 1048576 // one megabyte
)
var (
// Reader provides a global instance to read from the RNG.
Reader io.Reader
rngBytesRead int64
rngBytesRead uint64
rngLastFeed = time.Now()
reseedAfterSeconds config.IntOption
reseedAfterBytes config.IntOption
)
// reader provides an io.Reader interface
@@ -32,8 +32,8 @@ func checkEntropy() (err error) {
if !rngReady {
return errors.New("RNG is not ready yet")
}
if rngBytesRead > reseedAfterBytes() ||
int64(time.Since(rngLastFeed).Seconds()) > reseedAfterSeconds() {
if rngBytesRead > reseedAfterBytes ||
int(time.Since(rngLastFeed).Seconds()) > reseedAfterSeconds {
select {
case r := <-rngFeeder:
rng.Reseed(r)
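The reading side is unchanged in shape: `checkEntropy` now compares against the hard-coded thresholds, reseeding from the feeder once more than `reseedAfterBytes` have been read or more than `reseedAfterSeconds` have passed since the last feed. For orientation, a hedged sketch of the consumer-facing calls exercised in rng_test.go further down; the call shapes are copied from the test, and anything beyond a `(value, error)` return is an assumption.

```go
package rng

import "fmt"

// exampleConsumer is a hypothetical caller of the package's read-side API,
// using the call shapes from rng_test.go.
func exampleConsumer() error {
	b := make([]byte, 32)
	if _, err := Read(b); err != nil { // fill b from the Fortuna generator
		return fmt.Errorf("Read failed: %s", err)
	}

	n, err := Number(100) // a random number up to 100 (exact bounds assumed)
	if err != nil {
		return fmt.Errorf("Number failed: %s", err)
	}
	fmt.Println("random number:", n)
	return nil
}
```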

View file

@@ -1,35 +1,36 @@
package rng
import (
"context"
"crypto/rand"
"time"
"github.com/safing/portbase/log"
"fmt"
)
func osFeeder() {
func osFeeder(ctx context.Context) error {
entropyBytes := minFeedEntropy / 8
feeder := NewFeeder()
defer feeder.CloseFeeder()
for {
// get feed entropy
minEntropyBytes := int(minFeedEntropy())/8 + 1
if minEntropyBytes < 32 {
minEntropyBytes = 64
}
// get entropy
osEntropy := make([]byte, minEntropyBytes)
// gather
osEntropy := make([]byte, entropyBytes)
n, err := rand.Read(osEntropy)
if err != nil {
log.Errorf("could not read entropy from os: %s", err)
time.Sleep(10 * time.Second)
return fmt.Errorf("could not read entropy from os: %s", err)
}
if n != minEntropyBytes {
log.Errorf("could not read enough entropy from os: got only %d bytes instead of %d", n, minEntropyBytes)
time.Sleep(10 * time.Second)
if n != entropyBytes {
return fmt.Errorf("could not read enough entropy from os: got only %d bytes instead of %d", n, entropyBytes)
}
// feed
feeder.SupplyEntropy(osEntropy, minEntropyBytes*8)
select {
case feeder.input <- &entropyData{
data: osEntropy,
entropy: entropyBytes * 8,
}:
case <-ctx.Done():
return nil
}
}
}

View file

@@ -3,13 +3,13 @@ package rng
import (
"crypto/aes"
"crypto/cipher"
"errors"
"fmt"
"sync"
"github.com/aead/serpent"
"github.com/seehuhn/fortuna"
"github.com/safing/portbase/config"
"github.com/safing/portbase/modules"
)
@@ -17,108 +17,46 @@ var (
rng *fortuna.Generator
rngLock sync.Mutex
rngReady = false
rngCipherOption config.StringOption
shutdownSignal = make(chan struct{})
rngCipher = "aes"
// possible values: aes, serpent
module *modules.Module
)
func init() {
modules.Register("random", prep, Start, nil)
}
func prep() error {
err := config.Register(&config.Option{
Name: "RNG Cipher",
Key: "random/rng_cipher",
Description: "Cipher to use for the Fortuna RNG. Requires restart to take effect.",
OptType: config.OptTypeString,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
ExternalOptType: "string list",
DefaultValue: "aes",
ValidationRegex: "^(aes|serpent)$",
})
if err != nil {
return err
}
rngCipherOption = config.GetAsString("random/rng_cipher", "aes")
err = config.Register(&config.Option{
Name: "Minimum Feed Entropy",
Key: "random/min_feed_entropy",
Description: "The minimum amount of entropy before a entropy source is feed to the RNG, in bits.",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: 256,
ValidationRegex: "^[0-9]{3,5}$",
})
if err != nil {
return err
}
minFeedEntropy = config.Concurrent.GetAsInt("random/min_feed_entropy", 256)
err = config.Register(&config.Option{
Name: "Reseed after x seconds",
Key: "random/reseed_after_seconds",
Description: "Number of seconds until reseed",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: 360, // ten minutes
ValidationRegex: "^[1-9][0-9]{1,5}$",
})
if err != nil {
return err
}
reseedAfterSeconds = config.Concurrent.GetAsInt("random/reseed_after_seconds", 360)
err = config.Register(&config.Option{
Name: "Reseed after x bytes",
Key: "random/reseed_after_bytes",
Description: "Number of fetched bytes until reseed",
OptType: config.OptTypeInt,
ExpertiseLevel: config.ExpertiseLevelDeveloper,
ReleaseLevel: config.ReleaseLevelExperimental,
DefaultValue: 1000000, // one megabyte
ValidationRegex: "^[1-9][0-9]{2,9}$",
})
if err != nil {
return err
}
reseedAfterBytes = config.GetAsInt("random/reseed_after_bytes", 1000000)
return nil
module = modules.Register("random", nil, start, nil)
}
func newCipher(key []byte) (cipher.Block, error) {
cipher := rngCipherOption()
switch cipher {
switch rngCipher {
case "aes":
return aes.NewCipher(key)
case "serpent":
return serpent.NewCipher(key)
default:
return nil, fmt.Errorf("unknown or unsupported cipher: %s", cipher)
return nil, fmt.Errorf("unknown or unsupported cipher: %s", rngCipher)
}
}
// Start starts the RNG. Normally, this should be only called by the portbase/modules package.
func Start() (err error) {
func start() error {
rngLock.Lock()
defer rngLock.Unlock()
rng = fortuna.NewGenerator(newCipher)
if rng == nil {
return errors.New("failed to initialize rng")
}
rngReady = true
// random source: OS
go osFeeder()
module.StartServiceWorker("os rng feeder", 0, osFeeder)
// random source: goroutine ticks
go tickFeeder()
module.StartServiceWorker("tick rng feeder", 0, tickFeeder)
// full feeder
go fullFeeder()
module.StartServiceWorker("full feeder", 0, fullFeeder)
return nil
}
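For reference, the tunables that the removed `prep` used to register as config options are now plain package-level values spread over entropy.go, get.go and rng.go; consolidated here from the hunks above (note that the reseed interval moves from the old default of 360 s to 600 s, finally matching its "ten minutes" comment, and the byte threshold from 1000000 to 1048576):

```go
// Consolidated view only; in the code these live in separate files.
const (
	minFeedEntropy     = 256     // was random/min_feed_entropy (default 256)
	reseedAfterSeconds = 600     // was random/reseed_after_seconds (default 360)
	reseedAfterBytes   = 1048576 // was random/reseed_after_bytes (default 1000000)
)

var rngCipher = "aes" // was random/rng_cipher; possible values: aes, serpent
```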

View file

@@ -2,17 +2,10 @@ package rng
import (
"testing"
"github.com/safing/portbase/config"
)
func init() {
err := prep()
if err != nil {
panic(err)
}
err = Start()
err := start()
if err != nil {
panic(err)
}
@@ -21,25 +14,17 @@ func init() {
func TestRNG(t *testing.T) {
key := make([]byte, 16)
err := config.SetConfigOption("random/rng_cipher", "aes")
if err != nil {
t.Errorf("failed to set random/rng_cipher config: %s", err)
}
_, err = newCipher(key)
rngCipher = "aes"
_, err := newCipher(key)
if err != nil {
t.Errorf("failed to create aes cipher: %s", err)
}
rng.Reseed(key)
err = config.SetConfigOption("random/rng_cipher", "serpent")
if err != nil {
t.Errorf("failed to set random/rng_cipher config: %s", err)
}
rngCipher = "serpent"
_, err = newCipher(key)
if err != nil {
t.Errorf("failed to create serpent cipher: %s", err)
}
rng.Reseed(key)
b := make([]byte, 32)
_, err = Read(b)
@@ -55,4 +40,9 @@ func TestRNG(t *testing.T) {
if err != nil {
t.Errorf("Bytes failed: %s", err)
}
_, err = Number(100)
if err != nil {
t.Errorf("Number failed: %s", err)
}
}

View file

@@ -1,27 +1,25 @@
package rng
import (
"context"
"encoding/binary"
"time"
)
var (
tickDuration = 1 * time.Millisecond
)
func getTickDuration() time.Duration {
func getTickFeederTickDuration() time.Duration {
// be ready in 1/10 time of reseedAfterSeconds
msecsAvailable := reseedAfterSeconds() * 100
// ex.: reseed after 10 minutes: msecsAvailable = 36000
msecsAvailable := reseedAfterSeconds * 100
// ex.: reseed after 10 minutes: msecsAvailable = 60000
// have full entropy after 5 minutes
// one tick generates 0,125 bits of entropy
ticksNeeded := minFeedEntropy() * 8
ticksNeeded := minFeedEntropy * 8
// ex.: minimum entropy is 256: ticksNeeded = 2048
// msecs between ticks
tickMsecs := msecsAvailable / ticksNeeded
// ex.: tickMsecs = 17(,578125)
// ex.: tickMsecs = 29(,296875)
// use a minimum of 10 msecs per tick for good entropy
// it would take 21 seconds to get full 256 bits of entropy with 10msec ticks
@@ -29,33 +27,51 @@ func getTickDuration() {
tickMsecs = 10
}
return time.Duration(tickMsecs * int64(time.Millisecond))
return time.Duration(tickMsecs) * time.Millisecond
}
// tickFeeder is a really simple entropy feeder that adds the least significant bit of the current nanosecond unixtime to its pool every time it 'ticks'.
// The more work the program does, the better the quality, as the internal scheduler cannot immediately run the goroutine when it's ready.
func tickFeeder() {
func tickFeeder(ctx context.Context) error {
var value int64
var pushes int
feeder := NewFeeder()
defer feeder.CloseFeeder()
tickDuration := getTickFeederTickDuration()
for {
select {
case <-time.After(tickDuration):
// wait for tick
time.Sleep(tickDuration)
// add tick value
value = (value << 1) | (time.Now().UnixNano() % 2)
pushes++
if pushes >= 64 {
feeder.SupplyEntropyAsInt(value, 8)
// convert to []byte
b := make([]byte, 8)
binary.LittleEndian.PutUint64(b, uint64(value))
// reset
pushes = 0
}
tickDuration = getTickDuration()
case <-shutdownSignal:
return
// feed
select {
case feeder.input <- &entropyData{
data: b,
entropy: 8,
}:
case <-ctx.Done():
return nil
}
} else {
// check if are done
select {
case <-ctx.Done():
return nil
default:
}
}
}
}
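The worked examples in the comments can be checked against the new constants. A minimal standalone sketch of the tick-duration arithmetic, assuming `reseedAfterSeconds = 600` and `minFeedEntropy = 256` from this commit:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors getTickFeederTickDuration with this commit's constants.
	const (
		reseedAfterSeconds = 600 // from get.go
		minFeedEntropy     = 256 // from entropy.go
	)

	msecsAvailable := reseedAfterSeconds * 100 // 60000: ready in 1/10 of the reseed interval
	ticksNeeded := minFeedEntropy * 8          // 2048 ticks at 0.125 bits each
	tickMsecs := msecsAvailable / ticksNeeded  // 29 (integer division of 29.296875)
	if tickMsecs < 10 {
		tickMsecs = 10 // lower bound from the original code
	}
	fmt.Println(time.Duration(tickMsecs) * time.Millisecond) // prints 29ms
}
```

At 29 ms per tick, the 2048 ticks needed to credit 256 bits (64 ticks per 8-bit push, 32 pushes) take roughly a minute, which is the stated goal of being ready in one tenth of the reseed interval.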