Merge pull request #1332 from safing/fix/things-2

Fix a couple things
Daniel Hovie 2023-10-02 16:50:39 +02:00 committed by GitHub
commit 1cd44e919b
15 changed files with 143 additions and 249 deletions


@@ -95,7 +95,7 @@ func downloadUpdates() error {
 	}
 	// Download all required updates.
-	err = registry.DownloadUpdates(context.TODO(), false)
+	err = registry.DownloadUpdates(context.TODO(), true)
 	if err != nil {
 		return err
 	}


@@ -154,7 +154,7 @@ func verifyUpdates(ctx context.Context) error {
 	// Re-download broken files.
 	registry.MandatoryUpdates = helper.MandatoryUpdates()
 	registry.AutoUnpack = helper.AutoUnpackUpdates()
-	err = registry.DownloadUpdates(ctx, false)
+	err = registry.DownloadUpdates(ctx, true)
 	if err != nil {
 		return fmt.Errorf("failed to re-download files: %w", err)
 	}


@@ -154,7 +154,7 @@ func reportBandwidth(ctx context.Context, objs bpfObjects, bandwidthUpdates chan
 		case <-ctx.Done():
 			return
 		default:
-			log.Warningf("ebpf: bandwidth update queue is full (updated=%d, skipped=%d), skipping rest of batch", updated, skipped)
+			log.Warningf("ebpf: bandwidth update queue is full (updated=%d, skipped=%d), ignoring rest of batch", updated, skipped)
 			return
 		}
 	}
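
For readers unfamiliar with the pattern in this hunk: the reporter attempts a non-blocking send into the updates channel, and when the buffer is full it logs once and abandons the rest of the batch rather than stalling the read loop. A minimal, self-contained sketch of that pattern — the channel size, payload type, and names here are stand-ins, not the actual portmaster types:

package main

import "fmt"

// drain mimics the select-with-default send used above: it stops
// as soon as the buffered channel is full instead of blocking.
func drain(batch []int, updates chan<- int) {
	var updated, skipped int
	for _, u := range batch {
		select {
		case updates <- u:
			updated++
		default:
			skipped = len(batch) - updated
			fmt.Printf("queue full (updated=%d, skipped=%d), ignoring rest of batch\n", updated, skipped)
			return
		}
	}
}

func main() {
	updates := make(chan int, 2) // tiny buffer to force the full case
	drain([]int{1, 2, 3, 4}, updates)
}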

go.mod

@@ -20,9 +20,9 @@ require (
 	github.com/mitchellh/go-server-timing v1.0.1
 	github.com/oschwald/maxminddb-golang v1.12.0
 	github.com/safing/jess v0.3.1
-	github.com/safing/portbase v0.17.5
+	github.com/safing/portbase v0.18.1
 	github.com/safing/portmaster-android/go v0.0.0-20230830120134-3226ceac3bec
-	github.com/safing/spn v0.6.22
+	github.com/safing/spn v0.6.23
 	github.com/shirou/gopsutil v3.21.11+incompatible
 	github.com/spf13/cobra v1.7.0
 	github.com/spkg/zipfs v0.7.1
@@ -84,7 +84,7 @@ require (
 	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/valyala/fastrand v1.1.0 // indirect
 	github.com/valyala/histogram v1.2.0 // indirect
-	github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect
+	github.com/vmihailenco/msgpack/v5 v5.4.0 // indirect
 	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/yusufpapurcu/wmi v1.2.3 // indirect
@@ -101,5 +101,5 @@ require (
 	modernc.org/libc v1.24.1 // indirect
 	modernc.org/mathutil v1.6.0 // indirect
 	modernc.org/memory v1.7.2 // indirect
-	modernc.org/sqlite v1.25.0 // indirect
+	modernc.org/sqlite v1.26.0 // indirect
 )

go.sum

@@ -240,12 +240,12 @@ github.com/safing/jess v0.3.1 h1:cMZVhi2whW/YdD98MPLeLIWJndQ7o2QVt2HefQ/ByFA=
 github.com/safing/jess v0.3.1/go.mod h1:aj73Eot1zm2ETkJuw9hJlIO8bRom52uBbsCHemvlZmA=
 github.com/safing/portbase v0.15.2/go.mod h1:5bHi99fz7Hh/wOsZUOI631WF9ePSHk57c4fdlOMS91Y=
 github.com/safing/portbase v0.16.2/go.mod h1:mzNCWqPbO7vIYbbK5PElGbudwd2vx4YPNawymL8Aro8=
-github.com/safing/portbase v0.17.5 h1:0gq0tgPLbKlK+xq7WM+Kcutu5HgYIglxBE3QqN5tIAA=
-github.com/safing/portbase v0.17.5/go.mod h1:suLPSjOTqA7iDLozis5OI7PSw+wqJNT8SLvdBhRPlqI=
+github.com/safing/portbase v0.18.1 h1:IvWyovJdvJ8yUPH1Fi+BtgPZ3NGyuOxbKM2bg3nc/H8=
+github.com/safing/portbase v0.18.1/go.mod h1:suLPSjOTqA7iDLozis5OI7PSw+wqJNT8SLvdBhRPlqI=
 github.com/safing/portmaster-android/go v0.0.0-20230830120134-3226ceac3bec h1:oSJY1seobofPwpMoJRkCgXnTwfiQWNfGMCPDfqgAEfg=
 github.com/safing/portmaster-android/go v0.0.0-20230830120134-3226ceac3bec/go.mod h1:abwyAQrZGemWbSh/aCD9nnkp0SvFFf/mGWkAbOwPnFE=
-github.com/safing/spn v0.6.22 h1:YeSDKnLOPlTnJT4NIdGrTtC21Nv9ApwkHrTmfyQV7OY=
-github.com/safing/spn v0.6.22/go.mod h1:MgWfUDkYqi46A+EcxayLD0tc519KBiVEQ6mfAjHIx/4=
+github.com/safing/spn v0.6.23 h1:nIPhvl+7pj/yhIMhcCc4v0z14X56/bwTC24d0k9Y9ko=
+github.com/safing/spn v0.6.23/go.mod h1:MgWfUDkYqi46A+EcxayLD0tc519KBiVEQ6mfAjHIx/4=
 github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/seehuhn/fortuna v1.0.1 h1:lu9+CHsmR0bZnx5Ay646XvCSRJ8PJTi5UYJwDBX68H0=
@@ -322,8 +322,9 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G
 github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
 github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
 github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
-github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
-github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/msgpack/v5 v5.4.0 h1:hRM0digJwyR6vll33NNAwCFguy5JuBD6jxDmQP3l608=
+github.com/vmihailenco/msgpack/v5 v5.4.0/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok=
 github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
 github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -494,7 +495,7 @@ modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
 modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
 modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
 modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
-modernc.org/sqlite v1.25.0 h1:AFweiwPNd/b3BoKnBOfFm+Y260guGMF+0UFk0savqeA=
-modernc.org/sqlite v1.25.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU=
+modernc.org/sqlite v1.26.0 h1:SocQdLRSYlA8W99V8YH0NES75thx19d9sB/aFc4R8Lw=
+modernc.org/sqlite v1.26.0/go.mod h1:FL3pVXie73rg3Rii6V/u5BoHlSoyeZeIgKZEgHARyCU=
 zombiezen.com/go/sqlite v0.13.1 h1:qDzxyWWmMtSSEH5qxamqBFmqA2BLSSbtODi3ojaE02o=
 zombiezen.com/go/sqlite v0.13.1/go.mod h1:Ht/5Rg3Ae2hoyh1I7gbWtWAl89CNocfqeb/aAMTkJr4=


@@ -4,9 +4,9 @@ package proc
 import (
 	"errors"
-	"fmt"
 	"io/fs"
 	"os"
+	"strconv"
 	"time"
 	"github.com/safing/portbase/log"
@@ -19,7 +19,7 @@
 )
 // GetPID returns the already existing pid of the given socket info or searches for it.
-// This also acts as a getter for socket.*Info.PID, as locking for that occurs here.
+// This also acts as a getter for socket.Info.PID, as locking for that occurs here.
 func GetPID(socketInfo socket.Info) (pid int) {
 	// Get currently assigned PID to the socket info.
 	currentPid := socketInfo.GetPID()
@@ -41,7 +41,7 @@ func GetPID(socketInfo socket.Info) (pid int) {
 // findPID returns the pid of the given uid and socket inode.
 func findPID(uid, inode int) (pid int) {
-	socketName := fmt.Sprintf("socket:[%d]", inode)
+	socketName := "socket:[" + strconv.Itoa(inode) + "]"
 	for i := 0; i <= lookupRetries; i++ {
 		var pidsUpdated bool
@@ -83,7 +83,7 @@ func findPID(uid, inode int) (pid int) {
 	}
 	// We have updated the PID map, but still cannot find anything.
-	// So, there is nothing we can other than wait a little for the kernel to
+	// So, there is nothing we can do other than to wait a little for the kernel to
 	// populate the information.
 	// Wait after each try, except for the last iteration
@@ -97,16 +97,20 @@
 }
 func findSocketFromPid(pid int, socketName string) bool {
-	entries := readDirNames(fmt.Sprintf("/proc/%d/fd", pid))
+	socketBase := "/proc/" + strconv.Itoa(pid) + "/fd"
+	entries := readDirNames(socketBase)
 	if len(entries) == 0 {
 		return false
 	}
-	for _, entry := range entries {
-		link, err := os.Readlink(fmt.Sprintf("/proc/%d/fd/%s", pid, entry))
+	socketBase += "/"
+	// Look through the FDs in reverse order, because higher/newer FDs will be
+	// more likely to be searched for.
+	for i := len(entries) - 1; i >= 0; i-- {
+		link, err := os.Readlink(socketBase + entries[i])
 		if err != nil {
 			if !errors.Is(err, fs.ErrNotExist) {
-				log.Warningf("proc: failed to read link /proc/%d/fd/%s: %s", pid, entry, err)
+				log.Warningf("proc: failed to read link /proc/%d/fd/%s: %s", pid, entries[i], err)
 			}
 			continue
 		}
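
The fmt.Sprintf-to-strconv change in this file is a common Go micro-optimization: fmt.Sprintf parses the format string and boxes its arguments on every call, while strconv.Itoa plus plain concatenation does neither, which adds up in a per-packet lookup path. A quick illustration (the value is made up):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	inode := 1337

	// Before: goes through fmt's format parsing and interface boxing.
	a := fmt.Sprintf("socket:[%d]", inode)

	// After: direct integer-to-string conversion and concatenation.
	b := "socket:[" + strconv.Itoa(inode) + "]"

	fmt.Println(a == b) // true — same output, cheaper to build
}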


@@ -4,12 +4,12 @@ package proc
 import (
 	"errors"
-	"fmt"
 	"io/fs"
 	"os"
+	"strconv"
 	"sync"
 	"syscall"
+	"time"
 	"github.com/safing/portbase/log"
 	"github.com/safing/portbase/utils"
@@ -19,7 +19,7 @@
 	// pidsByUserLock is also used for locking the socketInfo.PID on all socket.*Info structs.
 	pidsByUser     = make(map[int][]int)
 	pidsByUserLock sync.RWMutex
-	fetchPidsByUser utils.OnceAgain
+	fetchPidsByUser = utils.NewCallLimiter(10 * time.Millisecond)
 )
 // getPidsByUser returns the cached PIDs for the given UID.
@@ -31,7 +31,7 @@ func getPidsByUser(uid int) (pids []int, ok bool) {
 	return
 }
-// updatePids fetches and creates a new pidsByUser map using utils.OnceAgain.
+// updatePids fetches and creates a new pidsByUser map using a call limiter.
 func updatePids() {
 	fetchPidsByUser.Do(func() {
 		newPidsByUser := make(map[int][]int)
@@ -50,7 +50,7 @@
 				continue entryLoop
 			}
-			statData, err := os.Stat(fmt.Sprintf("/proc/%d", pid))
+			statData, err := os.Stat("/proc/" + strconv.FormatInt(pid, 10))
 			if err != nil {
 				if !errors.Is(err, fs.ErrNotExist) {
 					log.Warningf("proc: could not stat /proc/%d: %s", pid, err)


@@ -37,7 +37,7 @@ func Exists(pktInfo *packet.Info, now time.Time) (exists bool) {
 func (table *tcpTable) exists(pktInfo *packet.Info) (exists bool) {
 	// Update tables if older than the connection that is checked.
 	if table.lastUpdateAt.Load() < pktInfo.SeenAt.UnixNano() {
-		table.updateTables(table.updateIter.Load())
+		table.updateTables()
 	}
 	table.lock.RLock()
@@ -64,7 +64,7 @@ func (table *tcpTable) exists(pktInfo *packet.Info) (exists bool) {
 func (table *udpTable) exists(pktInfo *packet.Info, now time.Time) (exists bool) {
 	// Update tables if older than the connection that is checked.
 	if table.lastUpdateAt.Load() < pktInfo.SeenAt.UnixNano() {
-		table.updateTables(table.updateIter.Load())
+		table.updateTables()
 	}
 	table.lock.RLock()
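
Both exists checks gate the refresh on a single atomic timestamp comparison, so readers can test freshness without taking the table lock. A compact sketch of that freshness test, with types simplified (this is an illustration, not the actual portmaster struct):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// table stores its last refresh time as unix nanoseconds in an atomic,
// mirroring the lastUpdateAt field used in the hunks above.
type table struct {
	lastUpdateAt atomic.Int64
}

func (t *table) maybeUpdate(seenAt time.Time) {
	if t.lastUpdateAt.Load() < seenAt.UnixNano() {
		// Tables are older than the packet; refresh them.
		t.lastUpdateAt.Store(time.Now().UnixNano())
		fmt.Println("updated tables")
	}
}

func main() {
	var t table
	t.maybeUpdate(time.Now())                   // first packet always triggers a refresh
	t.maybeUpdate(time.Now().Add(-time.Second)) // older packet: tables are fresh enough
}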


@@ -25,12 +25,12 @@ type Info struct {
 func GetInfo() *Info {
 	info := &Info{}
-	info.TCP4Connections, info.TCP4Listeners, _ = tcp4Table.updateTables(tcp4Table.updateIter.Load())
-	info.UDP4Binds, _ = udp4Table.updateTables(udp4Table.updateIter.Load())
+	info.TCP4Connections, info.TCP4Listeners = tcp4Table.updateTables()
+	info.UDP4Binds = udp4Table.updateTables()
 	if netenv.IPv6Enabled() {
-		info.TCP6Connections, info.TCP6Listeners, _ = tcp6Table.updateTables(tcp6Table.updateIter.Load())
-		info.UDP6Binds, _ = udp6Table.updateTables(udp6Table.updateIter.Load())
+		info.TCP6Connections, info.TCP6Listeners = tcp6Table.updateTables()
+		info.UDP6Binds = udp6Table.updateTables()
 	}
 	info.UpdateMeta()


@@ -66,20 +66,18 @@ func (table *tcpTable) lookup(pktInfo *packet.Info, fast bool) (
 	var (
 		connections []*socket.ConnectionInfo
 		listeners   []*socket.BindInfo
-		updateIter  uint64
 
 		dualStackConnections []*socket.ConnectionInfo
 		dualStackListeners   []*socket.BindInfo
-		dualStackUpdateIter  uint64
 	)
 	// Search for the socket until found.
 	for i := 1; i <= lookupTries; i++ {
 		// Get or update tables.
+		// Use existing tables for first check if packet was seen after last table update.
 		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.lastUpdateAt.Load() {
-			connections, listeners, updateIter = table.getCurrentTables()
+			connections, listeners = table.getCurrentTables()
 		} else {
-			connections, listeners, updateIter = table.updateTables(updateIter)
+			connections, listeners = table.updateTables()
 		}
 		// Check tables for socket.
@@ -97,11 +95,11 @@ func (table *tcpTable) lookup(pktInfo *packet.Info, fast bool) (
 			continue
 		}
 		// Get or update tables.
-		if i == 0 {
-			dualStackConnections, dualStackListeners, dualStackUpdateIter = table.dualStack.getCurrentTables()
+		// Use existing tables for first check if packet was seen after last table update.
+		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.dualStack.lastUpdateAt.Load() {
+			dualStackConnections, dualStackListeners = table.dualStack.getCurrentTables()
 		} else {
-			dualStackConnections, dualStackListeners, dualStackUpdateIter = table.dualStack.updateTables(dualStackUpdateIter)
+			dualStackConnections, dualStackListeners = table.dualStack.updateTables()
 		}
 		// Check tables for socket.
@@ -169,20 +167,17 @@ func (table *udpTable) lookup(pktInfo *packet.Info, fast bool) (
 	// Prepare variables.
 	var (
-		binds      []*socket.BindInfo
-		updateIter uint64
 
-		dualStackBinds      []*socket.BindInfo
-		dualStackUpdateIter uint64
+		binds          []*socket.BindInfo
+		dualStackBinds []*socket.BindInfo
 	)
 	// Search for the socket until found.
 	for i := 1; i <= lookupTries; i++ {
 		// Get or update tables.
 		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.lastUpdateAt.Load() {
-			binds, updateIter = table.getCurrentTables()
+			binds = table.getCurrentTables()
 		} else {
-			binds, updateIter = table.updateTables(updateIter)
+			binds = table.updateTables()
 		}
 		// Check tables for socket.
@@ -212,10 +207,10 @@ func (table *udpTable) lookup(pktInfo *packet.Info, fast bool) (
 		}
 		// Get or update tables.
-		if i == 0 {
-			dualStackBinds, dualStackUpdateIter = table.dualStack.getCurrentTables()
+		if i == 1 && pktInfo.SeenAt.UnixNano() >= table.lastUpdateAt.Load() {
+			dualStackBinds = table.dualStack.getCurrentTables()
 		} else {
-			dualStackBinds, dualStackUpdateIter = table.dualStack.updateTables(dualStackUpdateIter)
+			dualStackBinds = table.dualStack.updateTables()
 		}
 		// Check tables for socket.


@@ -7,10 +7,13 @@ import (
 	"time"
 	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/utils"
 	"github.com/safing/portmaster/network/socket"
 )
-const maxUpdateTries = 100
+const (
+	minDurationBetweenTableUpdates = 10 * time.Millisecond
+)
 type tcpTable struct {
 	version int
@@ -19,29 +22,26 @@ type tcpTable struct {
 	listeners []*socket.BindInfo
 	lock      sync.RWMutex
-	updateIter atomic.Uint64
 	// lastUpdateAt stores the time when the tables where last updated as unix nanoseconds.
 	lastUpdateAt atomic.Int64
-	fetchingLock       sync.Mutex
-	fetchingInProgress bool
-	fetchingDoneSignal chan struct{}
-	fetchTable         func() (connections []*socket.ConnectionInfo, listeners []*socket.BindInfo, err error)
+	fetchLimiter *utils.CallLimiter
+	fetchTable   func() (connections []*socket.ConnectionInfo, listeners []*socket.BindInfo, err error)
 	dualStack *tcpTable
 }
 var (
 	tcp6Table = &tcpTable{
-		version:            6,
-		fetchingDoneSignal: make(chan struct{}),
-		fetchTable:         getTCP6Table,
+		version:      6,
+		fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates),
+		fetchTable:   getTCP6Table,
 	}
 	tcp4Table = &tcpTable{
-		version:            4,
-		fetchingDoneSignal: make(chan struct{}),
-		fetchTable:         getTCP4Table,
+		version:      4,
+		fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates),
+		fetchTable:   getTCP4Table,
 	}
 )
@@ -54,97 +54,38 @@ func EnableTCPDualStack() {
 func (table *tcpTable) getCurrentTables() (
 	connections []*socket.ConnectionInfo,
 	listeners []*socket.BindInfo,
-	updateIter uint64,
 ) {
 	table.lock.RLock()
 	defer table.lock.RUnlock()
-	return table.connections, table.listeners, table.updateIter.Load()
+	return table.connections, table.listeners
 }
-func (table *tcpTable) checkFetchingState() (fetch bool, signal chan struct{}) {
-	table.fetchingLock.Lock()
-	defer table.fetchingLock.Unlock()
-	// If fetching is already in progress, just return the signal.
-	if table.fetchingInProgress {
-		return false, table.fetchingDoneSignal
-	}
-	// Otherwise, tell caller to fetch.
-	table.fetchingInProgress = true
-	return true, nil
-}
-
-func (table *tcpTable) signalFetchComplete() {
-	table.fetchingLock.Lock()
-	defer table.fetchingLock.Unlock()
-	// Set fetching state.
-	table.fetchingInProgress = false
-	// Signal waiting goroutines.
-	close(table.fetchingDoneSignal)
-	table.fetchingDoneSignal = make(chan struct{})
-}
-func (table *tcpTable) updateTables(previousUpdateIter uint64) (
+func (table *tcpTable) updateTables() (
 	connections []*socket.ConnectionInfo,
 	listeners []*socket.BindInfo,
-	updateIter uint64,
 ) {
-	var tries int
-	// Attempt to update the tables until we get a new version of the tables.
-	for previousUpdateIter == table.updateIter.Load() {
-		// Abort if it takes too long.
-		tries++
-		if tries > maxUpdateTries {
-			log.Warningf("state: failed to upate TCP%d socket table %d times", table.version, tries-1)
-			return table.getCurrentTables()
-		}
-		// Check if someone is fetching or if we should fetch.
-		fetch, signal := table.checkFetchingState()
-		if fetch {
-			defer table.signalFetchComplete()
-			// Just to be sure, check again if there is a new version.
-			if previousUpdateIter < table.updateIter.Load() {
-				return table.getCurrentTables()
-			}
-			// Wait for 5 milliseconds.
-			time.Sleep(5 * time.Millisecond)
-			// Fetch new tables from system.
-			connections, listeners, err := table.fetchTable()
-			if err != nil {
-				log.Warningf("state: failed to get TCP%d socket table: %s", table.version, err)
-				// Return the current tables as fallback, as we need to trigger the defer to complete the fetch.
-				return table.getCurrentTables()
-			}
-			// Pre-check for any listeners.
-			for _, bindInfo := range listeners {
-				bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
-			}
-			// Apply new tables.
-			table.lock.Lock()
-			defer table.lock.Unlock()
-			table.connections = connections
-			table.listeners = listeners
-			table.updateIter.Add(1)
-			table.lastUpdateAt.Store(time.Now().UnixNano())
-			// Return new tables immediately.
-			return table.connections, table.listeners, table.updateIter.Load()
-		}
-		// Otherwise, wait for fetch to complete.
-		<-signal
-	}
+	// Fetch tables.
+	table.fetchLimiter.Do(func() {
+		// Fetch new tables from system.
+		connections, listeners, err := table.fetchTable()
+		if err != nil {
+			log.Warningf("state: failed to get TCP%d socket table: %s", table.version, err)
+			return
+		}
+		// Pre-check for any listeners.
+		for _, bindInfo := range listeners {
+			bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
+		}
+		// Apply new tables.
+		table.lock.Lock()
+		defer table.lock.Unlock()
+		table.connections = connections
+		table.listeners = listeners
+		table.lastUpdateAt.Store(time.Now().UnixNano())
+	})
 	return table.getCurrentTables()
 }
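
The hunks above (and the matching UDP ones below) replace a hand-rolled fetching state machine — an in-progress flag, a done-signal channel, and a retry counter — with portbase's utils.CallLimiter. Judging from how it is used here, the limiter coalesces concurrent callers into one fetch and skips fetches that would run again within the minimum interval. A minimal sketch of that idea under those assumed semantics; this is an illustration, not portbase's actual implementation:

package main

import (
	"fmt"
	"sync"
	"time"
)

// callLimiter coalesces concurrent calls: callers queue on the mutex,
// and anyone arriving after a sufficiently recent run returns without
// running fn again.
type callLimiter struct {
	lock    sync.Mutex
	lastRun time.Time
	minWait time.Duration
}

func (l *callLimiter) Do(fn func()) {
	l.lock.Lock()
	defer l.lock.Unlock()

	// A run within the minimum interval already refreshed shared state.
	if time.Since(l.lastRun) < l.minWait {
		return
	}
	fn()
	l.lastRun = time.Now()
}

func main() {
	limiter := &callLimiter{minWait: 10 * time.Millisecond}

	var fetches int
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			limiter.Do(func() { fetches++ }) // fetches is guarded by l.lock
		}()
	}
	wg.Wait()
	fmt.Println("fetches:", fetches) // typically 1, never 5
}

This is also why the retry loop, maxUpdateTries, and the updateIter versioning can all go away: waiting on the limiter already guarantees that a caller returns only after a sufficiently fresh table is in place.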


@@ -9,6 +9,7 @@ import (
 	"time"
 	"github.com/safing/portbase/log"
+	"github.com/safing/portbase/utils"
 	"github.com/safing/portmaster/netenv"
 	"github.com/safing/portmaster/network/packet"
 	"github.com/safing/portmaster/network/socket"
@@ -20,14 +21,11 @@ type udpTable struct {
 	binds []*socket.BindInfo
 	lock  sync.RWMutex
-	updateIter atomic.Uint64
 	// lastUpdateAt stores the time when the tables where last updated as unix nanoseconds.
 	lastUpdateAt atomic.Int64
-	fetchingLock       sync.Mutex
-	fetchingInProgress bool
-	fetchingDoneSignal chan struct{}
-	fetchTable         func() (binds []*socket.BindInfo, err error)
+	fetchLimiter *utils.CallLimiter
+	fetchTable   func() (binds []*socket.BindInfo, err error)
 	states     map[string]map[string]*udpState
 	statesLock sync.Mutex
@@ -53,17 +51,17 @@
 var (
 	udp6Table = &udpTable{
-		version:            6,
-		fetchingDoneSignal: make(chan struct{}),
-		fetchTable:         getUDP6Table,
-		states:             make(map[string]map[string]*udpState),
+		version:      6,
+		fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates),
+		fetchTable:   getUDP6Table,
+		states:       make(map[string]map[string]*udpState),
 	}
 	udp4Table = &udpTable{
-		version:            4,
-		fetchingDoneSignal: make(chan struct{}),
-		fetchTable:         getUDP4Table,
-		states:             make(map[string]map[string]*udpState),
+		version:      4,
+		fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates),
+		fetchTable:   getUDP4Table,
+		states:       make(map[string]map[string]*udpState),
 	}
 )
@@ -73,97 +71,34 @@ func EnableUDPDualStack() {
 	udp4Table.dualStack = udp6Table
 }
-func (table *udpTable) getCurrentTables() (
-	binds []*socket.BindInfo,
-	updateIter uint64,
-) {
+func (table *udpTable) getCurrentTables() (binds []*socket.BindInfo) {
 	table.lock.RLock()
 	defer table.lock.RUnlock()
-	return table.binds, table.updateIter.Load()
+	return table.binds
 }
-func (table *udpTable) checkFetchingState() (fetch bool, signal chan struct{}) {
-	table.fetchingLock.Lock()
-	defer table.fetchingLock.Unlock()
-	// If fetching is already in progress, just return the signal.
-	if table.fetchingInProgress {
-		return false, table.fetchingDoneSignal
-	}
-	// Otherwise, tell caller to fetch.
-	table.fetchingInProgress = true
-	return true, nil
-}
-
-func (table *udpTable) signalFetchComplete() {
-	table.fetchingLock.Lock()
-	defer table.fetchingLock.Unlock()
-	// Set fetching state.
-	table.fetchingInProgress = false
-	// Signal waiting goroutines.
-	close(table.fetchingDoneSignal)
-	table.fetchingDoneSignal = make(chan struct{})
-}
-func (table *udpTable) updateTables(previousUpdateIter uint64) (
-	binds []*socket.BindInfo,
-	updateIter uint64,
-) {
-	var tries int
-	// Attempt to update the tables until we get a new version of the tables.
-	for previousUpdateIter == table.updateIter.Load() {
-		// Abort if it takes too long.
-		tries++
-		if tries > maxUpdateTries {
-			log.Warningf("state: failed to upate UDP%d socket table %d times", table.version, tries-1)
-			return table.getCurrentTables()
-		}
-		// Check if someone is fetching or if we should fetch.
-		fetch, signal := table.checkFetchingState()
-		if fetch {
-			defer table.signalFetchComplete()
-			// Just to be sure, check again if there is a new version.
-			if previousUpdateIter < table.updateIter.Load() {
-				return table.getCurrentTables()
-			}
-			// Wait for 5 milliseconds.
-			time.Sleep(5 * time.Millisecond)
-			// Fetch new tables from system.
-			binds, err := table.fetchTable()
-			if err != nil {
-				log.Warningf("state: failed to get UDP%d socket table: %s", table.version, err)
-				// Return the current tables as fallback, as we need to trigger the defer to complete the fetch.
-				return table.getCurrentTables()
-			}
-			// Pre-check for any listeners.
-			for _, bindInfo := range binds {
-				bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
-			}
-			// Apply new tables.
-			table.lock.Lock()
-			defer table.lock.Unlock()
-			table.binds = binds
-			table.updateIter.Add(1)
-			table.lastUpdateAt.Store(time.Now().UnixNano())
-			// Return new tables immediately.
-			return table.binds, table.updateIter.Load()
-		}
-		// Otherwise, wait for fetch to complete.
-		<-signal
-	}
+func (table *udpTable) updateTables() (binds []*socket.BindInfo) {
+	// Fetch tables.
+	table.fetchLimiter.Do(func() {
+		// Fetch new tables from system.
+		binds, err := table.fetchTable()
+		if err != nil {
+			log.Warningf("state: failed to get UDP%d socket table: %s", table.version, err)
+			return
+		}
+		// Pre-check for any listeners.
+		for _, bindInfo := range binds {
+			bindInfo.ListensAny = bindInfo.Local.IP.Equal(net.IPv4zero) || bindInfo.Local.IP.Equal(net.IPv6zero)
+		}
+		// Apply new tables.
+		table.lock.Lock()
+		defer table.lock.Unlock()
+		table.binds = binds
+		table.lastUpdateAt.Store(time.Now().UnixNano())
+	})
 	return table.getCurrentTables()
 }
@@ -172,11 +107,11 @@ func (table *udpTable) updateTables(previousUpdateIter uint64) (
 func CleanUDPStates(_ context.Context) {
 	now := time.Now().UTC()
-	udp4Table.updateTables(udp4Table.updateIter.Load())
+	udp4Table.updateTables()
 	udp4Table.cleanStates(now)
 	if netenv.IPv6Enabled() {
-		udp6Table.updateTables(udp6Table.updateIter.Load())
+		udp6Table.updateTables()
 		udp6Table.cleanStates(now)
 	}
 }


@@ -287,7 +287,7 @@ func checkForUpdates(ctx context.Context) (err error) {
 		return //nolint:nakedret // TODO: Would "return err" work with the defer?
 	}
-	err = registry.DownloadUpdates(ctx, !forcedUpdate)
+	err = registry.DownloadUpdates(ctx, forcedUpdate)
 	if err != nil {
 		err = fmt.Errorf("failed to download updates: %w", err)
 		return //nolint:nakedret // TODO: Would "return err" work with the defer?
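
Read together with the two earlier call sites (false → true in downloadUpdates and verifyUpdates), the second argument's polarity was flipped at every caller at once. The diff alone does not show the parameter's name or meaning, so the sketch below uses a purely hypothetical stand-in to illustrate why a negated boolean at a call site is easy to get backwards:

package main

import (
	"context"
	"fmt"
)

// downloadUpdates stands in for registry.DownloadUpdates. The flag name
// "includeAll" is hypothetical; the real parameter's meaning is not
// visible in this diff.
func downloadUpdates(ctx context.Context, includeAll bool) error {
	fmt.Println("includeAll:", includeAll)
	return nil
}

func main() {
	ctx := context.Background()
	forcedUpdate := false

	_ = downloadUpdates(ctx, !forcedUpdate) // old: negation hides the intent
	_ = downloadUpdates(ctx, forcedUpdate)  // new: all call sites agree on polarity
}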


@@ -2,6 +2,7 @@ package updates
 import (
 	"fmt"
+	"strings"
 	"sync/atomic"
 	"time"
@@ -58,14 +59,21 @@
 		})
 	case updateSuccessPending:
+		msg := fmt.Sprintf(
+			`%d updates are available for download:
+
+- %s
+
+Press "Download Now" or check for updates later to download and automatically apply all pending updates. You will be notified of important updates that need restarting.`,
+			len(updateState.PendingDownload),
+			strings.Join(updateState.PendingDownload, "\n- "),
+		)
 		notifications.Notify(&notifications.Notification{
 			EventID: updateSuccess,
 			Type:    notifications.Info,
 			Title:   fmt.Sprintf("%d Updates Available", len(updateState.PendingDownload)),
-			Message: fmt.Sprintf(
-				`%d updates are available for download. Press "Download Now" or check for updates later to download and automatically apply all pending updates. You will be notified of important updates that need restarting.`,
-				len(updateState.PendingDownload),
-			),
+			Message: msg,
 			AvailableActions: []*notifications.Action{
 				{
 					ID: "ack",
@@ -84,14 +92,24 @@
 		})
 	case updateSuccessDownloaded:
+		msg := fmt.Sprintf(
+			`%d updates were downloaded and applied:
+
+- %s`,
+			len(updateState.LastDownload),
+			strings.Join(updateState.LastDownload, "\n- "),
+		)
+		if enableSoftwareUpdates() {
+			msg += "\n\nYou will be notified of important updates that need restarting."
+		} else {
+			msg += "\n\nAutomatic software updates are disabled, and you will be notified when a new software update is ready to be downloaded and applied."
+		}
 		notifications.Notify(&notifications.Notification{
 			EventID: updateSuccess,
 			Type:    notifications.Info,
 			Title:   fmt.Sprintf("%d Updates Applied", len(updateState.LastDownload)),
-			Message: fmt.Sprintf(
-				`%d updates were downloaded and applied. You will be notified of important updates that need restarting.`,
-				len(updateState.LastDownload),
-			),
+			Message: msg,
 			Expires: time.Now().Add(1 * time.Minute).Unix(),
 			AvailableActions: []*notifications.Action{
 				{
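
To see what the new message body looks like, here is the same fmt.Sprintf/strings.Join construction run against hypothetical pending downloads — the update names below are invented for illustration; real values come from updateState:

package main

import (
	"fmt"
	"strings"
)

func main() {
	pending := []string{"linux_amd64/core/portmaster-core", "all/intel/geoip"}

	msg := fmt.Sprintf(
		`%d updates are available for download:

- %s

Press "Download Now" or check for updates later to download and automatically apply all pending updates.`,
		len(pending),
		strings.Join(pending, "\n- "),
	)
	fmt.Println(msg)
	// Output starts with:
	// 2 updates are available for download:
	//
	// - linux_amd64/core/portmaster-core
	// - all/intel/geoip
}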


@@ -113,12 +113,12 @@ func upgradeCoreNotify() error {
 		EventID: "updates:core-update-available",
 		Type:    notifications.Info,
 		Title: fmt.Sprintf(
-			"Portmaster Update v%s",
+			"Portmaster Update v%s Is Ready!",
 			pmCoreUpdate.Version(),
 		),
 		Category: "Core",
 		Message: fmt.Sprintf(
-			`A new Portmaster version is available! Restart the Portmaster to upgrade to %s.`,
+			`A new Portmaster version is ready to go! Restart the Portmaster to upgrade to %s.`,
 			pmCoreUpdate.Version(),
 		),
 		ShowOnSystem: true,