[WIP] New updater

Vladimir Stoilov 2024-08-13 18:13:34 +03:00
parent 556e5dd921
commit abf444630b
No known key found for this signature in database
GPG key ID: 2F190B67A43A81AF
9 changed files with 663 additions and 250 deletions


@@ -462,17 +462,14 @@ tauri-build:
# Our tauri app has externalBins configured so tauri will try to embed them when it finished compiling
# the app. Make sure we copy portmaster-start and portmaster-core in all architectures supported.
# See documentation for externalBins for more information on how tauri searches for the binaries.
-COPY (+go-build/output --CMDS="portmaster-start portmaster-core" --GOOS="${GOOS}" --GOARCH="${GOARCH}" --GOARM="${GOARM}") /tmp/gobuild
+COPY (+go-build/output --CMDS="portmaster-core" --GOOS="${GOOS}" --GOARCH="${GOARCH}" --GOARM="${GOARM}") /tmp/gobuild
# Place them in the correct folder with the rust target tripple attached.
FOR bin IN $(ls /tmp/gobuild)
    # ${bin$.*} does not work in SET commands unfortunately so we use a shell
    # snippet here:
    RUN set -e ; \
-        dest="./binaries/${bin}-${target}" ; \
-        if [ -z "${bin##*.exe}" ]; then \
-            dest="./binaries/${bin%.*}-${target}.exe" ; \
-        fi ; \
+        dest="./binaries/${bin}" ; \
        cp "/tmp/gobuild/${bin}" "${dest}" ;
END


@@ -1,12 +1,8 @@
package updater

import (
-	"context"
	"errors"
	"fmt"
-	"net/http"
-	"github.com/safing/portmaster/base/log"
)

// Errors returned by the updater package.
@@ -19,59 +15,60 @@ var (
// GetFile returns the selected (mostly newest) file with the given
// identifier or an error, if it fails.
func (reg *ResourceRegistry) GetFile(identifier string) (*File, error) {
-	reg.RLock()
-	res, ok := reg.resources[identifier]
-	reg.RUnlock()
-	if !ok {
-		return nil, ErrNotFound
-	}
-	file := res.GetFile()
-	// check if file is available locally
-	if file.version.Available {
-		file.markActiveWithLocking()
-		// Verify file, if configured.
-		_, err := file.Verify()
-		if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
-			// TODO: If verification is required, try deleting the resource and downloading it again.
-			return nil, fmt.Errorf("failed to verify file: %w", err)
-		}
-		return file, nil
-	}
-	// check if online
-	if !reg.Online {
-		return nil, ErrNotAvailableLocally
-	}
-	// check download dir
-	err := reg.tmpDir.Ensure()
-	if err != nil {
-		return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
-	}
-	// Start registry operation.
-	reg.state.StartOperation(StateFetching)
-	defer reg.state.EndOperation()
-	// download file
-	log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
-	client := &http.Client{}
-	for tries := range 5 {
-		err = reg.fetchFile(context.TODO(), client, file.version, tries)
-		if err != nil {
-			log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
-		} else {
-			file.markActiveWithLocking()
-			// TODO: We just download the file - should we verify it again?
-			return file, nil
-		}
-	}
-	log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
-	return nil, err
+	return nil, fmt.Errorf("invalid file: %s", identifier)
+	// reg.RLock()
+	// res, ok := reg.resources[identifier]
+	// reg.RUnlock()
+	// if !ok {
+	// 	return nil, ErrNotFound
+	// }
+	// file := res.GetFile()
+	// // check if file is available locally
+	// if file.version.Available {
+	// 	file.markActiveWithLocking()
+	// 	// Verify file, if configured.
+	// 	_, err := file.Verify()
+	// 	if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
+	// 		// TODO: If verification is required, try deleting the resource and downloading it again.
+	// 		return nil, fmt.Errorf("failed to verify file: %w", err)
+	// 	}
+	// 	return file, nil
+	// }
+	// // check if online
+	// if !reg.Online {
+	// 	return nil, ErrNotAvailableLocally
+	// }
+	// // check download dir
+	// err := reg.tmpDir.Ensure()
+	// if err != nil {
+	// 	return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
+	// }
+	// // Start registry operation.
+	// reg.state.StartOperation(StateFetching)
+	// defer reg.state.EndOperation()
+	// // download file
+	// log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
+	// client := &http.Client{}
+	// for tries := range 5 {
+	// 	err = reg.fetchFile(context.TODO(), client, file.version, tries)
+	// 	if err != nil {
+	// 		log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
+	// 	} else {
+	// 		file.markActiveWithLocking()
+	// 		// TODO: We just download the file - should we verify it again?
+	// 		return file, nil
+	// 	}
+	// }
+	// log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
+	// return nil, err
}
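The old registry lookup is stubbed out rather than removed: GetFile now fails unconditionally, so every caller has to tolerate the error until the new updates pipeline below takes over. A minimal caller-side sketch; the registry value and the identifier are illustrative assumptions, not taken from this commit:

    file, err := registry.GetFile("linux_amd64/portmaster-core")
    if err != nil {
        // Always taken while this WIP stub is in place.
        log.Errorf("updater: cannot resolve file: %s", err)
        return
    }
    _ = file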
// GetVersion returns the selected version of the given identifier.


@@ -73,6 +73,7 @@
    "release": "1",
    "files": {
      "/usr/lib/systemd/system/portmaster.service": "../../../packaging/linux/portmaster.service",
+     "/usr/lib/portmaster/portmaster-core": "binaries/portmaster-core",
      "/etc/xdg/autostart/portmaster.desktop": "../../../packaging/linux/portmaster-autostart.desktop"
    },
    "postInstallScript": "../../../packaging/linux/postinst",

service/updates/bundle.go Normal file

@@ -0,0 +1,247 @@
package updates
import (
"archive/zip"
"bytes"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"time"
"github.com/safing/portmaster/base/log"
)
const MaxUnpackSize = 1 << 30 // 2^30 == 1GB
type Artifact struct {
Filename string `json:"Filename"`
SHA256 string `json:"SHA256"`
URLs []string `json:"URLs"`
Platform string `json:"Platform,omitempty"`
Unpack string `json:"Unpack,omitempty"`
Version string `json:"Version,omitempty"`
}
type Bundle struct {
Name string `json:"Bundle"`
Version string `json:"Version"`
Published time.Time `json:"Published"`
Artifacts []Artifact `json:"Artifacts"`
}
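For orientation, this is the kind of document the index JSON decodes into. Everything below (name, version, URL, digest) is made up for illustration and not part of this commit; the sketch also assumes encoding/json is imported:

    example := Bundle{
        Name:      "binary",
        Version:   "1.6.0",
        Published: time.Date(2024, 8, 13, 0, 0, 0, 0, time.UTC),
        Artifacts: []Artifact{{
            Filename: "portmaster-core",
            SHA256:   "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08", // hypothetical digest
            URLs:     []string{"https://updates.example.com/portmaster-core.gz"},
            Platform: "linux_amd64",
            Unpack:   "gz",
        }},
    }
    data, _ := json.MarshalIndent(example, "", "  ") // same shape GetBundle parses back in
    fmt.Println(string(data))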
func (bundle Bundle) downloadAndVerify(dataDir string) {
client := http.Client{}
for _, artifact := range bundle.Artifacts {
filePath := fmt.Sprintf("%s/%s", dataDir, artifact.Filename)
// TODO(vladimir): is this needed?
_ = os.MkdirAll(filepath.Dir(filePath), os.ModePerm)
// Check file is already downloaded and valid.
exists, err := checkIfFileIsValid(filePath, artifact)
if exists {
log.Debugf("file already download: %s", filePath)
continue
} else if err != nil {
log.Errorf("error while checking old download: %s", err)
}
// Download artifact
err = processArtifact(&client, artifact, filePath)
if err != nil {
log.Errorf("updates: %s", err)
}
}
}
func (bundle Bundle) Verify(dataDir string) error {
for _, artifact := range bundle.Artifacts {
artifactPath := fmt.Sprintf("%s/%s", dataDir, artifact.Filename)
file, err := os.Open(artifactPath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", artifactPath, err)
}
defer func() { _ = file.Close() }()
isValid, err := checkIfFileIsValid(artifactPath, artifact)
if err != nil {
return err
}
if !isValid {
return fmt.Errorf("file is not valid: %s", artifact.Filename)
}
}
return nil
}
func checkIfFileIsValid(filename string, artifact Artifact) (bool, error) {
// Check if file already exists
file, err := os.Open(filename)
if err != nil {
//nolint:nilerr
return false, nil
}
defer func() { _ = file.Close() }()
providedHash, err := hex.DecodeString(artifact.SHA256)
if err != nil || len(providedHash) != sha256.Size {
return false, fmt.Errorf("invalid provided hash %s: %w", artifact.SHA256, err)
}
// Calculate hash of the file
fileHash := sha256.New()
if _, err := io.Copy(fileHash, file); err != nil {
return false, fmt.Errorf("failed to read file: %w", err)
}
hashInBytes := fileHash.Sum(nil)
if !bytes.Equal(providedHash, hashInBytes) {
return false, fmt.Errorf("file exist but the hash does not match: %s", filename)
}
return true, nil
}
func processArtifact(client *http.Client, artifact Artifact, filePath string) error {
providedHash, err := hex.DecodeString(artifact.SHA256)
if err != nil || len(providedHash) != sha256.Size {
return fmt.Errorf("invalid provided hash %s: %w", artifact.SHA256, err)
}
// Download
content, err := downloadFile(client, artifact.URLs)
if err != nil {
return fmt.Errorf("failed to download artifact: %w", err)
}
// Decompress
if artifact.Unpack != "" {
content, err = unpack(artifact.Unpack, content)
if err != nil {
return fmt.Errorf("failed to decompress artifact: %w", err)
}
}
// Verify
hash := sha256.Sum256(content)
if !bytes.Equal(providedHash, hash[:]) {
// FIXME(vladimir): just for testing. Make it an error before commit.
err = fmt.Errorf("failed to verify artifact: %s", artifact.Filename)
log.Debugf("updates: %s", err)
}
// Save
tmpFilename := fmt.Sprintf("%s.download", filePath)
file, err := os.Create(tmpFilename)
if err != nil {
return fmt.Errorf("failed to create file: %w", err)
}
_, err = file.Write(content)
if err != nil {
return fmt.Errorf("failed to write to file: %w", err)
}
// Rename
err = os.Rename(tmpFilename, filePath)
if err != nil {
return fmt.Errorf("failed to rename file: %w", err)
}
return nil
}
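processArtifact is the per-file pipeline: download, optionally unpack, verify the SHA-256, write a .download temp file and rename it into place. A rough usage sketch; the artifact values and target path are assumptions for illustration only:

    client := &http.Client{}
    artifact := Artifact{
        Filename: "intel/geoipv4.mmdb",
        SHA256:   "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", // hypothetical digest
        URLs:     []string{"https://updates.example.com/geoipv4.mmdb.gz"},
        Unpack:   "gz",
    }
    if err := processArtifact(client, artifact, "/var/portmaster/new_intel/intel/geoipv4.mmdb"); err != nil {
        log.Errorf("updates: %s", err)
    }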
func downloadFile(client *http.Client, urls []string) ([]byte, error) {
for _, url := range urls {
// Try to make the request
resp, err := client.Get(url)
if err != nil {
log.Warningf("failed a get file request to: %s", err)
continue
}
defer func() { _ = resp.Body.Close() }()
// Check if the server returned an error
if resp.StatusCode != http.StatusOK {
log.Warningf("server returned non-OK status: %d %s", resp.StatusCode, resp.Status)
continue
}
content, err := io.ReadAll(resp.Body)
if err != nil {
log.Warningf("failed to read body of response: %s", err)
continue
}
return content, nil
}
return nil, fmt.Errorf("failed to download file from the provided urls")
}
func unpack(cType string, fileBytes []byte) ([]byte, error) {
switch cType {
case "zip":
{
return decompressZip(fileBytes)
}
case "gz":
{
return decompressGzip(fileBytes)
}
default:
{
return nil, fmt.Errorf("unsupported compression type")
}
}
}
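unpack only understands single-file zip archives and gzip streams, and both decompressors cap their output at MaxUnpackSize. A quick gzip round trip, purely for illustration:

    var packed bytes.Buffer
    gz := gzip.NewWriter(&packed)
    _, _ = gz.Write([]byte("example payload"))
    _ = gz.Close()

    plain, err := unpack("gz", packed.Bytes())
    if err != nil {
        log.Errorf("updates: %s", err)
    }
    fmt.Println(string(plain)) // "example payload"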
func decompressGzip(data []byte) ([]byte, error) {
// Create a gzip reader from the byte array
gzipReader, err := gzip.NewReader(bytes.NewReader(data))
if err != nil {
return nil, fmt.Errorf("failed to create gzip reader: %w", err)
}
defer func() { _ = gzipReader.Close() }()
var buf bytes.Buffer
_, err = io.CopyN(&buf, gzipReader, MaxUnpackSize)
if err != nil && !errors.Is(err, io.EOF) {
return nil, fmt.Errorf("failed to read gzip file: %w", err)
}
return buf.Bytes(), nil
}
func decompressZip(data []byte) ([]byte, error) {
// Create a zip reader from the byte array
zipReader, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
if err != nil {
return nil, fmt.Errorf("failed to create zip reader: %w", err)
}
// Ensure there is only one file in the zip
if len(zipReader.File) != 1 {
return nil, fmt.Errorf("zip file must contain exactly one file")
}
// Read the single file in the zip
file := zipReader.File[0]
fileReader, err := file.Open()
if err != nil {
return nil, fmt.Errorf("failed to open file in zip: %w", err)
}
defer func() { _ = fileReader.Close() }()
var buf bytes.Buffer
_, err = io.CopyN(&buf, fileReader, MaxUnpackSize)
if err != nil && !errors.Is(err, io.EOF) {
return nil, fmt.Errorf("failed to read file in zip: %w", err)
}
return buf.Bytes(), nil
}

service/updates/index.go Normal file

@@ -0,0 +1,110 @@
package updates
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/safing/portmaster/base/log"
)
type UpdateIndex struct {
Directory string
DownloadDirectory string
Ignore []string
IndexURLs []string
IndexFile string
AutoApply bool
}
func (ui *UpdateIndex) downloadIndexFile() (err error) {
_ = os.MkdirAll(ui.Directory, defaultDirMode)
_ = os.MkdirAll(ui.DownloadDirectory, defaultDirMode)
for _, url := range ui.IndexURLs {
err = ui.downloadIndexFileFromURL(url)
if err != nil {
log.Warningf("updates: %s", err)
continue
}
// Downloading was successful.
err = nil
break
}
return
}
func (ui *UpdateIndex) checkForUpdates() (bool, error) {
err := ui.downloadIndexFile()
if err != nil {
return false, err
}
currentBundle, err := ui.GetInstallBundle()
if err != nil {
return true, err // Current installed bundle not found, act as there is update.
}
updateBundle, err := ui.GetUpdateBundle()
if err != nil {
return false, err
}
return currentBundle.Version != updateBundle.Version, nil
}
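Note the asymmetry in checkForUpdates: if the currently installed bundle cannot be read it reports (true, err), so the caller still proceeds as if an update exists, while an unreadable update bundle is a plain error. A caller-side sketch; the index value is assumed to be configured elsewhere, as mod.go does further down:

    hasUpdate, err := index.checkForUpdates()
    if err != nil {
        log.Warningf("updates: index check reported: %s", err)
    }
    if hasUpdate {
        // Fetch and verify the artifacts listed in the downloaded index;
        // see Bundle.downloadAndVerify in bundle.go.
    }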
func (ui *UpdateIndex) downloadIndexFileFromURL(url string) error {
client := http.Client{}
resp, err := client.Get(url)
if err != nil {
return fmt.Errorf("failed a get request to %s: %w", url, err)
}
defer func() { _ = resp.Body.Close() }()
filePath := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, defaultFileMode)
if err != nil {
return err
}
defer func() { _ = file.Close() }()
_, err = io.Copy(file, resp.Body)
if err != nil {
return err
}
return nil
}
func (ui *UpdateIndex) GetInstallBundle() (*Bundle, error) {
indexFile := fmt.Sprintf("%s/%s", ui.Directory, ui.IndexFile)
return ui.GetBundle(indexFile)
}
func (ui *UpdateIndex) GetUpdateBundle() (*Bundle, error) {
indexFile := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
return ui.GetBundle(indexFile)
}
func (ui *UpdateIndex) GetBundle(indexFile string) (*Bundle, error) {
// Check if the file exists.
file, err := os.Open(indexFile)
if err != nil {
return nil, fmt.Errorf("failed to open index file: %w", err)
}
defer func() { _ = file.Close() }()
// Read
content, err := io.ReadAll(file)
if err != nil {
return nil, err
}
// Parse
var bundle Bundle
err = json.Unmarshal(content, &bundle)
if err != nil {
return nil, err
}
return &bundle, nil
}
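GetInstallBundle and GetUpdateBundle differ only in which directory the same index file is read from; both go through GetBundle. A small sketch that reads the installed binary index, using the default paths wired up later in mod.go:

    ui := UpdateIndex{
        Directory:         "/usr/lib/portmaster",
        DownloadDirectory: "/var/portmaster/new_bin",
        IndexFile:         "bin-index.json",
    }
    installed, err := ui.GetInstallBundle()
    if err != nil {
        log.Warningf("updates: no installed bundle: %s", err)
    } else {
        log.Infof("updates: installed bundle %s %s", installed.Name, installed.Version)
    }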


@@ -1,20 +1,14 @@
package updates

import (
-	"context"
-	"errors"
-	"flag"
	"fmt"
-	"net/url"
	"runtime"
	"time"

	"github.com/safing/portmaster/base/database"
-	"github.com/safing/portmaster/base/dataroot"
	"github.com/safing/portmaster/base/log"
	"github.com/safing/portmaster/base/updater"
	"github.com/safing/portmaster/service/mgr"
-	"github.com/safing/portmaster/service/updates/helper"
)

const (
@@ -48,9 +42,6 @@ var (
	userAgentFromFlag    string
	updateServerFromFlag string
-	updateASAP           bool
-	disableTaskSchedule  bool

	db = database.NewInterface(&database.Options{
		Local:    true,
		Internal: true,
@@ -60,163 +51,126 @@ var (
	// more context to requests made by the registry when
	// fetching resources from the update server.
	UserAgent = fmt.Sprintf("Portmaster (%s %s)", runtime.GOOS, runtime.GOARCH)
-
-	// DefaultUpdateURLs defines the default base URLs of the update server.
-	DefaultUpdateURLs = []string{
-		"https://updates.safing.io",
-	}
-
-	// DisableSoftwareAutoUpdate specifies whether software updates should be disabled.
-	// This is used on Android, as it will never require binary updates.
-	DisableSoftwareAutoUpdate = false
)

const (
-	updatesDirName = "updates"
	updateTaskRepeatDuration = 1 * time.Hour
)

-func init() {
-	flag.StringVar(&updateServerFromFlag, "update-server", "", "set an alternative update server (full URL)")
-	flag.StringVar(&userAgentFromFlag, "update-agent", "", "set an alternative user agent for requests to the update server")
-}
-
-func prep() error {
-	// Check if update server URL supplied via flag is a valid URL.
-	if updateServerFromFlag != "" {
-		u, err := url.Parse(updateServerFromFlag)
-		if err != nil {
-			return fmt.Errorf("supplied update server URL is invalid: %w", err)
-		}
-		if u.Scheme != "https" {
-			return errors.New("supplied update server URL must use HTTPS")
-		}
-	}
-
-	if err := registerConfig(); err != nil {
-		return err
-	}
-
-	return registerAPIEndpoints()
-}

func start() error {
-	initConfig()
-	module.restartWorkerMgr.Repeat(10 * time.Minute)
-	module.instance.Config().EventConfigChange.AddCallback("update registry config", updateRegistryConfig)
-	// create registry
-	registry = &updater.ResourceRegistry{
-		Name:             ModuleName,
-		UpdateURLs:       DefaultUpdateURLs,
-		UserAgent:        UserAgent,
-		MandatoryUpdates: helper.MandatoryUpdates(),
-		AutoUnpack:       helper.AutoUnpackUpdates(),
-		Verification:     helper.VerificationConfig,
-		DevMode:          devMode(),
-		Online:           true,
-	}
-	// Override values from flags.
-	if userAgentFromFlag != "" {
-		registry.UserAgent = userAgentFromFlag
-	}
-	if updateServerFromFlag != "" {
-		registry.UpdateURLs = []string{updateServerFromFlag}
-	}
-	// pre-init state
-	updateStateExport, err := LoadStateExport()
-	if err != nil {
-		log.Debugf("updates: failed to load exported update state: %s", err)
-	} else if updateStateExport.UpdateState != nil {
-		err := registry.PreInitUpdateState(*updateStateExport.UpdateState)
-		if err != nil {
-			return err
-		}
-	}
-	// initialize
-	err = registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
-	if err != nil {
-		return err
-	}
-	// register state provider
-	err = registerRegistryStateProvider()
-	if err != nil {
-		return err
-	}
-	registry.StateNotifyFunc = pushRegistryState
-	// Set indexes based on the release channel.
-	warning := helper.SetIndexes(
-		registry,
-		initialReleaseChannel,
-		true,
-		enableSoftwareUpdates() && !DisableSoftwareAutoUpdate,
-		enableIntelUpdates(),
-	)
-	if warning != nil {
-		log.Warningf("updates: %s", warning)
-	}
-	err = registry.LoadIndexes(module.m.Ctx())
-	if err != nil {
-		log.Warningf("updates: failed to load indexes: %s", err)
-	}
-	err = registry.ScanStorage("")
-	if err != nil {
-		log.Warningf("updates: error during storage scan: %s", err)
-	}
-	registry.SelectVersions()
-	module.EventVersionsUpdated.Submit(struct{}{})
-	// Initialize the version export - this requires the registry to be set up.
-	err = initVersionExport()
-	if err != nil {
-		return err
-	}
-	// start updater task
-	if !disableTaskSchedule {
-		_ = module.updateWorkerMgr.Repeat(30 * time.Minute)
-	}
-	if updateASAP {
-		module.updateWorkerMgr.Go()
-	}
-	// react to upgrades
-	if err := initUpgrader(); err != nil {
-		return err
-	}
-	warnOnIncorrectParentPath()
+	// module.restartWorkerMgr.Repeat(10 * time.Minute)
+	// module.instance.Config().EventConfigChange.AddCallback("update registry config", updateRegistryConfig)
+	// // create registry
+	// registry = &updater.ResourceRegistry{
+	// 	Name:             ModuleName,
+	// 	UpdateURLs:       DefaultUpdateURLs,
+	// 	UserAgent:        UserAgent,
+	// 	MandatoryUpdates: helper.MandatoryUpdates(),
+	// 	AutoUnpack:       helper.AutoUnpackUpdates(),
+	// 	Verification:     helper.VerificationConfig,
+	// 	DevMode:          devMode(),
+	// 	Online:           true,
+	// }
+	// // Override values from flags.
+	// if userAgentFromFlag != "" {
+	// 	registry.UserAgent = userAgentFromFlag
+	// }
+	// if updateServerFromFlag != "" {
+	// 	registry.UpdateURLs = []string{updateServerFromFlag}
+	// }
+	// // pre-init state
+	// updateStateExport, err := LoadStateExport()
+	// if err != nil {
+	// 	log.Debugf("updates: failed to load exported update state: %s", err)
+	// } else if updateStateExport.UpdateState != nil {
+	// 	err := registry.PreInitUpdateState(*updateStateExport.UpdateState)
+	// 	if err != nil {
+	// 		return err
+	// 	}
+	// }
+	// initialize
+	// err := registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
+	// if err != nil {
+	// 	return err
+	// }
+	// // register state provider
+	// err = registerRegistryStateProvider()
+	// if err != nil {
+	// 	return err
+	// }
+	// registry.StateNotifyFunc = pushRegistryState
+	// // Set indexes based on the release channel.
+	// warning := helper.SetIndexes(
+	// 	registry,
+	// 	initialReleaseChannel,
+	// 	true,
+	// 	enableSoftwareUpdates() && !DisableSoftwareAutoUpdate,
+	// 	enableIntelUpdates(),
+	// )
+	// if warning != nil {
+	// 	log.Warningf("updates: %s", warning)
+	// }
+	// err = registry.LoadIndexes(module.m.Ctx())
+	// if err != nil {
+	// 	log.Warningf("updates: failed to load indexes: %s", err)
+	// }
+	// err = registry.ScanStorage("")
+	// if err != nil {
+	// 	log.Warningf("updates: error during storage scan: %s", err)
+	// }
+	// registry.SelectVersions()
+	// module.EventVersionsUpdated.Submit(struct{}{})
+	// // Initialize the version export - this requires the registry to be set up.
+	// err = initVersionExport()
+	// if err != nil {
+	// 	return err
+	// }
+	// // start updater task
+	// if !disableTaskSchedule {
+	// 	_ = module.updateWorkerMgr.Repeat(30 * time.Minute)
+	// }
+	// if updateASAP {
+	// 	module.updateWorkerMgr.Go()
+	// }
+	// // react to upgrades
+	// if err := initUpgrader(); err != nil {
+	// 	return err
+	// }
+	// warnOnIncorrectParentPath()
	return nil
}

// TriggerUpdate queues the update task to execute ASAP.
func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
-	switch {
-	case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
-		return errors.New("automatic updating is disabled")
-	default:
-		if forceIndexCheck {
-			forceCheck.Set()
-		}
-		if downloadAll {
-			forceDownload.Set()
-		}
-		// If index check if forced, start quicker.
-		module.updateWorkerMgr.Go()
-	}
+	// switch {
+	// case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
+	// 	return errors.New("automatic updating is disabled")
+	// default:
+	// 	if forceIndexCheck {
+	// 		forceCheck.Set()
+	// 	}
+	// 	if downloadAll {
+	// 		forceDownload.Set()
+	// 	}
+	// 	// If index check if forced, start quicker.
+	// 	module.updateWorkerMgr.Go()
+	// }
	log.Debugf("updates: triggering update to run as soon as possible")
	return nil
@@ -232,68 +186,66 @@ func DisableUpdateSchedule() error {
	// return errors.New("module already online")
	// }

-	disableTaskSchedule = true

	return nil
}

func checkForUpdates(ctx *mgr.WorkerCtx) (err error) {
	// Set correct error if context was canceled.
-	defer func() {
-		select {
-		case <-ctx.Done():
-			err = context.Canceled
-		default:
-		}
-	}()
-	// Get flags.
-	forceIndexCheck := forceCheck.SetToIf(true, false)
-	downloadAll := forceDownload.SetToIf(true, false)
-	// Check again if downloading updates is enabled, or forced.
-	if !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates() {
-		log.Warningf("updates: automatic updates are disabled")
-		return nil
-	}
-	defer func() {
-		// Resolve any error and send success notification.
-		if err == nil {
-			log.Infof("updates: successfully checked for updates")
-			notifyUpdateSuccess(forceIndexCheck)
-			return
-		}
-		// Log and notify error.
-		log.Errorf("updates: check failed: %s", err)
-		notifyUpdateCheckFailed(forceIndexCheck, err)
-	}()
-	if err = registry.UpdateIndexes(ctx.Ctx()); err != nil {
-		err = fmt.Errorf("failed to update indexes: %w", err)
-		return //nolint:nakedret // TODO: Would "return err" work with the defer?
-	}
-	err = registry.DownloadUpdates(ctx.Ctx(), downloadAll)
-	if err != nil {
-		err = fmt.Errorf("failed to download updates: %w", err)
-		return //nolint:nakedret // TODO: Would "return err" work with the defer?
-	}
-	registry.SelectVersions()
-	// Unpack selected resources.
-	err = registry.UnpackResources()
-	if err != nil {
-		err = fmt.Errorf("failed to unpack updates: %w", err)
-		return //nolint:nakedret // TODO: Would "return err" work with the defer?
-	}
-	// Purge old resources
-	registry.Purge(2)
-	module.EventResourcesUpdated.Submit(struct{}{})
+	// defer func() {
+	// 	select {
+	// 	case <-ctx.Done():
+	// 		err = context.Canceled
+	// 	default:
+	// 	}
+	// }()
+	// // Get flags.
+	// forceIndexCheck := forceCheck.SetToIf(true, false)
+	// downloadAll := forceDownload.SetToIf(true, false)
+	// // Check again if downloading updates is enabled, or forced.
+	// if !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates() {
+	// 	log.Warningf("updates: automatic updates are disabled")
+	// 	return nil
+	// }
+	// defer func() {
+	// 	// Resolve any error and send success notification.
+	// 	if err == nil {
+	// 		log.Infof("updates: successfully checked for updates")
+	// 		notifyUpdateSuccess(forceIndexCheck)
+	// 		return
+	// 	}
+	// 	// Log and notify error.
+	// 	log.Errorf("updates: check failed: %s", err)
+	// 	notifyUpdateCheckFailed(forceIndexCheck, err)
+	// }()
+	// if err = registry.UpdateIndexes(ctx.Ctx()); err != nil {
+	// 	err = fmt.Errorf("failed to update indexes: %w", err)
+	// 	return //nolint:nakedret // TODO: Would "return err" work with the defer?
+	// }
+	// err = registry.DownloadUpdates(ctx.Ctx(), downloadAll)
+	// if err != nil {
+	// 	err = fmt.Errorf("failed to download updates: %w", err)
+	// 	return //nolint:nakedret // TODO: Would "return err" work with the defer?
+	// }
+	// registry.SelectVersions()
+	// // Unpack selected resources.
+	// err = registry.UnpackResources()
+	// if err != nil {
+	// 	err = fmt.Errorf("failed to unpack updates: %w", err)
+	// 	return //nolint:nakedret // TODO: Would "return err" work with the defer?
+	// }
+	// // Purge old resources
+	// registry.Purge(2)
+	// module.EventResourcesUpdated.Submit(struct{}{})
	return nil
}
@@ -314,5 +266,6 @@ func RootPath() string {
	// return ""
	// }
-	return registry.StorageDir().Path
+	// return registry.StorageDir().Path
+	return ""
}


@@ -2,14 +2,24 @@ package updates

import (
	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
	"sync/atomic"

	"github.com/safing/portmaster/base/api"
	"github.com/safing/portmaster/base/config"
+	"github.com/safing/portmaster/base/log"
	"github.com/safing/portmaster/base/notifications"
	"github.com/safing/portmaster/service/mgr"
)

+const (
+	defaultFileMode = os.FileMode(0o0644)
+	defaultDirMode  = os.FileMode(0o0755)
+)
+
// Updates provides access to released artifacts.
type Updates struct {
	m *mgr.Manager
@@ -21,6 +31,9 @@ type Updates struct {
	EventResourcesUpdated *mgr.EventMgr[struct{}]
	EventVersionsUpdated  *mgr.EventMgr[struct{}]

+	binUpdates   UpdateIndex
+	intelUpdates UpdateIndex
+
	instance instance
}
@@ -40,19 +53,84 @@ func New(instance instance) (*Updates, error) {
		m:      m,
		states: m.NewStateMgr(),
-		updateWorkerMgr:  m.NewWorkerMgr("updater", checkForUpdates, nil),
-		restartWorkerMgr: m.NewWorkerMgr("automatic restart", automaticRestart, nil),
		EventResourcesUpdated: mgr.NewEventMgr[struct{}](ResourceUpdateEvent, m),
		EventVersionsUpdated:  mgr.NewEventMgr[struct{}](VersionUpdateEvent, m),
		instance: instance,
	}

-	if err := prep(); err != nil {
-		return nil, err
-	}
+	// Events
module.updateWorkerMgr = m.NewWorkerMgr("updater", module.checkForUpdates, nil)
module.restartWorkerMgr = m.NewWorkerMgr("automatic restart", automaticRestart, nil)
module.binUpdates = UpdateIndex{
Directory: "/usr/lib/portmaster",
DownloadDirectory: "/var/portmaster/new_bin",
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: []string{"http://localhost:8000/test-binary.json"},
IndexFile: "bin-index.json",
AutoApply: false,
}
module.intelUpdates = UpdateIndex{
Directory: "/var/portmaster/intel",
DownloadDirectory: "/var/portmaster/new_intel",
IndexURLs: []string{"http://localhost:8000/test-intel.json"},
IndexFile: "intel-index.json",
AutoApply: true,
	}

	return module, nil
}
func deleteUnfinishedDownloads(rootDir string) error {
return filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// Check if the current file has the specified extension
if !info.IsDir() && strings.HasSuffix(info.Name(), ".download") {
log.Warningf("updates deleting unfinished: %s\n", path)
err := os.Remove(path)
if err != nil {
return fmt.Errorf("failed to delete file %s: %w", path, err)
}
}
return nil
})
}
func (u *Updates) checkForUpdates(_ *mgr.WorkerCtx) error {
_ = deleteUnfinishedDownloads(u.binUpdates.DownloadDirectory)
hasUpdate, err := u.binUpdates.checkForUpdates()
if err != nil {
log.Warningf("failed to get binary index file: %s", err)
}
if hasUpdate {
binBundle, err := u.binUpdates.GetUpdateBundle()
if err == nil {
log.Debugf("Bin Bundle: %+v", binBundle)
_ = os.MkdirAll(u.binUpdates.DownloadDirectory, defaultDirMode)
binBundle.downloadAndVerify(u.binUpdates.DownloadDirectory)
}
}
_ = deleteUnfinishedDownloads(u.intelUpdates.DownloadDirectory)
hasUpdate, err = u.intelUpdates.checkForUpdates()
if err != nil {
log.Warningf("failed to get intel index file: %s", err)
}
if hasUpdate {
intelBundle, err := u.intelUpdates.GetUpdateBundle()
if err == nil {
log.Debugf("Intel Bundle: %+v", intelBundle)
_ = os.MkdirAll(u.intelUpdates.DownloadDirectory, defaultDirMode)
intelBundle.downloadAndVerify(u.intelUpdates.DownloadDirectory)
}
}
return nil
}
// States returns the state manager.
func (u *Updates) States() *mgr.StateMgr {
	return u.states
@@ -65,7 +143,36 @@ func (u *Updates) Manager() *mgr.Manager {
// Start starts the module.
func (u *Updates) Start() error {
-	return start()
+	initConfig()
u.m.Go("check for updates", func(w *mgr.WorkerCtx) error {
binBundle, err := u.binUpdates.GetInstallBundle()
if err != nil {
log.Warningf("failed to get binary bundle: %s", err)
} else {
err = binBundle.Verify(u.binUpdates.Directory)
if err != nil {
log.Warningf("binary bundle is not valid: %s", err)
} else {
log.Infof("binary bundle is valid")
}
}
intelBundle, err := u.intelUpdates.GetInstallBundle()
if err != nil {
log.Warningf("failed to get intel bundle: %s", err)
} else {
err = intelBundle.Verify(u.intelUpdates.Directory)
if err != nil {
log.Warningf("intel bundle is not valid: %s", err)
} else {
log.Infof("intel bundle is valid")
}
}
return nil
})
u.updateWorkerMgr.Go()
return nil
}
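Taken together, the new flow is: New wires up the two UpdateIndex configurations, Start verifies whatever is already installed and then kicks the updater worker, and checkForUpdates stages downloads into the new_bin/new_intel directories. A condensed sketch of that lifecycle; the svc value is an assumption, and nothing shown in this diff consumes AutoApply or applies the staged files yet:

    u, err := New(svc)
    if err != nil {
        log.Errorf("updates: %s", err)
        return
    }
    if err := u.Start(); err != nil { // verifies installed bundles, then triggers updateWorkerMgr
        log.Errorf("updates: %s", err)
    }
    // The worker later runs u.checkForUpdates, which downloads and verifies new
    // bundles into /var/portmaster/new_bin and /var/portmaster/new_intel.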
// Stop stops the module.


@@ -0,0 +1 @@
package updates


@@ -181,16 +181,16 @@ func upgradeHub() error {
		DelayedRestart(time.Duration(delayMinutes+60) * time.Minute)

		// Increase update checks in order to detect aborts better.
-		if !disableTaskSchedule {
+		// if !disableTaskSchedule {
		module.updateWorkerMgr.Repeat(10 * time.Minute)
-		}
+		// }
	} else {
		AbortRestart()

		// Set update task schedule back to normal.
-		if !disableTaskSchedule {
+		// if !disableTaskSchedule {
		module.updateWorkerMgr.Repeat(updateTaskRepeatDuration)
-		}
+		// }
	}

	return nil