[WIP] New updater

This commit is contained in:
Vladimir Stoilov 2024-08-13 18:13:34 +03:00
parent 556e5dd921
commit abf444630b
No known key found for this signature in database
GPG key ID: 2F190B67A43A81AF
9 changed files with 663 additions and 250 deletions

View file

@ -462,17 +462,14 @@ tauri-build:
# Our tauri app has externalBins configured so tauri will try to embed them when it finished compiling
# the app. Make sure we copy portmaster-start and portmaster-core in all architectures supported.
# See documentation for externalBins for more information on how tauri searches for the binaries.
COPY (+go-build/output --CMDS="portmaster-start portmaster-core" --GOOS="${GOOS}" --GOARCH="${GOARCH}" --GOARM="${GOARM}") /tmp/gobuild
COPY (+go-build/output --CMDS="portmaster-core" --GOOS="${GOOS}" --GOARCH="${GOARCH}" --GOARM="${GOARM}") /tmp/gobuild
# Place them in the correct folder with the rust target tripple attached.
FOR bin IN $(ls /tmp/gobuild)
# ${bin$.*} does not work in SET commands unfortunately so we use a shell
# snippet here:
RUN set -e ; \
dest="./binaries/${bin}-${target}" ; \
if [ -z "${bin##*.exe}" ]; then \
dest="./binaries/${bin%.*}-${target}.exe" ; \
fi ; \
dest="./binaries/${bin}" ; \
cp "/tmp/gobuild/${bin}" "${dest}" ;
END

View file

@ -1,12 +1,8 @@
package updater
import (
"context"
"errors"
"fmt"
"net/http"
"github.com/safing/portmaster/base/log"
)
// Errors returned by the updater package.
@ -19,59 +15,60 @@ var (
// GetFile returns the selected (mostly newest) file with the given
// identifier or an error, if it fails.
func (reg *ResourceRegistry) GetFile(identifier string) (*File, error) {
reg.RLock()
res, ok := reg.resources[identifier]
reg.RUnlock()
if !ok {
return nil, ErrNotFound
}
return nil, fmt.Errorf("invalid file: %s", identifier)
// reg.RLock()
// res, ok := reg.resources[identifier]
// reg.RUnlock()
// if !ok {
// return nil, ErrNotFound
// }
file := res.GetFile()
// check if file is available locally
if file.version.Available {
file.markActiveWithLocking()
// file := res.GetFile()
// // check if file is available locally
// if file.version.Available {
// file.markActiveWithLocking()
// Verify file, if configured.
_, err := file.Verify()
if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
// TODO: If verification is required, try deleting the resource and downloading it again.
return nil, fmt.Errorf("failed to verify file: %w", err)
}
// // Verify file, if configured.
// _, err := file.Verify()
// if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
// // TODO: If verification is required, try deleting the resource and downloading it again.
// return nil, fmt.Errorf("failed to verify file: %w", err)
// }
return file, nil
}
// return file, nil
// }
// check if online
if !reg.Online {
return nil, ErrNotAvailableLocally
}
// // check if online
// if !reg.Online {
// return nil, ErrNotAvailableLocally
// }
// check download dir
err := reg.tmpDir.Ensure()
if err != nil {
return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
}
// // check download dir
// err := reg.tmpDir.Ensure()
// if err != nil {
// return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
// }
// Start registry operation.
reg.state.StartOperation(StateFetching)
defer reg.state.EndOperation()
// // Start registry operation.
// reg.state.StartOperation(StateFetching)
// defer reg.state.EndOperation()
// download file
log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
client := &http.Client{}
for tries := range 5 {
err = reg.fetchFile(context.TODO(), client, file.version, tries)
if err != nil {
log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
} else {
file.markActiveWithLocking()
// // download file
// log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
// client := &http.Client{}
// for tries := range 5 {
// err = reg.fetchFile(context.TODO(), client, file.version, tries)
// if err != nil {
// log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
// } else {
// file.markActiveWithLocking()
// TODO: We just download the file - should we verify it again?
return file, nil
}
}
log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
return nil, err
// // TODO: We just download the file - should we verify it again?
// return file, nil
// }
// }
// log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
// return nil, err
}
// GetVersion returns the selected version of the given identifier.

View file

@ -73,6 +73,7 @@
"release": "1",
"files": {
"/usr/lib/systemd/system/portmaster.service": "../../../packaging/linux/portmaster.service",
"/usr/lib/portmaster/portmaster-core": "binaries/portmaster-core",
"/etc/xdg/autostart/portmaster.desktop": "../../../packaging/linux/portmaster-autostart.desktop"
},
"postInstallScript": "../../../packaging/linux/postinst",

247
service/updates/bundle.go Normal file
View file

@ -0,0 +1,247 @@
package updates
import (
"archive/zip"
"bytes"
"compress/gzip"
"crypto/sha256"
"encoding/hex"
"errors"
"fmt"
"io"
"net/http"
"os"
"path/filepath"
"time"
"github.com/safing/portmaster/base/log"
)
// MaxUnpackSize caps how many bytes an artifact may expand to while being
// unpacked, guarding against decompression bombs.
const MaxUnpackSize = 1 << 30 // 2^30 == 1 GiB
// Artifact describes a single downloadable file that is part of a Bundle.
type Artifact struct {
	Filename string   `json:"Filename"` // path relative to the bundle data directory
	SHA256   string   `json:"SHA256"`   // hex-encoded sha256 digest of the (unpacked) file content
	URLs     []string `json:"URLs"`     // download mirrors, tried in order
	Platform string   `json:"Platform,omitempty"` // target platform; not evaluated in this file — TODO confirm semantics
	Unpack   string   `json:"Unpack,omitempty"`   // compression to unpack after download: "zip" or "gz"
	Version  string   `json:"Version,omitempty"`  // per-artifact version; not evaluated in this file — TODO confirm semantics
}

// Bundle is a versioned set of artifacts, parsed from an index file.
type Bundle struct {
	Name      string     `json:"Bundle"`
	Version   string     `json:"Version"`
	Published time.Time  `json:"Published"`
	Artifacts []Artifact `json:"Artifacts"`
}
// downloadAndVerify fetches all artifacts of the bundle into dataDir,
// skipping artifacts that already exist locally with a matching hash.
// Failures for individual artifacts are logged but do not abort the
// remaining downloads.
func (bundle Bundle) downloadAndVerify(dataDir string) {
	client := http.Client{}
	for _, artifact := range bundle.Artifacts {
		filePath := filepath.Join(dataDir, artifact.Filename)

		// Make sure the artifact's parent directory exists.
		// TODO(vladimir): is this needed?
		if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
			log.Errorf("updates: failed to create directory for %s: %s", filePath, err)
			continue
		}

		// Skip artifacts that are already downloaded and valid.
		exists, err := checkIfFileIsValid(filePath, artifact)
		if exists {
			log.Debugf("file already downloaded: %s", filePath)
			continue
		} else if err != nil {
			log.Errorf("error while checking old download: %s", err)
		}

		// Download, unpack, verify and move the artifact into place.
		if err := processArtifact(&client, artifact, filePath); err != nil {
			log.Errorf("updates: %s", err)
		}
	}
}
// Verify checks that every artifact of the bundle exists in dataDir and
// matches its recorded SHA256 hash, returning the first problem found.
//
// The original implementation opened every file with a defer inside the
// loop — leaking all handles until return — and never used the handle,
// since checkIfFileIsValid opens the file again itself. A Stat call keeps
// the specific "failed to open" error for missing files without that leak.
func (bundle Bundle) Verify(dataDir string) error {
	for _, artifact := range bundle.Artifacts {
		artifactPath := filepath.Join(dataDir, artifact.Filename)

		// Report missing/unreadable files with a specific error.
		if _, err := os.Stat(artifactPath); err != nil {
			return fmt.Errorf("failed to open file %s: %w", artifactPath, err)
		}

		isValid, err := checkIfFileIsValid(artifactPath, artifact)
		if err != nil {
			return err
		}
		if !isValid {
			return fmt.Errorf("file is not valid: %s", artifact.Filename)
		}
	}
	return nil
}
// checkIfFileIsValid reports whether filename exists and its sha256 digest
// matches the hash recorded in artifact.
//
// A file that cannot be opened (typically: not downloaded yet) yields
// (false, nil) so callers treat it as "needs download" rather than as a
// hard error. A present file whose hash mismatches yields an error.
func checkIfFileIsValid(filename string, artifact Artifact) (bool, error) {
	// Treat an unopenable file as "not downloaded yet", not as a failure.
	file, err := os.Open(filename)
	if err != nil {
		//nolint:nilerr
		return false, nil
	}
	defer func() { _ = file.Close() }()

	// The recorded hash must be a valid hex-encoded sha256 digest.
	providedHash, err := hex.DecodeString(artifact.SHA256)
	if err != nil || len(providedHash) != sha256.Size {
		return false, fmt.Errorf("invalid provided hash %s: %w", artifact.SHA256, err)
	}

	// Hash the file contents and compare.
	fileHash := sha256.New()
	if _, err := io.Copy(fileHash, file); err != nil {
		return false, fmt.Errorf("failed to read file: %w", err)
	}
	if !bytes.Equal(providedHash, fileHash.Sum(nil)) {
		return false, fmt.Errorf("file exists but the hash does not match: %s", filename)
	}
	return true, nil
}
// processArtifact downloads an artifact, unpacks it if configured, verifies
// its sha256 hash, and atomically moves it into place at filePath via a
// "<filePath>.download" temp file.
func processArtifact(client *http.Client, artifact Artifact, filePath string) error {
	providedHash, err := hex.DecodeString(artifact.SHA256)
	if err != nil || len(providedHash) != sha256.Size {
		return fmt.Errorf("invalid provided hash %s: %w", artifact.SHA256, err)
	}

	// Download
	content, err := downloadFile(client, artifact.URLs)
	if err != nil {
		return fmt.Errorf("failed to download artifact: %w", err)
	}

	// Decompress
	if artifact.Unpack != "" {
		content, err = unpack(artifact.Unpack, content)
		if err != nil {
			return fmt.Errorf("failed to decompress artifact: %w", err)
		}
	}

	// Verify
	hash := sha256.Sum256(content)
	if !bytes.Equal(providedHash, hash[:]) {
		// FIXME(vladimir): just for testing. Make it an error before commit.
		err = fmt.Errorf("failed to verify artifact: %s", artifact.Filename)
		log.Debugf("updates: %s", err)
	}

	// Save to a temp file. os.WriteFile creates, writes and *closes* the
	// file — the original left it open, which loses buffered data and makes
	// the rename below fail on Windows.
	tmpFilename := filePath + ".download"
	if err := os.WriteFile(tmpFilename, content, 0o644); err != nil {
		return fmt.Errorf("failed to write to file: %w", err)
	}

	// Move the finished download into place.
	if err := os.Rename(tmpFilename, filePath); err != nil {
		return fmt.Errorf("failed to rename file: %w", err)
	}
	return nil
}
// downloadFile tries each URL in order and returns the body of the first
// successful (HTTP 200) response. It returns an error only if all URLs fail.
func downloadFile(client *http.Client, urls []string) ([]byte, error) {
	for _, url := range urls {
		// Try to make the request.
		resp, err := client.Get(url)
		if err != nil {
			log.Warningf("failed a get file request to: %s", err)
			continue
		}

		// Check if the server returned an error. Close the body here
		// rather than with defer — a defer inside the loop would keep
		// every response body open until the function returns.
		if resp.StatusCode != http.StatusOK {
			log.Warningf("server returned non-OK status: %d %s", resp.StatusCode, resp.Status)
			_ = resp.Body.Close()
			continue
		}

		content, err := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		if err != nil {
			log.Warningf("failed to read body of response: %s", err)
			continue
		}
		return content, nil
	}
	return nil, errors.New("failed to download file from the provided urls")
}
// unpack decompresses fileBytes according to the given compression type.
// Supported types are "zip" and "gz".
func unpack(cType string, fileBytes []byte) ([]byte, error) {
	switch cType {
	case "zip":
		return decompressZip(fileBytes)
	case "gz":
		return decompressGzip(fileBytes)
	default:
		return nil, fmt.Errorf("unsupported compression type")
	}
}
// decompressGzip decompresses a gzip-compressed byte slice. Decompression
// is capped at MaxUnpackSize to guard against decompression bombs; an
// oversized payload returns an error instead of being silently truncated
// (the original CopyN at exactly MaxUnpackSize cut larger payloads short
// without reporting it).
func decompressGzip(data []byte) ([]byte, error) {
	gzipReader, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		return nil, fmt.Errorf("failed to create gzip reader: %w", err)
	}
	defer func() { _ = gzipReader.Close() }()

	var buf bytes.Buffer
	// Copy one byte past the limit so an oversized payload is detectable.
	n, err := io.CopyN(&buf, gzipReader, MaxUnpackSize+1)
	if err != nil && !errors.Is(err, io.EOF) {
		return nil, fmt.Errorf("failed to read gzip file: %w", err)
	}
	if n > MaxUnpackSize {
		return nil, fmt.Errorf("gzip file exceeds %d bytes", MaxUnpackSize)
	}
	return buf.Bytes(), nil
}
// decompressZip extracts the single file contained in a zip archive given
// as a byte slice. The archive must contain exactly one file. Extraction
// is capped at MaxUnpackSize to guard against decompression bombs; an
// oversized payload returns an error instead of being silently truncated.
func decompressZip(data []byte) ([]byte, error) {
	zipReader, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
	if err != nil {
		return nil, fmt.Errorf("failed to create zip reader: %w", err)
	}

	// Ensure there is only one file in the zip.
	if len(zipReader.File) != 1 {
		return nil, fmt.Errorf("zip file must contain exactly one file")
	}

	// Read the single file in the zip.
	file := zipReader.File[0]
	fileReader, err := file.Open()
	if err != nil {
		return nil, fmt.Errorf("failed to open file in zip: %w", err)
	}
	defer func() { _ = fileReader.Close() }()

	var buf bytes.Buffer
	// Copy one byte past the limit so an oversized payload is detectable.
	n, err := io.CopyN(&buf, fileReader, MaxUnpackSize+1)
	if err != nil && !errors.Is(err, io.EOF) {
		return nil, fmt.Errorf("failed to read file in zip: %w", err)
	}
	if n > MaxUnpackSize {
		return nil, fmt.Errorf("file in zip exceeds %d bytes", MaxUnpackSize)
	}
	return buf.Bytes(), nil
}

110
service/updates/index.go Normal file
View file

@ -0,0 +1,110 @@
package updates
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/safing/portmaster/base/log"
)
// UpdateIndex describes one update channel: where its installed bundle
// lives, where new downloads are staged, and where its index is fetched from.
type UpdateIndex struct {
	Directory         string   // directory of the installed bundle (contains IndexFile)
	DownloadDirectory string   // staging directory for downloaded updates
	Ignore            []string // entries to ignore; not evaluated in this file — TODO confirm semantics
	IndexURLs         []string // index download URLs, tried in order
	IndexFile         string   // index file name within Directory / DownloadDirectory
	AutoApply         bool     // whether updates should apply automatically; not evaluated in this file — TODO confirm
}
// downloadIndexFile ensures the index directories exist and fetches the
// index file, trying each configured URL until one succeeds. The last
// download error is returned if every URL fails.
func (ui *UpdateIndex) downloadIndexFile() (err error) {
	_ = os.MkdirAll(ui.Directory, defaultDirMode)
	_ = os.MkdirAll(ui.DownloadDirectory, defaultDirMode)
	for _, url := range ui.IndexURLs {
		err = ui.downloadIndexFileFromURL(url)
		if err == nil {
			// Downloading was successful.
			return nil
		}
		log.Warningf("updates: %s", err)
	}
	return err
}
// checkForUpdates downloads the latest index file and reports whether the
// downloaded bundle version differs from the currently installed one.
// If no installed bundle can be read, it reports true (update needed)
// together with the error.
func (ui *UpdateIndex) checkForUpdates() (bool, error) {
	err := ui.downloadIndexFile()
	if err != nil {
		return false, err
	}
	currentBundle, err := ui.GetInstallBundle()
	if err != nil {
		return true, err // Current installed bundle not found, act as there is update.
	}
	updateBundle, err := ui.GetUpdateBundle()
	if err != nil {
		return false, err
	}
	// Any version difference counts as an update.
	return currentBundle.Version != updateBundle.Version, nil
}
// downloadIndexFileFromURL fetches the index file from url and writes it
// into the download directory.
func (ui *UpdateIndex) downloadIndexFileFromURL(url string) error {
	client := http.Client{}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("failed a get request to %s: %w", url, err)
	}
	defer func() { _ = resp.Body.Close() }()

	// Without this check a 404/500 error page would be saved as the index.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("server returned non-OK status for %s: %d %s", url, resp.StatusCode, resp.Status)
	}

	// O_TRUNC is required: overwriting a longer old index without
	// truncation would leave stale trailing bytes and corrupt the JSON.
	filePath := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultFileMode)
	if err != nil {
		return err
	}
	defer func() { _ = file.Close() }()

	if _, err := io.Copy(file, resp.Body); err != nil {
		return err
	}
	return nil
}
// GetInstallBundle parses and returns the currently installed bundle index.
func (ui *UpdateIndex) GetInstallBundle() (*Bundle, error) {
	return ui.GetBundle(fmt.Sprintf("%s/%s", ui.Directory, ui.IndexFile))
}
// GetUpdateBundle parses and returns the downloaded (staged) bundle index.
func (ui *UpdateIndex) GetUpdateBundle() (*Bundle, error) {
	return ui.GetBundle(fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile))
}
// GetBundle reads and parses the bundle description from indexFile.
func (ui *UpdateIndex) GetBundle(indexFile string) (*Bundle, error) {
	// os.ReadFile replaces the original open/ReadAll/defer-close sequence.
	content, err := os.ReadFile(indexFile)
	if err != nil {
		return nil, fmt.Errorf("failed to open index file: %w", err)
	}

	// Parse
	var bundle Bundle
	if err := json.Unmarshal(content, &bundle); err != nil {
		return nil, err
	}
	return &bundle, nil
}

View file

@ -1,20 +1,14 @@
package updates
import (
"context"
"errors"
"flag"
"fmt"
"net/url"
"runtime"
"time"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
)
const (
@ -48,9 +42,6 @@ var (
userAgentFromFlag string
updateServerFromFlag string
updateASAP bool
disableTaskSchedule bool
db = database.NewInterface(&database.Options{
Local: true,
Internal: true,
@ -60,163 +51,126 @@ var (
// more context to requests made by the registry when
// fetching resources from the update server.
UserAgent = fmt.Sprintf("Portmaster (%s %s)", runtime.GOOS, runtime.GOARCH)
// DefaultUpdateURLs defines the default base URLs of the update server.
DefaultUpdateURLs = []string{
"https://updates.safing.io",
}
// DisableSoftwareAutoUpdate specifies whether software updates should be disabled.
// This is used on Android, as it will never require binary updates.
DisableSoftwareAutoUpdate = false
)
const (
updatesDirName = "updates"
updateTaskRepeatDuration = 1 * time.Hour
)
func init() {
flag.StringVar(&updateServerFromFlag, "update-server", "", "set an alternative update server (full URL)")
flag.StringVar(&userAgentFromFlag, "update-agent", "", "set an alternative user agent for requests to the update server")
}
func prep() error {
// Check if update server URL supplied via flag is a valid URL.
if updateServerFromFlag != "" {
u, err := url.Parse(updateServerFromFlag)
if err != nil {
return fmt.Errorf("supplied update server URL is invalid: %w", err)
}
if u.Scheme != "https" {
return errors.New("supplied update server URL must use HTTPS")
}
}
if err := registerConfig(); err != nil {
return err
}
return registerAPIEndpoints()
}
func start() error {
initConfig()
// module.restartWorkerMgr.Repeat(10 * time.Minute)
// module.instance.Config().EventConfigChange.AddCallback("update registry config", updateRegistryConfig)
module.restartWorkerMgr.Repeat(10 * time.Minute)
module.instance.Config().EventConfigChange.AddCallback("update registry config", updateRegistryConfig)
// // create registry
// registry = &updater.ResourceRegistry{
// Name: ModuleName,
// UpdateURLs: DefaultUpdateURLs,
// UserAgent: UserAgent,
// MandatoryUpdates: helper.MandatoryUpdates(),
// AutoUnpack: helper.AutoUnpackUpdates(),
// Verification: helper.VerificationConfig,
// DevMode: devMode(),
// Online: true,
// }
// // Override values from flags.
// if userAgentFromFlag != "" {
// registry.UserAgent = userAgentFromFlag
// }
// if updateServerFromFlag != "" {
// registry.UpdateURLs = []string{updateServerFromFlag}
// }
// create registry
registry = &updater.ResourceRegistry{
Name: ModuleName,
UpdateURLs: DefaultUpdateURLs,
UserAgent: UserAgent,
MandatoryUpdates: helper.MandatoryUpdates(),
AutoUnpack: helper.AutoUnpackUpdates(),
Verification: helper.VerificationConfig,
DevMode: devMode(),
Online: true,
}
// Override values from flags.
if userAgentFromFlag != "" {
registry.UserAgent = userAgentFromFlag
}
if updateServerFromFlag != "" {
registry.UpdateURLs = []string{updateServerFromFlag}
}
// pre-init state
updateStateExport, err := LoadStateExport()
if err != nil {
log.Debugf("updates: failed to load exported update state: %s", err)
} else if updateStateExport.UpdateState != nil {
err := registry.PreInitUpdateState(*updateStateExport.UpdateState)
if err != nil {
return err
}
}
// // pre-init state
// updateStateExport, err := LoadStateExport()
// if err != nil {
// log.Debugf("updates: failed to load exported update state: %s", err)
// } else if updateStateExport.UpdateState != nil {
// err := registry.PreInitUpdateState(*updateStateExport.UpdateState)
// if err != nil {
// return err
// }
// }
// initialize
err = registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
if err != nil {
return err
}
// err := registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
// if err != nil {
// return err
// }
// register state provider
err = registerRegistryStateProvider()
if err != nil {
return err
}
registry.StateNotifyFunc = pushRegistryState
// // register state provider
// err = registerRegistryStateProvider()
// if err != nil {
// return err
// }
// registry.StateNotifyFunc = pushRegistryState
// Set indexes based on the release channel.
warning := helper.SetIndexes(
registry,
initialReleaseChannel,
true,
enableSoftwareUpdates() && !DisableSoftwareAutoUpdate,
enableIntelUpdates(),
)
if warning != nil {
log.Warningf("updates: %s", warning)
}
// // Set indexes based on the release channel.
// warning := helper.SetIndexes(
// registry,
// initialReleaseChannel,
// true,
// enableSoftwareUpdates() && !DisableSoftwareAutoUpdate,
// enableIntelUpdates(),
// )
// if warning != nil {
// log.Warningf("updates: %s", warning)
// }
err = registry.LoadIndexes(module.m.Ctx())
if err != nil {
log.Warningf("updates: failed to load indexes: %s", err)
}
// err = registry.LoadIndexes(module.m.Ctx())
// if err != nil {
// log.Warningf("updates: failed to load indexes: %s", err)
// }
err = registry.ScanStorage("")
if err != nil {
log.Warningf("updates: error during storage scan: %s", err)
}
// err = registry.ScanStorage("")
// if err != nil {
// log.Warningf("updates: error during storage scan: %s", err)
// }
registry.SelectVersions()
module.EventVersionsUpdated.Submit(struct{}{})
// registry.SelectVersions()
// module.EventVersionsUpdated.Submit(struct{}{})
// Initialize the version export - this requires the registry to be set up.
err = initVersionExport()
if err != nil {
return err
}
// // Initialize the version export - this requires the registry to be set up.
// err = initVersionExport()
// if err != nil {
// return err
// }
// start updater task
if !disableTaskSchedule {
_ = module.updateWorkerMgr.Repeat(30 * time.Minute)
}
// // start updater task
// if !disableTaskSchedule {
// _ = module.updateWorkerMgr.Repeat(30 * time.Minute)
// }
if updateASAP {
module.updateWorkerMgr.Go()
}
// if updateASAP {
// module.updateWorkerMgr.Go()
// }
// react to upgrades
if err := initUpgrader(); err != nil {
return err
}
// // react to upgrades
// if err := initUpgrader(); err != nil {
// return err
// }
warnOnIncorrectParentPath()
// warnOnIncorrectParentPath()
return nil
}
// TriggerUpdate queues the update task to execute ASAP.
func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
switch {
case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
return errors.New("automatic updating is disabled")
// switch {
// case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
// return errors.New("automatic updating is disabled")
default:
if forceIndexCheck {
forceCheck.Set()
}
if downloadAll {
forceDownload.Set()
}
// default:
// if forceIndexCheck {
// forceCheck.Set()
// }
// if downloadAll {
// forceDownload.Set()
// }
// If index check if forced, start quicker.
module.updateWorkerMgr.Go()
}
// // If index check if forced, start quicker.
// module.updateWorkerMgr.Go()
// }
log.Debugf("updates: triggering update to run as soon as possible")
return nil
@ -232,68 +186,66 @@ func DisableUpdateSchedule() error {
// return errors.New("module already online")
// }
disableTaskSchedule = true
return nil
}
func checkForUpdates(ctx *mgr.WorkerCtx) (err error) {
// Set correct error if context was canceled.
defer func() {
select {
case <-ctx.Done():
err = context.Canceled
default:
}
}()
// defer func() {
// select {
// case <-ctx.Done():
// err = context.Canceled
// default:
// }
// }()
// Get flags.
forceIndexCheck := forceCheck.SetToIf(true, false)
downloadAll := forceDownload.SetToIf(true, false)
// // Get flags.
// forceIndexCheck := forceCheck.SetToIf(true, false)
// downloadAll := forceDownload.SetToIf(true, false)
// Check again if downloading updates is enabled, or forced.
if !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates() {
log.Warningf("updates: automatic updates are disabled")
return nil
}
// // Check again if downloading updates is enabled, or forced.
// if !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates() {
// log.Warningf("updates: automatic updates are disabled")
// return nil
// }
defer func() {
// Resolve any error and send success notification.
if err == nil {
log.Infof("updates: successfully checked for updates")
notifyUpdateSuccess(forceIndexCheck)
return
}
// defer func() {
// // Resolve any error and send success notification.
// if err == nil {
// log.Infof("updates: successfully checked for updates")
// notifyUpdateSuccess(forceIndexCheck)
// return
// }
// Log and notify error.
log.Errorf("updates: check failed: %s", err)
notifyUpdateCheckFailed(forceIndexCheck, err)
}()
// // Log and notify error.
// log.Errorf("updates: check failed: %s", err)
// notifyUpdateCheckFailed(forceIndexCheck, err)
// }()
if err = registry.UpdateIndexes(ctx.Ctx()); err != nil {
err = fmt.Errorf("failed to update indexes: %w", err)
return //nolint:nakedret // TODO: Would "return err" work with the defer?
}
// if err = registry.UpdateIndexes(ctx.Ctx()); err != nil {
// err = fmt.Errorf("failed to update indexes: %w", err)
// return //nolint:nakedret // TODO: Would "return err" work with the defer?
// }
err = registry.DownloadUpdates(ctx.Ctx(), downloadAll)
if err != nil {
err = fmt.Errorf("failed to download updates: %w", err)
return //nolint:nakedret // TODO: Would "return err" work with the defer?
}
// err = registry.DownloadUpdates(ctx.Ctx(), downloadAll)
// if err != nil {
// err = fmt.Errorf("failed to download updates: %w", err)
// return //nolint:nakedret // TODO: Would "return err" work with the defer?
// }
registry.SelectVersions()
// registry.SelectVersions()
// Unpack selected resources.
err = registry.UnpackResources()
if err != nil {
err = fmt.Errorf("failed to unpack updates: %w", err)
return //nolint:nakedret // TODO: Would "return err" work with the defer?
}
// // Unpack selected resources.
// err = registry.UnpackResources()
// if err != nil {
// err = fmt.Errorf("failed to unpack updates: %w", err)
// return //nolint:nakedret // TODO: Would "return err" work with the defer?
// }
// Purge old resources
registry.Purge(2)
// // Purge old resources
// registry.Purge(2)
module.EventResourcesUpdated.Submit(struct{}{})
// module.EventResourcesUpdated.Submit(struct{}{})
return nil
}
@ -314,5 +266,6 @@ func RootPath() string {
// return ""
// }
return registry.StorageDir().Path
// return registry.StorageDir().Path
return ""
}

View file

@ -2,14 +2,24 @@ package updates
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync/atomic"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
)
// Permissions for files and directories created by the updater.
const (
	defaultFileMode = os.FileMode(0o0644)
	defaultDirMode  = os.FileMode(0o0755)
)
// Updates provides access to released artifacts.
type Updates struct {
m *mgr.Manager
@ -21,6 +31,9 @@ type Updates struct {
EventResourcesUpdated *mgr.EventMgr[struct{}]
EventVersionsUpdated *mgr.EventMgr[struct{}]
binUpdates UpdateIndex
intelUpdates UpdateIndex
instance instance
}
@ -40,19 +53,84 @@ func New(instance instance) (*Updates, error) {
m: m,
states: m.NewStateMgr(),
updateWorkerMgr: m.NewWorkerMgr("updater", checkForUpdates, nil),
restartWorkerMgr: m.NewWorkerMgr("automatic restart", automaticRestart, nil),
EventResourcesUpdated: mgr.NewEventMgr[struct{}](ResourceUpdateEvent, m),
EventVersionsUpdated: mgr.NewEventMgr[struct{}](VersionUpdateEvent, m),
instance: instance,
}
if err := prep(); err != nil {
return nil, err
// Events
module.updateWorkerMgr = m.NewWorkerMgr("updater", module.checkForUpdates, nil)
module.restartWorkerMgr = m.NewWorkerMgr("automatic restart", automaticRestart, nil)
module.binUpdates = UpdateIndex{
Directory: "/usr/lib/portmaster",
DownloadDirectory: "/var/portmaster/new_bin",
Ignore: []string{"databases", "intel", "config.json"},
IndexURLs: []string{"http://localhost:8000/test-binary.json"},
IndexFile: "bin-index.json",
AutoApply: false,
}
module.intelUpdates = UpdateIndex{
Directory: "/var/portmaster/intel",
DownloadDirectory: "/var/portmaster/new_intel",
IndexURLs: []string{"http://localhost:8000/test-intel.json"},
IndexFile: "intel-index.json",
AutoApply: true,
}
return module, nil
}
// deleteUnfinishedDownloads removes all leftover "*.download" temp files
// below rootDir; such files indicate downloads that were interrupted
// before being renamed into place.
func deleteUnfinishedDownloads(rootDir string) error {
	return filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Only plain files carrying the temp-download suffix are removed.
		if !info.IsDir() && strings.HasSuffix(info.Name(), ".download") {
			// Log message fixed: "updates:" prefix for consistency with the
			// rest of the package, and no trailing newline in a log call.
			log.Warningf("updates: deleting unfinished download: %s", path)
			if err := os.Remove(path); err != nil {
				return fmt.Errorf("failed to delete file %s: %w", path, err)
			}
		}
		return nil
	})
}
// checkForUpdates checks the binary and intel update indexes and downloads
// any new bundle into the respective staging download directory. Failures
// are logged; the worker itself always reports success so it keeps being
// rescheduled.
func (u *Updates) checkForUpdates(_ *mgr.WorkerCtx) error {
	// Clean up temp files from previously interrupted binary downloads.
	_ = deleteUnfinishedDownloads(u.binUpdates.DownloadDirectory)
	hasUpdate, err := u.binUpdates.checkForUpdates()
	if err != nil {
		log.Warningf("failed to get binary index file: %s", err)
	}
	if hasUpdate {
		binBundle, err := u.binUpdates.GetUpdateBundle()
		if err == nil {
			log.Debugf("Bin Bundle: %+v", binBundle)
			_ = os.MkdirAll(u.binUpdates.DownloadDirectory, defaultDirMode)
			binBundle.downloadAndVerify(u.binUpdates.DownloadDirectory)
		}
	}

	// Same procedure for the intel (data) updates.
	_ = deleteUnfinishedDownloads(u.intelUpdates.DownloadDirectory)
	hasUpdate, err = u.intelUpdates.checkForUpdates()
	if err != nil {
		log.Warningf("failed to get intel index file: %s", err)
	}
	if hasUpdate {
		intelBundle, err := u.intelUpdates.GetUpdateBundle()
		if err == nil {
			log.Debugf("Intel Bundle: %+v", intelBundle)
			_ = os.MkdirAll(u.intelUpdates.DownloadDirectory, defaultDirMode)
			intelBundle.downloadAndVerify(u.intelUpdates.DownloadDirectory)
		}
	}
	return nil
}
// States returns the state manager.
func (u *Updates) States() *mgr.StateMgr {
return u.states
@ -65,7 +143,36 @@ func (u *Updates) Manager() *mgr.Manager {
// Start starts the module.
func (u *Updates) Start() error {
return start()
initConfig()
u.m.Go("check for updates", func(w *mgr.WorkerCtx) error {
binBundle, err := u.binUpdates.GetInstallBundle()
if err != nil {
log.Warningf("failed to get binary bundle: %s", err)
} else {
err = binBundle.Verify(u.binUpdates.Directory)
if err != nil {
log.Warningf("binary bundle is not valid: %s", err)
} else {
log.Infof("binary bundle is valid")
}
}
intelBundle, err := u.intelUpdates.GetInstallBundle()
if err != nil {
log.Warningf("failed to get intel bundle: %s", err)
} else {
err = intelBundle.Verify(u.intelUpdates.Directory)
if err != nil {
log.Warningf("intel bundle is not valid: %s", err)
} else {
log.Infof("intel bundle is valid")
}
}
return nil
})
u.updateWorkerMgr.Go()
return nil
}
// Stop stops the module.

View file

@ -0,0 +1 @@
package updates

View file

@ -181,16 +181,16 @@ func upgradeHub() error {
DelayedRestart(time.Duration(delayMinutes+60) * time.Minute)
// Increase update checks in order to detect aborts better.
if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(10 * time.Minute)
}
// if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(10 * time.Minute)
// }
} else {
AbortRestart()
// Set update task schedule back to normal.
if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(updateTaskRepeatDuration)
}
// if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(updateTaskRepeatDuration)
// }
}
return nil