Mirror of https://github.com/safing/portmaster (synced 2025-09-01 10:09:11 +00:00)

[WIP] Simplify update system

This commit is contained in:
  parent d6669ff8f5
  commit 0f3f3c360f

29 changed files with 1101 additions and 891 deletions
@@ -36,6 +36,47 @@ func (n *Notifications) Stop() error {
 	return nil
 }

+// NotifyInfo is a helper method for quickly showing an info notification.
+// The notification will be activated immediately.
+// If the provided id is empty, an id will be derived from msg.
+// ShowOnSystem is disabled.
+// If no actions are defined, a default "OK" (ID:"ack") action will be added.
+func (n *Notifications) NotifyInfo(id, title, msg string, actions ...Action) *Notification {
+	return NotifyInfo(id, title, msg, actions...)
+}
+
+// NotifyWarn is a helper method for quickly showing a warning notification.
+// The notification will be activated immediately.
+// If the provided id is empty, an id will be derived from msg.
+// ShowOnSystem is enabled.
+// If no actions are defined, a default "OK" (ID:"ack") action will be added.
+func (n *Notifications) NotifyWarn(id, title, msg string, actions ...Action) *Notification {
+	return NotifyWarn(id, title, msg, actions...)
+}
+
+// NotifyError is a helper method for quickly showing an error notification.
+// The notification will be activated immediately.
+// If the provided id is empty, an id will be derived from msg.
+// ShowOnSystem is enabled.
+// If no actions are defined, a default "OK" (ID:"ack") action will be added.
+func (n *Notifications) NotifyError(id, title, msg string, actions ...Action) *Notification {
+	return NotifyError(id, title, msg, actions...)
+}
+
+// NotifyPrompt is a helper method for quickly showing a prompt notification.
+// The notification will be activated immediately.
+// If the provided id is empty, an id will be derived from msg.
+// ShowOnSystem is disabled.
+// If no actions are defined, a default "OK" (ID:"ack") action will be added.
+func (n *Notifications) NotifyPrompt(id, title, msg string, actions ...Action) *Notification {
+	return NotifyPrompt(id, title, msg, actions...)
+}
+
+// Notify sends the given notification.
+func (n *Notifications) Notify(notification *Notification) *Notification {
+	return Notify(notification)
+}
+
 func prep() error {
 	return registerConfig()
 }
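The helpers added above are thin wrappers around the existing package-level functions. A minimal call-site sketch, assuming only what the doc comments state (the title and message text here are illustrative, not from the commit):

package main

import "github.com/safing/portmaster/base/notifications"

func main() {
	// Empty id: one is derived from msg. No actions given, so the default
	// "OK" (ID "ack") action is added; ShowOnSystem is enabled for warnings.
	notifications.NotifyWarn("", "Update failed", "Could not download the update index.")
}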
@@ -9,7 +9,7 @@ import (
 )

 var (
-	bundleSettings = updates.BundleFileSettings{
+	bundleSettings = updates.IndexScanConfig{
 		Name:            "Portmaster Binaries",
 		PrimaryArtifact: "linux_amd64/portmaster-core",
 		BaseURL:         "https://updates.safing.io/",

@@ -93,5 +93,5 @@ func New(instance instance) (*Broadcasts, error) {
 }

 type instance interface {
-	IntelUpdates() *updates.Updates
+	IntelUpdates() *updates.Updater
 }

@@ -116,6 +116,6 @@ type instance interface {
 	Shutdown()
 	Restart()
 	AddWorkerInfoToDebugInfo(di *debug.Info)
-	BinaryUpdates() *updates.Updates
-	IntelUpdates() *updates.Updates
+	BinaryUpdates() *updates.Updater
+	IntelUpdates() *updates.Updater
 }
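The same mechanical rename, updates.Updates to updates.Updater, repeats across the per-module instance interfaces in the hunks that follow. A minimal sketch of the dependency pattern being updated (package, type, and function names here are illustrative, not part of the commit):

package somemodule

import "github.com/safing/portmaster/service/updates"

// Each module declares only the slice of the service instance it needs,
// so the type rename stays a one-line edit per consumer.
type instance interface {
	BinaryUpdates() *updates.Updater
	IntelUpdates() *updates.Updater
}

type module struct {
	binUpdater *updates.Updater
}

func newModule(inst instance) *module {
	return &module{binUpdater: inst.BinaryUpdates()}
}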
@@ -99,5 +99,5 @@ func New(instance instance) (*Interception, error) {
 }

 type instance interface {
-	BinaryUpdates() *updates.Updates
+	BinaryUpdates() *updates.Updater
 }

@@ -161,7 +161,7 @@ func New(instance instance) (*Firewall, error) {

 type instance interface {
 	Config() *config.Config
-	BinaryUpdates() *updates.Updates
+	BinaryUpdates() *updates.Updater
 	Profile() *profile.ProfileModule
 	Captain() *captain.Captain
 	Access() *access.Access

@@ -67,8 +67,8 @@ type Instance struct {
 	base *base.Base

 	core          *core.Core
-	binaryUpdates *updates.Updates
-	intelUpdates  *updates.Updates
+	binaryUpdates *updates.Updater
+	intelUpdates  *updates.Updater
 	geoip         *geoip.GeoIP
 	netenv        *netenv.NetEnv
 	ui            *ui.UI

@@ -126,14 +126,14 @@ func getCurrentBinaryFolder() (string, error) {

 // New returns a new Portmaster service instance.
 func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
-	var binaryUpdateIndex updates.UpdateIndex
-	var intelUpdateIndex updates.UpdateIndex
+	var binaryUpdateIndex updates.Config
+	var intelUpdateIndex updates.Config
 	if go_runtime.GOOS == "windows" {
 		binaryFolder, err := getCurrentBinaryFolder()
 		if err != nil {
 			return nil, err
 		}
-		binaryUpdateIndex = updates.UpdateIndex{
+		binaryUpdateIndex = updates.Config{
 			Directory:         binaryFolder, // Default: C:/Program Files/Portmaster
 			DownloadDirectory: os.ExpandEnv("$ProgramData/Portmaster/new_binary"),
 			PurgeDirectory:    filepath.Join(binaryFolder, "old_binary"), // Default: C:/Program Files/Portmaster/old_binary

@@ -144,7 +144,7 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
 			NeedsRestart: true,
 		}

-		intelUpdateIndex = updates.UpdateIndex{
+		intelUpdateIndex = updates.Config{
 			Directory:         os.ExpandEnv("$ProgramData/Portmaster/intel"),
 			DownloadDirectory: os.ExpandEnv("$ProgramData/Portmaster/new_intel"),
 			PurgeDirectory:    os.ExpandEnv("$ProgramData/Portmaster/old_intel"),

@@ -154,7 +154,7 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
 			NeedsRestart: false,
 		}
 	} else if go_runtime.GOOS == "linux" {
-		binaryUpdateIndex = updates.UpdateIndex{
+		binaryUpdateIndex = updates.Config{
 			Directory:         "/usr/lib/portmaster",
 			DownloadDirectory: "/var/lib/portmaster/new_bin",
 			PurgeDirectory:    "/var/lib/portmaster/old_bin",

@@ -165,7 +165,7 @@ func New(svcCfg *ServiceConfig) (*Instance, error) { //nolint:maintidx
 			NeedsRestart: true,
 		}

-		intelUpdateIndex = updates.UpdateIndex{
+		intelUpdateIndex = updates.Config{
 			Directory:         "/var/lib/portmaster/intel",
 			DownloadDirectory: "/var/lib/portmaster/new_intel",
 			PurgeDirectory:    "/var/lib/portmaster/intel_bin",

@@ -454,12 +454,12 @@ func (i *Instance) Base() *base.Base {
 }

 // BinaryUpdates returns the updates module.
-func (i *Instance) BinaryUpdates() *updates.Updates {
+func (i *Instance) BinaryUpdates() *updates.Updater {
 	return i.binaryUpdates
 }

 // IntelUpdates returns the updates module.
-func (i *Instance) IntelUpdates() *updates.Updates {
+func (i *Instance) IntelUpdates() *updates.Updater {
 	return i.intelUpdates
 }

@@ -142,6 +142,6 @@ func New(instance instance) (*FilterLists, error) {
 }

 type instance interface {
-	IntelUpdates() *updates.Updates
+	IntelUpdates() *updates.Updater
 	NetEnv() *netenv.NetEnv
 }

@@ -16,12 +16,12 @@ type testInstance struct {
 	db      *dbmodule.DBModule
 	api     *api.API
 	config  *config.Config
-	updates *updates.Updates
+	updates *updates.Updater
 }

 var _ instance = &testInstance{}

-func (stub *testInstance) IntelUpdates() *updates.Updates {
+func (stub *testInstance) IntelUpdates() *updates.Updater {
 	return stub.updates
 }

@@ -77,7 +77,7 @@ func runTest(m *testing.M) error {
 	if err != nil {
 		return fmt.Errorf("failed to create api: %w", err)
 	}
-	stub.updates, err = updates.New(stub, "Test Intel", updates.UpdateIndex{
+	stub.updates, err = updates.New(stub, "Test Intel", updates.Config{
 		Directory: installDir,
 		IndexFile: "index.json",
 	})

@@ -66,5 +66,5 @@ func New(instance instance) (*GeoIP, error) {
 }

 type instance interface {
-	IntelUpdates() *updates.Updates
+	IntelUpdates() *updates.Updater
 }

@@ -16,12 +16,12 @@ type testInstance struct {
 	db      *dbmodule.DBModule
 	api     *api.API
 	config  *config.Config
-	updates *updates.Updates
+	updates *updates.Updater
 }

 var _ instance = &testInstance{}

-func (stub *testInstance) IntelUpdates() *updates.Updates {
+func (stub *testInstance) IntelUpdates() *updates.Updater {
 	return stub.updates
 }

@@ -77,7 +77,7 @@ func runTest(m *testing.M) error {
 	if err != nil {
 		return fmt.Errorf("failed to create api: %w", err)
 	}
-	stub.updates, err = updates.New(stub, "Test Intel", updates.UpdateIndex{
+	stub.updates, err = updates.New(stub, "Test Intel", updates.Config{
 		Directory: installDir,
 		IndexFile: "index.json",
 	})

@@ -107,5 +107,5 @@ func New(instance instance) (*NetEnv, error) {
 }

 type instance interface {
-	IntelUpdates() *updates.Updates
+	IntelUpdates() *updates.Updater
 }

@@ -70,5 +70,5 @@ func New(instance instance) (*ProcessModule, error) {
 }

 type instance interface {
-	BinaryUpdates() *updates.Updates
+	BinaryUpdates() *updates.Updater
 }

@@ -23,11 +23,11 @@ type testInstance struct {
 	db      *dbmodule.DBModule
 	api     *api.API
 	config  *config.Config
-	updates *updates.Updates
+	updates *updates.Updater
 	geoip   *geoip.GeoIP
 }

-func (stub *testInstance) IntelUpdates() *updates.Updates {
+func (stub *testInstance) IntelUpdates() *updates.Updater {
 	return stub.updates
 }

@@ -84,7 +84,7 @@ func runTest(m *testing.M) error {
 	if err != nil {
 		return fmt.Errorf("failed to create api: %w", err)
 	}
-	stub.updates, err = updates.New(stub, "Test Intel", updates.UpdateIndex{
+	stub.updates, err = updates.New(stub, "Test Intel", updates.Config{
 		Directory: installDir,
 		IndexFile: "index.json",
 	})

@@ -22,11 +22,11 @@ type testInstance struct {
 	base    *base.Base
 	api     *api.API
 	config  *config.Config
-	updates *updates.Updates
+	updates *updates.Updater
 	netenv  *netenv.NetEnv
 }

-func (stub *testInstance) IntelUpdates() *updates.Updates {
+func (stub *testInstance) IntelUpdates() *updates.Updater {
 	return stub.updates
 }

@@ -99,7 +99,7 @@ func runTest(m *testing.M) error {
 	if err != nil {
 		return fmt.Errorf("failed to create netenv: %w", err)
 	}
-	stub.updates, err = updates.New(stub, "Test Intel", updates.UpdateIndex{
+	stub.updates, err = updates.New(stub, "Test Intel", updates.Config{
 		Directory: installDir,
 		IndexFile: "index.json",
 	})

@@ -82,5 +82,5 @@ func New(instance instance) (*UI, error) {

 type instance interface {
 	API() *api.API
-	BinaryUpdates() *updates.Updates
+	BinaryUpdates() *updates.Updater
 }
@@ -1,142 +0,0 @@ (file removed)
-package updates
-
-import (
-	"bytes"
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-	"runtime"
-	"time"
-)
-
-const MaxUnpackSize = 1 << 30 // 2^30 == 1GB
-
-const currentPlatform = runtime.GOOS + "_" + runtime.GOARCH
-
-type Artifact struct {
-	Filename string   `json:"Filename"`
-	SHA256   string   `json:"SHA256"`
-	URLs     []string `json:"URLs"`
-	Platform string   `json:"Platform,omitempty"`
-	Unpack   string   `json:"Unpack,omitempty"`
-	Version  string   `json:"Version,omitempty"`
-
-	localFile string
-}
-
-func (a *Artifact) GetFileMode() os.FileMode {
-	// Special case for portmaster ui. Should be able to be executed from the regular user
-	if a.Platform == currentPlatform && a.Filename == "portmaster" {
-		return executableUIFileMode
-	}
-
-	if a.Platform == currentPlatform {
-		return executableFileMode
-	}
-
-	return defaultFileMode
-}
-
-type Bundle struct {
-	Name      string     `json:"Bundle"`
-	Version   string     `json:"Version"`
-	Published time.Time  `json:"Published"`
-	Artifacts []Artifact `json:"Artifacts"`
-}
-
-// LoadBundle loads and parses a bundle from filepath.
-func LoadBundle(indexFilepath string) (*Bundle, error) {
-	// Read
-	content, err := os.ReadFile(indexFilepath)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read index file: %w", err)
-	}
-
-	// Parse
-	var bundle Bundle
-	err = json.Unmarshal(content, &bundle)
-	if err != nil {
-		return nil, fmt.Errorf("%s %w", indexFilepath, err)
-	}
-
-	// Filter artifacts
-	filtered := make([]Artifact, 0)
-	for _, a := range bundle.Artifacts {
-		if a.Platform == "" || a.Platform == currentPlatform {
-			filtered = append(filtered, a)
-		}
-	}
-	bundle.Artifacts = filtered
-
-	return &bundle, nil
-}
-
-// ParseBundle parses a bundle from json string.
-func ParseBundle(jsonContent string) (*Bundle, error) {
-	// Parse
-	var bundle Bundle
-	err := json.Unmarshal([]byte(jsonContent), &bundle)
-	if err != nil {
-		return nil, fmt.Errorf("failed to parse bundle: %w", err)
-	}
-
-	// Filter artifacts
-	filtered := make([]Artifact, 0)
-	for _, a := range bundle.Artifacts {
-		if a.Platform == "" || a.Platform == currentPlatform {
-			filtered = append(filtered, a)
-		}
-	}
-	bundle.Artifacts = filtered
-
-	return &bundle, nil
-}
-
-// Verify checks if the files are present int the dataDir and have the correct hash.
-func (bundle *Bundle) Verify(dir string) error {
-	for _, artifact := range bundle.Artifacts {
-		artifactPath := filepath.Join(dir, artifact.Filename)
-		isValid, err := checkIfFileIsValid(artifactPath, artifact)
-		if err != nil {
-			return err
-		}
-
-		if !isValid {
-			return fmt.Errorf("file is not valid: %s", artifact.Filename)
-		}
-	}
-
-	return nil
-}
-
-func checkIfFileIsValid(filename string, artifact Artifact) (bool, error) {
-	// Check if file already exists
-	file, err := os.Open(filename)
-	if err != nil {
-		return false, err
-	}
-	defer func() { _ = file.Close() }()
-
-	providedHash, err := hex.DecodeString(artifact.SHA256)
-	if err != nil {
-		return false, fmt.Errorf("invalid provided hash %s: %w", artifact.SHA256, err)
-	}
-	if len(providedHash) != sha256.Size {
-		return false, fmt.Errorf("invalid hash length for %s", artifact.SHA256)
-	}
-
-	// Calculate hash of the file
-	fileHash := sha256.New()
-	if _, err := io.Copy(fileHash, file); err != nil {
-		return false, fmt.Errorf("failed to read file: %w", err)
-	}
-	hashInBytes := fileHash.Sum(nil)
-	if !bytes.Equal(providedHash, hashInBytes) {
-		return false, fmt.Errorf("file exist but the hash does not match: %s", filename)
-	}
-	return true, nil
-}
@@ -10,275 +10,210 @@ import (
 	"errors"
 	"fmt"
 	"io"
 	"io/fs"
 	"net/http"
 	"os"
 	"path/filepath"
 	"strings"

 	semver "github.com/hashicorp/go-version"

 	"github.com/safing/portmaster/base/log"
 )

 type Downloader struct {
-	dir       string
-	indexFile string
+	u         *Updater
+	index     *Index
 	indexURLs []string
-	bundle    *Bundle
-	version   *semver.Version
+
+	existingFiles map[string]string

 	httpClient http.Client
 }

-func CreateDownloader(index UpdateIndex) Downloader {
-	return Downloader{
-		dir:       index.DownloadDirectory,
-		indexFile: index.IndexFile,
-		indexURLs: index.IndexURLs,
+func NewDownloader(u *Updater, indexURLs []string) *Downloader {
+	return &Downloader{
+		u:         u,
+		indexURLs: indexURLs,
 	}
 }

-func (d *Downloader) downloadIndexFile(ctx context.Context) error {
-	// Make sure dir exists
-	err := os.MkdirAll(d.dir, defaultDirMode)
+func (d *Downloader) updateIndex(ctx context.Context) error {
+	// Make sure dir exists.
+	err := os.MkdirAll(d.u.cfg.DownloadDirectory, defaultDirMode)
 	if err != nil {
-		return fmt.Errorf("failed to create directory for updates: %s", d.dir)
+		return fmt.Errorf("create download directory: %s", d.u.cfg.DownloadDirectory)
 	}
-	var content string

+	// Try to download the index from one of the index URLs.
+	var (
+		indexData []byte
+		index     *Index
+	)
 	for _, url := range d.indexURLs {
-		content, err = d.downloadIndexFileFromURL(ctx, url)
-		if err != nil {
-			log.Warningf("updates: failed while downloading index file: %s", err)
-			continue
-		}
-		// Downloading was successful.
-		var bundle *Bundle
-		bundle, err = ParseBundle(content)
-		if err != nil {
-			log.Warningf("updates: %s", err)
-			continue
-		}
-		// Parsing was successful
-		var version *semver.Version
-		version, err = semver.NewVersion(bundle.Version)
-		if err != nil {
-			log.Warningf("updates: failed to parse bundle version: %s", err)
-			continue
+		// Download and verify index.
+		indexData, index, err = d.getIndex(ctx, url)
+		if err == nil {
+			// Valid index found!
+			break
 		}

-		// All checks passed. Set and exit the loop.
-		d.bundle = bundle
-		d.version = version
-		err = nil
-		break
+		log.Warningf("updates: failed to update index from %q: %s", url, err)
+		err = fmt.Errorf("update index file from %q: %s", url, err)
 	}

 	if err != nil {
-		return err
+		return fmt.Errorf("all index URLs failed, last error: %w", err)
 	}
+	d.index = index

-	// Write the content into a file.
-	indexFilepath := filepath.Join(d.dir, d.indexFile)
-	err = os.WriteFile(indexFilepath, []byte(content), defaultFileMode)
+	// Write the index into a file.
+	indexFilepath := filepath.Join(d.u.cfg.DownloadDirectory, d.u.cfg.IndexFile)
+	err = os.WriteFile(indexFilepath, []byte(indexData), defaultFileMode)
 	if err != nil {
-		return fmt.Errorf("failed to write index file: %w", err)
+		return fmt.Errorf("write index file: %w", err)
 	}

 	return nil
 }

-// Verify verifies if the downloaded files match the corresponding hash.
-func (d *Downloader) Verify() error {
-	err := d.parseBundle()
+func (d *Downloader) getIndex(ctx context.Context, url string) (indexData []byte, bundle *Index, err error) {
+	// Download data from URL.
+	indexData, err = d.downloadData(ctx, url)
 	if err != nil {
-		return err
+		return nil, nil, fmt.Errorf("GET index: %w", err)
 	}

-	return d.bundle.Verify(d.dir)
+	// Verify and parse index.
+	bundle, err = ParseIndex(indexData, d.u.cfg.Verify)
+	if err != nil {
+		return nil, nil, fmt.Errorf("parse index: %w", err)
+	}
+
+	return indexData, bundle, nil
 }

-func (d *Downloader) parseBundle() error {
-	indexFilepath := filepath.Join(d.dir, d.indexFile)
-	var err error
-	d.bundle, err = LoadBundle(indexFilepath)
-	if err != nil {
-		return err
+// gatherExistingFiles gathers the checksums on existing files.
+func (d *Downloader) gatherExistingFiles(dir string) error {
+	// Make sure map is initialized.
+	if d.existingFiles == nil {
+		d.existingFiles = make(map[string]string)
 	}

-	d.version, err = semver.NewVersion(d.bundle.Version)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (d *Downloader) downloadIndexFileFromURL(ctx context.Context, url string) (string, error) {
-	// Request the index file
-	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
-	if err != nil {
-		return "", fmt.Errorf("failed to create GET request to: %w", err)
-	}
-	if UserAgent != "" {
-		req.Header.Set("User-Agent", UserAgent)
-	}
-
-	// Perform request
-	resp, err := d.httpClient.Do(req)
-	if err != nil {
-		return "", fmt.Errorf("failed GET request to %s: %w", url, err)
-	}
-	defer func() { _ = resp.Body.Close() }()
-
-	// Check the status code
-	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
-		return "", fmt.Errorf("received error from the server status code: %s", resp.Status)
-	}
-
-	// Read the content.
-	content, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-
-	return string(content), nil
-}
-
-// CopyMatchingFilesFromCurrent check if there the current bundle files has matching files with the new bundle and copies them if they match.
-func (d *Downloader) copyMatchingFilesFromCurrent(currentFiles map[string]File) error {
-	// Make sure new dir exists
-	_ = os.MkdirAll(d.dir, defaultDirMode)
-
-	for _, a := range d.bundle.Artifacts {
-		currentFile, ok := currentFiles[a.Filename]
-		if ok && currentFile.Sha256() == a.SHA256 {
-			// Read the content of the current file.
-			content, err := os.ReadFile(currentFile.Path())
-			if err != nil {
-				return fmt.Errorf("failed to read file %s: %w", currentFile.Path(), err)
-			}
-
-			// Check if the content matches the artifact hash
-			expectedHash, err := hex.DecodeString(a.SHA256)
-			if err != nil || len(expectedHash) != sha256.Size {
-				return fmt.Errorf("invalid artifact hash %s: %w", a.SHA256, err)
-			}
-			hash := sha256.Sum256(content)
-			if !bytes.Equal(expectedHash, hash[:]) {
-				return fmt.Errorf("expected and file hash mismatch: %s", currentFile.Path())
-			}
-
-			// Create new file
-			destFilePath := filepath.Join(d.dir, a.Filename)
-			err = os.WriteFile(destFilePath, content, a.GetFileMode())
-			if err != nil {
-				return fmt.Errorf("failed to write to file %s: %w", destFilePath, err)
-			}
-			log.Debugf("updates: file copied from current version: %s", a.Filename)
-		}
-	}
-	return nil
-}
-
-func (d *Downloader) downloadAndVerify(ctx context.Context) error {
-	// Make sure we have the bundle file parsed.
-	err := d.parseBundle()
-	if err != nil {
-		return fmt.Errorf("invalid update bundle file: %w", err)
-	}
-
-	// Make sure dir exists
-	_ = os.MkdirAll(d.dir, defaultDirMode)
-
-	for _, artifact := range d.bundle.Artifacts {
-		filePath := filepath.Join(d.dir, artifact.Filename)
-
-		// Check file is already downloaded and valid.
-		exists, _ := checkIfFileIsValid(filePath, artifact)
-		if exists {
-			log.Debugf("updates: file already downloaded: %s", filePath)
-			continue
-		}
-
-		// Download artifact
-		err := d.processArtifact(ctx, artifact, filePath)
+	// Walk directory, just log errors.
+	err := filepath.WalkDir(dir, func(fullpath string, entry fs.DirEntry, err error) error {
+		// Fail on access error.
 		if err != nil {
 			return err
 		}
-	}
-	return nil
-}

-func (d *Downloader) processArtifact(ctx context.Context, artifact Artifact, filePath string) error {
-	providedHash, err := hex.DecodeString(artifact.SHA256)
-	if err != nil || len(providedHash) != sha256.Size {
-		return fmt.Errorf("invalid provided hash %s: %w", artifact.SHA256, err)
-	}
+		// Skip folders.
+		if entry.IsDir() {
+			return nil
+		}

-	// Download and verify
-	log.Debugf("updates: downloading file: %s", artifact.Filename)
-	content, err := d.downloadAndVerifyArtifact(ctx, artifact.URLs, artifact.Unpack, providedHash)
-	if err != nil {
-		return fmt.Errorf("failed to download artifact: %w", err)
-	}
-
-	// Save
-	tmpFilename := fmt.Sprintf("%s.download", filePath)
-	err = os.WriteFile(tmpFilename, content, artifact.GetFileMode())
-	if err != nil {
-		return fmt.Errorf("failed to write to file: %w", err)
-	}
-
-	// Rename
-	err = os.Rename(tmpFilename, filePath)
-	if err != nil {
-		return fmt.Errorf("failed to rename file: %w", err)
-	}
-
-	log.Infof("updates: file downloaded and verified: %s", artifact.Filename)
-
-	return nil
-}
-
-func (d *Downloader) downloadAndVerifyArtifact(ctx context.Context, urls []string, unpack string, expectedHash []byte) ([]byte, error) {
-	var err error
-	var content []byte
-
-	for _, url := range urls {
-		// Download
-		content, err = d.downloadFile(ctx, url)
+		// Read full file.
+		fileData, err := os.ReadFile(fullpath)
 		if err != nil {
-			err := fmt.Errorf("failed to download artifact from url: %s, %w", url, err)
-			log.Warningf("%s", err)
-			continue
+			log.Debugf("updates: failed to read file %q while searching for existing files: %w", fullpath, err)
+			return fmt.Errorf("failed to read file %s: %w", fullpath, err)
 		}

-		// Decompress
-		if unpack != "" {
-			content, err = decompress(unpack, content)
-			if err != nil {
-				err = fmt.Errorf("failed to decompress artifact: %w", err)
-				log.Warningf("%s", err)
-				continue
-			}
-		}
+		// Calculate checksum and add it to the existing files.
+		hashSum := sha256.Sum256(fileData)
+		d.existingFiles[hex.EncodeToString(hashSum[:])] = fullpath

-		// Calculate and verify hash
-		hash := sha256.Sum256(content)
-		if !bytes.Equal(expectedHash, hash[:]) {
-			err := fmt.Errorf("artifact hash does not match")
-			log.Warningf("%s", err)
-			continue
-		}
-
-		// All file downloaded and verified.
-		return content, nil
+		return nil
+	})
+	if err != nil {
+		return fmt.Errorf("searching for existing files: %w", err)
 	}

-	return nil, err
+	return nil
 }

-func (d *Downloader) downloadFile(ctx context.Context, url string) ([]byte, error) {
-	// Try to make the request
+func (d *Downloader) downloadArtifacts(ctx context.Context) error {
+	// Make sure dir exists.
+	err := os.MkdirAll(d.u.cfg.DownloadDirectory, defaultDirMode)
+	if err != nil {
+		return fmt.Errorf("create download directory: %s", d.u.cfg.DownloadDirectory)
+	}
+
+artifacts:
+	for _, artifact := range d.index.Artifacts {
+		dstFilePath := filepath.Join(d.u.cfg.DownloadDirectory, artifact.Filename)
+
+		// Check if we can copy the artifact from disk instead.
+		if existingFile, ok := d.existingFiles[artifact.SHA256]; ok {
+			// Check if this is the same file.
+			if existingFile == dstFilePath {
+				continue artifacts
+			}
+			// Copy and check.
+			err = copyAndCheckSHA256Sum(existingFile, dstFilePath, artifact.SHA256, artifact.GetFileMode())
+			if err == nil {
+				continue artifacts
+			}
+			log.Debugf("updates: failed to copy existing file %s: %w", artifact.Filename, err)
+		}
+
+		// Try to download the artifact from one of the URLs.
+		var artifactData []byte
+	artifactURLs:
+		for _, url := range artifact.URLs {
+			// Download and verify artifact.
+			artifactData, err = d.getArtifact(ctx, artifact, url)
+			if err == nil {
+				// Valid artifact found!
+				break artifactURLs
+			}
+			err = fmt.Errorf("download artifact from %q: %w", url, err)
+		}
+		if err != nil {
+			return fmt.Errorf("all artifact URLs for %s failed, last error: %w", artifact.Filename, err)
+		}
+
+		// Write artifact to temporary file.
+		tmpFilename := dstFilePath + ".download"
+		err = os.WriteFile(tmpFilename, artifactData, artifact.GetFileMode())
+		if err != nil {
+			return fmt.Errorf("write %s to temp file: %w", artifact.Filename, err)
+		}
+
+		// Rename/Move to actual location.
+		err = os.Rename(tmpFilename, dstFilePath)
+		if err != nil {
+			return fmt.Errorf("rename %s after write: %w", artifact.Filename, err)
+		}
+
+		log.Infof("updates: downloaded and verified %s", artifact.Filename)
+	}
+	return nil
+}
+
+func (d *Downloader) getArtifact(ctx context.Context, artifact Artifact, url string) ([]byte, error) {
+	// Download data from URL.
+	artifactData, err := d.downloadData(ctx, url)
+	if err != nil {
+		return nil, fmt.Errorf("GET artifact: %w", err)
+	}
+
+	// Decompress artifact data, if configured.
+	// TODO: Normally we should do operations on "untrusted" data _after_ verification,
+	// but we really want the checksum to be for the unpacked data. Should we add another checksum, or is HTTPS enough?
+	if artifact.Unpack != "" {
+		artifactData, err = decompress(artifact.Unpack, artifactData)
+		if err != nil {
+			return nil, fmt.Errorf("decompress: %w", err)
+		}
+	}
+
+	// Verify checksum.
+	if err := checkSHA256Sum(artifactData, artifact.SHA256); err != nil {
+		return nil, err
+	}
+
+	return artifactData, nil
+}
+
+func (d *Downloader) downloadData(ctx context.Context, url string) ([]byte, error) {
+	// Setup request.
 	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, http.NoBody)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create GET request to %s: %w", url, err)

@@ -286,17 +221,20 @@ func (d *Downloader) downloadFile(ctx context.Context, url string) ([]byte, erro
 	if UserAgent != "" {
 		req.Header.Set("User-Agent", UserAgent)
 	}

+	// Start request with shared http client.
 	resp, err := d.httpClient.Do(req)
 	if err != nil {
 		return nil, fmt.Errorf("failed a get file request to: %w", err)
 	}
 	defer func() { _ = resp.Body.Close() }()

-	// Check if the server returned an error
+	// Check for HTTP status errors.
 	if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("server returned non-OK status: %d %s", resp.StatusCode, resp.Status)
 	}

 	// Read the full body and return it.
 	content, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return nil, fmt.Errorf("failed to read body of response: %w", err)

@@ -304,25 +242,6 @@ func (d *Downloader) downloadFile(ctx context.Context, url string) ([]byte, erro
 	return content, nil
 }

-func (d *Downloader) deleteUnfinishedDownloads() error {
-	entries, err := os.ReadDir(d.dir)
-	if err != nil {
-		return err
-	}
-	for _, e := range entries {
-		// Check if the current file has the download extension
-		if !e.IsDir() && strings.HasSuffix(e.Name(), ".download") {
-			path := filepath.Join(d.dir, e.Name())
-			log.Warningf("updates: deleting unfinished download file: %s\n", path)
-			err := os.Remove(path)
-			if err != nil {
-				log.Errorf("updates: failed to delete unfinished download file %s: %s", path, err)
-			}
-		}
-	}
-	return nil
-}
-
 func decompress(cType string, fileBytes []byte) ([]byte, error) {
 	switch cType {
 	case "zip":

@@ -335,46 +254,48 @@ func decompress(cType string, fileBytes []byte) ([]byte, error) {
 }

 func decompressGzip(data []byte) ([]byte, error) {
-	// Create a gzip reader from the byte array
+	// Create a gzip reader from the byte slice.
 	gzipReader, err := gzip.NewReader(bytes.NewReader(data))
 	if err != nil {
-		return nil, fmt.Errorf("failed to create gzip reader: %w", err)
+		return nil, fmt.Errorf("create gzip reader: %w", err)
 	}
 	defer func() { _ = gzipReader.Close() }()

 	// Copy from the gzip reader into a new buffer.
 	var buf bytes.Buffer
 	_, err = io.CopyN(&buf, gzipReader, MaxUnpackSize)
 	if err != nil && !errors.Is(err, io.EOF) {
-		return nil, fmt.Errorf("failed to read gzip file: %w", err)
+		return nil, fmt.Errorf("read gzip file: %w", err)
 	}

 	return buf.Bytes(), nil
 }

 func decompressZip(data []byte) ([]byte, error) {
-	// Create a zip reader from the byte array
+	// Create a zip reader from the byte slice.
 	zipReader, err := zip.NewReader(bytes.NewReader(data), int64(len(data)))
 	if err != nil {
-		return nil, fmt.Errorf("failed to create zip reader: %w", err)
+		return nil, fmt.Errorf("create zip reader: %w", err)
 	}

-	// Ensure there is only one file in the zip
+	// Ensure there is only one file in the zip.
 	if len(zipReader.File) != 1 {
 		return nil, fmt.Errorf("zip file must contain exactly one file")
 	}

-	// Read the single file in the zip
+	// Open single file in the zip.
 	file := zipReader.File[0]
 	fileReader, err := file.Open()
 	if err != nil {
-		return nil, fmt.Errorf("failed to open file in zip: %w", err)
+		return nil, fmt.Errorf("open file in zip: %w", err)
 	}
 	defer func() { _ = fileReader.Close() }()

 	// Copy from the zip reader into a new buffer.
 	var buf bytes.Buffer
 	_, err = io.CopyN(&buf, fileReader, MaxUnpackSize)
 	if err != nil && !errors.Is(err, io.EOF) {
-		return nil, fmt.Errorf("failed to read file in zip: %w", err)
+		return nil, fmt.Errorf("read file in zip: %w", err)
 	}

 	return buf.Bytes(), nil
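The new download path above always verifies the checksum of the in-memory (already unpacked) data and then writes via a ".download" temporary file followed by a rename, so a partially written file never lands at the final path. A self-contained sketch of that pattern, not part of the commit (the function name and the destination path are illustrative):

package main

import (
	"crypto/sha256"
	"crypto/subtle"
	"encoding/hex"
	"errors"
	"fmt"
	"os"
)

// writeVerified verifies the sha256 checksum of data, writes it to a temporary
// file next to the destination, and renames it into place.
func writeVerified(dst string, data []byte, sha256sum string, mode os.FileMode) error {
	expected, err := hex.DecodeString(sha256sum)
	if err != nil || len(expected) != sha256.Size {
		return fmt.Errorf("invalid expected hash %s: %w", sha256sum, err)
	}
	sum := sha256.Sum256(data)
	if subtle.ConstantTimeCompare(sum[:], expected) != 1 {
		return errors.New("sha256sum mismatch")
	}
	tmp := dst + ".download"
	if err := os.WriteFile(tmp, data, mode); err != nil {
		return fmt.Errorf("write temp file: %w", err)
	}
	return os.Rename(tmp, dst)
}

func main() {
	data := []byte("artifact bytes")
	sum := sha256.Sum256(data)
	if err := writeVerified("/tmp/example.bin", data, hex.EncodeToString(sum[:]), 0o644); err != nil {
		fmt.Println("write failed:", err)
	}
}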
service/updates/index.go (new file, 295 lines)

@@ -0,0 +1,295 @@
+package updates
+
+import (
+	"crypto/sha256"
+	"crypto/subtle"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	semver "github.com/hashicorp/go-version"
+	"github.com/safing/jess"
+	"github.com/safing/jess/filesig"
+)
+
+// MaxUnpackSize defines the maximum size that is allowed to be unpacked.
+const MaxUnpackSize = 1 << 30 // 2^30 == 1GB
+
+const currentPlatform = runtime.GOOS + "_" + runtime.GOARCH
+
+var zeroVersion = semver.Must(semver.NewVersion("0.0.0"))
+
+// Artifact represents a single file with metadata.
+type Artifact struct {
+	Filename string   `json:"Filename"`
+	SHA256   string   `json:"SHA256"`
+	URLs     []string `json:"URLs"`
+	Platform string   `json:"Platform,omitempty"`
+	Unpack   string   `json:"Unpack,omitempty"`
+	Version  string   `json:"Version,omitempty"`
+
+	localFile string
+}
+
+// GetFileMode returns the required filesystem permission for the artifact.
+func (a *Artifact) GetFileMode() os.FileMode {
+	// Special case for the portmaster ui, which should be executable by the regular user.
+	if a.Platform == currentPlatform && a.Filename == "portmaster" {
+		return executableUIFileMode
+	}
+
+	if a.Platform == currentPlatform {
+		return executableFileMode
+	}
+
+	return defaultFileMode
+}
+
+// Index represents a collection of artifacts with metadata.
+type Index struct {
+	Name      string     `json:"Name"`
+	Version   string     `json:"Version"`
+	Published time.Time  `json:"Published"`
+	Artifacts []Artifact `json:"Artifacts"`
+
+	versionNum *semver.Version
+}
+
+// LoadIndex loads and parses an index from the given filename.
+func LoadIndex(filename string, trustStore jess.TrustStore) (*Index, error) {
+	// Read index file from disk.
+	content, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("read index file: %w", err)
+	}
+
+	// Parse and return.
+	return ParseIndex(content, trustStore)
+}
+
+// ParseIndex parses an index from a json string.
+func ParseIndex(jsonContent []byte, trustStore jess.TrustStore) (*Index, error) {
+	// Verify signature.
+	if trustStore != nil {
+		if err := filesig.VerifyJSONSignature(jsonContent, trustStore); err != nil {
+			return nil, fmt.Errorf("verify: %w", err)
+		}
+	}
+
+	// Parse json.
+	var index Index
+	err := json.Unmarshal([]byte(jsonContent), &index)
+	if err != nil {
+		return nil, fmt.Errorf("parse index: %w", err)
+	}
+
+	// Parse version number, if set.
+	if index.Version != "" {
+		versionNum, err := semver.NewVersion(index.Version)
+		if err != nil {
+			return nil, fmt.Errorf("invalid index version %q: %w", index.Version, err)
+		}
+		index.versionNum = versionNum
+	}
+
+	// Filter artifacts by current platform.
+	filtered := make([]Artifact, 0)
+	for _, a := range index.Artifacts {
+		if a.Platform == "" || a.Platform == currentPlatform {
+			filtered = append(filtered, a)
+		}
+	}
+	index.Artifacts = filtered
+
+	return &index, nil
+}
+
+// CanDoUpgrades returns whether the index is able to follow a secure upgrade path.
+func (index *Index) CanDoUpgrades() error {
+	switch {
+	case index.versionNum == nil:
+		return errors.New("missing version number")
+
+	case index.Published.IsZero():
+		return errors.New("missing publish date")
+
+	case index.Published.After(time.Now().Add(15 * time.Minute)):
+		return fmt.Errorf("is from the future (%s)", time.Until(index.Published).Round(time.Minute))
+
+	default:
+		return nil
+	}
+}
+
+// ShouldUpgradeTo returns whether the given index is a successor and should be upgraded to.
+func (index *Index) ShouldUpgradeTo(newIndex *Index) error {
+	// Check if both indexes can do upgrades.
+	if err := index.CanDoUpgrades(); err != nil {
+		return fmt.Errorf("current index cannot do upgrades: %w", err)
+	}
+	if err := newIndex.CanDoUpgrades(); err != nil {
+		return fmt.Errorf("new index cannot do upgrades: %w", err)
+	}
+
+	switch {
+	case index.versionNum.Equal(zeroVersion):
+		// The zero version is used for bootstrapping.
+		// Upgrade in any case.
+		return nil
+
+	case index.Name != newIndex.Name:
+		return errors.New("index names do not match")
+
+	case index.versionNum.GreaterThan(newIndex.versionNum):
+		return errors.New("current index has newer version")
+
+	case index.Published.After(newIndex.Published):
+		return errors.New("current index was published later")
+
+	case index.Published.Equal(newIndex.Published):
+		// "Do nothing".
+		return ErrSameIndex
+
+	default:
+		// Upgrade!
+		return nil
+	}
+}
+
+// VerifyArtifacts checks if all artifacts are present in the given dir and have the correct hash.
+func (index *Index) VerifyArtifacts(dir string) error {
+	for _, artifact := range index.Artifacts {
+		err := checkSHA256SumFile(filepath.Join(dir, artifact.Filename), artifact.SHA256)
+		if err != nil {
+			return fmt.Errorf("verify %s: %s", artifact.Filename, err)
+		}
+	}
+
+	return nil
+}
+
+func (index *Index) Export(signingKey *jess.Signet, trustStore jess.TrustStore) ([]byte, error) {
+	// Serialize to json.
+	indexData, err := json.Marshal(index)
+	if err != nil {
+		return nil, fmt.Errorf("serialize: %w", err)
+	}
+
+	// Do not sign if signing key is not given.
+	if signingKey == nil {
+		return indexData, nil
+	}
+
+	// Make envelope.
+	envelope := jess.NewUnconfiguredEnvelope()
+	envelope.SuiteID = jess.SuiteSignV1
+	envelope.Senders = []*jess.Signet{signingKey}
+
+	// Sign json data.
+	signedIndex, err := filesig.AddJSONSignature(indexData, envelope, trustStore)
+	if err != nil {
+		return nil, fmt.Errorf("sign: %w", err)
+	}
+
+	return signedIndex, nil
+}
+
+func checkSHA256SumFile(filename string, sha256sum string) error {
+	// Check expected hash.
+	expectedDigest, err := hex.DecodeString(sha256sum)
+	if err != nil {
+		return fmt.Errorf("invalid hex encoding for expected hash %s: %w", sha256sum, err)
+	}
+	if len(expectedDigest) != sha256.Size {
+		return fmt.Errorf("invalid size for expected hash %s: %w", sha256sum, err)
+	}
+
+	// Open file for checking.
+	file, err := os.Open(filename)
+	if err != nil {
+		return fmt.Errorf("open file: %w", err)
+	}
+	defer func() { _ = file.Close() }()
+
+	// Calculate hash of the file.
+	fileHash := sha256.New()
+	if _, err := io.Copy(fileHash, file); err != nil {
+		return fmt.Errorf("read file: %w", err)
+	}
+	if subtle.ConstantTimeCompare(fileHash.Sum(nil), expectedDigest) != 1 {
+		return errors.New("sha256sum mismatch")
+	}
+
+	return nil
+}
+
+func checkSHA256Sum(fileData []byte, sha256sum string) error {
+	// Check expected hash.
+	expectedDigest, err := hex.DecodeString(sha256sum)
+	if err != nil {
+		return fmt.Errorf("invalid hex encoding for expected hash %s: %w", sha256sum, err)
+	}
+	if len(expectedDigest) != sha256.Size {
+		return fmt.Errorf("invalid size for expected hash %s: %w", sha256sum, err)
+	}
+
+	// Calculate and compare hash of the file.
+	hashSum := sha256.Sum256(fileData)
+	if subtle.ConstantTimeCompare(hashSum[:], expectedDigest) != 1 {
+		return errors.New("sha256sum mismatch")
+	}
+
+	return nil
+}
+
+// copyAndCheckSHA256Sum copies the file from src to dst and checks the sha256 sum.
+// As a special case, if the sha256sum is not given, it is not checked.
+func copyAndCheckSHA256Sum(src, dst, sha256sum string, fileMode fs.FileMode) error {
+	// Check expected hash.
+	var expectedDigest []byte
+	if sha256sum != "" {
+		// Assign to the outer expectedDigest (not :=), otherwise it stays nil
+		// and the comparison below would silently be skipped.
+		var err error
+		expectedDigest, err = hex.DecodeString(sha256sum)
+		if err != nil {
+			return fmt.Errorf("invalid hex encoding for expected hash %s: %w", sha256sum, err)
+		}
+		if len(expectedDigest) != sha256.Size {
+			return fmt.Errorf("invalid size for expected hash %s: %w", sha256sum, err)
+		}
+	}
+
+	// Read file from source.
+	fileData, err := os.ReadFile(src)
+	if err != nil {
+		return fmt.Errorf("read src file: %w", err)
+	}
+
+	// Calculate and compare hash of the file.
+	if len(expectedDigest) > 0 {
+		hashSum := sha256.Sum256(fileData)
+		if subtle.ConstantTimeCompare(hashSum[:], expectedDigest) != 1 {
+			return errors.New("sha256sum mismatch")
+		}
+	}
+
+	// Write to temporary file.
+	tmpDst := dst + ".copy"
+	err = os.WriteFile(tmpDst, fileData, fileMode)
+	if err != nil {
+		return fmt.Errorf("write temp dst file: %w", err)
+	}
+
+	// Rename/Move to actual location.
+	err = os.Rename(tmpDst, dst)
+	if err != nil {
+		return fmt.Errorf("rename dst file after write: %w", err)
+	}
+
+	return nil
+}
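The exported API of the new index.go can be exercised with a short sketch like the one below, assuming only the functions and errors defined in the file above (the file paths are illustrative and the nil trust store skips signature verification, which Verify would normally provide):

package main

import (
	"errors"
	"fmt"

	"github.com/safing/portmaster/service/updates"
)

func main() {
	current, err := updates.LoadIndex("/usr/lib/portmaster/index.json", nil)
	if err != nil {
		panic(err)
	}
	downloaded, err := updates.LoadIndex("/var/lib/portmaster/new_bin/index.json", nil)
	if err != nil {
		panic(err)
	}
	switch err := current.ShouldUpgradeTo(downloaded); {
	case err == nil:
		fmt.Println("upgrade to", downloaded.Version)
	case errors.Is(err, updates.ErrSameIndex):
		fmt.Println("already up to date")
	default:
		fmt.Println("not upgrading:", err)
	}
}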
@@ -19,7 +19,7 @@ import (
 	semver "github.com/hashicorp/go-version"
 )

-type BundleFileSettings struct {
+type IndexScanConfig struct {
 	Name            string
 	Version         string
 	PrimaryArtifact string

@@ -34,7 +34,7 @@ type BundleFileSettings struct {
 	unpackFilesGlobs map[string]glob.Glob
 }

-func (bs *BundleFileSettings) init() error {
+func (bs *IndexScanConfig) init() error {
 	// Transform base URL into expected format.
 	bs.cleanedBaseURL = strings.TrimSuffix(bs.BaseURL, "/") + "/"

@@ -62,7 +62,7 @@ func (bs *BundleFileSettings) init() error {
 }

 // IsIgnored returns whether a filename should be ignored.
-func (bs *BundleFileSettings) IsIgnored(filename string) bool {
+func (bs *IndexScanConfig) IsIgnored(filename string) bool {
 	for _, ignoreGlob := range bs.ignoreFilesGlobs {
 		if ignoreGlob.Match(filename) {
 			return true

@@ -73,7 +73,7 @@ func (bs *BundleFileSettings) IsIgnored(filename string) bool {
 }

 // UnpackSetting returns the unpack settings for the given filename.
-func (bs *BundleFileSettings) UnpackSetting(filename string) (string, error) {
+func (bs *IndexScanConfig) UnpackSetting(filename string) (string, error) {
 	var foundSetting string

 settings:

@@ -94,21 +94,21 @@ settings:
 	return foundSetting, nil
 }

-// GenerateBundleFromDir generates a bundle from a given folder.
-func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bundle, error) {
+// GenerateIndexFromDir generates an index from a given folder.
+func GenerateIndexFromDir(sourceDir string, cfg IndexScanConfig) (*Index, error) {
 	artifacts := make(map[string]Artifact)

 	// Initialize.
-	err := settings.init()
+	err := cfg.init()
 	if err != nil {
-		return nil, fmt.Errorf("invalid bundle settings: %w", err)
+		return nil, fmt.Errorf("invalid index scan config: %w", err)
 	}
-	bundleDir, err = filepath.Abs(bundleDir)
+	sourceDir, err = filepath.Abs(sourceDir)
 	if err != nil {
-		return nil, fmt.Errorf("invalid bundle dir: %w", err)
+		return nil, fmt.Errorf("invalid index dir: %w", err)
 	}

-	err = filepath.WalkDir(bundleDir, func(fullpath string, d fs.DirEntry, err error) error {
+	err = filepath.WalkDir(sourceDir, func(fullpath string, d fs.DirEntry, err error) error {
 		// Fail on access error.
 		if err != nil {
 			return err

@@ -122,13 +122,13 @@ func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bund
 		}

 		// Get relative path for processing.
-		relpath, err := filepath.Rel(bundleDir, fullpath)
+		relpath, err := filepath.Rel(sourceDir, fullpath)
 		if err != nil {
 			return fmt.Errorf("invalid relative path for %s: %w", fullpath, err)
 		}

 		// Check if file is in the ignore list.
-		if settings.IsIgnored(relpath) {
+		if cfg.IsIgnored(relpath) {
 			return nil
 		}

@@ -184,7 +184,7 @@ func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bund
 		artifact := Artifact{}

 		// Check if the caller provided a template for the artifact.
-		if t, ok := settings.Templates[identifier]; ok {
+		if t, ok := cfg.Templates[identifier]; ok {
 			artifact = t
 		}

@@ -192,14 +192,14 @@ func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bund
 		if artifact.Filename == "" {
 			artifact.Filename = identifier
 		}
-		if len(artifact.URLs) == 0 && settings.BaseURL != "" {
-			artifact.URLs = []string{settings.cleanedBaseURL + relpath}
+		if len(artifact.URLs) == 0 && cfg.BaseURL != "" {
+			artifact.URLs = []string{cfg.cleanedBaseURL + relpath}
 		}
 		if artifact.Platform == "" {
 			artifact.Platform = platform
 		}
 		if artifact.Unpack == "" {
-			unpackSetting, err := settings.UnpackSetting(relpath)
+			unpackSetting, err := cfg.UnpackSetting(relpath)
 			if err != nil {
 				return fmt.Errorf("invalid unpack setting for %s at %s: %w", key, relpath, err)
 			}

@@ -225,20 +225,20 @@ func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bund
 		return nil, fmt.Errorf("scanning dir: %w", err)
 	}

-	// Create base bundle.
-	bundle := &Bundle{
-		Name:    settings.Name,
-		Version: settings.Version,
+	// Create base index.
+	index := &Index{
+		Name:      cfg.Name,
+		Version:   cfg.Version,
 		Published: time.Now(),
 	}
-	if bundle.Version == "" && settings.PrimaryArtifact != "" {
-		pv, ok := artifacts[settings.PrimaryArtifact]
+	if index.Version == "" && cfg.PrimaryArtifact != "" {
+		pv, ok := artifacts[cfg.PrimaryArtifact]
 		if ok {
-			bundle.Version = pv.Version
+			index.Version = pv.Version
 		}
 	}
-	if bundle.Name == "" {
-		bundle.Name = strings.Trim(filepath.Base(bundleDir), "./\\")
+	if index.Name == "" {
+		index.Name = strings.Trim(filepath.Base(sourceDir), "./\\")
 	}

 	// Convert to slice and compute hashes.

@@ -257,7 +257,7 @@ func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bund
 		}

 		// Remove default versions.
-		if artifact.Version == bundle.Version {
+		if artifact.Version == index.Version {
 			artifact.Version = ""
 		}

@@ -282,8 +282,8 @@ func GenerateBundleFromDir(bundleDir string, settings BundleFileSettings) (*Bund
 	})

 	// Assign and return.
-	bundle.Artifacts = export
-	return bundle, nil
+	index.Artifacts = export
+	return index, nil
 }

 func selectLatestArtifacts(artifacts []Artifact) ([]Artifact, error) {

@@ -373,8 +373,8 @@ func getIdentifierAndVersion(versionedPath string) (identifier, version string,
 	return dirPath + filename, version, true
 }

-// GenerateMockFolder generates mock bundle folder for testing.
-func GenerateMockFolder(dir, name, version string) error {
+// GenerateMockFolder generates a mock index folder for testing.
+func GenerateMockFolder(dir, name, version string) error { // FIXME: move this to test?
 	// Make sure dir exists
 	_ = os.MkdirAll(dir, defaultDirMode)

@@ -400,7 +400,7 @@ func GenerateMockFolder(dir, name, version string) error {
 	}
 	_ = file.Close()

-	bundle, err := GenerateBundleFromDir(dir, BundleFileSettings{
+	index, err := GenerateIndexFromDir(dir, IndexScanConfig{
 		Name:    name,
 		Version: version,
 	})

@@ -408,12 +408,12 @@ func GenerateMockFolder(dir, name, version string) error {
 		return err
 	}

-	bundleStr, err := json.MarshalIndent(bundle, "", " ")
+	indexJson, err := json.MarshalIndent(index, "", " ")
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "failed to marshal bundle: %s\n", err)
+		fmt.Fprintf(os.Stderr, "failed to marshal index: %s\n", err)
 	}

-	err = os.WriteFile(filepath.Join(dir, "index.json"), bundleStr, defaultFileMode)
+	err = os.WriteFile(filepath.Join(dir, "index.json"), indexJson, defaultFileMode)
 	if err != nil {
 		return err
 	}
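Publishing side, a hedged sketch of how the renamed scan API fits together, using only GenerateIndexFromDir, IndexScanConfig, and Index.Export from the hunks above (the source directory and output path are illustrative; passing nil keys skips signing, so Export returns plain JSON):

package main

import (
	"fmt"
	"os"

	"github.com/safing/portmaster/service/updates"
)

func main() {
	// Scan config values mirror the "Portmaster Binaries" settings shown earlier in this diff.
	index, err := updates.GenerateIndexFromDir("dist", updates.IndexScanConfig{
		Name:            "Portmaster Binaries",
		PrimaryArtifact: "linux_amd64/portmaster-core",
		BaseURL:         "https://updates.safing.io/",
	})
	if err != nil {
		panic(err)
	}
	data, err := index.Export(nil, nil) // unsigned export for this sketch
	if err != nil {
		panic(err)
	}
	_ = os.WriteFile("dist/index.json", data, 0o644)
	fmt.Println("wrote index with", len(index.Artifacts), "artifacts")
}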
@@ -3,9 +3,14 @@ package updates
 import (
 	"errors"
 	"fmt"
+	"os"
+	"path/filepath"
 	"runtime"
+	"strings"
+	"sync"
 	"time"

+	"github.com/safing/jess"
 	"github.com/safing/portmaster/base/log"
 	"github.com/safing/portmaster/base/notifications"
 	"github.com/safing/portmaster/service/mgr"

@@ -14,6 +19,7 @@ import (

 const (
 	updateTaskRepeatDuration          = 1 * time.Hour
+	noNewUpdateNotificationID         = "updates:no-new-update"
 	updateAvailableNotificationID     = "updates:update-available"
 	updateFailedNotificationID        = "updates:update-failed"
 	corruptInstallationNotificationID = "updates:corrupt-installation"

@@ -23,43 +29,100 @@ const (
 	ResourceUpdateEvent = "resource update"
 )

-var (
-	// UserAgent is an HTTP User-Agent that is used to add
-	// more context to requests made by the registry when
-	// fetching resources from the update server.
-	UserAgent = fmt.Sprintf("Portmaster (%s %s)", runtime.GOOS, runtime.GOARCH)
+// UserAgent is an HTTP User-Agent that is used to add
+// more context to requests made by the registry when
+// fetching resources from the update server.
+var UserAgent = fmt.Sprintf("Portmaster (%s %s)", runtime.GOOS, runtime.GOARCH)

-	ErrNotFound error = errors.New("file not found")
+// Errors.
+var (
+	ErrNotFound  = errors.New("file not found")
+	ErrSameIndex = errors.New("same index")

 	ErrNoUpdateAvailable = errors.New("no update available")
 	ErrActionRequired    = errors.New("action required")
 )

-// UpdateIndex holds the configuration for the updates module.
-type UpdateIndex struct {
-	Directory string
+// Config holds the configuration for the updates module.
+type Config struct {
+	// Directory is the main directory where the currently to-be-used artifacts live.
+	Directory string
+	// DownloadDirectory is the directory where new artifacts are downloaded to and prepared for upgrading.
+	// After the upgrade, this directory is cleared.
 	DownloadDirectory string
-	PurgeDirectory    string
-	Ignore            []string
-	IndexURLs         []string
-	IndexFile         string
-	AutoApply         bool
-	NeedsRestart      bool
+	// PurgeDirectory is the directory where old artifacts are moved to during the upgrade process.
+	// After the upgrade, this directory is cleared.
+	PurgeDirectory string
+	// Ignore defines file and directory names within the main directory that should be ignored during the upgrade.
+	Ignore []string

+	// IndexURLs lists the URLs from which the index file can be downloaded.
+	IndexURLs []string
+	// IndexFile is the name of the index file used in the directories.
+	IndexFile string
+	// Verify enables signature checking and holds the trust store the index signatures are checked against.
+	Verify jess.TrustStore

+	// AutoDownload defines that updates may be downloaded automatically without outside trigger.
+	AutoDownload bool
+	// AutoApply defines that updates may be automatically applied without outside trigger.
+	// Requires AutoDownload to be enabled.
+	AutoApply bool
+	// NeedsRestart defines that a restart is required after an upgrade has been completed.
+	// Restart is triggered automatically, if Notify is disabled.
+	NeedsRestart bool
+	// Notify defines whether the user shall be informed about events via notifications.
+	// If enabled, disables automatic restart after upgrade.
+	Notify bool
 }

-// Updates provides access to released artifacts.
-type Updates struct {
+// Check looks for obvious configuration errors.
+func (cfg *Config) Check() error {
+	// Check if required fields are set.
+	switch {
+	case cfg.Directory == "":
+		return errors.New("directory must be set")
+	case cfg.DownloadDirectory == "":
+		return errors.New("download directory must be set")
+	case cfg.PurgeDirectory == "":
+		return errors.New("purge directory must be set")
+	case cfg.IndexFile == "":
+		return errors.New("index file must be set")
+	case cfg.AutoApply && !cfg.AutoDownload:
+		return errors.New("auto apply is set, but auto download is not")
+	}
+
+	// Check if Ignore contains paths.
+	for i, s := range cfg.Ignore {
+		if strings.ContainsRune(s, filepath.Separator) {
+			return fmt.Errorf("ignore entry #%d invalid: must be file or directory name, not path", i+1)
+		}
+	}
+
+	// Check if IndexURLs are HTTPS.
+	for i, url := range cfg.IndexURLs {
+		if !strings.HasPrefix(url, "https://") {
+			return fmt.Errorf("index URL #%d invalid: is not a HTTPS url", i+1)
+		}
+	}
+
+	return nil
+}
|
||||
|
||||
// Updater provides access to released artifacts.
type Updater struct {
    m      *mgr.Manager
    states *mgr.StateMgr
    cfg    Config

    index     *Index
    indexLock sync.Mutex

    updateCheckWorkerMgr *mgr.WorkerMgr
    upgradeWorkerMgr     *mgr.WorkerMgr

    EventResourcesUpdated *mgr.EventMgr[struct{}]

    registry   Registry
    downloader Downloader

    autoApply    bool
    needsRestart bool

    corruptedInstallation bool

    isUpdateRunning *abool.AtomicBool

@ -68,224 +131,280 @@ type Updates struct {
}
// New returns a new Updates module.
func New(instance instance, name string, index UpdateIndex) (*Updates, error) {
func New(instance instance, name string, cfg Config) (*Updater, error) {
    m := mgr.New(name)
    module := &Updates{
    module := &Updater{
        m:      m,
        states: m.NewStateMgr(),
        cfg:    cfg,

        EventResourcesUpdated: mgr.NewEventMgr[struct{}](ResourceUpdateEvent, m),

        autoApply:       index.AutoApply,
        needsRestart:    index.NeedsRestart,
        isUpdateRunning: abool.NewBool(false),

        instance: instance,
    }

    // Workers
    module.updateCheckWorkerMgr = m.NewWorkerMgr("update checker", module.checkForUpdates, nil).Repeat(updateTaskRepeatDuration)
    module.upgradeWorkerMgr = m.NewWorkerMgr("upgrader", func(w *mgr.WorkerCtx) error {
        if !module.isUpdateRunning.SetToIf(false, true) {
            return fmt.Errorf("unable to apply updates, concurrent updater task is running")
        }
        // Make sure to unset it
        defer module.isUpdateRunning.UnSet()

        module.applyUpdates(module.downloader, false)
        return nil
    }, nil)

    var err error
    module.registry, err = CreateRegistry(index)
    if err != nil {
        // Installation is corrupt, set flag and fall back to folder scanning for artifacts discovery.
        log.Criticalf("updates: failed to create registry: %s (falling back to folder scanning)", err)
        module.corruptedInstallation = true

        module.registry, err = CreateRegistryFromFolder(index)
        if err != nil {
            return nil, err
        }
    // Check config.
    if err := module.cfg.Check(); err != nil {
        return nil, fmt.Errorf("config is invalid: %w", err)
    }

    module.downloader = CreateDownloader(index)
    // Create Workers.
    module.updateCheckWorkerMgr = m.NewWorkerMgr("update checker", module.updateCheckWorker, nil).
        Repeat(updateTaskRepeatDuration)
    module.upgradeWorkerMgr = m.NewWorkerMgr("upgrader", module.upgradeWorker, nil)

    // Load index.
    index, err := LoadIndex(filepath.Join(cfg.Directory, cfg.IndexFile), cfg.Verify)
    if err != nil {
        if !errors.Is(err, os.ErrNotExist) {
            log.Errorf("updates: invalid index file, falling back to dir scan: %w", err)
        }

        // Fall back to scanning the directory.
        index, err = GenerateIndexFromDir(cfg.Directory, IndexScanConfig{Version: "0.0.0"})
        if err != nil {
            return nil, fmt.Errorf("updates index load and dir scan failed: %w", err)
        }
    }
    module.index = index

    return module, nil
}
func (u *Updates) checkForUpdates(wc *mgr.WorkerCtx) error {
func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreVersion, forceApply bool) (err error) {
    // Make sure only one update process is running.
    if !u.isUpdateRunning.SetToIf(false, true) {
        return fmt.Errorf("unable to check for updates, concurrent updater task is running")
        return fmt.Errorf("an updater task is already running, please try again later")
    }
    // Make sure to unset it on return.
    defer u.isUpdateRunning.UnSet()
    // Download the index file.
    err := u.downloader.downloadIndexFile(wc.Ctx())
    if err != nil {
        return fmt.Errorf("failed to download index file: %w", err)
    // FIXME: Switch to mutex?

    // Create a new downloader.
    downloader := NewDownloader(u, indexURLs)

    // Update or load the index file.
    if len(indexURLs) > 0 {
        // Download fresh copy, if indexURLs are given.
        err = downloader.updateIndex(w.Ctx())
        if err != nil {
            return fmt.Errorf("update index file: %w", err)
        }
    } else {
        // Otherwise, load index from download dir.
        downloader.index, err = LoadIndex(filepath.Join(u.cfg.Directory, u.cfg.IndexFile), u.cfg.Verify)
        if err != nil {
            return fmt.Errorf("load previously downloaded index file: %w", err)
        }
    }

    // Check if there is a new version.
    if u.downloader.version.LessThanOrEqual(u.registry.version) {
        log.Infof("updates: check compete: no new updates")
    if !ignoreVersion {
        // Get index to check version.
        u.indexLock.Lock()
        index := u.index
        u.indexLock.Unlock()
        // Check with local pointer to index.
        if err := index.ShouldUpgradeTo(downloader.index); err != nil {
            log.Infof("updates: no new or eligible update: %s", err)
            if u.cfg.Notify && u.instance.Notifications() != nil {
                u.instance.Notifications().NotifyInfo(
                    noNewUpdateNotificationID,
                    "No Updates Available",
                    "Portmaster v"+u.index.Version+" is the newest version.",
                )
            }
            return ErrNoUpdateAvailable
        }
    }

    // Check if automatic downloads are enabled.
    if !u.cfg.AutoDownload && !forceApply {
        if u.cfg.Notify && u.instance.Notifications() != nil {
            u.instance.Notifications().NotifyInfo(
                updateAvailableNotificationID,
                "New Update",
                "Portmaster v"+downloader.index.Version+" is available. Click Upgrade to download and upgrade now.",
                notifications.Action{
                    ID:   "upgrade",
                    Text: "Upgrade Now",
                    Type: notifications.ActionTypeWebhook,
                    Payload: notifications.ActionTypeWebhookPayload{
                        Method: "POST",
                        URL:    "updates/apply",
                    },
                },
            )
        }
        return fmt.Errorf("%w: apply updates to download and upgrade", ErrActionRequired)
    }
    // Check for existing resources before starting to download.
    _ = downloader.gatherExistingFiles(u.cfg.Directory)         // Artifacts are re-used between versions.
    _ = downloader.gatherExistingFiles(u.cfg.DownloadDirectory) // Previous download may have been interrupted.
    _ = downloader.gatherExistingFiles(u.cfg.PurgeDirectory)    // Recover faster from a failed upgrade.

    // Download any remaining needed files.
    // If everything is already found in the download directory, then this is a no-op.
    log.Infof("updates: downloading new version: %s %s", downloader.index.Name, downloader.index.Version)
    err = downloader.downloadArtifacts(w.Ctx())
    if err != nil {
        log.Errorf("updates: failed to download update: %s", err)
        if err := u.deleteUnfinishedFiles(u.cfg.DownloadDirectory); err != nil {
            log.Debugf("updates: failed to delete unfinished files in download directory %s", u.cfg.DownloadDirectory)
        }
        return fmt.Errorf("downloading failed: %w", err)
    }

    // Notify the user that an upgrade is available.
    if !u.cfg.AutoApply && !forceApply {
        if u.cfg.Notify && u.instance.Notifications() != nil {
            u.instance.Notifications().NotifyInfo(
                updateAvailableNotificationID,
                "New Update",
                "Portmaster v"+downloader.index.Version+" is available. Click Upgrade to upgrade now.",
                notifications.Action{
                    ID:   "upgrade",
                    Text: "Upgrade Now",
                    Type: notifications.ActionTypeWebhook,
                    Payload: notifications.ActionTypeWebhookPayload{
                        Method: "POST",
                        URL:    "updates/apply",
                    },
                },
            )
        }
        return fmt.Errorf("%w: apply updates to download and upgrade", ErrActionRequired)
    }

    // Run upgrade procedure.
    err = u.upgrade(downloader, ignoreVersion)
    if err != nil {
        if err := u.deleteUnfinishedFiles(u.cfg.PurgeDirectory); err != nil {
            log.Debugf("updates: failed to delete unfinished files in purge directory %s", u.cfg.PurgeDirectory)
        }
        return err
    }

    // Install is complete!

    // Clean up and notify modules of changed files.
    u.cleanupAfterUpgrade()
    u.EventResourcesUpdated.Submit(struct{}{})

    // If no restart is needed, we are done.
    if !u.cfg.NeedsRestart {
        return nil
    }

    // Download the new version.
    downloadBundle := u.downloader.bundle
    log.Infof("updates: check complete: downloading new version: %s %s", downloadBundle.Name, downloadBundle.Version)
    err = u.downloader.copyMatchingFilesFromCurrent(u.registry.files)
    if err != nil {
        log.Warningf("updates: failed to copy files from current installation: %s", err)
    }
    err = u.downloader.downloadAndVerify(wc.Ctx())
    if err != nil {
        log.Errorf("updates: failed to download update: %s", err)
    } else {
        if u.autoApply {
            // Apply updates.
            u.applyUpdates(u.downloader, false)
        } else {
            // Notify the user with option to trigger upgrade.
            notifications.NotifyPrompt(updateAvailableNotificationID, "New update is available.", fmt.Sprintf("%s %s", downloadBundle.Name, downloadBundle.Version), notifications.Action{
                ID:   "apply",
                Text: "Apply",
    // Notify user that a restart is required.
    if u.cfg.Notify && u.instance.Notifications() != nil {
        u.instance.Notifications().NotifyInfo(
            updateAvailableNotificationID,
            "Restart Required",
            "Portmaster v"+downloader.index.Version+" is installed. Restart to use new version.",
            notifications.Action{
                ID:   "restart",
                Text: "Restart Now",
                Type: notifications.ActionTypeWebhook,
                Payload: notifications.ActionTypeWebhookPayload{
                    Method: "POST",
                    URL:    "updates/apply",
                    URL:    "updates/apply", // FIXME
                },
            })
        }
            },
        )
        return fmt.Errorf("%w: restart required", ErrActionRequired)
    }

    // Otherwise, trigger restart immediately.
    u.instance.Restart()
    return nil
}
func (u *Updater) updateCheckWorker(w *mgr.WorkerCtx) error {
    _ = u.updateAndUpgrade(w, u.cfg.IndexURLs, false, false)
    // FIXME: Handle errors.
    return nil
}

func (u *Updater) upgradeWorker(w *mgr.WorkerCtx) error {
    _ = u.updateAndUpgrade(w, u.cfg.IndexURLs, false, true)
    // FIXME: Handle errors.
    return nil
}

// UpdateFromURL installs an update from the provided url.
func (u *Updates) UpdateFromURL(url string) error {
    if !u.isUpdateRunning.SetToIf(false, true) {
        return fmt.Errorf("unable to upgrade from url, concurrent updater task is running")
    }

    u.m.Go("custom-url-downloader", func(w *mgr.WorkerCtx) error {
        // Make sure to unset it on return.
        defer u.isUpdateRunning.UnSet()

        // Initialize parameters
        index := UpdateIndex{
            DownloadDirectory: u.downloader.dir,
            IndexURLs:         []string{url},
            IndexFile:         u.downloader.indexFile,
        }

        // Initialize with proper values and download the index file.
        downloader := CreateDownloader(index)
        err := downloader.downloadIndexFile(w.Ctx())
        if err != nil {
            return err
        }

        // Start downloading the artifacts
        err = downloader.downloadAndVerify(w.Ctx())
        if err != nil {
            return err
        }

        // Artifacts are downloaded, perform the update.
        u.applyUpdates(downloader, true)

func (u *Updater) UpdateFromURL(url string) error {
    u.m.Go("custom update from url", func(w *mgr.WorkerCtx) error {
        _ = u.updateAndUpgrade(w, []string{url}, true, true)
        return nil
    })
    return nil
}
func (u *Updates) applyUpdates(downloader Downloader, force bool) error {
    currentBundle := u.registry.bundle
    downloadBundle := downloader.bundle

    if !force && u.registry.version != nil {
        if u.downloader.version.LessThanOrEqual(u.registry.version) {
            // No new version, silently return.
            return nil
        }
    }
    if currentBundle != nil {
        log.Infof("update: starting update: %s %s -> %s", currentBundle.Name, currentBundle.Version, downloadBundle.Version)
    }

    err := u.registry.performRecoverableUpgrade(downloader.dir, downloader.indexFile)
    if err != nil {
        // Notify the user that update failed.
        notifications.NotifyPrompt(updateFailedNotificationID, "Failed to apply update.", err.Error())
        return fmt.Errorf("updates: failed to apply updates: %w", err)
    }

    if u.needsRestart {
        // Perform restart.
        u.instance.Restart()
    } else {
        // Update completed and no restart is needed. Submit an event.
        u.EventResourcesUpdated.Submit(struct{}{})
    }
    return nil
}

// TriggerUpdateCheck triggers an update check.
func (u *Updates) TriggerUpdateCheck() {
func (u *Updater) TriggerUpdateCheck() {
    u.updateCheckWorkerMgr.Go()
}

// TriggerApplyUpdates triggers upgrade.
func (u *Updates) TriggerApplyUpdates() {
func (u *Updater) TriggerApplyUpdates() {
    u.upgradeWorkerMgr.Go()
}

// States returns the state manager.
func (u *Updates) States() *mgr.StateMgr {
func (u *Updater) States() *mgr.StateMgr {
    return u.states
}

// Manager returns the module manager.
func (u *Updates) Manager() *mgr.Manager {
func (u *Updater) Manager() *mgr.Manager {
    return u.m
}

// Start starts the module.
func (u *Updates) Start() error {
    // Remove old files
    u.m.Go("old files cleaner", func(ctx *mgr.WorkerCtx) error {
        _ = u.registry.CleanOldFiles()
        _ = u.downloader.deleteUnfinishedDownloads()
        return nil
    })

    if u.corruptedInstallation {
        notifications.NotifyError(corruptInstallationNotificationID, "Corrupted installation. Reinstall the software.", "")
func (u *Updater) Start() error {
    if u.corruptedInstallation && u.cfg.Notify && u.instance.Notifications() != nil {
        u.instance.Notifications().NotifyError(
            corruptInstallationNotificationID,
            "Install Corruption",
            "Portmaster has detected that one or more of its own files have been corrupted. Please re-install the software.",
        )
    }

    u.updateCheckWorkerMgr.Go()

    u.updateCheckWorkerMgr.Delay(15 * time.Second)
    return nil
}

func (u *Updates) GetRootPath() string {
    return u.registry.dir
func (u *Updater) GetMainDir() string {
    return u.cfg.Directory
}

// GetFile returns the path of a file given the name. Returns ErrNotFound if file is not found.
func (u *Updates) GetFile(id string) (*File, error) {
    file, ok := u.registry.files[id]
    if ok {
        return &file, nil
    } else {
        log.Errorf("updates: requested file id not found: %s", id)
        return nil, ErrNotFound
func (u *Updater) GetFile(name string) (string, error) {
    u.indexLock.Lock()
    defer u.indexLock.Unlock()

    for _, artifact := range u.index.Artifacts {
        switch {
        case artifact.Filename != name:
            // Name does not match.
        case artifact.Platform != "" && artifact.Platform != currentPlatform:
            // Platform is defined and does not match.
            // Platforms are usually pre-filtered, but just to be sure.
        default:
            // Artifact matches!
            return filepath.Join(u.cfg.Directory, artifact.Filename), nil
        }
    }

    return "", ErrNotFound
}

// Stop stops the module.
func (u *Updates) Stop() error {
func (u *Updater) Stop() error {
    return nil
}
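A hedged sketch of how a host program might wire up the reworked Updater. Only the exported calls (New, Start, GetFile, TriggerUpdateCheck) come from this diff; the paths, URL, module name, and artifact filename are hypothetical, and the sketch assumes it lives inside the updates package, since the instance interface is unexported.

// Sketch only: "inst" must satisfy this package's unexported instance
// interface (Restart, Notifications, ...), which is not fully shown here.
func startUpdaterSketch(inst instance) error {
    u, err := New(inst, "Binary Updates", Config{
        Directory:         "/opt/portmaster",          // hypothetical paths
        DownloadDirectory: "/opt/portmaster/download",
        PurgeDirectory:    "/opt/portmaster/purge",
        IndexFile:         "index.json",
        IndexURLs:         []string{"https://updates.example.com/index.json"},
        AutoDownload:      true,
        Notify:            true,
    })
    if err != nil {
        return err
    }
    if err := u.Start(); err != nil {
        return err
    }

    // Resolve an artifact from the currently installed index.
    // "portmaster-core" is an assumed filename, not taken from this diff.
    if path, err := u.GetFile("portmaster-core"); err == nil {
        log.Infof("updates: core binary at %s", path)
    }

    // Kick off an immediate check instead of waiting for the repeat interval.
    u.TriggerUpdateCheck()
    return nil
}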
@ -1,249 +0,0 @@
package updates

import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"

    semver "github.com/hashicorp/go-version"

    "github.com/safing/portmaster/base/log"
)

const (
    defaultFileMode      = os.FileMode(0o0644)
    executableFileMode   = os.FileMode(0o0744)
    executableUIFileMode = os.FileMode(0o0755)
    defaultDirMode       = os.FileMode(0o0755)
)

type Registry struct {
    bundle   *Bundle
    dir      string
    purgeDir string
    files    map[string]File

    version *semver.Version
}

func CreateRegistry(index UpdateIndex) (Registry, error) {
    registry := Registry{
        dir:      index.Directory,
        purgeDir: index.PurgeDirectory,
        files:    make(map[string]File),
    }
    // Parse bundle
    var err error
    registry.bundle, err = LoadBundle(filepath.Join(index.Directory, index.IndexFile))
    if err != nil {
        return Registry{}, err
    }

    // Parse version
    registry.version, err = semver.NewVersion(registry.bundle.Version)
    if err != nil {
        log.Errorf("updates: failed to parse current version: %s", err)
    }

    // Process files
    for _, artifact := range registry.bundle.Artifacts {
        artifactPath := filepath.Join(registry.dir, artifact.Filename)
        registry.files[artifact.Filename] = File{id: artifact.Filename, path: artifactPath, version: registry.bundle.Version, sha256: artifact.SHA256}
    }
    return registry, nil
}

func CreateRegistryFromFolder(index UpdateIndex) (Registry, error) {
    registry := Registry{
        dir:      index.Directory,
        purgeDir: index.PurgeDirectory,
        files:    make(map[string]File),
    }

    files, err := os.ReadDir(index.Directory)
    if err != nil {
        return Registry{}, nil
    }
    for _, file := range files {
        // Skip dirs
        if file.IsDir() {
            continue
        }

        // Skip the uninstaller. (Windows)
        if strings.Contains(strings.ToLower(file.Name()), "uninstall") {
            continue
        }

        artifactPath := filepath.Join(registry.dir, file.Name())
        registry.files[file.Name()] = File{id: file.Name(), path: artifactPath, version: "", sha256: ""}
    }

    return registry, nil
}
func (r *Registry) performUpgrade(downloadDir string, indexFile string) error {
    // Make sure provided update is valid
    indexFilepath := filepath.Join(downloadDir, indexFile)
    bundle, err := LoadBundle(indexFilepath)
    if err != nil {
        return fmt.Errorf("invalid update: %w", err)
    }

    err = bundle.Verify(downloadDir)
    if err != nil {
        return fmt.Errorf("invalid update: %w", err)
    }

    // Make sure purge dir is empty.
    _ = os.RemoveAll(r.purgeDir)

    // Create purge dir.
    err = os.MkdirAll(r.purgeDir, defaultDirMode)
    if err != nil {
        return fmt.Errorf("failed to create directory: %w", err)
    }

    // Move current version files into purge folder.
    log.Debugf("updates: removing the old version")
    for _, file := range r.files {
        purgePath := filepath.Join(r.purgeDir, file.id)
        err := moveFile(file.path, purgePath)
        if err != nil {
            return fmt.Errorf("failed to move file %s: %w", file.path, err)
        }
    }

    // Move the new index file
    log.Debugf("updates: installing the new version")
    newIndexFile := filepath.Join(r.dir, indexFile)
    err = moveFile(indexFilepath, newIndexFile)
    if err != nil {
        return fmt.Errorf("failed to move index file %s: %w", indexFile, err)
    }

    // Move downloaded files to the current version folder.
    for _, artifact := range bundle.Artifacts {
        fromFilepath := filepath.Join(downloadDir, artifact.Filename)
        toFilepath := filepath.Join(r.dir, artifact.Filename)
        err = moveFile(fromFilepath, toFilepath)
        if err != nil {
            return fmt.Errorf("failed to move file %s: %w", fromFilepath, err)
        } else {
            log.Debugf("updates: %s moved", artifact.Filename)
        }
    }

    log.Infof("updates: update complete")

    return nil
}

func moveFile(currentPath, newPath string) error {
    err := os.Rename(currentPath, newPath)
    if err == nil {
        // Moving was successful return
        return nil
    }

    log.Debugf("updates: failed to move '%s' fallback to copy+delete: %s -> %s", err, currentPath, newPath)

    // Failed to move, try copy and delete
    currentFile, err := os.Open(currentPath)
    if err != nil {
        return err
    }
    defer func() { _ = currentFile.Close() }()

    newFile, err := os.Create(newPath)
    if err != nil {
        return err
    }
    defer func() { _ = newFile.Close() }()

    _, err = io.Copy(newFile, currentFile)
    if err != nil {
        return err
    }

    // Make sure file is closed before deletion.
    _ = currentFile.Close()
    currentFile = nil

    err = os.Remove(currentPath)
    if err != nil {
        log.Errorf("updates: failed to delete while moving file: %s", err)
    }

    return nil
}
func (r *Registry) performRecoverableUpgrade(downloadDir string, indexFile string) error {
    upgradeError := r.performUpgrade(downloadDir, indexFile)
    if upgradeError != nil {
        err := r.recover()
        recoverStatus := "(recovery successful)"
        if err != nil {
            recoverStatus = "(recovery failed)"
            log.Errorf("updates: failed to recover: %s", err)
        }

        return fmt.Errorf("upgrade failed: %w %s", upgradeError, recoverStatus)
    }
    return nil
}

func (r *Registry) recover() error {
    files, err := os.ReadDir(r.purgeDir)
    if err != nil {
        return err
    }

    for _, file := range files {
        recoverPath := filepath.Join(r.purgeDir, file.Name())
        currentFilepath := filepath.Join(r.dir, file.Name())
        err := moveFile(recoverPath, currentFilepath)
        if err != nil {
            return err
        }
    }

    return nil
}

func (r *Registry) CleanOldFiles() error {
    err := os.RemoveAll(r.purgeDir)
    if err != nil {
        return fmt.Errorf("failed to delete folder: %w", err)
    }
    return nil
}

type File struct {
    id      string
    path    string
    version string
    sha256  string
}

// Identifier return the id of the file witch is the same as the filename.
func (f *File) Identifier() string {
    return f.id
}

// Path returns the path + filename of the file.
func (f *File) Path() string {
    return f.path
}

// Version returns the version of the file. (currently not filled).
func (f *File) Version() string {
    return f.version
}

// Sha256 returns the sha356 sum of the file.
func (f *File) Sha256() string {
    return f.sha256
}

29  service/updates/signing.go  Normal file
@ -0,0 +1,29 @@
package updates

import "github.com/safing/jess"

var (
    // BinarySigningKeys holds the signing keys in text format.
    BinarySigningKeys = []string{
        // Safing Code Signing Key #1
        "recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
        // Safing Code Signing Key #2
        "recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
    }

    // BinarySigningTrustStore is an in-memory trust store with the signing keys.
    BinarySigningTrustStore = jess.NewMemTrustStore()
)

func init() {
    for _, signingKey := range BinarySigningKeys {
        rcpt, err := jess.RecipientFromTextFormat(signingKey)
        if err != nil {
            panic(err)
        }
        err = BinarySigningTrustStore.StoreSignet(rcpt)
        if err != nil {
            panic(err)
        }
    }
}
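Config.Verify (shown earlier in this diff) is the hook this trust store plugs into, so that downloaded index files are signature-checked against the embedded keys. A brief hypothetical wiring, with all other Config fields omitted here:

package main

import "github.com/safing/portmaster/service/updates"

func main() {
    // Hypothetical: have the updater verify index signatures against the
    // embedded Safing code signing keys; other Config fields are omitted
    // (see the Config sketch earlier in this diff).
    cfg := updates.Config{Verify: updates.BinarySigningTrustStore}
    _ = cfg
}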
@ -56,7 +56,7 @@ func TestPreformUpdate(t *testing.T) {
    }

    // Create updater
    updates, err := New(stub, "Test", UpdateIndex{
    updates, err := New(stub, "Test", Config{
        Directory:         installedDir,
        DownloadDirectory: updateDir,
        PurgeDirectory:    purgeDir,

196  service/updates/upgrade.go  Normal file
@ -0,0 +1,196 @@
package updates

import (
    "fmt"
    "io/fs"
    "os"
    "path/filepath"
    "slices"
    "strings"

    "github.com/safing/portmaster/base/log"
)

const (
    defaultFileMode      = os.FileMode(0o0644)
    executableFileMode   = os.FileMode(0o0744)
    executableUIFileMode = os.FileMode(0o0755)
    defaultDirMode       = os.FileMode(0o0755)
)

func (u *Updater) upgrade(downloader *Downloader, ignoreVersion bool) error {
    // Lock index for the upgrade.
    u.indexLock.Lock()
    defer u.indexLock.Unlock()

    // Check if we should upgrade at all.
    if !ignoreVersion {
        if err := u.index.ShouldUpgradeTo(downloader.index); err != nil {
            return fmt.Errorf("cannot upgrade: %w", ErrNoUpdateAvailable)
        }
    }

    // Execute the upgrade.
    upgradeError := u.upgradeMoveFiles(downloader, ignoreVersion)
    if upgradeError == nil {
        return nil
    }

    // Attempt to recover from failed upgrade.
    recoveryErr := u.recoverFromFailedUpgrade()
    if recoveryErr == nil {
        return fmt.Errorf("upgrade failed, but recovery was successful: %w", upgradeError)
    }

    // Recovery failed too.
    return fmt.Errorf("upgrade (including recovery) failed: %s", upgradeError)
}

func (u *Updater) upgradeMoveFiles(downloader *Downloader, ignoreVersion bool) error {
    // Important:
    // We assume that the downloader has done its job and all artifacts are verified.
    // Files will just be moved here.
    // In case the files are copied, they are verified in the process.

    // Reset purge directory, so that we can do a clean rollback later.
    _ = os.RemoveAll(u.cfg.PurgeDirectory)
    err := os.MkdirAll(u.cfg.PurgeDirectory, defaultDirMode)
    if err != nil {
        return fmt.Errorf("failed to create purge directory: %w", err)
    }

    // Move current version files into purge folder.
    log.Debugf("updates: removing the old version (v%s from %s)", u.index.Version, u.index.Published)
    files, err := os.ReadDir(u.cfg.Directory)
    if err != nil {
        return fmt.Errorf("read current directory: %w", err)
    }
    for _, file := range files {
        // Check if file is ignored.
        if slices.Contains(u.cfg.Ignore, file.Name()) {
            continue
        }

        // Otherwise, move file to purge dir.
        src := filepath.Join(u.cfg.Directory, file.Name())
        dst := filepath.Join(u.cfg.PurgeDirectory, file.Name())
        err := moveFile(src, dst, "", file.Type().Perm())
        if err != nil {
            return fmt.Errorf("failed to move current file %s to purge dir: %w", file.Name(), err)
        }
    }

    // Move the new index file into main directory.
    log.Debugf("updates: installing the new version (v%s from %s)", u.index.Version, u.index.Published)
    src := filepath.Join(u.cfg.DownloadDirectory, u.cfg.IndexFile)
    dst := filepath.Join(u.cfg.Directory, u.cfg.IndexFile)
    err = moveFile(src, dst, "", defaultFileMode)
    if err != nil {
        return fmt.Errorf("failed to move index file to %s: %w", dst, err)
    }

    // Move downloaded files to the current version folder.
    for _, artifact := range downloader.index.Artifacts {
        src = filepath.Join(u.cfg.DownloadDirectory, artifact.Filename)
        dst = filepath.Join(u.cfg.Directory, artifact.Filename)
        err = moveFile(src, dst, artifact.SHA256, artifact.GetFileMode())
        if err != nil {
            return fmt.Errorf("failed to move file %s: %w", artifact.Filename, err)
        } else {
            log.Debugf("updates: %s moved", artifact.Filename)
        }
    }

    // Set new index on module.
    u.index = downloader.index
    log.Infof("updates: update complete (v%s from %s)", u.index.Version, u.index.Published)

    return nil
}

// moveFile moves a file and falls back to copying if it fails.
func moveFile(currentPath, newPath string, sha256sum string, fileMode fs.FileMode) error {
    // Try to simply move file.
    err := os.Rename(currentPath, newPath)
    if err == nil {
        // Moving was successful, return.
        return nil
    }
    log.Tracef("updates: failed to move to %q, falling back to copy+delete: %w", newPath, err)

    // Copy and check the checksum while we are at it.
    err = copyAndCheckSHA256Sum(currentPath, newPath, sha256sum, fileMode)
    if err != nil {
        return fmt.Errorf("move failed, copy+delete fallback failed: %w", err)
    }

    return nil
}
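copyAndCheckSHA256Sum itself is not included in this hunk. Purely for orientation, here is a minimal, self-contained sketch of what such a helper could look like; it assumes (not confirmed by this diff) that the real helper writes to a temporary "<dst>.copy" file, the same suffix deleteUnfinishedFiles cleans up below, hashes while copying, and only renames the verified copy into place.

package updates

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "io/fs"
    "os"
)

// sketchCopyAndCheckSHA256Sum is a hypothetical stand-in for the real
// copyAndCheckSHA256Sum, which is not shown in this diff.
func sketchCopyAndCheckSHA256Sum(src, dst, sha256sum string, fileMode fs.FileMode) error {
    srcFile, err := os.Open(src)
    if err != nil {
        return err
    }
    defer srcFile.Close()

    // Write to a temporary ".copy" file first.
    tmp := dst + ".copy"
    dstFile, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, fileMode)
    if err != nil {
        return err
    }

    // Hash while writing so the data is only read once.
    hasher := sha256.New()
    _, copyErr := io.Copy(io.MultiWriter(dstFile, hasher), srcFile)
    closeErr := dstFile.Close()
    if copyErr != nil {
        return copyErr
    }
    if closeErr != nil {
        return closeErr
    }

    // Compare against the expected hex-encoded sum, if one was given.
    if sha256sum != "" && hex.EncodeToString(hasher.Sum(nil)) != sha256sum {
        return fmt.Errorf("checksum mismatch for %s", dst)
    }

    // Only move the verified copy into place.
    return os.Rename(tmp, dst)
}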
// recoverFromFailedUpgrade attempts to roll back any files moved by the upgrade process.
func (u *Updater) recoverFromFailedUpgrade() error {
    // Get list of files from purge dir.
    files, err := os.ReadDir(u.cfg.PurgeDirectory)
    if err != nil {
        return err
    }

    // Move all files back to main dir.
    for _, file := range files {
        purgedFile := filepath.Join(u.cfg.PurgeDirectory, file.Name())
        activeFile := filepath.Join(u.cfg.Directory, file.Name())
        err := moveFile(purgedFile, activeFile, "", file.Type().Perm())
        if err != nil {
            // Only warn and continue to recover as many files as possible.
            log.Warningf("updates: failed to roll back file %s: %w", file.Name(), err)
        }
    }

    return nil
}

func (u *Updater) cleanupAfterUpgrade() error {
    err := os.RemoveAll(u.cfg.PurgeDirectory)
    if err != nil {
        return fmt.Errorf("delete purge dir: %w", err)
    }

    err = os.RemoveAll(u.cfg.DownloadDirectory)
    if err != nil {
        return fmt.Errorf("delete download dir: %w", err)
    }

    return nil
}

func (u *Updater) deleteUnfinishedFiles(dir string) error {
    entries, err := os.ReadDir(dir)
    if err != nil {
        return err
    }

    for _, e := range entries {
        switch {
        case e.IsDir():
            // Continue.

        case strings.HasSuffix(e.Name(), ".download"):
            path := filepath.Join(dir, e.Name())
            log.Warningf("updates: deleting unfinished download file: %s\n", path)
            err := os.Remove(path)
            if err != nil {
                log.Errorf("updates: failed to delete unfinished download file %s: %s", path, err)
            }

        case strings.HasSuffix(e.Name(), ".copy"):
            path := filepath.Join(dir, e.Name())
            log.Warningf("updates: deleting unfinished copied file: %s\n", path)
            err := os.Remove(path)
            if err != nil {
                log.Errorf("updates: failed to delete unfinished copied file %s: %s", path, err)
            }
        }
    }

    return nil
}

@ -249,6 +249,6 @@ type instance interface {
    NetEnv() *netenv.NetEnv
    Patrol() *patrol.Patrol
    Config() *config.Config
    IntelUpdates() *updates.Updates
    IntelUpdates() *updates.Updater
    SPNGroup() *mgr.ExtendedGroup
}
@ -20,11 +20,11 @@ type testInstance struct {
    db      *dbmodule.DBModule
    api     *api.API
    config  *config.Config
    updates *updates.Updates
    updates *updates.Updater
    base    *base.Base
}

func (stub *testInstance) IntelUpdates() *updates.Updates {
func (stub *testInstance) IntelUpdates() *updates.Updater {
    return stub.updates
}

@ -86,7 +86,7 @@ func runTest(m *testing.M) error {
    if err != nil {
        return fmt.Errorf("failed to create config: %w", err)
    }
    stub.updates, err = updates.New(stub, "Test Intel", updates.UpdateIndex{
    stub.updates, err = updates.New(stub, "Test Intel", updates.Config{
        Directory: installDir,
        IndexFile: "index.json",
    })

@ -49,8 +49,8 @@ type Instance struct {
    rng *rng.Rng

    core          *core.Core
    binaryUpdates *updates.Updates
    intelUpdates  *updates.Updates
    binaryUpdates *updates.Updater
    intelUpdates  *updates.Updater
    geoip         *geoip.GeoIP
    netenv        *netenv.NetEnv
    filterLists   *filterlists.FilterLists

@ -75,11 +75,11 @@ func New() (*Instance, error) {
    instance := &Instance{}
    instance.ctx, instance.cancelCtx = context.WithCancel(context.Background())

    binaryUpdateIndex := updates.UpdateIndex{
    binaryUpdateIndex := updates.Config{
        // FIXME: fill
    }

    intelUpdateIndex := updates.UpdateIndex{
    intelUpdateIndex := updates.Config{
        // FIXME: fill
    }

@ -270,12 +270,12 @@ func (i *Instance) Base() *base.Base {
}

// BinaryUpdates returns the updates module.
func (i *Instance) BinaryUpdates() *updates.Updates {
func (i *Instance) BinaryUpdates() *updates.Updater {
    return i.binaryUpdates
}

// IntelUpdates returns the updates module.
func (i *Instance) IntelUpdates() *updates.Updates {
func (i *Instance) IntelUpdates() *updates.Updater {
    return i.intelUpdates
}

@ -19,12 +19,12 @@ type testInstance struct {
    db      *dbmodule.DBModule
    api     *api.API
    config  *config.Config
    updates *updates.Updates
    updates *updates.Updater
    base    *base.Base
    geoip   *geoip.GeoIP
}

func (stub *testInstance) IntelUpdates() *updates.Updates {
func (stub *testInstance) IntelUpdates() *updates.Updater {
    return stub.updates
}

@ -88,7 +88,7 @@ func runTest(m *testing.M) error {
    if err != nil {
        return fmt.Errorf("failed to create config: %w", err)
    }
    stub.updates, err = updates.New(stub, "Test Intel", updates.UpdateIndex{
    stub.updates, err = updates.New(stub, "Test Intel", updates.Config{
        Directory: installDir,
        IndexFile: "index.json",
    })