[WIP] New updater first working prototype

This commit is contained in:
Vladimir Stoilov 2024-08-16 16:05:01 +03:00
parent abf444630b
commit 9bae1afd73
No known key found for this signature in database
GPG key ID: 2F190B67A43A81AF
46 changed files with 4107 additions and 4149 deletions

View file

@ -1,15 +1,15 @@
package updater
// Export exports the list of resources.
func (reg *ResourceRegistry) Export() map[string]*Resource {
reg.RLock()
defer reg.RUnlock()
// // Export exports the list of resources.
// func (reg *ResourceRegistry) Export() map[string]*Resource {
// reg.RLock()
// defer reg.RUnlock()
// copy the map
copiedResources := make(map[string]*Resource)
for key, val := range reg.resources {
copiedResources[key] = val.Export()
}
// // copy the map
// copiedResources := make(map[string]*Resource)
// for key, val := range reg.resources {
// copiedResources[key] = val.Export()
// }
return copiedResources
}
// return copiedResources
// }

View file

@ -1,347 +1,347 @@
package updater
import (
"bytes"
"context"
"errors"
"fmt"
"hash"
"io"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"time"
// import (
// "bytes"
// "context"
// "errors"
// "fmt"
// "hash"
// "io"
// "net/http"
// "net/url"
// "os"
// "path"
// "path/filepath"
// "time"
"github.com/safing/jess/filesig"
"github.com/safing/jess/lhash"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils/renameio"
)
// "github.com/safing/jess/filesig"
// "github.com/safing/jess/lhash"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils/renameio"
// )
func (reg *ResourceRegistry) fetchFile(ctx context.Context, client *http.Client, rv *ResourceVersion, tries int) error {
// backoff when retrying
if tries > 0 {
select {
case <-ctx.Done():
return nil // module is shutting down
case <-time.After(time.Duration(tries*tries) * time.Second):
}
}
// func (reg *ResourceRegistry) fetchFile(ctx context.Context, client *http.Client, rv *ResourceVersion, tries int) error {
// // backoff when retrying
// if tries > 0 {
// select {
// case <-ctx.Done():
// return nil // module is shutting down
// case <-time.After(time.Duration(tries*tries) * time.Second):
// }
// }
// check destination dir
dirPath := filepath.Dir(rv.storagePath())
err := reg.storageDir.EnsureAbsPath(dirPath)
if err != nil {
return fmt.Errorf("could not create updates folder: %s", dirPath)
}
// // check destination dir
// dirPath := filepath.Dir(rv.storagePath())
// err := reg.storageDir.EnsureAbsPath(dirPath)
// if err != nil {
// return fmt.Errorf("could not create updates folder: %s", dirPath)
// }
// If verification is enabled, download signature first.
var (
verifiedHash *lhash.LabeledHash
sigFileData []byte
)
if rv.resource.VerificationOptions != nil {
verifiedHash, sigFileData, err = reg.fetchAndVerifySigFile(
ctx, client,
rv.resource.VerificationOptions,
rv.versionedSigPath(), rv.SigningMetadata(),
tries,
)
if err != nil {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("signature verification failed: %w", err)
case SignaturePolicyWarn:
log.Warningf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
case SignaturePolicyDisable:
log.Debugf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
}
}
}
// // If verification is enabled, download signature first.
// var (
// verifiedHash *lhash.LabeledHash
// sigFileData []byte
// )
// if rv.resource.VerificationOptions != nil {
// verifiedHash, sigFileData, err = reg.fetchAndVerifySigFile(
// ctx, client,
// rv.resource.VerificationOptions,
// rv.versionedSigPath(), rv.SigningMetadata(),
// tries,
// )
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("signature verification failed: %w", err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// }
// }
// }
// open file for writing
atomicFile, err := renameio.TempFile(reg.tmpDir.Path, rv.storagePath())
if err != nil {
return fmt.Errorf("could not create temp file for download: %w", err)
}
defer atomicFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// // open file for writing
// atomicFile, err := renameio.TempFile(reg.tmpDir.Path, rv.storagePath())
// if err != nil {
// return fmt.Errorf("could not create temp file for download: %w", err)
// }
// defer atomicFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// start file download
resp, downloadURL, err := reg.makeRequest(ctx, client, rv.versionedPath(), tries)
if err != nil {
return err
}
defer func() {
_ = resp.Body.Close()
}()
// // start file download
// resp, downloadURL, err := reg.makeRequest(ctx, client, rv.versionedPath(), tries)
// if err != nil {
// return err
// }
// defer func() {
// _ = resp.Body.Close()
// }()
// Write to the hasher at the same time, if needed.
var hasher hash.Hash
var writeDst io.Writer = atomicFile
if verifiedHash != nil {
hasher = verifiedHash.Algorithm().RawHasher()
writeDst = io.MultiWriter(hasher, atomicFile)
}
// // Write to the hasher at the same time, if needed.
// var hasher hash.Hash
// var writeDst io.Writer = atomicFile
// if verifiedHash != nil {
// hasher = verifiedHash.Algorithm().RawHasher()
// writeDst = io.MultiWriter(hasher, atomicFile)
// }
// Download and write file.
n, err := io.Copy(writeDst, resp.Body)
if err != nil {
return fmt.Errorf("failed to download %q: %w", downloadURL, err)
}
if resp.ContentLength != n {
return fmt.Errorf("failed to finish download of %q: written %d out of %d bytes", downloadURL, n, resp.ContentLength)
}
// // Download and write file.
// n, err := io.Copy(writeDst, resp.Body)
// if err != nil {
// return fmt.Errorf("failed to download %q: %w", downloadURL, err)
// }
// if resp.ContentLength != n {
// return fmt.Errorf("failed to finish download of %q: written %d out of %d bytes", downloadURL, n, resp.ContentLength)
// }
// Before file is finalized, check if hash, if available.
if hasher != nil {
downloadDigest := hasher.Sum(nil)
if verifiedHash.EqualRaw(downloadDigest) {
log.Infof("%s: verified signature of %s", reg.Name, downloadURL)
} else {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return errors.New("file does not match signed checksum")
case SignaturePolicyWarn:
log.Warningf("%s: checksum does not match file from %s", reg.Name, downloadURL)
case SignaturePolicyDisable:
log.Debugf("%s: checksum does not match file from %s", reg.Name, downloadURL)
}
// // Before file is finalized, check if hash, if available.
// if hasher != nil {
// downloadDigest := hasher.Sum(nil)
// if verifiedHash.EqualRaw(downloadDigest) {
// log.Infof("%s: verified signature of %s", reg.Name, downloadURL)
// } else {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return errors.New("file does not match signed checksum")
// case SignaturePolicyWarn:
// log.Warningf("%s: checksum does not match file from %s", reg.Name, downloadURL)
// case SignaturePolicyDisable:
// log.Debugf("%s: checksum does not match file from %s", reg.Name, downloadURL)
// }
// Reset hasher to signal that the sig should not be written.
hasher = nil
}
}
// // Reset hasher to signal that the sig should not be written.
// hasher = nil
// }
// }
// Write signature file, if we have one and if verification succeeded.
if len(sigFileData) > 0 && hasher != nil {
sigFilePath := rv.storagePath() + filesig.Extension
err := os.WriteFile(sigFilePath, sigFileData, 0o0644) //nolint:gosec
if err != nil {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("failed to write signature file %s: %w", sigFilePath, err)
case SignaturePolicyWarn:
log.Warningf("%s: failed to write signature file %s: %s", reg.Name, sigFilePath, err)
case SignaturePolicyDisable:
log.Debugf("%s: failed to write signature file %s: %s", reg.Name, sigFilePath, err)
}
}
}
// // Write signature file, if we have one and if verification succeeded.
// if len(sigFileData) > 0 && hasher != nil {
// sigFilePath := rv.storagePath() + filesig.Extension
// err := os.WriteFile(sigFilePath, sigFileData, 0o0644) //nolint:gosec
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("failed to write signature file %s: %w", sigFilePath, err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to write signature file %s: %s", reg.Name, sigFilePath, err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to write signature file %s: %s", reg.Name, sigFilePath, err)
// }
// }
// }
// finalize file
err = atomicFile.CloseAtomicallyReplace()
if err != nil {
return fmt.Errorf("%s: failed to finalize file %s: %w", reg.Name, rv.storagePath(), err)
}
// set permissions
if !onWindows {
// TODO: only set executable files to 0755, set other to 0644
err = os.Chmod(rv.storagePath(), 0o0755) //nolint:gosec // See TODO above.
if err != nil {
log.Warningf("%s: failed to set permissions on downloaded file %s: %s", reg.Name, rv.storagePath(), err)
}
}
// // finalize file
// err = atomicFile.CloseAtomicallyReplace()
// if err != nil {
// return fmt.Errorf("%s: failed to finalize file %s: %w", reg.Name, rv.storagePath(), err)
// }
// // set permissions
// if !onWindows {
// // TODO: only set executable files to 0755, set other to 0644
// err = os.Chmod(rv.storagePath(), 0o0755) //nolint:gosec // See TODO above.
// if err != nil {
// log.Warningf("%s: failed to set permissions on downloaded file %s: %s", reg.Name, rv.storagePath(), err)
// }
// }
log.Debugf("%s: fetched %s and stored to %s", reg.Name, downloadURL, rv.storagePath())
return nil
}
// log.Debugf("%s: fetched %s and stored to %s", reg.Name, downloadURL, rv.storagePath())
// return nil
// }
func (reg *ResourceRegistry) fetchMissingSig(ctx context.Context, client *http.Client, rv *ResourceVersion, tries int) error {
// backoff when retrying
if tries > 0 {
select {
case <-ctx.Done():
return nil // module is shutting down
case <-time.After(time.Duration(tries*tries) * time.Second):
}
}
// func (reg *ResourceRegistry) fetchMissingSig(ctx context.Context, client *http.Client, rv *ResourceVersion, tries int) error {
// // backoff when retrying
// if tries > 0 {
// select {
// case <-ctx.Done():
// return nil // module is shutting down
// case <-time.After(time.Duration(tries*tries) * time.Second):
// }
// }
// Check destination dir.
dirPath := filepath.Dir(rv.storagePath())
err := reg.storageDir.EnsureAbsPath(dirPath)
if err != nil {
return fmt.Errorf("could not create updates folder: %s", dirPath)
}
// // Check destination dir.
// dirPath := filepath.Dir(rv.storagePath())
// err := reg.storageDir.EnsureAbsPath(dirPath)
// if err != nil {
// return fmt.Errorf("could not create updates folder: %s", dirPath)
// }
// Download and verify the missing signature.
verifiedHash, sigFileData, err := reg.fetchAndVerifySigFile(
ctx, client,
rv.resource.VerificationOptions,
rv.versionedSigPath(), rv.SigningMetadata(),
tries,
)
if err != nil {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("signature verification failed: %w", err)
case SignaturePolicyWarn:
log.Warningf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
case SignaturePolicyDisable:
log.Debugf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
}
return nil
}
// // Download and verify the missing signature.
// verifiedHash, sigFileData, err := reg.fetchAndVerifySigFile(
// ctx, client,
// rv.resource.VerificationOptions,
// rv.versionedSigPath(), rv.SigningMetadata(),
// tries,
// )
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("signature verification failed: %w", err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// }
// return nil
// }
// Check if the signature matches the resource file.
ok, err := verifiedHash.MatchesFile(rv.storagePath())
if err != nil {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("error while verifying resource file: %w", err)
case SignaturePolicyWarn:
log.Warningf("%s: error while verifying resource file %s", reg.Name, rv.storagePath())
case SignaturePolicyDisable:
log.Debugf("%s: error while verifying resource file %s", reg.Name, rv.storagePath())
}
return nil
}
if !ok {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return errors.New("resource file does not match signed checksum")
case SignaturePolicyWarn:
log.Warningf("%s: checksum does not match resource file from %s", reg.Name, rv.storagePath())
case SignaturePolicyDisable:
log.Debugf("%s: checksum does not match resource file from %s", reg.Name, rv.storagePath())
}
return nil
}
// // Check if the signature matches the resource file.
// ok, err := verifiedHash.MatchesFile(rv.storagePath())
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("error while verifying resource file: %w", err)
// case SignaturePolicyWarn:
// log.Warningf("%s: error while verifying resource file %s", reg.Name, rv.storagePath())
// case SignaturePolicyDisable:
// log.Debugf("%s: error while verifying resource file %s", reg.Name, rv.storagePath())
// }
// return nil
// }
// if !ok {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return errors.New("resource file does not match signed checksum")
// case SignaturePolicyWarn:
// log.Warningf("%s: checksum does not match resource file from %s", reg.Name, rv.storagePath())
// case SignaturePolicyDisable:
// log.Debugf("%s: checksum does not match resource file from %s", reg.Name, rv.storagePath())
// }
// return nil
// }
// Write signature file.
err = os.WriteFile(rv.storageSigPath(), sigFileData, 0o0644) //nolint:gosec
if err != nil {
switch rv.resource.VerificationOptions.DownloadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("failed to write signature file %s: %w", rv.storageSigPath(), err)
case SignaturePolicyWarn:
log.Warningf("%s: failed to write signature file %s: %s", reg.Name, rv.storageSigPath(), err)
case SignaturePolicyDisable:
log.Debugf("%s: failed to write signature file %s: %s", reg.Name, rv.storageSigPath(), err)
}
}
// // Write signature file.
// err = os.WriteFile(rv.storageSigPath(), sigFileData, 0o0644) //nolint:gosec
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("failed to write signature file %s: %w", rv.storageSigPath(), err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to write signature file %s: %s", reg.Name, rv.storageSigPath(), err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to write signature file %s: %s", reg.Name, rv.storageSigPath(), err)
// }
// }
log.Debugf("%s: fetched %s and stored to %s", reg.Name, rv.versionedSigPath(), rv.storageSigPath())
return nil
}
// log.Debugf("%s: fetched %s and stored to %s", reg.Name, rv.versionedSigPath(), rv.storageSigPath())
// return nil
// }
func (reg *ResourceRegistry) fetchAndVerifySigFile(ctx context.Context, client *http.Client, verifOpts *VerificationOptions, sigFilePath string, requiredMetadata map[string]string, tries int) (*lhash.LabeledHash, []byte, error) {
// Download signature file.
resp, _, err := reg.makeRequest(ctx, client, sigFilePath, tries)
if err != nil {
return nil, nil, err
}
defer func() {
_ = resp.Body.Close()
}()
sigFileData, err := io.ReadAll(resp.Body)
if err != nil {
return nil, nil, err
}
// func (reg *ResourceRegistry) fetchAndVerifySigFile(ctx context.Context, client *http.Client, verifOpts *VerificationOptions, sigFilePath string, requiredMetadata map[string]string, tries int) (*lhash.LabeledHash, []byte, error) {
// // Download signature file.
// resp, _, err := reg.makeRequest(ctx, client, sigFilePath, tries)
// if err != nil {
// return nil, nil, err
// }
// defer func() {
// _ = resp.Body.Close()
// }()
// sigFileData, err := io.ReadAll(resp.Body)
// if err != nil {
// return nil, nil, err
// }
// Extract all signatures.
sigs, err := filesig.ParseSigFile(sigFileData)
switch {
case len(sigs) == 0 && err != nil:
return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
case len(sigs) == 0:
return nil, nil, errors.New("no signatures found in signature file")
case err != nil:
return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
}
// // Extract all signatures.
// sigs, err := filesig.ParseSigFile(sigFileData)
// switch {
// case len(sigs) == 0 && err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// case len(sigs) == 0:
// return nil, nil, errors.New("no signatures found in signature file")
// case err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// }
// Verify all signatures.
var verifiedHash *lhash.LabeledHash
for _, sig := range sigs {
fd, err := filesig.VerifyFileData(
sig,
requiredMetadata,
verifOpts.TrustStore,
)
if err != nil {
return nil, sigFileData, err
}
// // Verify all signatures.
// var verifiedHash *lhash.LabeledHash
// for _, sig := range sigs {
// fd, err := filesig.VerifyFileData(
// sig,
// requiredMetadata,
// verifOpts.TrustStore,
// )
// if err != nil {
// return nil, sigFileData, err
// }
// Save or check verified hash.
if verifiedHash == nil {
verifiedHash = fd.FileHash()
} else if !fd.FileHash().Equal(verifiedHash) {
// Return an error if two valid hashes mismatch.
// For simplicity, all hash algorithms must be the same for now.
return nil, sigFileData, errors.New("file hashes from different signatures do not match")
}
}
// // Save or check verified hash.
// if verifiedHash == nil {
// verifiedHash = fd.FileHash()
// } else if !fd.FileHash().Equal(verifiedHash) {
// // Return an error if two valid hashes mismatch.
// // For simplicity, all hash algorithms must be the same for now.
// return nil, sigFileData, errors.New("file hashes from different signatures do not match")
// }
// }
return verifiedHash, sigFileData, nil
}
// return verifiedHash, sigFileData, nil
// }
func (reg *ResourceRegistry) fetchData(ctx context.Context, client *http.Client, downloadPath string, tries int) (fileData []byte, downloadedFrom string, err error) {
// backoff when retrying
if tries > 0 {
select {
case <-ctx.Done():
return nil, "", nil // module is shutting down
case <-time.After(time.Duration(tries*tries) * time.Second):
}
}
// func (reg *ResourceRegistry) fetchData(ctx context.Context, client *http.Client, downloadPath string, tries int) (fileData []byte, downloadedFrom string, err error) {
// // backoff when retrying
// if tries > 0 {
// select {
// case <-ctx.Done():
// return nil, "", nil // module is shutting down
// case <-time.After(time.Duration(tries*tries) * time.Second):
// }
// }
// start file download
resp, downloadURL, err := reg.makeRequest(ctx, client, downloadPath, tries)
if err != nil {
return nil, downloadURL, err
}
defer func() {
_ = resp.Body.Close()
}()
// // start file download
// resp, downloadURL, err := reg.makeRequest(ctx, client, downloadPath, tries)
// if err != nil {
// return nil, downloadURL, err
// }
// defer func() {
// _ = resp.Body.Close()
// }()
// download and write file
buf := bytes.NewBuffer(make([]byte, 0, resp.ContentLength))
n, err := io.Copy(buf, resp.Body)
if err != nil {
return nil, downloadURL, fmt.Errorf("failed to download %q: %w", downloadURL, err)
}
if resp.ContentLength != n {
return nil, downloadURL, fmt.Errorf("failed to finish download of %q: written %d out of %d bytes", downloadURL, n, resp.ContentLength)
}
// // download and write file
// buf := bytes.NewBuffer(make([]byte, 0, resp.ContentLength))
// n, err := io.Copy(buf, resp.Body)
// if err != nil {
// return nil, downloadURL, fmt.Errorf("failed to download %q: %w", downloadURL, err)
// }
// if resp.ContentLength != n {
// return nil, downloadURL, fmt.Errorf("failed to finish download of %q: written %d out of %d bytes", downloadURL, n, resp.ContentLength)
// }
return buf.Bytes(), downloadURL, nil
}
// return buf.Bytes(), downloadURL, nil
// }
func (reg *ResourceRegistry) makeRequest(ctx context.Context, client *http.Client, downloadPath string, tries int) (resp *http.Response, downloadURL string, err error) {
// parse update URL
updateBaseURL := reg.UpdateURLs[tries%len(reg.UpdateURLs)]
u, err := url.Parse(updateBaseURL)
if err != nil {
return nil, "", fmt.Errorf("failed to parse update URL %q: %w", updateBaseURL, err)
}
// add download path
u.Path = path.Join(u.Path, downloadPath)
// compile URL
downloadURL = u.String()
// func (reg *ResourceRegistry) makeRequest(ctx context.Context, client *http.Client, downloadPath string, tries int) (resp *http.Response, downloadURL string, err error) {
// // parse update URL
// updateBaseURL := reg.UpdateURLs[tries%len(reg.UpdateURLs)]
// u, err := url.Parse(updateBaseURL)
// if err != nil {
// return nil, "", fmt.Errorf("failed to parse update URL %q: %w", updateBaseURL, err)
// }
// // add download path
// u.Path = path.Join(u.Path, downloadPath)
// // compile URL
// downloadURL = u.String()
// create request
req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, http.NoBody)
if err != nil {
return nil, "", fmt.Errorf("failed to create request for %q: %w", downloadURL, err)
}
// // create request
// req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, http.NoBody)
// if err != nil {
// return nil, "", fmt.Errorf("failed to create request for %q: %w", downloadURL, err)
// }
// set user agent
if reg.UserAgent != "" {
req.Header.Set("User-Agent", reg.UserAgent)
}
// // set user agent
// if reg.UserAgent != "" {
// req.Header.Set("User-Agent", reg.UserAgent)
// }
// start request
resp, err = client.Do(req)
if err != nil {
return nil, "", fmt.Errorf("failed to make request to %q: %w", downloadURL, err)
}
// // start request
// resp, err = client.Do(req)
// if err != nil {
// return nil, "", fmt.Errorf("failed to make request to %q: %w", downloadURL, err)
// }
// check return code
if resp.StatusCode != http.StatusOK {
_ = resp.Body.Close()
return nil, "", fmt.Errorf("failed to fetch %q: %d %s", downloadURL, resp.StatusCode, resp.Status)
}
// // check return code
// if resp.StatusCode != http.StatusOK {
// _ = resp.Body.Close()
// return nil, "", fmt.Errorf("failed to fetch %q: %d %s", downloadURL, resp.StatusCode, resp.Status)
// }
return resp, downloadURL, err
}
// return resp, downloadURL, err
// }

View file

@ -1,105 +1,97 @@
package updater
import (
"errors"
"io"
"io/fs"
"os"
"strings"
semver "github.com/hashicorp/go-version"
// semver "github.com/hashicorp/go-version"
"github.com/safing/jess/filesig"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
)
// File represents a file from the update system.
type File struct {
resource *Resource
version *ResourceVersion
notifier *notifier
versionedPath string
storagePath string
}
// type File struct {
// resource *Resource
// version *ResourceVersion
// notifier *notifier
// versionedPath string
// storagePath string
// }
// Identifier returns the identifier of the file.
func (file *File) Identifier() string {
return file.resource.Identifier
}
// // Identifier returns the identifier of the file.
// func (file *File) Identifier() string {
// return file.resource.Identifier
// }
// Version returns the version of the file.
func (file *File) Version() string {
return file.version.VersionNumber
}
// // Version returns the version of the file.
// func (file *File) Version() string {
// return file.version.VersionNumber
// }
// SemVer returns the semantic version of the file.
func (file *File) SemVer() *semver.Version {
return file.version.semVer
}
// // SemVer returns the semantic version of the file.
// func (file *File) SemVer() *semver.Version {
// return file.version.semVer
// }
// EqualsVersion normalizes the given version and checks equality with semver.
func (file *File) EqualsVersion(version string) bool {
return file.version.EqualsVersion(version)
}
// // EqualsVersion normalizes the given version and checks equality with semver.
// func (file *File) EqualsVersion(version string) bool {
// return file.version.EqualsVersion(version)
// }
// Path returns the absolute filepath of the file.
func (file *File) Path() string {
return file.storagePath
}
// // Path returns the absolute filepath of the file.
// func (file *File) Path() string {
// return file.storagePath
// }
// SigningMetadata returns the metadata to be included in signatures.
func (file *File) SigningMetadata() map[string]string {
return map[string]string{
"id": file.Identifier(),
"version": file.Version(),
}
}
// // SigningMetadata returns the metadata to be included in signatures.
// func (file *File) SigningMetadata() map[string]string {
// return map[string]string{
// "id": file.Identifier(),
// "version": file.Version(),
// }
// }
// Verify verifies the given file.
func (file *File) Verify() ([]*filesig.FileData, error) {
// Check if verification is configured.
if file.resource.VerificationOptions == nil {
return nil, ErrVerificationNotConfigured
}
// func (file *File) Verify() ([]*filesig.FileData, error) {
// // Check if verification is configured.
// if file.resource.VerificationOptions == nil {
// return nil, ErrVerificationNotConfigured
// }
// Verify file.
fileData, err := filesig.VerifyFile(
file.storagePath,
file.storagePath+filesig.Extension,
file.SigningMetadata(),
file.resource.VerificationOptions.TrustStore,
)
if err != nil {
switch file.resource.VerificationOptions.DiskLoadPolicy {
case SignaturePolicyRequire:
return nil, err
case SignaturePolicyWarn:
log.Warningf("%s: failed to verify %s: %s", file.resource.registry.Name, file.storagePath, err)
case SignaturePolicyDisable:
log.Debugf("%s: failed to verify %s: %s", file.resource.registry.Name, file.storagePath, err)
}
}
// // Verify file.
// fileData, err := filesig.VerifyFile(
// file.storagePath,
// file.storagePath+filesig.Extension,
// file.SigningMetadata(),
// file.resource.VerificationOptions.TrustStore,
// )
// if err != nil {
// switch file.resource.VerificationOptions.DiskLoadPolicy {
// case SignaturePolicyRequire:
// return nil, err
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify %s: %s", file.resource.registry.Name, file.storagePath, err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify %s: %s", file.resource.registry.Name, file.storagePath, err)
// }
// }
return fileData, nil
}
// return fileData, nil
// }
// Blacklist notifies the update system that this file is somehow broken, and should be ignored from now on, until restarted.
func (file *File) Blacklist() error {
return file.resource.Blacklist(file.version.VersionNumber)
}
// func (file *File) Blacklist() error {
// return file.resource.Blacklist(file.version.VersionNumber)
// }
// markActiveWithLocking marks the file as active, locking the resource in the process.
func (file *File) markActiveWithLocking() {
file.resource.Lock()
defer file.resource.Unlock()
// func (file *File) markActiveWithLocking() {
// file.resource.Lock()
// defer file.resource.Unlock()
// update last used version
if file.resource.ActiveVersion != file.version {
log.Debugf("updater: setting active version of resource %s from %s to %s", file.resource.Identifier, file.resource.ActiveVersion, file.version.VersionNumber)
file.resource.ActiveVersion = file.version
}
}
// // update last used version
// if file.resource.ActiveVersion != file.version {
// log.Debugf("updater: setting active version of resource %s from %s to %s", file.resource.Identifier, file.resource.ActiveVersion, file.version.VersionNumber)
// file.resource.ActiveVersion = file.version
// }
// }
// Unpacker describes the function that is passed to
// File.Unpack. It receives a reader to the compressed/packed
@ -107,50 +99,50 @@ func (file *File) markActiveWithLocking() {
// unpacked file contents. If the returned reader implements
// io.Closer it's close method is invoked when an error
// or io.EOF is returned from Read().
type Unpacker func(io.Reader) (io.Reader, error)
// type Unpacker func(io.Reader) (io.Reader, error)
// Unpack returns the path to the unpacked version of file and
// unpacks it on demand using unpacker.
func (file *File) Unpack(suffix string, unpacker Unpacker) (string, error) {
path := strings.TrimSuffix(file.Path(), suffix)
// func (file *File) Unpack(suffix string, unpacker Unpacker) (string, error) {
// path := strings.TrimSuffix(file.Path(), suffix)
if suffix == "" {
path += "-unpacked"
}
// if suffix == "" {
// path += "-unpacked"
// }
_, err := os.Stat(path)
if err == nil {
return path, nil
}
// _, err := os.Stat(path)
// if err == nil {
// return path, nil
// }
if !errors.Is(err, fs.ErrNotExist) {
return "", err
}
// if !errors.Is(err, fs.ErrNotExist) {
// return "", err
// }
f, err := os.Open(file.Path())
if err != nil {
return "", err
}
defer func() {
_ = f.Close()
}()
// f, err := os.Open(file.Path())
// if err != nil {
// return "", err
// }
// defer func() {
// _ = f.Close()
// }()
r, err := unpacker(f)
if err != nil {
return "", err
}
// r, err := unpacker(f)
// if err != nil {
// return "", err
// }
ioErr := utils.CreateAtomic(path, r, &utils.AtomicFileOptions{
TempDir: file.resource.registry.TmpDir().Path,
})
// ioErr := utils.CreateAtomic(path, r, &utils.AtomicFileOptions{
// TempDir: file.resource.registry.TmpDir().Path,
// })
if c, ok := r.(io.Closer); ok {
if err := c.Close(); err != nil && ioErr == nil {
// if ioErr is already set we ignore the error from
// closing the unpacker.
ioErr = err
}
}
// if c, ok := r.(io.Closer); ok {
// if err := c.Close(); err != nil && ioErr == nil {
// // if ioErr is already set we ignore the error from
// // closing the unpacker.
// ioErr = err
// }
// }
return path, ioErr
}
// return path, ioErr
// }

View file

@ -2,7 +2,6 @@ package updater
import (
"errors"
"fmt"
)
// Errors returned by the updater package.
@ -14,75 +13,75 @@ var (
// GetFile returns the selected (mostly newest) file with the given
// identifier or an error, if it fails.
func (reg *ResourceRegistry) GetFile(identifier string) (*File, error) {
return nil, fmt.Errorf("invalid file: %s", identifier)
// reg.RLock()
// res, ok := reg.resources[identifier]
// reg.RUnlock()
// if !ok {
// return nil, ErrNotFound
// }
// func (reg *ResourceRegistry) GetFile(identifier string) (*File, error) {
// return nil, fmt.Errorf("invalid file: %s", identifier)
// reg.RLock()
// res, ok := reg.resources[identifier]
// reg.RUnlock()
// if !ok {
// return nil, ErrNotFound
// }
// file := res.GetFile()
// // check if file is available locally
// if file.version.Available {
// file.markActiveWithLocking()
// file := res.GetFile()
// // check if file is available locally
// if file.version.Available {
// file.markActiveWithLocking()
// // Verify file, if configured.
// _, err := file.Verify()
// if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
// // TODO: If verification is required, try deleting the resource and downloading it again.
// return nil, fmt.Errorf("failed to verify file: %w", err)
// }
// // Verify file, if configured.
// _, err := file.Verify()
// if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
// // TODO: If verification is required, try deleting the resource and downloading it again.
// return nil, fmt.Errorf("failed to verify file: %w", err)
// }
// return file, nil
// }
// return file, nil
// }
// // check if online
// if !reg.Online {
// return nil, ErrNotAvailableLocally
// }
// // check if online
// if !reg.Online {
// return nil, ErrNotAvailableLocally
// }
// // check download dir
// err := reg.tmpDir.Ensure()
// if err != nil {
// return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
// }
// // check download dir
// err := reg.tmpDir.Ensure()
// if err != nil {
// return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
// }
// // Start registry operation.
// reg.state.StartOperation(StateFetching)
// defer reg.state.EndOperation()
// // Start registry operation.
// reg.state.StartOperation(StateFetching)
// defer reg.state.EndOperation()
// // download file
// log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
// client := &http.Client{}
// for tries := range 5 {
// err = reg.fetchFile(context.TODO(), client, file.version, tries)
// if err != nil {
// log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
// } else {
// file.markActiveWithLocking()
// // download file
// log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
// client := &http.Client{}
// for tries := range 5 {
// err = reg.fetchFile(context.TODO(), client, file.version, tries)
// if err != nil {
// log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
// } else {
// file.markActiveWithLocking()
// // TODO: We just download the file - should we verify it again?
// return file, nil
// }
// }
// log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
// return nil, err
}
// // TODO: We just download the file - should we verify it again?
// return file, nil
// }
// }
// log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
// return nil, err
// }
// GetVersion returns the selected version of the given identifier.
// The returned resource version may not be modified.
func (reg *ResourceRegistry) GetVersion(identifier string) (*ResourceVersion, error) {
reg.RLock()
res, ok := reg.resources[identifier]
reg.RUnlock()
if !ok {
return nil, ErrNotFound
}
// func (reg *ResourceRegistry) GetVersion(identifier string) (*ResourceVersion, error) {
// reg.RLock()
// res, ok := reg.resources[identifier]
// reg.RUnlock()
// if !ok {
// return nil, ErrNotFound
// }
res.Lock()
defer res.Unlock()
// res.Lock()
// defer res.Unlock()
return res.SelectedVersion, nil
}
// return res.SelectedVersion, nil
// }

View file

@ -1,33 +1,33 @@
package updater
import (
"github.com/tevino/abool"
)
// import (
// "github.com/tevino/abool"
// )
type notifier struct {
upgradeAvailable *abool.AtomicBool
notifyChannel chan struct{}
}
// type notifier struct {
// upgradeAvailable *abool.AtomicBool
// notifyChannel chan struct{}
// }
func newNotifier() *notifier {
return &notifier{
upgradeAvailable: abool.NewBool(false),
notifyChannel: make(chan struct{}),
}
}
// func newNotifier() *notifier {
// return &notifier{
// upgradeAvailable: abool.NewBool(false),
// notifyChannel: make(chan struct{}),
// }
// }
func (n *notifier) markAsUpgradeable() {
if n.upgradeAvailable.SetToIf(false, true) {
close(n.notifyChannel)
}
}
// func (n *notifier) markAsUpgradeable() {
// if n.upgradeAvailable.SetToIf(false, true) {
// close(n.notifyChannel)
// }
// }
// UpgradeAvailable returns whether an upgrade is available for this file.
func (file *File) UpgradeAvailable() bool {
return file.notifier.upgradeAvailable.IsSet()
}
// // UpgradeAvailable returns whether an upgrade is available for this file.
// func (file *File) UpgradeAvailable() bool {
// return file.notifier.upgradeAvailable.IsSet()
// }
// WaitForAvailableUpgrade blocks (selectable) until an upgrade for this file is available.
func (file *File) WaitForAvailableUpgrade() <-chan struct{} {
return file.notifier.notifyChannel
}
// // WaitForAvailableUpgrade blocks (selectable) until an upgrade for this file is available.
// func (file *File) WaitForAvailableUpgrade() <-chan struct{} {
// return file.notifier.notifyChannel
// }

View file

@ -1,270 +1,270 @@
package updater
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
// import (
// "errors"
// "fmt"
// "os"
// "path/filepath"
// "runtime"
// "strings"
// "sync"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
)
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
const (
onWindows = runtime.GOOS == "windows"
)
// const (
// onWindows = runtime.GOOS == "windows"
// )
// ResourceRegistry is a registry for managing update resources.
type ResourceRegistry struct {
sync.RWMutex
// type ResourceRegistry struct {
// sync.RWMutex
Name string
storageDir *utils.DirStructure
tmpDir *utils.DirStructure
indexes []*Index
state *RegistryState
// Name string
// storageDir *utils.DirStructure
// tmpDir *utils.DirStructure
// indexes []*Index
// state *RegistryState
resources map[string]*Resource
UpdateURLs []string
UserAgent string
MandatoryUpdates []string
AutoUnpack []string
// resources map[string]*Resource
// UpdateURLs []string
// UserAgent string
// MandatoryUpdates []string
// AutoUnpack []string
// Verification holds a map of VerificationOptions assigned to their
// applicable identifier path prefix.
// Use an empty string to denote the default.
// Use empty options to disable verification for a path prefix.
Verification map[string]*VerificationOptions
// // Verification holds a map of VerificationOptions assigned to their
// // applicable identifier path prefix.
// // Use an empty string to denote the default.
// // Use empty options to disable verification for a path prefix.
// Verification map[string]*VerificationOptions
// UsePreReleases signifies that pre-releases should be used when selecting a
// version. Even if false, a pre-release version will still be used if it is
// defined as the current version by an index.
UsePreReleases bool
// // UsePreReleases signifies that pre-releases should be used when selecting a
// // version. Even if false, a pre-release version will still be used if it is
// // defined as the current version by an index.
// UsePreReleases bool
// DevMode specifies if a local 0.0.0 version should be always chosen, when available.
DevMode bool
// // DevMode specifies if a local 0.0.0 version should be always chosen, when available.
// DevMode bool
// Online specifies if resources may be downloaded if not available locally.
Online bool
// // Online specifies if resources may be downloaded if not available locally.
// Online bool
// StateNotifyFunc may be set to receive any changes to the registry state.
// The specified function may lock the state, but may not block or take a
// lot of time.
StateNotifyFunc func(*RegistryState)
}
// // StateNotifyFunc may be set to receive any changes to the registry state.
// // The specified function may lock the state, but may not block or take a
// // lot of time.
// StateNotifyFunc func(*RegistryState)
// }
// AddIndex adds a new index to the resource registry.
// The order is important, as indexes added later will override the current
// release from earlier indexes.
func (reg *ResourceRegistry) AddIndex(idx Index) {
reg.Lock()
defer reg.Unlock()
// // AddIndex adds a new index to the resource registry.
// // The order is important, as indexes added later will override the current
// // release from earlier indexes.
// func (reg *ResourceRegistry) AddIndex(idx Index) {
// reg.Lock()
// defer reg.Unlock()
// Get channel name from path.
idx.Channel = strings.TrimSuffix(
filepath.Base(idx.Path), filepath.Ext(idx.Path),
)
// // Get channel name from path.
// idx.Channel = strings.TrimSuffix(
// filepath.Base(idx.Path), filepath.Ext(idx.Path),
// )
reg.indexes = append(reg.indexes, &idx)
}
// reg.indexes = append(reg.indexes, &idx)
// }
// PreInitUpdateState sets the initial update state of the registry before initialization.
func (reg *ResourceRegistry) PreInitUpdateState(s UpdateState) error {
if reg.state != nil {
return errors.New("registry already initialized")
}
// // PreInitUpdateState sets the initial update state of the registry before initialization.
// func (reg *ResourceRegistry) PreInitUpdateState(s UpdateState) error {
// if reg.state != nil {
// return errors.New("registry already initialized")
// }
reg.state = &RegistryState{
Updates: s,
}
return nil
}
// reg.state = &RegistryState{
// Updates: s,
// }
// return nil
// }
// Initialize initializes a raw registry struct and makes it ready for usage.
func (reg *ResourceRegistry) Initialize(storageDir *utils.DirStructure) error {
// check if storage dir is available
err := storageDir.Ensure()
if err != nil {
return err
}
// // Initialize initializes a raw registry struct and makes it ready for usage.
// func (reg *ResourceRegistry) Initialize(storageDir *utils.DirStructure) error {
// // check if storage dir is available
// err := storageDir.Ensure()
// if err != nil {
// return err
// }
// set default name
if reg.Name == "" {
reg.Name = "updater"
}
// // set default name
// if reg.Name == "" {
// reg.Name = "updater"
// }
// initialize private attributes
reg.storageDir = storageDir
reg.tmpDir = storageDir.ChildDir("tmp", 0o0700)
reg.resources = make(map[string]*Resource)
if reg.state == nil {
reg.state = &RegistryState{}
}
reg.state.ID = StateReady
reg.state.reg = reg
// // initialize private attributes
// reg.storageDir = storageDir
// reg.tmpDir = storageDir.ChildDir("tmp", 0o0700)
// reg.resources = make(map[string]*Resource)
// if reg.state == nil {
// reg.state = &RegistryState{}
// }
// reg.state.ID = StateReady
// reg.state.reg = reg
// remove tmp dir to delete old entries
err = reg.Cleanup()
if err != nil {
log.Warningf("%s: failed to remove tmp dir: %s", reg.Name, err)
}
// // remove tmp dir to delete old entries
// err = reg.Cleanup()
// if err != nil {
// log.Warningf("%s: failed to remove tmp dir: %s", reg.Name, err)
// }
// (re-)create tmp dir
err = reg.tmpDir.Ensure()
if err != nil {
log.Warningf("%s: failed to create tmp dir: %s", reg.Name, err)
}
// // (re-)create tmp dir
// err = reg.tmpDir.Ensure()
// if err != nil {
// log.Warningf("%s: failed to create tmp dir: %s", reg.Name, err)
// }
// Check verification options.
if reg.Verification != nil {
for prefix, opts := range reg.Verification {
// Check if verification is disable for this prefix.
if opts == nil {
continue
}
// // Check verification options.
// if reg.Verification != nil {
// for prefix, opts := range reg.Verification {
// // Check if verification is disable for this prefix.
// if opts == nil {
// continue
// }
// If enabled, a trust store is required.
if opts.TrustStore == nil {
return fmt.Errorf("verification enabled for prefix %q, but no trust store configured", prefix)
}
// // If enabled, a trust store is required.
// if opts.TrustStore == nil {
// return fmt.Errorf("verification enabled for prefix %q, but no trust store configured", prefix)
// }
// DownloadPolicy must be equal or stricter than DiskLoadPolicy.
if opts.DiskLoadPolicy < opts.DownloadPolicy {
return errors.New("verification download policy must be equal or stricter than the disk load policy")
}
// // DownloadPolicy must be equal or stricter than DiskLoadPolicy.
// if opts.DiskLoadPolicy < opts.DownloadPolicy {
// return errors.New("verification download policy must be equal or stricter than the disk load policy")
// }
// Warn if all policies are disabled.
if opts.DownloadPolicy == SignaturePolicyDisable &&
opts.DiskLoadPolicy == SignaturePolicyDisable {
log.Warningf("%s: verification enabled for prefix %q, but all policies set to disable", reg.Name, prefix)
}
}
}
// // Warn if all policies are disabled.
// if opts.DownloadPolicy == SignaturePolicyDisable &&
// opts.DiskLoadPolicy == SignaturePolicyDisable {
// log.Warningf("%s: verification enabled for prefix %q, but all policies set to disable", reg.Name, prefix)
// }
// }
// }
return nil
}
// return nil
// }
// StorageDir returns the main storage dir of the resource registry.
func (reg *ResourceRegistry) StorageDir() *utils.DirStructure {
return reg.storageDir
}
// // StorageDir returns the main storage dir of the resource registry.
// func (reg *ResourceRegistry) StorageDir() *utils.DirStructure {
// return reg.storageDir
// }
// TmpDir returns the temporary working dir of the resource registry.
func (reg *ResourceRegistry) TmpDir() *utils.DirStructure {
return reg.tmpDir
}
// // TmpDir returns the temporary working dir of the resource registry.
// func (reg *ResourceRegistry) TmpDir() *utils.DirStructure {
// return reg.tmpDir
// }
// SetDevMode sets the development mode flag.
func (reg *ResourceRegistry) SetDevMode(on bool) {
reg.Lock()
defer reg.Unlock()
// // SetDevMode sets the development mode flag.
// func (reg *ResourceRegistry) SetDevMode(on bool) {
// reg.Lock()
// defer reg.Unlock()
reg.DevMode = on
}
// reg.DevMode = on
// }
// SetUsePreReleases sets the UsePreReleases flag.
func (reg *ResourceRegistry) SetUsePreReleases(yes bool) {
reg.Lock()
defer reg.Unlock()
// // SetUsePreReleases sets the UsePreReleases flag.
// func (reg *ResourceRegistry) SetUsePreReleases(yes bool) {
// reg.Lock()
// defer reg.Unlock()
reg.UsePreReleases = yes
}
// reg.UsePreReleases = yes
// }
// AddResource adds a resource to the registry. Does _not_ select new version.
func (reg *ResourceRegistry) AddResource(identifier, version string, index *Index, available, currentRelease, preRelease bool) error {
reg.Lock()
defer reg.Unlock()
// // AddResource adds a resource to the registry. Does _not_ select new version.
// func (reg *ResourceRegistry) AddResource(identifier, version string, index *Index, available, currentRelease, preRelease bool) error {
// reg.Lock()
// defer reg.Unlock()
err := reg.addResource(identifier, version, index, available, currentRelease, preRelease)
return err
}
// err := reg.addResource(identifier, version, index, available, currentRelease, preRelease)
// return err
// }
func (reg *ResourceRegistry) addResource(identifier, version string, index *Index, available, currentRelease, preRelease bool) error {
res, ok := reg.resources[identifier]
if !ok {
res = reg.newResource(identifier)
reg.resources[identifier] = res
}
res.Index = index
// func (reg *ResourceRegistry) addResource(identifier, version string, index *Index, available, currentRelease, preRelease bool) error {
// res, ok := reg.resources[identifier]
// if !ok {
// res = reg.newResource(identifier)
// reg.resources[identifier] = res
// }
// res.Index = index
return res.AddVersion(version, available, currentRelease, preRelease)
}
// return res.AddVersion(version, available, currentRelease, preRelease)
// }
// AddResources adds resources to the registry. Errors are logged, the last one is returned. Despite errors, non-failing resources are still added. Does _not_ select new versions.
func (reg *ResourceRegistry) AddResources(versions map[string]string, index *Index, available, currentRelease, preRelease bool) error {
reg.Lock()
defer reg.Unlock()
// // AddResources adds resources to the registry. Errors are logged, the last one is returned. Despite errors, non-failing resources are still added. Does _not_ select new versions.
// func (reg *ResourceRegistry) AddResources(versions map[string]string, index *Index, available, currentRelease, preRelease bool) error {
// reg.Lock()
// defer reg.Unlock()
// add versions and their flags to registry
var lastError error
for identifier, version := range versions {
lastError = reg.addResource(identifier, version, index, available, currentRelease, preRelease)
if lastError != nil {
log.Warningf("%s: failed to add resource %s: %s", reg.Name, identifier, lastError)
}
}
// // add versions and their flags to registry
// var lastError error
// for identifier, version := range versions {
// lastError = reg.addResource(identifier, version, index, available, currentRelease, preRelease)
// if lastError != nil {
// log.Warningf("%s: failed to add resource %s: %s", reg.Name, identifier, lastError)
// }
// }
return lastError
}
// return lastError
// }
// SelectVersions selects new resource versions depending on the current registry state.
func (reg *ResourceRegistry) SelectVersions() {
reg.RLock()
defer reg.RUnlock()
// // SelectVersions selects new resource versions depending on the current registry state.
// func (reg *ResourceRegistry) SelectVersions() {
// reg.RLock()
// defer reg.RUnlock()
for _, res := range reg.resources {
res.Lock()
res.selectVersion()
res.Unlock()
}
}
// for _, res := range reg.resources {
// res.Lock()
// res.selectVersion()
// res.Unlock()
// }
// }
// GetSelectedVersions returns a list of the currently selected versions.
func (reg *ResourceRegistry) GetSelectedVersions() (versions map[string]string) {
reg.RLock()
defer reg.RUnlock()
// // GetSelectedVersions returns a list of the currently selected versions.
// func (reg *ResourceRegistry) GetSelectedVersions() (versions map[string]string) {
// reg.RLock()
// defer reg.RUnlock()
for _, res := range reg.resources {
res.Lock()
versions[res.Identifier] = res.SelectedVersion.VersionNumber
res.Unlock()
}
// for _, res := range reg.resources {
// res.Lock()
// versions[res.Identifier] = res.SelectedVersion.VersionNumber
// res.Unlock()
// }
return
}
// return
// }
// Purge deletes old updates, retaining a certain amount, specified by the keep
// parameter. Will at least keep 2 updates per resource.
func (reg *ResourceRegistry) Purge(keep int) {
reg.RLock()
defer reg.RUnlock()
// // Purge deletes old updates, retaining a certain amount, specified by the keep
// // parameter. Will at least keep 2 updates per resource.
// func (reg *ResourceRegistry) Purge(keep int) {
// reg.RLock()
// defer reg.RUnlock()
for _, res := range reg.resources {
res.Purge(keep)
}
}
// for _, res := range reg.resources {
// res.Purge(keep)
// }
// }
// ResetResources removes all resources from the registry.
func (reg *ResourceRegistry) ResetResources() {
reg.Lock()
defer reg.Unlock()
// // ResetResources removes all resources from the registry.
// func (reg *ResourceRegistry) ResetResources() {
// reg.Lock()
// defer reg.Unlock()
reg.resources = make(map[string]*Resource)
}
// reg.resources = make(map[string]*Resource)
// }
// ResetIndexes removes all indexes from the registry.
func (reg *ResourceRegistry) ResetIndexes() {
reg.Lock()
defer reg.Unlock()
// // ResetIndexes removes all indexes from the registry.
// func (reg *ResourceRegistry) ResetIndexes() {
// reg.Lock()
// defer reg.Unlock()
reg.indexes = make([]*Index, 0, len(reg.indexes))
}
// reg.indexes = make([]*Index, 0, len(reg.indexes))
// }
// Cleanup removes temporary files.
func (reg *ResourceRegistry) Cleanup() error {
// delete download tmp dir
return os.RemoveAll(reg.tmpDir.Path)
}
// // Cleanup removes temporary files.
// func (reg *ResourceRegistry) Cleanup() error {
// // delete download tmp dir
// return os.RemoveAll(reg.tmpDir.Path)
// }

File diff suppressed because it is too large Load diff

View file

@ -1,49 +1,49 @@
package updater
import (
"strings"
// import (
// "strings"
"github.com/safing/jess"
)
// "github.com/safing/jess"
// )
// VerificationOptions holds options for verification of files.
type VerificationOptions struct {
TrustStore jess.TrustStore
DownloadPolicy SignaturePolicy
DiskLoadPolicy SignaturePolicy
}
// // VerificationOptions holds options for verification of files.
// type VerificationOptions struct {
// TrustStore jess.TrustStore
// DownloadPolicy SignaturePolicy
// DiskLoadPolicy SignaturePolicy
// }
// GetVerificationOptions returns the verification options for the given identifier.
func (reg *ResourceRegistry) GetVerificationOptions(identifier string) *VerificationOptions {
if reg.Verification == nil {
return nil
}
// // GetVerificationOptions returns the verification options for the given identifier.
// func (reg *ResourceRegistry) GetVerificationOptions(identifier string) *VerificationOptions {
// if reg.Verification == nil {
// return nil
// }
var (
longestPrefix = -1
bestMatch *VerificationOptions
)
for prefix, opts := range reg.Verification {
if len(prefix) > longestPrefix && strings.HasPrefix(identifier, prefix) {
longestPrefix = len(prefix)
bestMatch = opts
}
}
// var (
// longestPrefix = -1
// bestMatch *VerificationOptions
// )
// for prefix, opts := range reg.Verification {
// if len(prefix) > longestPrefix && strings.HasPrefix(identifier, prefix) {
// longestPrefix = len(prefix)
// bestMatch = opts
// }
// }
return bestMatch
}
// return bestMatch
// }
// SignaturePolicy defines behavior in case of errors.
type SignaturePolicy uint8
// // SignaturePolicy defines behavior in case of errors.
// type SignaturePolicy uint8
// Signature Policies.
const (
// SignaturePolicyRequire fails on any error.
SignaturePolicyRequire = iota
// // Signature Policies.
// const (
// // SignaturePolicyRequire fails on any error.
// SignaturePolicyRequire = iota
// SignaturePolicyWarn only warns on errors.
SignaturePolicyWarn
// // SignaturePolicyWarn only warns on errors.
// SignaturePolicyWarn
// SignaturePolicyDisable only downloads signatures, but does not verify them.
SignaturePolicyDisable
)
// // SignaturePolicyDisable only downloads signatures, but does not verify them.
// SignaturePolicyDisable
// )

View file

@ -1,180 +1,180 @@
package updater
import (
"sort"
"sync"
"time"
// import (
// "sort"
// "sync"
// "time"
"github.com/safing/portmaster/base/utils"
)
// "github.com/safing/portmaster/base/utils"
// )
// Registry States.
const (
StateReady = "ready" // Default idle state.
StateChecking = "checking" // Downloading indexes.
StateDownloading = "downloading" // Downloading updates.
StateFetching = "fetching" // Fetching a single file.
)
// // Registry States.
// const (
// StateReady = "ready" // Default idle state.
// StateChecking = "checking" // Downloading indexes.
// StateDownloading = "downloading" // Downloading updates.
// StateFetching = "fetching" // Fetching a single file.
// )
// RegistryState describes the registry state.
type RegistryState struct {
sync.Mutex
reg *ResourceRegistry
// // RegistryState describes the registry state.
// type RegistryState struct {
// sync.Mutex
// reg *ResourceRegistry
// ID holds the ID of the state the registry is currently in.
ID string
// // ID holds the ID of the state the registry is currently in.
// ID string
// Details holds further information about the current state.
Details any
// // Details holds further information about the current state.
// Details any
// Updates holds generic information about the current status of pending
// and recently downloaded updates.
Updates UpdateState
// // Updates holds generic information about the current status of pending
// // and recently downloaded updates.
// Updates UpdateState
// operationLock locks the operation of any state changing operation.
// This is separate from the registry lock, which locks access to the
// registry struct.
operationLock sync.Mutex
}
// // operationLock locks the operation of any state changing operation.
// // This is separate from the registry lock, which locks access to the
// // registry struct.
// operationLock sync.Mutex
// }
// StateDownloadingDetails holds details of the downloading state.
type StateDownloadingDetails struct {
// Resources holds the resource IDs that are being downloaded.
Resources []string
// // StateDownloadingDetails holds details of the downloading state.
// type StateDownloadingDetails struct {
// // Resources holds the resource IDs that are being downloaded.
// Resources []string
// FinishedUpTo holds the index of Resources that is currently being
// downloaded. Previous resources have finished downloading.
FinishedUpTo int
}
// // FinishedUpTo holds the index of Resources that is currently being
// // downloaded. Previous resources have finished downloading.
// FinishedUpTo int
// }
// UpdateState holds generic information about the current status of pending
// and recently downloaded updates.
type UpdateState struct {
// LastCheckAt holds the time of the last update check.
LastCheckAt *time.Time
// LastCheckError holds the error of the last check.
LastCheckError error
// PendingDownload holds the resources that are pending download.
PendingDownload []string
// // UpdateState holds generic information about the current status of pending
// // and recently downloaded updates.
// type UpdateState struct {
// // LastCheckAt holds the time of the last update check.
// LastCheckAt *time.Time
// // LastCheckError holds the error of the last check.
// LastCheckError error
// // PendingDownload holds the resources that are pending download.
// PendingDownload []string
// LastDownloadAt holds the time when resources were downloaded the last time.
LastDownloadAt *time.Time
// LastDownloadError holds the error of the last download.
LastDownloadError error
// LastDownload holds the resources that we downloaded the last time updates
// were downloaded.
LastDownload []string
// // LastDownloadAt holds the time when resources were downloaded the last time.
// LastDownloadAt *time.Time
// // LastDownloadError holds the error of the last download.
// LastDownloadError error
// // LastDownload holds the resources that we downloaded the last time updates
// // were downloaded.
// LastDownload []string
// LastSuccessAt holds the time of the last successful update (check).
LastSuccessAt *time.Time
}
// // LastSuccessAt holds the time of the last successful update (check).
// LastSuccessAt *time.Time
// }
// GetState returns the current registry state.
// The returned data must not be modified.
func (reg *ResourceRegistry) GetState() RegistryState {
reg.state.Lock()
defer reg.state.Unlock()
// // GetState returns the current registry state.
// // The returned data must not be modified.
// func (reg *ResourceRegistry) GetState() RegistryState {
// reg.state.Lock()
// defer reg.state.Unlock()
return RegistryState{
ID: reg.state.ID,
Details: reg.state.Details,
Updates: reg.state.Updates,
}
}
// return RegistryState{
// ID: reg.state.ID,
// Details: reg.state.Details,
// Updates: reg.state.Updates,
// }
// }
// StartOperation starts an operation.
func (s *RegistryState) StartOperation(id string) bool {
defer s.notify()
// // StartOperation starts an operation.
// func (s *RegistryState) StartOperation(id string) bool {
// defer s.notify()
s.operationLock.Lock()
// s.operationLock.Lock()
s.Lock()
defer s.Unlock()
// s.Lock()
// defer s.Unlock()
s.ID = id
return true
}
// s.ID = id
// return true
// }
// UpdateOperationDetails updates the details of an operation.
// The supplied struct should be a copy and must not be changed after calling
// this function.
func (s *RegistryState) UpdateOperationDetails(details any) {
defer s.notify()
// // UpdateOperationDetails updates the details of an operation.
// // The supplied struct should be a copy and must not be changed after calling
// // this function.
// func (s *RegistryState) UpdateOperationDetails(details any) {
// defer s.notify()
s.Lock()
defer s.Unlock()
// s.Lock()
// defer s.Unlock()
s.Details = details
}
// s.Details = details
// }
// EndOperation ends an operation.
func (s *RegistryState) EndOperation() {
defer s.notify()
defer s.operationLock.Unlock()
// // EndOperation ends an operation.
// func (s *RegistryState) EndOperation() {
// defer s.notify()
// defer s.operationLock.Unlock()
s.Lock()
defer s.Unlock()
// s.Lock()
// defer s.Unlock()
s.ID = StateReady
s.Details = nil
}
// s.ID = StateReady
// s.Details = nil
// }
// ReportUpdateCheck reports an update check to the registry state.
func (s *RegistryState) ReportUpdateCheck(pendingDownload []string, failed error) {
defer s.notify()
// // ReportUpdateCheck reports an update check to the registry state.
// func (s *RegistryState) ReportUpdateCheck(pendingDownload []string, failed error) {
// defer s.notify()
sort.Strings(pendingDownload)
// sort.Strings(pendingDownload)
s.Lock()
defer s.Unlock()
// s.Lock()
// defer s.Unlock()
now := time.Now()
s.Updates.LastCheckAt = &now
s.Updates.LastCheckError = failed
s.Updates.PendingDownload = pendingDownload
// now := time.Now()
// s.Updates.LastCheckAt = &now
// s.Updates.LastCheckError = failed
// s.Updates.PendingDownload = pendingDownload
if failed == nil {
s.Updates.LastSuccessAt = &now
}
}
// if failed == nil {
// s.Updates.LastSuccessAt = &now
// }
// }
// ReportDownloads reports downloaded updates to the registry state.
func (s *RegistryState) ReportDownloads(downloaded []string, failed error) {
defer s.notify()
// // ReportDownloads reports downloaded updates to the registry state.
// func (s *RegistryState) ReportDownloads(downloaded []string, failed error) {
// defer s.notify()
sort.Strings(downloaded)
// sort.Strings(downloaded)
s.Lock()
defer s.Unlock()
// s.Lock()
// defer s.Unlock()
now := time.Now()
s.Updates.LastDownloadAt = &now
s.Updates.LastDownloadError = failed
s.Updates.LastDownload = downloaded
// now := time.Now()
// s.Updates.LastDownloadAt = &now
// s.Updates.LastDownloadError = failed
// s.Updates.LastDownload = downloaded
// Remove downloaded resources from the pending list.
if len(s.Updates.PendingDownload) > 0 {
newPendingDownload := make([]string, 0, len(s.Updates.PendingDownload))
for _, pending := range s.Updates.PendingDownload {
if !utils.StringInSlice(downloaded, pending) {
newPendingDownload = append(newPendingDownload, pending)
}
}
s.Updates.PendingDownload = newPendingDownload
}
// // Remove downloaded resources from the pending list.
// if len(s.Updates.PendingDownload) > 0 {
// newPendingDownload := make([]string, 0, len(s.Updates.PendingDownload))
// for _, pending := range s.Updates.PendingDownload {
// if !utils.StringInSlice(downloaded, pending) {
// newPendingDownload = append(newPendingDownload, pending)
// }
// }
// s.Updates.PendingDownload = newPendingDownload
// }
if failed == nil {
s.Updates.LastSuccessAt = &now
}
}
// if failed == nil {
// s.Updates.LastSuccessAt = &now
// }
// }
func (s *RegistryState) notify() {
switch {
case s.reg == nil:
return
case s.reg.StateNotifyFunc == nil:
return
}
// func (s *RegistryState) notify() {
// switch {
// case s.reg == nil:
// return
// case s.reg.StateNotifyFunc == nil:
// return
// }
s.reg.StateNotifyFunc(s)
}
// s.reg.StateNotifyFunc(s)
// }

View file

@ -1,272 +1,272 @@
package updater
import (
"context"
"errors"
"fmt"
"io/fs"
"net/http"
"os"
"path/filepath"
"strings"
// import (
// "context"
// "errors"
// "fmt"
// "io/fs"
// "net/http"
// "os"
// "path/filepath"
// "strings"
"github.com/safing/jess/filesig"
"github.com/safing/jess/lhash"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
)
// "github.com/safing/jess/filesig"
// "github.com/safing/jess/lhash"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// ScanStorage scans root within the storage dir and adds found
// resources to the registry. If an error occurred, it is logged
// and the last error is returned. Everything that was found
// despite errors is added to the registry anyway. Leave root
// empty to scan the full storage dir.
func (reg *ResourceRegistry) ScanStorage(root string) error {
var lastError error
// // ScanStorage scans root within the storage dir and adds found
// // resources to the registry. If an error occurred, it is logged
// // and the last error is returned. Everything that was found
// // despite errors is added to the registry anyway. Leave root
// // empty to scan the full storage dir.
// func (reg *ResourceRegistry) ScanStorage(root string) error {
// var lastError error
// prep root
if root == "" {
root = reg.storageDir.Path
} else {
var err error
root, err = filepath.Abs(root)
if err != nil {
return err
}
if !strings.HasPrefix(root, reg.storageDir.Path) {
return errors.New("supplied scan root path not within storage")
}
}
// // prep root
// if root == "" {
// root = reg.storageDir.Path
// } else {
// var err error
// root, err = filepath.Abs(root)
// if err != nil {
// return err
// }
// if !strings.HasPrefix(root, reg.storageDir.Path) {
// return errors.New("supplied scan root path not within storage")
// }
// }
// walk fs
_ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
// skip tmp dir (including errors trying to read it)
if strings.HasPrefix(path, reg.tmpDir.Path) {
return filepath.SkipDir
}
// // walk fs
// _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
// // skip tmp dir (including errors trying to read it)
// if strings.HasPrefix(path, reg.tmpDir.Path) {
// return filepath.SkipDir
// }
// handle walker error
if err != nil {
lastError = fmt.Errorf("%s: could not read %s: %w", reg.Name, path, err)
log.Warning(lastError.Error())
return nil
}
// // handle walker error
// if err != nil {
// lastError = fmt.Errorf("%s: could not read %s: %w", reg.Name, path, err)
// log.Warning(lastError.Error())
// return nil
// }
// Ignore file signatures.
if strings.HasSuffix(path, filesig.Extension) {
return nil
}
// // Ignore file signatures.
// if strings.HasSuffix(path, filesig.Extension) {
// return nil
// }
// get relative path to storage
relativePath, err := filepath.Rel(reg.storageDir.Path, path)
if err != nil {
lastError = fmt.Errorf("%s: could not get relative path of %s: %w", reg.Name, path, err)
log.Warning(lastError.Error())
return nil
}
// // get relative path to storage
// relativePath, err := filepath.Rel(reg.storageDir.Path, path)
// if err != nil {
// lastError = fmt.Errorf("%s: could not get relative path of %s: %w", reg.Name, path, err)
// log.Warning(lastError.Error())
// return nil
// }
// convert to identifier and version
relativePath = filepath.ToSlash(relativePath)
identifier, version, ok := GetIdentifierAndVersion(relativePath)
if !ok {
// file does not conform to format
return nil
}
// // convert to identifier and version
// relativePath = filepath.ToSlash(relativePath)
// identifier, version, ok := GetIdentifierAndVersion(relativePath)
// if !ok {
// // file does not conform to format
// return nil
// }
// fully ignore directories that also have an identifier - these will be unpacked resources
if info.IsDir() {
return filepath.SkipDir
}
// // fully ignore directories that also have an identifier - these will be unpacked resources
// if info.IsDir() {
// return filepath.SkipDir
// }
// save
err = reg.AddResource(identifier, version, nil, true, false, false)
if err != nil {
lastError = fmt.Errorf("%s: could not get add resource %s v%s: %w", reg.Name, identifier, version, err)
log.Warning(lastError.Error())
}
return nil
})
// // save
// err = reg.AddResource(identifier, version, nil, true, false, false)
// if err != nil {
// lastError = fmt.Errorf("%s: could not get add resource %s v%s: %w", reg.Name, identifier, version, err)
// log.Warning(lastError.Error())
// }
// return nil
// })
return lastError
}
// return lastError
// }
// LoadIndexes loads the current release indexes from disk
// or will fetch a new version if not available and the
// registry is marked as online.
func (reg *ResourceRegistry) LoadIndexes(ctx context.Context) error {
var firstErr error
client := &http.Client{}
for _, idx := range reg.getIndexes() {
err := reg.loadIndexFile(idx)
if err == nil {
log.Debugf("%s: loaded index %s", reg.Name, idx.Path)
} else if reg.Online {
// try to download the index file if a local disk version
// does not exist or we don't have permission to read it.
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrPermission) {
err = reg.downloadIndex(ctx, client, idx)
}
}
// // LoadIndexes loads the current release indexes from disk
// // or will fetch a new version if not available and the
// // registry is marked as online.
// func (reg *ResourceRegistry) LoadIndexes(ctx context.Context) error {
// var firstErr error
// client := &http.Client{}
// for _, idx := range reg.getIndexes() {
// err := reg.loadIndexFile(idx)
// if err == nil {
// log.Debugf("%s: loaded index %s", reg.Name, idx.Path)
// } else if reg.Online {
// // try to download the index file if a local disk version
// // does not exist or we don't have permission to read it.
// if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrPermission) {
// err = reg.downloadIndex(ctx, client, idx)
// }
// }
if err != nil && firstErr == nil {
firstErr = err
}
}
// if err != nil && firstErr == nil {
// firstErr = err
// }
// }
return firstErr
}
// return firstErr
// }
// getIndexes returns a copy of the index.
// The indexes itself are references.
func (reg *ResourceRegistry) getIndexes() []*Index {
reg.RLock()
defer reg.RUnlock()
// // getIndexes returns a copy of the index.
// // The indexes itself are references.
// func (reg *ResourceRegistry) getIndexes() []*Index {
// reg.RLock()
// defer reg.RUnlock()
indexes := make([]*Index, len(reg.indexes))
copy(indexes, reg.indexes)
return indexes
}
// indexes := make([]*Index, len(reg.indexes))
// copy(indexes, reg.indexes)
// return indexes
// }
func (reg *ResourceRegistry) loadIndexFile(idx *Index) error {
indexPath := filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path))
indexData, err := os.ReadFile(indexPath)
if err != nil {
return fmt.Errorf("failed to read index file %s: %w", idx.Path, err)
}
// func (reg *ResourceRegistry) loadIndexFile(idx *Index) error {
// indexPath := filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path))
// indexData, err := os.ReadFile(indexPath)
// if err != nil {
// return fmt.Errorf("failed to read index file %s: %w", idx.Path, err)
// }
// Verify signature, if enabled.
if verifOpts := reg.GetVerificationOptions(idx.Path); verifOpts != nil {
// Load and check signature.
verifiedHash, _, err := reg.loadAndVerifySigFile(verifOpts, indexPath+filesig.Extension)
if err != nil {
switch verifOpts.DiskLoadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("failed to verify signature of index %s: %w", idx.Path, err)
case SignaturePolicyWarn:
log.Warningf("%s: failed to verify signature of index %s: %s", reg.Name, idx.Path, err)
case SignaturePolicyDisable:
log.Debugf("%s: failed to verify signature of index %s: %s", reg.Name, idx.Path, err)
}
}
// // Verify signature, if enabled.
// if verifOpts := reg.GetVerificationOptions(idx.Path); verifOpts != nil {
// // Load and check signature.
// verifiedHash, _, err := reg.loadAndVerifySigFile(verifOpts, indexPath+filesig.Extension)
// if err != nil {
// switch verifOpts.DiskLoadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("failed to verify signature of index %s: %w", idx.Path, err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify signature of index %s: %s", reg.Name, idx.Path, err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify signature of index %s: %s", reg.Name, idx.Path, err)
// }
// }
// Check if signature checksum matches the index data.
if err == nil && !verifiedHash.Matches(indexData) {
switch verifOpts.DiskLoadPolicy {
case SignaturePolicyRequire:
return fmt.Errorf("index file %s does not match signature", idx.Path)
case SignaturePolicyWarn:
log.Warningf("%s: index file %s does not match signature", reg.Name, idx.Path)
case SignaturePolicyDisable:
log.Debugf("%s: index file %s does not match signature", reg.Name, idx.Path)
}
}
}
// // Check if signature checksum matches the index data.
// if err == nil && !verifiedHash.Matches(indexData) {
// switch verifOpts.DiskLoadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("index file %s does not match signature", idx.Path)
// case SignaturePolicyWarn:
// log.Warningf("%s: index file %s does not match signature", reg.Name, idx.Path)
// case SignaturePolicyDisable:
// log.Debugf("%s: index file %s does not match signature", reg.Name, idx.Path)
// }
// }
// }
// Parse the index file.
indexFile, err := ParseIndexFile(indexData, idx.Channel, idx.LastRelease)
if err != nil {
return fmt.Errorf("failed to parse index file %s: %w", idx.Path, err)
}
// // Parse the index file.
// indexFile, err := ParseIndexFile(indexData, idx.Channel, idx.LastRelease)
// if err != nil {
// return fmt.Errorf("failed to parse index file %s: %w", idx.Path, err)
// }
// Update last seen release.
idx.LastRelease = indexFile.Published
// // Update last seen release.
// idx.LastRelease = indexFile.Published
// Warn if there aren't any releases in the index.
if len(indexFile.Releases) == 0 {
log.Debugf("%s: index %s has no releases", reg.Name, idx.Path)
return nil
}
// // Warn if there aren't any releases in the index.
// if len(indexFile.Releases) == 0 {
// log.Debugf("%s: index %s has no releases", reg.Name, idx.Path)
// return nil
// }
// Add index releases to available resources.
err = reg.AddResources(indexFile.Releases, idx, false, true, idx.PreRelease)
if err != nil {
log.Warningf("%s: failed to add resource: %s", reg.Name, err)
}
return nil
}
// // Add index releases to available resources.
// err = reg.AddResources(indexFile.Releases, idx, false, true, idx.PreRelease)
// if err != nil {
// log.Warningf("%s: failed to add resource: %s", reg.Name, err)
// }
// return nil
// }
func (reg *ResourceRegistry) loadAndVerifySigFile(verifOpts *VerificationOptions, sigFilePath string) (*lhash.LabeledHash, []byte, error) {
// Load signature file.
sigFileData, err := os.ReadFile(sigFilePath)
if err != nil {
return nil, nil, fmt.Errorf("failed to read signature file: %w", err)
}
// func (reg *ResourceRegistry) loadAndVerifySigFile(verifOpts *VerificationOptions, sigFilePath string) (*lhash.LabeledHash, []byte, error) {
// // Load signature file.
// sigFileData, err := os.ReadFile(sigFilePath)
// if err != nil {
// return nil, nil, fmt.Errorf("failed to read signature file: %w", err)
// }
// Extract all signatures.
sigs, err := filesig.ParseSigFile(sigFileData)
switch {
case len(sigs) == 0 && err != nil:
return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
case len(sigs) == 0:
return nil, nil, errors.New("no signatures found in signature file")
case err != nil:
return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
}
// // Extract all signatures.
// sigs, err := filesig.ParseSigFile(sigFileData)
// switch {
// case len(sigs) == 0 && err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// case len(sigs) == 0:
// return nil, nil, errors.New("no signatures found in signature file")
// case err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// }
// Verify all signatures.
var verifiedHash *lhash.LabeledHash
for _, sig := range sigs {
fd, err := filesig.VerifyFileData(
sig,
nil,
verifOpts.TrustStore,
)
if err != nil {
return nil, sigFileData, err
}
// // Verify all signatures.
// var verifiedHash *lhash.LabeledHash
// for _, sig := range sigs {
// fd, err := filesig.VerifyFileData(
// sig,
// nil,
// verifOpts.TrustStore,
// )
// if err != nil {
// return nil, sigFileData, err
// }
// Save or check verified hash.
if verifiedHash == nil {
verifiedHash = fd.FileHash()
} else if !fd.FileHash().Equal(verifiedHash) {
// Return an error if two valid hashes mismatch.
// For simplicity, all hash algorithms must be the same for now.
return nil, sigFileData, errors.New("file hashes from different signatures do not match")
}
}
// // Save or check verified hash.
// if verifiedHash == nil {
// verifiedHash = fd.FileHash()
// } else if !fd.FileHash().Equal(verifiedHash) {
// // Return an error if two valid hashes mismatch.
// // For simplicity, all hash algorithms must be the same for now.
// return nil, sigFileData, errors.New("file hashes from different signatures do not match")
// }
// }
return verifiedHash, sigFileData, nil
}
// return verifiedHash, sigFileData, nil
// }
// CreateSymlinks creates a directory structure with unversioned symlinks to the given updates list.
func (reg *ResourceRegistry) CreateSymlinks(symlinkRoot *utils.DirStructure) error {
err := os.RemoveAll(symlinkRoot.Path)
if err != nil {
return fmt.Errorf("failed to wipe symlink root: %w", err)
}
// // CreateSymlinks creates a directory structure with unversioned symlinks to the given updates list.
// func (reg *ResourceRegistry) CreateSymlinks(symlinkRoot *utils.DirStructure) error {
// err := os.RemoveAll(symlinkRoot.Path)
// if err != nil {
// return fmt.Errorf("failed to wipe symlink root: %w", err)
// }
err = symlinkRoot.Ensure()
if err != nil {
return fmt.Errorf("failed to create symlink root: %w", err)
}
// err = symlinkRoot.Ensure()
// if err != nil {
// return fmt.Errorf("failed to create symlink root: %w", err)
// }
reg.RLock()
defer reg.RUnlock()
// reg.RLock()
// defer reg.RUnlock()
for _, res := range reg.resources {
if res.SelectedVersion == nil {
return fmt.Errorf("no selected version available for %s", res.Identifier)
}
// for _, res := range reg.resources {
// if res.SelectedVersion == nil {
// return fmt.Errorf("no selected version available for %s", res.Identifier)
// }
targetPath := res.SelectedVersion.storagePath()
linkPath := filepath.Join(symlinkRoot.Path, filepath.FromSlash(res.Identifier))
linkPathDir := filepath.Dir(linkPath)
// targetPath := res.SelectedVersion.storagePath()
// linkPath := filepath.Join(symlinkRoot.Path, filepath.FromSlash(res.Identifier))
// linkPathDir := filepath.Dir(linkPath)
err = symlinkRoot.EnsureAbsPath(linkPathDir)
if err != nil {
return fmt.Errorf("failed to create dir for link: %w", err)
}
// err = symlinkRoot.EnsureAbsPath(linkPathDir)
// if err != nil {
// return fmt.Errorf("failed to create dir for link: %w", err)
// }
relativeTargetPath, err := filepath.Rel(linkPathDir, targetPath)
if err != nil {
return fmt.Errorf("failed to get relative target path: %w", err)
}
// relativeTargetPath, err := filepath.Rel(linkPathDir, targetPath)
// if err != nil {
// return fmt.Errorf("failed to get relative target path: %w", err)
// }
err = os.Symlink(relativeTargetPath, linkPath)
if err != nil {
return fmt.Errorf("failed to link %s: %w", res.Identifier, err)
}
}
// err = os.Symlink(relativeTargetPath, linkPath)
// if err != nil {
// return fmt.Errorf("failed to link %s: %w", res.Identifier, err)
// }
// }
return nil
}
// return nil
// }

View file

@ -1,195 +1,195 @@
package updater
import (
"archive/zip"
"compress/gzip"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"strings"
// import (
// "archive/zip"
// "compress/gzip"
// "errors"
// "fmt"
// "io"
// "io/fs"
// "os"
// "path"
// "path/filepath"
// "strings"
"github.com/hashicorp/go-multierror"
// "github.com/hashicorp/go-multierror"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
)
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// MaxUnpackSize specifies the maximum size that will be unpacked.
const MaxUnpackSize = 1000000000 // 1GB
// // MaxUnpackSize specifies the maximum size that will be unpacked.
// const MaxUnpackSize = 1000000000 // 1GB
// UnpackGZIP unpacks a GZIP compressed reader r
// and returns a new reader. It's suitable to be
// used with registry.GetPackedFile.
func UnpackGZIP(r io.Reader) (io.Reader, error) {
return gzip.NewReader(r)
}
// // UnpackGZIP unpacks a GZIP compressed reader r
// // and returns a new reader. It's suitable to be
// // used with registry.GetPackedFile.
// func UnpackGZIP(r io.Reader) (io.Reader, error) {
// return gzip.NewReader(r)
// }
// UnpackResources unpacks all resources defined in the AutoUnpack list.
func (reg *ResourceRegistry) UnpackResources() error {
reg.RLock()
defer reg.RUnlock()
// // UnpackResources unpacks all resources defined in the AutoUnpack list.
// func (reg *ResourceRegistry) UnpackResources() error {
// reg.RLock()
// defer reg.RUnlock()
var multierr *multierror.Error
for _, res := range reg.resources {
if utils.StringInSlice(reg.AutoUnpack, res.Identifier) {
err := res.UnpackArchive()
if err != nil {
multierr = multierror.Append(
multierr,
fmt.Errorf("%s: %w", res.Identifier, err),
)
}
}
}
// var multierr *multierror.Error
// for _, res := range reg.resources {
// if utils.StringInSlice(reg.AutoUnpack, res.Identifier) {
// err := res.UnpackArchive()
// if err != nil {
// multierr = multierror.Append(
// multierr,
// fmt.Errorf("%s: %w", res.Identifier, err),
// )
// }
// }
// }
return multierr.ErrorOrNil()
}
// return multierr.ErrorOrNil()
// }
const (
zipSuffix = ".zip"
)
// const (
// zipSuffix = ".zip"
// )
// UnpackArchive unpacks the archive the resource refers to. The contents are
// unpacked into a directory with the same name as the file, excluding the
// suffix. If the destination folder already exists, it is assumed that the
// contents have already been correctly unpacked.
func (res *Resource) UnpackArchive() error {
res.Lock()
defer res.Unlock()
// // UnpackArchive unpacks the archive the resource refers to. The contents are
// // unpacked into a directory with the same name as the file, excluding the
// // suffix. If the destination folder already exists, it is assumed that the
// // contents have already been correctly unpacked.
// func (res *Resource) UnpackArchive() error {
// res.Lock()
// defer res.Unlock()
// Only unpack selected versions.
if res.SelectedVersion == nil {
return nil
}
// // Only unpack selected versions.
// if res.SelectedVersion == nil {
// return nil
// }
switch {
case strings.HasSuffix(res.Identifier, zipSuffix):
return res.unpackZipArchive()
default:
return fmt.Errorf("unsupported file type for unpacking")
}
}
// switch {
// case strings.HasSuffix(res.Identifier, zipSuffix):
// return res.unpackZipArchive()
// default:
// return fmt.Errorf("unsupported file type for unpacking")
// }
// }
func (res *Resource) unpackZipArchive() error {
// Get file and directory paths.
archiveFile := res.SelectedVersion.storagePath()
destDir := strings.TrimSuffix(archiveFile, zipSuffix)
tmpDir := filepath.Join(
res.registry.tmpDir.Path,
filepath.FromSlash(strings.TrimSuffix(
path.Base(res.SelectedVersion.versionedPath()),
zipSuffix,
)),
)
// func (res *Resource) unpackZipArchive() error {
// // Get file and directory paths.
// archiveFile := res.SelectedVersion.storagePath()
// destDir := strings.TrimSuffix(archiveFile, zipSuffix)
// tmpDir := filepath.Join(
// res.registry.tmpDir.Path,
// filepath.FromSlash(strings.TrimSuffix(
// path.Base(res.SelectedVersion.versionedPath()),
// zipSuffix,
// )),
// )
// Check status of destination.
dstStat, err := os.Stat(destDir)
switch {
case errors.Is(err, fs.ErrNotExist):
// The destination does not exist, continue with unpacking.
case err != nil:
return fmt.Errorf("cannot access destination for unpacking: %w", err)
case !dstStat.IsDir():
return fmt.Errorf("destination for unpacking is blocked by file: %s", dstStat.Name())
default:
// Archive already seems to be unpacked.
return nil
}
// // Check status of destination.
// dstStat, err := os.Stat(destDir)
// switch {
// case errors.Is(err, fs.ErrNotExist):
// // The destination does not exist, continue with unpacking.
// case err != nil:
// return fmt.Errorf("cannot access destination for unpacking: %w", err)
// case !dstStat.IsDir():
// return fmt.Errorf("destination for unpacking is blocked by file: %s", dstStat.Name())
// default:
// // Archive already seems to be unpacked.
// return nil
// }
// Create the tmp directory for unpacking.
err = res.registry.tmpDir.EnsureAbsPath(tmpDir)
if err != nil {
return fmt.Errorf("failed to create tmp dir for unpacking: %w", err)
}
// // Create the tmp directory for unpacking.
// err = res.registry.tmpDir.EnsureAbsPath(tmpDir)
// if err != nil {
// return fmt.Errorf("failed to create tmp dir for unpacking: %w", err)
// }
// Defer clean up of directories.
defer func() {
// Always clean up the tmp dir.
_ = os.RemoveAll(tmpDir)
// Cleanup the destination in case of an error.
if err != nil {
_ = os.RemoveAll(destDir)
}
}()
// // Defer clean up of directories.
// defer func() {
// // Always clean up the tmp dir.
// _ = os.RemoveAll(tmpDir)
// // Cleanup the destination in case of an error.
// if err != nil {
// _ = os.RemoveAll(destDir)
// }
// }()
// Open the archive for reading.
var archiveReader *zip.ReadCloser
archiveReader, err = zip.OpenReader(archiveFile)
if err != nil {
return fmt.Errorf("failed to open zip reader: %w", err)
}
defer func() {
_ = archiveReader.Close()
}()
// // Open the archive for reading.
// var archiveReader *zip.ReadCloser
// archiveReader, err = zip.OpenReader(archiveFile)
// if err != nil {
// return fmt.Errorf("failed to open zip reader: %w", err)
// }
// defer func() {
// _ = archiveReader.Close()
// }()
// Save all files to the tmp dir.
for _, file := range archiveReader.File {
err = copyFromZipArchive(
file,
filepath.Join(tmpDir, filepath.FromSlash(file.Name)),
)
if err != nil {
return fmt.Errorf("failed to extract archive file %s: %w", file.Name, err)
}
}
// // Save all files to the tmp dir.
// for _, file := range archiveReader.File {
// err = copyFromZipArchive(
// file,
// filepath.Join(tmpDir, filepath.FromSlash(file.Name)),
// )
// if err != nil {
// return fmt.Errorf("failed to extract archive file %s: %w", file.Name, err)
// }
// }
// Make the final move.
err = os.Rename(tmpDir, destDir)
if err != nil {
return fmt.Errorf("failed to move the extracted archive from %s to %s: %w", tmpDir, destDir, err)
}
// // Make the final move.
// err = os.Rename(tmpDir, destDir)
// if err != nil {
// return fmt.Errorf("failed to move the extracted archive from %s to %s: %w", tmpDir, destDir, err)
// }
// Fix permissions on the destination dir.
err = res.registry.storageDir.EnsureAbsPath(destDir)
if err != nil {
return fmt.Errorf("failed to apply directory permissions on %s: %w", destDir, err)
}
// // Fix permissions on the destination dir.
// err = res.registry.storageDir.EnsureAbsPath(destDir)
// if err != nil {
// return fmt.Errorf("failed to apply directory permissions on %s: %w", destDir, err)
// }
log.Infof("%s: unpacked %s", res.registry.Name, res.SelectedVersion.versionedPath())
return nil
}
// log.Infof("%s: unpacked %s", res.registry.Name, res.SelectedVersion.versionedPath())
// return nil
// }
func copyFromZipArchive(archiveFile *zip.File, dstPath string) error {
// If file is a directory, create it and continue.
if archiveFile.FileInfo().IsDir() {
err := os.Mkdir(dstPath, archiveFile.Mode())
if err != nil {
return fmt.Errorf("failed to create directory %s: %w", dstPath, err)
}
return nil
}
// func copyFromZipArchive(archiveFile *zip.File, dstPath string) error {
// // If file is a directory, create it and continue.
// if archiveFile.FileInfo().IsDir() {
// err := os.Mkdir(dstPath, archiveFile.Mode())
// if err != nil {
// return fmt.Errorf("failed to create directory %s: %w", dstPath, err)
// }
// return nil
// }
// Open archived file for reading.
fileReader, err := archiveFile.Open()
if err != nil {
return fmt.Errorf("failed to open file in archive: %w", err)
}
defer func() {
_ = fileReader.Close()
}()
// // Open archived file for reading.
// fileReader, err := archiveFile.Open()
// if err != nil {
// return fmt.Errorf("failed to open file in archive: %w", err)
// }
// defer func() {
// _ = fileReader.Close()
// }()
// Open destination file for writing.
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, archiveFile.Mode())
if err != nil {
return fmt.Errorf("failed to open destination file %s: %w", dstPath, err)
}
defer func() {
_ = dstFile.Close()
}()
// // Open destination file for writing.
// dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, archiveFile.Mode())
// if err != nil {
// return fmt.Errorf("failed to open destination file %s: %w", dstPath, err)
// }
// defer func() {
// _ = dstFile.Close()
// }()
// Copy full file from archive to dst.
if _, err := io.CopyN(dstFile, fileReader, MaxUnpackSize); err != nil {
// EOF is expected here as the archive is likely smaller
// thane MaxUnpackSize
if errors.Is(err, io.EOF) {
return nil
}
return err
}
// // Copy full file from archive to dst.
// if _, err := io.CopyN(dstFile, fileReader, MaxUnpackSize); err != nil {
// // EOF is expected here as the archive is likely smaller
// // thane MaxUnpackSize
// if errors.Is(err, io.EOF) {
// return nil
// }
// return err
// }
return nil
}
// return nil
// }

View file

@ -1,359 +1,359 @@
package updater
import (
"context"
"fmt"
"net/http"
"os"
"path"
"path/filepath"
"strings"
// import (
// "context"
// "fmt"
// "net/http"
// "os"
// "path"
// "path/filepath"
// "strings"
"golang.org/x/exp/slices"
// "golang.org/x/exp/slices"
"github.com/safing/jess/filesig"
"github.com/safing/jess/lhash"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
)
// "github.com/safing/jess/filesig"
// "github.com/safing/jess/lhash"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// UpdateIndexes downloads all indexes. An error is only returned when all
// indexes fail to update.
func (reg *ResourceRegistry) UpdateIndexes(ctx context.Context) error {
var lastErr error
var anySuccess bool
// // UpdateIndexes downloads all indexes. An error is only returned when all
// // indexes fail to update.
// func (reg *ResourceRegistry) UpdateIndexes(ctx context.Context) error {
// var lastErr error
// var anySuccess bool
// Start registry operation.
reg.state.StartOperation(StateChecking)
defer reg.state.EndOperation()
// // Start registry operation.
// reg.state.StartOperation(StateChecking)
// defer reg.state.EndOperation()
client := &http.Client{}
for _, idx := range reg.getIndexes() {
if err := reg.downloadIndex(ctx, client, idx); err != nil {
lastErr = err
log.Warningf("%s: failed to update index %s: %s", reg.Name, idx.Path, err)
} else {
anySuccess = true
}
}
// client := &http.Client{}
// for _, idx := range reg.getIndexes() {
// if err := reg.downloadIndex(ctx, client, idx); err != nil {
// lastErr = err
// log.Warningf("%s: failed to update index %s: %s", reg.Name, idx.Path, err)
// } else {
// anySuccess = true
// }
// }
// If all indexes failed to update, fail.
if !anySuccess {
err := fmt.Errorf("failed to update all indexes, last error was: %w", lastErr)
reg.state.ReportUpdateCheck(nil, err)
return err
}
// // If all indexes failed to update, fail.
// if !anySuccess {
// err := fmt.Errorf("failed to update all indexes, last error was: %w", lastErr)
// reg.state.ReportUpdateCheck(nil, err)
// return err
// }
// Get pending resources and update status.
pendingResourceVersions, _ := reg.GetPendingDownloads(true, false)
reg.state.ReportUpdateCheck(
humanInfoFromResourceVersions(pendingResourceVersions),
nil,
)
// // Get pending resources and update status.
// pendingResourceVersions, _ := reg.GetPendingDownloads(true, false)
// reg.state.ReportUpdateCheck(
// humanInfoFromResourceVersions(pendingResourceVersions),
// nil,
// )
return nil
}
// return nil
// }
func (reg *ResourceRegistry) downloadIndex(ctx context.Context, client *http.Client, idx *Index) error {
var (
// Index.
indexErr error
indexData []byte
downloadURL string
// func (reg *ResourceRegistry) downloadIndex(ctx context.Context, client *http.Client, idx *Index) error {
// var (
// // Index.
// indexErr error
// indexData []byte
// downloadURL string
// Signature.
sigErr error
verifiedHash *lhash.LabeledHash
sigFileData []byte
verifOpts = reg.GetVerificationOptions(idx.Path)
)
// // Signature.
// sigErr error
// verifiedHash *lhash.LabeledHash
// sigFileData []byte
// verifOpts = reg.GetVerificationOptions(idx.Path)
// )
// Upgrade to v2 index if verification is enabled.
downloadIndexPath := idx.Path
if verifOpts != nil {
downloadIndexPath = strings.TrimSuffix(downloadIndexPath, baseIndexExtension) + v2IndexExtension
}
// // Upgrade to v2 index if verification is enabled.
// downloadIndexPath := idx.Path
// if verifOpts != nil {
// downloadIndexPath = strings.TrimSuffix(downloadIndexPath, baseIndexExtension) + v2IndexExtension
// }
// Download new index and signature.
for tries := range 3 {
// Index and signature need to be fetched together, so that they are
// fetched from the same source. One source should always have a matching
// index and signature. Backup sources may be behind a little.
// If the signature verification fails, another source should be tried.
// // Download new index and signature.
// for tries := range 3 {
// // Index and signature need to be fetched together, so that they are
// // fetched from the same source. One source should always have a matching
// // index and signature. Backup sources may be behind a little.
// // If the signature verification fails, another source should be tried.
// Get index data.
indexData, downloadURL, indexErr = reg.fetchData(ctx, client, downloadIndexPath, tries)
if indexErr != nil {
log.Debugf("%s: failed to fetch index %s: %s", reg.Name, downloadURL, indexErr)
continue
}
// // Get index data.
// indexData, downloadURL, indexErr = reg.fetchData(ctx, client, downloadIndexPath, tries)
// if indexErr != nil {
// log.Debugf("%s: failed to fetch index %s: %s", reg.Name, downloadURL, indexErr)
// continue
// }
// Get signature and verify it.
if verifOpts != nil {
verifiedHash, sigFileData, sigErr = reg.fetchAndVerifySigFile(
ctx, client,
verifOpts, downloadIndexPath+filesig.Extension, nil,
tries,
)
if sigErr != nil {
log.Debugf("%s: failed to verify signature of %s: %s", reg.Name, downloadURL, sigErr)
continue
}
// // Get signature and verify it.
// if verifOpts != nil {
// verifiedHash, sigFileData, sigErr = reg.fetchAndVerifySigFile(
// ctx, client,
// verifOpts, downloadIndexPath+filesig.Extension, nil,
// tries,
// )
// if sigErr != nil {
// log.Debugf("%s: failed to verify signature of %s: %s", reg.Name, downloadURL, sigErr)
// continue
// }
// Check if the index matches the verified hash.
if verifiedHash.Matches(indexData) {
log.Infof("%s: verified signature of %s", reg.Name, downloadURL)
} else {
sigErr = ErrIndexChecksumMismatch
log.Debugf("%s: checksum does not match file from %s", reg.Name, downloadURL)
continue
}
}
// // Check if the index matches the verified hash.
// if verifiedHash.Matches(indexData) {
// log.Infof("%s: verified signature of %s", reg.Name, downloadURL)
// } else {
// sigErr = ErrIndexChecksumMismatch
// log.Debugf("%s: checksum does not match file from %s", reg.Name, downloadURL)
// continue
// }
// }
break
}
if indexErr != nil {
return fmt.Errorf("failed to fetch index %s: %w", downloadIndexPath, indexErr)
}
if sigErr != nil {
return fmt.Errorf("failed to fetch or verify index %s signature: %w", downloadIndexPath, sigErr)
}
// break
// }
// if indexErr != nil {
// return fmt.Errorf("failed to fetch index %s: %w", downloadIndexPath, indexErr)
// }
// if sigErr != nil {
// return fmt.Errorf("failed to fetch or verify index %s signature: %w", downloadIndexPath, sigErr)
// }
// Parse the index file.
indexFile, err := ParseIndexFile(indexData, idx.Channel, idx.LastRelease)
if err != nil {
return fmt.Errorf("failed to parse index %s: %w", idx.Path, err)
}
// // Parse the index file.
// indexFile, err := ParseIndexFile(indexData, idx.Channel, idx.LastRelease)
// if err != nil {
// return fmt.Errorf("failed to parse index %s: %w", idx.Path, err)
// }
// Add index data to registry.
if len(indexFile.Releases) > 0 {
// Check if all resources are within the indexes' authority.
authoritativePath := path.Dir(idx.Path) + "/"
if authoritativePath == "./" {
// Fix path for indexes at the storage root.
authoritativePath = ""
}
cleanedData := make(map[string]string, len(indexFile.Releases))
for key, version := range indexFile.Releases {
if strings.HasPrefix(key, authoritativePath) {
cleanedData[key] = version
} else {
log.Warningf("%s: index %s oversteps it's authority by defining version for %s", reg.Name, idx.Path, key)
}
}
// // Add index data to registry.
// if len(indexFile.Releases) > 0 {
// // Check if all resources are within the indexes' authority.
// authoritativePath := path.Dir(idx.Path) + "/"
// if authoritativePath == "./" {
// // Fix path for indexes at the storage root.
// authoritativePath = ""
// }
// cleanedData := make(map[string]string, len(indexFile.Releases))
// for key, version := range indexFile.Releases {
// if strings.HasPrefix(key, authoritativePath) {
// cleanedData[key] = version
// } else {
// log.Warningf("%s: index %s oversteps it's authority by defining version for %s", reg.Name, idx.Path, key)
// }
// }
// add resources to registry
err = reg.AddResources(cleanedData, idx, false, true, idx.PreRelease)
if err != nil {
log.Warningf("%s: failed to add resources: %s", reg.Name, err)
}
} else {
log.Debugf("%s: index %s is empty", reg.Name, idx.Path)
}
// // add resources to registry
// err = reg.AddResources(cleanedData, idx, false, true, idx.PreRelease)
// if err != nil {
// log.Warningf("%s: failed to add resources: %s", reg.Name, err)
// }
// } else {
// log.Debugf("%s: index %s is empty", reg.Name, idx.Path)
// }
// Check if dest dir exists.
indexDir := filepath.FromSlash(path.Dir(idx.Path))
err = reg.storageDir.EnsureRelPath(indexDir)
if err != nil {
log.Warningf("%s: failed to ensure directory for updated index %s: %s", reg.Name, idx.Path, err)
}
// // Check if dest dir exists.
// indexDir := filepath.FromSlash(path.Dir(idx.Path))
// err = reg.storageDir.EnsureRelPath(indexDir)
// if err != nil {
// log.Warningf("%s: failed to ensure directory for updated index %s: %s", reg.Name, idx.Path, err)
// }
	// Index files must be readable by portmaster-start with user permissions in order to load the index.
err = os.WriteFile( //nolint:gosec
filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path)),
indexData, 0o0644,
)
if err != nil {
log.Warningf("%s: failed to save updated index %s: %s", reg.Name, idx.Path, err)
}
// // Index files must be readable by portmaster-staert with user permissions in order to load the index.
// err = os.WriteFile( //nolint:gosec
// filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path)),
// indexData, 0o0644,
// )
// if err != nil {
// log.Warningf("%s: failed to save updated index %s: %s", reg.Name, idx.Path, err)
// }
// Write signature file, if we have one.
if len(sigFileData) > 0 {
err = os.WriteFile( //nolint:gosec
filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path)+filesig.Extension),
sigFileData, 0o0644,
)
if err != nil {
log.Warningf("%s: failed to save updated index signature %s: %s", reg.Name, idx.Path+filesig.Extension, err)
}
}
// // Write signature file, if we have one.
// if len(sigFileData) > 0 {
// err = os.WriteFile( //nolint:gosec
// filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path)+filesig.Extension),
// sigFileData, 0o0644,
// )
// if err != nil {
// log.Warningf("%s: failed to save updated index signature %s: %s", reg.Name, idx.Path+filesig.Extension, err)
// }
// }
log.Infof("%s: updated index %s with %d entries", reg.Name, idx.Path, len(indexFile.Releases))
return nil
}
// log.Infof("%s: updated index %s with %d entries", reg.Name, idx.Path, len(indexFile.Releases))
// return nil
// }
// DownloadUpdates checks if updates are available and downloads updates of used components.
// If includeManual is set, indexes that are not auto-download are also checked.
// Download failures are logged and reported to the registry state, but do not
// abort the run; the function only returns an error when the tmp directory
// cannot be prepared.
func (reg *ResourceRegistry) DownloadUpdates(ctx context.Context, includeManual bool) error {
	// Start registry operation.
	reg.state.StartOperation(StateDownloading)
	defer reg.state.EndOperation()

	// Get pending updates.
	toUpdate, missingSigs := reg.GetPendingDownloads(includeManual, true)
	downloadDetailsResources := humanInfoFromResourceVersions(toUpdate)
	reg.state.UpdateOperationDetails(&StateDownloadingDetails{
		Resources: downloadDetailsResources,
	})

	// Nothing to update.
	if len(toUpdate) == 0 && len(missingSigs) == 0 {
		log.Infof("%s: everything up to date", reg.Name)
		return nil
	}

	// Check download dir.
	if err := reg.tmpDir.Ensure(); err != nil {
		return fmt.Errorf("could not prepare tmp directory for download: %w", err)
	}

	// Download updates.
	log.Infof("%s: starting to download %d updates", reg.Name, len(toUpdate))
	client := &http.Client{}
	// reportError records the last download failure so that it is surfaced
	// via reg.state.ReportDownloads at the end of this function.
	var reportError error

	for i, rv := range toUpdate {
		log.Infof(
			"%s: downloading update [%d/%d]: %s version %s",
			reg.Name,
			i+1, len(toUpdate),
			rv.resource.Identifier, rv.VersionNumber,
		)
		var err error
		// Retry up to three times; fetchFile applies its own backoff based on tries.
		for tries := range 3 {
			err = reg.fetchFile(ctx, client, rv, tries)
			if err == nil {
				// Update resource version state.
				rv.resource.Lock()
				rv.Available = true
				if rv.resource.VerificationOptions != nil {
					rv.SigAvailable = true
				}
				rv.resource.Unlock()

				break
			}
		}
		if err != nil {
			// Assign (do not shadow with :=) the outer reportError, so the
			// failure actually reaches ReportDownloads below.
			reportError = fmt.Errorf("failed to download %s version %s: %w", rv.resource.Identifier, rv.VersionNumber, err)
			log.Warningf("%s: %s", reg.Name, reportError)
		}

		reg.state.UpdateOperationDetails(&StateDownloadingDetails{
			Resources:    downloadDetailsResources,
			FinishedUpTo: i + 1,
		})
	}

	if len(missingSigs) > 0 {
		log.Infof("%s: downloading %d missing signatures", reg.Name, len(missingSigs))

		for _, rv := range missingSigs {
			var err error
			for tries := range 3 {
				err = reg.fetchMissingSig(ctx, client, rv, tries)
				if err == nil {
					// Update resource version state.
					rv.resource.Lock()
					rv.SigAvailable = true
					rv.resource.Unlock()

					break
				}
			}
			if err != nil {
				// Assign (do not shadow) the outer reportError - see above.
				reportError = fmt.Errorf("failed to download missing sig of %s version %s: %w", rv.resource.Identifier, rv.VersionNumber, err)
				log.Warningf("%s: %s", reg.Name, reportError)
			}
		}
	}

	reg.state.ReportDownloads(
		downloadDetailsResources,
		reportError,
	)
	log.Infof("%s: finished downloading updates", reg.Name)

	return nil
}
// return nil
// }
// DownloadUpdates checks if updates are available and downloads updates of used components.
// // DownloadUpdates checks if updates are available and downloads updates of used components.
// GetPendingDownloads returns the list of pending downloads.
// If manual is set, indexes with AutoDownload=false will be checked.
// If auto is set, indexes with AutoDownload=true will be checked.
func (reg *ResourceRegistry) GetPendingDownloads(manual, auto bool) (resources, sigs []*ResourceVersion) {
	reg.RLock()
	defer reg.RUnlock()

	// Collect files and signatures that still need to be fetched.
	var pendingFiles []*ResourceVersion
	var pendingSigs []*ResourceVersion

	for _, res := range reg.resources {
		func() {
			res.Lock()
			defer res.Unlock()

			// A resource can only be downloaded as part of an index.
			if res.Index == nil {
				return
			}
			// Only report resources whose index matches the requested report
			// mode: manual covers non-auto-download indexes, auto covers
			// auto-download indexes.
			matchesManual := manual && !res.Index.AutoDownload
			matchesAuto := auto && res.Index.AutoDownload
			if !matchesManual && !matchesAuto {
				return
			}

			// Only consider resources that are in use, were used in the past
			// (available locally), or are configured as mandatory updates.
			needed := res.inUse() ||
				res.available() ||
				utils.StringInSlice(reg.MandatoryUpdates, res.Identifier)
			if !needed {
				return
			}

			// Inspect the versions and queue whatever is missing for the
			// current release.
			for _, rv := range res.Versions {
				if !rv.CurrentRelease {
					// We are not interested in older releases.
					continue
				}
				if !rv.Available {
					// File not available locally, download!
					pendingFiles = append(pendingFiles, rv)
				} else if !rv.SigAvailable && res.VerificationOptions != nil {
					// Signature missing while verification is enabled, download it!
					pendingSigs = append(pendingSigs, rv)
				}
			}
		}()
	}

	// Return both lists sorted by resource identifier.
	sortByIdentifier := func(a, b *ResourceVersion) int {
		return strings.Compare(a.resource.Identifier, b.resource.Identifier)
	}
	slices.SortFunc(pendingFiles, sortByIdentifier)
	slices.SortFunc(pendingSigs, sortByIdentifier)

	return pendingFiles, pendingSigs
}
// return toUpdate, missingSigs
// }
// humanInfoFromResourceVersions renders each resource version as a
// human-readable "<identifier> v<version>" string for status reporting.
func humanInfoFromResourceVersions(resourceVersions []*ResourceVersion) []string {
	labels := make([]string, 0, len(resourceVersions))
	for _, rv := range resourceVersions {
		labels = append(labels, fmt.Sprintf("%s v%s", rv.resource.Identifier, rv.VersionNumber))
	}
	return labels
}
// return identifiers
// }

View file

@ -7,7 +7,6 @@ import (
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/service/intel/geoip"
"github.com/safing/portmaster/service/netenv"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/spn/access"
"github.com/safing/portmaster/spn/access/account"
"github.com/safing/portmaster/spn/captain"
@ -18,18 +17,19 @@ var portmasterStarted = time.Now()
func collectData() interface{} {
data := make(map[string]interface{})
// TODO(vladimir)
// Get data about versions.
versions := updates.GetSimpleVersions()
data["Updates"] = versions
data["Version"] = versions.Build.Version
numericVersion, err := MakeNumericVersion(versions.Build.Version)
if err != nil {
data["NumericVersion"] = &DataError{
Error: err,
}
} else {
data["NumericVersion"] = numericVersion
}
// versions := updates.GetSimpleVersions()
// data["Updates"] = versions
// data["Version"] = versions.Build.Version
// numericVersion, err := MakeNumericVersion(versions.Build.Version)
// if err != nil {
// data["NumericVersion"] = &DataError{
// Error: err,
// }
// } else {
// data["NumericVersion"] = numericVersion
// }
// Get data about install.
installInfo, err := GetInstallInfo()

View file

@ -8,6 +8,7 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
type Broadcasts struct {
@ -91,4 +92,6 @@ func New(instance instance) (*Broadcasts, error) {
return module, nil
}
type instance interface{}
type instance interface {
Updates() *updates.Updates
}

View file

@ -18,7 +18,6 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
const (
@ -68,7 +67,7 @@ type BroadcastNotification struct {
func broadcastNotify(ctx *mgr.WorkerCtx) error {
// Get broadcast notifications file, load it from disk and parse it.
broadcastsResource, err := updates.GetFile(broadcastsResourcePath)
broadcastsResource, err := module.instance.Updates().GetFile(broadcastsResourcePath)
if err != nil {
return fmt.Errorf("failed to get broadcast notifications update: %w", err)
}

View file

@ -149,7 +149,7 @@ func debugInfo(ar *api.Request) (data []byte, err error) {
config.AddToDebugInfo(di)
// Detailed information.
updates.AddToDebugInfo(di)
// TODO(vladimir): updates.AddToDebugInfo(di)
compat.AddToDebugInfo(di)
module.instance.AddWorkerInfoToDebugInfo(di)
di.AddGoroutineStack()

View file

@ -14,15 +14,14 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
)
const (
baseListFilePath = "intel/lists/base.dsdl"
intermediateListFilePath = "intel/lists/intermediate.dsdl"
urgentListFilePath = "intel/lists/urgent.dsdl"
listIndexFilePath = "intel/lists/index.dsd"
baseListFilePath = "base.dsdl"
intermediateListFilePath = "intermediate.dsdl"
urgentListFilePath = "urgent.dsdl"
listIndexFilePath = "index.dsd"
)
// default bloomfilter element sizes (estimated).
@ -40,9 +39,9 @@ var (
filterListLock sync.RWMutex
// Updater files for tracking upgrades.
baseFile *updater.File
intermediateFile *updater.File
urgentFile *updater.File
baseFile *registry.File
intermediateFile *registry.File
urgentFile *registry.File
filterListsLoaded chan struct{}
)
@ -56,11 +55,10 @@ var cache = database.NewInterface(&database.Options{
// getFileFunc is the function used to get a file from
// the updater. It's basically updates.GetFile and used
// for unit testing.
type getFileFunc func(string) (*updater.File, error)
// getFile points to updates.GetFile but may be set to
// something different during unit testing.
var getFile getFileFunc = updates.GetFile
// var getFile getFileFunc = registry.GetFile
func init() {
filterListsLoaded = make(chan struct{})
@ -79,7 +77,7 @@ func isLoaded() bool {
// processListFile opens the latest version of file and decodes it's DSDL
// content. It calls processEntry for each decoded filterlists entry.
func processListFile(ctx context.Context, filter *scopedBloom, file *updater.File) error {
func processListFile(ctx context.Context, filter *scopedBloom, file *registry.File) error {
f, err := os.Open(file.Path())
if err != nil {
return err

View file

@ -4,14 +4,12 @@ import (
"errors"
"fmt"
"os"
"strings"
"sync"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
"github.com/safing/structures/dsd"
)
@ -164,7 +162,7 @@ func getListIndexFromCache() (*ListIndexFile, error) {
var (
// listIndexUpdate must only be used by updateListIndex.
listIndexUpdate *updater.File
listIndexUpdate *registry.File
listIndexUpdateLock sync.Mutex
)
@ -177,24 +175,24 @@ func updateListIndex() error {
case listIndexUpdate == nil:
// This is the first time this function is run, get updater file for index.
var err error
listIndexUpdate, err = updates.GetFile(listIndexFilePath)
listIndexUpdate, err = module.instance.Updates().GetFile(listIndexFilePath)
if err != nil {
return err
}
// Check if the version in the cache is current.
index, err := getListIndexFromCache()
_, err = getListIndexFromCache()
switch {
case errors.Is(err, database.ErrNotFound):
log.Info("filterlists: index not in cache, starting update")
case err != nil:
log.Warningf("filterlists: failed to load index from cache, starting update: %s", err)
case !listIndexUpdate.EqualsVersion(strings.TrimPrefix(index.Version, "v")):
log.Infof(
"filterlists: index from cache is outdated, starting update (%s != %s)",
strings.TrimPrefix(index.Version, "v"),
listIndexUpdate.Version(),
)
// case !listIndexUpdate.EqualsVersion(strings.TrimPrefix(index.Version, "v")):
// log.Infof(
// "filterlists: index from cache is outdated, starting update (%s != %s)",
// strings.TrimPrefix(index.Version, "v"),
// listIndexUpdate.Version(),
// )
default:
// List is in cache and current, there is nothing to do.
log.Debug("filterlists: index is up to date")
@ -204,8 +202,8 @@ func updateListIndex() error {
return nil
}
case listIndexUpdate.UpgradeAvailable():
log.Info("filterlists: index update available, starting update")
// case listIndexUpdate.UpgradeAvailable():
// log.Info("filterlists: index update available, starting update")
default:
// Index is loaded and no update is available, there is nothing to do.
return nil

View file

@ -13,8 +13,8 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/registry"
)
var updateInProgress = abool.New()
@ -174,51 +174,51 @@ func removeAllObsoleteFilterEntries(wc *mgr.WorkerCtx) error {
// getUpgradableFiles returns a slice of filterlists files
// that should be updated. The files MUST be updated and
// processed in the returned order!
func getUpgradableFiles() ([]*updater.File, error) {
var updateOrder []*updater.File
func getUpgradableFiles() ([]*registry.File, error) {
var updateOrder []*registry.File
cacheDBInUse := isLoaded()
// cacheDBInUse := isLoaded()
if baseFile == nil || baseFile.UpgradeAvailable() || !cacheDBInUse {
var err error
baseFile, err = getFile(baseListFilePath)
if err != nil {
return nil, err
}
log.Tracef("intel/filterlists: base file needs update, selected version %s", baseFile.Version())
updateOrder = append(updateOrder, baseFile)
}
// if baseFile == nil || !cacheDBInUse { // TODO(vladimir): || baseFile.UpgradeAvailable()
// var err error
// baseFile, err = module.instance.Updates().GetFile(baseListFilePath)
// if err != nil {
// return nil, err
// }
// log.Tracef("intel/filterlists: base file needs update, selected version %s", baseFile.Version())
// updateOrder = append(updateOrder, baseFile)
// }
if intermediateFile == nil || intermediateFile.UpgradeAvailable() || !cacheDBInUse {
var err error
intermediateFile, err = getFile(intermediateListFilePath)
if err != nil && !errors.Is(err, updater.ErrNotFound) {
return nil, err
}
// if intermediateFile == nil || intermediateFile.UpgradeAvailable() || !cacheDBInUse {
// var err error
// intermediateFile, err = getFile(intermediateListFilePath)
// if err != nil && !errors.Is(err, updater.ErrNotFound) {
// return nil, err
// }
if err == nil {
log.Tracef("intel/filterlists: intermediate file needs update, selected version %s", intermediateFile.Version())
updateOrder = append(updateOrder, intermediateFile)
}
}
// if err == nil {
// log.Tracef("intel/filterlists: intermediate file needs update, selected version %s", intermediateFile.Version())
// updateOrder = append(updateOrder, intermediateFile)
// }
// }
if urgentFile == nil || urgentFile.UpgradeAvailable() || !cacheDBInUse {
var err error
urgentFile, err = getFile(urgentListFilePath)
if err != nil && !errors.Is(err, updater.ErrNotFound) {
return nil, err
}
// if urgentFile == nil || urgentFile.UpgradeAvailable() || !cacheDBInUse {
// var err error
// urgentFile, err = getFile(urgentListFilePath)
// if err != nil && !errors.Is(err, updater.ErrNotFound) {
// return nil, err
// }
if err == nil {
log.Tracef("intel/filterlists: urgent file needs update, selected version %s", urgentFile.Version())
updateOrder = append(updateOrder, urgentFile)
}
}
// if err == nil {
// log.Tracef("intel/filterlists: urgent file needs update, selected version %s", urgentFile.Version())
// updateOrder = append(updateOrder, urgentFile)
// }
// }
return resolveUpdateOrder(updateOrder)
}
func resolveUpdateOrder(updateOrder []*updater.File) ([]*updater.File, error) {
func resolveUpdateOrder(updateOrder []*registry.File) ([]*registry.File, error) {
// sort the update order by ascending version
sort.Sort(byAscVersion(updateOrder))
log.Tracef("intel/filterlists: order of updates: %v", updateOrder)
@ -258,7 +258,7 @@ func resolveUpdateOrder(updateOrder []*updater.File) ([]*updater.File, error) {
return updateOrder[startAtIdx:], nil
}
type byAscVersion []*updater.File
type byAscVersion []*registry.File
func (fs byAscVersion) Len() int { return len(fs) }

View file

@ -8,9 +8,8 @@ import (
maxminddb "github.com/oschwald/maxminddb-golang"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
)
var worker *updateWorker
@ -22,13 +21,13 @@ func init() {
}
const (
v4MMDBResource = "intel/geoip/geoipv4.mmdb.gz"
v6MMDBResource = "intel/geoip/geoipv6.mmdb.gz"
v4MMDBResource = "geoipv4.mmdb"
v6MMDBResource = "geoipv6.mmdb"
)
type geoIPDB struct {
*maxminddb.Reader
file *updater.File
file *registry.File
}
// updateBroadcaster stores a geoIPDB and provides synchronized
@ -47,7 +46,7 @@ func (ub *updateBroadcaster) NeedsUpdate() bool {
ub.rw.RLock()
defer ub.rw.RUnlock()
return ub.db == nil || ub.db.file.UpgradeAvailable()
return ub.db == nil // TODO(vladimir) is this needed: || ub.db.file.UpgradeAvailable()
}
// ReplaceDatabase replaces (or initially sets) the mmdb database.
@ -181,12 +180,12 @@ func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error {
func getGeoIPDB(resource string) (*geoIPDB, error) {
log.Debugf("geoip: opening database %s", resource)
file, unpackedPath, err := openAndUnpack(resource)
file, err := open(resource)
if err != nil {
return nil, err
}
reader, err := maxminddb.Open(unpackedPath)
reader, err := maxminddb.Open(file.Path())
if err != nil {
return nil, fmt.Errorf("failed to open: %w", err)
}
@ -198,16 +197,16 @@ func getGeoIPDB(resource string) (*geoIPDB, error) {
}, nil
}
func openAndUnpack(resource string) (*updater.File, string, error) {
f, err := updates.GetFile(resource)
func open(resource string) (*registry.File, error) {
f, err := module.instance.Updates().GetFile(resource)
if err != nil {
return nil, "", fmt.Errorf("getting file: %w", err)
return nil, fmt.Errorf("getting file: %w", err)
}
unpacked, err := f.Unpack(".gz", updater.UnpackGZIP)
if err != nil {
return nil, "", fmt.Errorf("unpacking file: %w", err)
}
// unpacked, err := f.Unpack(".gz", updater.UnpackGZIP)
// if err != nil {
// return nil, "", fmt.Errorf("unpacking file: %w", err)
// }
return f, unpacked, nil
return f, nil
}

View file

@ -8,6 +8,7 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
// Event Names.
@ -105,4 +106,6 @@ func New(instance instance) (*NetEnv, error) {
return module, nil
}
type instance interface{}
type instance interface {
Updates() *updates.Updates
}

View file

@ -17,7 +17,6 @@ import (
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/network/netutils"
"github.com/safing/portmaster/service/updates"
)
// OnlineStatus represent a state of connectivity to the Internet.
@ -221,7 +220,7 @@ func updateOnlineStatus(status OnlineStatus, portalURL *url.URL, comment string)
// Trigger update check when coming (semi) online.
if Online() {
_ = updates.TriggerUpdate(false, false)
module.instance.Updates().EventResourcesUpdated.Submit(struct{}{})
}
}
}

View file

@ -16,7 +16,6 @@ import (
"github.com/safing/portmaster/service/process"
"github.com/safing/portmaster/service/resolver"
"github.com/safing/portmaster/service/status"
"github.com/safing/portmaster/service/updates"
)
func registerAPIEndpoints() error {
@ -94,7 +93,7 @@ func debugInfo(ar *api.Request) (data []byte, err error) {
config.AddToDebugInfo(di)
// Detailed information.
updates.AddToDebugInfo(di)
// TODO(vladimir): updates.AddToDebugInfo(di)
// compat.AddToDebugInfo(di) // TODO: Cannot use due to interception import requirement which we don't want for SPN Hubs.
di.AddGoroutineStack()

View file

@ -8,6 +8,7 @@ import (
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
func prep() error {
@ -56,7 +57,10 @@ func (ui *UI) Stop() error {
return nil
}
var shimLoaded atomic.Bool
var (
shimLoaded atomic.Bool
module *UI
)
// New returns a new UI module.
func New(instance instance) (*UI, error) {
@ -64,7 +68,7 @@ func New(instance instance) (*UI, error) {
return nil, errors.New("only one instance allowed")
}
m := mgr.New("UI")
module := &UI{
module = &UI{
mgr: m,
instance: instance,
}
@ -78,4 +82,5 @@ func New(instance instance) (*UI, error) {
type instance interface {
API() *api.API
Updates() *updates.Updates
}

View file

@ -15,9 +15,8 @@ import (
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
)
var (
@ -92,9 +91,9 @@ func (bs *archiveServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// get file from update system
zipFile, err := updates.GetFile(fmt.Sprintf("ui/modules/%s.zip", moduleName))
zipFile, err := module.instance.Updates().GetFile(fmt.Sprintf("%s.zip", moduleName))
if err != nil {
if errors.Is(err, updater.ErrNotFound) {
if errors.Is(err, registry.ErrNotFound) {
log.Tracef("ui: requested module %s does not exist", moduleName)
http.Error(w, err.Error(), http.StatusNotFound)
} else {

View file

@ -1,161 +1,161 @@
package updates
import (
"bytes"
"io"
"net/http"
"os"
"path/filepath"
"strings"
// "bytes"
// "io"
// "net/http"
// "os"
// "path/filepath"
// "strings"
"github.com/ghodss/yaml"
// "github.com/ghodss/yaml"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
// "github.com/safing/portmaster/base/api"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
)
const (
apiPathCheckForUpdates = "updates/check"
)
func registerAPIEndpoints() error {
if err := api.RegisterEndpoint(api.Endpoint{
Name: "Check for Updates",
Description: "Checks if new versions are available. If automatic updates are enabled, they are also downloaded and applied.",
Parameters: []api.Parameter{{
Method: http.MethodPost,
Field: "download",
Value: "",
Description: "Force downloading and applying of all updates, regardless of auto-update settings.",
}},
Path: apiPathCheckForUpdates,
Write: api.PermitUser,
ActionFunc: func(r *api.Request) (msg string, err error) {
// Check if we should also download regardless of settings.
downloadAll := r.URL.Query().Has("download")
// func registerAPIEndpoints() error {
// if err := api.RegisterEndpoint(api.Endpoint{
// Name: "Check for Updates",
// Description: "Checks if new versions are available. If automatic updates are enabled, they are also downloaded and applied.",
// Parameters: []api.Parameter{{
// Method: http.MethodPost,
// Field: "download",
// Value: "",
// Description: "Force downloading and applying of all updates, regardless of auto-update settings.",
// }},
// Path: apiPathCheckForUpdates,
// Write: api.PermitUser,
// ActionFunc: func(r *api.Request) (msg string, err error) {
// // Check if we should also download regardless of settings.
// downloadAll := r.URL.Query().Has("download")
// Trigger update task.
err = TriggerUpdate(true, downloadAll)
if err != nil {
return "", err
}
// // Trigger update task.
// err = TriggerUpdate(true, downloadAll)
// if err != nil {
// return "", err
// }
// Report how we triggered.
if downloadAll {
return "downloading all updates...", nil
}
return "checking for updates...", nil
},
}); err != nil {
return err
}
// // Report how we triggered.
// if downloadAll {
// return "downloading all updates...", nil
// }
// return "checking for updates...", nil
// },
// }); err != nil {
// return err
// }
if err := api.RegisterEndpoint(api.Endpoint{
Name: "Get Resource",
Description: "Returns the requested resource from the udpate system",
Path: `updates/get/{identifier:[A-Za-z0-9/\.\-_]{1,255}}`,
Read: api.PermitUser,
ReadMethod: http.MethodGet,
HandlerFunc: func(w http.ResponseWriter, r *http.Request) {
// Get identifier from URL.
var identifier string
if ar := api.GetAPIRequest(r); ar != nil {
identifier = ar.URLVars["identifier"]
}
if identifier == "" {
http.Error(w, "no resource speicified", http.StatusBadRequest)
return
}
// if err := api.RegisterEndpoint(api.Endpoint{
// Name: "Get Resource",
// Description: "Returns the requested resource from the udpate system",
// Path: `updates/get/{identifier:[A-Za-z0-9/\.\-_]{1,255}}`,
// Read: api.PermitUser,
// ReadMethod: http.MethodGet,
// HandlerFunc: func(w http.ResponseWriter, r *http.Request) {
// // Get identifier from URL.
// var identifier string
// if ar := api.GetAPIRequest(r); ar != nil {
// identifier = ar.URLVars["identifier"]
// }
// if identifier == "" {
// http.Error(w, "no resource speicified", http.StatusBadRequest)
// return
// }
// Get resource.
resource, err := registry.GetFile(identifier)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
// // Get resource.
// resource, err := registry.GetFile(identifier)
// if err != nil {
// http.Error(w, err.Error(), http.StatusNotFound)
// return
// }
// Open file for reading.
file, err := os.Open(resource.Path())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close() //nolint:errcheck,gosec
// // Open file for reading.
// file, err := os.Open(resource.Path())
// if err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// defer file.Close() //nolint:errcheck,gosec
// Assign file to reader
var reader io.Reader = file
// // Assign file to reader
// var reader io.Reader = file
// Add version to header.
w.Header().Set("Resource-Version", resource.Version())
// // Add version to header.
// w.Header().Set("Resource-Version", resource.Version())
// Set Content-Type.
contentType, _ := utils.MimeTypeByExtension(filepath.Ext(resource.Path()))
w.Header().Set("Content-Type", contentType)
// // Set Content-Type.
// contentType, _ := utils.MimeTypeByExtension(filepath.Ext(resource.Path()))
// w.Header().Set("Content-Type", contentType)
// Check if the content type may be returned.
accept := r.Header.Get("Accept")
if accept != "" {
mimeTypes := strings.Split(accept, ",")
// First, clean mime types.
for i, mimeType := range mimeTypes {
mimeType = strings.TrimSpace(mimeType)
mimeType, _, _ = strings.Cut(mimeType, ";")
mimeTypes[i] = mimeType
}
// Second, check if we may return anything.
var acceptsAny bool
for _, mimeType := range mimeTypes {
switch mimeType {
case "*", "*/*":
acceptsAny = true
}
}
// Third, check if we can convert.
if !acceptsAny {
var converted bool
sourceType, _, _ := strings.Cut(contentType, ";")
findConvertiblePair:
for _, mimeType := range mimeTypes {
switch {
case sourceType == "application/yaml" && mimeType == "application/json":
yamlData, err := io.ReadAll(reader)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonData, err := yaml.YAMLToJSON(yamlData)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
reader = bytes.NewReader(jsonData)
converted = true
break findConvertiblePair
}
}
// // Check if the content type may be returned.
// accept := r.Header.Get("Accept")
// if accept != "" {
// mimeTypes := strings.Split(accept, ",")
// // First, clean mime types.
// for i, mimeType := range mimeTypes {
// mimeType = strings.TrimSpace(mimeType)
// mimeType, _, _ = strings.Cut(mimeType, ";")
// mimeTypes[i] = mimeType
// }
// // Second, check if we may return anything.
// var acceptsAny bool
// for _, mimeType := range mimeTypes {
// switch mimeType {
// case "*", "*/*":
// acceptsAny = true
// }
// }
// // Third, check if we can convert.
// if !acceptsAny {
// var converted bool
// sourceType, _, _ := strings.Cut(contentType, ";")
// findConvertiblePair:
// for _, mimeType := range mimeTypes {
// switch {
// case sourceType == "application/yaml" && mimeType == "application/json":
// yamlData, err := io.ReadAll(reader)
// if err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// jsonData, err := yaml.YAMLToJSON(yamlData)
// if err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// reader = bytes.NewReader(jsonData)
// converted = true
// break findConvertiblePair
// }
// }
// If we could not convert to acceptable format, return an error.
if !converted {
http.Error(w, "conversion to requested format not supported", http.StatusNotAcceptable)
return
}
}
}
// // If we could not convert to acceptable format, return an error.
// if !converted {
// http.Error(w, "conversion to requested format not supported", http.StatusNotAcceptable)
// return
// }
// }
// }
// Write file.
w.WriteHeader(http.StatusOK)
if r.Method != http.MethodHead {
_, err = io.Copy(w, reader)
if err != nil {
log.Errorf("updates: failed to serve resource file: %s", err)
return
}
}
},
}); err != nil {
return err
}
// // Write file.
// w.WriteHeader(http.StatusOK)
// if r.Method != http.MethodHead {
// _, err = io.Copy(w, reader)
// if err != nil {
// log.Errorf("updates: failed to serve resource file: %s", err)
// return
// }
// }
// },
// }); err != nil {
// return err
// }
return nil
}
// return nil
// }

View file

@ -4,9 +4,9 @@ import (
"github.com/tevino/abool"
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/service/mgr"
// "github.com/safing/portmaster/service/updates/helper"
)
const cfgDevModeKey = "core/devMode"
@ -27,152 +27,152 @@ var (
forceDownload = abool.New()
)
func registerConfig() error {
err := config.Register(&config.Option{
Name: "Release Channel",
Key: helper.ReleaseChannelKey,
Description: `Use "Stable" for the best experience. The "Beta" channel will have the newest features and fixes, but may also break and cause interruption. Use others only temporarily and when instructed.`,
OptType: config.OptTypeString,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: true,
DefaultValue: helper.ReleaseChannelStable,
PossibleValues: []config.PossibleValue{
{
Name: "Stable",
Description: "Production releases.",
Value: helper.ReleaseChannelStable,
},
{
Name: "Beta",
Description: "Production releases for testing new features that may break and cause interruption.",
Value: helper.ReleaseChannelBeta,
},
{
Name: "Support",
Description: "Support releases or version changes for troubleshooting. Only use temporarily and when instructed.",
Value: helper.ReleaseChannelSupport,
},
{
Name: "Staging",
Description: "Dangerous development releases for testing random things and experimenting. Only use temporarily and when instructed.",
Value: helper.ReleaseChannelStaging,
},
},
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -4,
config.DisplayHintAnnotation: config.DisplayHintOneOf,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
// func registerConfig() error {
// err := config.Register(&config.Option{
// Name: "Release Channel",
// Key: helper.ReleaseChannelKey,
// Description: `Use "Stable" for the best experience. The "Beta" channel will have the newest features and fixes, but may also break and cause interruption. Use others only temporarily and when instructed.`,
// OptType: config.OptTypeString,
// ExpertiseLevel: config.ExpertiseLevelExpert,
// ReleaseLevel: config.ReleaseLevelStable,
// RequiresRestart: true,
// DefaultValue: helper.ReleaseChannelStable,
// PossibleValues: []config.PossibleValue{
// {
// Name: "Stable",
// Description: "Production releases.",
// Value: helper.ReleaseChannelStable,
// },
// {
// Name: "Beta",
// Description: "Production releases for testing new features that may break and cause interruption.",
// Value: helper.ReleaseChannelBeta,
// },
// {
// Name: "Support",
// Description: "Support releases or version changes for troubleshooting. Only use temporarily and when instructed.",
// Value: helper.ReleaseChannelSupport,
// },
// {
// Name: "Staging",
// Description: "Dangerous development releases for testing random things and experimenting. Only use temporarily and when instructed.",
// Value: helper.ReleaseChannelStaging,
// },
// },
// Annotations: config.Annotations{
// config.DisplayOrderAnnotation: -4,
// config.DisplayHintAnnotation: config.DisplayHintOneOf,
// config.CategoryAnnotation: "Updates",
// },
// })
// if err != nil {
// return err
// }
err = config.Register(&config.Option{
Name: "Automatic Software Updates",
Key: enableSoftwareUpdatesKey,
Description: "Automatically check for and download software updates. This does not include intelligence data updates.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -12,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
// err = config.Register(&config.Option{
// Name: "Automatic Software Updates",
// Key: enableSoftwareUpdatesKey,
// Description: "Automatically check for and download software updates. This does not include intelligence data updates.",
// OptType: config.OptTypeBool,
// ExpertiseLevel: config.ExpertiseLevelExpert,
// ReleaseLevel: config.ReleaseLevelStable,
// RequiresRestart: false,
// DefaultValue: true,
// Annotations: config.Annotations{
// config.DisplayOrderAnnotation: -12,
// config.CategoryAnnotation: "Updates",
// },
// })
// if err != nil {
// return err
// }
err = config.Register(&config.Option{
Name: "Automatic Intelligence Data Updates",
Key: enableIntelUpdatesKey,
Description: "Automatically check for and download intelligence data updates. This includes filter lists, geo-ip data, and more. Does not include software updates.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -11,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
// err = config.Register(&config.Option{
// Name: "Automatic Intelligence Data Updates",
// Key: enableIntelUpdatesKey,
// Description: "Automatically check for and download intelligence data updates. This includes filter lists, geo-ip data, and more. Does not include software updates.",
// OptType: config.OptTypeBool,
// ExpertiseLevel: config.ExpertiseLevelExpert,
// ReleaseLevel: config.ReleaseLevelStable,
// RequiresRestart: false,
// DefaultValue: true,
// Annotations: config.Annotations{
// config.DisplayOrderAnnotation: -11,
// config.CategoryAnnotation: "Updates",
// },
// })
// if err != nil {
// return err
// }
return nil
}
// return nil
// }
func initConfig() {
releaseChannel = config.Concurrent.GetAsString(helper.ReleaseChannelKey, helper.ReleaseChannelStable)
initialReleaseChannel = releaseChannel()
previousReleaseChannel = releaseChannel()
// func initConfig() {
// releaseChannel = config.Concurrent.GetAsString(helper.ReleaseChannelKey, helper.ReleaseChannelStable)
// initialReleaseChannel = releaseChannel()
// previousReleaseChannel = releaseChannel()
enableSoftwareUpdates = config.Concurrent.GetAsBool(enableSoftwareUpdatesKey, true)
enableIntelUpdates = config.Concurrent.GetAsBool(enableIntelUpdatesKey, true)
softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
intelUpdatesCurrentlyEnabled = enableIntelUpdates()
// enableSoftwareUpdates = config.Concurrent.GetAsBool(enableSoftwareUpdatesKey, true)
// enableIntelUpdates = config.Concurrent.GetAsBool(enableIntelUpdatesKey, true)
// softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
// intelUpdatesCurrentlyEnabled = enableIntelUpdates()
devMode = config.Concurrent.GetAsBool(cfgDevModeKey, false)
previousDevMode = devMode()
}
// devMode = config.Concurrent.GetAsBool(cfgDevModeKey, false)
// previousDevMode = devMode()
// }
func updateRegistryConfig(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
changed := false
// func updateRegistryConfig(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// changed := false
if enableSoftwareUpdates() != softwareUpdatesCurrentlyEnabled {
softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
changed = true
}
// if enableSoftwareUpdates() != softwareUpdatesCurrentlyEnabled {
// softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
// changed = true
// }
if enableIntelUpdates() != intelUpdatesCurrentlyEnabled {
intelUpdatesCurrentlyEnabled = enableIntelUpdates()
changed = true
}
// if enableIntelUpdates() != intelUpdatesCurrentlyEnabled {
// intelUpdatesCurrentlyEnabled = enableIntelUpdates()
// changed = true
// }
if devMode() != previousDevMode {
registry.SetDevMode(devMode())
previousDevMode = devMode()
changed = true
}
// if devMode() != previousDevMode {
// registry.SetDevMode(devMode())
// previousDevMode = devMode()
// changed = true
// }
if releaseChannel() != previousReleaseChannel {
previousReleaseChannel = releaseChannel()
changed = true
}
// if releaseChannel() != previousReleaseChannel {
// previousReleaseChannel = releaseChannel()
// changed = true
// }
if changed {
// Update indexes based on new settings.
warning := helper.SetIndexes(
registry,
releaseChannel(),
true,
softwareUpdatesCurrentlyEnabled,
intelUpdatesCurrentlyEnabled,
)
if warning != nil {
log.Warningf("updates: %s", warning)
}
// if changed {
// // Update indexes based on new settings.
// warning := helper.SetIndexes(
// registry,
// releaseChannel(),
// true,
// softwareUpdatesCurrentlyEnabled,
// intelUpdatesCurrentlyEnabled,
// )
// if warning != nil {
// log.Warningf("updates: %s", warning)
// }
// Select versions depending on new indexes and modes.
registry.SelectVersions()
module.EventVersionsUpdated.Submit(struct{}{})
// // Select versions depending on new indexes and modes.
// registry.SelectVersions()
// module.EventVersionsUpdated.Submit(struct{}{})
if softwareUpdatesCurrentlyEnabled || intelUpdatesCurrentlyEnabled {
module.states.Clear()
if err := TriggerUpdate(true, false); err != nil {
log.Warningf("updates: failed to trigger update: %s", err)
}
log.Infof("updates: automatic updates are now enabled")
} else {
log.Warningf("updates: automatic updates are now completely disabled")
}
}
// if softwareUpdatesCurrentlyEnabled || intelUpdatesCurrentlyEnabled {
// module.states.Clear()
// if err := TriggerUpdate(true, false); err != nil {
// log.Warningf("updates: failed to trigger update: %s", err)
// }
// log.Infof("updates: automatic updates are now enabled")
// } else {
// log.Warningf("updates: automatic updates are now completely disabled")
// }
// }
return false, nil
}
// return false, nil
// }

View file

@ -1,238 +1,237 @@
package updates
import (
"fmt"
"sort"
"strings"
"sync"
// import (
// "fmt"
// "sort"
// "sync"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/base/utils/debug"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
)
// "github.com/safing/portmaster/base/database/record"
// "github.com/safing/portmaster/base/info"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/updater"
// "github.com/safing/portmaster/base/utils/debug"
// "github.com/safing/portmaster/service/mgr"
// "github.com/safing/portmaster/service/updates/helper"
// )
const (
// versionsDBKey is the database key for update version information.
versionsDBKey = "core:status/versions"
// const (
// // versionsDBKey is the database key for update version information.
// versionsDBKey = "core:status/versions"
// versionsDBKey is the database key for simple update version information.
simpleVersionsDBKey = "core:status/simple-versions"
// // versionsDBKey is the database key for simple update version information.
// simpleVersionsDBKey = "core:status/simple-versions"
// updateStatusDBKey is the database key for update status information.
updateStatusDBKey = "core:status/updates"
)
// // updateStatusDBKey is the database key for update status information.
// updateStatusDBKey = "core:status/updates"
// )
// Versions holds update versions and status information.
type Versions struct {
record.Base
sync.Mutex
// // Versions holds update versions and status information.
// type Versions struct {
// record.Base
// sync.Mutex
Core *info.Info
Resources map[string]*updater.Resource
Channel string
Beta bool
Staging bool
}
// Core *info.Info
// Resources map[string]*updater.Resource
// Channel string
// Beta bool
// Staging bool
// }
// SimpleVersions holds simplified update versions and status information.
type SimpleVersions struct {
record.Base
sync.Mutex
// // SimpleVersions holds simplified update versions and status information.
// type SimpleVersions struct {
// record.Base
// sync.Mutex
Build *info.Info
Resources map[string]*SimplifiedResourceVersion
Channel string
}
// Build *info.Info
// Resources map[string]*SimplifiedResourceVersion
// Channel string
// }
// SimplifiedResourceVersion holds version information about one resource.
type SimplifiedResourceVersion struct {
Version string
}
// // SimplifiedResourceVersion holds version information about one resource.
// type SimplifiedResourceVersion struct {
// Version string
// }
// UpdateStateExport is a wrapper to export the updates state.
type UpdateStateExport struct {
record.Base
sync.Mutex
// // UpdateStateExport is a wrapper to export the updates state.
// type UpdateStateExport struct {
// record.Base
// sync.Mutex
*updater.UpdateState
}
// *updater.UpdateState
// }
// GetVersions returns the update versions and status information.
// Resources must be locked when accessed.
func GetVersions() *Versions {
return &Versions{
Core: info.GetInfo(),
Resources: registry.Export(),
Channel: initialReleaseChannel,
Beta: initialReleaseChannel == helper.ReleaseChannelBeta,
Staging: initialReleaseChannel == helper.ReleaseChannelStaging,
}
}
// // GetVersions returns the update versions and status information.
// // Resources must be locked when accessed.
// func GetVersions() *Versions {
// return &Versions{
// Core: info.GetInfo(),
// Resources: nil,
// Channel: initialReleaseChannel,
// Beta: initialReleaseChannel == helper.ReleaseChannelBeta,
// Staging: initialReleaseChannel == helper.ReleaseChannelStaging,
// }
// }
// GetSimpleVersions returns the simplified update versions and status information.
func GetSimpleVersions() *SimpleVersions {
// Fill base info.
v := &SimpleVersions{
Build: info.GetInfo(),
Resources: make(map[string]*SimplifiedResourceVersion),
Channel: initialReleaseChannel,
}
// // GetSimpleVersions returns the simplified update versions and status information.
// func GetSimpleVersions() *SimpleVersions {
// // Fill base info.
// v := &SimpleVersions{
// Build: info.GetInfo(),
// Resources: make(map[string]*SimplifiedResourceVersion),
// Channel: initialReleaseChannel,
// }
// Iterate through all versions and add version info.
for id, resource := range registry.Export() {
func() {
resource.Lock()
defer resource.Unlock()
// // Iterate through all versions and add version info.
// // for id, resource := range registry.Export() {
// // func() {
// // resource.Lock()
// // defer resource.Unlock()
// Get current in-used or selected version.
var rv *updater.ResourceVersion
switch {
case resource.ActiveVersion != nil:
rv = resource.ActiveVersion
case resource.SelectedVersion != nil:
rv = resource.SelectedVersion
}
// // // Get current in-used or selected version.
// // var rv *updater.ResourceVersion
// // switch {
// // case resource.ActiveVersion != nil:
// // rv = resource.ActiveVersion
// // case resource.SelectedVersion != nil:
// // rv = resource.SelectedVersion
// // }
// Get information from resource.
if rv != nil {
v.Resources[id] = &SimplifiedResourceVersion{
Version: rv.VersionNumber,
}
}
}()
}
// // // Get information from resource.
// // if rv != nil {
// // v.Resources[id] = &SimplifiedResourceVersion{
// // Version: rv.VersionNumber,
// // }
// // }
// // }()
// // }
return v
}
// return v
// }
// GetStateExport gets the update state from the registry and returns it in an
// exportable struct.
func GetStateExport() *UpdateStateExport {
export := registry.GetState()
return &UpdateStateExport{
UpdateState: &export.Updates,
}
}
// // GetStateExport gets the update state from the registry and returns it in an
// // exportable struct.
// func GetStateExport() *UpdateStateExport {
// // export := registry.GetState()
// return &UpdateStateExport{
// // UpdateState: &export.Updates,
// }
// }
// LoadStateExport loads the exported update state from the database.
func LoadStateExport() (*UpdateStateExport, error) {
r, err := db.Get(updateStatusDBKey)
if err != nil {
return nil, err
}
// // LoadStateExport loads the exported update state from the database.
// func LoadStateExport() (*UpdateStateExport, error) {
// r, err := db.Get(updateStatusDBKey)
// if err != nil {
// return nil, err
// }
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
newRecord := &UpdateStateExport{}
err = record.Unwrap(r, newRecord)
if err != nil {
return nil, err
}
return newRecord, nil
}
// // unwrap
// if r.IsWrapped() {
// // only allocate a new struct, if we need it
// newRecord := &UpdateStateExport{}
// err = record.Unwrap(r, newRecord)
// if err != nil {
// return nil, err
// }
// return newRecord, nil
// }
// or adjust type
newRecord, ok := r.(*UpdateStateExport)
if !ok {
return nil, fmt.Errorf("record not of type *UpdateStateExport, but %T", r)
}
return newRecord, nil
}
// // or adjust type
// newRecord, ok := r.(*UpdateStateExport)
// if !ok {
// return nil, fmt.Errorf("record not of type *UpdateStateExport, but %T", r)
// }
// return newRecord, nil
// }
func initVersionExport() (err error) {
if err := GetVersions().save(); err != nil {
log.Warningf("updates: failed to export version information: %s", err)
}
if err := GetSimpleVersions().save(); err != nil {
log.Warningf("updates: failed to export version information: %s", err)
}
// func initVersionExport() (err error) {
// if err := GetVersions().save(); err != nil {
// log.Warningf("updates: failed to export version information: %s", err)
// }
// if err := GetSimpleVersions().save(); err != nil {
// log.Warningf("updates: failed to export version information: %s", err)
// }
module.EventVersionsUpdated.AddCallback("export version status", export)
return nil
}
// // module.EventVersionsUpdated.AddCallback("export version status", export)
// return nil
// }
func (v *Versions) save() error {
if !v.KeyIsSet() {
v.SetKey(versionsDBKey)
}
return db.Put(v)
}
// func (v *Versions) save() error {
// if !v.KeyIsSet() {
// v.SetKey(versionsDBKey)
// }
// return db.Put(v)
// }
func (v *SimpleVersions) save() error {
if !v.KeyIsSet() {
v.SetKey(simpleVersionsDBKey)
}
return db.Put(v)
}
// func (v *SimpleVersions) save() error {
// if !v.KeyIsSet() {
// v.SetKey(simpleVersionsDBKey)
// }
// return db.Put(v)
// }
func (s *UpdateStateExport) save() error {
if !s.KeyIsSet() {
s.SetKey(updateStatusDBKey)
}
return db.Put(s)
}
// func (s *UpdateStateExport) save() error {
// if !s.KeyIsSet() {
// s.SetKey(updateStatusDBKey)
// }
// return db.Put(s)
// }
// export is an event hook.
func export(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// Export versions.
if err := GetVersions().save(); err != nil {
return false, err
}
if err := GetSimpleVersions().save(); err != nil {
return false, err
}
// Export udpate state.
if err := GetStateExport().save(); err != nil {
return false, err
}
// // export is an event hook.
// func export(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// // Export versions.
// if err := GetVersions().save(); err != nil {
// return false, err
// }
// if err := GetSimpleVersions().save(); err != nil {
// return false, err
// }
// // Export udpate state.
// if err := GetStateExport().save(); err != nil {
// return false, err
// }
return false, nil
}
// return false, nil
// }
// AddToDebugInfo adds the update system status to the given debug.Info.
func AddToDebugInfo(di *debug.Info) {
// Get resources from registry.
resources := registry.Export()
platformPrefix := helper.PlatformIdentifier("")
// // AddToDebugInfo adds the update system status to the given debug.Info.
// func AddToDebugInfo(di *debug.Info) {
// // Get resources from registry.
// // resources := registry.Export()
// // platformPrefix := helper.PlatformIdentifier("")
// Collect data for debug info.
var active, selected []string
var activeCnt, totalCnt int
for id, r := range resources {
// Ignore resources for other platforms.
if !strings.HasPrefix(id, "all/") && !strings.HasPrefix(id, platformPrefix) {
continue
}
// // Collect data for debug info.
// var active, selected []string
// var activeCnt, totalCnt int
// // for id, r := range resources {
// // // Ignore resources for other platforms.
// // if !strings.HasPrefix(id, "all/") && !strings.HasPrefix(id, platformPrefix) {
// // continue
// // }
totalCnt++
if r.ActiveVersion != nil {
activeCnt++
active = append(active, fmt.Sprintf("%s: %s", id, r.ActiveVersion.VersionNumber))
}
if r.SelectedVersion != nil {
selected = append(selected, fmt.Sprintf("%s: %s", id, r.SelectedVersion.VersionNumber))
}
}
sort.Strings(active)
sort.Strings(selected)
// // totalCnt++
// // if r.ActiveVersion != nil {
// // activeCnt++
// // active = append(active, fmt.Sprintf("%s: %s", id, r.ActiveVersion.VersionNumber))
// // }
// // if r.SelectedVersion != nil {
// // selected = append(selected, fmt.Sprintf("%s: %s", id, r.SelectedVersion.VersionNumber))
// // }
// // }
// sort.Strings(active)
// sort.Strings(selected)
// Compile to one list.
lines := make([]string, 0, len(active)+len(selected)+3)
lines = append(lines, "Active:")
lines = append(lines, active...)
lines = append(lines, "")
lines = append(lines, "Selected:")
lines = append(lines, selected...)
// // Compile to one list.
// lines := make([]string, 0, len(active)+len(selected)+3)
// lines = append(lines, "Active:")
// lines = append(lines, active...)
// lines = append(lines, "")
// lines = append(lines, "Selected:")
// lines = append(lines, selected...)
// Add section.
di.AddSection(
fmt.Sprintf("Updates: %s (%d/%d)", initialReleaseChannel, activeCnt, totalCnt),
debug.UseCodeSection|debug.AddContentLineBreaks,
lines...,
)
}
// // Add section.
// di.AddSection(
// fmt.Sprintf("Updates: %s (%d/%d)", initialReleaseChannel, activeCnt, totalCnt),
// debug.UseCodeSection|debug.AddContentLineBreaks,
// lines...,
// )
// }

View file

@ -1,72 +1,65 @@
package updates
import (
"path"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/updates/helper"
)
// GetPlatformFile returns the latest platform specific file identified by the given identifier.
func GetPlatformFile(identifier string) (*updater.File, error) {
identifier = helper.PlatformIdentifier(identifier)
// func GetPlatformFile(identifier string) (*updater.File, error) {
// identifier = helper.PlatformIdentifier(identifier)
file, err := registry.GetFile(identifier)
if err != nil {
return nil, err
}
// file, err := registry.GetFile(identifier)
// if err != nil {
// return nil, err
// }
module.EventVersionsUpdated.Submit(struct{}{})
return file, nil
}
// module.EventVersionsUpdated.Submit(struct{}{})
// return file, nil
// }
// GetFile returns the latest generic file identified by the given identifier.
func GetFile(identifier string) (*updater.File, error) {
identifier = path.Join("all", identifier)
// func GetFile(identifier string) (*updater.File, error) {
// identifier = path.Join("all", identifier)
file, err := registry.GetFile(identifier)
if err != nil {
return nil, err
}
// file, err := registry.GetFile(identifier)
// if err != nil {
// return nil, err
// }
module.EventVersionsUpdated.Submit(struct{}{})
return file, nil
}
// module.EventVersionsUpdated.Submit(struct{}{})
// return file, nil
// }
// GetPlatformVersion returns the selected platform specific version of the
// given identifier.
// The returned resource version may not be modified.
func GetPlatformVersion(identifier string) (*updater.ResourceVersion, error) {
identifier = helper.PlatformIdentifier(identifier)
// func GetPlatformVersion(identifier string) (*updater.ResourceVersion, error) {
// identifier = helper.PlatformIdentifier(identifier)
rv, err := registry.GetVersion(identifier)
if err != nil {
return nil, err
}
// rv, err := registry.GetVersion(identifier)
// if err != nil {
// return nil, err
// }
return rv, nil
}
// return rv, nil
// }
// GetVersion returns the selected generic version of the given identifier.
// The returned resource version may not be modified.
func GetVersion(identifier string) (*updater.ResourceVersion, error) {
identifier = path.Join("all", identifier)
// func GetVersion(identifier string) (*updater.ResourceVersion, error) {
// identifier = path.Join("all", identifier)
rv, err := registry.GetVersion(identifier)
if err != nil {
return nil, err
}
// rv, err := registry.GetVersion(identifier)
// if err != nil {
// return nil, err
// }
return rv, nil
}
// return rv, nil
// }
// GetVersionWithFullID returns the selected generic version of the given full identifier.
// The returned resource version may not be modified.
func GetVersionWithFullID(identifier string) (*updater.ResourceVersion, error) {
rv, err := registry.GetVersion(identifier)
if err != nil {
return nil, err
}
// func GetVersionWithFullID(identifier string) (*updater.ResourceVersion, error) {
// rv, err := registry.GetVersion(identifier)
// if err != nil {
// return nil, err
// }
return rv, nil
}
// return rv, nil
// }

View file

@ -1,57 +1,58 @@
package helper
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
// import (
// "errors"
// "fmt"
// "os"
// "path/filepath"
// "runtime"
// "strings"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
)
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/updater"
// "github.com/safing/portmaster/service/updates/registry"
// )
var pmElectronUpdate *updater.File
// var pmElectronUpdate *registry.File
const suidBitWarning = `Failed to set SUID permissions for chrome-sandbox. This is required for Linux kernel versions that do not have unprivileged user namespaces (CONFIG_USER_NS_UNPRIVILEGED) enabled. If you're running and up-to-date distribution kernel you can likely ignore this warning. If you encounter issue starting the user interface please either update your kernel or set the SUID bit (mode 0%0o) on %s`
// const suidBitWarning = `Failed to set SUID permissions for chrome-sandbox. This is required for Linux kernel versions that do not have unprivileged user namespaces (CONFIG_USER_NS_UNPRIVILEGED) enabled. If you're running and up-to-date distribution kernel you can likely ignore this warning. If you encounter issue starting the user interface please either update your kernel or set the SUID bit (mode 0%0o) on %s`
// EnsureChromeSandboxPermissions makes sure the chrome-sandbox distributed
// by our app-electron package has the SUID bit set on systems that do not
// allow unprivileged CLONE_NEWUSER (clone(3)).
// On non-linux systems or systems that have kernel.unprivileged_userns_clone
// set to 1 EnsureChromeSandboPermissions is a NO-OP.
func EnsureChromeSandboxPermissions(reg *updater.ResourceRegistry) error {
if runtime.GOOS != "linux" {
return nil
}
// // EnsureChromeSandboxPermissions makes sure the chrome-sandbox distributed
// // by our app-electron package has the SUID bit set on systems that do not
// // allow unprivileged CLONE_NEWUSER (clone(3)).
// // On non-linux systems or systems that have kernel.unprivileged_userns_clone
// // set to 1 EnsureChromeSandboPermissions is a NO-OP.
// func EnsureChromeSandboxPermissions(reg *updater.ResourceRegistry) error {
// if runtime.GOOS != "linux" {
// return nil
// }
if pmElectronUpdate != nil && !pmElectronUpdate.UpgradeAvailable() {
return nil
}
// if pmElectronUpdate != nil && !pmElectronUpdate.UpgradeAvailable() {
// return nil
// }
identifier := PlatformIdentifier("app/portmaster-app.zip")
// identifier := PlatformIdentifier("app/portmaster-app.zip")
var err error
pmElectronUpdate, err = reg.GetFile(identifier)
if err != nil {
if errors.Is(err, updater.ErrNotAvailableLocally) {
return nil
}
return fmt.Errorf("failed to get file: %w", err)
}
// var err error
// pmElectronUpdate, err = reg.GetFile(identifier)
// if err != nil {
// if errors.Is(err, updater.ErrNotAvailableLocally) {
// return nil
// }
// return fmt.Errorf("failed to get file: %w", err)
// }
unpackedPath := strings.TrimSuffix(
pmElectronUpdate.Path(),
filepath.Ext(pmElectronUpdate.Path()),
)
sandboxFile := filepath.Join(unpackedPath, "chrome-sandbox")
if err := os.Chmod(sandboxFile, 0o0755|os.ModeSetuid); err != nil {
log.Errorf(suidBitWarning, 0o0755|os.ModeSetuid, sandboxFile)
// unpackedPath := strings.TrimSuffix(
// pmElectronUpdate.Path(),
// filepath.Ext(pmElectronUpdate.Path()),
// )
// sandboxFile := filepath.Join(unpackedPath, "chrome-sandbox")
// if err := os.Chmod(sandboxFile, 0o0755|os.ModeSetuid); err != nil {
// log.Errorf(suidBitWarning, 0o0755|os.ModeSetuid, sandboxFile)
return fmt.Errorf("failed to chmod: %w", err)
}
log.Debugf("updates: fixed SUID permission for chrome-sandbox")
// return fmt.Errorf("failed to chmod: %w", err)
// }
// log.Debugf("updates: fixed SUID permission for chrome-sandbox")
return nil
}
// return nil
// }

View file

@ -1,136 +1,136 @@
package helper
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
// import (
// "errors"
// "fmt"
// "io/fs"
// "os"
// "path/filepath"
"github.com/safing/jess/filesig"
"github.com/safing/portmaster/base/updater"
)
// "github.com/safing/jess/filesig"
// "github.com/safing/portmaster/base/updater"
// )
// Release Channel Configuration Keys.
const (
ReleaseChannelKey = "core/releaseChannel"
ReleaseChannelJSONKey = "core.releaseChannel"
)
// // Release Channel Configuration Keys.
// const (
// ReleaseChannelKey = "core/releaseChannel"
// ReleaseChannelJSONKey = "core.releaseChannel"
// )
// Release Channels.
const (
ReleaseChannelStable = "stable"
ReleaseChannelBeta = "beta"
ReleaseChannelStaging = "staging"
ReleaseChannelSupport = "support"
)
// // Release Channels.
// const (
// ReleaseChannelStable = "stable"
// ReleaseChannelBeta = "beta"
// ReleaseChannelStaging = "staging"
// ReleaseChannelSupport = "support"
// )
const jsonSuffix = ".json"
// const jsonSuffix = ".json"
// SetIndexes sets the update registry indexes and also configures the registry
// to use pre-releases based on the channel.
func SetIndexes(
registry *updater.ResourceRegistry,
releaseChannel string,
deleteUnusedIndexes bool,
autoDownload bool,
autoDownloadIntel bool,
) (warning error) {
usePreReleases := false
// // SetIndexes sets the update registry indexes and also configures the registry
// // to use pre-releases based on the channel.
// func SetIndexes(
// registry *updater.ResourceRegistry,
// releaseChannel string,
// deleteUnusedIndexes bool,
// autoDownload bool,
// autoDownloadIntel bool,
// ) (warning error) {
// usePreReleases := false
// Be reminded that the order is important, as indexes added later will
// override the current release from earlier indexes.
// // Be reminded that the order is important, as indexes added later will
// // override the current release from earlier indexes.
// Reset indexes before adding them (again).
registry.ResetIndexes()
// // Reset indexes before adding them (again).
// registry.ResetIndexes()
// Add the intel index first, in order to be able to override it with the
// other indexes when needed.
registry.AddIndex(updater.Index{
Path: "all/intel/intel.json",
AutoDownload: autoDownloadIntel,
})
// // Add the intel index first, in order to be able to override it with the
// // other indexes when needed.
// registry.AddIndex(updater.Index{
// Path: "all/intel/intel.json",
// AutoDownload: autoDownloadIntel,
// })
// Always add the stable index as a base.
registry.AddIndex(updater.Index{
Path: ReleaseChannelStable + jsonSuffix,
AutoDownload: autoDownload,
})
// // Always add the stable index as a base.
// registry.AddIndex(updater.Index{
// Path: ReleaseChannelStable + jsonSuffix,
// AutoDownload: autoDownload,
// })
// Add beta index if in beta or staging channel.
indexPath := ReleaseChannelBeta + jsonSuffix
if releaseChannel == ReleaseChannelBeta ||
releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
Path: indexPath,
PreRelease: true,
AutoDownload: autoDownload,
})
usePreReleases = true
} else if deleteUnusedIndexes {
err := deleteIndex(registry, indexPath)
if err != nil {
warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
}
}
// // Add beta index if in beta or staging channel.
// indexPath := ReleaseChannelBeta + jsonSuffix
// if releaseChannel == ReleaseChannelBeta ||
// releaseChannel == ReleaseChannelStaging ||
// (releaseChannel == "" && indexExists(registry, indexPath)) {
// registry.AddIndex(updater.Index{
// Path: indexPath,
// PreRelease: true,
// AutoDownload: autoDownload,
// })
// usePreReleases = true
// } else if deleteUnusedIndexes {
// err := deleteIndex(registry, indexPath)
// if err != nil {
// warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
// }
// }
// Add staging index if in staging channel.
indexPath = ReleaseChannelStaging + jsonSuffix
if releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
Path: indexPath,
PreRelease: true,
AutoDownload: autoDownload,
})
usePreReleases = true
} else if deleteUnusedIndexes {
err := deleteIndex(registry, indexPath)
if err != nil {
warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
}
}
// // Add staging index if in staging channel.
// indexPath = ReleaseChannelStaging + jsonSuffix
// if releaseChannel == ReleaseChannelStaging ||
// (releaseChannel == "" && indexExists(registry, indexPath)) {
// registry.AddIndex(updater.Index{
// Path: indexPath,
// PreRelease: true,
// AutoDownload: autoDownload,
// })
// usePreReleases = true
// } else if deleteUnusedIndexes {
// err := deleteIndex(registry, indexPath)
// if err != nil {
// warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
// }
// }
// Add support index if in support channel.
indexPath = ReleaseChannelSupport + jsonSuffix
if releaseChannel == ReleaseChannelSupport ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
Path: indexPath,
AutoDownload: autoDownload,
})
usePreReleases = true
} else if deleteUnusedIndexes {
err := deleteIndex(registry, indexPath)
if err != nil {
warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
}
}
// // Add support index if in support channel.
// indexPath = ReleaseChannelSupport + jsonSuffix
// if releaseChannel == ReleaseChannelSupport ||
// (releaseChannel == "" && indexExists(registry, indexPath)) {
// registry.AddIndex(updater.Index{
// Path: indexPath,
// AutoDownload: autoDownload,
// })
// usePreReleases = true
// } else if deleteUnusedIndexes {
// err := deleteIndex(registry, indexPath)
// if err != nil {
// warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
// }
// }
// Set pre-release usage.
registry.SetUsePreReleases(usePreReleases)
// // Set pre-release usage.
// registry.SetUsePreReleases(usePreReleases)
return warning
}
// return warning
// }
// indexExists reports whether the given index file is already present in
// the registry's storage directory.
func indexExists(registry *updater.ResourceRegistry, indexPath string) bool {
	fullPath := filepath.Join(registry.StorageDir().Path, indexPath)
	_, statErr := os.Stat(fullPath)
	return statErr == nil
}
// func indexExists(registry *updater.ResourceRegistry, indexPath string) bool {
// _, err := os.Stat(filepath.Join(registry.StorageDir().Path, indexPath))
// return err == nil
// }
func deleteIndex(registry *updater.ResourceRegistry, indexPath string) error {
// Remove index itself.
err := os.Remove(filepath.Join(registry.StorageDir().Path, indexPath))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
// func deleteIndex(registry *updater.ResourceRegistry, indexPath string) error {
// // Remove index itself.
// err := os.Remove(filepath.Join(registry.StorageDir().Path, indexPath))
// if err != nil && !errors.Is(err, fs.ErrNotExist) {
// return err
// }
// Remove any accompanying signature.
err = os.Remove(filepath.Join(registry.StorageDir().Path, indexPath+filesig.Extension))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
// // Remove any accompanying signature.
// err = os.Remove(filepath.Join(registry.StorageDir().Path, indexPath+filesig.Extension))
// if err != nil && !errors.Is(err, fs.ErrNotExist) {
// return err
// }
return nil
}
// return nil
// }

View file

@ -1,42 +1,42 @@
package helper
import (
"github.com/safing/jess"
"github.com/safing/portmaster/base/updater"
)
// import (
// "github.com/safing/jess"
// "github.com/safing/portmaster/base/updater"
// )
var (
// VerificationConfig holds the complete verification configuration for the registry.
VerificationConfig = map[string]*updater.VerificationOptions{
"": { // Default.
TrustStore: BinarySigningTrustStore,
DownloadPolicy: updater.SignaturePolicyRequire,
DiskLoadPolicy: updater.SignaturePolicyWarn,
},
"all/intel/": nil, // Disable until IntelHub supports signing.
}
// var (
// // VerificationConfig holds the complete verification configuration for the registry.
// VerificationConfig = map[string]*updater.VerificationOptions{
// "": { // Default.
// TrustStore: BinarySigningTrustStore,
// DownloadPolicy: updater.SignaturePolicyRequire,
// DiskLoadPolicy: updater.SignaturePolicyWarn,
// },
// "all/intel/": nil, // Disable until IntelHub supports signing.
// }
// BinarySigningKeys holds the signing keys in text format.
BinarySigningKeys = []string{
// Safing Code Signing Key #1
"recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
// Safing Code Signing Key #2
"recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
}
// // BinarySigningKeys holds the signing keys in text format.
// BinarySigningKeys = []string{
// // Safing Code Signing Key #1
// "recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
// // Safing Code Signing Key #2
// "recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
// }
// BinarySigningTrustStore is an in-memory trust store with the signing keys.
BinarySigningTrustStore = jess.NewMemTrustStore()
)
// // BinarySigningTrustStore is an in-memory trust store with the signing keys.
// BinarySigningTrustStore = jess.NewMemTrustStore()
// )
func init() {
for _, signingKey := range BinarySigningKeys {
rcpt, err := jess.RecipientFromTextFormat(signingKey)
if err != nil {
panic(err)
}
err = BinarySigningTrustStore.StoreSignet(rcpt)
if err != nil {
panic(err)
}
}
}
// func init() {
// for _, signingKey := range BinarySigningKeys {
// rcpt, err := jess.RecipientFromTextFormat(signingKey)
// if err != nil {
// panic(err)
// }
// err = BinarySigningTrustStore.StoreSignet(rcpt)
// if err != nil {
// panic(err)
// }
// }
// }

View file

@ -1,95 +1,95 @@
package helper
import (
"fmt"
"runtime"
// import (
// "fmt"
// "runtime"
"github.com/tevino/abool"
)
// "github.com/tevino/abool"
// )
const onWindows = runtime.GOOS == "windows"
// const onWindows = runtime.GOOS == "windows"
var intelOnly = abool.New()
// var intelOnly = abool.New()
// IntelOnly specifies that only intel data is mandatory.
// It sets the package-level intelOnly flag, which MandatoryUpdates and
// AutoUnpackUpdates consult to skip binary artifacts. The flag is one-way:
// no visible API unsets it.
func IntelOnly() {
	intelOnly.Set()
}
// // IntelOnly specifies that only intel data is mandatory.
// func IntelOnly() {
// intelOnly.Set()
// }
// PlatformIdentifier converts identifier for the current platform by
// prefixing it with "<GOOS>_<GOARCH>/".
func PlatformIdentifier(identifier string) string {
	// GOOS and GOARCH are the running program's operating system and
	// architecture targets, see https://golang.org/pkg/runtime/#GOARCH.
	platform := fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)
	return platform + "/" + identifier
}
// // PlatformIdentifier converts identifier for the current platform.
// func PlatformIdentifier(identifier string) string {
// // From https://golang.org/pkg/runtime/#GOARCH
// // GOOS is the running program's operating system target: one of darwin, freebsd, linux, and so on.
// // GOARCH is the running program's architecture target: one of 386, amd64, arm, s390x, and so on.
// return fmt.Sprintf("%s_%s/%s", runtime.GOOS, runtime.GOARCH, identifier)
// }
// MandatoryUpdates returns mandatory updates that should be loaded on install
// or reset.
func MandatoryUpdates() (identifiers []string) {
// Intel
identifiers = append(
identifiers,
// // MandatoryUpdates returns mandatory updates that should be loaded on install
// // or reset.
// func MandatoryUpdates() (identifiers []string) {
// // Intel
// identifiers = append(
// identifiers,
// Filter lists data
"all/intel/lists/index.dsd",
"all/intel/lists/base.dsdl",
"all/intel/lists/intermediate.dsdl",
"all/intel/lists/urgent.dsdl",
// // Filter lists data
// "all/intel/lists/index.dsd",
// "all/intel/lists/base.dsdl",
// "all/intel/lists/intermediate.dsdl",
// "all/intel/lists/urgent.dsdl",
// Geo IP data
"all/intel/geoip/geoipv4.mmdb.gz",
"all/intel/geoip/geoipv6.mmdb.gz",
)
// // Geo IP data
// "all/intel/geoip/geoipv4.mmdb.gz",
// "all/intel/geoip/geoipv6.mmdb.gz",
// )
// Stop here if we only want intel data.
if intelOnly.IsSet() {
return identifiers
}
// // Stop here if we only want intel data.
// if intelOnly.IsSet() {
// return identifiers
// }
// Binaries
if onWindows {
identifiers = append(
identifiers,
PlatformIdentifier("core/portmaster-core.exe"),
PlatformIdentifier("kext/portmaster-kext.sys"),
PlatformIdentifier("kext/portmaster-kext.pdb"),
PlatformIdentifier("start/portmaster-start.exe"),
PlatformIdentifier("notifier/portmaster-notifier.exe"),
PlatformIdentifier("notifier/portmaster-wintoast.dll"),
PlatformIdentifier("app2/portmaster-app.zip"),
)
} else {
identifiers = append(
identifiers,
PlatformIdentifier("core/portmaster-core"),
PlatformIdentifier("start/portmaster-start"),
PlatformIdentifier("notifier/portmaster-notifier"),
PlatformIdentifier("app2/portmaster-app"),
)
}
// // Binaries
// if onWindows {
// identifiers = append(
// identifiers,
// PlatformIdentifier("core/portmaster-core.exe"),
// PlatformIdentifier("kext/portmaster-kext.sys"),
// PlatformIdentifier("kext/portmaster-kext.pdb"),
// PlatformIdentifier("start/portmaster-start.exe"),
// PlatformIdentifier("notifier/portmaster-notifier.exe"),
// PlatformIdentifier("notifier/portmaster-wintoast.dll"),
// PlatformIdentifier("app2/portmaster-app.zip"),
// )
// } else {
// identifiers = append(
// identifiers,
// PlatformIdentifier("core/portmaster-core"),
// PlatformIdentifier("start/portmaster-start"),
// PlatformIdentifier("notifier/portmaster-notifier"),
// PlatformIdentifier("app2/portmaster-app"),
// )
// }
// Components, Assets and Data
identifiers = append(
identifiers,
// // Components, Assets and Data
// identifiers = append(
// identifiers,
// User interface components
PlatformIdentifier("app/portmaster-app.zip"),
"all/ui/modules/portmaster.zip",
"all/ui/modules/assets.zip",
)
// // User interface components
// PlatformIdentifier("app/portmaster-app.zip"),
// "all/ui/modules/portmaster.zip",
// "all/ui/modules/assets.zip",
// )
return identifiers
}
// return identifiers
// }
// AutoUnpackUpdates returns assets that need unpacking.
func AutoUnpackUpdates() []string {
if intelOnly.IsSet() {
return []string{}
}
// // AutoUnpackUpdates returns assets that need unpacking.
// func AutoUnpackUpdates() []string {
// if intelOnly.IsSet() {
// return []string{}
// }
return []string{
PlatformIdentifier("app/portmaster-app.zip"),
PlatformIdentifier("app2/portmaster-app.zip"),
}
}
// return []string{
// PlatformIdentifier("app/portmaster-app.zip"),
// PlatformIdentifier("app2/portmaster-app.zip"),
// }
// }

View file

@ -1,110 +0,0 @@
package updates
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/safing/portmaster/base/log"
)
// UpdateIndex describes a single update channel: where its artifacts are
// installed, where fresh downloads are staged, and which URLs serve its
// index file.
type UpdateIndex struct {
	Directory         string   // install directory of the currently active artifacts
	DownloadDirectory string   // staging directory for freshly downloaded artifacts
	Ignore            []string // entries to skip; presumably paths relative to Directory — TODO confirm
	IndexURLs         []string // candidate URLs for the index file, tried in order
	IndexFile         string   // file name of the index inside both directories
	AutoApply         bool     // NOTE(review): appears to mean "apply downloads automatically" — confirm against consumers
}
// downloadIndexFile makes sure both the install and the download directory
// exist, then tries each configured index URL in order until one download
// succeeds. It returns nil on the first success (or when no URLs are
// configured) and the last attempt's error otherwise.
func (ui *UpdateIndex) downloadIndexFile() (err error) {
	// Directory creation errors are ignored here; writing the index file
	// will surface them.
	_ = os.MkdirAll(ui.Directory, defaultDirMode)
	_ = os.MkdirAll(ui.DownloadDirectory, defaultDirMode)

	for _, indexURL := range ui.IndexURLs {
		if err = ui.downloadIndexFileFromURL(indexURL); err == nil {
			// First successful source wins.
			return nil
		}
		log.Warningf("updates: %s", err)
	}
	return err
}
// checkForUpdates refreshes the index file and reports whether the
// downloaded bundle's version differs from the installed bundle's version.
// If the installed bundle cannot be read, it reports true together with the
// error, acting as if an update is available.
func (ui *UpdateIndex) checkForUpdates() (bool, error) {
	if err := ui.downloadIndexFile(); err != nil {
		return false, err
	}

	installed, err := ui.GetInstallBundle()
	if err != nil {
		// No readable installed bundle: treat as update available.
		return true, err
	}

	downloaded, err := ui.GetUpdateBundle()
	if err != nil {
		return false, err
	}

	return installed.Version != downloaded.Version, nil
}
// downloadIndexFileFromURL fetches the index file from the given URL and
// writes it to ui.DownloadDirectory/ui.IndexFile, replacing any previous
// content. It returns an error if the request fails, the server does not
// answer with 200 OK, or the file cannot be written.
func (ui *UpdateIndex) downloadIndexFileFromURL(url string) error {
	client := http.Client{}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("failed a get request to %s: %w", url, err)
	}
	defer func() { _ = resp.Body.Close() }()

	// Previously any response body (404 pages, error JSON) was written to
	// disk as if it were the index file; reject non-200 answers instead.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %q from %s", resp.Status, url)
	}

	filePath := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
	// O_TRUNC is required: without it a download shorter than an existing
	// file leaves stale trailing bytes, corrupting the stored index.
	file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, defaultFileMode)
	if err != nil {
		return err
	}
	defer func() { _ = file.Close() }()

	if _, err := io.Copy(file, resp.Body); err != nil {
		return err
	}
	return nil
}
// GetInstallBundle reads the bundle description of the currently installed
// artifacts from the install directory.
func (ui *UpdateIndex) GetInstallBundle() (*Bundle, error) {
	return ui.GetBundle(fmt.Sprintf("%s/%s", ui.Directory, ui.IndexFile))
}
// GetUpdateBundle reads the bundle description of the freshly downloaded
// artifacts from the download directory.
func (ui *UpdateIndex) GetUpdateBundle() (*Bundle, error) {
	return ui.GetBundle(fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile))
}
// GetBundle reads and parses the JSON bundle description at indexFile.
//
// It returns the decoded Bundle, or an error if the file cannot be read or
// does not contain valid JSON.
func (ui *UpdateIndex) GetBundle(indexFile string) (*Bundle, error) {
	// os.ReadFile replaces the previous Open + ReadAll + manual Close dance.
	content, err := os.ReadFile(indexFile)
	if err != nil {
		return nil, fmt.Errorf("failed to open index file: %w", err)
	}

	var bundle Bundle
	if err := json.Unmarshal(content, &bundle); err != nil {
		return nil, err
	}
	return &bundle, nil
}

View file

@ -6,9 +6,6 @@ import (
"time"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
)
const (
@ -17,10 +14,6 @@ const (
enableSoftwareUpdatesKey = "core/automaticUpdates"
enableIntelUpdatesKey = "core/automaticIntelUpdates"
// ModuleName is the name of the update module
// and can be used when declaring module dependencies.
ModuleName = "updates"
// VersionUpdateEvent is emitted every time a new
// version of a monitored resource is selected.
// During module initialization VersionUpdateEvent
@ -37,8 +30,6 @@ const (
)
var (
registry *updater.ResourceRegistry
userAgentFromFlag string
updateServerFromFlag string
@ -57,205 +48,13 @@ const (
updateTaskRepeatDuration = 1 * time.Hour
)
// start is the updates module's startup hook. The entire legacy
// ResourceRegistry setup below is commented out while the new updater is
// being prototyped, so this currently performs no work and always returns
// nil. NOTE(review): confirm whether the commented code is scheduled for
// removal or reactivation.
func start() error {
	// module.restartWorkerMgr.Repeat(10 * time.Minute)
	// module.instance.Config().EventConfigChange.AddCallback("update registry config", updateRegistryConfig)

	// // create registry
	// registry = &updater.ResourceRegistry{
	// 	Name:             ModuleName,
	// 	UpdateURLs:       DefaultUpdateURLs,
	// 	UserAgent:        UserAgent,
	// 	MandatoryUpdates: helper.MandatoryUpdates(),
	// 	AutoUnpack:       helper.AutoUnpackUpdates(),
	// 	Verification:     helper.VerificationConfig,
	// 	DevMode:          devMode(),
	// 	Online:           true,
	// }

	// // Override values from flags.
	// if userAgentFromFlag != "" {
	// 	registry.UserAgent = userAgentFromFlag
	// }
	// if updateServerFromFlag != "" {
	// 	registry.UpdateURLs = []string{updateServerFromFlag}
	// }

	// // pre-init state
	// updateStateExport, err := LoadStateExport()
	// if err != nil {
	// 	log.Debugf("updates: failed to load exported update state: %s", err)
	// } else if updateStateExport.UpdateState != nil {
	// 	err := registry.PreInitUpdateState(*updateStateExport.UpdateState)
	// 	if err != nil {
	// 		return err
	// 	}
	// }

	// initialize
	// err := registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
	// if err != nil {
	// 	return err
	// }

	// // register state provider
	// err = registerRegistryStateProvider()
	// if err != nil {
	// 	return err
	// }
	// registry.StateNotifyFunc = pushRegistryState

	// // Set indexes based on the release channel.
	// warning := helper.SetIndexes(
	// 	registry,
	// 	initialReleaseChannel,
	// 	true,
	// 	enableSoftwareUpdates() && !DisableSoftwareAutoUpdate,
	// 	enableIntelUpdates(),
	// )
	// if warning != nil {
	// 	log.Warningf("updates: %s", warning)
	// }

	// err = registry.LoadIndexes(module.m.Ctx())
	// if err != nil {
	// 	log.Warningf("updates: failed to load indexes: %s", err)
	// }

	// err = registry.ScanStorage("")
	// if err != nil {
	// 	log.Warningf("updates: error during storage scan: %s", err)
	// }

	// registry.SelectVersions()
	// module.EventVersionsUpdated.Submit(struct{}{})

	// // Initialize the version export - this requires the registry to be set up.
	// err = initVersionExport()
	// if err != nil {
	// 	return err
	// }

	// // start updater task
	// if !disableTaskSchedule {
	// 	_ = module.updateWorkerMgr.Repeat(30 * time.Minute)
	// }

	// if updateASAP {
	// 	module.updateWorkerMgr.Go()
	// }

	// // react to upgrades
	// if err := initUpgrader(); err != nil {
	// 	return err
	// }

	// warnOnIncorrectParentPath()

	return nil
}
// TriggerUpdate queues the update task to execute ASAP.
// NOTE(review): with the old scheduling logic commented out, both
// parameters are currently ignored and this only logs a debug message and
// returns nil.
func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
	// switch {
	// case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
	// 	return errors.New("automatic updating is disabled")
	// default:
	// 	if forceIndexCheck {
	// 		forceCheck.Set()
	// 	}
	// 	if downloadAll {
	// 		forceDownload.Set()
	// 	}
	// 	// If index check if forced, start quicker.
	// 	module.updateWorkerMgr.Go()
	// }

	log.Debugf("updates: triggering update to run as soon as possible")
	return nil
}
// DisableUpdateSchedule disables the update schedule.
// If called, updates are only checked when TriggerUpdate()
// is called.
// NOTE(review): the implementation is commented out, so this is currently
// a no-op that always returns nil.
func DisableUpdateSchedule() error {
	// TODO: Updater state should be always on
	// switch module.Status() {
	// case modules.StatusStarting, modules.StatusOnline, modules.StatusStopping:
	// 	return errors.New("module already online")
	// }

	return nil
}
// checkForUpdates is the legacy update-check worker. The whole pipeline
// (index refresh, download, version selection, unpacking, purge) is
// commented out during the updater rewrite, so it currently does nothing
// and returns nil. The ctx parameter and the named err return are kept for
// the worker signature.
func checkForUpdates(ctx *mgr.WorkerCtx) (err error) {
	// Set correct error if context was canceled.
	// defer func() {
	// 	select {
	// 	case <-ctx.Done():
	// 		err = context.Canceled
	// 	default:
	// 	}
	// }()

	// // Get flags.
	// forceIndexCheck := forceCheck.SetToIf(true, false)
	// downloadAll := forceDownload.SetToIf(true, false)

	// // Check again if downloading updates is enabled, or forced.
	// if !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates() {
	// 	log.Warningf("updates: automatic updates are disabled")
	// 	return nil
	// }

	// defer func() {
	// 	// Resolve any error and send success notification.
	// 	if err == nil {
	// 		log.Infof("updates: successfully checked for updates")
	// 		notifyUpdateSuccess(forceIndexCheck)
	// 		return
	// 	}
	// 	// Log and notify error.
	// 	log.Errorf("updates: check failed: %s", err)
	// 	notifyUpdateCheckFailed(forceIndexCheck, err)
	// }()

	// if err = registry.UpdateIndexes(ctx.Ctx()); err != nil {
	// 	err = fmt.Errorf("failed to update indexes: %w", err)
	// 	return //nolint:nakedret // TODO: Would "return err" work with the defer?
	// }

	// err = registry.DownloadUpdates(ctx.Ctx(), downloadAll)
	// if err != nil {
	// 	err = fmt.Errorf("failed to download updates: %w", err)
	// 	return //nolint:nakedret // TODO: Would "return err" work with the defer?
	// }

	// registry.SelectVersions()

	// // Unpack selected resources.
	// err = registry.UnpackResources()
	// if err != nil {
	// 	err = fmt.Errorf("failed to unpack updates: %w", err)
	// 	return //nolint:nakedret // TODO: Would "return err" work with the defer?
	// }

	// // Purge old resources
	// registry.Purge(2)

	// module.EventResourcesUpdated.Submit(struct{}{})
	return nil
}
// stop is the updates module's shutdown hook. It cleans up the legacy
// resource registry if one was created.
// NOTE(review): this span contains the active cleanup code followed by an
// identical commented-out copy — likely residue of the in-progress rewrite;
// confirm which of the two should remain.
func stop() error {
	if registry != nil {
		err := registry.Cleanup()
		if err != nil {
			log.Warningf("updates: failed to clean up registry: %s", err)
		}
	}
	// if registry != nil {
	// 	err := registry.Cleanup()
	// 	if err != nil {
	// 		log.Warningf("updates: failed to clean up registry: %s", err)
	// 	}
	// }

	return nil
}

View file

@ -2,10 +2,8 @@ package updates
import (
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"sync/atomic"
"github.com/safing/portmaster/base/api"
@ -13,34 +11,33 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/registry"
)
const (
defaultFileMode = os.FileMode(0o0644)
defaultDirMode = os.FileMode(0o0755)
)
var applyUpdates bool
func init() {
flag.BoolVar(&applyUpdates, "update", false, "apply downloaded updates")
}
// Updates provides access to released artifacts.
type Updates struct {
m *mgr.Manager
states *mgr.StateMgr
updateWorkerMgr *mgr.WorkerMgr
restartWorkerMgr *mgr.WorkerMgr
updateBinaryWorkerMgr *mgr.WorkerMgr
updateIntelWorkerMgr *mgr.WorkerMgr
restartWorkerMgr *mgr.WorkerMgr
EventResourcesUpdated *mgr.EventMgr[struct{}]
EventVersionsUpdated *mgr.EventMgr[struct{}]
binUpdates UpdateIndex
intelUpdates UpdateIndex
registry registry.Registry
instance instance
}
var (
module *Updates
shimLoaded atomic.Bool
)
var shimLoaded atomic.Bool
// New returns a new UI module.
func New(instance instance) (*Updates, error) {
@ -49,20 +46,22 @@ func New(instance instance) (*Updates, error) {
}
m := mgr.New("Updates")
module = &Updates{
module := &Updates{
m: m,
states: m.NewStateMgr(),
EventResourcesUpdated: mgr.NewEventMgr[struct{}](ResourceUpdateEvent, m),
EventVersionsUpdated: mgr.NewEventMgr[struct{}](VersionUpdateEvent, m),
instance: instance,
instance: instance,
}
// Events
module.updateWorkerMgr = m.NewWorkerMgr("updater", module.checkForUpdates, nil)
module.updateBinaryWorkerMgr = m.NewWorkerMgr("binary updater", module.checkForBinaryUpdates, nil)
module.updateIntelWorkerMgr = m.NewWorkerMgr("intel updater", module.checkForIntelUpdates, nil)
module.restartWorkerMgr = m.NewWorkerMgr("automatic restart", automaticRestart, nil)
module.binUpdates = UpdateIndex{
binIndex := registry.UpdateIndex{
Directory: "/usr/lib/portmaster",
DownloadDirectory: "/var/portmaster/new_bin",
Ignore: []string{"databases", "intel", "config.json"},
@ -71,62 +70,48 @@ func New(instance instance) (*Updates, error) {
AutoApply: false,
}
module.intelUpdates = UpdateIndex{
intelIndex := registry.UpdateIndex{
Directory: "/var/portmaster/intel",
DownloadDirectory: "/var/portmaster/new_intel",
IndexURLs: []string{"http://localhost:8000/test-intel.json"},
IndexFile: "intel-index.json",
AutoApply: true,
}
module.registry = registry.New(binIndex, intelIndex)
return module, nil
}
func deleteUnfinishedDownloads(rootDir string) error {
return filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {
func (u *Updates) checkForBinaryUpdates(_ *mgr.WorkerCtx) error {
hasUpdates, err := u.registry.CheckForBinaryUpdates()
if err != nil {
log.Errorf("updates: failed to check for binary updates: %s", err)
}
if hasUpdates {
log.Infof("updates: there is updates available in the binary bundle")
err = u.registry.DownloadBinaryUpdates()
if err != nil {
return err
log.Errorf("updates: failed to download bundle: %s", err)
}
// Check if the current file has the specified extension
if !info.IsDir() && strings.HasSuffix(info.Name(), ".download") {
log.Warningf("updates deleting unfinished: %s\n", path)
err := os.Remove(path)
if err != nil {
return fmt.Errorf("failed to delete file %s: %w", path, err)
}
}
return nil
})
} else {
log.Infof("updates: no new binary updates")
}
return nil
}
func (u *Updates) checkForUpdates(_ *mgr.WorkerCtx) error {
_ = deleteUnfinishedDownloads(u.binUpdates.DownloadDirectory)
hasUpdate, err := u.binUpdates.checkForUpdates()
func (u *Updates) checkForIntelUpdates(_ *mgr.WorkerCtx) error {
hasUpdates, err := u.registry.CheckForIntelUpdates()
if err != nil {
log.Warningf("failed to get binary index file: %s", err)
log.Errorf("updates: failed to check for intel updates: %s", err)
}
if hasUpdate {
binBundle, err := u.binUpdates.GetUpdateBundle()
if err == nil {
log.Debugf("Bin Bundle: %+v", binBundle)
_ = os.MkdirAll(u.binUpdates.DownloadDirectory, defaultDirMode)
binBundle.downloadAndVerify(u.binUpdates.DownloadDirectory)
}
}
_ = deleteUnfinishedDownloads(u.intelUpdates.DownloadDirectory)
hasUpdate, err = u.intelUpdates.checkForUpdates()
if err != nil {
log.Warningf("failed to get intel index file: %s", err)
}
if hasUpdate {
intelBundle, err := u.intelUpdates.GetUpdateBundle()
if err == nil {
log.Debugf("Intel Bundle: %+v", intelBundle)
_ = os.MkdirAll(u.intelUpdates.DownloadDirectory, defaultDirMode)
intelBundle.downloadAndVerify(u.intelUpdates.DownloadDirectory)
if hasUpdates {
log.Infof("updates: there is updates available in the intel bundle")
err = u.registry.DownloadIntelUpdates()
if err != nil {
log.Errorf("updates: failed to download bundle: %s", err)
}
} else {
log.Infof("updates: no new intel data updates")
}
return nil
}
@ -143,38 +128,36 @@ func (u *Updates) Manager() *mgr.Manager {
// Start starts the module.
func (u *Updates) Start() error {
initConfig()
u.m.Go("check for updates", func(w *mgr.WorkerCtx) error {
binBundle, err := u.binUpdates.GetInstallBundle()
if err != nil {
log.Warningf("failed to get binary bundle: %s", err)
} else {
err = binBundle.Verify(u.binUpdates.Directory)
if err != nil {
log.Warningf("binary bundle is not valid: %s", err)
} else {
log.Infof("binary bundle is valid")
}
}
// initConfig()
intelBundle, err := u.intelUpdates.GetInstallBundle()
if applyUpdates {
err := u.registry.ApplyBinaryUpdates()
if err != nil {
log.Warningf("failed to get intel bundle: %s", err)
} else {
err = intelBundle.Verify(u.intelUpdates.Directory)
if err != nil {
log.Warningf("intel bundle is not valid: %s", err)
} else {
log.Infof("intel bundle is valid")
}
log.Errorf("updates: failed to apply binary updates: %s", err)
}
err = u.registry.ApplyIntelUpdates()
if err != nil {
log.Errorf("updates: failed to apply intel updates: %s", err)
}
u.instance.Restart()
return nil
})
u.updateWorkerMgr.Go()
}
err := u.registry.Initialize()
if err != nil {
// TODO(vladimir): Find a better way to handle this error. The service will stop if parsing of the bundle files fails.
return fmt.Errorf("failed to initialize registry: %w", err)
}
u.updateBinaryWorkerMgr.Go()
u.updateIntelWorkerMgr.Go()
return nil
}
func (u *Updates) GetFile(id string) (*registry.File, error) {
return u.registry.GetFile(id)
}
// Stop stops the module.
func (u *Updates) Stop() error {
return stop()

View file

@ -1,12 +1,8 @@
package updates
import (
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/safing/portmaster/base/notifications"
)
const (
@ -25,109 +21,109 @@ func (u *Updates) notificationsEnabled() bool {
return u.instance.Notifications() != nil
}
func notifyUpdateSuccess(force bool) {
if !module.notificationsEnabled() {
return
}
// func notifyUpdateSuccess(force bool) {
// if !module.notificationsEnabled() {
// return
// }
updateFailedCnt.Store(0)
module.states.Clear()
updateState := registry.GetState().Updates
// updateFailedCnt.Store(0)
// module.states.Clear()
// updateState := registry.GetState().Updates
flavor := updateSuccess
switch {
case len(updateState.PendingDownload) > 0:
// Show notification if there are pending downloads.
flavor = updateSuccessPending
case updateState.LastDownloadAt != nil &&
time.Since(*updateState.LastDownloadAt) < 5*time.Second:
// Show notification if we downloaded something within the last minute.
flavor = updateSuccessDownloaded
case force:
// Always show notification if update was manually triggered.
default:
// Otherwise, the update was uneventful. Do not show notification.
return
}
// flavor := updateSuccess
// switch {
// case len(updateState.PendingDownload) > 0:
// // Show notification if there are pending downloads.
// flavor = updateSuccessPending
// case updateState.LastDownloadAt != nil &&
// time.Since(*updateState.LastDownloadAt) < 5*time.Second:
// // Show notification if we downloaded something within the last minute.
// flavor = updateSuccessDownloaded
// case force:
// // Always show notification if update was manually triggered.
// default:
// // Otherwise, the update was uneventful. Do not show notification.
// return
// }
switch flavor {
case updateSuccess:
notifications.Notify(&notifications.Notification{
EventID: updateSuccess,
Type: notifications.Info,
Title: "Portmaster Is Up-To-Date",
Message: "Portmaster successfully checked for updates. Everything is up to date.\n\n" + getUpdatingInfoMsg(),
Expires: time.Now().Add(1 * time.Minute).Unix(),
AvailableActions: []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
},
})
// switch flavor {
// case updateSuccess:
// notifications.Notify(&notifications.Notification{
// EventID: updateSuccess,
// Type: notifications.Info,
// Title: "Portmaster Is Up-To-Date",
// Message: "Portmaster successfully checked for updates. Everything is up to date.\n\n" + getUpdatingInfoMsg(),
// Expires: time.Now().Add(1 * time.Minute).Unix(),
// AvailableActions: []*notifications.Action{
// {
// ID: "ack",
// Text: "OK",
// },
// },
// })
case updateSuccessPending:
msg := fmt.Sprintf(
`%d updates are available for download:
// case updateSuccessPending:
// msg := fmt.Sprintf(
// `%d updates are available for download:
- %s
// - %s
Press "Download Now" to download and automatically apply all pending updates. You will be notified of important updates that need restarting.`,
len(updateState.PendingDownload),
strings.Join(updateState.PendingDownload, "\n- "),
)
// Press "Download Now" to download and automatically apply all pending updates. You will be notified of important updates that need restarting.`,
// len(updateState.PendingDownload),
// strings.Join(updateState.PendingDownload, "\n- "),
// )
notifications.Notify(&notifications.Notification{
EventID: updateSuccess,
Type: notifications.Info,
Title: fmt.Sprintf("%d Updates Available", len(updateState.PendingDownload)),
Message: msg,
AvailableActions: []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
{
ID: "download",
Text: "Download Now",
Type: notifications.ActionTypeWebhook,
Payload: &notifications.ActionTypeWebhookPayload{
URL: apiPathCheckForUpdates + "?download",
ResultAction: "display",
},
},
},
})
// notifications.Notify(&notifications.Notification{
// EventID: updateSuccess,
// Type: notifications.Info,
// Title: fmt.Sprintf("%d Updates Available", len(updateState.PendingDownload)),
// Message: msg,
// AvailableActions: []*notifications.Action{
// {
// ID: "ack",
// Text: "OK",
// },
// {
// ID: "download",
// Text: "Download Now",
// Type: notifications.ActionTypeWebhook,
// Payload: &notifications.ActionTypeWebhookPayload{
// URL: apiPathCheckForUpdates + "?download",
// ResultAction: "display",
// },
// },
// },
// })
case updateSuccessDownloaded:
msg := fmt.Sprintf(
`%d updates were downloaded and applied:
// case updateSuccessDownloaded:
// msg := fmt.Sprintf(
// `%d updates were downloaded and applied:
- %s
// - %s
%s
`,
len(updateState.LastDownload),
strings.Join(updateState.LastDownload, "\n- "),
getUpdatingInfoMsg(),
)
// %s
// `,
// len(updateState.LastDownload),
// strings.Join(updateState.LastDownload, "\n- "),
// getUpdatingInfoMsg(),
// )
notifications.Notify(&notifications.Notification{
EventID: updateSuccess,
Type: notifications.Info,
Title: fmt.Sprintf("%d Updates Applied", len(updateState.LastDownload)),
Message: msg,
Expires: time.Now().Add(1 * time.Minute).Unix(),
AvailableActions: []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
},
})
// notifications.Notify(&notifications.Notification{
// EventID: updateSuccess,
// Type: notifications.Info,
// Title: fmt.Sprintf("%d Updates Applied", len(updateState.LastDownload)),
// Message: msg,
// Expires: time.Now().Add(1 * time.Minute).Unix(),
// AvailableActions: []*notifications.Action{
// {
// ID: "ack",
// Text: "OK",
// },
// },
// })
}
}
// }
// }
func getUpdatingInfoMsg() string {
switch {
@ -140,41 +136,41 @@ func getUpdatingInfoMsg() string {
}
}
func notifyUpdateCheckFailed(force bool, err error) {
if !module.notificationsEnabled() {
return
}
// func notifyUpdateCheckFailed(force bool, err error) {
// if !module.notificationsEnabled() {
// return
// }
failedCnt := updateFailedCnt.Add(1)
lastSuccess := registry.GetState().Updates.LastSuccessAt
// failedCnt := updateFailedCnt.Add(1)
// lastSuccess := registry.GetState().Updates.LastSuccessAt
switch {
case force:
// Always show notification if update was manually triggered.
case failedCnt < failedUpdateNotifyCountThreshold:
// Not failed often enough for notification.
return
case lastSuccess == nil:
// No recorded successful update.
case time.Now().Add(-failedUpdateNotifyDurationThreshold).Before(*lastSuccess):
// Failed too recently for notification.
return
}
// switch {
// case force:
// // Always show notification if update was manually triggered.
// case failedCnt < failedUpdateNotifyCountThreshold:
// // Not failed often enough for notification.
// return
// case lastSuccess == nil:
// // No recorded successful update.
// case time.Now().Add(-failedUpdateNotifyDurationThreshold).Before(*lastSuccess):
// // Failed too recently for notification.
// return
// }
notifications.NotifyWarn(
updateFailed,
"Update Check Failed",
fmt.Sprintf(
"Portmaster failed to check for updates. This might be a temporary issue of your device, your network or the update servers. The Portmaster will automatically try again later. The error was: %s",
err,
),
notifications.Action{
Text: "Try Again Now",
Type: notifications.ActionTypeWebhook,
Payload: &notifications.ActionTypeWebhookPayload{
URL: apiPathCheckForUpdates,
ResultAction: "display",
},
},
).SyncWithState(module.states)
}
// notifications.NotifyWarn(
// updateFailed,
// "Update Check Failed",
// fmt.Sprintf(
// "Portmaster failed to check for updates. This might be a temporary issue of your device, your network or the update servers. The Portmaster will automatically try again later. The error was: %s",
// err,
// ),
// notifications.Action{
// Text: "Try Again Now",
// Type: notifications.ActionTypeWebhook,
// Payload: &notifications.ActionTypeWebhookPayload{
// URL: apiPathCheckForUpdates,
// ResultAction: "display",
// },
// },
// ).SyncWithState(module.states)
// }

View file

@ -1,204 +1,201 @@
package updates
import (
"bytes"
"crypto/sha256"
_ "embed"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
// import (
// "crypto/sha256"
// _ "embed"
// "encoding/hex"
// "errors"
// "fmt"
// "io/fs"
// "os"
// "path/filepath"
"github.com/tevino/abool"
"golang.org/x/exp/slices"
// "github.com/tevino/abool"
// "golang.org/x/exp/slices"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils/renameio"
)
// "github.com/safing/portmaster/base/dataroot"
// "github.com/safing/portmaster/base/log"
// )
var (
portmasterCoreServiceFilePath = "portmaster.service"
portmasterNotifierServiceFilePath = "portmaster_notifier.desktop"
backupExtension = ".backup"
// var (
// portmasterCoreServiceFilePath = "portmaster.service"
// portmasterNotifierServiceFilePath = "portmaster_notifier.desktop"
// backupExtension = ".backup"
//go:embed assets/portmaster.service
currentPortmasterCoreServiceFile []byte
// //go:embed assets/portmaster.service
// currentPortmasterCoreServiceFile []byte
checkedSystemIntegration = abool.New()
// checkedSystemIntegration = abool.New()
// ErrRequiresManualUpgrade is returned when a system integration file requires a manual upgrade.
ErrRequiresManualUpgrade = errors.New("requires a manual upgrade")
)
// // ErrRequiresManualUpgrade is returned when a system integration file requires a manual upgrade.
// ErrRequiresManualUpgrade = errors.New("requires a manual upgrade")
// )
func upgradeSystemIntegration() {
// Check if we already checked the system integration.
if !checkedSystemIntegration.SetToIf(false, true) {
return
}
// func upgradeSystemIntegration() {
// // Check if we already checked the system integration.
// if !checkedSystemIntegration.SetToIf(false, true) {
// return
// }
// Upgrade portmaster core systemd service.
err := upgradeSystemIntegrationFile(
"portmaster core systemd service",
filepath.Join(dataroot.Root().Path, portmasterCoreServiceFilePath),
0o0600,
currentPortmasterCoreServiceFile,
[]string{
"bc26dd37e6953af018ad3676ee77570070e075f2b9f5df6fa59d65651a481468", // Commit 19c76c7 on 2022-01-25
"cc0cb49324dfe11577e8c066dd95cc03d745b50b2153f32f74ca35234c3e8cb5", // Commit ef479e5 on 2022-01-24
"d08a3b5f3aee351f8e120e6e2e0a089964b94c9e9d0a9e5fa822e60880e315fd", // Commit b64735e on 2021-12-07
},
)
if err != nil {
log.Warningf("updates: %s", err)
return
}
// // Upgrade portmaster core systemd service.
// err := upgradeSystemIntegrationFile(
// "portmaster core systemd service",
// filepath.Join(dataroot.Root().Path, portmasterCoreServiceFilePath),
// 0o0600,
// currentPortmasterCoreServiceFile,
// []string{
// "bc26dd37e6953af018ad3676ee77570070e075f2b9f5df6fa59d65651a481468", // Commit 19c76c7 on 2022-01-25
// "cc0cb49324dfe11577e8c066dd95cc03d745b50b2153f32f74ca35234c3e8cb5", // Commit ef479e5 on 2022-01-24
// "d08a3b5f3aee351f8e120e6e2e0a089964b94c9e9d0a9e5fa822e60880e315fd", // Commit b64735e on 2021-12-07
// },
// )
// if err != nil {
// log.Warningf("updates: %s", err)
// return
// }
// Upgrade portmaster notifier systemd user service.
// Permissions only!
err = upgradeSystemIntegrationFile(
"portmaster notifier systemd user service",
filepath.Join(dataroot.Root().Path, portmasterNotifierServiceFilePath),
0o0644,
nil, // Do not update contents.
nil, // Do not update contents.
)
if err != nil {
log.Warningf("updates: %s", err)
return
}
}
// // Upgrade portmaster notifier systemd user service.
// // Permissions only!
// err = upgradeSystemIntegrationFile(
// "portmaster notifier systemd user service",
// filepath.Join(dataroot.Root().Path, portmasterNotifierServiceFilePath),
// 0o0644,
// nil, // Do not update contents.
// nil, // Do not update contents.
// )
// if err != nil {
// log.Warningf("updates: %s", err)
// return
// }
// }
// upgradeSystemIntegrationFile upgrades the file contents and permissions.
// System integration files are not necessarily present and may also be
// edited by third parties, such as the OS itself or other installers.
// The supplied hashes must be sha256 hex-encoded.
func upgradeSystemIntegrationFile(
name string,
filePath string,
fileMode fs.FileMode,
fileData []byte,
permittedUpgradeHashes []string,
) error {
// Upgrade file contents.
if len(fileData) > 0 {
if err := upgradeSystemIntegrationFileContents(name, filePath, fileData, permittedUpgradeHashes); err != nil {
return err
}
}
// // upgradeSystemIntegrationFile upgrades the file contents and permissions.
// // System integration files are not necessarily present and may also be
// // edited by third parties, such as the OS itself or other installers.
// // The supplied hashes must be sha256 hex-encoded.
// func upgradeSystemIntegrationFile(
// name string,
// filePath string,
// fileMode fs.FileMode,
// fileData []byte,
// permittedUpgradeHashes []string,
// ) error {
// // Upgrade file contents.
// if len(fileData) > 0 {
// if err := upgradeSystemIntegrationFileContents(name, filePath, fileData, permittedUpgradeHashes); err != nil {
// return err
// }
// }
// Upgrade file permissions.
if fileMode != 0 {
if err := upgradeSystemIntegrationFilePermissions(name, filePath, fileMode); err != nil {
return err
}
}
// // Upgrade file permissions.
// if fileMode != 0 {
// if err := upgradeSystemIntegrationFilePermissions(name, filePath, fileMode); err != nil {
// return err
// }
// }
return nil
}
// return nil
// }
// upgradeSystemIntegrationFileContents upgrades the file contents.
// System integration files are not necessarily present and may also be
// edited by third parties, such as the OS itself or other installers.
// The supplied hashes must be sha256 hex-encoded.
func upgradeSystemIntegrationFileContents(
name string,
filePath string,
fileData []byte,
permittedUpgradeHashes []string,
) error {
// Read existing file.
existingFileData, err := os.ReadFile(filePath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("failed to read %s at %s: %w", name, filePath, err)
}
// // upgradeSystemIntegrationFileContents upgrades the file contents.
// // System integration files are not necessarily present and may also be
// // edited by third parties, such as the OS itself or other installers.
// // The supplied hashes must be sha256 hex-encoded.
// func upgradeSystemIntegrationFileContents(
// name string,
// filePath string,
// fileData []byte,
// permittedUpgradeHashes []string,
// ) error {
// // Read existing file.
// existingFileData, err := os.ReadFile(filePath)
// if err != nil {
// if errors.Is(err, os.ErrNotExist) {
// return nil
// }
// return fmt.Errorf("failed to read %s at %s: %w", name, filePath, err)
// }
// Check if file is already the current version.
existingSum := sha256.Sum256(existingFileData)
existingHexSum := hex.EncodeToString(existingSum[:])
currentSum := sha256.Sum256(fileData)
currentHexSum := hex.EncodeToString(currentSum[:])
if existingHexSum == currentHexSum {
log.Debugf("updates: %s at %s is up to date", name, filePath)
return nil
}
// // Check if file is already the current version.
// existingSum := sha256.Sum256(existingFileData)
// existingHexSum := hex.EncodeToString(existingSum[:])
// currentSum := sha256.Sum256(fileData)
// currentHexSum := hex.EncodeToString(currentSum[:])
// if existingHexSum == currentHexSum {
// log.Debugf("updates: %s at %s is up to date", name, filePath)
// return nil
// }
// Check if we are allowed to upgrade from the existing file.
if !slices.Contains[[]string, string](permittedUpgradeHashes, existingHexSum) {
return fmt.Errorf("%s at %s (sha256:%s) %w, as it is not a previously published version and cannot be automatically upgraded - try installing again", name, filePath, existingHexSum, ErrRequiresManualUpgrade)
}
// // Check if we are allowed to upgrade from the existing file.
// if !slices.Contains[[]string, string](permittedUpgradeHashes, existingHexSum) {
// return fmt.Errorf("%s at %s (sha256:%s) %w, as it is not a previously published version and cannot be automatically upgraded - try installing again", name, filePath, existingHexSum, ErrRequiresManualUpgrade)
// }
// Start with upgrade!
// // Start with upgrade!
// Make backup of existing file.
err = CopyFile(filePath, filePath+backupExtension)
if err != nil {
return fmt.Errorf(
"failed to create backup of %s from %s to %s: %w",
name,
filePath,
filePath+backupExtension,
err,
)
}
// // Make backup of existing file.
// err = CopyFile(filePath, filePath+backupExtension)
// if err != nil {
// return fmt.Errorf(
// "failed to create backup of %s from %s to %s: %w",
// name,
// filePath,
// filePath+backupExtension,
// err,
// )
// }
// Open destination file for writing.
atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, filePath)
if err != nil {
return fmt.Errorf("failed to create tmp file to update %s at %s: %w", name, filePath, err)
}
defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// // Open destination file for writing.
// // atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, filePath)
// // if err != nil {
// // return fmt.Errorf("failed to create tmp file to update %s at %s: %w", name, filePath, err)
// // }
// // defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// Write file.
_, err = io.Copy(atomicDstFile, bytes.NewReader(fileData))
if err != nil {
return err
}
// // // Write file.
// // _, err = io.Copy(atomicDstFile, bytes.NewReader(fileData))
// // if err != nil {
// // return err
// // }
// Finalize file.
err = atomicDstFile.CloseAtomicallyReplace()
if err != nil {
return fmt.Errorf("failed to finalize update of %s at %s: %w", name, filePath, err)
}
// // // Finalize file.
// // err = atomicDstFile.CloseAtomicallyReplace()
// // if err != nil {
// // return fmt.Errorf("failed to finalize update of %s at %s: %w", name, filePath, err)
// // }
log.Warningf("updates: %s at %s was upgraded to %s - a reboot may be required", name, filePath, currentHexSum)
return nil
}
// log.Warningf("updates: %s at %s was upgraded to %s - a reboot may be required", name, filePath, currentHexSum)
// return nil
// }
// upgradeSystemIntegrationFilePermissions upgrades the file permissions.
// System integration files are not necessarily present and may also be
// edited by third parties, such as the OS itself or other installers.
func upgradeSystemIntegrationFilePermissions(
name string,
filePath string,
fileMode fs.FileMode,
) error {
// Get current file permissions.
stat, err := os.Stat(filePath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil
}
return fmt.Errorf("failed to read %s file metadata at %s: %w", name, filePath, err)
}
// // upgradeSystemIntegrationFilePermissions upgrades the file permissions.
// // System integration files are not necessarily present and may also be
// // edited by third parties, such as the OS itself or other installers.
// func upgradeSystemIntegrationFilePermissions(
// name string,
// filePath string,
// fileMode fs.FileMode,
// ) error {
// // Get current file permissions.
// stat, err := os.Stat(filePath)
// if err != nil {
// if errors.Is(err, os.ErrNotExist) {
// return nil
// }
// return fmt.Errorf("failed to read %s file metadata at %s: %w", name, filePath, err)
// }
// If permissions are as expected, do nothing.
if stat.Mode().Perm() == fileMode {
return nil
}
// // If permissions are as expected, do nothing.
// if stat.Mode().Perm() == fileMode {
// return nil
// }
// Otherwise, set correct permissions.
err = os.Chmod(filePath, fileMode)
if err != nil {
return fmt.Errorf("failed to update %s file permissions at %s: %w", name, filePath, err)
}
// // Otherwise, set correct permissions.
// err = os.Chmod(filePath, fileMode)
// if err != nil {
// return fmt.Errorf("failed to update %s file permissions at %s: %w", name, filePath, err)
// }
log.Warningf("updates: %s file permissions at %s updated to %v", name, filePath, fileMode)
return nil
}
// log.Warningf("updates: %s file permissions at %s updated to %v", name, filePath, fileMode)
// return nil
// }

View file

@ -1 +0,0 @@
package updates

View file

@ -1,4 +1,4 @@
package updates
package registry
import (
"archive/zip"
@ -17,6 +17,12 @@ import (
"github.com/safing/portmaster/base/log"
)
const (
defaultFileMode = os.FileMode(0o0644)
executableFileMode = os.FileMode(0o0744)
defaultDirMode = os.FileMode(0o0755)
)
const MaxUnpackSize = 1 << 30 // 2^30 == 1GB
type Artifact struct {
@ -29,40 +35,40 @@ type Artifact struct {
}
type Bundle struct {
dir string
Name string `json:"Bundle"`
Version string `json:"Version"`
Published time.Time `json:"Published"`
Artifacts []Artifact `json:"Artifacts"`
}
func (bundle Bundle) downloadAndVerify(dataDir string) {
func (bundle Bundle) downloadAndVerify() {
client := http.Client{}
for _, artifact := range bundle.Artifacts {
filePath := fmt.Sprintf("%s/%s", dataDir, artifact.Filename)
filePath := fmt.Sprintf("%s/%s", bundle.dir, artifact.Filename)
// TODO(vladimir): is this needed?
_ = os.MkdirAll(filepath.Dir(filePath), os.ModePerm)
_ = os.MkdirAll(filepath.Dir(filePath), defaultDirMode)
// Check file is already downloaded and valid.
exists, err := checkIfFileIsValid(filePath, artifact)
exists, _ := checkIfFileIsValid(filePath, artifact)
if exists {
log.Debugf("file already download: %s", filePath)
log.Debugf("updates: file already downloaded: %s", filePath)
continue
} else if err != nil {
log.Errorf("error while checking old download: %s", err)
}
// Download artifact
err = processArtifact(&client, artifact, filePath)
err := processArtifact(&client, artifact, filePath)
if err != nil {
log.Errorf("updates: %s", err)
}
}
}
func (bundle Bundle) Verify(dataDir string) error {
// Verify checks if the files are present in the bundle directory and have the correct hash.
func (bundle Bundle) Verify() error {
for _, artifact := range bundle.Artifacts {
artifactPath := fmt.Sprintf("%s/%s", dataDir, artifact.Filename)
artifactPath := fmt.Sprintf("%s/%s", bundle.dir, artifact.Filename)
file, err := os.Open(artifactPath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", artifactPath, err)
@ -86,8 +92,7 @@ func checkIfFileIsValid(filename string, artifact Artifact) (bool, error) {
// Check if file already exists
file, err := os.Open(filename)
if err != nil {
//nolint:nilerr
return false, nil
return false, err
}
defer func() { _ = file.Close() }()
@ -131,7 +136,7 @@ func processArtifact(client *http.Client, artifact Artifact, filePath string) er
// Verify
hash := sha256.Sum256(content)
if !bytes.Equal(providedHash, hash[:]) {
// FIXME(vladimir): just for testing. Make it an error before commit.
// FIXME(vladimir): just for testing. Make it an error.
err = fmt.Errorf("failed to verify artifact: %s", artifact.Filename)
log.Debugf("updates: %s", err)
}
@ -142,6 +147,11 @@ func processArtifact(client *http.Client, artifact Artifact, filePath string) er
if err != nil {
return fmt.Errorf("failed to create file: %w", err)
}
if artifact.Platform == "" {
_ = file.Chmod(defaultFileMode)
} else {
_ = file.Chmod(executableFileMode)
}
_, err = file.Write(content)
if err != nil {
return fmt.Errorf("failed to write to file: %w", err)

View file

@ -0,0 +1,56 @@
package registry
import (
"fmt"
"io"
"net/http"
"os"
"github.com/safing/portmaster/base/log"
)
// UpdateIndex holds the configuration for fetching and storing one
// update bundle (binary or intel) and its index file.
type UpdateIndex struct {
	Directory         string   // directory holding the currently installed bundle
	DownloadDirectory string   // staging directory for freshly downloaded updates
	Ignore            []string // artifact names to skip — TODO confirm: not read in this file
	IndexURLs         []string // mirror URLs tried in order when downloading the index
	IndexFile         string   // file name of the bundle index within the directories
	AutoApply         bool     // whether updates are applied automatically — TODO confirm: not read in this file
}
// downloadIndexFile fetches the bundle index into the download
// (staging) directory, trying each configured mirror URL in order and
// stopping at the first success. It returns nil on success, or the
// error of the last failed attempt.
func (ui *UpdateIndex) downloadIndexFile() error {
	// Best effort; a failure here surfaces on the download below.
	_ = os.MkdirAll(ui.DownloadDirectory, defaultDirMode)

	var lastErr error
	for _, indexURL := range ui.IndexURLs {
		if err := ui.downloadIndexFileFromURL(indexURL); err != nil {
			log.Warningf("updates: %s", err)
			lastErr = err
			continue
		}
		// Downloading was successful.
		return nil
	}
	return lastErr
}
// downloadIndexFileFromURL downloads the index file from a single URL
// and writes it to the download (staging) directory, replacing any
// previous copy.
func (ui *UpdateIndex) downloadIndexFileFromURL(url string) error {
	client := http.Client{}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("failed a get request to %s: %w", url, err)
	}
	defer func() { _ = resp.Body.Close() }()

	// A non-2xx response carries an error page, not the index.
	if resp.StatusCode < 200 || resp.StatusCode > 299 {
		return fmt.Errorf("failed a get request to %s: unexpected status %q", url, resp.Status)
	}

	filePath := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
	// O_TRUNC ensures a shorter new index does not leave stale trailing
	// bytes from a previous, longer download.
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultFileMode)
	if err != nil {
		return err
	}
	defer func() { _ = file.Close() }()

	_, err = io.Copy(file, resp.Body)
	if err != nil {
		return err
	}
	return nil
}

View file

@ -0,0 +1,245 @@
package registry
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/safing/portmaster/base/log"
)
// ErrNotFound is returned by GetFile when no artifact with the
// requested identifier is registered.
var ErrNotFound = errors.New("file not found")
// File describes a single installed artifact that can be looked up in
// the registry by its identifier.
type File struct {
	id   string // artifact file name, used as the lookup key
	path string // location of the artifact on disk
}

// Identifier returns the artifact identifier (its file name).
func (f *File) Identifier() string {
	return f.id
}

// Path returns the location of the artifact on disk.
func (f *File) Path() string {
	return f.path
}

// Version returns the version of the file.
// NOTE(review): per-file versions are not tracked yet, so this is
// always the empty string.
func (f *File) Version() string {
	return ""
}
// Registry tracks the currently installed binary and intel bundles and
// any downloaded update bundles, and indexes their artifacts by name.
type Registry struct {
	binaryUpdateIndex UpdateIndex // config for binary updates
	intelUpdateIndex  UpdateIndex // config for intel data updates

	binaryBundle *Bundle // currently installed binary bundle, set by Initialize
	intelBundle  *Bundle // currently installed intel bundle, set by Initialize

	binaryUpdateBundle *Bundle // downloaded binary update, set by CheckForBinaryUpdates
	intelUpdateBundle  *Bundle // downloaded intel update, set by CheckForIntelUpdates

	files map[string]File // artifacts indexed by file name
}
// New creates a new Registry from the given binary and intel update
// index configurations. Call Initialize before using the registry.
func New(binIndex UpdateIndex, intelIndex UpdateIndex) Registry {
	return Registry{
		binaryUpdateIndex: binIndex,
		intelUpdateIndex:  intelIndex,
		files:             make(map[string]File),
	}
}
// Initialize parses and initializes currently installed bundles.
func (reg *Registry) Initialize() error {
	// Parse current installed binary bundle.
	binBundle, err := parseBundle(reg.binaryUpdateIndex.Directory, reg.binaryUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse binary bundle: %w", err)
	}
	reg.binaryBundle = binBundle

	// Parse current installed intel bundle.
	intelBundle, err := parseBundle(reg.intelUpdateIndex.Directory, reg.intelUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse intel bundle: %w", err)
	}
	reg.intelBundle = intelBundle

	// Add bundle artifacts to registry.
	reg.processBundle(reg.binaryBundle)
	reg.processBundle(reg.intelBundle)
	return nil
}
// processBundle registers every artifact of the given bundle in the
// file index, keyed by its file name.
func (reg *Registry) processBundle(bundle *Bundle) {
	for _, artifact := range bundle.Artifacts {
		// filepath.Join yields correct separators on every platform,
		// unlike joining with a hard-coded "/".
		artifactPath := filepath.Join(bundle.dir, artifact.Filename)
		reg.files[artifact.Filename] = File{id: artifact.Filename, path: artifactPath}
	}
}
// GetFile returns the registered artifact with the given id.
// It returns ErrNotFound if no such artifact exists.
func (reg *Registry) GetFile(id string) (*File, error) {
	file, ok := reg.files[id]
	if !ok {
		log.Errorf("updates: requested file id not found: %s", id)
		return nil, ErrNotFound
	}
	return &file, nil
}
// CheckForBinaryUpdates checks if there is a new binary bundle updates.
func (reg *Registry) CheckForBinaryUpdates() (bool, error) {
	if err := reg.binaryUpdateIndex.downloadIndexFile(); err != nil {
		return false, err
	}

	bundle, err := parseBundle(reg.binaryUpdateIndex.DownloadDirectory, reg.binaryUpdateIndex.IndexFile)
	if err != nil {
		return false, fmt.Errorf("failed to parse bundle file: %w", err)
	}
	reg.binaryUpdateBundle = bundle

	// TODO(vladimir): Make a better check.
	return reg.binaryBundle.Version != reg.binaryUpdateBundle.Version, nil
}
// DownloadBinaryUpdates downloads available binary updates into the
// staging directory. CheckForBinaryUpdates must have been called
// successfully first, so that the update bundle to fetch is known.
func (reg *Registry) DownloadBinaryUpdates() error {
	if reg.binaryUpdateBundle == nil {
		// CheckForBinaryUpdates needs to be called before this.
		// errors.New instead of fmt.Errorf: no formatting verbs.
		return errors.New("no valid update bundle found")
	}

	// Remove leftover partial downloads from earlier runs.
	// NOTE(review): this cleans the installed bundle's dir; confirm it
	// should not be the update bundle's (staging) dir instead.
	_ = deleteUnfinishedDownloads(reg.binaryBundle.dir)
	reg.binaryUpdateBundle.downloadAndVerify()
	return nil
}
// CheckForIntelUpdates checks if there is a new intel data bundle updates.
func (reg *Registry) CheckForIntelUpdates() (bool, error) {
	if err := reg.intelUpdateIndex.downloadIndexFile(); err != nil {
		return false, err
	}

	bundle, err := parseBundle(reg.intelUpdateIndex.DownloadDirectory, reg.intelUpdateIndex.IndexFile)
	if err != nil {
		return false, fmt.Errorf("failed to parse bundle file: %w", err)
	}
	reg.intelUpdateBundle = bundle

	// TODO(vladimir): Make a better check.
	return reg.intelBundle.Version != reg.intelUpdateBundle.Version, nil
}
// DownloadIntelUpdates downloads available intel data updates into the
// staging directory. CheckForIntelUpdates must have been called
// successfully first, so that the update bundle to fetch is known.
func (reg *Registry) DownloadIntelUpdates() error {
	if reg.intelUpdateBundle == nil {
		// CheckForIntelUpdates needs to be called before this.
		// errors.New instead of fmt.Errorf: no formatting verbs.
		return errors.New("no valid update bundle found")
	}

	// Remove leftover partial downloads from earlier runs.
	// NOTE(review): this cleans the installed bundle's dir; confirm it
	// should not be the update bundle's (staging) dir instead.
	_ = deleteUnfinishedDownloads(reg.intelBundle.dir)
	reg.intelUpdateBundle.downloadAndVerify()
	return nil
}
// ApplyBinaryUpdates removes the current binary folder and replaces it with the downloaded one.
func (reg *Registry) ApplyBinaryUpdates() error {
	downloadDir := reg.binaryUpdateIndex.DownloadDirectory
	installDir := reg.binaryUpdateIndex.Directory

	// Re-parse and verify the staged bundle before touching the
	// installed directory.
	bundle, err := parseBundle(downloadDir, reg.binaryUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse index file: %w", err)
	}
	if err := bundle.Verify(); err != nil {
		return fmt.Errorf("binary bundle is not valid: %w", err)
	}

	// Swap the staged directory into place.
	if err := os.RemoveAll(installDir); err != nil {
		return fmt.Errorf("failed to remove dir: %w", err)
	}
	if err := os.Rename(downloadDir, installDir); err != nil {
		return fmt.Errorf("failed to move dir: %w", err)
	}
	return nil
}
// ApplyIntelUpdates removes the current intel folder and replaces it with the downloaded one.
func (reg *Registry) ApplyIntelUpdates() error {
	// Re-parse and verify the staged bundle before touching the
	// installed directory.
	bundle, err := parseBundle(reg.intelUpdateIndex.DownloadDirectory, reg.intelUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse index file: %w", err)
	}
	err = bundle.Verify()
	if err != nil {
		// Fixed copy-paste in message: this is the intel bundle, not
		// the binary one.
		return fmt.Errorf("intel bundle is not valid: %w", err)
	}

	// Swap the staged directory into place.
	err = os.RemoveAll(reg.intelUpdateIndex.Directory)
	if err != nil {
		return fmt.Errorf("failed to remove dir: %w", err)
	}
	err = os.Rename(reg.intelUpdateIndex.DownloadDirectory, reg.intelUpdateIndex.Directory)
	if err != nil {
		return fmt.Errorf("failed to move dir: %w", err)
	}
	return nil
}
// parseBundle reads and decodes the JSON bundle index located at
// dir/indexFile and records dir as the bundle's base directory.
func parseBundle(dir string, indexFile string) (*Bundle, error) {
	// Renamed from "filepath": the old local shadowed the
	// path/filepath package imported by this file.
	indexPath := fmt.Sprintf("%s/%s", dir, indexFile)

	// The index is a small JSON document; read it in one go.
	content, err := os.ReadFile(indexPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open index file: %w", err)
	}

	// Parse
	var bundle Bundle
	if err := json.Unmarshal(content, &bundle); err != nil {
		return nil, err
	}
	bundle.dir = dir
	return &bundle, nil
}
// deleteUnfinishedDownloads recursively removes all ".download" files
// (partial downloads) under rootDir.
func deleteUnfinishedDownloads(rootDir string) error {
	return filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Check if the current file has the specified extension
		if !info.IsDir() && strings.HasSuffix(info.Name(), ".download") {
			// Message now follows the file's "updates: ..." convention;
			// the trailing "\n" was dropped, the logger adds its own.
			log.Warningf("updates: deleting unfinished download: %s", path)
			err := os.Remove(path)
			if err != nil {
				return fmt.Errorf("failed to delete file %s: %w", path, err)
			}
		}
		return nil
	})
}

View file

@ -54,7 +54,7 @@ func DelayedRestart(delay time.Duration) {
// Schedule the restart task.
log.Warningf("updates: restart triggered, will execute in %s", delay)
restartAt := time.Now().Add(delay)
module.restartWorkerMgr.Delay(delay)
// module.restartWorkerMgr.Delay(delay)
// Set restartTime.
restartTimeLock.Lock()
@ -68,23 +68,23 @@ func AbortRestart() {
log.Warningf("updates: restart aborted")
// Cancel schedule.
module.restartWorkerMgr.Delay(0)
// module.restartWorkerMgr.Delay(0)
}
}
// TriggerRestartIfPending triggers an automatic restart, if one is pending.
// This can be used to prepone a scheduled restart if the conditions are preferable.
func TriggerRestartIfPending() {
if restartPending.IsSet() {
module.restartWorkerMgr.Go()
}
// if restartPending.IsSet() {
// module.restartWorkerMgr.Go()
// }
}
// RestartNow immediately executes a restart.
// This only works if the process is managed by portmaster-start.
func RestartNow() {
restartPending.Set()
module.restartWorkerMgr.Go()
// module.restartWorkerMgr.Go()
}
func automaticRestart(w *mgr.WorkerCtx) error {
@ -108,11 +108,11 @@ func automaticRestart(w *mgr.WorkerCtx) error {
}
// Set restart exit code.
if !rebooting {
module.instance.Restart()
} else {
module.instance.Shutdown()
}
// if !rebooting {
// module.instance.Restart()
// } else {
// module.instance.Shutdown()
// }
}
return nil

View file

@ -1,49 +1,49 @@
package updates
import (
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/runtime"
"github.com/safing/portmaster/base/updater"
)
// import (
// "github.com/safing/portmaster/base/database/record"
// "github.com/safing/portmaster/base/runtime"
// "github.com/safing/portmaster/base/updater"
// )
var pushRegistryStatusUpdate runtime.PushFunc
// var pushRegistryStatusUpdate runtime.PushFunc
// RegistryStateExport is a wrapper to export the registry state.
type RegistryStateExport struct {
record.Base
*updater.RegistryState
}
// // RegistryStateExport is a wrapper to export the registry state.
// type RegistryStateExport struct {
// record.Base
// *updater.RegistryState
// }
func exportRegistryState(s *updater.RegistryState) *RegistryStateExport {
if s == nil {
state := registry.GetState()
s = &state
}
// func exportRegistryState(s *updater.RegistryState) *RegistryStateExport {
// // if s == nil {
// // state := registry.GetState()
// // s = &state
// // }
export := &RegistryStateExport{
RegistryState: s,
}
// export := &RegistryStateExport{
// RegistryState: s,
// }
export.CreateMeta()
export.SetKey("runtime:core/updates/state")
// export.CreateMeta()
// export.SetKey("runtime:core/updates/state")
return export
}
// return export
// }
func pushRegistryState(s *updater.RegistryState) {
export := exportRegistryState(s)
pushRegistryStatusUpdate(export)
}
// func pushRegistryState(s *updater.RegistryState) {
// export := exportRegistryState(s)
// pushRegistryStatusUpdate(export)
// }
func registerRegistryStateProvider() (err error) {
registryStateProvider := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
return []record.Record{exportRegistryState(nil)}, nil
})
// func registerRegistryStateProvider() (err error) {
// registryStateProvider := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
// return []record.Record{exportRegistryState(nil)}, nil
// })
pushRegistryStatusUpdate, err = runtime.Register("core/updates/state", registryStateProvider)
if err != nil {
return err
}
// pushRegistryStatusUpdate, err = runtime.Register("core/updates/state", registryStateProvider)
// if err != nil {
// return err
// }
return nil
}
// return nil
// }

View file

@ -1,406 +1,403 @@
package updates
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
// import (
// "context"
// "fmt"
// "os"
// "os/exec"
// "path/filepath"
// "regexp"
// "strings"
// "time"
processInfo "github.com/shirou/gopsutil/process"
"github.com/tevino/abool"
// processInfo "github.com/shirou/gopsutil/process"
// "github.com/tevino/abool"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/base/rng"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/base/utils/renameio"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
)
// "github.com/safing/portmaster/base/dataroot"
// "github.com/safing/portmaster/base/info"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/notifications"
// "github.com/safing/portmaster/base/rng"
// "github.com/safing/portmaster/base/updater"
// "github.com/safing/portmaster/service/mgr"
// )
const (
upgradedSuffix = "-upgraded"
exeExt = ".exe"
)
// const (
// upgradedSuffix = "-upgraded"
// exeExt = ".exe"
// )
var (
upgraderActive = abool.NewBool(false)
// var (
// upgraderActive = abool.NewBool(false)
pmCtrlUpdate *updater.File
pmCoreUpdate *updater.File
// pmCtrlUpdate *updater.File
// pmCoreUpdate *updater.File
spnHubUpdate *updater.File
// spnHubUpdate *updater.File
rawVersionRegex = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+b?\*?$`)
)
// rawVersionRegex = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+b?\*?$`)
// )
func initUpgrader() error {
module.EventResourcesUpdated.AddCallback("run upgrades", upgrader)
return nil
}
// func initUpgrader() error {
// // module.EventResourcesUpdated.AddCallback("run upgrades", upgrader)
// return nil
// }
func upgrader(m *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// Lock runs, but discard additional runs.
if !upgraderActive.SetToIf(false, true) {
return false, nil
}
defer upgraderActive.SetTo(false)
// func upgrader(m *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// // Lock runs, but discard additional runs.
// if !upgraderActive.SetToIf(false, true) {
// return false, nil
// }
// defer upgraderActive.SetTo(false)
// Upgrade portmaster-start.
err = upgradePortmasterStart()
if err != nil {
log.Warningf("updates: failed to upgrade portmaster-start: %s", err)
}
// // Upgrade portmaster-start.
// err = upgradePortmasterStart()
// if err != nil {
// log.Warningf("updates: failed to upgrade portmaster-start: %s", err)
// }
// Upgrade based on binary.
binBaseName := strings.Split(filepath.Base(os.Args[0]), "_")[0]
switch binBaseName {
case "portmaster-core":
// Notify about upgrade.
if err := upgradeCoreNotify(); err != nil {
log.Warningf("updates: failed to notify about core upgrade: %s", err)
}
// // Upgrade based on binary.
// binBaseName := strings.Split(filepath.Base(os.Args[0]), "_")[0]
// switch binBaseName {
// case "portmaster-core":
// // Notify about upgrade.
// if err := upgradeCoreNotify(); err != nil {
// log.Warningf("updates: failed to notify about core upgrade: %s", err)
// }
// Fix chrome sandbox permissions.
if err := helper.EnsureChromeSandboxPermissions(registry); err != nil {
log.Warningf("updates: failed to handle electron upgrade: %s", err)
}
// // Fix chrome sandbox permissions.
// // if err := helper.EnsureChromeSandboxPermissions(registry); err != nil {
// // log.Warningf("updates: failed to handle electron upgrade: %s", err)
// // }
// Upgrade system integration.
upgradeSystemIntegration()
// // Upgrade system integration.
// upgradeSystemIntegration()
case "spn-hub":
// Trigger upgrade procedure.
if err := upgradeHub(); err != nil {
log.Warningf("updates: failed to initiate hub upgrade: %s", err)
}
}
// case "spn-hub":
// // Trigger upgrade procedure.
// if err := upgradeHub(); err != nil {
// log.Warningf("updates: failed to initiate hub upgrade: %s", err)
// }
// }
return false, nil
}
// return false, nil
// }
func upgradeCoreNotify() error {
if pmCoreUpdate != nil && !pmCoreUpdate.UpgradeAvailable() {
return nil
}
// func upgradeCoreNotify() error {
// if pmCoreUpdate != nil && !pmCoreUpdate.UpgradeAvailable() {
// return nil
// }
// make identifier
identifier := "core/portmaster-core" // identifier, use forward slash!
if onWindows {
identifier += exeExt
}
// // make identifier
// identifier := "core/portmaster-core" // identifier, use forward slash!
// if onWindows {
// identifier += exeExt
// }
// get newest portmaster-core
newFile, err := GetPlatformFile(identifier)
if err != nil {
return err
}
pmCoreUpdate = newFile
// // get newest portmaster-core
// // newFile, err := GetPlatformFile(identifier)
// // if err != nil {
// // return err
// // }
// // pmCoreUpdate = newFile
// check for new version
if info.VersionNumber() != pmCoreUpdate.Version() {
n := notifications.Notify(&notifications.Notification{
EventID: "updates:core-update-available",
Type: notifications.Info,
Title: fmt.Sprintf(
"Portmaster Update v%s Is Ready!",
pmCoreUpdate.Version(),
),
Category: "Core",
Message: fmt.Sprintf(
`A new Portmaster version is ready to go! Restart the Portmaster to upgrade to %s.`,
pmCoreUpdate.Version(),
),
ShowOnSystem: true,
AvailableActions: []*notifications.Action{
// TODO: Use special UI action in order to reload UI on restart.
{
ID: "restart",
Text: "Restart",
},
{
ID: "later",
Text: "Not now",
},
},
})
n.SetActionFunction(upgradeCoreNotifyActionHandler)
// // check for new version
// if info.VersionNumber() != pmCoreUpdate.Version() {
// n := notifications.Notify(&notifications.Notification{
// EventID: "updates:core-update-available",
// Type: notifications.Info,
// Title: fmt.Sprintf(
// "Portmaster Update v%s Is Ready!",
// pmCoreUpdate.Version(),
// ),
// Category: "Core",
// Message: fmt.Sprintf(
// `A new Portmaster version is ready to go! Restart the Portmaster to upgrade to %s.`,
// pmCoreUpdate.Version(),
// ),
// ShowOnSystem: true,
// AvailableActions: []*notifications.Action{
// // TODO: Use special UI action in order to reload UI on restart.
// {
// ID: "restart",
// Text: "Restart",
// },
// {
// ID: "later",
// Text: "Not now",
// },
// },
// })
// n.SetActionFunction(upgradeCoreNotifyActionHandler)
log.Debugf("updates: new portmaster version available, sending notification to user")
}
// log.Debugf("updates: new portmaster version available, sending notification to user")
// }
return nil
}
// return nil
// }
func upgradeCoreNotifyActionHandler(_ context.Context, n *notifications.Notification) error {
switch n.SelectedActionID {
case "restart":
log.Infof("updates: user triggered restart via core update notification")
RestartNow()
case "later":
n.Delete()
}
// func upgradeCoreNotifyActionHandler(_ context.Context, n *notifications.Notification) error {
// switch n.SelectedActionID {
// case "restart":
// log.Infof("updates: user triggered restart via core update notification")
// RestartNow()
// case "later":
// n.Delete()
// }
return nil
}
// return nil
// }
func upgradeHub() error {
if spnHubUpdate != nil && !spnHubUpdate.UpgradeAvailable() {
return nil
}
// func upgradeHub() error {
// if spnHubUpdate != nil && !spnHubUpdate.UpgradeAvailable() {
// return nil
// }
// Make identifier for getting file from updater.
identifier := "hub/spn-hub" // identifier, use forward slash!
if onWindows {
identifier += exeExt
}
// // Make identifier for getting file from updater.
// identifier := "hub/spn-hub" // identifier, use forward slash!
// if onWindows {
// identifier += exeExt
// }
// Get newest spn-hub file.
newFile, err := GetPlatformFile(identifier)
if err != nil {
return err
}
spnHubUpdate = newFile
// // Get newest spn-hub file.
// // newFile, err := GetPlatformFile(identifier)
// // if err != nil {
// // return err
// // }
// // spnHubUpdate = newFile
// Check if the new version is different.
if info.GetInfo().Version != spnHubUpdate.Version() {
// Get random delay with up to three hours.
delayMinutes, err := rng.Number(3 * 60)
if err != nil {
return err
}
// // Check if the new version is different.
// if info.GetInfo().Version != spnHubUpdate.Version() {
// // Get random delay with up to three hours.
// delayMinutes, err := rng.Number(3 * 60)
// if err != nil {
// return err
// }
// Delay restart for at least one hour for preparations.
DelayedRestart(time.Duration(delayMinutes+60) * time.Minute)
// // Delay restart for at least one hour for preparations.
// DelayedRestart(time.Duration(delayMinutes+60) * time.Minute)
// Increase update checks in order to detect aborts better.
// if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(10 * time.Minute)
// }
} else {
AbortRestart()
// // Increase update checks in order to detect aborts better.
// // if !disableTaskSchedule {
// // module.updateBinaryWorkerMgr.Repeat(10 * time.Minute)
// // }
// } else {
// AbortRestart()
// Set update task schedule back to normal.
// if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(updateTaskRepeatDuration)
// }
}
// // Set update task schedule back to normal.
// // if !disableTaskSchedule {
// // module.updateBinaryWorkerMgr.Repeat(updateTaskRepeatDuration)
// // }
// }
return nil
}
// return nil
// }
func upgradePortmasterStart() error {
filename := "portmaster-start"
if onWindows {
filename += exeExt
}
// func upgradePortmasterStart() error {
// filename := "portmaster-start"
// if onWindows {
// filename += exeExt
// }
// check if we can upgrade
if pmCtrlUpdate == nil || pmCtrlUpdate.UpgradeAvailable() {
// get newest portmaster-start
newFile, err := GetPlatformFile("start/" + filename) // identifier, use forward slash!
if err != nil {
return err
}
pmCtrlUpdate = newFile
} else {
return nil
}
// // check if we can upgrade
// if pmCtrlUpdate == nil || pmCtrlUpdate.UpgradeAvailable() {
// // get newest portmaster-start
// // newFile, err := GetPlatformFile("start/" + filename) // identifier, use forward slash!
// // if err != nil {
// // return err
// // }
// // pmCtrlUpdate = newFile
// } else {
// return nil
// }
// update portmaster-start in data root
rootPmStartPath := filepath.Join(dataroot.Root().Path, filename)
err := upgradeBinary(rootPmStartPath, pmCtrlUpdate)
if err != nil {
return err
}
// // update portmaster-start in data root
// rootPmStartPath := filepath.Join(dataroot.Root().Path, filename)
// err := upgradeBinary(rootPmStartPath, pmCtrlUpdate)
// if err != nil {
// return err
// }
return nil
}
// return nil
// }
func warnOnIncorrectParentPath() {
expectedFileName := "portmaster-start"
if onWindows {
expectedFileName += exeExt
}
// func warnOnIncorrectParentPath() {
// expectedFileName := "portmaster-start"
// if onWindows {
// expectedFileName += exeExt
// }
// upgrade parent process, if it's portmaster-start
parent, err := processInfo.NewProcess(int32(os.Getppid()))
if err != nil {
log.Tracef("could not get parent process: %s", err)
return
}
parentName, err := parent.Name()
if err != nil {
log.Tracef("could not get parent process name: %s", err)
return
}
if parentName != expectedFileName {
// Only warn about this if not in dev mode.
if !devMode() {
log.Warningf("updates: parent process does not seem to be portmaster-start, name is %s", parentName)
}
// // upgrade parent process, if it's portmaster-start
// parent, err := processInfo.NewProcess(int32(os.Getppid()))
// if err != nil {
// log.Tracef("could not get parent process: %s", err)
// return
// }
// parentName, err := parent.Name()
// if err != nil {
// log.Tracef("could not get parent process name: %s", err)
// return
// }
// if parentName != expectedFileName {
// // Only warn about this if not in dev mode.
// if !devMode() {
// log.Warningf("updates: parent process does not seem to be portmaster-start, name is %s", parentName)
// }
// TODO(ppacher): once we released a new installer and folks had time
// to update we should send a module warning/hint to the
// UI notifying the user that he's still using portmaster-control.
return
}
// // TODO(ppacher): once we released a new installer and folks had time
// // to update we should send a module warning/hint to the
// // UI notifying the user that he's still using portmaster-control.
// return
// }
parentPath, err := parent.Exe()
if err != nil {
log.Tracef("could not get parent process path: %s", err)
return
}
// // parentPath, err := parent.Exe()
// // if err != nil {
// // log.Tracef("could not get parent process path: %s", err)
// // return
// // }
absPath, err := filepath.Abs(parentPath)
if err != nil {
log.Tracef("could not get absolut parent process path: %s", err)
return
}
// // absPath, err := filepath.Abs(parentPath)
// // if err != nil {
// // log.Tracef("could not get absolut parent process path: %s", err)
// // return
// // }
root := filepath.Dir(registry.StorageDir().Path)
if !strings.HasPrefix(absPath, root) {
log.Warningf("detected unexpected path %s for portmaster-start", absPath)
notifications.NotifyWarn(
"updates:unsupported-parent",
"Unsupported Launcher",
fmt.Sprintf(
"The Portmaster has been launched by an unexpected %s binary at %s. Please configure your system to use the binary at %s as this version will be kept up to date automatically.",
expectedFileName,
absPath,
filepath.Join(root, expectedFileName),
),
)
}
}
// // root := filepath.Dir(registry.StorageDir().Path)
// // if !strings.HasPrefix(absPath, root) {
// // log.Warningf("detected unexpected path %s for portmaster-start", absPath)
// // notifications.NotifyWarn(
// // "updates:unsupported-parent",
// // "Unsupported Launcher",
// // fmt.Sprintf(
// // "The Portmaster has been launched by an unexpected %s binary at %s. Please configure your system to use the binary at %s as this version will be kept up to date automatically.",
// // expectedFileName,
// // absPath,
// // filepath.Join(root, expectedFileName),
// // ),
// // )
// // }
// }
func upgradeBinary(fileToUpgrade string, file *updater.File) error {
fileExists := false
_, err := os.Stat(fileToUpgrade)
if err == nil {
// file exists and is accessible
fileExists = true
}
// func upgradeBinary(fileToUpgrade string, file *updater.File) error {
// fileExists := false
// _, err := os.Stat(fileToUpgrade)
// if err == nil {
// // file exists and is accessible
// fileExists = true
// }
if fileExists {
// get current version
var currentVersion string
cmd := exec.Command(fileToUpgrade, "version", "--short")
out, err := cmd.Output()
if err == nil {
// abort if version matches
currentVersion = strings.Trim(strings.TrimSpace(string(out)), "*")
if currentVersion == file.Version() {
log.Debugf("updates: %s is already v%s", fileToUpgrade, file.Version())
// already up to date!
return nil
}
} else {
log.Warningf("updates: failed to run %s to get version for upgrade check: %s", fileToUpgrade, err)
currentVersion = "0.0.0"
}
// if fileExists {
// // get current version
// var currentVersion string
// cmd := exec.Command(fileToUpgrade, "version", "--short")
// out, err := cmd.Output()
// if err == nil {
// // abort if version matches
// currentVersion = strings.Trim(strings.TrimSpace(string(out)), "*")
// if currentVersion == file.Version() {
// log.Debugf("updates: %s is already v%s", fileToUpgrade, file.Version())
// // already up to date!
// return nil
// }
// } else {
// log.Warningf("updates: failed to run %s to get version for upgrade check: %s", fileToUpgrade, err)
// currentVersion = "0.0.0"
// }
// test currentVersion for sanity
if !rawVersionRegex.MatchString(currentVersion) {
log.Debugf("updates: version string returned by %s is invalid: %s", fileToUpgrade, currentVersion)
}
// // test currentVersion for sanity
// if !rawVersionRegex.MatchString(currentVersion) {
// log.Debugf("updates: version string returned by %s is invalid: %s", fileToUpgrade, currentVersion)
// }
// try removing old version
err = os.Remove(fileToUpgrade)
if err != nil {
// ensure tmp dir is here
err = registry.TmpDir().Ensure()
if err != nil {
return fmt.Errorf("could not prepare tmp directory for moving file that needs upgrade: %w", err)
}
// // try removing old version
// err = os.Remove(fileToUpgrade)
// if err != nil {
// // ensure tmp dir is here
// // err = registry.TmpDir().Ensure()
// // if err != nil {
// // return fmt.Errorf("could not prepare tmp directory for moving file that needs upgrade: %w", err)
// // }
// maybe we're on windows and it's in use, try moving
err = os.Rename(fileToUpgrade, filepath.Join(
registry.TmpDir().Path,
fmt.Sprintf(
"%s-%d%s",
filepath.Base(fileToUpgrade),
time.Now().UTC().Unix(),
upgradedSuffix,
),
))
if err != nil {
return fmt.Errorf("unable to move file that needs upgrade: %w", err)
}
}
}
// // maybe we're on windows and it's in use, try moving
// // err = os.Rename(fileToUpgrade, filepath.Join(
// // registry.TmpDir().Path,
// // fmt.Sprintf(
// // "%s-%d%s",
// // filepath.Base(fileToUpgrade),
// // time.Now().UTC().Unix(),
// // upgradedSuffix,
// // ),
// // ))
// // if err != nil {
// // return fmt.Errorf("unable to move file that needs upgrade: %w", err)
// // }
// }
// }
// copy upgrade
err = CopyFile(file.Path(), fileToUpgrade)
if err != nil {
// try again
time.Sleep(1 * time.Second)
err = CopyFile(file.Path(), fileToUpgrade)
if err != nil {
return err
}
}
// // copy upgrade
// err = CopyFile(file.Path(), fileToUpgrade)
// if err != nil {
// // try again
// time.Sleep(1 * time.Second)
// err = CopyFile(file.Path(), fileToUpgrade)
// if err != nil {
// return err
// }
// }
// check permissions
if !onWindows {
info, err := os.Stat(fileToUpgrade)
if err != nil {
return fmt.Errorf("failed to get file info on %s: %w", fileToUpgrade, err)
}
if info.Mode() != 0o0755 {
err := os.Chmod(fileToUpgrade, 0o0755) //nolint:gosec // Set execute permissions.
if err != nil {
return fmt.Errorf("failed to set permissions on %s: %w", fileToUpgrade, err)
}
}
}
// // check permissions
// if !onWindows {
// info, err := os.Stat(fileToUpgrade)
// if err != nil {
// return fmt.Errorf("failed to get file info on %s: %w", fileToUpgrade, err)
// }
// if info.Mode() != 0o0755 {
// err := os.Chmod(fileToUpgrade, 0o0755) //nolint:gosec // Set execute permissions.
// if err != nil {
// return fmt.Errorf("failed to set permissions on %s: %w", fileToUpgrade, err)
// }
// }
// }
log.Infof("updates: upgraded %s to v%s", fileToUpgrade, file.Version())
return nil
}
// log.Infof("updates: upgraded %s to v%s", fileToUpgrade, file.Version())
// return nil
// }
// CopyFile atomically copies a file using the update registry's tmp dir.
func CopyFile(srcPath, dstPath string) error {
// check tmp dir
err := registry.TmpDir().Ensure()
if err != nil {
return fmt.Errorf("could not prepare tmp directory for copying file: %w", err)
}
// // CopyFile atomically copies a file using the update registry's tmp dir.
// func CopyFile(srcPath, dstPath string) error {
// // check tmp dir
// // err := registry.TmpDir().Ensure()
// // if err != nil {
// // return fmt.Errorf("could not prepare tmp directory for copying file: %w", err)
// // }
// open file for writing
atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, dstPath)
if err != nil {
return fmt.Errorf("could not create temp file for atomic copy: %w", err)
}
defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// // open file for writing
// // atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, dstPath)
// // if err != nil {
// // return fmt.Errorf("could not create temp file for atomic copy: %w", err)
// // }
// // defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// open source
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer func() {
_ = srcFile.Close()
}()
// // // open source
// // srcFile, err := os.Open(srcPath)
// // if err != nil {
// // return err
// // }
// // defer func() {
// // _ = srcFile.Close()
// // }()
// copy data
_, err = io.Copy(atomicDstFile, srcFile)
if err != nil {
return err
}
// // // copy data
// // _, err = io.Copy(atomicDstFile, srcFile)
// // if err != nil {
// // return err
// // }
// finalize file
err = atomicDstFile.CloseAtomicallyReplace()
if err != nil {
return fmt.Errorf("updates: failed to finalize copy to file %s: %w", dstPath, err)
}
// // // finalize file
// // err = atomicDstFile.CloseAtomicallyReplace()
// // if err != nil {
// // return fmt.Errorf("updates: failed to finalize copy to file %s: %w", dstPath, err)
// // }
return nil
}
// return nil
// }

View file

@ -6,9 +6,8 @@ import (
"os"
"sync"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
"github.com/safing/portmaster/spn/conf"
"github.com/safing/portmaster/spn/hub"
"github.com/safing/portmaster/spn/navigator"
@ -16,7 +15,7 @@ import (
)
var (
intelResource *updater.File
intelResource *registry.File
intelResourcePath = "intel/spn/main-intel.yaml"
intelResourceMapName = "main"
intelResourceUpdateLock sync.Mutex
@ -44,12 +43,13 @@ func updateSPNIntel(_ context.Context, _ interface{}) (err error) {
}
// Check if there is something to do.
if intelResource != nil && !intelResource.UpgradeAvailable() {
// TODO(vladimir): is update check needed
if intelResource != nil { //&& !intelResource.UpgradeAvailable() {
return nil
}
// Get intel file and load it from disk.
intelResource, err = updates.GetFile(intelResourcePath)
intelResource, err = module.instance.Updates().GetFile(intelResourcePath)
if err != nil {
return fmt.Errorf("failed to get SPN intel update: %w", err)
}