feat: Implement unified update detection system (Phase 1)

Docker container image update detection with full stack implementation:

Backend:
- Add internal/updatedetection package with types, store, registry checker, manager
- Add registry checking to Docker agent (internal/dockeragent/registry.go)
- Add ImageDigest and UpdateStatus fields to container reports
- Add /api/infra-updates API endpoints for querying updates
- Integrate with alert system - fires after 24h of pending updates

Frontend:
- Add UpdateBadge and UpdateIcon components for update indicators
- Add updateStatus to DockerContainer TypeScript interface
- Display blue update badges in Docker unified table image column
- Add 'has:update' search filter support

Features:
- Registry digest comparison for Docker Hub, GHCR, private registries
- Auth token handling for Docker Hub public images
- Caching with 6h TTL (15min for errors)
- Configurable alert delay via UpdateAlertDelayHours (default: 24h; set a negative value to disable update alerts)
- Alert metadata includes digests, pending time, image info
This commit is contained in:
rcourtman 2025-12-27 17:58:38 +00:00
parent 39941a3927
commit b50872b686
17 changed files with 2407 additions and 19 deletions

View file

@ -28,6 +28,7 @@ import { ResponsiveMetricCell } from '@/components/shared/responsive';
import { useBreakpoint } from '@/hooks/useBreakpoint';
import { StackedMemoryBar } from '@/components/Dashboard/StackedMemoryBar';
import { UrlEditPopover } from '@/components/shared/UrlEditPopover';
import { UpdateIcon } from '@/components/Docker/UpdateBadge';
import type { ColumnConfig } from '@/types/responsive';
const typeBadgeClass = (type: 'container' | 'service' | 'task' | 'unknown') => {
@ -611,6 +612,11 @@ const containerMatchesToken = (
return state.includes(token.value) || health.includes(token.value);
}
// Special filter for containers with updates available
if (token.key === 'has' && token.value === 'update') {
return container.updateStatus?.updateAvailable === true;
}
const fields: string[] = [
container.name,
container.id,
@ -1291,12 +1297,15 @@ const DockerContainerRow: Component<{
class="px-2 py-0.5 text-xs text-gray-700 dark:text-gray-300 overflow-hidden"
style={{ "max-width": "200px" }}
>
<span
class="block truncate"
title={container.image || undefined}
>
{container.image || '—'}
</span>
<div class="flex items-center gap-1.5 min-w-0">
<span
class="truncate"
title={container.image || undefined}
>
{container.image || '—'}
</span>
<UpdateIcon updateStatus={container.updateStatus} />
</div>
</div>
);
case 'status':

View file

@ -0,0 +1,75 @@
import { Component, Show } from 'solid-js';
import type { DockerContainerUpdateStatus } from '@/types/api';
interface UpdateBadgeProps {
  updateStatus?: DockerContainerUpdateStatus; // per-container update check result reported by the backend
  compact?: boolean; // when true, render icon-only (no text label) for tight layouts
}
/**
 * UpdateBadge displays a visual indicator when a container image has an update available.
 * Uses a blue color scheme to differentiate from health/status badges.
 *
 * Renders nothing when there is neither an available update nor a check error.
 * When an update is available it takes precedence over any error indicator.
 */
export const UpdateBadge: Component<UpdateBadgeProps> = (props) => {
  const hasUpdate = () => props.updateStatus?.updateAvailable === true;
  const hasError = () => Boolean(props.updateStatus?.error);

  return (
    <Show when={hasUpdate() || hasError()}>
      <Show
        when={hasUpdate()}
        fallback={
          // The fallback is only reachable when hasError() is true (the outer
          // Show requires hasUpdate() || hasError()), so no extra guard is
          // needed here — the previously nested <Show when={hasError()}> was
          // redundant and has been removed.
          <span
            class="inline-flex items-center gap-1 rounded-full px-1.5 py-0.5 text-xs font-medium bg-gray-100 text-gray-500 dark:bg-gray-800 dark:text-gray-400"
            title={`Update check failed: ${props.updateStatus?.error}`}
          >
            <svg class="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
              <path stroke-linecap="round" stroke-linejoin="round" d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
            </svg>
            <Show when={!props.compact}>
              <span>Check failed</span>
            </Show>
          </span>
        }
      >
        {/* slice(0, 19) keeps "sha256:" plus the first 12 hex chars of the digest */}
        <span
          class="inline-flex items-center gap-1 rounded-full px-1.5 py-0.5 text-xs font-medium bg-blue-100 text-blue-700 dark:bg-blue-900/40 dark:text-blue-300"
          title={`Image update available. Current: ${props.updateStatus?.currentDigest?.slice(0, 19) || 'unknown'}... Latest: ${props.updateStatus?.latestDigest?.slice(0, 19) || 'unknown'}...`}
        >
          <svg class="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
            <path stroke-linecap="round" stroke-linejoin="round" d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-8l-4-4m0 0L8 8m4-4v12" />
          </svg>
          <Show when={!props.compact}>
            <span>Update</span>
          </Show>
        </span>
      </Show>
    </Show>
  );
};
/**
 * Compact version of UpdateBadge - just an icon with no text.
 * Use this in table cells where space is limited.
 */
export const UpdateIcon: Component<{ updateStatus?: DockerContainerUpdateStatus }> = (props) => {
  // Accessor (not a plain value) so Solid re-evaluates when the prop changes.
  const updateAvailable = () => props.updateStatus?.updateAvailable === true;

  return (
    <Show when={updateAvailable()}>
      <span
        class="inline-flex items-center justify-center w-5 h-5 rounded-full bg-blue-100 text-blue-700 dark:bg-blue-900/40 dark:text-blue-300"
        title="Image update available"
      >
        <svg class="w-3 h-3" fill="none" viewBox="0 0 24 24" stroke="currentColor" stroke-width="2">
          <path stroke-linecap="round" stroke-linejoin="round" d="M4 16v1a3 3 0 003 3h10a3 3 0 003-3v-1m-4-8l-4-4m0 0L8 8m4-4v12" />
        </svg>
      </span>
    </Show>
  );
};

export default UpdateBadge;

View file

@ -347,6 +347,7 @@ export interface DockerContainer {
id: string;
name: string;
image: string;
imageDigest?: string; // Current image digest (sha256:...)
state: string;
status: string;
health?: string;
@ -369,6 +370,16 @@ export interface DockerContainer {
blockIo?: DockerContainerBlockIO;
mounts?: DockerContainerMount[];
podman?: PodmanContainerMetadata;
updateStatus?: DockerContainerUpdateStatus; // Image update detection status
}
// Update status for container images, as reported by the Docker agent's
// registry checker. Mirrors the agent-side UpdateStatus payload.
export interface DockerContainerUpdateStatus {
updateAvailable: boolean; // true when the registry digest differs from the local image digest
currentDigest?: string; // digest of the locally running image (sha256:...)
latestDigest?: string; // digest the registry reports for the image's tag
lastChecked: number; // time of the last registry check — presumably a Unix timestamp; TODO confirm units
error?: string; // set when the check failed (auth required, rate limited, not found, ...)
}
export interface DockerContainerPort {

View file

@ -315,6 +315,7 @@ type DockerThresholdConfig struct {
ServiceCritGapPct int `json:"serviceCriticalGapPercent"` // % of desired tasks missing to trigger critical (default: 50)
StateDisableConnectivity bool `json:"stateDisableConnectivity,omitempty"` // Disable container offline/state alerts globally
StatePoweredOffSeverity AlertLevel `json:"statePoweredOffSeverity,omitempty"` // Default severity for container state/offline alerts
UpdateAlertDelayHours int `json:"updateAlertDelayHours,omitempty"` // Hours to wait before alerting on available image updates (default: 24, 0 = disabled)
}
// PMGThresholdConfig represents Proxmox Mail Gateway-specific alert thresholds
@ -511,8 +512,9 @@ type Manager struct {
offlineConfirmations map[string]int // Track consecutive offline counts for all resources
dockerOfflineCount map[string]int // Track consecutive offline counts for Docker hosts
dockerStateConfirm map[string]int // Track consecutive state confirmations for Docker containers
dockerRestartTracking map[string]*dockerRestartRecord // Track restart counts and times for restart loop detection
dockerLastExitCode map[string]int // Track last exit code for OOM detection
dockerRestartTracking map[string]*dockerRestartRecord // Track restart counts and times for restart loop detection
dockerLastExitCode map[string]int // Track last exit code for OOM detection
dockerUpdateFirstSeen map[string]time.Time // Track when image updates were first detected for alert delay
// PMG quarantine growth tracking
pmgQuarantineHistory map[string][]pmgQuarantineSnapshot // Track quarantine snapshots for growth detection
// PMG anomaly detection tracking
@ -550,17 +552,18 @@ func NewManager() *Manager {
activeAlerts: make(map[string]*Alert),
historyManager: NewHistoryManager(alertsDir),
escalationStop: make(chan struct{}),
alertRateLimit: make(map[string][]time.Time),
recentAlerts: make(map[string]*Alert),
suppressedUntil: make(map[string]time.Time),
recentlyResolved: make(map[string]*ResolvedAlert),
pendingAlerts: make(map[string]time.Time),
nodeOfflineCount: make(map[string]int),
offlineConfirmations: make(map[string]int),
dockerOfflineCount: make(map[string]int),
dockerStateConfirm: make(map[string]int),
dockerRestartTracking: make(map[string]*dockerRestartRecord),
dockerLastExitCode: make(map[string]int),
alertRateLimit: make(map[string][]time.Time),
recentAlerts: make(map[string]*Alert),
suppressedUntil: make(map[string]time.Time),
recentlyResolved: make(map[string]*ResolvedAlert),
pendingAlerts: make(map[string]time.Time),
nodeOfflineCount: make(map[string]int),
offlineConfirmations: make(map[string]int),
dockerOfflineCount: make(map[string]int),
dockerStateConfirm: make(map[string]int),
dockerRestartTracking: make(map[string]*dockerRestartRecord),
dockerLastExitCode: make(map[string]int),
dockerUpdateFirstSeen: make(map[string]time.Time),
pmgQuarantineHistory: make(map[string][]pmgQuarantineSnapshot),
pmgAnomalyTrackers: make(map[string]*pmgAnomalyTracker),
ackState: make(map[string]ackRecord),
@ -1154,6 +1157,10 @@ func normalizeDockerDefaults(config *AlertConfig) {
config.DockerDefaults.StatePoweredOffSeverity = AlertLevelWarning
}
config.DockerDefaults.StatePoweredOffSeverity = normalizePoweredOffSeverity(config.DockerDefaults.StatePoweredOffSeverity)
// Default to 24 hours delay for update alerts; set to -1 to explicitly disable
if config.DockerDefaults.UpdateAlertDelayHours == 0 {
config.DockerDefaults.UpdateAlertDelayHours = 24
}
}
// normalizePMGDefaults ensures PMG (Proxmox Mail Gateway) defaults are set
@ -3617,6 +3624,7 @@ func (m *Manager) evaluateDockerContainer(host models.DockerHost, container mode
m.checkDockerContainerRestartLoop(host, container, resourceID, containerName, instanceName, nodeName)
m.checkDockerContainerOOMKill(host, container, resourceID, containerName, instanceName, nodeName)
m.checkDockerContainerMemoryLimit(host, container, resourceID, containerName, instanceName, nodeName)
m.checkDockerContainerImageUpdate(host, container, resourceID, containerName, instanceName, nodeName)
}
func (m *Manager) evaluateDockerService(host models.DockerHost, service models.DockerService, resourceID string) {
@ -4337,6 +4345,131 @@ func (m *Manager) clearDockerContainerMetricAlerts(resourceID string, metrics ..
}
}
// checkDockerContainerImageUpdate checks if an image update has been pending for too long.
//
// Flow: once the agent reports UpdateStatus.UpdateAvailable for a container we
// record the first-seen time in dockerUpdateFirstSeen and only raise a warning
// alert after UpdateAlertDelayHours have elapsed. Tracking and the alert are
// cleared when the update disappears, no status is reported, or the feature is
// disabled (negative delay). normalizeDockerDefaults maps a configured 0 to
// 24h, so 0 never reaches this function unchanged.
func (m *Manager) checkDockerContainerImageUpdate(host models.DockerHost, container models.DockerContainer, resourceID, containerName, instanceName, nodeName string) {
    alertID := fmt.Sprintf("docker-container-update-%s", resourceID)

    // Check if update detection is enabled
    m.mu.RLock()
    delayHours := m.config.DockerDefaults.UpdateAlertDelayHours
    m.mu.RUnlock()

    // Negative value means disabled
    if delayHours < 0 {
        m.clearAlert(alertID)
        m.mu.Lock()
        delete(m.dockerUpdateFirstSeen, resourceID)
        m.mu.Unlock()
        return
    }

    // Check if this container has an update status reported
    if container.UpdateStatus == nil {
        // No update status - clear any tracking and alerts
        m.clearAlert(alertID)
        m.mu.Lock()
        delete(m.dockerUpdateFirstSeen, resourceID)
        m.mu.Unlock()
        return
    }

    // Check for errors in update detection (don't alert on errors)
    if container.UpdateStatus.Error != "" {
        // Update check failed - clear alert but keep tracking, so a transient
        // registry failure does not reset the first-seen time of an already
        // pending update.
        m.clearAlert(alertID)
        return
    }

    // Check if an update is available
    if !container.UpdateStatus.UpdateAvailable {
        // No update available - clear tracking and alert
        m.clearAlert(alertID)
        m.mu.Lock()
        delete(m.dockerUpdateFirstSeen, resourceID)
        m.mu.Unlock()
        return
    }

    // Update is available - track when we first saw it
    m.mu.Lock()
    firstSeen, exists := m.dockerUpdateFirstSeen[resourceID]
    if !exists {
        firstSeen = time.Now()
        m.dockerUpdateFirstSeen[resourceID] = firstSeen
    }
    m.mu.Unlock()

    // Check if we've exceeded the delay threshold
    pendingDuration := time.Since(firstSeen)
    threshold := time.Duration(delayHours) * time.Hour
    if pendingDuration < threshold {
        // Not yet time to alert
        log.Debug().
            Str("container", containerName).
            Str("host", host.DisplayName).
            Str("image", container.Image).
            Dur("pending", pendingDuration).
            Dur("threshold", threshold).
            Msg("Container update pending but below alert threshold")
        return
    }

    // Create or update the alert
    pendingHours := int(pendingDuration.Hours())
    message := fmt.Sprintf("Docker container '%s' has an image update available for %d hours", containerName, pendingHours)

    alert := &Alert{
        ID:           alertID,
        Type:         "docker-container-update",
        Level:        AlertLevelWarning,
        ResourceID:   resourceID,
        ResourceName: containerName,
        Node:         nodeName,
        Instance:     instanceName,
        Message:      message,
        StartTime:    firstSeen,
        LastSeen:     time.Now(),
        Metadata: map[string]interface{}{
            "resourceType":   "Docker Container",
            "hostId":         host.ID,
            "hostName":       host.DisplayName,
            "hostHostname":   host.Hostname,
            "containerId":    container.ID,
            "containerName":  containerName,
            "image":          container.Image,
            "currentDigest":  container.UpdateStatus.CurrentDigest,
            "latestDigest":   container.UpdateStatus.LatestDigest,
            "lastChecked":    container.UpdateStatus.LastChecked,
            "firstSeen":      firstSeen,
            "pendingHours":   pendingHours,
            "thresholdHours": delayHours,
        },
    }

    m.mu.Lock()
    if existing, ok := m.activeAlerts[alertID]; ok && existing != nil {
        // Update existing alert in place; do not re-dispatch notifications.
        existing.LastSeen = time.Now()
        existing.Message = message
        existing.Metadata = alert.Metadata
        m.mu.Unlock()
        return
    }
    m.preserveAlertState(alertID, alert)
    m.activeAlerts[alertID] = alert
    m.recentAlerts[alertID] = alert
    // NOTE(review): AddAlert and dispatchAlert run while m.mu is held —
    // confirm neither re-acquires m.mu, otherwise this deadlocks.
    m.historyManager.AddAlert(*alert)
    m.dispatchAlert(alert, false)
    m.mu.Unlock()

    log.Warn().
        Str("container", containerName).
        Str("host", host.DisplayName).
        Str("image", container.Image).
        Int("pendingHours", pendingHours).
        Msg("Docker container has pending image update")
}
func (m *Manager) cleanupDockerContainerAlerts(host models.DockerHost, seen map[string]struct{}) {
prefix := fmt.Sprintf("docker:%s/", strings.TrimSpace(host.ID))

View file

@ -0,0 +1,204 @@
package api
import (
"encoding/json"
"net/http"
"github.com/rcourtman/pulse-go-rewrite/internal/updatedetection"
"github.com/rcourtman/pulse-go-rewrite/internal/utils"
"github.com/rs/zerolog/log"
)
// UpdateDetectionHandlers manages API endpoints for infrastructure update detection.
// This is separate from UpdateHandlers which handles Pulse self-updates.
type UpdateDetectionHandlers struct {
    manager *updatedetection.Manager // may be nil when update detection is disabled; handlers degrade gracefully
}

// NewUpdateDetectionHandlers creates a new update detection handlers group.
// A nil manager is allowed; the handlers then serve empty or not-found responses.
func NewUpdateDetectionHandlers(manager *updatedetection.Manager) *UpdateDetectionHandlers {
    return &UpdateDetectionHandlers{manager: manager}
}
// HandleGetInfraUpdates returns all tracked infrastructure updates with optional filtering.
// GET /api/infra-updates
//
// ?hostId=<id> Filter by host
// ?resourceType=docker Filter by type
// ?severity=security Filter by severity
func (h *UpdateDetectionHandlers) HandleGetInfraUpdates(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only GET is allowed", nil)
        return
    }

    // Without a manager there is nothing to report; answer with an empty set.
    if h.manager == nil {
        empty := map[string]any{"updates": []any{}, "total": 0}
        if err := utils.WriteJSONResponse(w, empty); err != nil {
            log.Error().Err(err).Msg("Failed to serialize empty updates response")
        }
        return
    }

    // Translate query-string parameters into typed filters. Empty strings
    // convert to the zero value of the typed fields, matching "no filter".
    params := r.URL.Query()
    filters := updatedetection.UpdateFilters{
        HostID:       params.Get("hostId"),
        ResourceType: params.Get("resourceType"),
        Severity:     updatedetection.UpdateSeverity(params.Get("severity")),
        UpdateType:   updatedetection.UpdateType(params.Get("type")),
    }

    matched := h.manager.GetUpdates(filters)
    payload := map[string]any{
        "updates": matched,
        "total":   len(matched),
    }
    if err := utils.WriteJSONResponse(w, payload); err != nil {
        log.Error().Err(err).Msg("Failed to serialize updates response")
    }
}
// HandleGetInfraUpdateForResource returns the update status for a specific resource.
// GET /api/infra-updates/{resourceId}
func (h *UpdateDetectionHandlers) HandleGetInfraUpdateForResource(w http.ResponseWriter, r *http.Request, resourceID string) {
    if r.Method != http.MethodGet {
        writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only GET is allowed", nil)
        return
    }

    // A missing manager is indistinguishable from "no update tracked".
    if h.manager == nil {
        writeErrorResponse(w, http.StatusNotFound, "not_found", "No update found for resource", nil)
        return
    }

    result := h.manager.GetUpdatesForResource(resourceID)
    if result == nil {
        writeErrorResponse(w, http.StatusNotFound, "not_found", "No update found for resource", nil)
        return
    }
    if err := utils.WriteJSONResponse(w, result); err != nil {
        log.Error().Err(err).Msg("Failed to serialize update response")
    }
}
// HandleGetInfraUpdatesSummary returns aggregated update statistics per host.
// GET /api/infra-updates/summary
func (h *UpdateDetectionHandlers) HandleGetInfraUpdatesSummary(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodGet {
        writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only GET is allowed", nil)
        return
    }

    // No manager means nothing is tracked; respond with an empty summary.
    if h.manager == nil {
        if err := utils.WriteJSONResponse(w, map[string]any{"summaries": map[string]any{}, "totalUpdates": 0}); err != nil {
            log.Error().Err(err).Msg("Failed to serialize empty summary response")
        }
        return
    }

    payload := map[string]any{
        "summaries":    h.manager.GetSummary(),
        "totalUpdates": h.manager.GetTotalCount(),
    }
    if err := utils.WriteJSONResponse(w, payload); err != nil {
        log.Error().Err(err).Msg("Failed to serialize summary response")
    }
}
// HandleTriggerInfraUpdateCheck triggers an update check for a specific resource or host.
// POST /api/infra-updates/check
//
// { "hostId": "xxx" } Check all on host
// { "resourceId": "xxx" } Check specific resource
//
// At least one of hostId/resourceId is required; an empty request body would
// otherwise be acknowledged as "queued" while targeting nothing.
func (h *UpdateDetectionHandlers) HandleTriggerInfraUpdateCheck(w http.ResponseWriter, r *http.Request) {
    if r.Method != http.MethodPost {
        writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only POST is allowed", nil)
        return
    }
    if h.manager == nil {
        writeErrorResponse(w, http.StatusServiceUnavailable, "service_unavailable", "Update detection not available", nil)
        return
    }

    // Limit request body to guard against oversized payloads
    r.Body = http.MaxBytesReader(w, r.Body, 8*1024)
    defer r.Body.Close()

    var req struct {
        HostID     string `json:"hostId"`
        ResourceID string `json:"resourceId"`
    }
    if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
        writeErrorResponse(w, http.StatusBadRequest, "invalid_json", "Failed to decode request body", map[string]string{"error": err.Error()})
        return
    }

    // Fix: reject requests that target nothing — previously these returned a
    // misleading "Update check queued" success.
    if req.HostID == "" && req.ResourceID == "" {
        writeErrorResponse(w, http.StatusBadRequest, "invalid_request", "Either hostId or resourceId must be provided", nil)
        return
    }

    // For now, return a placeholder response - the actual check will be performed
    // by agents on their next cycle or when we add server-side registry checking
    response := map[string]any{
        "success": true,
        "message": "Update check queued",
    }
    if req.HostID != "" {
        response["hostId"] = req.HostID
    }
    if req.ResourceID != "" {
        response["resourceId"] = req.ResourceID
    }
    if err := utils.WriteJSONResponse(w, response); err != nil {
        log.Error().Err(err).Msg("Failed to serialize check response")
    }
}
// HandleGetInfraUpdatesForHost returns all updates for a specific host.
// GET /api/infra-updates/host/{hostId}
func (h *UpdateDetectionHandlers) HandleGetInfraUpdatesForHost(w http.ResponseWriter, r *http.Request, hostID string) {
    if r.Method != http.MethodGet {
        writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only GET is allowed", nil)
        return
    }

    // Without a manager the host simply has no tracked updates.
    if h.manager == nil {
        if err := utils.WriteJSONResponse(w, map[string]any{"updates": []any{}, "total": 0, "hostId": hostID}); err != nil {
            log.Error().Err(err).Msg("Failed to serialize empty host updates response")
        }
        return
    }

    hostUpdates := h.manager.GetUpdatesForHost(hostID)
    payload := map[string]any{
        "updates": hostUpdates,
        "total":   len(hostUpdates),
        "hostId":  hostID,
    }
    if err := utils.WriteJSONResponse(w, payload); err != nil {
        log.Error().Err(err).Msg("Failed to serialize host updates response")
    }
}

View file

@ -105,6 +105,7 @@ type Agent struct {
prevContainerCPU map[string]cpuSample
preCPUStatsFailures int
reportBuffer *buffer.Queue[agentsdocker.Report]
registryChecker *RegistryChecker // For checking container image updates
}
// ErrStopRequested indicates the agent should terminate gracefully after acknowledging a stop command.
@ -257,6 +258,7 @@ func New(cfg Config) (*Agent, error) {
stateFilters: stateFilters,
prevContainerCPU: make(map[string]cpuSample),
reportBuffer: buffer.New[agentsdocker.Report](bufferCapacity),
registryChecker: NewRegistryChecker(*logger),
}
for _, state := range stateFilters {
@ -929,6 +931,7 @@ func (a *Agent) collectContainer(ctx context.Context, summary containertypes.Sum
ID: summary.ID,
Name: trimLeadingSlash(summary.Names),
Image: summary.Image,
ImageDigest: summary.ImageID, // sha256:... digest of the image
CreatedAt: createdAt,
State: summary.State,
Status: summary.Status,
@ -958,6 +961,23 @@ func (a *Agent) collectContainer(ctx context.Context, summary containertypes.Sum
}
}
// Check for image updates if registry checker is enabled
if a.registryChecker != nil && a.registryChecker.Enabled() {
// Use the container's current image digest for comparison
// The ImageDigest from summary.ImageID is the local image ID, which we use
// to compare with the registry's latest manifest digest
result := a.registryChecker.CheckImageUpdate(ctx, container.Image, container.ImageDigest)
if result != nil {
container.UpdateStatus = &agentsdocker.UpdateStatus{
UpdateAvailable: result.UpdateAvailable,
CurrentDigest: result.CurrentDigest,
LatestDigest: result.LatestDigest,
LastChecked: result.CheckedAt,
Error: result.Error,
}
}
}
if requestSize {
a.logger.Debug().
Str("container", container.Name).

View file

@ -0,0 +1,402 @@
package dockeragent
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/rs/zerolog"
)
// RegistryChecker handles container image digest lookups against registries.
// Safe for concurrent use: configuration fields are guarded by mu, and the
// digest cache carries its own lock.
type RegistryChecker struct {
    httpClient *http.Client
    cache      *digestCache
    logger     zerolog.Logger
    mu         sync.RWMutex

    // Configuration
    enabled       bool          // whether update checks run at all (see SetEnabled)
    checkInterval time.Duration // minimum spacing between full checks (see ShouldCheck)
    lastFullCheck time.Time     // when the last full check was recorded (see MarkChecked)
}

// digestCache provides thread-safe caching of digest lookups.
type digestCache struct {
    entries map[string]cacheEntry
    mu      sync.RWMutex
}

// cacheEntry is one cached lookup: either a digest or an error message.
type cacheEntry struct {
    latestDigest string
    expiresAt    time.Time
    err          string // cached error message
}

const (
    // defaultCacheTTL is the default time-to-live for cached digests.
    defaultCacheTTL = 6 * time.Hour
    // errorCacheTTL is the TTL for caching errors (shorter to allow retry).
    errorCacheTTL = 15 * time.Minute
    // defaultCheckInterval is how often to check for updates.
    defaultCheckInterval = 6 * time.Hour
)

// ImageUpdateResult contains the result of an image update check.
type ImageUpdateResult struct {
    Image           string    `json:"image"`
    CurrentDigest   string    `json:"currentDigest"`   // digest of the local image
    LatestDigest    string    `json:"latestDigest"`    // digest reported by the registry
    UpdateAvailable bool      `json:"updateAvailable"` // true when the two digests differ
    CheckedAt       time.Time `json:"checkedAt"`
    Error           string    `json:"error,omitempty"` // non-empty when the check failed
}
// NewRegistryChecker creates a new registry checker for the Docker agent.
// Checking starts enabled with the default 6h interval; callers can toggle
// it later via SetEnabled.
func NewRegistryChecker(logger zerolog.Logger) *RegistryChecker {
    transport := &http.Transport{
        TLSClientConfig: &tls.Config{
            MinVersion: tls.VersionTLS12,
        },
        MaxIdleConns:       10,
        IdleConnTimeout:    90 * time.Second,
        DisableCompression: false,
        DisableKeepAlives:  false,
    }
    return &RegistryChecker{
        httpClient: &http.Client{
            Timeout:   30 * time.Second,
            Transport: transport,
        },
        cache:         &digestCache{entries: make(map[string]cacheEntry)},
        logger:        logger,
        enabled:       true,
        checkInterval: defaultCheckInterval,
    }
}
// SetEnabled enables or disables update checking.
func (r *RegistryChecker) SetEnabled(enabled bool) {
    r.mu.Lock()
    r.enabled = enabled
    r.mu.Unlock()
}
// Enabled reports whether update checking is currently enabled.
func (r *RegistryChecker) Enabled() bool {
    r.mu.RLock()
    isOn := r.enabled
    r.mu.RUnlock()
    return isOn
}
// ShouldCheck returns true if enough time has passed since the last full check.
// Always false while checking is disabled.
func (r *RegistryChecker) ShouldCheck() bool {
    r.mu.RLock()
    defer r.mu.RUnlock()
    return r.enabled && time.Since(r.lastFullCheck) >= r.checkInterval
}
// MarkChecked updates the last check timestamp to now.
func (r *RegistryChecker) MarkChecked() {
    r.mu.Lock()
    r.lastFullCheck = time.Now()
    r.mu.Unlock()
}
// CheckImageUpdate checks if a newer version of the image is available.
//
// Returns nil when checking is disabled. Otherwise it always returns a
// result: failures are reported via the Error field rather than a Go error,
// so the caller can attach the outcome to the container report. Successes
// and errors are both cached per registry/repository:tag (defaultCacheTTL /
// errorCacheTTL) to avoid hammering registries.
func (r *RegistryChecker) CheckImageUpdate(ctx context.Context, image, currentDigest string) *ImageUpdateResult {
    if !r.Enabled() {
        return nil
    }

    registry, repository, tag := parseImageReference(image)

    // Skip digest-pinned images (image@sha256:...) — parseImageReference
    // signals these with an empty registry, and a pinned reference can
    // never "update" in place.
    if registry == "" {
        return &ImageUpdateResult{
            Image:           image,
            CurrentDigest:   currentDigest,
            UpdateAvailable: false,
            CheckedAt:       time.Now(),
            Error:           "digest-pinned image",
        }
    }

    // Check cache first
    cacheKey := fmt.Sprintf("%s/%s:%s", registry, repository, tag)
    if cached := r.getCached(cacheKey); cached != nil {
        // A cached error short-circuits without a registry round-trip.
        if cached.err != "" {
            return &ImageUpdateResult{
                Image:           image,
                CurrentDigest:   currentDigest,
                UpdateAvailable: false,
                CheckedAt:       time.Now(),
                Error:           cached.err,
            }
        }
        return &ImageUpdateResult{
            Image:           image,
            CurrentDigest:   currentDigest,
            LatestDigest:    cached.latestDigest,
            UpdateAvailable: r.digestsDiffer(currentDigest, cached.latestDigest),
            CheckedAt:       time.Now(),
        }
    }

    // Fetch latest digest from registry
    latestDigest, err := r.fetchDigest(ctx, registry, repository, tag)
    if err != nil {
        // Cache the error to avoid hammering the registry
        r.cacheError(cacheKey, err.Error())
        r.logger.Debug().
            Str("image", image).
            Str("registry", registry).
            Err(err).
            Msg("Failed to fetch image digest from registry")
        return &ImageUpdateResult{
            Image:           image,
            CurrentDigest:   currentDigest,
            UpdateAvailable: false,
            CheckedAt:       time.Now(),
            Error:           err.Error(),
        }
    }

    // Cache the successful result
    r.cacheDigest(cacheKey, latestDigest)

    return &ImageUpdateResult{
        Image:           image,
        CurrentDigest:   currentDigest,
        LatestDigest:    latestDigest,
        UpdateAvailable: r.digestsDiffer(currentDigest, latestDigest),
        CheckedAt:       time.Now(),
    }
}
// digestsDiffer compares two digests, handling format differences.
// An empty digest on either side never counts as a difference, since we
// cannot tell whether an update exists.
func (r *RegistryChecker) digestsDiffer(current, latest string) bool {
    if current == "" || latest == "" {
        return false
    }
    // Compare without the "sha256:" prefix so mixed formats still match.
    return strings.TrimPrefix(current, "sha256:") != strings.TrimPrefix(latest, "sha256:")
}
// fetchDigest retrieves the digest for an image from the registry.
//
// Uses a HEAD request against the Registry v2 manifest endpoint so no
// manifest body is transferred; the digest comes from the
// Docker-Content-Digest response header, falling back to the ETag for
// registries that omit it on HEAD.
func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository, tag string) (string, error) {
    // Get auth token if needed
    token, err := r.getAuthToken(ctx, registry, repository)
    if err != nil {
        return "", fmt.Errorf("auth: %w", err)
    }

    // Construct the manifest URL
    manifestURL := fmt.Sprintf("https://%s/v2/%s/manifests/%s", registry, repository, tag)
    req, err := http.NewRequestWithContext(ctx, http.MethodHead, manifestURL, nil)
    if err != nil {
        return "", fmt.Errorf("create request: %w", err)
    }

    // Accept headers for multi-arch manifest support
    req.Header.Set("Accept", strings.Join([]string{
        "application/vnd.docker.distribution.manifest.list.v2+json",
        "application/vnd.docker.distribution.manifest.v2+json",
        "application/vnd.oci.image.manifest.v1+json",
        "application/vnd.oci.image.index.v1+json",
    }, ", "))

    if token != "" {
        req.Header.Set("Authorization", "Bearer "+token)
    }

    resp, err := r.httpClient.Do(req)
    if err != nil {
        return "", fmt.Errorf("request: %w", err)
    }
    defer resp.Body.Close()

    // Map common failure modes to short, cacheable error strings.
    if resp.StatusCode == http.StatusUnauthorized {
        return "", fmt.Errorf("authentication required")
    }
    if resp.StatusCode == http.StatusNotFound {
        return "", fmt.Errorf("image not found")
    }
    if resp.StatusCode == http.StatusTooManyRequests {
        return "", fmt.Errorf("rate limited")
    }
    if resp.StatusCode >= 400 {
        return "", fmt.Errorf("registry error: %d", resp.StatusCode)
    }

    // Get digest from Docker-Content-Digest header
    digest := resp.Header.Get("Docker-Content-Digest")
    if digest == "" {
        // Some registries don't return digest on HEAD, try etag
        digest = resp.Header.Get("Etag")
        if digest != "" {
            // Clean up etag format (strip surrounding quotes)
            digest = strings.Trim(digest, "\"")
        }
    }
    if digest == "" {
        return "", fmt.Errorf("no digest in response")
    }
    return digest, nil
}
// getAuthToken retrieves an auth token for the registry.
//
// Docker Hub (registry-1.docker.io) requires a bearer token even for public
// pulls, obtained anonymously from auth.docker.io. Other registries are tried
// anonymously ("" token); private registries needing credentials surface a
// 401 from fetchDigest instead.
func (r *RegistryChecker) getAuthToken(ctx context.Context, registry, repository string) (string, error) {
    if registry != "registry-1.docker.io" {
        // For other registries, try anonymous access first
        return "", nil
    }

    tokenURL := fmt.Sprintf("https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull", repository)
    req, err := http.NewRequestWithContext(ctx, http.MethodGet, tokenURL, nil)
    if err != nil {
        return "", err
    }
    resp, err := r.httpClient.Do(req)
    if err != nil {
        return "", err
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return "", fmt.Errorf("token request failed: %d", resp.StatusCode)
    }

    // Fix: bound the read so a misbehaving endpoint cannot exhaust memory
    // (previously an unbounded io.ReadAll). 1 MiB is far larger than any
    // real token response.
    body, err := io.ReadAll(io.LimitReader(resp.Body, 1<<20))
    if err != nil {
        return "", err
    }
    var tokenResp struct {
        Token string `json:"token"`
    }
    if err := json.Unmarshal(body, &tokenResp); err != nil {
        return "", err
    }
    return tokenResp.Token, nil
}
// getCached returns the cache entry for key, or nil when absent or expired.
// A copy of the entry is returned so callers never alias the map value.
func (r *RegistryChecker) getCached(key string) *cacheEntry {
    r.cache.mu.RLock()
    defer r.cache.mu.RUnlock()
    if entry, ok := r.cache.entries[key]; ok && !time.Now().After(entry.expiresAt) {
        copied := entry
        return &copied
    }
    return nil
}
// cacheDigest stores a successful digest lookup with the standard TTL.
func (r *RegistryChecker) cacheDigest(key, digest string) {
    entry := cacheEntry{
        latestDigest: digest,
        expiresAt:    time.Now().Add(defaultCacheTTL),
    }
    r.cache.mu.Lock()
    r.cache.entries[key] = entry
    r.cache.mu.Unlock()
}
// cacheError stores a failed lookup with the shorter error TTL so the
// registry is retried sooner than a successful result would be.
func (r *RegistryChecker) cacheError(key, errMsg string) {
    entry := cacheEntry{
        err:       errMsg,
        expiresAt: time.Now().Add(errorCacheTTL),
    }
    r.cache.mu.Lock()
    r.cache.entries[key] = entry
    r.cache.mu.Unlock()
}
// CleanupCache removes expired entries from the cache.
func (r *RegistryChecker) CleanupCache() {
    cutoff := time.Now()
    r.cache.mu.Lock()
    defer r.cache.mu.Unlock()
    for key, entry := range r.cache.entries {
        if cutoff.After(entry.expiresAt) {
            delete(r.cache.entries, key)
        }
    }
}
// parseImageReference parses an image reference into registry, repository, and tag.
//
// Defaults: Docker Hub ("registry-1.docker.io") with tag "latest", and the
// implicit "library/" namespace for single-segment official images.
// Digest-pinned references (image@sha256:...) yield three empty strings.
func parseImageReference(image string) (registry, repository, tag string) {
    // Digest-pinned images never have a checkable tag.
    if strings.Contains(image, "@sha256:") {
        return "", "", ""
    }

    registry = "registry-1.docker.io"
    tag = "latest"

    // The tag is everything after the last ':' — unless that segment
    // contains '/', in which case the colon belongs to a registry port.
    if idx := strings.LastIndex(image, ":"); idx >= 0 {
        candidate := image[idx+1:]
        if !strings.Contains(candidate, "/") {
            tag = candidate
            image = image[:idx]
        }
    }

    // Split registry from repository. The first segment is a registry when
    // it looks like a hostname (contains '.' or ':') or is "localhost".
    segments := strings.Split(image, "/")
    switch {
    case len(segments) == 1:
        // Official image (e.g., "nginx")
        repository = "library/" + segments[0]
    case strings.ContainsAny(segments[0], ".:") || segments[0] == "localhost":
        registry = segments[0]
        repository = strings.Join(segments[1:], "/")
    default:
        // Docker Hub with namespace (e.g., "myrepo/myapp")
        repository = image
    }
    return registry, repository, tag
}
// isValidDigest checks if a string looks like a valid digest.
// Matches the canonical form: "sha256:" followed by 64 lowercase hex chars.
// NOTE(review): not referenced anywhere in the code visible here — confirm
// it is used elsewhere in this file before removing.
var digestPattern = regexp.MustCompile(`^sha256:[a-f0-9]{64}$`)

func isValidDigest(s string) bool {
    return digestPattern.MatchString(s)
}

View file

@ -0,0 +1,192 @@
package dockeragent
import (
"testing"
)
// TestParseImageReference covers the tag/registry/repository split for the
// common reference shapes: official images, namespaced Docker Hub images,
// third-party registries, registries with ports, and digest-pinned images
// (which must yield empty results since they cannot be update-checked).
func TestParseImageReference(t *testing.T) {
	tests := []struct {
		name     string
		image    string
		wantReg  string
		wantRepo string
		wantTag  string
	}{
		{
			name:     "official image without tag",
			image:    "nginx",
			wantReg:  "registry-1.docker.io",
			wantRepo: "library/nginx",
			wantTag:  "latest",
		},
		{
			name:     "official image with tag",
			image:    "nginx:1.25",
			wantReg:  "registry-1.docker.io",
			wantRepo: "library/nginx",
			wantTag:  "1.25",
		},
		{
			name:     "docker hub with namespace",
			image:    "myrepo/myapp:v1",
			wantReg:  "registry-1.docker.io",
			wantRepo: "myrepo/myapp",
			wantTag:  "v1",
		},
		{
			name:     "docker hub with namespace no tag",
			image:    "linuxserver/plex",
			wantReg:  "registry-1.docker.io",
			wantRepo: "linuxserver/plex",
			wantTag:  "latest",
		},
		{
			name:     "ghcr.io image",
			image:    "ghcr.io/owner/repo:tag",
			wantReg:  "ghcr.io",
			wantRepo: "owner/repo",
			wantTag:  "tag",
		},
		{
			name:     "private registry with port",
			image:    "registry.example.com:5000/app:v2",
			wantReg:  "registry.example.com:5000",
			wantRepo: "app",
			wantTag:  "v2",
		},
		{
			name:     "localhost registry",
			image:    "localhost:5000/myimage:dev",
			wantReg:  "localhost:5000",
			wantRepo: "myimage",
			wantTag:  "dev",
		},
		{
			name:     "digest pinned image",
			image:    "nginx@sha256:abc123def456",
			wantReg:  "",
			wantRepo: "",
			wantTag:  "",
		},
		{
			name:     "lscr.io image",
			image:    "lscr.io/linuxserver/plex:latest",
			wantReg:  "lscr.io",
			wantRepo: "linuxserver/plex",
			wantTag:  "latest",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotReg, gotRepo, gotTag := parseImageReference(tt.image)
			if gotReg != tt.wantReg {
				t.Errorf("registry = %q, want %q", gotReg, tt.wantReg)
			}
			if gotRepo != tt.wantRepo {
				t.Errorf("repository = %q, want %q", gotRepo, tt.wantRepo)
			}
			if gotTag != tt.wantTag {
				t.Errorf("tag = %q, want %q", gotTag, tt.wantTag)
			}
		})
	}
}
// TestRegistryChecker_DigestsDiffer verifies the digest comparison: an empty
// digest on either side never counts as a difference (missing data must not
// produce a false "update available"), and the comparison normalizes the
// "sha256:" prefix before comparing.
func TestRegistryChecker_DigestsDiffer(t *testing.T) {
	checker := &RegistryChecker{}
	tests := []struct {
		name    string
		current string
		latest  string
		want    bool
	}{
		{
			name:    "same digest",
			current: "sha256:abc123",
			latest:  "sha256:abc123",
			want:    false,
		},
		{
			name:    "different digest",
			current: "sha256:abc123",
			latest:  "sha256:def456",
			want:    true,
		},
		{
			name:    "empty current",
			current: "",
			latest:  "sha256:abc123",
			want:    false,
		},
		{
			name:    "empty latest",
			current: "sha256:abc123",
			latest:  "",
			want:    false,
		},
		{
			name:    "both empty",
			current: "",
			latest:  "",
			want:    false,
		},
		{
			name:    "different with prefix normalization",
			current: "sha256:abc123",
			latest:  "abc123",
			want:    false, // Should match after normalization
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := checker.digestsDiffer(tt.current, tt.latest)
			if got != tt.want {
				t.Errorf("digestsDiffer(%q, %q) = %v, want %v", tt.current, tt.latest, got, tt.want)
			}
		})
	}
}
// TestRegistryChecker_EnableDisable checks that the enabled flag round-trips
// through SetEnabled/Enabled.
func TestRegistryChecker_EnableDisable(t *testing.T) {
	checker := &RegistryChecker{enabled: true}
	if !checker.Enabled() {
		t.Error("Expected checker to be enabled initially")
	}
	checker.SetEnabled(false)
	if checker.Enabled() {
		t.Error("Expected checker to be disabled")
	}
	checker.SetEnabled(true)
	if !checker.Enabled() {
		t.Error("Expected checker to be enabled")
	}
}
// TestIsValidDigest exercises the sha256 digest validator with canonical
// digests, a truncated digest, a wrong algorithm prefix, and junk input.
func TestIsValidDigest(t *testing.T) {
	tests := []struct {
		digest string
		valid  bool
	}{
		{"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", true},
		{"sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", true},
		{"sha256:short", false},
		{"md5:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", false},
		{"", false},
		{"notadigest", false},
	}
	for _, tt := range tests {
		t.Run(tt.digest, func(t *testing.T) {
			got := isValidDigest(tt.digest)
			if got != tt.valid {
				t.Errorf("isValidDigest(%q) = %v, want %v", tt.digest, got, tt.valid)
			}
		})
	}
}

View file

@ -416,6 +416,7 @@ type DockerContainer struct {
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
ImageDigest string `json:"imageDigest,omitempty"` // Current image digest (sha256:...)
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
@ -437,6 +438,16 @@ type DockerContainer struct {
BlockIO *DockerContainerBlockIO `json:"blockIo,omitempty"`
Mounts []DockerContainerMount `json:"mounts,omitempty"`
Podman *DockerPodmanContainer `json:"podman,omitempty"`
UpdateStatus *DockerContainerUpdateStatus `json:"updateStatus,omitempty"` // Image update detection status
}
// DockerContainerUpdateStatus tracks the image update status for a container.
// It is populated by the Docker agent's registry check and copied verbatim
// into the server-side container model.
type DockerContainerUpdateStatus struct {
	UpdateAvailable bool      `json:"updateAvailable"`         // registry digest differs from the running image's digest
	CurrentDigest   string    `json:"currentDigest,omitempty"` // digest of the image the container currently runs
	LatestDigest    string    `json:"latestDigest,omitempty"`  // newest digest reported by the registry
	LastChecked     time.Time `json:"lastChecked"`             // when the registry was last queried
	Error           string    `json:"error,omitempty"`         // e.g., "rate limited", "auth required"
}
// DockerPodmanContainer captures Podman-specific annotations for a container.

View file

@ -1720,6 +1720,7 @@ func (m *Monitor) ApplyDockerReport(report agentsdocker.Report, tokenRecord *con
ID: payload.ID,
Name: payload.Name,
Image: payload.Image,
ImageDigest: payload.ImageDigest,
State: payload.State,
Status: payload.Status,
Health: payload.Health,
@ -1735,6 +1736,17 @@ func (m *Monitor) ApplyDockerReport(report agentsdocker.Report, tokenRecord *con
FinishedAt: payload.FinishedAt,
}
// Copy update status if provided by agent
if payload.UpdateStatus != nil {
container.UpdateStatus = &models.DockerContainerUpdateStatus{
UpdateAvailable: payload.UpdateStatus.UpdateAvailable,
CurrentDigest: payload.UpdateStatus.CurrentDigest,
LatestDigest: payload.UpdateStatus.LatestDigest,
LastChecked: payload.UpdateStatus.LastChecked,
Error: payload.UpdateStatus.Error,
}
}
if len(payload.Ports) > 0 {
ports := make([]models.DockerContainerPort, len(payload.Ports))
for i, port := range payload.Ports {

View file

@ -0,0 +1,270 @@
// Package updatedetection provides unified update detection across all Pulse-managed
// infrastructure types.
//
// The Manager coordinates update detection for Docker containers, receiving update status
// from Docker agents and checking registries on demand. It maintains an in-memory store
// of available updates and provides APIs for querying update status.
package updatedetection
import (
"context"
"sync"
"time"
"github.com/rs/zerolog"
)
// Manager coordinates update detection across all infrastructure types.
// It receives per-container update status from Docker agents, keeps results
// in an in-memory Store, and can query registries directly on demand.
type Manager struct {
	store    *Store
	registry *RegistryChecker
	logger   zerolog.Logger
	mu       sync.RWMutex // guards the configuration fields below

	// Configuration
	enabled             bool          // master switch; see Enabled/SetEnabled
	checkInterval       time.Duration // polling cadence — not read in this file; presumably used by a background loop, confirm
	alertDelayHours     int           // hours an update must be pending before it may alert
	enableDockerUpdates bool          // gates ProcessDockerContainerUpdate
}
// ManagerConfig holds configuration for the update detection manager.
// Zero values are honored as-is by NewManager; use DefaultManagerConfig for
// the documented defaults.
type ManagerConfig struct {
	Enabled             bool          // Master switch for update detection
	CheckInterval       time.Duration // How often to check for updates (default 6h)
	AlertDelayHours     int           // Hours before alerting on a new update (default 24)
	EnableDockerUpdates bool          // Enable Docker image update detection
}
// DefaultManagerConfig returns sensible default configuration:
// detection on, 6-hour check interval, 24-hour alert delay, Docker enabled.
func DefaultManagerConfig() ManagerConfig {
	return ManagerConfig{
		Enabled:             true,
		CheckInterval:       6 * time.Hour,
		AlertDelayHours:     24,
		EnableDockerUpdates: true,
	}
}
// NewManager creates a new update detection manager with an empty store and
// a fresh registry checker. The logger is tagged with the component name so
// all update-detection output is identifiable.
func NewManager(cfg ManagerConfig, logger zerolog.Logger) *Manager {
	return &Manager{
		store:               NewStore(),
		registry:            NewRegistryChecker(logger),
		logger:              logger.With().Str("component", "updatedetection").Logger(),
		enabled:             cfg.Enabled,
		checkInterval:       cfg.CheckInterval,
		alertDelayHours:     cfg.AlertDelayHours,
		enableDockerUpdates: cfg.EnableDockerUpdates,
	}
}
// ProcessDockerContainerUpdate processes an update status report from a
// Docker agent and synchronizes the store:
//   - nil status: nothing recorded (the agent has not checked yet);
//   - no update available: any previously tracked entry for the container
//     is removed;
//   - update available: an entry keyed "docker:<host>:<container>" is
//     created or refreshed (the store preserves FirstDetected).
//
// currentDigest is accepted for interface stability; the stored digests come
// from updateStatus itself.
func (m *Manager) ProcessDockerContainerUpdate(
	hostID string,
	containerID string,
	containerName string,
	image string,
	currentDigest string,
	updateStatus *ContainerUpdateStatus,
) {
	// Snapshot the flags under the read lock: SetEnabled mutates m.enabled
	// under m.mu, so the previous unguarded read was a data race.
	m.mu.RLock()
	active := m.enabled && m.enableDockerUpdates
	m.mu.RUnlock()
	if !active {
		return
	}
	if updateStatus == nil {
		// No update status provided by agent - we can still track the container
		// but won't know about updates yet
		return
	}
	if !updateStatus.UpdateAvailable {
		// No update available - remove any existing update entry
		m.store.DeleteUpdatesForResource(containerID)
		return
	}
	// Create or update the update entry
	updateID := "docker:" + hostID + ":" + containerID
	update := &UpdateInfo{
		ID:             updateID,
		ResourceID:     containerID,
		ResourceType:   "docker",
		ResourceName:   containerName,
		HostID:         hostID,
		Type:           UpdateTypeDockerImage,
		CurrentDigest:  updateStatus.CurrentDigest,
		LatestDigest:   updateStatus.LatestDigest,
		LastChecked:    updateStatus.LastChecked,
		CurrentVersion: image,
	}
	if updateStatus.Error != "" {
		update.Error = updateStatus.Error
	}
	m.store.UpsertUpdate(update)
	m.logger.Debug().
		Str("container", containerName).
		Str("image", image).
		Str("hostID", hostID).
		Bool("hasUpdate", updateStatus.UpdateAvailable).
		Msg("Processed container update status")
}
// CheckImageUpdate checks a specific image for updates using the registry API.
// This can be called on demand from the server side. Returns (nil, nil) when
// update detection is disabled.
func (m *Manager) CheckImageUpdate(ctx context.Context, image, currentDigest string) (*ImageUpdateInfo, error) {
	// Use the locked accessor rather than reading m.enabled directly, so
	// this does not race with a concurrent SetEnabled.
	if !m.Enabled() {
		return nil, nil
	}
	return m.registry.CheckImageUpdate(ctx, image, currentDigest)
}
// GetUpdates returns every tracked update that matches the given filters.
// An empty filter set returns all updates unfiltered.
func (m *Manager) GetUpdates(filters UpdateFilters) []*UpdateInfo {
	candidates := m.store.GetAllUpdates()
	if filters.IsEmpty() {
		return candidates
	}
	matched := make([]*UpdateInfo, 0, len(candidates))
	for _, candidate := range candidates {
		if !filters.Matches(candidate) {
			continue
		}
		matched = append(matched, candidate)
	}
	return matched
}
// GetUpdatesForHost returns all updates for a specific host.
func (m *Manager) GetUpdatesForHost(hostID string) []*UpdateInfo {
	return m.store.GetUpdatesForHost(hostID)
}

// GetUpdatesForResource returns the update for a specific resource, or nil
// if none is tracked.
func (m *Manager) GetUpdatesForResource(resourceID string) *UpdateInfo {
	return m.store.GetUpdatesForResource(resourceID)
}
// GetSummary returns aggregated update statistics keyed by host ID.
func (m *Manager) GetSummary() map[string]*UpdateSummary {
	return m.store.GetSummary()
}

// GetTotalCount returns the total number of tracked updates.
func (m *Manager) GetTotalCount() int {
	return m.store.Count()
}
// DeleteUpdatesForHost removes all updates for a host (called when host is removed).
func (m *Manager) DeleteUpdatesForHost(hostID string) {
	m.store.DeleteUpdatesForHost(hostID)
}

// AddRegistryConfig adds registry authentication configuration, forwarded to
// the underlying registry checker.
func (m *Manager) AddRegistryConfig(cfg RegistryConfig) {
	m.registry.AddRegistryConfig(cfg)
}
// CleanupStale evicts update entries whose LastChecked timestamp is older
// than maxAge and returns how many were removed. Run periodically so that
// containers which disappeared stop being reported as having updates.
func (m *Manager) CleanupStale(maxAge time.Duration) int {
	cutoff := time.Now().Add(-maxAge)
	removed := 0
	for _, entry := range m.store.GetAllUpdates() {
		if entry.LastChecked.Before(cutoff) {
			m.store.DeleteUpdate(entry.ID)
			removed++
		}
	}
	if removed > 0 {
		m.logger.Info().Int("removed", removed).Msg("Cleaned up stale update entries")
	}
	return removed
}
// UpdateFilters allows filtering update queries. Zero-valued fields are
// treated as "no filter"; set fields are ANDed together by Matches.
type UpdateFilters struct {
	HostID       string     // Filter by host
	ResourceType string     // Filter by resource type (docker, lxc, vm, etc)
	UpdateType   UpdateType // Filter by update type
	Severity     UpdateSeverity
	HasError     *bool // Filter by error status (nil = don't filter)
}

// IsEmpty returns true if no filters are set.
func (f *UpdateFilters) IsEmpty() bool {
	return f.HostID == "" && f.ResourceType == "" && f.UpdateType == "" && f.Severity == "" && f.HasError == nil
}
// Matches reports whether update satisfies every filter that is set.
// Unset (zero-valued) filters are ignored; set filters are ANDed.
func (f *UpdateFilters) Matches(update *UpdateInfo) bool {
	switch {
	case f.HostID != "" && update.HostID != f.HostID:
		return false
	case f.ResourceType != "" && update.ResourceType != f.ResourceType:
		return false
	case f.UpdateType != "" && update.Type != f.UpdateType:
		return false
	case f.Severity != "" && update.Severity != f.Severity:
		return false
	}
	// HasError is a tri-state: nil means "don't care".
	if f.HasError != nil && (update.Error != "") != *f.HasError {
		return false
	}
	return true
}
// Enabled returns whether update detection is enabled. Safe for concurrent use.
func (m *Manager) Enabled() bool {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.enabled
}

// SetEnabled enables or disables update detection. Safe for concurrent use.
func (m *Manager) SetEnabled(enabled bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.enabled = enabled
}
// AlertDelayHours returns the configured delay, in hours, that an update must
// remain pending before it becomes eligible for alerting.
func (m *Manager) AlertDelayHours() int {
	m.mu.RLock()
	defer m.mu.RUnlock()
	return m.alertDelayHours
}
// GetUpdatesReadyForAlert returns updates that have been pending for longer
// than the alert delay. Entries with a check error are excluded so transient
// registry failures never alert. Note: an entry with a zero FirstDetected
// would qualify immediately; Store.UpsertUpdate stamps FirstDetected on
// insert, so that should not occur in practice.
func (m *Manager) GetUpdatesReadyForAlert() []*UpdateInfo {
	m.mu.RLock()
	delay := time.Duration(m.alertDelayHours) * time.Hour
	m.mu.RUnlock()
	all := m.store.GetAllUpdates()
	cutoff := time.Now().Add(-delay)
	ready := make([]*UpdateInfo, 0)
	for _, update := range all {
		if update.FirstDetected.Before(cutoff) && update.Error == "" {
			ready = append(ready, update)
		}
	}
	return ready
}

View file

@ -0,0 +1,395 @@
package updatedetection
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"regexp"
"strings"
"sync"
"time"
"github.com/rs/zerolog"
)
// RegistryConfig holds authentication for a container registry.
type RegistryConfig struct {
	Host     string `json:"host"`               // e.g., "registry-1.docker.io", "ghcr.io"
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"` // token or password
	Insecure bool   `json:"insecure,omitempty"` // Skip TLS verification (also switches manifest requests to http)
}

// RegistryChecker handles digest lookups against container registries.
// All methods are safe for concurrent use: r.mu guards configs, and the
// digest cache carries its own lock.
type RegistryChecker struct {
	httpClient *http.Client
	configs    map[string]RegistryConfig // keyed by registry host
	cache      *digestCache
	logger     zerolog.Logger
	mu         sync.RWMutex // guards configs
}

// digestCache provides thread-safe caching of digest lookups.
type digestCache struct {
	entries map[string]cacheEntry
	mu      sync.RWMutex
}

// cacheEntry stores either a successful digest lookup or a cached error
// message (exactly one of digest/err is meaningful), plus its expiry.
type cacheEntry struct {
	digest    string
	expiresAt time.Time
	err       string // cached error message
}

// DefaultCacheTTL is the default time-to-live for cached digests.
const DefaultCacheTTL = 6 * time.Hour

// ErrorCacheTTL is the TTL for caching errors (shorter to allow retry).
const ErrorCacheTTL = 15 * time.Minute
// NewRegistryChecker creates a new registry checker with an empty config map
// and digest cache. The HTTP client enforces TLS 1.2+, a 30-second overall
// request timeout, and a small keep-alive pool since lookups are infrequent.
func NewRegistryChecker(logger zerolog.Logger) *RegistryChecker {
	return &RegistryChecker{
		httpClient: &http.Client{
			Timeout: 30 * time.Second,
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					MinVersion: tls.VersionTLS12,
				},
				MaxIdleConns:       10,
				IdleConnTimeout:    90 * time.Second,
				DisableCompression: false,
				DisableKeepAlives:  false,
			},
		},
		configs: make(map[string]RegistryConfig),
		cache: &digestCache{
			entries: make(map[string]cacheEntry),
		},
		logger: logger,
	}
}
// AddRegistryConfig adds or updates registry authentication configuration,
// keyed (and overwritten) by cfg.Host.
func (r *RegistryChecker) AddRegistryConfig(cfg RegistryConfig) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.configs[cfg.Host] = cfg
}
// ParseImageReference splits an image reference into registry host,
// repository path, and tag.
// Examples:
//   - "nginx" -> registry-1.docker.io, library/nginx, latest
//   - "nginx:1.25" -> registry-1.docker.io, library/nginx, 1.25
//   - "myrepo/myapp:v1" -> registry-1.docker.io, myrepo/myapp, v1
//   - "ghcr.io/owner/repo:tag" -> ghcr.io, owner/repo, tag
//   - "registry.example.com:5000/app:v2" -> registry.example.com:5000, app, v2
//
// Digest-pinned references ("image@sha256:...") return three empty strings:
// a pinned digest never moves, so tag-based update checks do not apply.
func ParseImageReference(image string) (registry, repository, tag string) {
	if strings.Contains(image, "@sha256:") {
		return "", "", ""
	}
	registry = "registry-1.docker.io"
	tag = "latest"
	// Peel off a trailing ":tag" — but only when the text after the last
	// colon contains no "/", otherwise the colon belongs to a registry port.
	if idx := strings.LastIndex(image, ":"); idx >= 0 && !strings.Contains(image[idx+1:], "/") {
		tag = image[idx+1:]
		image = image[:idx]
	}
	// A leading path segment that looks like a host (has a dot, a port
	// colon, or is "localhost") names the registry; otherwise everything is
	// a Docker Hub repository.
	slash := strings.Index(image, "/")
	if slash < 0 {
		// Bare name such as "nginx": an official Docker Hub image.
		repository = "library/" + image
		return registry, repository, tag
	}
	head := image[:slash]
	if strings.Contains(head, ".") || strings.Contains(head, ":") || head == "localhost" {
		registry = head
		repository = image[slash+1:]
	} else {
		// Namespaced Docker Hub image such as "myrepo/myapp".
		repository = image
	}
	return registry, repository, tag
}
// CheckImageUpdate compares current digest with registry's latest.
//
// Flow: parse the reference, consult the digest cache, and only on a miss hit
// the registry. Failures (auth, rate limit, network) are reported in-band via
// ImageUpdateInfo.Error and cached with the short error TTL; the returned Go
// error is always nil in this implementation, so callers must inspect the
// Error field rather than the error return.
func (r *RegistryChecker) CheckImageUpdate(ctx context.Context, image, currentDigest string) (*ImageUpdateInfo, error) {
	registry, repository, tag := ParseImageReference(image)
	// Skip digest-pinned images
	if registry == "" {
		return &ImageUpdateInfo{
			Image:           image,
			CurrentDigest:   currentDigest,
			UpdateAvailable: false,
			CheckedAt:       time.Now(),
			Error:           "digest-pinned image, cannot check for updates",
		}, nil
	}
	// Check cache first
	cacheKey := fmt.Sprintf("%s/%s:%s", registry, repository, tag)
	if cached := r.getCached(cacheKey); cached != nil {
		// A cached error short-circuits without touching the registry.
		if cached.err != "" {
			return &ImageUpdateInfo{
				Image:           image,
				CurrentDigest:   currentDigest,
				UpdateAvailable: false,
				CheckedAt:       time.Now(),
				Error:           cached.err,
			}, nil
		}
		return &ImageUpdateInfo{
			Image:           image,
			CurrentDigest:   currentDigest,
			LatestDigest:    cached.digest,
			UpdateAvailable: currentDigest != "" && cached.digest != "" && currentDigest != cached.digest,
			CheckedAt:       time.Now(),
		}, nil
	}
	// Fetch latest digest from registry
	latestDigest, err := r.fetchDigest(ctx, registry, repository, tag)
	if err != nil {
		// Cache the error to avoid hammering the registry
		r.cacheError(cacheKey, err.Error())
		r.logger.Debug().
			Str("image", image).
			Str("registry", registry).
			Err(err).
			Msg("Failed to fetch image digest from registry")
		return &ImageUpdateInfo{
			Image:           image,
			CurrentDigest:   currentDigest,
			UpdateAvailable: false,
			CheckedAt:       time.Now(),
			Error:           err.Error(),
		}, nil
	}
	// Cache the successful result
	r.cacheDigest(cacheKey, latestDigest)
	// An update is flagged only when both digests are known and differ; a
	// missing digest on either side must not produce a false positive.
	return &ImageUpdateInfo{
		Image:           image,
		CurrentDigest:   currentDigest,
		LatestDigest:    latestDigest,
		UpdateAvailable: currentDigest != "" && latestDigest != "" && currentDigest != latestDigest,
		CheckedAt:       time.Now(),
	}, nil
}
// fetchDigest retrieves the manifest digest for repository:tag via a HEAD
// request to the registry's v2 manifests endpoint.
//
// Authentication: a Bearer token is used when getAuthToken yields one
// (Docker Hub token flow, GHCR PAT). Otherwise, if credentials are configured
// for this registry, HTTP basic auth is attached. Previously the configured
// username/password for generic private registries were never sent, so those
// lookups always failed with 401.
func (r *RegistryChecker) fetchDigest(ctx context.Context, registry, repository, tag string) (string, error) {
	// Get auth token if needed
	token, err := r.getAuthToken(ctx, registry, repository)
	if err != nil {
		return "", fmt.Errorf("auth: %w", err)
	}
	// Scheme and credentials both come from the per-registry config, so
	// snapshot it once under the read lock.
	scheme := "https"
	r.mu.RLock()
	cfg, hasConfig := r.configs[registry]
	r.mu.RUnlock()
	if hasConfig && cfg.Insecure {
		scheme = "http"
	}
	manifestURL := fmt.Sprintf("%s://%s/v2/%s/manifests/%s", scheme, registry, repository, tag)
	req, err := http.NewRequestWithContext(ctx, http.MethodHead, manifestURL, nil)
	if err != nil {
		return "", fmt.Errorf("create request: %w", err)
	}
	// Accept headers for multi-arch manifest support, so the registry
	// resolves the same digest `docker pull` would.
	req.Header.Set("Accept", strings.Join([]string{
		"application/vnd.docker.distribution.manifest.list.v2+json",
		"application/vnd.docker.distribution.manifest.v2+json",
		"application/vnd.oci.image.manifest.v1+json",
		"application/vnd.oci.image.index.v1+json",
	}, ", "))
	if token != "" {
		req.Header.Set("Authorization", "Bearer "+token)
	} else if hasConfig && cfg.Username != "" && cfg.Password != "" {
		// Generic private registry without a token flow: use basic auth.
		req.SetBasicAuth(cfg.Username, cfg.Password)
	}
	resp, err := r.httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("request: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusUnauthorized {
		return "", fmt.Errorf("authentication required")
	}
	if resp.StatusCode == http.StatusNotFound {
		return "", fmt.Errorf("image not found")
	}
	if resp.StatusCode == http.StatusTooManyRequests {
		return "", fmt.Errorf("rate limited")
	}
	if resp.StatusCode >= 400 {
		return "", fmt.Errorf("registry error: %d", resp.StatusCode)
	}
	// Get digest from Docker-Content-Digest header
	digest := resp.Header.Get("Docker-Content-Digest")
	if digest == "" {
		// Some registries don't return the digest on HEAD; the ETag often
		// carries the same value wrapped in quotes.
		digest = strings.Trim(resp.Header.Get("Etag"), "\"")
	}
	if digest == "" {
		return "", fmt.Errorf("no digest in response")
	}
	return digest, nil
}
// getAuthToken retrieves an auth token for the registry.
// For Docker Hub, this implements the v2 token flow (required even for
// public images). For GHCR a configured PAT is passed through as the bearer
// token. For all other registries it returns "", meaning no bearer token.
// NOTE(review): for generic registries with configured username/password the
// comment below says basic auth is used "directly", but verify the manifest
// request actually attaches those credentials.
func (r *RegistryChecker) getAuthToken(ctx context.Context, registry, repository string) (string, error) {
	r.mu.RLock()
	cfg, hasConfig := r.configs[registry]
	r.mu.RUnlock()
	// Docker Hub requires auth token even for public images
	if registry == "registry-1.docker.io" {
		// Anonymous requests get a pull-scoped token; configured credentials
		// raise the rate limit and grant access to private repositories.
		tokenURL := fmt.Sprintf("https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull", repository)
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, tokenURL, nil)
		if err != nil {
			return "", err
		}
		// Add basic auth if configured
		if hasConfig && cfg.Username != "" && cfg.Password != "" {
			req.SetBasicAuth(cfg.Username, cfg.Password)
		}
		resp, err := r.httpClient.Do(req)
		if err != nil {
			return "", err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return "", fmt.Errorf("token request failed: %d", resp.StatusCode)
		}
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return "", err
		}
		var tokenResp struct {
			Token string `json:"token"`
		}
		if err := json.Unmarshal(body, &tokenResp); err != nil {
			return "", err
		}
		return tokenResp.Token, nil
	}
	// For GitHub Container Registry
	if registry == "ghcr.io" {
		if hasConfig && cfg.Password != "" {
			// Use the password as a PAT token
			return cfg.Password, nil
		}
		// GHCR allows anonymous access for public images
		return "", nil
	}
	// For other registries with basic auth
	if hasConfig && cfg.Username != "" && cfg.Password != "" {
		// Return empty - we'll use basic auth directly
		return "", nil
	}
	return "", nil
}
// getCached returns a copy of the cache entry for key, or nil when the key
// is absent or the entry's TTL has elapsed.
func (r *RegistryChecker) getCached(key string) *cacheEntry {
	r.cache.mu.RLock()
	defer r.cache.mu.RUnlock()
	if entry, found := r.cache.entries[key]; found && !time.Now().After(entry.expiresAt) {
		// Return a pointer to the local copy so callers never alias the map.
		return &entry
	}
	return nil
}
// cacheDigest records a successful digest lookup with the standard TTL.
func (r *RegistryChecker) cacheDigest(key, digest string) {
	r.cache.mu.Lock()
	defer r.cache.mu.Unlock()
	r.cache.entries[key] = cacheEntry{
		digest:    digest,
		expiresAt: time.Now().Add(DefaultCacheTTL),
	}
}

// cacheError records a failed lookup with the shorter error TTL so the
// registry is not hammered with retries but recovers quickly.
func (r *RegistryChecker) cacheError(key, errMsg string) {
	r.cache.mu.Lock()
	defer r.cache.mu.Unlock()
	r.cache.entries[key] = cacheEntry{
		err:       errMsg,
		expiresAt: time.Now().Add(ErrorCacheTTL),
	}
}
// CleanupCache drops every cache entry whose expiry time has passed.
func (r *RegistryChecker) CleanupCache() {
	r.cache.mu.Lock()
	defer r.cache.mu.Unlock()
	now := time.Now()
	for key, entry := range r.cache.entries {
		if entry.expiresAt.Before(now) {
			// Deleting keys during range is safe in Go.
			delete(r.cache.entries, key)
		}
	}
}
// CacheSize returns the current number of cached entries (including any that
// have expired but not yet been cleaned up).
func (r *RegistryChecker) CacheSize() int {
	r.cache.mu.RLock()
	defer r.cache.mu.RUnlock()
	return len(r.cache.entries)
}
// isValidDigest checks if a string looks like a valid digest.
// digestPattern accepts only the canonical form: the literal "sha256:"
// prefix followed by exactly 64 lowercase hex characters.
var digestPattern = regexp.MustCompile(`^sha256:[a-f0-9]{64}$`)

func isValidDigest(s string) bool {
	return digestPattern.MatchString(s)
}

View file

@ -0,0 +1,124 @@
package updatedetection
import (
"testing"
)
// TestParseImageReference covers the tag/registry/repository split for the
// common reference shapes, including multi-level repositories and
// digest-pinned images (which must yield empty results).
func TestParseImageReference(t *testing.T) {
	tests := []struct {
		name     string
		image    string
		wantReg  string
		wantRepo string
		wantTag  string
	}{
		{
			name:     "official image without tag",
			image:    "nginx",
			wantReg:  "registry-1.docker.io",
			wantRepo: "library/nginx",
			wantTag:  "latest",
		},
		{
			name:     "official image with tag",
			image:    "nginx:1.25",
			wantReg:  "registry-1.docker.io",
			wantRepo: "library/nginx",
			wantTag:  "1.25",
		},
		{
			name:     "docker hub with namespace",
			image:    "myrepo/myapp:v1",
			wantReg:  "registry-1.docker.io",
			wantRepo: "myrepo/myapp",
			wantTag:  "v1",
		},
		{
			name:     "docker hub with namespace no tag",
			image:    "linuxserver/plex",
			wantReg:  "registry-1.docker.io",
			wantRepo: "linuxserver/plex",
			wantTag:  "latest",
		},
		{
			name:     "ghcr.io image",
			image:    "ghcr.io/owner/repo:tag",
			wantReg:  "ghcr.io",
			wantRepo: "owner/repo",
			wantTag:  "tag",
		},
		{
			name:     "private registry with port",
			image:    "registry.example.com:5000/app:v2",
			wantReg:  "registry.example.com:5000",
			wantRepo: "app",
			wantTag:  "v2",
		},
		{
			name:     "localhost registry",
			image:    "localhost:5000/myimage:dev",
			wantReg:  "localhost:5000",
			wantRepo: "myimage",
			wantTag:  "dev",
		},
		{
			name:     "digest pinned image",
			image:    "nginx@sha256:abc123def456",
			wantReg:  "",
			wantRepo: "",
			wantTag:  "",
		},
		{
			name:     "lscr.io image",
			image:    "lscr.io/linuxserver/plex:latest",
			wantReg:  "lscr.io",
			wantRepo: "linuxserver/plex",
			wantTag:  "latest",
		},
		{
			name:     "multi-level repository",
			image:    "gcr.io/google-containers/pause:3.2",
			wantReg:  "gcr.io",
			wantRepo: "google-containers/pause",
			wantTag:  "3.2",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotReg, gotRepo, gotTag := ParseImageReference(tt.image)
			if gotReg != tt.wantReg {
				t.Errorf("registry = %q, want %q", gotReg, tt.wantReg)
			}
			if gotRepo != tt.wantRepo {
				t.Errorf("repository = %q, want %q", gotRepo, tt.wantRepo)
			}
			if gotTag != tt.wantTag {
				t.Errorf("tag = %q, want %q", gotTag, tt.wantTag)
			}
		})
	}
}
// TestIsValidDigest exercises the sha256 digest validator with canonical
// digests, a truncated digest, a wrong algorithm prefix, and junk input.
func TestIsValidDigest(t *testing.T) {
	tests := []struct {
		digest string
		valid  bool
	}{
		{"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", true},
		{"sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", true},
		{"sha256:short", false},
		{"md5:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4", false},
		{"", false},
		{"notadigest", false},
	}
	for _, tt := range tests {
		t.Run(tt.digest, func(t *testing.T) {
			got := isValidDigest(tt.digest)
			if got != tt.valid {
				t.Errorf("isValidDigest(%q) = %v, want %v", tt.digest, got, tt.valid)
			}
		})
	}
}

View file

@ -0,0 +1,224 @@
package updatedetection
import (
"sync"
"time"
)
// Store manages the in-memory storage of update information.
// It provides thread-safe access to update data and supports
// queries by host, resource, or global listing. Two secondary indexes are
// kept in lockstep with the primary map: byHost (one host -> many updates)
// and byResource (one resource -> at most one update).
type Store struct {
	mu         sync.RWMutex
	updates    map[string]*UpdateInfo // keyed by UpdateInfo.ID
	byHost     map[string][]string    // hostID -> []updateID
	byResource map[string]string      // resourceID -> updateID
}

// NewStore creates a new, empty update store.
func NewStore() *Store {
	return &Store{
		updates:    make(map[string]*UpdateInfo),
		byHost:     make(map[string][]string),
		byResource: make(map[string]string),
	}
}
// UpsertUpdate inserts info or replaces the entry with the same ID. The
// original FirstDetected timestamp is preserved across refreshes (so alert
// delays measure from first sighting), and the byHost/byResource indexes are
// kept consistent — including the case where an existing entry is re-reported
// under a different host, which previously left a stale ID in the old host's
// index and never indexed the new host.
func (s *Store) UpsertUpdate(info *UpdateInfo) {
	s.mu.Lock()
	defer s.mu.Unlock()
	existing, exists := s.updates[info.ID]
	if exists {
		// Preserve FirstDetected from the original
		info.FirstDetected = existing.FirstDetected
	} else if info.FirstDetected.IsZero() {
		// New entry, stamp FirstDetected if the caller didn't
		info.FirstDetected = time.Now()
	}
	// Update the main store and the per-resource index
	s.updates[info.ID] = info
	s.byResource[info.ResourceID] = info.ID
	// Maintain the byHost index
	needHostIndex := !exists
	if exists && existing.HostID != info.HostID {
		// The entry moved hosts: unindex it from the old host first.
		old := s.byHost[existing.HostID]
		for i, id := range old {
			if id == info.ID {
				s.byHost[existing.HostID] = append(old[:i], old[i+1:]...)
				break
			}
		}
		if len(s.byHost[existing.HostID]) == 0 {
			delete(s.byHost, existing.HostID)
		}
		needHostIndex = true
	}
	if needHostIndex {
		s.byHost[info.HostID] = append(s.byHost[info.HostID], info.ID)
	}
}
// GetUpdatesForHost returns every tracked update belonging to hostID.
func (s *Store) GetUpdatesForHost(hostID string) []*UpdateInfo {
	s.mu.RLock()
	defer s.mu.RUnlock()
	ids := s.byHost[hostID]
	out := make([]*UpdateInfo, 0, len(ids))
	for _, id := range ids {
		// Skip any index entry whose update has been removed.
		if u, found := s.updates[id]; found {
			out = append(out, u)
		}
	}
	return out
}
// GetUpdatesForResource returns the update for a specific resource, or nil
// when the resource has no tracked update.
func (s *Store) GetUpdatesForResource(resourceID string) *UpdateInfo {
	s.mu.RLock()
	defer s.mu.RUnlock()
	updateID, ok := s.byResource[resourceID]
	if !ok {
		return nil
	}
	return s.updates[updateID]
}
// GetAllUpdates returns a snapshot slice of every tracked update.
// Map iteration order is random, so callers must not rely on ordering.
func (s *Store) GetAllUpdates() []*UpdateInfo {
	s.mu.RLock()
	defer s.mu.RUnlock()
	snapshot := make([]*UpdateInfo, 0, len(s.updates))
	for _, u := range s.updates {
		snapshot = append(snapshot, u)
	}
	return snapshot
}
// DeleteUpdate removes an update entry by ID, keeping both secondary indexes
// consistent. A no-op for unknown IDs.
func (s *Store) DeleteUpdate(id string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	update, exists := s.updates[id]
	if !exists {
		return
	}
	// Remove from main store
	delete(s.updates, id)
	// Remove from byResource index
	delete(s.byResource, update.ResourceID)
	// Remove from byHost index (linear scan; host update lists are small)
	hostUpdates := s.byHost[update.HostID]
	for i, updateID := range hostUpdates {
		if updateID == id {
			s.byHost[update.HostID] = append(hostUpdates[:i], hostUpdates[i+1:]...)
			break
		}
	}
	// Clean up empty host entries
	if len(s.byHost[update.HostID]) == 0 {
		delete(s.byHost, update.HostID)
	}
}
// DeleteUpdatesForResource removes any update associated with a resource,
// repairing a dangling byResource entry if the update itself is already gone.
func (s *Store) DeleteUpdatesForResource(resourceID string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	updateID, ok := s.byResource[resourceID]
	if !ok {
		return
	}
	update := s.updates[updateID]
	if update == nil {
		// Dangling index entry: clean it up and stop.
		delete(s.byResource, resourceID)
		return
	}
	// Remove from main store
	delete(s.updates, updateID)
	delete(s.byResource, resourceID)
	// Remove from byHost index
	hostUpdates := s.byHost[update.HostID]
	for i, id := range hostUpdates {
		if id == updateID {
			s.byHost[update.HostID] = append(hostUpdates[:i], hostUpdates[i+1:]...)
			break
		}
	}
	if len(s.byHost[update.HostID]) == 0 {
		delete(s.byHost, update.HostID)
	}
}
// DeleteUpdatesForHost drops every update tracked for hostID in one pass,
// clearing the per-resource index entries along the way.
func (s *Store) DeleteUpdatesForHost(hostID string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, id := range s.byHost[hostID] {
		if u := s.updates[id]; u != nil {
			delete(s.byResource, u.ResourceID)
		}
		delete(s.updates, id)
	}
	delete(s.byHost, hostID)
}
// GetSummary returns aggregated update statistics, one UpdateSummary per
// host: total/security counts, container vs package breakdown, and the most
// recent LastChecked timestamp seen among that host's updates.
func (s *Store) GetSummary() map[string]*UpdateSummary {
	s.mu.RLock()
	defer s.mu.RUnlock()
	summaries := make(map[string]*UpdateSummary)
	for _, update := range s.updates {
		summary, exists := summaries[update.HostID]
		if !exists {
			summary = &UpdateSummary{
				HostID:   update.HostID,
				HostName: update.HostID, // Will be enriched by caller
			}
			summaries[update.HostID] = summary
		}
		summary.TotalUpdates++
		// Track the newest check time across the host's updates.
		if update.LastChecked.After(summary.LastChecked) {
			summary.LastChecked = update.LastChecked
		}
		if update.Severity == SeveritySecurity {
			summary.SecurityUpdates++
		}
		switch update.Type {
		case UpdateTypeDockerImage, UpdateTypeKubernetesImage:
			summary.ContainerUpdates++
		case UpdateTypePackage, UpdateTypeProxmox:
			summary.PackageUpdates++
		}
	}
	return summaries
}
// Count returns the total number of tracked updates.
func (s *Store) Count() int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return len(s.updates)
}

// CountForHost returns the number of updates tracked for a specific host.
func (s *Store) CountForHost(hostID string) int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return len(s.byHost[hostID])
}

View file

@ -0,0 +1,205 @@
package updatedetection
import (
"testing"
"time"
)
// TestStore_UpsertAndGet verifies that a single upsert is visible through
// every query path: global listing, per-host, per-resource, and Count.
func TestStore_UpsertAndGet(t *testing.T) {
	store := NewStore()
	update := &UpdateInfo{
		ID:           "update-1",
		ResourceID:   "container-abc",
		ResourceType: "docker",
		ResourceName: "nginx",
		HostID:       "host-1",
		Type:         UpdateTypeDockerImage,
		LastChecked:  time.Now(),
	}
	store.UpsertUpdate(update)
	// Test GetAllUpdates
	all := store.GetAllUpdates()
	if len(all) != 1 {
		t.Errorf("expected 1 update, got %d", len(all))
	}
	// Test GetUpdatesForHost
	hostUpdates := store.GetUpdatesForHost("host-1")
	if len(hostUpdates) != 1 {
		t.Errorf("expected 1 update for host, got %d", len(hostUpdates))
	}
	// Test GetUpdatesForResource
	resourceUpdate := store.GetUpdatesForResource("container-abc")
	if resourceUpdate == nil {
		t.Fatal("expected update for resource, got nil")
	}
	if resourceUpdate.ID != "update-1" {
		t.Errorf("expected update ID 'update-1', got %q", resourceUpdate.ID)
	}
	// Test Count
	if store.Count() != 1 {
		t.Errorf("expected count 1, got %d", store.Count())
	}
}
// TestStore_UpsertPreservesFirstDetected ensures a refresh of an existing
// entry keeps the original FirstDetected timestamp — the alert delay must
// measure from the first sighting, not the latest report.
func TestStore_UpsertPreservesFirstDetected(t *testing.T) {
	store := NewStore()
	firstTime := time.Now().Add(-24 * time.Hour)
	update := &UpdateInfo{
		ID:            "update-1",
		ResourceID:    "container-abc",
		HostID:        "host-1",
		FirstDetected: firstTime,
		LastChecked:   time.Now(),
	}
	store.UpsertUpdate(update)
	// Upsert again with different LastChecked
	update2 := &UpdateInfo{
		ID:            "update-1",
		ResourceID:    "container-abc",
		HostID:        "host-1",
		FirstDetected: time.Now(), // Should be ignored
		LastChecked:   time.Now(),
	}
	store.UpsertUpdate(update2)
	// FirstDetected should be preserved
	result := store.GetUpdatesForResource("container-abc")
	if result == nil {
		t.Fatal("expected update, got nil")
	}
	if !result.FirstDetected.Equal(firstTime) {
		t.Errorf("FirstDetected changed: got %v, want %v", result.FirstDetected, firstTime)
	}
}
// TestStore_DeleteUpdate verifies that deleting by update ID removes the
// record from the count, the resource index, and the host index.
func TestStore_DeleteUpdate(t *testing.T) {
	s := NewStore()
	s.UpsertUpdate(&UpdateInfo{
		ID:         "update-1",
		ResourceID: "container-abc",
		HostID:     "host-1",
	})
	if s.Count() != 1 {
		t.Fatal("expected 1 update before delete")
	}

	s.DeleteUpdate("update-1")

	if got := s.Count(); got != 0 {
		t.Errorf("expected 0 updates after delete, got %d", got)
	}
	if s.GetUpdatesForResource("container-abc") != nil {
		t.Error("expected nil for deleted resource")
	}
	if len(s.GetUpdatesForHost("host-1")) != 0 {
		t.Error("expected empty updates for host after delete")
	}
}
// TestStore_DeleteUpdatesForResource verifies that deleting by resource ID
// removes only that resource's update and leaves other resources intact.
func TestStore_DeleteUpdatesForResource(t *testing.T) {
	s := NewStore()
	s.UpsertUpdate(&UpdateInfo{ID: "update-1", ResourceID: "container-abc", HostID: "host-1"})
	s.UpsertUpdate(&UpdateInfo{ID: "update-2", ResourceID: "container-def", HostID: "host-1"})

	s.DeleteUpdatesForResource("container-abc")

	if got := s.Count(); got != 1 {
		t.Errorf("expected 1 update after delete, got %d", got)
	}
	if s.GetUpdatesForResource("container-abc") != nil {
		t.Error("expected nil for deleted resource")
	}
	if s.GetUpdatesForResource("container-def") == nil {
		t.Error("expected update for non-deleted resource")
	}
}
// TestStore_DeleteUpdatesForHost verifies that deleting by host ID removes
// every update belonging to that host while other hosts are untouched.
func TestStore_DeleteUpdatesForHost(t *testing.T) {
	s := NewStore()
	s.UpsertUpdate(&UpdateInfo{ID: "update-1", ResourceID: "container-abc", HostID: "host-1"})
	s.UpsertUpdate(&UpdateInfo{ID: "update-2", ResourceID: "container-def", HostID: "host-1"})
	s.UpsertUpdate(&UpdateInfo{ID: "update-3", ResourceID: "container-ghi", HostID: "host-2"})

	s.DeleteUpdatesForHost("host-1")

	if got := s.Count(); got != 1 {
		t.Errorf("expected 1 update after host delete, got %d", got)
	}
	if len(s.GetUpdatesForHost("host-1")) != 0 {
		t.Error("expected no updates for deleted host")
	}
	if len(s.GetUpdatesForHost("host-2")) != 1 {
		t.Error("expected 1 update for other host")
	}
}
// TestStore_GetSummary verifies per-host aggregation: the total, the
// security counter, the per-type (container vs package) counters, and that
// LastChecked reflects the most recent check across the host's updates.
func TestStore_GetSummary(t *testing.T) {
	s := NewStore()
	now := time.Now()

	for _, u := range []*UpdateInfo{
		{
			ID:          "update-1",
			ResourceID:  "container-1",
			HostID:      "host-1",
			Type:        UpdateTypeDockerImage,
			Severity:    SeveritySecurity,
			LastChecked: now,
		},
		{
			ID:          "update-2",
			ResourceID:  "container-2",
			HostID:      "host-1",
			Type:        UpdateTypeDockerImage,
			LastChecked: now.Add(-time.Hour),
		},
		{
			ID:          "update-3",
			ResourceID:  "host-1-packages",
			HostID:      "host-1",
			Type:        UpdateTypePackage,
			LastChecked: now.Add(-2 * time.Hour),
		},
	} {
		s.UpsertUpdate(u)
	}

	summary, ok := s.GetSummary()["host-1"]
	if !ok {
		t.Fatal("expected summary for host-1")
	}
	if summary.TotalUpdates != 3 {
		t.Errorf("expected 3 total updates, got %d", summary.TotalUpdates)
	}
	if summary.SecurityUpdates != 1 {
		t.Errorf("expected 1 security update, got %d", summary.SecurityUpdates)
	}
	if summary.ContainerUpdates != 2 {
		t.Errorf("expected 2 container updates, got %d", summary.ContainerUpdates)
	}
	if summary.PackageUpdates != 1 {
		t.Errorf("expected 1 package update, got %d", summary.PackageUpdates)
	}
	if !summary.LastChecked.Equal(now) {
		t.Errorf("expected LastChecked to be %v, got %v", now, summary.LastChecked)
	}
}

View file

@ -0,0 +1,90 @@
// Package updatedetection provides unified update detection across all Pulse-managed
// infrastructure types: Docker containers, LXC, VMs, Proxmox hosts, and Kubernetes.
package updatedetection
import "time"
// UpdateType represents the category of update
type UpdateType string

const (
	// UpdateTypeDockerImage identifies a Docker container image update.
	UpdateTypeDockerImage UpdateType = "docker_image"
	// UpdateTypePackage identifies an OS package-manager update.
	UpdateTypePackage UpdateType = "package" // apt, yum, apk
	// UpdateTypeProxmox identifies a Proxmox-specific update.
	UpdateTypeProxmox UpdateType = "proxmox" // pve/pbs specific
	// UpdateTypeKubernetesImage identifies a Kubernetes workload image update.
	UpdateTypeKubernetesImage UpdateType = "k8s_image"
	// UpdateTypeHelmChart is reserved for Helm chart updates (not yet emitted).
	UpdateTypeHelmChart UpdateType = "helm_chart" // future
)
// UpdateSeverity indicates how critical the update is
type UpdateSeverity string

const (
	// SeverityUnknown is used when the severity cannot be determined.
	SeverityUnknown UpdateSeverity = "unknown"
	// SeveritySecurity marks security-relevant updates; these are counted
	// separately in UpdateSummary.SecurityUpdates.
	SeveritySecurity UpdateSeverity = "security"
	// SeverityBugfix marks bug-fix updates.
	SeverityBugfix UpdateSeverity = "bugfix"
	// SeverityFeature marks feature/enhancement updates.
	SeverityFeature UpdateSeverity = "feature"
)
// UpdateInfo represents a single available update
type UpdateInfo struct {
	// Core identification
	ID           string `json:"id"`           // Unique ID for this update
	ResourceID   string `json:"resourceId"`   // Pulse resource ID (e.g., docker container ID)
	ResourceType string `json:"resourceType"` // "docker", "lxc", "vm", "node", "k8s_pod"
	ResourceName string `json:"resourceName"` // Human-readable name
	HostID       string `json:"hostId"`       // Which host/node owns this resource

	// Update specifics
	Type     UpdateType     `json:"type"`               // Category; drives summary bucketing (container vs package)
	Severity UpdateSeverity `json:"severity,omitempty"` // Criticality; security entries are counted separately

	// Version/Image info
	CurrentVersion string `json:"currentVersion,omitempty"` // e.g., "1.2.3" or image:tag
	LatestVersion  string `json:"latestVersion,omitempty"`  // e.g., "1.2.4" or new digest
	CurrentDigest  string `json:"currentDigest,omitempty"`  // For container images
	LatestDigest   string `json:"latestDigest,omitempty"`

	// Package-specific (for apt/yum updates)
	PackageName   string `json:"packageName,omitempty"`
	PackageCount  int    `json:"packageCount,omitempty"`  // For summary: "15 packages"
	SecurityCount int    `json:"securityCount,omitempty"` // Security-only count

	// Timing
	FirstDetected time.Time `json:"firstDetected"` // When this update was first seen; preserved across upserts
	LastChecked   time.Time `json:"lastChecked"`   // When the most recent check ran

	// Additional metadata
	ChangelogURL string                 `json:"changelogUrl,omitempty"`
	Metadata     map[string]interface{} `json:"metadata,omitempty"`
	Error        string                 `json:"error,omitempty"` // If check failed
}
// UpdateSummary provides aggregated update stats for a host
type UpdateSummary struct {
	HostID           string    `json:"hostId"`           // Host this summary aggregates over
	HostName         string    `json:"hostName"`         // Human-readable host name
	TotalUpdates     int       `json:"totalUpdates"`     // All tracked updates for the host
	SecurityUpdates  int       `json:"securityUpdates"`  // Subset with SeveritySecurity
	ContainerUpdates int       `json:"containerUpdates"` // Docker/Kubernetes image updates
	PackageUpdates   int       `json:"packageUpdates"`   // OS package / Proxmox updates
	LastChecked      time.Time `json:"lastChecked"`      // Most recent LastChecked across the host's updates
}
// ImageUpdateInfo contains the result of a container image update check
type ImageUpdateInfo struct {
	Image           string    `json:"image"`           // Image reference that was checked (e.g., "nginx:latest")
	CurrentDigest   string    `json:"currentDigest"`   // Digest of the locally running image
	LatestDigest    string    `json:"latestDigest"`    // Digest reported by the registry
	UpdateAvailable bool      `json:"updateAvailable"` // True when the digests differ
	CheckedAt       time.Time `json:"checkedAt"`       // When the registry check was performed
	Error           string    `json:"error,omitempty"` // e.g., "rate limited", "auth required"
}
// ContainerUpdateStatus is included in container reports from the Docker agent
type ContainerUpdateStatus struct {
	UpdateAvailable bool      `json:"updateAvailable"`         // True when a newer image digest exists
	CurrentDigest   string    `json:"currentDigest,omitempty"` // Digest of the running image
	LatestDigest    string    `json:"latestDigest,omitempty"`  // Digest reported by the registry
	LastChecked     time.Time `json:"lastChecked"`             // When the check last ran
	Error           string    `json:"error,omitempty"`         // e.g., "rate limited", "auth required"
}

View file

@ -55,6 +55,7 @@ type Container struct {
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
ImageDigest string `json:"imageDigest,omitempty"` // Current image digest for update detection
CreatedAt time.Time `json:"createdAt"`
State string `json:"state"`
Status string `json:"status"`
@ -77,6 +78,7 @@ type Container struct {
BlockIO *ContainerBlockIO `json:"blockIo,omitempty"`
Mounts []ContainerMount `json:"mounts,omitempty"`
Podman *PodmanContainer `json:"podman,omitempty"`
UpdateStatus *UpdateStatus `json:"updateStatus,omitempty"` // Image update detection status
}
// ContainerPort tracks an exposed container port mapping.
@ -126,6 +128,15 @@ type ContainerMount struct {
Driver string `json:"driver,omitempty"`
}
// UpdateStatus tracks the image update status for a container.
// NOTE(review): mirrors updatedetection.ContainerUpdateStatus field-for-field;
// keep the two in sync if either changes.
type UpdateStatus struct {
	UpdateAvailable bool      `json:"updateAvailable"`         // True when a newer image digest exists
	CurrentDigest   string    `json:"currentDigest,omitempty"` // Digest of the running image
	LatestDigest    string    `json:"latestDigest,omitempty"`  // Digest reported by the registry
	LastChecked     time.Time `json:"lastChecked"`             // When the check last ran
	Error           string    `json:"error,omitempty"` // e.g., "rate limited", "auth required"
}
// AgentKey returns the stable identifier for a reporting agent.
func (r Report) AgentKey() string {
if r.Agent.ID != "" {