Add disk metrics, block I/O, and mount details to Docker monitoring

Extends Docker container monitoring with comprehensive disk and storage information:
- Writable layer size and root filesystem usage displayed in new Disk column
- Block I/O statistics (read/write bytes totals) shown in container drawer
- Mount metadata including type, source, destination, mode, and driver details
- Configurable via --collect-disk flag (enabled by default, can be disabled for large fleets)

Also fixes config watcher to consistently use production auth config path instead of following PULSE_DATA_DIR when in mock mode.
This commit is contained in:
rcourtman 2025-10-29 12:05:36 +00:00
parent 35ab52b75b
commit 32392d1212
14 changed files with 581 additions and 159 deletions

View file

@ -70,6 +70,7 @@ func loadConfig() dockeragent.Config {
envSwarmServices := strings.TrimSpace(os.Getenv("PULSE_SWARM_SERVICES"))
envSwarmTasks := strings.TrimSpace(os.Getenv("PULSE_SWARM_TASKS"))
envIncludeContainers := strings.TrimSpace(os.Getenv("PULSE_INCLUDE_CONTAINERS"))
envCollectDisk := strings.TrimSpace(os.Getenv("PULSE_COLLECT_DISK"))
defaultInterval := 30 * time.Second
if envInterval != "" {
@ -98,6 +99,11 @@ func loadConfig() dockeragent.Config {
includeContainersDefault = parseBool(envIncludeContainers)
}
collectDiskDefault := true
if envCollectDisk != "" {
collectDiskDefault = parseBool(envCollectDisk)
}
urlFlag := flag.String("url", envURL, "Pulse server URL (e.g. http://pulse:7655)")
tokenFlag := flag.String("token", envToken, "Pulse API token (required)")
intervalFlag := flag.Duration("interval", defaultInterval, "Reporting interval (e.g. 30s)")
@ -113,6 +119,7 @@ func loadConfig() dockeragent.Config {
includeServicesFlag := flag.Bool("swarm-services", includeServicesDefault, "Include Swarm service summaries in reports")
includeTasksFlag := flag.Bool("swarm-tasks", includeTasksDefault, "Include Swarm tasks in reports")
includeContainersFlag := flag.Bool("include-containers", includeContainersDefault, "Include per-container metrics in reports")
collectDiskFlag := flag.Bool("collect-disk", collectDiskDefault, "Collect per-container disk usage, block IO, and mount details in reports")
flag.Parse()
@ -178,6 +185,7 @@ func loadConfig() dockeragent.Config {
IncludeServices: *includeServicesFlag,
IncludeTasks: *includeTasksFlag,
IncludeContainers: *includeContainersFlag,
CollectDiskMetrics: *collectDiskFlag,
}
}

View file

@ -11,6 +11,7 @@ Every check interval (30s by default) the agent collects:
- Restart counters and exit codes
- CPU usage, memory consumption and limits
- Images, port mappings, network addresses, and start times
- Writable layer size, root filesystem size, block I/O totals, and mount metadata (shown in the Docker table drawer)
- Health-check failures, restart-loop windows, and recent exit codes (displayed in the UI under each container drawer)
Data is pushed to Pulse over HTTPS using your existing API token — no inbound firewall rules required.
@ -157,6 +158,7 @@ docker run -d \
| `--swarm-services`, `PULSE_SWARM_SERVICES` | Include Swarm service summaries in reports. | `true` |
| `--swarm-tasks`, `PULSE_SWARM_TASKS` | Include individual Swarm tasks in reports. | `true` |
| `--include-containers`, `PULSE_INCLUDE_CONTAINERS` | Include per-container metrics (disable when only Swarm data is needed). | `true` |
| `--collect-disk`, `PULSE_COLLECT_DISK` | Collect per-container disk usage, block I/O, and mount metadata. Disable to skip Docker size queries on extremely large fleets. | `true` |
| `--hostname`, `PULSE_HOSTNAME` | Override host name reported to Pulse. | Docker info / OS hostname |
| `--agent-id`, `PULSE_AGENT_ID` | Stable ID for the agent (useful for clustering). | Docker engine ID / machine-id |
| `--insecure`, `PULSE_INSECURE_SKIP_VERIFY` | Skip TLS cert validation (unsafe). | `false` |

View file

@ -349,11 +349,46 @@ const DockerContainerRow: Component<{
});
let urlInputRef: HTMLInputElement | undefined;
const writableLayerBytes = createMemo(() => container.writableLayerBytes ?? 0);
const rootFilesystemBytes = createMemo(() => container.rootFilesystemBytes ?? 0);
const hasDiskStats = createMemo(() => writableLayerBytes() > 0 || rootFilesystemBytes() > 0);
const diskPercent = createMemo<number | null>(() => {
const total = rootFilesystemBytes();
if (!total || total <= 0) return null;
const used = writableLayerBytes();
if (used <= 0) return 0;
return Math.min(100, (used / total) * 100);
});
const diskUsageLabel = createMemo(() => {
const used = writableLayerBytes();
if (used <= 0) return '0 B';
return formatBytes(used, 0);
});
const diskSublabel = createMemo<string | undefined>(() => {
const total = rootFilesystemBytes();
if (!total || total <= 0) return undefined;
return `${diskUsageLabel()} / ${formatBytes(total, 0)}`;
});
const mounts = createMemo(() => container.mounts || []);
const hasMounts = createMemo(() => mounts().length > 0);
const blockIo = createMemo(() => container.blockIo);
const blockIoReadBytes = createMemo(() => blockIo()?.readBytes ?? 0);
const blockIoWriteBytes = createMemo(() => blockIo()?.writeBytes ?? 0);
const hasBlockIo = createMemo(() => {
const stats = blockIo();
if (!stats) return false;
const read = stats.readBytes ?? 0;
const write = stats.writeBytes ?? 0;
return read > 0 || write > 0;
});
const hasDrawerContent = createMemo(() => {
return (
(container.ports && container.ports.length > 0) ||
(container.labels && Object.keys(container.labels).length > 0) ||
(container.networks && container.networks.length > 0)
(container.networks && container.networks.length > 0) ||
hasMounts() ||
hasBlockIo()
);
});
@ -716,6 +751,21 @@ const DockerContainerRow: Component<{
/>
</Show>
</td>
<td class="px-2 py-0.5 min-w-[180px]">
<Show when={hasDiskStats()} fallback={<span class="text-xs text-gray-400"></span>}>
<Show
when={diskPercent() !== null}
fallback={<span class="text-xs text-gray-700 dark:text-gray-300">{diskUsageLabel()}</span>}
>
<MetricBar
value={diskPercent() ?? 0}
label={formatPercent(diskPercent() ?? 0)}
type="disk"
sublabel={diskSublabel() ?? diskUsageLabel()}
/>
</Show>
</Show>
</td>
<td class="px-2 py-0.5 text-xs text-gray-700 dark:text-gray-300">
<Show when={isRunning()} fallback={<span class="text-gray-400"></span>}>
{restarts()}
@ -780,6 +830,94 @@ const DockerContainerRow: Component<{
</div>
</Show>
<Show when={hasBlockIo()}>
<div class="min-w-[220px] rounded border border-gray-200 bg-white/70 p-2 shadow-sm dark:border-gray-600/70 dark:bg-gray-900/30">
<div class="text-[11px] font-medium uppercase tracking-wide text-gray-700 dark:text-gray-200">
Block I/O
</div>
<div class="mt-1 space-y-1 text-[11px] text-gray-600 dark:text-gray-300">
<div class="flex items-center justify-between">
<span>Read</span>
<span class="font-semibold text-gray-900 dark:text-gray-100">
{formatBytes(blockIoReadBytes())}
</span>
</div>
<div class="flex items-center justify-between">
<span>Write</span>
<span class="font-semibold text-gray-900 dark:text-gray-100">
{formatBytes(blockIoWriteBytes())}
</span>
</div>
</div>
</div>
</Show>
<Show when={hasMounts()}>
<div class="min-w-[220px] flex-1 rounded border border-gray-200 bg-white/70 p-2 shadow-sm dark:border-gray-600/70 dark:bg-gray-900/30">
<div class="text-[11px] font-medium uppercase tracking-wide text-gray-700 dark:text-gray-200">
Mounts
</div>
<div class="mt-1 space-y-1 text-[11px] text-gray-600 dark:text-gray-300">
<For each={mounts()}>
{(mount) => {
const destination = mount.destination || mount.source || mount.name || 'mount';
const rw = mount.rw === false ? 'read-only' : 'read-write';
return (
<div class="rounded border border-dashed border-gray-200 p-2 last:mb-0 dark:border-gray-700/70">
<div class="flex items-center justify-between gap-2">
<span class="truncate font-medium text-gray-700 dark:text-gray-200" title={destination}>
{destination}
</span>
<Show when={mount.type}>
<span class="text-[10px] uppercase tracking-wide text-gray-500 dark:text-gray-400">
{mount.type}
</span>
</Show>
</div>
<Show when={mount.source}>
<div class="mt-1 truncate text-[11px] text-gray-600 dark:text-gray-300" title={mount.source}>
{mount.source}
</div>
</Show>
<div class="mt-1 flex flex-wrap gap-1 text-[10px] text-gray-500 dark:text-gray-400">
<span
class={`rounded px-1.5 py-0.5 ${
mount.rw === false
? 'bg-gray-200 text-gray-700 dark:bg-gray-700/60 dark:text-gray-200'
: 'bg-green-100 text-green-700 dark:bg-green-900/40 dark:text-green-300'
}`}
>
{rw}
</span>
<Show when={mount.mode}>
<span class="rounded bg-gray-200 px-1.5 py-0.5 text-gray-700 dark:bg-gray-700/60 dark:text-gray-200">
mode: {mount.mode}
</span>
</Show>
<Show when={mount.driver}>
<span class="rounded bg-blue-100 px-1.5 py-0.5 text-blue-700 dark:bg-blue-900/40 dark:text-blue-200">
{mount.driver}
</span>
</Show>
<Show when={mount.name}>
<span class="rounded bg-purple-100 px-1.5 py-0.5 text-purple-700 dark:bg-purple-900/40 dark:text-purple-200">
{mount.name}
</span>
</Show>
<Show when={mount.propagation}>
<span class="rounded bg-gray-100 px-1.5 py-0.5 text-gray-600 dark:bg-gray-800/40 dark:text-gray-300">
{mount.propagation}
</span>
</Show>
</div>
</div>
);
}}
</For>
</div>
</div>
</Show>
<Show when={container.labels && Object.keys(container.labels).length > 0}>
<div class="min-w-[220px] flex-1 rounded border border-gray-200 bg-white/70 p-2 shadow-sm dark:border-gray-600/70 dark:bg-gray-900/30">
<div class="text-[11px] font-medium uppercase tracking-wide text-gray-700 dark:text-gray-200">
@ -1140,6 +1278,7 @@ const DockerServiceRow: Component<{
</td>
<td class="px-2 py-0.5 text-xs text-gray-400 dark:text-gray-500 min-w-[150px]"></td>
<td class="px-2 py-0.5 text-xs text-gray-400 dark:text-gray-500 min-w-[210px]"></td>
<td class="px-2 py-0.5 text-xs text-gray-400 dark:text-gray-500 min-w-[180px]"></td>
<td class="px-2 py-0.5 text-xs text-gray-700 dark:text-gray-300 whitespace-nowrap">
<span class="font-semibold text-gray-900 dark:text-gray-100">
{(service.runningTasks ?? 0)}/{service.desiredTasks ?? 0}
@ -1481,13 +1620,16 @@ const DockerUnifiedTable: Component<DockerUnifiedTableProps> = (props) => {
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[14%] min-w-[150px]">
CPU
</th>
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[17%] min-w-[210px]">
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[16%] min-w-[210px]">
Memory
</th>
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[10%]">
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[15%] min-w-[180px]">
Disk
</th>
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[9%]">
Tasks / Restarts
</th>
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[10%]">
<th class="px-2 py-1 text-left text-[11px] sm:text-xs font-medium uppercase tracking-wider w-[9%]">
Updated / Uptime
</th>
</tr>
@ -1496,7 +1638,7 @@ const DockerUnifiedTable: Component<DockerUnifiedTableProps> = (props) => {
<For each={groupedRows()}>
{(group) => (
<>
<DockerHostGroupHeader host={group.host} colspan={8} />
<DockerHostGroupHeader host={group.host} colspan={9} />
<For each={group.rows}>
{(row) => {
// Build resource ID for metadata lookup
@ -1508,14 +1650,14 @@ const DockerUnifiedTable: Component<DockerUnifiedTableProps> = (props) => {
return row.kind === 'container' ? (
<DockerContainerRow
row={row}
columns={8}
columns={9}
customUrl={metadata?.customUrl}
onCustomUrlUpdate={props.onCustomUrlUpdate}
/>
) : (
<DockerServiceRow
row={row}
columns={8}
columns={9}
customUrl={metadata?.customUrl}
onCustomUrlUpdate={props.onCustomUrlUpdate}
/>

View file

@ -249,6 +249,10 @@ export interface DockerContainer {
ports?: DockerContainerPort[];
labels?: Record<string, string>;
networks?: DockerContainerNetwork[];
writableLayerBytes?: number;
rootFilesystemBytes?: number;
blockIo?: DockerContainerBlockIO;
mounts?: DockerContainerMount[];
}
export interface DockerContainerPort {
@ -264,6 +268,22 @@ export interface DockerContainerNetwork {
ipv6?: string;
}
/** Aggregate block I/O byte counters for a Docker container, as reported by the agent. */
export interface DockerContainerBlockIO {
  /** Total bytes read from block devices; absent when zero (backend omitempty). */
  readBytes?: number;
  /** Total bytes written to block devices; absent when zero (backend omitempty). */
  writeBytes?: number;
}
/** Metadata for a single mount attached to a Docker container. */
export interface DockerContainerMount {
  /** Mount type reported by Docker (e.g. "bind", "volume"). */
  type?: string;
  /** Host-side source path or volume source. */
  source?: string;
  /** Path where the mount appears inside the container. */
  destination?: string;
  /** Mount mode string (e.g. "rw"). */
  mode?: string;
  /** True when the mount is writable; false indicates read-only. */
  rw?: boolean;
  /** Mount propagation setting (e.g. "rprivate"). */
  propagation?: string;
  /** Volume name, when the mount is a named volume. */
  name?: string;
  /** Volume driver, when applicable. */
  driver?: string;
}
export interface Host {
id: string;
hostname: string;

View file

@ -32,16 +32,60 @@ type ConfigWatcher struct {
// NewConfigWatcher creates a new config watcher
func NewConfigWatcher(config *Config) (*ConfigWatcher, error) {
// Determine env file path
envPath := filepath.Join(config.ConfigPath, ".env")
if config.ConfigPath == "" {
envPath = "/etc/pulse/.env"
// CRITICAL FIX: Config watcher must ALWAYS watch the persistent production config,
// NOT the mock data directory. Mock mode should only affect Proxmox data, not auth.
//
// Strategy:
// 1. Check PULSE_AUTH_CONFIG_DIR (dedicated env var for auth config)
// 2. If PULSE_DATA_DIR looks like production (/etc/pulse or /data), use it
// 3. Otherwise, always prefer /etc/pulse if it exists (production auth)
// 4. Fall back to /data for Docker environments
// 5. Last resort: use PULSE_DATA_DIR (may be mock/dev)
persistentDataDir := ""
dataDir := os.Getenv("PULSE_DATA_DIR")
// Option 1: Explicit auth config directory override
if authDir := os.Getenv("PULSE_AUTH_CONFIG_DIR"); authDir != "" {
persistentDataDir = authDir
log.Info().Str("authConfigDir", authDir).Msg("Using PULSE_AUTH_CONFIG_DIR for auth config")
} else if dataDir == "/etc/pulse" || dataDir == "/data" {
// Option 2: PULSE_DATA_DIR is already production, use it
persistentDataDir = dataDir
} else if _, err := os.Stat("/etc/pulse/.env"); err == nil {
// Option 3: /etc/pulse exists, use it (production)
persistentDataDir = "/etc/pulse"
if dataDir != "" && dataDir != persistentDataDir {
log.Warn().
Str("dataDir", dataDir).
Str("authConfigDir", persistentDataDir).
Msg("PULSE_DATA_DIR points to non-production directory - using /etc/pulse for auth config instead")
}
} else if _, err := os.Stat("/data/.env"); err == nil {
// Option 4: Docker environment
persistentDataDir = "/data"
} else if dataDir != "" {
// Option 5: Use PULSE_DATA_DIR as fallback
persistentDataDir = dataDir
if strings.Contains(persistentDataDir, "/mock-data") || strings.Contains(persistentDataDir, "/tmp/") {
log.Warn().
Str("authConfigDir", persistentDataDir).
Msg("WARNING: Auth config watcher is using temporary/mock directory - auth may be unstable")
}
} else {
// Option 6: Last resort default
persistentDataDir = "/etc/pulse"
}
// Check for Docker environment
if _, err := os.Stat("/data/.env"); err == nil {
envPath = "/data/.env"
}
envPath := filepath.Join(persistentDataDir, ".env")
// Log what we're watching for debugging
log.Info().
Str("watchingPath", envPath).
Str("authConfigDir", persistentDataDir).
Str("pulseDataDir", dataDir).
Str("configPathFromConfig", config.ConfigPath).
Msg("Config watcher initialized - watching production auth config")
// Determine mock.env path - skip in Docker or if directory doesn't exist
mockEnvPath := ""

View file

@ -48,6 +48,7 @@ type Config struct {
IncludeServices bool
IncludeTasks bool
IncludeContainers bool
CollectDiskMetrics bool
Logger *zerolog.Logger
}
@ -437,12 +438,13 @@ func (a *Agent) collectContainers(ctx context.Context) ([]agentsdocker.Container
}
func (a *Agent) collectContainer(ctx context.Context, summary types.Container) (agentsdocker.Container, error) {
const perContainerTimeout = 5 * time.Second
const perContainerTimeout = 15 * time.Second
containerCtx, cancel := context.WithTimeout(ctx, perContainerTimeout)
defer cancel()
inspect, err := a.docker.ContainerInspect(containerCtx, summary.ID)
requestSize := a.cfg.CollectDiskMetrics
inspect, _, err := a.docker.ContainerInspectWithRaw(containerCtx, summary.ID, requestSize)
if err != nil {
return agentsdocker.Container{}, fmt.Errorf("inspect: %w", err)
}
@ -452,6 +454,7 @@ func (a *Agent) collectContainer(ctx context.Context, summary types.Container) (
memUsage int64
memLimit int64
memPercent float64
blockIO *agentsdocker.ContainerBlockIO
)
if inspect.State.Running || inspect.State.Paused {
@ -468,6 +471,7 @@ func (a *Agent) collectContainer(ctx context.Context, summary types.Container) (
cpuPercent = calculateCPUPercent(stats, a.cpuCount)
memUsage, memLimit, memPercent = calculateMemoryUsage(stats)
blockIO = summarizeBlockIO(stats)
}
createdAt := time.Unix(summary.Created, 0)
@ -524,26 +528,66 @@ func (a *Agent) collectContainer(ctx context.Context, summary types.Container) (
finishedPtr = &finished
}
var writableLayerBytes int64
if inspect.SizeRw != nil {
writableLayerBytes = *inspect.SizeRw
}
var rootFsBytes int64
if inspect.SizeRootFs != nil {
rootFsBytes = *inspect.SizeRootFs
}
var mounts []agentsdocker.ContainerMount
if len(inspect.Mounts) > 0 {
mounts = make([]agentsdocker.ContainerMount, 0, len(inspect.Mounts))
for _, mount := range inspect.Mounts {
mounts = append(mounts, agentsdocker.ContainerMount{
Type: string(mount.Type),
Source: mount.Source,
Destination: mount.Destination,
Mode: mount.Mode,
RW: mount.RW,
Propagation: string(mount.Propagation),
Name: mount.Name,
Driver: mount.Driver,
})
}
}
container := agentsdocker.Container{
ID: summary.ID,
Name: trimLeadingSlash(summary.Names),
Image: summary.Image,
CreatedAt: createdAt,
State: summary.State,
Status: summary.Status,
Health: health,
CPUPercent: cpuPercent,
MemoryUsageBytes: memUsage,
MemoryLimitBytes: memLimit,
MemoryPercent: memPercent,
UptimeSeconds: uptimeSeconds,
RestartCount: inspect.RestartCount,
ExitCode: inspect.State.ExitCode,
StartedAt: startedPtr,
FinishedAt: finishedPtr,
Ports: ports,
Labels: labels,
Networks: networks,
ID: summary.ID,
Name: trimLeadingSlash(summary.Names),
Image: summary.Image,
CreatedAt: createdAt,
State: summary.State,
Status: summary.Status,
Health: health,
CPUPercent: cpuPercent,
MemoryUsageBytes: memUsage,
MemoryLimitBytes: memLimit,
MemoryPercent: memPercent,
UptimeSeconds: uptimeSeconds,
RestartCount: inspect.RestartCount,
ExitCode: inspect.State.ExitCode,
StartedAt: startedPtr,
FinishedAt: finishedPtr,
Ports: ports,
Labels: labels,
Networks: networks,
WritableLayerBytes: writableLayerBytes,
RootFilesystemBytes: rootFsBytes,
BlockIO: blockIO,
Mounts: mounts,
}
if requestSize {
a.logger.Debug().
Str("container", container.Name).
Int64("writableLayerBytes", writableLayerBytes).
Int64("rootFilesystemBytes", rootFsBytes).
Int("mountCount", len(mounts)).
Msg("Collected container disk metrics")
}
return container, nil
@ -782,6 +826,28 @@ func newHTTPClient(insecure bool) *http.Client {
}
}
// summarizeBlockIO aggregates the per-device block I/O counters from a Docker
// stats response into total read/write byte counts.
//
// It returns nil when no read or write bytes were recorded so callers can
// omit the field from JSON payloads entirely (the struct pointer is tagged
// omitempty upstream).
func summarizeBlockIO(stats containertypes.StatsResponse) *agentsdocker.ContainerBlockIO {
	var readBytes, writeBytes uint64
	for _, entry := range stats.BlkioStats.IoServiceBytesRecursive {
		// Op casing is not guaranteed to be consistent across engine/cgroup
		// versions, so match case-insensitively. EqualFold avoids the
		// per-entry string allocation that ToLower would incur.
		switch {
		case strings.EqualFold(entry.Op, "read"):
			readBytes += entry.Value
		case strings.EqualFold(entry.Op, "write"):
			writeBytes += entry.Value
		}
	}
	if readBytes == 0 && writeBytes == 0 {
		return nil
	}
	return &agentsdocker.ContainerBlockIO{
		ReadBytes:  readBytes,
		WriteBytes: writeBytes,
	}
}
func calculateCPUPercent(stats containertypes.StatsResponse, hostCPUs int) float64 {
totalDelta := float64(stats.CPUStats.CPUUsage.TotalUsage - stats.PreCPUStats.CPUUsage.TotalUsage)
systemDelta := float64(stats.CPUStats.SystemUsage - stats.PreCPUStats.SystemUsage)

View file

@ -345,21 +345,23 @@ func (h Host) ToFrontend() HostFrontend {
// ToFrontend converts a DockerContainer to DockerContainerFrontend
func (c DockerContainer) ToFrontend() DockerContainerFrontend {
container := DockerContainerFrontend{
ID: c.ID,
Name: c.Name,
Image: c.Image,
State: c.State,
Status: c.Status,
Health: c.Health,
CPUPercent: c.CPUPercent,
MemoryUsage: c.MemoryUsage,
MemoryLimit: c.MemoryLimit,
MemoryPercent: c.MemoryPercent,
UptimeSeconds: c.UptimeSeconds,
RestartCount: c.RestartCount,
ExitCode: c.ExitCode,
CreatedAt: c.CreatedAt.Unix() * 1000,
Labels: c.Labels,
ID: c.ID,
Name: c.Name,
Image: c.Image,
State: c.State,
Status: c.Status,
Health: c.Health,
CPUPercent: c.CPUPercent,
MemoryUsage: c.MemoryUsage,
MemoryLimit: c.MemoryLimit,
MemoryPercent: c.MemoryPercent,
UptimeSeconds: c.UptimeSeconds,
RestartCount: c.RestartCount,
ExitCode: c.ExitCode,
CreatedAt: c.CreatedAt.Unix() * 1000,
Labels: c.Labels,
WritableLayerBytes: c.WritableLayerBytes,
RootFilesystemBytes: c.RootFilesystemBytes,
}
if c.StartedAt != nil {
@ -397,6 +399,30 @@ func (c DockerContainer) ToFrontend() DockerContainerFrontend {
container.Networks = networks
}
if c.BlockIO != nil {
container.BlockIO = &DockerContainerBlockIOFrontend{
ReadBytes: c.BlockIO.ReadBytes,
WriteBytes: c.BlockIO.WriteBytes,
}
}
if len(c.Mounts) > 0 {
mounts := make([]DockerContainerMountFrontend, len(c.Mounts))
for i, mount := range c.Mounts {
mounts[i] = DockerContainerMountFrontend{
Type: mount.Type,
Source: mount.Source,
Destination: mount.Destination,
Mode: mount.Mode,
RW: mount.RW,
Propagation: mount.Propagation,
Name: mount.Name,
Driver: mount.Driver,
}
}
container.Mounts = mounts
}
return container
}

View file

@ -226,25 +226,29 @@ type DockerHost struct {
// DockerContainer represents the state of a Docker container on a monitored host.
type DockerContainer struct {
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
CPUPercent float64 `json:"cpuPercent"`
MemoryUsage int64 `json:"memoryUsageBytes"`
MemoryLimit int64 `json:"memoryLimitBytes"`
MemoryPercent float64 `json:"memoryPercent"`
UptimeSeconds int64 `json:"uptimeSeconds"`
RestartCount int `json:"restartCount"`
ExitCode int `json:"exitCode"`
CreatedAt time.Time `json:"createdAt"`
StartedAt *time.Time `json:"startedAt,omitempty"`
FinishedAt *time.Time `json:"finishedAt,omitempty"`
Ports []DockerContainerPort `json:"ports,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Networks []DockerContainerNetworkLink `json:"networks,omitempty"`
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
CPUPercent float64 `json:"cpuPercent"`
MemoryUsage int64 `json:"memoryUsageBytes"`
MemoryLimit int64 `json:"memoryLimitBytes"`
MemoryPercent float64 `json:"memoryPercent"`
UptimeSeconds int64 `json:"uptimeSeconds"`
RestartCount int `json:"restartCount"`
ExitCode int `json:"exitCode"`
CreatedAt time.Time `json:"createdAt"`
StartedAt *time.Time `json:"startedAt,omitempty"`
FinishedAt *time.Time `json:"finishedAt,omitempty"`
Ports []DockerContainerPort `json:"ports,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Networks []DockerContainerNetworkLink `json:"networks,omitempty"`
WritableLayerBytes int64 `json:"writableLayerBytes,omitempty"`
RootFilesystemBytes int64 `json:"rootFilesystemBytes,omitempty"`
BlockIO *DockerContainerBlockIO `json:"blockIo,omitempty"`
Mounts []DockerContainerMount `json:"mounts,omitempty"`
}
// DockerContainerPort describes an exposed container port mapping.
@ -262,6 +266,24 @@ type DockerContainerNetworkLink struct {
IPv6 string `json:"ipv6,omitempty"`
}
// DockerContainerBlockIO captures aggregate block IO usage for a container.
// Counters are cumulative byte totals as reported by the Docker agent.
type DockerContainerBlockIO struct {
	ReadBytes  uint64 `json:"readBytes,omitempty"`  // total bytes read from block devices; omitted when zero
	WriteBytes uint64 `json:"writeBytes,omitempty"` // total bytes written to block devices; omitted when zero
}
// DockerContainerMount describes a mount exposed to a container.
// Fields mirror the mount metadata reported by the Docker agent; all
// string fields are optional and omitted from JSON when empty.
type DockerContainerMount struct {
	Type        string `json:"type,omitempty"`        // mount type as reported by Docker (e.g. "bind")
	Source      string `json:"source,omitempty"`      // host-side source path or volume source
	Destination string `json:"destination,omitempty"` // path where the mount appears inside the container
	Mode        string `json:"mode,omitempty"`        // mount mode string (e.g. "rw")
	RW          bool   `json:"rw"`                    // true when writable; always serialized so read-only is explicit
	Propagation string `json:"propagation,omitempty"` // mount propagation setting (e.g. "rprivate")
	Name        string `json:"name,omitempty"`        // volume name, when the mount is a named volume
	Driver      string `json:"driver,omitempty"`      // volume driver, when applicable
}
// DockerService summarises a Docker Swarm service.
type DockerService struct {
ID string `json:"id"`

View file

@ -132,25 +132,29 @@ type DockerHostFrontend struct {
// DockerContainerFrontend represents a Docker container for the frontend
type DockerContainerFrontend struct {
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
CPUPercent float64 `json:"cpuPercent"`
MemoryUsage int64 `json:"memoryUsageBytes"`
MemoryLimit int64 `json:"memoryLimitBytes"`
MemoryPercent float64 `json:"memoryPercent"`
UptimeSeconds int64 `json:"uptimeSeconds"`
RestartCount int `json:"restartCount"`
ExitCode int `json:"exitCode"`
CreatedAt int64 `json:"createdAt"`
StartedAt *int64 `json:"startedAt,omitempty"`
FinishedAt *int64 `json:"finishedAt,omitempty"`
Ports []DockerContainerPortFrontend `json:"ports,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Networks []DockerContainerNetworkFrontend `json:"networks,omitempty"`
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
CPUPercent float64 `json:"cpuPercent"`
MemoryUsage int64 `json:"memoryUsageBytes"`
MemoryLimit int64 `json:"memoryLimitBytes"`
MemoryPercent float64 `json:"memoryPercent"`
UptimeSeconds int64 `json:"uptimeSeconds"`
RestartCount int `json:"restartCount"`
ExitCode int `json:"exitCode"`
CreatedAt int64 `json:"createdAt"`
StartedAt *int64 `json:"startedAt,omitempty"`
FinishedAt *int64 `json:"finishedAt,omitempty"`
Ports []DockerContainerPortFrontend `json:"ports,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Networks []DockerContainerNetworkFrontend `json:"networks,omitempty"`
WritableLayerBytes int64 `json:"writableLayerBytes,omitempty"`
RootFilesystemBytes int64 `json:"rootFilesystemBytes,omitempty"`
BlockIO *DockerContainerBlockIOFrontend `json:"blockIo,omitempty"`
Mounts []DockerContainerMountFrontend `json:"mounts,omitempty"`
}
// DockerContainerPortFrontend represents a container port mapping
@ -168,6 +172,24 @@ type DockerContainerNetworkFrontend struct {
IPv6 string `json:"ipv6,omitempty"`
}
// DockerContainerBlockIOFrontend exposes aggregate block IO counters.
// This is the UI-facing mirror of the agent's block IO payload.
type DockerContainerBlockIOFrontend struct {
	ReadBytes  uint64 `json:"readBytes,omitempty"`  // total bytes read; omitted when zero
	WriteBytes uint64 `json:"writeBytes,omitempty"` // total bytes written; omitted when zero
}
// DockerContainerMountFrontend represents a container mount for the UI.
// Field-for-field mirror of the internal mount model, serialized with the
// same JSON keys the frontend TypeScript types expect.
type DockerContainerMountFrontend struct {
	Type        string `json:"type,omitempty"`        // mount type (e.g. "bind")
	Source      string `json:"source,omitempty"`      // host-side source path or volume source
	Destination string `json:"destination,omitempty"` // path inside the container
	Mode        string `json:"mode,omitempty"`        // mount mode string
	RW          bool   `json:"rw"`                    // writable flag; always serialized
	Propagation string `json:"propagation,omitempty"` // mount propagation setting
	Name        string `json:"name,omitempty"`        // volume name, when applicable
	Driver      string `json:"driver,omitempty"`      // volume driver, when applicable
}
// DockerServiceFrontend represents a Swarm service for the frontend.
type DockerServiceFrontend struct {
ID string `json:"id"`

View file

@ -1395,6 +1395,33 @@ func (m *Monitor) ApplyDockerReport(report agentsdocker.Report, tokenRecord *con
container.Networks = networks
}
container.WritableLayerBytes = payload.WritableLayerBytes
container.RootFilesystemBytes = payload.RootFilesystemBytes
if payload.BlockIO != nil {
container.BlockIO = &models.DockerContainerBlockIO{
ReadBytes: payload.BlockIO.ReadBytes,
WriteBytes: payload.BlockIO.WriteBytes,
}
}
if len(payload.Mounts) > 0 {
mounts := make([]models.DockerContainerMount, len(payload.Mounts))
for i, mount := range payload.Mounts {
mounts[i] = models.DockerContainerMount{
Type: mount.Type,
Source: mount.Source,
Destination: mount.Destination,
Mode: mount.Mode,
RW: mount.RW,
Propagation: mount.Propagation,
Name: mount.Name,
Driver: mount.Driver,
}
}
container.Mounts = mounts
}
containers = append(containers, container)
}

View file

@ -108,3 +108,75 @@ func TestApplyDockerReportGeneratesUniqueIDsForCollidingHosts(t *testing.T) {
t.Fatalf("expected host2 to have 2 containers after update, got %d", len(found.Containers))
}
}
// TestApplyDockerReportIncludesContainerDiskDetails verifies that disk usage,
// block I/O, and mount metadata from an agent report survive the round trip
// through ApplyDockerReport onto the stored host model.
func TestApplyDockerReportIncludesContainerDiskDetails(t *testing.T) {
	const (
		wantWritable = int64(512 * 1024 * 1024)
		wantRootFs   = int64(2 * 1024 * 1024 * 1024)
	)
	now := time.Now().UTC()

	// Build a minimal report carrying one container with every disk-related
	// field populated.
	rpt := agentsdocker.Report{
		Agent: agentsdocker.AgentInfo{
			ID:              "agent-1",
			Version:         "1.2.3",
			IntervalSeconds: 30,
		},
		Host: agentsdocker.HostInfo{
			Hostname: "disk-host",
		},
		Containers: []agentsdocker.Container{
			{
				ID:                  "ctr-1",
				Name:                "app",
				WritableLayerBytes:  wantWritable,
				RootFilesystemBytes: wantRootFs,
				BlockIO: &agentsdocker.ContainerBlockIO{
					ReadBytes:  123456,
					WriteBytes: 654321,
				},
				Mounts: []agentsdocker.ContainerMount{
					{
						Type:        "bind",
						Source:      "/srv/app/config",
						Destination: "/config",
						Mode:        "rw",
						RW:          true,
						Propagation: "rprivate",
						Name:        "",
						Driver:      "",
					},
				},
			},
		},
		Timestamp: now,
	}

	mon := newTestMonitor(t)
	host, err := mon.ApplyDockerReport(rpt, nil)
	if err != nil {
		t.Fatalf("ApplyDockerReport returned error: %v", err)
	}
	if len(host.Containers) != 1 {
		t.Fatalf("expected 1 container, got %d", len(host.Containers))
	}

	got := host.Containers[0]
	if got.WritableLayerBytes != wantWritable {
		t.Fatalf("expected writable layer bytes to match, got %d", got.WritableLayerBytes)
	}
	if got.RootFilesystemBytes != wantRootFs {
		t.Fatalf("expected root filesystem bytes to match, got %d", got.RootFilesystemBytes)
	}
	if got.BlockIO == nil {
		t.Fatalf("expected block IO stats to be populated")
	}
	if got.BlockIO.ReadBytes != 123456 || got.BlockIO.WriteBytes != 654321 {
		t.Fatalf("unexpected block IO values: %+v", got.BlockIO)
	}
	if len(got.Mounts) != 1 {
		t.Fatalf("expected mounts to be preserved, got %d", len(got.Mounts))
	}
	mnt := got.Mounts[0]
	if mnt.Source != "/srv/app/config" || mnt.Destination != "/config" || !mnt.RW {
		t.Fatalf("unexpected mount payload: %+v", mnt)
	}
}

View file

@ -1,52 +1 @@
# Mock Mode Configuration
# This file is part of the repository and provides default mock mode settings.
# Mock mode generates realistic test data for development without requiring real Proxmox infrastructure.
#
# Quick Start:
# npm run mock:on # Enable mock mode
# npm run mock:off # Disable mock mode
# npm run mock:edit # Edit this configuration
#
# The backend automatically reloads when this file changes (no manual restart needed).
#
# Documentation: docs/development/MOCK_MODE.md
# Enable/disable mock mode (false = use real Proxmox infrastructure)
PULSE_MOCK_MODE=false
# Number of mock nodes to generate (mix of clustered and standalone)
# First 5 nodes form a cluster, remaining nodes are standalone
PULSE_MOCK_NODES=7
# Average number of VMs per node
# Actual count varies based on node role (vm-heavy, container-heavy, light, mixed)
PULSE_MOCK_VMS_PER_NODE=5
# Average number of LXC containers per node
# Containers have lighter resource usage than VMs
PULSE_MOCK_LXCS_PER_NODE=8
# Number of standalone hosts (Pulse host agents) to simulate
PULSE_MOCK_GENERIC_HOSTS=6
# Number of Docker hosts to simulate
PULSE_MOCK_DOCKER_HOSTS=3
# Average number of containers per Docker host
PULSE_MOCK_DOCKER_CONTAINERS=12
# Enable realistic metric fluctuations (CPU, memory, disk, network)
# When true, metrics change every 2 seconds to simulate real workloads
PULSE_MOCK_RANDOM_METRICS=true
# Percentage of guests (VMs + containers) that should be in stopped state
# Set to 0 for all running, 100 for all stopped, 20 for realistic mix
PULSE_MOCK_STOPPED_PERCENT=20
# NOTE: PULSE_DATA_DIR is set dynamically by the toggle script:
# - Mock mode ON: /opt/pulse/tmp/mock-data
# - Mock mode OFF: /etc/pulse
# Do not set it here as it would override the toggle script's logic
# Local overrides (not tracked in git):
# Create mock.env.local for your personal settings - it will override these defaults

View file

@ -49,25 +49,29 @@ type HostInfo struct {
// Container captures the runtime state for a Docker container at report time.
type Container struct {
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
CreatedAt time.Time `json:"createdAt"`
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
CPUPercent float64 `json:"cpuPercent"`
MemoryUsageBytes int64 `json:"memoryUsageBytes"`
MemoryLimitBytes int64 `json:"memoryLimitBytes"`
MemoryPercent float64 `json:"memoryPercent"`
UptimeSeconds int64 `json:"uptimeSeconds"`
RestartCount int `json:"restartCount"`
ExitCode int `json:"exitCode"`
StartedAt *time.Time `json:"startedAt,omitempty"`
FinishedAt *time.Time `json:"finishedAt,omitempty"`
Ports []ContainerPort `json:"ports,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Networks []ContainerNetwork `json:"networks,omitempty"`
ID string `json:"id"`
Name string `json:"name"`
Image string `json:"image"`
CreatedAt time.Time `json:"createdAt"`
State string `json:"state"`
Status string `json:"status"`
Health string `json:"health,omitempty"`
CPUPercent float64 `json:"cpuPercent"`
MemoryUsageBytes int64 `json:"memoryUsageBytes"`
MemoryLimitBytes int64 `json:"memoryLimitBytes"`
MemoryPercent float64 `json:"memoryPercent"`
UptimeSeconds int64 `json:"uptimeSeconds"`
RestartCount int `json:"restartCount"`
ExitCode int `json:"exitCode"`
StartedAt *time.Time `json:"startedAt,omitempty"`
FinishedAt *time.Time `json:"finishedAt,omitempty"`
Ports []ContainerPort `json:"ports,omitempty"`
Labels map[string]string `json:"labels,omitempty"`
Networks []ContainerNetwork `json:"networks,omitempty"`
WritableLayerBytes int64 `json:"writableLayerBytes,omitempty"`
RootFilesystemBytes int64 `json:"rootFilesystemBytes,omitempty"`
BlockIO *ContainerBlockIO `json:"blockIo,omitempty"`
Mounts []ContainerMount `json:"mounts,omitempty"`
}
// ContainerPort tracks an exposed container port mapping.
@ -85,6 +89,24 @@ type ContainerNetwork struct {
IPv6 string `json:"ipv6,omitempty"`
}
// ContainerBlockIO summarises high-level block I/O metrics for a container.
// Values are aggregate byte totals collected from the Docker stats endpoint.
type ContainerBlockIO struct {
	ReadBytes  uint64 `json:"readBytes,omitempty"`  // total bytes read; omitted when zero
	WriteBytes uint64 `json:"writeBytes,omitempty"` // total bytes written; omitted when zero
}
// ContainerMount describes a mount point exposed inside a container.
// Populated from Docker's container-inspect mount list by the agent.
type ContainerMount struct {
	Type        string `json:"type,omitempty"`        // mount type (e.g. "bind")
	Source      string `json:"source,omitempty"`      // host-side source path or volume source
	Destination string `json:"destination,omitempty"` // path inside the container
	Mode        string `json:"mode,omitempty"`        // mount mode string (e.g. "rw")
	RW          bool   `json:"rw"`                    // writable flag; always serialized so read-only is explicit
	Propagation string `json:"propagation,omitempty"` // mount propagation setting (e.g. "rprivate")
	Name        string `json:"name,omitempty"`        // volume name, when applicable
	Driver      string `json:"driver,omitempty"`      // volume driver, when applicable
}
// AgentKey returns the stable identifier for a reporting agent.
func (r Report) AgentKey() string {
if r.Agent.ID != "" {

Binary file not shown.