From 5c54685f04a1fd6f7e43ba6e13f55536e8ba2846 Mon Sep 17 00:00:00 2001 From: rcourtman Date: Thu, 23 Oct 2025 11:40:31 +0000 Subject: [PATCH] Add API token scopes and standalone host agent Introduces granular permission scopes for API tokens (docker:report, docker:manage, host-agent:report, monitoring:read/write, settings:read/write) allowing tokens to be restricted to minimum required access. Legacy tokens default to full access until scopes are explicitly configured. Adds standalone host agent for monitoring Linux, macOS, and Windows servers outside Proxmox/Docker estates. New Servers workspace in UI displays uptime, OS metadata, and capacity metrics from enrolled agents. Includes comprehensive token management UI overhaul with scope presets, inline editing, and visual scope indicators. --- README.md | 7 +- cmd/pulse-host-agent/main.go | 147 ++++ docs/CONFIGURATION.md | 20 +- docs/HOST_AGENT.md | 101 +++ docs/development/API_TOKEN_SCOPES.md | 98 +++ frontend-modern/src/App.tsx | 34 + frontend-modern/src/api/security.ts | 13 +- .../src/components/Hosts/ServersOverview.tsx | 156 ++++ .../components/Settings/APITokenManager.tsx | 763 ++++++++++++++---- .../src/components/Settings/HostAgents.tsx | 378 +++++++++ .../src/components/Settings/Settings.tsx | 260 ++++-- .../src/components/icons/ApiIcon.tsx | 27 + .../src/components/icons/ServersIcon.tsx | 21 + frontend-modern/src/constants/apiScopes.ts | 64 ++ frontend-modern/src/types/api.ts | 43 + go.mod | 12 +- go.sum | 24 + internal/api/alerts.go | 34 + internal/api/auth.go | 58 ++ internal/api/auth_scope_test.go | 58 ++ internal/api/host_agents.go | 78 ++ internal/api/router.go | 68 +- internal/api/router_integration_test.go | 2 +- internal/api/security_setup_fix.go | 20 +- internal/api/security_tokens.go | 64 +- internal/api/security_tokens_test.go | 52 ++ internal/api/types.go | 1 + internal/config/api_tokens.go | 90 ++- internal/config/api_tokens_test.go | 58 ++ internal/config/config.go | 151 ++-- 
internal/config/persistence.go | 13 +- internal/config/persistence_test.go | 7 + internal/config/watcher.go | 1 + internal/hostagent/agent.go | 474 +++++++++++ internal/hostagent/version.go | 5 + internal/mock/generator.go | 292 ++++++- internal/models/state_snapshot.go | 8 + pkg/agents/host/report.go | 86 ++ scripts/build-release.sh | 43 + 39 files changed, 3499 insertions(+), 332 deletions(-) create mode 100644 cmd/pulse-host-agent/main.go create mode 100644 docs/HOST_AGENT.md create mode 100644 docs/development/API_TOKEN_SCOPES.md create mode 100644 frontend-modern/src/components/Hosts/ServersOverview.tsx create mode 100644 frontend-modern/src/components/Settings/HostAgents.tsx create mode 100644 frontend-modern/src/components/icons/ApiIcon.tsx create mode 100644 frontend-modern/src/components/icons/ServersIcon.tsx create mode 100644 frontend-modern/src/constants/apiScopes.ts create mode 100644 internal/api/auth_scope_test.go create mode 100644 internal/api/host_agents.go create mode 100644 internal/api/security_tokens_test.go create mode 100644 internal/config/api_tokens_test.go create mode 100644 internal/hostagent/agent.go create mode 100644 internal/hostagent/version.go create mode 100644 pkg/agents/host/report.go diff --git a/README.md b/README.md index 8bd50740f..56df67768 100644 --- a/README.md +++ b/README.md @@ -50,6 +50,7 @@ Pulse is built by a solo developer in evenings and weekends. 
Your support helps: - **Interactive Backup Explorer**: Cross-highlighted bar chart + grid with quick time-range pivots (24h/7d/30d/custom) and contextual tooltips for the busiest jobs - Proxmox Mail Gateway analytics: mail volume, spam/virus trends, quarantine health, and cluster node status - Optional Docker container monitoring via lightweight agent +- Standalone host agent for Linux, macOS, and Windows servers to capture uptime, OS metadata, and capacity metrics - Config export/import with encryption and authentication - Automatic stable updates with safe rollback (opt-in) - Dark/light themes, responsive design @@ -110,7 +111,7 @@ helm install pulse oci://ghcr.io/rcourtman/pulse-chart \ 1. Open `http://<your-server>:7655` 2. **Complete the mandatory security setup** (first-time only) 3. Create your admin username and password -4. Use **Settings → Security → API tokens** to mint dedicated tokens for automation (issue one token per integration so you can revoke credentials individually) +4. Use **Settings → Security → API tokens** to mint dedicated tokens for automation. Assign scopes so each token only has the permissions it needs (e.g. `docker:report`, `host-agent:report`). Legacy tokens default to full access until you edit and save new scopes. **Option B: Automated Setup (No UI)** For automated deployments, configure authentication via environment variables:
+### Monitor Standalone Servers (optional) + +Install the [Pulse host agent](docs/HOST_AGENT.md) on Linux, macOS, or Windows machines that sit outside your Proxmox or Docker estate. Generate an API token scoped to `host-agent:report`, drop it into the install command, and the **Servers** workspace will populate with uptime, OS metadata, and capacity metrics. + ## Docker ### Basic diff --git a/cmd/pulse-host-agent/main.go b/cmd/pulse-host-agent/main.go new file mode 100644 index 000000000..069aa9710 --- /dev/null +++ b/cmd/pulse-host-agent/main.go @@ -0,0 +1,147 @@ +package main + +import ( + "context" + "flag" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/rcourtman/pulse-go-rewrite/internal/hostagent" + "github.com/rs/zerolog" +) + +type multiValue []string + +func (m *multiValue) String() string { + return strings.Join(*m, ",") +} + +func (m *multiValue) Set(value string) error { + *m = append(*m, value) + return nil +} + +func main() { + cfg := loadConfig() + + logger := zerolog.New(os.Stdout).With().Timestamp().Logger() + cfg.Logger = &logger + + agent, err := hostagent.New(cfg) + if err != nil { + logger.Fatal().Err(err).Msg("failed to initialise host agent") + } + + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + logger.Info(). + Str("pulse_url", cfg.PulseURL). + Str("agent_id", cfg.AgentID). + Dur("interval", cfg.Interval). 
+ Msg("Starting Pulse host agent") + + if err := agent.Run(ctx); err != nil && err != context.Canceled { + logger.Fatal().Err(err).Msg("host agent terminated with error") + } + + logger.Info().Msg("Host agent stopped") +} + +func loadConfig() hostagent.Config { + envURL := strings.TrimSpace(os.Getenv("PULSE_URL")) + envToken := strings.TrimSpace(os.Getenv("PULSE_TOKEN")) + envInterval := strings.TrimSpace(os.Getenv("PULSE_INTERVAL")) + envHostname := strings.TrimSpace(os.Getenv("PULSE_HOSTNAME")) + envAgentID := strings.TrimSpace(os.Getenv("PULSE_AGENT_ID")) + envInsecure := strings.TrimSpace(os.Getenv("PULSE_INSECURE_SKIP_VERIFY")) + envTags := strings.TrimSpace(os.Getenv("PULSE_TAGS")) + envRunOnce := strings.TrimSpace(os.Getenv("PULSE_ONCE")) + + defaultInterval := 30 * time.Second + if envInterval != "" { + if parsed, err := time.ParseDuration(envInterval); err == nil { + defaultInterval = parsed + } + } + + urlFlag := flag.String("url", envURL, "Pulse server URL (e.g. https://pulse.example.com)") + tokenFlag := flag.String("token", envToken, "Pulse API token (required)") + intervalFlag := flag.Duration("interval", defaultInterval, "Reporting interval (e.g. 
30s, 1m)") + hostnameFlag := flag.String("hostname", envHostname, "Override hostname reported to Pulse") + agentIDFlag := flag.String("agent-id", envAgentID, "Override agent identifier") + insecureFlag := flag.Bool("insecure", parseBool(envInsecure), "Skip TLS certificate verification") + runOnceFlag := flag.Bool("once", parseBool(envRunOnce), "Collect and send a single report, then exit") + showVersion := flag.Bool("version", false, "Print the agent version and exit") + + var tagFlags multiValue + flag.Var(&tagFlags, "tag", "Tag to apply to this host (repeatable)") + + flag.Parse() + + if *showVersion { + fmt.Println(hostagent.Version) + os.Exit(0) + } + + pulseURL := strings.TrimSpace(*urlFlag) + if pulseURL == "" { + pulseURL = "http://localhost:7655" + } + + token := strings.TrimSpace(*tokenFlag) + if token == "" { + fmt.Fprintln(os.Stderr, "error: Pulse API token is required (via --token or PULSE_TOKEN)") + os.Exit(1) + } + + interval := *intervalFlag + if interval <= 0 { + interval = 30 * time.Second + } + + tags := gatherTags(envTags, tagFlags) + + return hostagent.Config{ + PulseURL: pulseURL, + APIToken: token, + Interval: interval, + HostnameOverride: strings.TrimSpace(*hostnameFlag), + AgentID: strings.TrimSpace(*agentIDFlag), + Tags: tags, + InsecureSkipVerify: *insecureFlag, + RunOnce: *runOnceFlag, + } +} + +func gatherTags(env string, flags []string) []string { + tags := make([]string, 0) + if env != "" { + for _, tag := range strings.Split(env, ",") { + tag = strings.TrimSpace(tag) + if tag != "" { + tags = append(tags, tag) + } + } + } + for _, tag := range flags { + tag = strings.TrimSpace(tag) + if tag != "" { + tags = append(tags, tag) + } + } + return tags +} + +func parseBool(value string) bool { + switch strings.ToLower(strings.TrimSpace(value)) { + case "1", "true", "yes", "y", "on": + return true + default: + return false + } +} diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md index 28d9efa9a..c58d6a66d 100644 --- 
a/docs/CONFIGURATION.md +++ b/docs/CONFIGURATION.md @@ -470,7 +470,25 @@ curl -H "X-API-Token: $ANSIBLE_TOKEN" http://localhost:7655/api/nodes # API_TOKEN=your-secure-api-token ./pulse ``` -> **Tip:** Generate a distinct token for each automation workflow (Ansible, Docker agents, CI runners, etc.) so you can revoke one credential without affecting the others. +> **Tip:** Generate a distinct token for each automation workflow (Ansible, Docker agents, host agents, CI runners, etc.) so you can revoke one credential without affecting the others. + +### Token Scopes + +API tokens created in the UI can be restricted to the smallest set of permissions required by an integration: + +| Scope | Typical use | +|-------|-------------| +| `docker:report` | Docker agent submitting host/container telemetry | +| `docker:manage` | Docker agent lifecycle commands (restart, stop, etc.) | +| `host-agent:report` | Pulse host agent reporting OS metrics | +| `monitoring:read` | Read-only access to dashboards, state API, and alert history | +| `monitoring:write` | Acknowledge, silence, or clear alerts | +| `settings:read` | Fetch configuration snapshots and diagnostics | +| `settings:write` | Modify configuration, manage tokens, trigger updates | + +Leaving the scope list empty (or legacy tokens without scopes) grants full access. Tokens generated from specific panels (e.g. **Settings → Agents → Host agents**) automatically apply the relevant scope presets. + +> **Upgrade note:** After upgrading, existing tokens are treated as full-access (`*`). Visit **Settings → Security** to edit each legacy token and assign narrower scopes. **Option 2: Basic Authentication** ```bash diff --git a/docs/HOST_AGENT.md b/docs/HOST_AGENT.md new file mode 100644 index 000000000..74aa36790 --- /dev/null +++ b/docs/HOST_AGENT.md @@ -0,0 +1,101 @@ +# Pulse Host Agent + +The Pulse host agent extends monitoring to standalone servers that do not expose +Proxmox or Docker APIs. 
With it you can surface uptime, OS metadata, CPU load, +memory/disk utilisation, and connection health for any Linux, macOS, or Windows +machine alongside the rest of your infrastructure. + +## Prerequisites + +- Pulse `main` (or a release that includes `/api/agents/host/report`) +- An API token with the `host-agent:report` scope (create under **Settings → Security**) +- Outbound HTTP/HTTPS connectivity from the host back to Pulse + +> ℹ️ The agent only initiates outbound connections; no inbound firewall rules are required. + +## Quick Start + +> Replace `<api-token>` with a Pulse API token limited to the `host-agent:report` scope. Tokens generated from **Settings → Agents → Host agents** already apply this scope. + +### Linux (systemd) + +```bash +sudo curl -fsSL https://github.com/rcourtman/Pulse/releases/latest/download/pulse-host-agent-linux-amd64 \ + -o /usr/local/bin/pulse-host-agent +sudo chmod +x /usr/local/bin/pulse-host-agent +sudo /usr/local/bin/pulse-host-agent \ + --url http://pulse.example.local:7655 \ + --token <api-token> \ + --interval 30s +``` + +For persistence, drop a systemd unit (e.g. `/etc/systemd/system/pulse-host-agent.service`) referencing the same command and enable it with `systemctl enable --now pulse-host-agent`.
+ +### macOS (launchd) + +```bash +sudo curl -fsSL https://github.com/rcourtman/Pulse/releases/latest/download/pulse-host-agent-darwin-arm64 \ + -o /usr/local/bin/pulse-host-agent +sudo chmod +x /usr/local/bin/pulse-host-agent +sudo /usr/local/bin/pulse-host-agent \ + --url http://pulse.example.local:7655 \ + --token <api-token> \ + --interval 30s +``` + +Create `~/Library/LaunchAgents/com.pulse.host-agent.plist` to keep the agent running between logins: + +```xml +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> + <dict> + <key>Label</key> + <string>com.pulse.host-agent</string> + <key>ProgramArguments</key> + <array> + <string>/usr/local/bin/pulse-host-agent</string> + <string>--url</string> + <string>http://pulse.example.local:7655</string> + <string>--token</string> + <string><api-token></string> + <string>--interval</string> + <string>30s</string> + </array> + <key>RunAtLoad</key> + <true/> + <key>KeepAlive</key> + <true/> + <key>StandardOutPath</key> + <string>/Users/your-user/Library/Logs/pulse-host-agent.log</string> + <key>StandardErrorPath</key> + <string>/Users/your-user/Library/Logs/pulse-host-agent.log</string> + </dict> +</plist> +``` + +Load it with `launchctl load ~/Library/LaunchAgents/com.pulse.host-agent.plist`. + +### Windows + +A Windows build will ship shortly. In the meantime run the Linux/WSL binary or compile from source (`GOOS=windows GOARCH=amd64`). + +## Command Flags + +| Flag | Description | +|------|-------------| +| `--url` | Pulse base URL (defaults to `http://localhost:7655`) | +| `--token` | API token with monitoring write access | +| `--interval` | Polling interval (`30s` default) | +| `--hostname` | Override reported hostname | +| `--agent-id` | Override agent identifier (used as dedupe key) | +| `--tag` | Optional tag(s) to annotate the host (repeatable) | +| `--insecure` | Skip TLS verification (development/testing only) | +| `--once` | Send a single report and exit | + +Run `pulse-host-agent --help` for the full list. + +## Viewing Hosts + +- **Settings → Agents → Host agents** lists every reporting host and provides ready-made install commands. +- The **Servers** tab surfaces host telemetry alongside Proxmox/Docker data in the main dashboard.
+ +## Updating + +Since the agent is a single static binary, updates are as simple as replacing the file and restarting your launchd/systemd unit. The Settings pane always links to the latest release artefacts. diff --git a/docs/development/API_TOKEN_SCOPES.md b/docs/development/API_TOKEN_SCOPES.md new file mode 100644 index 000000000..6949206e8 --- /dev/null +++ b/docs/development/API_TOKEN_SCOPES.md @@ -0,0 +1,98 @@ +# API Token Scope Design Brief + +## Objective +Introduce scoped API tokens so administrators can grant the minimum necessary permissions to each integration (Docker agent, host agent, future platform agents, automation scripts, etc.). This replaces today’s “all-or-nothing” tokens and provides safer rotation/revocation paths. + +## Security Rationale +- Agent and automation tokens are often deployed on hosts or third-party services we do not fully trust. If one leaks today, the attacker inherits full administrator powers (issuing new tokens, mutating settings, triggering installs/updates, etc.). +- Constraining tokens to the minimal scope limits the blast radius: a compromised reporting agent can only submit telemetry, not reconfigure Pulse or other integrations. +- Customers operating in regulated or multi-team environments increasingly ask for auditable least-privilege controls. Scopes give us the primitives to surface warnings on over-privileged tokens and eventually add rotation workflows. +- The feature still has to earn adoption, so pair the technical work with UI nudges and reporting that highlight “Full access” tokens and encourage admins to narrow permissions. + +## Requirements Overview + +1. **Token Model Changes** + - Each API token record stores a list of scopes (strings) or a bitmask. 
Recommended canonical strings: + - `monitoring:read` + - `monitoring:write` + - `docker:report` + - `docker:manage` + - `host-agent:report` + - `settings:read` + - `settings:write` + - `*` (legacy full-access sentinel; backend accepts for migration/edit flows but the UI should not expose it) + - Existing tokens must remain valid. Treat missing scopes as `["*"]` (full access) until the admin edits them. + +2. **Persistence & Migration** + - Extend the token persistence layer (currently BoltDB JSON) to include `scopes: []string`. + - On startup, detect tokens without the new field and default to `["*"]`. + - Expose the complete scope list when returning token metadata (internal API used by Settings UI). + +3. **Middleware Enforcement** + - Add a helper `RequireScope(scope string)` that checks the request’s token record for `scope` or `*`. + - Apply the helper according to the table below: + + | Endpoint (or group) | HTTP verbs | Required scope(s) | Notes | + |-------------------------------------------------|-------------------------------------|-----------------------------|------------------------------------------------------| + | `/api/agents/docker/report` | `POST` | `docker:report` | Docker agent heartbeat payloads | + | `/api/agents/docker/commands/*` | `POST` | `docker:manage` (optional) | If we expose command ack/management over tokens | + | `/api/agents/docker/hosts/*` | `DELETE`, `PUT`, `POST` | `docker:manage` | Admin actions for Docker hosts | + | `/api/agents/host/report` | `POST` | `host-agent:report` | Host agent reporting | + | `/api/state` | `GET` | `monitoring:read` | General state polling (if token-authenticated) | + | `/api/alerts/*` | `GET` | `monitoring:read` | Alerts reading APIs | + | `/api/alerts/*` (mutations) | `POST`, `PUT`, `DELETE` | `monitoring:write` | Acknowledge, silence, etc. 
| + | `/api/settings/*` | `GET` | `settings:read` | Settings reads via API | + | `/api/settings/*` | `POST`, `PUT`, `DELETE`, `PATCH` | `settings:write` | Any settings mutation | + | `/api/security/tokens*` | all verbs | _n/a (session only)_ | Leave browser-session only; do not allow API tokens yet | + | `/api/install/*`, `/api/updates/*` (mutations) | `POST`, `PUT` | `settings:write` | Sensitive operational endpoints | + + (More endpoints can be added as required; start with the rows above and expand during implementation.) + - Maintain compatibility for admin sessions (browser login) which continue to bypass token checks. + +4. **Token Generation API** + - Update `POST /api/security/tokens` to accept a `scopes` array. + - Validation rules: + - Reject unknown scope identifiers (except the `*` sentinel described above). + - If the array is omitted (legacy callers), default to `['*']` (full access) to preserve backward compatibility. + - If the array is provided but empty, reject with a 400 (“select at least one scope or delete the token”). + - Reject mixed arrays that contain both `'*'` and explicit scopes; if the UI submits such a payload, return a 400 with guidance (“either all scopes or full access”). + - Include the scope list in the response payload so the UI can display it. + +5. **UI/UX Adjustments** + - **Settings → Security** panel: + - When generating or editing a token, show a multi-select with friendly labels (“Docker agent reporting”, “Host agent reporting”, etc.). + - Display the scope summary in the token list (e.g. badges). + - For legacy tokens (implicit `*`), show “Full access” and allow editing to reduce scope. + - **Docker Agents / Host Agents screens:** + - When requesting a token: + - Docker: pre-select both `docker:report` (always) and `docker:manage` if the user needs lifecycle commands (hide manage behind a toggle if desired). + - Host agent: pre-select `host-agent:report`. 
+ - Warn if the stored token lacks the required scope (fallback to showing `` placeholder). + +6. **Testing** + - Unit tests covering: + - Scope parsing/migration + - Middleware checks (token with/without required scope) + - Integration tests for agent endpoints verifying 403 on missing scope. + +7. **Documentation** + - Update `README.md` and relevant docs (e.g. `docs/CONFIGURATION.md`, `docs/HOST_AGENT.md`, Docker docs) to explain scoped tokens. + - Provide an upgrade note for existing installations (“legacy tokens default to full access; edit to restrict scope”). + +## Implementation Notes + +- Use constants for scope strings to avoid typos throughout the codebase. +- Token middleware already retrieves `APITokenRecord`; that struct should grow a `Scopes []string` field with helper methods (`HasScope`). +- For future extensibility, keep the scope checks granular but simple (string equality) rather than regex matching. +- Ensure the Settings UI gracefully handles lack of admin privileges (disable scope selection, show hint). +- Update agent commands (Docker/Host) to mention required scope in their description. +- Guardrails: the backend should never auto-insert `*` once a scoped token exists, and any admin edit that clears all scopes should surface a clear “delete token or assign scopes” decision. + +## Acceptance Criteria + +- Scoped tokens persisted and surfaced via API. +- Middleware rejects tokens missing required scope. +- UI can create, edit, and display scoped tokens; agent panels auto-fill only when valid. +- Documentation updated; existing tokens remain functional without manual migration. + +Once implemented delete this doc. 
diff --git a/frontend-modern/src/App.tsx b/frontend-modern/src/App.tsx index 8e6e2fdc8..19be26e54 100644 --- a/frontend-modern/src/App.tsx +++ b/frontend-modern/src/App.tsx @@ -21,6 +21,7 @@ import Replication from './components/Replication/Replication'; import Settings from './components/Settings/Settings'; import { Alerts } from './pages/Alerts'; import { DockerHosts } from './components/Docker/DockerHosts'; +import { ServersOverview } from './components/Hosts/ServersOverview'; import { ToastContainer } from './components/Toast/Toast'; import NotificationContainer from './components/NotificationContainer'; import { ErrorBoundary } from './components/ErrorBoundary'; @@ -42,6 +43,7 @@ import type { State } from '@/types/api'; import MailGateway from './components/PMG/MailGateway'; import { ProxmoxIcon } from '@/components/icons/ProxmoxIcon'; import { DockerIcon } from '@/components/icons/DockerIcon'; +import { ServersIcon } from '@/components/icons/ServersIcon'; import { AlertsIcon } from '@/components/icons/AlertsIcon'; import { SettingsGearIcon } from '@/components/icons/SettingsGearIcon'; import { TokenRevealDialog } from './components/TokenRevealDialog'; @@ -81,6 +83,17 @@ function DockerRoute() { return ; } +function HostsRoute() { + const wsContext = useContext(WebSocketContext); + if (!wsContext) { + return
Loading...
; + } + const { state } = wsContext; + return ( + + ); +} + function App() { const TooltipRoot = createTooltipSystem(); const owner = getOwner(); @@ -616,6 +629,7 @@ function App() { } /> } /> + (props.state().dockerHosts?.length ?? 0) > 0); + const hasServers = createMemo(() => (props.state().hosts?.length ?? 0) > 0); const hasProxmoxHosts = createMemo( () => (props.state().nodes?.length ?? 0) > 0 || @@ -709,6 +725,12 @@ function AppLayout(props: { } }); + createEffect(() => { + if (hasServers()) { + markPlatformSeen('servers'); + } + }); + const platformTabs = createMemo(() => { return [ { @@ -735,6 +757,18 @@ function AppLayout(props: { ), }, + { + id: 'servers' as const, + label: 'Servers', + route: '/servers', + settingsRoute: '/settings', + tooltip: 'Monitor standalone servers with the host agent', + enabled: hasServers() || !!seenPlatforms()['servers'], + live: hasServers(), + icon: ( + + ), + }, ]; }); diff --git a/frontend-modern/src/api/security.ts b/frontend-modern/src/api/security.ts index 1e4b40559..4955d5b8a 100644 --- a/frontend-modern/src/api/security.ts +++ b/frontend-modern/src/api/security.ts @@ -7,6 +7,7 @@ export interface APITokenRecord { suffix: string; createdAt: string; lastUsedAt?: string; + scopes?: string[]; } export interface CreateAPITokenResponse { @@ -20,10 +21,18 @@ export class SecurityAPI { return response.tokens ?? 
[]; } - static async createToken(name?: string): Promise { + static async createToken(name?: string, scopes?: string[]): Promise { + const payload: Record = {}; + if (name) { + payload.name = name; + } + if (scopes) { + payload.scopes = scopes; + } + return apiFetchJSON('/api/security/tokens', { method: 'POST', - body: JSON.stringify({ name }), + body: JSON.stringify(payload), }); } diff --git a/frontend-modern/src/components/Hosts/ServersOverview.tsx b/frontend-modern/src/components/Hosts/ServersOverview.tsx new file mode 100644 index 000000000..fe0902e5c --- /dev/null +++ b/frontend-modern/src/components/Hosts/ServersOverview.tsx @@ -0,0 +1,156 @@ +import { For, Show, createMemo } from 'solid-js'; +import type { Component } from 'solid-js'; +import type { Host } from '@/types/api'; +import { Card } from '@/components/shared/Card'; +import { EmptyState } from '@/components/shared/EmptyState'; +import { formatBytes } from '@/utils/format'; + +interface ServersOverviewProps { + hosts: Host[]; + connectionHealth: Record; +} + +const statusClass: Record = { + online: 'bg-emerald-500/10 text-emerald-600 dark:text-emerald-400 border border-emerald-500/20', + degraded: 'bg-amber-500/10 text-amber-600 dark:text-amber-400 border border-amber-500/20', + offline: 'bg-rose-500/10 text-rose-600 dark:text-rose-400 border border-rose-500/20', +}; + +const formatStatus = (status: string | undefined) => { + if (!status) return 'unknown'; + const normalized = status.toLowerCase(); + if (normalized === 'online' || normalized === 'degraded' || normalized === 'offline') { + return normalized; + } + return status; +}; + +export const ServersOverview: Component = (props) => { + const sortedHosts = createMemo(() => + [...props.hosts].sort((a, b) => a.hostname.localeCompare(b.hostname)), + ); + + return ( +
+
+

Servers

+

+ Unified view of standalone hosts reporting via the Pulse host agent. +

+
+ + 0} + fallback={ + + } + > +
+ + {(host) => { + const status = formatStatus(host.status); + const statusClasses = + statusClass[status] ?? + 'bg-slate-500/10 text-slate-600 dark:text-slate-300 border border-slate-500/20'; + const lastSeen = new Date(host.lastSeen || Date.now()); + const connectionKey = `host-${host.id}`; + const isHealthy = props.connectionHealth[connectionKey] ?? status !== 'offline'; + const memoryUsage = + typeof host.memory?.usage === 'number' + ? Math.round((host.memory.usage + Number.EPSILON) * 10) / 10 + : undefined; + + return ( + +
+
+

+ {host.platform ?? 'unknown'} +

+

+ {host.displayName || host.hostname} +

+

+ {host.osName} + {host.osVersion ? ` ${host.osVersion}` : ''} +

+
+ + {status} + +
+ +
+
+
CPU Usage
+
+ {typeof host.cpuUsage === 'number' ? `${host.cpuUsage.toFixed(1)}%` : '—'} +
+
+
+
Memory
+
+ {host.memory?.total + ? `${formatBytes(host.memory.used ?? 0)} / ${formatBytes(host.memory.total)}${ + memoryUsage !== undefined ? ` (${memoryUsage.toFixed(1)}%)` : '' + }` + : '—'} +
+
+
+
Architecture
+
+ {host.architecture ?? '—'} +
+
+
+
Last Seen
+
+ {lastSeen.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })} +
+
+
+
Connection
+
+ {isHealthy ? 'Healthy' : 'Unreachable'} +
+
+
+ + 0}> +
+

+ Storage +

+
    + + {(disk) => ( +
  • + + {disk.mountpoint || disk.device || 'disk'} •{' '} + {disk.type ? disk.type.toUpperCase() : '—'} + + + {formatBytes(disk.used ?? 0)} / {formatBytes(disk.total ?? 0)} + {typeof disk.usage === 'number' + ? ` (${disk.usage.toFixed(1)}%)` + : ''} + +
  • + )} +
    +
+
+
+
+ ); + }} +
+
+
+
+ ); +}; diff --git a/frontend-modern/src/components/Settings/APITokenManager.tsx b/frontend-modern/src/components/Settings/APITokenManager.tsx index 6ced9eef5..439c4115d 100644 --- a/frontend-modern/src/components/Settings/APITokenManager.tsx +++ b/frontend-modern/src/components/Settings/APITokenManager.tsx @@ -1,12 +1,20 @@ -import { Component, For, Show, createMemo, createSignal, onMount } from 'solid-js'; +import { Component, For, Show, createMemo, createSignal, onCleanup, onMount } from 'solid-js'; import { Card } from '@/components/shared/Card'; -import { SectionHeader } from '@/components/shared/SectionHeader'; import { SecurityAPI, type APITokenRecord } from '@/api/security'; import { showError, showSuccess } from '@/utils/toast'; import { formatRelativeTime } from '@/utils/format'; import { useWebSocket } from '@/App'; import type { DockerHost } from '@/types/api'; import { showTokenReveal, useTokenRevealState } from '@/stores/tokenReveal'; +import { + API_SCOPE_LABELS, + API_SCOPE_OPTIONS, + DOCKER_MANAGE_SCOPE, + DOCKER_REPORT_SCOPE, + HOST_AGENT_SCOPE, + SETTINGS_READ_SCOPE, + SETTINGS_WRITE_SCOPE, +} from '@/constants/apiScopes'; interface APITokenManagerProps { currentTokenHint?: string; @@ -14,6 +22,10 @@ interface APITokenManagerProps { refreshing?: boolean; } +const SCOPES_DOC_URL = + 'https://github.com/rcourtman/Pulse/blob/main/docs/CONFIGURATION.md#token-scopes'; +const WILDCARD_SCOPE = '*'; + export const APITokenManager: Component = (props) => { const { state } = useWebSocket(); const dockerHosts = createMemo(() => state.dockerHosts ?? 
[]); @@ -23,11 +35,11 @@ export const APITokenManager: Component = (props) => { const tokenId = host.tokenId; if (!tokenId) continue; const displayName = host.displayName?.trim() || host.hostname || host.id; - const existing = usage.get(tokenId); - if (existing) { + const previous = usage.get(tokenId); + if (previous) { usage.set(tokenId, { - count: existing.count + 1, - hosts: [...existing.hosts, displayName], + count: previous.count + 1, + hosts: [...previous.hosts, displayName], }); } else { usage.set(tokenId, { count: 1, hosts: [displayName] }); @@ -37,12 +49,133 @@ export const APITokenManager: Component = (props) => { }); const [tokens, setTokens] = createSignal([]); + const sortedTokens = createMemo(() => + [...tokens()].sort( + (a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime(), + ), + ); + const totalTokens = createMemo(() => sortedTokens().length); + const wildcardCount = createMemo(() => + sortedTokens().filter((token) => { + const scopes = token.scopes; + return !scopes || scopes.length === 0 || scopes.includes('*'); + }).length, + ); + const scopedTokenCount = createMemo(() => totalTokens() - wildcardCount()); + const hasWildcardTokens = createMemo(() => wildcardCount() > 0); + const mostRecentLabel = createMemo(() => { + const first = sortedTokens()[0]; + if (!first) return '—'; + const timestamp = new Date(first.createdAt).getTime(); + return Number.isFinite(timestamp) ? 
formatRelativeTime(timestamp) : '—'; + }); + const [loading, setLoading] = createSignal(true); const [isGenerating, setIsGenerating] = createSignal(false); const [newTokenValue, setNewTokenValue] = createSignal(null); const [newTokenRecord, setNewTokenRecord] = createSignal(null); const [nameInput, setNameInput] = createSignal(''); const tokenRevealState = useTokenRevealState(); + const [selectedScopes, setSelectedScopes] = createSignal([]); + + type ScopeGroup = (typeof API_SCOPE_OPTIONS)[number]['group']; + type ScopeOption = (typeof API_SCOPE_OPTIONS)[number]; + const scopeGroupOrder: ScopeGroup[] = ['Monitoring', 'Agents', 'Settings']; + const scopeGroups = createMemo<[ScopeGroup, ScopeOption[]][]>(() => { + const grouped: Record = { + Monitoring: [], + Agents: [], + Settings: [], + }; + for (const option of API_SCOPE_OPTIONS) { + grouped[option.group].push(option); + } + return scopeGroupOrder + .map((group) => [group, grouped[group]] as [ScopeGroup, ScopeOption[]]) + .filter(([, options]) => options.length > 0); + }); + + const isFullAccessSelected = () => + selectedScopes().length === 0 || selectedScopes().includes(WILDCARD_SCOPE); + + const scopePresets: { label: string; scopes: string[]; description: string }[] = [ + { + label: 'Host agent', + scopes: [HOST_AGENT_SCOPE], + description: 'Allow pulse-host-agent to submit OS, CPU, and disk metrics.', + }, + { + label: 'Docker report', + scopes: [DOCKER_REPORT_SCOPE], + description: 'Permits Docker agents to stream host and container telemetry only.', + }, + { + label: 'Docker manage', + scopes: [DOCKER_REPORT_SCOPE, DOCKER_MANAGE_SCOPE], + description: 'Extends Docker reporting with lifecycle actions (restart, stop, etc.).', + }, + { + label: 'Settings read', + scopes: [SETTINGS_READ_SCOPE], + description: 'Read configuration snapshots and diagnostics without modifying anything.', + }, + { + label: 'Settings admin', + scopes: [SETTINGS_READ_SCOPE, SETTINGS_WRITE_SCOPE], + description: 'Full settings 
read/write – equivalent to automation with admin privileges.', + }, + ]; + + const presetMatchesSelection = (presetScopes: string[]) => { + const selection = [...selectedScopes()] + .filter((scope) => scope !== WILDCARD_SCOPE) + .sort(); + const target = [...presetScopes].sort(); + if (target.length === 0) { + return isFullAccessSelected(); + } + if (selection.length !== target.length) { + return false; + } + return target.every((scope) => selection.includes(scope)); + }; + + const presetButtonBase = + 'flex w-full items-start justify-between gap-3 rounded-md border px-3 py-2 text-left text-sm transition-colors'; + const presetButtonActive = + 'border-blue-400 ring-1 ring-blue-300 bg-blue-50/70 dark:border-blue-500 dark:ring-blue-400/40 dark:bg-blue-900/20'; + const presetButtonInactive = + 'border-gray-300 bg-white hover:border-blue-400 dark:border-gray-600 dark:bg-gray-900/60 dark:hover:border-blue-500'; + const selectedScopeChips = createMemo(() => + selectedScopes() + .filter((scope) => scope !== WILDCARD_SCOPE) + .map((scope) => ({ + value: scope, + label: API_SCOPE_LABELS[scope] ?? 
scope, + })) + .sort((a, b) => a.label.localeCompare(b.label)), + ); + const [advancedScopesOpen, setAdvancedScopesOpen] = createSignal(false); + + const applyScopePreset = (scopes: string[]) => { + const unique = Array.from(new Set(scopes)).filter(Boolean); + setSelectedScopes(unique); + }; + const clearScopes = () => setSelectedScopes([]); + + let createSectionRef: HTMLDivElement | undefined; + const [createHighlight, setCreateHighlight] = createSignal(false); + let highlightTimer: number | undefined; + const focusCreateSection = () => { + if (!createSectionRef) return; + createSectionRef.scrollIntoView({ behavior: 'smooth', block: 'start' }); + setCreateHighlight(true); + window.clearTimeout(highlightTimer); + highlightTimer = window.setTimeout(() => setCreateHighlight(false), 1600); + }; + onCleanup(() => { + if (highlightTimer) window.clearTimeout(highlightTimer); + }); const loadTokens = async () => { setLoading(true); @@ -65,7 +198,9 @@ export const APITokenManager: Component = (props) => { setIsGenerating(true); try { const trimmedName = nameInput().trim() || undefined; - const { token, record } = await SecurityAPI.createToken(trimmedName); + const scopeSelection = [...selectedScopes()].sort(); + const scopePayload = scopeSelection.length > 0 ? 
scopeSelection : undefined; + const { token, record } = await SecurityAPI.createToken(trimmedName, scopePayload); setTokens((prev) => [record, ...prev]); setNewTokenValue(token); @@ -89,7 +224,6 @@ export const APITokenManager: Component = (props) => { } catch (storageErr) { console.warn('Unable to persist API token in localStorage', storageErr); } - } catch (err) { console.error('Failed to generate API token', err); showError('Failed to generate API token'); @@ -112,7 +246,7 @@ export const APITokenManager: Component = (props) => { if (record.name?.trim()) return record.name.trim(); if (record.prefix && record.suffix) return `${record.prefix}…${record.suffix}`; if (record.prefix) return `${record.prefix}…`; - return 'unnamed token'; + return 'untitled token'; }; const handleDelete = async (record: APITokenRecord) => { @@ -120,7 +254,6 @@ export const APITokenManager: Component = (props) => { const displayName = tokenNameForDialog(record); let message = `Revoke token "${displayName}"? Any agents or integrations using it will stop working.`; - if (usage) { const hostListPreview = usage.hosts.slice(0, 5).join(', '); const extraCount = usage.hosts.length - 5; @@ -131,8 +264,7 @@ export const APITokenManager: Component = (props) => { message = `Token "${displayName}" is currently used by ${hostCountLabel}.\nHosts: ${hostSummary}\n\nRevoking it will cause those agents to stop reporting until you update them with a new token.\n\nContinue?`; } - const confirmed = window.confirm(message); - if (!confirmed) return; + if (!window.confirm(message)) return; try { await SecurityAPI.deleteToken(record.id); @@ -170,45 +302,197 @@ export const APITokenManager: Component = (props) => { }; return ( - -
-
-
- - - -
- +
+
+
+

API tokens

+

+ Authenticate host agents, Docker integrations, and automation pipelines with scoped access. +

+
-
- -
- - - - - Refreshing security status… -
-
+ +
+ + + + + Refreshing security status… +
+
+ +
+ +
+

Active tokens

+

+ Rotate tokens regularly and scope them to the minimum access required. +

+
+ +
+
+

+ Active tokens +

+

{totalTokens()}

+
+
+

+ Scoped tokens +

+

{scopedTokenCount()}

+
+
+

+ {hasWildcardTokens() ? 'Full access tokens' : 'Last generated'} +

+

+ {hasWildcardTokens() + ? wildcardCount() + : totalTokens() > 0 + ? mostRecentLabel() + : '—'} +

+
+
+ + 0} + fallback={ +
+

+ No tokens yet. Generate one to authenticate agents and automation. +

+ +
+ } + > +
+ + + + + + + + + + + + + + {(token) => { + const usage = dockerTokenUsage().get(token.id); + const hostTitle = usage ? usage.hosts.join(', ') : undefined; + const hostPreview = usage ? usage.hosts.slice(0, 2).join(', ') : ''; + const extraCount = usage ? usage.hosts.length - 2 : 0; + const hostSummary = + usage && usage.count === 1 + ? usage.hosts[0] + : usage + ? `${hostPreview}${extraCount > 0 ? `, +${extraCount} more` : ''}` + : ''; + const hostCountLabel = + usage && usage.count === 1 ? 'host' : usage ? 'hosts' : ''; + const rawScopes = token.scopes && token.scopes.length > 0 ? token.scopes : ['*']; + const scopeBadges = rawScopes.includes('*') + ? [{ value: '*', label: 'Full access' }] + : rawScopes.map((scope) => ({ + value: scope, + label: API_SCOPE_LABELS[scope] ?? scope, + })); + const rowIsWildcard = scopeBadges.some((scope) => scope.value === '*'); + + return ( + + + + + + + + + ); + }} + + +
LabelToken hintScopesCreatedLast usedActions
+
+ {token.name || 'Untitled token'} + + + Docker + + +
+ +
+ Used by Docker {hostCountLabel}: {hostSummary} +
+
+
+ {tokenHint(token)} + +
+ + {(scope) => { + const isWildcard = scope.value === '*'; + const badgeClass = isWildcard + ? 'inline-flex items-center rounded-full bg-amber-100 dark:bg-amber-900/40 px-2 py-0.5 text-[11px] font-semibold text-amber-800 dark:text-amber-200' + : 'inline-flex items-center rounded-full bg-gray-100 dark:bg-gray-800 px-2 py-0.5 text-[11px] font-medium text-gray-700 dark:text-gray-200'; + return ( + + {scope.label} + + ); + }} + +
+
+ {formatRelativeTime(new Date(token.createdAt).getTime())} + + {token.lastUsedAt ? formatRelativeTime(new Date(token.lastUsedAt).getTime()) : 'Never'} + + +
+
+
+
- {/* Generated token reminder - only show when dialog is not already visible */} -
+
- +
@@ -216,8 +500,8 @@ export const APITokenManager: Component = (props) => {

Token ready to copy

-

- We keep the full token inside a secure dialog. Reopen it if you still need to copy the value before navigating away. +

+ Tokens are only shown once. Copy it now or store it securely before you leave this page.

@@ -237,9 +521,9 @@ export const APITokenManager: Component = (props) => {

-
+
-
- Issue a dedicated token for each host or automation. That way, if a system is compromised, you can revoke just its token without disrupting anything else. -
+ { + createSectionRef = el; + }} + > +
+
+

Generate new token

+

+ Tokens are only displayed once. Follow the steps below to create a scoped credential. +

+
-
-

Generate new token

-
- setNameInput(event.currentTarget.value)} - placeholder="e.g., docker-host-1" - class="flex-1 rounded-lg border border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-800 px-3 py-2 text-sm focus:outline-none focus:ring-2 focus:ring-blue-500" - /> +
    +
  1. +
    + 1 +
    +
    +

    Name the token

    +

    + Use something descriptive so you can identify the integration later. +

    + setNameInput(event.currentTarget.value)} + placeholder="e.g., docker-host-1" + class="w-full rounded-md border border-gray-300 bg-white px-3 py-2 text-sm text-gray-900 shadow-sm focus:outline-none focus:ring-2 focus:ring-blue-500 dark:border-gray-600 dark:bg-gray-800 dark:text-gray-100" + /> +
    +
  2. + +
  3. +
    + 2 +
    +
    +

    Set a baseline scope

    +

    + Start with a preset that matches the integration, or choose full access if you plan to trim later. +

    +
    + + + {(preset) => ( + + )} + +
    +
    + + Current selection + + 0} + fallback={Full access (wildcard)} + > +
    + + {(chip) => ( + + {chip.label} + + )} + +
    +
    +
    +
    +
  4. + +
  5. +
    + 3 +
    +
    +
    +

    Fine-tune permissions

    +

    + Toggle advanced scopes if the integration needs additional access beyond the preset. +

    +
    +
    + 0}> +
    + + {(chip) => ( + + {chip.value} + + )} + +
    +
    + +
    + +
    + + {([group, options]) => ( +
    +

    + {group} +

    +
    + + {(option) => { + const inputId = `scope-${option.value.replace(/[:]/g, '-')}`; + const checked = () => selectedScopes().includes(option.value); + return ( + + ); + }} + +
    +
    + )} +
    +
    +
    +
    +
  6. +
+
+ +
+ + +
-
+ -
-

Active tokens

- 0} - fallback={ -

- No API tokens yet. Generate one above to get started. -

- } - > -
- - - - - - - - - - - - - {(token) => { - const usage = dockerTokenUsage().get(token.id); - const hostTitle = usage ? usage.hosts.join(', ') : undefined; - const hostPreview = usage ? usage.hosts.slice(0, 2).join(', ') : ''; - const extraCount = usage ? usage.hosts.length - 2 : 0; - const hostSummary = - usage && usage.count === 1 - ? usage.hosts[0] - : usage - ? `${hostPreview}${extraCount > 0 ? `, +${extraCount} more` : ''}` - : ''; - const hostCountLabel = - usage && usage.count === 1 ? 'host' : usage ? 'hosts' : ''; + + +

+ Full access tokens detected +

+

+ Edit existing tokens to assign scopes, or generate replacements with the presets above so compromised credentials can’t control everything. +

+ +
+
- return ( -
- - - - - - - ); - }} - - -
LabelToken hintCreatedLast usedActions
-
- {token.name || 'Untitled token'} - - - Docker - - -
- -
- Used by Docker {hostCountLabel}: {hostSummary} -
-
-
- {tokenHint(token)} - - {formatRelativeTime(new Date(token.createdAt).getTime())} - - {token.lastUsedAt ? formatRelativeTime(new Date(token.lastUsedAt).getTime()) : 'Never'} - - -
-
-
-
+ +

Good practices

+
    +
  • + + Issue separate tokens for Docker agents, host agents, and automation pipelines so you can revoke them independently. +
  • +
  • + + Rotate tokens on a schedule and remove ones that haven’t been used recently. +
  • +
  • + + + View the{' '} + + scoped token guide + {' '} + for the full list of available permissions. + +
  • +
+
-
+
); }; + +export default APITokenManager; diff --git a/frontend-modern/src/components/Settings/HostAgents.tsx b/frontend-modern/src/components/Settings/HostAgents.tsx new file mode 100644 index 000000000..a984560ea --- /dev/null +++ b/frontend-modern/src/components/Settings/HostAgents.tsx @@ -0,0 +1,378 @@ +import { Component, For, Show, createEffect, createMemo, createSignal, onMount } from 'solid-js'; +import type { JSX } from 'solid-js'; +import { useWebSocket } from '@/App'; +import type { Host } from '@/types/api'; +import { SectionHeader } from '@/components/shared/SectionHeader'; +import { Card } from '@/components/shared/Card'; +import CopyButton from '@/components/shared/CopyButton'; +import { formatBytes, formatRelativeTime, formatUptime } from '@/utils/format'; +import { SecurityAPI } from '@/api/security'; +import { notificationStore } from '@/stores/notifications'; +import { showTokenReveal } from '@/stores/tokenReveal'; +import { HOST_AGENT_SCOPE } from '@/constants/apiScopes'; + +type HostAgentVariant = 'all' | 'linux' | 'macos' | 'windows'; + +interface HostAgentsProps { + variant?: HostAgentVariant; +} + +const RELEASE_BASE = 'https://github.com/rcourtman/Pulse/releases/latest/download'; + +const TOKEN_PLACEHOLDER = ''; + +const pulseUrl = () => { + if (typeof window === 'undefined') return 'http://localhost:7655'; + const { protocol, hostname, port } = window.location; + return `${protocol}//${hostname}${port ? `:${port}` : ''}`; +}; + +const commandsByVariant: Record = { + all: { + title: 'Installation quick start', + description: + 'Generate an API token from Settings → Security with the host agent reporting scope, then replace the highlighted token placeholder. 
Agents only require outbound HTTP(S) access to Pulse.', + snippets: [ + { + label: 'Linux (systemd)', + command: [ + `curl -fsSL ${RELEASE_BASE}/pulse-host-agent-linux-amd64 -o /usr/local/bin/pulse-host-agent`, + 'sudo chmod +x /usr/local/bin/pulse-host-agent', + `sudo /usr/local/bin/pulse-host-agent --url ${pulseUrl()} --token ${TOKEN_PLACEHOLDER} --interval 30s`, + ].join(' && '), + }, + { + label: 'macOS (launchd)', + command: [ + `curl -fsSL ${RELEASE_BASE}/pulse-host-agent-darwin-arm64 -o /usr/local/bin/pulse-host-agent`, + 'sudo chmod +x /usr/local/bin/pulse-host-agent', + `sudo /usr/local/bin/pulse-host-agent --url ${pulseUrl()} --token ${TOKEN_PLACEHOLDER} --interval 30s`, + ].join(' && '), + note: ( + + Create ~/Library/LaunchAgents/com.pulse.host-agent.plist to keep the agent running between logins. + + ), + }, + { + label: 'Ad-hoc execution', + command: `/usr/local/bin/pulse-host-agent --url ${pulseUrl()} --token ${TOKEN_PLACEHOLDER} --interval 30s`, + }, + ], + }, + linux: { + title: 'Install on Linux', + description: + 'Download the static binary, make it executable, and (optionally) register it as a systemd service. Replace the token placeholder with an API token scoped for host agent reporting.', + snippets: [ + { + label: 'Install + enable (systemd)', + command: [ + `curl -fsSL ${RELEASE_BASE}/pulse-host-agent-linux-amd64 -o /usr/local/bin/pulse-host-agent`, + 'sudo chmod +x /usr/local/bin/pulse-host-agent', + `sudo /usr/local/bin/pulse-host-agent --url ${pulseUrl()} --token ${TOKEN_PLACEHOLDER} --interval 30s`, + ].join(' && '), + note: ( + + For persistence, create /etc/systemd/system/pulse-host-agent.service and enable it with{' '} + systemctl enable --now pulse-host-agent. 
+ + ), + }, + ], + }, + macos: { + title: 'Install on macOS', + description: + 'Use the universal macOS build (arm64) with an API token that grants the host agent reporting scope, then register it via launchd for continuous reporting.', + snippets: [ + { + label: 'Install binary', + command: [ + `curl -fsSL ${RELEASE_BASE}/pulse-host-agent-darwin-arm64 -o /usr/local/bin/pulse-host-agent`, + 'sudo chmod +x /usr/local/bin/pulse-host-agent', + ].join(' && '), + }, + { + label: 'Launchd service', + command: `launchctl load ~/Library/LaunchAgents/com.pulse.host-agent.plist`, + note: ( + + Create a plist pointing to{' '} + /usr/local/bin/pulse-host-agent --url {pulseUrl()} --token {TOKEN_PLACEHOLDER} --interval 30s to run at login. + + ), + }, + ], + }, + windows: { + title: 'Install on Windows', + description: + 'Native Windows builds are coming soon. In the interim you can run the Linux binary under WSL or compile from source using an API token scoped for host agent reporting.', + snippets: [ + { + label: 'Compile from source (PowerShell)', + command: [ + 'git clone https://github.com/rcourtman/Pulse.git', + 'cd Pulse', + 'go build -o pulse-host-agent.exe ./cmd/pulse-host-agent', + `./pulse-host-agent.exe --url ${pulseUrl()} --token ${TOKEN_PLACEHOLDER} --interval 30s`, + ].join(' && '), + note: ( + + Consider registering the executable as a Windows Service via sc.exe or NSSM once native artefacts ship. + + ), + }, + ], + }, +}; + +const platformFilters: Record = { + all: null, + linux: ['linux'], + macos: ['macos'], + windows: ['windows'], +}; + +export const HostAgents: Component = (props) => { + const variant: HostAgentVariant = props.variant ?? 'all'; + const { state } = useWebSocket(); + const [apiToken, setApiToken] = createSignal(''); + const [isGeneratingToken, setIsGeneratingToken] = createSignal(false); + const [tokenAccessDenied, setTokenAccessDenied] = createSignal(false); + + const hosts = createMemo(() => { + const list = state.hosts ?? 
[]; + const filters = platformFilters[variant]; + const filtered = filters ? list.filter((host) => filters.includes((host.platform ?? '').toLowerCase())) : list; + return [...filtered].sort((a, b) => (a.hostname || '').localeCompare(b.hostname || '')); + }); + + const renderTags = (host: Host) => { + const tags = host.tags ?? []; + if (!tags.length) return '—'; + return tags.join(', '); + }; + + const installMeta = commandsByVariant[variant]; + + onMount(() => { + if (typeof window === 'undefined') return; + try { + const stored = window.localStorage.getItem('apiToken'); + if (stored) { + setApiToken(stored); + } + } catch (err) { + console.warn('Unable to read API token from localStorage', err); + } + }); + + createEffect(() => { + if (typeof window === 'undefined') return; + const token = apiToken(); + try { + if (token) { + window.localStorage.setItem('apiToken', token); + } else { + window.localStorage.removeItem('apiToken'); + } + } catch (err) { + console.warn('Unable to persist API token in localStorage', err); + } + }); + + const generateToken = async () => { + if (isGeneratingToken()) return; + if (tokenAccessDenied()) { + notificationStore.error('Administrator access required to generate host agent tokens.', 6000); + return; + } + + setIsGeneratingToken(true); + try { + const defaultName = `Host agent ${new Date().toISOString().slice(0, 10)}`; + const { token, record } = await SecurityAPI.createToken(defaultName, [HOST_AGENT_SCOPE]); + setApiToken(token); + showTokenReveal({ + token, + record, + source: 'host-agent', + note: 'Copy this token into the host agent install command or store it securely for automation.', + }); + notificationStore.success('Created host agent API token with reporting scope.', 6000); + } catch (err) { + console.error('Failed to create host agent token', err); + if (err instanceof Error && /authentication required|forbidden/i.test(err.message)) { + setTokenAccessDenied(true); + notificationStore.error('Sign in with an administrator 
account to generate tokens here.', 6000); + } else { + notificationStore.error('Failed to generate API token', 6000); + } + } finally { + setIsGeneratingToken(false); + } + }; + + const cardTitle = () => { + switch (variant) { + case 'linux': + return 'Linux servers'; + case 'macos': + return 'macOS devices'; + case 'windows': + return 'Windows servers'; + default: + return 'Host agents'; + } + }; + + const cardDescription = () => { + switch (variant) { + case 'linux': + return 'Install the Pulse host agent on Debian, Ubuntu, RHEL, Arch, or other Linux hosts to surface uptime and capacity metrics.'; + case 'macos': + return 'Deploy the lightweight host agent via launchd to keep macOS hardware in view alongside your Proxmox estate.'; + case 'windows': + return 'Track Windows Server hosts through a native Pulse agent. A first-party build is on the roadmap—compile from source today or watch this space.'; + default: + return 'Install the Pulse host agent on Linux, macOS, or Windows servers to surface uptime, OS metadata, and capacity metrics.'; + } + }; + + return ( +
+ + + +
+

API token

+
+ Manage tokens via Settings → Security +
+
+
+ setApiToken(e.currentTarget.value.trim())} + placeholder="Paste API token (leave blank to keep placeholder)" + class="flex-1 rounded-md border border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-800 px-3 py-2 text-sm text-gray-900 dark:text-gray-100 focus:outline-none focus:ring-2 focus:ring-blue-500" + /> + + + + Token will be embedded in the commands below. + + +
+

+ Tokens generated here automatically include the host agent reporting scope (host-agent:report). +

+ +

{installMeta.title}

+

{installMeta.description}

+ +
+ + {(snippet) => ( +
+
+

{snippet.label}

+ + Copy command + +
+
+                  
+                    {snippet.command.replace(
+                      TOKEN_PLACEHOLDER,
+                      apiToken() || TOKEN_PLACEHOLDER,
+                    )}
+                  
+                
+ +

{snippet.note}

+
+
+ )} +
+
+
+ + +
+

Reporting hosts

+ {hosts().length} connected +
+ + 0} + fallback={ +

+ {variant === 'windows' + ? 'No Windows hosts have reported yet. Compile the agent from source or check back when native artefacts are published.' + : 'No host agents are reporting yet. Deploy the binary using the commands above to see hosts listed here.'} +

+ } + > +
+ + + + + + + + + + + + + + {(host) => ( + + + + + + + + + )} + + +
HostnamePlatformUptimeMemoryLast seenTags
+ {host.displayName || host.hostname || host.id} + + {host.platform || '—'} + + {host.uptimeSeconds ? formatUptime(host.uptimeSeconds) : '—'} + + {host.memory?.total + ? `${formatBytes(host.memory.used ?? 0)} / ${formatBytes(host.memory.total)}` + : '—'} + + {host.lastSeen ? formatRelativeTime(host.lastSeen) : '—'} + {renderTags(host)}
+
+
+
+
+ ); +}; + +export default HostAgents; diff --git a/frontend-modern/src/components/Settings/Settings.tsx b/frontend-modern/src/components/Settings/Settings.tsx index 3fb231396..04779d8c5 100644 --- a/frontend-modern/src/components/Settings/Settings.tsx +++ b/frontend-modern/src/components/Settings/Settings.tsx @@ -5,10 +5,11 @@ import { useWebSocket } from '@/App'; import { showSuccess, showError } from '@/utils/toast'; import { copyToClipboard } from '@/utils/clipboard'; import { NodeModal } from './NodeModal'; -import { APITokenManager } from './APITokenManager'; import { ChangePasswordModal } from './ChangePasswordModal'; import { GuestURLs } from './GuestURLs'; import { DockerAgents } from './DockerAgents'; +import { HostAgents } from './HostAgents'; +import APITokenManager from './APITokenManager'; import { OIDCPanel } from './OIDCPanel'; import { QuickSecuritySetup } from './QuickSecuritySetup'; import { SecurityPostureSummary } from './SecurityPostureSummary'; @@ -23,12 +24,17 @@ import { formField, labelClass, controlClass, formHelpText } from '@/components/ import Server from 'lucide-solid/icons/server'; import HardDrive from 'lucide-solid/icons/hard-drive'; import Mail from 'lucide-solid/icons/mail'; -import Link from 'lucide-solid/icons/link'; import Container from 'lucide-solid/icons/container'; import SettingsIcon from 'lucide-solid/icons/settings'; import Shield from 'lucide-solid/icons/shield'; import Activity from 'lucide-solid/icons/activity'; import Loader from 'lucide-solid/icons/loader'; +import Boxes from 'lucide-solid/icons/boxes'; +import Network from 'lucide-solid/icons/network'; +import Terminal from 'lucide-solid/icons/terminal'; +import Monitor from 'lucide-solid/icons/monitor'; +import Laptop from 'lucide-solid/icons/laptop'; +import { ApiIcon } from '@/components/icons/ApiIcon'; import type { NodeConfig } from '@/types/nodes'; import type { UpdateInfo, VersionInfo } from '@/api/updates'; import type { APITokenRecord } from 
'@/api/security'; @@ -222,8 +228,13 @@ type SettingsTab = | 'pbs' | 'pmg' | 'docker' + | 'podman' + | 'kubernetes' + | 'linuxServers' + | 'windowsServers' + | 'macServers' | 'system' - | 'urls' + | 'api' | 'security' | 'diagnostics' | 'updates'; @@ -245,13 +256,33 @@ const SETTINGS_HEADER_META: Record = (props) => { if (path.includes('/settings/pbs')) return 'pbs'; if (path.includes('/settings/pmg')) return 'pmg'; if (path.includes('/settings/docker')) return 'docker'; + if (path.includes('/settings/podman')) return 'podman'; + if (path.includes('/settings/kubernetes')) return 'kubernetes'; + if (path.includes('/settings/linuxServers')) return 'linuxServers'; + if (path.includes('/settings/windowsServers')) return 'windowsServers'; + if (path.includes('/settings/macServers')) return 'macServers'; if (path.includes('/settings/system')) return 'system'; + if (path.includes('/settings/api')) return 'api'; if (path.includes('/settings/security')) return 'security'; if (path.includes('/settings/diagnostics')) return 'diagnostics'; - if (path.includes('/settings/urls')) return 'urls'; if (path.includes('/settings/updates')) return 'updates'; return 'pve'; }; @@ -312,12 +348,13 @@ const Settings: Component = (props) => { const activeTab = () => currentTab(); const setActiveTab = (tab: SettingsTab) => { - if (currentTab() !== tab) { - setCurrentTab(tab); - } const targetPath = `/settings/${tab}`; if (location.pathname !== targetPath) { navigate(targetPath); + return; + } + if (currentTab() !== tab) { + setCurrentTab(tab); } }; @@ -693,30 +730,31 @@ const Settings: Component = (props) => { }; const tabGroups: { - id: 'proxmox' | 'docker' | 'administration'; + id: 'platforms' | 'administration'; label: string; - items: { id: SettingsTab; label: string; icon: JSX.Element }[]; + items: { id: SettingsTab; label: string; icon: JSX.Element; disabled?: boolean }[]; }[] = [ { - id: 'proxmox', - label: 'Proxmox', + id: 'platforms', + label: 'Platforms', items: [ { id: 'pve', label: 
'Proxmox VE nodes', icon: }, { id: 'pbs', label: 'Proxmox Backup Server', icon: }, { id: 'pmg', label: 'Proxmox Mail Gateway', icon: }, - { id: 'urls', label: 'Guest URLs', icon: }, + { id: 'docker', label: 'Docker hosts', icon: }, + { id: 'podman', label: 'Podman hosts', icon: , disabled: true }, + { id: 'kubernetes', label: 'Kubernetes', icon: , disabled: true }, + { id: 'linuxServers', label: 'Linux servers', icon: }, + { id: 'windowsServers', label: 'Windows servers', icon: }, + { id: 'macServers', label: 'macOS devices', icon: }, ], }, - { - id: 'docker', - label: 'Docker', - items: [{ id: 'docker', label: 'Docker hosts', icon: }], - }, { id: 'administration', label: 'Administration', items: [ { id: 'system', label: 'System', icon: }, + { id: 'api', label: 'API access', icon: }, { id: 'security', label: 'Security', icon: }, { id: 'diagnostics', label: 'Diagnostics', icon: }, ], @@ -1802,7 +1840,7 @@ const Settings: Component = (props) => {
- +
setSidebarCollapsed(false)} @@ -1822,16 +1860,26 @@ const Settings: Component = (props) => {
- {(item) => ( + {(item) => { + const isActive = () => activeTab() === item.id; + return ( - )} + ); + }}
@@ -1849,7 +1898,7 @@ const Settings: Component = (props) => {
-
+
0}>
@@ -1858,26 +1907,36 @@ const Settings: Component = (props) => { style="-webkit-overflow-scrolling: touch;" > - {(tab) => ( - - )} + {(tab) => { + const isActive = activeTab() === tab.id; + const disabled = tab.disabled; + return ( + + ); + }}
-
+
{/* PVE Nodes Tab */}
@@ -2394,6 +2453,13 @@ const Settings: Component = (props) => {
+ +
+ +
@@ -3214,6 +3280,37 @@ const Settings: Component = (props) => { + {/* Podman Tab */} + + + + + {/* Kubernetes Tab */} + + + + + {/* Linux Host Agents */} + + + + + {/* Windows Host Agents */} + + + + + {/* macOS Host Agents */} + + + + {/* System Settings Tab */}
@@ -4273,6 +4370,36 @@ const Settings: Component = (props) => {
+ {/* API Access */} + +
+ + + +

+ Generate scoped tokens for Docker agents, host agents, and automation pipelines. + Tokens are shown once—store them securely and rotate when infrastructure changes. +

+ + View scope reference + +
+ + { + void loadSecurityStatus(); + }} + refreshing={securityStatusLoading()} + /> +
+
+ {/* Security Tab */}
@@ -4610,14 +4737,18 @@ const Settings: Component = (props) => {
{/* Content */} -
- { - void loadSecurityStatus(); - }} - refreshing={securityStatusLoading()} - /> +
+

+ API tokens now live under the dedicated API workspace. Generate new scoped tokens, + review existing access, and rotate credentials from the API menu. +

+
@@ -6189,13 +6320,6 @@ const Settings: Component = (props) => {
- {/* Guest URLs Tab */} - - -
@@ -6737,4 +6861,22 @@ const Settings: Component = (props) => { ); }; +const PlatformComingSoon: Component<{ name: string; description?: string }> = (props) => { + const description = + props.description ?? + `Support for ${props.name} is on the roadmap. Track progress on GitHub or join our community discussions to weigh in on requirements.`; + + return ( +
+ + +

+ We’re collecting feedback and prioritising the engineering work for this platform. If you’d like to influence + the roadmap or volunteer for early testing, please open a discussion on GitHub or reach out via Discord. +

+
+
+ ); +}; + export default Settings; diff --git a/frontend-modern/src/components/icons/ApiIcon.tsx b/frontend-modern/src/components/icons/ApiIcon.tsx new file mode 100644 index 000000000..87672abf8 --- /dev/null +++ b/frontend-modern/src/components/icons/ApiIcon.tsx @@ -0,0 +1,27 @@ +import type { Component } from 'solid-js'; + +interface ApiIconProps { + class?: string; +} + +export const ApiIcon: Component = (props) => ( + +); + +export default ApiIcon; diff --git a/frontend-modern/src/components/icons/ServersIcon.tsx b/frontend-modern/src/components/icons/ServersIcon.tsx new file mode 100644 index 000000000..84481291b --- /dev/null +++ b/frontend-modern/src/components/icons/ServersIcon.tsx @@ -0,0 +1,21 @@ +import type { Component } from 'solid-js'; + +interface ServersIconProps { + class?: string; +} + +export const ServersIcon: Component = (props) => ( + +); diff --git a/frontend-modern/src/constants/apiScopes.ts b/frontend-modern/src/constants/apiScopes.ts new file mode 100644 index 000000000..d4f9639a9 --- /dev/null +++ b/frontend-modern/src/constants/apiScopes.ts @@ -0,0 +1,64 @@ +export interface APIScopeOption { + value: string; + label: string; + description?: string; + group: 'Monitoring' | 'Agents' | 'Settings'; +} + +export const HOST_AGENT_SCOPE = 'host-agent:report'; +export const DOCKER_REPORT_SCOPE = 'docker:report'; +export const DOCKER_MANAGE_SCOPE = 'docker:manage'; +export const MONITORING_READ_SCOPE = 'monitoring:read'; +export const MONITORING_WRITE_SCOPE = 'monitoring:write'; +export const SETTINGS_READ_SCOPE = 'settings:read'; +export const SETTINGS_WRITE_SCOPE = 'settings:write'; + +export const API_SCOPE_OPTIONS: APIScopeOption[] = [ + { + value: MONITORING_READ_SCOPE, + label: 'Dashboards & alerts (read)', + description: 'View monitoring data, dashboards, and alert history.', + group: 'Monitoring', + }, + { + value: MONITORING_WRITE_SCOPE, + label: 'Alert actions (write)', + description: 'Acknowledge, silence, and clear alerts.', + 
group: 'Monitoring', + }, + { + value: DOCKER_REPORT_SCOPE, + label: 'Docker agent reporting', + description: 'Allow the Docker agent to submit host and container telemetry.', + group: 'Agents', + }, + { + value: DOCKER_MANAGE_SCOPE, + label: 'Docker lifecycle management', + description: 'Enable agent-triggered container commands and host actions.', + group: 'Agents', + }, + { + value: HOST_AGENT_SCOPE, + label: 'Host agent reporting', + description: 'Allow the host agent to send OS, CPU, and disk metrics.', + group: 'Agents', + }, + { + value: SETTINGS_READ_SCOPE, + label: 'Settings (read)', + description: 'Fetch configuration snapshots such as nodes and security posture.', + group: 'Settings', + }, + { + value: SETTINGS_WRITE_SCOPE, + label: 'Settings (write)', + description: 'Modify configuration, manage tokens, and trigger updates.', + group: 'Settings', + }, +]; + +export const API_SCOPE_LABELS = API_SCOPE_OPTIONS.reduce>((acc, option) => { + acc[option.value] = option.label; + return acc; +}, {}); diff --git a/frontend-modern/src/types/api.ts b/frontend-modern/src/types/api.ts index 63c10030b..53b261250 100644 --- a/frontend-modern/src/types/api.ts +++ b/frontend-modern/src/types/api.ts @@ -194,6 +194,49 @@ export interface DockerContainerNetwork { ipv6?: string; } +export interface Host { + id: string; + hostname: string; + displayName: string; + platform?: string; + osName?: string; + osVersion?: string; + kernelVersion?: string; + architecture?: string; + cpuCount?: number; + cpuUsage?: number; + loadAverage?: number[]; + memory: Memory; + disks?: Disk[]; + networkInterfaces?: HostNetworkInterface[]; + sensors?: HostSensorSummary; + status: string; + uptimeSeconds?: number; + lastSeen: number; + intervalSeconds?: number; + agentVersion?: string; + tokenId?: string; + tokenName?: string; + tokenHint?: string; + tokenLastUsedAt?: number; + tags?: string[]; +} + +export interface HostNetworkInterface { + name: string; + mac?: string; + addresses?: string[]; + 
rxBytes?: number; + txBytes?: number; + speedMbps?: number; +} + +export interface HostSensorSummary { + temperatureCelsius?: Record; + fanRpm?: Record; + additional?: Record; +} + export interface ReplicationJob { id: string; instance: string; diff --git a/go.mod b/go.mod index 9884e7d75..39500d2f7 100644 --- a/go.mod +++ b/go.mod @@ -12,11 +12,14 @@ require ( github.com/gorilla/websocket v1.5.3 github.com/joho/godotenv v1.5.1 github.com/oklog/ulid/v2 v2.1.1 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.23.2 github.com/rs/zerolog v1.34.0 + github.com/shirou/gopsutil/v3 v3.24.5 github.com/spf13/cobra v1.9.1 golang.org/x/crypto v0.42.0 golang.org/x/oauth2 v0.31.0 + golang.org/x/sys v0.36.0 golang.org/x/term v0.35.0 golang.org/x/time v0.13.0 gopkg.in/yaml.v3 v3.0.1 @@ -36,7 +39,9 @@ require ( github.com/go-jose/go-jose/v4 v4.0.5 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect @@ -46,11 +51,15 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect - github.com/pkg/errors v0.9.1 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.66.1 // indirect github.com/prometheus/procfs v0.16.1 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/spf13/pflag v1.0.7 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + 
github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect go.opentelemetry.io/otel v1.38.0 // indirect @@ -58,7 +67,6 @@ require ( go.opentelemetry.io/otel/metric v1.38.0 // indirect go.opentelemetry.io/otel/trace v1.38.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/sys v0.36.0 // indirect google.golang.org/protobuf v1.36.8 // indirect gotest.tools/v3 v3.5.2 // indirect ) diff --git a/go.sum b/go.sum index 4312ec428..5c02639e8 100644 --- a/go.sum +++ b/go.sum @@ -39,7 +39,10 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -60,6 +63,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 
h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= @@ -90,6 +95,8 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= @@ -104,6 +111,12 @@ github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod 
h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -115,6 +128,12 @@ github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= @@ -145,10 +164,14 @@ golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= golang.org/x/oauth2 v0.31.0/go.mod 
h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= @@ -158,6 +181,7 @@ golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI= golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= diff --git a/internal/api/alerts.go b/internal/api/alerts.go index 5adf4dfb2..a7dda1589 100644 --- 
a/internal/api/alerts.go +++ b/internal/api/alerts.go @@ -9,6 +9,7 @@ import ( "time" "github.com/rcourtman/pulse-go-rewrite/internal/alerts" + "github.com/rcourtman/pulse-go-rewrite/internal/config" "github.com/rcourtman/pulse-go-rewrite/internal/mock" "github.com/rcourtman/pulse-go-rewrite/internal/models" "github.com/rcourtman/pulse-go-rewrite/internal/monitoring" @@ -681,26 +682,59 @@ func (h *AlertHandlers) HandleAlerts(w http.ResponseWriter, r *http.Request) { switch { case path == "config" && r.Method == http.MethodGet: + if !ensureScope(w, r, config.ScopeMonitoringRead) { + return + } h.GetAlertConfig(w, r) case path == "config" && r.Method == http.MethodPut: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.UpdateAlertConfig(w, r) case path == "activate" && r.Method == http.MethodPost: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.ActivateAlerts(w, r) case path == "active" && r.Method == http.MethodGet: + if !ensureScope(w, r, config.ScopeMonitoringRead) { + return + } h.GetActiveAlerts(w, r) case path == "history" && r.Method == http.MethodGet: + if !ensureScope(w, r, config.ScopeMonitoringRead) { + return + } h.GetAlertHistory(w, r) case path == "history" && r.Method == http.MethodDelete: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.ClearAlertHistory(w, r) case path == "bulk/acknowledge" && r.Method == http.MethodPost: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.BulkAcknowledgeAlerts(w, r) case path == "bulk/clear" && r.Method == http.MethodPost: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.BulkClearAlerts(w, r) case strings.HasSuffix(path, "/acknowledge") && r.Method == http.MethodPost: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.AcknowledgeAlert(w, r) case strings.HasSuffix(path, "/unacknowledge") && r.Method == http.MethodPost: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } 
h.UnacknowledgeAlert(w, r) case strings.HasSuffix(path, "/clear") && r.Method == http.MethodPost: + if !ensureScope(w, r, config.ScopeMonitoringWrite) { + return + } h.ClearAlert(w, r) default: http.Error(w, "Not found", http.StatusNotFound) diff --git a/internal/api/auth.go b/internal/api/auth.go index 4dabc7d16..0192e4bd6 100644 --- a/internal/api/auth.go +++ b/internal/api/auth.go @@ -5,6 +5,7 @@ import ( cryptorand "crypto/rand" "encoding/base64" "encoding/hex" + "encoding/json" "fmt" "net/http" "os" @@ -618,6 +619,63 @@ func RequireAdmin(cfg *config.Config, handler http.HandlerFunc) http.HandlerFunc } } +// RequireScope ensures that token-authenticated requests include the specified scope. +// Session-based (browser) requests bypass the scope check. +func RequireScope(scope string, handler http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if scope == "" { + handler(w, r) + return + } + + record := getAPITokenRecordFromRequest(r) + if record == nil { + // Session-authenticated request + handler(w, r) + return + } + + if record.HasScope(scope) { + handler(w, r) + return + } + + log.Warn(). + Str("token_id", record.ID). + Str("required_scope", scope). + Msg("API token missing required scope") + + respondMissingScope(w, scope) + } +} + +func respondMissingScope(w http.ResponseWriter, scope string) { + if w == nil { + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusForbidden) + _ = json.NewEncoder(w).Encode(map[string]any{ + "error": "missing_scope", + "requiredScope": scope, + }) +} + +// ensureScope enforces that the request either originates from a session or a token +// possessing the specified scope. Returns true when access should continue. 
+func ensureScope(w http.ResponseWriter, r *http.Request, scope string) bool { + if scope == "" { + return true + } + record := getAPITokenRecordFromRequest(r) + if record == nil || record.HasScope(scope) { + return true + } + respondMissingScope(w, scope) + return false +} + func attachAPITokenRecord(r *http.Request, record *config.APITokenRecord) { if record == nil { return diff --git a/internal/api/auth_scope_test.go b/internal/api/auth_scope_test.go new file mode 100644 index 000000000..cc33be263 --- /dev/null +++ b/internal/api/auth_scope_test.go @@ -0,0 +1,58 @@ +package api + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/rcourtman/pulse-go-rewrite/internal/config" +) + +func TestRequireScopeAllowsSession(t *testing.T) { + handler := RequireScope(config.ScopeSettingsWrite, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + req := httptest.NewRequest(http.MethodGet, "/", nil) + rr := httptest.NewRecorder() + + handler(rr, req) + + if rr.Code != http.StatusOK { + t.Fatalf("expected status 200 for session request, got %d", rr.Code) + } +} + +func TestRequireScopeRejectsMissingScope(t *testing.T) { + handler := RequireScope(config.ScopeSettingsWrite, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + req := httptest.NewRequest(http.MethodGet, "/", nil) + record := config.APITokenRecord{ID: "token-1", Scopes: []string{config.ScopeMonitoringRead}} + attachAPITokenRecord(req, &record) + + rr := httptest.NewRecorder() + handler(rr, req) + + if rr.Code != http.StatusForbidden { + t.Fatalf("expected status 403 when scope missing, got %d", rr.Code) + } +} + +func TestRequireScopeAllowsMatchingScope(t *testing.T) { + handler := RequireScope(config.ScopeDockerReport, func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + }) + + req := httptest.NewRequest(http.MethodGet, "/", nil) + record := config.APITokenRecord{ID: "token-2", Scopes: 
[]string{config.ScopeDockerReport}} + attachAPITokenRecord(req, &record) + + rr := httptest.NewRecorder() + handler(rr, req) + + if rr.Code != http.StatusAccepted { + t.Fatalf("expected status 202 when scope present, got %d", rr.Code) + } +} diff --git a/internal/api/host_agents.go b/internal/api/host_agents.go new file mode 100644 index 000000000..dca5e6296 --- /dev/null +++ b/internal/api/host_agents.go @@ -0,0 +1,78 @@ +package api + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/rcourtman/pulse-go-rewrite/internal/monitoring" + "github.com/rcourtman/pulse-go-rewrite/internal/utils" + "github.com/rcourtman/pulse-go-rewrite/internal/websocket" + agentshost "github.com/rcourtman/pulse-go-rewrite/pkg/agents/host" + "github.com/rs/zerolog/log" +) + +// HostAgentHandlers manages ingest from the pulse-host-agent. +type HostAgentHandlers struct { + monitor *monitoring.Monitor + wsHub *websocket.Hub +} + +// NewHostAgentHandlers constructs a new handler set for host agents. +func NewHostAgentHandlers(m *monitoring.Monitor, hub *websocket.Hub) *HostAgentHandlers { + return &HostAgentHandlers{monitor: m, wsHub: hub} +} + +// SetMonitor updates the monitor reference for host agent handlers. +func (h *HostAgentHandlers) SetMonitor(m *monitoring.Monitor) { + h.monitor = m +} + +// HandleReport ingests host agent reports. 
+func (h *HostAgentHandlers) HandleReport(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + writeErrorResponse(w, http.StatusMethodNotAllowed, "method_not_allowed", "Only POST is allowed", nil) + return + } + + defer r.Body.Close() + + var report agentshost.Report + if err := json.NewDecoder(r.Body).Decode(&report); err != nil { + writeErrorResponse(w, http.StatusBadRequest, "invalid_json", "Failed to decode request body", map[string]string{"error": err.Error()}) + return + } + + if report.Timestamp.IsZero() { + report.Timestamp = time.Now().UTC() + } + + tokenRecord := getAPITokenRecordFromRequest(r) + + host, err := h.monitor.ApplyHostReport(report, tokenRecord) + if err != nil { + writeErrorResponse(w, http.StatusBadRequest, "invalid_report", err.Error(), nil) + return + } + + log.Debug(). + Str("hostId", host.ID). + Str("hostname", host.Hostname). + Str("platform", host.Platform). + Msg("Host agent report processed") + + go h.wsHub.BroadcastState(h.monitor.GetState().ToFrontend()) + + resp := map[string]any{ + "success": true, + "hostId": host.ID, + "lastSeen": host.LastSeen, + "platform": host.Platform, + "osName": host.OSName, + "osVersion": host.OSVersion, + } + + if err := utils.WriteJSONResponse(w, resp); err != nil { + log.Error().Err(err).Msg("Failed to serialize host agent response") + } +} diff --git a/internal/api/router.go b/internal/api/router.go index 506e93b2c..8ce912acb 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -39,6 +39,7 @@ type Router struct { configHandlers *ConfigHandlers notificationHandlers *NotificationHandlers dockerAgentHandlers *DockerAgentHandlers + hostAgentHandlers *HostAgentHandlers systemSettingsHandler *SystemSettingsHandler wsHub *websocket.Hub reloadFunc func() error @@ -120,21 +121,23 @@ func (r *Router) setupRoutes() { r.configHandlers = NewConfigHandlers(r.config, r.monitor, r.reloadFunc, r.wsHub, guestMetadataHandler, r.reloadSystemSettings) updateHandlers := 
NewUpdateHandlers(r.updateManager, r.config.DataPath) r.dockerAgentHandlers = NewDockerAgentHandlers(r.monitor, r.wsHub) + r.hostAgentHandlers = NewHostAgentHandlers(r.monitor, r.wsHub) // API routes r.mux.HandleFunc("/api/health", r.handleHealth) r.mux.HandleFunc("/api/monitoring/scheduler/health", RequireAuth(r.config, r.handleSchedulerHealth)) r.mux.HandleFunc("/api/state", r.handleState) - r.mux.HandleFunc("/api/agents/docker/report", RequireAuth(r.config, r.dockerAgentHandlers.HandleReport)) - r.mux.HandleFunc("/api/agents/docker/commands/", RequireAuth(r.config, r.dockerAgentHandlers.HandleCommandAck)) - r.mux.HandleFunc("/api/agents/docker/hosts/", RequireAdmin(r.config, r.dockerAgentHandlers.HandleDockerHostActions)) + r.mux.HandleFunc("/api/agents/docker/report", RequireAuth(r.config, RequireScope(config.ScopeDockerReport, r.dockerAgentHandlers.HandleReport))) + r.mux.HandleFunc("/api/agents/host/report", RequireAuth(r.config, RequireScope(config.ScopeHostReport, r.hostAgentHandlers.HandleReport))) + r.mux.HandleFunc("/api/agents/docker/commands/", RequireAuth(r.config, RequireScope(config.ScopeDockerManage, r.dockerAgentHandlers.HandleCommandAck))) + r.mux.HandleFunc("/api/agents/docker/hosts/", RequireAdmin(r.config, RequireScope(config.ScopeDockerManage, r.dockerAgentHandlers.HandleDockerHostActions))) r.mux.HandleFunc("/api/version", r.handleVersion) r.mux.HandleFunc("/api/storage/", r.handleStorage) r.mux.HandleFunc("/api/storage-charts", r.handleStorageCharts) r.mux.HandleFunc("/api/charts", r.handleCharts) r.mux.HandleFunc("/api/diagnostics", RequireAuth(r.config, r.handleDiagnostics)) - r.mux.HandleFunc("/api/diagnostics/temperature-proxy/register-nodes", RequireAdmin(r.config, r.handleDiagnosticsRegisterProxyNodes)) - r.mux.HandleFunc("/api/diagnostics/docker/prepare-token", RequireAdmin(r.config, r.handleDiagnosticsDockerPrepareToken)) + r.mux.HandleFunc("/api/diagnostics/temperature-proxy/register-nodes", RequireAdmin(r.config, 
RequireScope(config.ScopeSettingsWrite, r.handleDiagnosticsRegisterProxyNodes))) + r.mux.HandleFunc("/api/diagnostics/docker/prepare-token", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.handleDiagnosticsDockerPrepareToken))) r.mux.HandleFunc("/api/install/pulse-sensor-proxy", r.handleDownloadPulseSensorProxy) r.mux.HandleFunc("/api/install/install-sensor-proxy.sh", r.handleDownloadInstallerScript) r.mux.HandleFunc("/api/install/install-docker.sh", r.handleDownloadDockerInstallerScript) @@ -173,9 +176,9 @@ func (r *Router) setupRoutes() { r.mux.HandleFunc("/api/config/nodes", func(w http.ResponseWriter, req *http.Request) { switch req.Method { case http.MethodGet: - r.configHandlers.HandleGetNodes(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetNodes))(w, req) case http.MethodPost: - RequireAdmin(r.configHandlers.config, r.configHandlers.HandleAddNode)(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleAddNode))(w, req) default: http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) } @@ -184,7 +187,7 @@ func (r *Router) setupRoutes() { // Test node configuration endpoint (for new nodes) r.mux.HandleFunc("/api/config/nodes/test-config", func(w http.ResponseWriter, req *http.Request) { if req.Method == http.MethodPost { - r.configHandlers.HandleTestNodeConfig(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestNodeConfig))(w, req) } else { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) } @@ -193,7 +196,7 @@ func (r *Router) setupRoutes() { // Test connection endpoint r.mux.HandleFunc("/api/config/nodes/test-connection", func(w http.ResponseWriter, req *http.Request) { if req.Method == http.MethodPost { - r.configHandlers.HandleTestConnection(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, 
r.configHandlers.HandleTestConnection))(w, req) } else { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) } @@ -201,13 +204,13 @@ func (r *Router) setupRoutes() { r.mux.HandleFunc("/api/config/nodes/", func(w http.ResponseWriter, req *http.Request) { switch req.Method { case http.MethodPut: - RequireAdmin(r.configHandlers.config, r.configHandlers.HandleUpdateNode)(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateNode))(w, req) case http.MethodDelete: - RequireAdmin(r.configHandlers.config, r.configHandlers.HandleDeleteNode)(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleDeleteNode))(w, req) case http.MethodPost: // Handle test endpoint if strings.HasSuffix(req.URL.Path, "/test") { - r.configHandlers.HandleTestNode(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleTestNode))(w, req) } else { http.Error(w, "Not found", http.StatusNotFound) } @@ -220,10 +223,10 @@ func (r *Router) setupRoutes() { r.mux.HandleFunc("/api/config/system", func(w http.ResponseWriter, req *http.Request) { switch req.Method { case http.MethodGet: - r.configHandlers.HandleGetSystemSettings(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetSystemSettings))(w, req) case http.MethodPut: // DEPRECATED - use /api/system/settings/update instead - RequireAdmin(r.configHandlers.config, r.configHandlers.HandleUpdateSystemSettingsOLD)(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateSystemSettingsOLD))(w, req) default: http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) } @@ -233,9 +236,9 @@ func (r *Router) setupRoutes() { r.mux.HandleFunc("/api/system/mock-mode", func(w http.ResponseWriter, req *http.Request) { switch req.Method { case http.MethodGet: - 
r.configHandlers.HandleGetMockMode(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsRead, r.configHandlers.HandleGetMockMode))(w, req) case http.MethodPost, http.MethodPut: - RequireAdmin(r.configHandlers.config, r.configHandlers.HandleUpdateMockMode)(w, req) + RequireAdmin(r.configHandlers.config, RequireScope(config.ScopeSettingsWrite, r.configHandlers.HandleUpdateMockMode))(w, req) default: http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) } @@ -248,20 +251,31 @@ func (r *Router) setupRoutes() { r.mux.HandleFunc("/api/logout", r.handleLogout) r.mux.HandleFunc("/api/login", r.handleLogin) r.mux.HandleFunc("/api/security/reset-lockout", r.handleResetLockout) - r.mux.HandleFunc("/api/security/oidc", RequireAdmin(r.config, r.handleOIDCConfig)) + r.mux.HandleFunc("/api/security/oidc", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.handleOIDCConfig))) r.mux.HandleFunc("/api/oidc/login", r.handleOIDCLogin) r.mux.HandleFunc(config.DefaultOIDCCallbackPath, r.handleOIDCCallback) r.mux.HandleFunc("/api/security/tokens", RequireAdmin(r.config, func(w http.ResponseWriter, req *http.Request) { switch req.Method { case http.MethodGet: + if !ensureScope(w, req, config.ScopeSettingsRead) { + return + } r.handleListAPITokens(w, req) case http.MethodPost: + if !ensureScope(w, req, config.ScopeSettingsWrite) { + return + } r.handleCreateAPIToken(w, req) default: http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) } })) - r.mux.HandleFunc("/api/security/tokens/", RequireAdmin(r.config, r.handleDeleteAPIToken)) + r.mux.HandleFunc("/api/security/tokens/", RequireAdmin(r.config, func(w http.ResponseWriter, req *http.Request) { + if !ensureScope(w, req, config.ScopeSettingsWrite) { + return + } + r.handleDeleteAPIToken(w, req) + })) r.mux.HandleFunc("/api/security/status", func(w http.ResponseWriter, req *http.Request) { if req.Method == http.MethodGet { w.Header().Set("Content-Type", "application/json") @@ 
-847,13 +861,13 @@ func (r *Router) setupRoutes() { r.mux.HandleFunc("/api/notifications/", r.notificationHandlers.HandleNotifications) // Settings routes - r.mux.HandleFunc("/api/settings", getSettings) - r.mux.HandleFunc("/api/settings/update", updateSettings) + r.mux.HandleFunc("/api/settings", RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, getSettings))) + r.mux.HandleFunc("/api/settings/update", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, updateSettings))) // System settings and API token management r.systemSettingsHandler = NewSystemSettingsHandler(r.config, r.persistence, r.wsHub, r.monitor, r.reloadSystemSettings) - r.mux.HandleFunc("/api/system/settings", r.systemSettingsHandler.HandleGetSystemSettings) - r.mux.HandleFunc("/api/system/settings/update", r.systemSettingsHandler.HandleUpdateSystemSettings) + r.mux.HandleFunc("/api/system/settings", RequireAdmin(r.config, RequireScope(config.ScopeSettingsRead, r.systemSettingsHandler.HandleGetSystemSettings))) + r.mux.HandleFunc("/api/system/settings/update", RequireAdmin(r.config, RequireScope(config.ScopeSettingsWrite, r.systemSettingsHandler.HandleUpdateSystemSettings))) r.mux.HandleFunc("/api/system/ssh-config", r.handleSSHConfig) r.mux.HandleFunc("/api/system/verify-temperature-ssh", r.handleVerifyTemperatureSSH) r.mux.HandleFunc("/api/system/proxy-public-key", r.handleProxyPublicKey) @@ -1040,6 +1054,9 @@ func (r *Router) SetMonitor(m *monitoring.Monitor) { if r.dockerAgentHandlers != nil { r.dockerAgentHandlers.SetMonitor(m) } + if r.hostAgentHandlers != nil { + r.hostAgentHandlers.SetMonitor(m) + } if r.systemSettingsHandler != nil { r.systemSettingsHandler.SetMonitor(m) } @@ -2231,6 +2248,11 @@ func (r *Router) handleState(w http.ResponseWriter, req *http.Request) { return } + if record := getAPITokenRecordFromRequest(req); record != nil && !record.HasScope(config.ScopeMonitoringRead) { + respondMissingScope(w, config.ScopeMonitoringRead) + return + } + 
log.Debug().Msg("[DEBUG] handleState: Before GetState") state := r.monitor.GetState() log.Debug().Msg("[DEBUG] handleState: After GetState, before ToFrontend") @@ -3257,7 +3279,7 @@ func (r *Router) handleDiagnosticsDockerPrepareToken(w http.ResponseWriter, req return } - record, err := config.NewAPITokenRecord(rawToken, name) + record, err := config.NewAPITokenRecord(rawToken, name, []string{config.ScopeDockerReport}) if err != nil { log.Error().Err(err).Msg("Failed to construct token record for docker migration") writeErrorResponse(w, http.StatusInternalServerError, "token_generation_failed", "Failed to generate API token", nil) diff --git a/internal/api/router_integration_test.go b/internal/api/router_integration_test.go index b4edb30e9..14d90b345 100644 --- a/internal/api/router_integration_test.go +++ b/internal/api/router_integration_test.go @@ -338,7 +338,7 @@ func TestAuthenticatedEndpointsRequireToken(t *testing.T) { srv := newIntegrationServerWithConfig(t, func(cfg *config.Config) { cfg.DisableAuth = false cfg.APITokenEnabled = true - record, err := config.NewAPITokenRecord(apiToken, "Integration test token") + record, err := config.NewAPITokenRecord(apiToken, "Integration test token", nil) if err != nil { t.Fatalf("create API token record: %v", err) } diff --git a/internal/api/security_setup_fix.go b/internal/api/security_setup_fix.go index 9927272c7..0100dd1c9 100644 --- a/internal/api/security_setup_fix.go +++ b/internal/api/security_setup_fix.go @@ -129,7 +129,7 @@ func handleQuickSecuritySetupFixed(r *Router) http.HandlerFunc { // Store the raw API token for displaying to the user rawAPIToken := setupRequest.APIToken - tokenRecord, err := config.NewAPITokenRecord(rawAPIToken, "Primary token") + tokenRecord, err := config.NewAPITokenRecord(rawAPIToken, "Primary token", nil) if err != nil { log.Error().Err(err).Msg("Failed to construct API token record") http.Error(w, "Failed to process API token", http.StatusInternalServerError) @@ -156,14 +156,14 @@ 
func handleQuickSecuritySetupFixed(r *Router) http.HandlerFunc { } log.Info().Msg("Runtime config updated with new security settings - active immediately") - // Save system settings to system.json - systemSettings := config.DefaultSystemSettings() - systemSettings.ConnectionTimeout = 10 // Default seconds - systemSettings.AutoUpdateEnabled = false // Default disabled - if err := r.persistence.SaveSystemSettings(*systemSettings); err != nil { - log.Error().Err(err).Msg("Failed to save system settings") - // Continue anyway - not critical for auth setup - } + // Save system settings to system.json + systemSettings := config.DefaultSystemSettings() + systemSettings.ConnectionTimeout = 10 // Default seconds + systemSettings.AutoUpdateEnabled = false // Default disabled + if err := r.persistence.SaveSystemSettings(*systemSettings); err != nil { + log.Error().Err(err).Msg("Failed to save system settings") + // Continue anyway - not critical for auth setup + } // Detect environment isSystemd := os.Getenv("INVOCATION_ID") != "" @@ -428,7 +428,7 @@ func (r *Router) HandleRegenerateAPIToken(w http.ResponseWriter, rq *http.Reques return } - tokenRecord, err := config.NewAPITokenRecord(rawToken, "Regenerated token") + tokenRecord, err := config.NewAPITokenRecord(rawToken, "Regenerated token", nil) if err != nil { log.Error().Err(err).Msg("Failed to construct API token record") http.Error(w, "Failed to generate token", http.StatusInternalServerError) diff --git a/internal/api/security_tokens.go b/internal/api/security_tokens.go index 4b106a9e3..dfa0369c8 100644 --- a/internal/api/security_tokens.go +++ b/internal/api/security_tokens.go @@ -2,8 +2,10 @@ package api import ( "encoding/json" + "fmt" "io" "net/http" + "sort" "strings" "time" @@ -19,6 +21,7 @@ type apiTokenDTO struct { Suffix string `json:"suffix"` CreatedAt time.Time `json:"createdAt"` LastUsedAt *time.Time `json:"lastUsedAt,omitempty"` + Scopes []string `json:"scopes"` } func toAPITokenDTO(record 
config.APITokenRecord) apiTokenDTO { @@ -29,9 +32,58 @@ func toAPITokenDTO(record config.APITokenRecord) apiTokenDTO { Suffix: record.Suffix, CreatedAt: record.CreatedAt, LastUsedAt: record.LastUsedAt, + Scopes: append([]string{}, record.Scopes...), } } +func normalizeRequestedScopes(raw *[]string) ([]string, error) { + if raw == nil { + return []string{config.ScopeWildcard}, nil + } + + requested := *raw + if len(requested) == 0 { + return nil, fmt.Errorf("select at least one scope or omit the field for full access") + } + + seen := make(map[string]struct{}, len(requested)) + normalized := make([]string, 0, len(requested)) + hasWildcard := false + + for _, scope := range requested { + scope = strings.TrimSpace(scope) + if scope == "" { + return nil, fmt.Errorf("scope identifiers cannot be blank") + } + if scope == config.ScopeWildcard { + hasWildcard = true + continue + } + if !config.IsKnownScope(scope) { + return nil, fmt.Errorf("unknown scope %q", scope) + } + if _, exists := seen[scope]; exists { + continue + } + seen[scope] = struct{}{} + normalized = append(normalized, scope) + } + + if hasWildcard { + if len(normalized) > 0 { + return nil, fmt.Errorf("wildcard '*' cannot be combined with other scopes") + } + return []string{config.ScopeWildcard}, nil + } + + if len(normalized) == 0 { + return nil, fmt.Errorf("select at least one scope") + } + + sort.Strings(normalized) + return normalized, nil +} + // handleListAPITokens returns all configured API tokens (metadata only). func (r *Router) handleListAPITokens(w http.ResponseWriter, req *http.Request) { if req.Method != http.MethodGet { @@ -51,7 +103,8 @@ func (r *Router) handleListAPITokens(w http.ResponseWriter, req *http.Request) { } type createTokenRequest struct { - Name string `json:"name"` + Name string `json:"name"` + Scopes *[]string `json:"scopes"` } // handleCreateAPIToken generates and stores a new API token. 
@@ -73,6 +126,13 @@ func (r *Router) handleCreateAPIToken(w http.ResponseWriter, req *http.Request) name = "API token" } + scopes, err := normalizeRequestedScopes(payload.Scopes) + if err != nil { + log.Warn().Err(err).Msg("Invalid scopes provided for API token creation") + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + rawToken, err := internalauth.GenerateAPIToken() if err != nil { log.Error().Err(err).Msg("Failed to generate API token") @@ -80,7 +140,7 @@ func (r *Router) handleCreateAPIToken(w http.ResponseWriter, req *http.Request) return } - record, err := config.NewAPITokenRecord(rawToken, name) + record, err := config.NewAPITokenRecord(rawToken, name, scopes) if err != nil { log.Error().Err(err).Str("token_name", name).Msg("Failed to construct API token record") http.Error(w, "Failed to generate token", http.StatusInternalServerError) diff --git a/internal/api/security_tokens_test.go b/internal/api/security_tokens_test.go new file mode 100644 index 000000000..4f89c2234 --- /dev/null +++ b/internal/api/security_tokens_test.go @@ -0,0 +1,52 @@ +package api + +import ( + "testing" + + "github.com/rcourtman/pulse-go-rewrite/internal/config" +) + +func TestNormalizeRequestedScopesDefaultsToWildcard(t *testing.T) { + scopes, err := normalizeRequestedScopes(nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(scopes) != 1 || scopes[0] != config.ScopeWildcard { + t.Fatalf("expected wildcard scope, got %#v", scopes) + } +} + +func TestNormalizeRequestedScopesValidList(t *testing.T) { + raw := []string{"docker:report", "docker:report", "monitoring:read"} + scopes, err := normalizeRequestedScopes(&raw) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(scopes) != 2 { + t.Fatalf("expected 2 scopes, got %d", len(scopes)) + } + if scopes[0] != config.ScopeDockerReport || scopes[1] != config.ScopeMonitoringRead { + t.Fatalf("unexpected scopes order: %#v", scopes) + } +} + +func 
TestNormalizeRequestedScopesRejectsMixedWildcard(t *testing.T) { + raw := []string{"*", "docker:report"} + if _, err := normalizeRequestedScopes(&raw); err == nil { + t.Fatal("expected error when mixing wildcard with explicit scopes") + } +} + +func TestNormalizeRequestedScopesRejectsUnknown(t *testing.T) { + raw := []string{"unknown"} + if _, err := normalizeRequestedScopes(&raw); err == nil { + t.Fatal("expected error for unknown scope") + } +} + +func TestNormalizeRequestedScopesRejectsEmpty(t *testing.T) { + raw := []string{} + if _, err := normalizeRequestedScopes(&raw); err == nil { + t.Fatal("expected error for empty scopes array") + } +} diff --git a/internal/api/types.go b/internal/api/types.go index 2eaf077b7..c76ae51aa 100644 --- a/internal/api/types.go +++ b/internal/api/types.go @@ -57,6 +57,7 @@ type StateResponse struct { VMs []models.VM `json:"vms"` Containers []models.Container `json:"containers"` DockerHosts []models.DockerHostFrontend `json:"dockerHosts"` + Hosts []models.HostFrontend `json:"hosts"` Storage []models.Storage `json:"storage"` CephClusters []models.CephCluster `json:"cephClusters"` PBSInstances []models.PBSInstance `json:"pbs"` diff --git a/internal/config/api_tokens.go b/internal/config/api_tokens.go index 0c65f3400..40b5f5f9f 100644 --- a/internal/config/api_tokens.go +++ b/internal/config/api_tokens.go @@ -9,6 +9,37 @@ import ( "github.com/rcourtman/pulse-go-rewrite/internal/auth" ) +// Canonical API token scope strings. +const ( + ScopeWildcard = "*" + ScopeMonitoringRead = "monitoring:read" + ScopeMonitoringWrite = "monitoring:write" + ScopeDockerReport = "docker:report" + ScopeDockerManage = "docker:manage" + ScopeHostReport = "host-agent:report" + ScopeSettingsRead = "settings:read" + ScopeSettingsWrite = "settings:write" +) + +// AllKnownScopes enumerates scopes recognized by the backend (excluding the wildcard sentinel). 
+var AllKnownScopes = []string{ + ScopeMonitoringRead, + ScopeMonitoringWrite, + ScopeDockerReport, + ScopeDockerManage, + ScopeHostReport, + ScopeSettingsRead, + ScopeSettingsWrite, +} + +var scopeLookup = func() map[string]struct{} { + lookup := make(map[string]struct{}, len(AllKnownScopes)) + for _, scope := range AllKnownScopes { + lookup[scope] = struct{}{} + } + return lookup +}() + // ErrInvalidToken is returned when a token value is empty or malformed. var ErrInvalidToken = errors.New("invalid API token") @@ -21,6 +52,20 @@ type APITokenRecord struct { Suffix string `json:"suffix,omitempty"` CreatedAt time.Time `json:"createdAt"` LastUsedAt *time.Time `json:"lastUsedAt,omitempty"` + Scopes []string `json:"scopes,omitempty"` +} + +// ensureScopes normalizes the scope slice, applying legacy defaults. +func (r *APITokenRecord) ensureScopes() { + if len(r.Scopes) == 0 { + r.Scopes = []string{ScopeWildcard} + return + } + + // Copy to avoid shared underlying slice if this record is reused. + scopes := make([]string, len(r.Scopes)) + copy(scopes, r.Scopes) + r.Scopes = scopes } // Clone returns a copy of the record with duplicated pointer fields. @@ -30,11 +75,12 @@ func (r *APITokenRecord) Clone() APITokenRecord { t := *r.LastUsedAt clone.LastUsedAt = &t } + clone.ensureScopes() return clone } // NewAPITokenRecord constructs a metadata record from the provided raw token. -func NewAPITokenRecord(rawToken, name string) (*APITokenRecord, error) { +func NewAPITokenRecord(rawToken, name string, scopes []string) (*APITokenRecord, error) { if rawToken == "" { return nil, ErrInvalidToken } @@ -47,12 +93,13 @@ func NewAPITokenRecord(rawToken, name string) (*APITokenRecord, error) { Prefix: tokenPrefix(rawToken), Suffix: tokenSuffix(rawToken), CreatedAt: now, + Scopes: normalizeScopes(scopes), } return record, nil } // NewHashedAPITokenRecord constructs a record from an already hashed token. 
-func NewHashedAPITokenRecord(hashedToken, name string, createdAt time.Time) (*APITokenRecord, error) { +func NewHashedAPITokenRecord(hashedToken, name string, createdAt time.Time, scopes []string) (*APITokenRecord, error) { if hashedToken == "" { return nil, ErrInvalidToken } @@ -67,6 +114,7 @@ func NewHashedAPITokenRecord(hashedToken, name string, createdAt time.Time) (*AP Prefix: tokenPrefix(hashedToken), Suffix: tokenSuffix(hashedToken), CreatedAt: createdAt, + Scopes: normalizeScopes(scopes), }, nil } @@ -150,6 +198,7 @@ func (c *Config) ValidateAPIToken(rawToken string) (*APITokenRecord, bool) { if auth.CompareAPIToken(rawToken, record.Hash) { now := time.Now().UTC() c.APITokens[idx].LastUsedAt = &now + c.APITokens[idx].ensureScopes() return &c.APITokens[idx], true } } @@ -158,6 +207,7 @@ func (c *Config) ValidateAPIToken(rawToken string) (*APITokenRecord, bool) { // UpsertAPIToken inserts or replaces a record by ID. func (c *Config) UpsertAPIToken(record APITokenRecord) { + record.ensureScopes() for idx, existing := range c.APITokens { if existing.ID == record.ID { c.APITokens[idx] = record @@ -182,6 +232,9 @@ func (c *Config) RemoveAPIToken(id string) bool { // SortAPITokens keeps tokens ordered newest-first and syncs the legacy APIToken field. func (c *Config) SortAPITokens() { + for i := range c.APITokens { + c.APITokens[i].ensureScopes() + } sort.SliceStable(c.APITokens, func(i, j int) bool { return c.APITokens[i].CreatedAt.After(c.APITokens[j].CreatedAt) }) @@ -193,3 +246,36 @@ func (c *Config) SortAPITokens() { c.APIToken = "" } } + +// normalizeScopes applies defaults and returns a safe copy of the input slice. +func normalizeScopes(scopes []string) []string { + if len(scopes) == 0 { + return []string{ScopeWildcard} + } + result := make([]string, len(scopes)) + copy(result, scopes) + return result +} + +// HasScope reports whether the record grants the requested scope or wildcard access. 
+func (r *APITokenRecord) HasScope(scope string) bool { + if scope == "" { + return true + } + r.ensureScopes() + for _, candidate := range r.Scopes { + if candidate == ScopeWildcard || candidate == scope { + return true + } + } + return false +} + +// IsKnownScope reports whether the provided string matches a supported scope identifier. +func IsKnownScope(scope string) bool { + if scope == ScopeWildcard { + return true + } + _, ok := scopeLookup[scope] + return ok +} diff --git a/internal/config/api_tokens_test.go b/internal/config/api_tokens_test.go new file mode 100644 index 000000000..6074f677f --- /dev/null +++ b/internal/config/api_tokens_test.go @@ -0,0 +1,58 @@ +package config + +import ( + "os" + "path/filepath" + "testing" +) + +func TestAPITokenRecordHasScope(t *testing.T) { + record := APITokenRecord{Scopes: []string{ScopeMonitoringRead}} + + if !record.HasScope(ScopeMonitoringRead) { + t.Fatalf("expected scope %q to be granted", ScopeMonitoringRead) + } + if record.HasScope(ScopeSettingsWrite) { + t.Fatalf("did not expect scope %q to be granted", ScopeSettingsWrite) + } + + record.Scopes = nil // legacy tokens with no scopes should default to wildcard + if !record.HasScope(ScopeSettingsWrite) { + t.Fatalf("expected wildcard to grant %q", ScopeSettingsWrite) + } + + if !IsKnownScope(ScopeMonitoringRead) { + t.Fatalf("expected %q to be known scope", ScopeMonitoringRead) + } + if IsKnownScope("unknown:scope") { + t.Fatalf("unexpected scope recognised") + } +} + +func TestLoadAPITokensAppliesLegacyScopes(t *testing.T) { + if len(AllKnownScopes) == 0 { + t.Fatal("expected known scopes to be defined") + } + + dir := t.TempDir() + persistence := NewConfigPersistence(dir) + if err := persistence.EnsureConfigDir(); err != nil { + t.Fatalf("EnsureConfigDir: %v", err) + } + + payload := `[{"id":"legacy","name":"legacy","hash":"abc","createdAt":"2024-01-01T00:00:00Z"}]` + if err := os.WriteFile(filepath.Join(dir, "api_tokens.json"), []byte(payload), 0600); err != 
nil { + t.Fatalf("write api_tokens.json: %v", err) + } + + tokens, err := persistence.LoadAPITokens() + if err != nil { + t.Fatalf("LoadAPITokens: %v", err) + } + if len(tokens) != 1 { + t.Fatalf("expected 1 token, got %d", len(tokens)) + } + if len(tokens[0].Scopes) != 1 || tokens[0].Scopes[0] != ScopeWildcard { + t.Fatalf("expected legacy token to default to wildcard scope, got %#v", tokens[0].Scopes) + } +} diff --git a/internal/config/config.go b/internal/config/config.go index 349403704..27acc87e6 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -76,15 +76,15 @@ type Config struct { // Monitoring settings // Note: PVE polling is hardcoded to 10s since Proxmox cluster/resources endpoint only updates every 10s - PBSPollingInterval time.Duration `envconfig:"PBS_POLLING_INTERVAL"` // PBS polling interval (60s default) - PMGPollingInterval time.Duration `envconfig:"PMG_POLLING_INTERVAL"` // PMG polling interval (60s default) - ConcurrentPolling bool `envconfig:"CONCURRENT_POLLING" default:"true"` - ConnectionTimeout time.Duration `envconfig:"CONNECTION_TIMEOUT" default:"45s"` // Increased for slow storage operations - MetricsRetentionDays int `envconfig:"METRICS_RETENTION_DAYS" default:"7"` - BackupPollingCycles int `envconfig:"BACKUP_POLLING_CYCLES" default:"10"` - BackupPollingInterval time.Duration `envconfig:"BACKUP_POLLING_INTERVAL"` - EnableBackupPolling bool `envconfig:"ENABLE_BACKUP_POLLING" default:"true"` - WebhookBatchDelay time.Duration `envconfig:"WEBHOOK_BATCH_DELAY" default:"10s"` + PBSPollingInterval time.Duration `envconfig:"PBS_POLLING_INTERVAL"` // PBS polling interval (60s default) + PMGPollingInterval time.Duration `envconfig:"PMG_POLLING_INTERVAL"` // PMG polling interval (60s default) + ConcurrentPolling bool `envconfig:"CONCURRENT_POLLING" default:"true"` + ConnectionTimeout time.Duration `envconfig:"CONNECTION_TIMEOUT" default:"45s"` // Increased for slow storage operations + MetricsRetentionDays int 
`envconfig:"METRICS_RETENTION_DAYS" default:"7"` + BackupPollingCycles int `envconfig:"BACKUP_POLLING_CYCLES" default:"10"` + BackupPollingInterval time.Duration `envconfig:"BACKUP_POLLING_INTERVAL"` + EnableBackupPolling bool `envconfig:"ENABLE_BACKUP_POLLING" default:"true"` + WebhookBatchDelay time.Duration `envconfig:"WEBHOOK_BATCH_DELAY" default:"10s"` AdaptivePollingEnabled bool `envconfig:"ADAPTIVE_POLLING_ENABLED" default:"false"` AdaptivePollingBaseInterval time.Duration `envconfig:"ADAPTIVE_POLLING_BASE_INTERVAL" default:"10s"` AdaptivePollingMinInterval time.Duration `envconfig:"ADAPTIVE_POLLING_MIN_INTERVAL" default:"5s"` @@ -485,36 +485,36 @@ func Load() (*Config, error) { // Initialize config with defaults cfg := &Config{ - BackendHost: "0.0.0.0", - BackendPort: 3000, - FrontendHost: "0.0.0.0", - FrontendPort: 7655, - ConfigPath: dataDir, - DataPath: dataDir, - ConcurrentPolling: true, - ConnectionTimeout: 60 * time.Second, - MetricsRetentionDays: 7, - BackupPollingCycles: 10, - BackupPollingInterval: 0, - EnableBackupPolling: true, - WebhookBatchDelay: 10 * time.Second, + BackendHost: "0.0.0.0", + BackendPort: 3000, + FrontendHost: "0.0.0.0", + FrontendPort: 7655, + ConfigPath: dataDir, + DataPath: dataDir, + ConcurrentPolling: true, + ConnectionTimeout: 60 * time.Second, + MetricsRetentionDays: 7, + BackupPollingCycles: 10, + BackupPollingInterval: 0, + EnableBackupPolling: true, + WebhookBatchDelay: 10 * time.Second, AdaptivePollingEnabled: false, AdaptivePollingBaseInterval: 10 * time.Second, AdaptivePollingMinInterval: 5 * time.Second, AdaptivePollingMaxInterval: 5 * time.Minute, - LogLevel: "info", - LogFormat: "auto", - LogMaxSize: 100, - LogMaxAge: 30, - LogCompress: true, - AllowedOrigins: "", // Empty means no CORS headers (same-origin only) - IframeEmbeddingAllow: "SAMEORIGIN", - PBSPollingInterval: 60 * time.Second, // Default PBS polling (slower) - PMGPollingInterval: 60 * time.Second, // Default PMG polling (aggregated stats) - 
DiscoveryEnabled: false, - DiscoverySubnet: "auto", - EnvOverrides: make(map[string]bool), - OIDC: NewOIDCConfig(), + LogLevel: "info", + LogFormat: "auto", + LogMaxSize: 100, + LogMaxAge: 30, + LogCompress: true, + AllowedOrigins: "", // Empty means no CORS headers (same-origin only) + IframeEmbeddingAllow: "SAMEORIGIN", + PBSPollingInterval: 60 * time.Second, // Default PBS polling (slower) + PMGPollingInterval: 60 * time.Second, // Default PMG polling (aggregated stats) + DiscoveryEnabled: false, + DiscoverySubnet: "auto", + EnvOverrides: make(map[string]bool), + OIDC: NewOIDCConfig(), } cfg.Discovery = DefaultDiscoveryConfig() @@ -548,26 +548,26 @@ func Load() (*Config, error) { cfg.PMGPollingInterval = time.Duration(systemSettings.PMGPollingInterval) * time.Second } - if systemSettings.BackupPollingInterval > 0 { - cfg.BackupPollingInterval = time.Duration(systemSettings.BackupPollingInterval) * time.Second - } else if systemSettings.BackupPollingInterval == 0 { - cfg.BackupPollingInterval = 0 - } - if systemSettings.BackupPollingEnabled != nil { - cfg.EnableBackupPolling = *systemSettings.BackupPollingEnabled - } - if systemSettings.AdaptivePollingEnabled != nil { - cfg.AdaptivePollingEnabled = *systemSettings.AdaptivePollingEnabled - } - if systemSettings.AdaptivePollingBaseInterval > 0 { - cfg.AdaptivePollingBaseInterval = time.Duration(systemSettings.AdaptivePollingBaseInterval) * time.Second - } - if systemSettings.AdaptivePollingMinInterval > 0 { - cfg.AdaptivePollingMinInterval = time.Duration(systemSettings.AdaptivePollingMinInterval) * time.Second - } - if systemSettings.AdaptivePollingMaxInterval > 0 { - cfg.AdaptivePollingMaxInterval = time.Duration(systemSettings.AdaptivePollingMaxInterval) * time.Second - } + if systemSettings.BackupPollingInterval > 0 { + cfg.BackupPollingInterval = time.Duration(systemSettings.BackupPollingInterval) * time.Second + } else if systemSettings.BackupPollingInterval == 0 { + cfg.BackupPollingInterval = 0 + } + if 
systemSettings.BackupPollingEnabled != nil { + cfg.EnableBackupPolling = *systemSettings.BackupPollingEnabled + } + if systemSettings.AdaptivePollingEnabled != nil { + cfg.AdaptivePollingEnabled = *systemSettings.AdaptivePollingEnabled + } + if systemSettings.AdaptivePollingBaseInterval > 0 { + cfg.AdaptivePollingBaseInterval = time.Duration(systemSettings.AdaptivePollingBaseInterval) * time.Second + } + if systemSettings.AdaptivePollingMinInterval > 0 { + cfg.AdaptivePollingMinInterval = time.Duration(systemSettings.AdaptivePollingMinInterval) * time.Second + } + if systemSettings.AdaptivePollingMaxInterval > 0 { + cfg.AdaptivePollingMaxInterval = time.Duration(systemSettings.AdaptivePollingMaxInterval) * time.Second + } if systemSettings.UpdateChannel != "" { cfg.UpdateChannel = systemSettings.UpdateChannel @@ -602,11 +602,11 @@ func Load() (*Config, error) { } else { // No system.json exists - create default one log.Info().Msg("No system.json found, creating default") - defaultSettings := DefaultSystemSettings() - defaultSettings.ConnectionTimeout = int(cfg.ConnectionTimeout.Seconds()) - if err := persistence.SaveSystemSettings(*defaultSettings); err != nil { - log.Warn().Err(err).Msg("Failed to create default system.json") - } + defaultSettings := DefaultSystemSettings() + defaultSettings.ConnectionTimeout = int(cfg.ConnectionTimeout.Seconds()) + if err := persistence.SaveSystemSettings(*defaultSettings); err != nil { + log.Warn().Err(err).Msg("Failed to create default system.json") + } } if oidcSettings, err := persistence.LoadOIDCConfig(); err == nil && oidcSettings != nil { @@ -783,6 +783,7 @@ func Load() (*Config, error) { Prefix: prefix, Suffix: suffix, CreatedAt: time.Now().UTC(), + Scopes: []string{ScopeWildcard}, } cfg.APITokens = append(cfg.APITokens, record) } @@ -800,7 +801,7 @@ func Load() (*Config, error) { // Legacy migration: if a single token is present without metadata, wrap it. 
if !cfg.HasAPITokens() && cfg.APIToken != "" { - if record, err := NewHashedAPITokenRecord(cfg.APIToken, "Legacy token", time.Now().UTC()); err == nil { + if record, err := NewHashedAPITokenRecord(cfg.APIToken, "Legacy token", time.Now().UTC(), nil); err == nil { cfg.APITokens = []APITokenRecord{*record} cfg.SortAPITokens() log.Info().Msg("Migrated legacy API token into token record store") @@ -1121,20 +1122,20 @@ func SaveConfig(cfg *Config) error { adaptiveEnabled := cfg.AdaptivePollingEnabled systemSettings := SystemSettings{ // Note: PVE polling is hardcoded to 10s - UpdateChannel: cfg.UpdateChannel, - AutoUpdateEnabled: cfg.AutoUpdateEnabled, - AutoUpdateCheckInterval: int(cfg.AutoUpdateCheckInterval.Hours()), - AutoUpdateTime: cfg.AutoUpdateTime, - AllowedOrigins: cfg.AllowedOrigins, - ConnectionTimeout: int(cfg.ConnectionTimeout.Seconds()), - LogLevel: cfg.LogLevel, - DiscoveryEnabled: cfg.DiscoveryEnabled, - DiscoverySubnet: cfg.DiscoverySubnet, - DiscoveryConfig: CloneDiscoveryConfig(cfg.Discovery), - AdaptivePollingEnabled: &adaptiveEnabled, - AdaptivePollingBaseInterval: int(cfg.AdaptivePollingBaseInterval / time.Second), - AdaptivePollingMinInterval: int(cfg.AdaptivePollingMinInterval / time.Second), - AdaptivePollingMaxInterval: int(cfg.AdaptivePollingMaxInterval / time.Second), + UpdateChannel: cfg.UpdateChannel, + AutoUpdateEnabled: cfg.AutoUpdateEnabled, + AutoUpdateCheckInterval: int(cfg.AutoUpdateCheckInterval.Hours()), + AutoUpdateTime: cfg.AutoUpdateTime, + AllowedOrigins: cfg.AllowedOrigins, + ConnectionTimeout: int(cfg.ConnectionTimeout.Seconds()), + LogLevel: cfg.LogLevel, + DiscoveryEnabled: cfg.DiscoveryEnabled, + DiscoverySubnet: cfg.DiscoverySubnet, + DiscoveryConfig: CloneDiscoveryConfig(cfg.Discovery), + AdaptivePollingEnabled: &adaptiveEnabled, + AdaptivePollingBaseInterval: int(cfg.AdaptivePollingBaseInterval / time.Second), + AdaptivePollingMinInterval: int(cfg.AdaptivePollingMinInterval / time.Second), + AdaptivePollingMaxInterval: 
int(cfg.AdaptivePollingMaxInterval / time.Second), // APIToken removed - now handled via .env only } if err := globalPersistence.SaveSystemSettings(systemSettings); err != nil { diff --git a/internal/config/persistence.go b/internal/config/persistence.go index 97267f378..68539b951 100644 --- a/internal/config/persistence.go +++ b/internal/config/persistence.go @@ -126,6 +126,10 @@ func (c *ConfigPersistence) LoadAPITokens() ([]APITokenRecord, error) { return nil, err } + for i := range tokens { + tokens[i].ensureScopes() + } + return tokens, nil } @@ -145,7 +149,14 @@ func (c *ConfigPersistence) SaveAPITokens(tokens []APITokenRecord) error { } } - data, err := json.MarshalIndent(tokens, "", " ") + sanitized := make([]APITokenRecord, len(tokens)) + for i := range tokens { + record := tokens[i] + record.ensureScopes() + sanitized[i] = record + } + + data, err := json.MarshalIndent(sanitized, "", " ") if err != nil { return err } diff --git a/internal/config/persistence_test.go b/internal/config/persistence_test.go index da5391f10..18ba54c8d 100644 --- a/internal/config/persistence_test.go +++ b/internal/config/persistence_test.go @@ -292,6 +292,7 @@ func TestExportConfigIncludesAPITokens(t *testing.T) { Prefix: "hash-1", Suffix: "-0001", CreatedAt: createdAt, + Scopes: []string{config.ScopeWildcard}, }, { ID: "token-2", @@ -300,6 +301,7 @@ func TestExportConfigIncludesAPITokens(t *testing.T) { Prefix: "hash-2", Suffix: "-0002", CreatedAt: createdAt.Add(time.Hour), + Scopes: []string{config.ScopeMonitoringRead}, }, } @@ -401,6 +403,7 @@ func TestImportConfigTransactionalSuccess(t *testing.T) { Prefix: "hashn1", Suffix: "n1", CreatedAt: time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC), + Scopes: []string{config.ScopeMonitoringRead, config.ScopeMonitoringWrite}, }, } if err := source.SaveAPITokens(newTokens); err != nil { @@ -467,6 +470,7 @@ func TestImportConfigTransactionalSuccess(t *testing.T) { Prefix: "hasho1", Suffix: "o1", CreatedAt: time.Date(2023, 1, 1, 12, 0, 0, 
0, time.UTC), + Scopes: []string{config.ScopeWildcard}, }, } if err := target.SaveAPITokens(oldTokens); err != nil { @@ -569,6 +573,7 @@ func TestImportConfigRollbackOnFailure(t *testing.T) { Prefix: "hashn", Suffix: "-n", CreatedAt: time.Date(2024, 2, 2, 12, 0, 0, 0, time.UTC), + Scopes: []string{config.ScopeDockerReport}, }, } if err := source.SaveAPITokens(newTokens); err != nil { @@ -619,6 +624,7 @@ func TestImportConfigRollbackOnFailure(t *testing.T) { Prefix: "hasho", Suffix: "-o", CreatedAt: time.Date(2023, 3, 3, 12, 0, 0, 0, time.UTC), + Scopes: []string{config.ScopeWildcard}, }, } if err := target.SaveAPITokens(baselineTokens); err != nil { @@ -763,6 +769,7 @@ func TestImportAcceptsVersion40Bundle(t *testing.T) { Prefix: "hashk", Suffix: "-k", CreatedAt: time.Date(2022, 4, 4, 12, 0, 0, 0, time.UTC), + Scopes: []string{config.ScopeWildcard}, }, } if err := target.SaveAPITokens(baselineTokens); err != nil { diff --git a/internal/config/watcher.go b/internal/config/watcher.go index bbd17f244..8ddb3c526 100644 --- a/internal/config/watcher.go +++ b/internal/config/watcher.go @@ -347,6 +347,7 @@ func (cw *ConfigWatcher) reloadConfig() { Prefix: prefix, Suffix: suffix, CreatedAt: time.Now().UTC(), + Scopes: []string{ScopeWildcard}, }) } } diff --git a/internal/hostagent/agent.go b/internal/hostagent/agent.go new file mode 100644 index 000000000..9eb25c949 --- /dev/null +++ b/internal/hostagent/agent.go @@ -0,0 +1,474 @@ +package hostagent + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "net/http" + "runtime" + "sort" + "strings" + "time" + + agentshost "github.com/rcourtman/pulse-go-rewrite/pkg/agents/host" + "github.com/rs/zerolog" + gocpu "github.com/shirou/gopsutil/v3/cpu" + godisk "github.com/shirou/gopsutil/v3/disk" + gohost "github.com/shirou/gopsutil/v3/host" + goload "github.com/shirou/gopsutil/v3/load" + gomem "github.com/shirou/gopsutil/v3/mem" + gonet "github.com/shirou/gopsutil/v3/net" +) + +// Config controls 
the behaviour of the host agent. +type Config struct { + PulseURL string + APIToken string + Interval time.Duration + HostnameOverride string + AgentID string + Tags []string + InsecureSkipVerify bool + RunOnce bool + Logger *zerolog.Logger +} + +// Agent is responsible for collecting host metrics and shipping them to Pulse. +type Agent struct { + cfg Config + logger zerolog.Logger + httpClient *http.Client + + hostInfo *gohost.InfoStat + hostname string + displayName string + platform string + osName string + osVersion string + kernelVersion string + architecture string + machineID string + agentID string + interval time.Duration + trimmedPulseURL string + + prevCPUTimes *gocpu.TimesStat +} + +const defaultInterval = 30 * time.Second + +// New constructs a fully initialised host Agent. +func New(cfg Config) (*Agent, error) { + if cfg.Interval <= 0 { + cfg.Interval = defaultInterval + } + + if cfg.Logger == nil { + defaultLogger := zerolog.New(zerolog.NewConsoleWriter()).With().Timestamp().Logger() + cfg.Logger = &defaultLogger + } + + logger := cfg.Logger.With().Str("component", "host-agent").Logger() + + if strings.TrimSpace(cfg.APIToken) == "" { + return nil, fmt.Errorf("api token is required") + } + + pulseURL := cfg.PulseURL + if strings.TrimSpace(pulseURL) == "" { + pulseURL = "http://localhost:7655" + } + pulseURL = strings.TrimRight(pulseURL, "/") + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + info, err := gohost.InfoWithContext(ctx) + if err != nil { + return nil, fmt.Errorf("fetch host info: %w", err) + } + + hostname := strings.TrimSpace(cfg.HostnameOverride) + if hostname == "" { + hostname = strings.TrimSpace(info.Hostname) + } + if hostname == "" { + hostname = "unknown-host" + } + + displayName := hostname + + machineID := strings.TrimSpace(info.HostID) + + agentID := strings.TrimSpace(cfg.AgentID) + if agentID == "" { + agentID = machineID + } + if agentID == "" { + agentID = hostname + } + + 
platform := normalisePlatform(info.Platform) + osName := strings.TrimSpace(info.PlatformFamily) + if osName == "" { + osName = strings.TrimSpace(info.Platform) + } + osVersion := strings.TrimSpace(info.PlatformVersion) + kernelVersion := strings.TrimSpace(info.KernelVersion) + arch := strings.TrimSpace(info.KernelArch) + if arch == "" { + arch = runtime.GOARCH + } + tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12} + if cfg.InsecureSkipVerify { + //nolint:gosec // Insecure mode is explicitly user-controlled. + tlsConfig.InsecureSkipVerify = true + } + + client := &http.Client{ + Timeout: 15 * time.Second, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + }, + } + + trimmedTags := make([]string, 0, len(cfg.Tags)) + seenTags := make(map[string]struct{}, len(cfg.Tags)) + for _, tag := range cfg.Tags { + tag = strings.TrimSpace(tag) + if tag == "" { + continue + } + if _, exists := seenTags[tag]; exists { + continue + } + seenTags[tag] = struct{}{} + trimmedTags = append(trimmedTags, tag) + } + cfg.Tags = trimmedTags + + return &Agent{ + cfg: cfg, + logger: logger, + httpClient: client, + hostInfo: info, + hostname: hostname, + displayName: displayName, + platform: platform, + osName: osName, + osVersion: osVersion, + kernelVersion: kernelVersion, + architecture: arch, + machineID: machineID, + agentID: agentID, + interval: cfg.Interval, + trimmedPulseURL: pulseURL, + }, nil +} + +// Run executes the agent until the context is cancelled. 
+func (a *Agent) Run(ctx context.Context) error { + if a.cfg.RunOnce { + return a.runOnce(ctx) + } + + ticker := time.NewTicker(a.interval) + defer ticker.Stop() + + if err := a.process(ctx); err != nil && !errors.Is(err, context.Canceled) { + a.logger.Error().Err(err).Msg("initial report failed") + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + if err := a.process(ctx); err != nil { + if errors.Is(err, context.Canceled) { + return err + } + a.logger.Error().Err(err).Msg("failed to send report") + } + } + } +} + +func (a *Agent) runOnce(ctx context.Context) error { + return a.process(ctx) +} + +func (a *Agent) process(ctx context.Context) error { + report, err := a.buildReport(ctx) + if err != nil { + return fmt.Errorf("build report: %w", err) + } + if err := a.sendReport(ctx, report); err != nil { + return fmt.Errorf("send report: %w", err) + } + a.logger.Debug(). + Str("hostname", report.Host.Hostname). + Str("platform", report.Host.Platform). + Msg("host report sent") + return nil +} + +func (a *Agent) buildReport(ctx context.Context) (agentshost.Report, error) { + collectCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + uptime, _ := gohost.UptimeWithContext(collectCtx) + loadAvg, _ := goload.AvgWithContext(collectCtx) + cpuCount, _ := gocpu.CountsWithContext(collectCtx, true) + cpuUsage, err := a.calculateCPUUsage(collectCtx) + if err != nil { + a.logger.Debug().Err(err).Msg("failed to compute cpu usage") + } + + memStats, err := gomem.VirtualMemoryWithContext(collectCtx) + if err != nil { + return agentshost.Report{}, fmt.Errorf("memory stats: %w", err) + } + + disks := a.collectDisks(collectCtx) + network := a.collectNetwork(collectCtx) + + var loadValues []float64 + if loadAvg != nil { + loadValues = []float64{loadAvg.Load1, loadAvg.Load5, loadAvg.Load15} + } + + swapUsed := int64(0) + if memStats.SwapTotal > memStats.SwapFree { + swapUsed = int64(memStats.SwapTotal - memStats.SwapFree) + } + + 
report := agentshost.Report{ + Agent: agentshost.AgentInfo{ + ID: a.agentID, + Version: Version, + IntervalSeconds: int(a.interval / time.Second), + Hostname: a.hostname, + }, + Host: agentshost.HostInfo{ + ID: a.machineID, + Hostname: a.hostname, + DisplayName: a.displayName, + MachineID: a.machineID, + Platform: a.platform, + OSName: a.osName, + OSVersion: a.osVersion, + KernelVersion: a.kernelVersion, + Architecture: a.architecture, + CPUModel: "", + CPUCount: cpuCount, + UptimeSeconds: int64(uptime), + LoadAverage: loadValues, + }, + Metrics: agentshost.Metrics{ + CPUUsagePercent: cpuUsage, + Memory: agentshost.MemoryMetric{ + TotalBytes: int64(memStats.Total), + UsedBytes: int64(memStats.Used), + FreeBytes: int64(memStats.Free), + Usage: memStats.UsedPercent, + SwapTotal: int64(memStats.SwapTotal), + SwapUsed: swapUsed, + }, + }, + Disks: disks, + Network: network, + Sensors: agentshost.Sensors{}, + Tags: append([]string(nil), a.cfg.Tags...), + Timestamp: time.Now().UTC(), + } + + return report, nil +} + +func (a *Agent) calculateCPUUsage(ctx context.Context) (float64, error) { + times, err := gocpu.TimesWithContext(ctx, false) + if err != nil { + return 0, err + } + if len(times) == 0 { + return 0, nil + } + current := times[0] + + if a.prevCPUTimes == nil { + a.prevCPUTimes = &current + return 0, nil + } + + prev := a.prevCPUTimes + a.prevCPUTimes = &current + + deltaTotal := current.Total() - prev.Total() + if deltaTotal <= 0 { + return 0, nil + } + + deltaIdle := current.Idle - prev.Idle + if deltaIdle < 0 { + deltaIdle = 0 + } + + usage := (1 - (deltaIdle / deltaTotal)) * 100 + if usage < 0 { + usage = 0 + } + if usage > 100 { + usage = 100 + } + + return usage, nil +} + +func (a *Agent) collectDisks(ctx context.Context) []agentshost.Disk { + partitions, err := godisk.PartitionsWithContext(ctx, true) + if err != nil { + a.logger.Debug().Err(err).Msg("failed to fetch disk partitions") + return nil + } + + disks := make([]agentshost.Disk, 0, len(partitions)) + seen := 
make(map[string]struct{}, len(partitions)) + + for _, part := range partitions { + if part.Mountpoint == "" { + continue + } + if _, ok := seen[part.Mountpoint]; ok { + continue + } + seen[part.Mountpoint] = struct{}{} + + usage, err := godisk.UsageWithContext(ctx, part.Mountpoint) + if err != nil { + continue + } + if usage.Total == 0 { + continue + } + + disks = append(disks, agentshost.Disk{ + Device: part.Device, + Mountpoint: part.Mountpoint, + Filesystem: part.Fstype, + Type: part.Fstype, + TotalBytes: int64(usage.Total), + UsedBytes: int64(usage.Used), + FreeBytes: int64(usage.Free), + Usage: usage.UsedPercent, + }) + } + + sort.Slice(disks, func(i, j int) bool { return disks[i].Mountpoint < disks[j].Mountpoint }) + return disks +} + +func (a *Agent) collectNetwork(ctx context.Context) []agentshost.NetworkInterface { + ifaces, err := gonet.InterfacesWithContext(ctx) + if err != nil { + a.logger.Debug().Err(err).Msg("failed to fetch network interfaces") + return nil + } + + ioCounters, err := gonet.IOCountersWithContext(ctx, true) + if err != nil { + a.logger.Debug().Err(err).Msg("failed to fetch network counters") + } + ioMap := make(map[string]gonet.IOCountersStat, len(ioCounters)) + for _, stat := range ioCounters { + ioMap[stat.Name] = stat + } + + interfaces := make([]agentshost.NetworkInterface, 0, len(ifaces)) + + for _, iface := range ifaces { + if len(iface.Addrs) == 0 { + continue + } + if isLoopback(iface.Flags) { + continue + } + + addresses := make([]string, 0, len(iface.Addrs)) + for _, addr := range iface.Addrs { + if addr.Addr != "" { + addresses = append(addresses, addr.Addr) + } + } + if len(addresses) == 0 { + continue + } + + counter := ioMap[iface.Name] + ifaceEntry := agentshost.NetworkInterface{ + Name: iface.Name, + MAC: iface.HardwareAddr, + Addresses: addresses, + RXBytes: counter.BytesRecv, + TXBytes: counter.BytesSent, + } + + interfaces = append(interfaces, ifaceEntry) + } + + sort.Slice(interfaces, func(i, j int) bool { return 
interfaces[i].Name < interfaces[j].Name }) + return interfaces +} + +func (a *Agent) sendReport(ctx context.Context, report agentshost.Report) error { + payload, err := json.Marshal(report) + if err != nil { + return fmt.Errorf("marshal report: %w", err) + } + + url := fmt.Sprintf("%s/api/agents/host/report", a.trimmedPulseURL) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload)) + if err != nil { + return fmt.Errorf("create request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+a.cfg.APIToken) + req.Header.Set("X-API-Token", a.cfg.APIToken) + req.Header.Set("User-Agent", "pulse-host-agent/"+Version) + + resp, err := a.httpClient.Do(req) + if err != nil { + return fmt.Errorf("send request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 300 { + return fmt.Errorf("pulse responded with status %s", resp.Status) + } + + return nil +} + +func normalisePlatform(platform string) string { + platform = strings.ToLower(strings.TrimSpace(platform)) + switch platform { + case "darwin": + return "macos" + default: + return platform + } +} + +func isLoopback(flags []string) bool { + for _, flag := range flags { + if strings.EqualFold(flag, "loopback") { + return true + } + } + return false +} diff --git a/internal/hostagent/version.go b/internal/hostagent/version.go new file mode 100644 index 000000000..b4d6c2b42 --- /dev/null +++ b/internal/hostagent/version.go @@ -0,0 +1,5 @@ +package hostagent + +// Version is the semantic version of the Pulse host agent binary. It can be +// overridden at build time via -ldflags when producing release artefacts. 
+var Version = "0.1.0-dev" diff --git a/internal/mock/generator.go b/internal/mock/generator.go index f52b39520..bcc797475 100644 --- a/internal/mock/generator.go +++ b/internal/mock/generator.go @@ -18,12 +18,16 @@ type MockConfig struct { LXCsPerNode int DockerHostCount int DockerContainersPerHost int + GenericHostCount int RandomMetrics bool HighLoadNodes []string // Specific nodes to simulate high load StoppedPercent float64 // Percentage of guests that should be stopped } -const dockerConnectionPrefix = "docker-" +const ( + dockerConnectionPrefix = "docker-" + hostConnectionPrefix = "host-" +) var DefaultConfig = MockConfig{ NodeCount: 7, // Test the 5-9 node range by default @@ -31,6 +35,7 @@ var DefaultConfig = MockConfig{ LXCsPerNode: 8, DockerHostCount: 3, DockerContainersPerHost: 12, + GenericHostCount: 4, RandomMetrics: true, StoppedPercent: 0.2, } @@ -80,6 +85,35 @@ var dockerAgentVersions = []string{ "0.1.0-dev", } +var genericHostProfiles = []struct { + Platform string + OSName string + OSVersion string + Kernel string + Architecture string +}{ + {"linux", "Debian GNU/Linux", "12 (bookworm)", "6.8.12-1-amd64", "x86_64"}, + {"linux", "Ubuntu Server", "24.04 LTS", "6.8.0-31-generic", "x86_64"}, + {"linux", "Rocky Linux", "9.3", "5.14.0-427.22.1.el9_4.x86_64", "x86_64"}, + {"linux", "Alpine Linux", "3.20.1", "6.6.32-0-lts", "x86_64"}, + {"windows", "Windows Server", "2022 Datacenter", "10.0.20348.2244", "x86_64"}, + {"windows", "Windows 11 Pro", "23H2", "10.0.22631.3737", "x86_64"}, + {"macos", "macOS Ventura", "13.6.8", "22.6.0", "arm64"}, + {"macos", "macOS Sonoma", "14.6.1", "23G93", "arm64"}, +} + +var genericHostPrefixes = []string{ + "apollo", "centauri", "ceres", "europa", "hyperion", + "kepler", "meridian", "orion", "polaris", "spectrum", + "vega", "zenith", "halcyon", "icarus", "rigel", +} + +var hostAgentVersions = []string{ + "0.1.0", + "0.1.1", + "0.2.0-alpha", +} + // Common tags used for VMs and containers var commonTags = []string{ 
"production", "staging", "development", "testing", @@ -170,6 +204,7 @@ func GenerateMockData(config MockConfig) models.StateSnapshot { data := models.StateSnapshot{ Nodes: generateNodes(config), DockerHosts: generateDockerHosts(config), + Hosts: generateHosts(config), VMs: []models.VM{}, Containers: []models.Container{}, PhysicalDisks: []models.PhysicalDisk{}, @@ -189,6 +224,10 @@ func GenerateMockData(config MockConfig) models.StateSnapshot { data.ConnectionHealth[dockerConnectionPrefix+host.ID] = host.Status != "offline" } + for _, host := range data.Hosts { + data.ConnectionHealth[hostConnectionPrefix+host.ID] = host.Status != "offline" + } + // Generate VMs and containers for each node vmidCounter := 100 for nodeIdx, node := range data.Nodes { @@ -1066,6 +1105,192 @@ func generateDockerHosts(config MockConfig) []models.DockerHost { return hosts } +func generateHosts(config MockConfig) []models.Host { + count := config.GenericHostCount + if count <= 0 { + return nil + } + + now := time.Now() + hosts := make([]models.Host, 0, count) + usedNames := make(map[string]struct{}, count) + + for i := 0; i < count; i++ { + profile := genericHostProfiles[rand.Intn(len(genericHostProfiles))] + + baseName := genericHostPrefixes[rand.Intn(len(genericHostPrefixes))] + suffix := 1 + rand.Intn(900) + hostname := fmt.Sprintf("%s-%d", baseName, suffix) + for { + if _, exists := usedNames[hostname]; !exists { + usedNames[hostname] = struct{}{} + break + } + suffix++ + hostname = fmt.Sprintf("%s-%d", baseName, suffix) + } + + displayName := strings.ToUpper(hostname[:1]) + hostname[1:] + + cpuCount := 4 + rand.Intn(28) // 4-32 cores + if profile.Platform == "macos" { + cpuCount = 8 + rand.Intn(10) + } + cpuUsage := clampFloat(10+rand.Float64()*55, 4, 94) + + memTotalGiB := 16 + rand.Intn(192) + if profile.Platform == "macos" { + memTotalGiB = 16 + rand.Intn(64) + } + memTotal := int64(memTotalGiB) << 30 + memUsage := clampFloat(30+rand.Float64()*50, 12, 96) + memUsed := 
int64(float64(memTotal) * (memUsage / 100.0)) + memFree := memTotal - memUsed + + swapTotal := int64(rand.Intn(32)) << 30 + swapUsed := int64(float64(swapTotal) * rand.Float64()) + + rootDiskTotal := int64(120+rand.Intn(680)) << 30 + rootDiskUsage := clampFloat(25+rand.Float64()*55, 8, 95) + rootDiskUsed := int64(float64(rootDiskTotal) * (rootDiskUsage / 100.0)) + rootDisk := models.Disk{ + Total: rootDiskTotal, + Used: rootDiskUsed, + Free: rootDiskTotal - rootDiskUsed, + Usage: rootDiskUsage, + Mountpoint: "/", + Type: "ext4", + Device: "/dev/sda1", + } + if profile.Platform == "windows" { + rootDisk.Mountpoint = "C:" + rootDisk.Type = "ntfs" + rootDisk.Device = `\\.\PHYSICALDRIVE0` + } + if profile.Platform == "macos" { + rootDisk.Type = "apfs" + rootDisk.Device = "/dev/disk1s1" + } + + disks := []models.Disk{rootDisk} + if rand.Float64() < 0.45 { + dataDiskTotal := int64(200+rand.Intn(1400)) << 30 + dataDiskUsage := clampFloat(35+rand.Float64()*45, 6, 97) + dataDiskUsed := int64(float64(dataDiskTotal) * (dataDiskUsage / 100.0)) + mount := "/data" + device := "/dev/sdb1" + fsType := "xfs" + if profile.Platform == "windows" { + mount = "D:" + device = `\\.\PHYSICALDRIVE1` + fsType = "ntfs" + } + if profile.Platform == "macos" { + mount = "/Volumes/Data" + device = "/dev/disk3s1" + fsType = "apfs" + } + disks = append(disks, models.Disk{ + Total: dataDiskTotal, + Used: dataDiskUsed, + Free: dataDiskTotal - dataDiskUsed, + Usage: dataDiskUsage, + Mountpoint: mount, + Type: fsType, + Device: device, + }) + } + + primaryIP := fmt.Sprintf("192.168.%d.%d", 10+rand.Intn(60), 10+rand.Intn(200)) + network := []models.HostNetworkInterface{ + { + Name: "eth0", + MAC: fmt.Sprintf("02:42:%02x:%02x:%02x:%02x", rand.Intn(256), rand.Intn(256), rand.Intn(256), rand.Intn(256)), + Addresses: []string{primaryIP}, + RXBytes: uint64(256+rand.Intn(4096)) * 1024 * 1024, + TXBytes: uint64(256+rand.Intn(4096)) * 1024 * 1024, + }, + } + if rand.Float64() < 0.32 { + network[0].Addresses = 
append(network[0].Addresses, fmt.Sprintf("10.%d.%d.%d", 10+rand.Intn(90), rand.Intn(200), rand.Intn(200))) + } + + var loadAverage []float64 + if profile.Platform == "linux" { + loadAverage = []float64{ + clampFloat(rand.Float64()*float64(cpuCount)/4, 0.05, float64(cpuCount)*0.8), + clampFloat(rand.Float64()*float64(cpuCount)/4, 0.05, float64(cpuCount)*0.8), + clampFloat(rand.Float64()*float64(cpuCount)/4, 0.05, float64(cpuCount)*0.8), + } + } + + sensors := models.HostSensorSummary{} + if profile.Platform == "linux" || profile.Platform == "macos" { + sensors.TemperatureCelsius = map[string]float64{ + "cpu.package": clampFloat(38+rand.Float64()*22, 30, 85), + } + if rand.Float64() < 0.4 { + sensors.Additional = map[string]float64{ + "nvme0": clampFloat(40+rand.Float64()*20, 30, 90), + } + } + } + + status := "online" + if rand.Float64() < 0.1 { + status = "offline" + } else if rand.Float64() < 0.12 { + status = "degraded" + } + + lastSeen := now.Add(-time.Duration(rand.Intn(60)) * time.Second) + if status == "offline" { + lastSeen = now.Add(-time.Duration(300+rand.Intn(2400)) * time.Second) + } + + uptimeSeconds := int64(3600*(12+rand.Intn(720))) + int64(rand.Intn(3600)) + intervalSeconds := 30 + rand.Intn(45) + + tags := make([]string, 0, 2) + for _, candidate := range []string{"production", "lab", "edge", "backup", "database", "web"} { + if rand.Float64() < 0.18 { + tags = append(tags, candidate) + } + } + + host := models.Host{ + ID: fmt.Sprintf("host-%s-%d", profile.Platform, i+1), + Hostname: hostname, + DisplayName: displayName, + Platform: profile.Platform, + OSName: profile.OSName, + OSVersion: profile.OSVersion, + KernelVersion: profile.Kernel, + Architecture: profile.Architecture, + CPUCount: cpuCount, + CPUUsage: cpuUsage, + LoadAverage: loadAverage, + Memory: models.Memory{Total: memTotal, Used: memUsed, Free: memFree, Usage: memUsage, SwapTotal: swapTotal, SwapUsed: swapUsed}, + Disks: disks, + NetworkInterfaces: network, + Sensors: sensors, + Status: 
status, + UptimeSeconds: uptimeSeconds, + IntervalSeconds: intervalSeconds, + LastSeen: lastSeen, + AgentVersion: hostAgentVersions[rand.Intn(len(hostAgentVersions))], + Tags: tags, + } + + hosts = append(hosts, host) + } + + sort.Slice(hosts, func(i, j int) bool { + return hosts[i].Hostname < hosts[j].Hostname + }) + + return hosts +} func generateDockerContainers(hostName string, hostIdx int, config MockConfig) []models.DockerContainer { base := config.DockerContainersPerHost if base < 1 { @@ -2669,6 +2894,7 @@ func generateSnapshots(vms []models.VM, containers []models.Container) []models. // UpdateMetrics simulates changing metrics over time func UpdateMetrics(data *models.StateSnapshot, config MockConfig) { updateDockerHosts(data, config) + updateHosts(data, config) if !config.RandomMetrics { return @@ -3018,6 +3244,70 @@ func updateDockerHosts(data *models.StateSnapshot, config MockConfig) { } } +func updateHosts(data *models.StateSnapshot, config MockConfig) { + if len(data.Hosts) == 0 { + return + } + + now := time.Now() + step := int64(updateInterval.Seconds()) + if step <= 0 { + step = 2 + } + + for i := range data.Hosts { + host := &data.Hosts[i] + + if data.ConnectionHealth != nil { + data.ConnectionHealth[hostConnectionPrefix+host.ID] = host.Status != "offline" + } + + if host.Status == "offline" { + if config.RandomMetrics && rand.Float64() < 0.02 { + host.Status = "online" + host.LastSeen = now + host.UptimeSeconds = int64(120 + rand.Intn(3600)) + } + continue + } + + host.LastSeen = now.Add(-time.Duration(rand.Intn(25)) * time.Second) + host.UptimeSeconds += step + + if !config.RandomMetrics { + continue + } + + host.CPUUsage = clampFloat(host.CPUUsage+(rand.Float64()-0.5)*5, 4, 97) + + memUsage := clampFloat(host.Memory.Usage+(rand.Float64()-0.5)*3, 12, 96) + host.Memory.Usage = memUsage + host.Memory.Used = int64(float64(host.Memory.Total) * (memUsage / 100.0)) + host.Memory.Free = host.Memory.Total - host.Memory.Used + + for j := range host.Disks 
{ + change := (rand.Float64() - 0.5) * 1.2 + host.Disks[j].Usage = clampFloat(host.Disks[j].Usage+change, 5, 98) + host.Disks[j].Used = int64(float64(host.Disks[j].Total) * (host.Disks[j].Usage / 100.0)) + host.Disks[j].Free = host.Disks[j].Total - host.Disks[j].Used + } + + if len(host.LoadAverage) == 3 { + for j := range host.LoadAverage { + host.LoadAverage[j] = clampFloat(host.LoadAverage[j]+(rand.Float64()-0.5)*0.4, 0.05, float64(host.CPUCount)) + } + } + + if host.Status == "degraded" { + if rand.Float64() < 0.25 { + host.Status = "online" + } + } else if rand.Float64() < 0.05 { + host.Status = "degraded" + } + } +} + func fluctuateFloat(value, variance, min, max float64) float64 { change := (rand.Float64()*2 - 1) * variance newValue := value * (1 + change) diff --git a/internal/models/state_snapshot.go b/internal/models/state_snapshot.go index 237a5adc6..954750b3a 100644 --- a/internal/models/state_snapshot.go +++ b/internal/models/state_snapshot.go @@ -8,6 +8,7 @@ type StateSnapshot struct { VMs []VM `json:"vms"` Containers []Container `json:"containers"` DockerHosts []DockerHost `json:"dockerHosts"` + Hosts []Host `json:"hosts"` Storage []Storage `json:"storage"` CephClusters []CephCluster `json:"cephClusters"` PhysicalDisks []PhysicalDisk `json:"physicalDisks"` @@ -46,6 +47,7 @@ func (s *State) GetSnapshot() StateSnapshot { VMs: append([]VM{}, s.VMs...), Containers: append([]Container{}, s.Containers...), DockerHosts: append([]DockerHost{}, s.DockerHosts...), + Hosts: append([]Host{}, s.Hosts...), Storage: append([]Storage{}, s.Storage...), CephClusters: append([]CephCluster{}, s.CephClusters...), PhysicalDisks: append([]PhysicalDisk{}, s.PhysicalDisks...), @@ -102,6 +104,11 @@ func (s StateSnapshot) ToFrontend() StateFrontend { dockerHosts[i] = host.ToFrontend() } + hosts := make([]HostFrontend, len(s.Hosts)) + for i, host := range s.Hosts { + hosts[i] = host.ToFrontend() + } + // Convert storage storage := make([]StorageFrontend, len(s.Storage)) for i, 
st := range s.Storage { @@ -124,6 +131,7 @@ func (s StateSnapshot) ToFrontend() StateFrontend { VMs: vms, Containers: containers, DockerHosts: dockerHosts, + Hosts: hosts, Storage: storage, CephClusters: cephClusters, PhysicalDisks: s.PhysicalDisks, diff --git a/pkg/agents/host/report.go b/pkg/agents/host/report.go new file mode 100644 index 000000000..67c75dc31 --- /dev/null +++ b/pkg/agents/host/report.go @@ -0,0 +1,86 @@ +package host + +import "time" + +// Report represents the payload sent by the pulse-host-agent. +type Report struct { + Agent AgentInfo `json:"agent"` + Host HostInfo `json:"host"` + Metrics Metrics `json:"metrics"` + Disks []Disk `json:"disks,omitempty"` + Network []NetworkInterface `json:"network,omitempty"` + Sensors Sensors `json:"sensors,omitempty"` + Tags []string `json:"tags,omitempty"` + Timestamp time.Time `json:"timestamp"` + SequenceID string `json:"sequenceId,omitempty"` +} + +// AgentInfo describes the reporting agent. +type AgentInfo struct { + ID string `json:"id"` + Version string `json:"version,omitempty"` + IntervalSeconds int `json:"intervalSeconds,omitempty"` + Hostname string `json:"hostname,omitempty"` +} + +// HostInfo contains platform and identification details about the monitored host. +type HostInfo struct { + ID string `json:"id,omitempty"` + Hostname string `json:"hostname"` + DisplayName string `json:"displayName,omitempty"` + MachineID string `json:"machineId,omitempty"` + Platform string `json:"platform,omitempty"` + OSName string `json:"osName,omitempty"` + OSVersion string `json:"osVersion,omitempty"` + KernelVersion string `json:"kernelVersion,omitempty"` + Architecture string `json:"architecture,omitempty"` + CPUModel string `json:"cpuModel,omitempty"` + CPUCount int `json:"cpuCount,omitempty"` + UptimeSeconds int64 `json:"uptimeSeconds,omitempty"` + LoadAverage []float64 `json:"loadAverage,omitempty"` +} + +// Metrics encapsulates primary resource metrics for a host. 
+type Metrics struct { + CPUUsagePercent float64 `json:"cpuUsagePercent,omitempty"` + Memory MemoryMetric `json:"memory,omitempty"` +} + +// MemoryMetric captures memory usage statistics in bytes. +type MemoryMetric struct { + TotalBytes int64 `json:"totalBytes,omitempty"` + UsedBytes int64 `json:"usedBytes,omitempty"` + FreeBytes int64 `json:"freeBytes,omitempty"` + Usage float64 `json:"usage,omitempty"` + SwapTotal int64 `json:"swapTotalBytes,omitempty"` + SwapUsed int64 `json:"swapUsedBytes,omitempty"` +} + +// Disk represents disk utilisation metrics. +type Disk struct { + Device string `json:"device,omitempty"` + Mountpoint string `json:"mountpoint,omitempty"` + Filesystem string `json:"filesystem,omitempty"` + Type string `json:"type,omitempty"` + TotalBytes int64 `json:"totalBytes,omitempty"` + UsedBytes int64 `json:"usedBytes,omitempty"` + FreeBytes int64 `json:"freeBytes,omitempty"` + Usage float64 `json:"usage,omitempty"` +} + +// NetworkInterface summarises network adapter statistics. +type NetworkInterface struct { + Name string `json:"name"` + MAC string `json:"mac,omitempty"` + Addresses []string `json:"addresses,omitempty"` + RXBytes uint64 `json:"rxBytes,omitempty"` + TXBytes uint64 `json:"txBytes,omitempty"` + SpeedMbps *int64 `json:"speedMbps,omitempty"` +} + +// Sensors captures optional sensor readings reported by the agent. 
+type Sensors struct { + TemperatureCelsius map[string]float64 `json:"temperatureCelsius,omitempty"` + FanRPM map[string]float64 `json:"fanRpm,omitempty"` + Additional map[string]float64 `json:"additional,omitempty"` +} diff --git a/scripts/build-release.sh b/scripts/build-release.sh index 6af6f3de8..f743e2a32 100755 --- a/scripts/build-release.sh +++ b/scripts/build-release.sh @@ -66,6 +66,13 @@ for build_name in "${!builds[@]}"; do -o "$BUILD_DIR/pulse-docker-agent-$build_name" \ ./cmd/pulse-docker-agent + # Build host agent binary + env $build_env go build \ + -ldflags="-s -w -X github.com/rcourtman/pulse-go-rewrite/internal/hostagent.Version=v${VERSION}" \ + -trimpath \ + -o "$BUILD_DIR/pulse-host-agent-$build_name" \ + ./cmd/pulse-host-agent + # Build temperature proxy binary env $build_env go build \ -ldflags="-s -w -X main.Version=v${VERSION} -X main.BuildTime=${build_time} -X main.GitCommit=${git_commit}" \ @@ -85,6 +92,7 @@ for build_name in "${!builds[@]}"; do # Copy binaries and VERSION file cp "$BUILD_DIR/pulse-$build_name" "$staging_dir/bin/pulse" cp "$BUILD_DIR/pulse-docker-agent-$build_name" "$staging_dir/bin/pulse-docker-agent" + cp "$BUILD_DIR/pulse-host-agent-$build_name" "$staging_dir/bin/pulse-host-agent" cp "$BUILD_DIR/pulse-sensor-proxy-$build_name" "$staging_dir/bin/pulse-sensor-proxy" cp "scripts/install-docker-agent.sh" "$staging_dir/scripts/install-docker-agent.sh" chmod 755 "$staging_dir/scripts/install-docker-agent.sh" @@ -112,6 +120,7 @@ mkdir -p "$universal_dir/scripts" for build_name in "${!builds[@]}"; do cp "$BUILD_DIR/pulse-$build_name" "$universal_dir/bin/pulse-${build_name}" cp "$BUILD_DIR/pulse-docker-agent-$build_name" "$universal_dir/bin/pulse-docker-agent-${build_name}" + cp "$BUILD_DIR/pulse-host-agent-$build_name" "$universal_dir/bin/pulse-host-agent-${build_name}" cp "$BUILD_DIR/pulse-sensor-proxy-$build_name" "$universal_dir/bin/pulse-sensor-proxy-${build_name}" done @@ -188,9 +197,43 @@ esac EOF chmod +x 
"$universal_dir/bin/pulse-sensor-proxy" +cat > "$universal_dir/bin/pulse-host-agent" << 'EOF' +#!/bin/sh +# Auto-detect architecture and run appropriate pulse-host-agent binary + +ARCH=$(uname -m) +case "$ARCH" in + x86_64|amd64) + exec "$(dirname "$0")/pulse-host-agent-linux-amd64" "$@" + ;; + aarch64|arm64) + exec "$(dirname "$0")/pulse-host-agent-linux-arm64" "$@" + ;; + armv7l|armhf) + exec "$(dirname "$0")/pulse-host-agent-linux-armv7" "$@" + ;; + *) + echo "Unsupported architecture: $ARCH" >&2 + exit 1 + ;; +esac +EOF +chmod +x "$universal_dir/bin/pulse-host-agent" + # Add VERSION file echo "$VERSION" > "$universal_dir/VERSION" +# Build host agent for macOS arm64 +echo "Building host agent for macOS arm64..." +env GOOS=darwin GOARCH=arm64 go build \ + -ldflags="-s -w -X github.com/rcourtman/pulse-go-rewrite/internal/hostagent.Version=v${VERSION}" \ + -trimpath \ + -o "$BUILD_DIR/pulse-host-agent-darwin-arm64" \ + ./cmd/pulse-host-agent + +# Package macOS host agent +tar -czf "$RELEASE_DIR/pulse-host-agent-v${VERSION}-darwin-arm64.tar.gz" -C "$BUILD_DIR" pulse-host-agent-darwin-arm64 + # Create universal tarball cd "$universal_dir" tar -czf "../../$RELEASE_DIR/pulse-v${VERSION}.tar.gz" .