Preserve monitored system warning status

This commit is contained in:
rcourtman 2026-03-23 22:33:05 +00:00
parent 848ac49561
commit f7b2eb40ce
8 changed files with 145 additions and 2 deletions

View file

@ -226,6 +226,10 @@ show the counted monitored systems coming from agent-backed infrastructure, but
the shared API helper must expose the canonical unified-resource grouping
explanation instead of rebuilding count reasons from install or registration
state.
That shared ledger read must also preserve canonical grouped system status,
including `warning`, so lifecycle-adjacent operator surfaces do not mislabel
live agent-backed infrastructure as `Unknown` when the unified-resource layer
already resolved a governed degraded state.
Lifecycle-adjacent workspace copy must also keep the same commercial framing:
infrastructure operations may point operators to Pulse Pro for billing, but it
must describe that boundary in monitored-system, plan-limit, and license-status

View file

@ -234,6 +234,11 @@ shared monitored-system explanation summary, sanitized grouping reasons, and
included top-level surfaces exactly as the unified-resource resolver computed
them, while the frontend client stays in lockstep with that nested payload
shape.
That same ledger contract must also preserve the canonical monitored-system
status enum end to end. Backend normalization may fail closed for unsupported
values, but it must not flatten governed `warning` state to `unknown`, because
the billing and inventory surfaces need the real top-level runtime status the
unified-resource resolver computed.
That client contract must also fail closed when older or partial payloads omit
the nested explanation object: the frontend may normalize missing explanation
fields to empty reasons/surfaces plus a safe default summary, but it must not

View file

@ -147,6 +147,10 @@ ledger explanation reads: storage- and recovery-adjacent surfaces may coexist
with counted monitored-system inventory, but any support-facing count
reasoning must come from the canonical unified-resource grouping explanation
payload rather than from storage or recovery heuristics.
That adjacent ledger read must also preserve canonical grouped system status,
including `warning`, so recovery- and storage-adjacent support views do not
flatten governed degraded state into a misleading `unknown` label when the shared
unified-resource resolver already computed the top-level status.
The same API resource serializer also refreshes canonical identity and policy
metadata through the shared unified-resource helper before it writes resource
payloads, so storage and recovery links inherit the same canonical metadata

View file

@ -84,4 +84,44 @@ describe('MonitoredSystemLedgerAPI', () => {
expect(result.systems[0]?.explanation.reasons).toEqual([]);
expect(result.systems[0]?.explanation.surfaces).toEqual([]);
});
it('preserves canonical warning status from the API contract', async () => {
  // A single agent-backed host whose status is already the governed
  // `warning` state as resolved by the unified-resource layer.
  const warningSystem = {
    name: 'server-1',
    type: 'host',
    status: 'warning',
    last_seen: '2026-01-01T00:00:00Z',
    source: 'agent',
  };
  vi.mocked(apiFetchJSON).mockResolvedValueOnce({
    systems: [warningSystem],
    total: 1,
    limit: 5,
  });

  const ledger = await MonitoredSystemLedgerAPI.getLedger();

  // The client must pass the canonical status through unchanged.
  expect(ledger.systems[0]?.status).toBe('warning');
});
it('fails closed to unknown for unsupported status values', async () => {
  // `degraded` is not part of the ledger status enum; the client must not
  // pass it through verbatim.
  const unsupportedSystem = {
    name: 'server-1',
    type: 'host',
    status: 'degraded',
    last_seen: '2026-01-01T00:00:00Z',
    source: 'agent',
  };
  vi.mocked(apiFetchJSON).mockResolvedValueOnce({
    systems: [unsupportedSystem],
    total: 1,
    limit: 5,
  });

  const ledger = await MonitoredSystemLedgerAPI.getLedger();

  // Fail closed: any unsupported value normalizes to 'unknown'.
  expect(ledger.systems[0]?.status).toBe('unknown');
});
});

View file

@ -1,5 +1,7 @@
import { apiFetchJSON } from '@/utils/apiClient';
export type MonitoredSystemLedgerStatus = 'online' | 'warning' | 'offline' | 'unknown';
export interface MonitoredSystemLedgerExplanationReason {
kind: string;
signal: string;
@ -21,7 +23,7 @@ export interface MonitoredSystemLedgerExplanation {
export interface MonitoredSystemLedgerEntry {
name: string;
type: string;
status: string; // "online" | "offline" | "unknown"
status: MonitoredSystemLedgerStatus;
last_seen: string; // RFC3339 or empty
source: string;
explanation?: MonitoredSystemLedgerExplanation;
@ -51,6 +53,7 @@ function normalizeMonitoredSystemLedgerEntry(
const explanation = entry.explanation;
return {
...entry,
status: normalizeMonitoredSystemLedgerStatus(entry.status),
explanation: {
summary:
explanation?.summary ??
@ -60,3 +63,17 @@ function normalizeMonitoredSystemLedgerEntry(
},
};
}
/**
 * Normalize a raw status value from the ledger API into the canonical
 * status enum, failing closed to 'unknown' for anything unsupported.
 *
 * Trims and lowercases the input so minor wire-format variations
 * ("Warning", " online ") still map onto the enum; `null`/`undefined`
 * normalize to '' and therefore fall through to 'unknown'.
 */
function normalizeMonitoredSystemLedgerStatus(
  status: MonitoredSystemLedgerStatus | string | null | undefined,
): MonitoredSystemLedgerStatus {
  // Switching on the derived expression does not narrow `status`, so the
  // original re-derived (and re-cast) it in each arm — a strict-null-checks
  // error waiting to happen. Compute the normalized value once and return it.
  const normalized = (status ?? '').trim().toLowerCase();
  switch (normalized) {
    case 'online':
    case 'warning':
    case 'offline':
    case 'unknown':
      return normalized;
    default:
      return 'unknown';
  }
}

View file

@ -573,6 +573,77 @@ func TestContract_AIIntelligenceCorrelationsJSONSnapshot(t *testing.T) {
assertJSONSnapshot(t, got, want)
}
// TestContract_MonitoredSystemLedgerJSONSnapshot pins the serialized wire
// format of the monitored-system ledger response — including a governed
// `warning` top-level status — so the JSON contract the frontend client
// normalizes against cannot drift silently.
func TestContract_MonitoredSystemLedgerJSONSnapshot(t *testing.T) {
// One agent-backed host in `warning` state, carrying the full grouping
// explanation (summary, reasons, surfaces) as the unified-resource
// resolver would have computed it.
payload := MonitoredSystemLedgerResponse{
Systems: []MonitoredSystemLedgerEntry{
{
Name: "Tower",
Type: "host",
Status: "warning",
LastSeen: "2026-03-18T17:30:00Z",
Source: "agent",
Explanation: MonitoredSystemLedgerExplanation{
Summary: "Counts as one monitored system because Pulse sees one top-level host view from agent.",
Reasons: []MonitoredSystemLedgerExplanationReason{
{
Kind: "standalone",
Signal: "single-top-level-view",
Summary: "No overlapping top-level source matched this system.",
},
},
Surfaces: []MonitoredSystemLedgerExplanationSurface{
{
Name: "Tower",
Type: "host",
Source: "agent",
},
},
},
},
},
Total: 1,
Limit: 5,
}
got, err := json.Marshal(payload)
if err != nil {
t.Fatalf("marshal monitored system ledger response: %v", err)
}
// Expected wire format, spelled out field by field so a struct-tag change
// shows up as a readable diff. NOTE(review): the literal is pretty-printed
// while json.Marshal emits compact JSON — this assumes assertJSONSnapshot
// normalizes whitespace before comparing; confirm against that helper.
const want = `{
"systems":[
{
"name":"Tower",
"type":"host",
"status":"warning",
"last_seen":"2026-03-18T17:30:00Z",
"source":"agent",
"explanation":{
"summary":"Counts as one monitored system because Pulse sees one top-level host view from agent.",
"reasons":[
{
"kind":"standalone",
"signal":"single-top-level-view",
"summary":"No overlapping top-level source matched this system."
}
],
"surfaces":[
{
"name":"Tower",
"type":"host",
"source":"agent"
}
]
}
}
],
"total":1,
"limit":5
}`
assertJSONSnapshot(t, got, want)
}
func TestContract_ResolveAuthEnvPathUsesCanonicalRuntimeDataDir(t *testing.T) {
envDir := t.TempDir()
t.Setenv("PULSE_DATA_DIR", envDir)

View file

@ -124,7 +124,7 @@ func (r *Router) handleMonitoredSystemLedger(w http.ResponseWriter, req *http.Re
func normalizeStatus(s string) string {
switch s {
case "online", "offline":
case "online", "warning", "offline", "unknown":
return s
default:
return "unknown"

View file

@ -50,7 +50,9 @@ func TestNormalizeStatus(t *testing.T) {
want string
}{
{"online", "online"},
{"warning", "warning"},
{"offline", "offline"},
{"unknown", "unknown"},
{"", "unknown"},
{"degraded", "unknown"},
{"running", "unknown"},