Preserve canonical infrastructure rows across realtime hydrate

This commit is contained in:
rcourtman 2026-04-11 14:30:07 +01:00
parent d643b0fb51
commit 9226db4717
6 changed files with 565 additions and 14 deletions

View file

@ -90,6 +90,11 @@ querying, and the operator-facing storage health presentation layer.
That same adjacent API boundary now also owns SSO outbound discovery and metadata fetch trust: storage- and recovery-adjacent surfaces may share `internal/api/sso_outbound.go`, `internal/api/saml_service.go`, and `internal/api/oidc_service.go`, but they must not fork separate metadata/discovery HTTP clients, redirect policies, or credential-file read rules when they depend on shared backend auth helpers.
5. Route canonical storage/recovery resource selection through `frontend-modern/src/hooks/useUnifiedResources.ts` and the owning `unified-resources` contract
That shared hook now also projects resource `clusterId` through the shared cluster-name helper, so storage and recovery links keep the same cluster-context label as other unified-resource consumers instead of rebuilding a local fallback chain.
That shared hook must keep realtime transport merges canonical for
storage/recovery consumers too: thinner websocket `state.resources`
payloads may refresh status and metrics, but they must not downgrade richer
REST-hydrated platform summary fields or synthesize standalone `clusterId`
values from resource names while the same session is open.
Storage and recovery consumers must also inherit the hook's canonical
`ResourceType` normalization for route/query filters, so storage subtypes
such as `physical_disk` stay on the same cache-backed snapshot instead of

View file

@ -168,6 +168,15 @@ assembly branch.
snapshot freshness must come from websocket `state.resources` instead of
layering confirmatory dashboard/infrastructure REST refetch loops over
already-owned resource updates.
That shared hook must also preserve canonical row shape across transport
boundaries: thinner realtime `state.resources` payloads must merge into the
existing canonical resource snapshot instead of downgrading richer REST-only
infrastructure details such as disk I/O, source metadata, or platform
summary fields after first hydrate.
Canonical cluster membership in that shared hook must come only from
explicit cluster identity such as Kubernetes context or platform cluster
labels; standalone resource names must never be repurposed as synthetic
`clusterId` values.
10. Keep the dashboard overview shell on the compact governed summary route
rather than the unfiltered list transport. `frontend-modern/src/pages/Dashboard.tsx`
and `frontend-modern/src/hooks/useDashboardOverview.ts` may consume the

View file

@ -67,6 +67,33 @@ const createWsResource = (overrides: Partial<Resource> = {}): Resource => ({
...overrides,
});
/**
 * Builds a deliberately "thin" websocket-shaped PBS resource fixture:
 * status, basic cpu/memory metrics, and legacy flat `platformData` keys
 * (`host`, `version`, `numDatastores`, …) only — no nested canonical
 * sections. `overrides` lets individual tests replace any field.
 */
const createThinWsPBSResource = (overrides: Partial<Resource> = {}): Resource => {
  const mebibyte = 1024 * 1024;
  const base = {
    id: 'pbs-1',
    type: 'pbs',
    name: 'backup-vault',
    displayName: 'backup-vault',
    platformId: 'pbs-main',
    platformType: 'proxmox-pbs',
    sourceType: 'api',
    status: 'online',
    lastSeen: Date.parse('2026-02-06T12:00:00Z'),
    cpu: { current: 9 },
    memory: {
      current: 36,
      used: 6 * mebibyte,
      total: 16 * mebibyte,
      free: 10 * mebibyte,
    },
    platformData: {
      host: '198.51.100.10',
      version: '3.2.1',
      connectionHealth: 'healthy',
      numDatastores: 2,
    },
  };
  return { ...base, ...overrides } as Resource;
};
const flushAsync = async () => {
await Promise.resolve();
await Promise.resolve();
@ -382,6 +409,156 @@ describe('useUnifiedResources', () => {
dispose();
});
// Regression test: a standalone docker host whose identity fields all echo
// its own name/hostname must hydrate with `clusterId` undefined — the hook
// must not repurpose a resource name as a synthetic cluster id.
it('does not invent standalone cluster ids from resource names', async () => {
  // Single REST response: one docker-host with canonicalIdentity/identity
  // values derived only from its own hostname (no explicit cluster labels).
  apiFetchMock.mockResolvedValueOnce({
    ok: true,
    json: async () => ({
      data: [
        {
          ...v2Resource,
          id: 'docker-host-1',
          type: 'docker-host',
          name: 'Ops Services 01',
          canonicalIdentity: {
            displayName: 'Ops Services 01',
            hostname: 'ops-services-01',
            platformId: 'ops-services-01',
          },
          sources: ['docker'],
          identity: {
            hostnames: ['ops-services-01'],
          },
          docker: {
            hostSourceId: 'orion-2-mock',
            hostname: 'ops-services-01',
            runtime: 'docker',
            runtimeVersion: '27.3.1',
            dockerVersion: '27.3.1',
            os: 'Alpine Linux 3.19',
            kernelVersion: '6.6.32-1-lts',
            architecture: 'aarch64',
            agentVersion: '0.1.0-dev',
          },
        },
      ],
    }),
  });
  let dispose = () => {};
  let result: ReturnType<UseUnifiedResourcesModule['useUnifiedResources']> | undefined;
  // Run the hook inside a Solid root so reactive resources are disposable.
  createRoot((d) => {
    dispose = d;
    result = useUnifiedResources();
  });
  await result!.refetch();
  // The only identity available is the resource's own name — no clusterId.
  expect(result!.resources()[0]?.clusterId).toBeUndefined();
  dispose();
});
// Regression test: richer REST-hydrated details (node disk I/O rates, nested
// PBS platform summary) must survive a later, thinner websocket snapshot —
// realtime updates may refresh metrics but must not drop REST-only fields.
it('preserves richer REST resource details across thinner websocket updates', async () => {
  // Start disconnected so the first hydrate comes from the mocked REST fetch.
  setWsConnected(false);
  setWsInitialDataReceived(false);
  setWsState('resources', []);
  apiFetchMock.mockResolvedValueOnce({
    ok: true,
    json: async () => ({
      data: [
        {
          ...v2Resource,
          // REST-only metric detail that the thin websocket payload omits.
          metrics: {
            ...v2Resource.metrics,
            diskRead: { value: 1_250_000 },
            diskWrite: { value: 640_000 },
          },
        },
        {
          id: 'pbs-1',
          type: 'pbs',
          name: 'backup-vault',
          status: 'online',
          lastSeen: '2026-02-06T12:00:00Z',
          sources: ['pbs'],
          canonicalIdentity: {
            displayName: 'backup-vault',
            hostname: 'backup-vault',
            platformId: '198.51.100.10',
          },
          metrics: {
            cpu: { percent: 9 },
            memory: { used: 6 * 1024 * 1024, total: 16 * 1024 * 1024, percent: 36 },
          },
          // Rich nested PBS summary (job counts etc.) present only via REST.
          pbs: {
            instanceId: 'pbs-main',
            hostname: '198.51.100.10',
            version: '3.2.1',
            datastoreCount: 2,
            backupJobCount: 4,
            connectionHealth: 'healthy',
          },
        },
      ],
    }),
  });
  let dispose = () => {};
  let result: ReturnType<UseUnifiedResourcesModule['useUnifiedResources']> | undefined;
  createRoot((d) => {
    dispose = d;
    result = useUnifiedResources();
  });
  await result!.refetch();
  // Sanity check: REST hydrate projected both rich fields.
  expect(result!.resources().find((resource) => resource.id === 'node-1')?.diskIO).toEqual({
    readRate: 1_250_000,
    writeRate: 640_000,
  });
  expect(result!.resources().find((resource) => resource.id === 'pbs-1')?.platformData?.pbs).toEqual(
    expect.objectContaining({
      datastoreCount: 2,
      backupJobCount: 4,
      connectionHealth: 'healthy',
    }),
  );
  // Switch to the realtime transport with thinner payloads: the node update
  // carries only a new cpu sample; the PBS row has flat legacy keys only.
  batch(() => {
    setWsState(
      'resources',
      reconcile(
        [
          createWsResource({
            cpu: { current: 42 },
          }),
          createThinWsPBSResource(),
        ],
        { key: 'id' },
      ),
    );
    setWsState('lastUpdate', 1738843203000);
    setWsConnected(true);
    setWsInitialDataReceived(true);
  });
  await flushAsync();
  // Fresh realtime metric is applied…
  expect(result!.resources().find((resource) => resource.id === 'node-1')?.cpu?.current).toBe(42);
  // …while REST-only disk I/O and nested PBS summary fields are retained.
  expect(result!.resources().find((resource) => resource.id === 'node-1')?.diskIO).toEqual({
    readRate: 1_250_000,
    writeRate: 640_000,
  });
  expect(result!.resources().find((resource) => resource.id === 'pbs-1')?.platformData?.pbs).toEqual(
    expect.objectContaining({
      datastoreCount: 2,
      backupJobCount: 4,
      connectionHealth: 'healthy',
    }),
  );
  dispose();
});
it('falls back to proxmox temperature when agent temperature is unavailable', async () => {
apiFetchMock.mockResolvedValueOnce({
ok: true,

View file

@ -23,10 +23,8 @@ import { logger } from '@/utils/logger';
import { eventBus } from '@/stores/events';
import { canonicalDiscoveryResourceType } from '@/utils/discoveryTarget';
import { canonicalizeFrontendResourceType } from '@/utils/resourceTypeCompat';
import {
getPreferredNormalizedPlatformId,
getPreferredResourceClusterName,
} from '@/utils/resourceIdentity';
import { getPreferredNormalizedPlatformId } from '@/utils/resourceIdentity';
import { getExplicitResourceClusterName } from '@/utils/agentResources';
import {
resolvePlatformTypeFromSources,
resolveSourceTypeFromSources,
@ -108,6 +106,8 @@ type APIHostRAIDArray = {
rebuildSpeed?: string;
};
type JsonRecord = Record<string, unknown>;
type APIKubernetesData = {
clusterId?: string;
clusterName?: string;
@ -570,6 +570,332 @@ const metricToResourceMetric = (metric?: APIMetricValue) => {
};
};
// Narrows an unknown value to a plain record; arrays, null, and primitives
// yield undefined.
const asRecord = (value: unknown): JsonRecord | undefined => {
  if (!value || typeof value !== 'object' || Array.isArray(value)) {
    return undefined;
  }
  return value as JsonRecord;
};
// Concatenates incoming-then-existing, trims entries, drops blanks, and
// de-duplicates while preserving first-occurrence order. Returns undefined
// when nothing survives.
const mergeStringArrays = (
  incoming?: string[],
  existing?: string[],
): string[] | undefined => {
  const unique = new Set<string>();
  for (const raw of [...(incoming ?? []), ...(existing ?? [])]) {
    const trimmed = asTrimmedString(raw);
    if (trimmed) {
      unique.add(trimmed);
    }
  }
  return unique.size > 0 ? [...unique] : undefined;
};
// Shallow-merges two optional records; incoming keys win. When only one side
// is present it is returned as-is; when neither is, undefined.
const mergeRecord = <T extends JsonRecord>(incoming?: T, existing?: T): T | undefined => {
  if (incoming && existing) {
    return { ...existing, ...incoming };
  }
  return incoming ?? existing;
};
// Merges incoming platformData over existing: top level is a shallow merge
// with incoming winning, while known nested platform sections (and
// sourceStatus) are deep-merged one level and `sources` lists are unioned.
// Non-record inputs fall back to whichever side is present.
const mergePlatformData = (
  incomingValue: Resource['platformData'],
  existingValue: Resource['platformData'],
): Resource['platformData'] => {
  const incoming = asRecord(incomingValue);
  const existing = asRecord(existingValue);
  if (!incoming) return existingValue;
  if (!existing) return incomingValue;

  const merged: JsonRecord = { ...existing, ...incoming };

  // Sections whose inner keys should be combined rather than replaced.
  const nestedKeys = [
    'agent',
    'docker',
    'proxmox',
    'pbs',
    'pmg',
    'kubernetes',
    'vmware',
    'storage',
    'physicalDisk',
    'ceph',
    'metrics',
    'discoveryTarget',
    'sourceStatus',
  ] as const;
  for (const key of nestedKeys) {
    const combined = mergeRecord(asRecord(incoming[key]), asRecord(existing[key]));
    if (combined) {
      merged[key] = combined;
    }
  }

  const sources = mergeStringArrays(
    Array.isArray(incoming.sources) ? (incoming.sources as string[]) : undefined,
    Array.isArray(existing.sources) ? (existing.sources as string[]) : undefined,
  );
  if (sources) {
    merged.sources = sources;
  }

  return merged;
};
// Reconstructs a canonical `sources` list for legacy payloads that lack one,
// based on platformType (with hybrid handling for PVE/Kubernetes) and
// falling back to sourceType === 'agent'.
const deriveLegacySourceList = (resource: Resource): string[] | undefined => {
  const hybrid = resource.sourceType === 'hybrid';
  if (resource.platformType === 'proxmox-pve') {
    return hybrid ? ['proxmox', 'agent'] : ['proxmox'];
  }
  if (resource.platformType === 'kubernetes') {
    return hybrid ? ['agent', 'kubernetes'] : ['kubernetes'];
  }
  const fixedSources: Record<string, string[]> = {
    docker: ['docker'],
    'proxmox-pbs': ['pbs'],
    'proxmox-pmg': ['pmg'],
    truenas: ['truenas'],
    'vmware-vsphere': ['vmware'],
  };
  const fixed = resource.platformType ? fixedSources[resource.platformType] : undefined;
  if (fixed) {
    return fixed;
  }
  return resource.sourceType === 'agent' ? ['agent'] : undefined;
};
/**
 * Lifts legacy flat `platformData` keys into the canonical nested sections
 * (`agent`, `docker`, `proxmox`, `pbs`, `pmg`, `kubernetes`). A section is
 * only synthesized when the canonical nested record is absent, so richer
 * canonical payloads are never overwritten. Also backfills `sources` from
 * the resource's platform/source type when the legacy payload lacks an
 * explicit non-empty list. Non-record platformData is returned unchanged.
 */
const canonicalizeLegacyPlatformData = (resource: Resource): Resource['platformData'] => {
  const platformData = asRecord(resource.platformData);
  if (!platformData) {
    return resource.platformData;
  }
  const normalized: JsonRecord = { ...platformData };

  // Copies each defined legacy top-level key into `payload` under its
  // canonical name; undefined keys are skipped so payloads stay sparse.
  const copyLegacyKeys = (
    payload: JsonRecord,
    mappings: readonly (readonly [string, string])[],
  ): void => {
    for (const [legacyKey, nextKey] of mappings) {
      if (platformData[legacyKey] !== undefined) {
        payload[nextKey] = platformData[legacyKey];
      }
    }
  };

  // Installs a synthesized section only when it ended up non-empty.
  const installSection = (key: string, payload: JsonRecord): void => {
    if (Object.keys(payload).length > 0) {
      normalized[key] = payload;
    }
  };

  const normalizedSources =
    Array.isArray(platformData.sources) && platformData.sources.length > 0
      ? (platformData.sources as string[])
      : deriveLegacySourceList(resource);
  if (normalizedSources && normalizedSources.length > 0) {
    normalized.sources = normalizedSources;
  }

  if (!asRecord(platformData.agent)) {
    const agentPayload: JsonRecord = {};
    copyLegacyKeys(agentPayload, [
      ['agentId', 'agentId'],
      ['agentVersion', 'agentVersion'],
      ['hostname', 'hostname'],
      ['platform', 'platform'],
      ['osName', 'osName'],
      ['osVersion', 'osVersion'],
      ['kernelVersion', 'kernelVersion'],
      ['architecture', 'architecture'],
      ['commandsEnabled', 'commandsEnabled'],
    ]);
    if (platformData.memory !== undefined) agentPayload.memory = platformData.memory;
    // Legacy `interfaces` maps to the canonical `networkInterfaces` name.
    if (platformData.interfaces !== undefined) agentPayload.networkInterfaces = platformData.interfaces;
    if (platformData.disks !== undefined) agentPayload.disks = platformData.disks;
    installSection('agent', agentPayload);
  }

  if (!asRecord(platformData.docker)) {
    const dockerPayload: JsonRecord = {};
    copyLegacyKeys(dockerPayload, [
      ['agentId', 'agentId'],
      ['runtime', 'runtime'],
      ['runtimeVersion', 'runtimeVersion'],
      ['dockerVersion', 'dockerVersion'],
      ['os', 'os'],
      ['kernelVersion', 'kernelVersion'],
      ['architecture', 'architecture'],
      ['agentVersion', 'agentVersion'],
      ['hostname', 'hostname'],
      ['displayName', 'displayName'],
      ['machineId', 'machineId'],
      ['containerCount', 'containerCount'],
      ['uptimeSeconds', 'uptimeSeconds'],
      ['intervalSeconds', 'intervalSeconds'],
      ['temperature', 'temperature'],
      ['hostSourceId', 'hostSourceId'],
    ]);
    if (platformData.swarm !== undefined) dockerPayload.swarm = platformData.swarm;
    if (platformData.interfaces !== undefined) dockerPayload.networkInterfaces = platformData.interfaces;
    if (platformData.disks !== undefined) dockerPayload.disks = platformData.disks;
    installSection('docker', dockerPayload);
  }

  if (!asRecord(platformData.proxmox)) {
    const proxmoxPayload: JsonRecord = {};
    copyLegacyKeys(proxmoxPayload, [
      ['instance', 'instance'],
      // Legacy `node` maps to the canonical `nodeName`.
      ['node', 'nodeName'],
      ['clusterName', 'clusterName'],
      ['vmid', 'vmid'],
      ['cpus', 'cpus'],
      ['template', 'template'],
      ['swapUsed', 'swapUsed'],
      ['swapTotal', 'swapTotal'],
      ['balloon', 'balloon'],
    ]);
    if (platformData.disks !== undefined) proxmoxPayload.disks = platformData.disks;
    installSection('proxmox', proxmoxPayload);
  }

  if (!asRecord(platformData.pbs)) {
    const pbsPayload: JsonRecord = {};
    copyLegacyKeys(pbsPayload, [
      // Legacy `host` maps to the canonical `hostname`.
      ['host', 'hostname'],
      ['version', 'version'],
      ['connectionHealth', 'connectionHealth'],
      // Legacy `numDatastores` maps to the canonical `datastoreCount`.
      ['numDatastores', 'datastoreCount'],
    ]);
    installSection('pbs', pbsPayload);
  }

  if (!asRecord(platformData.pmg)) {
    const pmgPayload: JsonRecord = {};
    copyLegacyKeys(pmgPayload, [
      ['host', 'hostname'],
      ['version', 'version'],
      ['connectionHealth', 'connectionHealth'],
      ['nodeCount', 'nodeCount'],
      ['queueActive', 'queueActive'],
      ['queueDeferred', 'queueDeferred'],
      ['queueHold', 'queueHold'],
      ['queueIncoming', 'queueIncoming'],
      ['queueTotal', 'queueTotal'],
    ]);
    installSection('pmg', pmgPayload);
  }

  if (!asRecord(platformData.kubernetes)) {
    const kubernetesPayload: JsonRecord = {};
    copyLegacyKeys(kubernetesPayload, [
      ['agentId', 'agentId'],
      ['clusterId', 'clusterId'],
      ['context', 'context'],
      ['nodeName', 'nodeName'],
      ['namespace', 'namespace'],
      ['clusterName', 'clusterName'],
      ['pendingUninstall', 'pendingUninstall'],
    ]);
    installSection('kubernetes', kubernetesPayload);
  }

  return normalized;
};
/**
 * Projects a realtime (websocket) resource into canonical row shape: legacy
 * flat platformData is lifted into nested sections, top-level convenience
 * fields (agent, proxmox, pbs, …) are backfilled from those sections when
 * absent, and `clusterId` is taken only from explicit cluster identity —
 * never synthesized from the resource name.
 */
const canonicalizeRealtimeResource = (resource: Resource): Resource => {
  const platformData = canonicalizeLegacyPlatformData(resource);
  const platformRecord = asRecord(platformData);
  const canonical = { ...resource, platformData };
  return {
    ...canonical,
    clusterId: resource.clusterId ?? getExplicitResourceClusterName(canonical),
    agent: resource.agent ?? (platformRecord?.agent as Resource['agent']),
    proxmox: resource.proxmox ?? (platformRecord?.proxmox as Resource['proxmox']),
    pbs: resource.pbs ?? (platformRecord?.pbs as Resource['pbs']),
    kubernetes: resource.kubernetes ?? (platformRecord?.kubernetes as Resource['kubernetes']),
    vmware: resource.vmware ?? (platformRecord?.vmware as Resource['vmware']),
    storage: resource.storage ?? (platformRecord?.storage as Resource['storage']),
    physicalDisk:
      resource.physicalDisk ?? (platformRecord?.physicalDisk as Resource['physicalDisk']),
  };
};
// Merges canonical identity payloads: incoming scalar fields win, alias
// lists are unioned. When only one side exists it is returned unchanged.
const mergeCanonicalIdentity = (
  incoming?: Resource['canonicalIdentity'],
  existing?: Resource['canonicalIdentity'],
): Resource['canonicalIdentity'] => {
  if (!incoming || !existing) {
    return incoming ?? existing;
  }
  return {
    ...existing,
    ...incoming,
    aliases: mergeStringArrays(incoming.aliases, existing.aliases),
  };
};
/**
 * Merges an incoming (possibly thinner realtime) row over the existing
 * canonical row for the same resource id. Incoming fields win at the top
 * level, but richer REST-only fields (clusterId, diskIO, policy, platform
 * summaries, …) are retained from the existing row when the incoming row
 * omits them, and nested sections are deep-merged one level.
 */
const mergeCanonicalResource = (incoming: Resource, existing?: Resource): Resource => {
  if (!existing) {
    return incoming;
  }
  // One-level deep merge of a nested record field; incoming keys win.
  const mergeNested = (a: unknown, b: unknown) =>
    mergeRecord(a as JsonRecord | undefined, b as JsonRecord | undefined);
  return {
    ...existing,
    ...incoming,
    clusterId: incoming.clusterId ?? existing.clusterId,
    discoveryTarget: incoming.discoveryTarget ?? existing.discoveryTarget,
    metricsTarget: incoming.metricsTarget ?? existing.metricsTarget,
    canonicalIdentity: mergeCanonicalIdentity(incoming.canonicalIdentity, existing.canonicalIdentity),
    policy: incoming.policy ?? existing.policy,
    aiSafeSummary: incoming.aiSafeSummary ?? existing.aiSafeSummary,
    recentChanges: incoming.recentChanges ?? existing.recentChanges,
    facetCounts: incoming.facetCounts ?? existing.facetCounts,
    diskIO: incoming.diskIO ?? existing.diskIO,
    agent: mergeNested(incoming.agent, existing.agent) as Resource['agent'],
    proxmox: mergeNested(incoming.proxmox, existing.proxmox) as Resource['proxmox'],
    pbs: mergeNested(incoming.pbs, existing.pbs) as Resource['pbs'],
    kubernetes: mergeNested(incoming.kubernetes, existing.kubernetes) as Resource['kubernetes'],
    vmware: mergeNested(incoming.vmware, existing.vmware) as Resource['vmware'],
    storage: mergeNested(incoming.storage, existing.storage) as Resource['storage'],
    physicalDisk: mergeNested(incoming.physicalDisk, existing.physicalDisk) as Resource['physicalDisk'],
    identity: mergeNested(incoming.identity, existing.identity) as Resource['identity'],
    platformData: mergePlatformData(incoming.platformData, existing.platformData),
    // Empty incoming tag/label collections do not clobber existing ones.
    tags: incoming.tags && incoming.tags.length > 0 ? incoming.tags : existing.tags,
    labels:
      incoming.labels && Object.keys(incoming.labels).length > 0 ? incoming.labels : existing.labels,
  };
};
// Merges an incoming snapshot over the existing one by resource id: each
// incoming row is canonicalized, then combined with its previous canonical
// row (if any). An empty incoming snapshot clears the list.
const mergeCanonicalResourceSnapshot = (
  incoming: Resource[],
  existing: Resource[],
): Resource[] => {
  if (incoming.length === 0) {
    return [];
  }
  const previousById = new Map<string, Resource>();
  for (const resource of existing) {
    previousById.set(resource.id, resource);
  }
  return incoming.map((resource) =>
    mergeCanonicalResource(canonicalizeRealtimeResource(resource), previousById.get(resource.id)),
  );
};
const toResource = (v2: APIResource): Resource => {
const sources = (v2.sources || []).filter(
(s): s is string => typeof s === 'string' && s.trim().length > 0,
@ -602,7 +928,7 @@ const toResource = (v2: APIResource): Resource => {
sourceType: resolveSourceTypeFromSources(sources),
parentId: v2.parentId,
parentName: v2.parentName,
clusterId: getPreferredResourceClusterName(v2),
clusterId: getExplicitResourceClusterName(v2),
status: resolveStatus(v2.status),
incidentCount: v2.incidentCount,
incidentCode: v2.incidentCode,
@ -1137,23 +1463,31 @@ export function useUnifiedResources(options?: UseUnifiedResourcesOptions) {
}
const wsResources = Array.isArray(wsStore.state.resources) ? wsStore.state.resources : [];
const projectedResources = filterCanonicalUnifiedResources(wsResources, query, typeFilter);
const now = Date.now();
clearInitialHydrationTimeout();
const allResourcesEntry = getUnifiedResourcesCacheEntry(
buildScopedUnifiedResourcesCacheKey(ALL_RESOURCES_CACHE_KEY, currentOrgScope),
);
setUnifiedResourcesCache(allResourcesEntry, wsResources, now);
const mergedWsResources = mergeCanonicalResourceSnapshot(
wsResources,
allResourcesEntry.resources,
);
const projectedResources = filterCanonicalUnifiedResources(mergedWsResources, query, typeFilter);
const now = Date.now();
clearInitialHydrationTimeout();
setUnifiedResourcesCache(allResourcesEntry, mergedWsResources, now);
allResourcesEntry.lastFetchAt = now;
if (projectedResources === null) {
return;
}
setUnifiedResourcesCache(cacheEntry, projectedResources, now);
const mergedProjectedResources = mergeCanonicalResourceSnapshot(
projectedResources,
cacheEntry.resources,
);
setUnifiedResourcesCache(cacheEntry, mergedProjectedResources, now);
cacheEntry.lastFetchAt = now;
batch(() => {
setResources(reconcile(projectedResources, { key: 'id' }));
setResources(reconcile(mergedProjectedResources, { key: 'id' }));
setError(undefined);
setLoading(false);
});

View file

@ -5,6 +5,7 @@ import {
getActionableDockerRuntimeIdFromResource,
getActionableKubernetesClusterIdFromResource,
getExplicitAgentIdFromResource,
getExplicitResourceClusterName,
getMetricsChartKeyCandidatesFromResource,
getPreferredResourceClusterName,
hasDockerWorkloadsScope,
@ -184,6 +185,28 @@ describe('agentResources', () => {
).toBe('proxmox-cluster');
});
// Contract test for getExplicitResourceClusterName: it must return only
// explicit cluster identity (e.g. identity.clusterName) and never fall back
// to the resource's display name.
it('keeps explicit cluster membership separate from display-name fallback', () => {
  // A docker host with only a name and no cluster identity → undefined.
  expect(
    getExplicitResourceClusterName(
      makeResource({
        type: 'docker-host',
        name: 'ops-services-01',
      }),
    ),
  ).toBeUndefined();
  // An explicit identity.clusterName is honored as-is.
  expect(
    getExplicitResourceClusterName(
      makeResource({
        type: 'agent',
        identity: {
          clusterName: 'Core Fabric',
        },
      }),
    ),
  ).toBe('Core Fabric');
});
it('detects docker workloads scope from explicit docker facets instead of source lists', () => {
expect(
hasDockerWorkloadsScope(

View file

@ -150,14 +150,17 @@ export const getPreferredResourceKubernetesContext = (
);
};
export const getPreferredResourceClusterName = (
export const getExplicitResourceClusterName = (
resource: ResourceClusterNameLike,
): string | undefined =>
getPreferredResourceKubernetesContext(resource) ||
asTrimmedString(resource.identity?.clusterName) ||
asTrimmedString(resource.proxmox?.clusterName) ||
asTrimmedString(resource.platformData?.proxmox?.clusterName) ||
asTrimmedString(resource.name);
asTrimmedString(resource.platformData?.proxmox?.clusterName);
export const getPreferredResourceClusterName = (
resource: ResourceClusterNameLike,
): string | undefined => getExplicitResourceClusterName(resource) || asTrimmedString(resource.name);
export const getMetricsChartKeyCandidatesFromResource = (resource: Resource): string[] => {
const candidates = [