diff --git a/docs/release-control/v6/internal/subsystems/storage-recovery.md b/docs/release-control/v6/internal/subsystems/storage-recovery.md index cf77f1334..0ec7c5077 100644 --- a/docs/release-control/v6/internal/subsystems/storage-recovery.md +++ b/docs/release-control/v6/internal/subsystems/storage-recovery.md @@ -80,6 +80,10 @@ querying, and the operator-facing storage health presentation layer. That same adjacent API boundary now also owns SSO outbound discovery and metadata fetch trust: storage- and recovery-adjacent surfaces may share `internal/api/sso_outbound.go`, `internal/api/saml_service.go`, and `internal/api/oidc_service.go`, but they must not fork separate metadata/discovery HTTP clients, redirect policies, or credential-file read rules when they depend on shared backend auth helpers. 5. Route canonical storage/recovery resource selection through `frontend-modern/src/hooks/useUnifiedResources.ts` and the owning `unified-resources` contract That shared hook now also projects resource `clusterId` through the shared cluster-name helper, so storage and recovery links keep the same cluster-context label as other unified-resource consumers instead of rebuilding a local fallback chain. + Storage and recovery consumers must also inherit the hook's canonical + `ResourceType` normalization for route/query filters, so storage subtypes + such as `physical_disk` stay on the same cache-backed snapshot instead of + relying on storage-local filter aliases. 6. Preserve API-owned node identity continuity in shared `internal/api/` helpers so storage and recovery transport attachments do not fork by hostname-versus-IP drift across the same runtime. 7. Preserve fail-closed API assignment and lookup behavior in shared `internal/api/` helpers so storage and recovery surfaces do not inherit orphaned profile or resource references from unrelated transport mutations. 8. 
Preserve canonical configured public endpoint selection in shared `internal/api/` helpers so recovery and storage links do not inherit loopback-local scheme drift from admin-originated setup/install flows. diff --git a/docs/release-control/v6/internal/subsystems/unified-resources.md b/docs/release-control/v6/internal/subsystems/unified-resources.md index 48924aa25..643fee69f 100644 --- a/docs/release-control/v6/internal/subsystems/unified-resources.md +++ b/docs/release-control/v6/internal/subsystems/unified-resources.md @@ -269,6 +269,10 @@ assembly branch. broader cache already reflects canonical resource truth, and fresh empty snapshots must remain cacheable instead of regressing route handoffs back to transient full-page loading shells. + That same shared cache boundary must normalize route/query type filters + through the canonical frontend-to-`ResourceType` resolver before slicing + the snapshot, so compatibility values such as `disk` / `physical_disk` + stay on one cache truth instead of falling back to ad hoc filter aliases. 
## Current State diff --git a/frontend-modern/src/hooks/__tests__/useUnifiedResources.test.ts b/frontend-modern/src/hooks/__tests__/useUnifiedResources.test.ts index 60069dc68..67e91eca7 100644 --- a/frontend-modern/src/hooks/__tests__/useUnifiedResources.test.ts +++ b/frontend-modern/src/hooks/__tests__/useUnifiedResources.test.ts @@ -771,6 +771,50 @@ describe('useUnifiedResources', () => { disposeFiltered(); }); + it('seeds a physical-disk filtered cache from a fresh all-resources snapshot', async () => { + apiFetchMock.mockResolvedValueOnce({ + ok: true, + json: async () => ({ + data: [ + v2Resource, + { + ...v2Resource, + id: 'disk-1', + type: 'physical_disk', + name: 'nvme0n1', + }, + ], + }), + }); + + let disposeAll = () => {}; + let allResources: ReturnType<typeof useUnifiedResources> | undefined; + createRoot((d) => { + disposeAll = d; + allResources = useUnifiedResources({ query: '', cacheKey: 'all-resources' }); + }); + + await flushAsync(); + await waitForResourceCount(() => allResources!.resources().length, 2); + expect(apiFetchMock).toHaveBeenCalledTimes(1); + + disposeAll(); + + let disposeFiltered = () => {}; + let filtered: ReturnType<typeof useUnifiedResources> | undefined; + createRoot((d) => { + disposeFiltered = d; + filtered = useUnifiedResources({ query: 'type=physical_disk' }); + }); + + await flushAsync(); + expect(apiFetchMock).toHaveBeenCalledTimes(1); + expect(filtered!.loading()).toBe(false); + expect(filtered!.resources().map((resource) => resource.id)).toEqual(['disk-1']); + + disposeFiltered(); + }); + it('treats an empty fresh snapshot as cacheable on remount', async () => { apiFetchMock.mockResolvedValueOnce({ ok: true, diff --git a/frontend-modern/src/hooks/useUnifiedResources.ts b/frontend-modern/src/hooks/useUnifiedResources.ts index 6746d866d..a90315dca 100644 --- a/frontend-modern/src/hooks/useUnifiedResources.ts +++ b/frontend-modern/src/hooks/useUnifiedResources.ts @@ -472,9 +472,12 @@ const resolveType = (value?: string): ResourceType => { const canonicalFrontendType =
canonicalizeFrontendResourceType(normalized); switch (canonicalFrontendType) { case 'agent': + case 'storage': case 'docker-host': case 'k8s-cluster': case 'k8s-node': + case 'k8s-deployment': + case 'k8s-service': case 'vm': case 'system-container': case 'oci-container': @@ -484,6 +487,8 @@ const resolveType = (value?: string): ResourceType => { case 'pmg': case 'ceph': return canonicalFrontendType; + case 'disk': + return 'physical_disk'; default: break; } @@ -777,7 +782,7 @@ const parseUnifiedResourcesTypeFilter = (query: string): Set<ResourceType> | null .map((candidate) => asTrimmedString(candidate)) .filter((candidate): candidate is string => candidate !== undefined) .forEach((candidate) => { - types.add(canonicalizeFrontendResourceType(candidate)); + types.add(resolveType(candidate)); }); } @@ -807,7 +812,7 @@ const seedUnifiedResourcesCacheFromAllResources = ( } entry.resources = allResourcesEntry.resources.filter((resource) => - typeFilter.has(canonicalizeFrontendResourceType(resource.type)), + typeFilter.has(resolveType(resource.type)), ); entry.hasSnapshot = true; entry.cachedAt = allResourcesEntry.cachedAt;