From 655c9f81c3f41987a6b2defa4c1f5b9775771e81 Mon Sep 17 00:00:00 2001 From: rcourtman Date: Fri, 12 Dec 2025 23:13:40 +0000 Subject: [PATCH] feat(kubernetes): Add Kubernetes mock data and UI Backend: - Add K8s cluster, node, pod, deployment mock data generation - Configurable via PULSE_MOCK_K8S_CLUSTERS, PULSE_MOCK_K8S_NODES, PULSE_MOCK_K8S_PODS, PULSE_MOCK_K8S_DEPLOYMENTS env vars - Generate realistic cluster data with versions, namespaces, pod phases - Add dynamic metric updates for K8s resources - Deep copy K8s data in cloneState to prevent race conditions Frontend: - Add KubernetesClusters component with 4 view modes: Clusters, Nodes, Pods, Deployments - Filter bar with search, status filter, show hidden toggle - Nodes view: status, roles, CPU/memory/pod capacity, kubelet version - Pods view: namespace, status, ready containers, restarts, image, age - Deployments view: replicas, ready/up-to-date status - Matches Docker/Dashboard table styling patterns --- frontend-modern/src/App.tsx | 35 + .../Kubernetes/KubernetesClusters.tsx | 795 ++++++++++++++++++ frontend-modern/src/stores/websocket.ts | 16 + frontend-modern/src/types/api.ts | 61 ++ internal/mock/generator.go | 551 +++++++++++- internal/mock/integration.go | 112 ++- internal/models/models_frontend.go | 4 +- mock.env | 7 + 8 files changed, 1529 insertions(+), 52 deletions(-) create mode 100644 frontend-modern/src/components/Kubernetes/KubernetesClusters.tsx diff --git a/frontend-modern/src/App.tsx b/frontend-modern/src/App.tsx index 71862d4e7..15ae3e81f 100644 --- a/frontend-modern/src/App.tsx +++ b/frontend-modern/src/App.tsx @@ -41,6 +41,7 @@ import BoxesIcon from 'lucide-solid/icons/boxes'; import MonitorIcon from 'lucide-solid/icons/monitor'; import BellIcon from 'lucide-solid/icons/bell'; import SettingsIcon from 'lucide-solid/icons/settings'; +import NetworkIcon from 'lucide-solid/icons/network'; import { TokenRevealDialog } from './components/TokenRevealDialog'; import { useAlertsActivation } from './stores/alertsActivation'; import { UpdateProgressModal } from './components/UpdateProgressModal'; @@ -65,6 +66,11 @@ const SettingsPage = lazy(() => import('./components/Settings/Settings')); const DockerHosts = lazy(() => import('./components/Docker/DockerHosts').then((module) => ({ default: module.DockerHosts })), ); +const KubernetesClusters = lazy(() => + import('./components/Kubernetes/KubernetesClusters').then((module) => ({ + default: module.KubernetesClusters, + })), +); const HostsOverview = lazy(() => import('./components/Hosts/HostsOverview').then((module) => ({ default: module.HostsOverview, @@ -112,6 +118,14 @@ function HostsRoute() { return ; } +function KubernetesRoute() { + const wsContext = useContext(WebSocketContext); + if (!wsContext) { + return
<div>Loading...</div>
; + } + return ; +} + // Helper to detect if an update is actively in progress (not just checking for updates) function isUpdateInProgress(status: string | undefined): boolean { if (!status) return false; @@ -877,6 +891,7 @@ function App() { } /> } /> + } /> @@ -998,6 +1013,7 @@ function AppLayout(props: { const path = location.pathname; if (path.startsWith('/proxmox')) return 'proxmox'; if (path.startsWith('/docker')) return 'docker'; + if (path.startsWith('/kubernetes')) return 'kubernetes'; if (path.startsWith('/hosts')) return 'hosts'; if (path.startsWith('/servers')) return 'hosts'; // Legacy redirect if (path.startsWith('/alerts')) return 'alerts'; @@ -1005,6 +1021,7 @@ function AppLayout(props: { return 'proxmox'; }; const hasDockerHosts = createMemo(() => (props.state().dockerHosts?.length ?? 0) > 0); + const hasKubernetesClusters = createMemo(() => (props.state().kubernetesClusters?.length ?? 0) > 0); const hasHosts = createMemo(() => (props.state().hosts?.length ?? 0) > 0); const hasProxmoxHosts = createMemo( () => @@ -1019,6 +1036,12 @@ function AppLayout(props: { } }); + createEffect(() => { + if (hasKubernetesClusters()) { + markPlatformSeen('kubernetes'); + } + }); + createEffect(() => { if (hasProxmoxHosts()) { markPlatformSeen('proxmox'); @@ -1057,6 +1080,18 @@ function AppLayout(props: { ), }, + { + id: 'kubernetes' as const, + label: 'Kubernetes', + route: '/kubernetes', + settingsRoute: '/settings/agents', + tooltip: 'Monitor Kubernetes clusters and workloads', + enabled: hasKubernetesClusters() || !!seenPlatforms()['kubernetes'], + live: hasKubernetesClusters(), + icon: ( + + ), + }, { id: 'hosts' as const, label: 'Hosts', diff --git a/frontend-modern/src/components/Kubernetes/KubernetesClusters.tsx b/frontend-modern/src/components/Kubernetes/KubernetesClusters.tsx new file mode 100644 index 000000000..762ef0122 --- /dev/null +++ b/frontend-modern/src/components/Kubernetes/KubernetesClusters.tsx @@ -0,0 +1,795 @@ +import type { Component } from 'solid-js'; +import { For, Show, createMemo, createSignal } from 'solid-js'; +import type { + KubernetesCluster, + KubernetesDeployment, + KubernetesNode, + KubernetesPod, +} from '@/types/api'; +import { Card } from '@/components/shared/Card'; +import { EmptyState } from '@/components/shared/EmptyState'; +import { ScrollableTable } from '@/components/shared/ScrollableTable'; +import { StatusDot } from '@/components/shared/StatusDot'; +import { formatRelativeTime, formatBytes } from '@/utils/format'; +import { DEGRADED_HEALTH_STATUSES, OFFLINE_HEALTH_STATUSES, type StatusIndicator } from '@/utils/status'; + +interface KubernetesClustersProps { + clusters: KubernetesCluster[]; +} + +type ViewMode = 'clusters' | 'nodes' | 'pods' | 'deployments'; +type StatusFilter = 'all' | 'healthy' | 'unhealthy'; + +const normalize = (value?: string | null): string => (value || '').trim().toLowerCase(); + +const getStatusIndicator = (status: string | undefined | null): StatusIndicator => { + const normalized = normalize(status); + if (!normalized) return { variant: 'muted', label: 'Unknown' }; + if (OFFLINE_HEALTH_STATUSES.has(normalized)) return { variant: 'danger', label: 'Offline' }; + if (DEGRADED_HEALTH_STATUSES.has(normalized)) return { variant: 'warning', label: 'Degraded' }; + if (normalized === 'online') return { variant: 'success', label: 'Online' }; + return { variant: 'muted', label: status ?? 
'Unknown' }; +}; + +const isPodHealthy = (pod: KubernetesPod): boolean => { + const phase = normalize(pod.phase); + if (!phase) return false; + if (phase !== 'running') return false; + + const containers = pod.containers ?? []; + if (containers.length === 0) return true; + + return containers.every((container) => { + if (!container.ready) return false; + const state = normalize(container.state); + if (!state) return true; + return state === 'running'; + }); +}; + +const isDeploymentHealthy = (d: KubernetesDeployment): boolean => { + const desired = d.desiredReplicas ?? 0; + if (desired <= 0) return true; + const available = d.availableReplicas ?? 0; + const ready = d.readyReplicas ?? 0; + const updated = d.updatedReplicas ?? 0; + return available >= desired && ready >= desired && updated >= desired; +}; + +const getClusterDisplayName = (cluster: KubernetesCluster): string => { + return cluster.customDisplayName || cluster.displayName || cluster.name || cluster.id; +}; + +const summarizeNodes = (nodes: KubernetesNode[] | undefined) => { + const list = nodes ?? []; + const notReady = list.filter((n) => !n.ready).length; + const unschedulable = list.filter((n) => !!n.unschedulable).length; + return { total: list.length, notReady, unschedulable }; +}; + +const summarizePods = (pods: KubernetesPod[] | undefined) => { + const list = pods ?? []; + const unhealthy = list.filter((p) => !isPodHealthy(p)).length; + return { total: list.length, unhealthy }; +}; + +const summarizeDeployments = (deployments: KubernetesDeployment[] | undefined) => { + const list = deployments ?? []; + const unhealthy = list.filter((d) => !isDeploymentHealthy(d)).length; + return { total: list.length, unhealthy }; +}; + +const getPodStatusBadge = (pod: KubernetesPod) => { + if (isPodHealthy(pod)) { + return { class: 'bg-green-100 text-green-700 dark:bg-green-900/40 dark:text-green-300', label: 'Running' }; + } + const phase = normalize(pod.phase); + if (phase === 'pending') { + return { class: 'bg-yellow-100 text-yellow-700 dark:bg-yellow-900/40 dark:text-yellow-300', label: 'Pending' }; + } + if (phase === 'failed') { + return { class: 'bg-red-100 text-red-700 dark:bg-red-900/40 dark:text-red-300', label: 'Failed' }; + } + if (phase === 'succeeded') { + return { class: 'bg-blue-100 text-blue-700 dark:bg-blue-900/40 dark:text-blue-300', label: 'Completed' }; + } + // Check for CrashLoopBackOff or other container issues + const containers = pod.containers ?? []; + const crashingContainer = containers.find((c) => c.reason?.toLowerCase().includes('crash')); + if (crashingContainer) { + return { class: 'bg-red-100 text-red-700 dark:bg-red-900/40 dark:text-red-300', label: 'CrashLoop' }; + } + const waitingContainer = containers.find((c) => normalize(c.state) === 'waiting'); + if (waitingContainer) { + return { class: 'bg-amber-100 text-amber-700 dark:bg-amber-900/40 dark:text-amber-300', label: waitingContainer.reason || 'Waiting' }; + } + return { class: 'bg-amber-100 text-amber-700 dark:bg-amber-900/40 dark:text-amber-300', label: pod.phase || 'Unknown' }; +}; + +// Get primary container image (first container) +const getPrimaryImage = (pod: KubernetesPod): string => { + const containers = pod.containers ?? []; + if (containers.length === 0) return '—'; + const image = containers[0].image ?? 
''; + // Truncate long image names, show just the image:tag part + const parts = image.split('/'); + return parts[parts.length - 1] || image || '—'; +}; + +// Format age from timestamp +const formatAge = (timestamp?: number | string | null): string => { + if (!timestamp) return '—'; + const ts = typeof timestamp === 'string' ? Date.parse(timestamp) : timestamp; + if (isNaN(ts)) return '—'; + return formatRelativeTime(ts); +}; + +export const KubernetesClusters: Component = (props) => { + const [search, setSearch] = createSignal(''); + const [viewMode, setViewMode] = createSignal('clusters'); + const [statusFilter, setStatusFilter] = createSignal('all'); + const [showHidden, setShowHidden] = createSignal(false); + + // Get all nodes flattened across clusters + const allNodes = createMemo(() => { + const clusters = props.clusters ?? []; + const nodes: Array<{ cluster: KubernetesCluster; node: KubernetesNode }> = []; + for (const cluster of clusters) { + if (!showHidden() && cluster.hidden) continue; + for (const node of cluster.nodes ?? []) { + nodes.push({ cluster, node }); + } + } + return nodes; + }); + + // Get all pods flattened across clusters + const allPods = createMemo(() => { + const clusters = props.clusters ?? []; + const pods: Array<{ cluster: KubernetesCluster; pod: KubernetesPod }> = []; + for (const cluster of clusters) { + if (!showHidden() && cluster.hidden) continue; + for (const pod of cluster.pods ?? []) { + pods.push({ cluster, pod }); + } + } + return pods; + }); + + // Get all deployments flattened across clusters + const allDeployments = createMemo(() => { + const clusters = props.clusters ?? []; + const deps: Array<{ cluster: KubernetesCluster; deployment: KubernetesDeployment }> = []; + for (const cluster of clusters) { + if (!showHidden() && cluster.hidden) continue; + for (const dep of cluster.deployments ?? []) { + deps.push({ cluster, deployment: dep }); + } + } + return deps; + }); + + const visibleClusters = createMemo(() => { + const term = search().trim().toLowerCase(); + const clusters = props.clusters ?? []; + const status = statusFilter(); + + return clusters + .filter((cluster) => showHidden() || !cluster.hidden) + .filter((cluster) => { + if (status === 'all') return true; + const clusterStatus = normalize(cluster.status); + const isHealthy = clusterStatus === 'online'; + if (status === 'healthy') return isHealthy; + if (status === 'unhealthy') return !isHealthy; + return true; + }) + .filter((cluster) => { + if (!term) return true; + const haystack = [ + getClusterDisplayName(cluster), + cluster.id, + cluster.server ?? '', + cluster.context ?? '', + cluster.version ?? '', + ] + .join(' ') + .toLowerCase(); + return haystack.includes(term); + }) + .sort((a, b) => getClusterDisplayName(a).localeCompare(getClusterDisplayName(b))); + }); + + const filteredNodes = createMemo(() => { + const term = search().trim().toLowerCase(); + const status = statusFilter(); + + return allNodes() + .filter(({ node }) => { + if (status === 'all') return true; + const isHealthy = node.ready && !node.unschedulable; + if (status === 'healthy') return isHealthy; + if (status === 'unhealthy') return !isHealthy; + return true; + }) + .filter(({ cluster, node }) => { + if (!term) return true; + const haystack = [ + node.name, + getClusterDisplayName(cluster), + node.kubeletVersion ?? '', + node.osImage ?? '', + ...(node.roles ?? 
[]), + ] + .join(' ') + .toLowerCase(); + return haystack.includes(term); + }); + }); + + const filteredPods = createMemo(() => { + const term = search().trim().toLowerCase(); + const status = statusFilter(); + + return allPods() + .filter(({ pod }) => { + if (status === 'all') return true; + const healthy = isPodHealthy(pod); + if (status === 'healthy') return healthy; + if (status === 'unhealthy') return !healthy; + return true; + }) + .filter(({ cluster, pod }) => { + if (!term) return true; + const haystack = [ + pod.name, + pod.namespace, + pod.nodeName ?? '', + pod.phase ?? '', + getClusterDisplayName(cluster), + ...(pod.containers ?? []).map(c => c.image ?? ''), + ] + .join(' ') + .toLowerCase(); + return haystack.includes(term); + }); + }); + + const filteredDeployments = createMemo(() => { + const term = search().trim().toLowerCase(); + const status = statusFilter(); + + return allDeployments() + .filter(({ deployment }) => { + if (status === 'all') return true; + const healthy = isDeploymentHealthy(deployment); + if (status === 'healthy') return healthy; + if (status === 'unhealthy') return !healthy; + return true; + }) + .filter(({ cluster, deployment }) => { + if (!term) return true; + const haystack = [ + deployment.name, + deployment.namespace, + getClusterDisplayName(cluster), + ] + .join(' ') + .toLowerCase(); + return haystack.includes(term); + }); + }); + + const isEmpty = createMemo(() => (props.clusters?.length ?? 0) === 0); + + const hasActiveFilters = createMemo( + () => search().trim() !== '' || statusFilter() !== 'all' || showHidden(), + ); + + const handleReset = () => { + setSearch(''); + setStatusFilter('all'); + setShowHidden(false); + setViewMode('clusters'); + }; + + return ( +
+ {/* Header */} +
+

Kubernetes

+

+ Cluster health from the unified agent Kubernetes module. +

+
+ + {/* Filter Bar */} + +
+ {/* Search */} +
+
+ setSearch(e.currentTarget.value)} + onKeyDown={(e) => { + if (e.key === 'Escape') { + setSearch(''); + e.currentTarget.blur(); + } + }} + class="w-full pl-9 pr-8 py-1.5 text-sm border border-gray-300 dark:border-gray-600 rounded-lg bg-white dark:bg-gray-900 text-gray-800 dark:text-gray-200 placeholder-gray-400 dark:placeholder-gray-500 focus:ring-2 focus:ring-blue-500/20 focus:border-blue-500 dark:focus:border-blue-400 outline-none transition-all" + /> + + + + + + +
+
+ + {/* Filters */} +
+ {/* View Mode Toggle */} +
+ + + + +
+ + + } + > + {/* Clusters View */} + + + + + + + + + + + + + + + + + }> + {(cluster) => { + const indicator = () => getStatusIndicator(cluster.status); + const nodes = () => summarizeNodes(cluster.nodes); + const pods = () => summarizePods(cluster.pods); + const deployments = () => summarizeDeployments(cluster.deployments); + + return ( + + + + + + + + + + ); + }} + + +
Cluster | Status | Nodes | Pods | Deployments | Version | Last Seen
No clusters match the current filters.
+
+ {getClusterDisplayName(cluster)} + + + Pending uninstall + + + + + Hidden + + +
+
+ {cluster.server || '—'} +
+
+
+ + {indicator().label} +
+
+ 0 ? 'text-amber-600 dark:text-amber-400' : ''}>{nodes().total - nodes().notReady} + /{nodes().total} + 0}> + ready + + + 0 ? 'text-amber-600 dark:text-amber-400' : ''}>{pods().total - pods().unhealthy} + /{pods().total} + 0}> + healthy + + + 0 ? 'text-amber-600 dark:text-amber-400' : ''}>{deployments().total - deployments().unhealthy} + /{deployments().total} + 0}> + ok + + + {cluster.version || '—'} + + {cluster.lastSeen ? formatRelativeTime(cluster.lastSeen) : '—'} +
+
+
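+              {/* Rollup note: the ready/healthy/ok fractions above come from
+                  summarizeNodes/summarizePods/summarizeDeployments; a shortfall
+                  only tints the count amber, it never hides the row. */}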
+ + {/* Nodes View */} + + + + + + + + + + + + + + + + + + }> + {({ cluster, node }) => { + const isHealthy = () => node.ready && !node.unschedulable; + const roles = () => (node.roles ?? []).join(', ') || 'worker'; + + return ( + + + + + + + + + + + ); + }} + + +
Node | Cluster | Status | Roles | CPU | Memory | Pods | Version
No nodes match the current filters.
+
{node.name}
+
+ {node.osImage || '—'} +
+
+ {getClusterDisplayName(cluster)} + + + + {!node.ready ? 'NotReady' : 'Unschedulable'} + + }> + + + Ready + + + + + {roles()} + + + {node.allocatableCpuCores ?? node.capacityCpuCores ?? '—'} cores + + {node.allocatableMemoryBytes ? formatBytes(node.allocatableMemoryBytes) : node.capacityMemoryBytes ? formatBytes(node.capacityMemoryBytes) : '—'} + + {node.allocatablePods ?? node.capacityPods ?? '—'} + + {node.kubeletVersion || '—'} +
+
+
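+              {/* Capacity note: the CPU/memory/pod columns prefer the node's
+                  allocatable figures, fall back to raw capacity, then to an
+                  em dash when neither is reported. */}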
+ + {/* Pods View */} + + + + + + + + + + + + + + + + + + }> + {({ cluster, pod }) => { + const statusBadge = () => getPodStatusBadge(pod); + const containers = () => pod.containers ?? []; + const readyContainers = () => containers().filter(c => c.ready).length; + + return ( + + + + + + + + + + + ); + }} + + +
Pod | Namespace | Cluster | Status | Ready | Restarts | Image | Age
No pods match the current filters.
+
{pod.name}
+
+ {pod.nodeName || 'unscheduled'} +
+
+ + {pod.namespace} + + + {getClusterDisplayName(cluster)} + + + {statusBadge().label} + + + + {readyContainers()}/{containers().length} + + + 0} fallback={0}> + {pod.restarts} + + + + {getPrimaryImage(pod)} + + + {formatAge(pod.createdAt)} +
+
+
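+              {/* Status note: getPodStatusBadge resolves known phases first
+                  (Pending/Failed/Succeeded), then scans containers for a
+                  crash-related reason or a waiting state before falling back
+                  to the raw phase text. */}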
+ + {/* Deployments View */} + + + + + + + + + + + + + + + + + }> + {({ cluster, deployment }) => { + const healthy = () => isDeploymentHealthy(deployment); + const desired = () => deployment.desiredReplicas ?? 0; + const ready = () => deployment.readyReplicas ?? 0; + const updated = () => deployment.updatedReplicas ?? 0; + + return ( + + + + + + + + + + ); + }} + + +
Deployment | Namespace | Cluster | Status | Replicas | Ready | Up-to-date
No deployments match the current filters.
+
{deployment.name}
+
+ + {deployment.namespace} + + + {getClusterDisplayName(cluster)} + + + + Progressing + + }> + + + Available + + + + {desired()} + + = desired() ? 'text-green-600 dark:text-green-400' : 'text-amber-600 dark:text-amber-400'}> + {ready()}/{desired()} + + + = desired() ? 'text-green-600 dark:text-green-400' : 'text-amber-600 dark:text-amber-400'}> + {updated()}/{desired()} + +
+
+
+ + +
+ ); +}; diff --git a/frontend-modern/src/stores/websocket.ts b/frontend-modern/src/stores/websocket.ts index bfe9aa828..f32d54b37 100644 --- a/frontend-modern/src/stores/websocket.ts +++ b/frontend-modern/src/stores/websocket.ts @@ -10,6 +10,8 @@ import type { Container, DockerHost, Host, + KubernetesCluster, + RemovedKubernetesCluster, } from '@/types/api'; import type { ActivationState as ActivationStateType } from '@/types/alerts'; import { logger } from '@/utils/logger'; @@ -30,6 +32,8 @@ export function createWebSocketStore(url: string) { vms: [], containers: [], dockerHosts: [], + kubernetesClusters: [], + removedKubernetesClusters: [], hosts: [], replicationJobs: [], storage: [], @@ -552,6 +556,18 @@ export function createWebSocketStore(url: string) { setState('hosts', reconcile(processedHosts, { key: 'id' })); } } + if (message.data.kubernetesClusters !== undefined) { + const clusters = Array.isArray(message.data.kubernetesClusters) + ? (message.data.kubernetesClusters as KubernetesCluster[]) + : []; + setState('kubernetesClusters', reconcile(clusters, { key: 'id' })); + } + if (message.data.removedKubernetesClusters !== undefined) { + const removed = Array.isArray(message.data.removedKubernetesClusters) + ? (message.data.removedKubernetesClusters as RemovedKubernetesCluster[]) + : []; + setState('removedKubernetesClusters', reconcile(removed, { key: 'id' })); + } if (message.data.storage !== undefined) setState('storage', reconcile(message.data.storage, { key: 'id' })); if (message.data.cephClusters !== undefined) setState('cephClusters', reconcile(message.data.cephClusters, { key: 'id' })); diff --git a/frontend-modern/src/types/api.ts b/frontend-modern/src/types/api.ts index 3e281d548..eb82bdc35 100644 --- a/frontend-modern/src/types/api.ts +++ b/frontend-modern/src/types/api.ts @@ -59,6 +59,9 @@ export interface KubernetesCluster { tokenLastUsedAt?: number; hidden?: boolean; pendingUninstall?: boolean; + nodes?: KubernetesNode[]; + pods?: KubernetesPod[]; + deployments?: KubernetesDeployment[]; } export interface RemovedKubernetesCluster { @@ -68,6 +71,64 @@ export interface RemovedKubernetesCluster { removedAt: number; } +export interface KubernetesNode { + uid: string; + name: string; + ready: boolean; + unschedulable?: boolean; + kubeletVersion?: string; + containerRuntimeVersion?: string; + osImage?: string; + kernelVersion?: string; + architecture?: string; + capacityCpuCores?: number; + capacityMemoryBytes?: number; + capacityPods?: number; + allocatableCpuCores?: number; + allocatableMemoryBytes?: number; + allocatablePods?: number; + roles?: string[]; +} + +export interface KubernetesPodContainer { + name: string; + image?: string; + ready: boolean; + restartCount?: number; + state?: string; + reason?: string; + message?: string; +} + +export interface KubernetesPod { + uid: string; + name: string; + namespace: string; + nodeName?: string; + phase?: string; + reason?: string; + message?: string; + qosClass?: string; + createdAt?: number; + startTime?: number; + restarts?: number; + labels?: Record; + ownerKind?: string; + ownerName?: string; + containers?: KubernetesPodContainer[]; +} + +export interface KubernetesDeployment { + uid: string; + name: string; + namespace: string; + desiredReplicas?: number; + updatedReplicas?: number; + readyReplicas?: number; + availableReplicas?: number; + labels?: Record; +} + export interface Node { id: string; name: string; diff --git a/internal/mock/generator.go b/internal/mock/generator.go index 564c15488..19524f49f 100644 --- 
a/internal/mock/generator.go +++ b/internal/mock/generator.go @@ -35,31 +35,40 @@ func titleCase(s string) string { } type MockConfig struct { - NodeCount int - VMsPerNode int - LXCsPerNode int - DockerHostCount int - DockerContainersPerHost int - GenericHostCount int - RandomMetrics bool - HighLoadNodes []string // Specific nodes to simulate high load - StoppedPercent float64 // Percentage of guests that should be stopped + NodeCount int + VMsPerNode int + LXCsPerNode int + DockerHostCount int + DockerContainersPerHost int + GenericHostCount int + K8sClusterCount int + K8sNodesPerCluster int + K8sPodsPerCluster int + K8sDeploymentsPerCluster int + RandomMetrics bool + HighLoadNodes []string // Specific nodes to simulate high load + StoppedPercent float64 // Percentage of guests that should be stopped } const ( - dockerConnectionPrefix = "docker-" - hostConnectionPrefix = "host-" + dockerConnectionPrefix = "docker-" + kubernetesConnectionPrefix = "kubernetes-" + hostConnectionPrefix = "host-" ) var DefaultConfig = MockConfig{ - NodeCount: 7, // Test the 5-9 node range by default - VMsPerNode: 5, - LXCsPerNode: 8, - DockerHostCount: 3, - DockerContainersPerHost: 12, - GenericHostCount: 4, - RandomMetrics: true, - StoppedPercent: 0.2, + NodeCount: 7, // Test the 5-9 node range by default + VMsPerNode: 5, + LXCsPerNode: 8, + DockerHostCount: 3, + DockerContainersPerHost: 12, + GenericHostCount: 4, + K8sClusterCount: 2, + K8sNodesPerCluster: 4, + K8sPodsPerCluster: 30, + K8sDeploymentsPerCluster: 12, + RandomMetrics: true, + StoppedPercent: 0.2, } var appNames = []string{ @@ -163,6 +172,77 @@ var hostAgentVersions = []string{ "0.2.0-alpha", } +var k8sClusterNames = []string{ + "production", + "staging", + "development", + "edge", + "internal", + "platform", +} + +var k8sNamespaces = []string{ + "default", + "kube-system", + "monitoring", + "logging", + "ingress-nginx", + "cert-manager", + "argocd", + "apps", + "services", + "databases", + "cache", +} + +var k8sPodPrefixes = []string{ + "nginx", + "redis", + "postgres", + "mysql", + "mongodb", + "prometheus", + "grafana", + "loki", + "jaeger", + "api", + "auth", + "worker", + "cron", + "coredns", + "metrics-server", + "cert-manager", + "ingress-controller", + "fluentd", +} + +var k8sVersions = []string{ + "v1.31.2", + "v1.30.4", + "v1.29.8", +} + +var k8sImages = []string{ + "nginx:1.27", + "redis:7.4", + "postgres:16", + "mysql:8.4", + "mongo:7.0", + "prom/prometheus:v2.54", + "grafana/grafana:11.3", + "grafana/loki:3.0", + "busybox:1.36", + "alpine:3.20", +} + +var k8sNodeOS = []string{ + "Ubuntu 24.04.1 LTS", + "Ubuntu 22.04.5 LTS", + "Debian GNU/Linux 12 (bookworm)", + "Fedora CoreOS 40", + "Talos Linux v1.8", +} + // Common tags used for VMs and containers var commonTags = []string{ "production", "staging", "development", "testing", @@ -252,17 +332,19 @@ func GenerateMockData(config MockConfig) models.StateSnapshot { // rand is automatically seeded in Go 1.20+ data := models.StateSnapshot{ - Nodes: generateNodes(config), - DockerHosts: generateDockerHosts(config), - Hosts: generateHosts(config), - VMs: []models.VM{}, - Containers: []models.Container{}, - PhysicalDisks: []models.PhysicalDisk{}, - ReplicationJobs: []models.ReplicationJob{}, - LastUpdate: time.Now(), - ConnectionHealth: make(map[string]bool), - Stats: models.Stats{}, - ActiveAlerts: []models.Alert{}, + Nodes: generateNodes(config), + DockerHosts: generateDockerHosts(config), + KubernetesClusters: generateKubernetesClusters(config), + RemovedKubernetesClusters: 
[]models.RemovedKubernetesCluster{}, + Hosts: generateHosts(config), + VMs: []models.VM{}, + Containers: []models.Container{}, + PhysicalDisks: []models.PhysicalDisk{}, + ReplicationJobs: []models.ReplicationJob{}, + LastUpdate: time.Now(), + ConnectionHealth: make(map[string]bool), + Stats: models.Stats{}, + ActiveAlerts: []models.Alert{}, } // Generate physical disks for each node @@ -274,6 +356,10 @@ func GenerateMockData(config MockConfig) models.StateSnapshot { data.ConnectionHealth[dockerConnectionPrefix+host.ID] = host.Status != "offline" } + for _, cluster := range data.KubernetesClusters { + data.ConnectionHealth[kubernetesConnectionPrefix+cluster.ID] = cluster.Status != "offline" + } + for _, host := range data.Hosts { data.ConnectionHealth[hostConnectionPrefix+host.ID] = host.Status != "offline" } @@ -1053,6 +1139,342 @@ func generateGuestOSMetadata() (string, string) { return choice.Name, choice.Version } +func generateKubernetesClusters(config MockConfig) []models.KubernetesCluster { + clusterCount := config.K8sClusterCount + if clusterCount <= 0 { + return []models.KubernetesCluster{} + } + + nodeCount := config.K8sNodesPerCluster + if nodeCount <= 0 { + nodeCount = 4 + } + + podCount := config.K8sPodsPerCluster + if podCount < 0 { + podCount = 0 + } + + deploymentCount := config.K8sDeploymentsPerCluster + if deploymentCount < 0 { + deploymentCount = 0 + } + + now := time.Now() + clusters := make([]models.KubernetesCluster, 0, clusterCount) + + for i := 0; i < clusterCount; i++ { + name := k8sClusterNames[i%len(k8sClusterNames)] + clusterID := fmt.Sprintf("k8s-%s-%d", strings.ToLower(name), i+1) + server := fmt.Sprintf("https://%s.k8s.local:6443", strings.ToLower(name)) + context := fmt.Sprintf("%s-context", strings.ToLower(name)) + + nodes := generateKubernetesNodes(clusterID, nodeCount) + pods := generateKubernetesPods(clusterID, nodes, podCount) + deployments := generateKubernetesDeployments(clusterID, deploymentCount) + + lastSeen := now.Add(-time.Duration(rand.Intn(20)) * time.Second) + status := "online" + + // Make the last cluster offline occasionally for UI coverage. 
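+ // (The 0.55 roll and the 5-24 minute LastSeen backdate are arbitrary mock
+ // values, chosen only so the offline row styling renders during development.)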
+ if clusterCount > 1 && i == clusterCount-1 && rand.Float64() < 0.55 { + status = "offline" + lastSeen = now.Add(-time.Duration(5+rand.Intn(20)) * time.Minute) + } else if clusterHasIssues(nodes, pods, deployments) { + status = "degraded" + } + + clusters = append(clusters, models.KubernetesCluster{ + ID: clusterID, + AgentID: fmt.Sprintf("%s-agent", clusterID), + Name: name, + DisplayName: titleCase(name), + Server: server, + Context: context, + Version: k8sVersions[rand.Intn(len(k8sVersions))], + Status: status, + LastSeen: lastSeen, + IntervalSeconds: 30, + AgentVersion: "0.1.0-mock", + Nodes: nodes, + Pods: pods, + Deployments: deployments, + Hidden: false, + PendingUninstall: false, + }) + } + + return clusters +} + +func generateKubernetesNodes(clusterID string, count int) []models.KubernetesNode { + if count <= 0 { + return []models.KubernetesNode{} + } + + nodes := make([]models.KubernetesNode, 0, count) + architectures := []string{"amd64", "arm64"} + runtimes := []string{"containerd://1.7.21", "containerd://1.7.20", "cri-o://1.30.4"} + + for i := 0; i < count; i++ { + name := fmt.Sprintf("%s-node-%d", clusterID, i+1) + ready := rand.Float64() > 0.08 + unschedulable := rand.Float64() < 0.08 + + roles := []string{"worker"} + if i == 0 { + roles = []string{"control-plane"} + } else if i == 1 && count > 3 { + roles = []string{"worker", "gpu"} + } + + cpu := int64(2 + rand.Intn(30)) + memGiB := int64(8 + rand.Intn(248)) + pods := int64(110 + rand.Intn(50)) + capacityMemory := memGiB * 1024 * 1024 * 1024 + allocCPU := cpu - int64(rand.Intn(2)) + if allocCPU < 1 { + allocCPU = 1 + } + allocMem := capacityMemory - int64(rand.Intn(3))*1024*1024*1024 + if allocMem < 1 { + allocMem = capacityMemory + } + + nodes = append(nodes, models.KubernetesNode{ + UID: fmt.Sprintf("%s-%s", name, randomHexString(8)), + Name: name, + Ready: ready, + Unschedulable: unschedulable, + KubeletVersion: k8sVersions[rand.Intn(len(k8sVersions))], + ContainerRuntimeVersion: runtimes[rand.Intn(len(runtimes))], + OSImage: k8sNodeOS[rand.Intn(len(k8sNodeOS))], + KernelVersion: dockerKernelVersions[rand.Intn(len(dockerKernelVersions))], + Architecture: architectures[rand.Intn(len(architectures))], + CapacityCPU: cpu, + CapacityMemoryBytes: capacityMemory, + CapacityPods: pods, + AllocCPU: allocCPU, + AllocMemoryBytes: allocMem, + AllocPods: pods - int64(rand.Intn(10)), + Roles: roles, + }) + } + + // Ensure at least one node issue sometimes. 
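+ // (Forces Ready=false on a random index >= 1, sparing the control-plane node
+ // at index 0, so the degraded rollup path shows up even when the per-node
+ // ~8% unready roll above produced an all-healthy set.)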
+ if count > 2 && rand.Float64() < 0.35 { + idx := 1 + rand.Intn(count-1) + nodes[idx].Ready = false + } + + return nodes +} + +func generateKubernetesPods(clusterID string, nodes []models.KubernetesNode, count int) []models.KubernetesPod { + if count <= 0 { + return []models.KubernetesPod{} + } + + now := time.Now() + pods := make([]models.KubernetesPod, 0, count) + + for i := 0; i < count; i++ { + namespace := k8sNamespaces[rand.Intn(len(k8sNamespaces))] + prefix := k8sPodPrefixes[rand.Intn(len(k8sPodPrefixes))] + name := fmt.Sprintf("%s-%s-%d", prefix, randomHexString(5), i+1) + nodeName := "" + if len(nodes) > 0 && rand.Float64() > 0.08 { + nodeName = nodes[rand.Intn(len(nodes))].Name + } + + createdAt := now.Add(-time.Duration(30+rand.Intn(7200)) * time.Second) + startTime := createdAt.Add(time.Duration(10+rand.Intn(120)) * time.Second) + qos := []string{"Guaranteed", "Burstable", "BestEffort"}[rand.Intn(3)] + + phase := "Running" + reason := "" + message := "" + containerState := "running" + containerReason := "" + containerMessage := "" + containerReady := true + restarts := 0 + + roll := rand.Float64() + switch { + case roll < 0.08: + phase = "Pending" + reason = "Unschedulable" + message = "0/3 nodes available" + containerState = "waiting" + containerReason = "PodInitializing" + containerReady = false + case roll < 0.14: + phase = "Running" + containerState = "waiting" + containerReason = "CrashLoopBackOff" + containerMessage = "Back-off restarting failed container" + containerReady = false + restarts = 3 + rand.Intn(20) + case roll < 0.18: + phase = "Failed" + reason = "Error" + message = "Pod terminated with non-zero exit code" + containerState = "terminated" + containerReason = "Error" + containerReady = false + restarts = 1 + rand.Intn(6) + case roll < 0.22: + phase = "Unknown" + reason = "NodeLost" + message = "Node status is unknown" + containerState = "unknown" + containerReady = false + } + + containerCount := 1 + if rand.Float64() < 0.12 { + containerCount = 2 + } + + containers := make([]models.KubernetesPodContainer, 0, containerCount) + for c := 0; c < containerCount; c++ { + image := k8sImages[rand.Intn(len(k8sImages))] + containers = append(containers, models.KubernetesPodContainer{ + Name: fmt.Sprintf("%s-%d", prefix, c+1), + Image: image, + Ready: containerReady, + RestartCount: int32(restarts), + State: containerState, + Reason: containerReason, + Message: containerMessage, + }) + } + + ownerKind := []string{"Deployment", "StatefulSet", "DaemonSet", "Job"}[rand.Intn(4)] + ownerName := fmt.Sprintf("%s-%s", strings.ToLower(prefix), randomHexString(4)) + + labels := map[string]string{ + "app.kubernetes.io/name": prefix, + "app.kubernetes.io/instance": ownerName, + "app.kubernetes.io/managed-by": "mock", + } + + pod := models.KubernetesPod{ + UID: fmt.Sprintf("%s-%s", name, randomHexString(10)), + Name: name, + Namespace: namespace, + NodeName: nodeName, + Phase: phase, + Reason: reason, + Message: message, + QoSClass: qos, + CreatedAt: createdAt, + StartTime: &startTime, + Restarts: restarts, + Labels: labels, + OwnerKind: ownerKind, + OwnerName: ownerName, + Containers: containers, + } + + if phase == "Pending" { + pod.StartTime = nil + } + + pods = append(pods, pod) + } + + return pods +} + +func generateKubernetesDeployments(clusterID string, count int) []models.KubernetesDeployment { + if count <= 0 { + return []models.KubernetesDeployment{} + } + + deployments := make([]models.KubernetesDeployment, 0, count) + for i := 0; i < count; i++ { + namespace := 
k8sNamespaces[rand.Intn(len(k8sNamespaces))] + prefix := k8sPodPrefixes[rand.Intn(len(k8sPodPrefixes))] + name := fmt.Sprintf("%s-%s", prefix, randomHexString(4)) + + desired := int32(1 + rand.Intn(6)) + updated := desired + ready := desired + available := desired + + // Degrade some deployments for UI coverage. + if rand.Float64() < 0.20 && desired > 0 { + ready = int32(rand.Intn(int(desired))) + available = ready + updated = int32(rand.Intn(int(desired) + 1)) + } + + deployments = append(deployments, models.KubernetesDeployment{ + UID: fmt.Sprintf("%s-%s", name, randomHexString(10)), + Name: name, + Namespace: namespace, + DesiredReplicas: desired, + UpdatedReplicas: updated, + ReadyReplicas: ready, + AvailableReplicas: available, + Labels: map[string]string{ + "app.kubernetes.io/name": prefix, + "cluster": clusterID, + }, + }) + } + + return deployments +} + +func clusterHasIssues(nodes []models.KubernetesNode, pods []models.KubernetesPod, deployments []models.KubernetesDeployment) bool { + for _, node := range nodes { + if !node.Ready || node.Unschedulable { + return true + } + } + + for _, pod := range pods { + if !kubernetesPodHealthy(pod) { + return true + } + } + + for _, d := range deployments { + if !kubernetesDeploymentHealthy(d) { + return true + } + } + + return false +} + +func kubernetesPodHealthy(pod models.KubernetesPod) bool { + if strings.ToLower(strings.TrimSpace(pod.Phase)) != "running" { + return false + } + for _, c := range pod.Containers { + if !c.Ready { + return false + } + state := strings.ToLower(strings.TrimSpace(c.State)) + if state != "" && state != "running" { + return false + } + } + return true +} + +func kubernetesDeploymentHealthy(d models.KubernetesDeployment) bool { + desired := d.DesiredReplicas + if desired <= 0 { + return true + } + return d.ReadyReplicas >= desired && d.AvailableReplicas >= desired && d.UpdatedReplicas >= desired +} + func generateDockerHosts(config MockConfig) []models.DockerHost { hostCount := config.DockerHostCount if hostCount <= 0 { @@ -3107,6 +3529,7 @@ func generateSnapshots(vms []models.VM, containers []models.Container) []models. // UpdateMetrics simulates changing metrics over time func UpdateMetrics(data *models.StateSnapshot, config MockConfig) { updateDockerHosts(data, config) + updateKubernetesClusters(data, config) updateHosts(data, config) if !config.RandomMetrics { @@ -3316,6 +3739,74 @@ func UpdateMetrics(data *models.StateSnapshot, config MockConfig) { data.LastUpdate = time.Now() } +func updateKubernetesClusters(data *models.StateSnapshot, config MockConfig) { + if len(data.KubernetesClusters) == 0 { + return + } + + now := time.Now() + + for i := range data.KubernetesClusters { + cluster := &data.KubernetesClusters[i] + + if cluster.Status != "offline" { + cluster.LastSeen = now.Add(-time.Duration(rand.Intn(12)) * time.Second) + } else if config.RandomMetrics && rand.Float64() < 0.01 { + cluster.Status = "online" + cluster.LastSeen = now + } + + if config.RandomMetrics { + // Small chance to flip a node Ready state. + if len(cluster.Nodes) > 0 && rand.Float64() < 0.05 { + idx := rand.Intn(len(cluster.Nodes)) + cluster.Nodes[idx].Ready = !cluster.Nodes[idx].Ready + } + + // Small chance to flip a pod into/out of CrashLoopBackOff. 
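+ // (Note: the crash branch below keeps Phase "Running" on purpose; real
+ // kubelets report CrashLoopBackOff via container state while the pod phase
+ // stays Running, so UI badge logic must inspect containers, not the phase.)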
+ if len(cluster.Pods) > 0 && rand.Float64() < 0.07 { + idx := rand.Intn(len(cluster.Pods)) + pod := &cluster.Pods[idx] + if kubernetesPodHealthy(*pod) { + pod.Phase = "Running" + pod.Reason = "" + pod.Message = "" + pod.Restarts += 1 + rand.Intn(3) + for j := range pod.Containers { + pod.Containers[j].Ready = false + pod.Containers[j].State = "waiting" + pod.Containers[j].Reason = "CrashLoopBackOff" + pod.Containers[j].Message = "Back-off restarting failed container" + pod.Containers[j].RestartCount += int32(1 + rand.Intn(3)) + } + } else { + pod.Phase = "Running" + pod.Reason = "" + pod.Message = "" + for j := range pod.Containers { + pod.Containers[j].Ready = true + pod.Containers[j].State = "running" + pod.Containers[j].Reason = "" + pod.Containers[j].Message = "" + } + } + } + } + + if cluster.Status != "offline" { + if clusterHasIssues(cluster.Nodes, cluster.Pods, cluster.Deployments) { + cluster.Status = "degraded" + } else { + cluster.Status = "online" + } + } + + if data.ConnectionHealth != nil { + data.ConnectionHealth[kubernetesConnectionPrefix+cluster.ID] = cluster.Status != "offline" + } + } +} + func updateDockerHosts(data *models.StateSnapshot, config MockConfig) { if len(data.DockerHosts) == 0 { return diff --git a/internal/mock/integration.go b/internal/mock/integration.go index 42322897e..22bb9c3d5 100644 --- a/internal/mock/integration.go +++ b/internal/mock/integration.go @@ -91,6 +91,10 @@ func enableMockMode(fromInit bool) { Int("host_agents", config.GenericHostCount). Int("docker_hosts", config.DockerHostCount). Int("docker_containers_per_host", config.DockerContainersPerHost). + Int("k8s_clusters", config.K8sClusterCount). + Int("k8s_nodes_per_cluster", config.K8sNodesPerCluster). + Int("k8s_pods_per_cluster", config.K8sPodsPerCluster). + Int("k8s_deployments_per_cluster", config.K8sDeploymentsPerCluster). Bool("random_metrics", config.RandomMetrics). Float64("stopped_percent", config.StoppedPercent). Msg("Mock mode enabled") @@ -209,6 +213,30 @@ func LoadMockConfig() MockConfig { } } + if val := os.Getenv("PULSE_MOCK_K8S_CLUSTERS"); val != "" { + if n, err := strconv.Atoi(val); err == nil && n >= 0 { + config.K8sClusterCount = n + } + } + + if val := os.Getenv("PULSE_MOCK_K8S_NODES"); val != "" { + if n, err := strconv.Atoi(val); err == nil && n >= 0 { + config.K8sNodesPerCluster = n + } + } + + if val := os.Getenv("PULSE_MOCK_K8S_PODS"); val != "" { + if n, err := strconv.Atoi(val); err == nil && n >= 0 { + config.K8sPodsPerCluster = n + } + } + + if val := os.Getenv("PULSE_MOCK_K8S_DEPLOYMENTS"); val != "" { + if n, err := strconv.Atoi(val); err == nil && n >= 0 { + config.K8sDeploymentsPerCluster = n + } + } + if val := os.Getenv("PULSE_MOCK_RANDOM_METRICS"); val != "" { config.RandomMetrics = val == "true" } @@ -239,6 +267,10 @@ func SetMockConfig(cfg MockConfig) { Int("lxcs_per_node", cfg.LXCsPerNode). Int("docker_hosts", cfg.DockerHostCount). Int("docker_containers_per_host", cfg.DockerContainersPerHost). + Int("k8s_clusters", cfg.K8sClusterCount). + Int("k8s_nodes_per_cluster", cfg.K8sNodesPerCluster). + Int("k8s_pods_per_cluster", cfg.K8sPodsPerCluster). + Int("k8s_deployments_per_cluster", cfg.K8sDeploymentsPerCluster). Bool("random_metrics", cfg.RandomMetrics). Float64("stopped_percent", cfg.StoppedPercent). 
Msg("Mock configuration updated") @@ -301,27 +333,67 @@ func GetMockAlertHistory(limit int) []models.Alert { } func cloneState(state models.StateSnapshot) models.StateSnapshot { + kubernetesClusters := make([]models.KubernetesCluster, len(state.KubernetesClusters)) + for i, cluster := range state.KubernetesClusters { + clusterCopy := cluster + + clusterCopy.Nodes = append([]models.KubernetesNode(nil), cluster.Nodes...) + + clusterCopy.Pods = make([]models.KubernetesPod, len(cluster.Pods)) + for j, pod := range cluster.Pods { + podCopy := pod + + if pod.Labels != nil { + labelsCopy := make(map[string]string, len(pod.Labels)) + for k, v := range pod.Labels { + labelsCopy[k] = v + } + podCopy.Labels = labelsCopy + } + + podCopy.Containers = append([]models.KubernetesPodContainer(nil), pod.Containers...) + clusterCopy.Pods[j] = podCopy + } + + clusterCopy.Deployments = make([]models.KubernetesDeployment, len(cluster.Deployments)) + for j, dep := range cluster.Deployments { + depCopy := dep + if dep.Labels != nil { + labelsCopy := make(map[string]string, len(dep.Labels)) + for k, v := range dep.Labels { + labelsCopy[k] = v + } + depCopy.Labels = labelsCopy + } + clusterCopy.Deployments[j] = depCopy + } + + kubernetesClusters[i] = clusterCopy + } + copyState := models.StateSnapshot{ - Nodes: append([]models.Node(nil), state.Nodes...), - VMs: append([]models.VM(nil), state.VMs...), - Containers: append([]models.Container(nil), state.Containers...), - DockerHosts: append([]models.DockerHost(nil), state.DockerHosts...), - Hosts: append([]models.Host(nil), state.Hosts...), - PMGInstances: append([]models.PMGInstance(nil), state.PMGInstances...), - Storage: append([]models.Storage(nil), state.Storage...), - CephClusters: append([]models.CephCluster(nil), state.CephClusters...), - PhysicalDisks: append([]models.PhysicalDisk(nil), state.PhysicalDisks...), - PBSInstances: append([]models.PBSInstance(nil), state.PBSInstances...), - PBSBackups: append([]models.PBSBackup(nil), state.PBSBackups...), - PMGBackups: append([]models.PMGBackup(nil), state.PMGBackups...), - ReplicationJobs: append([]models.ReplicationJob(nil), state.ReplicationJobs...), - Metrics: append([]models.Metric(nil), state.Metrics...), - Performance: state.Performance, - Stats: state.Stats, - ActiveAlerts: append([]models.Alert(nil), state.ActiveAlerts...), - RecentlyResolved: append([]models.ResolvedAlert(nil), state.RecentlyResolved...), - LastUpdate: state.LastUpdate, - ConnectionHealth: make(map[string]bool, len(state.ConnectionHealth)), + Nodes: append([]models.Node(nil), state.Nodes...), + VMs: append([]models.VM(nil), state.VMs...), + Containers: append([]models.Container(nil), state.Containers...), + DockerHosts: append([]models.DockerHost(nil), state.DockerHosts...), + KubernetesClusters: kubernetesClusters, + RemovedKubernetesClusters: append([]models.RemovedKubernetesCluster(nil), state.RemovedKubernetesClusters...), + Hosts: append([]models.Host(nil), state.Hosts...), + PMGInstances: append([]models.PMGInstance(nil), state.PMGInstances...), + Storage: append([]models.Storage(nil), state.Storage...), + CephClusters: append([]models.CephCluster(nil), state.CephClusters...), + PhysicalDisks: append([]models.PhysicalDisk(nil), state.PhysicalDisks...), + PBSInstances: append([]models.PBSInstance(nil), state.PBSInstances...), + PBSBackups: append([]models.PBSBackup(nil), state.PBSBackups...), + PMGBackups: append([]models.PMGBackup(nil), state.PMGBackups...), + ReplicationJobs: append([]models.ReplicationJob(nil), 
state.ReplicationJobs...), + Metrics: append([]models.Metric(nil), state.Metrics...), + Performance: state.Performance, + Stats: state.Stats, + ActiveAlerts: append([]models.Alert(nil), state.ActiveAlerts...), + RecentlyResolved: append([]models.ResolvedAlert(nil), state.RecentlyResolved...), + LastUpdate: state.LastUpdate, + ConnectionHealth: make(map[string]bool, len(state.ConnectionHealth)), } copyState.PVEBackups = models.PVEBackups{ diff --git a/internal/models/models_frontend.go b/internal/models/models_frontend.go index c7d0d59f2..9ecb18c23 100644 --- a/internal/models/models_frontend.go +++ b/internal/models/models_frontend.go @@ -523,8 +523,8 @@ type StateFrontend struct { Containers []ContainerFrontend `json:"containers"` DockerHosts []DockerHostFrontend `json:"dockerHosts"` RemovedDockerHosts []RemovedDockerHostFrontend `json:"removedDockerHosts"` - KubernetesClusters []KubernetesClusterFrontend `json:"kubernetesClusters,omitempty"` - RemovedKubernetesClusters []RemovedKubernetesClusterFrontend `json:"removedKubernetesClusters,omitempty"` + KubernetesClusters []KubernetesClusterFrontend `json:"kubernetesClusters"` + RemovedKubernetesClusters []RemovedKubernetesClusterFrontend `json:"removedKubernetesClusters"` Hosts []HostFrontend `json:"hosts"` Storage []StorageFrontend `json:"storage"` CephClusters []CephClusterFrontend `json:"cephClusters"` diff --git a/mock.env b/mock.env index 8fe1baffa..8d180ef01 100644 --- a/mock.env +++ b/mock.env @@ -2,5 +2,12 @@ PULSE_MOCK_MODE=false PULSE_MOCK_NODES=7 PULSE_MOCK_VMS_PER_NODE=5 PULSE_MOCK_LXCS_PER_NODE=8 +PULSE_MOCK_DOCKER_HOSTS=3 +PULSE_MOCK_DOCKER_CONTAINERS=12 +PULSE_MOCK_GENERIC_HOSTS=4 +PULSE_MOCK_K8S_CLUSTERS=2 +PULSE_MOCK_K8S_NODES=4 +PULSE_MOCK_K8S_PODS=30 +PULSE_MOCK_K8S_DEPLOYMENTS=12 PULSE_MOCK_RANDOM_METRICS=true PULSE_MOCK_STOPPED_PERCENT=20
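If regression coverage is wanted later, a sketch along these lines could pin down the new generator and the deep-copy guarantee called out in the commit message. It assumes only names introduced in this patch (GenerateMockData, the K8s* fields on MockConfig, the unexported cloneState); the test itself is hypothetical and would live in internal/mock:

package mock

import "testing"

func TestKubernetesMockData(t *testing.T) {
	cfg := DefaultConfig
	cfg.K8sClusterCount = 3

	snap := GenerateMockData(cfg)
	if got := len(snap.KubernetesClusters); got != 3 {
		t.Fatalf("expected 3 clusters, got %d", got)
	}
	for _, cluster := range snap.KubernetesClusters {
		// Every cluster is registered under the "kubernetes-" connection prefix.
		if _, ok := snap.ConnectionHealth["kubernetes-"+cluster.ID]; !ok {
			t.Errorf("cluster %s missing connection health entry", cluster.ID)
		}
	}

	// cloneState must deep-copy nested K8s slices so UpdateMetrics cannot race
	// with readers holding a previously returned snapshot.
	clone := cloneState(snap)
	if len(clone.KubernetesClusters[0].Pods) > 0 {
		clone.KubernetesClusters[0].Pods[0].Phase = "mutated-by-test"
		if snap.KubernetesClusters[0].Pods[0].Phase == "mutated-by-test" {
			t.Error("cloneState shared the pod backing array with its source")
		}
	}
}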