Surface ZFS pool membership on physical disks

This commit is contained in:
rcourtman 2026-04-23 20:38:33 +01:00
parent 795c16755b
commit 386099aeee
13 changed files with 842 additions and 21 deletions

View file

@ -1252,6 +1252,12 @@ func (a *Agent) collectSMARTData(ctx context.Context) []agentshost.DiskSMART {
result = append(result, entry)
}
if pools, err := ZFSDiskPoolMap(ctx); err != nil {
a.logger.Debug().Err(err).Msg("Failed to collect ZFS pool membership for SMART annotation")
} else if len(pools) > 0 {
annotateSMARTWithZFSPools(result, pools)
}
a.logger.Debug().
Int("diskCount", len(result)).
Msg("Collected S.M.A.R.T. disk data")

333
internal/hostagent/zfs.go Normal file
View file

@ -0,0 +1,333 @@
package hostagent
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
agentshost "github.com/rcourtman/pulse-go-rewrite/pkg/agents/host"
)
var (
	// Test seams: package-level indirections so unit tests can stub out
	// binary discovery (LookPath), existence checks (Stat), and command
	// execution without touching the real system.
	zpoolLookPath = exec.LookPath
	zpoolStat = os.Stat
	// zpoolRun executes the zpool binary and returns its stdout.
	zpoolRun = func(ctx context.Context, name string, args ...string) ([]byte, error) {
		cmd := exec.CommandContext(ctx, name, args...)
		return cmd.Output()
	}
)
// commonZpoolPaths lists absolute locations where the zpool binary is
// typically installed; resolveZpoolPath probes these before falling back
// to a $PATH lookup.
var commonZpoolPaths = []string{
	"/usr/sbin/zpool",
	"/sbin/zpool",
	"/usr/local/sbin/zpool",
	"/usr/bin/zpool",
	"/bin/zpool",
}
// ZFSDiskPoolMap returns a map from a pool-member device identifier (as
// reported by `zpool status -P`) to the name of the pool it belongs to.
// Keys are inserted in multiple normalized forms so callers can match
// against /dev paths, bare device names, by-id paths, or partition names.
// Returns an empty map if zpool is not installed or no pools are present.
func ZFSDiskPoolMap(ctx context.Context) (map[string]string, error) {
	binPath, err := resolveZpoolPath()
	if err != nil {
		// zpool is not installed: treat as "no pools" rather than an error.
		return map[string]string{}, nil
	}
	listCtx, cancelList := context.WithTimeout(ctx, 3*time.Second)
	defer cancelList()
	rawList, err := zpoolRun(listCtx, binPath, "list", "-H", "-o", "name")
	if err != nil {
		return nil, fmt.Errorf("zpool list: %w", err)
	}
	mapping := make(map[string]string)
	for _, entry := range strings.Split(string(rawList), "\n") {
		poolName := strings.TrimSpace(entry)
		if poolName == "" {
			continue
		}
		leaves, memberErr := collectZpoolMembers(ctx, binPath, poolName)
		if memberErr != nil {
			// Best effort: skip pools whose status output cannot be read.
			continue
		}
		for _, leaf := range leaves {
			for _, candidate := range normalizeZFSMemberKeys(leaf) {
				if _, taken := mapping[candidate]; !taken {
					mapping[candidate] = poolName
				}
			}
		}
	}
	return mapping, nil
}
// collectZpoolMembers runs `zpool status -P` for a single pool and returns
// the leaf-device names found in its config tree.
func collectZpoolMembers(ctx context.Context, zpoolPath, pool string) ([]string, error) {
	statusCtx, cancelStatus := context.WithTimeout(ctx, 5*time.Second)
	defer cancelStatus()
	raw, err := zpoolRun(statusCtx, zpoolPath, "status", "-P", pool)
	if err != nil {
		return nil, fmt.Errorf("zpool status -P %s: %w", pool, err)
	}
	return parseZpoolStatusMembers(pool, string(raw)), nil
}
// parseZpoolStatusMembers extracts leaf-device names from the config
// section of `zpool status -P` output. Header lines such as "pool:",
// "state:" or "errors:" delimit the section; within it, every first
// token that is neither the column header ("NAME"), the pool name
// itself, nor a vdev keyword (mirror-N, raidz*, logs, cache, ...) is
// treated as a leaf device. Duplicates are dropped, order preserved.
func parseZpoolStatusMembers(pool, output string) []string {
	var members []string
	seen := make(map[string]struct{})
	inConfig := false
	for _, rawLine := range strings.Split(output, "\n") {
		line := strings.TrimSpace(rawLine)
		if line == "" {
			continue
		}
		lower := strings.ToLower(line)
		switch {
		case strings.HasPrefix(lower, "config:"):
			inConfig = true
			continue
		case strings.HasPrefix(lower, "errors:"),
			strings.HasPrefix(lower, "pool:"),
			strings.HasPrefix(lower, "state:"),
			strings.HasPrefix(lower, "scan:"),
			strings.HasPrefix(lower, "status:"),
			strings.HasPrefix(lower, "action:"),
			strings.HasPrefix(lower, "see:"):
			inConfig = false
			continue
		}
		if !inConfig {
			continue
		}
		tokens := strings.Fields(line)
		if len(tokens) == 0 {
			continue
		}
		candidate := tokens[0]
		if candidate == "NAME" || candidate == pool || isZFSVdevKeyword(candidate) {
			continue
		}
		if _, dup := seen[candidate]; dup {
			continue
		}
		seen[candidate] = struct{}{}
		members = append(members, candidate)
	}
	return members
}
// annotateSMARTWithZFSPools stamps each SMART entry's Pool field when a
// matching leaf device is found in the supplied pool map. Entries that
// already carry a non-empty Pool are skipped so callers can pre-populate
// from other sources (e.g. Unraid topology) without being overwritten.
func annotateSMARTWithZFSPools(smartData []agentshost.DiskSMART, pools map[string]string) {
	if len(smartData) == 0 || len(pools) == 0 {
		return
	}
	for i := range smartData {
		entry := &smartData[i]
		if entry.Pool != "" {
			continue
		}
		if pool := poolForSMARTEntry(pools, *entry); pool != "" {
			entry.Pool = pool
		}
	}
}
// poolForSMARTEntry resolves the ZFS pool for a single SMART entry by
// trying, in order: the normalized forms of its device path, the disk
// serial, and the WWN (with any leading "0x" removed). Keys are
// deduplicated so each candidate is consulted at most once. Returns ""
// when nothing matches.
func poolForSMARTEntry(pools map[string]string, entry agentshost.DiskSMART) string {
	tried := make(map[string]struct{})
	lookup := func(candidate string) string {
		candidate = strings.ToLower(strings.TrimSpace(candidate))
		if candidate == "" {
			return ""
		}
		if _, done := tried[candidate]; done {
			return ""
		}
		tried[candidate] = struct{}{}
		return pools[candidate]
	}
	for _, candidate := range normalizeZFSMemberKeys(entry.Device) {
		if pool := lookup(candidate); pool != "" {
			return pool
		}
	}
	if pool := lookup(entry.Serial); pool != "" {
		return pool
	}
	if entry.WWN != "" {
		wwn := strings.TrimPrefix(strings.ToLower(strings.TrimSpace(entry.WWN)), "0x")
		if pool := lookup(wwn); pool != "" {
			return pool
		}
	}
	return ""
}
// isZFSVdevKeyword reports whether name is a non-leaf token in zpool
// status output: a vdev class (logs, cache, spares, special, dedup) or a
// grouping vdev such as mirror-0, raidz2-1, or draid1-0. Matching is
// case-insensitive.
func isZFSVdevKeyword(name string) bool {
	switch lower := strings.ToLower(name); {
	case lower == "logs", lower == "log", lower == "cache",
		lower == "spares", lower == "spare", lower == "special", lower == "dedup":
		return true
	case strings.HasPrefix(lower, "mirror"),
		strings.HasPrefix(lower, "raidz"),
		strings.HasPrefix(lower, "draid"):
		return true
	default:
		return false
	}
}
// normalizeZFSMemberKeys derives candidate map keys from a leaf-device name
// so that a caller with a plain /dev/sda, a by-id path, or a bare "sda"
// can all find the pool. All keys are lowercased and trimmed; by-id names
// additionally contribute their serial fragment.
func normalizeZFSMemberKeys(raw string) []string {
	name := strings.ToLower(strings.TrimSpace(raw))
	if name == "" {
		return nil
	}
	keySet := map[string]struct{}{name: {}}
	short := strings.TrimPrefix(name, "/dev/")
	for _, prefix := range []string{"disk/by-id/", "disk/by-path/", "disk/by-uuid/"} {
		short = strings.TrimPrefix(short, prefix)
	}
	if short != "" {
		keySet[short] = struct{}{}
	}
	if base := stripZFSPartitionSuffix(short); base != "" && base != short {
		keySet[base] = struct{}{}
	}
	if strings.HasPrefix(short, "ata-") ||
		strings.HasPrefix(short, "scsi-") ||
		strings.HasPrefix(short, "nvme-") ||
		strings.HasPrefix(short, "wwn-") {
		if serial := zfsSerialFromByID(short); serial != "" {
			keySet[serial] = struct{}{}
		}
	}
	keys := make([]string, 0, len(keySet))
	for key := range keySet {
		keys = append(keys, key)
	}
	return keys
}
// stripZFSPartitionSuffix removes a trailing partition identifier from a
// device name, returning the base disk name. Three forms are handled, in
// this order:
//   - by-id "-partN" suffixes: "ata-Foo_SER-part3" → "ata-Foo_SER"
//   - "p"-separator partitions on names whose base already ends in a
//     digit: "nvme0n1p3" → "nvme0n1", "mmcblk0p1" → "mmcblk0"
//   - plain trailing digits after an all-alphabetic prefix: "sda3" → "sda"
//
// Names that match none of these forms are returned unchanged.
func stripZFSPartitionSuffix(name string) string {
	if name == "" {
		return ""
	}
	// A literal trailing "-part" with no digits is not a partition suffix.
	if strings.HasSuffix(name, "-part") {
		return name
	}
	if idx := strings.LastIndex(name, "-part"); idx > 0 {
		suffix := name[idx+len("-part"):]
		if allZFSDigits(suffix) {
			return name[:idx]
		}
	}
	if idx := strings.LastIndex(name, "p"); idx > 0 {
		suffix := name[idx+1:]
		if suffix != "" && allZFSDigits(suffix) {
			prev := name[:idx]
			// Only treat "p<digits>" as a partition when the character
			// before the "p" is itself a digit (nvme0n1p3, mmcblk0p1);
			// otherwise the "p" is part of the device name.
			if len(prev) > 0 && isZFSDigit(prev[len(prev)-1]) {
				return prev
			}
		}
	}
	// sdX-style: strip trailing digits only when the remaining prefix is
	// purely alphabetic, so names like "nvme0n1" are left intact.
	i := len(name)
	for i > 0 && isZFSDigit(name[i-1]) {
		i--
	}
	if i == len(name) || i == 0 {
		return name
	}
	prefix := name[:i]
	for j := 0; j < len(prefix); j++ {
		c := prefix[j]
		if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) {
			return name
		}
	}
	return prefix
}
// zfsSerialFromByID extracts the serial-like trailing token from a
// by-id style device name: the part after the last underscore, or the
// hex portion of a wwn-* name. An optional "-partN" suffix is dropped
// first. Returns "" when no such token exists.
func zfsSerialFromByID(name string) string {
	base := strings.TrimSpace(name)
	if cut := strings.LastIndex(base, "-part"); cut > 0 {
		base = base[:cut]
	}
	if cut := strings.LastIndex(base, "_"); cut > 0 {
		return base[cut+1:]
	}
	for _, prefix := range []string{"wwn-0x", "wwn-"} {
		if strings.HasPrefix(base, prefix) {
			return strings.TrimPrefix(base, prefix)
		}
	}
	return ""
}
// allZFSDigits reports whether s is non-empty and consists entirely of
// ASCII digits.
func allZFSDigits(s string) bool {
	if len(s) == 0 {
		return false
	}
	for _, b := range []byte(s) {
		if !isZFSDigit(b) {
			return false
		}
	}
	return true
}
// isZFSDigit reports whether b is an ASCII digit.
func isZFSDigit(b byte) bool { return '0' <= b && b <= '9' }
// resolveZpoolPath mirrors the mdadm path-resolution pattern: prefer common
// absolute paths, then fall back to PATH. The returned path is always
// absolute and confirmed present via zpoolStat.
func resolveZpoolPath() (string, error) {
	for _, candidate := range commonZpoolPaths {
		candidate = filepath.Clean(candidate)
		if !filepath.IsAbs(candidate) {
			continue
		}
		if _, err := zpoolStat(candidate); err == nil {
			return candidate, nil
		}
	}
	path, err := zpoolLookPath("zpool")
	if err != nil {
		// Wrap (rather than discard) the LookPath error so callers can
		// inspect the underlying cause with errors.Is/As.
		return "", fmt.Errorf("zpool binary not found in PATH or common locations: %w", err)
	}
	path = filepath.Clean(path)
	if !filepath.IsAbs(path) {
		return "", fmt.Errorf("zpool path is not absolute: %q", path)
	}
	if _, err := zpoolStat(path); err != nil {
		return "", fmt.Errorf("zpool path unavailable: %w", err)
	}
	return path, nil
}

View file

@ -0,0 +1,102 @@
package hostagent
import (
"testing"
)
// TestParseZpoolStatusMembers verifies leaf extraction from a full
// `zpool status -P` transcript, including logs and cache sections.
func TestParseZpoolStatusMembers(t *testing.T) {
	output := ` pool: tank
state: ONLINE
scan: none requested
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500001Z-part3 ONLINE 0 0 0
/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500002Z-part3 ONLINE 0 0 0
logs
/dev/nvme0n1p1 ONLINE 0 0 0
cache
/dev/sdc ONLINE 0 0 0
errors: No known data errors
`
	want := []string{
		"/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500001Z-part3",
		"/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500002Z-part3",
		"/dev/nvme0n1p1",
		"/dev/sdc",
	}
	got := parseZpoolStatusMembers("tank", output)
	if len(got) != len(want) {
		t.Fatalf("member count: got %d (%v), want %d (%v)", len(got), got, len(want), want)
	}
	for i, w := range want {
		if got[i] != w {
			t.Fatalf("member[%d]: got %q, want %q", i, got[i], w)
		}
	}
}
// TestParseZpoolStatusMembersSkipsPoolAndVdevKeywords confirms that the
// pool name, grouping vdevs (raidz2-0), and class headers (spares) are
// excluded while spare members are kept.
func TestParseZpoolStatusMembersSkipsPoolAndVdevKeywords(t *testing.T) {
	output := `config:
NAME STATE READ WRITE CKSUM
rpool ONLINE 0 0 0
raidz2-0 ONLINE 0 0 0
sda ONLINE 0 0 0
sdb ONLINE 0 0 0
sdc ONLINE 0 0 0
spares
sdd AVAIL
errors: No known data errors
`
	want := []string{"sda", "sdb", "sdc", "sdd"}
	got := parseZpoolStatusMembers("rpool", output)
	if len(got) != len(want) {
		t.Fatalf("member count: got %d (%v), want %d", len(got), got, len(want))
	}
	for i, w := range want {
		if got[i] != w {
			t.Fatalf("member[%d]: got %q, want %q", i, got[i], w)
		}
	}
}
// TestNormalizeZFSMemberKeysCoverage checks that a by-id partition path
// yields the full path, stripped path, base device, and serial keys.
func TestNormalizeZFSMemberKeysCoverage(t *testing.T) {
	got := normalizeZFSMemberKeys("/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500001Z-part3")
	gotSet := make(map[string]struct{}, len(got))
	for _, key := range got {
		gotSet[key] = struct{}{}
	}
	for _, want := range []string{
		"/dev/disk/by-id/ata-samsung_ssd_870_evo_1tb_s5y2nx0r500001z-part3",
		"ata-samsung_ssd_870_evo_1tb_s5y2nx0r500001z-part3",
		"ata-samsung_ssd_870_evo_1tb_s5y2nx0r500001z",
		"s5y2nx0r500001z",
	} {
		if _, ok := gotSet[want]; !ok {
			t.Fatalf("missing key %q in %v", want, got)
		}
	}
}
// TestStripZFSPartitionSuffix exercises sdX, nvme p-separator, and by-id
// "-partN" stripping, plus identity cases.
func TestStripZFSPartitionSuffix(t *testing.T) {
	for in, want := range map[string]string{
		"sda":                  "sda",
		"sda3":                 "sda",
		"nvme0n1":              "nvme0n1",
		"nvme0n1p1":            "nvme0n1",
		"nvme10n1p3":           "nvme10n1",
		"ata-Foo_SERIAL-part3": "ata-Foo_SERIAL",
		"":                     "",
	} {
		if got := stripZFSPartitionSuffix(in); got != want {
			t.Fatalf("stripZFSPartitionSuffix(%q) = %q, want %q", in, got, want)
		}
	}
}

View file

@ -368,6 +368,7 @@ type HostDiskSMART struct {
Temperature int `json:"temperature"` // Temperature in Celsius
Health string `json:"health,omitempty"` // PASSED, FAILED, UNKNOWN
Standby bool `json:"standby,omitempty"` // True if disk was in standby
Pool string `json:"pool,omitempty"` // ZFS pool this disk belongs to (empty if not a ZFS member)
Attributes *SMARTAttributes `json:"attributes,omitempty"`
}
@ -1210,14 +1211,15 @@ type PhysicalDisk struct {
DevPath string `json:"devPath"` // /dev/nvme0n1, /dev/sda
Model string `json:"model"`
Serial string `json:"serial"`
WWN string `json:"wwn"` // World Wide Name
Type string `json:"type"` // nvme, sata, sas
Size int64 `json:"size"` // bytes
Health string `json:"health"` // PASSED, FAILED, UNKNOWN
Wearout int `json:"wearout"` // SSD wear metric from Proxmox (0-100, -1 when unavailable)
Temperature int `json:"temperature"` // Celsius (if available)
RPM int `json:"rpm"` // 0 for SSDs
Used string `json:"used"` // Filesystem or partition usage
WWN string `json:"wwn"` // World Wide Name
Type string `json:"type"` // nvme, sata, sas
Size int64 `json:"size"` // bytes
Health string `json:"health"` // PASSED, FAILED, UNKNOWN
Wearout int `json:"wearout"` // SSD wear metric from Proxmox (0-100, -1 when unavailable)
Temperature int `json:"temperature"` // Celsius (if available)
RPM int `json:"rpm"` // 0 for SSDs
Used string `json:"used"` // Filesystem or partition usage
StorageGroup string `json:"storageGroup"` // Pool/VG/array this disk belongs to (e.g. ZFS pool name); empty if not matched
SmartAttributes *SMARTAttributes `json:"smartAttributes,omitempty"`
LastChecked time.Time `json:"lastChecked"`
}

View file

@ -124,6 +124,7 @@ func convertUnifiedHostSMART(smart []unifiedresources.HostSMARTMeta) []models.Ho
Temperature: disk.Temperature,
Health: disk.Health,
Standby: disk.Standby,
Pool: disk.Pool,
Attributes: cloneSMARTAttributesModel(disk.Attributes),
}
}

View file

@ -3310,6 +3310,7 @@ func hostSensorsFromReadStateView(sensors *unifiedresources.HostSensorMeta) mode
Temperature: smart.Temperature,
Health: smart.Health,
Standby: smart.Standby,
Pool: smart.Pool,
Attributes: smartAttributesCopy(smart.Attributes),
})
}

View file

@ -365,6 +365,7 @@ func convertAgentSMARTToModels(smart []agentshost.DiskSMART) []models.HostDiskSM
Temperature: disk.Temperature,
Health: disk.Health,
Standby: disk.Standby,
Pool: disk.Pool,
}
if disk.Attributes != nil {
entry.Attributes = convertAgentSMARTAttributes(disk.Attributes)

View file

@ -798,6 +798,7 @@ func (m *Monitor) maybePollPhysicalDisksAsync(
var allDisks []models.PhysicalDisk
polledNodes := make(map[string]bool) // Track which nodes we successfully polled
zfsPoolingEnabled := zfsMonitoringEnabledFromEnv()
for _, node := range nodeList {
// Check if context timed out
@ -843,6 +844,21 @@ func (m *Monitor) maybePollPhysicalDisksAsync(
// Mark this node as successfully polled
polledNodes[node.Node] = true
// Build a disk→pool assignment for this node so each physical disk
// knows which ZFS pool (if any) it belongs to. Errors are
// non-fatal; we simply leave StorageGroup empty.
var poolAssignment *diskPoolAssignment
if zfsPoolingEnabled {
if pools, pErr := pveClient.GetZFSPoolsWithDetails(diskCtx, node.Node); pErr == nil {
poolAssignment = buildDiskPoolAssignment(pools)
} else {
log.Debug().
Err(pErr).
Str("node", node.Node).
Msg("Could not fetch ZFS pool details for disk→pool mapping; StorageGroup will be empty")
}
}
// Record each disk; alert evaluation happens after host-agent SMART merges
// so the canonical disk view includes post-merge health/wearout data.
for _, disk := range disks {
@ -863,6 +879,9 @@ func (m *Monitor) maybePollPhysicalDisksAsync(
Used: disk.Used,
LastChecked: time.Now(),
}
if poolAssignment != nil {
physicalDisk.StorageGroup = poolAssignment.lookup(physicalDisk)
}
allDisks = append(allDisks, physicalDisk)
}

View file

@ -0,0 +1,222 @@
package monitoring
import (
"strings"
"github.com/rcourtman/pulse-go-rewrite/internal/models"
"github.com/rcourtman/pulse-go-rewrite/pkg/proxmox"
)
// diskPoolAssignment maps a disk's canonical key (lowercase, prefix-stripped) to
// its owning pool. Keys are derived from leaf-device names in ZFS pool trees and
// are consulted with several candidate forms of a physical disk's identity.
// Construct via buildDiskPoolAssignment; a nil *diskPoolAssignment is safe to
// call lookup on and always reports "no pool".
type diskPoolAssignment struct {
	keyToPool map[string]string // normalised leaf name / by-id path → pool name
	serialPool map[string]string // lowercase serial fragment → pool name (for by-id matches)
}
// buildDiskPoolAssignment flattens the ZFS pool trees returned by Proxmox and
// indexes every leaf device under several normalised keys so a physical disk
// can be matched regardless of whether zpool references it by /dev path,
// /dev/disk/by-id path, or partition name. Pools with empty names are skipped.
func buildDiskPoolAssignment(pools []proxmox.ZFSPoolInfo) *diskPoolAssignment {
	out := &diskPoolAssignment{
		keyToPool:  map[string]string{},
		serialPool: map[string]string{},
	}
	for _, pool := range pools {
		if pool.Name == "" {
			continue
		}
		for _, root := range pool.Devices {
			out.indexLeaves(root, pool.Name)
		}
	}
	return out
}
// indexLeaves walks a pool-device subtree and records every leaf device
// under its normalised keys (and serial fragment, when one can be derived
// from a by-id name). First writer wins: keys already claimed are kept.
func (a *diskPoolAssignment) indexLeaves(dev proxmox.ZFSPoolDevice, poolName string) {
	if name := strings.TrimSpace(dev.Name); dev.Leaf == 1 && name != "" {
		for _, key := range normaliseLeafKeys(name) {
			if _, taken := a.keyToPool[key]; !taken {
				a.keyToPool[key] = poolName
			}
		}
		if serial := serialFromByID(name); serial != "" {
			if _, taken := a.serialPool[serial]; !taken {
				a.serialPool[serial] = poolName
			}
		}
	}
	for _, child := range dev.Children {
		a.indexLeaves(child, poolName)
	}
}
// lookup returns the pool name a disk belongs to, or "" if no match.
// Device-path keys are consulted first, then the serial, then the WWN
// with any leading "0x" removed. Safe on a nil receiver.
func (a *diskPoolAssignment) lookup(disk models.PhysicalDisk) string {
	if a == nil {
		return ""
	}
	for _, key := range diskLookupKeys(disk) {
		if pool, ok := a.keyToPool[key]; ok {
			return pool
		}
	}
	if serial := strings.ToLower(strings.TrimSpace(disk.Serial)); serial != "" {
		if pool, ok := a.serialPool[serial]; ok {
			return pool
		}
	}
	if wwn := strings.TrimPrefix(strings.ToLower(strings.TrimSpace(disk.WWN)), "0x"); wwn != "" {
		if pool, ok := a.serialPool[wwn]; ok {
			return pool
		}
	}
	return ""
}
// normaliseLeafKeys derives candidate lookup keys from a zpool leaf-device name.
// Keys are lowercased and stripped of common prefixes so they can match against
// physical-disk DevPath variants; a partition-stripped base name is also added.
func normaliseLeafKeys(raw string) []string {
	name := strings.ToLower(strings.TrimSpace(raw))
	if name == "" {
		return nil
	}
	keySet := map[string]struct{}{name: {}}
	short := strings.TrimPrefix(name, "/dev/")
	for _, prefix := range []string{"disk/by-id/", "disk/by-path/", "disk/by-uuid/"} {
		short = strings.TrimPrefix(short, prefix)
	}
	if short != "" {
		keySet[short] = struct{}{}
	}
	if base := stripPartitionSuffix(short); base != "" {
		keySet[base] = struct{}{}
	}
	keys := make([]string, 0, len(keySet))
	for key := range keySet {
		keys = append(keys, key)
	}
	return keys
}
// diskLookupKeys derives candidate lookup keys from a physical disk's
// identifying fields. Order matters: the most specific matches come first
// (full devpath, then the bare name, then the partition-stripped base).
// The original implementation collected keys into a map and returned them
// in random iteration order, contradicting that contract; keys are now
// returned in deterministic insertion order.
func diskLookupKeys(disk models.PhysicalDisk) []string {
	devPath := strings.ToLower(strings.TrimSpace(disk.DevPath))
	if devPath == "" {
		return nil
	}
	seen := make(map[string]struct{}, 3)
	keys := make([]string, 0, 3)
	add := func(k string) {
		if k == "" {
			return
		}
		if _, dup := seen[k]; dup {
			return
		}
		seen[k] = struct{}{}
		keys = append(keys, k)
	}
	add(devPath)
	base := strings.TrimPrefix(devPath, "/dev/")
	add(base)
	add(stripPartitionSuffix(base))
	return keys
}
// stripPartitionSuffix removes a trailing partition identifier. Handles
// by-id "-partN" suffixes (ata-Foo_SERIAL-part3 → ata-Foo_SERIAL), the
// p-separator style used by devices whose name already contains digits
// (nvme0n1p3 → nvme0n1, mmcblk0p1 → mmcblk0), and sdX-style trailing
// digits after an all-alphabetic prefix (sda3 → sda). Names matching
// none of these forms are returned unchanged.
//
// The "-partN" case was previously missing here (unlike the host-agent
// twin of this helper), so by-id leaf names never yielded a base-device
// key; adding it only ever produces additional candidate keys.
func stripPartitionSuffix(name string) string {
	if name == "" {
		return ""
	}
	// by-id form: "-part" followed by digits marks a partition. A bare
	// trailing "-part" has an empty (non-digit) suffix and is left alone.
	if idx := strings.LastIndex(name, "-part"); idx > 0 {
		if suffix := name[idx+len("-part"):]; allDigits(suffix) {
			return name[:idx]
		}
	}
	// p-separator form: the base name already contains a digit, and the
	// partition is appended after a literal "p". nvme0n1p3 → nvme0n1.
	if idx := strings.LastIndex(name, "p"); idx > 0 {
		suffix := name[idx+1:]
		if suffix != "" && allDigits(suffix) {
			prev := name[:idx]
			if len(prev) > 0 && isDigit(prev[len(prev)-1]) {
				return prev
			}
		}
	}
	// sdX / hdX form: strip trailing digits only when the remaining prefix
	// is all alphabetic. This avoids over-stripping device names that
	// carry digits as part of their identifier (nvme0n1, mmcblk0).
	i := len(name)
	for i > 0 && isDigit(name[i-1]) {
		i--
	}
	if i == len(name) || i == 0 {
		return name
	}
	prefix := name[:i]
	for j := 0; j < len(prefix); j++ {
		if !isAlpha(prefix[j]) {
			return name
		}
	}
	return prefix
}
// isAlpha reports whether b is an ASCII letter.
func isAlpha(b byte) bool {
	return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z')
}
// serialFromByID extracts a lowercase serial-like fragment from a by-id path.
// Proxmox commonly references ZFS members via /dev/disk/by-id/ata-MODEL_SERIAL
// or nvme-MODEL_SERIAL; the trailing underscore-separated token is the serial.
// wwn-* names yield their hex identifier. Any "-partN" suffix is dropped
// first, and non-by-id names yield "".
func serialFromByID(raw string) string {
	id := strings.ToLower(strings.TrimSpace(raw))
	id = strings.TrimPrefix(id, "/dev/")
	id = strings.TrimPrefix(id, "disk/by-id/")
	isByID := false
	for _, prefix := range []string{"ata-", "scsi-", "nvme-", "wwn-"} {
		if strings.HasPrefix(id, prefix) {
			isByID = true
			break
		}
	}
	if !isByID {
		return ""
	}
	// Drop an optional partition suffix such as "-part1".
	if cut := strings.LastIndex(id, "-part"); cut > 0 {
		id = id[:cut]
	}
	// The last underscore-separated token is typically the serial.
	if cut := strings.LastIndex(id, "_"); cut > 0 {
		return id[cut+1:]
	}
	if strings.HasPrefix(id, "wwn-0x") {
		return strings.TrimPrefix(id, "wwn-0x")
	}
	if strings.HasPrefix(id, "wwn-") {
		return strings.TrimPrefix(id, "wwn-")
	}
	return ""
}
// allDigits reports whether s is non-empty and composed solely of ASCII digits.
func allDigits(s string) bool {
	if len(s) == 0 {
		return false
	}
	for _, b := range []byte(s) {
		if !isDigit(b) {
			return false
		}
	}
	return true
}
// isDigit reports whether b is an ASCII digit.
func isDigit(b byte) bool { return '0' <= b && b <= '9' }

View file

@ -0,0 +1,125 @@
package monitoring
import (
"testing"
"github.com/rcourtman/pulse-go-rewrite/internal/models"
"github.com/rcourtman/pulse-go-rewrite/pkg/proxmox"
)
// TestDiskPoolAssignmentLookup covers serial, devpath, partition-stripped,
// and WWN matching paths, plus the no-match and blank-devpath cases.
func TestDiskPoolAssignmentLookup(t *testing.T) {
	pools := []proxmox.ZFSPoolInfo{
		{
			Name: "rpool",
			Devices: []proxmox.ZFSPoolDevice{
				{
					Name: "mirror-0",
					Children: []proxmox.ZFSPoolDevice{
						{Name: "/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500001Z-part3", Leaf: 1},
						{Name: "/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500002Z-part3", Leaf: 1},
					},
				},
			},
		},
		{
			Name: "tank",
			Devices: []proxmox.ZFSPoolDevice{
				{Name: "sdc", Leaf: 1},
				{Name: "/dev/nvme1n1p1", Leaf: 1},
			},
		},
		{
			Name: "scratch",
			Devices: []proxmox.ZFSPoolDevice{
				{Name: "wwn-0x50014ee2123456ab", Leaf: 1},
			},
		},
	}
	idx := buildDiskPoolAssignment(pools)
	for _, tc := range []struct {
		label string
		disk  models.PhysicalDisk
		want  string
	}{
		{label: "by-id match via serial token", disk: models.PhysicalDisk{DevPath: "/dev/sda", Serial: "S5Y2NX0R500001Z"}, want: "rpool"},
		{label: "short leaf name matches partition-stripped devpath", disk: models.PhysicalDisk{DevPath: "/dev/sdc"}, want: "tank"},
		{label: "nvme partition leaf matches devpath with partition stripping", disk: models.PhysicalDisk{DevPath: "/dev/nvme1n1"}, want: "tank"},
		{label: "wwn leaf matches via disk WWN", disk: models.PhysicalDisk{DevPath: "/dev/sdd", WWN: "0x50014ee2123456ab"}, want: "scratch"},
		{label: "disk with no pool membership returns empty", disk: models.PhysicalDisk{DevPath: "/dev/sde", Serial: "UNUSED1"}, want: ""},
		{label: "blank devpath returns empty", disk: models.PhysicalDisk{DevPath: ""}, want: ""},
	} {
		t.Run(tc.label, func(t *testing.T) {
			if got := idx.lookup(tc.disk); got != tc.want {
				t.Fatalf("lookup = %q, want %q", got, tc.want)
			}
		})
	}
}
// TestNilAssignmentLookupReturnsEmpty verifies the nil-receiver guard in lookup.
func TestNilAssignmentLookupReturnsEmpty(t *testing.T) {
	var nilAssignment *diskPoolAssignment
	if got := nilAssignment.lookup(models.PhysicalDisk{DevPath: "/dev/sda"}); got != "" {
		t.Fatalf("nil lookup = %q, want empty", got)
	}
}
// TestStripPartitionSuffix exercises sdX and nvme p-separator stripping,
// plus identity cases for non-partition names and the empty string.
func TestStripPartitionSuffix(t *testing.T) {
	for in, want := range map[string]string{
		"sda":        "sda",
		"sda3":       "sda",
		"nvme0n1":    "nvme0n1",
		"nvme0n1p1":  "nvme0n1",
		"nvme10n1p3": "nvme10n1",
		"":           "",
	} {
		if got := stripPartitionSuffix(in); got != want {
			t.Fatalf("stripPartitionSuffix(%q) = %q, want %q", in, got, want)
		}
	}
}
// TestSerialFromByID covers ata/nvme underscore serials, wwn hex IDs,
// partition-suffix stripping, and rejection of non-by-id paths.
func TestSerialFromByID(t *testing.T) {
	for in, want := range map[string]string{
		"/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500001Z":       "s5y2nx0r500001z",
		"/dev/disk/by-id/ata-Samsung_SSD_870_EVO_1TB_S5Y2NX0R500001Z-part3": "s5y2nx0r500001z",
		"nvme-INTEL_SSDPEKNW512G8_BTNH123456789":                            "btnh123456789",
		"wwn-0x50014ee2123456ab":                                            "50014ee2123456ab",
		"/dev/sda":                                                          "",
	} {
		if got := serialFromByID(in); got != want {
			t.Fatalf("serialFromByID(%q) = %q, want %q", in, got, want)
		}
	}
}

View file

@ -188,6 +188,7 @@ func resourceFromHost(host models.Host) (Resource, ResourceIdentity) {
Temperature: s.Temperature,
Health: s.Health,
Standby: s.Standby,
Pool: s.Pool,
Attributes: cloneSMARTAttributes(s.Attributes),
}
}
@ -506,6 +507,11 @@ func resourceFromHostSMARTDisk(host models.Host, disk models.HostDiskSMART) (Res
unraidDisk := matchUnraidDisk(host.Unraid, disk)
assessment := storagehealth.AssessHostSMARTDisk(disk)
storageGroup := unraidDiskGroup(unraidDisk)
if storageGroup == "" {
storageGroup = strings.TrimSpace(disk.Pool)
}
resource := Resource{
Type: ResourceTypePhysicalDisk,
Name: name,
@ -524,7 +530,7 @@ func resourceFromHostSMARTDisk(host models.Host, disk models.HostDiskSMART) (Res
Temperature: disk.Temperature,
Used: used,
StorageRole: unraidDiskRole(unraidDisk),
StorageGroup: unraidDiskGroup(unraidDisk),
StorageGroup: storageGroup,
StorageState: unraidDiskState(unraidDisk),
SMART: convertSMARTAttributes(disk.Attributes),
Risk: physicalDiskRiskFromAssessment(assessment),
@ -1295,18 +1301,19 @@ func resourceFromPhysicalDisk(disk models.PhysicalDisk) (Resource, ResourceIdent
assessment := storagehealth.AssessPhysicalDisk(disk)
pdMeta := &PhysicalDiskMeta{
DevPath: disk.DevPath,
Model: disk.Model,
Serial: disk.Serial,
WWN: disk.WWN,
DiskType: disk.Type,
SizeBytes: disk.Size,
Health: disk.Health,
Wearout: disk.Wearout,
Temperature: disk.Temperature,
RPM: disk.RPM,
Used: disk.Used,
Risk: physicalDiskRiskFromAssessment(assessment),
DevPath: disk.DevPath,
Model: disk.Model,
Serial: disk.Serial,
WWN: disk.WWN,
DiskType: disk.Type,
SizeBytes: disk.Size,
Health: disk.Health,
Wearout: disk.Wearout,
Temperature: disk.Temperature,
RPM: disk.RPM,
Used: disk.Used,
StorageGroup: disk.StorageGroup,
Risk: physicalDiskRiskFromAssessment(assessment),
}
if disk.SmartAttributes != nil {

View file

@ -452,6 +452,7 @@ type HostSMARTMeta struct {
Temperature int `json:"temperature"`
Health string `json:"health"`
Standby bool `json:"standby,omitempty"`
Pool string `json:"pool,omitempty"`
Attributes *models.SMARTAttributes `json:"attributes,omitempty"`
}

View file

@ -129,6 +129,7 @@ type DiskSMART struct {
Temperature int `json:"temperature"` // Temperature in Celsius
Health string `json:"health,omitempty"` // PASSED, FAILED, UNKNOWN
Standby bool `json:"standby,omitempty"` // True if disk was in standby
Pool string `json:"pool,omitempty"` // ZFS pool this disk belongs to (empty if not a ZFS member)
Attributes *SMARTAttributes `json:"attributes,omitempty"`
}