📝 CodeRabbit Chat: Add unit tests
Some checks are pending
Docker Publish / build-and-push (push) Waiting to run

This commit is contained in:
coderabbitai[bot] 2026-04-07 08:46:57 +00:00 committed by GitHub
parent 92d321c5e2
commit 61fc011c13
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 1134 additions and 0 deletions

View file

@@ -39,6 +39,12 @@ const sampleConfig = {
onBulkComplete: true,
minSeverity: "High" as const,
},
autoScan: { enabled: false, pollInterval: 15 },
forceRescan: false,
scanTimeoutMinutes: 20,
bulkTimeoutMinutes: 120,
scannerMemoryMB: 2048,
scannerPidsLimit: 512,
};
describe("getScannerConfig", () => {
@@ -87,6 +93,72 @@ describe("updateScannerConfig", () => {
});
});
describe("updateScannerConfig resource limit fields", () => {
afterEach(() => vi.clearAllMocks());
it("includes scanTimeoutMinutes in the PUT body", async () => {
const cfg = { ...sampleConfig, scanTimeoutMinutes: 45 };
mockFetch.mockResolvedValueOnce(okResponse({ config: cfg }));
await updateScannerConfig(cfg);
const [, opts] = mockFetch.mock.calls[0];
const body = JSON.parse(opts?.body as string);
expect(body.scanTimeoutMinutes).toBe(45);
});
it("includes bulkTimeoutMinutes in the PUT body", async () => {
const cfg = { ...sampleConfig, bulkTimeoutMinutes: 240 };
mockFetch.mockResolvedValueOnce(okResponse({ config: cfg }));
await updateScannerConfig(cfg);
const [, opts] = mockFetch.mock.calls[0];
const body = JSON.parse(opts?.body as string);
expect(body.bulkTimeoutMinutes).toBe(240);
});
it("includes scannerMemoryMB in the PUT body", async () => {
const cfg = { ...sampleConfig, scannerMemoryMB: 4096 };
mockFetch.mockResolvedValueOnce(okResponse({ config: cfg }));
await updateScannerConfig(cfg);
const [, opts] = mockFetch.mock.calls[0];
const body = JSON.parse(opts?.body as string);
expect(body.scannerMemoryMB).toBe(4096);
});
it("includes scannerPidsLimit in the PUT body", async () => {
const cfg = { ...sampleConfig, scannerPidsLimit: 1024 };
mockFetch.mockResolvedValueOnce(okResponse({ config: cfg }));
await updateScannerConfig(cfg);
const [, opts] = mockFetch.mock.calls[0];
const body = JSON.parse(opts?.body as string);
expect(body.scannerPidsLimit).toBe(1024);
});
it("returns the resource limit values echoed back by the server", async () => {
const cfg = {
...sampleConfig,
scanTimeoutMinutes: 30,
bulkTimeoutMinutes: 90,
scannerMemoryMB: 1024,
scannerPidsLimit: 256,
};
mockFetch.mockResolvedValueOnce(okResponse({ config: cfg }));
const result = await updateScannerConfig(cfg);
expect(result.scanTimeoutMinutes).toBe(30);
expect(result.bulkTimeoutMinutes).toBe(90);
expect(result.scannerMemoryMB).toBe(1024);
expect(result.scannerPidsLimit).toBe(256);
});
});
describe("testScanNotification", () => {
afterEach(() => vi.clearAllMocks());

View file

@@ -0,0 +1,149 @@
import { describe, expect, it } from "vitest";
import type { ScannerConfig } from "./types";
// ---------------------------------------------------------------------------
// ScannerConfig resource limit fields added in this PR
// ---------------------------------------------------------------------------
// These tests verify the runtime shape of objects that conform to the
// ScannerConfig interface, ensuring the four new fields are present and
// correctly typed. Because TypeScript interfaces are compile-time only, we
// create plain objects that satisfy the interface and assert their values.
/**
 * Builds a fully-populated ScannerConfig fixture. Every field carries the
 * documented default; any subset can be replaced via `overrides`.
 */
function makeScannerConfig(overrides: Partial<ScannerConfig> = {}): ScannerConfig {
  const defaults: ScannerConfig = {
    grypeImage: "anchore/grype:v0.110.0",
    trivyImage: "aquasec/trivy:0.69.3",
    syftImage: "anchore/syft:v1.42.3",
    defaultScanner: "grype",
    grypeArgs: "",
    trivyArgs: "",
    notifications: {
      onScanComplete: true,
      onBulkComplete: true,
      minSeverity: "High",
    },
    autoScan: { enabled: false, pollInterval: 15 },
    forceRescan: false,
    scanTimeoutMinutes: 20,
    bulkTimeoutMinutes: 120,
    scannerMemoryMB: 2048,
    scannerPidsLimit: 512,
  };
  return { ...defaults, ...overrides };
}
describe("ScannerConfig resource limit fields", () => {
  it("has numeric scanTimeoutMinutes defaulting to 20", () => {
    const cfg = makeScannerConfig();
    expect(typeof cfg.scanTimeoutMinutes).toBe("number");
    expect(cfg.scanTimeoutMinutes).toBe(20);
  });

  it("has numeric bulkTimeoutMinutes defaulting to 120", () => {
    const cfg = makeScannerConfig();
    expect(typeof cfg.bulkTimeoutMinutes).toBe("number");
    expect(cfg.bulkTimeoutMinutes).toBe(120);
  });

  it("has numeric scannerMemoryMB defaulting to 2048", () => {
    const cfg = makeScannerConfig();
    expect(typeof cfg.scannerMemoryMB).toBe("number");
    expect(cfg.scannerMemoryMB).toBe(2048);
  });

  it("has numeric scannerPidsLimit defaulting to 512", () => {
    const cfg = makeScannerConfig();
    expect(typeof cfg.scannerPidsLimit).toBe("number");
    expect(cfg.scannerPidsLimit).toBe(512);
  });

  it("accepts custom resource limit values", () => {
    const cfg = makeScannerConfig({
      scanTimeoutMinutes: 60,
      bulkTimeoutMinutes: 240,
      scannerMemoryMB: 4096,
      scannerPidsLimit: 1024,
    });
    expect(cfg.scanTimeoutMinutes).toBe(60);
    expect(cfg.bulkTimeoutMinutes).toBe(240);
    expect(cfg.scannerMemoryMB).toBe(4096);
    expect(cfg.scannerPidsLimit).toBe(1024);
  });

  it("resource limit fields are independent of other config fields", () => {
    const cfg = makeScannerConfig({
      defaultScanner: "trivy",
      scanTimeoutMinutes: 30,
    });
    expect(cfg.defaultScanner).toBe("trivy");
    expect(cfg.scanTimeoutMinutes).toBe(30);
    // Other resource limit fields remain at defaults
    expect(cfg.bulkTimeoutMinutes).toBe(120);
    expect(cfg.scannerMemoryMB).toBe(2048);
    expect(cfg.scannerPidsLimit).toBe(512);
  });

  it("minimum boundary value of 1 is valid for scanTimeoutMinutes", () => {
    const cfg = makeScannerConfig({ scanTimeoutMinutes: 1 });
    expect(cfg.scanTimeoutMinutes).toBe(1);
  });

  it("large values are representable", () => {
    const cfg = makeScannerConfig({
      scanTimeoutMinutes: 9999,
      bulkTimeoutMinutes: 9999,
      scannerMemoryMB: 65536,
      scannerPidsLimit: 32768,
    });
    // Assert every override that was supplied — the original test set
    // bulkTimeoutMinutes and scannerPidsLimit but never checked them.
    expect(cfg.scanTimeoutMinutes).toBe(9999);
    expect(cfg.bulkTimeoutMinutes).toBe(9999);
    expect(cfg.scannerMemoryMB).toBe(65536);
    expect(cfg.scannerPidsLimit).toBe(32768);
  });
});
describe("ScannerConfig updateScannerConfig round-trip resource limits", () => {
  // Spreading/cloning a ScannerConfig must keep the resource-limit fields,
  // mirroring how scanner-section.tsx updates draft state via
  // setDraft({...draft, field: value}).

  it("spread preserves all resource limit fields", () => {
    const base = makeScannerConfig({
      scanTimeoutMinutes: 45,
      bulkTimeoutMinutes: 180,
      scannerMemoryMB: 8192,
      scannerPidsLimit: 768,
    });
    const next: ScannerConfig = { ...base, grypeImage: "anchore/grype:v2" };
    expect(next.scanTimeoutMinutes).toBe(45);
    expect(next.bulkTimeoutMinutes).toBe(180);
    expect(next.scannerMemoryMB).toBe(8192);
    expect(next.scannerPidsLimit).toBe(768);
    expect(next.grypeImage).toBe("anchore/grype:v2");
  });

  it("individual field override does not affect other resource limit fields", () => {
    const base = makeScannerConfig();
    const next: ScannerConfig = { ...base, scanTimeoutMinutes: 99 };
    expect(next.scanTimeoutMinutes).toBe(99);
    expect(next.bulkTimeoutMinutes).toBe(base.bulkTimeoutMinutes);
    expect(next.scannerMemoryMB).toBe(base.scannerMemoryMB);
    expect(next.scannerPidsLimit).toBe(base.scannerPidsLimit);
  });

  it("serialises and deserialises resource limit fields via JSON", () => {
    const source = makeScannerConfig({
      scanTimeoutMinutes: 30,
      bulkTimeoutMinutes: 90,
      scannerMemoryMB: 1024,
      scannerPidsLimit: 256,
    });
    const restored = JSON.parse(JSON.stringify(source)) as ScannerConfig;
    expect(restored.scanTimeoutMinutes).toBe(30);
    expect(restored.bulkTimeoutMinutes).toBe(90);
    expect(restored.scannerMemoryMB).toBe(1024);
    expect(restored.scannerPidsLimit).toBe(256);
  });
});

View file

@@ -426,4 +426,279 @@ func TestParseScannerConfigEnvOverrides(t *testing.T) {
if cfg.NotifyMinSeverity != "Critical" {
t.Fatalf("expected NotifyMinSeverity 'Critical', got %q", cfg.NotifyMinSeverity)
}
}
// ─── parseScannerConfig resource limits ──────────────────────────────────────
// TestParseScannerConfigResourceLimitDefaults verifies the default values of
// the four resource-limit fields when their env vars are set but empty.
func TestParseScannerConfigResourceLimitDefaults(t *testing.T) {
	for _, envVar := range []string{
		"SCANNER_TIMEOUT_MINUTES",
		"SCANNER_BULK_TIMEOUT_MINUTES",
		"SCANNER_MEMORY_MB",
		"SCANNER_PIDS_LIMIT",
	} {
		t.Setenv(envVar, "")
	}
	cfg := parseScannerConfig()
	switch {
	case cfg.ScanTimeoutMinutes != 20:
		t.Fatalf("expected default ScanTimeoutMinutes=20, got %d", cfg.ScanTimeoutMinutes)
	case cfg.BulkTimeoutMinutes != 120:
		t.Fatalf("expected default BulkTimeoutMinutes=120, got %d", cfg.BulkTimeoutMinutes)
	case cfg.ScannerMemoryMB != 2048:
		t.Fatalf("expected default ScannerMemoryMB=2048, got %d", cfg.ScannerMemoryMB)
	case cfg.ScannerPidsLimit != 512:
		t.Fatalf("expected default ScannerPidsLimit=512, got %d", cfg.ScannerPidsLimit)
	}
}
// TestParseScannerConfigResourceLimitEnvOverrides verifies that positive
// integer env vars override the resource-limit defaults.
func TestParseScannerConfigResourceLimitEnvOverrides(t *testing.T) {
	overrides := map[string]string{
		"SCANNER_TIMEOUT_MINUTES":      "30",
		"SCANNER_BULK_TIMEOUT_MINUTES": "240",
		"SCANNER_MEMORY_MB":            "4096",
		"SCANNER_PIDS_LIMIT":           "1024",
	}
	for k, v := range overrides {
		t.Setenv(k, v)
	}
	cfg := parseScannerConfig()
	switch {
	case cfg.ScanTimeoutMinutes != 30:
		t.Fatalf("expected ScanTimeoutMinutes=30, got %d", cfg.ScanTimeoutMinutes)
	case cfg.BulkTimeoutMinutes != 240:
		t.Fatalf("expected BulkTimeoutMinutes=240, got %d", cfg.BulkTimeoutMinutes)
	case cfg.ScannerMemoryMB != 4096:
		t.Fatalf("expected ScannerMemoryMB=4096, got %d", cfg.ScannerMemoryMB)
	case cfg.ScannerPidsLimit != 1024:
		t.Fatalf("expected ScannerPidsLimit=1024, got %d", cfg.ScannerPidsLimit)
	}
}
// TestParseScannerConfigResourceLimitIgnoresZeroAndNegative verifies that "0"
// or a negative integer does not override the default (the guard requires
// n > 0).
func TestParseScannerConfigResourceLimitIgnoresZeroAndNegative(t *testing.T) {
	rejected := map[string]string{
		"SCANNER_TIMEOUT_MINUTES":      "0",
		"SCANNER_BULK_TIMEOUT_MINUTES": "-5",
		"SCANNER_MEMORY_MB":            "0",
		"SCANNER_PIDS_LIMIT":           "-1",
	}
	for k, v := range rejected {
		t.Setenv(k, v)
	}
	cfg := parseScannerConfig()
	switch {
	case cfg.ScanTimeoutMinutes != 20:
		t.Fatalf("zero SCANNER_TIMEOUT_MINUTES must not override default, got %d", cfg.ScanTimeoutMinutes)
	case cfg.BulkTimeoutMinutes != 120:
		t.Fatalf("negative SCANNER_BULK_TIMEOUT_MINUTES must not override default, got %d", cfg.BulkTimeoutMinutes)
	case cfg.ScannerMemoryMB != 2048:
		t.Fatalf("zero SCANNER_MEMORY_MB must not override default, got %d", cfg.ScannerMemoryMB)
	case cfg.ScannerPidsLimit != 512:
		t.Fatalf("negative SCANNER_PIDS_LIMIT must not override default, got %d", cfg.ScannerPidsLimit)
	}
}
// TestParseScannerConfigResourceLimitIgnoresNonNumeric verifies that a
// non-numeric env var value does not override the default.
func TestParseScannerConfigResourceLimitIgnoresNonNumeric(t *testing.T) {
	garbage := map[string]string{
		"SCANNER_TIMEOUT_MINUTES":      "abc",
		"SCANNER_BULK_TIMEOUT_MINUTES": "two-hours",
		"SCANNER_MEMORY_MB":            "4gib",
		"SCANNER_PIDS_LIMIT":           "many",
	}
	for k, v := range garbage {
		t.Setenv(k, v)
	}
	cfg := parseScannerConfig()
	switch {
	case cfg.ScanTimeoutMinutes != 20:
		t.Fatalf("non-numeric SCANNER_TIMEOUT_MINUTES must keep default, got %d", cfg.ScanTimeoutMinutes)
	case cfg.BulkTimeoutMinutes != 120:
		t.Fatalf("non-numeric SCANNER_BULK_TIMEOUT_MINUTES must keep default, got %d", cfg.BulkTimeoutMinutes)
	case cfg.ScannerMemoryMB != 2048:
		t.Fatalf("non-numeric SCANNER_MEMORY_MB must keep default, got %d", cfg.ScannerMemoryMB)
	case cfg.ScannerPidsLimit != 512:
		t.Fatalf("non-numeric SCANNER_PIDS_LIMIT must keep default, got %d", cfg.ScannerPidsLimit)
	}
}
// ─── Manager merge resource limits ─────────────────────────────────────────
// TestScannerMergeResourceLimitsFileOverridesEnv verifies that positive
// pointer-int file config values for the resource-limit fields take
// precedence over the env-derived defaults during merge.
func TestScannerMergeResourceLimitsFileOverridesEnv(t *testing.T) {
	envCfg := NewConfig()
	// env defaults (as produced by parseScannerConfig)
	envCfg.Scanner.ScanTimeoutMinutes = 20
	envCfg.Scanner.BulkTimeoutMinutes = 120
	envCfg.Scanner.ScannerMemoryMB = 2048
	envCfg.Scanner.ScannerPidsLimit = 512
	scanMin, bulkMin, memMB, pidsMax := 45, 200, 8192, 256
	mgr := &Manager{
		envSnapshot: EnvSnapshot{},
		envConfig:   envCfg,
		filePath:    filepath.Join(t.TempDir(), "config.json"),
		fileConfig: FileConfig{
			Scanner: &FileScannerConfig{
				ScanTimeoutMinutes: &scanMin,
				BulkTimeoutMinutes: &bulkMin,
				ScannerMemoryMB:    &memMB,
				ScannerPidsLimit:   &pidsMax,
			},
		},
	}
	mgr.merged, mgr.sources = mgr.merge()
	got := mgr.Config()
	switch {
	case got.Scanner.ScanTimeoutMinutes != 45:
		t.Fatalf("expected ScanTimeoutMinutes=45, got %d", got.Scanner.ScanTimeoutMinutes)
	case got.Scanner.BulkTimeoutMinutes != 200:
		t.Fatalf("expected BulkTimeoutMinutes=200, got %d", got.Scanner.BulkTimeoutMinutes)
	case got.Scanner.ScannerMemoryMB != 8192:
		t.Fatalf("expected ScannerMemoryMB=8192, got %d", got.Scanner.ScannerMemoryMB)
	case got.Scanner.ScannerPidsLimit != 256:
		t.Fatalf("expected ScannerPidsLimit=256, got %d", got.Scanner.ScannerPidsLimit)
	}
}
// TestScannerMergeResourceLimitsZeroFileValueDoesNotOverride verifies that a
// pointer-int with value 0 in the file config does NOT override the env
// default (the guard requires *ptr > 0).
func TestScannerMergeResourceLimitsZeroFileValueDoesNotOverride(t *testing.T) {
	envCfg := NewConfig()
	envCfg.Scanner.ScanTimeoutMinutes = 20
	envCfg.Scanner.BulkTimeoutMinutes = 120
	envCfg.Scanner.ScannerMemoryMB = 2048
	envCfg.Scanner.ScannerPidsLimit = 512
	zero := 0
	mgr := &Manager{
		envSnapshot: EnvSnapshot{},
		envConfig:   envCfg,
		filePath:    filepath.Join(t.TempDir(), "config.json"),
		fileConfig: FileConfig{
			// every resource-limit field points at the same rejected zero
			Scanner: &FileScannerConfig{
				ScanTimeoutMinutes: &zero,
				BulkTimeoutMinutes: &zero,
				ScannerMemoryMB:    &zero,
				ScannerPidsLimit:   &zero,
			},
		},
	}
	mgr.merged, mgr.sources = mgr.merge()
	got := mgr.Config()
	switch {
	case got.Scanner.ScanTimeoutMinutes != 20:
		t.Fatalf("zero ScanTimeoutMinutes in file must not override env default, got %d", got.Scanner.ScanTimeoutMinutes)
	case got.Scanner.BulkTimeoutMinutes != 120:
		t.Fatalf("zero BulkTimeoutMinutes in file must not override env default, got %d", got.Scanner.BulkTimeoutMinutes)
	case got.Scanner.ScannerMemoryMB != 2048:
		t.Fatalf("zero ScannerMemoryMB in file must not override env default, got %d", got.Scanner.ScannerMemoryMB)
	case got.Scanner.ScannerPidsLimit != 512:
		t.Fatalf("zero ScannerPidsLimit in file must not override env default, got %d", got.Scanner.ScannerPidsLimit)
	}
}
// TestScannerMergeResourceLimitsNilFileFieldsPreserveEnv verifies that nil
// pointer-int file fields leave the env-derived values unchanged.
func TestScannerMergeResourceLimitsNilFileFieldsPreserveEnv(t *testing.T) {
	envCfg := NewConfig()
	envCfg.Scanner.ScanTimeoutMinutes = 30
	envCfg.Scanner.BulkTimeoutMinutes = 90
	envCfg.Scanner.ScannerMemoryMB = 1024
	envCfg.Scanner.ScannerPidsLimit = 128
	mgr := &Manager{
		envSnapshot: EnvSnapshot{},
		envConfig:   envCfg,
		filePath:    filepath.Join(t.TempDir(), "config.json"),
		// all resource-limit fields deliberately left nil
		fileConfig: FileConfig{Scanner: &FileScannerConfig{}},
	}
	mgr.merged, mgr.sources = mgr.merge()
	got := mgr.Config()
	switch {
	case got.Scanner.ScanTimeoutMinutes != 30:
		t.Fatalf("nil file field must preserve env ScanTimeoutMinutes=30, got %d", got.Scanner.ScanTimeoutMinutes)
	case got.Scanner.BulkTimeoutMinutes != 90:
		t.Fatalf("nil file field must preserve env BulkTimeoutMinutes=90, got %d", got.Scanner.BulkTimeoutMinutes)
	case got.Scanner.ScannerMemoryMB != 1024:
		t.Fatalf("nil file field must preserve env ScannerMemoryMB=1024, got %d", got.Scanner.ScannerMemoryMB)
	case got.Scanner.ScannerPidsLimit != 128:
		t.Fatalf("nil file field must preserve env ScannerPidsLimit=128, got %d", got.Scanner.ScannerPidsLimit)
	}
}
// TestUpdateScannerConfigPersistsResourceLimits verifies that
// UpdateScannerConfig persists the resource-limit pointer-int fields and that
// they are reflected by Config().
func TestUpdateScannerConfigPersistsResourceLimits(t *testing.T) {
	mgr := &Manager{
		envSnapshot: EnvSnapshot{},
		envConfig:   NewConfig(),
		filePath:    filepath.Join(t.TempDir(), "config.json"),
	}
	mgr.merged, mgr.sources = mgr.merge()
	scanMin, bulkMin, memMB, pidsMax := 60, 180, 4096, 768
	update := &FileScannerConfig{
		ScanTimeoutMinutes: &scanMin,
		BulkTimeoutMinutes: &bulkMin,
		ScannerMemoryMB:    &memMB,
		ScannerPidsLimit:   &pidsMax,
	}
	if err := mgr.UpdateScannerConfig(update); err != nil {
		t.Fatalf("UpdateScannerConfig returned unexpected error: %v", err)
	}
	got := mgr.Config()
	switch {
	case got.Scanner.ScanTimeoutMinutes != 60:
		t.Fatalf("expected ScanTimeoutMinutes=60, got %d", got.Scanner.ScanTimeoutMinutes)
	case got.Scanner.BulkTimeoutMinutes != 180:
		t.Fatalf("expected BulkTimeoutMinutes=180, got %d", got.Scanner.BulkTimeoutMinutes)
	case got.Scanner.ScannerMemoryMB != 4096:
		t.Fatalf("expected ScannerMemoryMB=4096, got %d", got.Scanner.ScannerMemoryMB)
	case got.Scanner.ScannerPidsLimit != 768:
		t.Fatalf("expected ScannerPidsLimit=768, got %d", got.Scanner.ScannerPidsLimit)
	}
}
// TestNewManagerSetsEnvSnapshotForResourceLimitVars verifies that NewManager
// recognises each of the four resource-limit env vars when deciding whether
// scanner config comes from the environment. The original test only covered
// SCANNER_TIMEOUT_MINUTES despite claiming to cover all four; each var now
// gets its own subtest so the env var is scoped to that subtest only.
func TestNewManagerSetsEnvSnapshotForResourceLimitVars(t *testing.T) {
	for _, envVar := range []string{
		"SCANNER_TIMEOUT_MINUTES",
		"SCANNER_BULK_TIMEOUT_MINUTES",
		"SCANNER_MEMORY_MB",
		"SCANNER_PIDS_LIMIT",
	} {
		t.Run(envVar, func(t *testing.T) {
			t.Setenv(envVar, "30")
			m := NewManager()
			if !m.envSnapshot.ScannerSet {
				t.Fatalf("expected ScannerSet=true when %s is set", envVar)
			}
		})
	}
}

View file

@@ -231,3 +231,431 @@ func TestRingBuffer_Overflow(t *testing.T) {
t.Errorf("got %q, want %q", got, "KLMNOPQR")
}
}
// ─── BuildScannerHostConfig ───────────────────────────────────────────────────
// TestBuildScannerHostConfig_DockerSocketBind checks that the docker socket
// is always bind-mounted into the scanner container.
func TestBuildScannerHostConfig_DockerSocketBind(t *testing.T) {
	const sock = "/var/run/docker.sock:/var/run/docker.sock"
	hc := BuildScannerHostConfig(ScannerKindGrype, ScannerLimits{})
	for _, bind := range hc.Binds {
		if bind == sock {
			return
		}
	}
	t.Fatalf("expected docker socket bind in Binds, got %v", hc.Binds)
}
// TestBuildScannerHostConfig_GrypeCacheVolumeBind checks that the grype cache
// volume is mounted at its configured path.
func TestBuildScannerHostConfig_GrypeCacheVolumeBind(t *testing.T) {
	hc := BuildScannerHostConfig(ScannerKindGrype, ScannerLimits{})
	vol := ScannerCacheVolumes[ScannerKindGrype]
	want := vol.Name + ":" + vol.MountPath
	for _, bind := range hc.Binds {
		if bind == want {
			return
		}
	}
	t.Fatalf("expected grype cache volume bind %q in %v", want, hc.Binds)
}
// TestBuildScannerHostConfig_TrivyCacheVolumeBind checks that the trivy cache
// volume is mounted at its configured path.
func TestBuildScannerHostConfig_TrivyCacheVolumeBind(t *testing.T) {
	hc := BuildScannerHostConfig(ScannerKindTrivy, ScannerLimits{})
	vol := ScannerCacheVolumes[ScannerKindTrivy]
	want := vol.Name + ":" + vol.MountPath
	for _, bind := range hc.Binds {
		if bind == want {
			return
		}
	}
	t.Fatalf("expected trivy cache volume bind %q in %v", want, hc.Binds)
}
// TestBuildScannerHostConfig_SyftCacheVolumeBind checks that the syft cache
// volume is mounted at its configured path.
func TestBuildScannerHostConfig_SyftCacheVolumeBind(t *testing.T) {
	hc := BuildScannerHostConfig(ScannerKindSyft, ScannerLimits{})
	vol := ScannerCacheVolumes[ScannerKindSyft]
	want := vol.Name + ":" + vol.MountPath
	for _, bind := range hc.Binds {
		if bind == want {
			return
		}
	}
	t.Fatalf("expected syft cache volume bind %q in %v", want, hc.Binds)
}
func TestBuildScannerHostConfig_MemoryLimit(t *testing.T) {
limits := ScannerLimits{MemoryBytes: 512 * 1024 * 1024}
hc := BuildScannerHostConfig(ScannerKindGrype, limits)
if hc.Resources.Memory != 512*1024*1024 {
t.Fatalf("expected Memory=%d, got %d", 512*1024*1024, hc.Resources.Memory)
}
}
// TestBuildScannerHostConfig_ZeroMemoryNotSet checks that an unset memory
// limit leaves the container memory at zero (unlimited).
func TestBuildScannerHostConfig_ZeroMemoryNotSet(t *testing.T) {
	hc := BuildScannerHostConfig(ScannerKindGrype, ScannerLimits{MemoryBytes: 0})
	if got := hc.Resources.Memory; got != 0 {
		t.Fatalf("expected Memory=0 when not set, got %d", got)
	}
}
func TestBuildScannerHostConfig_PidsLimit(t *testing.T) {
limits := ScannerLimits{PidsLimit: 256}
hc := BuildScannerHostConfig(ScannerKindGrype, limits)
if hc.Resources.PidsLimit == nil {
t.Fatal("expected PidsLimit to be set, got nil")
}
if *hc.Resources.PidsLimit != 256 {
t.Fatalf("expected PidsLimit=256, got %d", *hc.Resources.PidsLimit)
}
}
// TestBuildScannerHostConfig_ZeroPidsNotSet checks that a zero pids limit
// leaves the pointer nil (no limit applied).
func TestBuildScannerHostConfig_ZeroPidsNotSet(t *testing.T) {
	hc := BuildScannerHostConfig(ScannerKindGrype, ScannerLimits{PidsLimit: 0})
	if pl := hc.Resources.PidsLimit; pl != nil {
		t.Fatalf("expected PidsLimit=nil when not set, got %v", *pl)
	}
}
// TestBuildScannerHostConfig_UnknownKindNoExtraBinds checks that an
// unrecognised kind does not panic and still binds only the docker socket.
func TestBuildScannerHostConfig_UnknownKindNoExtraBinds(t *testing.T) {
	hc := BuildScannerHostConfig("unknown-kind", ScannerLimits{})
	onlySocket := len(hc.Binds) == 1 &&
		hc.Binds[0] == "/var/run/docker.sock:/var/run/docker.sock"
	if !onlySocket {
		t.Fatalf("unexpected binds for unknown kind: %v", hc.Binds)
	}
}
// ─── TempScanFile ─────────────────────────────────────────────────────────────
// TestTempScanFile_ContainsJobID checks that the job ID is embedded in the
// generated temp path.
func TestTempScanFile_ContainsJobID(t *testing.T) {
	const jobID = "abc-123"
	if p := TempScanFile(jobID); !strings.Contains(p, jobID) {
		t.Fatalf("expected job ID in path, got %q", p)
	}
}
// TestTempScanFile_HasJSONSuffix checks that generated temp paths end in .json.
func TestTempScanFile_HasJSONSuffix(t *testing.T) {
	if p := TempScanFile("job-xyz"); !strings.HasSuffix(p, ".json") {
		t.Fatalf("expected .json suffix, got %q", p)
	}
}
// TestTempScanFile_DifferentIDsDifferentPaths checks that distinct job IDs
// never collide on the same temp path.
func TestTempScanFile_DifferentIDsDifferentPaths(t *testing.T) {
	first, second := TempScanFile("id-1"), TempScanFile("id-2")
	if first == second {
		t.Fatalf("different job IDs must produce different paths")
	}
}
// ─── readFilePrefix ───────────────────────────────────────────────────────────
// TestReadFilePrefix_ReadsUpToN checks that at most n bytes are returned.
func TestReadFilePrefix_ReadsUpToN(t *testing.T) {
	path := filepath.Join(t.TempDir(), "test.txt")
	if err := os.WriteFile(path, []byte("hello world"), 0o600); err != nil {
		t.Fatalf("write: %v", err)
	}
	if got := readFilePrefix(path, 5); got != "hello" {
		t.Fatalf("expected %q, got %q", "hello", got)
	}
}
// TestReadFilePrefix_ReturnsEmptyForMissingFile checks the missing-file case.
func TestReadFilePrefix_ReturnsEmptyForMissingFile(t *testing.T) {
	if got := readFilePrefix("/nonexistent/path/file.txt", 100); got != "" {
		t.Fatalf("expected empty string for missing file, got %q", got)
	}
}
// TestReadFilePrefix_ReturnsFullContentWhenShorterThanN checks that a file
// shorter than n is returned whole.
func TestReadFilePrefix_ReturnsFullContentWhenShorterThanN(t *testing.T) {
	const content = "hi"
	path := filepath.Join(t.TempDir(), "short.txt")
	if err := os.WriteFile(path, []byte(content), 0o600); err != nil {
		t.Fatalf("write: %v", err)
	}
	if got := readFilePrefix(path, 1024); got != content {
		t.Fatalf("expected full content %q, got %q", content, got)
	}
}
// TestReadFilePrefix_TrimsWhitespace checks that surrounding whitespace is
// stripped from the returned prefix.
func TestReadFilePrefix_TrimsWhitespace(t *testing.T) {
	path := filepath.Join(t.TempDir(), "ws.txt")
	if err := os.WriteFile(path, []byte("  hello  "), 0o600); err != nil {
		t.Fatalf("write: %v", err)
	}
	if got := readFilePrefix(path, 100); got != "hello" {
		t.Fatalf("expected trimmed content, got %q", got)
	}
}
// ─── countingWriter ───────────────────────────────────────────────────────────
// TestCountingWriter_CountsBytes verifies that countingWriter forwards bytes
// to the wrapped writer while accumulating the total in *n.
// strings.Builder already satisfies io.Writer, so the writerAdapter
// indirection used previously is unnecessary; Write errors are now checked
// instead of silently ignored.
func TestCountingWriter_CountsBytes(t *testing.T) {
	var buf strings.Builder
	var n int64
	cw := &countingWriter{w: &buf, n: &n}
	if _, err := cw.Write([]byte("hello")); err != nil {
		t.Fatalf("write: %v", err)
	}
	if _, err := cw.Write([]byte(" world")); err != nil {
		t.Fatalf("write: %v", err)
	}
	if n != 11 {
		t.Fatalf("expected n=11, got %d", n)
	}
	if buf.String() != "hello world" {
		t.Fatalf("unexpected content: %q", buf.String())
	}
}
// TestCountingWriter_NilCounter verifies that writing with a nil counter does
// not panic. strings.Builder is used directly as the io.Writer — the
// writerAdapter wrapper is redundant.
func TestCountingWriter_NilCounter(t *testing.T) {
	var buf strings.Builder
	cw := &countingWriter{w: &buf, n: nil}
	if _, err := cw.Write([]byte("test")); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
// writerAdapter adapts strings.Builder to io.Writer.
// NOTE(review): *strings.Builder already implements io.Writer, so this
// adapter is redundant; it is kept only for compatibility with existing
// call sites in this file.
type writerAdapter struct{ b *strings.Builder }

// Write forwards p unmodified to the underlying strings.Builder.
func (w *writerAdapter) Write(p []byte) (int, error) {
	return w.b.Write(p)
}
// ─── ScannerCacheVolumes constants ───────────────────────────────────────────
// TestScannerCacheVolumes_AllKindsDefined checks that each scanner kind has a
// cache-volume entry with a non-empty name and mount path.
func TestScannerCacheVolumes_AllKindsDefined(t *testing.T) {
	kinds := []ScannerKind{ScannerKindGrype, ScannerKindTrivy, ScannerKindSyft}
	for _, kind := range kinds {
		vol, ok := ScannerCacheVolumes[kind]
		if !ok {
			t.Errorf("missing cache volume entry for kind %q", kind)
			continue
		}
		if vol.Name == "" {
			t.Errorf("empty Name for kind %q", kind)
		}
		if vol.MountPath == "" {
			t.Errorf("empty MountPath for kind %q", kind)
		}
	}
}
// ─── parseGrypeOutputStream edge cases ───────────────────────────────────────
// TestParseGrypeOutputStream_Empty checks that an empty matches array parses
// to zero vulnerabilities.
func TestParseGrypeOutputStream_Empty(t *testing.T) {
	payload, err := json.Marshal(grypeOutput{Matches: []grypeMatch{}})
	if err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseGrypeOutputStream(bytes.NewReader(payload))
	if err != nil {
		t.Fatalf("unexpected error for empty matches: %v", err)
	}
	if len(vulns) != 0 {
		t.Fatalf("expected 0 vulns, got %d", len(vulns))
	}
}
// TestParseGrypeOutputStream_InvalidJSON checks that malformed input is
// rejected with an error.
func TestParseGrypeOutputStream_InvalidJSON(t *testing.T) {
	if _, err := parseGrypeOutputStream(strings.NewReader("not json")); err == nil {
		t.Fatal("expected error for invalid JSON")
	}
}
// TestParseGrypeOutputStream_NormalizeSeverity verifies severity
// normalisation: known severities are title-cased and unknown strings map to
// "Unknown". The encode error is now checked and the vuln count is guarded
// before indexing, so a parsing regression fails cleanly instead of panicking.
func TestParseGrypeOutputStream_NormalizeSeverity(t *testing.T) {
	out := grypeOutput{Matches: []grypeMatch{
		{
			Vulnerability: grypeVulnerability{ID: "CVE-1", Severity: "critical"},
			Artifact:      grypeArtifact{Name: "pkg", Version: "1.0"},
		},
		{
			Vulnerability: grypeVulnerability{ID: "CVE-2", Severity: "UNKNOWN_SEVERITY"},
			Artifact:      grypeArtifact{Name: "pkg2", Version: "2.0"},
		},
	}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseGrypeOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vulns) != 2 {
		t.Fatalf("expected 2 vulns, got %d", len(vulns))
	}
	if vulns[0].Severity != "Critical" {
		t.Errorf("expected Critical, got %q", vulns[0].Severity)
	}
	if vulns[1].Severity != "Unknown" {
		t.Errorf("expected Unknown for unrecognised severity, got %q", vulns[1].Severity)
	}
}
// TestParseGrypeOutputStream_MultipleFixVersions verifies that when multiple
// fix versions exist, the first one is used. Encode errors are now checked and
// the vuln count is guarded before indexing.
func TestParseGrypeOutputStream_MultipleFixVersions(t *testing.T) {
	out := grypeOutput{Matches: []grypeMatch{
		{
			Vulnerability: grypeVulnerability{
				ID:       "CVE-2024-0001",
				Severity: "High",
				Fix:      grypeFixInfo{Versions: []string{"2.0.0", "3.0.0"}},
			},
			Artifact: grypeArtifact{Name: "lib", Version: "1.0"},
		},
	}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseGrypeOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vulns) != 1 {
		t.Fatalf("expected 1 vuln, got %d", len(vulns))
	}
	if vulns[0].FixedVersion != "2.0.0" {
		t.Errorf("expected first fix version, got %q", vulns[0].FixedVersion)
	}
}
// TestParseGrypeOutputStream_NoFixVersions verifies that FixedVersion stays
// empty when grype reports no fix. Encode errors are now checked and the vuln
// count is guarded before indexing.
func TestParseGrypeOutputStream_NoFixVersions(t *testing.T) {
	out := grypeOutput{Matches: []grypeMatch{
		{
			Vulnerability: grypeVulnerability{ID: "CVE-2024-0002", Severity: "Low"},
			Artifact:      grypeArtifact{Name: "pkg", Version: "0.5"},
		},
	}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseGrypeOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vulns) != 1 {
		t.Fatalf("expected 1 vuln, got %d", len(vulns))
	}
	if vulns[0].FixedVersion != "" {
		t.Errorf("expected empty FixedVersion when no fix, got %q", vulns[0].FixedVersion)
	}
}
// ─── parseTrivyOutputStream edge cases ───────────────────────────────────────
// TestParseTrivyOutputStream_Empty verifies that an empty results array
// parses to zero vulnerabilities. The previously ignored Encode error is now
// checked so a fixture-construction failure is reported, not masked.
func TestParseTrivyOutputStream_Empty(t *testing.T) {
	out := trivyOutput{Results: []trivyResult{}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseTrivyOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error for empty results: %v", err)
	}
	if len(vulns) != 0 {
		t.Fatalf("expected 0 vulns, got %d", len(vulns))
	}
}
// TestParseTrivyOutputStream_InvalidJSON checks that malformed input is
// rejected with an error.
func TestParseTrivyOutputStream_InvalidJSON(t *testing.T) {
	if _, err := parseTrivyOutputStream(strings.NewReader("{bad json")); err == nil {
		t.Fatal("expected error for invalid JSON")
	}
}
// TestParseTrivyOutputStream_MultipleResults verifies that vulnerabilities
// from every result entry are flattened into a single list. The previously
// ignored Encode error is now checked.
func TestParseTrivyOutputStream_MultipleResults(t *testing.T) {
	out := trivyOutput{Results: []trivyResult{
		{Target: "image1", Vulnerabilities: []trivyVulnerability{
			{VulnerabilityID: "CVE-1", Severity: "High", PkgName: "pkg1", InstalledVersion: "1.0"},
		}},
		{Target: "image2", Vulnerabilities: []trivyVulnerability{
			{VulnerabilityID: "CVE-2", Severity: "Critical", PkgName: "pkg2", InstalledVersion: "2.0"},
		}},
	}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseTrivyOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vulns) != 2 {
		t.Fatalf("expected 2 vulns from 2 results, got %d", len(vulns))
	}
}
// TestParseTrivyOutputStream_ResultWithNoVulnerabilities verifies that a
// result with a nil vulnerability list contributes nothing. The previously
// ignored Encode error is now checked.
func TestParseTrivyOutputStream_ResultWithNoVulnerabilities(t *testing.T) {
	out := trivyOutput{Results: []trivyResult{
		{Target: "clean-image", Vulnerabilities: nil},
	}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseTrivyOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vulns) != 0 {
		t.Fatalf("expected 0 vulns for clean image, got %d", len(vulns))
	}
}
// TestParseTrivyOutputStream_NormalizeSeverity verifies that trivy's
// upper-case severities are normalised to title case. The encode error is now
// checked and the vuln count is guarded before indexing, so a parsing
// regression fails cleanly instead of panicking.
func TestParseTrivyOutputStream_NormalizeSeverity(t *testing.T) {
	out := trivyOutput{Results: []trivyResult{
		{Target: "img", Vulnerabilities: []trivyVulnerability{
			{VulnerabilityID: "CVE-A", Severity: "MEDIUM", PkgName: "pkg"},
		}},
	}}
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(&out); err != nil {
		t.Fatalf("encode: %v", err)
	}
	vulns, err := parseTrivyOutputStream(&buf)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(vulns) != 1 {
		t.Fatalf("expected 1 vuln, got %d", len(vulns))
	}
	if vulns[0].Severity != "Medium" {
		t.Errorf("expected Medium, got %q", vulns[0].Severity)
	}
}
// ─── enrichParseErrorForEmptyOutput additional edge cases ─────────────────────
// TestEnrichParseErrorForEmptyOutput_LongStderrTruncatedTo512 verifies that
// the stderr tail embedded in the enriched error is truncated to 512 bytes.
// The previously ignored os.WriteFile error is now checked.
func TestEnrichParseErrorForEmptyOutput_LongStderrTruncatedTo512(t *testing.T) {
	outPath := filepath.Join(t.TempDir(), "empty.json")
	if err := os.WriteFile(outPath, nil, 0o600); err != nil {
		t.Fatalf("write: %v", err)
	}
	longStderr := strings.Repeat("x", 1000)
	parseErr := fmt.Errorf("wrap: %w", io.EOF)
	err := enrichParseErrorForEmptyOutput("trivy", outPath, longStderr, parseErr)
	if err == nil {
		t.Fatal("expected non-nil error")
	}
	// Counting 'x' runs is a proxy for the tail length — presumably no other
	// 'x' appears in the fixed message parts or the temp path (TODO confirm).
	if count := strings.Count(err.Error(), "x"); count > 512 {
		t.Fatalf("stderr tail in error message too long: %d chars of 'x'", count)
	}
}
// TestEnrichParseErrorForEmptyOutput_EmptyStderrTail verifies that an empty
// stderr tail is omitted from the message while the "empty <scanner> output"
// marker is retained. The previously ignored os.WriteFile error is now checked.
func TestEnrichParseErrorForEmptyOutput_EmptyStderrTail(t *testing.T) {
	outPath := filepath.Join(t.TempDir(), "empty2.json")
	if err := os.WriteFile(outPath, nil, 0o600); err != nil {
		t.Fatalf("write: %v", err)
	}
	parseErr := fmt.Errorf("wrap: %w", io.EOF)
	err := enrichParseErrorForEmptyOutput("grype", outPath, "", parseErr)
	if err == nil {
		t.Fatal("expected non-nil error")
	}
	if strings.Contains(err.Error(), "stderr tail") {
		t.Fatalf("empty stderr tail should not appear in message, got %q", err.Error())
	}
	if !strings.Contains(err.Error(), "empty grype output") {
		t.Fatalf("expected 'empty grype output' in message, got %q", err.Error())
	}
}

View file

@@ -1,7 +1,10 @@
package scanner
import (
"context"
"sync/atomic"
"testing"
"time"
"github.com/hhftechnology/vps-monitor/internal/models"
)
@@ -169,4 +172,211 @@ func TestContainsHostExactMatch(t *testing.T) {
if containsHost([]string{"host-abc"}, "host") {
t.Fatal("containsHost must not do partial matching")
}
}
// ─── Helper: minimal ScannerService without Docker ───────────────────────────
// newTestScannerService builds a ScannerService whose atomic config pointer
// holds cfg. No registry or database is attached — the timeout/limit helpers
// exercised by these tests only read the config.
func newTestScannerService(cfg *models.ScannerConfig) *ScannerService {
	svc := &ScannerService{}
	svc.config.Store(cfg)
	return svc
}
// ─── scanTimeout ─────────────────────────────────────────────────────────────
func TestScanTimeoutUsesConfiguredValue(t *testing.T) {
cfg := &models.ScannerConfig{ScanTimeoutMinutes: 45}
s := newTestScannerService(cfg)
if got := s.scanTimeout(); got != 45*time.Minute {
t.Fatalf("expected 45m, got %v", got)
}
}
func TestScanTimeoutDefaultWhenZero(t *testing.T) {
cfg := &models.ScannerConfig{ScanTimeoutMinutes: 0}
s := newTestScannerService(cfg)
if got := s.scanTimeout(); got != 20*time.Minute {
t.Fatalf("expected default 20m, got %v", got)
}
}
// A zero-value ScannerService has a nil atomic config pointer; the default
// timeout must still apply.
func TestScanTimeoutDefaultWhenNilConfig(t *testing.T) {
	svc := &ScannerService{}
	if got := svc.scanTimeout(); got != 20*time.Minute {
		t.Fatalf("expected default 20m for nil config, got %v", got)
	}
}
func TestScanTimeoutNegativeValueUsesDefault(t *testing.T) {
cfg := &models.ScannerConfig{ScanTimeoutMinutes: -10}
s := newTestScannerService(cfg)
if got := s.scanTimeout(); got != 20*time.Minute {
t.Fatalf("expected default 20m for negative value, got %v", got)
}
}
// ─── bulkTimeout ─────────────────────────────────────────────────────────────
func TestBulkTimeoutUsesConfiguredValue(t *testing.T) {
cfg := &models.ScannerConfig{BulkTimeoutMinutes: 240}
s := newTestScannerService(cfg)
if got := s.bulkTimeout(); got != 240*time.Minute {
t.Fatalf("expected 240m, got %v", got)
}
}
func TestBulkTimeoutDefaultWhenZero(t *testing.T) {
cfg := &models.ScannerConfig{BulkTimeoutMinutes: 0}
s := newTestScannerService(cfg)
if got := s.bulkTimeout(); got != 120*time.Minute {
t.Fatalf("expected default 120m, got %v", got)
}
}
// With no config ever stored, bulkTimeout must still yield the default.
func TestBulkTimeoutDefaultWhenNilConfig(t *testing.T) {
	svc := &ScannerService{} // atomic pointer holds nil — nothing was stored
	if got := svc.bulkTimeout(); got != 120*time.Minute {
		t.Fatalf("expected default 120m for nil config, got %v", got)
	}
}
// ─── scannerLimits ────────────────────────────────────────────────────────────
func TestScannerLimitsUsesConfiguredValues(t *testing.T) {
cfg := &models.ScannerConfig{ScannerMemoryMB: 4096, ScannerPidsLimit: 1024}
s := newTestScannerService(cfg)
limits := s.scannerLimits()
if limits.MemoryBytes != 4096*1024*1024 {
t.Fatalf("expected MemoryBytes=%d, got %d", int64(4096*1024*1024), limits.MemoryBytes)
}
if limits.PidsLimit != 1024 {
t.Fatalf("expected PidsLimit=1024, got %d", limits.PidsLimit)
}
}
func TestScannerLimitsDefaultsWhenZero(t *testing.T) {
cfg := &models.ScannerConfig{ScannerMemoryMB: 0, ScannerPidsLimit: 0}
s := newTestScannerService(cfg)
limits := s.scannerLimits()
if limits.MemoryBytes != 2048*1024*1024 {
t.Fatalf("expected default MemoryBytes=%d, got %d", int64(2048*1024*1024), limits.MemoryBytes)
}
if limits.PidsLimit != 512 {
t.Fatalf("expected default PidsLimit=512, got %d", limits.PidsLimit)
}
}
// A nil config must still produce the documented default limits.
func TestScannerLimitsDefaultsWhenNilConfig(t *testing.T) {
	svc := &ScannerService{}
	got := svc.scannerLimits()
	if got.MemoryBytes != 2048*1024*1024 {
		t.Fatalf("expected default MemoryBytes for nil config, got %d", got.MemoryBytes)
	}
	if got.PidsLimit != 512 {
		t.Fatalf("expected default PidsLimit=512 for nil config, got %d", got.PidsLimit)
	}
}
func TestScannerLimitsMemoryConversion(t *testing.T) {
// Verify MB-to-bytes multiplication: 1 MB → 1 048 576 bytes
cfg := &models.ScannerConfig{ScannerMemoryMB: 1, ScannerPidsLimit: 1}
s := newTestScannerService(cfg)
if got := s.scannerLimits().MemoryBytes; got != 1024*1024 {
t.Fatalf("1 MB should be %d bytes, got %d", 1024*1024, got)
}
}
// ─── humanBytes ──────────────────────────────────────────────────────────────
// Values below 1 KiB must be rendered as raw byte counts.
func TestHumanBytesBytes(t *testing.T) {
	for input, expected := range map[int64]string{
		0:    "0 B",
		1:    "1 B",
		1023: "1023 B",
	} {
		if got := humanBytes(input); got != expected {
			t.Errorf("humanBytes(%d) = %q, want %q", input, got, expected)
		}
	}
}
// KiB-range values render with one decimal place in the KB unit.
func TestHumanBytesKilobytes(t *testing.T) {
	got := humanBytes(1 << 10)
	if got != "1.0 KB" {
		t.Fatalf("expected '1.0 KB', got %q", got)
	}
	got = humanBytes(2 << 10)
	if got != "2.0 KB" {
		t.Fatalf("expected '2.0 KB', got %q", got)
	}
}
// MiB-range values render with one decimal place in the MB unit.
func TestHumanBytesMegabytes(t *testing.T) {
	got := humanBytes(1 << 20)
	if got != "1.0 MB" {
		t.Fatalf("expected '1.0 MB', got %q", got)
	}
	got = humanBytes(512 << 20)
	if got != "512.0 MB" {
		t.Fatalf("expected '512.0 MB', got %q", got)
	}
}
// GiB-range values render with one decimal place in the GB unit.
func TestHumanBytesGigabytes(t *testing.T) {
	got := humanBytes(1 << 30)
	if got != "1.0 GB" {
		t.Fatalf("expected '1.0 GB', got %q", got)
	}
}
// ─── heartbeat ───────────────────────────────────────────────────────────────
// TestHeartbeatUpdatesJobProgress starts the heartbeat goroutine against a job
// in the Scanning state and asserts that it terminates once its context is
// cancelled.
// NOTE(review): despite the name, only the shutdown path is asserted here —
// per the inline comment below, the heartbeat's internal ticker period (5 s)
// cannot be overridden, and the context is cancelled immediately, so no
// Progress write can actually be observed by this test. Consider renaming or
// extending it if Progress updates need real coverage.
func TestHeartbeatUpdatesJobProgress(t *testing.T) {
	s := newTestScannerService(&models.ScannerConfig{})
	job := &models.ScanJob{Status: models.ScanJobScanning}
	var bytesWritten int64
	atomic.StoreInt64(&bytesWritten, 1536) // seed the byte counter heartbeat reads (1.5 KB)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	// The 5-second ticker period cannot be overridden, so rather than waiting
	// for a tick we exercise the cancellation path directly after a brief run.
	done := make(chan struct{})
	go func() {
		// start time is backdated 10 s — presumably so the elapsed time the
		// heartbeat reports is non-zero; confirm against heartbeat's impl.
		s.heartbeat(ctx, job, &bytesWritten, time.Now().Add(-10*time.Second))
		close(done)
	}()
	// Cancel promptly and confirm the goroutine exits.
	cancel()
	select {
	case <-done:
	case <-time.After(2 * time.Second):
		t.Fatal("heartbeat did not exit after context cancel")
	}
}
// TestHeartbeatDoesNotOverwriteNonScanningStatus calls heartbeat with an
// already-cancelled context and a job that has left the Scanning state, then
// asserts Progress is untouched.
// NOTE(review): because the context is cancelled before the call, heartbeat
// should return before its first (5 s) tick — so this pins the "no write on
// shutdown" behavior rather than the status check inside the tick handler
// itself; confirm against heartbeat's select loop.
func TestHeartbeatDoesNotOverwriteNonScanningStatus(t *testing.T) {
	s := newTestScannerService(&models.ScannerConfig{})
	job := &models.ScanJob{Status: models.ScanJobComplete, Progress: "original"}
	var bytesWritten int64
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel immediately so heartbeat exits on ctx.Done without ticking
	s.heartbeat(ctx, job, &bytesWritten, time.Now())
	if job.Progress != "original" {
		t.Fatalf("expected progress to remain 'original', got %q", job.Progress)
	}
}