New test files with expanded coverage:

API tests:
- ai_handler_test.go: AI handler unit tests with mocking
- agent_profiles_tools_test.go: Profile management tests
- alerts_endpoints_test.go: Alert API endpoint tests
- alerts_test.go: Updated for interface changes
- audit_handlers_test.go: Audit handler tests
- frontend_embed_test.go: Frontend embedding tests
- metadata_handlers_test.go, metadata_provider_test.go: Metadata tests
- notifications_test.go: Updated for interface changes
- profile_suggestions_test.go: Profile suggestion tests
- saml_service_test.go: SAML authentication tests
- sensor_proxy_gate_test.go: Sensor proxy tests
- updates_test.go: Updated for interface changes

Agent tests:
- dockeragent/signature_test.go: Docker agent signature tests
- hostagent/agent_metrics_test.go: Host agent metrics tests
- hostagent/commands_test.go: Command execution tests
- hostagent/network_helpers_test.go: Network helper tests
- hostagent/proxmox_setup_test.go: Updated setup tests
- kubernetesagent/*_test.go: Kubernetes agent tests

Core package tests:
- monitoring/kubernetes_agents_test.go, reload_test.go
- remoteconfig/client_test.go, signature_test.go
- sensors/collector_test.go
- updates/adapter_installsh_*_test.go: Install adapter tests
- updates/manager_*_test.go: Update manager tests
- websocket/hub_*_test.go: WebSocket hub tests

Library tests:
- pkg/audit/export_test.go: Audit export tests
- pkg/metrics/store_test.go: Metrics store tests
- pkg/proxmox/*_test.go: Proxmox client tests
- pkg/reporting/reporting_test.go: Reporting tests
- pkg/server/*_test.go: Server tests
- pkg/tlsutil/extra_test.go: TLS utility tests

Total: ~8000 lines of new test code
package proxmox

import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"
)

func TestClusterClientEndpointFingerprint(t *testing.T) {
	cc := &ClusterClient{
		config: ClientConfig{Fingerprint: "base"},
		endpointFingerprints: map[string]string{
			"node1": "node-fp",
		},
	}

	if got := cc.getEndpointFingerprint("node1"); got != "node-fp" {
		t.Fatalf("expected node fingerprint, got %s", got)
	}
	if got := cc.getEndpointFingerprint("node2"); got != "base" {
		t.Fatalf("expected base fingerprint, got %s", got)
	}
}
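
// The fingerprint test above pins down a lookup order: a per-endpoint
// fingerprint wins, and the client-level fingerprint from config is the
// fallback. A minimal sketch of logic consistent with those assertions
// (field names are taken from the test; any locking the real method does
// is omitted here):
//
//	func (cc *ClusterClient) getEndpointFingerprint(endpoint string) string {
//		if fp, ok := cc.endpointFingerprints[endpoint]; ok && fp != "" {
//			return fp
//		}
//		return cc.config.Fingerprint
//	}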

func TestClusterClientMarkAndClearError(t *testing.T) {
	cc := &ClusterClient{
		name:            "cluster",
		nodeHealth:      map[string]bool{"node1": true},
		lastError:       make(map[string]string),
		lastHealthCheck: make(map[string]time.Time),
	}

	cc.markUnhealthyWithError("node1", "connection refused")
	if cc.nodeHealth["node1"] {
		t.Fatal("expected node to be unhealthy")
	}
	// Compare case-insensitively so the assertion holds whether or not the
	// recorded message is normalized.
	if errMsg := cc.lastError["node1"]; errMsg == "" || !strings.Contains(strings.ToLower(errMsg), "connection refused") {
		t.Fatalf("unexpected error message: %q", errMsg)
	}

	cc.clearEndpointError("node1")
	if !cc.nodeHealth["node1"] {
		t.Fatal("expected node to be healthy after clear")
	}
	if _, ok := cc.lastError["node1"]; ok {
		t.Fatal("expected lastError to be cleared")
	}
}
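
// markUnhealthyWithError and clearEndpointError are exercised as a pair
// above. A sketch of the bookkeeping they imply, using only the fields the
// test constructs (the real methods presumably also hold a mutex):
//
//	func (cc *ClusterClient) markUnhealthyWithError(endpoint, msg string) {
//		cc.nodeHealth[endpoint] = false
//		cc.lastError[endpoint] = msg
//		cc.lastHealthCheck[endpoint] = time.Now()
//	}
//
//	func (cc *ClusterClient) clearEndpointError(endpoint string) {
//		cc.nodeHealth[endpoint] = true
//		delete(cc.lastError, endpoint)
//	}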

func TestClusterClientApplyRateLimitCooldown(t *testing.T) {
	cc := &ClusterClient{rateLimitUntil: make(map[string]time.Time)}
	cc.applyRateLimitCooldown("node1", 100*time.Millisecond)
	if when, ok := cc.rateLimitUntil["node1"]; !ok || time.Until(when) <= 0 {
		t.Fatalf("expected cooldown set, got %v", when)
	}
}
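
// The cooldown test only requires that rateLimitUntil["node1"] end up in the
// future. A one-line sketch consistent with that assertion:
//
//	func (cc *ClusterClient) applyRateLimitCooldown(endpoint string, d time.Duration) {
//		cc.rateLimitUntil[endpoint] = time.Now().Add(d)
//	}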

func TestExecuteWithFailoverSkipsUnhealthyMarking(t *testing.T) {
	cc := &ClusterClient{
		name:            "cluster",
		endpoints:       []string{"node1"},
		clients:         map[string]*Client{"node1": {}},
		nodeHealth:      map[string]bool{"node1": true},
		lastError:       make(map[string]string),
		lastHealthCheck: map[string]time.Time{"node1": time.Now()},
		rateLimitUntil:  make(map[string]time.Time),
	}

	// A guest-agent error is specific to the VM, not the endpoint, so it
	// must not mark the node unhealthy or record an endpoint error.
	err := cc.executeWithFailover(context.Background(), func(*Client) error {
		return fmt.Errorf("No QEMU guest agent")
	})
	if err == nil {
		t.Fatal("expected error")
	}
	if !cc.nodeHealth["node1"] {
		t.Fatal("expected node to remain healthy for VM-specific error")
	}
	if len(cc.lastError) != 0 {
		t.Fatalf("expected no lastError, got %+v", cc.lastError)
	}

	// An authentication failure is a credentials problem that failover
	// cannot fix, so the endpoint likewise stays healthy.
	err = cc.executeWithFailover(context.Background(), func(*Client) error {
		return fmt.Errorf("authentication failed")
	})
	if err == nil {
		t.Fatal("expected auth error")
	}
	if !cc.nodeHealth["node1"] {
		t.Fatal("expected node to remain healthy for auth error")
	}
}

func TestExecuteWithFailoverClearsErrorOnSuccess(t *testing.T) {
	cc := &ClusterClient{
		name:            "cluster",
		endpoints:       []string{"node1"},
		clients:         map[string]*Client{"node1": {}},
		nodeHealth:      map[string]bool{"node1": true},
		lastError:       map[string]string{"node1": "stale"},
		lastHealthCheck: map[string]time.Time{"node1": time.Now()},
		rateLimitUntil:  make(map[string]time.Time),
	}

	if err := cc.executeWithFailover(context.Background(), func(*Client) error { return nil }); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if _, ok := cc.lastError["node1"]; ok {
		t.Fatal("expected lastError to be cleared")
	}
}
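
// Taken together, the two failover tests constrain executeWithFailover's
// error handling: guest-specific errors ("No QEMU guest agent") and auth
// failures are returned to the caller without flipping nodeHealth, while a
// successful call clears any stale lastError entry. A hedged sketch of the
// classification step those tests imply (the helper name is illustrative,
// not the repo's):
//
//	func isEndpointFault(err error) bool {
//		msg := strings.ToLower(err.Error())
//		if strings.Contains(msg, "qemu guest agent") {
//			return false // guest-level problem, not the endpoint's
//		}
//		if strings.Contains(msg, "authentication failed") {
//			return false // credentials issue; failover will not help
//		}
//		return true // network-style failures mark the endpoint unhealthy
//	}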