chore: remove additional dead code

Remove 241 lines of unreachable code across internal and pkg:
- internal/crypto/crypto.go: unused NewCryptoManager wrapper
- internal/monitoring/scheduler.go: unused fixedIntervalSelector type
- internal/ssh/knownhosts/manager.go: unused hostKeyExists function
- internal/updates/manager.go: unused getLatestRelease wrapper
- internal/updates/updater.go: unused GetAll method
- pkg/discovery/discovery.go: unused scanWorker and runPhase (legacy compat)
- pkg/proxmox/client.go: unused post, getTaskStatus, waitForTaskCompletion, getTaskLog
- pkg/proxmox/cluster_client.go: unused markUnhealthy wrapper
rcourtman 2025-11-27 05:13:26 +00:00
parent 25542ae51d
commit b0ce0d932f
8 changed files with 1 addition and 241 deletions

internal/crypto/crypto.go

@@ -19,11 +19,6 @@ type CryptoManager struct {
 	key []byte
 }
 
-// NewCryptoManager creates a new crypto manager using the default data directory.
-func NewCryptoManager() (*CryptoManager, error) {
-	return NewCryptoManagerAt(utils.GetDataDir())
-}
-
 // NewCryptoManagerAt creates a new crypto manager with an explicit data directory override.
 func NewCryptoManagerAt(dataDir string) (*CryptoManager, error) {
 	key, err := getOrCreateKeyAt(dataDir)

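Note: with the zero-argument wrapper gone, callers construct the manager directly. A minimal sketch of a hypothetical call site (repo-internal imports of internal/crypto and internal/utils elided):

	// Hypothetical caller, replacing crypto.NewCryptoManager().
	cm, err := crypto.NewCryptoManagerAt(utils.GetDataDir())
	if err != nil {
		return fmt.Errorf("init crypto manager: %w", err)
	}
	_ = cm // use the manager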
internal/monitoring/scheduler.go

@@ -275,17 +275,6 @@ func (noopStalenessSource) StalenessScore(instanceType InstanceType, instanceNam
 	return 0, false
 }
 
-type fixedIntervalSelector struct {
-	interval time.Duration
-}
-
-func (f *fixedIntervalSelector) SelectInterval(req IntervalRequest) time.Duration {
-	if f.interval > 0 {
-		return f.interval
-	}
-	return req.BaseInterval
-}
-
 type adaptiveIntervalSelector struct {
 	mu    sync.Mutex
 	state map[string]time.Duration

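Note: if a constant-interval selector is ever needed again (e.g. in a test), it is a small stub. Sketch only, assuming the package's unexported selector contract is the single SelectInterval(IntervalRequest) time.Duration method the deleted type implemented:

	// Test-only stub with the same shape as the deleted fixedIntervalSelector.
	type stubSelector struct{ interval time.Duration }

	func (s *stubSelector) SelectInterval(req IntervalRequest) time.Duration {
		if s.interval > 0 {
			return s.interval
		}
		return req.BaseInterval // fall back to the caller-provided base
	}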
internal/ssh/knownhosts/manager.go

@@ -231,25 +231,6 @@ func (m *manager) ensureKnownHostsFile() error {
 	return f.Close()
 }
 
-func hostKeyExists(path, host string) (bool, error) {
-	f, err := os.Open(path)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return false, nil
-		}
-		return false, err
-	}
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		if hostLineMatches(host, scanner.Text()) {
-			return true, nil
-		}
-	}
-	return false, scanner.Err()
-}
-
 func appendHostKey(path string, entries [][]byte) error {
 	f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY, 0o600)
 	if err != nil {

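Note: appendHostKey is the surviving entry point. If a check-before-append flow is ever wanted again, a sketch (hostLineMatches and appendHostKey as shown above; assumes ensureKnownHostsFile has already created the file, since appendHostKey does not pass os.O_CREATE):

	// Hypothetical helper: scan for the host, append only when absent.
	func appendIfAbsent(path, host string, entries [][]byte) error {
		f, err := os.Open(path)
		if err != nil {
			return err
		}
		defer f.Close()

		sc := bufio.NewScanner(f)
		for sc.Scan() {
			if hostLineMatches(host, sc.Text()) {
				return nil // key already present, nothing to append
			}
		}
		if err := sc.Err(); err != nil {
			return err
		}
		return appendHostKey(path, entries)
	}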
internal/updates/manager.go

@@ -578,15 +578,6 @@ func (m *Manager) GetCachedUpdateInfo() *UpdateInfo {
 	return m.checkCache[channel]
 }
 
-// getLatestRelease fetches the latest release from GitHub using saved config
-func (m *Manager) getLatestRelease(ctx context.Context, currentVer *Version) (*ReleaseInfo, error) {
-	channel := m.config.UpdateChannel
-	if channel == "" {
-		channel = "stable"
-	}
-	return m.getLatestReleaseForChannel(ctx, channel, currentVer)
-}
-
 // getLatestReleaseForChannel fetches the latest release from GitHub for a specific channel
 func (m *Manager) getLatestReleaseForChannel(ctx context.Context, channel string, currentVer *Version) (*ReleaseInfo, error) {
 	if channel == "" {

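Note: the kept getLatestReleaseForChannel already guards an empty channel (the `if channel == "" {` context line above), so the deleted wrapper was pure indirection. A hypothetical caller now passes the configured channel straight through:

	// channel may be ""; the callee appears to default it (guard shown above).
	rel, err := m.getLatestReleaseForChannel(ctx, m.config.UpdateChannel, currentVer)
	if err != nil {
		return nil, err
	}
	_ = rel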
internal/updates/updater.go

@@ -79,8 +79,3 @@ func (r *UpdaterRegistry) Get(deploymentType string) (Updater, error) {
 	}
 	return updater, nil
 }
-
-// GetAll returns all registered updaters
-func (r *UpdaterRegistry) GetAll() map[string]Updater {
-	return r.updaters
-}

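Note: with GetAll gone, Get is the registry's only accessor; enumerating every registered updater is no longer supported. A hypothetical lookup:

	updater, err := registry.Get(deploymentType)
	if err != nil {
		return err // unknown deployment type
	}
	_ = updater // proceed with the type-specific updater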
pkg/discovery/discovery.go

@@ -458,26 +458,6 @@ func (s *Scanner) populateServerHostname(ctx context.Context, server *Discovered
 	}
 }
 
-// scanWorker scans IPs from the channel
-// NOTE: This function is kept for backward compatibility but is not actively used.
-// New code should use scanWorkerWithProgress which includes progress tracking.
-func (s *Scanner) scanWorker(ctx context.Context, wg *sync.WaitGroup, phase string, ipChan <-chan string, resultChan chan<- discoveredResult, errorChan chan<- phaseError) {
-	defer wg.Done()
-
-	for ip := range ipChan {
-		select {
-		case <-ctx.Done():
-			return
-		default:
-			for _, port := range proxmoxProbePorts {
-				if server := s.discoverAtPort(ctx, ip, port); server != nil {
-					resultChan <- discoveredResult{Phase: phase, Server: server}
-				}
-			}
-		}
-	}
-}
-
 // scanWorkerWithProgress scans IPs and reports progress
 func (s *Scanner) scanWorkerWithProgress(ctx context.Context, wg *sync.WaitGroup, phase string, ipChan <-chan string, resultChan chan<- discoveredResult, progressChan chan<- int) {
 	defer wg.Done()
@@ -499,81 +479,7 @@ func (s *Scanner) scanWorkerWithProgress(ctx context.Context, wg *sync.WaitGroup
 	}
 }
 
-// runPhase runs a scanning phase without progress tracking
-// NOTE: This function is kept for backward compatibility but is not actively used.
-// New code should use runPhaseWithProgress which includes progress tracking.
-func (s *Scanner) runPhase(ctx context.Context, phase string, ips []string, callback ServerCallback, result *DiscoveryResult) error {
-	if len(ips) == 0 {
-		return nil
-	}
-
-	workerCount := s.policy.MaxConcurrent
-	if workerCount <= 0 {
-		workerCount = 1
-	}
-
-	ipChan := make(chan string, len(ips))
-	resultChan := make(chan discoveredResult, len(ips))
-	errorChan := make(chan phaseError, len(ips))
-
-	var wg sync.WaitGroup
-	for i := 0; i < workerCount; i++ {
-		wg.Add(1)
-		go s.scanWorker(ctx, &wg, phase, ipChan, resultChan, errorChan)
-	}
-
-	for _, ip := range ips {
-		ipChan <- ip
-	}
-	close(ipChan)
-
-	go func() {
-		wg.Wait()
-		close(resultChan)
-		close(errorChan)
-	}()
-
-	for resultChan != nil || errorChan != nil {
-		select {
-		case res, ok := <-resultChan:
-			if !ok {
-				resultChan = nil
-				continue
-			}
-			if res.Server == nil {
-				continue
-			}
-			result.Servers = append(result.Servers, *res.Server)
-			log.Info().
-				Str("phase", res.Phase).
-				Str("ip", res.Server.IP).
-				Str("type", res.Server.Type).
-				Str("hostname", res.Server.Hostname).
-				Msg("Discovered server")
-			if callback != nil {
-				callback(*res.Server, res.Phase)
-			}
-		case perr, ok := <-errorChan:
-			if !ok {
-				errorChan = nil
-				continue
-			}
-			if perr.Message == "" {
-				continue
-			}
-			result.Errors = append(result.Errors, fmt.Sprintf("%s: %s", perr.Phase, perr.Message))
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-	}
-
-	return nil
-}
-
-// runPhaseWithProgress wraps runPhase with progress tracking and reporting
+// runPhaseWithProgress runs a scanning phase with progress tracking and reporting
 func (s *Scanner) runPhaseWithProgress(ctx context.Context, phase string, phaseNumber, totalPhases int, ips []string, serverCallback ServerCallback, progressCallback ProgressCallback, totalProcessed *int, totalTargets int, result *DiscoveryResult) error {
 	if len(ips) == 0 {
 		return nil

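Note: scanWorkerWithProgress and runPhaseWithProgress keep the same shape as the deleted pair: a bounded set of workers drains a job channel and results fan in until the channels close. A generic sketch of that pattern (names are illustrative, not package API; needs only context and sync):

	func fanOut[J, R any](ctx context.Context, workers int, jobs []J, work func(context.Context, J) []R) []R {
		jobCh := make(chan J, len(jobs))
		resCh := make(chan R, len(jobs))

		var wg sync.WaitGroup
		for i := 0; i < workers; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for j := range jobCh {
					select {
					case <-ctx.Done():
						return // stop draining once cancelled
					default:
					}
					for _, r := range work(ctx, j) {
						resCh <- r
					}
				}
			}()
		}

		for _, j := range jobs {
			jobCh <- j
		}
		close(jobCh)

		// Close the result channel once every worker has finished.
		go func() { wg.Wait(); close(resCh) }()

		var out []R
		for r := range resCh {
			out = append(out, r)
		}
		return out
	}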
pkg/proxmox/client.go

@@ -440,11 +440,6 @@ func (c *Client) get(ctx context.Context, path string) (*http.Response, error) {
 	return c.request(ctx, "GET", path, nil)
 }
 
-// post performs a POST request
-func (c *Client) post(ctx context.Context, path string, data url.Values) (*http.Response, error) {
-	return c.request(ctx, "POST", path, data)
-}
-
 // Node represents a Proxmox VE node
 type Node struct {
 	Node string `json:"node"`
@@ -1069,93 +1064,6 @@ func (c *Client) GetBackupTasks(ctx context.Context) ([]Task, error) {
 	return allTasks, nil
 }
 
-type taskStatusResponse struct {
-	Status     string `json:"status"`
-	ExitStatus string `json:"exitstatus"`
-	Type       string `json:"type"`
-	UPID       string `json:"upid"`
-	ID         string `json:"id"`
-}
-
-type taskLogEntry struct {
-	LineNumber int    `json:"n"`
-	Text       string `json:"t"`
-}
-
-func (c *Client) getTaskStatus(ctx context.Context, node, upid string) (*taskStatusResponse, error) {
-	encodedUPID := url.PathEscape(upid)
-	resp, err := c.get(ctx, fmt.Sprintf("/nodes/%s/tasks/%s/status", node, encodedUPID))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		body, _ := io.ReadAll(resp.Body)
-		return nil, fmt.Errorf("failed to get task status (status %d): %s", resp.StatusCode, strings.TrimSpace(string(body)))
-	}
-
-	var result struct {
-		Data taskStatusResponse `json:"data"`
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-		return nil, err
-	}
-	return &result.Data, nil
-}
-
-func (c *Client) waitForTaskCompletion(ctx context.Context, node, upid string) (*taskStatusResponse, error) {
-	ticker := time.NewTicker(500 * time.Millisecond)
-	defer ticker.Stop()
-
-	for {
-		status, err := c.getTaskStatus(ctx, node, upid)
-		if err != nil {
-			return nil, err
-		}
-		if status != nil && strings.ToLower(status.Status) != "running" && strings.ToLower(status.Status) != "active" {
-			return status, nil
-		}
-		select {
-		case <-ctx.Done():
-			return nil, ctx.Err()
-		case <-ticker.C:
-		}
-	}
-}
-
-func (c *Client) getTaskLog(ctx context.Context, node, upid string) ([]string, error) {
-	encodedUPID := url.PathEscape(upid)
-	resp, err := c.get(ctx, fmt.Sprintf("/nodes/%s/tasks/%s/log?start=0", node, encodedUPID))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != http.StatusOK {
-		body, _ := io.ReadAll(resp.Body)
-		return nil, fmt.Errorf("failed to get task log (status %d): %s", resp.StatusCode, strings.TrimSpace(string(body)))
-	}
-
-	var result struct {
-		Data []taskLogEntry `json:"data"`
-	}
-	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
-		return nil, err
-	}
-
-	lines := make([]string, 0, len(result.Data))
-	for _, entry := range result.Data {
-		lines = append(lines, entry.Text)
-	}
-	return lines, nil
-}
-
 // GetContainerInterfaces returns the network interfaces (with IPs) for a container.
 func (c *Client) GetContainerInterfaces(ctx context.Context, node string, vmid int) ([]ContainerInterface, error) {
 	resp, err := c.get(ctx, fmt.Sprintf("/nodes/%s/lxc/%d/interfaces", node, vmid))

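Note: the deleted trio was the client's internal task-tracking path (status poll, completion wait, log fetch); nothing in pkg/proxmox calls it anymore. Should polling return, its shape reduces to a context-aware ticker loop. A sketch with an injected fetch func standing in for the deleted getTaskStatus:

	func pollTask(ctx context.Context, fetch func(context.Context) (string, error)) (string, error) {
		ticker := time.NewTicker(500 * time.Millisecond)
		defer ticker.Stop()

		for {
			status, err := fetch(ctx)
			if err != nil {
				return "", err
			}
			// Proxmox reports "running"/"active" while a task is in flight.
			if s := strings.ToLower(status); s != "running" && s != "active" {
				return status, nil
			}
			select {
			case <-ctx.Done():
				return "", ctx.Err()
			case <-ticker.C:
			}
		}
	}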
pkg/proxmox/cluster_client.go

@@ -379,11 +379,6 @@ func (cc *ClusterClient) getHealthyClient(ctx context.Context) (*Client, error)
 	return client, nil
 }
 
-// markUnhealthy marks an endpoint as unhealthy
-func (cc *ClusterClient) markUnhealthy(endpoint string) {
-	cc.markUnhealthyWithError(endpoint, "")
-}
-
 // markUnhealthyWithError marks an endpoint as unhealthy and captures the error
 func (cc *ClusterClient) markUnhealthyWithError(endpoint string, errMsg string) {
 	cc.mu.Lock()
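
Note: the one-line wrapper folds into its call sites; a former markUnhealthy(endpoint) caller becomes:

	cc.markUnhealthyWithError(endpoint, "")          // no error detail available
	cc.markUnhealthyWithError(endpoint, err.Error()) // or capture the triggering error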