mirror of https://github.com/OpenRouterTeam/spawn.git
synced 2026-04-28 03:49:31 +00:00
fix: remove 100-entry history cap — keep all records (#2819)
The MAX_HISTORY_ENTRIES=100 cap silently archived records when you spawned more than 100 times, making older active servers vanish from `spawn list`. The cap was solving a non-problem — 1000 records is ~500KB.

Removed:

- MAX_HISTORY_ENTRIES constant and trimming logic
- archiveRecords() and readExistingArchive() (no longer needed)
- Smart trim tests (history-trimming.test.ts rewritten to test ordering only)

Existing archive files (~/.spawn/history-YYYY-MM-DD.json) are still readable by recoverFromArchives() for corruption recovery.

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
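In effect, the save path collapses to load → append → write. A toy model of the new behavior, for orientation before the diff (the real history.ts wraps this in withHistoryLock and atomic writes, as the last hunk below shows; the HISTORY_FILE name and the version value here are illustrative, and the flat `{ version, records }` file shape is taken from the tests):

```ts
// Toy model of the append-only save path — not the actual history.ts.
import { existsSync, readFileSync, writeFileSync } from "node:fs";

type SpawnRecord = { id?: string; agent: string; cloud: string; timestamp: string };

const HISTORY_FILE = "history.json"; // illustrative; the real path lives under ~/.spawn

function loadHistory(): SpawnRecord[] {
  if (!existsSync(HISTORY_FILE)) return [];
  return (JSON.parse(readFileSync(HISTORY_FILE, "utf-8")).records ?? []) as SpawnRecord[];
}

function writeHistory(records: SpawnRecord[]): void {
  // Pretty-printed with a trailing newline, as the file-format tests expect
  writeFileSync(HISTORY_FILE, `${JSON.stringify({ version: 1, records }, null, 2)}\n`);
}

function saveSpawnRecord(record: SpawnRecord): void {
  const history = loadHistory();
  history.push(record); // previously: trim + archive once past MAX_HISTORY_ENTRIES
  writeHistory(history); // now: every record is kept
}
```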
This commit is contained in:
parent a7cebd4054
commit 21c0e1511c
5 changed files with 55 additions and 805 deletions
package.json
@@ -1,6 +1,6 @@
 {
   "name": "@openrouter/spawn",
-  "version": "0.25.3",
+  "version": "0.25.4",
   "type": "module",
   "bin": {
     "spawn": "cli.js"
clear-history.test.ts
@@ -226,7 +226,7 @@ describe("clearHistory", () => {
     expect(filterHistory(undefined, "sprite")).toHaveLength(0);
   });

-  it("should return correct count for exactly MAX_HISTORY_ENTRIES records", () => {
+  it("should return correct count for 100 records", () => {
     const records: SpawnRecord[] = [];
     for (let i = 0; i < 100; i++) {
       records.push({
@@ -3,7 +3,7 @@
  *
  * Focuses on uncovered paths: saveLaunchCmd, saveMetadata,
  * markRecordDeleted, updateRecordIp, updateRecordConnection, getActiveServers,
- * removeRecord, archiving, trimming edge cases, and v1 loose schema handling.
+ * removeRecord, no-cap behavior, and v1 loose schema handling.
  * (generateSpawnId is covered in history-spawn-id.test.ts)
  * (clearHistory is covered in clear-history.test.ts)
  */
@@ -11,7 +11,7 @@
 import type { SpawnRecord } from "../history.js";

 import { afterEach, beforeEach, describe, expect, it, spyOn } from "bun:test";
-import { existsSync, mkdirSync, readdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
+import { existsSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
 import { join } from "node:path";
 import {
   filterHistory,
@@ -662,13 +662,13 @@ describe("history.ts coverage", () => {
     });
   });

-  // ── Trimming with archiving ───────────────────────────────────────────
+  // ── No trimming — all records retained ───────────────────────────────

-  describe("trimming and archiving", () => {
-    it("archives deleted records first when over limit", () => {
-      // Create 99 non-deleted + 2 deleted = 101 total, should archive the 2 deleted
+  describe("no history cap", () => {
+    it("retains all records when over 100 entries", () => {
+      // Create 100 non-deleted + 1 deleted = 101 total, all should be kept
       const records: SpawnRecord[] = [];
-      for (let i = 0; i < 99; i++) {
+      for (let i = 0; i < 100; i++) {
         records.push({
           id: `r-${i}`,
           agent: "claude",
@@ -695,7 +695,7 @@ describe("history.ts coverage", () => {
         }),
       );

-      // Save one more to trigger trimming
+      // Save one more — no trimming should occur
       saveSpawnRecord({
         id: "new-1",
         agent: "codex",
@@ -704,15 +704,11 @@ describe("history.ts coverage", () => {
       });

       const data = JSON.parse(readFileSync(join(testDir, "history.json"), "utf-8"));
-      // Should have 100 records (99 non-deleted + 1 new)
-      expect(data.records.length).toBeLessThanOrEqual(100);
-      // Deleted record should be removed
+      // All 102 records should be retained (101 existing + 1 new)
+      expect(data.records).toHaveLength(102);
+      // Deleted record should still be present
       const hasDeleted = data.records.some((r: SpawnRecord) => r.connection?.deleted);
-      expect(hasDeleted).toBe(false);
-
-      // Archive file should exist
-      const archiveFiles = readdirSync(testDir).filter((f) => f.startsWith("history-"));
-      expect(archiveFiles.length).toBeGreaterThan(0);
+      expect(hasDeleted).toBe(true);
     });
   });
 });
history-trimming.test.ts
@@ -1,31 +1,18 @@
 import type { SpawnRecord } from "../history.js";

 import { afterEach, beforeEach, describe, expect, it } from "bun:test";
-import { existsSync, mkdirSync, readdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
+import { existsSync, mkdirSync, rmSync, writeFileSync } from "node:fs";
 import { join } from "node:path";
-import { filterHistory, HISTORY_SCHEMA_VERSION, loadHistory, saveSpawnRecord } from "../history.js";
+import { filterHistory, loadHistory, saveSpawnRecord } from "../history.js";

 /**
- * Tests for history trimming and boundary behavior.
+ * Tests for filterHistory ordering and saveSpawnRecord behavior.
  *
- * The saveSpawnRecord function has a MAX_HISTORY_ENTRIES = 100 cap that
- * trims old entries when history grows too large. Smart trimming evicts
- * soft-deleted records first, then oldest non-deleted records. Evicted
- * records are archived to dated backup files so nothing is permanently lost.
- *
- * Also tests filterHistory ordering guarantees (reverse chronological).
+ * History has no entry cap — all records are kept indefinitely.
+ * These tests verify ordering guarantees and basic save/load behavior.
  */

-function getArchiveFiles(dir: string): string[] {
-  return readdirSync(dir).filter((f) => f.startsWith("history-") && f.endsWith(".json") && f !== "history.json");
-}
-
-function loadArchive(dir: string, filename: string): SpawnRecord[] {
-  const raw = readFileSync(join(dir, filename), "utf-8");
-  return JSON.parse(raw);
-}
-
-describe("History Trimming and Boundaries", () => {
+describe("History Ordering and Save Behavior", () => {
   let testDir: string;
   let originalEnv: NodeJS.ProcessEnv;

@@ -50,586 +37,58 @@ describe("History Trimming and Boundaries", () => {
     }
   });

-  // ── MAX_HISTORY_ENTRIES trimming ────────────────────────────────────────
-
-  describe("MAX_HISTORY_ENTRIES trimming (100 entries)", () => {
-    it("should keep all entries when at exactly 100", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 99; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Adding one more brings us to exactly 100
-      saveSpawnRecord({
-        agent: "agent-99",
-        cloud: "cloud-99",
-        timestamp: "2026-01-01T01:39:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // First entry should still be agent-0 (nothing trimmed)
-      expect(loaded[0].agent).toBe("agent-0");
-      expect(loaded[99].agent).toBe("agent-99");
-      // No archive should be created
-      expect(getArchiveFiles(testDir)).toHaveLength(0);
-    });
-
-    it("should trim to 100 when adding entry that exceeds the limit", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Adding 101st entry should trigger trimming
-      saveSpawnRecord({
-        agent: "agent-100",
-        cloud: "cloud-100",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // The oldest entry (agent-0) should be trimmed
-      expect(loaded[0].agent).toBe("agent-1");
-      // The newest entry should be the one we just added
-      expect(loaded[99].agent).toBe("agent-100");
-      // Archive should contain the trimmed record
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(1);
-      expect(archived[0].agent).toBe("agent-0");
-    });
-
-    it("should trim correctly when history is well over the limit", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 150; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: `2026-01-${String(Math.floor(i / 24) + 1).padStart(2, "0")}T${String(i % 24).padStart(2, "0")}:00:00.000Z`,
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Adding another entry to 150 existing entries
-      saveSpawnRecord({
-        agent: "agent-150",
-        cloud: "cloud-150",
-        timestamp: "2026-01-10T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // Should keep the most recent 100 entries: agent-51 through agent-150
-      expect(loaded[0].agent).toBe("agent-51");
-      expect(loaded[99].agent).toBe("agent-150");
-      // Archive should contain 51 trimmed records (agent-0 through agent-50)
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(51);
-    });
-
-    it("should not trim when history has fewer than 100 entries", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 50; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "agent-50",
-        cloud: "cloud-50",
-        timestamp: "2026-01-01T00:50:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(51);
-      expect(loaded[0].agent).toBe("agent-0");
-      expect(loaded[50].agent).toBe("agent-50");
-      // No archive when under the limit
-      expect(getArchiveFiles(testDir)).toHaveLength(0);
-    });
-
-    it("should preserve prompt fields through trimming", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-          ...(i >= 90
-            ? {
-                prompt: `Prompt for agent-${i}`,
-              }
-            : {}),
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "agent-100",
-        cloud: "cloud-100",
-        timestamp: "2026-01-02T00:00:00.000Z",
-        prompt: "Final prompt",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // Check that prompts survive trimming for remaining entries
-      const withPrompts = loaded.filter((r) => r.prompt);
-      expect(withPrompts.length).toBe(11); // agents 90-99 + agent-100
-      expect(withPrompts[withPrompts.length - 1].prompt).toBe("Final prompt");
-    });
-
-    it("should handle sequential saves that cross the limit", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 98; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Save 3 more (98 + 3 = 101, triggers trim at 101)
-      saveSpawnRecord({
-        agent: "new-98",
-        cloud: "cloud",
-        timestamp: "2026-02-01T00:00:00.000Z",
-      });
-      saveSpawnRecord({
-        agent: "new-99",
-        cloud: "cloud",
-        timestamp: "2026-02-02T00:00:00.000Z",
-      });
-      saveSpawnRecord({
-        agent: "new-100",
-        cloud: "cloud",
-        timestamp: "2026-02-03T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // The newest entry should be last
-      expect(loaded[loaded.length - 1].agent).toBe("new-100");
-      expect(loaded[loaded.length - 2].agent).toBe("new-99");
-      expect(loaded[loaded.length - 3].agent).toBe("new-98");
-      // agent-0 should be trimmed since we went from 98 to 101
-      expect(loaded[0].agent).toBe("agent-1");
-    });
-  });
-
-  // ── Smart trimming: deleted records evicted first ──────────────────────
-
-  describe("smart trimming — deleted records evicted first", () => {
-    it("should evict deleted records before non-deleted when over limit", () => {
-      const records: SpawnRecord[] = [];
-      // 80 non-deleted records
-      for (let i = 0; i < 80; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      // 20 deleted records (mixed throughout)
-      for (let i = 0; i < 20; i++) {
-        records.push({
-          agent: `deleted-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-02T00:${String(i).padStart(2, "0")}:00.000Z`,
-          connection: {
-            ip: "1.2.3.4",
-            user: "root",
-            deleted: true,
-            deleted_at: "2026-01-03T00:00:00.000Z",
-          },
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Adding 101st entry (100 existing + 1 new)
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-04T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      // 80 non-deleted + 1 new = 81 total (under limit after removing 20 deleted)
-      expect(loaded).toHaveLength(81);
-      // All non-deleted records should be preserved
-      expect(loaded[0].agent).toBe("agent-0");
-      expect(loaded[79].agent).toBe("agent-79");
-      expect(loaded[80].agent).toBe("new-entry");
-      // No deleted records should remain
-      expect(loaded.filter((r) => r.connection?.deleted)).toHaveLength(0);
-      // Archive should contain the 20 deleted records
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(20);
-      expect(archived.every((r) => r.agent.startsWith("deleted-"))).toBe(true);
-    });
-
-    it("should trim oldest non-deleted when still over limit after removing deleted", () => {
-      const records: SpawnRecord[] = [];
-      // 98 non-deleted records
-      for (let i = 0; i < 98; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      // 2 deleted records
-      for (let i = 0; i < 2; i++) {
-        records.push({
-          agent: `deleted-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-02T00:00:00.000Z",
-          connection: {
-            ip: "1.2.3.4",
-            user: "root",
-            deleted: true,
-            deleted_at: "2026-01-03T00:00:00.000Z",
-          },
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Adding one more: 101 total, only 2 deleted — removing them gives 99, still need to check 99 <= 100 which is fine
-      // Wait, 98 non-deleted + 1 new = 99 non-deleted. 99 <= 100. So only deleted are archived.
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-04T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      // 98 + 1 new = 99 non-deleted (under limit)
-      expect(loaded).toHaveLength(99);
-      expect(loaded[0].agent).toBe("agent-0");
-      expect(loaded[98].agent).toBe("new-entry");
-
-      // Archive has the 2 deleted
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(2);
-    });
-
-    it("should trim oldest non-deleted records when 0 deleted and over limit", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-04T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // Oldest should be trimmed
-      expect(loaded[0].agent).toBe("agent-1");
-      expect(loaded[99].agent).toBe("new-entry");
-      // Archive should have the overflow record
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(1);
-      expect(archived[0].agent).toBe("agent-0");
-    });
-
-    it("should handle deleted records mixed throughout history order", () => {
-      const records: SpawnRecord[] = [];
-      // Create 100 records where every 5th is deleted
-      for (let i = 0; i < 100; i++) {
-        const isDeleted = i % 5 === 0;
-        const record: SpawnRecord = {
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
-        };
-        if (isDeleted) {
-          record.connection = {
-            ip: "1.2.3.4",
-            user: "root",
-            deleted: true,
-            deleted_at: "2026-01-03T00:00:00.000Z",
-          };
-        }
-        records.push(record);
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // 100 records (20 deleted, 80 non-deleted) + 1 new = 101
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-04T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      // 80 non-deleted + 1 new = 81 (under limit)
-      expect(loaded).toHaveLength(81);
-      // No deleted records
-      expect(loaded.filter((r) => r.connection?.deleted)).toHaveLength(0);
-      // All non-deleted originals preserved in order
-      const nonDeletedOriginals = records.filter((r) => !r.connection?.deleted);
-      for (let i = 0; i < nonDeletedOriginals.length; i++) {
-        expect(loaded[i].agent).toBe(nonDeletedOriginals[i].agent);
-      }
-      expect(loaded[80].agent).toBe("new-entry");
-    });
-
-    it("should archive both deleted and overflow when still over limit", () => {
-      const records: SpawnRecord[] = [];
-      // 99 non-deleted
-      for (let i = 0; i < 99; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      // 5 deleted
-      for (let i = 0; i < 5; i++) {
-        records.push({
-          agent: `deleted-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-02T00:00:00.000Z",
-          connection: {
-            ip: "1.2.3.4",
-            user: "root",
-            deleted: true,
-            deleted_at: "2026-01-03T00:00:00.000Z",
-          },
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // 104 existing + 1 new = 105. Remove 5 deleted = 100 non-deleted. 100 <= 100, fits.
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-04T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      expect(loaded[0].agent).toBe("agent-0");
-      expect(loaded[99].agent).toBe("new-entry");
-      // Archive should have 5 deleted
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(5);
-      expect(archived.every((r) => r.agent.startsWith("deleted-"))).toBe(true);
-    });
-
-    it("should archive deleted + oldest overflow when both need trimming", () => {
-      const records: SpawnRecord[] = [];
-      // 102 non-deleted
-      for (let i = 0; i < 102; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
-        });
-      }
-      // 3 deleted
-      for (let i = 0; i < 3; i++) {
-        records.push({
-          agent: `deleted-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-02T00:00:00.000Z",
-          connection: {
-            ip: "1.2.3.4",
-            user: "root",
-            deleted: true,
-            deleted_at: "2026-01-03T00:00:00.000Z",
-          },
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // 105 existing + 1 new = 106. Remove 3 deleted = 103 non-deleted. 103 > 100 → trim 3 oldest.
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-04T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // Oldest 3 non-deleted should be trimmed
-      expect(loaded[0].agent).toBe("agent-3");
-      expect(loaded[99].agent).toBe("new-entry");
-      // Archive should have 3 deleted + 3 overflow = 6
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(6);
-    });
-  });
-
-  // ── Archive file behavior ─────────────────────────────────────────────
-
-  describe("archive file behavior", () => {
-    it("should append to existing archive file from same day", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // First trim
-      saveSpawnRecord({
-        agent: "first-new",
-        cloud: "cloud",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      // Second trim (now history has agent-1 through first-new, 100 entries)
-      saveSpawnRecord({
-        agent: "second-new",
-        cloud: "cloud",
-        timestamp: "2026-01-03T00:00:00.000Z",
-      });
-
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      // Both trims should append to same archive file
-      const archived = loadArchive(testDir, archives[0]);
-      expect(archived).toHaveLength(2);
-      expect(archived[0].agent).toBe("agent-0");
-      expect(archived[1].agent).toBe("agent-1");
-    });
-
-    it("should create archive with correct date format in name", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      const archives = getArchiveFiles(testDir);
-      expect(archives).toHaveLength(1);
-      // Should match YYYY-MM-DD pattern
-      expect(archives[0]).toMatch(/^history-\d{4}-\d{2}-\d{2}\.json$/);
-    });
-
-    it("should write valid pretty-printed JSON to archive", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      const archives = getArchiveFiles(testDir);
-      const raw = readFileSync(join(testDir, archives[0]), "utf-8");
-      expect(() => JSON.parse(raw)).not.toThrow();
-      expect(raw).toContain(" "); // pretty-printed
-      expect(raw.endsWith("\n")).toBe(true); // trailing newline
-    });
-
-    it("should still save record even if archive write fails gracefully", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      // Pre-create a directory with the archive name to cause write to fail
-      const date = new Date().toISOString().slice(0, 10);
-      mkdirSync(join(testDir, `history-${date}.json`), {
-        recursive: true,
-      });
-
-      // Save should still work even though archive write fails
-      saveSpawnRecord({
-        agent: "new-entry",
-        cloud: "cloud",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      expect(loaded[99].agent).toBe("new-entry");
-    });
-  });
-
-  // ── filterHistory reverse chronological ordering ────────────────────────
+
+  // ── saveSpawnRecord ──────────────────────────────────────────────────────
+
+  describe("saveSpawnRecord", () => {
+    it("should keep all entries with no cap", () => {
+      // Save 200 records — all should be retained
+      for (let i = 0; i < 200; i++) {
+        saveSpawnRecord({
+          id: `id-${i}`,
+          agent: `agent-${i}`,
+          cloud: `cloud-${i}`,
+          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
+        });
+      }
+
+      const loaded = loadHistory();
+      expect(loaded).toHaveLength(200);
+      expect(loaded[0].agent).toBe("agent-0");
+      expect(loaded[199].agent).toBe("agent-199");
+    });
+
+    it("should assign id when missing", () => {
+      saveSpawnRecord({
+        id: "",
+        agent: "claude",
+        cloud: "sprite",
+        timestamp: "2026-01-01T00:00:00.000Z",
+      });
+
+      const loaded = loadHistory();
+      expect(loaded).toHaveLength(1);
+      expect(typeof loaded[0].id).toBe("string");
+      expect(loaded[0].id.length).toBeGreaterThan(0);
+    });
+  });
+
+  // ── filterHistory ordering guarantees ────────────────────────────────────

   describe("filterHistory ordering guarantees", () => {
     it("should return records in reverse chronological order (newest first)", () => {
       const records: SpawnRecord[] = [
         {
+          id: "r1",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-01T00:00:00.000Z",
         },
         {
+          id: "r2",
           agent: "codex",
           cloud: "hetzner",
           timestamp: "2026-01-02T00:00:00.000Z",
         },
         {
+          id: "r3",
           agent: "claude",
           cloud: "hetzner",
           timestamp: "2026-01-03T00:00:00.000Z",
@@ -639,7 +98,6 @@ describe("History Trimming and Boundaries", () => {

       const result = filterHistory();
       expect(result).toHaveLength(3);
-      // Newest should be first (reverse of file order)
       expect(result[0].timestamp).toBe("2026-01-03T00:00:00.000Z");
       expect(result[1].timestamp).toBe("2026-01-02T00:00:00.000Z");
       expect(result[2].timestamp).toBe("2026-01-01T00:00:00.000Z");
@@ -648,21 +106,25 @@ describe("History Trimming and Boundaries", () => {
     it("should maintain reverse order after filtering by agent", () => {
       const records: SpawnRecord[] = [
         {
+          id: "r1",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-01T00:00:00.000Z",
         },
         {
+          id: "r2",
           agent: "codex",
           cloud: "hetzner",
           timestamp: "2026-01-02T00:00:00.000Z",
         },
         {
+          id: "r3",
           agent: "claude",
           cloud: "hetzner",
           timestamp: "2026-01-03T00:00:00.000Z",
         },
         {
+          id: "r4",
           agent: "codex",
           cloud: "sprite",
           timestamp: "2026-01-04T00:00:00.000Z",
@@ -679,16 +141,19 @@ describe("History Trimming and Boundaries", () => {
     it("should maintain reverse order after filtering by cloud", () => {
       const records: SpawnRecord[] = [
         {
+          id: "r1",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-01T00:00:00.000Z",
         },
         {
+          id: "r2",
           agent: "codex",
           cloud: "hetzner",
           timestamp: "2026-01-02T00:00:00.000Z",
         },
         {
+          id: "r3",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-03T00:00:00.000Z",
@@ -705,21 +170,25 @@ describe("History Trimming and Boundaries", () => {
     it("should maintain reverse order after filtering by both agent and cloud", () => {
       const records: SpawnRecord[] = [
         {
+          id: "r1",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-01T00:00:00.000Z",
         },
         {
+          id: "r2",
           agent: "claude",
           cloud: "hetzner",
           timestamp: "2026-01-02T00:00:00.000Z",
         },
         {
+          id: "r3",
           agent: "codex",
           cloud: "sprite",
           timestamp: "2026-01-03T00:00:00.000Z",
         },
         {
+          id: "r4",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-04T00:00:00.000Z",
@@ -736,6 +205,7 @@ describe("History Trimming and Boundaries", () => {
     it("should return single-element array unchanged for one matching record", () => {
       const records: SpawnRecord[] = [
         {
+          id: "r1",
           agent: "claude",
           cloud: "sprite",
           timestamp: "2026-01-01T00:00:00.000Z",
@@ -748,161 +218,4 @@ describe("History Trimming and Boundaries", () => {
       expect(result[0].agent).toBe("claude");
     });
   });
-
-  // ── Boundary: empty and single-entry history ────────────────────────────
-
-  describe("boundary conditions", () => {
-    it("should handle saving to empty history", () => {
-      saveSpawnRecord({
-        agent: "claude",
-        cloud: "sprite",
-        timestamp: "2026-01-01T00:00:00.000Z",
-      });
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(1);
-      expect(loaded[0].agent).toBe("claude");
-    });
-
-    it("should handle saving when history file does not exist yet", () => {
-      // testDir exists but history.json does not
-      expect(existsSync(join(testDir, "history.json"))).toBe(false);
-
-      saveSpawnRecord({
-        agent: "claude",
-        cloud: "sprite",
-        timestamp: "2026-01-01T00:00:00.000Z",
-      });
-
-      expect(existsSync(join(testDir, "history.json"))).toBe(true);
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(1);
-    });
-
-    it("should handle saving when SPAWN_HOME directory does not exist", () => {
-      const deepDir = join(testDir, "deep", "nested", "path");
-      process.env.SPAWN_HOME = deepDir;
-      expect(existsSync(deepDir)).toBe(false);
-
-      saveSpawnRecord({
-        agent: "claude",
-        cloud: "sprite",
-        timestamp: "2026-01-01T00:00:00.000Z",
-      });
-
-      expect(existsSync(deepDir)).toBe(true);
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(1);
-    });
-
-    it("should filter correctly on empty history", () => {
-      expect(filterHistory("claude")).toEqual([]);
-      expect(filterHistory(undefined, "sprite")).toEqual([]);
-      expect(filterHistory("claude", "sprite")).toEqual([]);
-    });
-
-    it("should handle loading history with extra unexpected fields gracefully", () => {
-      const records = [
-        {
-          agent: "claude",
-          cloud: "sprite",
-          timestamp: "2026-01-01T00:00:00.000Z",
-          extra_field: "should not break",
-          nested: {
-            foo: "bar",
-          },
-        },
-      ];
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(1);
-      expect(loaded[0].agent).toBe("claude");
-      expect(loaded[0].cloud).toBe("sprite");
-    });
-
-    it("should handle history file containing empty array", () => {
-      writeFileSync(join(testDir, "history.json"), "[]");
-      const loaded = loadHistory();
-      expect(loaded).toEqual([]);
-    });
-  });
-
-  // ── Trimming preserves file format ──────────────────────────────────────
-
-  describe("file format after trimming", () => {
-    it("should write valid JSON after trimming", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "agent-100",
-        cloud: "cloud-100",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      // Read raw file and verify it's valid v1 JSON
-      const raw = readFileSync(join(testDir, "history.json"), "utf-8");
-      expect(() => JSON.parse(raw)).not.toThrow();
-      const parsed = JSON.parse(raw);
-      expect(parsed.version).toBe(HISTORY_SCHEMA_VERSION);
-      expect(Array.isArray(parsed.records)).toBe(true);
-      expect(parsed.records).toHaveLength(100);
-    });
-
-    it("should write pretty-printed JSON with trailing newline after trimming", () => {
-      const records: SpawnRecord[] = [];
-      for (let i = 0; i < 100; i++) {
-        records.push({
-          agent: `agent-${i}`,
-          cloud: `cloud-${i}`,
-          timestamp: "2026-01-01T00:00:00.000Z",
-        });
-      }
-      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
-
-      saveSpawnRecord({
-        agent: "agent-100",
-        cloud: "cloud-100",
-        timestamp: "2026-01-02T00:00:00.000Z",
-      });
-
-      const raw = readFileSync(join(testDir, "history.json"), "utf-8");
-      // Pretty-printed JSON has indentation
-      expect(raw).toContain(" ");
-      // Trailing newline
-      expect(raw.endsWith("\n")).toBe(true);
-    });
-  });
-
-  // ── Race-like sequential saves near the boundary ────────────────────────
-
-  describe("sequential saves at the boundary", () => {
-    // NOTE: "99 to 100" and "100 to 101" boundary tests were removed as duplicates
-    // of "should keep all entries when at exactly 100" and "should trim to 100 when
-    // adding entry that exceeds the limit" in the MAX_HISTORY_ENTRIES section above.
-
-    it("should handle rapid sequential saves that build up from zero", () => {
-      for (let i = 0; i < 105; i++) {
-        saveSpawnRecord({
-          agent: `agent-${i}`,
-          cloud: "cloud",
-          timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
-        });
-      }
-
-      const loaded = loadHistory();
-      expect(loaded).toHaveLength(100);
-      // Should have the most recent 100 entries: agent-5 through agent-104
-      expect(loaded[0].agent).toBe("agent-5");
-      expect(loaded[99].agent).toBe("agent-104");
-    });
-  });
 });
history.ts
@@ -406,40 +406,6 @@ export function loadHistory(): SpawnRecord[] {
   return recoverFromArchives();
 }

-const MAX_HISTORY_ENTRIES = 100;
-
-/** Read existing records from an archive file, returning [] if missing or corrupted. */
-function readExistingArchive(archivePath: string): SpawnRecord[] {
-  if (!existsSync(archivePath)) {
-    return [];
-  }
-  const result = tryCatch((): unknown => JSON.parse(readFileSync(archivePath, "utf-8")));
-  if (result.ok && Array.isArray(result.data)) {
-    return result.data.filter((el) => v.safeParse(SpawnRecordSchema, el).success);
-  }
-  // Corrupted archive — overwrite
-  return [];
-}
-
-/** Archive evicted records to a dated backup file so nothing is permanently lost. */
-function archiveRecords(records: SpawnRecord[]): void {
-  if (records.length === 0) {
-    return;
-  }
-  // Non-fatal — archive failure should not block saving
-  tryCatchIf(isFileError, () => {
-    const dir = getSpawnDir();
-    const date = new Date().toISOString().slice(0, 10);
-    const archivePath = join(dir, `history-${date}.json`);
-    const existing = readExistingArchive(archivePath);
-    const merged = [
-      ...existing,
-      ...records,
-    ];
-    atomicWriteJson(archivePath, merged);
-  });
-}
-
 export function saveSpawnRecord(record: SpawnRecord): void {
   const dir = getSpawnDir();
   if (!existsSync(dir)) {
@@ -454,33 +420,8 @@ export function saveSpawnRecord(record: SpawnRecord): void {
   }

   withHistoryLock(() => {
-    let history = loadHistory();
+    const history = loadHistory();
     history.push(record);
-    // Smart trim: evict deleted records first, then oldest, and archive evicted
-    if (history.length > MAX_HISTORY_ENTRIES) {
-      const nonDeleted: SpawnRecord[] = [];
-      const deleted: SpawnRecord[] = [];
-      for (const r of history) {
-        if (r.connection?.deleted) {
-          deleted.push(r);
-        } else {
-          nonDeleted.push(r);
-        }
-      }
-      if (nonDeleted.length <= MAX_HISTORY_ENTRIES) {
-        // Removing deleted records is enough
-        history = nonDeleted;
-        archiveRecords(deleted);
-      } else {
-        // Still over limit — trim oldest non-deleted records too
-        const overflow = nonDeleted.slice(0, nonDeleted.length - MAX_HISTORY_ENTRIES);
-        history = nonDeleted.slice(nonDeleted.length - MAX_HISTORY_ENTRIES);
-        archiveRecords([
-          ...deleted,
-          ...overflow,
-        ]);
-      }
-    }
     writeHistory(history);
   });
 }
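The dated archives the removed archiveRecords() produced remain useful: per the commit message, recoverFromArchives() still reads them when history.json is corrupted. Its body is not shown in this diff, so the following is only a rough sketch of what such best-effort recovery could look like, reusing the filename pattern and the parse-then-validate idea from the removed readExistingArchive() (the function name, the dir parameter, and the control flow here are all assumptions — the real code validates each element against SpawnRecordSchema via valibot and tryCatch):

```ts
// Hypothetical sketch — not the actual recoverFromArchives() from history.ts.
import { readdirSync, readFileSync } from "node:fs";
import { join } from "node:path";

function recoverFromArchivesSketch(dir: string): unknown[] {
  const recovered: unknown[] = [];
  // Scan the spawn home dir for dated archives: history-YYYY-MM-DD.json
  const archives = readdirSync(dir)
    .filter((f) => /^history-\d{4}-\d{2}-\d{2}\.json$/.test(f))
    .sort(); // lexicographic sort of ISO dates = chronological, oldest first
  for (const file of archives) {
    try {
      const parsed: unknown = JSON.parse(readFileSync(join(dir, file), "utf-8"));
      if (Array.isArray(parsed)) {
        // The real code additionally filters elements through SpawnRecordSchema.
        recovered.push(...parsed);
      }
    } catch {
      // A corrupted archive is skipped; recovery is best-effort by design.
    }
  }
  return recovered;
}
```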