Mirror of https://github.com/OpenRouterTeam/spawn.git (synced 2026-05-01 21:30:21 +00:00)
fix(history): smart trimming evicts deleted records first, archives overflow (#2168)
* fix(history): smart trimming evicts deleted records first, archives overflow

  When history exceeds 100 entries, deleted records (useless for `spawn ls`) are now
  evicted first. If still over the limit, oldest non-deleted records are also trimmed.
  All evicted records are archived to dated backup files (history-YYYY-MM-DD.json) so
  nothing is permanently lost. Previously, blind .slice() could silently discard
  records with active connections that `spawn ls` depends on.

  Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* style: fix biome formatting issues

  Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

---------

Co-authored-by: lab <6723574+louisgv@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
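The eviction policy described in the message can be summarized in a short sketch. This is an editor's illustration only, not code from the commit (the actual change lands in saveSpawnRecord in the diff below); the record type is simplified here to the fields the policy cares about.

// Condensed sketch of the smart-trimming policy, using a simplified record shape.
type HistoryRecord = { agent: string; connection?: { deleted?: boolean } };

function smartTrim(
  history: HistoryRecord[],
  max: number,
): { kept: HistoryRecord[]; archived: HistoryRecord[] } {
  if (history.length <= max) {
    return { kept: history, archived: [] }; // under the cap: nothing to do
  }
  // Step 1: evict soft-deleted records first; they are useless for `spawn ls`.
  const deleted = history.filter((r) => r.connection?.deleted);
  const live = history.filter((r) => !r.connection?.deleted);
  if (live.length <= max) {
    return { kept: live, archived: deleted };
  }
  // Step 2: still over the cap, so also trim the oldest live records.
  const overflow = live.slice(0, live.length - max);
  return { kept: live.slice(live.length - max), archived: [...deleted, ...overflow] };
}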
parent 47ba11aa50
commit 7c516ac887
3 changed files with 461 additions and 7 deletions
@@ -1,5 +1,5 @@
 import { describe, it, expect, beforeEach, afterEach } from "bun:test";
-import { existsSync, mkdirSync, rmSync, writeFileSync, readFileSync } from "node:fs";
+import { existsSync, mkdirSync, rmSync, writeFileSync, readFileSync, readdirSync } from "node:fs";
 import { join } from "node:path";
 import { homedir } from "node:os";
 import type { SpawnRecord } from "../history.js";
@@ -9,13 +9,22 @@ import { loadHistory, saveSpawnRecord, filterHistory } from "../history.js";
  * Tests for history trimming and boundary behavior.
  *
  * The saveSpawnRecord function has a MAX_HISTORY_ENTRIES = 100 cap that
- * trims old entries when history grows too large. This prevents unbounded
- * growth of the history.json file but must correctly preserve the most
- * recent entries and not lose data prematurely.
+ * trims old entries when history grows too large. Smart trimming evicts
+ * soft-deleted records first, then oldest non-deleted records. Evicted
+ * records are archived to dated backup files so nothing is permanently lost.
  *
  * Also tests filterHistory ordering guarantees (reverse chronological).
  */
 
+function getArchiveFiles(dir: string): string[] {
+  return readdirSync(dir).filter((f) => f.startsWith("history-") && f.endsWith(".json") && f !== "history.json");
+}
+
+function loadArchive(dir: string, filename: string): SpawnRecord[] {
+  const raw = readFileSync(join(dir, filename), "utf-8");
+  return JSON.parse(raw);
+}
+
 describe("History Trimming and Boundaries", () => {
   let testDir: string;
   let originalEnv: NodeJS.ProcessEnv;
@@ -67,6 +76,8 @@ describe("History Trimming and Boundaries", () => {
       // First entry should still be agent-0 (nothing trimmed)
       expect(loaded[0].agent).toBe("agent-0");
       expect(loaded[99].agent).toBe("agent-99");
+      // No archive should be created
+      expect(getArchiveFiles(testDir)).toHaveLength(0);
     });
 
     it("should trim to 100 when adding entry that exceeds the limit", () => {
@@ -93,6 +104,12 @@ describe("History Trimming and Boundaries", () => {
       expect(loaded[0].agent).toBe("agent-1");
       // The newest entry should be the one we just added
       expect(loaded[99].agent).toBe("agent-100");
+      // Archive should contain the trimmed record
+      const archives = getArchiveFiles(testDir);
+      expect(archives).toHaveLength(1);
+      const archived = loadArchive(testDir, archives[0]);
+      expect(archived).toHaveLength(1);
+      expect(archived[0].agent).toBe("agent-0");
     });
 
     it("should trim correctly when history is well over the limit", () => {
@@ -118,6 +135,11 @@ describe("History Trimming and Boundaries", () => {
       // Should keep the most recent 100 entries: agent-51 through agent-150
       expect(loaded[0].agent).toBe("agent-51");
       expect(loaded[99].agent).toBe("agent-150");
+      // Archive should contain 51 trimmed records (agent-0 through agent-50)
+      const archives = getArchiveFiles(testDir);
+      expect(archives).toHaveLength(1);
+      const archived = loadArchive(testDir, archives[0]);
+      expect(archived).toHaveLength(51);
     });
 
     it("should not trim when history has fewer than 100 entries", () => {
@@ -141,6 +163,8 @@ describe("History Trimming and Boundaries", () => {
       expect(loaded).toHaveLength(51);
       expect(loaded[0].agent).toBe("agent-0");
       expect(loaded[50].agent).toBe("agent-50");
+      // No archive when under the limit
+      expect(getArchiveFiles(testDir)).toHaveLength(0);
     });
 
     it("should preserve prompt fields through trimming", () => {
@@ -213,6 +237,383 @@ describe("History Trimming and Boundaries", () => {
     });
   });
 
+  // ── Smart trimming: deleted records evicted first ──────────────────────
+
+  describe("smart trimming — deleted records evicted first", () => {
+    it("should evict deleted records before non-deleted when over limit", () => {
+      const records: SpawnRecord[] = [];
+      // 80 non-deleted records
+      for (let i = 0; i < 80; i++) {
+        records.push({
+          agent: `agent-${i}`,
+          cloud: "cloud",
+          timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
+        });
+      }
+      // 20 deleted records (mixed throughout)
+      for (let i = 0; i < 20; i++) {
+        records.push({
+          agent: `deleted-${i}`,
+          cloud: "cloud",
+          timestamp: `2026-01-02T00:${String(i).padStart(2, "0")}:00.000Z`,
+          connection: {
+            ip: "1.2.3.4",
+            user: "root",
+            deleted: true,
+            deleted_at: "2026-01-03T00:00:00.000Z",
+          },
+        });
+      }
+      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
+
+      // Adding 101st entry (100 existing + 1 new)
+      saveSpawnRecord({
+        agent: "new-entry",
+        cloud: "cloud",
+        timestamp: "2026-01-04T00:00:00.000Z",
+      });
+
+      const loaded = loadHistory();
+      // 80 non-deleted + 1 new = 81 total (under limit after removing 20 deleted)
+      expect(loaded).toHaveLength(81);
+      // All non-deleted records should be preserved
+      expect(loaded[0].agent).toBe("agent-0");
+      expect(loaded[79].agent).toBe("agent-79");
+      expect(loaded[80].agent).toBe("new-entry");
+      // No deleted records should remain
+      expect(loaded.filter((r) => r.connection?.deleted)).toHaveLength(0);
+      // Archive should contain the 20 deleted records
+      const archives = getArchiveFiles(testDir);
+      expect(archives).toHaveLength(1);
+      const archived = loadArchive(testDir, archives[0]);
+      expect(archived).toHaveLength(20);
+      expect(archived.every((r) => r.agent.startsWith("deleted-"))).toBe(true);
+    });
+
it("should trim oldest non-deleted when still over limit after removing deleted", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
// 98 non-deleted records
|
||||
for (let i = 0; i < 98; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
|
||||
});
|
||||
}
|
||||
// 2 deleted records
|
||||
for (let i = 0; i < 2; i++) {
|
||||
records.push({
|
||||
agent: `deleted-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-02T00:00:00.000Z",
|
||||
connection: {
|
||||
ip: "1.2.3.4",
|
||||
user: "root",
|
||||
deleted: true,
|
||||
deleted_at: "2026-01-03T00:00:00.000Z",
|
||||
},
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
// Adding one more: 101 total, only 2 deleted — removing them gives 99, still need to check 99 <= 100 which is fine
|
||||
// Wait, 98 non-deleted + 1 new = 99 non-deleted. 99 <= 100. So only deleted are archived.
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-04T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const loaded = loadHistory();
|
||||
// 98 + 1 new = 99 non-deleted (under limit)
|
||||
expect(loaded).toHaveLength(99);
|
||||
expect(loaded[0].agent).toBe("agent-0");
|
||||
expect(loaded[98].agent).toBe("new-entry");
|
||||
|
||||
// Archive has the 2 deleted
|
||||
const archives = getArchiveFiles(testDir);
|
||||
expect(archives).toHaveLength(1);
|
||||
const archived = loadArchive(testDir, archives[0]);
|
||||
expect(archived).toHaveLength(2);
|
||||
});
|
||||
|
||||
it("should trim oldest non-deleted records when 0 deleted and over limit", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
for (let i = 0; i < 100; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: `2026-01-01T00:${String(i).padStart(2, "0")}:00.000Z`,
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-04T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const loaded = loadHistory();
|
||||
expect(loaded).toHaveLength(100);
|
||||
// Oldest should be trimmed
|
||||
expect(loaded[0].agent).toBe("agent-1");
|
||||
expect(loaded[99].agent).toBe("new-entry");
|
||||
// Archive should have the overflow record
|
||||
const archives = getArchiveFiles(testDir);
|
||||
expect(archives).toHaveLength(1);
|
||||
const archived = loadArchive(testDir, archives[0]);
|
||||
expect(archived).toHaveLength(1);
|
||||
expect(archived[0].agent).toBe("agent-0");
|
||||
});
|
||||
|
||||
it("should handle deleted records mixed throughout history order", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
// Create 100 records where every 5th is deleted
|
||||
for (let i = 0; i < 100; i++) {
|
||||
const isDeleted = i % 5 === 0;
|
||||
const record: SpawnRecord = {
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
|
||||
};
|
||||
if (isDeleted) {
|
||||
record.connection = {
|
||||
ip: "1.2.3.4",
|
||||
user: "root",
|
||||
deleted: true,
|
||||
deleted_at: "2026-01-03T00:00:00.000Z",
|
||||
};
|
||||
}
|
||||
records.push(record);
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
// 100 records (20 deleted, 80 non-deleted) + 1 new = 101
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-04T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const loaded = loadHistory();
|
||||
// 80 non-deleted + 1 new = 81 (under limit)
|
||||
expect(loaded).toHaveLength(81);
|
||||
// No deleted records
|
||||
expect(loaded.filter((r) => r.connection?.deleted)).toHaveLength(0);
|
||||
// All non-deleted originals preserved in order
|
||||
const nonDeletedOriginals = records.filter((r) => !r.connection?.deleted);
|
||||
for (let i = 0; i < nonDeletedOriginals.length; i++) {
|
||||
expect(loaded[i].agent).toBe(nonDeletedOriginals[i].agent);
|
||||
}
|
||||
expect(loaded[80].agent).toBe("new-entry");
|
||||
});
|
||||
|
||||
it("should archive both deleted and overflow when still over limit", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
// 99 non-deleted
|
||||
for (let i = 0; i < 99; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
|
||||
});
|
||||
}
|
||||
// 5 deleted
|
||||
for (let i = 0; i < 5; i++) {
|
||||
records.push({
|
||||
agent: `deleted-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-02T00:00:00.000Z",
|
||||
connection: {
|
||||
ip: "1.2.3.4",
|
||||
user: "root",
|
||||
deleted: true,
|
||||
deleted_at: "2026-01-03T00:00:00.000Z",
|
||||
},
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
// 104 existing + 1 new = 105. Remove 5 deleted = 100 non-deleted. 100 <= 100, fits.
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-04T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const loaded = loadHistory();
|
||||
expect(loaded).toHaveLength(100);
|
||||
expect(loaded[0].agent).toBe("agent-0");
|
||||
expect(loaded[99].agent).toBe("new-entry");
|
||||
// Archive should have 5 deleted
|
||||
const archives = getArchiveFiles(testDir);
|
||||
expect(archives).toHaveLength(1);
|
||||
const archived = loadArchive(testDir, archives[0]);
|
||||
expect(archived).toHaveLength(5);
|
||||
expect(archived.every((r) => r.agent.startsWith("deleted-"))).toBe(true);
|
||||
});
|
||||
|
||||
it("should archive deleted + oldest overflow when both need trimming", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
// 102 non-deleted
|
||||
for (let i = 0; i < 102; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: `2026-01-01T${String(Math.floor(i / 60)).padStart(2, "0")}:${String(i % 60).padStart(2, "0")}:00.000Z`,
|
||||
});
|
||||
}
|
||||
// 3 deleted
|
||||
for (let i = 0; i < 3; i++) {
|
||||
records.push({
|
||||
agent: `deleted-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-02T00:00:00.000Z",
|
||||
connection: {
|
||||
ip: "1.2.3.4",
|
||||
user: "root",
|
||||
deleted: true,
|
||||
deleted_at: "2026-01-03T00:00:00.000Z",
|
||||
},
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
// 105 existing + 1 new = 106. Remove 3 deleted = 103 non-deleted. 103 > 100 → trim 3 oldest.
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-04T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const loaded = loadHistory();
|
||||
expect(loaded).toHaveLength(100);
|
||||
// Oldest 3 non-deleted should be trimmed
|
||||
expect(loaded[0].agent).toBe("agent-3");
|
||||
expect(loaded[99].agent).toBe("new-entry");
|
||||
// Archive should have 3 deleted + 3 overflow = 6
|
||||
const archives = getArchiveFiles(testDir);
|
||||
expect(archives).toHaveLength(1);
|
||||
const archived = loadArchive(testDir, archives[0]);
|
||||
expect(archived).toHaveLength(6);
|
||||
});
|
||||
});
|
||||
|
||||
+  // ── Archive file behavior ─────────────────────────────────────────────
+
+  describe("archive file behavior", () => {
+    it("should append to existing archive file from same day", () => {
+      const records: SpawnRecord[] = [];
+      for (let i = 0; i < 100; i++) {
+        records.push({
+          agent: `agent-${i}`,
+          cloud: "cloud",
+          timestamp: "2026-01-01T00:00:00.000Z",
+        });
+      }
+      writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
+
+      // First trim
+      saveSpawnRecord({
+        agent: "first-new",
+        cloud: "cloud",
+        timestamp: "2026-01-02T00:00:00.000Z",
+      });
+
+      // Second trim (now history has agent-1 through first-new, 100 entries)
+      saveSpawnRecord({
+        agent: "second-new",
+        cloud: "cloud",
+        timestamp: "2026-01-03T00:00:00.000Z",
+      });
+
+      const archives = getArchiveFiles(testDir);
+      expect(archives).toHaveLength(1);
+      // Both trims should append to same archive file
+      const archived = loadArchive(testDir, archives[0]);
+      expect(archived).toHaveLength(2);
+      expect(archived[0].agent).toBe("agent-0");
+      expect(archived[1].agent).toBe("agent-1");
+    });
+
it("should create archive with correct date format in name", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
for (let i = 0; i < 100; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-01T00:00:00.000Z",
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-02T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const archives = getArchiveFiles(testDir);
|
||||
expect(archives).toHaveLength(1);
|
||||
// Should match YYYY-MM-DD pattern
|
||||
expect(archives[0]).toMatch(/^history-\d{4}-\d{2}-\d{2}\.json$/);
|
||||
});
|
||||
|
||||
it("should write valid pretty-printed JSON to archive", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
for (let i = 0; i < 100; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-01T00:00:00.000Z",
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-02T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const archives = getArchiveFiles(testDir);
|
||||
const raw = readFileSync(join(testDir, archives[0]), "utf-8");
|
||||
expect(() => JSON.parse(raw)).not.toThrow();
|
||||
expect(raw).toContain(" "); // pretty-printed
|
||||
expect(raw.endsWith("\n")).toBe(true); // trailing newline
|
||||
});
|
||||
|
||||
it("should still save record even if archive write fails gracefully", () => {
|
||||
const records: SpawnRecord[] = [];
|
||||
for (let i = 0; i < 100; i++) {
|
||||
records.push({
|
||||
agent: `agent-${i}`,
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-01T00:00:00.000Z",
|
||||
});
|
||||
}
|
||||
writeFileSync(join(testDir, "history.json"), JSON.stringify(records));
|
||||
|
||||
// Pre-create a directory with the archive name to cause write to fail
|
||||
const date = new Date().toISOString().slice(0, 10);
|
||||
mkdirSync(join(testDir, `history-${date}.json`), {
|
||||
recursive: true,
|
||||
});
|
||||
|
||||
// Save should still work even though archive write fails
|
||||
saveSpawnRecord({
|
||||
agent: "new-entry",
|
||||
cloud: "cloud",
|
||||
timestamp: "2026-01-02T00:00:00.000Z",
|
||||
});
|
||||
|
||||
const loaded = loadHistory();
|
||||
expect(loaded).toHaveLength(100);
|
||||
expect(loaded[99].agent).toBe("new-entry");
|
||||
});
|
||||
});
|
||||
|
||||
// ── filterHistory reverse chronological ordering ────────────────────────
|
||||
|
||||
describe("filterHistory ordering guarantees", () => {
|
||||
|
|
|
|||
|
|
@@ -130,6 +130,38 @@ export function loadHistory(): SpawnRecord[] {
 
 const MAX_HISTORY_ENTRIES = 100;
 
+/** Archive evicted records to a dated backup file so nothing is permanently lost. */
+function archiveRecords(records: SpawnRecord[]): void {
+  if (records.length === 0) {
+    return;
+  }
+  try {
+    const dir = getSpawnDir();
+    const date = new Date().toISOString().slice(0, 10);
+    const archivePath = join(dir, `history-${date}.json`);
+    let existing: SpawnRecord[] = [];
+    if (existsSync(archivePath)) {
+      try {
+        const data = JSON.parse(readFileSync(archivePath, "utf-8"));
+        if (Array.isArray(data)) {
+          existing = data;
+        }
+      } catch {
+        // Corrupted archive — overwrite
+      }
+    }
+    const merged = [
+      ...existing,
+      ...records,
+    ];
+    writeFileSync(archivePath, JSON.stringify(merged, null, 2) + "\n", {
+      mode: 0o600,
+    });
+  } catch {
+    // Non-fatal — archive failure should not block saving
+  }
+}
+
 export function saveSpawnRecord(record: SpawnRecord): void {
   const dir = getSpawnDir();
   if (!existsSync(dir)) {
@@ -140,9 +172,30 @@ export function saveSpawnRecord(record: SpawnRecord): void {
   }
   let history = loadHistory();
   history.push(record);
-  // Trim to most recent entries to prevent unbounded growth
+  // Smart trim: evict deleted records first, then oldest, and archive evicted
   if (history.length > MAX_HISTORY_ENTRIES) {
-    history = history.slice(history.length - MAX_HISTORY_ENTRIES);
+    const nonDeleted: SpawnRecord[] = [];
+    const deleted: SpawnRecord[] = [];
+    for (const r of history) {
+      if (r.connection?.deleted) {
+        deleted.push(r);
+      } else {
+        nonDeleted.push(r);
+      }
+    }
+    if (nonDeleted.length <= MAX_HISTORY_ENTRIES) {
+      // Removing deleted records is enough
+      history = nonDeleted;
+      archiveRecords(deleted);
+    } else {
+      // Still over limit — trim oldest non-deleted records too
+      const overflow = nonDeleted.slice(0, nonDeleted.length - MAX_HISTORY_ENTRIES);
+      history = nonDeleted.slice(nonDeleted.length - MAX_HISTORY_ENTRIES);
+      archiveRecords([
+        ...deleted,
+        ...overflow,
+      ]);
+    }
   }
   writeFileSync(getHistoryPath(), JSON.stringify(history, null, 2) + "\n", {
     mode: 0o600,
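The archive files written by archiveRecords are plain JSON arrays of SpawnRecord objects, the same shape as history.json (the tests above assert pretty-printed JSON with a trailing newline). A hypothetical restore helper, not part of this commit, could therefore merge a dated archive back into the live history; the directory argument stands in for whatever getSpawnDir() returns.

// Hypothetical sketch: merge an archived day back into history.json.
import { readFileSync, writeFileSync } from "node:fs";
import { join } from "node:path";

function restoreArchive(spawnDir: string, date: string): void {
  // Both files are JSON arrays of records, so restoring is a simple concat.
  const history = JSON.parse(readFileSync(join(spawnDir, "history.json"), "utf-8"));
  const archived = JSON.parse(readFileSync(join(spawnDir, `history-${date}.json`), "utf-8"));
  writeFileSync(
    join(spawnDir, "history.json"),
    JSON.stringify([...archived, ...history], null, 2) + "\n",
  );
}

Note that the next saveSpawnRecord call would re-apply the 100-entry cap to the merged history.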