Added settings for lorebook classification

This commit is contained in:
Kurvaz 2026-01-04 13:19:07 -07:00
parent cc78349a77
commit 20f7cb5788
7 changed files with 191 additions and 17 deletions

View file

@ -1,6 +1,6 @@
{
"name": "aventura",
"version": "0.1.4",
"version": "0.1.5",
"description": "AI-powered adventure and creative writing frontend",
"type": "module",
"scripts": {

2
src-tauri/Cargo.lock generated
View file

@ -260,7 +260,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "aventura"
version = "0.1.0"
version = "0.1.4"
dependencies = [
"serde",
"serde_json",

View file

@ -1,6 +1,6 @@
[package]
name = "aventura"
version = "0.1.4"
version = "0.1.5"
description = "AI-powered adventure and creative writing frontend"
authors = ["you"]
edition = "2021"

View file

@ -1,7 +1,7 @@
{
"$schema": "https://schema.tauri.app/config/2",
"productName": "Aventura",
"version": "0.1.4",
"version": "0.1.5",
"identifier": "com.karelian.aventura",
"build": {
"beforeDevCommand": "npm run dev",

View file

@ -28,6 +28,8 @@
let editingStoryPrompt = $state<'adventure' | 'creativeWriting' | null>(null);
let editingProcess = $state<keyof AdvancedWizardSettings | null>(null);
let editingClassifierPrompt = $state(false);
let editingLorebookClassifier = $state(false);
let editingLorebookClassifierPrompt = $state(false);
let editingMemoryPrompt = $state<'chapterAnalysis' | 'chapterSummarization' | 'retrievalDecision' | null>(null);
let editingSuggestionsPrompt = $state(false);
let editingStyleReviewerPrompt = $state(false);
@ -945,11 +947,142 @@
{/if}
</div>
{/each}
<!-- Lorebook Import Classification Subsection -->
<!-- Configures the LLM pass that categorizes entries when a lorebook file is imported.
     Reads/writes settings.systemServicesSettings.lorebookClassifier and persists via
     settings.saveSystemServicesSettings(). -->
<div class="card bg-surface-900 p-3">
  <div class="flex items-center justify-between mb-2">
    <div class="flex items-center gap-2">
      <FolderOpen class="h-4 w-4 text-green-400" />
      <span class="text-sm font-medium text-surface-200">Lorebook Import Classification</span>
    </div>
    <div class="flex items-center gap-2">
      <!-- Restore factory defaults for this subsection only -->
      <button
        class="text-xs text-surface-400 hover:text-surface-200"
        onclick={() => settings.resetLorebookClassifierSettings()}
        title="Reset to default"
      >
        <RotateCcw class="h-3 w-3" />
      </button>
      <!-- Toggle between the compact summary and the full editor -->
      <button
        class="text-xs text-accent-400 hover:text-accent-300"
        onclick={() => editingLorebookClassifier = !editingLorebookClassifier}
      >
        {editingLorebookClassifier ? 'Close' : 'Edit'}
      </button>
    </div>
  </div>
  {#if editingLorebookClassifier}
    <div class="space-y-3 mt-3 pt-3 border-t border-surface-700">
      <!-- Profile and Model Selector: null profileId falls back to the main narrative profile -->
      <ModelSelector
        profileId={settings.systemServicesSettings.lorebookClassifier?.profileId ?? settings.apiSettings.mainNarrativeProfileId}
        model={settings.systemServicesSettings.lorebookClassifier?.model ?? 'x-ai/grok-4.1-fast'}
        onProfileChange={(id) => {
          settings.systemServicesSettings.lorebookClassifier.profileId = id;
          settings.saveSystemServicesSettings();
        }}
        onModelChange={(m) => {
          settings.systemServicesSettings.lorebookClassifier.model = m;
          settings.saveSystemServicesSettings();
        }}
        onManageProfiles={() => { showProfileModal = true; editingProfile = null; }}
      />
      <!-- Temperature (slider steps by 0.05, so display with two decimals) -->
      <div>
        <label class="mb-1 block text-xs font-medium text-surface-400">
          Temperature: {(settings.systemServicesSettings.lorebookClassifier?.temperature ?? 0.1).toFixed(2)}
        </label>
        <input
          type="range"
          min="0"
          max="1"
          step="0.05"
          value={settings.systemServicesSettings.lorebookClassifier?.temperature ?? 0.1}
          oninput={(e) => {
            settings.systemServicesSettings.lorebookClassifier.temperature = parseFloat(e.currentTarget.value);
            settings.saveSystemServicesSettings();
          }}
          class="w-full h-2"
        />
      </div>
      <!-- Batch Size: entries sent per classification request -->
      <div>
        <label class="mb-1 block text-xs font-medium text-surface-400">
          Batch Size: {settings.systemServicesSettings.lorebookClassifier?.batchSize ?? 50}
        </label>
        <input
          type="range"
          min="10"
          max="100"
          step="10"
          value={settings.systemServicesSettings.lorebookClassifier?.batchSize ?? 50}
          oninput={(e) => {
            settings.systemServicesSettings.lorebookClassifier.batchSize = parseInt(e.currentTarget.value);
            settings.saveSystemServicesSettings();
          }}
          class="w-full h-2"
        />
        <div class="flex justify-between text-xs text-surface-500">
          <span>Smaller batches</span>
          <span>Larger batches</span>
        </div>
      </div>
      <!-- Max Concurrent: number of batch requests in flight at once -->
      <div>
        <label class="mb-1 block text-xs font-medium text-surface-400">
          Max Concurrent: {settings.systemServicesSettings.lorebookClassifier?.maxConcurrent ?? 5}
        </label>
        <input
          type="range"
          min="1"
          max="10"
          step="1"
          value={settings.systemServicesSettings.lorebookClassifier?.maxConcurrent ?? 5}
          oninput={(e) => {
            settings.systemServicesSettings.lorebookClassifier.maxConcurrent = parseInt(e.currentTarget.value);
            settings.saveSystemServicesSettings();
          }}
          class="w-full h-2"
        />
        <div class="flex justify-between text-xs text-surface-500">
          <span>Sequential</span>
          <span>Parallel</span>
        </div>
      </div>
      <!-- System Prompt: saved on blur rather than per keystroke -->
      <div>
        <label class="mb-1 block text-xs font-medium text-surface-400">System Prompt</label>
        <textarea
          value={settings.systemServicesSettings.lorebookClassifier?.systemPrompt ?? ''}
          oninput={(e) => {
            settings.systemServicesSettings.lorebookClassifier.systemPrompt = e.currentTarget.value;
          }}
          onblur={() => settings.saveSystemServicesSettings()}
          class="input text-xs min-h-[100px] resize-y font-mono w-full"
          rows="5"
        ></textarea>
      </div>
    </div>
  {:else}
    <!-- Compact summary; temperature precision matches the editor above -->
    <div class="text-xs text-surface-400">
      <span class="text-surface-500">Model:</span> {settings.systemServicesSettings.lorebookClassifier?.model ?? 'x-ai/grok-4.1-fast'}
      <span class="mx-2"></span>
      <span class="text-surface-500">Temp:</span> {(settings.systemServicesSettings.lorebookClassifier?.temperature ?? 0.1).toFixed(2)}
      <span class="mx-2"></span>
      <span class="text-surface-500">Batch:</span> {settings.systemServicesSettings.lorebookClassifier?.batchSize ?? 50}
    </div>
  {/if}
</div>
</div>
{/if}
</div>
<!-- Classifier Section -->
<!-- World State Classifier Section -->
<div class="border-t border-surface-700 pt-3">
<div class="flex items-center justify-between">
<button
@ -1024,7 +1157,7 @@
<input
type="range"
min="500"
max="4000"
max="8192"
step="100"
bind:value={settings.systemServicesSettings.classifier.maxTokens}
onchange={() => settings.saveSystemServicesSettings()}

View file

@ -178,7 +178,7 @@ function inferEntryType(name: string, content: string): EntryType {
}
/**
* LLM-based entry type classification using grok-4.1-fast with reasoning.
* LLM-based entry type classification using configurable settings.
* Classifies entries in batches with concurrent requests for faster processing.
*/
export async function classifyEntriesWithLLM(
@ -187,8 +187,11 @@ export async function classifyEntriesWithLLM(
): Promise<ImportedEntry[]> {
if (entries.length === 0) return entries;
// Use the main narrative profile for lorebook classification
const profileId = settings.apiSettings.mainNarrativeProfileId;
// Get lorebook classifier settings
const lorebookSettings = settings.systemServicesSettings.lorebookClassifier;
// Use specified profile, or fall back to main narrative profile
const profileId = lorebookSettings.profileId ?? settings.apiSettings.mainNarrativeProfileId;
const apiSettings = settings.getApiSettingsForProfile(profileId);
if (!apiSettings.openaiApiKey) {
@ -197,11 +200,16 @@ export async function classifyEntriesWithLLM(
}
const provider = new OpenAIProvider(apiSettings);
const BATCH_SIZE = 50;
const MAX_CONCURRENT = 5;
const BATCH_SIZE = lorebookSettings.batchSize;
const MAX_CONCURRENT = lorebookSettings.maxConcurrent;
const classifiedEntries = [...entries];
log('Starting LLM classification', { totalEntries: entries.length, maxConcurrent: MAX_CONCURRENT });
log('Starting LLM classification', {
totalEntries: entries.length,
batchSize: BATCH_SIZE,
maxConcurrent: MAX_CONCURRENT,
model: lorebookSettings.model,
});
// Create batches
const batches: { startIndex: number; batch: ImportedEntry[]; batchIndex: number }[] = [];
@ -242,16 +250,16 @@ Respond with ONLY valid JSON in this exact format:
[{"index": 0, "type": "character"}, {"index": 1, "type": "location"}, ...]`;
const response = await provider.generateResponse({
model: 'x-ai/grok-4.1-fast',
model: lorebookSettings.model,
messages: [
{
role: 'system',
content: 'You are a precise classifier for fantasy/RPG lorebook entries. Analyze the name, content, and keywords to determine the most appropriate category. Be decisive - pick the single best category for each entry. Respond only with the JSON array.',
content: lorebookSettings.systemPrompt,
},
{ role: 'user', content: prompt },
],
temperature: 0.1,
maxTokens: 8192,
temperature: lorebookSettings.temperature,
maxTokens: lorebookSettings.maxTokens,
extraBody: {
reasoning: { effort: 'high' },
},

View file

@ -337,7 +337,7 @@ Query based ONLY on the information visible in the chapter summaries or things t
timelineFillAnswer: `You answer specific questions about story chapters. Be concise and factual. Only include information that directly answers the question. If the chapter doesn't contain relevant information, say "Not mentioned in this chapter."`,
};
// Classifier service settings
// Classifier service settings (World State Classifier - extracts entities from narrative)
export interface ClassifierSettings {
profileId: string | null; // API profile to use (null = use default profile)
model: string;
@ -356,6 +356,31 @@ export function getDefaultClassifierSettings(): ClassifierSettings {
};
}
// Lorebook Import Classifier settings (classifies imported lorebook entries by type).
// Distinct from ClassifierSettings (World State Classifier), which extracts entities from narrative.
export interface LorebookClassifierSettings {
profileId: string | null; // API profile to use (null = use main narrative profile)
model: string; // Chat model id sent to the provider (default 'x-ai/grok-4.1-fast')
temperature: number; // Sampling temperature for classification requests (default 0.1)
maxTokens: number; // Response token cap per batch request
systemPrompt: string; // System message prepended to every classification request
batchSize: number; // Entries per batch for LLM classification
maxConcurrent: number; // Max concurrent batch requests
}
// Default system prompt for lorebook import classification; user-editable in the
// settings UI and restored by resetLorebookClassifierSettings().
export const DEFAULT_LOREBOOK_CLASSIFIER_PROMPT = `You are a precise classifier for fantasy/RPG lorebook entries. Analyze the name, content, and keywords to determine the most appropriate category. Be decisive - pick the single best category for each entry. Respond only with the JSON array.`;
/**
 * Factory for the out-of-the-box lorebook import classifier configuration.
 * Returns a fresh object on every call so callers may mutate the result
 * without affecting later resets.
 */
export function getDefaultLorebookClassifierSettings(): LorebookClassifierSettings {
  const defaults: LorebookClassifierSettings = {
    // null profile means "inherit the main narrative profile" at call time.
    profileId: null,
    model: 'x-ai/grok-4.1-fast',
    systemPrompt: DEFAULT_LOREBOOK_CLASSIFIER_PROMPT,
    temperature: 0.1,
    maxTokens: 8192,
    batchSize: 50,
    maxConcurrent: 5,
  };
  return defaults;
}
// Memory service settings
export interface MemorySettings {
profileId: string | null; // API profile to use (null = use default profile)
@ -532,6 +557,7 @@ export function getDefaultUpdateSettings(): UpdateSettings {
// Combined system services settings
export interface SystemServicesSettings {
classifier: ClassifierSettings;
lorebookClassifier: LorebookClassifierSettings;
memory: MemorySettings;
suggestions: SuggestionsSettings;
styleReviewer: StyleReviewerSettings;
@ -543,6 +569,7 @@ export interface SystemServicesSettings {
export function getDefaultSystemServicesSettings(): SystemServicesSettings {
return {
classifier: getDefaultClassifierSettings(),
lorebookClassifier: getDefaultLorebookClassifierSettings(),
memory: getDefaultMemorySettings(),
suggestions: getDefaultSuggestionsSettings(),
styleReviewer: getDefaultStyleReviewerSettings(),
@ -698,6 +725,7 @@ class SettingsStore {
const defaults = getDefaultSystemServicesSettings();
this.systemServicesSettings = {
classifier: { ...defaults.classifier, ...loaded.classifier },
lorebookClassifier: { ...defaults.lorebookClassifier, ...loaded.lorebookClassifier },
memory: { ...defaults.memory, ...loaded.memory },
suggestions: { ...defaults.suggestions, ...loaded.suggestions },
styleReviewer: { ...defaults.styleReviewer, ...loaded.styleReviewer },
@ -1191,6 +1219,11 @@ class SettingsStore {
await this.saveSystemServicesSettings();
}
// Restore the lorebook import classifier subsection to factory defaults and persist.
async resetLorebookClassifierSettings() {
  const defaults = getDefaultLorebookClassifierSettings();
  this.systemServicesSettings.lorebookClassifier = defaults;
  await this.saveSystemServicesSettings();
}
async resetMemorySettings() {
this.systemServicesSettings.memory = getDefaultMemorySettings();
await this.saveSystemServicesSettings();