talemate/talemate_frontend/src/components/DebugToolPromptLog.vue
veguAI bb1cf6941b
0.27.0 (#137)
* move memory agent to directory structure

* chromadb settings rework

* memory agent improvements
  - embedding presets
  - support switching embeddings without restart
  - support custom sentence transformer embeddings

* toggle to hide / show disabled clients

* add memory debug tools

* chromadb no longer needs its dedicated config entry

* add missing emits

* fix initial value

* hidden disabled clients no longer cause enumeration issues with client actions

* improve memory agent error handling and hot reloading

* more memory agent error handling

* DEBUG_MEMORY_REQUESTS off

* relock

* sim suite: fix issue with removing or changing characters

* relock

* fix issue where actor dialogue editor would break with multiple characters in the scene

* remove cruft

* implement interrupt function

* margin adjustments

* fix rubber-banding issue in world editor when editing certain text fields

* status notification when re-importing vectordb due to embeddings change

* properly open new client context on agent actions

* move jiggle apply to the end of prompt tune stack

* narrator agent length limit and jiggle settings added; also improve post-generation cleanup

* progress story prompt improvements

* narrator prompt and cleanup tweaks

* prompt tweak

* revert

* autocomplete dialogue improvements

* Unified process (#141)

* progress to unified process

* --dev arg

* use gunicorn to serve built frontend

* gunicorn config adjustments

* remove dist from gitignore

* revert

* uvicorn instead

* save decode

* graceful shutdown

* refactor unified process

* clean up frontend log messages

* more logging fixes

* 0.27.0

* startup message

* clean up scripts a bit

* fixes to update.bat

* fixes to install.bat

* sim suite supports generation cancellation

* debug

* simplify narrator prompts

* prompt tweaks

* unified docker file

* update docker compose config for unified docker file

* cruft

* fix startup in linux docker

* download punkt so it's available

* prompt tweaks

* fix bug when editing scene outline would wipe message history

* add o1 models

* add sampler, scheduler and cfg config to a1111 visualizer

* update installation docs

* visualizer configurable timeout

* memory agent docs

* docs

* relock

* relock

* fix issue where changing embeddings on immutable scene would hang

* remove debug message

* take torch install out of poetry since conditionals don't work.

* torch gets installed through some dependency, so put it back into poetry, but reinstall with CUDA if CUDA support exists

* fix install syntax

* no need for torchvision

* torch cuda install added to linux install script

* add torch cuda install to update.bat

* docs

* docs

* relock

* fix install.sh

* handle torch+cuda install in docker

* docs

* typo
2024-09-23 12:55:34 +03:00

<template>
<v-card class="ma-4">
<v-card-text class="text-muted text-caption">
Inspect the prompts and responses generated by the AI.
</v-card-text>
</v-card>
<v-list-item density="compact">
<v-list-item-title>
<v-chip size="x-small" color="primary">Max. {{ max_prompts }}</v-chip>
<v-btn color="delete" class="ml-2" variant="text" size="small" @click="clearPrompts" prepend-icon="mdi-close">Clear</v-btn>
<v-slider density="compact" v-model="max_prompts" min="1" hide-details max="250" step="1" color="primary"></v-slider>
</v-list-item-title>
</v-list-item>
<v-list-item v-for="(prompt, index) in prompts" :key="index" @click="openPromptView(prompt)">
<v-list-item-title class="text-caption">
<v-row>
<v-col cols="2" class="text-info">#{{ prompt.num }}</v-col>
<v-col cols="10" class="text-right">
<v-chip size="x-small" class="mr-1" color="primary" variant="text" label>{{ prompt.prompt_tokens }}<v-icon size="14"
class="ml-1">mdi-arrow-down-bold</v-icon></v-chip>
<v-chip size="x-small" class="mr-1" color="secondary" variant="text" label>{{ prompt.response_tokens }}<v-icon size="14"
class="ml-1">mdi-arrow-up-bold</v-icon></v-chip>
<v-chip size="x-small" variant="text" label color="grey-darken-1">{{ prompt.time }}s<v-icon size="14" class="ml-1">mdi-clock</v-icon></v-chip>
</v-col>
</v-row>
</v-list-item-title>
<v-list-item-subtitle class="text-caption">
<v-chip size="x-small" color="grey-lightne-1" variant="text">{{ prompt.agent_name }}</v-chip>
<v-chip size="x-small" color="grey" variant="text">{{ prompt.agent_action }}</v-chip>
</v-list-item-subtitle>
<v-divider class="mt-1"></v-divider>
</v-list-item>
<DebugToolPromptView ref="promptView" />
</template>
<script>
import DebugToolPromptView from './DebugToolPromptView.vue';
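// Debug tool: keeps a rolling, newest-first log of prompts sent to the AI
// and the responses received, with per-entry token counts and timing.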
export default {
name: 'DebugToolPromptLog',
data() {
return {
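// prompts: rolling log entries, newest first
// total: running counter used to number entries (shown as #n)
// max_prompts: user-adjustable cap on how many entries are kept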
prompts: [],
total: 1,
max_prompts: 50,
}
},
components: {
DebugToolPromptView,
},
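// provided by an ancestor component: websocket access, message handler
// registration, and input-state control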
inject: [
'getWebsocket',
'registerMessageHandler',
'setWaitingForInput',
],
methods: {
clearPrompts() {
this.prompts = [];
this.total = 1; // match the initial value so numbering restarts at #1
},
handleMessage(data) {
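// websocket handler: a scene load clears the log; each "prompt_sent"
// message becomes a new log entry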
if(data.type === "system"&& data.id === "scene.loaded") {
this.prompts = [];
this.total = 1;
return;
}
if(data.type === "prompt_sent") {
// add to the prompts array, truncating to max_prompts if necessary
// get the active agent (last entry in agent_stack, if non-empty)
let agent = null;
let agentName = null;
let agentAction = null;
if(data.data.agent_stack.length > 0) {
agent = data.data.agent_stack[data.data.agent_stack.length - 1];
// split by . to get agent name and action
let agentParts = agent.split('.');
agentName = agentParts[0];
agentAction = agentParts[1];
}
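// newest entries go to the front of the list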
this.prompts.unshift({
prompt: data.data.prompt,
response: data.data.response,
kind: data.data.kind,
response_tokens: data.data.response_tokens,
prompt_tokens: data.data.prompt_tokens,
agent_stack: data.data.agent_stack,
agent: agent,
agent_name: agentName,
agent_action: agentAction,
client_name: data.data.client_name,
client_type: data.data.client_type,
time: Math.floor(data.data.time), // whole seconds
num: this.total++,
generation_parameters: data.data.generation_parameters,
inference_preset: data.data.inference_preset,
// deep-copied snapshot of the original generation parameters
original_generation_parameters: JSON.parse(JSON.stringify(data.data.generation_parameters)),
original_prompt: data.data.prompt,
original_response: data.data.response,
})
while(this.prompts.length > this.max_prompts) {
this.prompts.pop();
}
}
},
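// open the detail view for one entry; the full list is passed along as well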
openPromptView(prompt) {
this.$refs.promptView.open(prompt, this.prompts);
}
},
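// register the websocket message handler as soon as the component is created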
created() {
this.registerMessageHandler(this.handleMessage);
},
}
</script>