mirror of
https://github.com/vegu-ai/talemate.git
synced 2025-09-02 10:29:17 +00:00
0.9.0 (#3)
* fixes #2: character creator description generation will not honor changes to the content context * decrease output of base attribute generation from 2-3 sentences to 1-2 sentences * conversation agent tweaks set other character names as stopping strings via client context * xwin llm template * conversation template tweaks * fixes #6: agent busy status not always reflected in ux * conversation min response length requirement reduced include character base details with conversation prompt * fixes #4: Prompt log * reset prompt log on scene load openai tokens as ? for now * version to 0.9.0
This commit is contained in:
parent
7f11b4859e
commit
44a91094e6
23 changed files with 435 additions and 143 deletions
|
@ -4,7 +4,7 @@ build-backend = "poetry.masonry.api"
|
|||
|
||||
[tool.poetry]
|
||||
name = "talemate"
|
||||
version = "0.8.0"
|
||||
version = "0.9.0"
|
||||
description = "AI-backed roleplay and narrative tools"
|
||||
authors = ["FinalWombat"]
|
||||
license = "GNU Affero General Public License v3.0"
|
||||
|
|
|
@ -2,4 +2,4 @@ from .agents import Agent
|
|||
from .client import TextGeneratorWebuiClient
|
||||
from .tale_mate import *
|
||||
|
||||
VERSION = "0.8.0"
|
||||
VERSION = "0.9.0"
|
|
@ -12,6 +12,32 @@ import talemate.util as util
|
|||
from talemate.emit import emit
|
||||
|
||||
|
||||
__all__ = [
|
||||
"Agent",
|
||||
"set_processing",
|
||||
]
|
||||
|
||||
def set_processing(fn):
|
||||
"""
|
||||
decorator that emits the agent status as processing while the function
|
||||
is running.
|
||||
|
||||
Done via a try - final block to ensure the status is reset even if
|
||||
the function fails.
|
||||
"""
|
||||
|
||||
async def wrapper(self, *args, **kwargs):
|
||||
try:
|
||||
await self.emit_status(processing=True)
|
||||
return await fn(self, *args, **kwargs)
|
||||
finally:
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
wrapper.__name__ = fn.__name__
|
||||
|
||||
return wrapper
|
||||
|
||||
|
||||
class Agent(ABC):
|
||||
"""
|
||||
Base agent class, defines a role
|
||||
|
@ -20,6 +46,8 @@ class Agent(ABC):
|
|||
agent_type = "agent"
|
||||
verbose_name = None
|
||||
|
||||
set_processing = set_processing
|
||||
|
||||
@property
|
||||
def agent_details(self):
|
||||
if hasattr(self, "client"):
|
||||
|
@ -51,15 +79,28 @@ class Agent(ABC):
|
|||
@property
|
||||
def status(self):
|
||||
if self.ready:
|
||||
return "idle"
|
||||
return "idle" if getattr(self, "processing", 0) == 0 else "busy"
|
||||
else:
|
||||
return "uninitialized"
|
||||
|
||||
async def emit_status(self, processing: bool = None):
|
||||
if processing is not None:
|
||||
self.processing = processing
|
||||
|
||||
status = "busy" if getattr(self, "processing", False) else self.status
|
||||
# should keep a count of processing requests, and when the
|
||||
# number is 0 status is "idle", if the number is greater than 0
|
||||
# status is "busy"
|
||||
#
|
||||
# increase / decrease based on value of `processing`
|
||||
|
||||
if getattr(self, "processing", None) is None:
|
||||
self.processing = 0
|
||||
|
||||
if not processing:
|
||||
self.processing -= 1
|
||||
self.processing = max(0, self.processing)
|
||||
else:
|
||||
self.processing += 1
|
||||
|
||||
status = "busy" if self.processing > 0 else "idle"
|
||||
|
||||
emit(
|
||||
"agent_status",
|
||||
|
|
|
@ -11,7 +11,7 @@ from talemate.emit import emit
|
|||
from talemate.scene_message import CharacterMessage, DirectorMessage
|
||||
from talemate.prompts import Prompt
|
||||
|
||||
from .base import Agent
|
||||
from .base import Agent, set_processing
|
||||
from .registry import register
|
||||
|
||||
if TYPE_CHECKING:
|
||||
|
@ -30,6 +30,8 @@ class ConversationAgent(Agent):
|
|||
agent_type = "conversation"
|
||||
verbose_name = "Conversation"
|
||||
|
||||
min_dialogue_length = 75
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
client: client.TaleMateClient,
|
||||
|
@ -162,25 +164,18 @@ class ConversationAgent(Agent):
|
|||
if "#" in result:
|
||||
result = result.split("#")[0]
|
||||
|
||||
result = result.replace("\n", " ").strip()
|
||||
|
||||
|
||||
# Check for occurrence of a character name followed by a colon
|
||||
# that does NOT match the character name of the current character
|
||||
if "." in result and re.search(rf"(?!{self.character.name})\w+:", result):
|
||||
result = re.sub(rf"(?!{character.name})\w+:(.*\n*)*", "", result)
|
||||
|
||||
result = result.replace("\n", "__LINEBREAK__").strip()
|
||||
|
||||
# Removes partial sentence at the end
|
||||
result = re.sub(r"[^\.\?\!\*]+(\n|$)", "", result)
|
||||
|
||||
|
||||
result = result.replace(" :", ":")
|
||||
|
||||
result = result.strip().strip('"').strip()
|
||||
|
||||
result = result.replace("[", "*").replace("]", "*")
|
||||
result = result.replace("**", "*")
|
||||
|
||||
result = result.replace("__LINEBREAK__", "\n")
|
||||
|
||||
# if there is an uneven number of '*' add one to the end
|
||||
|
||||
if result.count("*") % 2 == 1:
|
||||
|
@ -188,13 +183,12 @@ class ConversationAgent(Agent):
|
|||
|
||||
return result
|
||||
|
||||
@set_processing
|
||||
async def converse(self, actor, editor=None):
|
||||
"""
|
||||
Have a conversation with the AI
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
history = actor.history
|
||||
self.current_memory_context = None
|
||||
|
||||
|
@ -212,7 +206,7 @@ class ConversationAgent(Agent):
|
|||
empty_result_count = 0
|
||||
|
||||
# Validate AI response
|
||||
while loop_count < max_loops:
|
||||
while loop_count < max_loops and len(total_result) < self.min_dialogue_length:
|
||||
log.debug("conversation agent", result=result)
|
||||
result = await self.client.send_prompt(
|
||||
await self.build_prompt(character, char_message=total_result)
|
||||
|
@ -227,7 +221,7 @@ class ConversationAgent(Agent):
|
|||
|
||||
loop_count += 1
|
||||
|
||||
if len(total_result) >= 250:
|
||||
if len(total_result) > self.min_dialogue_length:
|
||||
break
|
||||
|
||||
# if result is empty, increment empty_result_count
|
||||
|
@ -240,9 +234,6 @@ class ConversationAgent(Agent):
|
|||
|
||||
result = result.replace(" :", ":")
|
||||
|
||||
# Removes any line starting with another character name followed by a colon
|
||||
total_result = re.sub(rf"(?!{character.name})\w+:(.*\n*)*", "", total_result)
|
||||
|
||||
total_result = total_result.split("#")[0]
|
||||
|
||||
# Removes partial sentence at the end
|
||||
|
@ -277,6 +268,4 @@ class ConversationAgent(Agent):
|
|||
# Add message and response to conversation history
|
||||
actor.scene.push_history(messages)
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
return messages
|
||||
|
|
|
@ -14,6 +14,7 @@ from talemate.automated_action import AutomatedAction
|
|||
import talemate.automated_action as automated_action
|
||||
from .conversation import ConversationAgent
|
||||
from .registry import register
|
||||
from .base import set_processing
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from talemate import Actor, Character, Player, Scene
|
||||
|
@ -68,37 +69,34 @@ class DirectorAgent(ConversationAgent):
|
|||
log.info("question_direction", response=response)
|
||||
return response, evaluation, prompt
|
||||
|
||||
|
||||
@set_processing
|
||||
async def direct(self, character: Character, goal_override:str=None):
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
analysis, current_goal, action = await self.decide_action(character, goal_override=goal_override)
|
||||
|
||||
try:
|
||||
if action == "watch":
|
||||
return None
|
||||
if action == "watch":
|
||||
return None
|
||||
|
||||
if action == "direct":
|
||||
return await self.direct_character_with_self_reflection(character, analysis, goal_override=current_goal)
|
||||
if action == "direct":
|
||||
return await self.direct_character_with_self_reflection(character, analysis, goal_override=current_goal)
|
||||
|
||||
if action.startswith("narrate"):
|
||||
if action.startswith("narrate"):
|
||||
|
||||
narration_type = action.split(":")[1]
|
||||
narration_type = action.split(":")[1]
|
||||
|
||||
direct_narrative = await self.direct_narrative(analysis, narration_type=narration_type, goal=current_goal)
|
||||
if direct_narrative:
|
||||
narrator = self.scene.get_helper("narrator").agent
|
||||
narrator_response = await narrator.progress_story(direct_narrative)
|
||||
if not narrator_response:
|
||||
return None
|
||||
narrator_message = NarratorMessage(narrator_response, source="progress_story")
|
||||
self.scene.push_history(narrator_message)
|
||||
emit("narrator", narrator_message)
|
||||
return True
|
||||
finally:
|
||||
await self.emit_status(processing=False)
|
||||
direct_narrative = await self.direct_narrative(analysis, narration_type=narration_type, goal=current_goal)
|
||||
if direct_narrative:
|
||||
narrator = self.scene.get_helper("narrator").agent
|
||||
narrator_response = await narrator.progress_story(direct_narrative)
|
||||
if not narrator_response:
|
||||
return None
|
||||
narrator_message = NarratorMessage(narrator_response, source="progress_story")
|
||||
self.scene.push_history(narrator_message)
|
||||
emit("narrator", narrator_message)
|
||||
return True
|
||||
|
||||
|
||||
@set_processing
|
||||
async def direct_narrative(self, analysis:str, narration_type:str="progress", goal:str=None):
|
||||
|
||||
if goal is None:
|
||||
|
@ -120,6 +118,7 @@ class DirectorAgent(ConversationAgent):
|
|||
|
||||
return response
|
||||
|
||||
@set_processing
|
||||
async def direct_character_with_self_reflection(self, character: Character, analysis:str, goal_override:str=None):
|
||||
|
||||
max_retries = 3
|
||||
|
@ -162,6 +161,7 @@ class DirectorAgent(ConversationAgent):
|
|||
|
||||
return response
|
||||
|
||||
@set_processing
|
||||
async def transform_character_direction_to_inner_monologue(self, character:Character, direction:str):
|
||||
|
||||
inner_monologue = await Prompt.request(
|
||||
|
@ -179,6 +179,7 @@ class DirectorAgent(ConversationAgent):
|
|||
return inner_monologue
|
||||
|
||||
|
||||
@set_processing
|
||||
async def direct_character(
|
||||
self,
|
||||
character: Character,
|
||||
|
@ -229,6 +230,7 @@ class DirectorAgent(ConversationAgent):
|
|||
|
||||
|
||||
|
||||
@set_processing
|
||||
async def direct_character_self_reflect(self, direction:str, character: Character, goal:str, direction_prompt:Prompt) -> (bool, str):
|
||||
|
||||
change_matches = ["change", "retry", "alter", "reconsider"]
|
||||
|
@ -253,6 +255,7 @@ class DirectorAgent(ConversationAgent):
|
|||
return keep, response
|
||||
|
||||
|
||||
@set_processing
|
||||
async def direct_character_analyze(self, direction:str, character: Character, goal:str, direction_prompt:Prompt):
|
||||
|
||||
prompt = Prompt.get("director.direct-character-analyze", vars={
|
||||
|
@ -317,6 +320,7 @@ class DirectorAgent(ConversationAgent):
|
|||
else:
|
||||
return ""
|
||||
|
||||
@set_processing
|
||||
async def goal_analyze(self, goal:str):
|
||||
|
||||
prompt = Prompt.get("director.goal-analyze", vars={
|
||||
|
|
|
@ -7,6 +7,7 @@ from typing import TYPE_CHECKING, Callable, List, Optional, Union
|
|||
import talemate.util as util
|
||||
from talemate.emit import wait_for_input
|
||||
from talemate.prompts import Prompt
|
||||
from talemate.agents.base import set_processing
|
||||
|
||||
from .conversation import ConversationAgent
|
||||
from .registry import register
|
||||
|
@ -23,10 +24,6 @@ class NarratorAgent(ConversationAgent):
|
|||
if "#" in result:
|
||||
result = result.split("#")[0]
|
||||
|
||||
|
||||
# Removes partial sentence at the end
|
||||
# result = re.sub(r"[^\.\?\!]+(\n|$)", "", result)
|
||||
|
||||
cleaned = []
|
||||
for line in result.split("\n"):
|
||||
if ":" in line.strip():
|
||||
|
@ -35,14 +32,12 @@ class NarratorAgent(ConversationAgent):
|
|||
|
||||
return "\n".join(cleaned)
|
||||
|
||||
@set_processing
|
||||
async def narrate_scene(self):
|
||||
"""
|
||||
Narrate the scene
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
|
||||
response = await Prompt.request(
|
||||
"narrator.narrate-scene",
|
||||
self.client,
|
||||
|
@ -55,17 +50,14 @@ class NarratorAgent(ConversationAgent):
|
|||
|
||||
response = f"*{response.strip('*')}*"
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
return response
|
||||
|
||||
@set_processing
|
||||
async def progress_story(self, narrative_direction:str=None):
|
||||
"""
|
||||
Narrate the scene
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
scene = self.scene
|
||||
director = scene.get_helper("director").agent
|
||||
pc = scene.get_player_character()
|
||||
|
@ -113,17 +105,13 @@ class NarratorAgent(ConversationAgent):
|
|||
response = response.replace("*", "")
|
||||
response = f"*{response}*"
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
return response
|
||||
|
||||
@set_processing
|
||||
async def narrate_query(self, query:str, at_the_end:bool=False, as_narrative:bool=True):
|
||||
"""
|
||||
Narrate a specific query
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
response = await Prompt.request(
|
||||
"narrator.narrate-query",
|
||||
self.client,
|
||||
|
@ -141,15 +129,14 @@ class NarratorAgent(ConversationAgent):
|
|||
if as_narrative:
|
||||
response = f"*{response}*"
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
return response
|
||||
|
||||
@set_processing
|
||||
async def narrate_character(self, character):
|
||||
"""
|
||||
Narrate a specific character
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
budget = self.client.max_token_length - 300
|
||||
|
||||
memory_budget = min(int(budget * 0.05), 200)
|
||||
|
@ -176,11 +163,9 @@ class NarratorAgent(ConversationAgent):
|
|||
response = self.clean_result(response.strip())
|
||||
response = f"*{response}*"
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
return response
|
||||
|
||||
|
||||
|
||||
@set_processing
|
||||
async def augment_context(self):
|
||||
|
||||
"""
|
||||
|
|
|
@ -8,7 +8,7 @@ import talemate.util as util
|
|||
from talemate.prompts import Prompt
|
||||
from talemate.scene_message import DirectorMessage
|
||||
|
||||
from .base import Agent
|
||||
from .base import Agent, set_processing
|
||||
from .registry import register
|
||||
|
||||
import structlog
|
||||
|
@ -40,6 +40,7 @@ class SummarizeAgent(Agent):
|
|||
super().connect(scene)
|
||||
scene.signals["history_add"].connect(self.on_history_add)
|
||||
|
||||
@set_processing
|
||||
async def build_archive(self, scene):
|
||||
end = None
|
||||
|
||||
|
@ -67,7 +68,6 @@ class SummarizeAgent(Agent):
|
|||
if end is None:
|
||||
# nothing to archive yet
|
||||
return
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
extra_context = None
|
||||
if recent_entry:
|
||||
|
@ -91,13 +91,12 @@ class SummarizeAgent(Agent):
|
|||
)
|
||||
|
||||
scene.push_archive(data_objects.ArchiveEntry(summarized, start, end))
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
return True
|
||||
|
||||
@set_processing
|
||||
async def analyze_dialoge(self, dialogue):
|
||||
instruction = "Examine the dialogue from the beginning and find the first line that marks a scene change. Repeat the line back to me exactly as it is written"
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
prepare_response = "The first line that marks a scene change is: "
|
||||
|
||||
|
@ -110,10 +109,9 @@ class SummarizeAgent(Agent):
|
|||
|
||||
response = self.clean_result(response)
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
return response
|
||||
|
||||
@set_processing
|
||||
async def summarize(
|
||||
self,
|
||||
text: str,
|
||||
|
@ -125,8 +123,6 @@ class SummarizeAgent(Agent):
|
|||
Summarize the given text
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
|
||||
response = await Prompt.request("summarizer.summarize-dialogue", self.client, "summarize", vars={
|
||||
"dialogue": text,
|
||||
"scene": self.scene,
|
||||
|
@ -135,14 +131,12 @@ class SummarizeAgent(Agent):
|
|||
|
||||
self.scene.log.info("summarize", dialogue=text, response=response)
|
||||
|
||||
await self.emit_status(processing=False)
|
||||
|
||||
return self.clean_result(response)
|
||||
|
||||
@set_processing
|
||||
async def simple_summary(
|
||||
self, text: str, prompt_kind: str = "summarize", instructions: str = "Summarize"
|
||||
):
|
||||
await self.emit_status(processing=True)
|
||||
prompt = [
|
||||
text,
|
||||
"",
|
||||
|
@ -153,62 +147,52 @@ class SummarizeAgent(Agent):
|
|||
response = await self.client.send_prompt("\n".join(map(str, prompt)), kind=prompt_kind)
|
||||
if ":" in response:
|
||||
response = response.split(":")[1].strip()
|
||||
await self.emit_status(processing=False)
|
||||
return response
|
||||
|
||||
|
||||
@set_processing
|
||||
async def request_world_state(self):
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
try:
|
||||
t1 = time.time()
|
||||
|
||||
t1 = time.time()
|
||||
_, world_state = await Prompt.request(
|
||||
"summarizer.request-world-state",
|
||||
self.client,
|
||||
"analyze",
|
||||
vars = {
|
||||
"scene": self.scene,
|
||||
"max_tokens": self.client.max_token_length,
|
||||
"object_type": "character",
|
||||
"object_type_plural": "characters",
|
||||
}
|
||||
)
|
||||
|
||||
_, world_state = await Prompt.request(
|
||||
"summarizer.request-world-state",
|
||||
self.client,
|
||||
"analyze",
|
||||
vars = {
|
||||
"scene": self.scene,
|
||||
"max_tokens": self.client.max_token_length,
|
||||
"object_type": "character",
|
||||
"object_type_plural": "characters",
|
||||
}
|
||||
)
|
||||
self.scene.log.debug("request_world_state", response=world_state, time=time.time() - t1)
|
||||
|
||||
self.scene.log.debug("request_world_state", response=world_state, time=time.time() - t1)
|
||||
|
||||
return world_state
|
||||
finally:
|
||||
await self.emit_status(processing=False)
|
||||
return world_state
|
||||
|
||||
|
||||
@set_processing
|
||||
async def request_world_state_inline(self):
|
||||
|
||||
"""
|
||||
EXPERIMENTAL, Overall the one shot request seems about as coherent as the inline request, but the inline request is is about twice as slow and would need to run on every dialogue line.
|
||||
"""
|
||||
|
||||
await self.emit_status(processing=True)
|
||||
try:
|
||||
t1 = time.time()
|
||||
|
||||
t1 = time.time()
|
||||
# first, we need to get the marked items (objects etc.)
|
||||
|
||||
# first, we need to get the marked items (objects etc.)
|
||||
marked_items_response = await Prompt.request(
|
||||
"summarizer.request-world-state-inline-items",
|
||||
self.client,
|
||||
"analyze_freeform",
|
||||
vars = {
|
||||
"scene": self.scene,
|
||||
"max_tokens": self.client.max_token_length,
|
||||
}
|
||||
)
|
||||
|
||||
marked_items_response = await Prompt.request(
|
||||
"summarizer.request-world-state-inline-items",
|
||||
self.client,
|
||||
"analyze_freeform",
|
||||
vars = {
|
||||
"scene": self.scene,
|
||||
"max_tokens": self.client.max_token_length,
|
||||
}
|
||||
)
|
||||
|
||||
self.scene.log.debug("request_world_state_inline", marked_items=marked_items_response, time=time.time() - t1)
|
||||
|
||||
return marked_items_response
|
||||
finally:
|
||||
await self.emit_status(processing=False)
|
||||
self.scene.log.debug("request_world_state_inline", marked_items=marked_items_response, time=time.time() - t1)
|
||||
|
||||
return marked_items_response
|
|
@ -11,11 +11,17 @@ __all__ = [
|
|||
'ContextModel',
|
||||
]
|
||||
|
||||
|
||||
class ConversationContext(BaseModel):
|
||||
talking_character: str = None
|
||||
other_characters: list[str] = Field(default_factory=list)
|
||||
|
||||
class ContextModel(BaseModel):
|
||||
"""
|
||||
Pydantic model for the context data.
|
||||
"""
|
||||
nuke_repetition: float = Field(0.0, ge=0.0, le=3.0)
|
||||
conversation: ConversationContext = Field(default_factory=ConversationContext)
|
||||
|
||||
# Define the context variable as an empty dictionary
|
||||
context_data = ContextVar('context_data', default=ContextModel().dict())
|
||||
|
|
|
@ -9,7 +9,6 @@ from talemate.client.registry import register
|
|||
from talemate.emit import emit
|
||||
from talemate.config import load_config
|
||||
import talemate.client.system_prompts as system_prompts
|
||||
|
||||
import structlog
|
||||
|
||||
__all__ = [
|
||||
|
@ -142,5 +141,14 @@ class OpenAIClient:
|
|||
|
||||
log.debug("openai response", response=response)
|
||||
|
||||
emit("prompt_sent", data={
|
||||
"kind": kind,
|
||||
"prompt": prompt,
|
||||
"response": response,
|
||||
# TODO use tiktoken
|
||||
"prompt_tokens": "?",
|
||||
"response_tokens": "?",
|
||||
})
|
||||
|
||||
self.emit_status(processing=False)
|
||||
return response
|
||||
|
|
|
@ -417,11 +417,21 @@ class TextGeneratorWebuiClient(RESTTaleMateClient):
|
|||
prompt,
|
||||
)
|
||||
|
||||
stopping_strings = ["<|end_of_turn|>"]
|
||||
|
||||
conversation_context = client_context_attribute("conversation")
|
||||
|
||||
stopping_strings += [
|
||||
f"{character}:" for character in conversation_context["other_characters"]
|
||||
]
|
||||
|
||||
log.debug("prompt_config_conversation", stopping_strings=stopping_strings, conversation_context=conversation_context)
|
||||
|
||||
config = {
|
||||
"prompt": prompt,
|
||||
"max_new_tokens": 75,
|
||||
"chat_prompt_size": self.max_token_length,
|
||||
"stopping_strings": ["<|end_of_turn|>", "\n\n"],
|
||||
"stopping_strings": stopping_strings,
|
||||
}
|
||||
config.update(PRESET_TALEMATE_CONVERSATION)
|
||||
|
||||
|
@ -616,7 +626,15 @@ class TextGeneratorWebuiClient(RESTTaleMateClient):
|
|||
|
||||
response = response.split("#")[0]
|
||||
self.emit_status(processing=False)
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
emit("prompt_sent", data={
|
||||
"kind": kind,
|
||||
"prompt": message["prompt"],
|
||||
"response": response,
|
||||
"prompt_tokens": token_length,
|
||||
"response_tokens": int(len(response) / 3.6)
|
||||
})
|
||||
|
||||
return response
|
||||
|
||||
|
||||
|
|
|
@ -14,6 +14,7 @@ ReceiveInput = signal("receive_input")
|
|||
ClientStatus = signal("client_status")
|
||||
AgentStatus = signal("agent_status")
|
||||
ClientBootstraps = signal("client_bootstraps")
|
||||
PromptSent = signal("prompt_sent")
|
||||
|
||||
RemoveMessage = signal("remove_message")
|
||||
|
||||
|
@ -42,4 +43,5 @@ handlers = {
|
|||
"world_state": WorldState,
|
||||
"archived_history": ArchivedHistory,
|
||||
"message_edited": MessageEdited,
|
||||
"prompt_sent": PromptSent,
|
||||
}
|
||||
|
|
|
@ -5,7 +5,9 @@
|
|||
<|CLOSE_SECTION|>
|
||||
<|SECTION:CHARACTERS|>
|
||||
{% for character in characters -%}
|
||||
{{ character.name }}: {{ character.description }}
|
||||
{{ character.name }}:
|
||||
{{ character.filtered_sheet(['name', 'description', 'age', 'gender']) }}
|
||||
{{ query_memory(character.name+' personality', as_question_answer= False) }}
|
||||
|
||||
{% endfor %}
|
||||
<|CLOSE_SECTION|>
|
||||
|
@ -28,9 +30,7 @@ Based on {{ talking_character.name}}'s example dialogue style, create a continua
|
|||
|
||||
You may chose to have {{ talking_character.name}} respond to {{main_character.name}}'s last message, or you may chose to have {{ talking_character.name}} perform a new action that is in line with {{ talking_character.name}}'s character.
|
||||
|
||||
{% if scene.history and scene.history[-1].type == "director" -%}
|
||||
Follow the instructions to you for your next message as {{ talking_character.name}}. NEVER directly respond to the instructions, but use the direction we have given you as you perform {{ talking_character.name }}'s response to {{main_character.name}}. You can separate thoughts and actual dialogue by containing thoughts inside curly brackets. Example: "{stuff you want to keep private} stuff you want to say publicly."
|
||||
{% endif -%}
|
||||
Use an informal and colloquial register with a conversational tone…Overall, their dialog is Informal, conversational, natural, and spontaneous, with a sense of immediacy.
|
||||
<|CLOSE_SECTION|>
|
||||
|
||||
<|SECTION:SCENE|>
|
||||
|
|
|
@ -48,12 +48,12 @@ Examples: John, Mary, Jane, Bob, Alice, etc.
|
|||
Respond with a number only
|
||||
{% endif -%}
|
||||
{% if character_sheet.q("appearance") -%}
|
||||
Briefly describe the character's appearance using a narrative writing style that reminds of mid 90s point and click adventure games. (2 - 3 sentences). {{ spice("Make it {spice}.", spices) }}
|
||||
Briefly describe the character's appearance using a narrative writing style that reminds of mid 90s point and click adventure games. (1 - 2 sentences). {{ spice("Make it {spice}.", spices) }}
|
||||
{% endif -%}
|
||||
{% block generate_appearance %}
|
||||
{% endblock %}
|
||||
{% if character_sheet.q("personality") -%}
|
||||
Briefly describe the character's personality using a narrative writing style that reminds of mid 90s point and click adventure games. (2 - 3 sentences). {{ spice("Make it {spice}.", spices) }}
|
||||
Briefly describe the character's personality using a narrative writing style that reminds of mid 90s point and click adventure games. (1 - 2 sentences). {{ spice("Make it {spice}.", spices) }}
|
||||
{% endif -%}
|
||||
{% if character_sheet.q("family and fiends") %}
|
||||
List close family and friends of {{ character_sheet("name") }}. Respond with a comma separated list of names. (2 - 3 names, include age)
|
||||
|
@ -69,7 +69,7 @@ List some things that {{ character_sheet("name") }} dislikes. Respond with a com
|
|||
Examples: cats, dogs, pizza, etc.
|
||||
{% endif -%}
|
||||
{% if character_sheet.q("clothes and accessories") -%}
|
||||
Briefly describe the character's clothes and accessories using a narrative writing style that reminds of mid 90s point and click adventure games. (2 - 3 sentences). {{ spice("Make it {spice}.", spices) }}
|
||||
Briefly describe the character's clothes and accessories using a narrative writing style that reminds of mid 90s point and click adventure games. (1 - 2 sentences). {{ spice("Make it {spice}.", spices) }}
|
||||
{% endif %}
|
||||
{% block generate_misc %}{% endblock -%}
|
||||
{% for custom_attribute, instructions in custom_attributes.items() -%}
|
||||
|
|
|
@ -4,6 +4,6 @@
|
|||
<|SECTION:TASK|>
|
||||
Summarize {{ character.name }} based on the character sheet above.
|
||||
|
||||
Use a narrative writing style that reminds of mid 90s point and click adventure games about a {{ content_context }}
|
||||
Use a narrative writing style that reminds of mid 90s point and click adventure games about {{ content_context }}
|
||||
<|CLOSE_SECTION|>
|
||||
{{ set_prepared_response(character.name+ " is ") }}
|
|
@ -148,13 +148,13 @@ class CharacterCreatorServerPlugin:
|
|||
async def handle_submit_step3(self, data:dict):
|
||||
|
||||
creator = self.scene.get_helper("creator").agent
|
||||
character, _ = self.apply_step_data(data)
|
||||
character, step_data = self.apply_step_data(data)
|
||||
|
||||
self.emit_step_start(3)
|
||||
|
||||
description = await creator.create_character_description(
|
||||
character,
|
||||
content_context=self.character_creation_data.scenario_context,
|
||||
content_context=step_data.scenario_context,
|
||||
)
|
||||
|
||||
character.description = description
|
||||
|
|
|
@ -292,6 +292,14 @@ class WebsocketHandler(Receiver):
|
|||
}
|
||||
)
|
||||
|
||||
def handle_prompt_sent(self, emission: Emission):
|
||||
self.queue_put(
|
||||
{
|
||||
"type": "prompt_sent",
|
||||
"data": emission.data,
|
||||
}
|
||||
)
|
||||
|
||||
def handle_clear_screen(self, emission: Emission):
|
||||
self.queue_put(
|
||||
{
|
||||
|
|
|
@ -23,6 +23,7 @@ from talemate.exceptions import ExitScene, RestartSceneLoop, ResetScene, Talemat
|
|||
from talemate.world_state import WorldState
|
||||
from talemate.config import SceneConfig
|
||||
from talemate.scene_assets import SceneAssets
|
||||
from talemate.client.context import ClientContext, ConversationContext
|
||||
import talemate.automated_action as automated_action
|
||||
|
||||
|
||||
|
@ -140,6 +141,23 @@ class Character:
|
|||
|
||||
return random.choice(self.example_dialogue)
|
||||
|
||||
def filtered_sheet(self, attributes: list[str]):
|
||||
|
||||
"""
|
||||
Same as sheet but only returns the attributes in the given list
|
||||
|
||||
Attributes that dont exist will be ignored
|
||||
"""
|
||||
|
||||
sheet_list = []
|
||||
|
||||
for key, value in self.base_attributes.items():
|
||||
if key.lower() not in attributes:
|
||||
continue
|
||||
sheet_list.append(f"{key}: {value}")
|
||||
|
||||
return "\n".join(sheet_list)
|
||||
|
||||
def save(self, file_path: str):
|
||||
"""
|
||||
Save this Character instance properties to a json file at the given file path.
|
||||
|
@ -413,8 +431,14 @@ class Actor:
|
|||
|
||||
self.agent.character = self.character
|
||||
|
||||
messages = await self.agent.converse(self, editor=editor)
|
||||
await asyncio.sleep(0)
|
||||
conversation_context = ConversationContext(
|
||||
talking_character=self.character.name,
|
||||
other_characters=[actor.character.name for actor in self.scene.actors if actor != self],
|
||||
)
|
||||
|
||||
with ClientContext(conversation=conversation_context):
|
||||
messages = await self.agent.converse(self, editor=editor)
|
||||
|
||||
return messages
|
||||
|
||||
|
||||
|
|
86
talemate_frontend/src/components/DebugToolPromptLog.vue
Normal file
86
talemate_frontend/src/components/DebugToolPromptLog.vue
Normal file
|
@ -0,0 +1,86 @@
|
|||
<template>
|
||||
<v-list-subheader class="text-uppercase"><v-icon>mdi-post-outline</v-icon> Prompts
|
||||
<v-chip size="x-small" color="primary">{{ max_prompts }}</v-chip>
|
||||
</v-list-subheader>
|
||||
|
||||
<v-list-item density="compact">
|
||||
<v-slider density="compact" v-model="max_prompts" min="1" hide-details max="250" step="1" color="primary"></v-slider>
|
||||
</v-list-item>
|
||||
|
||||
<v-list-item v-for="(prompt, index) in prompts" :key="index" @click="openPromptView(prompt)">
|
||||
<v-list-item-title class="text-caption">
|
||||
{{ prompt.kind }}
|
||||
</v-list-item-title>
|
||||
<v-list-item-subtitle>
|
||||
<v-chip size="x-small"><v-icon size="14"
|
||||
class="mr-1">mdi-pound</v-icon>{{ prompt.num }}</v-chip>
|
||||
<v-chip size="x-small" color="primary">{{ prompt.prompt_tokens }}<v-icon size="14"
|
||||
class="ml-1">mdi-arrow-down-bold</v-icon></v-chip>
|
||||
<v-chip size="x-small" color="secondary">{{ prompt.response_tokens }}<v-icon size="14"
|
||||
class="ml-1">mdi-arrow-up-bold</v-icon></v-chip>
|
||||
</v-list-item-subtitle>
|
||||
<v-divider class="mt-1"></v-divider>
|
||||
</v-list-item>
|
||||
|
||||
<DebugToolPromptView ref="promptView" />
|
||||
</template>
|
||||
<script>
|
||||
|
||||
import DebugToolPromptView from './DebugToolPromptView.vue';
|
||||
|
||||
export default {
|
||||
name: 'DebugToolPromptLog',
|
||||
data() {
|
||||
return {
|
||||
prompts: [],
|
||||
total: 0,
|
||||
max_prompts: 50,
|
||||
}
|
||||
},
|
||||
components: {
|
||||
DebugToolPromptView,
|
||||
},
|
||||
inject: [
|
||||
'getWebsocket',
|
||||
'registerMessageHandler',
|
||||
'setWaitingForInput',
|
||||
],
|
||||
|
||||
methods: {
|
||||
handleMessage(data) {
|
||||
|
||||
if(data.type === "system"&& data.id === "scene.loaded") {
|
||||
this.prompts = [];
|
||||
this.total = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if(data.type === "prompt_sent") {
|
||||
// add to prompts array, and truncate if necessary (max 50)
|
||||
this.prompts.unshift({
|
||||
prompt: data.data.prompt,
|
||||
response: data.data.response,
|
||||
kind: data.data.kind,
|
||||
response_tokens: data.data.response_tokens,
|
||||
prompt_tokens: data.data.prompt_tokens,
|
||||
num: this.total++,
|
||||
})
|
||||
|
||||
while(this.prompts.length > this.max_prompts) {
|
||||
this.prompts.pop();
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
openPromptView(prompt) {
|
||||
this.$refs.promptView.open(prompt);
|
||||
}
|
||||
},
|
||||
|
||||
created() {
|
||||
this.registerMessageHandler(this.handleMessage);
|
||||
},
|
||||
|
||||
}
|
||||
|
||||
</script>
|
68
talemate_frontend/src/components/DebugToolPromptView.vue
Normal file
68
talemate_frontend/src/components/DebugToolPromptView.vue
Normal file
|
@ -0,0 +1,68 @@
|
|||
<template>
    <v-dialog v-model="dialog" max-width="50%">
        <!-- fix: `prompt` is null until open() is called; guard the card so an
             eagerly rendered dialog body cannot throw on `prompt.num` -->
        <v-card v-if="prompt">
            <v-card-title>
                #{{ prompt.num }} - {{ prompt.kind }}
            </v-card-title>
            <v-tabs color="primary" v-model="tab">
                <v-tab value="prompt">
                    Prompt
                </v-tab>
                <v-tab value="response">
                    Response
                </v-tab>
            </v-tabs>

            <v-window v-model="tab">
                <v-window-item value="prompt">
                    <v-card flat>
                        <v-card-text style="max-height:600px; overflow-y:scroll;">
                            <div class="prompt-view">{{ prompt.prompt }}</div>
                        </v-card-text>
                    </v-card>
                </v-window-item>
                <v-window-item value="response">
                    <v-card flat>
                        <v-card-text style="max-height:600px; overflow-y:scroll;">
                            <div class="prompt-view">{{ prompt.response }}</div>
                        </v-card-text>
                    </v-card>
                </v-window-item>
            </v-window>
        </v-card>
    </v-dialog>
</template>

<script>

/**
 * Modal detail view for a single logged prompt.
 *
 * Shows the raw prompt text and the model's response in two tabs.
 * Opened programmatically via open(prompt) from DebugToolPromptLog.
 */
export default {
    name: 'DebugToolPromptView',
    data() {
        return {
            prompt: null,   // entry currently displayed; set by open()
            dialog: false,  // v-dialog visibility
            tab: "prompt",  // active tab: "prompt" | "response"
        }
    },
    methods: {
        // Show the dialog for the given prompt log entry.
        open(prompt) {
            this.prompt = prompt;
            this.dialog = true;
        },
        // Hide the dialog.
        close() {
            this.dialog = false;
        }
    }
}

</script>

<style scoped>

.prompt-view {
    font-family: monospace;
    font-size: 12px;
    white-space: pre-wrap;
    word-wrap: break-word;
}

</style>
|
54
talemate_frontend/src/components/DebugTools.vue
Normal file
54
talemate_frontend/src/components/DebugTools.vue
Normal file
|
@ -0,0 +1,54 @@
|
|||
<template>

    <v-list-item>
        <v-checkbox density="compact" v-model="log_socket_messages" label="Log Websocket Messages" color="primary"></v-checkbox>
        <v-text-field v-if="log_socket_messages === true" density="compact" v-model="filter_socket_messages" label="Filter Websocket Messages" color="primary"></v-text-field>
    </v-list-item>

    <DebugToolPromptLog ref="promptLog"/>
</template>

<script>

import DebugToolPromptLog from './DebugToolPromptLog.vue';

/**
 * Container for debug tooling shown in the debug navigation drawer.
 *
 * Offers a toggle that logs incoming websocket messages to the browser
 * console, optionally restricted to message types containing a filter
 * substring, and hosts the prompt log component.
 */
export default {
    name: 'DebugTools',
    components: {
        DebugToolPromptLog,
    },
    data() {
        return {
            expanded: false,
            log_socket_messages: false,   // bound to the checkbox
            filter_socket_messages: null, // substring filter on data.type
        }
    },

    inject: [
        'getWebsocket',
        'registerMessageHandler',
        'setWaitingForInput',
    ],

    methods: {
        handleMessage(data) {
            // guard clauses: only log when enabled and passing the filter
            if (!this.log_socket_messages)
                return;
            if (this.filter_socket_messages && data.type.indexOf(this.filter_socket_messages) === -1)
                return;

            console.log(data);
        }
    },

    created() {
        this.registerMessageHandler(this.handleMessage);
    },

}

</script>
|
|
@ -53,6 +53,14 @@
|
|||
</v-list>
|
||||
</v-navigation-drawer>
|
||||
|
||||
<!-- debug tools navigation drawer -->
|
||||
<v-navigation-drawer v-model="debugDrawer" app location="right">
|
||||
<v-list>
|
||||
<v-list-subheader class="text-uppercase"><v-icon>mdi-bug</v-icon> Debug Tools</v-list-subheader>
|
||||
<DebugTools ref="debugTools"></DebugTools>
|
||||
</v-list>
|
||||
</v-navigation-drawer>
|
||||
|
||||
<!-- system bar -->
|
||||
<v-system-bar>
|
||||
<v-icon icon="mdi-network-outline"></v-icon>
|
||||
|
@ -94,6 +102,7 @@
|
|||
Talemate
|
||||
</v-toolbar-title>
|
||||
<v-spacer></v-spacer>
|
||||
<v-app-bar-nav-icon @click="toggleNavigation('debug')"><v-icon>mdi-bug</v-icon></v-app-bar-nav-icon>
|
||||
<v-app-bar-nav-icon @click="openAppConfig()"><v-icon>mdi-cog</v-icon></v-app-bar-nav-icon>
|
||||
<v-app-bar-nav-icon @click="toggleNavigation('settings')" v-if="configurationRequired()"
|
||||
color="red"><v-icon>mdi-application-cog</v-icon></v-app-bar-nav-icon>
|
||||
|
@ -145,6 +154,7 @@ import CharacterSheet from './CharacterSheet.vue';
|
|||
import SceneHistory from './SceneHistory.vue';
|
||||
import CreativeEditor from './CreativeEditor.vue';
|
||||
import AppConfig from './AppConfig.vue';
|
||||
import DebugTools from './DebugTools.vue';
|
||||
|
||||
export default {
|
||||
components: {
|
||||
|
@ -160,6 +170,7 @@ export default {
|
|||
SceneHistory,
|
||||
CreativeEditor,
|
||||
AppConfig,
|
||||
DebugTools,
|
||||
},
|
||||
name: 'TalemateApp',
|
||||
data() {
|
||||
|
@ -169,6 +180,7 @@ export default {
|
|||
sceneActive: false,
|
||||
drawer: false,
|
||||
sceneDrawer: true,
|
||||
debugDrawer: false,
|
||||
websocket: null,
|
||||
inputDisabled: false,
|
||||
waitingForInput: false,
|
||||
|
@ -369,6 +381,8 @@ export default {
|
|||
this.sceneDrawer = !this.sceneDrawer;
|
||||
else if (navigation == "settings")
|
||||
this.drawer = !this.drawer;
|
||||
else if (navigation == "debug")
|
||||
this.debugDrawer = !this.debugDrawer;
|
||||
},
|
||||
getClients() {
|
||||
if (!this.$refs.aiClient) {
|
||||
|
|
1
templates/llm-prompt/Xwin-LM.jinja2
Normal file
1
templates/llm-prompt/Xwin-LM.jinja2
Normal file
|
@ -0,0 +1 @@
|
|||
{{ system_message }} USER: {{ set_response(prompt, " ASSISTANT:") }}
|
Loading…
Add table
Reference in a new issue