mirror of
https://github.com/vegu-ai/talemate.git
synced 2025-09-03 19:09:12 +00:00
* linting * improve prompt devtools: test changes, show more information * some more polish for the new promp devtools * up default conversation gen length to 128 * openai client tweaks, talemate sets max_tokens on gpt-3.5 generations * support new openai embeddings (and default to text-embedding-3-small) * ux polish for character sheet and character state ux * actor instructions * experiment using # for context / instructions * fix bug where regenerating history would mess up time stamps * remove trailing ] * prevent client ctx from being unset * fix issue where sometimes you'd need to delete a client twice for it to disappear * upgrade dependencies * set 0.19.0 * fix performance degradation caused by circular loading animation * remove coqui studio support * fix issue when switching from unsaved creative mode to loading a scene * third party client / agent support * edit dialogue examples through character / actor editor * remove "edit dialogue" action from editor - replaced by character actor instructions * different icon for delete * prompt adjustment for acting instructions * adhoc context generation for character attributes and details * add adhoc generation for character description * contextual generation tweaks * contextual generation for dialogue examples fix some formatting issues * contextual generation for world entries * prepopulate initial recent scenarios with demo scenes add experimental holodeck scenario * scene info scene experimental * assortment of fixes for holodeck improvements * more holodeck fixes * refactor holodeck instructions * rename holodeck to simulation suite * better scene status messages * add new gpt-3.5-turbo model, better json response coercion for older models * allow exclusion of characters when persisting based on world state * better error handling of world state response * better error handling of world state response * more simulation suite fixes * progress color * world state character name mapping support * if neither 
quote nor asterisk is in message default to quotes * fix rerun of new paraphrase op * sim suite ping that ensures characters are not aware of sim * fixes for better character name assessment simulation suite can now give the player character a proper name * fix bug with new status notifications * sim suite adjustments and fixes and tuning * sim suite tweaks * impl scene restore from file * prompting tweaks for reinforcement messages and acting instructions * more tweaks * dialogue prompt tweaks for rerun + rewrite * fix bug with character entry / exit with narration * linting * simsuite screenshots * screenshots
67 lines
2 KiB
Python
67 lines
2 KiB
Python
import pydantic
|
|
from openai import AsyncOpenAI
|
|
|
|
from talemate.client.base import ClientBase
|
|
from talemate.client.registry import register
|
|
|
|
|
|
class Defaults(pydantic.BaseModel):
|
|
api_url: str = "http://localhost:1234"
|
|
max_token_length: int = 4096
|
|
|
|
@register()
|
|
class TestClient(ClientBase):
|
|
client_type = "test"
|
|
|
|
class Meta(ClientBase.Meta):
|
|
name_prefix: str = "test"
|
|
title: str = "Test"
|
|
defaults: Defaults = Defaults()
|
|
|
|
def set_client(self, **kwargs):
|
|
self.client = AsyncOpenAI(base_url=self.api_url + "/v1", api_key="sk-1111")
|
|
|
|
def tune_prompt_parameters(self, parameters: dict, kind: str):
|
|
|
|
"""
|
|
Talemate adds a bunch of parameters to the prompt, but not all of them are valid for all clients.
|
|
|
|
This method is called before the prompt is sent to the client, and it allows the client to remove
|
|
any parameters that it doesn't support.
|
|
"""
|
|
|
|
super().tune_prompt_parameters(parameters, kind)
|
|
|
|
keys = list(parameters.keys())
|
|
|
|
valid_keys = ["temperature", "top_p"]
|
|
|
|
for key in keys:
|
|
if key not in valid_keys:
|
|
del parameters[key]
|
|
|
|
async def get_model_name(self):
|
|
|
|
"""
|
|
This should return the name of the model that is being used.
|
|
"""
|
|
|
|
return "Mock test model"
|
|
|
|
async def generate(self, prompt: str, parameters: dict, kind: str):
|
|
"""
|
|
Generates text from the given prompt and parameters.
|
|
"""
|
|
human_message = {"role": "user", "content": prompt.strip()}
|
|
|
|
self.log.debug("generate", prompt=prompt[:128] + " ...", parameters=parameters)
|
|
|
|
try:
|
|
response = await self.client.chat.completions.create(
|
|
model=self.model_name, messages=[human_message], **parameters
|
|
)
|
|
|
|
return response.choices[0].message.content
|
|
except Exception as e:
|
|
self.log.error("generate error", e=e)
|
|
return ""
|