track reasoning tokens and cached tokens ()

Shuchang Zheng 2025-03-20 16:42:57 -07:00 committed by GitHub
parent 185464f8ec
commit eb3eb4eede
9 changed files with 112 additions and 16 deletions


@@ -0,0 +1,37 @@
"""keep track of reasoning token and cached token in the step and thought tables

Revision ID: 3aa168d1ffa5
Revises: c6c0eee7f88d
Create Date: 2025-03-20 23:24:39.160800+00:00

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = "3aa168d1ffa5"
down_revision: Union[str, None] = "c6c0eee7f88d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("observer_thoughts", sa.Column("reasoning_token_count", sa.Integer(), nullable=True))
    op.add_column("observer_thoughts", sa.Column("cached_token_count", sa.Integer(), nullable=True))
    op.add_column("steps", sa.Column("reasoning_token_count", sa.Integer(), nullable=True))
    op.add_column("steps", sa.Column("cached_token_count", sa.Integer(), nullable=True))
    # ### end Alembic commands ###


def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("steps", "cached_token_count")
    op.drop_column("steps", "reasoning_token_count")
    op.drop_column("observer_thoughts", "cached_token_count")
    op.drop_column("observer_thoughts", "reasoning_token_count")
    # ### end Alembic commands ###
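For reference, this revision can be applied or rolled back with Alembic's Python API as well as the CLI; a minimal sketch, assuming the standard alembic.ini at the repository root:

from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumed location of the Alembic config
command.upgrade(cfg, "3aa168d1ffa5")  # apply this revision (CLI equivalent: alembic upgrade head)
# command.downgrade(cfg, "c6c0eee7f88d")  # revert to the previous revision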

poetry.lock (generated)

@@ -2651,13 +2651,13 @@ files = [
[[package]]
name = "litellm"
-version = "1.60.6"
+version = "1.63.12"
description = "Library to easily interface with LLM API providers"
optional = false
python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
files = [
-    {file = "litellm-1.60.6-py3-none-any.whl", hash = "sha256:7c2d61f5073c823aa7b069328fed34e61d0e9a1777f91e758c1770724d060578"},
-    {file = "litellm-1.60.6.tar.gz", hash = "sha256:b9fdd38b482abc6b6d6afffa6fbf25912b70b1b34ca91a5c798aba2d81bef322"},
+    {file = "litellm-1.63.12-py3-none-any.whl", hash = "sha256:ae72a9d7099100b4b1172aaa2954bf6d7b205d47ba76beec5cd53f62dd57913e"},
+    {file = "litellm-1.63.12.tar.gz", hash = "sha256:db875fb0b5d2bebdcf68805bc0bd4733dcebf3630e9eef4753cfe414a53120fc"},
]
[package.dependencies]
@@ -2667,7 +2667,7 @@ httpx = ">=0.23.0"
importlib-metadata = ">=6.8.0"
jinja2 = ">=3.1.2,<4.0.0"
jsonschema = ">=4.22.0,<5.0.0"
-openai = ">=1.61.0"
+openai = ">=1.66.1"
pydantic = ">=2.0.0,<3.0.0"
python-dotenv = ">=0.2.0"
tiktoken = ">=0.7.0"
@@ -2675,7 +2675,7 @@ tokenizers = "*"
[package.extras]
extra-proxy = ["azure-identity (>=1.15.0,<2.0.0)", "azure-keyvault-secrets (>=4.8.0,<5.0.0)", "google-cloud-kms (>=2.21.3,<3.0.0)", "prisma (==0.11.0)", "resend (>=0.8.0,<0.9.0)"]
-proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "cryptography (>=43.0.1,<44.0.0)", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=22.0.0,<23.0.0)", "orjson (>=3.9.7,<4.0.0)", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0)"]
+proxy = ["PyJWT (>=2.8.0,<3.0.0)", "apscheduler (>=3.10.4,<4.0.0)", "backoff", "boto3 (==1.34.34)", "cryptography (>=43.0.1,<44.0.0)", "fastapi (>=0.115.5,<0.116.0)", "fastapi-sso (>=0.16.0,<0.17.0)", "gunicorn (>=22.0.0,<23.0.0)", "orjson (>=3.9.7,<4.0.0)", "pynacl (>=1.5.0,<2.0.0)", "python-multipart (>=0.0.18,<0.0.19)", "pyyaml (>=6.0.1,<7.0.0)", "rq", "uvicorn (>=0.29.0,<0.30.0)", "uvloop (>=0.21.0,<0.22.0)", "websockets (>=13.1.0,<14.0.0)"]
[[package]]
name = "mako"
@@ -3337,13 +3337,13 @@ sympy = "*"
[[package]]
name = "openai"
-version = "1.63.2"
+version = "1.68.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
-    {file = "openai-1.63.2-py3-none-any.whl", hash = "sha256:1f38b27b5a40814c2b7d8759ec78110df58c4a614c25f182809ca52b080ff4d4"},
-    {file = "openai-1.63.2.tar.gz", hash = "sha256:aeabeec984a7d2957b4928ceaa339e2ead19c61cfcf35ae62b7c363368d26360"},
+    {file = "openai-1.68.0-py3-none-any.whl", hash = "sha256:20e279b0f3a78cb4a95f3eab2a180f3ee30c6a196aeebd6bf642a4f88ab85ee1"},
+    {file = "openai-1.68.0.tar.gz", hash = "sha256:c570c06c9ba10f98b891ac30a3dd7b5c89ed48094c711c7a3f35fb5ade6c0757"},
]
[package.dependencies]
@@ -3351,8 +3351,10 @@ anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
httpx = ">=0.23.0,<1"
jiter = ">=0.4.0,<1"
+numpy = ">=2.0.2"
pydantic = ">=1.9.0,<3"
sniffio = "*"
+sounddevice = ">=0.5.1"
tqdm = ">4"
typing-extensions = ">=4.11,<5"
@@ -5324,6 +5326,26 @@ files = [
    {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"},
]

+[[package]]
+name = "sounddevice"
+version = "0.5.1"
+description = "Play and Record Sound with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "sounddevice-0.5.1-py3-none-any.whl", hash = "sha256:e2017f182888c3f3c280d9fbac92e5dbddac024a7e3442f6e6116bd79dab8a9c"},
+    {file = "sounddevice-0.5.1-py3-none-macosx_10_6_x86_64.macosx_10_6_universal2.whl", hash = "sha256:d16cb23d92322526a86a9490c427bf8d49e273d9ccc0bd096feecd229cde6031"},
+    {file = "sounddevice-0.5.1-py3-none-win32.whl", hash = "sha256:d84cc6231526e7a08e89beff229c37f762baefe5e0cc2747cbe8e3a565470055"},
+    {file = "sounddevice-0.5.1-py3-none-win_amd64.whl", hash = "sha256:4313b63f2076552b23ac3e0abd3bcfc0c1c6a696fc356759a13bd113c9df90f1"},
+    {file = "sounddevice-0.5.1.tar.gz", hash = "sha256:09ca991daeda8ce4be9ac91e15a9a81c8f81efa6b695a348c9171ea0c16cb041"},
+]
+
+[package.dependencies]
+CFFI = ">=1.0"
+
+[package.extras]
+numpy = ["NumPy"]
[[package]]
name = "soupsieve"
version = "2.6"
@@ -6535,4 +6557,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.0"
python-versions = "^3.11,<3.12"
-content-hash = "a375238b024ba31d21440421bbd89689139b6846789e70bd45e1515f615dbc2a"
+content-hash = "91915e847d90ac222cdedc44682ab71870d5b369e3a6b50c3a333a09a94bcbfc"


@@ -16,7 +16,7 @@ python-multipart = "^0.0.6"
toml = "^0.10.2"
jinja2 = "^3.1.2"
uvicorn = {extras = ["standard"], version = "^0.24.0.post1"}
-litellm = "1.60.6"
+litellm = "^1.63.12"
selenium = "^4.13.0"
playwright = "1.46.0"
pre-commit = "^3.5.0"


@@ -163,12 +163,11 @@ class LLMAPIHandlerFactory:
LOG.exception("Failed to calculate LLM cost", error=str(e))
llm_cost = 0
prompt_tokens = response.get("usage", {}).get("prompt_tokens", 0)
-# TODO (suchintan): Properly support reasoning tokens
-reasoning_tokens = response.get("usage", {}).get("reasoning_tokens", 0)
-LOG.debug("Reasoning tokens", reasoning_tokens=reasoning_tokens)
-completion_tokens = response.get("usage", {}).get("completion_tokens", 0) + reasoning_tokens
+reasoning_tokens = (
+    response.get("usage", {}).get("completion_tokens_details", {}).get("reasoning_tokens", 0)
+)
+completion_tokens = response.get("usage", {}).get("completion_tokens", 0)
+cached_tokens = response.get("usage", {}).get("prompt_tokens_details", {}).get("cached_tokens", 0)
if step:
await app.DATABASE.update_step(
@@ -178,6 +177,8 @@ class LLMAPIHandlerFactory:
incremental_cost=llm_cost,
incremental_input_tokens=prompt_tokens if prompt_tokens > 0 else None,
incremental_output_tokens=completion_tokens if completion_tokens > 0 else None,
+incremental_reasoning_tokens=reasoning_tokens if reasoning_tokens > 0 else None,
+incremental_cached_tokens=cached_tokens if cached_tokens > 0 else None,
)
if thought:
await app.DATABASE.update_thought(
@@ -186,6 +187,8 @@ class LLMAPIHandlerFactory:
input_token_count=prompt_tokens if prompt_tokens > 0 else None,
output_token_count=completion_tokens if completion_tokens > 0 else None,
thought_cost=llm_cost,
+reasoning_token_count=reasoning_tokens if reasoning_tokens > 0 else None,
+cached_token_count=cached_tokens if cached_tokens > 0 else None,
)
parsed_response = parse_api_response(response, llm_config.add_assistant_prefix)
await app.ARTIFACT_MANAGER.create_llm_artifact(
@@ -348,6 +351,10 @@ class LLMAPIHandlerFactory:
llm_cost = 0
prompt_tokens = response.get("usage", {}).get("prompt_tokens", 0)
completion_tokens = response.get("usage", {}).get("completion_tokens", 0)
+reasoning_tokens = (
+    response.get("usage", {}).get("completion_tokens_details", {}).get("reasoning_tokens", 0)
+)
+cached_tokens = response.get("usage", {}).get("prompt_tokens_details", {}).get("cached_tokens", 0)
if step:
await app.DATABASE.update_step(
task_id=step.task_id,
@@ -356,6 +363,8 @@ class LLMAPIHandlerFactory:
incremental_cost=llm_cost,
incremental_input_tokens=prompt_tokens if prompt_tokens > 0 else None,
incremental_output_tokens=completion_tokens if completion_tokens > 0 else None,
+incremental_reasoning_tokens=reasoning_tokens if reasoning_tokens > 0 else None,
+incremental_cached_tokens=cached_tokens if cached_tokens > 0 else None,
)
if thought:
await app.DATABASE.update_thought(
@@ -363,6 +372,8 @@ class LLMAPIHandlerFactory:
organization_id=thought.organization_id,
input_token_count=prompt_tokens if prompt_tokens > 0 else None,
output_token_count=completion_tokens if completion_tokens > 0 else None,
+reasoning_token_count=reasoning_tokens if reasoning_tokens > 0 else None,
+cached_token_count=cached_tokens if cached_tokens > 0 else None,
thought_cost=llm_cost,
)
parsed_response = parse_api_response(response, llm_config.add_assistant_prefix)
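Both call sites read the same nested usage shape. A minimal sketch of the payload being parsed (field names follow the OpenAI-style usage block that LiteLLM forwards; token counts are illustrative):

response = {
    "usage": {
        "prompt_tokens": 1200,
        "completion_tokens": 300,
        "completion_tokens_details": {"reasoning_tokens": 180},
        "prompt_tokens_details": {"cached_tokens": 1024},
    }
}
# Chained .get(..., {}) lookups default every level, so providers that omit
# the details blocks yield 0 instead of raising KeyError:
reasoning_tokens = response.get("usage", {}).get("completion_tokens_details", {}).get("reasoning_tokens", 0)
cached_tokens = response.get("usage", {}).get("prompt_tokens_details", {}).get("cached_tokens", 0)
assert reasoning_tokens == 180 and cached_tokens == 1024

Note that completion_tokens no longer has reasoning_tokens folded into it; the two counts are now tracked separately.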


@@ -492,6 +492,8 @@ class AgentDB:
incremental_cost: float | None = None,
incremental_input_tokens: int | None = None,
incremental_output_tokens: int | None = None,
+incremental_reasoning_tokens: int | None = None,
+incremental_cached_tokens: int | None = None,
) -> Step:
try:
async with self.Session() as session:
@@ -517,6 +519,10 @@ class AgentDB:
    step.input_token_count = incremental_input_tokens + (step.input_token_count or 0)
if incremental_output_tokens is not None:
    step.output_token_count = incremental_output_tokens + (step.output_token_count or 0)
+if incremental_reasoning_tokens is not None:
+    step.reasoning_token_count = incremental_reasoning_tokens + (step.reasoning_token_count or 0)
+if incremental_cached_tokens is not None:
+    step.cached_token_count = incremental_cached_tokens + (step.cached_token_count or 0)
await session.commit()
updated_step = await self.get_step(task_id, step_id, organization_id)
@@ -2290,6 +2296,8 @@ class AgentDB:
output: dict[str, Any] | None = None,
input_token_count: int | None = None,
output_token_count: int | None = None,
+reasoning_token_count: int | None = None,
+cached_token_count: int | None = None,
thought_cost: float | None = None,
organization_id: str | None = None,
) -> Thought:
@@ -2309,6 +2317,8 @@ class AgentDB:
output=output,
input_token_count=input_token_count,
output_token_count=output_token_count,
+reasoning_token_count=reasoning_token_count,
+cached_token_count=cached_token_count,
thought_cost=thought_cost,
organization_id=organization_id,
)
@@ -2330,6 +2340,8 @@ class AgentDB:
output: dict[str, Any] | None = None,
input_token_count: int | None = None,
output_token_count: int | None = None,
+reasoning_token_count: int | None = None,
+cached_token_count: int | None = None,
thought_cost: float | None = None,
organization_id: str | None = None,
) -> Thought:
@@ -2362,6 +2374,10 @@ class AgentDB:
    thought_obj.input_token_count = input_token_count
if output_token_count:
    thought_obj.output_token_count = output_token_count
+if reasoning_token_count:
+    thought_obj.reasoning_token_count = reasoning_token_count
+if cached_token_count:
+    thought_obj.cached_token_count = cached_token_count
if thought_cost:
thought_obj.thought_cost = thought_cost
await session.commit()
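update_step accumulates into the existing counters while update_thought overwrites them; also note the thought path uses truthiness checks, so a literal 0 is skipped rather than written. A standalone sketch of the accumulation idiom used above (hypothetical helper, for illustration only):

def accumulate(current: int | None, incremental: int | None) -> int | None:
    # No new usage reported: leave the stored count untouched.
    if incremental is None:
        return current
    # Treat a never-populated (NULL) column as zero, then add.
    return incremental + (current or 0)

assert accumulate(None, 5) == 5    # first write to a NULL column
assert accumulate(10, 5) == 15     # subsequent writes accumulate
assert accumulate(10, None) == 10  # nothing to add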


@@ -117,6 +117,8 @@ class StepModel(Base):
)
input_token_count = Column(Integer, default=0)
output_token_count = Column(Integer, default=0)
+reasoning_token_count = Column(Integer, default=0)
+cached_token_count = Column(Integer, default=0)
step_cost = Column(Numeric, default=0)
@@ -612,6 +614,8 @@ class ThoughtModel(Base):
answer = Column(String, nullable=True)
input_token_count = Column(Integer, nullable=True)
output_token_count = Column(Integer, nullable=True)
+reasoning_token_count = Column(Integer, nullable=True)
+cached_token_count = Column(Integer, nullable=True)
thought_cost = Column(Numeric, nullable=True)
observer_thought_type = Column(String, nullable=True, default=ThoughtType.plan)
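The two models differ in column style: StepModel gives the new counters a Python-side default of 0, while ThoughtModel leaves them NULL until a value arrives; in both cases the migration above adds plain nullable integer columns with no server default, so pre-existing rows stay NULL. A minimal sketch of the distinction (hypothetical table, SQLAlchemy declarative style, illustration only):

from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class ExampleModel(Base):  # hypothetical table, not part of this commit
    __tablename__ = "example"
    id = Column(Integer, primary_key=True)
    # default=0 is applied by the ORM on INSERT; rows created before the
    # column existed remain NULL in the database.
    reasoning_token_count = Column(Integer, default=0)
    # nullable=True with no default: stays NULL until explicitly set.
    cached_token_count = Column(Integer, nullable=True)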


@@ -108,6 +108,8 @@ def convert_to_step(step_model: StepModel, debug_enabled: bool = False) -> Step:
organization_id=step_model.organization_id,
input_token_count=step_model.input_token_count,
output_token_count=step_model.output_token_count,
+reasoning_token_count=step_model.reasoning_token_count,
+cached_token_count=step_model.cached_token_count,
step_cost=step_model.step_cost,
)


@@ -52,6 +52,8 @@ class Step(BaseModel):
organization_id: str | None = None
input_token_count: int = 0
output_token_count: int = 0
+reasoning_token_count: int = 0
+cached_token_count: int = 0
step_cost: float = 0
def validate_update(


@@ -92,6 +92,8 @@ class Thought(BaseModel):
output: dict[str, Any] | None = None
input_token_count: int | None = None
output_token_count: int | None = None
+reasoning_token_count: int | None = None
+cached_token_count: int | None = None
thought_cost: float | None = None
created_at: datetime
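The API-layer schemas mirror the ORM split: Step counts are plain ints defaulting to 0, Thought counts are optional. A trimmed sketch of how the new fields behave (stand-in models, illustrative only; assumes pydantic v2, as pinned in the lock file):

from pydantic import BaseModel

class StepSketch(BaseModel):  # trimmed stand-in for Step
    reasoning_token_count: int = 0
    cached_token_count: int = 0

class ThoughtSketch(BaseModel):  # trimmed stand-in for Thought
    reasoning_token_count: int | None = None
    cached_token_count: int | None = None

print(StepSketch().model_dump())     # {'reasoning_token_count': 0, 'cached_token_count': 0}
print(ThoughtSketch().model_dump())  # {'reasoning_token_count': None, 'cached_token_count': None}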