From 43af9c254a15aaa1dc479e9580cb4490255375d3 Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Wed, 14 Jan 2026 11:15:57 +0100 Subject: [PATCH 1/6] feat(ai): add cache writes for gen_ai --- sentry_sdk/ai/__init__.py | 10 + sentry_sdk/ai/monitoring.py | 7 + sentry_sdk/consts.py | 6 + sentry_sdk/integrations/anthropic.py | 102 +++++- sentry_sdk/integrations/litellm.py | 4 + sentry_sdk/integrations/openai.py | 11 +- .../integrations/openai_agents/utils.py | 24 +- .../integrations/pydantic_ai/spans/utils.py | 22 ++ .../integrations/anthropic/test_anthropic.py | 89 +++++- tests/integrations/openai/test_openai.py | 292 +++++++++++++++++- 10 files changed, 540 insertions(+), 27 deletions(-) diff --git a/sentry_sdk/ai/__init__.py b/sentry_sdk/ai/__init__.py index fbcb9c061d..6eeeed3d76 100644 --- a/sentry_sdk/ai/__init__.py +++ b/sentry_sdk/ai/__init__.py @@ -1,3 +1,4 @@ +from .monitoring import record_token_usage # noqa: F401 from .utils import ( set_data_normalized, GEN_AI_MESSAGE_ROLE_MAPPING, @@ -5,3 +6,12 @@ normalize_message_role, normalize_message_roles, ) # noqa: F401 + +__all__ = [ + "record_token_usage", + "set_data_normalized", + "GEN_AI_MESSAGE_ROLE_MAPPING", + "GEN_AI_MESSAGE_ROLE_REVERSE_MAPPING", + "normalize_message_role", + "normalize_message_roles", +] diff --git a/sentry_sdk/ai/monitoring.py b/sentry_sdk/ai/monitoring.py index 5655712d53..581e967bd4 100644 --- a/sentry_sdk/ai/monitoring.py +++ b/sentry_sdk/ai/monitoring.py @@ -100,6 +100,7 @@ def record_token_usage( span: "Span", input_tokens: "Optional[int]" = None, input_tokens_cached: "Optional[int]" = None, + input_tokens_cache_write: "Optional[int]" = None, output_tokens: "Optional[int]" = None, output_tokens_reasoning: "Optional[int]" = None, total_tokens: "Optional[int]" = None, @@ -118,6 +119,12 @@ def record_token_usage( input_tokens_cached, ) + if input_tokens_cache_write is not None: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE, + input_tokens_cache_write, + ) + if output_tokens is not None: span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 59d3997c9a..e53533018f 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -632,6 +632,12 @@ class SPANDATA: Example: 50 """ + GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE = "gen_ai.usage.input_tokens.cache_write" + """ + The number of tokens written to the cache when processing the AI input (prompt). + Example: 100 + """ + GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" """ The number of tokens in the output. diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py index 45d810da72..2bc48e54e3 100644 --- a/sentry_sdk/integrations/anthropic.py +++ b/sentry_sdk/integrations/anthropic.py @@ -75,20 +75,36 @@ def _capture_exception(exc: "Any") -> None: sentry_sdk.capture_event(event, hint=hint) -def _get_token_usage(result: "Messages") -> "tuple[int, int]": +def _get_token_usage(result: "Messages") -> "tuple[int, int, int, int]": """ Get token usage from the Anthropic response. 
+ Returns: (input_tokens, output_tokens, cache_read_input_tokens, cache_write_input_tokens) """ input_tokens = 0 output_tokens = 0 + cache_read_input_tokens = 0 + cache_write_input_tokens = 0 if hasattr(result, "usage"): usage = result.usage if hasattr(usage, "input_tokens") and isinstance(usage.input_tokens, int): input_tokens = usage.input_tokens if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int): output_tokens = usage.output_tokens - - return input_tokens, output_tokens + if hasattr(usage, "cache_read_input_tokens") and isinstance( + usage.cache_read_input_tokens, int + ): + cache_read_input_tokens = usage.cache_read_input_tokens + if hasattr(usage, "cache_creation_input_tokens") and isinstance( + usage.cache_creation_input_tokens, int + ): + cache_write_input_tokens = usage.cache_creation_input_tokens + + return ( + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + ) def _collect_ai_data( @@ -96,8 +112,10 @@ def _collect_ai_data( model: "str | None", input_tokens: int, output_tokens: int, + cache_read_input_tokens: int, + cache_write_input_tokens: int, content_blocks: "list[str]", -) -> "tuple[str | None, int, int, list[str]]": +) -> "tuple[str | None, int, int, int, int, list[str]]": """ Collect model information, token usage, and collect content blocks from the AI streaming response. """ @@ -107,6 +125,14 @@ def _collect_ai_data( usage = event.message.usage input_tokens += usage.input_tokens output_tokens += usage.output_tokens + if hasattr(usage, "cache_read_input_tokens") and isinstance( + usage.cache_read_input_tokens, int + ): + cache_read_input_tokens += usage.cache_read_input_tokens + if hasattr(usage, "cache_creation_input_tokens") and isinstance( + usage.cache_creation_input_tokens, int + ): + cache_write_input_tokens += usage.cache_creation_input_tokens model = event.message.model or model elif event.type == "content_block_start": pass @@ -120,7 +146,14 @@ def _collect_ai_data( elif event.type == "message_delta": output_tokens += event.usage.output_tokens - return model, input_tokens, output_tokens, content_blocks + return ( + model, + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + content_blocks, + ) def _transform_anthropic_content_block( @@ -265,6 +298,8 @@ def _set_output_data( model: "str | None", input_tokens: "int | None", output_tokens: "int | None", + cache_read_input_tokens: "int | None", + cache_write_input_tokens: "int | None", content_blocks: "list[Any]", finish_span: bool = False, ) -> None: @@ -300,6 +335,8 @@ def _set_output_data( span, input_tokens=input_tokens, output_tokens=output_tokens, + input_tokens_cached=cache_read_input_tokens, + input_tokens_cache_write=cache_write_input_tokens, ) if finish_span: @@ -334,7 +371,12 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A with capture_internal_exceptions(): if hasattr(result, "content"): - input_tokens, output_tokens = _get_token_usage(result) + ( + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + ) = _get_token_usage(result) content_blocks = [] for content_block in result.content: @@ -351,6 +393,8 @@ def _sentry_patched_create_common(f: "Any", *args: "Any", **kwargs: "Any") -> "A model=getattr(result, "model", None), input_tokens=input_tokens, output_tokens=output_tokens, + cache_read_input_tokens=cache_read_input_tokens, + cache_write_input_tokens=cache_write_input_tokens, content_blocks=content_blocks, finish_span=True, ) @@ 
-363,13 +407,26 @@ def new_iterator() -> "Iterator[MessageStreamEvent]": model = None input_tokens = 0 output_tokens = 0 + cache_read_input_tokens = 0 + cache_write_input_tokens = 0 content_blocks: "list[str]" = [] for event in old_iterator: - model, input_tokens, output_tokens, content_blocks = ( - _collect_ai_data( - event, model, input_tokens, output_tokens, content_blocks - ) + ( + model, + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + content_blocks, + ) = _collect_ai_data( + event, + model, + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + content_blocks, ) yield event @@ -379,6 +436,8 @@ def new_iterator() -> "Iterator[MessageStreamEvent]": model=model, input_tokens=input_tokens, output_tokens=output_tokens, + cache_read_input_tokens=cache_read_input_tokens, + cache_write_input_tokens=cache_write_input_tokens, content_blocks=[{"text": "".join(content_blocks), "type": "text"}], finish_span=True, ) @@ -387,13 +446,26 @@ async def new_iterator_async() -> "AsyncIterator[MessageStreamEvent]": model = None input_tokens = 0 output_tokens = 0 + cache_read_input_tokens = 0 + cache_write_input_tokens = 0 content_blocks: "list[str]" = [] async for event in old_iterator: - model, input_tokens, output_tokens, content_blocks = ( - _collect_ai_data( - event, model, input_tokens, output_tokens, content_blocks - ) + ( + model, + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + content_blocks, + ) = _collect_ai_data( + event, + model, + input_tokens, + output_tokens, + cache_read_input_tokens, + cache_write_input_tokens, + content_blocks, ) yield event @@ -403,6 +475,8 @@ async def new_iterator_async() -> "AsyncIterator[MessageStreamEvent]": model=model, input_tokens=input_tokens, output_tokens=output_tokens, + cache_read_input_tokens=cache_read_input_tokens, + cache_write_input_tokens=cache_write_input_tokens, content_blocks=[{"text": "".join(content_blocks), "type": "text"}], finish_span=True, ) diff --git a/sentry_sdk/integrations/litellm.py b/sentry_sdk/integrations/litellm.py index 5ec079367e..52e488d408 100644 --- a/sentry_sdk/integrations/litellm.py +++ b/sentry_sdk/integrations/litellm.py @@ -222,6 +222,10 @@ def _success_callback( record_token_usage( span, input_tokens=getattr(usage, "prompt_tokens", None), + input_tokens_cached=getattr(usage, "cache_read_input_tokens", None), + input_tokens_cache_write=getattr( + usage, "cache_write_input_tokens", None + ), output_tokens=getattr(usage, "completion_tokens", None), total_tokens=getattr(usage, "total_tokens", None), ) diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py index 66dc4a1c48..1e9211deb5 100644 --- a/sentry_sdk/integrations/openai.py +++ b/sentry_sdk/integrations/openai.py @@ -133,7 +133,12 @@ def _calculate_token_usage( if hasattr(response, "usage"): input_tokens = _get_usage(response.usage, ["input_tokens", "prompt_tokens"]) - if hasattr(response.usage, "input_tokens_details"): + if hasattr(response.usage, "prompt_tokens_details"): + input_tokens_cached = _get_usage( + response.usage.prompt_tokens_details, ["cached_tokens"] + ) + # OpenAI also supports input_tokens_details for compatibility + elif hasattr(response.usage, "input_tokens_details"): input_tokens_cached = _get_usage( response.usage.input_tokens_details, ["cached_tokens"] ) @@ -145,6 +150,10 @@ def _calculate_token_usage( output_tokens_reasoning = _get_usage( response.usage.output_tokens_details, ["reasoning_tokens"] ) + 
elif hasattr(response.usage, "completion_tokens_details"): + output_tokens_reasoning = _get_usage( + response.usage.completion_tokens_details, ["reasoning_tokens"] + ) total_tokens = _get_usage(response.usage, ["total_tokens"]) diff --git a/sentry_sdk/integrations/openai_agents/utils.py b/sentry_sdk/integrations/openai_agents/utils.py index a24d0e909d..d45b8546c6 100644 --- a/sentry_sdk/integrations/openai_agents/utils.py +++ b/sentry_sdk/integrations/openai_agents/utils.py @@ -96,15 +96,23 @@ def _set_agent_data(span: "sentry_sdk.tracing.Span", agent: "agents.Agent") -> N def _set_usage_data(span: "sentry_sdk.tracing.Span", usage: "Usage") -> None: span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens) - span.set_data( - SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, - usage.input_tokens_details.cached_tokens, - ) + + if hasattr(usage, "input_tokens_details") and usage.input_tokens_details: + if hasattr(usage.input_tokens_details, "cached_tokens"): + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + usage.input_tokens_details.cached_tokens, + ) + span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens) - span.set_data( - SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, - usage.output_tokens_details.reasoning_tokens, - ) + + if hasattr(usage, "output_tokens_details") and usage.output_tokens_details: + if hasattr(usage.output_tokens_details, "reasoning_tokens"): + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + usage.output_tokens_details.reasoning_tokens, + ) + span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens) diff --git a/sentry_sdk/integrations/pydantic_ai/spans/utils.py b/sentry_sdk/integrations/pydantic_ai/spans/utils.py index 89fef172e1..5d71ade1e6 100644 --- a/sentry_sdk/integrations/pydantic_ai/spans/utils.py +++ b/sentry_sdk/integrations/pydantic_ai/spans/utils.py @@ -28,8 +28,30 @@ def _set_usage_data( if hasattr(usage, "input_tokens") and usage.input_tokens is not None: span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens) + # Pydantic AI uses cache_read_tokens (not input_tokens_cached) + if hasattr(usage, "cache_read_tokens") and usage.cache_read_tokens is not None: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, usage.cache_read_tokens + ) + + # Pydantic AI uses cache_write_tokens (not input_tokens_cache_write) + if hasattr(usage, "cache_write_tokens") and usage.cache_write_tokens is not None: + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE, + usage.cache_write_tokens, + ) + if hasattr(usage, "output_tokens") and usage.output_tokens is not None: span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens) + if ( + hasattr(usage, "output_tokens_reasoning") + and usage.output_tokens_reasoning is not None + ): + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + usage.output_tokens_reasoning, + ) + if hasattr(usage, "total_tokens") and usage.total_tokens is not None: span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens) diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index a8b2feba37..d1554481f0 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -853,10 +853,11 @@ def test_collect_ai_data_with_input_json_delta(): output_tokens = 20 content_blocks = [] - model, new_input_tokens, new_output_tokens, new_content_blocks = _collect_ai_data( - event, model, input_tokens, output_tokens, content_blocks + 
model, new_input_tokens, new_output_tokens, _, _, new_content_blocks = ( + _collect_ai_data( + event, model, input_tokens, output_tokens, 0, 0, content_blocks + ) ) - assert model is None assert new_input_tokens == input_tokens assert new_output_tokens == output_tokens @@ -884,6 +885,8 @@ def test_set_output_data_with_input_json_delta(sentry_init): model="", input_tokens=10, output_tokens=20, + cache_read_input_tokens=0, + cache_write_input_tokens=0, content_blocks=[{"text": "".join(json_deltas), "type": "text"}], ) @@ -2154,3 +2157,83 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_ev # Messages should not be stored assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + + +def test_cache_tokens_nonstreaming(sentry_init, capture_events): + """Test cache read/write tokens are tracked for non-streaming responses.""" + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + events = capture_events() + client = Anthropic(api_key="z") + + client.messages._post = mock.Mock( + return_value=Message( + id="id", + model="claude-3-5-sonnet-20241022", + role="assistant", + content=[TextBlock(type="text", text="Response")], + type="message", + usage=Usage( + input_tokens=100, + output_tokens=50, + cache_read_input_tokens=80, + cache_creation_input_tokens=20, + ), + ) + ) + + with start_transaction(name="anthropic"): + client.messages.create( + max_tokens=1024, + messages=[{"role": "user", "content": "Hello"}], + model="claude-3-5-sonnet-20241022", + ) + + (span,) = events[0]["spans"] + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + + +def test_cache_tokens_streaming(sentry_init, capture_events): + """Test cache tokens are tracked for streaming responses.""" + client = Anthropic(api_key="z") + returned_stream = Stream(cast_to=None, response=None, client=client) + returned_stream._iterator = [ + MessageStartEvent( + type="message_start", + message=Message( + id="id", + model="claude-3-5-sonnet-20241022", + role="assistant", + content=[], + type="message", + usage=Usage( + input_tokens=100, + output_tokens=0, + cache_read_input_tokens=80, + cache_creation_input_tokens=20, + ), + ), + ), + MessageDeltaEvent( + type="message_delta", + delta=Delta(stop_reason="end_turn"), + usage=MessageDeltaUsage(output_tokens=10), + ), + ] + + sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) + events = capture_events() + client.messages._post = mock.Mock(return_value=returned_stream) + + with start_transaction(name="anthropic"): + for _ in client.messages.create( + max_tokens=1024, + messages=[{"role": "user", "content": "Hello"}], + model="claude-3-5-sonnet-20241022", + stream=True, + ): + pass + + (span,) = events[0]["spans"] + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 \ No newline at end of file diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index 814289c887..1e88b21e86 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -39,7 +39,7 @@ SKIP_RESPONSES_TESTS = True from sentry_sdk import start_transaction -from sentry_sdk.consts import SPANDATA +from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations.openai import ( OpenAIIntegration, _calculate_token_usage, @@ -1559,3 +1559,293 @@ def 
test_openai_message_truncation(sentry_init, capture_events): if SPANDATA.GEN_AI_REQUEST_MESSAGES in span_meta: messages_meta = span_meta[SPANDATA.GEN_AI_REQUEST_MESSAGES] assert "len" in messages_meta.get("", {}) + + +def test_openai_cache_tokens_prompt_tokens_details(sentry_init, capture_events): + """Test that cached tokens are tracked from prompt_tokens_details (OpenAI standard).""" + sentry_init( + integrations=[OpenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + # Mock OpenAI client + import openai + + client = openai.OpenAI(api_key="test-key") + + # Create a response with cached tokens in prompt_tokens_details + mock_response = ChatCompletion( + id="chatcmpl-test", + model="gpt-4o", + object="chat.completion", + created=1234567890, + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + role="assistant", + content="Hello! How can I help you?", + ), + ) + ], + usage=CompletionUsage( + completion_tokens=10, + prompt_tokens=100, + total_tokens=110, + ), + ) + + # Add prompt_tokens_details with cached_tokens + mock_response.usage.prompt_tokens_details = {"cached_tokens": 80} + + client.chat.completions._post = mock.Mock(return_value=mock_response) + + with start_transaction(name="openai"): + client.chat.completions.create( + model="gpt-4o", + messages=[{"role": "user", "content": "Hello"}], + ) + + assert len(events) == 1 + (event,) = events + (span,) = event["spans"] + + assert span["op"] == OP.GEN_AI_CHAT + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 100 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 110 + # Check cached tokens from prompt_tokens_details + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + # OpenAI doesn't track cache writes, so this should not be present + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE not in span["data"] + + +def test_openai_cache_tokens_input_tokens_details_compat(sentry_init, capture_events): + """Test that cached tokens are tracked from input_tokens_details (compatibility).""" + sentry_init( + integrations=[OpenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + import openai + + client = openai.OpenAI(api_key="test-key") + + # Create a response with cached tokens in input_tokens_details (for compatibility) + mock_response = ChatCompletion( + id="chatcmpl-test", + model="gpt-4o", + object="chat.completion", + created=1234567890, + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + role="assistant", + content="Response", + ), + ) + ], + usage=CompletionUsage( + completion_tokens=20, + prompt_tokens=150, + total_tokens=170, + ), + ) + + # Add input_tokens_details with cached_tokens (compatibility path) + mock_response.usage.input_tokens_details = {"cached_tokens": 120} + + client.chat.completions._post = mock.Mock(return_value=mock_response) + + with start_transaction(name="openai"): + client.chat.completions.create( + model="gpt-4o", + messages=[{"role": "user", "content": "Hello"}], + ) + + assert len(events) == 1 + (event,) = events + (span,) = event["spans"] + + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 150 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + # Check cached tokens from input_tokens_details + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 
120 + + +def test_openai_reasoning_tokens_compatibility(sentry_init, capture_events): + """Test that reasoning tokens are tracked from both output_tokens_details and completion_tokens_details.""" + sentry_init( + integrations=[OpenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + import openai + + client = openai.OpenAI(api_key="test-key") + + # Create a response with reasoning tokens in completion_tokens_details + mock_response = ChatCompletion( + id="chatcmpl-test", + model="o1-preview", + object="chat.completion", + created=1234567890, + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + role="assistant", + content="Response", + ), + ) + ], + usage=CompletionUsage( + completion_tokens=50, + prompt_tokens=100, + total_tokens=150, + ), + ) + + # Add completion_tokens_details with reasoning_tokens (OpenAI standard) + mock_response.usage.completion_tokens_details = {"reasoning_tokens": 30} + + client.chat.completions._post = mock.Mock(return_value=mock_response) + + with start_transaction(name="openai"): + client.chat.completions.create( + model="o1-preview", + messages=[{"role": "user", "content": "Solve this problem"}], + ) + + assert len(events) == 1 + (event,) = events + (span,) = event["spans"] + + # Check reasoning tokens from completion_tokens_details + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 30 + + +def test_openai_no_cache_tokens(sentry_init, capture_events): + """Test that requests without cache usage don't have cache fields.""" + sentry_init( + integrations=[OpenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + import openai + + client = openai.OpenAI(api_key="test-key") + + # Response without any cache or reasoning token details + mock_response = ChatCompletion( + id="chatcmpl-test", + model="gpt-3.5-turbo", + object="chat.completion", + created=1234567890, + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + role="assistant", + content="Response", + ), + ) + ], + usage=CompletionUsage( + completion_tokens=25, + prompt_tokens=50, + total_tokens=75, + ), + ) + + client.chat.completions._post = mock.Mock(return_value=mock_response) + + with start_transaction(name="openai"): + client.chat.completions.create( + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": "Hello"}], + ) + + assert len(events) == 1 + (event,) = events + (span,) = event["spans"] + + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 50 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 25 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 75 + # Cache and reasoning fields should not be present + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED not in span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE not in span["data"] + assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING not in span["data"] + + +def test_openai_cache_tokens_with_reasoning(sentry_init, capture_events): + """Test that both cached tokens and reasoning tokens can be tracked together.""" + sentry_init( + integrations=[OpenAIIntegration(include_prompts=True)], + traces_sample_rate=1.0, + send_default_pii=True, + ) + events = capture_events() + + import openai + + client = openai.OpenAI(api_key="test-key") + + # Response with both cached tokens and reasoning tokens + mock_response = ChatCompletion( + id="chatcmpl-test", + 
model="o1-preview", + object="chat.completion", + created=1234567890, + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage( + role="assistant", + content="Detailed reasoning response", + ), + ) + ], + usage=CompletionUsage( + completion_tokens=100, + prompt_tokens=200, + total_tokens=300, + ), + ) + + # Add both prompt and completion token details + mock_response.usage.prompt_tokens_details = {"cached_tokens": 150} + mock_response.usage.completion_tokens_details = {"reasoning_tokens": 75} + + client.chat.completions._post = mock.Mock(return_value=mock_response) + + with start_transaction(name="openai"): + client.chat.completions.create( + model="o1-preview", + messages=[{"role": "user", "content": "Complex problem"}], + ) + + assert len(events) == 1 + (event,) = events + (span,) = event["spans"] + + # Check both cached and reasoning tokens + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 100 + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 150 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 75 From be6c53b334237f4839a438a2521e8838daa193fa Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Thu, 15 Jan 2026 14:57:30 +0100 Subject: [PATCH 2/6] simplify --- sentry_sdk/ai/__init__.py | 10 - sentry_sdk/integrations/openai.py | 11 +- .../integrations/anthropic/test_anthropic.py | 2 +- tests/integrations/openai/test_openai.py | 292 +----------------- 4 files changed, 3 insertions(+), 312 deletions(-) diff --git a/sentry_sdk/ai/__init__.py b/sentry_sdk/ai/__init__.py index 6eeeed3d76..fbcb9c061d 100644 --- a/sentry_sdk/ai/__init__.py +++ b/sentry_sdk/ai/__init__.py @@ -1,4 +1,3 @@ -from .monitoring import record_token_usage # noqa: F401 from .utils import ( set_data_normalized, GEN_AI_MESSAGE_ROLE_MAPPING, @@ -6,12 +5,3 @@ normalize_message_role, normalize_message_roles, ) # noqa: F401 - -__all__ = [ - "record_token_usage", - "set_data_normalized", - "GEN_AI_MESSAGE_ROLE_MAPPING", - "GEN_AI_MESSAGE_ROLE_REVERSE_MAPPING", - "normalize_message_role", - "normalize_message_roles", -] diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py index 1e9211deb5..66dc4a1c48 100644 --- a/sentry_sdk/integrations/openai.py +++ b/sentry_sdk/integrations/openai.py @@ -133,12 +133,7 @@ def _calculate_token_usage( if hasattr(response, "usage"): input_tokens = _get_usage(response.usage, ["input_tokens", "prompt_tokens"]) - if hasattr(response.usage, "prompt_tokens_details"): - input_tokens_cached = _get_usage( - response.usage.prompt_tokens_details, ["cached_tokens"] - ) - # OpenAI also supports input_tokens_details for compatibility - elif hasattr(response.usage, "input_tokens_details"): + if hasattr(response.usage, "input_tokens_details"): input_tokens_cached = _get_usage( response.usage.input_tokens_details, ["cached_tokens"] ) @@ -150,10 +145,6 @@ def _calculate_token_usage( output_tokens_reasoning = _get_usage( response.usage.output_tokens_details, ["reasoning_tokens"] ) - elif hasattr(response.usage, "completion_tokens_details"): - output_tokens_reasoning = _get_usage( - response.usage.completion_tokens_details, ["reasoning_tokens"] - ) total_tokens = _get_usage(response.usage, ["total_tokens"]) diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index d1554481f0..e8bc4648b6 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ 
b/tests/integrations/anthropic/test_anthropic.py @@ -2236,4 +2236,4 @@ def test_cache_tokens_streaming(sentry_init, capture_events): (span,) = events[0]["spans"] assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 \ No newline at end of file + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index 1e88b21e86..814289c887 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -39,7 +39,7 @@ SKIP_RESPONSES_TESTS = True from sentry_sdk import start_transaction -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.openai import ( OpenAIIntegration, _calculate_token_usage, @@ -1559,293 +1559,3 @@ def test_openai_message_truncation(sentry_init, capture_events): if SPANDATA.GEN_AI_REQUEST_MESSAGES in span_meta: messages_meta = span_meta[SPANDATA.GEN_AI_REQUEST_MESSAGES] assert "len" in messages_meta.get("", {}) - - -def test_openai_cache_tokens_prompt_tokens_details(sentry_init, capture_events): - """Test that cached tokens are tracked from prompt_tokens_details (OpenAI standard).""" - sentry_init( - integrations=[OpenAIIntegration(include_prompts=True)], - traces_sample_rate=1.0, - send_default_pii=True, - ) - events = capture_events() - - # Mock OpenAI client - import openai - - client = openai.OpenAI(api_key="test-key") - - # Create a response with cached tokens in prompt_tokens_details - mock_response = ChatCompletion( - id="chatcmpl-test", - model="gpt-4o", - object="chat.completion", - created=1234567890, - choices=[ - Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - role="assistant", - content="Hello! 
How can I help you?", - ), - ) - ], - usage=CompletionUsage( - completion_tokens=10, - prompt_tokens=100, - total_tokens=110, - ), - ) - - # Add prompt_tokens_details with cached_tokens - mock_response.usage.prompt_tokens_details = {"cached_tokens": 80} - - client.chat.completions._post = mock.Mock(return_value=mock_response) - - with start_transaction(name="openai"): - client.chat.completions.create( - model="gpt-4o", - messages=[{"role": "user", "content": "Hello"}], - ) - - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] - - assert span["op"] == OP.GEN_AI_CHAT - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 110 - # Check cached tokens from prompt_tokens_details - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - # OpenAI doesn't track cache writes, so this should not be present - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE not in span["data"] - - -def test_openai_cache_tokens_input_tokens_details_compat(sentry_init, capture_events): - """Test that cached tokens are tracked from input_tokens_details (compatibility).""" - sentry_init( - integrations=[OpenAIIntegration(include_prompts=True)], - traces_sample_rate=1.0, - send_default_pii=True, - ) - events = capture_events() - - import openai - - client = openai.OpenAI(api_key="test-key") - - # Create a response with cached tokens in input_tokens_details (for compatibility) - mock_response = ChatCompletion( - id="chatcmpl-test", - model="gpt-4o", - object="chat.completion", - created=1234567890, - choices=[ - Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - role="assistant", - content="Response", - ), - ) - ], - usage=CompletionUsage( - completion_tokens=20, - prompt_tokens=150, - total_tokens=170, - ), - ) - - # Add input_tokens_details with cached_tokens (compatibility path) - mock_response.usage.input_tokens_details = {"cached_tokens": 120} - - client.chat.completions._post = mock.Mock(return_value=mock_response) - - with start_transaction(name="openai"): - client.chat.completions.create( - model="gpt-4o", - messages=[{"role": "user", "content": "Hello"}], - ) - - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] - - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 150 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - # Check cached tokens from input_tokens_details - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 120 - - -def test_openai_reasoning_tokens_compatibility(sentry_init, capture_events): - """Test that reasoning tokens are tracked from both output_tokens_details and completion_tokens_details.""" - sentry_init( - integrations=[OpenAIIntegration(include_prompts=True)], - traces_sample_rate=1.0, - send_default_pii=True, - ) - events = capture_events() - - import openai - - client = openai.OpenAI(api_key="test-key") - - # Create a response with reasoning tokens in completion_tokens_details - mock_response = ChatCompletion( - id="chatcmpl-test", - model="o1-preview", - object="chat.completion", - created=1234567890, - choices=[ - Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - role="assistant", - content="Response", - ), - ) - ], - usage=CompletionUsage( - completion_tokens=50, - prompt_tokens=100, - total_tokens=150, - ), - ) - - # Add completion_tokens_details with reasoning_tokens (OpenAI standard) - 
mock_response.usage.completion_tokens_details = {"reasoning_tokens": 30} - - client.chat.completions._post = mock.Mock(return_value=mock_response) - - with start_transaction(name="openai"): - client.chat.completions.create( - model="o1-preview", - messages=[{"role": "user", "content": "Solve this problem"}], - ) - - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] - - # Check reasoning tokens from completion_tokens_details - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 30 - - -def test_openai_no_cache_tokens(sentry_init, capture_events): - """Test that requests without cache usage don't have cache fields.""" - sentry_init( - integrations=[OpenAIIntegration(include_prompts=True)], - traces_sample_rate=1.0, - send_default_pii=True, - ) - events = capture_events() - - import openai - - client = openai.OpenAI(api_key="test-key") - - # Response without any cache or reasoning token details - mock_response = ChatCompletion( - id="chatcmpl-test", - model="gpt-3.5-turbo", - object="chat.completion", - created=1234567890, - choices=[ - Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - role="assistant", - content="Response", - ), - ) - ], - usage=CompletionUsage( - completion_tokens=25, - prompt_tokens=50, - total_tokens=75, - ), - ) - - client.chat.completions._post = mock.Mock(return_value=mock_response) - - with start_transaction(name="openai"): - client.chat.completions.create( - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": "Hello"}], - ) - - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] - - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 50 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 25 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 75 - # Cache and reasoning fields should not be present - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED not in span["data"] - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE not in span["data"] - assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING not in span["data"] - - -def test_openai_cache_tokens_with_reasoning(sentry_init, capture_events): - """Test that both cached tokens and reasoning tokens can be tracked together.""" - sentry_init( - integrations=[OpenAIIntegration(include_prompts=True)], - traces_sample_rate=1.0, - send_default_pii=True, - ) - events = capture_events() - - import openai - - client = openai.OpenAI(api_key="test-key") - - # Response with both cached tokens and reasoning tokens - mock_response = ChatCompletion( - id="chatcmpl-test", - model="o1-preview", - object="chat.completion", - created=1234567890, - choices=[ - Choice( - finish_reason="stop", - index=0, - message=ChatCompletionMessage( - role="assistant", - content="Detailed reasoning response", - ), - ) - ], - usage=CompletionUsage( - completion_tokens=100, - prompt_tokens=200, - total_tokens=300, - ), - ) - - # Add both prompt and completion token details - mock_response.usage.prompt_tokens_details = {"cached_tokens": 150} - mock_response.usage.completion_tokens_details = {"reasoning_tokens": 75} - - client.chat.completions._post = mock.Mock(return_value=mock_response) - - with start_transaction(name="openai"): - client.chat.completions.create( - model="o1-preview", - messages=[{"role": "user", "content": "Complex problem"}], - ) - - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] - - # Check both cached and reasoning tokens - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - 
assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 150 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 75 From 9a3ce8863df479488c957dbd4346be13593f70d6 Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Thu, 15 Jan 2026 16:02:32 +0100 Subject: [PATCH 3/6] changes to litellm --- sentry_sdk/integrations/litellm.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/sentry_sdk/integrations/litellm.py b/sentry_sdk/integrations/litellm.py index 52e488d408..5ec079367e 100644 --- a/sentry_sdk/integrations/litellm.py +++ b/sentry_sdk/integrations/litellm.py @@ -222,10 +222,6 @@ def _success_callback( record_token_usage( span, input_tokens=getattr(usage, "prompt_tokens", None), - input_tokens_cached=getattr(usage, "cache_read_input_tokens", None), - input_tokens_cache_write=getattr( - usage, "cache_write_input_tokens", None - ), output_tokens=getattr(usage, "completion_tokens", None), total_tokens=getattr(usage, "total_tokens", None), ) From 315d853ffa07fe1583961c46fdc155cb6744433d Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Fri, 16 Jan 2026 09:04:15 +0100 Subject: [PATCH 4/6] revert openai-agents changes --- .../integrations/openai_agents/utils.py | 24 +++++++------------ 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/sentry_sdk/integrations/openai_agents/utils.py b/sentry_sdk/integrations/openai_agents/utils.py index d45b8546c6..a24d0e909d 100644 --- a/sentry_sdk/integrations/openai_agents/utils.py +++ b/sentry_sdk/integrations/openai_agents/utils.py @@ -96,23 +96,15 @@ def _set_agent_data(span: "sentry_sdk.tracing.Span", agent: "agents.Agent") -> N def _set_usage_data(span: "sentry_sdk.tracing.Span", usage: "Usage") -> None: span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens) - - if hasattr(usage, "input_tokens_details") and usage.input_tokens_details: - if hasattr(usage.input_tokens_details, "cached_tokens"): - span.set_data( - SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, - usage.input_tokens_details.cached_tokens, - ) - + span.set_data( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + usage.input_tokens_details.cached_tokens, + ) span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens) - - if hasattr(usage, "output_tokens_details") and usage.output_tokens_details: - if hasattr(usage.output_tokens_details, "reasoning_tokens"): - span.set_data( - SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, - usage.output_tokens_details.reasoning_tokens, - ) - + span.set_data( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + usage.output_tokens_details.reasoning_tokens, + ) span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens) From 37782db6fad8bd72e2d962f63aa1351b2b70dcab Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Fri, 16 Jan 2026 10:55:19 +0100 Subject: [PATCH 5/6] add pydantic test without mocks --- .../pydantic_ai/test_pydantic_ai.py | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index 7387f1be15..47123a6892 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -2730,3 +2730,32 @@ async def test_binary_content_in_agent_run(sentry_init, capture_events): if "gen_ai.request.messages" in chat_span["data"]: messages_str = str(chat_span["data"]["gen_ai.request.messages"]) assert any(keyword in messages_str for keyword in ["blob", 
"image", "base64"]) + +@pytest.mark.asyncio +async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): + """Test that cache_read_tokens and cache_write_tokens are tracked.""" + import sentry_sdk + from pydantic_ai.usage import RequestUsage + from sentry_sdk.integrations.pydantic_ai.spans.utils import _set_usage_data + from sentry_sdk.consts import SPANDATA + + sentry_init(integrations=[PydanticAIIntegration()], traces_sample_rate=1.0) + + events = capture_events() + + with sentry_sdk.start_transaction(op="test", name="test"): + span = sentry_sdk.start_span(op="test_span") + usage = RequestUsage( + input_tokens=100, + output_tokens=50, + cache_read_tokens=80, + cache_write_tokens=20, + ) + _set_usage_data(span, usage) + span.finish() + + (event,) = events + (span_data,) = event["spans"] + assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + From 1d109a697e105fd8b4afd0680c06638d806d154d Mon Sep 17 00:00:00 2001 From: Simon Hellmayr Date: Fri, 16 Jan 2026 14:53:08 +0100 Subject: [PATCH 6/6] reformat --- tests/integrations/pydantic_ai/test_pydantic_ai.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index 47123a6892..fc6a27d714 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -2731,6 +2731,7 @@ async def test_binary_content_in_agent_run(sentry_init, capture_events): messages_str = str(chat_span["data"]["gen_ai.request.messages"]) assert any(keyword in messages_str for keyword in ["blob", "image", "base64"]) + @pytest.mark.asyncio async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): """Test that cache_read_tokens and cache_write_tokens are tracked.""" @@ -2758,4 +2759,3 @@ async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): (span_data,) = event["spans"] assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 -