Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 7 additions & 2 deletions sentry_sdk/integrations/openai_agents/patches/agent_run.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@
if TYPE_CHECKING:
from typing import Any, Optional

from sentry_sdk.tracing import Span

try:
import agents
except ImportError:
Expand All @@ -27,13 +29,15 @@ def _patch_agent_run():
original_execute_final_output = agents._run_impl.RunImpl.execute_final_output

def _start_invoke_agent_span(context_wrapper, agent, kwargs):
    # type: (agents.RunContextWrapper, agents.Agent, dict[str, Any]) -> Span
    """Start an agent invocation span and return it.

    The agent and the newly created span are stashed on the context
    wrapper so later hooks (e.g. end-of-invocation) can retrieve them.
    """
    # Store the agent on the context wrapper so we can access it later
    context_wrapper._sentry_current_agent = agent
    span = invoke_agent_span(context_wrapper, agent, kwargs)
    context_wrapper._sentry_agent_span = span

    return span

def _end_invoke_agent_span(context_wrapper, agent, output=None):
# type: (agents.RunContextWrapper, agents.Agent, Optional[Any]) -> None
"""End the agent invocation span"""
Expand Down Expand Up @@ -73,7 +77,8 @@ async def patched_run_single_turn(cls, *args, **kwargs):
if current_agent and current_agent != agent:
_end_invoke_agent_span(context_wrapper, current_agent)

_start_invoke_agent_span(context_wrapper, agent, kwargs)
span = _start_invoke_agent_span(context_wrapper, agent, kwargs)
agent._sentry_agent_span = span

# Call original method with all the correct parameters
result = await original_run_single_turn(*args, **kwargs)
Expand Down
27 changes: 26 additions & 1 deletion sentry_sdk/integrations/openai_agents/patches/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from sentry_sdk.integrations import DidNotEnable

from ..spans import ai_client_span, update_ai_client_span
from sentry_sdk.consts import SPANDATA

from typing import TYPE_CHECKING

Expand Down Expand Up @@ -33,13 +34,37 @@ def wrapped_get_model(cls, agent, run_config):
model = original_get_model(agent, run_config)
original_get_response = model.get_response

# Wrap _fetch_response if it exists (for OpenAI models) to capture raw response model
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

out of curiosity, why do we need to capture the raw response model?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is the only place I found to get the response model. OpenAI Agents does not expose it: we only get the request model, not the response model.

if hasattr(model, "_fetch_response"):
original_fetch_response = model._fetch_response

@wraps(original_fetch_response)
async def wrapped_fetch_response(*args, **kwargs):
    # type: (*Any, **Any) -> Any
    """Delegate to the original ``_fetch_response`` and remember the
    raw response's model name on the agent for later span enrichment."""
    raw_response = await original_fetch_response(*args, **kwargs)
    if hasattr(raw_response, "model"):
        agent._sentry_raw_response_model = str(raw_response.model)
    return raw_response

model._fetch_response = wrapped_fetch_response

@wraps(original_get_response)
async def wrapped_get_response(*args, **kwargs):
    # type: (*Any, **Any) -> Any
    """Run the model request inside an AI client span.

    After the request completes, propagate the raw response model name
    (captured by the ``_fetch_response`` wrapper, when available) to both
    the enclosing agent-invocation span and the AI client span.
    """
    with ai_client_span(agent, kwargs) as span:
        result = await original_get_response(*args, **kwargs)

        # Model name captured from the raw response, if any.
        response_model = getattr(agent, "_sentry_raw_response_model", None)
        if response_model:
            # Also record it on the agent invocation span, if one exists.
            agent_span = getattr(agent, "_sentry_agent_span", None)
            if agent_span:
                agent_span.set_data(
                    SPANDATA.GEN_AI_RESPONSE_MODEL, response_model
                )

            # One-shot value: remove it so it cannot leak into a
            # subsequent request made with the same agent.
            delattr(agent, "_sentry_raw_response_model")

        # Single update with the captured response model (the older
        # four-argument call is superseded by this one).
        update_ai_client_span(span, agent, kwargs, result, response_model)

    return result

Expand Down
4 changes: 3 additions & 1 deletion sentry_sdk/integrations/openai_agents/patches/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,11 @@ async def wrapper(*args, **kwargs):
# Isolate each workflow so that when agents are run in asyncio tasks they
# don't touch each other's scopes
with sentry_sdk.isolation_scope():
agent = args[0]
# Clone agent because agent invocation spans are attached per run.
agent = args[0].clone()
with agent_workflow_span(agent):
result = None
args = (agent, *args[1:])
try:
result = await original_func(*args, **kwargs)
return result
Expand Down
12 changes: 9 additions & 3 deletions sentry_sdk/integrations/openai_agents/spans/ai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@

if TYPE_CHECKING:
from agents import Agent
from typing import Any
from typing import Any, Optional


def ai_client_span(agent, get_response_kwargs):
Expand All @@ -35,8 +35,14 @@ def ai_client_span(agent, get_response_kwargs):
return span


def update_ai_client_span(
    span, agent, get_response_kwargs, result, response_model=None
):
    # type: (sentry_sdk.tracing.Span, Agent, dict[str, Any], Any, Optional[str]) -> None
    """Attach usage, output, and tool-span data from *result* to *span*.

    ``response_model`` is backward-compatible: callers using the old
    four-argument form simply skip the response-model attribute.
    """
    _set_usage_data(span, result.usage)
    _set_output_data(span, result)
    _create_mcp_execute_tool_spans(span, result)

    # Set response model if captured from raw response
    if response_model is not None:
        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
from sentry_sdk.utils import safe_serialize

from ..consts import SPAN_ORIGIN
from ..utils import _set_agent_data
from ..utils import _set_agent_data, _set_usage_data

from typing import TYPE_CHECKING

Expand Down Expand Up @@ -84,6 +84,10 @@ def update_invoke_agent_span(context, agent, output):
span = getattr(context, "_sentry_agent_span", None)

if span:
# Add aggregated usage data from context_wrapper
if hasattr(context, "usage"):
_set_usage_data(span, context.usage)

if should_send_default_pii():
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False
Expand Down
Loading