diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py
index 49fa04c034..87c01ff326 100644
--- a/sentry_sdk/integrations/langchain.py
+++ b/sentry_sdk/integrations/langchain.py
@@ -1,4 +1,3 @@
-import contextvars
 import itertools
 import sys
 import json
@@ -162,44 +161,6 @@ def _transform_langchain_message_content(content: "Any") -> "Any":
     return content


-# Contextvar to track agent names in a stack for re-entrant agent support
-_agent_stack: "contextvars.ContextVar[Optional[List[Optional[str]]]]" = (
-    contextvars.ContextVar("langchain_agent_stack", default=None)
-)
-
-
-def _push_agent(agent_name: "Optional[str]") -> None:
-    """Push an agent name onto the stack."""
-    stack = _agent_stack.get()
-    if stack is None:
-        stack = []
-    else:
-        # Copy the list to maintain contextvar isolation across async contexts
-        stack = stack.copy()
-    stack.append(agent_name)
-    _agent_stack.set(stack)
-
-
-def _pop_agent() -> "Optional[str]":
-    """Pop an agent name from the stack and return it."""
-    stack = _agent_stack.get()
-    if stack:
-        # Copy the list to maintain contextvar isolation across async contexts
-        stack = stack.copy()
-        agent_name = stack.pop()
-        _agent_stack.set(stack)
-        return agent_name
-    return None
-
-
-def _get_current_agent() -> "Optional[str]":
-    """Get the current agent name (top of stack) without removing it."""
-    stack = _agent_stack.get()
-    if stack:
-        return stack[-1]
-    return None
-
-
 def _get_system_instructions(messages: "List[List[BaseMessage]]") -> "List[str]":
     system_instructions = []

@@ -465,9 +426,11 @@ def on_chat_model_start(
             if ai_system:
                 span.set_data(SPANDATA.GEN_AI_SYSTEM, ai_system)

-            agent_name = _get_current_agent()
-            if agent_name:
-                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+            agent_metadata = kwargs.get("metadata")
+            if isinstance(agent_metadata, dict) and "lc_agent_name" in agent_metadata:
+                span.set_data(
+                    SPANDATA.GEN_AI_AGENT_NAME, agent_metadata["lc_agent_name"]
+                )

             for key, attribute in DATA_FIELDS.items():
                 if key in all_params and all_params[key] is not None:
@@ -665,9 +628,11 @@ def on_tool_start(
             if tool_description is not None:
                 span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description)

-            agent_name = _get_current_agent()
-            if agent_name:
-                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+            agent_metadata = kwargs.get("metadata")
+            if isinstance(agent_metadata, dict) and "lc_agent_name" in agent_metadata:
+                span.set_data(
+                    SPANDATA.GEN_AI_AGENT_NAME, agent_metadata["lc_agent_name"]
+                )

             if should_send_default_pii() and self.include_prompts:
                 set_data_normalized(
@@ -987,58 +952,53 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
         if integration is None:
             return f(self, *args, **kwargs)

-        agent_name, tools = _get_request_data(self, args, kwargs)
+        run_name, tools = _get_request_data(self, args, kwargs)
         start_span_function = get_start_span_function()

         with start_span_function(
             op=OP.GEN_AI_INVOKE_AGENT,
-            name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
+            name=f"invoke_agent {run_name}" if run_name else "invoke_agent",
             origin=LangchainIntegration.origin,
         ) as span:
-            _push_agent(agent_name)
-            try:
-                if agent_name:
-                    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
+            if run_name:
+                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, run_name)

-                span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
-                span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
+            span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)

-                _set_tools_on_span(span, tools)
+            _set_tools_on_span(span, tools)

-                # Run the agent
-                result = f(self, *args, **kwargs)
+            # Run the agent
+            result = f(self, *args, **kwargs)

-                input = result.get("input")
-                if (
-                    input is not None
-                    and should_send_default_pii()
-                    and integration.include_prompts
-                ):
-                    normalized_messages = normalize_message_roles([input])
-                    scope = sentry_sdk.get_current_scope()
-                    messages_data = truncate_and_annotate_messages(
-                        normalized_messages, span, scope
+            input = result.get("input")
+            if (
+                input is not None
+                and should_send_default_pii()
+                and integration.include_prompts
+            ):
+                normalized_messages = normalize_message_roles([input])
+                scope = sentry_sdk.get_current_scope()
+                messages_data = truncate_and_annotate_messages(
+                    normalized_messages, span, scope
+                )
+                if messages_data is not None:
+                    set_data_normalized(
+                        span,
+                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
+                        messages_data,
+                        unpack=False,
                     )
-                    if messages_data is not None:
-                        set_data_normalized(
-                            span,
-                            SPANDATA.GEN_AI_REQUEST_MESSAGES,
-                            messages_data,
-                            unpack=False,
-                        )

-                output = result.get("output")
-                if (
-                    output is not None
-                    and should_send_default_pii()
-                    and integration.include_prompts
-                ):
-                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
+            output = result.get("output")
+            if (
+                output is not None
+                and should_send_default_pii()
+                and integration.include_prompts
+            ):
+                set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)

-                return result
-            finally:
-                # Ensure agent is popped even if an exception occurs
-                _pop_agent()
+            return result

     return new_invoke

@@ -1050,21 +1010,16 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
         if integration is None:
             return f(self, *args, **kwargs)

-        agent_name, tools = _get_request_data(self, args, kwargs)
+        run_name, tools = _get_request_data(self, args, kwargs)
         start_span_function = get_start_span_function()

         span = start_span_function(
             op=OP.GEN_AI_INVOKE_AGENT,
-            name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
+            name=f"invoke_agent {run_name}" if run_name else "invoke_agent",
             origin=LangchainIntegration.origin,
         )
         span.__enter__()

-        _push_agent(agent_name)
-
-        if agent_name:
-            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
-
         span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
         span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

@@ -1117,7 +1072,6 @@ def new_iterator() -> "Iterator[Any]":
                 raise
             finally:
                 # Ensure cleanup happens even if iterator is abandoned or fails
-                _pop_agent()
                 span.__exit__(*exc_info)

         async def new_iterator_async() -> "AsyncIterator[Any]":
@@ -1143,7 +1097,6 @@ async def new_iterator_async() -> "AsyncIterator[Any]":
                 raise
             finally:
                 # Ensure cleanup happens even if iterator is abandoned or fails
-                _pop_agent()
                 span.__exit__(*exc_info)

         if str(type(result)) == "<class 'async_generator'>":
diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
index 498a5d6f4a..9243fcda53 100644
--- a/tests/integrations/langchain/test_langchain.py
+++ b/tests/integrations/langchain/test_langchain.py
@@ -259,6 +259,8 @@ def test_langchain_create_agent(
     assert chat_spans[0]["origin"] == "auto.ai.langchain"
     assert chat_spans[0]["data"]["gen_ai.system"] == "openai-chat"

+    assert chat_spans[0]["data"]["gen_ai.agent.name"] == "word_length_agent"
+
     assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10
     assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20
     assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30
@@ -415,6 +417,10 @@ def test_tool_execution_span(
     assert chat_spans[1]["origin"] == "auto.ai.langchain"
     assert tool_exec_span["origin"] == "auto.ai.langchain"

+    assert chat_spans[0]["data"]["gen_ai.agent.name"] == "word_length_agent"
+    assert chat_spans[1]["data"]["gen_ai.agent.name"] == "word_length_agent"
+    assert tool_exec_span["data"]["gen_ai.agent.name"] == "word_length_agent"
+
     assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142
     assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50
     assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192
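
Context note (not part of the patch): a minimal sketch of how run metadata carrying "lc_agent_name" is expected to reach the callbacks changed above. LangChain forwards RunnableConfig metadata to callback handlers, so on_chat_model_start / on_tool_start receive it via the metadata keyword argument, which is what the new kwargs.get("metadata") lookup picks up. The handler class, chain, and agent name below are hypothetical placeholders, not SDK code.

# Sketch under the assumptions above; names like AgentNameEcho and
# "word_length_agent" are illustrative only.
from typing import Any, Dict, List, Optional
from uuid import UUID

from langchain_core.callbacks import BaseCallbackHandler


class AgentNameEcho(BaseCallbackHandler):
    def on_chat_model_start(
        self,
        serialized: Dict[str, Any],
        messages: List[List[Any]],
        *,
        run_id: UUID,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        # RunnableConfig metadata is forwarded to handlers, so this sees
        # {"lc_agent_name": "word_length_agent"} when it was set on the config.
        if isinstance(metadata, dict) and "lc_agent_name" in metadata:
            print("agent name:", metadata["lc_agent_name"])


# Hypothetical usage with some runnable `chain`:
# chain.invoke(
#     {"input": "How many letters are in 'educa'?"},
#     config={
#         "metadata": {"lc_agent_name": "word_length_agent"},
#         "callbacks": [AgentNameEcho()],
#     },
# )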