Skip to content

Commit 092da2a

Browse files
feat: inject runtime arg for ToolRuntime access (#370)
Theoretically allows for access to tool call id, state, etc when available in a LG context (like with `create_agent`). This works w/ the new `langgraph-prebuilt` and `langchain-core`. ```py import asyncio from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client from langchain.agents import create_agent from langchain_mcp_adapters.interceptors import MCPToolCallRequest from langchain_mcp_adapters.tools import load_mcp_tools from langchain_openai import ChatOpenAI from mcp.types import CallToolResult from typing_extensions import TypedDict async def main(): async def runtime_interceptor( request: MCPToolCallRequest, handler ) -> CallToolResult: print(request.runtime.tool_call_id) print(request.runtime.context) print(request.runtime.state) return await handler(request) server_params = StdioServerParameters( command="python", args=["./math_server.py"], ) async with stdio_client(server_params) as (read, write): async with ClientSession(read, write) as session: await session.initialize() tools = await load_mcp_tools( session, tool_interceptors=[runtime_interceptor] ) class ContextSchema(TypedDict): user_id: str agent = create_agent( model=ChatOpenAI(model="gpt-4o-mini"), tools=tools, context_schema=ContextSchema, ) await agent.ainvoke( {"messages": [{"role": "user", "content": "What is 5 + 3?"}]}, context={"user_id": "123"}, ) if __name__ == "__main__": asyncio.run(main()) """ [11/21/25 10:02:42] INFO Processing request of type ListToolsRequest server.py:674 call_f3Unpu6pMXc3erYy2U540a8s {'user_id': '123'} {'messages': [HumanMessage(content='What is 5 + 3?', additional_kwargs={}, response_metadata={}, id='ca0e9e84-34a0-4540-b92f-bcf023316562'), AIMessage(content='', additional_kwargs={'refusal': None}, response_metadata={'token_usage': {'completion_tokens': 17, 'prompt_tokens': 73, 'total_tokens': 90, 'completion_tokens_details': {'accepted_prediction_tokens': 0, 'audio_tokens': 0, 'reasoning_tokens': 0, 'rejected_prediction_tokens': 
0}, 'prompt_tokens_details': {'audio_tokens': 0, 'cached_tokens': 0}}, 'model_provider': 'openai', 'model_name': 'gpt-4o-mini-2024-07-18', 'system_fingerprint': 'fp_560af6e559', 'id': 'chatcmpl-CeMwlKviSagQxVwtkocmYe9YzNlRb', 'service_tier': 'default', 'finish_reason': 'tool_calls', 'logprobs': None}, id='lc_run--f8fff704-5f76-4526-8dbd-62878b5fbc15-0', tool_calls=[{'name': 'add', 'args': {'a': 5, 'b': 3}, 'id': 'call_f3Unpu6pMXc3erYy2U540a8s', 'type': 'tool_call'}], usage_metadata={'input_tokens': 73, 'output_tokens': 17, 'total_tokens': 90, 'input_token_details': {'audio': 0, 'cache_read': 0}, 'output_token_details': {'audio': 0, 'reasoning': 0}})]} [11/21/25 10:02:44] INFO Processing request of type CallToolRequest """ ```
1 parent c6d6c56 commit 092da2a

File tree

3 files changed

+57
-22
lines changed

3 files changed

+57
-22
lines changed

langchain_mcp_adapters/interceptors.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -12,14 +12,14 @@
1212
from dataclasses import dataclass, replace
1313
from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
1414

15+
from langchain_core.messages import ToolMessage
1516
from mcp.types import CallToolResult
1617
from typing_extensions import NotRequired, TypedDict, Unpack
1718

1819
if TYPE_CHECKING:
1920
from collections.abc import Awaitable, Callable
2021

21-
22-
MCPToolCallResult = CallToolResult
22+
MCPToolCallResult = CallToolResult | ToolMessage
2323

2424

2525
class _MCPToolCallRequestOverrides(TypedDict, total=False):

langchain_mcp_adapters/tools.py

Lines changed: 19 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -7,6 +7,7 @@
77
from collections.abc import Awaitable, Callable
88
from typing import Any, get_args
99

10+
from langchain_core.messages import ToolMessage
1011
from langchain_core.tools import (
1112
BaseTool,
1213
InjectedToolArg,
@@ -35,33 +36,34 @@
3536
)
3637
from langchain_mcp_adapters.sessions import Connection, create_session
3738

38-
try:
39-
from langgraph.runtime import get_runtime
40-
except ImportError:
41-
42-
def get_runtime() -> None:
43-
"""no-op runtime getter."""
44-
return
45-
46-
4739
NonTextContent = ImageContent | AudioContent | ResourceLink | EmbeddedResource
4840
MAX_ITERATIONS = 1000
4941

5042

5143
def _convert_call_tool_result(
5244
call_tool_result: MCPToolCallResult,
53-
) -> tuple[str | list[str], list[NonTextContent] | None]:
45+
) -> tuple[str | list[str] | ToolMessage, list[NonTextContent] | None]:
5446
"""Convert MCP MCPToolCallResult to LangChain tool result format.
5547
5648
Args:
57-
call_tool_result: The result from calling an MCP tool.
49+
call_tool_result: The result from calling an MCP tool. Can be either
50+
a CallToolResult (MCP format) or a ToolMessage (LangChain format).
5851
5952
Returns:
60-
A tuple containing the text content and any non-text content.
53+
A tuple containing the text content (which may be a ToolMessage) and any
54+
non-text content. When a ToolMessage is returned by an interceptor, it's
55+
placed in the first position of the tuple as the content, with None as
56+
the artifact.
6157
6258
Raises:
6359
ToolException: If the tool call resulted in an error.
6460
"""
61+
# If the interceptor returned a ToolMessage directly, return it as the content
62+
# with None as the artifact to match the content_and_artifact format
63+
if isinstance(call_tool_result, ToolMessage):
64+
return call_tool_result, None
65+
66+
# Otherwise, convert from CallToolResult
6567
text_contents: list[TextContent] = []
6668
non_text_contents = []
6769
for content in call_tool_result.content:
@@ -184,15 +186,18 @@ def convert_mcp_tool_to_langchain_tool(
184186
raise ValueError(msg)
185187

186188
async def call_tool(
189+
runtime: Any = None, # noqa: ANN401
187190
**arguments: dict[str, Any],
188-
) -> tuple[str | list[str], list[NonTextContent] | None]:
191+
) -> tuple[str | list[str] | ToolMessage, list[NonTextContent] | None]:
189192
"""Execute tool call with interceptor chain and return formatted result.
190193
191194
Args:
195+
runtime: LangGraph tool runtime if available, otherwise None.
192196
**arguments: Tool arguments as keyword args.
193197
194198
Returns:
195-
Tuple of (text_content, non_text_content).
199+
A tuple of (text_content, non_text_content), where text_content may be
200+
a ToolMessage if an interceptor returned one directly.
196201
"""
197202
mcp_callbacks = (
198203
callbacks.to_mcp_format(
@@ -202,12 +207,6 @@ async def call_tool(
202207
else _MCPCallbacks()
203208
)
204209

205-
# try to get runtime if we're in a langgraph context
206-
try:
207-
runtime = get_runtime()
208-
except Exception: # noqa: BLE001
209-
runtime = None
210-
211210
# Create the innermost handler that actually executes the tool call
212211
async def execute_tool(request: MCPToolCallRequest) -> MCPToolCallResult:
213212
"""Execute the actual MCP tool call with optional session creation.

tests/test_interceptors.py

Lines changed: 36 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,7 @@
11
"""Tests for the interceptor system functionality."""
22

33
import pytest
4+
from langchain_core.messages import ToolMessage
45
from mcp.server import FastMCP
56
from mcp.types import (
67
CallToolResult,
@@ -283,3 +284,38 @@ async def failing_interceptor(
283284
add_tool = next(tool for tool in tools if tool.name == "add")
284285
with pytest.raises(ValueError, match="Interceptor failed"):
285286
await add_tool.ainvoke({"a": 2, "b": 3})
287+
288+
async def test_interceptor_returns_tool_message(self, socket_enabled):
    """Verify that an interceptor may short-circuit with a ToolMessage.

    Instead of delegating to the handler (which would produce an MCP
    CallToolResult), the interceptor hands back a LangChain ToolMessage
    directly; the tool invocation must surface that message unchanged.
    """

    async def respond_with_tool_message(
        request: MCPToolCallRequest,
        handler,
    ) -> ToolMessage:
        # Bypass the MCP call entirely and answer with a LangChain message.
        return ToolMessage(
            content="Custom ToolMessage response",
            name=request.name,
            tool_call_id="test-call-id",
        )

    with run_streamable_http(_create_math_server, 8209):
        tools = await load_mcp_tools(
            None,
            connection={
                "url": "http://localhost:8209/mcp",
                "transport": "streamable_http",
            },
            tool_interceptors=[respond_with_tool_message],
        )

        add_tool = next(tool for tool in tools if tool.name == "add")
        result = await add_tool.ainvoke(
            {"args": {"a": 2, "b": 3}, "id": "test-call-id", "type": "tool_call"}
        )

        # The ToolMessage produced by the interceptor comes back as-is,
        # rather than being converted through the CallToolResult path.
        assert isinstance(result, ToolMessage)
        assert result.content == "Custom ToolMessage response"
        assert result.name == "add"
        assert result.tool_call_id == "test-call-id"

0 commit comments

Comments (0)