|
1 | 1 | import asyncio |
| 2 | +import json |
2 | 3 | import logging |
3 | 4 | import os |
4 | | -import json |
5 | 5 | from dataclasses import dataclass |
6 | 6 | from datetime import datetime |
7 | 7 | from pathlib import Path |
|
13 | 13 | from dotenv import load_dotenv |
14 | 14 | from mcp import ClientSession, StdioServerParameters |
15 | 15 | from mcp.client.stdio import stdio_client |
16 | | - |
17 | 16 | from shared import ScaffoldTracker, Tracker, build_mcp_command, setup_logging, validate_mcp_manifest |
18 | 17 |
|
19 | 18 | logger = logging.getLogger(__name__) |
@@ -95,13 +94,39 @@ async def initialize(self): |
95 | 94 | if not self.suppress_logs: |
96 | 95 | logger.info(f"Loaded {len(self.tools)} MCP tools") |
97 | 96 |
|
| 97 | + def _clean_schema_for_databricks(self, schema: dict[str, Any]) -> dict[str, Any]: |
| 98 | + """Remove JSON schema fields that Databricks serving doesn't support.""" |
| 99 | + if not isinstance(schema, dict): |
| 100 | + return schema |
| 101 | + |
| 102 | + cleaned = {} |
| 103 | + for key, value in schema.items(): |
| 104 | + if key in ("minimum", "maximum", "exclusiveMinimum", "exclusiveMaximum"): |
| 105 | + if schema.get("type") == "integer": |
| 106 | + continue |
| 107 | + |
| 108 | + if key == "input_examples": |
| 109 | + continue |
| 110 | + |
| 111 | + if isinstance(value, dict): |
| 112 | + cleaned[key] = self._clean_schema_for_databricks(value) |
| 113 | + elif isinstance(value, list): |
| 114 | + cleaned[key] = [ |
| 115 | + self._clean_schema_for_databricks(item) if isinstance(item, dict) else item for item in value |
| 116 | + ] |
| 117 | + else: |
| 118 | + cleaned[key] = value |
| 119 | + |
| 120 | + return cleaned |
| 121 | + |
98 | 122 | def _convert_mcp_tool(self, mcp_tool) -> dict[str, Any]: |
| 123 | + parameters = self._clean_schema_for_databricks(mcp_tool.inputSchema) |
99 | 124 | return { |
100 | 125 | "type": "function", |
101 | 126 | "function": { |
102 | 127 | "name": mcp_tool.name, |
103 | 128 | "description": mcp_tool.description or "", |
104 | | - "parameters": mcp_tool.inputSchema, |
| 129 | + "parameters": parameters, |
105 | 130 | }, |
106 | 131 | } |
107 | 132 |
|
@@ -346,7 +371,7 @@ def cli( |
346 | 371 | prompt: str, |
347 | 372 | app_name: str | None = None, |
348 | 373 | model: str = "openrouter/minimax/minimax-m2", # other good options: "openrouter/moonshotai/kimi-k2-thinking", "gemini/gemini-2.5-pro", |
349 | | - # some open-weights platform provide openai/anthropic-like API that can be used like |
| 374 | + # some open-weights platforms provide an OpenAI/Anthropic-like API that can be used like: |
350 | 375 | # OPENAI_API_KEY=$DATABRICKS_TOKEN OPENAI_API_BASE=https://$DATABRICKS_HOST/serving-endpoints uv run cli/single_run.py "..." --backend=litellm --model="openai/databricks-gpt-oss-120b" |
351 | 376 | # OPENAI_API_BASE="https://api.minimax.io/v1" OPENAI_API_KEY="$MINIMAX_API_KEY" uv run cli/single_run.py "..."" --backend=litellm --model="openai/MiniMax-M2" |
352 | 377 | # ANTHROPIC_BASE_URL="https://api.minimax.io/anthropic" ANTHROPIC_API_KEY="$MINIMAX_API_KEY" uv run cli/single_run.py "..." --backend=litellm --model="anthropic/MiniMax-M2" |
|
0 commit comments