import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
import OpenAI from "openai";

import readline from "node:readline";
import process from "node:process";

// Spawn the MCP server (built to ./build/index.js) as a child process over stdio
const transport = new StdioClientTransport({
  command: "node",
  args: ["./build/index.js"]
});

const client = new Client({
  name: "example-client",
  version: "1.0.0"
});

// GitHub Models exposes an OpenAI-compatible endpoint; authenticate with a GitHub token
const openai = new OpenAI({
  baseURL: "https://models.github.ai/inference",
  apiKey: process.env.GITHUB_TOKEN,
});

const read = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});

// Convert an MCP tool definition into the OpenAI function-tool format
function toLLMTool(tool: {
  name: string;
  description?: string;
  inputSchema: any;
}): OpenAI.Chat.Completions.ChatCompletionTool {
  return {
    type: "function" as const,
    function: {
      name: tool.name,
      description: tool.description,
      parameters: {
        // MCP's inputSchema is already JSON Schema, so its fields map across directly
        type: "object",
        properties: tool.inputSchema.properties,
        required: tool.inputSchema.required,
      },
    },
  };
}
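
// Example (hypothetical tool, for illustration only): an MCP tool such as
//   { name: "add_to_cart", description: "Add an item to the cart",
//     inputSchema: { type: "object", properties: { sku: { type: "string" } }, required: ["sku"] } }
// maps to an OpenAI tool whose function.name is "add_to_cart" and whose
// function.parameters carries the same JSON Schema.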

// Call each MCP tool requested in the LLM response, collecting the results
async function callTools(
  tool_calls: OpenAI.Chat.Completions.ChatCompletionMessageToolCall[],
  toolResults: any[]
): Promise<void> {
  for (const tool_call of tool_calls) {
    const toolName = tool_call.function.name;
    const args = tool_call.function.arguments; // already a JSON-encoded string

    console.log(`Calling tool "${toolName}" with args ${args}`);

    // Call the server's tool with the parsed arguments
    const toolResult = await client.callTool({
      name: toolName,
      arguments: JSON.parse(args),
    });

    console.log("\nTool result: ", toolResult);

    toolResults.push(toolResult);
  }
}

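// Note: to let the model answer in natural language after a tool call, the collected
// results would typically be appended to `messages` as { role: "tool", tool_call_id,
// content } entries and sent back in a follow-up chat.completions.create call; this
// client stops after printing the raw tool results.
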
async function main() {
  await client.connect(transport);

  // List the tools exposed by the MCP server
  const tools = await client.listTools();

  let toolDescriptions = "";
  const llmTools: OpenAI.Chat.Completions.ChatCompletionTool[] = [];

  for (const tool of tools.tools) {
    toolDescriptions += `${tool.name}, ${tool.description}\n`;

    // Convert each MCP tool definition into the LLM tool format
    llmTools.push(toLLMTool(tool));
  }

  console.log("Welcome to the e-shopping server");
  let keepRunning = true;

  while (keepRunning) {
    const command = await getInput("Provide user prompt (type h for help or quit to stop): ");

    if (command === "quit") {
      break;
    } else if (command === "h") {
      console.log("FEATURES");
      console.log(toolDescriptions);
    } else {
      // Forward the user's input to the LLM as the prompt
      const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
        {
          role: "user",
          content: command,
        },
      ];

      console.log("Querying LLM: ", messages[0].content);

      const response = await openai.chat.completions.create({
        model: "gpt-4o-mini",
        max_tokens: 1000,
        messages,
        tools: llmTools,
      });

      // Read the LLM response and call the matching MCP server tool, if any
      const results: any[] = [];

      for (const choice of response.choices) {
        const message = choice.message;
        if (message.tool_calls) {
          console.log("Making tool call");
          await callTools(message.tool_calls, results);
        }
      }
    }
  }

  read.close();
  await client.close();

  console.log("Disconnected, bye!");
}

function getInput(query: string): Promise<string> {
  return new Promise(resolve => {
    read.question(query, (answer: string) => {
      resolve(answer);
    });
  });
}

main().catch((error) => {
  console.error("Error: ", error);
  process.exit(1);
});
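
// To try the client (assumptions: the MCP server is built to ./build/index.js and this
// client compiles to build/client.js; adjust paths to match your project layout):
//   GITHUB_TOKEN=<your-token> node build/client.js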