|
| 1 | +# Client Server Interaction |
| 2 | + |
| 3 | +## How do clients and servers communicate? |
| 4 | + |
| 5 | +1. Client sends an initialisation request to the server and receives the protocol version and capabilities from the server. |
| 6 | + |
| 7 | + |
| 8 | + |
| 9 | +2. Client sends an initialisation notification to the server; this acts as an acknowledgement of the server's response. |
| 10 | + |
| 11 | + |
| 12 | + |
| 13 | +3. Normal message exchange begins. The client and server can send messages to each other using the request-response pattern or notifications. |
| 14 | + |
| 15 | + |
| 16 | + |
| 17 | +The MCP documentation for [Client Developers](https://modelcontextprotocol.io/quickstart/client) describes how to connect to a server as a client. The following code snippet demonstrates how to implement this through the `stdio` transport method: |
| 18 | + |
| 19 | +```python |
| 20 | +async def connect_to_server(self, server_script_path: str): |
| 21 | + """Connect to an MCP server |
| 22 | +
|
| 23 | + Args: |
| 24 | + server_script_path: Path to the server script (.py or .js) |
| 25 | + """ |
| 26 | + is_python = server_script_path.endswith('.py') |
| 27 | + is_js = server_script_path.endswith('.js') |
| 28 | + if not (is_python or is_js): |
| 29 | + raise ValueError("Server script must be a .py or .js file") |
| 30 | + |
| 31 | + command = "python" if is_python else "node" |
| 32 | + server_params = StdioServerParameters( |
| 33 | + command=command, |
| 34 | + args=[server_script_path], |
| 35 | + env=None |
| 36 | + ) |
| 37 | + |
| 38 | + stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params)) |
| 39 | + self.stdio, self.write = stdio_transport |
| 40 | + self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write)) |
| 41 | + |
| 42 | + await self.session.initialize() |
| 43 | + |
| 44 | + # List available tools |
| 45 | + response = await self.session.list_tools() |
| 46 | + tools = response.tools |
| 47 | + print("\nConnected to server with tools:", [tool.name for tool in tools]) |
| 48 | +``` |
| 49 | + |
| 50 | +Breaking this down, we have: |
| 51 | + |
| 52 | +1. Create server parameters for handling `stdio` transport: |
| 53 | + |
| 54 | +```python |
| 55 | +is_python = server_script_path.endswith('.py') |
| 56 | +is_js = server_script_path.endswith('.js') |
| 57 | +if not (is_python or is_js): |
| 58 | + raise ValueError("Server script must be a .py or .js file") |
| 59 | + |
| 60 | +command = "python" if is_python else "node" |
| 61 | +server_params = StdioServerParameters( |
| 62 | + command=command, |
| 63 | + args=[server_script_path], |
| 64 | + env=None |
| 65 | +) |
| 66 | +``` |
| 67 | + |
| 68 | +2. Create a `stdio` client session: |
| 69 | + |
| 70 | +```python |
| 71 | +stdio_transport = await self.exit_stack.enter_async_context(stdio_client(server_params)) |
| 72 | +self.stdio, self.write = stdio_transport |
| 73 | +self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write)) |
| 74 | +``` |
| 75 | + |
| 76 | +3. Perform initialisation with the server: |
| 77 | + |
| 78 | +```python |
| 79 | +await self.session.initialize() |
| 80 | +``` |
| 81 | + |
| 82 | +4. Begin message exchange with the server: |
| 83 | + |
| 84 | +```python |
| 85 | +# List available tools |
| 86 | +response = await self.session.list_tools() |
| 87 | +tools = response.tools |
| 88 | +print("\nConnected to server with tools:", [tool.name for tool in tools]) |
| 89 | +``` |
| 90 | + |
| 91 | +## How does the client process user queries? |
| 92 | + |
| 93 | +The MCP documentation for [Client Developers](https://modelcontextprotocol.io/quickstart/client) describes how to process user queries during the message exchange process, using the LLM to make decisions on which tools should be chosen to fulfil the user's request. The following code snippet demonstrates how to implement this: |
| 94 | + |
| 95 | +```python |
| 96 | +async def process_query(self, query: str) -> str: |
| 97 | + """Process a query using Claude and available tools""" |
| 98 | + messages = [ |
| 99 | + { |
| 100 | + "role": "user", |
| 101 | + "content": query |
| 102 | + } |
| 103 | + ] |
| 104 | + |
| 105 | + response = await self.session.list_tools() |
| 106 | + available_tools = [{ |
| 107 | + "name": tool.name, |
| 108 | + "description": tool.description, |
| 109 | + "input_schema": tool.inputSchema |
| 110 | + } for tool in response.tools] |
| 111 | + |
| 112 | + # Initial Claude API call |
| 113 | + response = self.anthropic.messages.create( |
| 114 | + model="claude-3-5-sonnet-20241022", |
| 115 | + max_tokens=1000, |
| 116 | + messages=messages, |
| 117 | + tools=available_tools |
| 118 | + ) |
| 119 | + |
| 120 | + # Process response and handle tool calls |
| 121 | + final_text = [] |
| 122 | + |
| 123 | + assistant_message_content = [] |
| 124 | + for content in response.content: |
| 125 | + if content.type == 'text': |
| 126 | + final_text.append(content.text) |
| 127 | + assistant_message_content.append(content) |
| 128 | + elif content.type == 'tool_use': |
| 129 | + tool_name = content.name |
| 130 | + tool_args = content.input |
| 131 | + |
| 132 | + # Execute tool call |
| 133 | + result = await self.session.call_tool(tool_name, tool_args) |
| 134 | + final_text.append(f"[Calling tool {tool_name} with args {tool_args}]") |
| 135 | + |
| 136 | + assistant_message_content.append(content) |
| 137 | + messages.append({ |
| 138 | + "role": "assistant", |
| 139 | + "content": assistant_message_content |
| 140 | + }) |
| 141 | + messages.append({ |
| 142 | + "role": "user", |
| 143 | + "content": [ |
| 144 | + { |
| 145 | + "type": "tool_result", |
| 146 | + "tool_use_id": content.id, |
| 147 | + "content": result.content |
| 148 | + } |
| 149 | + ] |
| 150 | + }) |
| 151 | + |
| 152 | + # Get next response from Claude |
| 153 | + response = self.anthropic.messages.create( |
| 154 | + model="claude-3-5-sonnet-20241022", |
| 155 | + max_tokens=1000, |
| 156 | + messages=messages, |
| 157 | + tools=available_tools |
| 158 | + ) |
| 159 | + |
| 160 | + final_text.append(response.content[0].text) |
| 161 | + |
| 162 | + return "\n".join(final_text) |
| 163 | +``` |
| 164 | + |
| 165 | +Breaking this down, we have: |
| 166 | + |
| 167 | +1. Format user message: |
| 168 | +```python |
| 169 | +messages = [ |
| 170 | + { |
| 171 | + "role": "user", |
| 172 | + "content": query |
| 173 | + } |
| 174 | + ] |
| 175 | +``` |
| 176 | +> The messages object is responsible for storing the message history, allowing the LLM to have access to prior context. |
| 177 | +
|
| 178 | +2. List available tools: |
| 179 | +```python |
| 180 | +response = await self.session.list_tools() |
| 181 | +``` |
| 182 | +> This is the same as the HTTP request to list tools in the [direct execution documentation](direct_execution.md). |
| 183 | +
|
| 184 | +3. Format available tools to be handed to Anthropic API: |
| 185 | +```python |
| 186 | +available_tools = [{ |
| 187 | + "name": tool.name, |
| 188 | + "description": tool.description, |
| 189 | + "input_schema": tool.inputSchema |
| 190 | + } for tool in response.tools] |
| 191 | +``` |
| 192 | + |
| 193 | +4. Make request to Anthropic API passing user message and available tools objects: |
| 194 | + |
| 195 | +```python |
| 196 | +response = self.anthropic.messages.create( |
| 197 | + model="claude-3-5-sonnet-20241022", |
| 198 | + max_tokens=1000, |
| 199 | + messages=messages, |
| 200 | + tools=available_tools |
| 201 | + ) |
| 202 | +``` |
| 203 | + |
| 204 | +5. Iterate through the response and process either the text or tool_use content type: |
| 205 | +```python |
| 206 | +for content in response.content: |
| 207 | + if content.type == 'text': |
| 208 | + ... |
| 209 | +
|
| 210 | + elif content.type == 'tool_use': |
| 211 | + ... |
| 212 | +``` |
| 213 | +6. If the response content type is tool_use then the tool is executed: |
| 214 | +```python |
| 215 | +elif content.type == 'tool_use': |
| 216 | + tool_name = content.name |
| 217 | + tool_args = content.input |
| 218 | + |
| 219 | + # Execute tool call |
| 220 | + result = await self.session.call_tool(tool_name, tool_args) |
| 221 | + final_text.append(f"[Calling tool {tool_name} with args {tool_args}]") |
| 222 | +``` |
| 223 | + |
| 224 | +7. Update messages object with next context: |
| 225 | +```python |
| 226 | +# assistant response message |
| 227 | +messages.append({ |
| 228 | + "role": "assistant", |
| 229 | + "content": assistant_message_content |
| 230 | +}) |
| 231 | +# user message with tool result |
| 232 | +messages.append({ |
| 233 | + "role": "user", |
| 234 | + "content": [ |
| 235 | + { |
| 236 | + "type": "tool_result", |
| 237 | + "tool_use_id": content.id, |
| 238 | + "content": result.content |
| 239 | + } |
| 240 | + ] |
| 241 | +}) |
| 242 | +``` |
| 243 | + |
| 244 | +8. Finally, re-prompt the LLM with the updated messages object to get a final response to the user: |
| 245 | + |
| 246 | +```python |
| 247 | +response = self.anthropic.messages.create( |
| 248 | + model="claude-3-5-sonnet-20241022", |
| 249 | + max_tokens=1000, |
| 250 | + messages=messages, |
| 251 | + tools=available_tools |
| 252 | +) |
| 253 | + |
| 254 | +final_text.append(response.content[0].text) |
| 255 | +``` |
| 256 | + |
| 257 | +The following diagram illustrates the end-to-end process of requesting the agent to log in to the Lichess API, from user query to response, using the MCP server and LLM: |
| 258 | + |
| 259 | + |
| 260 | + |
| 261 | +## Simplified Client Connection using FastMCP |
| 262 | + |
| 263 | +The [FastMCP documentation](https://github.com/modelcontextprotocol/python-sdk?tab=readme-ov-file#writing-mcp-clients) outlines how the above two code snippets can be performed in a single block: |
| 264 | + |
| 265 | +```python |
| 266 | +from mcp import ClientSession, StdioServerParameters, types |
| 267 | +from mcp.client.stdio import stdio_client |
| 268 | + |
| 269 | +# Create server parameters for stdio connection |
| 270 | +server_params = StdioServerParameters( |
| 271 | + command="python", # Executable |
| 272 | + args=["example_server.py"], # Optional command line arguments |
| 273 | + env=None, # Optional environment variables |
| 274 | +) |
| 275 | + |
| 276 | + |
| 277 | +async def run(): |
| 278 | + async with stdio_client(server_params) as (read, write): |
| 279 | + async with ClientSession( |
| 280 | + read, write |
| 281 | + ) as session: |
| 282 | + # Initialize the connection |
| 283 | + await session.initialize() |
| 284 | + |
| 285 | + # List available prompts |
| 286 | + prompts = await session.list_prompts() |
| 287 | + |
| 288 | + # Get a prompt |
| 289 | + prompt = await session.get_prompt( |
| 290 | + "example-prompt", arguments={"arg1": "value"} |
| 291 | + ) |
| 292 | + |
| 293 | + # List available resources |
| 294 | + resources = await session.list_resources() |
| 295 | + |
| 296 | + # List available tools |
| 297 | + tools = await session.list_tools() |
| 298 | + |
| 299 | + # Read a resource |
| 300 | + content, mime_type = await session.read_resource("file://some/path") |
| 301 | + |
| 302 | + # Call a tool |
| 303 | + result = await session.call_tool("tool-name", arguments={"arg1": "value"}) |
| 304 | + |
| 305 | + |
| 306 | +if __name__ == "__main__": |
| 307 | + import asyncio |
| 308 | + |
| 309 | + asyncio.run(run()) |
| 310 | +``` |
0 commit comments