Master a key skill of the AI era: get started quickly with MCP development and extend what LLM applications can do. Key topics:
1. The MCP protocol and its role in AI model interaction
2. How MCP enables tool calling for DeepSeek and other LLMs
3. A working code example and a demo of the resulting MCP service
MCP is an open protocol initiated by Anthropic that standardizes how AI models, LLMs in particular, interact with external data sources and tools. Think of it as the "USB-C port" of AI applications: it lets different LLMs connect to and use all kinds of external resources in a uniform way. Amap (高德地图) has already released an MCP server, which means LLMs such as DeepSeek can operate its maps, including route planning and place search. This further extends what LLMs can do, and DeepSeek-powered navigation apps are likely to appear soon. More MCP services will follow, letting AI drive more software and deliver more functionality. As a first taste, here is how an MCP server exposes tools as plain decorated Python functions:
```python
@mcp.tool()
def get_time() -> str:
    """Get the current system time."""
    return str(datetime.datetime.now())


@mcp.tool()
def calculate_bmi(weight_kg: float, height_m: float) -> float:
    """Calculate BMI from weight (kg) and height (m)."""
    return weight_kg / (height_m ** 2)
```
Running the client against this server produces an interactive session like the following:

Tools available on the server: ['get_time', 'calculate_bmi']
MCP client started
Type /bye to exit
>>> What's the weather like today?
I can't get weather information because I don't have a tool for that. If you need the weather, please check a weather forecast app or website.
>>> What time is it now?
```json
{
"tool": "get_time", "arguments": {}
}
```
[Notice]: Executing tool
[Result]: meta=None content=[TextContent(type='text', text='2025-04-06 22:59:58.898244', annotations=None)] isError=False
The current time is 10:59 PM on April 6, 2025.
>>> I'd like to know whether my height and weight are within the healthy range
Please tell me your weight (kg) and height (m), and I can calculate your BMI to check. For example: "I am 1.75 m tall and weigh 70 kg."
>>> Height 180, weight 80
```json
{
"tool": "calculate_bmi",
"arguments": {
"weight_kg": 80,
"height_m": 1.80
}
}
```
[Notice]: Executing tool
[Result]: meta=None content=[TextContent(type='text', text='24.691358024691358', annotations=None)] isError=False
Your BMI is 24.69, which is within the normal range (18.5-24.9). Your height-to-weight ratio is healthy.
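Note that the model wrapped its tool call in a Markdown code fence even though the system prompt asks for a bare JSON object. The client tolerates this by stripping fences before parsing; the relevant line from execute_tool (full listing below) is:

````python
# From execute_tool below: strip a possible Markdown fence, then parse the tool call
tool_call = json.loads(llm_response.replace("```json\n", "").replace("```", ""))
````

The rest of the article walks through building this demo step by step.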
1. Environment setup
Install uv, and note its basic commands:

```bash
pip install uv

uv add <package>          # add a dependency to the project
uv pip install <package>  # pip-compatible install into the environment
uv run script.py          # run a script inside the project environment
```

Create the demo project and add the MCP Python SDK:

```bash
uv init mcp-server-demo
cd mcp-server-demo
uv add "mcp[cli]"
```

Alternatively, with plain pip:

```bash
pip install "mcp[cli]"
```
2. Write the MCP server
```python
from mcp.server.fastmcp import FastMCP
import datetime

mcp = FastMCP()


@mcp.tool()
def get_time() -> str:
    """Get the current system time."""
    return str(datetime.datetime.now())


@mcp.tool()
def calculate_bmi(weight_kg: float, height_m: float) -> float:
    """Calculate BMI from weight (kg) and height (m)."""
    return weight_kg / (height_m ** 2)


if __name__ == "__main__":
    mcp.run(transport='stdio')
```
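Before writing a client, you can optionally smoke-test the server with the MCP Inspector that ships with the mcp[cli] extra. A sketch of the usual invocation (the Inspector itself is a Node.js app, so Node must be available):

```bash
# Launches the MCP Inspector against the stdio server
uv run mcp dev server.py
```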
3. Write the MCP client
```python
async def connect_to_server(self, server_script_path: str):
    """Connect to the MCP server."""
    server_params = StdioServerParameters(
        command="python", args=[server_script_path], env=None
    )
    self.stdio, self.write = await self.exit_stack.enter_async_context(stdio_client(server_params))
    self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))
    await self.session.initialize()

    # List the tools available on the server
    response = await self.session.list_tools()
    tools = response.tools
    print("\nTools available on the server:", [tool.name for tool in tools])

    tools_description = "\n".join([format_tools_for_llm(tool) for tool in tools])

    # Build the system prompt
    system_prompt = (
        "You are a helpful assistant with access to these tools:\n\n"
        f"{tools_description}\n"
        "Choose the appropriate tool based on the user's question. "
        "If no tool is needed, reply directly.\n\n"
        "IMPORTANT: When you need to use a tool, you must ONLY respond with "
        "the exact JSON object format below, nothing else:\n"
        "{\n"
        '    "tool": "tool-name",\n'
        '    "arguments": {\n'
        '        "argument-name": "value"\n'
        "    }\n"
        "}\n\n"
        "After receiving a tool's response:\n"
        "1. Transform the raw data into a natural, conversational response\n"
        "2. Keep responses concise but informative\n"
        "3. Focus on the most relevant information\n"
        "4. Use appropriate context from the user's question\n"
        "5. Avoid simply repeating the raw data\n\n"
        "Please use only the tools that are explicitly defined above."
    )
    self.messages.append({"role": "system", "content": system_prompt})
```
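For the two demo tools, format_tools_for_llm (defined in the complete code below) produces descriptions roughly like the following. Because the argument schema is generated from bare type hints, there are no per-parameter descriptions, so the formatter falls back to 'No description':

```
Tool: calculate_bmi
Description: Calculate BMI from weight (kg) and height (m).
Arguments:
- weight_kg: No description (required)
- height_m: No description (required)
```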
```python
async def chat_loop(self):
    """Run an interactive chat loop."""
    print("MCP client started")
    print("Type /bye to exit")
    while True:
        prompt = input(">>> ").strip()
        if prompt.lower() == '/bye':
            break
        llm_response = await self.chat(prompt)
        print(llm_response)
        result = await self.execute_tool(llm_response)
        if result != llm_response:
            # A tool ran: record the tool-call reply, then ask the LLM to
            # turn the raw result into a natural-language answer
            self.messages.append({"role": "assistant", "content": llm_response})
            final_response = await self.chat(result, "system")
            print(final_response)
            self.messages.append(
                {"role": "assistant", "content": final_response}
            )
        else:
            # No tool call: the reply goes straight into the history
            self.messages.append({"role": "assistant", "content": llm_response})
```

When execute_tool recognizes and runs a tool call, its return value differs from the raw LLM reply, which is what triggers the second chat() round; otherwise the reply is shown to the user as-is.
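To make the message flow concrete, one successful tool round-trip leaves the conversation history looking roughly like this (an illustrative sketch, not actual program output):

```python
# Hypothetical history after asking for the time (values abbreviated)
messages = [
    {"role": "system", "content": "<system prompt listing the tools>"},
    {"role": "user", "content": "What time is it now?"},
    {"role": "assistant", "content": '{"tool": "get_time", "arguments": {}}'},
    {"role": "system", "content": "Tool execution result: ..."},  # fed back via chat(result, "system")
    {"role": "assistant", "content": "The current time is ..."},  # final natural-language answer
]
```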
Complete code
server.py:

```python
from mcp.server.fastmcp import FastMCP
import datetime

mcp = FastMCP()


@mcp.tool()
def get_time() -> str:
    """Get the current system time."""
    return str(datetime.datetime.now())


@mcp.tool()
def calculate_bmi(weight_kg: float, height_m: float) -> float:
    """Calculate BMI from weight (kg) and height (m)."""
    return weight_kg / (height_m ** 2)


if __name__ == "__main__":
    mcp.run(transport='stdio')
```
client.py:

````python
import asyncio
import sys
import json
from typing import Optional
from contextlib import AsyncExitStack

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from dotenv import load_dotenv
from openai import AsyncOpenAI

load_dotenv()  # load environment variables from .env


def format_tools_for_llm(tool) -> str:
    """Format a tool description for the LLM.

    Returns:
        The formatted tool description.
    """
    args_desc = []
    if "properties" in tool.inputSchema:
        for param_name, param_info in tool.inputSchema["properties"].items():
            arg_desc = (
                f"- {param_name}: {param_info.get('description', 'No description')}"
            )
            if param_name in tool.inputSchema.get("required", []):
                arg_desc += " (required)"
            args_desc.append(arg_desc)

    return f"Tool: {tool.name}\nDescription: {tool.description}\nArguments:\n{chr(10).join(args_desc)}"


class MCPClient:
    def __init__(self):
        self.session: Optional[ClientSession] = None
        self.exit_stack = AsyncExitStack()
        self.client = AsyncOpenAI(
            base_url="https://api.deepseek.com",
            api_key="<your-api-key>",
        )
        self.model = "deepseek-chat"
        self.messages = []

    async def connect_to_server(self, server_script_path: str):
        """Connect to the MCP server."""
        server_params = StdioServerParameters(
            command="python", args=[server_script_path], env=None
        )
        self.stdio, self.write = await self.exit_stack.enter_async_context(stdio_client(server_params))
        self.session = await self.exit_stack.enter_async_context(ClientSession(self.stdio, self.write))
        await self.session.initialize()

        # List the tools available on the server
        response = await self.session.list_tools()
        tools = response.tools
        print("\nTools available on the server:", [tool.name for tool in tools])

        tools_description = "\n".join([format_tools_for_llm(tool) for tool in tools])

        # Build the system prompt
        system_prompt = (
            "You are a helpful assistant with access to these tools:\n\n"
            f"{tools_description}\n"
            "Choose the appropriate tool based on the user's question. "
            "If no tool is needed, reply directly.\n\n"
            "IMPORTANT: When you need to use a tool, you must ONLY respond with "
            "the exact JSON object format below, nothing else:\n"
            "{\n"
            '    "tool": "tool-name",\n'
            '    "arguments": {\n'
            '        "argument-name": "value"\n'
            "    }\n"
            "}\n\n"
            '"```json" is not allowed.\n\n'
            "After receiving a tool's response:\n"
            "1. Transform the raw data into a natural, conversational response\n"
            "2. Keep responses concise but informative\n"
            "3. Focus on the most relevant information\n"
            "4. Use appropriate context from the user's question\n"
            "5. Avoid simply repeating the raw data\n\n"
            "Please use only the tools that are explicitly defined above."
        )
        self.messages.append({"role": "system", "content": system_prompt})

    async def chat(self, prompt, role="user"):
        """Send a message to the LLM and return its reply."""
        self.messages.append({"role": role, "content": prompt})
        # Call the LLM API
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=self.messages,
        )
        llm_response = response.choices[0].message.content
        return llm_response

    async def execute_tool(self, llm_response: str):
        """Process the LLM response and execute tools if needed.

        Args:
            llm_response: The response from the LLM.

        Returns:
            The result of tool execution or the original response.
        """
        try:
            # Strip a possible Markdown fence before parsing
            tool_call = json.loads(llm_response.replace("```json\n", "").replace("```", ""))
            if "tool" in tool_call and "arguments" in tool_call:
                response = await self.session.list_tools()
                tools = response.tools
                if any(tool.name == tool_call["tool"] for tool in tools):
                    try:
                        print("[Notice]: Executing tool")
                        result = await self.session.call_tool(
                            tool_call["tool"], tool_call["arguments"]
                        )
                        if isinstance(result, dict) and "progress" in result:
                            progress = result["progress"]
                            total = result["total"]
                            percentage = (progress / total) * 100
                            print(f"Progress: {progress}/{total} ({percentage:.1f}%)")
                        print(f"[Result]: {result}")
                        return f"Tool execution result: {result}"
                    except Exception as e:
                        error_msg = f"Error executing tool: {str(e)}"
                        print(error_msg)
                        return error_msg
                return f"No server found with tool: {tool_call['tool']}"
            return llm_response
        except json.JSONDecodeError:
            return llm_response

    async def chat_loop(self):
        """Run an interactive chat loop."""
        print("MCP client started")
        print("Type /bye to exit")
        while True:
            prompt = input(">>> ").strip()
            if prompt.lower() == '/bye':
                break
            llm_response = await self.chat(prompt)
            print(llm_response)
            result = await self.execute_tool(llm_response)
            if result != llm_response:
                self.messages.append({"role": "assistant", "content": llm_response})
                final_response = await self.chat(result, "system")
                print(final_response)
                self.messages.append(
                    {"role": "assistant", "content": final_response}
                )
            else:
                self.messages.append({"role": "assistant", "content": llm_response})


async def main():
    if len(sys.argv) < 2:
        print("Usage: uv run client.py <path_to_server_script>")
        sys.exit(1)
    client = MCPClient()
    await client.connect_to_server(sys.argv[1])
    await client.chat_loop()


if __name__ == "__main__":
    asyncio.run(main())
````
Finally, run the client against the server:

```bash
uv run client.py ./server.py
```
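From here, extending the demo is mostly a matter of adding more decorated functions to server.py. As an illustration (a hypothetical tool, not part of the article's code), simple date arithmetic could be exposed like this:

```python
import datetime

# Hypothetical extra tool for server.py; `mcp` is the FastMCP() instance defined there
@mcp.tool()
def days_until(date_str: str) -> int:
    """Number of days from today until a YYYY-MM-DD date."""
    target = datetime.date.fromisoformat(date_str)
    return (target - datetime.date.today()).days
```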