mcp tutorial
parent a6a34d2f14
commit 674240c90b
@@ -1,55 +1,143 @@
 from pocketflow import Node, Flow
-from utils import call_llm
+from utils import call_llm, get_tools, call_tool
+import yaml
+import sys

-class ChatNode(Node):
+class GetToolsNode(Node):
     def prep(self, shared):
-        # Initialize messages if this is the first run
-        if "messages" not in shared:
-            shared["messages"] = []
-            print("Welcome to the chat! Type 'exit' to end the conversation.")
-
-        # Get user input
-        user_input = input("\nYou: ")
-
-        # Check if user wants to exit
-        if user_input.lower() == 'exit':
-            return None
-
-        # Add user message to history
-        shared["messages"].append({"role": "user", "content": user_input})
-
-        # Return all messages for the LLM
-        return shared["messages"]
+        """Initialize and get tools"""
+        # The question is now passed in from main via shared
+        print("🔍 Getting available tools...")
+        return "simple_server.py"
+
+    def exec(self, server_path):
+        """Retrieve tools from the MCP server"""
+        tools = get_tools(server_path)
+        return tools
+
+    def post(self, shared, prep_res, exec_res):
+        """Store tools and proceed to the decision node"""
+        tools = exec_res
+        shared["tools"] = tools
+
+        # Format tool information for later use
+        tool_info = []
+        for i, tool in enumerate(tools, 1):
+            properties = tool.inputSchema.get('properties', {})
+            required = tool.inputSchema.get('required', [])
+
+            params = []
+            for param_name, param_info in properties.items():
+                param_type = param_info.get('type', 'unknown')
+                req_status = "(Required)" if param_name in required else "(Optional)"
+                params.append(f" - {param_name} ({param_type}): {req_status}")
+
+            tool_info.append(f"[{i}] {tool.name}\n Description: {tool.description}\n Parameters:\n" + "\n".join(params))
+
+        shared["tool_info"] = "\n".join(tool_info)
+        return "decide"
+
+class DecideToolNode(Node):
+    def prep(self, shared):
+        """Prepare the prompt for the LLM to process the question"""
+        tool_info = shared["tool_info"]
+        question = shared["question"]
+
+        prompt = f"""
+### CONTEXT
+You are an assistant that can use tools via Model Context Protocol (MCP).
+
+### ACTION SPACE
+{tool_info}
+
+### TASK
+Answer this question: "{question}"
+
+### NEXT ACTION
+Analyze the question, extract any numbers or parameters, and decide which tool to use.
+Return your response in this format:
+
+```yaml
+thinking: |
+    <your step-by-step reasoning about what the question is asking and what numbers to extract>
+tool: <name of the tool to use>
+reason: <why you chose this tool>
+parameters:
+    <parameter_name>: <parameter_value>
+    <parameter_name>: <parameter_value>
+```
+IMPORTANT:
+1. Extract numbers from the question properly
+2. Use proper indentation (4 spaces) for multi-line fields
+3. Use the | character for multi-line text fields
+"""
+        return prompt

-    def exec(self, messages):
-        if messages is None:
-            return None
-
-        # Call LLM with the entire conversation history
-        response = call_llm(messages)
+    def exec(self, prompt):
+        """Call the LLM to process the question and decide which tool to use"""
+        print("🤔 Analyzing question and deciding which tool to use...")
+        response = call_llm(prompt)
         return response

     def post(self, shared, prep_res, exec_res):
-        if prep_res is None or exec_res is None:
-            print("\nGoodbye!")
-            return None  # End the conversation
-
-        # Print the assistant's response
-        print(f"\nAssistant: {exec_res}")
-
-        # Add assistant message to history
-        shared["messages"].append({"role": "assistant", "content": exec_res})
-
-        # Loop back to continue the conversation
-        return "continue"
+        """Extract the decision from YAML and save it to the shared context"""
+        try:
+            yaml_str = exec_res.split("```yaml")[1].split("```")[0].strip()
+            decision = yaml.safe_load(yaml_str)
+
+            shared["tool_name"] = decision["tool"]
+            shared["parameters"] = decision["parameters"]
+            shared["thinking"] = decision.get("thinking", "")
+
+            print(f"💡 Selected tool: {decision['tool']}")
+            print(f"🔢 Extracted parameters: {decision['parameters']}")
+
+            return "execute"
+        except Exception as e:
+            print(f"❌ Error parsing LLM response: {e}")
+            print("Raw response:", exec_res)
+            return None

-# Create the flow with self-loop
-chat_node = ChatNode()
-chat_node - "continue" >> chat_node  # Loop back to continue conversation
-
-flow = Flow(start=chat_node)
-
-# Start the chat
+class ExecuteToolNode(Node):
+    def prep(self, shared):
+        """Prepare tool execution parameters"""
+        return shared["tool_name"], shared["parameters"]
+
+    def exec(self, inputs):
+        """Execute the chosen tool"""
+        tool_name, parameters = inputs
+        print(f"🔧 Executing tool '{tool_name}' with parameters: {parameters}")
+        result = call_tool("simple_server.py", tool_name, parameters)
+        return result
+
+    def post(self, shared, prep_res, exec_res):
+        print(f"\n✅ Final Answer: {exec_res}")
+        return "done"

 if __name__ == "__main__":
-    shared = {}
+    # Default question
+    default_question = "What is 982713504867129384651 plus 73916582047365810293746529?"
+
+    # Get the question from the command line if provided with --
+    question = default_question
+    for arg in sys.argv[1:]:
+        if arg.startswith("--"):
+            question = arg[2:]
+            break
+
+    print(f"🤔 Processing question: {question}")
+
+    # Create nodes
+    get_tools_node = GetToolsNode()
+    decide_node = DecideToolNode()
+    execute_node = ExecuteToolNode()
+
+    # Connect nodes
+    get_tools_node - "decide" >> decide_node
+    decide_node - "execute" >> execute_node
+
+    # Create and run the flow
+    flow = Flow(start=get_tools_node)
+    shared = {"question": question}
     flow.run(shared)
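Note on the YAML handoff: DecideToolNode.post relies on the model echoing a fenced yaml block that can be sliced out of the raw response. Below is a minimal standalone sketch of that parsing step; the hard-coded sample response stands in for real LLM output and is illustrative, not from this commit.

import yaml

# Illustrative stand-in for what call_llm might return for the default question.
sample_response = '''Here is my decision:
```yaml
thinking: |
    The question asks for the sum of two large integers.
tool: add
reason: The add tool sums two numbers.
parameters:
    a: 982713504867129384651
    b: 73916582047365810293746529
```'''

# The same extraction DecideToolNode.post performs: keep the text between the fences.
yaml_str = sample_response.split("```yaml")[1].split("```")[0].strip()
decision = yaml.safe_load(yaml_str)
assert decision["tool"] == "add"
print(decision["parameters"])  # {'a': 982713504867129384651, 'b': 73916582047365810293746529}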
@@ -4,16 +4,13 @@ import asyncio
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client

-def call_llm(messages):
+def call_llm(prompt):
     client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
-    response = client.chat.completions.create(
+    r = client.chat.completions.create(
         model="gpt-4o",
-        messages=messages,
-        temperature=0.7
+        messages=[{"role": "user", "content": prompt}]
     )
-    return response.choices[0].message.content
+    return r.choices[0].message.content

 def get_tools(server_script_path):
     """Get available tools from an MCP server.
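Note: since call_llm now takes a bare prompt string, any caller still passing a message list (as the old chat loop did) would break. If both call styles had to coexist, a small adapter along these lines could bridge them; this wrapper is hypothetical and not part of the commit:

from utils import call_llm  # assumed module name, per the tutorial's imports

def call_llm_any(prompt_or_messages):
    """Hypothetical adapter: accept either a prompt string or a message list."""
    if isinstance(prompt_or_messages, str):
        return call_llm(prompt_or_messages)
    # Fold a chat history into one prompt for the new single-turn signature.
    joined = "\n".join(f"{m['role']}: {m['content']}" for m in prompt_or_messages)
    return call_llm(joined)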
@@ -32,7 +29,41 @@ def get_tools(server_script_path):
     return asyncio.run(_get_tools())

-def call_tool(server_script_path, tool_name, arguments):
+def local_get_tools(server_script_path=None):
+    """A simple dummy implementation of get_tools without MCP."""
+    tools = [
+        {
+            "name": "add",
+            "description": "Add two numbers together",
+            "inputSchema": {
+                "properties": {
+                    "a": {"type": "integer"},
+                    "b": {"type": "integer"}
+                },
+                "required": ["a", "b"]
+            }
+        }
+    ]
+
+    class DictObject(dict):
+        """A simple class that behaves both as a dictionary and as an object with attributes."""
+        def __init__(self, data):
+            super().__init__(data)
+            for key, value in data.items():
+                if isinstance(value, dict):
+                    self[key] = DictObject(value)
+                elif isinstance(value, list) and value and isinstance(value[0], dict):
+                    self[key] = [DictObject(item) for item in value]
+
+        def __getattr__(self, key):
+            try:
+                return self[key]
+            except KeyError:
+                raise AttributeError(f"'DictObject' object has no attribute '{key}'")
+
+    return [DictObject(tool) for tool in tools]
+
+def call_tool(server_script_path=None, tool_name=None, arguments=None):
     """Call a tool on an MCP server.
     """
     async def _call_tool():
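Note: the DictObject wrapper is what lets the dummy tools mimic real MCP tool objects, which expose name, description, and inputSchema as attributes rather than keys. A quick sketch of that dual behavior, assuming these helpers live in the tutorial's utils.py as the flow script's imports suggest:

from utils import local_get_tools  # assumed module name

tool = local_get_tools()[0]

# Attribute-style access, as GetToolsNode.post uses on real MCP tool objects:
print(tool.name)                               # add
print(tool.inputSchema.get('properties', {}))  # {'a': {'type': 'integer'}, 'b': {'type': 'integer'}}
print(tool.inputSchema.get('required', []))    # ['a', 'b']

# Plain dict access still works, since DictObject subclasses dict:
print(tool["description"])                     # Add two numbers together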
@@ -49,11 +80,22 @@ def call_tool(server_script_path, tool_name, arguments):
     return asyncio.run(_call_tool())

+def local_call_tool(server_script_path=None, tool_name=None, arguments=None):
+    """A simple dummy implementation of call_tool without MCP."""
+    # Simple implementation of tools
+    if tool_name == "add":
+        if "a" in arguments and "b" in arguments:
+            return arguments["a"] + arguments["b"]
+        else:
+            return "Error: Missing required arguments 'a' or 'b'"
+    else:
+        return f"Error: Unknown tool '{tool_name}'"
+
 if __name__ == "__main__":
-    # Test the LLM call
-    messages = [{"role": "user", "content": "In a few words, what's the meaning of life?"}]
-    response = call_llm(messages)
-    print(f"Prompt: {messages[0]['content']}")
+    print("=== Testing call_llm ===")
+    prompt = "In a few words, what is the meaning of life?"
+    print(f"Prompt: {prompt}")
+    response = call_llm(prompt)
     print(f"Response: {response}")

     # Find available tools
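Note: with local_get_tools and local_call_tool in place, the whole flow can be exercised without a running MCP server. One way to wire that up is sketched below; the swap is my illustration, not something this commit does, it assumes the flow script is named main.py, and call_llm still needs a real OPENAI_API_KEY because DecideToolNode calls it.

import runpy
import sys
import utils  # assumed module name

# Patch before the flow script binds `from utils import ...`,
# so it picks up the MCP-free dummies instead.
utils.get_tools = utils.local_get_tools
utils.call_tool = utils.local_call_tool

# The question is read from the first argument starting with "--".
sys.argv = ["main.py", "--What is 2 plus 3?"]
runpy.run_path("main.py", run_name="__main__")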