mcp tutorial
commit 674240c90b (parent a6a34d2f14)
@@ -1,55 +1,143 @@
 from pocketflow import Node, Flow
-from utils import call_llm
+from utils import call_llm, get_tools, call_tool
+import yaml
+import sys
 
-class ChatNode(Node):
-    def prep(self, shared):
-        # Initialize messages if this is the first run
-        if "messages" not in shared:
-            shared["messages"] = []
-            print("Welcome to the chat! Type 'exit' to end the conversation.")
-
-        # Get user input
-        user_input = input("\nYou: ")
-
-        # Check if user wants to exit
-        if user_input.lower() == 'exit':
-            return None
-
-        # Add user message to history
-        shared["messages"].append({"role": "user", "content": user_input})
-
-        # Return all messages for the LLM
-        return shared["messages"]
-
-    def exec(self, messages):
-        if messages is None:
-            return None
-
-        # Call LLM with the entire conversation history
-        response = call_llm(messages)
-        return response
-
-    def post(self, shared, prep_res, exec_res):
-        if prep_res is None or exec_res is None:
-            print("\nGoodbye!")
-            return None  # End the conversation
-
-        # Print the assistant's response
-        print(f"\nAssistant: {exec_res}")
-
-        # Add assistant message to history
-        shared["messages"].append({"role": "assistant", "content": exec_res})
-
-        # Loop back to continue the conversation
-        return "continue"
-
-# Create the flow with self-loop
-chat_node = ChatNode()
-chat_node - "continue" >> chat_node  # Loop back to continue conversation
-flow = Flow(start=chat_node)
-
-# Start the chat
-if __name__ == "__main__":
-    shared = {}
-    flow.run(shared)
+class GetToolsNode(Node):
+    def prep(self, shared):
+        """Initialize and get tools"""
+        # The question is now passed from main via shared
+        print("🔍 Getting available tools...")
+        return "simple_server.py"
+
+    def exec(self, server_path):
+        """Retrieve tools from the MCP server"""
+        tools = get_tools(server_path)
+        return tools
+
+    def post(self, shared, prep_res, exec_res):
+        """Store tools and proceed to the decision node"""
+        tools = exec_res
+        shared["tools"] = tools
+
+        # Format tool information for later use
+        tool_info = []
+        for i, tool in enumerate(tools, 1):
+            properties = tool.inputSchema.get('properties', {})
+            required = tool.inputSchema.get('required', [])
+
+            params = []
+            for param_name, param_info in properties.items():
+                param_type = param_info.get('type', 'unknown')
+                req_status = "(Required)" if param_name in required else "(Optional)"
+                params.append(f"  - {param_name} ({param_type}): {req_status}")
+
+            tool_info.append(f"[{i}] {tool.name}\n  Description: {tool.description}\n  Parameters:\n" + "\n".join(params))
+
+        shared["tool_info"] = "\n".join(tool_info)
+        return "decide"
+
+class DecideToolNode(Node):
+    def prep(self, shared):
+        """Prepare the prompt for the LLM to process the question"""
+        tool_info = shared["tool_info"]
+        question = shared["question"]
+
+        prompt = f"""
+### CONTEXT
+You are an assistant that can use tools via Model Context Protocol (MCP).
+
+### ACTION SPACE
+{tool_info}
+
+### TASK
+Answer this question: "{question}"
+
+### NEXT ACTION
+Analyze the question, extract any numbers or parameters, and decide which tool to use.
+Return your response in this format:
+
+```yaml
+thinking: |
+    <your step-by-step reasoning about what the question is asking and what numbers to extract>
+tool: <name of the tool to use>
+reason: <why you chose this tool>
+parameters:
+    <parameter_name>: <parameter_value>
+    <parameter_name>: <parameter_value>
+```
+IMPORTANT:
+1. Extract numbers from the question properly
+2. Use proper indentation (4 spaces) for multi-line fields
+3. Use the | character for multi-line text fields
+"""
+        return prompt
+
+    def exec(self, prompt):
+        """Call the LLM to process the question and decide which tool to use"""
+        print("🤔 Analyzing question and deciding which tool to use...")
+        response = call_llm(prompt)
+        return response
+
+    def post(self, shared, prep_res, exec_res):
+        """Extract decision from YAML and save to shared context"""
+        try:
+            yaml_str = exec_res.split("```yaml")[1].split("```")[0].strip()
+            decision = yaml.safe_load(yaml_str)
+
+            shared["tool_name"] = decision["tool"]
+            shared["parameters"] = decision["parameters"]
+            shared["thinking"] = decision.get("thinking", "")
+
+            print(f"💡 Selected tool: {decision['tool']}")
+            print(f"🔢 Extracted parameters: {decision['parameters']}")
+
+            return "execute"
+        except Exception as e:
+            print(f"❌ Error parsing LLM response: {e}")
+            print("Raw response:", exec_res)
+            return None
+
+class ExecuteToolNode(Node):
+    def prep(self, shared):
+        """Prepare tool execution parameters"""
+        return shared["tool_name"], shared["parameters"]
+
+    def exec(self, inputs):
+        """Execute the chosen tool"""
+        tool_name, parameters = inputs
+        print(f"🔧 Executing tool '{tool_name}' with parameters: {parameters}")
+        result = call_tool("simple_server.py", tool_name, parameters)
+        return result
+
+    def post(self, shared, prep_res, exec_res):
+        print(f"\n✅ Final Answer: {exec_res}")
+        return "done"
+
+if __name__ == "__main__":
+    # Default question
+    default_question = "What is 982713504867129384651 plus 73916582047365810293746529?"
+
+    # Get question from command line if provided with --
+    question = default_question
+    for arg in sys.argv[1:]:
+        if arg.startswith("--"):
+            question = arg[2:]
+            break
+
+    print(f"🤔 Processing question: {question}")
+
+    # Create nodes
+    get_tools_node = GetToolsNode()
+    decide_node = DecideToolNode()
+    execute_node = ExecuteToolNode()
+
+    # Connect nodes
+    get_tools_node - "decide" >> decide_node
+    decide_node - "execute" >> execute_node
+
+    # Create and run flow
+    flow = Flow(start=get_tools_node)
+    shared = {"question": question}
+    flow.run(shared)
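Note: the new flow spawns an MCP server from simple_server.py, which is not part of this diff. A minimal sketch of what such a server could look like, assuming the official MCP Python SDK and its FastMCP helper (the contents below are an illustration for orientation, not the committed file):

    # simple_server.py (hypothetical sketch; the real file is not shown in this commit)
    from mcp.server.fastmcp import FastMCP

    mcp = FastMCP("simple-server")

    @mcp.tool()
    def add(a: int, b: int) -> int:
        """Add two numbers together"""
        return a + b

    if __name__ == "__main__":
        # stdio transport, so the client can spawn this script as a subprocess
        mcp.run(transport="stdio")

With a server like this on disk, the script runs against its default question, or takes a custom one via a `--`-prefixed argument, e.g. `python main.py --"What is 5 plus 3?"` (the script name main.py is an assumption; the argv loop above strips the leading `--`).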
@@ -4,16 +4,13 @@ import asyncio
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
 
-def call_llm(messages):
+def call_llm(prompt):
     client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
-
-    response = client.chat.completions.create(
+    r = client.chat.completions.create(
         model="gpt-4o",
-        messages=messages,
-        temperature=0.7
+        messages=[{"role": "user", "content": prompt}]
     )
-
-    return response.choices[0].message.content
+    return r.choices[0].message.content
 
 def get_tools(server_script_path):
     """Get available tools from an MCP server.
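The body of get_tools and its inner _get_tools coroutine fall between this hunk and the next, so the diff never shows them; the next hunk picks up at `return asyncio.run(_get_tools())`. For orientation, a plausible sketch of the elided coroutine, assuming only the stdio client API imported above (a reconstruction, not the committed code):

    # Hypothetical shape of the elided coroutine inside get_tools()
    async def _get_tools():
        server_params = StdioServerParameters(command="python", args=[server_script_path])
        async with stdio_client(server_params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()           # MCP handshake
                result = await session.list_tools()  # ask the server for its tool list
                return result.tools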
@@ -32,7 +29,41 @@ def get_tools(server_script_path):
 
     return asyncio.run(_get_tools())
 
-def call_tool(server_script_path, tool_name, arguments):
+def local_get_tools(server_script_path=None):
+    """A simple dummy implementation of get_tools without MCP."""
+    tools = [
+        {
+            "name": "add",
+            "description": "Add two numbers together",
+            "inputSchema": {
+                "properties": {
+                    "a": {"type": "integer"},
+                    "b": {"type": "integer"}
+                },
+                "required": ["a", "b"]
+            }
+        }
+    ]
+
+    class DictObject(dict):
+        """A simple class that behaves both as a dictionary and as an object with attributes."""
+        def __init__(self, data):
+            super().__init__(data)
+            for key, value in data.items():
+                if isinstance(value, dict):
+                    self[key] = DictObject(value)
+                elif isinstance(value, list) and value and isinstance(value[0], dict):
+                    self[key] = [DictObject(item) for item in value]
+
+        def __getattr__(self, key):
+            try:
+                return self[key]
+            except KeyError:
+                raise AttributeError(f"'DictObject' object has no attribute '{key}'")
+
+    return [DictObject(tool) for tool in tools]
+
+def call_tool(server_script_path=None, tool_name=None, arguments=None):
     """Call a tool on an MCP server.
     """
     async def _call_tool():
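Likewise, the body of the _call_tool coroutine is elided between this hunk and the next. A sketch under the same assumptions (how the result payload is unwrapped is a guess; MCP call_tool results wrap their payload in a content list):

    # Hypothetical shape of the elided coroutine inside call_tool()
    async def _call_tool():
        server_params = StdioServerParameters(command="python", args=[server_script_path])
        async with stdio_client(server_params) as (read, write):
            async with ClientSession(read, write) as session:
                await session.initialize()
                result = await session.call_tool(tool_name, arguments=arguments)
                return result.content[0].text  # assumes a single text content item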
@@ -49,11 +80,22 @@ def call_tool(server_script_path, tool_name, arguments):
 
     return asyncio.run(_call_tool())
 
+def local_call_tool(server_script_path=None, tool_name=None, arguments=None):
+    """A simple dummy implementation of call_tool without MCP."""
+    # Simple implementation of tools
+    if tool_name == "add":
+        if "a" in arguments and "b" in arguments:
+            return arguments["a"] + arguments["b"]
+        else:
+            return "Error: Missing required arguments 'a' or 'b'"
+    else:
+        return f"Error: Unknown tool '{tool_name}'"
+
 if __name__ == "__main__":
     # Test the LLM call
-    messages = [{"role": "user", "content": "In a few words, what's the meaning of life?"}]
-    response = call_llm(messages)
-    print(f"Prompt: {messages[0]['content']}")
+    print("=== Testing call_llm ===")
+    prompt = "In a few words, what is the meaning of life?"
+    print(f"Prompt: {prompt}")
+    response = call_llm(prompt)
     print(f"Response: {response}")
 
     # Find available tools
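The point of DictObject is that local_get_tools returns objects supporting the same attribute access (tool.name, tool.inputSchema) that the flow nodes use on real MCP tool objects, so the flow can run against the dummies unchanged. A quick smoke test of the two fallbacks (illustrative only, not part of the commit):

    tools = local_get_tools()
    tool = tools[0]
    print(tool.name)                             # attribute access -> "add"
    print(tool.inputSchema.get("required", []))  # nested dict access -> ["a", "b"]
    print(local_call_tool(tool_name="add", arguments={"a": 2, "b": 3}))  # -> 5
    print(local_call_tool(tool_name="sub", arguments={}))  # -> Error: Unknown tool 'sub'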