add tutorial

commit 4c7c448670 (parent 6dbb32cdc2)

+++ b/README.md
@@ -0,0 +1,85 @@
# PocketFlow Research Agent - Tutorial for Dummies

This project demonstrates a simple LLM-powered research agent built with PocketFlow, a minimalist 100-line LLM framework. For more information on PocketFlow and how to build LLM agents, check out:

- [LLM Agents are simply Graph — Tutorial For Dummies](https://zacharyhuang.substack.com/p/llm-agent-internal-as-a-graph-tutorial)
- [PocketFlow GitHub](https://github.com/the-pocket/PocketFlow)
- [PocketFlow Documentation](https://the-pocket.github.io/PocketFlow/)

## What It Does

This agent can:

1. Answer questions by searching for information when needed
2. Make decisions about when to search and when to answer
3. Generate helpful responses based on collected research

## Setting Up

### Prerequisites

- Python 3.8+
- OpenAI API key

### Installation

1. Install the required packages:

```bash
pip install -r requirements.txt
```

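Optionally, create and activate a virtual environment first (for example, `python -m venv venv` followed by `source venv/bin/activate`) to keep the dependencies isolated.
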
## Structure

- [`main.py`](./main.py): Entry point and user interface
- [`flow.py`](./flow.py): Creates and connects the agent flow
- [`nodes.py`](./nodes.py): Defines the decision and action nodes
- [`utils.py`](./utils.py): Contains utility functions for LLM calls and web searches

## Quick Start Guide

### Step 1: Set Up Your OpenAI API Key

First, set your OpenAI API key as an environment variable:

```bash
export OPENAI_API_KEY="your-api-key-here"
```

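On Windows (PowerShell), use `$env:OPENAI_API_KEY = "your-api-key-here"` instead.
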
### Step 2: Test Utilities

Verify that your API key is working by testing the utilities:

```bash
python utils.py
```

This will test both the LLM call functionality and the web search capability.

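If your key works, you should see output roughly like the following (the model's response and the search results will vary; the `##` lines come from the test block at the bottom of `utils.py`):

```
## Testing call_llm
## Prompt: In a few words, what is the meaning of life?
## Response: <model's short answer>
## Testing search_web
## Query: Who won the Nobel Prize in Physics 2024?
## Results: Title: <page title>
URL: <page url>
Snippet: <page snippet>
```
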
### Step 3: Run the Agent

Run the agent with the default question ("Who won the Nobel Prize in Physics 2024?"):

```bash
python main.py
```

### Step 4: Ask Custom Questions

To ask your own question, use the `--` prefix:

```bash
python main.py --"What is quantum computing?"
```

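Under the hood, `main.py` takes the first command-line argument that starts with `--`, strips the prefix, and uses the remainder as the question; with no such argument it falls back to the default question.
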
## How It Works

The agent is structured as a simple directed graph with three main nodes:

```mermaid
graph TD
    A[DecideAction] -->|"search"| B[SearchWeb]
    A -->|"answer"| C[AnswerQuestion]
    B -->|"decide"| A
```

1. **DecideAction**: Determines whether to search for information or provide an answer
2. **SearchWeb**: Searches the web for information
3. **AnswerQuestion**: Creates a final answer once enough information is gathered

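All three nodes communicate through a single `shared` dictionary rather than passing values directly. As a rough sketch of what that store looks like after one search-then-answer cycle (key names follow `nodes.py` and `main.py`; the values below are illustrative placeholders):

```python
# Illustrative snapshot only -- the values are made-up placeholders.
shared = {
    # Set by main.py before the flow starts
    "question": "Who won the Nobel Prize in Physics 2024?",
    # Set by DecideAction.post when the LLM chooses "search"
    "search_query": "Nobel Prize in Physics 2024 winner",
    # Appended by SearchWeb.post after each search
    "context": "\n\nSEARCH: Nobel Prize in Physics 2024 winner\nRESULTS: ...",
    # Set by AnswerQuestion.post at the end of the flow
    "answer": "...",
}
```
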

+++ b/flow.py
@@ -0,0 +1,33 @@
from pocketflow import Flow
from nodes import DecideAction, SearchWeb, AnswerQuestion

def create_agent_flow():
    """
    Create and connect the nodes to form a complete agent flow.

    The flow works like this:
    1. DecideAction node decides whether to search or answer
    2. If search, go to SearchWeb node
    3. If answer, go to AnswerQuestion node
    4. After SearchWeb completes, go back to DecideAction

    Returns:
        Flow: A complete research agent flow
    """
    # Create instances of each node
    decide = DecideAction()
    search = SearchWeb()
    answer = AnswerQuestion()

    # Connect the nodes
    # If DecideAction returns "search", go to SearchWeb
    decide - "search" >> search

    # If DecideAction returns "answer", go to AnswerQuestion
    decide - "answer" >> answer

    # After SearchWeb completes and returns "decide", go back to DecideAction
    search - "decide" >> decide

    # Create and return the flow, starting with the DecideAction node
    return Flow(start=decide)

+++ b/main.py
@@ -0,0 +1,27 @@
import sys
from flow import create_agent_flow

def main():
    """Simple function to process a question."""
    # Default question
    default_question = "Who won the Nobel Prize in Physics 2024?"

    # Get question from command line if provided with --
    question = default_question
    for arg in sys.argv[1:]:
        if arg.startswith("--"):
            question = arg[2:]
            break

    # Create the agent flow
    agent_flow = create_agent_flow()

    # Process the question
    shared = {"question": question}
    print(f"🤔 Processing question: {question}")
    agent_flow.run(shared)
    print("\n🎯 Final Answer:")
    print(shared.get("answer", "No answer found"))

if __name__ == "__main__":
    main()

+++ b/nodes.py
@@ -0,0 +1,128 @@
from pocketflow import Node
from utils import call_llm, search_web
import yaml

class DecideAction(Node):
    def prep(self, shared):
        """Prepare the context and question for the decision-making process."""
        # Get the current context (default to "No previous search" if none exists)
        context = shared.get("context", "No previous search")
        # Get the question from the shared store
        question = shared["question"]
        # Return both for the exec step
        return question, context

    def exec(self, inputs):
        """Call the LLM to decide whether to search or answer."""
        question, context = inputs

        print("🤔 Agent deciding what to do next...")

        # Create a prompt to help the LLM decide what to do next
        prompt = f"""
### CONTEXT
You are a research assistant that can search the web.
Question: {question}
Previous Research: {context}

### ACTION SPACE
[1] search
Description: Look up more information on the web
Parameters:
- query (str): What to search for

[2] answer
Description: Answer the question with current knowledge
Parameters:
- answer (str): Final answer to the question

## NEXT ACTION
Decide the next action based on the context and available actions.
Return your response in this format:

```yaml
thinking: |
    <your step-by-step reasoning process>
action: search OR answer
reason: <why you chose this action>
search_query: <specific search query if action is search>
```"""

        # Call the LLM to make a decision
        response = call_llm(prompt)

        # Parse the response to get the decision
        # (assumes the model followed the format and wrapped its reply in a ```yaml fence)
        yaml_str = response.split("```yaml")[1].split("```")[0].strip()
        decision = yaml.safe_load(yaml_str)

        return decision

    def post(self, shared, prep_res, exec_res):
        """Save the decision and determine the next step in the flow."""
        # If the LLM decided to search, save the search query
        if exec_res["action"] == "search":
            shared["search_query"] = exec_res["search_query"]
            print(f"🔍 Agent decided to search for: {exec_res['search_query']}")
        else:
            print("💡 Agent decided to answer the question")

        # Return the action to determine the next node in the flow
        return exec_res["action"]

class SearchWeb(Node):
    def prep(self, shared):
        """Get the search query from the shared store."""
        return shared["search_query"]

    def exec(self, search_query):
        """Search the web for the given query."""
        # Call the search utility function
        print(f"🌐 Searching the web for: {search_query}")
        results = search_web(search_query)
        return results

    def post(self, shared, prep_res, exec_res):
        """Save the search results and go back to the decision node."""
        # Add the search results to the context in the shared store
        previous = shared.get("context", "")
        shared["context"] = previous + "\n\nSEARCH: " + shared["search_query"] + "\nRESULTS: " + exec_res

        print("📚 Found information, analyzing results...")

        # Always go back to the decision node after searching
        return "decide"

class AnswerQuestion(Node):
    def prep(self, shared):
        """Get the question and context for answering."""
        return shared["question"], shared.get("context", "")

    def exec(self, inputs):
        """Call the LLM to generate a final answer."""
        question, context = inputs

        print("✍️ Crafting final answer...")

        # Create a prompt for the LLM to answer the question
        prompt = f"""
### CONTEXT
Based on the following information, answer the question.
Question: {question}
Research: {context}

## YOUR ANSWER:
Provide a comprehensive answer using the research results.
"""
        # Call the LLM to generate an answer
        answer = call_llm(prompt)
        return answer

    def post(self, shared, prep_res, exec_res):
        """Save the final answer and complete the flow."""
        # Save the answer in the shared store
        shared["answer"] = exec_res

        print("✅ Answer generated successfully")

        # We're done - no need to continue the flow
        return "done"

+++ b/requirements.txt
@@ -0,0 +1,4 @@
pocketflow>=0.0.1
aiohttp>=3.8.0  # For HTTP requests
openai>=1.0.0  # For LLM calls
duckduckgo-search>=7.5.2  # For web search

+++ b/utils.py
@@ -0,0 +1,30 @@
from openai import OpenAI
import os
from duckduckgo_search import DDGS

def call_llm(prompt):
    # Read the API key from the environment (falls back to a placeholder)
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
    r = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}]
    )
    return r.choices[0].message.content

def search_web(query):
    results = DDGS().text(query, max_results=5)
    # Convert results to a string
    results_str = "\n\n".join([f"Title: {r['title']}\nURL: {r['href']}\nSnippet: {r['body']}" for r in results])
    return results_str

if __name__ == "__main__":
    print("## Testing call_llm")
    prompt = "In a few words, what is the meaning of life?"
    print(f"## Prompt: {prompt}")
    response = call_llm(prompt)
    print(f"## Response: {response}")

    print("## Testing search_web")
    query = "Who won the Nobel Prize in Physics 2024?"
    print(f"## Query: {query}")
    results = search_web(query)
    print(f"## Results: {results}")