update para

zachary62 2025-03-20 17:33:10 -04:00
parent 24782d1af1
commit 3544dd5440
9 changed files with 305 additions and 4 deletions

View File

@@ -0,0 +1,7 @@
# Chain-of-Thought
The simplest implementation is to tell the AI to "think step by step" or to provide examples of step-by-step reasoning in the prompt. This guides the AI to break down its thinking.
Furthermore, reasoning models such as Claude 3.7 Sonnet and OpenAI o1 support this natively.
However, we can also simulate step-by-step reasoning without a dedicated reasoning model.
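A minimal sketch of the zero-shot approach, assuming a `call_llm` helper like the one in `utils.py` (the question is a placeholder):
```python
# Zero-shot chain-of-thought: simply ask the model to reason step by step.
# Assumes call_llm(prompt) sends the prompt to an LLM and returns the text.
from utils import call_llm

question = "A bat and a ball cost $1.10 in total. The bat costs $1.00 more than the ball. How much does the ball cost?"

prompt = f"""Answer the question below.
Think step by step and show your reasoning before giving the final answer.

Question: {question}"""

print(call_llm(prompt))
```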

View File

@@ -4,9 +4,6 @@ from pocketflow import Node, Flow
from utils import fake_stream_llm, stream_llm

class StreamNode(Node):
    def __init__(self, max_retries=1, wait=0):
        super().__init__(max_retries=max_retries, wait=wait)

    def prep(self, shared):
        # Create interrupt event
        interrupt_event = threading.Event()

View File

@@ -39,3 +39,14 @@ Parallel took: 1.00 seconds
- **Parallel**: Total time ≈ longest single item time
- Good for: I/O-bound tasks, independent operations
## Tech Deep Dive
- **Python's GIL** prevents true CPU-bound parallelism, but LLM calls are I/O-bound
- **Async/await** overlaps waiting time between requests
- Example: `await client.chat.completions.create(...)` (see the sketch after this list)
- See: [OpenAI's async usage](https://github.com/openai/openai-python?tab=readme-ov-file#async-usage)
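A minimal sketch of the pattern, assuming the official `openai` async client (model name and prompts are placeholders):
```python
import asyncio
from openai import AsyncOpenAI

client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment

async def ask(prompt):
    # Each call awaits network I/O, so other calls can run while this one waits
    r = await client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}],
    )
    return r.choices[0].message.content

async def main():
    prompts = ["Summarize topic A", "Summarize topic B", "Summarize topic C"]
    # gather() starts all requests concurrently; total time ≈ the slowest single call
    answers = await asyncio.gather(*(ask(p) for p in prompts))
    print(answers)

asyncio.run(main())
```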
For maximum performance and cost efficiency, consider using batch APIs (sketched below):
- [OpenAI's Batch API](https://platform.openai.com/docs/guides/batch) lets you process multiple prompts in a single request
- Reduces overhead and can be more cost-effective for large workloads
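A hedged sketch of the Batch API flow; `requests.jsonl` is a placeholder file whose per-line request format is described in the linked guide:
```python
from openai import OpenAI

client = OpenAI()

# Upload a JSONL file where each line describes one chat completion request
batch_file = client.files.create(file=open("requests.jsonl", "rb"), purpose="batch")

# Submit the batch; results are produced asynchronously within the completion window
batch = client.batches.create(
    input_file_id=batch_file.id,
    endpoint="/v1/chat/completions",
    completion_window="24h",
)
print(batch.id, batch.status)  # poll client.batches.retrieve(batch.id) until "completed"
```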

View File

@@ -0,0 +1,58 @@
# PocketFlow Research Agent - Tutorial for Dummies
This project demonstrates a simple yet powerful LLM-powered research agent built with PocketFlow, a minimalist LLM framework in just 100 lines of code! This implementation is based directly on the tutorial post [LLM Agents are simply Graph — Tutorial For Dummies](https://zacharyhuang.substack.com/p/llm-agent-internal-as-a-graph-tutorial).
## Getting Started
1. Install the packages you need with this simple command:
```bash
pip install -r requirements.txt
```
2. Let's get your OpenAI API key ready:
```bash
export OPENAI_API_KEY="your-api-key-here"
```
3. Let's do a quick check to make sure your API key is working properly:
```bash
python utils.py
```
This will test both the LLM call and web search features. If you see responses, you're good to go!
4. Try out the agent with the default question (about Nobel Prize winners):
```bash
python main.py
```
5. Got a burning question? Ask anything you want by using the `--` prefix:
```bash
python main.py --"What is quantum computing?"
```
## How It Works
The magic happens through a simple but powerful graph structure with three main parts:
```mermaid
graph TD
A[DecideAction] -->|"search"| B[SearchWeb]
A -->|"answer"| C[AnswerQuestion]
B -->|"decide"| A
```
Here's what each part does:
1. **DecideAction**: The brain that figures out whether to search or answer
2. **SearchWeb**: The researcher that goes out and finds information
3. **AnswerQuestion**: The writer that crafts the final answer
Here's what's in each file:
- [`main.py`](./main.py): The starting point - runs the whole show!
- [`flow.py`](./flow.py): Connects everything together into a smart agent
- [`nodes.py`](./nodes.py): The building blocks that make decisions and take actions
- [`utils.py`](./utils.py): Helper functions for talking to the LLM and searching the web

View File

@@ -0,0 +1,33 @@
from pocketflow import Flow
from nodes import DecideAction, SearchWeb, UnreliableAnswerNode

def create_agent_flow():
    """
    Create and connect the nodes to form a complete agent flow.

    The flow works like this:
    1. DecideAction node decides whether to search or answer
    2. If search, go to SearchWeb node
    3. If answer, go to UnreliableAnswerNode (which has a 50% chance of giving nonsense answers)
    4. After SearchWeb completes, go back to DecideAction

    Returns:
        Flow: A complete research agent flow with unreliable answering capability
    """
    # Create instances of each node
    decide = DecideAction()
    search = SearchWeb()
    answer = UnreliableAnswerNode()

    # Connect the nodes
    # If DecideAction returns "search", go to SearchWeb
    decide - "search" >> search
    # If DecideAction returns "answer", go to UnreliableAnswerNode
    decide - "answer" >> answer
    # After SearchWeb completes and returns "decide", go back to DecideAction
    search - "decide" >> decide

    # Create and return the flow, starting with the DecideAction node
    return Flow(start=decide)

View File

@@ -0,0 +1,27 @@
import sys
from flow import create_agent_flow

def main():
    """Simple function to process a question."""
    # Default question
    default_question = "Who won the Nobel Prize in Physics 2024?"

    # Get question from command line if provided with --
    question = default_question
    for arg in sys.argv[1:]:
        if arg.startswith("--"):
            question = arg[2:]
            break

    # Create the agent flow
    agent_flow = create_agent_flow()

    # Process the question
    shared = {"question": question}
    print(f"🤔 Processing question: {question}")
    agent_flow.run(shared)

    print("\n🎯 Final Answer:")
    print(shared.get("answer", "No answer found"))

if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,134 @@
from pocketflow import Node
from utils import call_llm, search_web
import yaml
import random

class DecideAction(Node):
    def prep(self, shared):
        """Prepare the context and question for the decision-making process."""
        # Get the current context (default to "No previous search" if none exists)
        context = shared.get("context", "No previous search")
        # Get the question from the shared store
        question = shared["question"]
        # Return both for the exec step
        return question, context

    def exec(self, inputs):
        """Call the LLM to decide whether to search or answer."""
        question, context = inputs
        print("🤔 Agent deciding what to do next...")
        # Create a prompt to help the LLM decide what to do next
        prompt = f"""
### CONTEXT
You are a research assistant that can search the web.
Question: {question}
Previous Research: {context}

### ACTION SPACE
[1] search
  Description: Look up more information on the web
  Parameters:
    - query (str): What to search for

[2] answer
  Description: Answer the question with current knowledge
  Parameters:
    - answer (str): Final answer to the question

## NEXT ACTION
Decide the next action based on the context and available actions.
Return your response in this format:

```yaml
thinking: |
    <your step-by-step reasoning process>
action: search OR answer
reason: <why you chose this action>
search_query: <specific search query if action is search>
```"""
        # Call the LLM to make a decision
        response = call_llm(prompt)
        # Parse the YAML block out of the response
        yaml_str = response.split("```yaml")[1].split("```")[0].strip()
        decision = yaml.safe_load(yaml_str)
        return decision

    def post(self, shared, prep_res, exec_res):
        """Save the decision and determine the next step in the flow."""
        # If the LLM decided to search, save the search query
        if exec_res["action"] == "search":
            shared["search_query"] = exec_res["search_query"]
            print(f"🔍 Agent decided to search for: {exec_res['search_query']}")
        else:
            print("💡 Agent decided to answer the question")
        # Return the action to determine the next node in the flow
        return exec_res["action"]

class SearchWeb(Node):
    def prep(self, shared):
        """Get the search query from the shared store."""
        return shared["search_query"]

    def exec(self, search_query):
        """Search the web for the given query."""
        # Call the search utility function
        print(f"🌐 Searching the web for: {search_query}")
        results = search_web(search_query)
        return results

    def post(self, shared, prep_res, exec_res):
        """Save the search results and go back to the decision node."""
        # Append the search results to the context in the shared store
        previous = shared.get("context", "")
        shared["context"] = previous + "\n\nSEARCH: " + shared["search_query"] + "\nRESULTS: " + exec_res
        print("📚 Found information, analyzing results...")
        # Always go back to the decision node after searching
        return "decide"

class UnreliableAnswerNode(Node):
    def prep(self, shared):
        """Get the question and context for answering."""
        return shared["question"], shared.get("context", "")

    def exec(self, inputs):
        """Call the LLM to generate a final answer, with a 50% chance of returning a dummy answer."""
        question, context = inputs
        # 50% chance to return a dummy answer instead of calling the LLM
        if random.random() < 0.5:
            print("🤪 Generating unreliable dummy answer...")
            return "Sorry, I'm on a coffee break right now. All information I provide is completely made up anyway. The answer to your question is 42, or maybe purple unicorns. Who knows? Certainly not me!"
        print("✍️ Crafting final answer...")
        # Create a prompt for the LLM to answer the question
        prompt = f"""
### CONTEXT
Based on the following information, answer the question.
Question: {question}
Research: {context}

## YOUR ANSWER:
Provide a comprehensive answer using the research results.
"""
        # Call the LLM to generate an answer
        answer = call_llm(prompt)
        return answer

    def post(self, shared, prep_res, exec_res):
        """Save the final answer and complete the flow."""
        # Save the answer in the shared store
        shared["answer"] = exec_res
        print("✅ Answer generated successfully")
        # We're done - no need to continue the flow
        return "done"
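
# Illustration (hedged, not part of the original file): how the YAML parsing in
# DecideAction.exec behaves on an invented response block in the requested format.
if __name__ == "__main__":
    sample_yaml = """thinking: |
  The question asks about a recent event, so I should search first.
action: search
reason: I need up-to-date information
search_query: Nobel Prize in Physics 2024 winners"""
    decision = yaml.safe_load(sample_yaml)
    print(decision["action"])        # search
    print(decision["search_query"])  # Nobel Prize in Physics 2024 winners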

View File

@@ -0,0 +1,4 @@
pocketflow>=0.0.1
aiohttp>=3.8.0 # For async HTTP requests
openai>=1.0.0 # For LLM calls
duckduckgo-search>=7.5.2 # For web search

View File

@@ -0,0 +1,30 @@
from openai import OpenAI
import os
from duckduckgo_search import DDGS

def call_llm(prompt):
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY", "your-api-key"))
    r = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": prompt}]
    )
    return r.choices[0].message.content

def search_web(query):
    results = DDGS().text(query, max_results=5)
    # Convert the structured results to a readable string
    results_str = "\n\n".join([f"Title: {r['title']}\nURL: {r['href']}\nSnippet: {r['body']}" for r in results])
    return results_str

if __name__ == "__main__":
    print("## Testing call_llm")
    prompt = "In a few words, what is the meaning of life?"
    print(f"## Prompt: {prompt}")
    response = call_llm(prompt)
    print(f"## Response: {response}")

    print("## Testing search_web")
    query = "Who won the Nobel Prize in Physics 2024?"
    print(f"## Query: {query}")
    results = search_web(query)
    print(f"## Results: {results}")