add demo notebook

This commit is contained in:
zachary62 2025-03-23 12:15:54 -04:00
parent 1d560e7737
commit 4e09e3025a
2 changed files with 369 additions and 297 deletions

View File

@ -2,6 +2,9 @@
This project demonstrates a simple yet powerful LLM-powered research agent. This implementation is based directly on the tutorial: [LLM Agents are simply Graph — Tutorial For Dummies](https://zacharyhuang.substack.com/p/llm-agent-internal-as-a-graph-tutorial).
👉 Run the tutorial in your browser: [Try Google Colab Notebook](
https://colab.research.google.com/github/The-Pocket/PocketFlow/blob/main/cookbook/pocketflow-agent/demo.ipynb)
## Features
- Performs web searches to gather information

View File

@ -1,300 +1,369 @@
{ {
"cells": [ "cells": [
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 1,
"metadata": { "metadata": {
"vscode": { "vscode": {
"languageId": "plaintext" "languageId": "plaintext"
},
"id": "8MeqVASIxKBH"
},
"outputs": [],
"source": [
"! pip install pocketflow>=0.0.1\n",
"! pip install aiohttp>=3.8.0\n",
"! pip install openai>=1.0.0\n",
"! pip install duckduckgo-search>=7.5.2"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"vscode": {
"languageId": "plaintext"
},
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wUp_sNU1xKBI",
"outputId": "a647f919-b253-48c8-c132-5eef582e29c7"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"## Testing call_llm\n",
"## Prompt: In a few words, what is the meaning of life?\n",
"## Response: The meaning of life is a deeply personal and philosophical question. For many, it involves seeking happiness, forming relationships, pursuing knowledge, or finding purpose and fulfillment. It's a journey that varies for each individual.\n",
"## Testing search_web\n",
"## Query: Who won the Nobel Prize in Physics 2024?\n",
"## Results: Title: Press release: The Nobel Prize in Physics 2024 - NobelPrize.org\n",
"URL: https://www.nobelprize.org/prizes/physics/2024/press-release/\n",
"Snippet: The Nobel Prize in Physics 2024 was awarded jointly to John J. Hopfield and Geoffrey Hinton \"for foundational discoveries and inventions that enable machine learning with artificial neural networks\"\n",
"\n",
"Title: Pioneers in artificial intelligence win the Nobel Prize in physics\n",
"URL: https://apnews.com/article/nobel-prize-physics-fc0567de3f2ca45f81a7359a017cd542\n",
"Snippet: Two pioneers of artificial intelligence have won the Nobel Prize in physics. John Hopfield and Geoffrey Hinton were awarded the prize Tuesday for discoveries and inventions that formed the building blocks of machine learning.\n",
"\n",
"Title: Nobel Prize 2024: All the Winners | TIME\n",
"URL: https://time.com/7065011/nobel-prize-2024-winners/\n",
"Snippet: The 2024 Nobel Prize announcements began on Oct. 7, recognizing groundbreaking contributions to humanity. The first prize, in the category of physiology or medicine, went to a pair of American ...\n",
"\n",
"Title: Nobel physics prize 2024 won by AI pioneers John Hopfield and Geoffrey ...\n",
"URL: https://www.reuters.com/science/hopfield-hinton-win-2024-nobel-prize-physics-2024-10-08/\n",
"Snippet: John Hopfield and Geoffrey Hinton won for discoveries that paved the way for the AI boom.\n",
"\n",
"Title: Nobel Prize in physics 2024 awarded for work on artificial intelligence ...\n",
"URL: https://www.cnn.com/2024/10/08/science/nobel-prize-physics-hopfield-hinton-machine-learning-intl/index.html\n",
"Snippet: The 2024 Nobel Prize in physics has been awarded to John Hopfield and Geoffrey Hinton for their fundamental discoveries in machine learning, which paved the way for how artificial intelligence is ...\n"
]
}
],
"source": [
"# utils.py\n",
"from openai import OpenAI\n",
"import os\n",
"from duckduckgo_search import DDGS\n",
"\n",
"def call_llm(prompt):\n",
" client = OpenAI(api_key=\"os.environ.get(\"OPENAI_API_KEY\", \"your-api-key\")\")\n",
" r = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" messages=[{\"role\": \"user\", \"content\": prompt}]\n",
" )\n",
" return r.choices[0].message.content\n",
"\n",
"def search_web(query):\n",
" results = DDGS().text(query, max_results=5)\n",
" # Convert results to a string\n",
" results_str = \"\\n\\n\".join([f\"Title: {r['title']}\\nURL: {r['href']}\\nSnippet: {r['body']}\" for r in results])\n",
" return results_str\n",
"\n",
"print(\"## Testing call_llm\")\n",
"prompt = \"In a few words, what is the meaning of life?\"\n",
"print(f\"## Prompt: {prompt}\")\n",
"response = call_llm(prompt)\n",
"print(f\"## Response: {response}\")\n",
"\n",
"print(\"## Testing search_web\")\n",
"query = \"Who won the Nobel Prize in Physics 2024?\"\n",
"print(f\"## Query: {query}\")\n",
"results = search_web(query)\n",
"print(f\"## Results: {results}\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"vscode": {
"languageId": "plaintext"
},
"id": "T0ETd4C2xKBI"
},
"outputs": [],
"source": [
"# nodes.py\n",
"from pocketflow import Node\n",
"import yaml\n",
"\n",
"class DecideAction(Node):\n",
" def prep(self, shared):\n",
" \"\"\"Prepare the context and question for the decision-making process.\"\"\"\n",
" # Get the current context (default to \"No previous search\" if none exists)\n",
" context = shared.get(\"context\", \"No previous search\")\n",
" # Get the question from the shared store\n",
" question = shared[\"question\"]\n",
" # Return both for the exec step\n",
" return question, context\n",
"\n",
" def exec(self, inputs):\n",
" \"\"\"Call the LLM to decide whether to search or answer.\"\"\"\n",
" question, context = inputs\n",
"\n",
" print(f\"🤔 Agent deciding what to do next...\")\n",
"\n",
" # Create a prompt to help the LLM decide what to do next with proper yaml formatting\n",
" prompt = f\"\"\"\n",
"### CONTEXT\n",
"You are a research assistant that can search the web.\n",
"Question: {question}\n",
"Previous Research: {context}\n",
"\n",
"### ACTION SPACE\n",
"[1] search\n",
" Description: Look up more information on the web\n",
" Parameters:\n",
" - query (str): What to search for\n",
"\n",
"[2] answer\n",
" Description: Answer the question with current knowledge\n",
" Parameters:\n",
" - answer (str): Final answer to the question\n",
"\n",
"## NEXT ACTION\n",
"Decide the next action based on the context and available actions.\n",
"Return your response in this format:\n",
"\n",
"```yaml\n",
"thinking: |\n",
" <your step-by-step reasoning process>\n",
"action: search OR answer\n",
"reason: <why you chose this action>\n",
"answer: <if action is answer>\n",
"search_query: <specific search query if action is search>\n",
"```\n",
"IMPORTANT: Make sure to:\n",
"1. Use proper indentation (4 spaces) for all multi-line fields\n",
"2. Use the | character for multi-line text fields\n",
"3. Keep single-line fields without the | character\n",
"\"\"\"\n",
"\n",
" # Call the LLM to make a decision\n",
" response = call_llm(prompt)\n",
"\n",
" # Parse the response to get the decision\n",
" yaml_str = response.split(\"```yaml\")[1].split(\"```\")[0].strip()\n",
" decision = yaml.safe_load(yaml_str)\n",
"\n",
" return decision\n",
"\n",
" def post(self, shared, prep_res, exec_res):\n",
" \"\"\"Save the decision and determine the next step in the flow.\"\"\"\n",
" # If LLM decided to search, save the search query\n",
" if exec_res[\"action\"] == \"search\":\n",
" shared[\"search_query\"] = exec_res[\"search_query\"]\n",
" print(f\"🔍 Agent decided to search for: {exec_res['search_query']}\")\n",
" else:\n",
" shared[\"context\"] = exec_res[\"answer\"] #save the context if LLM gives the answer without searching.\n",
" print(f\"💡 Agent decided to answer the question\")\n",
"\n",
" # Return the action to determine the next node in the flow\n",
" return exec_res[\"action\"]\n",
"\n",
"class SearchWeb(Node):\n",
" def prep(self, shared):\n",
" \"\"\"Get the search query from the shared store.\"\"\"\n",
" return shared[\"search_query\"]\n",
"\n",
" def exec(self, search_query):\n",
" \"\"\"Search the web for the given query.\"\"\"\n",
" # Call the search utility function\n",
" print(f\"🌐 Searching the web for: {search_query}\")\n",
" results = search_web(search_query)\n",
" return results\n",
"\n",
" def post(self, shared, prep_res, exec_res):\n",
" \"\"\"Save the search results and go back to the decision node.\"\"\"\n",
" # Add the search results to the context in the shared store\n",
" previous = shared.get(\"context\", \"\")\n",
" shared[\"context\"] = previous + \"\\n\\nSEARCH: \" + shared[\"search_query\"] + \"\\nRESULTS: \" + exec_res\n",
"\n",
" print(f\"📚 Found information, analyzing results...\")\n",
"\n",
" # Always go back to the decision node after searching\n",
" return \"decide\"\n",
"\n",
"class AnswerQuestion(Node):\n",
" def prep(self, shared):\n",
" \"\"\"Get the question and context for answering.\"\"\"\n",
" return shared[\"question\"], shared.get(\"context\", \"\")\n",
"\n",
" def exec(self, inputs):\n",
" \"\"\"Call the LLM to generate a final answer.\"\"\"\n",
" question, context = inputs\n",
"\n",
" print(f\"✍️ Crafting final answer...\")\n",
"\n",
" # Create a prompt for the LLM to answer the question\n",
" prompt = f\"\"\"\n",
"### CONTEXT\n",
"Based on the following information, answer the question.\n",
"Question: {question}\n",
"Research: {context}\n",
"\n",
"## YOUR ANSWER:\n",
"Provide a comprehensive answer using the research results.\n",
"\"\"\"\n",
" # Call the LLM to generate an answer\n",
" answer = call_llm(prompt)\n",
" return answer\n",
"\n",
" def post(self, shared, prep_res, exec_res):\n",
" \"\"\"Save the final answer and complete the flow.\"\"\"\n",
" # Save the answer in the shared store\n",
" shared[\"answer\"] = exec_res\n",
"\n",
" print(f\"✅ Answer generated successfully\")\n",
"\n",
" # We're done - no need to continue the flow\n",
" return \"done\""
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"vscode": {
"languageId": "plaintext"
},
"id": "0B4jCAmXxKBI"
},
"outputs": [],
"source": [
"# flow.py\n",
"from pocketflow import Flow\n",
"\n",
"def create_agent_flow():\n",
" \"\"\"\n",
" Create and connect the nodes to form a complete agent flow.\n",
"\n",
" The flow works like this:\n",
" 1. DecideAction node decides whether to search or answer\n",
" 2. If search, go to SearchWeb node\n",
" 3. If answer, go to AnswerQuestion node\n",
" 4. After SearchWeb completes, go back to DecideAction\n",
"\n",
" Returns:\n",
" Flow: A complete research agent flow\n",
" \"\"\"\n",
" # Create instances of each node\n",
" decide = DecideAction()\n",
" search = SearchWeb()\n",
" answer = AnswerQuestion()\n",
"\n",
" # Connect the nodes\n",
" # If DecideAction returns \"search\", go to SearchWeb\n",
" decide - \"search\" >> search\n",
"\n",
" # If DecideAction returns \"answer\", go to AnswerQuestion\n",
" decide - \"answer\" >> answer\n",
"\n",
" # After SearchWeb completes and returns \"decide\", go back to DecideAction\n",
" search - \"decide\" >> decide\n",
"\n",
" # Create and return the flow, starting with the DecideAction node\n",
" return Flow(start=decide)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"vscode": {
"languageId": "plaintext"
},
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "bIwsNEDCxKBI",
"outputId": "e6c02020-6fae-4377-8f0a-01d2580dd659"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"🤔 Processing question: Who won the Nobel Prize in Physics 2024?\n",
"🤔 Agent deciding what to do next...\n",
"🔍 Agent decided to search for: 2024 Nobel Prize in Physics winner\n",
"🌐 Searching the web for: 2024 Nobel Prize in Physics winner\n",
"📚 Found information, analyzing results...\n",
"🤔 Agent deciding what to do next...\n",
"💡 Agent decided to answer the question\n",
"✍️ Crafting final answer...\n",
"✅ Answer generated successfully\n",
"\n",
"🎯 Final Answer:\n",
"John J. Hopfield and Geoffrey Hinton won the 2024 Nobel Prize in Physics. They were awarded this prestigious recognition for their foundational discoveries and inventions that have significantly advanced the field of machine learning by enabling the use of artificial neural networks. These contributions have had a profound impact on the development and application of machine learning technologies.\n"
]
}
],
"source": [
"# main.py\n",
"import sys\n",
"\n",
"def main():\n",
" \"\"\"Simple function to process a question.\"\"\"\n",
" # Default question\n",
" default_question = \"Who won the Nobel Prize in Physics 2024?\"\n",
"\n",
" # Get question from command line if provided with --\n",
" question = default_question\n",
" for arg in sys.argv[1:]:\n",
" if arg.startswith(\"--\"):\n",
" question = arg[2:]\n",
" break\n",
"\n",
" # Create the agent flow\n",
" agent_flow = create_agent_flow()\n",
"\n",
" # Process the question\n",
" shared = {\"question\": question}\n",
" print(f\"🤔 Processing question: {question}\")\n",
" agent_flow.run(shared)\n",
" print(\"\\n🎯 Final Answer:\")\n",
" print(shared.get(\"answer\", \"No answer found\"))\n",
"\n",
"main()"
]
}
],
"metadata": {
"language_info": {
"name": "python"
},
"colab": {
"provenance": []
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
} }
},
"outputs": [],
"source": [
"! pip install pocketflow>=0.0.1\n",
"! pip install aiohttp>=3.8.0 \n",
"! pip install openai>=1.0.0 \n",
"! pip install duckduckgo-search>=7.5.2 "
]
}, },
{ "nbformat": 4,
"cell_type": "code", "nbformat_minor": 0
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"# utils.py\n",
"from openai import OpenAI\n",
"import os\n",
"from duckduckgo_search import DDGS\n",
"\n",
"def call_llm(prompt): \n",
" client = OpenAI(api_key=os.environ.get(\"OPENAI_API_KEY\", \"your-api-key\"))\n",
" r = client.chat.completions.create(\n",
" model=\"gpt-4o\",\n",
" messages=[{\"role\": \"user\", \"content\": prompt}]\n",
" )\n",
" return r.choices[0].message.content\n",
"\n",
"def search_web(query):\n",
" results = DDGS().text(query, max_results=5)\n",
" # Convert results to a string\n",
" results_str = \"\\n\\n\".join([f\"Title: {r['title']}\\nURL: {r['href']}\\nSnippet: {r['body']}\" for r in results])\n",
" return results_str\n",
"\n",
"print(\"## Testing call_llm\")\n",
"prompt = \"In a few words, what is the meaning of life?\"\n",
"print(f\"## Prompt: {prompt}\")\n",
"response = call_llm(prompt)\n",
"print(f\"## Response: {response}\")\n",
"\n",
"print(\"## Testing search_web\")\n",
"query = \"Who won the Nobel Prize in Physics 2024?\"\n",
"print(f\"## Query: {query}\")\n",
"results = search_web(query)\n",
"print(f\"## Results: {results}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"# nodes.py\n",
"from pocketflow import Node\n",
"from utils import call_llm, search_web\n",
"import yaml\n",
"\n",
"class DecideAction(Node):\n",
" def prep(self, shared):\n",
" \"\"\"Prepare the context and question for the decision-making process.\"\"\"\n",
" # Get the current context (default to \"No previous search\" if none exists)\n",
" context = shared.get(\"context\", \"No previous search\")\n",
" # Get the question from the shared store\n",
" question = shared[\"question\"]\n",
" # Return both for the exec step\n",
" return question, context\n",
" \n",
" def exec(self, inputs):\n",
" \"\"\"Call the LLM to decide whether to search or answer.\"\"\"\n",
" question, context = inputs\n",
" \n",
" print(f\"🤔 Agent deciding what to do next...\")\n",
" \n",
" # Create a prompt to help the LLM decide what to do next with proper yaml formatting\n",
" prompt = f\"\"\"\n",
"### CONTEXT\n",
"You are a research assistant that can search the web.\n",
"Question: {question}\n",
"Previous Research: {context}\n",
"\n",
"### ACTION SPACE\n",
"[1] search\n",
" Description: Look up more information on the web\n",
" Parameters:\n",
" - query (str): What to search for\n",
"\n",
"[2] answer\n",
" Description: Answer the question with current knowledge\n",
" Parameters:\n",
" - answer (str): Final answer to the question\n",
"\n",
"## NEXT ACTION\n",
"Decide the next action based on the context and available actions.\n",
"Return your response in this format:\n",
"\n",
"```yaml\n",
"thinking: |\n",
" <your step-by-step reasoning process>\n",
"action: search OR answer\n",
"reason: <why you chose this action>\n",
"answer: <if action is answer>\n",
"search_query: <specific search query if action is search>\n",
"```\n",
"IMPORTANT: Make sure to:\n",
"1. Use proper indentation (4 spaces) for all multi-line fields\n",
"2. Use the | character for multi-line text fields\n",
"3. Keep single-line fields without the | character\n",
"\"\"\"\n",
" \n",
" # Call the LLM to make a decision\n",
" response = call_llm(prompt)\n",
" \n",
" # Parse the response to get the decision\n",
" yaml_str = response.split(\"```yaml\")[1].split(\"```\")[0].strip()\n",
" decision = yaml.safe_load(yaml_str)\n",
" \n",
" return decision\n",
" \n",
" def post(self, shared, prep_res, exec_res):\n",
" \"\"\"Save the decision and determine the next step in the flow.\"\"\"\n",
" # If LLM decided to search, save the search query\n",
" if exec_res[\"action\"] == \"search\":\n",
" shared[\"search_query\"] = exec_res[\"search_query\"]\n",
" print(f\"🔍 Agent decided to search for: {exec_res['search_query']}\")\n",
" else:\n",
" shared[\"context\"] = exec_res[\"answer\"] #save the context if LLM gives the answer without searching.\n",
" print(f\"💡 Agent decided to answer the question\")\n",
" \n",
" # Return the action to determine the next node in the flow\n",
" return exec_res[\"action\"]\n",
"\n",
"class SearchWeb(Node):\n",
" def prep(self, shared):\n",
" \"\"\"Get the search query from the shared store.\"\"\"\n",
" return shared[\"search_query\"]\n",
" \n",
" def exec(self, search_query):\n",
" \"\"\"Search the web for the given query.\"\"\"\n",
" # Call the search utility function\n",
" print(f\"🌐 Searching the web for: {search_query}\")\n",
" results = search_web(search_query)\n",
" return results\n",
" \n",
" def post(self, shared, prep_res, exec_res):\n",
" \"\"\"Save the search results and go back to the decision node.\"\"\"\n",
" # Add the search results to the context in the shared store\n",
" previous = shared.get(\"context\", \"\")\n",
" shared[\"context\"] = previous + \"\\n\\nSEARCH: \" + shared[\"search_query\"] + \"\\nRESULTS: \" + exec_res\n",
" \n",
" print(f\"📚 Found information, analyzing results...\")\n",
" \n",
" # Always go back to the decision node after searching\n",
" return \"decide\"\n",
"\n",
"class AnswerQuestion(Node):\n",
" def prep(self, shared):\n",
" \"\"\"Get the question and context for answering.\"\"\"\n",
" return shared[\"question\"], shared.get(\"context\", \"\")\n",
" \n",
" def exec(self, inputs):\n",
" \"\"\"Call the LLM to generate a final answer.\"\"\"\n",
" question, context = inputs\n",
" \n",
" print(f\"✍️ Crafting final answer...\")\n",
" \n",
" # Create a prompt for the LLM to answer the question\n",
" prompt = f\"\"\"\n",
"### CONTEXT\n",
"Based on the following information, answer the question.\n",
"Question: {question}\n",
"Research: {context}\n",
"\n",
"## YOUR ANSWER:\n",
"Provide a comprehensive answer using the research results.\n",
"\"\"\"\n",
" # Call the LLM to generate an answer\n",
" answer = call_llm(prompt)\n",
" return answer\n",
" \n",
" def post(self, shared, prep_res, exec_res):\n",
" \"\"\"Save the final answer and complete the flow.\"\"\"\n",
" # Save the answer in the shared store\n",
" shared[\"answer\"] = exec_res\n",
" \n",
" print(f\"✅ Answer generated successfully\")\n",
" \n",
" # We're done - no need to continue the flow\n",
" return \"done\" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"# flow.py\n",
"from pocketflow import Flow\n",
"\n",
"def create_agent_flow():\n",
" \"\"\"\n",
" Create and connect the nodes to form a complete agent flow.\n",
" \n",
" The flow works like this:\n",
" 1. DecideAction node decides whether to search or answer\n",
" 2. If search, go to SearchWeb node\n",
" 3. If answer, go to AnswerQuestion node\n",
" 4. After SearchWeb completes, go back to DecideAction\n",
" \n",
" Returns:\n",
" Flow: A complete research agent flow\n",
" \"\"\"\n",
" # Create instances of each node\n",
" decide = DecideAction()\n",
" search = SearchWeb()\n",
" answer = AnswerQuestion()\n",
" \n",
" # Connect the nodes\n",
" # If DecideAction returns \"search\", go to SearchWeb\n",
" decide - \"search\" >> search\n",
" \n",
" # If DecideAction returns \"answer\", go to AnswerQuestion\n",
" decide - \"answer\" >> answer\n",
" \n",
" # After SearchWeb completes and returns \"decide\", go back to DecideAction\n",
" search - \"decide\" >> decide\n",
" \n",
" # Create and return the flow, starting with the DecideAction node\n",
" return Flow(start=decide) "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"vscode": {
"languageId": "plaintext"
}
},
"outputs": [],
"source": [
"# main.py\n",
"import sys\n",
"\n",
"def main():\n",
" \"\"\"Simple function to process a question.\"\"\"\n",
" # Default question\n",
" default_question = \"Who won the Nobel Prize in Physics 2024?\"\n",
" \n",
" # Get question from command line if provided with --\n",
" question = default_question\n",
" for arg in sys.argv[1:]:\n",
" if arg.startswith(\"--\"):\n",
" question = arg[2:]\n",
" break\n",
" \n",
" # Create the agent flow\n",
" agent_flow = create_agent_flow()\n",
" \n",
" # Process the question\n",
" shared = {\"question\": question}\n",
" print(f\"🤔 Processing question: {question}\")\n",
" agent_flow.run(shared)\n",
" print(\"\\n🎯 Final Answer:\")\n",
" print(shared.get(\"answer\", \"No answer found\"))\n",
"\n",
"main()\n"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
} }