From e90ea8a9c0b26b8044f406a96cf2f476a3504f44 Mon Sep 17 00:00:00 2001 From: Azraf <69325302+turboslapper@users.noreply.github.com> Date: Wed, 22 Jan 2025 02:55:53 -0500 Subject: [PATCH 1/5] Replace `initialize_agent` with `create_react_agent` for LangGraph compatibility - Updated code to use `create_react_agent` instead of the deprecated `initialize_agent`. - Removed explicit `AgentType` parameters as they are implicitly supported by `create_react_agent`. - Replaced `verbose=True` with `debug=True` for detailed logging. Partially addresses #29277 --- docs/docs/integrations/callbacks/llmonitor.md | 13 +++++- .../callbacks/sagemaker_tracking.ipynb | 9 ++-- .../providers/comet_tracking.ipynb | 13 +++--- .../integrations/providers/google_serper.mdx | 10 ++-- .../docs/integrations/providers/searchapi.mdx | 10 ++-- docs/docs/integrations/tools/awslambda.ipynb | 9 ++-- docs/docs/integrations/tools/bash.ipynb | 8 ++-- docs/docs/integrations/tools/bearly.ipynb | 12 ++--- .../integrations/tools/chatgpt_plugins.ipynb | 43 +++++++++++++---- docs/docs/integrations/tools/connery.ipynb | 16 ++++--- docs/docs/integrations/tools/gitlab.ipynb | 8 ++-- .../integrations/tools/google_finance.ipynb | 11 +++-- docs/docs/integrations/tools/memorize.ipynb | 16 ++++--- docs/docs/integrations/tools/nasa.ipynb | 8 ++-- .../integrations/tools/openweathermap.ipynb | 9 ++-- docs/docs/integrations/tools/playwright.ipynb | 13 +++--- docs/docs/integrations/tools/searchapi.ipynb | 9 ++-- docs/docs/integrations/tools/zapier.ipynb | 46 ++++++++++--------- 18 files changed, 165 insertions(+), 98 deletions(-) diff --git a/docs/docs/integrations/callbacks/llmonitor.md b/docs/docs/integrations/callbacks/llmonitor.md index ac455c63c0582..1402f0e264a2c 100644 --- a/docs/docs/integrations/callbacks/llmonitor.md +++ b/docs/docs/integrations/callbacks/llmonitor.md @@ -83,16 +83,25 @@ agent_executor.run("how many letters in the word educa?", callbacks=[handler]) Another example: ```python -from 
langchain.agents import load_tools, initialize_agent, AgentType +from langchain.agents import load_tools from langchain_openai import OpenAI from langchain_community.callbacks.llmonitor_callback import LLMonitorCallbackHandler +from langgraph.prebuilt import create_react_agent handler = LLMonitorCallbackHandler() llm = OpenAI(temperature=0) tools = load_tools(["serpapi", "llm-math"], llm=llm) -agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, metadata={ "agent_name": "GirlfriendAgeFinder" }) # <- recommended, assign a custom name +system_prompt = "You are a helpful assistant named GirlfriendAgeFinder." + +agent = create_react_agent( + model=llm, + tools=tools, + # Add a system prompt (or other advanced instructions). + state_modifier=system_prompt, + debug=False, +) agent.run( "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?", diff --git a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb index adaa7d1571750..04b13faf257b0 100644 --- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb +++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb @@ -89,7 +89,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain.chains import LLMChain, SimpleSequentialChain\n", "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI\n", @@ -294,8 +295,10 @@ " tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=[sagemaker_callback])\n", "\n", " # Initialize agent with all the tools\n", - " agent = initialize_agent(\n", - " tools, llm, agent=\"zero-shot-react-description\", callbacks=[sagemaker_callback]\n", + " agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", " )\n", "\n", " # Run 
agent\n", diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb b/docs/docs/integrations/providers/comet_tracking.ipynb index e8752aed96c1f..2c794a93a940d 100644 --- a/docs/docs/integrations/providers/comet_tracking.ipynb +++ b/docs/docs/integrations/providers/comet_tracking.ipynb @@ -192,7 +192,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_community.callbacks import CometCallbackHandler\n", "from langchain_core.callbacks import StdOutCallbackHandler\n", "from langchain_openai import OpenAI\n", @@ -207,12 +208,10 @@ "llm = OpenAI(temperature=0.9, callbacks=callbacks)\n", "\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=\"zero-shot-react-description\",\n", - " callbacks=callbacks,\n", - " verbose=True,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "agent.run(\n", " \"Who is Leo DiCaprio's girlfriend? 
What is her current age raised to the 0.43 power?\"\n", diff --git a/docs/docs/integrations/providers/google_serper.mdx b/docs/docs/integrations/providers/google_serper.mdx index 0401e66b53581..db154a0f2e8aa 100644 --- a/docs/docs/integrations/providers/google_serper.mdx +++ b/docs/docs/integrations/providers/google_serper.mdx @@ -23,8 +23,8 @@ You can use it as part of a Self Ask chain: ```python from langchain_community.utilities import GoogleSerperAPIWrapper from langchain_openai import OpenAI -from langchain.agents import initialize_agent, Tool -from langchain.agents import AgentType +from langchain.agents import Tool +from langgraph.prebuilt import create_react_agent import os @@ -41,7 +41,11 @@ tools = [ ) ] -self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True) +self_ask_with_search = create_react_agent( + model=llm, + tools=tools, + debug=True, +) self_ask_with_search.run("What is the hometown of the reigning men's U.S. Open champion?") ``` diff --git a/docs/docs/integrations/providers/searchapi.mdx b/docs/docs/integrations/providers/searchapi.mdx index 1dfaded161009..18628370f9800 100644 --- a/docs/docs/integrations/providers/searchapi.mdx +++ b/docs/docs/integrations/providers/searchapi.mdx @@ -22,8 +22,8 @@ You can use it as part of a Self Ask chain: ```python from langchain_community.utilities import SearchApiAPIWrapper from langchain_openai import OpenAI -from langchain.agents import initialize_agent, Tool -from langchain.agents import AgentType +from langchain.agents import Tool +from langgraph.prebuilt import create_react_agent import os @@ -40,7 +40,11 @@ tools = [ ) ] -self_ask_with_search = initialize_agent(tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True) +self_ask_with_search = create_react_agent( + model=llm, + tools=tools, + debug=True, +) self_ask_with_search.run("Who lived longer: Plato, Socrates, or Aristotle?") ``` diff --git a/docs/docs/integrations/tools/awslambda.ipynb 
b/docs/docs/integrations/tools/awslambda.ipynb index 08a7ed4e6928f..ccbf5962efbff 100644 --- a/docs/docs/integrations/tools/awslambda.ipynb +++ b/docs/docs/integrations/tools/awslambda.ipynb @@ -62,7 +62,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", @@ -74,8 +75,10 @@ " function_name=\"testFunction1\",\n", ")\n", "\n", - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")\n", "\n", "agent.run(\"Send an email to test@testing123.com saying hello world.\")" diff --git a/docs/docs/integrations/tools/bash.ipynb b/docs/docs/integrations/tools/bash.ipynb index b01f070926f4e..efe06a3f33818 100644 --- a/docs/docs/integrations/tools/bash.ipynb +++ b/docs/docs/integrations/tools/bash.ipynb @@ -154,7 +154,7 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", @@ -162,8 +162,10 @@ "shell_tool.description = shell_tool.description + f\"args {shell_tool.args}\".replace(\n", " \"{\", \"{{\"\n", ").replace(\"}\", \"}}\")\n", - "self_ask_with_search = initialize_agent(\n", - " [shell_tool], llm, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "self_ask_with_search = create_react_agent(\n", + " model=llm,\n", + " tools=[shell_tool],\n", + " debug=True\n", ")\n", "self_ask_with_search.run(\n", " \"Download the langchain.com webpage and grep for all urls. Return only a sorted list of them. 
Be sure to use double quotes.\"\n", diff --git a/docs/docs/integrations/tools/bearly.ipynb b/docs/docs/integrations/tools/bearly.ipynb index 72b6b15e5187c..91e236adff289 100644 --- a/docs/docs/integrations/tools/bearly.ipynb +++ b/docs/docs/integrations/tools/bearly.ipynb @@ -47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI" ] }, @@ -175,12 +175,10 @@ "outputs": [], "source": [ "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.OPENAI_FUNCTIONS,\n", - " verbose=True,\n", - " handle_parsing_errors=True,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/chatgpt_plugins.ipynb b/docs/docs/integrations/tools/chatgpt_plugins.ipynb index 809a13869e261..1e0446b0e9e9d 100644 --- a/docs/docs/integrations/tools/chatgpt_plugins.ipynb +++ b/docs/docs/integrations/tools/chatgpt_plugins.ipynb @@ -32,17 +32,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "70d493c8", "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ "%pip install --upgrade --quiet langchain-community" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "66cc9494-c060-4bc2-92bf-6d88a45690da", "metadata": {}, "outputs": [], @@ -52,12 +60,25 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "id": "d41405b5", "metadata": {}, - "outputs": [], + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'langgraph'", + "output_type": "error", + "traceback": [ + 
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[2], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlanggraph\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mprebuilt\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m create_react_agent\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01magents\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m load_tools\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mlangchain_openai\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m ChatOpenAI\n", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'langgraph'" + ] + } + ], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import ChatOpenAI" ] }, @@ -116,8 +137,10 @@ "tools = load_tools([\"requests_all\"])\n", "tools += [tool]\n", "\n", - "agent_chain = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent_chain = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")\n", "agent_chain.run(\"what t shirts are available in klarna?\")" ] @@ -133,7 +156,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -147,7 +170,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + 
"version": "3.13.1" } }, "nbformat": 4, diff --git a/docs/docs/integrations/tools/connery.ipynb b/docs/docs/integrations/tools/connery.ipynb index 5070996bba999..7109f0045c6a7 100644 --- a/docs/docs/integrations/tools/connery.ipynb +++ b/docs/docs/integrations/tools/connery.ipynb @@ -109,7 +109,7 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.connery import ConneryToolkit\n", "from langchain_community.tools.connery import ConneryService\n", "from langchain_openai import ChatOpenAI\n", @@ -130,8 +130,10 @@ "\n", "# Use OpenAI Functions agent to execute the prompt using actions from the Connery Toolkit.\n", "llm = ChatOpenAI(temperature=0)\n", - "agent = initialize_agent(\n", - " connery_toolkit.get_tools(), llm, AgentType.OPENAI_FUNCTIONS, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=connery_toolkit.get_tools(),\n", + " debug=True\n", ")\n", "result = agent.run(\n", " f\"\"\"Make a short summary of the webpage http://www.paulgraham.com/vb.html in three sentences\n", @@ -157,7 +159,7 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.tools.connery import ConneryService\n", "from langchain_openai import ChatOpenAI\n", "\n", @@ -233,8 +235,10 @@ ], "source": [ "llm = ChatOpenAI(temperature=0)\n", - "agent = initialize_agent(\n", - " [send_email_action], llm, AgentType.OPENAI_FUNCTIONS, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=[send_email_action],\n", + " debug=True # Equivalent to verbose=True\n", ")\n", "agent_run_result = agent.run(\n", " f\"Send an email to the {recepient_email} and say that I will be late for the meeting.\"\n", diff --git a/docs/docs/integrations/tools/gitlab.ipynb 
b/docs/docs/integrations/tools/gitlab.ipynb index 622622d9add22..d51c2aed3cf6b 100644 --- a/docs/docs/integrations/tools/gitlab.ipynb +++ b/docs/docs/integrations/tools/gitlab.ipynb @@ -100,7 +100,7 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit\n", "from langchain_community.utilities.gitlab import GitLabAPIWrapper\n", "from langchain_openai import OpenAI" @@ -132,8 +132,10 @@ "llm = OpenAI(temperature=0)\n", "gitlab = GitLabAPIWrapper()\n", "toolkit = GitLabToolkit.from_gitlab_api_wrapper(gitlab)\n", - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=toolkit.get_tools(),\n", + " debug=True\n", ")" ] }, diff --git a/docs/docs/integrations/tools/google_finance.ipynb b/docs/docs/integrations/tools/google_finance.ipynb index 315b4dae78f3b..d9a384511e96f 100644 --- a/docs/docs/integrations/tools/google_finance.ipynb +++ b/docs/docs/integrations/tools/google_finance.ipynb @@ -74,15 +74,18 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"SERP_API_KEY\"] = \"\"\n", "llm = OpenAI()\n", "tools = load_tools([\"google-scholar\", \"google-finance\"], llm=llm)\n", - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")\n", "agent.run(\"what is google's stock\")" ] @@ -104,7 +107,7 @@ "name": "python", "nbconvert_exporter": "python", 
"pygments_lexer": "ipython3", - "version": "3.9.5" + "version": "3.13.1" } }, "nbformat": 4, diff --git a/docs/docs/integrations/tools/memorize.ipynb b/docs/docs/integrations/tools/memorize.ipynb index db7ec349854d6..e7abdedde2e29 100644 --- a/docs/docs/integrations/tools/memorize.ipynb +++ b/docs/docs/integrations/tools/memorize.ipynb @@ -26,7 +26,8 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain_community.llms import GradientLLM" @@ -118,12 +119,13 @@ "metadata": {}, "outputs": [], "source": [ - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " # memory=ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True),\n", + "memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n", + "\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", + " state_modifier=memory\n", ")" ] }, diff --git a/docs/docs/integrations/tools/nasa.ipynb b/docs/docs/integrations/tools/nasa.ipynb index 9aa420d8d17c0..d20a9213c3d2c 100644 --- a/docs/docs/integrations/tools/nasa.ipynb +++ b/docs/docs/integrations/tools/nasa.ipynb @@ -39,16 +39,18 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", "from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit\n", "from langchain_community.utilities.nasa import NasaAPIWrapper\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0, openai_api_key=\"\")\n", "nasa = NasaAPIWrapper()\n", "toolkit = 
NasaToolkit.from_nasa_api_wrapper(nasa)\n", - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=toolkit.get_tools(),\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/openweathermap.ipynb b/docs/docs/integrations/tools/openweathermap.ipynb index 0535fe6e0c719..0f7f6d8f83bec 100644 --- a/docs/docs/integrations/tools/openweathermap.ipynb +++ b/docs/docs/integrations/tools/openweathermap.ipynb @@ -83,7 +83,8 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import OpenAI\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", @@ -93,8 +94,10 @@ "\n", "tools = load_tools([\"openweathermap-api\"], llm)\n", "\n", - "agent_chain = initialize_agent(\n", - " tools=tools, llm=llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent_chain = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")" ] }, diff --git a/docs/docs/integrations/tools/playwright.ipynb b/docs/docs/integrations/tools/playwright.ipynb index e09f4e79cd24d..f019754a17f91 100644 --- a/docs/docs/integrations/tools/playwright.ipynb +++ b/docs/docs/integrations/tools/playwright.ipynb @@ -246,18 +246,17 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_anthropic import ChatAnthropic\n", "\n", "llm = ChatAnthropic(\n", " model_name=\"claude-3-haiku-20240307\", temperature=0\n", ") # or any other LLM, e.g., ChatOpenAI(), OpenAI()\n", "\n", - "agent_chain = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", + "agent_chain = 
create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")" ] }, @@ -298,7 +297,7 @@ "```\n", "\n", "\u001b[0m\n", - "Observation: \u001b[31;1m\u001b[1;3mLangChain We value your privacy We use cookies to analyze our traffic. By clicking \"Accept All\", you consent to our use of cookies. Privacy Policy Customize Reject All Accept All Customize Consent Preferences We may use cookies to help you navigate efficiently and perform certain functions. You will find detailed information about all cookies under each consent category below. The cookies that are categorized as \"Necessary\" are stored on your browser as they are essential for enabling the basic functionalities of the site.... Show more Necessary Always Active Necessary cookies are required to enable the basic features of this site, such as providing secure log-in or adjusting your consent preferences. These cookies do not store any personally identifiable data. Functional Functional cookies help perform certain functionalities like sharing the content of the website on social media platforms, collecting feedback, and other third-party features. Analytics Analytical cookies are used to understand how visitors interact with the website. These cookies help provide information on metrics such as the number of visitors, bounce rate, traffic source, etc. Performance Performance cookies are used to understand and analyze the key performance indexes of the website which helps in delivering a better user experience for the visitors. Advertisement Advertisement cookies are used to provide visitors with customized advertisements based on the pages you visited previously and to analyze the effectiveness of the ad campaigns. Uncategorized Other uncategorized cookies are those that are being analyzed and have not been classified into a category as yet. 
Reject All Save My Preferences Accept All Products LangChain LangSmith LangGraph Methods Retrieval Agents Evaluation Resources Blog Case Studies Use Case Inspiration Experts Changelog Docs LangChain Docs LangSmith Docs Company About Careers Pricing Get a demo Sign up LangChain’s suite of products supports developers along each step of the LLM application lifecycle. Applications that can reason. Powered by LangChain. Get a demo Sign up for free From startups to global enterprises, ambitious builders choose LangChain products. Build LangChain is a framework to build with LLMs by chaining interoperable components. LangGraph is the framework for building controllable agentic workflows. Run Deploy your LLM applications at scale with LangGraph Cloud, our infrastructure purpose-built for agents. Manage Debug, collaborate, test, and monitor your LLM app in LangSmith - whether it's built with a LangChain framework or not. Build your app with LangChain Build context-aware, reasoning applications with LangChain’s flexible framework that leverages your company’s data and APIs. Future-proof your application by making vendor optionality part of your LLM infrastructure design. Learn more about LangChain Run at scale with LangGraph Cloud Deploy your LangGraph app with LangGraph Cloud for fault-tolerant scalability - including support for async background jobs, built-in persistence, and distributed task queues. Learn more about LangGraph Manage LLM performance with LangSmith Ship faster with LangSmith’s debug, test, deploy, and monitoring workflows. Don’t rely on “vibes” – add engineering rigor to your LLM-development workflow, whether you’re building with LangChain or not. Learn more about LangSmith Hear from our happy customers LangChain, LangGraph, and LangSmith help teams of all sizes, across all industries - from ambitious startups to established enterprises. “LangSmith helped us improve the accuracy and performance of Retool’s fine-tuned models. 
Not only did we deliver a better product by iterating with LangSmith, but we’re shipping new AI features to our users in a fraction of the time it would have taken without it.” Jamie Cuffe Head of Self-Serve and New Products “By combining the benefits of LangSmith and standing on the shoulders of a gigantic open-source community, we’re able to identify the right approaches of using LLMs in an enterprise-setting faster.” Yusuke Kaji General Manager of AI “Working with LangChain and LangSmith on the Elastic AI Assistant had a significant positive impact on the overall pace and quality of the development and shipping experience. We couldn’t have achieved  the product experience delivered to our customers without LangChain, and we couldn’t have done it at the same pace without LangSmith.” James Spiteri Director of Security Products “As soon as we heard about LangSmith, we moved our entire development stack onto it. We could have built evaluation, testing and monitoring tools in house, but with LangSmith it took us 10x less time to get a 1000x better tool.” Jose Peña Senior Manager The reference architecture enterprises adopt for success. LangChain’s suite of products can be used independently or stacked together for multiplicative impact – guiding you through building, running, and managing your LLM apps. 15M+ Monthly Downloads 100K+ Apps Powered 75K+ GitHub Stars 3K+ Contributors The biggest developer community in GenAI Learn alongside the 1M+ developers who are pushing the industry forward. Explore LangChain Get started with the LangSmith platform today Get a demo Sign up for free Teams building with LangChain are driving operational efficiency, increasing discovery & personalization, and delivering premium products that generate revenue. Discover Use Cases Get inspired by companies who have done it. Financial Services FinTech Technology LangSmith is the enterprise DevOps platform built for LLMs. 
Explore LangSmith Gain visibility to make trade offs between cost, latency, and quality. Increase developer productivity. Eliminate manual, error-prone testing. Reduce hallucinations and improve reliability. Enterprise deployment options to keep data secure. Ready to start shipping 
", + "Observation: \u001b[31;1m\u001b[1;3mLangChain We value your privacy We use cookies to analyze our traffic. By clicking \"Accept All\", you consent to our use of cookies. Privacy Policy Customize Reject All Accept All Customize Consent Preferences We may use cookies to help you navigate efficiently and perform certain functions. You will find detailed information about all cookies under each consent category below. The cookies that are categorized as \"Necessary\" are stored on your browser as they are essential for enabling the basic functionalities of the site.... Show more Necessary Always Active Necessary cookies are required to enable the basic features of this site, such as providing secure log-in or adjusting your consent preferences. These cookies do not store any personally identifiable data. Functional Functional cookies help perform certain functionalities like sharing the content of the website on social media platforms, collecting feedback, and other third-party features. Analytics Analytical cookies are used to understand how visitors interact with the website. These cookies help provide information on metrics such as the number of visitors, bounce rate, traffic source, etc. Performance Performance cookies are used to understand and analyze the key performance indexes of the website which helps in delivering a better user experience for the visitors. Advertisement Advertisement cookies are used to provide visitors with customized advertisements based on the pages you visited previously and to analyze the effectiveness of the ad campaigns. Uncategorized Other uncategorized cookies are those that are being analyzed and have not been classified into a category as yet. 
Reject All Save My Preferences Accept All Products LangChain LangSmith LangGraph Methods Retrieval Agents Evaluation Resources Blog Case Studies Use Case Inspiration Experts Changelog Docs LangChain Docs LangSmith Docs Company About Careers Pricing Get a demo Sign up LangChain’s suite of products supports developers along each step of the LLM application lifecycle. Applications that can reason. Powered by LangChain. Get a demo Sign up for free From startups to global enterprises, ambitious builders choose LangChain products. Build LangChain is a framework to build with LLMs by chaining interoperable components. LangGraph is the framework for building controllable agentic workflows. Run Deploy your LLM applications at scale with LangGraph Cloud, our infrastructure purpose-built for agents. Manage Debug, collaborate, test, and monitor your LLM app in LangSmith - whether it's built with a LangChain framework or not. Build your app with LangChain Build context-aware, reasoning applications with LangChain’s flexible framework that leverages your company’s data and APIs. Future-proof your application by making vendor optionality part of your LLM infrastructure design. Learn more about LangChain Run at scale with LangGraph Cloud Deploy your LangGraph app with LangGraph Cloud for fault-tolerant scalability - including support for async background jobs, built-in persistence, and distributed task queues. Learn more about LangGraph Manage LLM performance with LangSmith Ship faster with LangSmith’s debug, test, deploy, and monitoring workflows. Don’t rely on “vibes” – add engineering rigor to your LLM-development workflow, whether you’re building with LangChain or not. Learn more about LangSmith Hear from our happy customers LangChain, LangGraph, and LangSmith help teams of all sizes, across all industries - from ambitious startups to established enterprises. “LangSmith helped us improve the accuracy and performance of Retool’s fine-tuned models. 
Not only did we deliver a better product by iterating with LangSmith, but we’re shipping new AI features to our users in a fraction of the time it would have taken without it.” Jamie Cuffe Head of Self-Serve and New Products “By combining the benefits of LangSmith and standing on the shoulders of a gigantic open-source community, we’re able to identify the right approaches of using LLMs in an enterprise-setting faster.” Yusuke Kaji General Manager of AI “Working with LangChain and LangSmith on the Elastic AI Assistant had a significant positive impact on the overall pace and quality of the development and shipping experience. We couldn’t have achieved  the product experience delivered to our customers without LangChain, and we couldn’t have done it at the same pace without LangSmith.” James Spiteri Director of Security Products “As soon as we heard about LangSmith, we moved our entire development stack onto it. We could have built evaluation, testing and monitoring tools in house, but with LangSmith it took us 10x less time to get a 1000x better tool.” Jose Peña Senior Manager The reference architecture enterprises adopt for success. LangChain’s suite of products can be used independently or stacked together for multiplicative impact – guiding you through building, running, and managing your LLM apps. 15M+ Monthly Downloads 100K+ Apps Powered 75K+ GitHub Stars 3K+ Contributors The biggest developer community in GenAI Learn alongside the 1M+ developers who are pushing the industry forward. Explore LangChain Get started with the LangSmith platform today Get a demo Sign up for free Teams building with LangChain are driving operational efficiency, increasing discovery & personalization, and delivering premium products that generate revenue. Discover Use Cases Get inspired by companies who have done it. Financial Services FinTech Technology LangSmith is the enterprise DevOps platform built for LLMs. 
Explore LangSmith Gain visibility to make trade offs between cost, latency, and quality. Increase developer productivity. Eliminate manual, error-prone testing. Reduce hallucinations and improve reliability. Enterprise deployment options to keep data secure. Ready to start shipping 
\n", "reliable GenAI apps faster? Get started with LangChain, LangGraph, and LangSmith to enhance your LLM app development, from prototype to production. Get a demo Sign up for free Products LangChain LangSmith LangGraph Agents Evaluation Retrieval Resources Python Docs JS/TS Docs GitHub Integrations Templates Changelog LangSmith Trust Portal Company About Blog Twitter LinkedIn YouTube Community Marketing Assets Sign up for our newsletter to stay up to date Thank you! Your submission has been received! Oops! Something went wrong while submitting the form. All systems operational Privacy Policy Terms of Service\u001b[0m\n", "Thought:\u001b[32;1m\u001b[1;3mBased on the text extracted from the langchain.com website, the main headers I can see are:\n", "\n", diff --git a/docs/docs/integrations/tools/searchapi.ipynb b/docs/docs/integrations/tools/searchapi.ipynb index c086b4d18d1c0..037ecf68edc25 100644 --- a/docs/docs/integrations/tools/searchapi.ipynb +++ b/docs/docs/integrations/tools/searchapi.ipynb @@ -124,7 +124,7 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.utilities import SearchApiAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_openai import OpenAI\n", @@ -139,9 +139,12 @@ " )\n", "]\n", "\n", - "self_ask_with_search = initialize_agent(\n", - " tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n", + "self_ask_with_search = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")\n", + "\n", "self_ask_with_search.run(\"Who lived longer: Plato, Socrates, or Aristotle?\")" ] }, diff --git a/docs/docs/integrations/tools/zapier.ipynb b/docs/docs/integrations/tools/zapier.ipynb index 3c73d1f15ac11..bc6b2665b740e 100644 --- a/docs/docs/integrations/tools/zapier.ipynb +++ b/docs/docs/integrations/tools/zapier.ipynb @@ -60,7 +60,7 @@ }, "outputs": [], "source": [ - "from 
langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits import ZapierToolkit\n", "from langchain_community.utilities.zapier import ZapierNLAWrapper\n", "from langchain_openai import OpenAI" @@ -91,8 +91,10 @@ "llm = OpenAI(temperature=0)\n", "zapier = ZapierNLAWrapper()\n", "toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n", - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=toolkit.get_tools(),\n", + " debug=True\n", ")" ] }, @@ -110,19 +112,19 @@ "text": [ "\n", "\n", - "\u001B[1m> Entering new AgentExecutor chain...\u001B[0m\n", - "\u001B[32;1m\u001B[1;3m I need to find the email and summarize it.\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m I need to find the email and summarize it.\n", "Action: Gmail: Find Email\n", - "Action Input: Find the latest email from Silicon Valley Bank\u001B[0m\n", - "Observation: \u001B[31;1m\u001B[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos Finished chain.\u001B[0m\n" + "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { @@ -286,18 +288,18 @@ "text": [ "\n", "\n", - "\u001B[1m> Entering new SimpleSequentialChain chain...\u001B[0m\n", - "\u001B[36;1m\u001B[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. 
Tim Mayopoulos Entering new SimpleSequentialChain chain...\u001b[0m\n", + "\u001b[36;1m\u001b[1;3m{\"from__name\": \"Silicon Valley Bridge Bank, N.A.\", \"from__email\": \"sreply@svb.com\", \"body_plain\": \"Dear Clients, After chaotic, tumultuous & stressful days, we have clarity on path for SVB, FDIC is fully insuring all deposits & have an ask for clients & partners as we rebuild. Tim Mayopoulos Finished chain.\u001B[0m\n" + "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { @@ -341,8 +343,10 @@ "llm = OpenAI(temperature=0)\n", "zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token=\"\")\n", "toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n", - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=toolkit.get_tools(),\n", + " debug=True\n", ")\n", "\n", "agent.run(\n", From 5815b4849de1d9db2c3ec103545c361c75b3cc5b Mon Sep 17 00:00:00 2001 From: Azraf Nahian <69325302+turboslapper@users.noreply.github.com> Date: Thu, 23 Jan 2025 00:06:02 -0500 Subject: [PATCH 2/5] Added more updates --- docs/docs/integrations/callbacks/argilla.ipynb | 11 +++++------ docs/docs/integrations/memory/zep_memory.ipynb | 13 ++++++------- .../docs/integrations/memory/zep_memory_cloud.ipynb | 13 ++++++------- docs/docs/integrations/providers/flyte.mdx | 13 ++++++------- .../integrations/tools/dalle_image_generator.ipynb | 9 +++++++-- docs/docs/integrations/tools/eleven_labs_tts.ipynb | 10 +++++----- docs/docs/integrations/tools/google_jobs.ipynb | 9 ++++++--- docs/docs/integrations/tools/gradio_tools.ipynb | 9 ++++++--- docs/docs/integrations/tools/human_tools.ipynb | 12 ++++++------ docs/docs/integrations/tools/sceneXplain.ipynb | 9 ++++++--- 10 files changed, 59 insertions(+), 49 deletions(-) diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb index 03df4421060cb..645cd60b64719 100644 --- 
a/docs/docs/integrations/callbacks/argilla.ipynb +++ b/docs/docs/integrations/callbacks/argilla.ipynb @@ -360,7 +360,8 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_core.callbacks.stdout import StdOutCallbackHandler\n", "from langchain_openai import OpenAI\n", "\n", @@ -373,11 +374,9 @@ "llm = OpenAI(temperature=0.9, callbacks=callbacks)\n", "\n", "tools = load_tools([\"serpapi\"], llm=llm, callbacks=callbacks)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " callbacks=callbacks,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", ")\n", "agent.run(\"Who was the first president of the United States of America?\")" ] diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index cf722cab4223e..c635fcb8f8483 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -47,7 +47,7 @@ "source": [ "from uuid import uuid4\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.memory.zep_memory import ZepMemory\n", "from langchain_community.retrievers import ZepRetriever\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", @@ -134,12 +134,11 @@ "\n", "# Initialize the agent\n", "llm = OpenAI(temperature=0, openai_api_key=openai_key)\n", - "agent_chain = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " memory=memory,\n", + "agent_chain = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " state_modifier=memory,\n", + " debug=True,\n", ")" ] }, diff --git 
a/docs/docs/integrations/memory/zep_memory_cloud.ipynb b/docs/docs/integrations/memory/zep_memory_cloud.ipynb index 0dca18045fc39..f28d9ec395070 100644 --- a/docs/docs/integrations/memory/zep_memory_cloud.ipynb +++ b/docs/docs/integrations/memory/zep_memory_cloud.ipynb @@ -69,7 +69,7 @@ "source": [ "from uuid import uuid4\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.memory.zep_cloud_memory import ZepCloudMemory\n", "from langchain_community.retrievers import ZepCloudRetriever\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", @@ -138,12 +138,11 @@ "\n", "# Initialize the agent\n", "llm = OpenAI(temperature=0, openai_api_key=openai_key)\n", - "agent_chain = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " memory=memory,\n", + "agent_chain = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " state_modifier=memory,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/providers/flyte.mdx b/docs/docs/integrations/providers/flyte.mdx index 5fe20d896517c..265fdcdce7e9c 100644 --- a/docs/docs/integrations/providers/flyte.mdx +++ b/docs/docs/integrations/providers/flyte.mdx @@ -25,7 +25,8 @@ First, import the necessary dependencies to support your LangChain experiments. 
import os from flytekit import ImageSpec, task -from langchain.agents import AgentType, initialize_agent, load_tools +from langgraph.prebuilt import create_react_agent +from langchain.agents import load_tools from langchain.callbacks import FlyteCallbackHandler from langchain.chains import LLMChain from langchain_openai import ChatOpenAI @@ -124,12 +125,10 @@ def langchain_agent() -> str: tools = load_tools( ["serpapi", "llm-math"], llm=llm, callbacks=[FlyteCallbackHandler()] ) - agent = initialize_agent( - tools, - llm, - agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, - callbacks=[FlyteCallbackHandler()], - verbose=True, + agent = create_react_agent( + model=llm, + tools=tools, + debug=True, ) return agent.run( "Who is Leonardo DiCaprio's girlfriend? Could you calculate her current age and raise it to the power of 0.43?" diff --git a/docs/docs/integrations/tools/dalle_image_generator.ipynb b/docs/docs/integrations/tools/dalle_image_generator.ipynb index 1386304cc6418..1744f0ffed933 100644 --- a/docs/docs/integrations/tools/dalle_image_generator.ipynb +++ b/docs/docs/integrations/tools/dalle_image_generator.ipynb @@ -158,10 +158,15 @@ } ], "source": [ - "from langchain.agents import initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "\n", "tools = load_tools([\"dalle-image-generator\"])\n", - "agent = initialize_agent(tools, llm, agent=\"zero-shot-react-description\", verbose=True)\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", + ")\n", "output = agent.run(\"Create an image of a halloween night at a haunted museum\")" ] } diff --git a/docs/docs/integrations/tools/eleven_labs_tts.ipynb b/docs/docs/integrations/tools/eleven_labs_tts.ipynb index ff70757d2588f..a8fddf8078f79 100644 --- a/docs/docs/integrations/tools/eleven_labs_tts.ipynb +++ b/docs/docs/integrations/tools/eleven_labs_tts.ipynb @@ -126,7 +126,8 @@ "metadata": {}, 
"outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI" ] }, @@ -139,11 +140,10 @@ "source": [ "llm = OpenAI(temperature=0)\n", "tools = load_tools([\"eleven_labs_text2speech\"])\n", - "agent = initialize_agent(\n", + "agent = create_react_agent(\n", + " model=llm,\n", " tools=tools,\n", - " llm=llm,\n", - " agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", + " debug=True\n", ")" ] }, diff --git a/docs/docs/integrations/tools/google_jobs.ipynb b/docs/docs/integrations/tools/google_jobs.ipynb index 855fb4ec3f25f..d197ea363fc09 100644 --- a/docs/docs/integrations/tools/google_jobs.ipynb +++ b/docs/docs/integrations/tools/google_jobs.ipynb @@ -193,14 +193,17 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", "\n", "OpenAI.api_key = os.environ[\"OPENAI_API_KEY\"]\n", "llm = OpenAI()\n", "tools = load_tools([\"google-jobs\"], llm=llm)\n", - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")\n", "agent.run(\"give me an entry level job posting related to physics\")" ] diff --git a/docs/docs/integrations/tools/gradio_tools.ipynb b/docs/docs/integrations/tools/gradio_tools.ipynb index 8c8ad5772d721..574fd09551d0e 100644 --- a/docs/docs/integrations/tools/gradio_tools.ipynb +++ b/docs/docs/integrations/tools/gradio_tools.ipynb @@ -183,7 +183,7 @@ " StableDiffusionTool,\n", " TextToVideoTool,\n", ")\n", - "from langchain.agents import initialize_agent\n", + "from langgraph.prebuilt import 
create_react_agent\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain_openai import OpenAI\n", "\n", @@ -197,8 +197,11 @@ "]\n", "\n", "\n", - "agent = initialize_agent(\n", - " tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " state_modifier=memory,\n", + " debug=True,\n", ")\n", "output = agent.run(\n", " input=(\n", diff --git a/docs/docs/integrations/tools/human_tools.ipynb b/docs/docs/integrations/tools/human_tools.ipynb index ab44da90f2b36..14c09a35fb6a7 100644 --- a/docs/docs/integrations/tools/human_tools.ipynb +++ b/docs/docs/integrations/tools/human_tools.ipynb @@ -27,7 +27,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import ChatOpenAI, OpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", @@ -37,11 +38,10 @@ " llm=math_llm,\n", ")\n", "\n", - "agent_chain = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", + "agent_chain = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True\n", ")" ] }, diff --git a/docs/docs/integrations/tools/sceneXplain.ipynb b/docs/docs/integrations/tools/sceneXplain.ipynb index 62fc83fb1c9cc..174f76d180ef4 100644 --- a/docs/docs/integrations/tools/sceneXplain.ipynb +++ b/docs/docs/integrations/tools/sceneXplain.ipynb @@ -94,14 +94,17 @@ } ], "source": [ - "from langchain.agents import initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", - "agent = initialize_agent(\n", - 
" tools, llm, memory=memory, agent=\"conversational-react-description\", verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " state_modifier=memory,\n", + " debug=True,\n", ")\n", "output = agent.run(\n", " input=(\n", From 08ef8357d8001d3550fc0a16e0396310ed8633f2 Mon Sep 17 00:00:00 2001 From: Azraf Nahian <69325302+turboslapper@users.noreply.github.com> Date: Thu, 23 Jan 2025 14:48:30 -0500 Subject: [PATCH 3/5] Completed page 2 of 3 --- .../memory/xata_chat_message_history.ipynb | 13 +++++----- .../providers/mlflow_tracking.ipynb | 8 ++++-- .../providers/wandb_tracking.ipynb | 10 +++---- .../tools/azure_cognitive_services.ipynb | 7 +++-- .../integrations/tools/google_drive.ipynb | 7 +++-- .../integrations/tools/google_serper.ipynb | 8 +++--- docs/docs/integrations/tools/jira.ipynb | 8 +++--- .../docs/integrations/tools/openapi_nla.ipynb | 26 +++++++++---------- docs/docs/integrations/tools/steam.ipynb | 8 +++--- 9 files changed, 50 insertions(+), 45 deletions(-) diff --git a/docs/docs/integrations/memory/xata_chat_message_history.ipynb b/docs/docs/integrations/memory/xata_chat_message_history.ipynb index 325b7117d8bca..742d9632851be 100644 --- a/docs/docs/integrations/memory/xata_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/xata_chat_message_history.ipynb @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents.agent_toolkits import create_retriever_tool\n", "from langchain_openai import ChatOpenAI\n", "\n", @@ -232,12 +232,11 @@ "\n", "llm = ChatOpenAI(temperature=0)\n", "\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " memory=memory,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " state_modifier=memory,\n", + " 
debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/providers/mlflow_tracking.ipynb b/docs/docs/integrations/providers/mlflow_tracking.ipynb index c21161aaf6a85..4728e14c56bc7 100644 --- a/docs/docs/integrations/providers/mlflow_tracking.ipynb +++ b/docs/docs/integrations/providers/mlflow_tracking.ipynb @@ -358,13 +358,17 @@ "outputs": [], "source": [ "script_content = \"\"\"\n", - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import ChatOpenAI\n", "import mlflow\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4o\", temperature=0)\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", - "agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION)\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + ")\n", "\n", "# IMPORTANT: call set_model() to register the instance to be logged.\n", "mlflow.models.set_model(agent)\n", diff --git a/docs/docs/integrations/providers/wandb_tracking.ipynb b/docs/docs/integrations/providers/wandb_tracking.ipynb index ea49435e24efd..6957989223fcd 100644 --- a/docs/docs/integrations/providers/wandb_tracking.ipynb +++ b/docs/docs/integrations/providers/wandb_tracking.ipynb @@ -586,7 +586,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools" + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools" ] }, { @@ -659,10 +660,9 @@ "source": [ "# SCENARIO 3 - Agent with Tools\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", ")\n", "agent.run(\n", " \"Who is Leo DiCaprio's girlfriend? 
What is her current age raised to the 0.43 power?\",\n", diff --git a/docs/docs/integrations/tools/azure_cognitive_services.ipynb b/docs/docs/integrations/tools/azure_cognitive_services.ipynb index e38896f5e62f3..bdb6412d429f7 100644 --- a/docs/docs/integrations/tools/azure_cognitive_services.ipynb +++ b/docs/docs/integrations/tools/azure_cognitive_services.ipynb @@ -116,7 +116,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import OpenAI" ] }, @@ -127,11 +127,10 @@ "outputs": [], "source": [ "llm = OpenAI(temperature=0)\n", - "agent = initialize_agent(\n", + "agent = create_react_agent(\n", " tools=toolkit.get_tools(),\n", " llm=llm,\n", - " agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/google_drive.ipynb b/docs/docs/integrations/tools/google_drive.ipynb index bc28a0597ac1f..296fa2c57bf39 100644 --- a/docs/docs/integrations/tools/google_drive.ipynb +++ b/docs/docs/integrations/tools/google_drive.ipynb @@ -171,14 +171,13 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", - "agent = initialize_agent(\n", + "agent = create_react_agent(\n", + " model=llm,\n", " tools=tools,\n", - " llm=llm,\n", - " agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/google_serper.ipynb b/docs/docs/integrations/tools/google_serper.ipynb index 3ea5265635f76..a1b9c6f621bcc 100644 --- a/docs/docs/integrations/tools/google_serper.ipynb +++ b/docs/docs/integrations/tools/google_serper.ipynb @@ -168,7 +168,7 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from 
langgraph.prebuilt import create_react_agent\n", "from langchain_community.utilities import GoogleSerperAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_openai import OpenAI\n", @@ -183,8 +183,10 @@ " )\n", "]\n", "\n", - "self_ask_with_search = initialize_agent(\n", - " tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True\n", + "self_ask_with_search = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "self_ask_with_search.run(\n", " \"What is the hometown of the reigning men's U.S. Open champion?\"\n", diff --git a/docs/docs/integrations/tools/jira.ipynb b/docs/docs/integrations/tools/jira.ipynb index 045cd5d2d132f..fd6b3febf7769 100644 --- a/docs/docs/integrations/tools/jira.ipynb +++ b/docs/docs/integrations/tools/jira.ipynb @@ -73,7 +73,7 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit\n", "from langchain_community.utilities.jira import JiraAPIWrapper\n", "from langchain_openai import OpenAI" @@ -189,8 +189,10 @@ }, "outputs": [], "source": [ - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " tools=toolkit.get_tools(),\n", + " model=llm,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/openapi_nla.ipynb b/docs/docs/integrations/tools/openapi_nla.ipynb index 4f11fd8045de2..9bb89417d2a5d 100644 --- a/docs/docs/integrations/tools/openapi_nla.ipynb +++ b/docs/docs/integrations/tools/openapi_nla.ipynb @@ -23,7 +23,7 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits import NLAToolkit\n", "from langchain_community.utilities import
Requests\n", "from langchain_openai import OpenAI" @@ -122,13 +122,12 @@ "outputs": [], "source": [ "natural_language_tools = speak_toolkit.get_tools() + klarna_toolkit.get_tools()\n", - "mrkl = initialize_agent(\n", - " natural_language_tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " agent_kwargs={\"format_instructions\": openapi_format_instructions},\n", - ")" + "mrkl = create_react_agent(\n", + " model=llm,\n", + " tools=natural_language_tools,\n", + " state_modifier=openapi_format_instructions,\n", + " debug=True,\n", + ")" ] }, { @@ -288,12 +287,11 @@ "outputs": [], "source": [ "# Create an agent with the new tools\n", - "mrkl = initialize_agent(\n", - " natural_language_api_tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " agent_kwargs={\"format_instructions\": openapi_format_instructions},\n", + "mrkl = create_react_agent(\n", + " model=llm,\n", + " tools=natural_language_api_tools,\n", + " state_modifier=openapi_format_instructions,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/steam.ipynb b/docs/docs/integrations/tools/steam.ipynb index d6af64660c69e..5f4f626a22210 100644 --- a/docs/docs/integrations/tools/steam.ipynb +++ b/docs/docs/integrations/tools/steam.ipynb @@ -74,7 +74,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit\n", "from langchain_community.utilities.steam import SteamWebAPIWrapper\n", "from langchain_openai import OpenAI" @@ -89,8 +89,10 @@ "llm = OpenAI(temperature=0)\n", "Steam = SteamWebAPIWrapper()\n", "toolkit = SteamToolkit.from_steam_api_wrapper(Steam)\n", - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=toolkit.get_tools(),\n", + " debug=True,\n", ")" ] }, From c7eff2bf534443a5eca27a76a87dc58fe29c1326 Mon Sep 17 00:00:00 2001 From: Azraf Nahian <69325302+turboslapper@users.noreply.github.com> Date: Thu, 23 Jan 2025 15:07:42 -0500 Subject: [PATCH 4/5] Finished page 3 --- .../integrations/callbacks/comet_tracing.ipynb | 15 ++++++++++----- .../integrations/llms/amazon_api_gateway.ipynb | 12 ++++++------ .../integrations/providers/aim_tracking.ipynb | 12 ++++++------ .../integrations/providers/clearml_tracking.ipynb | 12 ++++++------ .../integrations/providers/wandb_tracing.ipynb | 9 ++++++--- docs/docs/integrations/tools/ainetwork.ipynb | 9 ++++----- docs/docs/integrations/tools/clickup.ipynb | 8 +++++--- .../integrations/tools/e2b_data_analysis.ipynb | 12 +++++------- docs/docs/integrations/tools/edenai_tools.ipynb | 12 +++++------- docs/docs/integrations/tools/graphql.ipynb | 9 ++++++--- docs/docs/integrations/tools/office365.ipynb | 9 ++++----- .../integrations/tools/yahoo_finance_news.ipynb | 11 +++++------ 12 files changed, 68 insertions(+), 62 deletions(-) diff --git a/docs/docs/integrations/callbacks/comet_tracing.ipynb b/docs/docs/integrations/callbacks/comet_tracing.ipynb index f2cfcb5c6e4b1..6122efc917cf2 100644 --- a/docs/docs/integrations/callbacks/comet_tracing.ipynb +++ b/docs/docs/integrations/callbacks/comet_tracing.ipynb @@ -41,7 +41,8 @@ "# here we are configuring the comet project\n", "os.environ[\"COMET_PROJECT_NAME\"] = \"comet-example-langchain-tracing\"\n", "\n", - "from langchain.agents import AgentType, initialize_agent, load_tools" + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools" ] }, { @@ -76,8 +77,10 @@ }, "outputs": [], "source": [ - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = 
create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "\n", "agent.run(\"What is 2 raised to .123243 power?\") # this should be traced\n", @@ -104,8 +107,10 @@ "# Recreate the LLM, tools and agent and passing the callback to each of them\n", "llm = OpenAI(temperature=0)\n", "tools = load_tools([\"llm-math\"], llm=llm)\n", - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "\n", "agent.run(\n", diff --git a/docs/docs/integrations/llms/amazon_api_gateway.ipynb b/docs/docs/integrations/llms/amazon_api_gateway.ipynb index fbccba37e5d02..892a998ea8a81 100644 --- a/docs/docs/integrations/llms/amazon_api_gateway.ipynb +++ b/docs/docs/integrations/llms/amazon_api_gateway.ipynb @@ -130,7 +130,8 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "\n", "parameters = {\n", " \"max_new_tokens\": 50,\n", @@ -147,11 +148,10 @@ "tools = load_tools([\"python_repl\", \"llm-math\"], llm=llm)\n", "\n", "# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "\n", "# Now let's test it out!\n", diff --git a/docs/docs/integrations/providers/aim_tracking.ipynb b/docs/docs/integrations/providers/aim_tracking.ipynb index e078a349f8bbe..8218a97085e03 100644 --- a/docs/docs/integrations/providers/aim_tracking.ipynb +++ b/docs/docs/integrations/providers/aim_tracking.ipynb @@ -224,7 +224,8 @@ }, "outputs": [], "source": [ - "from langchain.agents import 
AgentType, initialize_agent, load_tools" + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools" ] }, { @@ -269,11 +270,10 @@ "source": [ "# scenario 3 - Agent with Tools\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " callbacks=callbacks,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "agent.run(\n", " \"Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?\"\n", diff --git a/docs/docs/integrations/providers/clearml_tracking.ipynb b/docs/docs/integrations/providers/clearml_tracking.ipynb index e08b3e9928a85..c24ad2edff813 100644 --- a/docs/docs/integrations/providers/clearml_tracking.ipynb +++ b/docs/docs/integrations/providers/clearml_tracking.ipynb @@ -542,15 +542,15 @@ } ], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "\n", "# SCENARIO 2 - Agent with Tools\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " callbacks=callbacks,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "agent.run(\"Who is the wife of the person who sang summer of 69?\")\n", "clearml_callback.flush_tracker(\n", diff --git a/docs/docs/integrations/providers/wandb_tracing.ipynb b/docs/docs/integrations/providers/wandb_tracing.ipynb index ce6480d1a0d06..3f642ee0c3b7b 100644 --- a/docs/docs/integrations/providers/wandb_tracing.ipynb +++ b/docs/docs/integrations/providers/wandb_tracing.ipynb @@ -39,7 +39,8 @@ "# here we are configuring the wandb project 
name\n", "os.environ[\"WANDB_PROJECT\"] = \"langchain-tracing\"\n", "\n", - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI" ] }, @@ -75,8 +76,10 @@ }, "outputs": [], "source": [ - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")\n", "\n", "agent.run(\"What is 2 raised to .123243 power?\") # this should be traced\n", diff --git a/docs/docs/integrations/tools/ainetwork.ipynb b/docs/docs/integrations/tools/ainetwork.ipynb index b33d4c48b9492..1b10b2e497e1a 100644 --- a/docs/docs/integrations/tools/ainetwork.ipynb +++ b/docs/docs/integrations/tools/ainetwork.ipynb @@ -130,15 +130,14 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0)\n", - "agent = initialize_agent(\n", + "agent = create_react_agent(\n", + " model=llm,\n", " tools=tools,\n", - " llm=llm,\n", - " verbose=True,\n", - " agent=AgentType.OPENAI_FUNCTIONS,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/clickup.ipynb b/docs/docs/integrations/tools/clickup.ipynb index 6cfe2e118fd35..38fa0090310c7 100644 --- a/docs/docs/integrations/tools/clickup.ipynb +++ b/docs/docs/integrations/tools/clickup.ipynb @@ -30,7 +30,7 @@ "%autoreload 2\n", "from datetime import datetime\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit\n", "from langchain_community.utilities.clickup import ClickupAPIWrapper\n", "from 
langchain_openai import OpenAI" @@ -167,8 +167,10 @@ "source": [ "llm = OpenAI(temperature=0, openai_api_key=\"\")\n", "\n", - "agent = initialize_agent(\n", - " toolkit.get_tools(), llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=toolkit.get_tools(),\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/e2b_data_analysis.ipynb b/docs/docs/integrations/tools/e2b_data_analysis.ipynb index 769e6a7fc7888..7d2cbfbe38107 100644 --- a/docs/docs/integrations/tools/e2b_data_analysis.ipynb +++ b/docs/docs/integrations/tools/e2b_data_analysis.ipynb @@ -66,7 +66,7 @@ "source": [ "import os\n", "\n", - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI\n", "\n", "os.environ[\"E2B_API_KEY\"] = \"\"\n", @@ -155,12 +155,10 @@ "tools = [e2b_data_analysis_tool.as_tool()]\n", "\n", "llm = ChatOpenAI(model=\"gpt-4\", temperature=0)\n", - "agent = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.OPENAI_FUNCTIONS,\n", - " verbose=True,\n", - " handle_parsing_errors=True,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/edenai_tools.ipynb b/docs/docs/integrations/tools/edenai_tools.ipynb index dc19ba7507fd6..4c9b62d79fada 100644 --- a/docs/docs/integrations/tools/edenai_tools.ipynb +++ b/docs/docs/integrations/tools/edenai_tools.ipynb @@ -74,7 +74,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.llms import EdenAI\n", "\n", "llm = EdenAI(\n", @@ -90,12 +90,10 @@ " EdenAiParsingIDTool(providers=[\"amazon\", \"klippa\"], language=\"en\"),\n", " EdenAiParsingInvoiceTool(providers=[\"amazon\", \"google\"], 
language=\"en\"),\n", "]\n", - "agent_chain = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", - " return_intermediate_steps=True,\n", + "agent_chain = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/graphql.ipynb b/docs/docs/integrations/tools/graphql.ipynb index bc0ded37bd04c..981be95990847 100644 --- a/docs/docs/integrations/tools/graphql.ipynb +++ b/docs/docs/integrations/tools/graphql.ipynb @@ -56,7 +56,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent, load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", + "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", @@ -66,8 +67,10 @@ " graphql_endpoint=\"https://swapi-graphql.netlify.app/.netlify/functions/index\",\n", ")\n", "\n", - "agent = initialize_agent(\n", - " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/office365.ipynb b/docs/docs/integrations/tools/office365.ipynb index 5a7163dd1ccce..a5d1086963fac 100644 --- a/docs/docs/integrations/tools/office365.ipynb +++ b/docs/docs/integrations/tools/office365.ipynb @@ -100,7 +100,7 @@ }, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import OpenAI" ] }, @@ -113,11 +113,10 @@ "outputs": [], "source": [ "llm = OpenAI(temperature=0)\n", - "agent = initialize_agent(\n", + "agent = create_react_agent(\n", " tools=toolkit.get_tools(),\n", - " llm=llm,\n", - " verbose=False,\n", - " agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,\n", + " model=llm,\n", + " 
debug=False,\n", ")" ] }, diff --git a/docs/docs/integrations/tools/yahoo_finance_news.ipynb b/docs/docs/integrations/tools/yahoo_finance_news.ipynb index 743e358a95407..f3befc161bb20 100644 --- a/docs/docs/integrations/tools/yahoo_finance_news.ipynb +++ b/docs/docs/integrations/tools/yahoo_finance_news.ipynb @@ -52,17 +52,16 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentType, initialize_agent\n", + "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool\n", "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "tools = [YahooFinanceNewsTool()]\n", - "agent_chain = initialize_agent(\n", - " tools,\n", - " llm,\n", - " agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n", - " verbose=True,\n", + "agent = create_react_agent(\n", + " model=llm,\n", + " tools=tools,\n", + " debug=True,\n", ")" ] }, From 3812ec13bc5ad300146c5d74b0d0963045cfe3da Mon Sep 17 00:00:00 2001 From: Azraf <69325302+turboslapper@users.noreply.github.com> Date: Fri, 24 Jan 2025 01:03:06 -0500 Subject: [PATCH 5/5] fix: auto-format examples and resolve linting issues --- docs/docs/integrations/callbacks/argilla.ipynb | 2 +- .../integrations/callbacks/comet_tracing.ipynb | 4 ++-- .../callbacks/sagemaker_tracking.ipynb | 2 +- .../integrations/llms/amazon_api_gateway.ipynb | 2 +- .../memory/xata_chat_message_history.ipynb | 2 +- docs/docs/integrations/memory/zep_memory.ipynb | 2 +- .../integrations/memory/zep_memory_cloud.ipynb | 2 +- .../integrations/providers/aim_tracking.ipynb | 4 ++-- .../providers/clearml_tracking.ipynb | 2 +- .../integrations/providers/comet_tracking.ipynb | 2 +- .../integrations/providers/wandb_tracing.ipynb | 4 ++-- .../integrations/providers/wandb_tracking.ipynb | 4 ++-- docs/docs/integrations/tools/ainetwork.ipynb | 2 +- docs/docs/integrations/tools/awslambda.ipynb | 8 ++------ .../tools/azure_cognitive_services.ipynb | 4 ++-- 
docs/docs/integrations/tools/bash.ipynb | 8 ++------ docs/docs/integrations/tools/bearly.ipynb | 4 ++-- .../integrations/tools/chatgpt_plugins.ipynb | 10 +++------- docs/docs/integrations/tools/clickup.ipynb | 4 ++-- docs/docs/integrations/tools/connery.ipynb | 12 ++++-------- .../tools/dalle_image_generator.ipynb | 8 ++------ .../integrations/tools/e2b_data_analysis.ipynb | 2 +- docs/docs/integrations/tools/edenai_tools.ipynb | 2 +- .../integrations/tools/eleven_labs_tts.ipynb | 10 +++------- docs/docs/integrations/tools/gitlab.ipynb | 10 +++------- docs/docs/integrations/tools/google_drive.ipynb | 2 +- .../docs/integrations/tools/google_finance.ipynb | 8 ++------ docs/docs/integrations/tools/google_jobs.ipynb | 8 ++------ docs/docs/integrations/tools/google_serper.ipynb | 2 +- docs/docs/integrations/tools/gradio_tools.ipynb | 2 +- docs/docs/integrations/tools/graphql.ipynb | 2 +- docs/docs/integrations/tools/human_tools.ipynb | 8 ++------ docs/docs/integrations/tools/jira.ipynb | 4 ++-- docs/docs/integrations/tools/memorize.ipynb | 11 +++-------- docs/docs/integrations/tools/office365.ipynb | 4 ++-- docs/docs/integrations/tools/openapi_nla.ipynb | 6 +++--- .../docs/integrations/tools/openweathermap.ipynb | 8 ++------ docs/docs/integrations/tools/playwright.ipynb | 8 ++------ docs/docs/integrations/tools/sceneXplain.ipynb | 2 +- docs/docs/integrations/tools/searchapi.ipynb | 8 ++------ docs/docs/integrations/tools/steam.ipynb | 4 ++-- .../integrations/tools/yahoo_finance_news.ipynb | 2 +- docs/docs/integrations/tools/zapier.ipynb | 16 ++++------------ 43 files changed, 78 insertions(+), 143 deletions(-) diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb index 645cd60b64719..7c94692cbbd57 100644 --- a/docs/docs/integrations/callbacks/argilla.ipynb +++ b/docs/docs/integrations/callbacks/argilla.ipynb @@ -360,10 +360,10 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from 
langchain.agents import load_tools\n", "from langchain_core.callbacks.stdout import StdOutCallbackHandler\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", diff --git a/docs/docs/integrations/callbacks/comet_tracing.ipynb b/docs/docs/integrations/callbacks/comet_tracing.ipynb index 6122efc917cf2..957b902ec3fc1 100644 --- a/docs/docs/integrations/callbacks/comet_tracing.ipynb +++ b/docs/docs/integrations/callbacks/comet_tracing.ipynb @@ -41,8 +41,8 @@ "# here we are configuring the comet project\n", "os.environ[\"COMET_PROJECT_NAME\"] = \"comet-example-langchain-tracing\"\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", - "from langchain.agents import load_tools" + "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb index 04b13faf257b0..ccfd8ab1d1b46 100644 --- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb +++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb @@ -89,11 +89,11 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain.chains import LLMChain, SimpleSequentialChain\n", "from langchain_core.prompts import PromptTemplate\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "from sagemaker.analytics import ExperimentAnalytics\n", "from sagemaker.experiments.run import Run\n", "from sagemaker.session import Session" diff --git a/docs/docs/integrations/llms/amazon_api_gateway.ipynb b/docs/docs/integrations/llms/amazon_api_gateway.ipynb index 892a998ea8a81..a92d054f42b29 100644 --- a/docs/docs/integrations/llms/amazon_api_gateway.ipynb +++ 
b/docs/docs/integrations/llms/amazon_api_gateway.ipynb @@ -130,8 +130,8 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "parameters = {\n", " \"max_new_tokens\": 50,\n", diff --git a/docs/docs/integrations/memory/xata_chat_message_history.ipynb b/docs/docs/integrations/memory/xata_chat_message_history.ipynb index 742d9632851be..8e08286246888 100644 --- a/docs/docs/integrations/memory/xata_chat_message_history.ipynb +++ b/docs/docs/integrations/memory/xata_chat_message_history.ipynb @@ -219,9 +219,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents.agent_toolkits import create_retriever_tool\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "tool = create_retriever_tool(\n", " vector_store.as_retriever(),\n", diff --git a/docs/docs/integrations/memory/zep_memory.ipynb b/docs/docs/integrations/memory/zep_memory.ipynb index c635fcb8f8483..77c6bed501d23 100644 --- a/docs/docs/integrations/memory/zep_memory.ipynb +++ b/docs/docs/integrations/memory/zep_memory.ipynb @@ -47,13 +47,13 @@ "source": [ "from uuid import uuid4\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.memory.zep_memory import ZepMemory\n", "from langchain_community.retrievers import ZepRetriever\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", "from langchain_core.messages import AIMessage, HumanMessage\n", "from langchain_core.tools import Tool\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "# Set this to your Zep server URL\n", "ZEP_API_URL = \"http://localhost:8000\"\n", diff --git a/docs/docs/integrations/memory/zep_memory_cloud.ipynb b/docs/docs/integrations/memory/zep_memory_cloud.ipynb index 
f28d9ec395070..e11373aa6236d 100644 --- a/docs/docs/integrations/memory/zep_memory_cloud.ipynb +++ b/docs/docs/integrations/memory/zep_memory_cloud.ipynb @@ -69,13 +69,13 @@ "source": [ "from uuid import uuid4\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.memory.zep_cloud_memory import ZepCloudMemory\n", "from langchain_community.retrievers import ZepCloudRetriever\n", "from langchain_community.utilities import WikipediaAPIWrapper\n", "from langchain_core.messages import AIMessage, HumanMessage\n", "from langchain_core.tools import Tool\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "session_id = str(uuid4()) # This is a unique identifier for the session" ] diff --git a/docs/docs/integrations/providers/aim_tracking.ipynb b/docs/docs/integrations/providers/aim_tracking.ipynb index 8218a97085e03..fceccac56bfcb 100644 --- a/docs/docs/integrations/providers/aim_tracking.ipynb +++ b/docs/docs/integrations/providers/aim_tracking.ipynb @@ -224,8 +224,8 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", - "from langchain.agents import load_tools" + "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/providers/clearml_tracking.ipynb b/docs/docs/integrations/providers/clearml_tracking.ipynb index c24ad2edff813..53fe1baac498a 100644 --- a/docs/docs/integrations/providers/clearml_tracking.ipynb +++ b/docs/docs/integrations/providers/clearml_tracking.ipynb @@ -542,8 +542,8 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "# SCENARIO 2 - Agent with Tools\n", "tools = load_tools([\"serpapi\", \"llm-math\"], llm=llm, callbacks=callbacks)\n", diff --git a/docs/docs/integrations/providers/comet_tracking.ipynb 
b/docs/docs/integrations/providers/comet_tracking.ipynb index 2c794a93a940d..985ae46e8b63d 100644 --- a/docs/docs/integrations/providers/comet_tracking.ipynb +++ b/docs/docs/integrations/providers/comet_tracking.ipynb @@ -192,11 +192,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain_community.callbacks import CometCallbackHandler\n", "from langchain_core.callbacks import StdOutCallbackHandler\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "comet_callback = CometCallbackHandler(\n", " project_name=\"comet-example-langchain\",\n", diff --git a/docs/docs/integrations/providers/wandb_tracing.ipynb b/docs/docs/integrations/providers/wandb_tracing.ipynb index 3f642ee0c3b7b..44026f6296241 100644 --- a/docs/docs/integrations/providers/wandb_tracing.ipynb +++ b/docs/docs/integrations/providers/wandb_tracing.ipynb @@ -39,9 +39,9 @@ "# here we are configuring the wandb project name\n", "os.environ[\"WANDB_PROJECT\"] = \"langchain-tracing\"\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/providers/wandb_tracking.ipynb b/docs/docs/integrations/providers/wandb_tracking.ipynb index 6957989223fcd..2ae22b1b9538b 100644 --- a/docs/docs/integrations/providers/wandb_tracking.ipynb +++ b/docs/docs/integrations/providers/wandb_tracking.ipynb @@ -586,8 +586,8 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", - "from langchain.agents import load_tools" + "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/ainetwork.ipynb 
b/docs/docs/integrations/tools/ainetwork.ipynb index 1b10b2e497e1a..156085ff77d51 100644 --- a/docs/docs/integrations/tools/ainetwork.ipynb +++ b/docs/docs/integrations/tools/ainetwork.ipynb @@ -130,8 +130,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "agent = create_react_agent(\n", diff --git a/docs/docs/integrations/tools/awslambda.ipynb b/docs/docs/integrations/tools/awslambda.ipynb index ccbf5962efbff..afb2117ce6bac 100644 --- a/docs/docs/integrations/tools/awslambda.ipynb +++ b/docs/docs/integrations/tools/awslambda.ipynb @@ -62,9 +62,9 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", @@ -75,11 +75,7 @@ " function_name=\"testFunction1\",\n", ")\n", "\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")\n", + "agent = create_react_agent(model=llm, tools=tools, debug=True)\n", "\n", "agent.run(\"Send an email to test@testing123.com saying hello world.\")" ] diff --git a/docs/docs/integrations/tools/azure_cognitive_services.ipynb b/docs/docs/integrations/tools/azure_cognitive_services.ipynb index bdb6412d429f7..22b5d67be9d20 100644 --- a/docs/docs/integrations/tools/azure_cognitive_services.ipynb +++ b/docs/docs/integrations/tools/azure_cognitive_services.ipynb @@ -116,8 +116,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/bash.ipynb 
b/docs/docs/integrations/tools/bash.ipynb index efe06a3f33818..a05a23a49399f 100644 --- a/docs/docs/integrations/tools/bash.ipynb +++ b/docs/docs/integrations/tools/bash.ipynb @@ -154,19 +154,15 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = ChatOpenAI(temperature=0)\n", "\n", "shell_tool.description = shell_tool.description + f\"args {shell_tool.args}\".replace(\n", " \"{\", \"{{\"\n", ").replace(\"}\", \"}}\")\n", - "self_ask_with_search = create_react_agent(\n", - " model=llm,\n", - " tools=[shell_tool],\n", - " debug=True\n", - ")\n", + "self_ask_with_search = create_react_agent(model=llm, tools=[shell_tool], debug=True)\n", "self_ask_with_search.run(\n", " \"Download the langchain.com webpage and grep for all urls. Return only a sorted list of them. Be sure to use double quotes.\"\n", ")" diff --git a/docs/docs/integrations/tools/bearly.ipynb b/docs/docs/integrations/tools/bearly.ipynb index 91e236adff289..29ee91fa6ca23 100644 --- a/docs/docs/integrations/tools/bearly.ipynb +++ b/docs/docs/integrations/tools/bearly.ipynb @@ -47,8 +47,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", - "from langchain_openai import ChatOpenAI" + "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/chatgpt_plugins.ipynb b/docs/docs/integrations/tools/chatgpt_plugins.ipynb index 1e0446b0e9e9d..b061420b9a238 100644 --- a/docs/docs/integrations/tools/chatgpt_plugins.ipynb +++ b/docs/docs/integrations/tools/chatgpt_plugins.ipynb @@ -77,9 +77,9 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", - "from langchain_openai import ChatOpenAI" + "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import 
create_react_agent" ] }, { @@ -137,11 +137,7 @@ "tools = load_tools([\"requests_all\"])\n", "tools += [tool]\n", "\n", - "agent_chain = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")\n", + "agent_chain = create_react_agent(model=llm, tools=tools, debug=True)\n", "agent_chain.run(\"what t shirts are available in klarna?\")" ] }, diff --git a/docs/docs/integrations/tools/clickup.ipynb b/docs/docs/integrations/tools/clickup.ipynb index 38fa0090310c7..725de806e817b 100644 --- a/docs/docs/integrations/tools/clickup.ipynb +++ b/docs/docs/integrations/tools/clickup.ipynb @@ -30,10 +30,10 @@ "%autoreload 2\n", "from datetime import datetime\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit\n", "from langchain_community.utilities.clickup import ClickupAPIWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/connery.ipynb b/docs/docs/integrations/tools/connery.ipynb index 7109f0045c6a7..b0e8dbdae2199 100644 --- a/docs/docs/integrations/tools/connery.ipynb +++ b/docs/docs/integrations/tools/connery.ipynb @@ -109,10 +109,10 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.connery import ConneryToolkit\n", "from langchain_community.tools.connery import ConneryService\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "# Specify your Connery Runner credentials.\n", "os.environ[\"CONNERY_RUNNER_URL\"] = \"\"\n", @@ -130,11 +130,7 @@ "\n", "# Use OpenAI Functions agent to execute the prompt using actions from the Connery Toolkit.\n", "llm = ChatOpenAI(temperature=0)\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " 
tools=connery_toolkit.get_tools(),\n", - " debug=True\n", - ")\n", + "agent = create_react_agent(model=llm, tools=connery_toolkit.get_tools(), debug=True)\n", "result = agent.run(\n", " f\"\"\"Make a short summary of the webpage http://www.paulgraham.com/vb.html in three sentences\n", "and send it to {recepient_email}. Include the link to the webpage into the body of the email.\"\"\"\n", @@ -159,9 +155,9 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.tools.connery import ConneryService\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "# Specify your Connery Runner credentials.\n", "os.environ[\"CONNERY_RUNNER_URL\"] = \"\"\n", @@ -238,7 +234,7 @@ "agent = create_react_agent(\n", " model=llm,\n", " tools=[send_email_action],\n", - " debug=True # Equivalent to verbose=True\n", + " debug=True, # Equivalent to verbose=True\n", ")\n", "agent_run_result = agent.run(\n", " f\"Send an email to the {recepient_email} and say that I will be late for the meeting.\"\n", diff --git a/docs/docs/integrations/tools/dalle_image_generator.ipynb b/docs/docs/integrations/tools/dalle_image_generator.ipynb index 1744f0ffed933..37085548e2d26 100644 --- a/docs/docs/integrations/tools/dalle_image_generator.ipynb +++ b/docs/docs/integrations/tools/dalle_image_generator.ipynb @@ -158,15 +158,11 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "tools = load_tools([\"dalle-image-generator\"])\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")\n", + "agent = create_react_agent(model=llm, tools=tools, debug=True)\n", "output = agent.run(\"Create an image of a halloween night at a haunted museum\")" ] } diff --git a/docs/docs/integrations/tools/e2b_data_analysis.ipynb 
b/docs/docs/integrations/tools/e2b_data_analysis.ipynb index 7d2cbfbe38107..8a880f6715152 100644 --- a/docs/docs/integrations/tools/e2b_data_analysis.ipynb +++ b/docs/docs/integrations/tools/e2b_data_analysis.ipynb @@ -66,8 +66,8 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "os.environ[\"E2B_API_KEY\"] = \"\"\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"" diff --git a/docs/docs/integrations/tools/edenai_tools.ipynb b/docs/docs/integrations/tools/edenai_tools.ipynb index 4c9b62d79fada..0e778ff8d635b 100644 --- a/docs/docs/integrations/tools/edenai_tools.ipynb +++ b/docs/docs/integrations/tools/edenai_tools.ipynb @@ -74,8 +74,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.llms import EdenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = EdenAI(\n", " feature=\"text\", provider=\"openai\", params={\"temperature\": 0.2, \"max_tokens\": 250}\n", diff --git a/docs/docs/integrations/tools/eleven_labs_tts.ipynb b/docs/docs/integrations/tools/eleven_labs_tts.ipynb index a8fddf8078f79..8d40ce3dd0321 100644 --- a/docs/docs/integrations/tools/eleven_labs_tts.ipynb +++ b/docs/docs/integrations/tools/eleven_labs_tts.ipynb @@ -126,9 +126,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { @@ -140,11 +140,7 @@ "source": [ "llm = OpenAI(temperature=0)\n", "tools = load_tools([\"eleven_labs_text2speech\"])\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")" + "agent = create_react_agent(model=llm, tools=tools, debug=True)" ] }, { diff 
--git a/docs/docs/integrations/tools/gitlab.ipynb b/docs/docs/integrations/tools/gitlab.ipynb index d51c2aed3cf6b..369611fe4c079 100644 --- a/docs/docs/integrations/tools/gitlab.ipynb +++ b/docs/docs/integrations/tools/gitlab.ipynb @@ -100,10 +100,10 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit\n", "from langchain_community.utilities.gitlab import GitLabAPIWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { @@ -132,11 +132,7 @@ "llm = OpenAI(temperature=0)\n", "gitlab = GitLabAPIWrapper()\n", "toolkit = GitLabToolkit.from_gitlab_api_wrapper(gitlab)\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=toolkit.get_tools(),\n", - " debug=True\n", - ")" + "agent = create_react_agent(model=llm, tools=toolkit.get_tools(), debug=True)" ] }, { diff --git a/docs/docs/integrations/tools/google_drive.ipynb b/docs/docs/integrations/tools/google_drive.ipynb index 296fa2c57bf39..ac1fcf40e80ae 100644 --- a/docs/docs/integrations/tools/google_drive.ipynb +++ b/docs/docs/integrations/tools/google_drive.ipynb @@ -171,8 +171,8 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = create_react_agent(\n", diff --git a/docs/docs/integrations/tools/google_finance.ipynb b/docs/docs/integrations/tools/google_finance.ipynb index d9a384511e96f..4a1a2c225a5b2 100644 --- a/docs/docs/integrations/tools/google_finance.ipynb +++ b/docs/docs/integrations/tools/google_finance.ipynb @@ -74,19 +74,15 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", + 
"from langgraph.prebuilt import create_react_agent\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"SERP_API_KEY\"] = \"\"\n", "llm = OpenAI()\n", "tools = load_tools([\"google-scholar\", \"google-finance\"], llm=llm)\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")\n", + "agent = create_react_agent(model=llm, tools=tools, debug=True)\n", "agent.run(\"what is google's stock\")" ] } diff --git a/docs/docs/integrations/tools/google_jobs.ipynb b/docs/docs/integrations/tools/google_jobs.ipynb index d197ea363fc09..9f3fb8d7e59b0 100644 --- a/docs/docs/integrations/tools/google_jobs.ipynb +++ b/docs/docs/integrations/tools/google_jobs.ipynb @@ -193,18 +193,14 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "OpenAI.api_key = os.environ[\"OPENAI_API_KEY\"]\n", "llm = OpenAI()\n", "tools = load_tools([\"google-jobs\"], llm=llm)\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")\n", + "agent = create_react_agent(model=llm, tools=tools, debug=True)\n", "agent.run(\"give me an entry level job posting related to physics\")" ] }, diff --git a/docs/docs/integrations/tools/google_serper.ipynb b/docs/docs/integrations/tools/google_serper.ipynb index a1b9c6f621bcc..4fe1b4c50018e 100644 --- a/docs/docs/integrations/tools/google_serper.ipynb +++ b/docs/docs/integrations/tools/google_serper.ipynb @@ -168,10 +168,10 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.utilities import GoogleSerperAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "search = GoogleSerperAPIWrapper()\n", 
diff --git a/docs/docs/integrations/tools/gradio_tools.ipynb b/docs/docs/integrations/tools/gradio_tools.ipynb index 574fd09551d0e..ab5fc59e51697 100644 --- a/docs/docs/integrations/tools/gradio_tools.ipynb +++ b/docs/docs/integrations/tools/gradio_tools.ipynb @@ -183,9 +183,9 @@ " StableDiffusionTool,\n", " TextToVideoTool,\n", ")\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", diff --git a/docs/docs/integrations/tools/graphql.ipynb b/docs/docs/integrations/tools/graphql.ipynb index 981be95990847..de1b350d8059f 100644 --- a/docs/docs/integrations/tools/graphql.ipynb +++ b/docs/docs/integrations/tools/graphql.ipynb @@ -56,9 +56,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/docs/docs/integrations/tools/human_tools.ipynb b/docs/docs/integrations/tools/human_tools.ipynb index 14c09a35fb6a7..100ff68e189f0 100644 --- a/docs/docs/integrations/tools/human_tools.ipynb +++ b/docs/docs/integrations/tools/human_tools.ipynb @@ -27,9 +27,9 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain_openai import ChatOpenAI, OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "math_llm = OpenAI(temperature=0.0)\n", @@ -38,11 +38,7 @@ " llm=math_llm,\n", ")\n", "\n", - "agent_chain = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")" + "agent_chain = 
create_react_agent(model=llm, tools=tools, debug=True)" ] }, { diff --git a/docs/docs/integrations/tools/jira.ipynb b/docs/docs/integrations/tools/jira.ipynb index fd6b3febf7769..ee6f6599021aa 100644 --- a/docs/docs/integrations/tools/jira.ipynb +++ b/docs/docs/integrations/tools/jira.ipynb @@ -73,10 +73,10 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit\n", "from langchain_community.utilities.jira import JiraAPIWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/memorize.ipynb b/docs/docs/integrations/tools/memorize.ipynb index e7abdedde2e29..4fd51d658eb90 100644 --- a/docs/docs/integrations/tools/memorize.ipynb +++ b/docs/docs/integrations/tools/memorize.ipynb @@ -26,11 +26,11 @@ "source": [ "import os\n", "\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain.agents import load_tools\n", "from langchain.chains import LLMChain\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain_community.llms import GradientLLM" + "from langchain_community.llms import GradientLLM\n", + "from langgraph.prebuilt import create_react_agent" ] }, { @@ -121,12 +121,7 @@ "source": [ "memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n", "\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True,\n", - " state_modifier=memory\n", - ")" + "agent = create_react_agent(model=llm, tools=tools, debug=True, state_modifier=memory)" ] }, { diff --git a/docs/docs/integrations/tools/office365.ipynb b/docs/docs/integrations/tools/office365.ipynb index a5d1086963fac..26e1d257be979 100644 --- a/docs/docs/integrations/tools/office365.ipynb +++ b/docs/docs/integrations/tools/office365.ipynb @@ -100,8 +100,8 @@ }, 
"outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/openapi_nla.ipynb b/docs/docs/integrations/tools/openapi_nla.ipynb index 9bb89417d2a5d..0f39cc89df854 100644 --- a/docs/docs/integrations/tools/openapi_nla.ipynb +++ b/docs/docs/integrations/tools/openapi_nla.ipynb @@ -23,10 +23,10 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits import NLAToolkit\n", "from langchain_community.utilities import Requests\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { @@ -127,7 +127,7 @@ " tools=natural_language_tools,\n", " state_modifier={\"format_instructions\": openapi_format_instructions},\n", " debug=True,\n", - ")\n" + ")" ] }, { diff --git a/docs/docs/integrations/tools/openweathermap.ipynb b/docs/docs/integrations/tools/openweathermap.ipynb index 0f7f6d8f83bec..fa9e574337944 100644 --- a/docs/docs/integrations/tools/openweathermap.ipynb +++ b/docs/docs/integrations/tools/openweathermap.ipynb @@ -84,8 +84,8 @@ "import os\n", "\n", "from langchain.agents import load_tools\n", - "from langgraph.prebuilt import create_react_agent\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = \"\"\n", "os.environ[\"OPENWEATHERMAP_API_KEY\"] = \"\"\n", @@ -94,11 +94,7 @@ "\n", "tools = load_tools([\"openweathermap-api\"], llm)\n", "\n", - "agent_chain = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")" + "agent_chain = create_react_agent(model=llm, tools=tools, debug=True)" ] }, { diff --git a/docs/docs/integrations/tools/playwright.ipynb 
b/docs/docs/integrations/tools/playwright.ipynb index f019754a17f91..2bf70dce640de 100644 --- a/docs/docs/integrations/tools/playwright.ipynb +++ b/docs/docs/integrations/tools/playwright.ipynb @@ -246,18 +246,14 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_anthropic import ChatAnthropic\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = ChatAnthropic(\n", " model_name=\"claude-3-haiku-20240307\", temperature=0\n", ") # or any other LLM, e.g., ChatOpenAI(), OpenAI()\n", "\n", - "agent_chain = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")" + "agent_chain = create_react_agent(model=llm, tools=tools, debug=True)" ] }, { diff --git a/docs/docs/integrations/tools/sceneXplain.ipynb b/docs/docs/integrations/tools/sceneXplain.ipynb index 174f76d180ef4..a1bfd869abc0a 100644 --- a/docs/docs/integrations/tools/sceneXplain.ipynb +++ b/docs/docs/integrations/tools/sceneXplain.ipynb @@ -94,9 +94,9 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "memory = ConversationBufferMemory(memory_key=\"chat_history\")\n", diff --git a/docs/docs/integrations/tools/searchapi.ipynb b/docs/docs/integrations/tools/searchapi.ipynb index 037ecf68edc25..6c714fcb1b11d 100644 --- a/docs/docs/integrations/tools/searchapi.ipynb +++ b/docs/docs/integrations/tools/searchapi.ipynb @@ -124,10 +124,10 @@ } ], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.utilities import SearchApiAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = OpenAI(temperature=0)\n", "search = SearchApiAPIWrapper()\n", @@ -139,11 +139,7 @@ 
" )\n", "]\n", "\n", - "self_ask_with_search = create_react_agent(\n", - " model=llm,\n", - " tools=tools,\n", - " debug=True\n", - ")\n", + "self_ask_with_search = create_react_agent(model=llm, tools=tools, debug=True)\n", "\n", "self_ask_with_search.run(\"Who lived longer: Plato, Socrates, or Aristotle?\")" ] diff --git a/docs/docs/integrations/tools/steam.ipynb b/docs/docs/integrations/tools/steam.ipynb index 5f4f626a22210..c01fb303600e7 100644 --- a/docs/docs/integrations/tools/steam.ipynb +++ b/docs/docs/integrations/tools/steam.ipynb @@ -74,10 +74,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.agent_toolkits.steam.toolkit import SteamToolkit\n", "from langchain_community.utilities.steam import SteamWebAPIWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { diff --git a/docs/docs/integrations/tools/yahoo_finance_news.ipynb b/docs/docs/integrations/tools/yahoo_finance_news.ipynb index f3befc161bb20..1ad1756485b21 100644 --- a/docs/docs/integrations/tools/yahoo_finance_news.ipynb +++ b/docs/docs/integrations/tools/yahoo_finance_news.ipynb @@ -52,9 +52,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from langchain_community.tools.yahoo_finance_news import YahooFinanceNewsTool\n", "from langchain_openai import ChatOpenAI\n", + "from langgraph.prebuilt import create_react_agent\n", "\n", "llm = ChatOpenAI(temperature=0.0)\n", "tools = [YahooFinanceNewsTool()]\n", diff --git a/docs/docs/integrations/tools/zapier.ipynb b/docs/docs/integrations/tools/zapier.ipynb index bc6b2665b740e..9e6cd43aa980c 100644 --- a/docs/docs/integrations/tools/zapier.ipynb +++ b/docs/docs/integrations/tools/zapier.ipynb @@ -60,10 +60,10 @@ }, "outputs": [], "source": [ - "from langgraph.prebuilt import create_react_agent\n", "from 
langchain_community.agent_toolkits import ZapierToolkit\n", "from langchain_community.utilities.zapier import ZapierNLAWrapper\n", - "from langchain_openai import OpenAI" + "from langchain_openai import OpenAI\n", + "from langgraph.prebuilt import create_react_agent" ] }, { @@ -91,11 +91,7 @@ "llm = OpenAI(temperature=0)\n", "zapier = ZapierNLAWrapper()\n", "toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=toolkit.get_tools(),\n", - " debug=True\n", - ")" + "agent = create_react_agent(model=llm, tools=toolkit.get_tools(), debug=True)" ] }, { @@ -343,11 +339,7 @@ "llm = OpenAI(temperature=0)\n", "zapier = ZapierNLAWrapper(zapier_nla_oauth_access_token=\"\")\n", "toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)\n", - "agent = create_react_agent(\n", - " model=llm,\n", - " tools=toolkit.get_tools(),\n", - " debug=True\n", - ")\n", + "agent = create_react_agent(model=llm, tools=toolkit.get_tools(), debug=True)\n", "\n", "agent.run(\n", " \"Summarize the last email I received regarding Silicon Valley Bank. Send the summary to the #test-zapier channel in slack.\"\n",