Quickstart
Status: ACTIVE (pulled from docs.langchain.com) Source: https://docs.langchain.com/oss/python/langgraph/quickstart Timestamp: 2026-05-11
Build a calculator agent using either the Graph API or the Functional API.
Graph API
1. Define tools and model
from langchain.tools import tool
from langchain.chat_models import init_chat_model
model = init_chat_model("claude-sonnet-4-6", temperature=0)
@tool
def multiply(a: int, b: int) -> int:
    """Multiply `a` and `b`."""
    return a * b

@tool
def add(a: int, b: int) -> int:
    """Adds `a` and `b`."""
    return a + b
tools = [add, multiply]
tools_by_name = {tool.name: tool for tool in tools}
model_with_tools = model.bind_tools(tools)
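Optionally (not part of the quickstart itself), you can sanity-check the bound model by invoking it directly and inspecting the tool calls it proposes; the exact ids, argument ordering, and whether a tool is called at all vary by provider:
# Optional sanity check: ask the bound model a question that needs a tool.
response = model_with_tools.invoke("What is 3 multiplied by 7?")
for tc in response.tool_calls:
    # Each tool call is a dict with "name", "args", and "id" keys.
    print(tc["name"], tc["args"])
# Illustrative output: multiply {'a': 3, 'b': 7}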
2. Define state
from typing_extensions import TypedDict, Annotated
from langchain.messages import AnyMessage
import operator
class MessagesState(TypedDict):
    messages: Annotated[list[AnyMessage], operator.add]
    llm_calls: int
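The Annotated reducer controls how node updates are merged into state: operator.add appends new messages to the existing list, while keys without a reducer (like llm_calls) are simply overwritten by the latest value a node returns. A rough sketch of that merge behavior, with plain strings standing in for message objects (reuses the operator import above):
# Conceptually, each node returns a partial state update; LangGraph applies
# the key's reducer to merge it with the existing value.
existing = ["msg_1"]          # current value of state["messages"] (illustrative)
update = ["msg_2", "msg_3"]   # what a node returned under "messages"
merged = operator.add(existing, update)   # -> ["msg_1", "msg_2", "msg_3"]
# llm_calls has no reducer, so a returned value simply replaces the old one.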
3. Define nodes and edges
from langchain.messages import SystemMessage, ToolMessage
from langgraph.graph import StateGraph, START, END
def llm_call(state: dict):
    return {
        "messages": [
            model_with_tools.invoke(
                [SystemMessage(content="You are a helpful assistant.")]
                + state["messages"]
            )
        ],
        "llm_calls": state.get("llm_calls", 0) + 1,
    }
def tool_node(state: dict):
    result = []
    for tool_call in state["messages"][-1].tool_calls:
        tool = tools_by_name[tool_call["name"]]
        observation = tool.invoke(tool_call["args"])
        result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
    return {"messages": result}
def should_continue(state: MessagesState):
    if state["messages"][-1].tool_calls:
        return "tool_node"
    return END
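For reference, each entry in .tool_calls on the last AI message is a small dict that tool_node resolves by name and invokes with its args. An illustrative example with made-up values:
# Illustrative shape of one tool call produced by the model (values are made up):
example_tool_call = {
    "name": "add",
    "args": {"a": 3, "b": 4},
    "id": "call_123",      # hypothetical id assigned by the provider
    "type": "tool_call",
}
# tool_node would resolve tools_by_name["add"], call it with {"a": 3, "b": 4},
# then wrap the result (7) in a ToolMessage tied back to "call_123".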
4. Build and compile
agent_builder = StateGraph(MessagesState)
agent_builder.add_node("llm_call", llm_call)
agent_builder.add_node("tool_node", tool_node)
agent_builder.add_edge(START, "llm_call")
agent_builder.add_conditional_edges("llm_call", should_continue, ["tool_node", END])
agent_builder.add_edge("tool_node", "llm_call")
agent = agent_builder.compile()
# Invoke
from langchain.messages import HumanMessage
result = agent.invoke({"messages": [HumanMessage(content="Add 3 and 4.")]})
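The returned state holds the accumulated message list plus the llm_calls counter; one way to inspect it (pretty_print is a convenience method on LangChain messages):
# Inspect the final state returned by the graph.
for message in result["messages"]:
    message.pretty_print()           # prints role and content for each message
print("LLM calls:", result["llm_calls"])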
Functional API
from langgraph.func import entrypoint, task
from langgraph.graph import add_messages
from langchain_core.messages import BaseMessage
from langchain.messages import SystemMessage, HumanMessage
@task
def call_llm(messages: list[BaseMessage]):
    return model_with_tools.invoke(
        [SystemMessage(content="You are a helpful assistant.")] + messages
    )
@task
def call_tool(tool_call):
    tool = tools_by_name[tool_call["name"]]
    return tool.invoke(tool_call)
@entrypoint()
def agent(messages: list[BaseMessage]):
    model_response = call_llm(messages).result()
    while True:
        if not model_response.tool_calls:
            break
        futures = [call_tool(tc) for tc in model_response.tool_calls]
        results = [f.result() for f in futures]
        messages = add_messages(messages, [model_response, *results])
        model_response = call_llm(messages).result()
    messages = add_messages(messages, model_response)
    return messages
messages = [HumanMessage(content="Add 3 and 4.")]
for chunk in agent.stream(messages, stream_mode="updates"):
    print(chunk)
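Besides streaming, the entrypoint can also be run to completion; since the function returns the message list, the last entry holds the final answer. A minimal sketch:
# Alternatively, run the entrypoint to completion and read the final answer.
final_messages = agent.invoke([HumanMessage(content="Add 3 and 4.")])
print(final_messages[-1].content)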
Next Steps
- Graph API overview for a deeper understanding of State, Nodes, and Edges
- Functional API overview for the task/entrypoint model
- Workflows and Agents for common design patterns