From 7bc489f1f4924dad6bbe36c33f3053d4fc4070ad Mon Sep 17 00:00:00 2001
From: Nuno Campos
Date: Tue, 16 Jul 2024 13:50:20 -0700
Subject: [PATCH] Add node state, customizable graph input and output schemas

---
 .../langgraph/langgraph/channels/any_value.py |   3 +
 libs/langgraph/langgraph/channels/binop.py    |   6 +
 libs/langgraph/langgraph/channels/context.py  |   7 +
 .../channels/dynamic_barrier_value.py         |   3 +
 .../langgraph/channels/ephemeral_value.py     |   3 +
 .../langgraph/channels/last_value.py          |   3 +
 .../langgraph/channels/named_barrier_value.py |   3 +
 libs/langgraph/langgraph/channels/topic.py    |   7 +
 libs/langgraph/langgraph/graph/graph.py       |   5 +-
 libs/langgraph/langgraph/graph/state.py       | 224 +++++++++++++++---
 libs/langgraph/pyproject.toml                 |   2 +-
 .../tests/__snapshots__/test_pregel.ambr      |  12 +-
 libs/langgraph/tests/test_pregel.py           |  68 +++++-
 libs/langgraph/tests/test_pregel_async.py     |  60 +++++
 14 files changed, 358 insertions(+), 48 deletions(-)

diff --git a/libs/langgraph/langgraph/channels/any_value.py b/libs/langgraph/langgraph/channels/any_value.py
index 7c2bd2a70..345daa7c7 100644
--- a/libs/langgraph/langgraph/channels/any_value.py
+++ b/libs/langgraph/langgraph/channels/any_value.py
@@ -15,6 +15,9 @@ class AnyValue(Generic[Value], BaseChannel[Value, Value, Value]):
     def __init__(self, typ: Type[Value]) -> None:
         self.typ = typ
 
+    def __eq__(self, value: object) -> bool:
+        return isinstance(value, AnyValue)
+
     @property
     def ValueType(self) -> Type[Value]:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/binop.py b/libs/langgraph/langgraph/channels/binop.py
index 685713eeb..e7969a100 100644
--- a/libs/langgraph/langgraph/channels/binop.py
+++ b/libs/langgraph/langgraph/channels/binop.py
@@ -55,6 +55,12 @@ def __init__(self, typ: Type[Value], operator: Callable[[Value, Value], Value]):
         except Exception:
             pass
 
+    def __eq__(self, value: object) -> bool:
+        return (
+            isinstance(value, BinaryOperatorAggregate)
+            and value.operator == self.operator
+        )
+
     @property
     def ValueType(self) -> Type[Value]:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/context.py b/libs/langgraph/langgraph/channels/context.py
index 6d168460a..914de9348 100644
--- a/libs/langgraph/langgraph/channels/context.py
+++ b/libs/langgraph/langgraph/channels/context.py
@@ -47,6 +47,13 @@ def __init__(
         self.ctx = ctx
         self.actx = actx
 
+    def __eq__(self, value: object) -> bool:
+        return (
+            isinstance(value, Context)
+            and value.ctx == self.ctx
+            and value.actx == self.actx
+        )
+
     @property
     def ValueType(self) -> Any:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/dynamic_barrier_value.py b/libs/langgraph/langgraph/channels/dynamic_barrier_value.py
index c27c8ed85..bb0d447fa 100644
--- a/libs/langgraph/langgraph/channels/dynamic_barrier_value.py
+++ b/libs/langgraph/langgraph/channels/dynamic_barrier_value.py
@@ -32,6 +32,9 @@ def __init__(self, typ: Type[Value]) -> None:
         self.names = None
         self.seen = set()
 
+    def __eq__(self, value: object) -> bool:
+        return isinstance(value, DynamicBarrierValue) and value.names == self.names
+
     @property
     def ValueType(self) -> Type[Value]:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/ephemeral_value.py b/libs/langgraph/langgraph/channels/ephemeral_value.py
index 6c6f49dd7..15e11550d 100644
--- a/libs/langgraph/langgraph/channels/ephemeral_value.py
+++ b/libs/langgraph/langgraph/channels/ephemeral_value.py
@@ -15,6 +15,9 @@ def __init__(self, typ: Type[Value], guard: bool = True) -> None:
         self.typ = typ
         self.guard = guard
 
+    def __eq__(self, value: object) -> bool:
+        return isinstance(value, EphemeralValue) and value.guard == self.guard
+
     @property
     def ValueType(self) -> Type[Value]:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/last_value.py b/libs/langgraph/langgraph/channels/last_value.py
index d03e3eb13..a207ebce3 100644
--- a/libs/langgraph/langgraph/channels/last_value.py
+++ b/libs/langgraph/langgraph/channels/last_value.py
@@ -14,6 +14,9 @@ class LastValue(Generic[Value], BaseChannel[Value, Value, Value]):
     def __init__(self, typ: Type[Value]) -> None:
         self.typ = typ
 
+    def __eq__(self, value: object) -> bool:
+        return isinstance(value, LastValue)
+
     @property
     def ValueType(self) -> Type[Value]:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/named_barrier_value.py b/libs/langgraph/langgraph/channels/named_barrier_value.py
index 99532bbe8..bdfd4660b 100644
--- a/libs/langgraph/langgraph/channels/named_barrier_value.py
+++ b/libs/langgraph/langgraph/channels/named_barrier_value.py
@@ -16,6 +16,9 @@ def __init__(self, typ: Type[Value], names: set[Value]) -> None:
         self.names = names
         self.seen = set()
 
+    def __eq__(self, value: object) -> bool:
+        return isinstance(value, NamedBarrierValue) and value.names == self.names
+
     @property
     def ValueType(self) -> Type[Value]:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/channels/topic.py b/libs/langgraph/langgraph/channels/topic.py
index 0268043c9..5e7af6a26 100644
--- a/libs/langgraph/langgraph/channels/topic.py
+++ b/libs/langgraph/langgraph/channels/topic.py
@@ -41,6 +41,13 @@ def __init__(
         self.seen = set[Value]()
         self.values = list[Value]()
 
+    def __eq__(self, value: object) -> bool:
+        return (
+            isinstance(value, Topic)
+            and value.unique == self.unique
+            and value.accumulate == self.accumulate
+        )
+
     @property
     def ValueType(self) -> Any:
         """The type of the value stored in the channel."""
diff --git a/libs/langgraph/langgraph/graph/graph.py b/libs/langgraph/langgraph/graph/graph.py
index 24c6396a9..4841642f5 100644
--- a/libs/langgraph/langgraph/graph/graph.py
+++ b/libs/langgraph/langgraph/graph/graph.py
@@ -480,7 +480,8 @@ def add_edge(
                 start_nodes[start], end_nodes[end], label, conditional
             )
 
-        for key, (node, metadata) in self.builder.nodes.items():
+        for key, n in self.builder.nodes.items():
+            node = n.runnable
             if xray:
                 subgraph = (
                     node.get_graph(
@@ -501,7 +502,7 @@ def add_edge(
                 start_nodes[key] = n
                 end_nodes[key] = n
             else:
-                n = graph.add_node(node, key, metadata=metadata)
+                n = graph.add_node(node, key, metadata=n.metadata)
                 start_nodes[key] = n
                 end_nodes[key] = n
         for start, end in sorted(self.builder._all_edges):
diff --git a/libs/langgraph/langgraph/graph/state.py b/libs/langgraph/langgraph/graph/state.py
index e8178bf28..518c73670 100644
--- a/libs/langgraph/langgraph/graph/state.py
+++ b/libs/langgraph/langgraph/graph/state.py
@@ -2,9 +2,10 @@
 import typing
 import warnings
 from functools import partial
-from inspect import signature
+from inspect import isclass, isfunction, signature
 from typing import (
     Any,
+    NamedTuple,
     Optional,
     Sequence,
     Type,
@@ -17,6 +18,9 @@
 from langchain_core.pydantic_v1 import BaseModel
 from langchain_core.runnables import Runnable, RunnableConfig
 from langchain_core.runnables.base import RunnableLike
+from langchain_core.runnables.utils import (
+    create_model,
+)
 
 from langgraph.channels.base import BaseChannel
 from langgraph.channels.binop import BinaryOperatorAggregate
@@ -34,14 +38,13 @@
     Branch,
     CompiledGraph,
     Graph,
-    NodeSpec,
     Send,
 )
 from langgraph.managed.base import ManagedValue, is_managed_value
 from langgraph.pregel.read import ChannelRead, PregelNode
 from langgraph.pregel.types import All
 from langgraph.pregel.write import SKIP_WRITE, ChannelWrite, ChannelWriteEntry
-from langgraph.utils import RunnableCallable
+from langgraph.utils import RunnableCallable, coerce_to_runnable
 
 logger = logging.getLogger(__name__)
 
@@ -58,6 +61,13 @@ def _warn_invalid_state_schema(schema: Union[Type[Any], Any]) -> None:
     )
 
 
+class StateNodeSpec(NamedTuple):
+    runnable: Runnable
+    metadata: dict[str, Any]
+    input: Type[Any]
+    output: Type[Any]
+
+
 class StateGraph(Graph):
     """A graph whose nodes communicate by reading and writing to a shared state.
     The signature of each node is State -> Partial.
@@ -109,16 +119,39 @@ class StateGraph(Graph):
         >>> print(step1)
         {'x': [0.5, 0.75]}"""
 
+    nodes: dict[str, StateNodeSpec]
+    channels: dict[str, BaseChannel]
+    managed: dict[str, Type[ManagedValue]]
+    schemas: dict[Type[Any], dict[str, Union[BaseChannel, Type[ManagedValue]]]]
+
     def __init__(
-        self, state_schema: Type[Any], config_schema: Optional[Type[Any]] = None
+        self,
+        state_schema: Optional[Type[Any]] = None,
+        config_schema: Optional[Type[Any]] = None,
+        *,
+        input: Optional[Type[Any]] = None,
+        output: Optional[Type[Any]] = None,
     ) -> None:
         super().__init__()
-        _warn_invalid_state_schema(state_schema)
+        if state_schema is None:
+            if input is None or output is None:
+                raise ValueError("Must provide state_schema or input and output")
+            state_schema = input
+        else:
+            if input is None:
+                input = state_schema
+            if output is None:
+                output = state_schema
+        self.schemas = {}
+        self.channels = {}
+        self.managed = {}
         self.schema = state_schema
+        self.input = input
+        self.output = output
+        self._add_schema(state_schema)
+        self._add_schema(input)
+        self._add_schema(output)
         self.config_schema = config_schema
-        self.channels, self.managed = _get_channels(state_schema)
-        if any(isinstance(c, BinaryOperatorAggregate) for c in self.channels.values()):
-            self.support_multiple_edges = True
         self.waiting_edges: set[tuple[tuple[str, ...], str]] = set()
 
     @property
@@ -127,8 +160,41 @@ def _all_edges(self) -> set[tuple[str, str]]:
             (start, end) for starts, end in self.waiting_edges for start in starts
         }
 
+    def _add_schema(self, schema: Type[Any]) -> None:
+        if schema not in self.schemas:
+            _warn_invalid_state_schema(schema)
+            channels, managed = _get_channels(schema)
+            self.schemas[schema] = {**channels, **managed}
+            for key, channel in channels.items():
+                if key in self.channels:
+                    if self.channels[key] != channel:
+                        raise ValueError(
+                            f"Channel '{key}' already exists with a different type"
+                        )
+                else:
+                    self.channels[key] = channel
+            for key, managed in managed.items():
+                if key in self.managed:
+                    if self.managed[key] != managed:
+                        raise ValueError(
+                            f"Managed value '{key}' already exists with a different type"
+                        )
+                else:
+                    self.managed[key] = managed
+            if any(
+                isinstance(c, BinaryOperatorAggregate) for c in self.channels.values()
+            ):
+                self.support_multiple_edges = True
+
     @overload
-    def add_node(self, node: RunnableLike) -> None:
+    def add_node(
+        self,
+        node: RunnableLike,
+        *,
+        metadata: Optional[dict[str, Any]] = None,
+        input: Optional[Type[Any]] = None,
+        output: Optional[Type[Any]] = None,
+    ) -> None:
         """Adds a new node to the state graph.
 
         Will take the name of the function/runnable as the node name.
@@ -144,7 +210,15 @@ def add_node(self, node: str, action: RunnableLike) -> None:
         ...
 
     @overload
-    def add_node(self, node: str, action: RunnableLike) -> None:
+    def add_node(
+        self,
+        node: str,
+        action: RunnableLike,
+        *,
+        metadata: Optional[dict[str, Any]] = None,
+        input: Optional[Type[Any]] = None,
+        output: Optional[Type[Any]] = None,
+    ) -> None:
         """Adds a new node to the state graph.
 
         Args:
@@ -160,7 +234,13 @@ def add_node(self, node: str, action: RunnableLike) -> None:
         ...
 
     def add_node(
-        self, node: Union[str, RunnableLike], action: Optional[RunnableLike] = None
+        self,
+        node: Union[str, RunnableLike],
+        action: Optional[RunnableLike] = None,
+        *,
+        metadata: Optional[dict[str, Any]] = None,
+        input: Optional[Type[Any]] = None,
+        output: Optional[Type[Any]] = None,
     ) -> None:
         """Adds a new node to the state graph.
 
@@ -213,7 +293,42 @@ def add_node(
             )
         if node in self.channels:
             raise ValueError(f"'{node}' is already being used as a state key")
-        return super().add_node(node, action)
+        if self.compiled:
+            logger.warning(
+                "Adding a node to a graph that has already been compiled. This will "
+                "not be reflected in the compiled graph."
+            )
+        if not isinstance(node, str):
+            action = node
+            node = getattr(action, "name", action.__name__)
+        if node in self.nodes:
+            raise ValueError(f"Node `{node}` already present.")
+        if node == END or node == START:
+            raise ValueError(f"Node `{node}` is reserved.")
+        try:
+            if isfunction(action) and (
+                hints := get_type_hints(action.__call__) or get_type_hints(action)
+            ):
+                if input is None:
+                    input_hint = hints[list(hints.keys())[0]]
+                    if isinstance(input_hint, type) and get_type_hints(input_hint):
+                        input = input_hint
+                if output is None:
+                    output_hint = hints.get("return", Any)
+                    if isinstance(output_hint, type) and get_type_hints(output_hint):
+                        output = output_hint
+        except TypeError:
+            pass
+        if input is not None:
+            self._add_schema(input)
+        if output is not None:
+            self._add_schema(output)
+        self.nodes[node] = StateNodeSpec(
+            coerce_to_runnable(action, name=node, trace=False),
+            metadata,
+            input=input or self.schema,
+            output=output or self.schema,
+        )
 
     def add_edge(self, start_key: Union[str, list[str]], end_key: str) -> None:
         """Adds a directed edge from the start node to the end node.
@@ -287,15 +402,14 @@ def compile(
         )
 
         # prepare output channels
-        state_keys = list(self.channels)
         output_channels = (
-            state_keys[0]
-            if state_keys == ["__root__"]
+            "__root__"
+            if len(self.schemas[self.output]) == 1
+            and "__root__" in self.schemas[self.output]
             else [
                 key
-                for key in state_keys
-                if not isinstance(self.channels[key], Context)
-                and not is_managed_value(self.channels[key])
+                for key, val in self.schemas[self.output].items()
+                if not isinstance(val, Context) and not is_managed_value(val)
             ]
         )
 
@@ -303,7 +417,7 @@
             builder=self,
             config_type=self.config_schema,
             nodes={},
-            channels={**self.channels, START: EphemeralValue(self.schema)},
+            channels={**self.channels, START: EphemeralValue(self.input)},
             input_channels=START,
             stream_mode="updates",
             output_channels=output_channels,
@@ -338,10 +452,52 @@ class CompiledStateGraph(CompiledGraph):
     def get_input_schema(
         self, config: Optional[RunnableConfig] = None
     ) -> type[BaseModel]:
-        return self.get_output_schema(config)
+        if isclass(self.builder.input) and issubclass(self.builder.input, BaseModel):
+            return self.builder.input
+        else:
+            keys = list(self.builder.schemas[self.builder.input].keys())
+            if len(keys) == 1 and keys[0] == "__root__":
+                return create_model(  # type: ignore[call-overload]
+                    self.get_name("Input"),
+                    __root__=(self.channels[keys[0]].UpdateType, None),
+                )
+            else:
+                return create_model(  # type: ignore[call-overload]
+                    self.get_name("Input"),
+                    **{
+                        k: (self.channels[k].UpdateType, None)
+                        for k in self.builder.schemas[self.builder.input]
+                        if k in self.channels
+                        and not isinstance(self.channels[k], Context)
+                    },
+                )
 
-    def attach_node(self, key: str, node: Optional[NodeSpec]) -> None:
-        state_keys = list(self.builder.channels)
+    def get_output_schema(
+        self, config: Optional[RunnableConfig] = None
+    ) -> type[BaseModel]:
+        if isclass(self.builder.output) and issubclass(self.builder.output, BaseModel):
+            return self.builder.output
+
+        return super().get_output_schema(config)
+
+    def attach_node(self, key: str, node: Optional[StateNodeSpec]) -> None:
+        if key == START:
+            input_schema = self.builder.input
+        else:
+            input_schema = node.input if node else self.builder.schema
+        input_values = {
+            k: v if is_managed_value(v) else k
+            for k, v in self.builder.schemas[input_schema].items()
+        }
+        is_single_input = len(input_values) == 1 and "__root__" in input_values
+
+        output_keys = [
+            k
+            for k, v in self.builder.schemas[
+                node.output if node else self.builder.schema
+            ].items()
+            if not is_managed_value(v)
+        ]
 
         def _get_state_key(input: dict, config: RunnableConfig, *, key: str) -> Any:
             if input is None:
@@ -355,9 +511,9 @@ def _get_state_key(input: dict, config: RunnableConfig, *, key: str) -> Any:
                 raise InvalidUpdateError(f"Expected dict, got {input}")
 
         # state updaters
-        state_write_entries = (
+        write_entries = (
             [ChannelWriteEntry("__root__", skip_none=True)]
-            if state_keys == ["__root__"]
+            if output_keys == ["__root__"]
             else [
                 ChannelWriteEntry(
                     key,
@@ -365,7 +521,7 @@ def _get_state_key(input: dict, config: RunnableConfig, *, key: str) -> Any:
                     mapper=RunnableCallable(
                         _get_state_key, key=key, trace=False, recurse=False
                     ),
                 )
-                for key in state_keys
+                for key in output_keys
             ]
         )
 
@@ -377,9 +533,9 @@ def _get_state_key(input: dict, config: RunnableConfig, *, key: str) -> Any:
                 channels=[START],
                 writers=[
                     ChannelWrite(
-                        state_write_entries,
+                        write_entries,
                         tags=[TAG_HIDDEN],
-                        require_at_least_one_of=state_keys,
+                        require_at_least_one_of=output_keys,
                     ),
                 ],
             )
@@ -388,23 +544,17 @@ def _get_state_key(input: dict, config: RunnableConfig, *, key: str) -> Any:
         self.nodes[key] = PregelNode(
             triggers=[],
             # read state keys and managed values
-            channels=(
-                state_keys
-                if state_keys == ["__root__"]
-                else ({chan: chan for chan in state_keys} | self.builder.managed)
-            ),
+            channels=(list(input_values) if is_single_input else input_values),
             # coerce state dict to schema class (eg. pydantic model)
             mapper=(
-                None
-                if state_keys == ["__root__"]
-                else partial(_coerce_state, self.builder.schema)
+                None if is_single_input else partial(_coerce_state, input_schema)
             ),
             writers=[
                 # publish to this channel and state keys
                 ChannelWrite(
-                    [ChannelWriteEntry(key, key)] + state_write_entries,
+                    [ChannelWriteEntry(key, key)] + write_entries,
                     tags=[TAG_HIDDEN],
-                    require_at_least_one_of=state_keys,
+                    require_at_least_one_of=output_keys,
                 ),
             ],
             metadata=node.metadata,
diff --git a/libs/langgraph/pyproject.toml b/libs/langgraph/pyproject.toml
index 1d0732ca5..9010c23d3 100644
--- a/libs/langgraph/pyproject.toml
+++ b/libs/langgraph/pyproject.toml
@@ -61,7 +61,7 @@ omit = ["tests/*"]
 [tool.pytest-watcher]
 now = true
 delay = 0.1
-runner_args = ["--ff", "-vv", "--snapshot-update"]
+runner_args = ["-x", "--ff", "-vv", "--snapshot-update"]
 patterns = ["*.py"]
 
 [build-system]
diff --git a/libs/langgraph/tests/__snapshots__/test_pregel.ambr b/libs/langgraph/tests/__snapshots__/test_pregel.ambr
index 28b27a82c..bbfb425eb 100644
--- a/libs/langgraph/tests/__snapshots__/test_pregel.ambr
+++ b/libs/langgraph/tests/__snapshots__/test_pregel.ambr
@@ -115,7 +115,7 @@
   '''
 # ---
 # name: test_conditional_entrypoint_graph_state
-  '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "output": {"title": "Output", "type": "string"}, "steps": {"title": "Steps", "type": "array", "items": {"type": "string"}}}}'
+  '{"title": "LangGraphInput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "output": {"title": "Output", "type": "string"}, "steps": {"title": "Steps", "type": "array", "items": {"type": "string"}}}}'
 # ---
 # name: test_conditional_entrypoint_graph_state.1
   '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "output": {"title": "Output", "type": "string"}, "steps": {"title": "Steps", "type": "array", "items": {"type": "string"}}}}'
@@ -196,7 +196,7 @@
   '''
 # ---
 # name: test_conditional_entrypoint_to_multiple_state_graph
-  '{"title": "LangGraphOutput", "type": "object", "properties": {"locations": {"title": "Locations", "type": "array", "items": {"type": "string"}}, "results": {"title": "Results", "type": "array", "items": {"type": "string"}}}}'
+  '{"title": "LangGraphInput", "type": "object", "properties": {"locations": {"title": "Locations", "type": "array", "items": {"type": "string"}}, "results": {"title": "Results", "type": "array", "items": {"type": "string"}}}}'
 # ---
 # name: test_conditional_entrypoint_to_multiple_state_graph.1
   '{"title": "LangGraphOutput", "type": "object", "properties": {"locations": {"title": "Locations", "type": "array", "items": {"type": "string"}}, "results": {"title": "Results", "type": "array", "items": {"type": "string"}}}}'
@@ -509,7 +509,7 @@
   '''
 # ---
 # name: test_conditional_state_graph
-  '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title":
"Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "The final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' + '{"title": "LangGraphInput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "The final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' # --- # name: test_conditional_state_graph.1 '{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. 
The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "The final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}' @@ -629,7 +629,7 @@ ''' # --- # name: test_message_graph - '{"title": "LangGraphOutput", "type": "array", "items": {"anyOf": [{"$ref": "#/definitions/AIMessage"}, {"$ref": "#/definitions/HumanMessage"}, {"$ref": "#/definitions/ChatMessage"}, {"$ref": "#/definitions/SystemMessage"}, {"$ref": "#/definitions/FunctionMessage"}, {"$ref": "#/definitions/ToolMessage"}]}, "definitions": {"ToolCall": {"title": "ToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"title": "Id", "type": "string"}, "type": {"title": "Type", "enum": ["tool_call"], "type": "string"}}, "required": ["name", "args", "id"]}, "InvalidToolCall": {"title": "InvalidToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "string"}, "id": {"title": "Id", "type": "string"}, "error": {"title": "Error", "type": "string"}, "type": {"title": "Type", "enum": ["invalid_tool_call"], "type": "string"}}, "required": ["name", "args", "id", "error"]}, "UsageMetadata": {"title": "UsageMetadata", "type": "object", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}}, "required": ["input_tokens", "output_tokens", "total_tokens"]}, "AIMessage": {"title": "AIMessage", "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "ai", "enum": ["ai"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}, "tool_calls": {"title": "Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/ToolCall"}}, "invalid_tool_calls": {"title": "Invalid Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/InvalidToolCall"}}, "usage_metadata": {"$ref": "#/definitions/UsageMetadata"}}, "required": ["content"]}, "HumanMessage": 
{"title": "HumanMessage", "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "human", "enum": ["human"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}}, "required": ["content"]}, "ChatMessage": {"title": "ChatMessage", "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "chat", "enum": ["chat"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"]}, "SystemMessage": {"title": "SystemMessage", "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "system", "enum": ["system"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content"]}, "FunctionMessage": {"title": "FunctionMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "function", "enum": ["function"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "name"]}, "ToolMessage": {"title": "ToolMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "tool", "enum": ["tool"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"title": "Artifact"}}, "required": ["content", "tool_call_id"]}}}' + '{"title": "LangGraphInput", "type": "array", "items": {"anyOf": [{"$ref": "#/definitions/AIMessage"}, {"$ref": "#/definitions/HumanMessage"}, {"$ref": "#/definitions/ChatMessage"}, {"$ref": "#/definitions/SystemMessage"}, {"$ref": "#/definitions/FunctionMessage"}, {"$ref": "#/definitions/ToolMessage"}]}, "definitions": {"ToolCall": {"title": "ToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"title": "Id", "type": "string"}, "type": {"title": "Type", "enum": ["tool_call"], "type": "string"}}, "required": ["name", "args", "id"]}, "InvalidToolCall": {"title": "InvalidToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "string"}, "id": {"title": "Id", "type": "string"}, "error": {"title": "Error", "type": "string"}, "type": {"title": "Type", "enum": ["invalid_tool_call"], "type": "string"}}, "required": ["name", "args", "id", "error"]}, "UsageMetadata": {"title": "UsageMetadata", "type": "object", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}}, "required": ["input_tokens", "output_tokens", "total_tokens"]}, "AIMessage": {"title": "AIMessage", "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "ai", "enum": ["ai"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}, "tool_calls": {"title": "Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/ToolCall"}}, "invalid_tool_calls": {"title": "Invalid Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/InvalidToolCall"}}, "usage_metadata": {"$ref": "#/definitions/UsageMetadata"}}, "required": ["content"]}, "HumanMessage": {"title": "HumanMessage", "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "human", "enum": ["human"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}}, "required": ["content"]}, "ChatMessage": {"title": "ChatMessage", "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "chat", "enum": ["chat"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"]}, "SystemMessage": {"title": "SystemMessage", "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "system", "enum": ["system"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content"]}, "FunctionMessage": {"title": "FunctionMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "function", "enum": ["function"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "name"]}, "ToolMessage": {"title": "ToolMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "tool", "enum": ["tool"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"title": "Artifact"}}, "required": ["content", "tool_call_id"]}}}' # --- # name: test_message_graph.1 '{"title": "LangGraphOutput", "type": "array", "items": {"anyOf": [{"$ref": "#/definitions/AIMessage"}, {"$ref": "#/definitions/HumanMessage"}, {"$ref": "#/definitions/ChatMessage"}, {"$ref": "#/definitions/SystemMessage"}, {"$ref": "#/definitions/FunctionMessage"}, {"$ref": "#/definitions/ToolMessage"}]}, "definitions": {"ToolCall": {"title": "ToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"title": "Id", "type": "string"}, "type": {"title": "Type", "enum": ["tool_call"], "type": "string"}}, "required": ["name", "args", "id"]}, "InvalidToolCall": {"title": "InvalidToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "string"}, "id": {"title": "Id", "type": "string"}, "error": {"title": "Error", "type": "string"}, "type": {"title": "Type", "enum": ["invalid_tool_call"], "type": "string"}}, "required": ["name", "args", "id", "error"]}, "UsageMetadata": {"title": "UsageMetadata", "type": "object", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}}, "required": ["input_tokens", "output_tokens", "total_tokens"]}, "AIMessage": {"title": "AIMessage", "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "ai", "enum": ["ai"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}, "tool_calls": {"title": "Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/ToolCall"}}, "invalid_tool_calls": {"title": "Invalid Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/InvalidToolCall"}}, "usage_metadata": {"$ref": "#/definitions/UsageMetadata"}}, "required": ["content"]}, "HumanMessage": {"title": "HumanMessage", "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "human", "enum": ["human"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}}, "required": ["content"]}, "ChatMessage": {"title": "ChatMessage", "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "chat", "enum": ["chat"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"]}, "SystemMessage": {"title": "SystemMessage", "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "system", "enum": ["system"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content"]}, "FunctionMessage": {"title": "FunctionMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "function", "enum": ["function"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "name"]}, "ToolMessage": {"title": "ToolMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "tool", "enum": ["tool"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"title": "Artifact"}}, "required": ["content", "tool_call_id"]}}}' @@ -887,7 +887,7 @@ ''' # --- # name: test_prebuilt_chat - '{"title": "LangGraphOutput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' + '{"title": "LangGraphInput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' # --- # name: test_prebuilt_chat.1 '{"title": "LangGraphOutput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' @@ -969,7 +969,7 @@ ''' # --- # name: test_prebuilt_tool_chat - '{"title": "LangGraphOutput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", 
"description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' + '{"title": "LangGraphInput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' # --- # name: test_prebuilt_tool_chat.1 '{"title": "LangGraphOutput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}' diff --git a/libs/langgraph/tests/test_pregel.py b/libs/langgraph/tests/test_pregel.py index 66d1b26fb..7fdd55913 100644 --- a/libs/langgraph/tests/test_pregel.py +++ b/libs/langgraph/tests/test_pregel.py @@ -232,6 +232,64 @@ def logic(inp: str) -> str: graph.invoke("", {"configurable": {"thread_id": "thread-1"}}) +def test_node_schemas() -> None: + from langchain_core.messages import HumanMessage + + class State(TypedDict): + hello: str + bye: str + messages: Annotated[list[str], add_messages] + + class StateForA(TypedDict): + hello: str + messages: Annotated[list[str], add_messages] + + def node_a(state: StateForA) -> State: + assert state == { + "hello": "there", + "messages": [HumanMessage(content="hello", id=AnyStr())], + } + + class StateForB(TypedDict): + bye: str + now: int + + def node_b(state: StateForB) -> StateForB: + assert state == { + "bye": "world", + "now": None, + } + return { + "now": 123, + "hello": "again", # ignored because not in output schema + } + + class StateForC(TypedDict): + hello: str + now: int + + def node_c(state: StateForC) -> StateForC: + assert state == { + "hello": "there", + "now": 123, + } + + builder = StateGraph(State) + builder.add_node("a", node_a) + builder.add_node("b", node_b) + builder.add_node("c", node_c) + builder.add_edge(START, 
"a") + builder.add_edge("a", "b") + builder.add_edge("b", "c") + graph = builder.compile() + + assert graph.invoke({"hello": "there", "bye": "world", "messages": "hello"}) == { + "hello": "there", + "bye": "world", + "messages": [HumanMessage(content="hello", id=AnyStr())], + } + + def test_reducer_before_first_node() -> None: from langchain_core.messages import HumanMessage @@ -2241,6 +2299,10 @@ class AgentState(TypedDict, total=False): intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add] session: Annotated[httpx.Client, Context(httpx.Client)] + class ToolState(TypedDict, total=False): + agent_outcome: Union[AgentAction, AgentFinish] + session: Annotated[httpx.Client, Context(httpx.Client)] + # Assemble the tools @tool() def search_api(query: str) -> str: @@ -2279,9 +2341,11 @@ def agent_parser(input: str) -> dict[str, Union[AgentAction, AgentFinish]]: agent = prompt | llm | agent_parser # Define tool execution logic - def execute_tools(data: AgentState) -> dict: + def execute_tools(data: ToolState) -> dict: # check session in data assert isinstance(data["session"], httpx.Client) + assert "input" not in data + assert "intermediate_steps" not in data # execute the tool agent_action: AgentAction = data.pop("agent_outcome") observation = {t.name: t for t in tools}[agent_action.tool].invoke( @@ -2303,7 +2367,7 @@ def should_continue(data: AgentState) -> str: workflow = StateGraph(AgentState) workflow.add_node("agent", agent) - workflow.add_node("tools", execute_tools) + workflow.add_node("tools", execute_tools, input=ToolState) workflow.set_entry_point("agent") diff --git a/libs/langgraph/tests/test_pregel_async.py b/libs/langgraph/tests/test_pregel_async.py index 41f99d53f..47a9c0b4f 100644 --- a/libs/langgraph/tests/test_pregel_async.py +++ b/libs/langgraph/tests/test_pregel_async.py @@ -389,6 +389,66 @@ async def alittlewhile(input: State) -> None: await checkpointer.__aexit__(None, None, None) +async def test_node_schemas() -> None: + from langchain_core.messages import HumanMessage + + class State(TypedDict): + hello: str + bye: str + messages: Annotated[list[str], add_messages] + + class StateForA(TypedDict): + hello: str + messages: Annotated[list[str], add_messages] + + async def node_a(state: StateForA) -> State: + assert state == { + "hello": "there", + "messages": [HumanMessage(content="hello", id=AnyStr())], + } + + class StateForB(TypedDict): + bye: str + now: int + + async def node_b(state: StateForB) -> StateForB: + assert state == { + "bye": "world", + "now": None, + } + return { + "now": 123, + "hello": "again", # ignored because not in output schema + } + + class StateForC(TypedDict): + hello: str + now: int + + async def node_c(state: StateForC) -> StateForC: + assert state == { + "hello": "there", + "now": 123, + } + + builder = StateGraph(State) + builder.add_node("a", node_a) + builder.add_node("b", node_b) + builder.add_node("c", node_c) + builder.add_edge(START, "a") + builder.add_edge("a", "b") + builder.add_edge("b", "c") + graph = builder.compile() + + assert await graph.ainvoke( + {"hello": "there", "bye": "world", "messages": "hello"} + ) == { + "hello": "there", + "bye": "world", + "messages": [HumanMessage(content="hello", id=AnyStr())], + } + + async def test_invoke_single_process_in_out(mocker: MockerFixture) -> None: add_one = mocker.Mock(side_effect=lambda x: x + 1) chain = Channel.subscribe_to("input") | add_one | Channel.write_to("output")