Agent#

class langchain.agents.agent.Agent[source]#

Bases: BaseSingleActionAgent

Deprecated since version 0.1.0: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc. instead.

Agent that calls the language model and decides the action.

This is driven by an LLMChain. The prompt in the LLMChain MUST include a variable called "agent_scratchpad" where the agent can put its intermediary work.

Create a new model by parsing and validating input data from keyword arguments.

Raises ValidationError if the input data cannot be parsed to form a valid model.

param allowed_tools: List[str] | None = None#

Allowed tools for the agent. If None, all tools are allowed.

param llm_chain: LLMChain [Required]#

LLMChain to use for agent.

param output_parser: AgentOutputParser [Required]#

Output parser to use for agent.

async aplan(intermediate_steps: List[Tuple[AgentAction, str]], callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, **kwargs: Any) AgentAction | AgentFinish[source]#

Asynchronously decide what to do, given the input.

Parameters:
  • intermediate_steps (List[Tuple[AgentAction, str]]) – Steps the LLM has taken to date, along with observations.

  • callbacks (List[BaseCallbackHandler] | BaseCallbackManager | None) – Callbacks to run.

  • **kwargs (Any) – User inputs.

Returns:

Action specifying what tool to use.

Return type:

AgentAction | AgentFinish

abstract classmethod create_prompt(tools: Sequence[BaseTool]) BasePromptTemplate[source]#

Create a prompt for this class.

Parameters:

tools (Sequence[BaseTool]) – Tools to use.

Returns:

Prompt template.

Return type:

BasePromptTemplate

classmethod from_llm_and_tools(llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: BaseCallbackManager | None = None, output_parser: AgentOutputParser | None = None, **kwargs: Any) Agent[source]#

Construct an agent from an LLM and tools.

Parameters:
  • llm (BaseLanguageModel) – Language model to use.

  • tools (Sequence[BaseTool]) – Tools to use.

  • callback_manager (BaseCallbackManager | None) – Callback manager to use.

  • output_parser (AgentOutputParser | None) – Output parser to use.

  • **kwargs (Any) – Additional keyword arguments.

Returns:

Agent object.

Return type:

Agent

get_allowed_tools() List[str] | None[source]#

Get allowed tools.

Return type:

List[str] | None

get_full_inputs(intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any) Dict[str, Any][source]#

Create the full inputs for the LLMChain from intermediate steps.

Parameters:
  • intermediate_steps (List[Tuple[AgentAction, str]]) – Steps the LLM has taken to date, along with observations.

  • **kwargs (Any) – User inputs.

Returns:

Full inputs for the LLMChain.

Return type:

Dict[str, Any]

plan(intermediate_steps: List[Tuple[AgentAction, str]], callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, **kwargs: Any) AgentAction | AgentFinish[source]#

Decide what to do, given the input.

Parameters:
  • intermediate_steps (List[Tuple[AgentAction, str]]) – Steps the LLM has taken to date, along with observations.

  • callbacks (List[BaseCallbackHandler] | BaseCallbackManager | None) – Callbacks to run.

  • **kwargs (Any) – User inputs.

Returns:

Action specifying what tool to use.

Return type:

AgentAction | AgentFinish

return_stopped_response(early_stopping_method: str, intermediate_steps: List[Tuple[AgentAction, str]], **kwargs: Any) AgentFinish[source]#

Return response when agent has been stopped due to max iterations.

Parameters:
  • early_stopping_method (str) – Method to use for early stopping.

  • intermediate_steps (List[Tuple[AgentAction, str]]) – Steps the LLM has taken to date, along with observations.

  • **kwargs (Any) – User inputs.

Returns:

Agent finish object.

Return type:

AgentFinish

Raises:

ValueError – If early_stopping_method is not in ['force', 'generate'].

save(file_path: Path | str) None#

Save the agent.

Parameters:

file_path (Path | str) – Path to file to save the agent to.

Return type:

None

Example: .. code-block:: python

# If working with an agent executor:
agent.agent.save(file_path="path/agent.yaml")

tool_run_logging_kwargs() Dict[source]#

Return logging kwargs for tool run.

Return type:

Dict

abstract property llm_prefix: str#

Prefix to append the LLM call with.

abstract property observation_prefix: str#

Prefix to append the observation with.

property return_values: List[str]#

Return values of the agent.