Skip to content

LLM

Base LLM.

BaseLLM

Bases: ABC

Base LLM Class.

Source code in src/llm_agents_from_scratch/base/llm.py
class BaseLLM(ABC):
    """Base LLM Class.

    Abstract interface that concrete LLM backends implement. All methods
    are async and abstract; subclasses must provide the actual model calls.
    """

    @abstractmethod
    async def complete(self, prompt: str, **kwargs: Any) -> CompleteResult:
        """Text Complete.

        Args:
            prompt (str): The prompt the LLM should use as input.
            **kwargs (Any): Additional keyword arguments.

        Returns:
            CompleteResult: The completion of the prompt.
        """

    @abstractmethod
    async def structured_output(
        self,
        prompt: str,
        mdl: type[StructuredOutputType],
        **kwargs: Any,
    ) -> StructuredOutputType:
        """Structured output interface for returning ~pydantic.BaseModels.

        Args:
            prompt (str): The prompt to elicit the structured output response.
            mdl (type[StructuredOutputType]): The ~pydantic.BaseModel to output.
            **kwargs (Any): Additional keyword arguments.

        Returns:
            StructuredOutputType: The structured output as the specified `mdl`
                type.
        """

    @abstractmethod
    async def chat(
        self,
        input: str,
        chat_history: Sequence[ChatMessage] | None = None,
        tools: Sequence[Tool] | None = None,
        **kwargs: Any,
    ) -> tuple[ChatMessage, ChatMessage]:
        """Chat interface.

        Args:
            input (str): The user's current input.
            chat_history (Sequence[ChatMessage]|None, optional): Chat history.
            tools (Sequence[Tool]|None, optional): Tools that the LLM
                can call.
            **kwargs (Any): Additional keyword arguments.

        Returns:
            tuple[ChatMessage, ChatMessage]: A tuple of ChatMessage with the
                first message corresponding to the ChatMessage created from the
                supplied input string, and the second being the structured
                response ChatMessage from the LLM.
        """

    @abstractmethod
    async def continue_chat_with_tool_results(
        self,
        tool_call_results: Sequence[ToolCallResult],
        chat_history: Sequence[ChatMessage],
        tools: Sequence[Tool] | None = None,
        **kwargs: Any,
    ) -> tuple[list[ChatMessage], ChatMessage]:
        """Continue a chat by submitting tool call results.

        Args:
            tool_call_results (Sequence[ToolCallResult]): Tool call results
                to submit back to the LLM.
            chat_history (Sequence[ChatMessage]): The chat history.
            tools (Sequence[Tool]|None, optional): Tools that the LLM
                can call.
            **kwargs (Any): Additional keyword arguments.

        Returns:
            tuple[list[ChatMessage], ChatMessage]: A tuple whose first element
                is a list of ChatMessage objects corresponding to the
                supplied ToolCallResult converted objects. The second element
                is the response ChatMessage from the LLM.
        """

complete abstractmethod async

complete(prompt, **kwargs)

Text Complete.

Parameters:

Name Type Description Default
prompt str

The prompt the LLM should use as input.

required
**kwargs Any

Additional keyword arguments.

{}

Returns:

Name Type Description
CompleteResult CompleteResult

The completion of the prompt.

Source code in src/llm_agents_from_scratch/base/llm.py
@abstractmethod
async def complete(self, prompt: str, **kwargs: Any) -> CompleteResult:
    """Text Complete.

    Args:
        prompt (str): The prompt the LLM should use as input.
        **kwargs (Any): Additional keyword arguments.

    Returns:
        CompleteResult: The completion of the prompt.
    """

structured_output abstractmethod async

structured_output(prompt, mdl, **kwargs)

Structured output interface for returning ~pydantic.BaseModels.

Parameters:

Name Type Description Default
prompt str

The prompt to elicit the structured output response.

required
mdl type[StructuredOutputType]

The ~pydantic.BaseModel to output.

required
**kwargs Any

Additional keyword arguments.

{}

Returns:

Name Type Description
StructuredOutputType StructuredOutputType

The structured output as the specified mdl type.

Source code in src/llm_agents_from_scratch/base/llm.py
@abstractmethod
async def structured_output(
    self,
    prompt: str,
    mdl: type[StructuredOutputType],
    **kwargs: Any,
) -> StructuredOutputType:
    """Structured output interface for returning ~pydantic.BaseModels.

    Args:
        prompt (str): The prompt to elicit the structured output response.
        mdl (type[StructuredOutputType]): The ~pydantic.BaseModel to output.
        **kwargs (Any): Additional keyword arguments.

    Returns:
        StructuredOutputType: The structured output as the specified `mdl`
            type.
    """

chat abstractmethod async

chat(input, chat_history=None, tools=None, **kwargs)

Chat interface.

Parameters:

Name Type Description Default
input str

The user's current input.

required
chat_history Sequence[ChatMessage] | None

chat history.

None
tools Sequence[Tool] | None

tools that the LLM can call.

None
**kwargs Any

Additional keyword arguments.

{}

Returns:

Type Description
tuple[ChatMessage, ChatMessage]

tuple[ChatMessage, ChatMessage]: A tuple of ChatMessage with the first message corresponding to the ChatMessage created from the supplied input string, and the second being the structured response ChatMessage from the LLM.

Source code in src/llm_agents_from_scratch/base/llm.py
@abstractmethod
async def chat(
    self,
    input: str,
    chat_history: Sequence[ChatMessage] | None = None,
    tools: Sequence[Tool] | None = None,
    **kwargs: Any,
) -> tuple[ChatMessage, ChatMessage]:
    """Chat interface.

    Args:
        input (str): The user's current input.
        chat_history (Sequence[ChatMessage]|None, optional): Chat history.
        tools (Sequence[Tool]|None, optional): Tools that the LLM
            can call.
        **kwargs (Any): Additional keyword arguments.

    Returns:
        tuple[ChatMessage, ChatMessage]: A tuple of ChatMessage with the
            first message corresponding to the ChatMessage created from the
            supplied input string, and the second being the structured
            response ChatMessage from the LLM.
    """

continue_chat_with_tool_results abstractmethod async

continue_chat_with_tool_results(
    tool_call_results, chat_history, tools=None, **kwargs
)

Continue a chat by submitting tool call results.

Parameters:

Name Type Description Default
tool_call_results Sequence[ToolCallResult]

Tool call results.

required
chat_history Sequence[ChatMessage]

The chat history.

required
tools Sequence[Tool] | None

tools that the LLM can call.

None
**kwargs Any

Additional keyword arguments.

{}

Returns:

Type Description
tuple[list[ChatMessage], ChatMessage]

tuple[list[ChatMessage], ChatMessage]: A tuple whose first element is a list of ChatMessage objects corresponding to the supplied ToolCallResult converted objects. The second element is the response ChatMessage from the LLM.

Source code in src/llm_agents_from_scratch/base/llm.py
@abstractmethod
async def continue_chat_with_tool_results(
    self,
    tool_call_results: Sequence[ToolCallResult],
    chat_history: Sequence[ChatMessage],
    tools: Sequence[Tool] | None = None,
    **kwargs: Any,
) -> tuple[list[ChatMessage], ChatMessage]:
    """Continue a chat by submitting tool call results.

    Args:
        tool_call_results (Sequence[ToolCallResult]): Tool call results
            to submit back to the LLM.
        chat_history (Sequence[ChatMessage]): The chat history.
        tools (Sequence[Tool]|None, optional): Tools that the LLM
            can call.
        **kwargs (Any): Additional keyword arguments.

    Returns:
        tuple[list[ChatMessage], ChatMessage]: A tuple whose first element
            is a list of ChatMessage objects corresponding to the
            supplied ToolCallResult converted objects. The second element
            is the response ChatMessage from the LLM.
    """