base.calls

A base abstract interface for calling LLMs.

BaseCall

Bases: BasePrompt, Generic[BaseCallResponseT, BaseCallResponseChunkT, BaseToolT, MessageParamT], ABC

The abstract base class interface for calling LLMs.

Source code in mirascope/base/calls.py
class BaseCall(
    BasePrompt,
    Generic[BaseCallResponseT, BaseCallResponseChunkT, BaseToolT, MessageParamT],
    ABC,
):
    """The base class abstract interface for calling LLMs."""

    api_key: ClassVar[Optional[str]] = None
    base_url: ClassVar[Optional[str]] = None
    call_params: ClassVar[BaseCallParams] = BaseCallParams[BaseToolT](
        model="gpt-3.5-turbo-0125"
    )
    configuration: ClassVar[BaseConfig] = BaseConfig(llm_ops=[], client_wrappers=[])
    _provider: ClassVar[str] = "base"

    @abstractmethod
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> BaseCallResponseT:
        """A call to an LLM.

        An implementation of this function must return a response that extends
        `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> BaseCallResponseT:
        """An asynchronous call to an LLM.

        An implementation of this function must return a response that extends
        `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[BaseCallResponseChunkT, None, None]:
        """A call to an LLM that streams the response in chunks.

        An implementation of this function must yield response chunks that extend
        `BaseCallResponseChunk`. This ensures a consistent API and convenience across
        e.g. different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[BaseCallResponseChunkT, None]:
        """A asynchronous call to an LLM that streams the response in chunks.

        An implementation of this function must yield response chunks that extend
        `BaseCallResponseChunk`. This ensures a consistent API and convenience across
        e.g. different model providers."""
        yield ...  # type: ignore # pragma: no cover

    @classmethod
    def from_prompt(
        cls, prompt_type: type[BasePromptT], call_params: BaseCallParams
    ) -> type[BasePromptT]:
        """Returns a call_type generated dynamically from this base call.

        Args:
            prompt_type: The prompt class to use for the call. Properties and class
                variables of this class will be used to create the new call class. Must
                be a class that can be instantiated.
            call_params: The call params to use for the call.

        Returns:
            A new call class with new call_type.
        """

        fields: dict[str, Any] = {
            name: (field.annotation, field.default)
            for name, field in prompt_type.model_fields.items()
        }

        class_vars = {
            name: value
            for name, value in prompt_type.__dict__.items()
            if name not in prompt_type.model_fields
        }
        new_call = create_model(prompt_type.__name__, __base__=cls, **fields)

        for var_name, var_value in class_vars.items():
            setattr(new_call, var_name, var_value)
        setattr(new_call, "call_params", call_params)

        return cast(type[BasePromptT], new_call)

    ############################## PRIVATE METHODS ###################################

    def _setup(
        self,
        kwargs: dict[str, Any],
        base_tool_type: Optional[Type[BaseToolT]] = None,
    ) -> tuple[dict[str, Any], Optional[list[Type[BaseToolT]]]]:
        """Returns the call params kwargs and tool types.

        The tools in the call params first get converted into BaseToolT types. We then
        need both the converted tools for the response (so it can construct actual tool
        instances if present in the response) as well as the actual schemas injected
        through kwargs. This function handles that setup.
        """
        call_params = self.call_params.model_copy(update=kwargs)
        kwargs = call_params.kwargs(tool_type=base_tool_type)
        tool_types = None
        if "tools" in kwargs and base_tool_type is not None:
            tool_types = kwargs.pop("tools")
            kwargs["tools"] = [tool_type.tool_schema() for tool_type in tool_types]
        return kwargs, tool_types

    def _get_possible_user_message(
        self, messages: list[Any]
    ) -> Optional[MessageParamT]:
        """Returns the most recent message if it's a user message, otherwise `None`."""
        return messages[-1] if messages[-1]["role"] == "user" else None
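
Concretely, a provider integration subclasses BaseCall and implements the four abstract methods. The sketch below is a minimal, self-contained illustration of that shape; EchoCall and EchoResponse are hypothetical stand-ins (not mirascope types), and the from mirascope.base import BaseCall import path is assumed:

from typing import Any, AsyncGenerator, Generator, Union

from tenacity import AsyncRetrying, Retrying

from mirascope.base import BaseCall  # assumed import path


class EchoResponse:
    """Stand-in response; a real implementation would extend BaseCallResponse."""

    def __init__(self, content: str) -> None:
        self.content = content


class EchoCall(BaseCall):
    """Hypothetical call that echoes the formatted prompt instead of hitting an API."""

    prompt_template = "Say: {text}"

    text: str

    def call(self, retries: Union[int, Retrying] = 0, **kwargs: Any) -> EchoResponse:
        # Merge per-call kwargs into call_params and extract any tool types.
        kwargs, tool_types = self._setup(kwargs)
        return EchoResponse(content=str(self))

    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> EchoResponse:
        return self.call(**kwargs)

    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[EchoResponse, None, None]:
        for word in str(self).split():
            yield EchoResponse(content=word)

    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[EchoResponse, None]:
        for word in str(self).split():
            yield EchoResponse(content=word)


print(EchoCall(text="hello").call().content)
#> Say: hello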

call(retries=0, **kwargs) abstractmethod

A call to an LLM.

An implementation of this function must return a response that extends BaseCallResponse. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> BaseCallResponseT:
    """A call to an LLM.

    An implementation of this function must return a response that extends
    `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
    different model providers.
    """
    ...  # pragma: no cover
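
For example, usage through a concrete provider subclass looks like the following (a sketch assuming mirascope's OpenAI integration; OpenAICall and the content property on the response come from that integration):

from mirascope.openai import OpenAICall  # assumed provider subclass


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book."

    genre: str


# `retries` accepts an int or a tenacity `Retrying` instance.
response = BookRecommender(genre="fantasy").call(retries=2)
print(response.content)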

call_async(retries=0, **kwargs) abstractmethod async

An asynchronous call to an LLM.

An implementation of this function must return a response that extends BaseCallResponse. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> BaseCallResponseT:
    """An asynchronous call to an LLM.

    An implementation of this function must return a response that extends
    `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
    different model providers.
    """
    ...  # pragma: no cover
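
The asynchronous variant is awaited inside an event loop; the same assumptions about the OpenAI integration apply:

import asyncio

from mirascope.openai import OpenAICall  # assumed provider subclass


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book."

    genre: str


async def main() -> None:
    response = await BookRecommender(genre="fantasy").call_async()
    print(response.content)


asyncio.run(main())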

from_prompt(prompt_type, call_params) classmethod

Returns a call_type generated dynamically from this base call.

Parameters:

    prompt_type (type[BasePromptT], required):
        The prompt class to use for the call. Properties and class variables
        of this class will be used to create the new call class. Must be a
        class that can be instantiated.

    call_params (BaseCallParams, required):
        The call params to use for the call.

Returns:

    type[BasePromptT]: A new call class with new call_type.

Source code in mirascope/base/calls.py
@classmethod
def from_prompt(
    cls, prompt_type: type[BasePromptT], call_params: BaseCallParams
) -> type[BasePromptT]:
    """Returns a call_type generated dynamically from this base call.

    Args:
        prompt_type: The prompt class to use for the call. Properties and class
            variables of this class will be used to create the new call class. Must
            be a class that can be instantiated.
        call_params: The call params to use for the call.

    Returns:
        A new call class with new call_type.
    """

    fields: dict[str, Any] = {
        name: (field.annotation, field.default)
        for name, field in prompt_type.model_fields.items()
    }

    class_vars = {
        name: value
        for name, value in prompt_type.__dict__.items()
        if name not in prompt_type.model_fields
    }
    new_call = create_model(prompt_type.__name__, __base__=cls, **fields)

    for var_name, var_value in class_vars.items():
        setattr(new_call, var_name, var_value)
    setattr(new_call, "call_params", call_params)

    return cast(type[BasePromptT], new_call)
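
A sketch of lifting an existing BasePrompt into a provider call, assuming mirascope's OpenAI integration for OpenAICall and OpenAICallParams:

from mirascope import BasePrompt
from mirascope.openai import OpenAICall, OpenAICallParams  # assumed provider types


class BookRecommendationPrompt(BasePrompt):
    prompt_template = "Please recommend a {genre} book."

    genre: str


# Dynamically generate a call class that reuses the prompt's fields and
# class variables but carries its own call_params.
BookRecommender = OpenAICall.from_prompt(
    BookRecommendationPrompt, OpenAICallParams(model="gpt-4")
)
response = BookRecommender(genre="fantasy").call()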

stream(retries=0, **kwargs) abstractmethod

A call to an LLM that streams the response in chunks.

An implementation of this function must yield response chunks that extend BaseCallResponseChunk. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[BaseCallResponseChunkT, None, None]:
    """A call to an LLM that streams the response in chunks.

    An implementation of this function must yield response chunks that extend
    `BaseCallResponseChunk`. This ensures a consistent API and convenience across
    e.g. different model providers.
    """
    ...  # pragma: no cover
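
Streaming iterates over chunks as they arrive; a sketch under the same OpenAI-integration assumption, where each chunk's content is assumed to hold the text delta:

from mirascope.openai import OpenAICall  # assumed provider subclass


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book."

    genre: str


for chunk in BookRecommender(genre="fantasy").stream():
    print(chunk.content, end="", flush=True)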

stream_async(retries=0, **kwargs) abstractmethod async

An asynchronous call to an LLM that streams the response in chunks.

An implementation of this function must yield response chunks that extend BaseCallResponseChunk. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[BaseCallResponseChunkT, None]:
    """A asynchronous call to an LLM that streams the response in chunks.

    An implementation of this function must yield response chunks that extend
    `BaseCallResponseChunk`. This ensures a consistent API and convenience across
    e.g. different model providers."""
    yield ...  # type: ignore # pragma: no cover
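
And the asynchronous form consumes the chunks with async for (same assumptions as above):

import asyncio

from mirascope.openai import OpenAICall  # assumed provider subclass


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book."

    genre: str


async def main() -> None:
    async for chunk in BookRecommender(genre="fantasy").stream_async():
        print(chunk.content, end="", flush=True)


asyncio.run(main())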

BaseCallParams

Bases: BaseModel, Generic[BaseToolT]

The parameters with which to make a call.

Source code in mirascope/base/types.py
class BaseCallParams(BaseModel, Generic[BaseToolT]):
    """The parameters with which to make a call."""

    model: str
    tools: Optional[list[Union[Callable, Type[BaseToolT]]]] = None

    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)

    def kwargs(
        self,
        tool_type: Optional[Type[BaseToolT]] = None,
        exclude: Optional[set[str]] = None,
    ) -> dict[str, Any]:
        """Returns all parameters for the call as a keyword arguments dictionary."""
        extra_exclude = {"tools"}
        exclude = extra_exclude if exclude is None else exclude.union(extra_exclude)
        kwargs = {
            key: value
            for key, value in self.model_dump(exclude=exclude).items()
            if value is not None
        }
        if not self.tools or tool_type is None:
            return kwargs
        kwargs["tools"] = [
            tool if isclass(tool) else convert_function_to_tool(tool, tool_type)
            for tool in self.tools
        ]
        return kwargs
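
Because model_config sets extra="allow", provider-specific parameters can be attached beyond the declared fields and will surface in kwargs(). A small sketch (temperature is an assumed provider parameter, and the import path is assumed):

from mirascope.base import BaseCallParams  # assumed import path

params = BaseCallParams(model="gpt-3.5-turbo-0125", temperature=0.7)
print(params.kwargs())
#> {'model': 'gpt-3.5-turbo-0125', 'temperature': 0.7}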

kwargs(tool_type=None, exclude=None)

Returns all parameters for the call as a keyword arguments dictionary.

Source code in mirascope/base/types.py
def kwargs(
    self,
    tool_type: Optional[Type[BaseToolT]] = None,
    exclude: Optional[set[str]] = None,
) -> dict[str, Any]:
    """Returns all parameters for the call as a keyword arguments dictionary."""
    extra_exclude = {"tools"}
    exclude = extra_exclude if exclude is None else exclude.union(extra_exclude)
    kwargs = {
        key: value
        for key, value in self.model_dump(exclude=exclude).items()
        if value is not None
    }
    if not self.tools or tool_type is None:
        return kwargs
    kwargs["tools"] = [
        tool if isclass(tool) else convert_function_to_tool(tool, tool_type)
        for tool in self.tools
    ]
    return kwargs
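
When a tool_type is supplied, plain functions in tools are converted into tool classes (their JSON schemas are injected later by _setup). A sketch assuming mirascope's OpenAITool as the concrete BaseToolT:

from mirascope.base import BaseCallParams  # assumed import path
from mirascope.openai import OpenAITool  # assumed provider tool type


def format_book(title: str, author: str) -> str:
    """Returns the title and author formatted as a single string."""
    return f"{title} by {author}"


params = BaseCallParams(model="gpt-3.5-turbo-0125", tools=[format_book])
kwargs = params.kwargs(tool_type=OpenAITool)
# `tools` now holds generated OpenAITool subclasses rather than raw functions.
print(kwargs["tools"])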

BasePrompt

Bases: BaseModel

The base class for working with prompts.

This class is implemented as the base for all prompting needs across various model providers.

Example:

from mirascope import BasePrompt


class BookRecommendationPrompt(BasePrompt):
    """A prompt for recommending a book."""

    prompt_template = """
    SYSTEM: You are the world's greatest librarian.
    USER: Please recommend a {genre} book.
    """

    genre: str


prompt = BookRecommendationPrompt(genre="fantasy")
print(prompt.messages())
#> [{"role": "user", "content": "Please recommend a fantasy book."}]

print(prompt)
#> Please recommend a fantasy book.
Source code in mirascope/base/prompts.py
class BasePrompt(BaseModel):
    '''The base class for working with prompts.

    This class is implemented as the base for all prompting needs across various model
    providers.

    Example:

    ```python
    from mirascope import BasePrompt


    class BookRecommendationPrompt(BasePrompt):
        """A prompt for recommending a book."""

        prompt_template = """
        SYSTEM: You are the world's greatest librarian.
        USER: Please recommend a {genre} book.
        """

        genre: str


    prompt = BookRecommendationPrompt(genre="fantasy")
    print(prompt.messages())
    #> [{"role": "user", "content": "Please recommend a fantasy book."}]

    print(prompt)
    #> Please recommend a fantasy book.
    ```
    '''

    tags: ClassVar[list[str]] = []
    prompt_template: ClassVar[str] = ""

    def __str__(self) -> str:
        """Returns the formatted template."""
        return self._format_template(self.prompt_template)

    def messages(self) -> Union[list[Message], Any]:
        """Returns the template as a formatted list of messages."""
        message_type_by_role = {
            MessageRole.SYSTEM: SystemMessage,
            MessageRole.USER: UserMessage,
            MessageRole.ASSISTANT: AssistantMessage,
            MessageRole.MODEL: ModelMessage,
            MessageRole.TOOL: ToolMessage,
        }
        return [
            message_type_by_role[MessageRole(message["role"])](**message)
            for message in self._parse_messages(list(message_type_by_role.keys()))
        ]

    def dump(
        self,
    ) -> dict[str, Any]:
        """Dumps the contents of the prompt into a dictionary."""
        return {
            "tags": self.tags,
            "template": dedent(self.prompt_template).strip("\n"),
            "inputs": self.model_dump(),
        }

    ############################## PRIVATE METHODS ###################################

    def _format_template(self, template: str):
        """Formats the given `template` with attributes matching template variables."""
        dedented_template = dedent(template).strip()
        template_vars = [
            var
            for _, var, _, _ in Formatter().parse(dedented_template)
            if var is not None
        ]

        values = {}
        for var in template_vars:
            attr = getattr(self, var)
            if attr and isinstance(attr, list):
                if isinstance(attr[0], list):
                    values[var] = "\n\n".join(
                        ["\n".join([str(subitem) for subitem in item]) for item in attr]
                    )
                else:
                    values[var] = "\n".join([str(item) for item in attr])
            else:
                values[var] = str(attr)

        return dedented_template.format(**values)

    def _parse_messages(self, roles: list[str]) -> list[Message]:
        """Returns messages parsed from the `template` ClassVar.

        Raises:
            ValueError: if the template contains an unknown role.
        """
        messages = []
        re_roles = "|".join([role.upper() for role in roles] + ["MESSAGES"])
        for match in re.finditer(
            rf"({re_roles}):((.|\n)+?)(?=({re_roles}):|\Z)",
            self.prompt_template,
        ):
            role = match.group(1).lower()
            if role == "messages":
                template_var = [
                    var
                    for _, var, _, _ in Formatter().parse(match.group(2))
                    if var is not None
                ][0]
                attribute = getattr(self, template_var)
                if attribute is None or not isinstance(attribute, list):
                    raise ValueError(
                        f"MESSAGES keyword used with attribute `{template_var}`, which "
                        "is not a `list` of messages."
                    )
                messages += attribute
            else:
                content = self._format_template(match.group(2))
                if content:
                    messages.append({"role": role, "content": content})
        if len(messages) == 0:
            messages.append(
                {
                    "role": "user",
                    "content": self._format_template(self.prompt_template),
                }
            )
        return messages
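
Note the list handling in _format_template: list attributes are joined with newlines (and lists of lists with blank lines between groups). A quick sketch:

from mirascope import BasePrompt


class ListPrompt(BasePrompt):
    prompt_template = "Consider these books: {book_list}"

    book_list: list[str]


print(ListPrompt(book_list=["Mistborn", "The Name of the Wind"]))
#> Consider these books: Mistborn
#> The Name of the Wind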

dump()

Dumps the contents of the prompt into a dictionary.

Source code in mirascope/base/prompts.py
def dump(
    self,
) -> dict[str, Any]:
    """Dumps the contents of the prompt into a dictionary."""
    return {
        "tags": self.tags,
        "template": dedent(self.prompt_template).strip("\n"),
        "inputs": self.model_dump(),
    }
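
For example, with a single-line template, dump() collects the tags, the dedented template, and the model inputs:

from mirascope import BasePrompt


class BookRecommendationPrompt(BasePrompt):
    prompt_template = "Please recommend a {genre} book."

    genre: str


print(BookRecommendationPrompt(genre="fantasy").dump())
#> {'tags': [], 'template': 'Please recommend a {genre} book.', 'inputs': {'genre': 'fantasy'}}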

messages()

Returns the template as a formatted list of messages.

Source code in mirascope/base/prompts.py
def messages(self) -> Union[list[Message], Any]:
    """Returns the template as a formatted list of messages."""
    message_type_by_role = {
        MessageRole.SYSTEM: SystemMessage,
        MessageRole.USER: UserMessage,
        MessageRole.ASSISTANT: AssistantMessage,
        MessageRole.MODEL: ModelMessage,
        MessageRole.TOOL: ToolMessage,
    }
    return [
        message_type_by_role[MessageRole(message["role"])](**message)
        for message in self._parse_messages(list(message_type_by_role.keys()))
    ]
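
One feature visible in _parse_messages but not in the class example: the MESSAGES keyword splices an existing list of message dicts (e.g. chat history) into the parsed output. A sketch:

from mirascope import BasePrompt


class ChatPrompt(BasePrompt):
    prompt_template = """
    SYSTEM: You are the world's greatest librarian.
    MESSAGES: {history}
    USER: {question}
    """

    history: list  # must be a list of messages, per the ValueError in _parse_messages
    question: str


prompt = ChatPrompt(
    history=[
        {"role": "user", "content": "Hi!"},
        {"role": "assistant", "content": "Hello! How can I help?"},
    ],
    question="Please recommend a fantasy book.",
)
print(prompt.messages())  # system message, spliced history, then the user question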