openai.calls

A module for calling OpenAI's Chat Completion models.

BaseCall

Bases: BasePrompt, Generic[BaseCallResponseT, BaseCallResponseChunkT, BaseToolT, MessageParamT], ABC

The base class abstract interface for calling LLMs.

Source code in mirascope/base/calls.py
class BaseCall(
    BasePrompt,
    Generic[BaseCallResponseT, BaseCallResponseChunkT, BaseToolT, MessageParamT],
    ABC,
):
    """The base class abstract interface for calling LLMs."""

    api_key: ClassVar[Optional[str]] = None
    base_url: ClassVar[Optional[str]] = None
    call_params: ClassVar[BaseCallParams] = BaseCallParams[BaseToolT](
        model="gpt-3.5-turbo-0125"
    )
    configuration: ClassVar[BaseConfig] = BaseConfig(llm_ops=[], client_wrappers=[])
    _provider: ClassVar[str] = "base"

    @abstractmethod
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> BaseCallResponseT:
        """A call to an LLM.

        An implementation of this function must return a response that extends
        `BaseCallResponse`. This ensures a consistent API and convenience across
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> BaseCallResponseT:
        """An asynchronous call to an LLM.

        An implementation of this function must return a response that extends
        `BaseCallResponse`. This ensures a consistent API and convenience across
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[BaseCallResponseChunkT, None, None]:
        """A call to an LLM that streams the response in chunks.

        An implementation of this function must yield response chunks that extend
        `BaseCallResponseChunk`. This ensures a consistent API and convenience across
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[BaseCallResponseChunkT, None]:
        """A asynchronous call to an LLM that streams the response in chunks.

        An implementation of this function must yield response chunks that extend
        `BaseCallResponseChunk`. This ensures a consistent API and convenience across
        e.g. different model providers."""
        yield ...  # type: ignore # pragma: no cover

    @classmethod
    def from_prompt(
        cls, prompt_type: type[BasePromptT], call_params: BaseCallParams
    ) -> type[BasePromptT]:
        """Returns a call_type generated dynamically from this base call.

        Args:
            prompt_type: The prompt class to use for the call. Properties and class
                variables of this class will be used to create the new call class. Must
                be a class that can be instantiated.
            call_params: The call params to use for the call.

        Returns:
            A new call class with new call_type.
        """

        fields: dict[str, Any] = {
            name: (field.annotation, field.default)
            for name, field in prompt_type.model_fields.items()
        }

        class_vars = {
            name: value
            for name, value in prompt_type.__dict__.items()
            if name not in prompt_type.model_fields
        }
        new_call = create_model(prompt_type.__name__, __base__=cls, **fields)

        for var_name, var_value in class_vars.items():
            setattr(new_call, var_name, var_value)
        setattr(new_call, "call_params", call_params)

        return cast(type[BasePromptT], new_call)

    ############################## PRIVATE METHODS ###################################

    def _setup(
        self,
        kwargs: dict[str, Any],
        base_tool_type: Optional[Type[BaseToolT]] = None,
    ) -> tuple[dict[str, Any], Optional[list[Type[BaseToolT]]]]:
        """Returns the call params kwargs and tool types.

        The tools in the call params first get converted into BaseToolT types. We then
        need both the converted tools for the response (so it can construct actual tool
        instances if present in the response) as well as the actual schemas injected
        through kwargs. This function handles that setup.
        """
        call_params = self.call_params.model_copy(update=kwargs)
        kwargs = call_params.kwargs(tool_type=base_tool_type)
        tool_types = None
        if "tools" in kwargs and base_tool_type is not None:
            tool_types = kwargs.pop("tools")
            kwargs["tools"] = [tool_type.tool_schema() for tool_type in tool_types]
        return kwargs, tool_types

    def _get_possible_user_message(
        self, messages: list[Any]
    ) -> Optional[MessageParamT]:
        """Returns the most recent message if it's a user message, otherwise `None`."""
        return messages[-1] if messages[-1]["role"] == "user" else None

call(retries=0, **kwargs) abstractmethod

A call to an LLM.

An implementation of this function must return a response that extends BaseCallResponse. This ensures a consistent API and convenience across different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> BaseCallResponseT:
    """A call to an LLM.

    An implementation of this function must return a response that extends
    `BaseCallResponse`. This ensures a consistent API and convenience across
    different model providers.
    """
    ...  # pragma: no cover

call_async(retries=0, **kwargs) abstractmethod async

An asynchronous call to an LLM.

An implementation of this function must return a response that extends BaseCallResponse. This ensures a consistent API and convenience across different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> BaseCallResponseT:
    """An asynchronous call to an LLM.

    An implementation of this function must return a response that extends
    `BaseCallResponse`. This ensures a consistent API and convenience across
    different model providers.
    """
    ...  # pragma: no cover

from_prompt(prompt_type, call_params) classmethod

Returns a call_type generated dynamically from this base call.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `prompt_type` | `type[BasePromptT]` | The prompt class to use for the call. Properties and class variables of this class will be used to create the new call class. Must be a class that can be instantiated. | required |
| `call_params` | `BaseCallParams` | The call params to use for the call. | required |

Returns:

| Type | Description |
| --- | --- |
| `type[BasePromptT]` | A new call class with new call_type. |

Source code in mirascope/base/calls.py
@classmethod
def from_prompt(
    cls, prompt_type: type[BasePromptT], call_params: BaseCallParams
) -> type[BasePromptT]:
    """Returns a call_type generated dynamically from this base call.

    Args:
        prompt_type: The prompt class to use for the call. Properties and class
            variables of this class will be used to create the new call class. Must
            be a class that can be instantiated.
        call_params: The call params to use for the call.

    Returns:
        A new call class with new call_type.
    """

    fields: dict[str, Any] = {
        name: (field.annotation, field.default)
        for name, field in prompt_type.model_fields.items()
    }

    class_vars = {
        name: value
        for name, value in prompt_type.__dict__.items()
        if name not in prompt_type.model_fields
    }
    new_call = create_model(prompt_type.__name__, __base__=cls, **fields)

    for var_name, var_value in class_vars.items():
        setattr(new_call, var_name, var_value)
    setattr(new_call, "call_params", call_params)

    return cast(type[BasePromptT], new_call)
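
A hedged sketch of how `from_prompt` might be used to turn an existing prompt into an OpenAI call (`BookPrompt` is hypothetical, and the import paths are assumptions based on the source paths shown on this page):

```python
from mirascope.base import BasePrompt
from mirascope.openai import OpenAICall, OpenAICallParams


class BookPrompt(BasePrompt):
    prompt_template = "Please recommend a {genre} book"

    genre: str


# Dynamically build an `OpenAICall` subclass from the prompt; the given
# `call_params` become the new class's `call_params` class variable.
BookRecommender = OpenAICall.from_prompt(
    BookPrompt, OpenAICallParams(model="gpt-3.5-turbo-0125")
)

response = BookRecommender(genre="fantasy").call()
print(response.content)
```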

stream(retries=0, **kwargs) abstractmethod

A call to an LLM that streams the response in chunks.

An implementation of this function must yield response chunks that extend BaseCallResponseChunk. This ensures a consistent API and convenience across different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[BaseCallResponseChunkT, None, None]:
    """A call to an LLM that streams the response in chunks.

    An implementation of this function must yield response chunks that extend
    `BaseCallResponseChunk`. This ensures a consistent API and convenience across
    different model providers.
    """
    ...  # pragma: no cover

stream_async(retries=0, **kwargs) abstractmethod async

An asynchronous call to an LLM that streams the response in chunks.

An implementation of this function must yield response chunks that extend BaseCallResponseChunk. This ensures a consistent API and convenience across different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[BaseCallResponseChunkT, None]:
    """A asynchronous call to an LLM that streams the response in chunks.

    An implementation of this function must yield response chunks that extend
    `BaseCallResponseChunk`. This ensures a consistent API and convenience across
    e.g. different model providers."""
    yield ...  # type: ignore # pragma: no cover

MessageRole

Bases: _Enum

Roles that the BasePrompt messages parser can parse from the template.

SYSTEM: A system message.
USER: A user message.
ASSISTANT: A message response from the assistant or chat client.
MODEL: A message response from the assistant or chat client. Model is used by Google's Gemini (which doesn't have system messages) instead of assistant.
CHATBOT: A message response from the chat client. Chatbot is used by Cohere instead of assistant.
TOOL: A message representing the output of calling a tool.

Source code in mirascope/enums.py
class MessageRole(_Enum):
    """Roles that the `BasePrompt` messages parser can parse from the template.

    SYSTEM: A system message.
    USER: A user message.
    ASSISTANT: A message response from the assistant or chat client.
    MODEL: A message response from the assistant or chat client. Model is used by
        Google's Gemini (which doesn't have system messages) instead of assistant.
    CHATBOT: A message response from the chat client. Chatbot is used by Cohere instead
        of assistant.
    TOOL: A message representing the output of calling a tool.
    """

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    MODEL = "model"
    CHATBOT = "chatbot"
    TOOL = "tool"
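
Since the members are string-valued, a raw role string parsed from a template maps back to its member by value. A small sketch (the import path follows the source path above):

```python
from mirascope.enums import MessageRole

role = MessageRole("assistant")
assert role is MessageRole.ASSISTANT
assert role.value == "assistant"
```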

OpenAICall

Bases: BaseCall[OpenAICallResponse, OpenAICallResponseChunk, OpenAITool, ChatCompletionUserMessageParam]

A base class for calling OpenAI's Chat Completion models.

Example:

from mirascope.openai import OpenAICall


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

response = BookRecommender(genre="fantasy").call()
print(response.content)
#> There are many great books to read, it ultimately depends...
Source code in mirascope/openai/calls.py
class OpenAICall(
    BaseCall[
        OpenAICallResponse,
        OpenAICallResponseChunk,
        OpenAITool,
        ChatCompletionUserMessageParam,
    ]
):
    """A base class for calling OpenAI's Chat Completion models.

    Example:

    ```python
    from mirascope.openai import OpenAICall


    class BookRecommender(OpenAICall):
        prompt_template = "Please recommend a {genre} book"

        genre: str

    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> There are many great books to read, it ultimately depends...
    ```
    """

    call_params: ClassVar[OpenAICallParams] = OpenAICallParams()
    _provider: ClassVar[str] = "openai"

    def messages(self) -> list[ChatCompletionMessageParam]:
        """Returns the template as a formatted list of messages."""
        message_type_by_role = {
            MessageRole.SYSTEM: ChatCompletionSystemMessageParam,
            MessageRole.USER: ChatCompletionUserMessageParam,
            MessageRole.ASSISTANT: ChatCompletionAssistantMessageParam,
            MessageRole.TOOL: ChatCompletionToolMessageParam,
        }
        return [
            message_type_by_role[MessageRole(message["role"])](**message)
            for message in self._parse_messages(list(message_type_by_role.keys()))
        ]

    @retry
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> OpenAICallResponse:
        """Makes a call to the model using this `OpenAICall` instance.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.Retrying` instance.
            **kwargs: Additional keyword arguments to pass to the call. These will
                override any existing arguments in `call_params`.

        Returns:
            An `OpenAICallResponse` instance.

        Raises:
            OpenAIError: any error raised by the OpenAI API; see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = self._setup_openai_client(OpenAI)
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            response_type=OpenAICallResponse,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        user_message_param = self._get_possible_user_message(messages)
        start_time = datetime.datetime.now().timestamp() * 1000
        completion = create(
            messages=messages,
            stream=False,
            **kwargs,
        )
        return OpenAICallResponse(
            response=completion,
            user_message_param=user_message_param,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=openai_api_calculate_cost(completion.usage, completion.model),
            response_format=self.call_params.response_format,
        )

    @retry
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> OpenAICallResponse:
        """Makes an asynchronous call to the model using this `OpenAICall`.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.AsyncRetrying` instance.
            **kwargs: Additional keyword arguments to pass to the call. These will
                override any existing arguments in `call_params`.

        Returns:
            An `OpenAICallResponse` instance.

        Raises:
            OpenAIError: any error raised by the OpenAI API; see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = self._setup_openai_client(AsyncOpenAI)
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            is_async=True,
            response_type=OpenAICallResponse,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        user_message_param = self._get_possible_user_message(messages)
        start_time = datetime.datetime.now().timestamp() * 1000
        completion = await create(
            messages=messages,
            stream=False,
            **kwargs,
        )
        return OpenAICallResponse(
            response=completion,
            user_message_param=user_message_param,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=openai_api_calculate_cost(completion.usage, completion.model),
            response_format=self.call_params.response_format,
        )

    @retry
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[OpenAICallResponseChunk, None, None]:
        """Streams the response for a call using this `OpenAICall`.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.Retrying` instance.
            **kwargs: Additional keyword arguments to pass to the call. These will
                override any existing arguments in `call_params`.

        Yields:
            An `OpenAICallResponseChunk` for each chunk of the response.

        Raises:
            OpenAIError: any error raised by the OpenAI API; see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = self._setup_openai_client(OpenAI)
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            response_chunk_type=OpenAICallResponseChunk,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        user_message_param = self._get_possible_user_message(messages)
        if not isinstance(client, AzureOpenAI):
            kwargs["stream_options"] = {"include_usage": True}
        stream = create(
            messages=messages,
            stream=True,
            **kwargs,
        )
        for chunk in stream:
            yield OpenAICallResponseChunk(
                chunk=chunk,
                user_message_param=user_message_param,
                tool_types=tool_types,
                cost=openai_api_calculate_cost(chunk.usage, chunk.model),
                response_format=self.call_params.response_format,
            )

    @retry
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[OpenAICallResponseChunk, None]:
        """Streams the response for an asynchronous call using this `OpenAICall`.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.AsyncRetrying` instance.
            **kwargs: Additional keyword arguments to pass to the call. These will
                override any existing arguments in `call_params`.

        Yields:
            An `OpenAICallResponseChunk` for each chunk of the response.

        Raises:
            OpenAIError: any error raised by the OpenAI API; see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = self._setup_openai_client(AsyncOpenAI)
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            is_async=True,
            response_chunk_type=OpenAICallResponseChunk,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        user_message_param = self._get_possible_user_message(messages)
        if not isinstance(client, AsyncAzureOpenAI):
            kwargs["stream_options"] = {"include_usage": True}
        stream = await create(
            messages=messages,
            stream=True,
            **kwargs,
        )
        async for chunk in stream:
            yield OpenAICallResponseChunk(
                chunk=chunk,
                user_message_param=user_message_param,
                tool_types=tool_types,
                cost=openai_api_calculate_cost(chunk.usage, chunk.model),
                response_format=self.call_params.response_format,
            )

    ############################## PRIVATE METHODS ###################################

    def _setup_openai_kwargs(
        self,
        kwargs: dict[str, Any],
    ) -> tuple[
        dict[str, Any],
        Optional[list[Type[OpenAITool]]],
    ]:
        """Overrides the `BaseCall._setup` for Anthropic specific setup."""
        kwargs, tool_types = self._setup(kwargs, OpenAITool)
        if (
            self.call_params.response_format == ResponseFormat(type="json_object")
            and tool_types
        ):
            kwargs.pop("tools")
        return kwargs, tool_types

    @overload
    def _setup_openai_client(self, client_type: type[OpenAI]) -> OpenAI:
        ...  # pragma: no cover

    @overload
    def _setup_openai_client(self, client_type: type[AsyncOpenAI]) -> AsyncOpenAI:
        ...  # pragma: no cover

    def _setup_openai_client(
        self, client_type: Union[type[OpenAI], type[AsyncOpenAI]]
    ) -> Union[OpenAI, AsyncOpenAI]:
        """Returns the proper OpenAI/AsyncOpenAI client, including wrapping it."""
        using_azure = "inner_azure_client_wrapper" in [
            getattr(wrapper, "__name__", None)
            for wrapper in self.configuration.client_wrappers
        ]
        client = client_type(
            api_key=self.api_key if not using_azure else "make-azure-not-fail",
            base_url=self.base_url,
        )
        if client_type == OpenAI:
            client = get_wrapped_client(client, self)
        elif client_type == AsyncOpenAI:
            client = get_wrapped_async_client(client, self)
        return client

    def _update_messages_if_json(
        self,
        messages: list[ChatCompletionMessageParam],
        tool_types: Optional[list[type[OpenAITool]]],
    ) -> list[ChatCompletionMessageParam]:
        if (
            self.call_params.response_format == ResponseFormat(type="json_object")
            and tool_types
        ):
            messages.append(
                ChatCompletionUserMessageParam(
                    role="user", content=_json_mode_content(tool_type=tool_types[0])
                )
            )
        return messages

call(retries=0, **kwargs)

Makes a call to the model using this OpenAICall instance.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `retries` | `Union[int, Retrying]` | An integer for the number of times to retry the call or a `tenacity.Retrying` instance. | `0` |
| `**kwargs` | `Any` | Additional keyword arguments to pass to the call. These will override any existing arguments in `call_params`. | `{}` |

Returns:

| Type | Description |
| --- | --- |
| `OpenAICallResponse` | An `OpenAICallResponse` instance. |

Raises:

| Type | Description |
| --- | --- |
| `OpenAIError` | Any error raised by the OpenAI API; see: https://platform.openai.com/docs/guides/error-codes/api-errors |

Source code in mirascope/openai/calls.py
@retry
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> OpenAICallResponse:
    """Makes a call to the model using this `OpenAICall` instance.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.Retrying` instance.
        **kwargs: Additional keyword arguments to pass to the call. These will
            override any existing arguments in `call_params`.

    Returns:
        An `OpenAICallResponse` instance.

    Raises:
        OpenAIError: any error raised by the OpenAI API; see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = self._setup_openai_client(OpenAI)
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        response_type=OpenAICallResponse,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    user_message_param = self._get_possible_user_message(messages)
    start_time = datetime.datetime.now().timestamp() * 1000
    completion = create(
        messages=messages,
        stream=False,
        **kwargs,
    )
    return OpenAICallResponse(
        response=completion,
        user_message_param=user_message_param,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=openai_api_calculate_cost(completion.usage, completion.model),
        response_format=self.call_params.response_format,
    )
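
As a sketch of the kwargs-override behavior described above (reusing the `BookRecommender` class from the `OpenAICall` example), keyword arguments passed to `call` take precedence over the class-level `call_params` for that call only:

```python
recommender = BookRecommender(genre="fantasy")

# `temperature` overrides any `temperature` set in `call_params` for this call.
response = recommender.call(temperature=0.7)
print(response.content)
```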

call_async(retries=0, **kwargs) async

Makes an asynchronous call to the model using this OpenAICall.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `retries` | `Union[int, AsyncRetrying]` | An integer for the number of times to retry the call or a `tenacity.AsyncRetrying` instance. | `0` |
| `**kwargs` | `Any` | Additional keyword arguments to pass to the call. These will override any existing arguments in `call_params`. | `{}` |

Returns:

| Type | Description |
| --- | --- |
| `OpenAICallResponse` | An `OpenAICallResponse` instance. |

Raises:

| Type | Description |
| --- | --- |
| `OpenAIError` | Any error raised by the OpenAI API; see: https://platform.openai.com/docs/guides/error-codes/api-errors |

Source code in mirascope/openai/calls.py
@retry
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> OpenAICallResponse:
    """Makes an asynchronous call to the model using this `OpenAICall`.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.AsyncRetrying` instance.
        **kwargs: Additional keyword arguments to pass to the call. These will
            override any existing arguments in `call_params`.

    Returns:
        An `OpenAICallResponse` instance.

    Raises:
        OpenAIError: any error raised by the OpenAI API; see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = self._setup_openai_client(AsyncOpenAI)
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        is_async=True,
        response_type=OpenAICallResponse,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    user_message_param = self._get_possible_user_message(messages)
    start_time = datetime.datetime.now().timestamp() * 1000
    completion = await create(
        messages=messages,
        stream=False,
        **kwargs,
    )
    return OpenAICallResponse(
        response=completion,
        user_message_param=user_message_param,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=openai_api_calculate_cost(completion.usage, completion.model),
        response_format=self.call_params.response_format,
    )
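
A minimal sketch of asynchronous usage, again assuming the `BookRecommender` class from the `OpenAICall` example above:

```python
import asyncio


async def recommend_book() -> None:
    response = await BookRecommender(genre="fantasy").call_async()
    print(response.content)


asyncio.run(recommend_book())
```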

messages()

Returns the template as a formatted list of messages.

Source code in mirascope/openai/calls.py
def messages(self) -> list[ChatCompletionMessageParam]:
    """Returns the template as a formatted list of messages."""
    message_type_by_role = {
        MessageRole.SYSTEM: ChatCompletionSystemMessageParam,
        MessageRole.USER: ChatCompletionUserMessageParam,
        MessageRole.ASSISTANT: ChatCompletionAssistantMessageParam,
        MessageRole.TOOL: ChatCompletionToolMessageParam,
    }
    return [
        message_type_by_role[MessageRole(message["role"])](**message)
        for message in self._parse_messages(list(message_type_by_role.keys()))
    ]
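
A hedged sketch of how a multi-role template might parse, using the SYSTEM/USER keywords from the `MessageRole` parser described above (the exact output shape is illustrative):

```python
from mirascope.openai import OpenAICall


class Librarian(OpenAICall):
    prompt_template = """
    SYSTEM: You are the world's greatest librarian.
    USER: Please recommend a {genre} book.
    """

    genre: str


print(Librarian(genre="fantasy").messages())
#> [{'role': 'system', 'content': "You are the world's greatest librarian."},
#   {'role': 'user', 'content': 'Please recommend a fantasy book.'}]
```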

stream(retries=0, **kwargs)

Streams the response for a call using this OpenAICall.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `retries` | `Union[int, Retrying]` | An integer for the number of times to retry the call or a `tenacity.Retrying` instance. | `0` |
| `**kwargs` | `Any` | Additional keyword arguments to pass to the call. These will override any existing arguments in `call_params`. | `{}` |

Yields:

| Type | Description |
| --- | --- |
| `OpenAICallResponseChunk` | An `OpenAICallResponseChunk` for each chunk of the response. |

Raises:

| Type | Description |
| --- | --- |
| `OpenAIError` | Any error raised by the OpenAI API; see: https://platform.openai.com/docs/guides/error-codes/api-errors |

Source code in mirascope/openai/calls.py
@retry
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[OpenAICallResponseChunk, None, None]:
    """Streams the response for a call using this `OpenAICall`.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.Retrying` instance.
        **kwargs: Additional keyword arguments to pass to the call. These will
            override any existing arguments in `call_params`.

    Yields:
        An `OpenAICallResponseChunk` for each chunk of the response.

    Raises:
        OpenAIError: any error raised by the OpenAI API; see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = self._setup_openai_client(OpenAI)
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        response_chunk_type=OpenAICallResponseChunk,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    user_message_param = self._get_possible_user_message(messages)
    if not isinstance(client, AzureOpenAI):
        kwargs["stream_options"] = {"include_usage": True}
    stream = create(
        messages=messages,
        stream=True,
        **kwargs,
    )
    for chunk in stream:
        yield OpenAICallResponseChunk(
            chunk=chunk,
            user_message_param=user_message_param,
            tool_types=tool_types,
            cost=openai_api_calculate_cost(chunk.usage, chunk.model),
            response_format=self.call_params.response_format,
        )
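
A minimal streaming sketch, reusing the `BookRecommender` class from the `OpenAICall` example above:

```python
recommender = BookRecommender(genre="fantasy")

# Chunks arrive incrementally; print content as it is generated.
for chunk in recommender.stream():
    print(chunk.content, end="", flush=True)
```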

stream_async(retries=0, **kwargs) async

Streams the response for an asynchronous call using this OpenAICall.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `retries` | `Union[int, AsyncRetrying]` | An integer for the number of times to retry the call or a `tenacity.AsyncRetrying` instance. | `0` |
| `**kwargs` | `Any` | Additional keyword arguments to pass to the call. These will override any existing arguments in `call_params`. | `{}` |

Yields:

| Type | Description |
| --- | --- |
| `AsyncGenerator[OpenAICallResponseChunk, None]` | An `OpenAICallResponseChunk` for each chunk of the response. |

Raises:

| Type | Description |
| --- | --- |
| `OpenAIError` | Any error raised by the OpenAI API; see: https://platform.openai.com/docs/guides/error-codes/api-errors |

Source code in mirascope/openai/calls.py
@retry
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[OpenAICallResponseChunk, None]:
    """Streams the response for an asynchronous call using this `OpenAICall`.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.AsyncRetrying` instance.
        **kwargs: Additional keyword arguments to pass to the call. These will
            override any existing arguments in `call_params`.

    Yields:
        An `OpenAICallResponseChunk` for each chunk of the response.

    Raises:
        OpenAIError: any error raised by the OpenAI API; see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = self._setup_openai_client(AsyncOpenAI)
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        is_async=True,
        response_chunk_type=OpenAICallResponseChunk,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    user_message_param = self._get_possible_user_message(messages)
    if not isinstance(client, AsyncAzureOpenAI):
        kwargs["stream_options"] = {"include_usage": True}
    stream = await create(
        messages=messages,
        stream=True,
        **kwargs,
    )
    async for chunk in stream:
        yield OpenAICallResponseChunk(
            chunk=chunk,
            user_message_param=user_message_param,
            tool_types=tool_types,
            cost=openai_api_calculate_cost(chunk.usage, chunk.model),
            response_format=self.call_params.response_format,
        )
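
And the asynchronous equivalent, again assuming the `BookRecommender` class from the `OpenAICall` example above:

```python
import asyncio


async def stream_recommendation() -> None:
    async for chunk in BookRecommender(genre="fantasy").stream_async():
        print(chunk.content, end="", flush=True)


asyncio.run(stream_recommendation())
```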

OpenAICallParams

Bases: BaseCallParams[OpenAITool]

The parameters to use when calling the OpenAI API.

Source code in mirascope/openai/types.py
class OpenAICallParams(BaseCallParams[OpenAITool]):
    """The parameters to use when calling the OpenAI API."""

    model: str = "gpt-4o-2024-05-13"
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[dict[str, int]] = None
    logprobs: Optional[bool] = None
    max_tokens: Optional[int] = None
    n: Optional[int] = None
    presence_penalty: Optional[float] = None
    response_format: Optional[ResponseFormat] = None
    seed: Optional[int] = None
    stop: Union[Optional[str], list[str]] = None
    temperature: Optional[float] = None
    tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None
    top_logprobs: Optional[int] = None
    top_p: Optional[float] = None
    user: Optional[str] = None
    # Values defined below take precedence over values defined elsewhere. Use these
    # params to pass additional parameters to the API if necessary that aren't already
    # available as params.
    extra_headers: Optional[Headers] = None
    extra_query: Optional[Query] = None
    extra_body: Optional[Body] = None
    timeout: Optional[Union[float, Timeout]] = None

    model_config = ConfigDict(arbitrary_types_allowed=True)

    def kwargs(
        self,
        tool_type: Optional[Type[OpenAITool]] = OpenAITool,
        exclude: Optional[set[str]] = None,
    ) -> dict[str, Any]:
        """Returns the keyword argument call parameters."""
        return super().kwargs(tool_type, exclude)
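
A sketch of attaching custom params to a call class, using fields listed above (the values are illustrative):

```python
from mirascope.openai import OpenAICall, OpenAICallParams


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

    call_params = OpenAICallParams(
        model="gpt-4o-2024-05-13",
        temperature=0.7,
        max_tokens=256,
    )
```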

kwargs(tool_type=OpenAITool, exclude=None)

Returns the keyword argument call parameters.

Source code in mirascope/openai/types.py
def kwargs(
    self,
    tool_type: Optional[Type[OpenAITool]] = OpenAITool,
    exclude: Optional[set[str]] = None,
) -> dict[str, Any]:
    """Returns the keyword argument call parameters."""
    return super().kwargs(tool_type, exclude)

OpenAICallResponse

Bases: BaseCallResponse[ChatCompletion, OpenAITool]

A convenience wrapper around the OpenAI ChatCompletion response.

When using Mirascope's convenience wrappers to interact with OpenAI models via OpenAICall, responses using OpenAICall.call() will return an OpenAICallResponse, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.openai import OpenAICall


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book"

    genre: str


response = BookRecommender(genre="fantasy").call()
print(response.content)
#> The Name of the Wind

print(response.message)
#> ChatCompletionMessage(content='The Name of the Wind', role='assistant',
#  function_call=None, tool_calls=None)

print(response.choices)
#> [Choice(finish_reason='stop', index=0, logprobs=None,
#  message=ChatCompletionMessage(content='The Name of the Wind', role='assistant',
#  function_call=None, tool_calls=None))]
Source code in mirascope/openai/types.py
class OpenAICallResponse(BaseCallResponse[ChatCompletion, OpenAITool]):
    """A convenience wrapper around the OpenAI `ChatCompletion` response.

    When using Mirascope's convenience wrappers to interact with OpenAI models via
    `OpenAICall`, responses using `OpenAICall.call()` will return an
    `OpenAICallResponse`, whereby the implemented properties allow for simpler syntax
    and a convenient developer experience.

    Example:

    ```python
    from mirascope.openai import OpenAICall


    class BookRecommender(OpenAICall):
        prompt_template = "Please recommend a {genre} book"

        genre: str


    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> The Name of the Wind

    print(response.message)
    #> ChatCompletionMessage(content='The Name of the Wind', role='assistant',
    #  function_call=None, tool_calls=None)

    print(response.choices)
    #> [Choice(finish_reason='stop', index=0, logprobs=None,
    #  message=ChatCompletionMessage(content='The Name of the Wind', role='assistant',
    #  function_call=None, tool_calls=None))]
    ```
    """

    response_format: Optional[ResponseFormat] = None
    user_message_param: Optional[ChatCompletionUserMessageParam] = None

    @property
    def message_param(self) -> ChatCompletionAssistantMessageParam:
        """Returns the assistants's response as a message parameter."""
        return self.message.model_dump(exclude={"function_call"})  # type: ignore

    @property
    def choices(self) -> list[Choice]:
        """Returns the array of chat completion choices."""
        return self.response.choices

    @property
    def choice(self) -> Choice:
        """Returns the 0th choice."""
        return self.choices[0]

    @property
    def message(self) -> ChatCompletionMessage:
        """Returns the message of the chat completion for the 0th choice."""
        return self.choice.message

    @property
    def content(self) -> str:
        """Returns the content of the chat completion for the 0th choice."""
        return self.message.content if self.message.content is not None else ""

    @property
    def model(self) -> str:
        """Returns the name of the response model."""
        return self.response.model

    @property
    def id(self) -> str:
        """Returns the id of the response."""
        return self.response.id

    @property
    def finish_reasons(self) -> list[str]:
        """Returns the finish reasons of the response."""
        return [str(choice.finish_reason) for choice in self.response.choices]

    @property
    def tool_calls(self) -> Optional[list[ChatCompletionMessageToolCall]]:
        """Returns the tool calls for the 0th choice message."""
        return self.message.tool_calls

    @property
    def tools(self) -> Optional[list[OpenAITool]]:
        """Returns the tools for the 0th choice message.

        Raises:
            ValidationError: if a tool call doesn't match the tool's schema.
        """
        if not self.tool_types:
            return None

        if self.choice.finish_reason == "length":
            raise RuntimeError(
                "Finish reason was `length`, indicating the model ran out of tokens "
                "(and could not complete the tool call if trying to)"
            )

        def reconstruct_tools_from_content() -> list[OpenAITool]:
            # Note: we only handle single tool calls in this case
            tool_type = self.tool_types[0]  # type: ignore
            return [
                tool_type.from_tool_call(
                    ChatCompletionMessageToolCall(
                        id="id",
                        function=Function(
                            name=tool_type.name(), arguments=self.content
                        ),
                        type="function",
                    )
                )
            ]

        if self.response_format == ResponseFormat(type="json_object"):
            return reconstruct_tools_from_content()

        if not self.tool_calls:
            # Let's see if we got an assistant message back instead and try to
            # reconstruct a tool call in this case. We'll assume if it starts with
            # an open curly bracket that we got a tool call assistant message.
            if "{" == self.content[0]:
                # Note: we only handle single tool calls in JSON mode.
                return reconstruct_tools_from_content()
            return None

        extracted_tools = []
        for tool_call in self.tool_calls:
            for tool_type in self.tool_types:
                if tool_call.function.name == tool_type.name():
                    extracted_tools.append(tool_type.from_tool_call(tool_call))
                    break

        return extracted_tools

    @property
    def tool(self) -> Optional[OpenAITool]:
        """Returns the 0th tool for the 0th choice message.

        Raises:
            ValidationError: if the tool call doesn't match the tool's schema.
        """
        tools = self.tools
        if tools:
            return tools[0]
        return None

    @classmethod
    def tool_message_params(
        cls, tools_and_outputs: list[tuple[OpenAITool, str]]
    ) -> list[ChatCompletionToolMessageParam]:
        """Returns the tool message parameters for tool call results."""
        return [
            ChatCompletionToolMessageParam(
                role="tool",
                content=output,
                tool_call_id=tool.tool_call.id,
                name=tool.name(),  # type: ignore
            )
            for tool, output in tools_and_outputs
        ]

    @property
    def usage(self) -> Optional[CompletionUsage]:
        """Returns the usage of the chat completion."""
        if self.response.usage:
            return self.response.usage
        return None

    @property
    def input_tokens(self) -> Optional[int]:
        """Returns the number of input tokens."""
        if self.usage:
            return self.usage.prompt_tokens
        return None

    @property
    def output_tokens(self) -> Optional[int]:
        """Returns the number of output tokens."""
        if self.usage:
            return self.usage.completion_tokens
        return None

    def dump(self) -> dict[str, Any]:
        """Dumps the response to a dictionary."""
        return {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "output": self.response.model_dump(),
            "cost": self.cost,
        }
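
The `tool_message_params` classmethod in the source above converts `(tool, output)` pairs into `tool` role message parameters so tool results can be sent back to the model. A hedged sketch, reusing the hypothetical `AnimalMatcher` class from the `OpenAITool` example later on this page:

```python
response = AnimalMatcher(food="pizza", color="red").call()
if tool := response.tool:
    output = tool.fn(**tool.args)
    # Convert the tool output into a `tool` role message for a follow-up call.
    tool_messages = OpenAICallResponse.tool_message_params([(tool, output)])
```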

choice: Choice property

Returns the 0th choice.

choices: list[Choice] property

Returns the array of chat completion choices.

content: str property

Returns the content of the chat completion for the 0th choice.

finish_reasons: list[str] property

Returns the finish reasons of the response.

id: str property

Returns the id of the response.

input_tokens: Optional[int] property

Returns the number of input tokens.

message: ChatCompletionMessage property

Returns the message of the chat completion for the 0th choice.

message_param: ChatCompletionAssistantMessageParam property

Returns the assistant's response as a message parameter.

model: str property

Returns the name of the response model.

output_tokens: Optional[int] property

Returns the number of output tokens.

tool: Optional[OpenAITool] property

Returns the 0th tool for the 0th choice message.

Raises:

| Type | Description |
| --- | --- |
| `ValidationError` | If the tool call doesn't match the tool's schema. |

tool_calls: Optional[list[ChatCompletionMessageToolCall]] property

Returns the tool calls for the 0th choice message.

tools: Optional[list[OpenAITool]] property

Returns the tools for the 0th choice message.

Raises:

| Type | Description |
| --- | --- |
| `ValidationError` | If a tool call doesn't match the tool's schema. |

usage: Optional[CompletionUsage] property

Returns the usage of the chat completion.

dump()

Dumps the response to a dictionary.

Source code in mirascope/openai/types.py
def dump(self) -> dict[str, Any]:
    """Dumps the response to a dictionary."""
    return {
        "start_time": self.start_time,
        "end_time": self.end_time,
        "output": self.response.model_dump(),
        "cost": self.cost,
    }

OpenAICallResponseChunk

Bases: BaseCallResponseChunk[ChatCompletionChunk, OpenAITool]

Convenience wrapper around chat completion streaming chunks.

When using Mirascope's convenience wrappers to interact with OpenAI models via OpenAICall.stream, responses will return an OpenAICallResponseChunk, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.openai import OpenAICall


class Math(OpenAICall):
    prompt_template = "What is 1 + 2?"


content = ""
for chunk in Math().stream():
    content += chunk.content
    print(content)
#> 1
#  1 +
#  1 + 2
#  1 + 2 equals
#  1 + 2 equals
#  1 + 2 equals 3
#  1 + 2 equals 3.
Source code in mirascope/openai/types.py
class OpenAICallResponseChunk(BaseCallResponseChunk[ChatCompletionChunk, OpenAITool]):
    """Convenience wrapper around chat completion streaming chunks.

    When using Mirascope's convenience wrappers to interact with OpenAI models via
    `OpenAICall.stream`, responses will return an `OpenAICallResponseChunk`, whereby
    the implemented properties allow for simpler syntax and a convenient developer
    experience.

    Example:

    ```python
    from mirascope.openai import OpenAICall


    class Math(OpenAICall):
        prompt_template = "What is 1 + 2?"


    content = ""
    for chunk in Math().stream():
        content += chunk.content
        print(content)
    #> 1
    #  1 +
    #  1 + 2
    #  1 + 2 equals
    #  1 + 2 equals
    #  1 + 2 equals 3
    #  1 + 2 equals 3.
    ```
    """

    response_format: Optional[ResponseFormat] = None
    user_message_param: Optional[ChatCompletionUserMessageParam] = None

    @property
    def choices(self) -> list[ChunkChoice]:
        """Returns the array of chat completion choices."""
        return self.chunk.choices

    @property
    def choice(self) -> ChunkChoice:
        """Returns the 0th choice."""
        return self.chunk.choices[0]

    @property
    def delta(self) -> Optional[ChoiceDelta]:
        """Returns the delta for the 0th choice."""
        if self.chunk.choices:
            return self.chunk.choices[0].delta
        return None

    @property
    def content(self) -> str:
        """Returns the content for the 0th choice delta."""
        return (
            self.delta.content if self.delta is not None and self.delta.content else ""
        )

    @property
    def model(self) -> str:
        """Returns the name of the response model."""
        return self.chunk.model

    @property
    def id(self) -> str:
        """Returns the id of the response."""
        return self.chunk.id

    @property
    def finish_reasons(self) -> list[str]:
        """Returns the finish reasons of the response."""
        return [str(choice.finish_reason) for choice in self.chunk.choices]

    @property
    def tool_calls(self) -> Optional[list[ChoiceDeltaToolCall]]:
        """Returns the partial tool calls for the 0th choice message.

        The first `list[ChoiceDeltaToolCall]` will contain the name of the tool and
        index, and subsequent `list[ChoiceDeltaToolCall]`s will contain the arguments,
        which will be strings that need to be concatenated with future
        `list[ChoiceDeltaToolCall]`s to form a complete JSON tool call. The last
        `list[ChoiceDeltaToolCall]` will be `None`, indicating the end of the stream.
        """
        if self.delta:
            return self.delta.tool_calls
        return None

    @property
    def usage(self) -> Optional[CompletionUsage]:
        """Returns the usage of the chat completion."""
        if self.chunk.usage:
            return self.chunk.usage
        return None

    @property
    def input_tokens(self) -> Optional[int]:
        """Returns the number of input tokens."""
        if self.usage:
            return self.usage.prompt_tokens
        return None

    @property
    def output_tokens(self) -> Optional[int]:
        """Returns the number of output tokens."""
        if self.usage:
            return self.usage.completion_tokens
        return None

choice: ChunkChoice property

Returns the 0th choice.

choices: list[ChunkChoice] property

Returns the array of chat completion choices.

content: str property

Returns the content for the 0th choice delta.

delta: Optional[ChoiceDelta] property

Returns the delta for the 0th choice.

finish_reasons: list[str] property

Returns the finish reasons of the response.

id: str property

Returns the id of the response.

input_tokens: Optional[int] property

Returns the number of input tokens.

model: str property

Returns the name of the response model.

output_tokens: Optional[int] property

Returns the number of output tokens.

tool_calls: Optional[list[ChoiceDeltaToolCall]] property

Returns the partial tool calls for the 0th choice message.

The first list[ChoiceDeltaToolCall] will contain the name of the tool and index, and subsequent list[ChoiceDeltaToolCall]s will contain the arguments, which will be strings that need to be concatenated with future list[ChoiceDeltaToolCall]s to form a complete JSON tool call. The last list[ChoiceDeltaToolCall] will be None, indicating the end of the stream.
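
A hedged sketch of accumulating those fragments into a complete tool call (reusing the hypothetical `AnimalMatcher` class from the `OpenAITool` example below):

```python
name, arguments = "", ""
for chunk in AnimalMatcher(food="pizza", color="red").stream():
    if chunk.tool_calls:
        function = chunk.tool_calls[0].function
        if function and function.name:
            name = function.name  # the first chunk carries the tool name
        if function and function.arguments:
            arguments += function.arguments  # later chunks carry JSON fragments
# `arguments` now holds the complete JSON string of tool arguments.
```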

usage: Optional[CompletionUsage] property

Returns the usage of the chat completion.

OpenAITool

Bases: BaseTool[ChatCompletionMessageToolCall]

A base class for easy use of tools with the OpenAI Chat client.

OpenAITool internally handles the logic that allows you to use tools with simple calls such as OpenAICallResponse.tool or OpenAITool.fn, as seen in the examples below.

Example:

from mirascope.openai import OpenAICall, OpenAICallParams


def animal_matcher(fav_food: str, fav_color: str) -> str:
    """Tells you your most likely favorite animal from personality traits.

    Args:
        fav_food: your favorite food.
        fav_color: your favorite color.

    Returns:
        The animal most likely to be your favorite based on traits.
    """
    return "Your favorite animal is the best one, a frog."


class AnimalMatcher(OpenAICall):
    prompt_template = """
    Tell me my favorite animal if my favorite food is {food} and my
    favorite color is {color}.
    """

    food: str
    color: str

    call_params = OpenAICallParams(tools=[animal_matcher])


response = AnimalMatcher(food="pizza", color="red").call()
tool = response.tool
print(tool.fn(**tool.args))
#> Your favorite animal is the best one, a frog.
Source code in mirascope/openai/tools.py
class OpenAITool(BaseTool[ChatCompletionMessageToolCall]):
    '''A base class for easy use of tools with the OpenAI Chat client.

    `OpenAITool` internally handles the logic that allows you to use tools with simple
    calls such as `OpenAICallResponse.tool` or `OpenAITool.fn`, as seen in the
    examples below.

    Example:

    ```python
    from mirascope.openai import OpenAICall, OpenAICallParams


    def animal_matcher(fav_food: str, fav_color: str) -> str:
        """Tells you your most likely favorite animal from personality traits.

        Args:
            fav_food: your favorite food.
            fav_color: your favorite color.

        Returns:
            The animal most likely to be your favorite based on traits.
        """
        return "Your favorite animal is the best one, a frog."


    class AnimalMatcher(OpenAICall):
        prompt_template = """
        Tell me my favorite animal if my favorite food is {food} and my
        favorite color is {color}.
        """

        food: str
        color: str

        call_params = OpenAICallParams(tools=[animal_matcher])


    response = AnimalMatcher(food="pizza", color="red").call()
    tool = response.tool
    print(tool.fn(**tool.args))
    #> Your favorite animal is the best one, a frog.
    ```
    '''

    @classmethod
    def tool_schema(cls) -> ChatCompletionToolParam:
        """Constructs a tool schema for use with the OpenAI Chat client.

        A Mirascope `OpenAITool` is deconstructed into a JSON schema, and relevant keys
        are renamed to match the OpenAI `ChatCompletionToolParam` schema used to make
        function/tool calls in the OpenAI API.

        Returns:
            The constructed `ChatCompletionToolParam` schema.
        """
        fn = super().tool_schema()
        return cast(ChatCompletionToolParam, {"type": "function", "function": fn})

    @classmethod
    def from_tool_call(
        cls,
        tool_call: ChatCompletionMessageToolCall,
        allow_partial: bool = False,
    ) -> OpenAITool:
        """Extracts an instance of the tool constructed from a tool call response.

        Given `ChatCompletionMessageToolCall` from an OpenAI chat completion response,
        takes its function arguments and creates an `OpenAITool` instance from it.

        Args:
            tool_call: The `ChatCompletionMessageToolCall` to extract the tool from.
            allow_partial: Whether to allow partial JSON schemas.

        Returns:
            An instance of the tool constructed from the tool call.

        Raises:
            ValidationError: if the tool call doesn't match the tool schema.
        """
        if allow_partial:
            model_json = from_json(tool_call.function.arguments, allow_partial=True)
        else:
            try:
                model_json = json.loads(tool_call.function.arguments)
            except json.JSONDecodeError as e:
                raise ValueError() from e

        model_json["tool_call"] = tool_call.model_dump()
        return cls.model_validate(model_json)

    @classmethod
    def from_model(cls, model: Type[BaseModel]) -> Type[OpenAITool]:
        """Constructs a `OpenAITool` type from a `BaseModel` type."""
        return convert_base_model_to_tool(model, OpenAITool)

    @classmethod
    def from_fn(cls, fn: Callable) -> Type[OpenAITool]:
        """Constructs a `OpenAITool` type from a function."""
        return convert_function_to_tool(fn, OpenAITool)

    @classmethod
    def from_base_type(cls, base_type: Type[BaseType]) -> Type[OpenAITool]:
        """Constructs a `OpenAITool` type from a `BaseType` type."""
        return convert_base_type_to_tool(base_type, OpenAITool)

from_base_type(base_type) classmethod

Constructs a OpenAITool type from a BaseType type.

Source code in mirascope/openai/tools.py
@classmethod
def from_base_type(cls, base_type: Type[BaseType]) -> Type[OpenAITool]:
    """Constructs a `OpenAITool` type from a `BaseType` type."""
    return convert_base_type_to_tool(base_type, OpenAITool)

from_fn(fn) classmethod

Constructs a OpenAITool type from a function.

Source code in mirascope/openai/tools.py
@classmethod
def from_fn(cls, fn: Callable) -> Type[OpenAITool]:
    """Constructs a `OpenAITool` type from a function."""
    return convert_function_to_tool(fn, OpenAITool)

from_model(model) classmethod

Constructs a OpenAITool type from a BaseModel type.

Source code in mirascope/openai/tools.py
@classmethod
def from_model(cls, model: Type[BaseModel]) -> Type[OpenAITool]:
    """Constructs a `OpenAITool` type from a `BaseModel` type."""
    return convert_base_model_to_tool(model, OpenAITool)

from_tool_call(tool_call, allow_partial=False) classmethod

Extracts an instance of the tool constructed from a tool call response.

Given ChatCompletionMessageToolCall from an OpenAI chat completion response, takes its function arguments and creates an OpenAITool instance from it.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `tool_call` | `ChatCompletionMessageToolCall` | The `ChatCompletionMessageToolCall` to extract the tool from. | required |
| `allow_partial` | `bool` | Whether to allow partial JSON schemas. | `False` |

Returns:

| Type | Description |
| --- | --- |
| `OpenAITool` | An instance of the tool constructed from the tool call. |

Raises:

| Type | Description |
| --- | --- |
| `ValidationError` | If the tool call doesn't match the tool schema. |

Source code in mirascope/openai/tools.py
@classmethod
def from_tool_call(
    cls,
    tool_call: ChatCompletionMessageToolCall,
    allow_partial: bool = False,
) -> OpenAITool:
    """Extracts an instance of the tool constructed from a tool call response.

    Given `ChatCompletionMessageToolCall` from an OpenAI chat completion response,
    takes its function arguments and creates an `OpenAITool` instance from it.

    Args:
        tool_call: The `ChatCompletionMessageToolCall` to extract the tool from.
        allow_partial: Whether to allow partial JSON schemas.

    Returns:
        An instance of the tool constructed from the tool call.

    Raises:
        ValidationError: if the tool call doesn't match the tool schema.
    """
    if allow_partial:
        model_json = from_json(tool_call.function.arguments, allow_partial=True)
    else:
        try:
            model_json = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError as e:
            raise ValueError() from e

    model_json["tool_call"] = tool_call.model_dump()
    return cls.model_validate(model_json)
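
A minimal sketch of constructing a tool instance directly from a raw tool call; `FormatBook` is a hypothetical tool defined here for illustration:

```python
from openai.types.chat import ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_message_tool_call import Function

from mirascope.openai import OpenAITool


class FormatBook(OpenAITool):
    """Formats a book's title and author."""

    title: str
    author: str


tool_call = ChatCompletionMessageToolCall(
    id="call_123",
    type="function",
    function=Function(
        name="FormatBook",
        arguments='{"title": "The Name of the Wind", "author": "Patrick Rothfuss"}',
    ),
)

tool = FormatBook.from_tool_call(tool_call)
print(tool.title)
#> The Name of the Wind
```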

tool_schema() classmethod

Constructs a tool schema for use with the OpenAI Chat client.

A Mirascope OpenAITool is deconstructed into a JSON schema, and relevant keys are renamed to match the OpenAI ChatCompletionToolParam schema used to make function/tool calls in the OpenAI API.

Returns:

| Type | Description |
| --- | --- |
| `ChatCompletionToolParam` | The constructed `ChatCompletionToolParam` schema. |

Source code in mirascope/openai/tools.py
@classmethod
def tool_schema(cls) -> ChatCompletionToolParam:
    """Constructs a tool schema for use with the OpenAI Chat client.

    A Mirascope `OpenAITool` is deconstructed into a JSON schema, and relevant keys
    are renamed to match the OpenAI `ChatCompletionToolParam` schema used to make
    function/tool calls in the OpenAI API.

    Returns:
        The constructed `ChatCompletionToolParam` schema.
    """
    fn = super().tool_schema()
    return cast(ChatCompletionToolParam, {"type": "function", "function": fn})
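
Concretely, the returned dict nests the superclass's function schema under a "function" key. Roughly, for the hypothetical FormatBook tool sketched above (field contents abbreviated):

schema = FormatBook.tool_schema()
assert schema["type"] == "function"
# schema["function"] holds the tool's name, description, and parameters, e.g.:
# {
#     "name": "FormatBook",
#     "description": "Returns the title and author of a book, ...",
#     "parameters": {
#         "type": "object",
#         "properties": {"title": {...}, "author": {...}},
#         "required": ["title", "author"],
#     },
# }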

get_wrapped_async_client(client, self)

Get a wrapped async client.

Source code in mirascope/base/ops_utils.py
def get_wrapped_async_client(client: T, self: Union[BaseCall, BaseEmbedder]) -> T:
    """Get a wrapped async client."""
    if self.configuration.client_wrappers:
        for op in self.configuration.client_wrappers:
            if op == "langfuse":  # pragma: no cover
                from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI

                client = LangfuseAsyncOpenAI(
                    api_key=self.api_key, base_url=self.base_url
                )
            elif op == "logfire":  # pragma: no cover
                import logfire

                if self._provider == "openai":
                    logfire.instrument_openai(client)  # type: ignore
                elif self._provider == "anthropic":
                    logfire.instrument_anthropic(client)  # type: ignore
            elif callable(op):
                client = op(client)
    return client

get_wrapped_call(call, self, **kwargs)

Wraps a call with each operation configured in llm_ops, if any.

Source code in mirascope/base/ops_utils.py
def get_wrapped_call(call: C, self: Union[BaseCall, BaseEmbedder], **kwargs) -> C:
    """Wrap a call to add the `llm_ops` parameter if it exists."""
    if self.configuration.llm_ops:
        wrapped_call = call
        for op in self.configuration.llm_ops:
            if op == "weave":  # pragma: no cover
                import weave

                wrapped_call = weave.op()(wrapped_call)
            elif callable(op):
                wrapped_call = op(
                    wrapped_call,
                    self._provider,
                    **kwargs,
                )
        return wrapped_call
    return call
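
Custom ops are plain callables invoked as op(wrapped_call, self._provider, **kwargs). A hypothetical timing wrapper (timing_op is our own sketch, not a library helper) might look like:

import time


def timing_op(call, provider, **kwargs):
    """A hypothetical llm_ops entry that logs how long each call takes."""

    def timed_call(*args, **call_kwargs):
        start = time.perf_counter()
        result = call(*args, **call_kwargs)
        print(f"[{provider}] call took {time.perf_counter() - start:.2f}s")
        return result

    return timed_call

Attaching it via configuration = BaseConfig(llm_ops=[timing_op]) on a call class would route every call through timed_call.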

get_wrapped_client(client, self)

Get a wrapped client.

Source code in mirascope/base/ops_utils.py
def get_wrapped_client(client: T, self: Union[BaseCall, BaseEmbedder]) -> T:
    """Get a wrapped client."""
    if self.configuration.client_wrappers:
        for op in self.configuration.client_wrappers:  # pragma: no cover
            if op == "langfuse":
                from langfuse.openai import OpenAI as LangfuseOpenAI

                client = LangfuseOpenAI(api_key=self.api_key, base_url=self.base_url)
            elif op == "logfire":  # pragma: no cover
                import logfire

                if self._provider == "openai":
                    logfire.instrument_openai(client)  # type: ignore
                elif self._provider == "anthropic":
                    logfire.instrument_anthropic(client)  # type: ignore
            elif callable(op):
                client = op(client)
    return client
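
The same client_wrappers list drives both the sync and async variants above: besides the "langfuse" and "logfire" string shortcuts, any callable that takes and returns a client is applied in order. A hedged sketch (add_source_header is our own name, and it assumes the OpenAI client's with_options accepts default_headers):

def add_source_header(client):
    """A hypothetical wrapper that tags every request with an extra header."""
    return client.with_options(default_headers={"x-request-source": "docs-example"})


# On a call class, this would be attached via:
#     configuration = BaseConfig(client_wrappers=[add_source_header])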

openai_api_calculate_cost(usage, model='gpt-3.5-turbo-16k')

Calculate the cost of a completion using the OpenAI API.

https://openai.com/pricing

Model                   Input               Output
gpt-4o                  $5.00 / 1M tokens   $15.00 / 1M tokens
gpt-4o-2024-05-13       $5.00 / 1M tokens   $15.00 / 1M tokens
gpt-4-turbo             $10.00 / 1M tokens  $30.00 / 1M tokens
gpt-4-turbo-2024-04-09  $10.00 / 1M tokens  $30.00 / 1M tokens
gpt-3.5-turbo-0125      $0.50 / 1M tokens   $1.50 / 1M tokens
gpt-3.5-turbo-1106      $1.00 / 1M tokens   $2.00 / 1M tokens
gpt-4-1106-preview      $10.00 / 1M tokens  $30.00 / 1M tokens
gpt-4                   $30.00 / 1M tokens  $60.00 / 1M tokens
text-embedding-3-small  $0.02 / 1M tokens
text-embedding-3-large  $0.13 / 1M tokens
text-embedding-ada-002  $0.10 / 1M tokens

Source code in mirascope/openai/utils.py
def openai_api_calculate_cost(
    usage: Optional[CompletionUsage], model="gpt-3.5-turbo-16k"
) -> Optional[float]:
    """Calculate the cost of a completion using the OpenAI API.

    https://openai.com/pricing

    Model                   Input               Output
    gpt-4o                  $5.00 / 1M tokens   $15.00 / 1M tokens
    gpt-4o-2024-05-13       $5.00 / 1M tokens   $15.00 / 1M tokens
    gpt-4-turbo             $10.00 / 1M tokens  $30.00 / 1M tokens
    gpt-4-turbo-2024-04-09  $10.00 / 1M tokens  $30.00 / 1M tokens
    gpt-3.5-turbo-0125      $0.50 / 1M tokens   $1.50 / 1M tokens
    gpt-3.5-turbo-1106      $1.00 / 1M tokens   $2.00 / 1M tokens
    gpt-4-1106-preview      $10.00 / 1M tokens  $30.00 / 1M tokens
    gpt-4                   $30.00 / 1M tokens  $60.00 / 1M tokens
    text-embedding-3-small  $0.02 / 1M tokens
    text-embedding-3-large  $0.13 / 1M tokens
    text-embedding-ada-002  $0.10 / 1M tokens
    """
    pricing = {
        "gpt-4o": {
            "prompt": 0.000_005,
            "completion": 0.000_015,
        },
        "gpt-4o-2024-05-13": {
            "prompt": 0.000_005,
            "completion": 0.000_015,
        },
        "gpt-4-turbo": {
            "prompt": 0.000_01,
            "completion": 0.000_03,
        },
        "gpt-4-turbo-2024-04-09": {
            "prompt": 0.000_01,
            "completion": 0.000_03,
        },
        "gpt-3.5-turbo-0125": {
            "prompt": 0.000_000_5,
            "completion": 0.000_001_5,
        },
        "gpt-3.5-turbo-1106": {
            "prompt": 0.000_001,
            "completion": 0.000_002,
        },
        "gpt-4-1106-preview": {
            "prompt": 0.000_01,
            "completion": 0.000_03,
        },
        "gpt-4": {
            "prompt": 0.000_003,
            "completion": 0.000_006,
        },
        "gpt-3.5-turbo-4k": {
            "prompt": 0.000_015,
            "completion": 0.000_02,
        },
        "gpt-3.5-turbo-16k": {
            "prompt": 0.000_003,
            "completion": 0.000_004,
        },
        "gpt-4-8k": {
            "prompt": 0.000_003,
            "completion": 0.000_006,
        },
        "gpt-4-32k": {
            "prompt": 0.000_006,
            "completion": 0.000_012,
        },
        "text-embedding-3-small": {
            "prompt": 0.000_000_02,
            "completion": 0.000_000_02,
        },
        "text-embedding-ada-002": {
            "prompt": 0.000_000_1,
            "completion": 0.000_000_1,
        },
        "text-embedding-3-large": {
            "prompt": 0.000_000_13,
            "completion": 0.000_000_13,
        },
    }
    if usage is None:
        return None
    try:
        model_pricing = pricing[model]
    except KeyError:
        return None

    prompt_cost = usage.prompt_tokens * model_pricing["prompt"]
    completion_cost = usage.completion_tokens * model_pricing["completion"]
    total_cost = prompt_cost + completion_cost

    return total_cost
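
A quick worked example (CompletionUsage comes from the openai package; the token counts are invented):

from openai.types import CompletionUsage

from mirascope.openai.utils import openai_api_calculate_cost

usage = CompletionUsage(prompt_tokens=1_000, completion_tokens=500, total_tokens=1_500)

# gpt-4o: 1,000 * $0.000005 + 500 * $0.000015 = $0.005 + $0.0075 = $0.0125
print(openai_api_calculate_cost(usage, model="gpt-4o"))  # 0.0125

# Unknown models (and usage=None) return None rather than raising.
print(openai_api_calculate_cost(usage, model="not-a-real-model"))  # None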

retry(fn)

Decorator for retrying a function.

Source code in mirascope/base/utils.py
def retry(fn: F) -> F:
    """Decorator for retrying a function."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        """Wrapper for retrying a function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = Retrying(stop=stop_after_attempt(retries))
            else:
                return fn(*args, **kwargs)
        try:
            for attempt in retries:
                with attempt:
                    result = fn(*args, **kwargs)
                if (
                    attempt.retry_state.outcome
                    and not attempt.retry_state.outcome.failed
                ):
                    attempt.retry_state.set_result(result)
            return result
        except RetryError:
            raise

    @wraps(fn)
    async def wrapper_async(*args, **kwargs):
        """Wrapper for retrying an async function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = AsyncRetrying(stop=stop_after_attempt(retries))
            else:
                return await fn(*args, **kwargs)
        try:
            async for attempt in retries:
                with attempt:
                    result = await fn(*args, **kwargs)
                if (
                    attempt.retry_state.outcome
                    and not attempt.retry_state.outcome.failed
                ):
                    attempt.retry_state.set_result(result)
            return result
        except RetryError:
            raise

    @wraps(fn)
    def wrapper_generator(*args, **kwargs):
        """Wrapper for retrying a generator function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = Retrying(stop=stop_after_attempt(retries))
            else:
                for value in fn(*args, **kwargs):
                    yield value
                return
        try:
            for attempt in retries:
                with attempt:
                    for value in fn(*args, **kwargs):
                        yield value
        except RetryError:
            raise

    @wraps(fn)
    async def wrapper_generator_async(*args, **kwargs):
        """Wrapper for retrying an async generator function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = AsyncRetrying(stop=stop_after_attempt(retries))
            else:
                async for value in fn(*args, **kwargs):
                    yield value
                return
        try:
            async for attempt in retries:
                with attempt:
                    async for value in fn(*args, **kwargs):
                        yield value
        except RetryError:
            raise

    if inspect.iscoroutinefunction(fn):
        return cast(F, wrapper_async)
    elif inspect.isgeneratorfunction(fn):
        return cast(F, wrapper_generator)
    elif inspect.isasyncgenfunction(fn):
        return cast(F, wrapper_generator_async)
    else:
        return cast(F, wrapper)
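
In practice the decorator is applied to the call methods, so retries can be passed either as an int or as a tenacity retry object. A hedged sketch, assuming recommender is an instance of some concrete call class whose call method accepts retries:

from tenacity import Retrying, stop_after_attempt, wait_exponential

# An int n is converted to Retrying(stop=stop_after_attempt(n)):
response = recommender.call(retries=3)

# A Retrying instance gives full control over the stop and wait policies:
response = recommender.call(
    retries=Retrying(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10),
    )
)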