openai

A module for interacting with OpenAI models.

OpenAICall

Bases: BaseCall[OpenAICallResponse, OpenAICallResponseChunk, OpenAITool]

A base class for calling OpenAI's Chat Completion models.

Example:

from mirascope.openai import OpenAICall


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

response = BookRecommender(genre="fantasy").call()
print(response.content)
#> There are many great books to read, it ultimately depends...
Source code in mirascope/openai/calls.py
class OpenAICall(BaseCall[OpenAICallResponse, OpenAICallResponseChunk, OpenAITool]):
    """A base class for calling OpenAI's Chat Completion models.

    Example:

    ```python
    from mirascope.openai import OpenAICall


    class BookRecommender(OpenAICall):
        prompt_template = "Please recommend a {genre} book"

        genre: str

    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> There are many great books to read, it ultimately depends...
    ```
    """

    call_params: ClassVar[OpenAICallParams] = OpenAICallParams()
    _provider: ClassVar[str] = "openai"

    def messages(self) -> list[ChatCompletionMessageParam]:
        """Returns the template as a formatted list of messages."""
        message_type_by_role = {
            MessageRole.SYSTEM: ChatCompletionSystemMessageParam,
            MessageRole.USER: ChatCompletionUserMessageParam,
            MessageRole.ASSISTANT: ChatCompletionAssistantMessageParam,
            MessageRole.TOOL: ChatCompletionToolMessageParam,
        }
        return [
            message_type_by_role[MessageRole(message["role"])](**message)
            for message in self._parse_messages(list(message_type_by_role.keys()))
        ]

    @retry
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> OpenAICallResponse:
        """Makes a call to the model using this `OpenAICall` instance.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.Retrying` instance.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            An `OpenAICallResponse` instance.

        Raises:
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = get_wrapped_client(
            OpenAI(api_key=self.api_key, base_url=self.base_url), self
        )
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            response_type=OpenAICallResponse,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        start_time = datetime.datetime.now().timestamp() * 1000
        completion = create(
            messages=messages,
            stream=False,
            **kwargs,
        )
        return OpenAICallResponse(
            response=completion,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=openai_api_calculate_cost(completion.usage, completion.model),
            response_format=self.call_params.response_format,
        )

    @retry
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> OpenAICallResponse:
        """Makes an asynchronous call to the model using this `OpenAICall`.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.AsyncRetrying` instance.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            An `OpenAICallResponse` instance.

        Raises:
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = get_wrapped_async_client(
            AsyncOpenAI(api_key=self.api_key, base_url=self.base_url), self
        )
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            is_async=True,
            response_type=OpenAICallResponse,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        start_time = datetime.datetime.now().timestamp() * 1000
        completion = await create(
            messages=messages,
            stream=False,
            **kwargs,
        )
        return OpenAICallResponse(
            response=completion,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=openai_api_calculate_cost(completion.usage, completion.model),
            response_format=self.call_params.response_format,
        )

    @retry
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[OpenAICallResponseChunk, None, None]:
        """Streams the response for a call using this `OpenAICall`.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.Retrying` instance.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Yields:
            An `OpenAICallResponseChunk` for each chunk of the response.

        Raises:
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = get_wrapped_client(
            OpenAI(api_key=self.api_key, base_url=self.base_url), self
        )
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            response_chunk_type=OpenAICallResponseChunk,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        if not isinstance(client, AzureOpenAI):
            kwargs["stream_options"] = {"include_usage": True}
        stream = create(
            messages=messages,
            stream=True,
            **kwargs,
        )
        for chunk in stream:
            yield OpenAICallResponseChunk(
                chunk=chunk,
                tool_types=tool_types,
                response_format=self.call_params.response_format,
            )

    @retry
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[OpenAICallResponseChunk, None]:
        """Streams the response for an asynchronous call using this `OpenAICall`.

        Args:
            retries: An integer for the number of times to retry the call or
                a `tenacity.AsyncRetrying` instance.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Yields:
            An `OpenAICallResponseChunk` for each chunk of the response.

        Raises:
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        kwargs, tool_types = self._setup_openai_kwargs(kwargs)
        client = get_wrapped_async_client(
            AsyncOpenAI(api_key=self.api_key, base_url=self.base_url), self
        )
        create = get_wrapped_call(
            client.chat.completions.create,
            self,
            is_async=True,
            response_chunk_type=OpenAICallResponseChunk,
            tool_types=tool_types,
        )
        messages = self._update_messages_if_json(self.messages(), tool_types)
        if not isinstance(client, AsyncAzureOpenAI):
            kwargs["stream_options"] = {"include_usage": True}
        stream = await create(
            messages=messages,
            stream=True,
            **kwargs,
        )
        async for chunk in stream:
            yield OpenAICallResponseChunk(
                chunk=chunk,
                tool_types=tool_types,
                response_format=self.call_params.response_format,
            )

    ############################## PRIVATE METHODS ###################################

    def _setup_openai_kwargs(
        self,
        kwargs: dict[str, Any],
    ) -> tuple[
        dict[str, Any],
        Optional[list[Type[OpenAITool]]],
    ]:
        """Overrides the `BaseCall._setup` for Anthropic specific setup."""
        kwargs, tool_types = self._setup(kwargs, OpenAITool)
        if (
            self.call_params.response_format == ResponseFormat(type="json_object")
            and tool_types
        ):
            kwargs.pop("tools")
        return kwargs, tool_types

    def _update_messages_if_json(
        self,
        messages: list[ChatCompletionMessageParam],
        tool_types: Optional[list[type[OpenAITool]]],
    ) -> list[ChatCompletionMessageParam]:
        if (
            self.call_params.response_format == ResponseFormat(type="json_object")
            and tool_types
        ):
            messages.append(
                ChatCompletionUserMessageParam(
                    role="user", content=_json_mode_content(tool_type=tool_types[0])
                )
            )
        return messages

call(retries=0, **kwargs)

Makes a call to the model using this OpenAICall instance.

Parameters:

retries (Union[int, Retrying], default: 0)
    An integer for the number of times to retry the call or a tenacity.Retrying instance.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Returns:

OpenAICallResponse
    An OpenAICallResponse instance.

Raises:

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/calls.py
@retry
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> OpenAICallResponse:
    """Makes a call to the model using this `OpenAICall` instance.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.Retrying` instance.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        An `OpenAICallResponse` instance.

    Raises:
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = get_wrapped_client(
        OpenAI(api_key=self.api_key, base_url=self.base_url), self
    )
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        response_type=OpenAICallResponse,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    start_time = datetime.datetime.now().timestamp() * 1000
    completion = create(
        messages=messages,
        stream=False,
        **kwargs,
    )
    return OpenAICallResponse(
        response=completion,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=openai_api_calculate_cost(completion.usage, completion.model),
        response_format=self.call_params.response_format,
    )
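
Keyword arguments passed to call override matching values in call_params, so one-off settings need no subclassing. A minimal sketch, assuming the BookRecommender class from the example above and a configured OpenAI API key:

recommender = BookRecommender(genre="fantasy")
# `temperature` here overrides any value set in `call_params`.
response = recommender.call(temperature=0.7)
print(response.content)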

call_async(retries=0, **kwargs) async

Makes an asynchronous call to the model using this OpenAICall.

Parameters:

retries (Union[int, AsyncRetrying], default: 0)
    An integer for the number of times to retry the call or a tenacity.AsyncRetrying instance.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Returns:

OpenAICallResponse
    An OpenAICallResponse instance.

Raises:

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/calls.py
@retry
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> OpenAICallResponse:
    """Makes an asynchronous call to the model using this `OpenAICall`.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.AsyncRetrying` instance.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        An `OpenAICallResponse` instance.

    Raises:
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = get_wrapped_async_client(
        AsyncOpenAI(api_key=self.api_key, base_url=self.base_url), self
    )
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        is_async=True,
        response_type=OpenAICallResponse,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    start_time = datetime.datetime.now().timestamp() * 1000
    completion = await create(
        messages=messages,
        stream=False,
        **kwargs,
    )
    return OpenAICallResponse(
        response=completion,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=openai_api_calculate_cost(completion.usage, completion.model),
        response_format=self.call_params.response_format,
    )
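
Since call_async must be awaited, it runs inside an event loop. A minimal sketch, assuming the BookRecommender class from the earlier example:

import asyncio


async def main():
    response = await BookRecommender(genre="fantasy").call_async()
    print(response.content)


asyncio.run(main())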

messages()

Returns the template as a formatted list of messages.

Source code in mirascope/openai/calls.py
def messages(self) -> list[ChatCompletionMessageParam]:
    """Returns the template as a formatted list of messages."""
    message_type_by_role = {
        MessageRole.SYSTEM: ChatCompletionSystemMessageParam,
        MessageRole.USER: ChatCompletionUserMessageParam,
        MessageRole.ASSISTANT: ChatCompletionAssistantMessageParam,
        MessageRole.TOOL: ChatCompletionToolMessageParam,
    }
    return [
        message_type_by_role[MessageRole(message["role"])](**message)
        for message in self._parse_messages(list(message_type_by_role.keys()))
    ]
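
Each role keyword in a prompt template maps to the corresponding message param type above. A minimal sketch, assuming Mirascope's uppercase role keywords (SYSTEM:, USER:) in prompt_template:

from mirascope.openai import OpenAICall


class Librarian(OpenAICall):
    prompt_template = """
    SYSTEM: You are the world's greatest librarian.
    USER: Please recommend a {genre} book.
    """

    genre: str


# One ChatCompletionMessageParam per role-tagged section of the template.
print(Librarian(genre="fantasy").messages())
#> [{'role': 'system', ...}, {'role': 'user', ...}]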

stream(retries=0, **kwargs)

Streams the response for a call using this OpenAICall.

Parameters:

retries (Union[int, Retrying], default: 0)
    An integer for the number of times to retry the call or a tenacity.Retrying instance.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Yields:

OpenAICallResponseChunk
    An OpenAICallResponseChunk for each chunk of the response.

Raises:

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/calls.py
@retry
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[OpenAICallResponseChunk, None, None]:
    """Streams the response for a call using this `OpenAICall`.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.Retrying` instance.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Yields:
        An `OpenAICallResponseChunk` for each chunk of the response.

    Raises:
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = get_wrapped_client(
        OpenAI(api_key=self.api_key, base_url=self.base_url), self
    )
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        response_chunk_type=OpenAICallResponseChunk,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    if not isinstance(client, AzureOpenAI):
        kwargs["stream_options"] = {"include_usage": True}
    stream = create(
        messages=messages,
        stream=True,
        **kwargs,
    )
    for chunk in stream:
        yield OpenAICallResponseChunk(
            chunk=chunk,
            tool_types=tool_types,
            response_format=self.call_params.response_format,
        )
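
A minimal streaming sketch, assuming the BookRecommender class from the earlier example; each chunk carries the next slice of the response:

for chunk in BookRecommender(genre="fantasy").stream():
    print(chunk.content, end="", flush=True)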

stream_async(retries=0, **kwargs) async

Streams the response for an asynchronous call using this OpenAICall.

Parameters:

retries (Union[int, AsyncRetrying], default: 0)
    An integer for the number of times to retry the call or a tenacity.AsyncRetrying instance.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Yields:

AsyncGenerator[OpenAICallResponseChunk, None]
    An OpenAICallResponseChunk for each chunk of the response.

Raises:

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/calls.py
@retry
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[OpenAICallResponseChunk, None]:
    """Streams the response for an asynchronous call using this `OpenAICall`.

    Args:
        retries: An integer for the number of times to retry the call or
            a `tenacity.AsyncRetrying` instance.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Yields:
        An `OpenAICallResponseChunk` for each chunk of the response.

    Raises:
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    kwargs, tool_types = self._setup_openai_kwargs(kwargs)
    client = get_wrapped_async_client(
        AsyncOpenAI(api_key=self.api_key, base_url=self.base_url), self
    )
    create = get_wrapped_call(
        client.chat.completions.create,
        self,
        is_async=True,
        response_chunk_type=OpenAICallResponseChunk,
        tool_types=tool_types,
    )
    messages = self._update_messages_if_json(self.messages(), tool_types)
    if not isinstance(client, AsyncAzureOpenAI):
        kwargs["stream_options"] = {"include_usage": True}
    stream = await create(
        messages=messages,
        stream=True,
        **kwargs,
    )
    async for chunk in stream:
        yield OpenAICallResponseChunk(
            chunk=chunk,
            tool_types=tool_types,
            response_format=self.call_params.response_format,
        )
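
As with call_async, the stream must be consumed inside an event loop. A minimal sketch, assuming the BookRecommender class from the earlier example:

import asyncio


async def main():
    async for chunk in BookRecommender(genre="fantasy").stream_async():
        print(chunk.content, end="", flush=True)


asyncio.run(main())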

OpenAICallParams

Bases: BaseCallParams[OpenAITool]

The parameters to use when calling the OpenAI API.

Source code in mirascope/openai/types.py
class OpenAICallParams(BaseCallParams[OpenAITool]):
    """The parameters to use when calling the OpenAI API."""

    model: str = "gpt-4o-2024-05-13"
    frequency_penalty: Optional[float] = None
    logit_bias: Optional[dict[str, int]] = None
    logprobs: Optional[bool] = None
    max_tokens: Optional[int] = None
    n: Optional[int] = None
    presence_penalty: Optional[float] = None
    response_format: Optional[ResponseFormat] = None
    seed: Optional[int] = None
    stop: Union[Optional[str], list[str]] = None
    temperature: Optional[float] = None
    tool_choice: Optional[ChatCompletionToolChoiceOptionParam] = None
    top_logprobs: Optional[int] = None
    top_p: Optional[float] = None
    user: Optional[str] = None
    # Values defined below take precedence over values defined elsewhere. Use these
    # params to pass additional parameters to the API if necessary that aren't already
    # available as params.
    extra_headers: Optional[Headers] = None
    extra_query: Optional[Query] = None
    extra_body: Optional[Body] = None
    timeout: Optional[Union[float, Timeout]] = None

    model_config = ConfigDict(arbitrary_types_allowed=True)

    def kwargs(
        self,
        tool_type: Optional[Type[OpenAITool]] = OpenAITool,
        exclude: Optional[set[str]] = None,
    ) -> dict[str, Any]:
        """Returns the keyword argument call parameters."""
        return super().kwargs(tool_type, exclude)
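
Setting call_params on an OpenAICall subclass changes the defaults for every call that class makes; **kwargs at the call site still take precedence. A sketch (the model name is illustrative):

from mirascope.openai import OpenAICall, OpenAICallParams


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

    # Defaults for call, call_async, stream, and stream_async.
    call_params = OpenAICallParams(
        model="gpt-4o-mini", temperature=0.5, max_tokens=256
    )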

kwargs(tool_type=OpenAITool, exclude=None)

Returns the keyword argument call parameters.

Source code in mirascope/openai/types.py
def kwargs(
    self,
    tool_type: Optional[Type[OpenAITool]] = OpenAITool,
    exclude: Optional[set[str]] = None,
) -> dict[str, Any]:
    """Returns the keyword argument call parameters."""
    return super().kwargs(tool_type, exclude)

OpenAICallResponse

Bases: BaseCallResponse[ChatCompletion, OpenAITool]

A convenience wrapper around the OpenAI ChatCompletion response.

When using Mirascope's convenience wrappers to interact with OpenAI models via OpenAICall, responses using OpenAICall.call() will return an OpenAICallResponse, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.openai import OpenAICall


class BookRecommender(OpenAICall):
    prompt_template = "Please recommend a {genre} book"

    genre: str


response = BookRecommender(genre="fantasy").call()
print(response.content)
#> The Name of the Wind

print(response.message)
#> ChatCompletionMessage(content='The Name of the Wind', role='assistant',
#  function_call=None, tool_calls=None)

print(response.choices)
#> [Choice(finish_reason='stop', index=0, logprobs=None,
#  message=ChatCompletionMessage(content='The Name of the Wind', role='assistant',
#  function_call=None, tool_calls=None))]
Source code in mirascope/openai/types.py
class OpenAICallResponse(BaseCallResponse[ChatCompletion, OpenAITool]):
    """A convenience wrapper around the OpenAI `ChatCompletion` response.

    When using Mirascope's convenience wrappers to interact with OpenAI models via
    `OpenAICall`, responses using `OpenAICall.call()` will return an
    `OpenAICallResponse`, whereby the implemented properties allow for simpler syntax
    and a convenient developer experience.

    Example:

    ```python
    from mirascope.openai import OpenAICall


    class BookRecommender(OpenAICall):
        prompt_template = "Please recommend a {genre} book"

        genre: str


    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> The Name of the Wind

    print(response.message)
    #> ChatCompletionMessage(content='The Name of the Wind', role='assistant',
    #  function_call=None, tool_calls=None)

    print(response.choices)
    #> [Choice(finish_reason='stop', index=0, logprobs=None,
    #  message=ChatCompletionMessage(content='The Name of the Wind', role='assistant',
    #  function_call=None, tool_calls=None))]
    ```
    """

    response_format: Optional[ResponseFormat] = None

    @property
    def choices(self) -> list[Choice]:
        """Returns the array of chat completion choices."""
        return self.response.choices

    @property
    def choice(self) -> Choice:
        """Returns the 0th choice."""
        return self.choices[0]

    @property
    def message(self) -> ChatCompletionMessage:
        """Returns the message of the chat completion for the 0th choice."""
        return self.choice.message

    @property
    def content(self) -> str:
        """Returns the content of the chat completion for the 0th choice."""
        return self.message.content if self.message.content is not None else ""

    @property
    def tool_calls(self) -> Optional[list[ChatCompletionMessageToolCall]]:
        """Returns the tool calls for the 0th choice message."""
        return self.message.tool_calls

    @property
    def tools(self) -> Optional[list[OpenAITool]]:
        """Returns the tools for the 0th choice message.

        Raises:
            ValidationError: if a tool call doesn't match the tool's schema.
        """
        if not self.tool_types:
            return None

        if self.choice.finish_reason == "length":
            raise RuntimeError(
                "Finish reason was `length`, indicating the model ran out of tokens "
                "(and could not complete the tool call if trying to)"
            )

        def reconstruct_tools_from_content() -> list[OpenAITool]:
            # Note: we only handle single tool calls in this case
            tool_type = self.tool_types[0]  # type: ignore
            return [
                tool_type.from_tool_call(
                    ChatCompletionMessageToolCall(
                        id="id",
                        function=Function(
                            name=tool_type.__name__, arguments=self.content
                        ),
                        type="function",
                    )
                )
            ]

        if self.response_format == ResponseFormat(type="json_object"):
            return reconstruct_tools_from_content()

        if not self.tool_calls:
            # Let's see if we got an assistant message back instead and try to
            # reconstruct a tool call in this case. We'll assume if it starts with
            # an open curly bracket that we got a tool call assistant message.
            if "{" == self.content[0]:
                # Note: we only handle single tool calls in JSON mode.
                return reconstruct_tools_from_content()
            return None

        extracted_tools = []
        for tool_call in self.tool_calls:
            for tool_type in self.tool_types:
                if tool_call.function.name == tool_type.__name__:
                    extracted_tools.append(tool_type.from_tool_call(tool_call))
                    break

        return extracted_tools

    @property
    def tool(self) -> Optional[OpenAITool]:
        """Returns the 0th tool for the 0th choice message.

        Raises:
            ValidationError: if the tool call doesn't match the tool's schema.
        """
        tools = self.tools
        if tools:
            return tools[0]
        return None

    @property
    def usage(self) -> Optional[CompletionUsage]:
        """Returns the usage of the chat completion."""
        if self.response.usage:
            return self.response.usage
        return None

    @property
    def input_tokens(self) -> Optional[int]:
        """Returns the number of input tokens."""
        if self.usage:
            return self.usage.prompt_tokens
        return None

    @property
    def output_tokens(self) -> Optional[int]:
        """Returns the number of output tokens."""
        if self.usage:
            return self.usage.completion_tokens
        return None

    def dump(self) -> dict[str, Any]:
        """Dumps the response to a dictionary."""
        return {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "output": self.response.model_dump(),
            "cost": self.cost,
        }

choice: Choice property

Returns the 0th choice.

choices: list[Choice] property

Returns the array of chat completion choices.

content: str property

Returns the content of the chat completion for the 0th choice.

input_tokens: Optional[int] property

Returns the number of input tokens.

message: ChatCompletionMessage property

Returns the message of the chat completion for the 0th choice.

output_tokens: Optional[int] property

Returns the number of output tokens.

tool: Optional[OpenAITool] property

Returns the 0th tool for the 0th choice message.

Raises:

ValidationError
    If the tool call doesn't match the tool's schema.

tool_calls: Optional[list[ChatCompletionMessageToolCall]] property

Returns the tool calls for the 0th choice message.

tools: Optional[list[OpenAITool]] property

Returns the tools for the 0th choice message.

Raises:

ValidationError
    If a tool call doesn't match the tool's schema.
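
A sketch of dispatching extracted tools back to their attached functions, assuming the AnimalMatcher class from the OpenAITool example below:

response = AnimalMatcher(food="pizza", color="red").call()
if response.tools:
    for tool in response.tools:
        # Each tool instance carries validated args and the original function.
        print(tool.fn(**tool.args))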

usage: Optional[CompletionUsage] property

Returns the usage of the chat completion.

dump()

Dumps the response to a dictionary.

Source code in mirascope/openai/types.py
def dump(self) -> dict[str, Any]:
    """Dumps the response to a dictionary."""
    return {
        "start_time": self.start_time,
        "end_time": self.end_time,
        "output": self.response.model_dump(),
        "cost": self.cost,
    }
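
The dumped dictionary is convenient for logging. A minimal sketch, assuming the BookRecommender class from the OpenAICall example:

response = BookRecommender(genre="fantasy").call()
dumped = response.dump()
# start_time and end_time are millisecond timestamps captured around the call.
print(dumped["end_time"] - dumped["start_time"])
# `output` is the raw ChatCompletion as a dictionary.
print(dumped["output"]["model"], dumped["cost"])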

OpenAICallResponseChunk

Bases: BaseCallResponseChunk[ChatCompletionChunk, OpenAITool]

Convenience wrapper around chat completion streaming chunks.

When using Mirascope's convenience wrappers to interact with OpenAI models via OpenAICall.stream, responses will return an OpenAICallResponseChunk, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.openai import OpenAICall


class Math(OpenAICall):
    prompt_template = "What is 1 + 2?"


content = ""
for chunk in Math().stream():
    content += chunk.content
    print(content)
#> 1
#  1 +
#  1 + 2
#  1 + 2 equals
#  1 + 2 equals
#  1 + 2 equals 3
#  1 + 2 equals 3.
Source code in mirascope/openai/types.py
class OpenAICallResponseChunk(BaseCallResponseChunk[ChatCompletionChunk, OpenAITool]):
    """Convenience wrapper around chat completion streaming chunks.

    When using Mirascope's convenience wrappers to interact with OpenAI models via
    `OpenAICall.stream`, responses will return an `OpenAICallResponseChunk`, whereby
    the implemented properties allow for simpler syntax and a convenient developer
    experience.

    Example:

    ```python
    from mirascope.openai import OpenAICall


    class Math(OpenAICall):
        prompt_template = "What is 1 + 2?"


    content = ""
    for chunk in Math().stream():
        content += chunk.content
        print(content)
    #> 1
    #  1 +
    #  1 + 2
    #  1 + 2 equals
    #  1 + 2 equals
    #  1 + 2 equals 3
    #  1 + 2 equals 3.
    ```
    """

    response_format: Optional[ResponseFormat] = None

    @property
    def choices(self) -> list[ChunkChoice]:
        """Returns the array of chat completion choices."""
        return self.chunk.choices

    @property
    def choice(self) -> ChunkChoice:
        """Returns the 0th choice."""
        return self.chunk.choices[0]

    @property
    def delta(self) -> Optional[ChoiceDelta]:
        """Returns the delta for the 0th choice."""
        if self.chunk.choices:
            return self.chunk.choices[0].delta
        return None

    @property
    def content(self) -> str:
        """Returns the content for the 0th choice delta."""
        return (
            self.delta.content if self.delta is not None and self.delta.content else ""
        )

    @property
    def tool_calls(self) -> Optional[list[ChoiceDeltaToolCall]]:
        """Returns the partial tool calls for the 0th choice message.

        The first `list[ChoiceDeltaToolCall]` will contain the name of the tool and
        its index, and subsequent `list[ChoiceDeltaToolCall]`s will contain argument
        strings that must be concatenated with future
        `list[ChoiceDeltaToolCall]`s to form a complete JSON tool call. The last
        `list[ChoiceDeltaToolCall]` will be `None`, indicating the end of the stream.
        """
        if self.delta:
            return self.delta.tool_calls
        return None

choice: ChunkChoice property

Returns the 0th choice.

choices: list[ChunkChoice] property

Returns the array of chat completion choices.

content: str property

Returns the content for the 0th choice delta.

delta: Optional[ChoiceDelta] property

Returns the delta for the 0th choice.

tool_calls: Optional[list[ChoiceDeltaToolCall]] property

Returns the partial tool calls for the 0th choice message.

The first list[ChoiceDeltaToolCall] will contain the name of the tool and its index, and subsequent list[ChoiceDeltaToolCall]s will contain argument strings that must be concatenated with future list[ChoiceDeltaToolCall]s to form a complete JSON tool call. The last list[ChoiceDeltaToolCall] will be None, indicating the end of the stream.
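
A manual reconstruction sketch based on the description above, assuming the AnimalMatcher class from the OpenAITool example below (in practice, Mirascope's OpenAIToolStream handles this accumulation):

import json

name, arguments = "", ""
for chunk in AnimalMatcher(food="pizza", color="red").stream():
    if not chunk.tool_calls:
        continue
    for tool_call in chunk.tool_calls:
        if tool_call.function is None:
            continue
        # The first delta carries the tool name; later deltas carry
        # string fragments of the JSON arguments.
        if tool_call.function.name:
            name = tool_call.function.name
        if tool_call.function.arguments:
            arguments += tool_call.function.arguments

print(name, json.loads(arguments))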

OpenAIEmbedder

Bases: BaseEmbedder[OpenAIEmbeddingResponse]

OpenAI Embedder

Example:

import os
from mirascope.openai import OpenAIEmbedder

os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"

openai_embedder = OpenAIEmbedder()
response = openai_embedder.embed(["your text to embed"])
print(response)
Source code in mirascope/openai/embedders.py
class OpenAIEmbedder(BaseEmbedder[OpenAIEmbeddingResponse]):
    """OpenAI Embedder

    Example:

    ```python
    import os
    from mirascope.openai import OpenAIEmbedder

    os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"

    openai_embedder = OpenAIEmbedder()
    response = openai_embedder.embed(["your text to embed"])
    print(response)
    ```
    """

    dimensions: Optional[int] = 1536
    embed_batch_size: Optional[int] = 20
    max_workers: Optional[int] = 64
    embedding_params: ClassVar[OpenAIEmbeddingParams] = OpenAIEmbeddingParams(
        model="text-embedding-3-small"
    )
    _provider: ClassVar[str] = "openai"

    def embed(self, inputs: list[str]) -> OpenAIEmbeddingResponse:
        """Call the embedder with multiple inputs"""
        if self.embed_batch_size is None:
            return self._embed(inputs)

        input_batches = [
            inputs[i : i + self.embed_batch_size]
            for i in range(0, len(inputs), self.embed_batch_size)
        ]

        embedding_responses: list[OpenAIEmbeddingResponse] = [
            response
            for response in ThreadPoolExecutor(self.max_workers).map(
                lambda inputs: self._embed(inputs),
                input_batches,
            )
        ]
        return self._merge_batch_embeddings(embedding_responses)

    async def embed_async(self, inputs: list[str]) -> OpenAIEmbeddingResponse:
        """Asynchronously call the embedder with multiple inputs"""
        if self.embed_batch_size is None:
            return await self._embed_async(inputs)

        input_batches = [
            inputs[i : i + self.embed_batch_size]
            for i in range(0, len(inputs), self.embed_batch_size)
        ]
        embedding_responses: list[OpenAIEmbeddingResponse] = await asyncio.gather(
            *[self._embed_async(inputs) for inputs in input_batches]
        )
        return self._merge_batch_embeddings(embedding_responses)

    def __call__(self, input: list[str]) -> list[list[float]]:
        """Call the embedder with a input

        Chroma expects parameter to be `input`.
        """
        embedding_response = self.embed(input)

        return embedding_response.embeddings

    ############################## PRIVATE METHODS ###################################

    def _embed(self, inputs: list[str]) -> OpenAIEmbeddingResponse:
        """Call the embedder with a single input"""
        client = get_wrapped_client(
            OpenAI(api_key=self.api_key, base_url=self.base_url), self
        )
        kwargs = self.embedding_params.kwargs()
        if self.embedding_params.model != "text-embedding-ada-002":
            kwargs["dimensions"] = self.dimensions
        start_time = datetime.datetime.now().timestamp() * 1000
        embeddings = client.embeddings.create(input=inputs, **kwargs)
        return OpenAIEmbeddingResponse(
            response=embeddings,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
        )

    async def _embed_async(self, inputs: list[str]) -> OpenAIEmbeddingResponse:
        """Asynchronously call the embedder with a single input"""
        client = get_wrapped_async_client(
            AsyncOpenAI(api_key=self.api_key, base_url=self.base_url), self
        )
        kwargs = self.embedding_params.kwargs()
        if self.embedding_params.model != "text-embedding-ada-002":
            kwargs["dimensions"] = self.dimensions
        start_time = datetime.datetime.now().timestamp() * 1000
        embeddings = await client.embeddings.create(input=inputs, **kwargs)
        return OpenAIEmbeddingResponse(
            response=embeddings,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
        )

    def _merge_batch_embeddings(
        self, openai_embeddings: list[OpenAIEmbeddingResponse]
    ) -> OpenAIEmbeddingResponse:
        """Merge a batch of embeddings into a single embedding"""
        embeddings: list[Embedding] = []
        usage = Usage(
            prompt_tokens=0,
            total_tokens=0,
        )
        start_time = float("inf")
        end_time: float = 0.0
        i: int = 0
        for openai_embedding in openai_embeddings:
            for embedding in openai_embedding.response.data:
                embedding.index = i
                embeddings.append(embedding)
                i += 1
            usage.prompt_tokens += openai_embedding.response.usage.prompt_tokens
            usage.total_tokens += openai_embedding.response.usage.total_tokens
            start_time = min(start_time, openai_embedding.start_time)
            end_time = max(end_time, openai_embedding.end_time)
        create_embedding_response = CreateEmbeddingResponse(
            data=embeddings,
            model=openai_embeddings[0].response.model,
            object=openai_embeddings[0].response.object,
            usage=usage,
        )
        return OpenAIEmbeddingResponse(
            response=create_embedding_response,
            start_time=start_time,
            end_time=end_time,
        )

__call__(input)

Call the embedder with an input.

Chroma expects the parameter to be named input.

Source code in mirascope/openai/embedders.py
def __call__(self, input: list[str]) -> list[list[float]]:
    """Call the embedder with a input

    Chroma expects parameter to be `input`.
    """
    embedding_response = self.embed(input)

    return embedding_response.embeddings
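
Because of this, the embedder can be passed directly to Chroma. A hedged sketch (the chromadb usage is an assumption, not part of this module):

import chromadb

from mirascope.openai import OpenAIEmbedder

client = chromadb.Client()
# Chroma invokes the embedder as embedding_function(input=[...]).
collection = client.create_collection(
    name="books", embedding_function=OpenAIEmbedder()
)
collection.add(ids=["1"], documents=["The Name of the Wind"])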

embed(inputs)

Call the embedder with multiple inputs

Source code in mirascope/openai/embedders.py
def embed(self, inputs: list[str]) -> OpenAIEmbeddingResponse:
    """Call the embedder with multiple inputs"""
    if self.embed_batch_size is None:
        return self._embed(inputs)

    input_batches = [
        inputs[i : i + self.embed_batch_size]
        for i in range(0, len(inputs), self.embed_batch_size)
    ]

    embedding_responses: list[OpenAIEmbeddingResponse] = [
        response
        for response in ThreadPoolExecutor(self.max_workers).map(
            lambda inputs: self._embed(inputs),
            input_batches,
        )
    ]
    return self._merge_batch_embeddings(embedding_responses)
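
Inputs are split into batches of embed_batch_size and embedded concurrently across the thread pool before being merged back into a single response. For example, 50 inputs with the default batch size of 20 become three requests (20, 20, and 10):

from mirascope.openai import OpenAIEmbedder

embedder = OpenAIEmbedder(embed_batch_size=20)
texts = [f"document {i}" for i in range(50)]
# Three concurrent batch requests, merged with reindexed embeddings.
response = embedder.embed(texts)
print(len(response.embeddings))
#> 50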

embed_async(inputs) async

Asynchronously call the embedder with multiple inputs

Source code in mirascope/openai/embedders.py
async def embed_async(self, inputs: list[str]) -> OpenAIEmbeddingResponse:
    """Asynchronously call the embedder with multiple inputs"""
    if self.embed_batch_size is None:
        return await self._embed_async(inputs)

    input_batches = [
        inputs[i : i + self.embed_batch_size]
        for i in range(0, len(inputs), self.embed_batch_size)
    ]
    embedding_responses: list[OpenAIEmbeddingResponse] = await asyncio.gather(
        *[self._embed_async(inputs) for inputs in input_batches]
    )
    return self._merge_batch_embeddings(embedding_responses)

OpenAIEmbeddingResponse

Bases: BaseEmbeddingResponse[CreateEmbeddingResponse]

A convenience wrapper around the OpenAI CreateEmbeddingResponse response.

Source code in mirascope/openai/types.py
class OpenAIEmbeddingResponse(BaseEmbeddingResponse[CreateEmbeddingResponse]):
    """A convenience wrapper around the OpenAI `CreateEmbeddingResponse` response."""

    @property
    def embeddings(self) -> list[list[float]]:
        """Returns the raw embeddings."""
        embeddings_model: list[Embedding] = [
            embedding for embedding in self.response.data
        ]
        return [embedding.embedding for embedding in embeddings_model]

embeddings: list[list[float]] property

Returns the raw embeddings.

OpenAIExtractor

Bases: BaseExtractor[OpenAICall, OpenAITool, OpenAIToolStream, T], Generic[T]

A class for extracting structured information using OpenAI chat models.

Example:

from typing import Literal, Type

from mirascope.openai import OpenAIExtractor
from pydantic import BaseModel


class TaskDetails(BaseModel):
    title: str
    priority: Literal["low", "normal", "high"]
    due_date: str


class TaskExtractor(OpenAIExtractor[TaskDetails]):
    extract_schema: Type[TaskDetails] = TaskDetails

    prompt_template = """
    Please extract the task details:
    {task}
    """

    task: str


task_description = "Submit quarterly report by next Friday. Task is high priority."
task = TaskExtractor(task=task_description).extract(retries=3)
assert isinstance(task, TaskDetails)
print(task)
#> title='Submit quarterly report' priority='high' due_date='next Friday'
Source code in mirascope/openai/extractors.py
class OpenAIExtractor(
    BaseExtractor[OpenAICall, OpenAITool, OpenAIToolStream, T], Generic[T]
):
    '''A class for extracting structured information using OpenAI chat models.

    Example:

    ```python
    from typing import Literal, Type

    from mirascope.openai import OpenAIExtractor
    from pydantic import BaseModel


    class TaskDetails(BaseModel):
        title: str
        priority: Literal["low", "normal", "high"]
        due_date: str


    class TaskExtractor(OpenAIExtractor[TaskDetails]):
        extract_schema: Type[TaskDetails] = TaskDetails

        prompt_template = """
        Please extract the task details:
        {task}
        """

        task: str


    task_description = "Submit quarterly report by next Friday. Task is high priority."
    task = TaskExtractor(task=task_description).extract(retries=3)
    assert isinstance(task, TaskDetails)
    print(task)
    #> title='Submit quarterly report' priority='high' due_date='next Friday'
    ```
    '''

    call_params: ClassVar[OpenAICallParams] = OpenAICallParams()
    _provider: ClassVar[str] = "openai"

    def extract(self, retries: Union[int, Retrying] = 0, **kwargs: Any) -> T:
        """Extracts `extract_schema` from the OpenAI call response.

        The `extract_schema` is converted into an `OpenAITool`, complete with a
        description of the tool, all of the fields, and their types. This allows us to
        take advantage of OpenAI's tool/function calling functionality to extract
        information from a prompt according to the context provided by the `BaseModel`
        schema.

        Args:
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            The `extract_schema` instance extracted from the completion.

        Raises:
            AttributeError: if there is no tool in the call creation.
            ValidationError: if the schema cannot be instantiated from the completion.
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        return self._extract(OpenAICall, OpenAITool, retries, **kwargs)

    async def extract_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> T:
        """Asynchronously extracts `extract_schema` from the OpenAI call response.

        The `extract_schema` is converted into an `OpenAITool`, complete with a
        description of the tool, all of the fields, and their types. This allows us to
        take advantage of OpenAI's tool/function calling functionality to extract
        information from a prompt according to the context provided by the `BaseModel`
        schema.

        Args:
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            The `extract_schema` instance extracted from the completion.

        Raises:
            AttributeError: if there is no tool in the call creation.
            ValidationError: if the schema cannot be instantiated from the completion.
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        return await self._extract_async(OpenAICall, OpenAITool, retries, **kwargs)

    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[T, None, None]:
        """Streams partial instances of `extract_schema` as the schema is streamed.

        The `extract_schema` is converted into a `partial(OpenAITool)`, which allows for
        any field (i.e., function argument) in the tool to be `None`. This allows us to
        stream partial results as we construct the tool from the streamed chunks.

        Args:
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Yields:
            The partial `extract_schema` instance from the current buffer.

        Raises:
            AttributeError: if there is no tool in the call creation.
            ValidationError: if the schema cannot be instantiated from the completion.
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        yield from self._stream(
            OpenAICall, OpenAITool, OpenAIToolStream, retries, **kwargs
        )

    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[T, None]:
        """Asynchronously streams partial instances of `extract_schema` as streamed.

        The `extract_schema` is converted into a `partial(OpenAITool)`, which allows for
        any field (i.e.function argument) in the tool to be `None`. This allows us to
        stream partial results as we construct the tool from the streamed chunks.

        Args:
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword argument parameters to pass to the call. These
                will override any existing arguments in `call_params`.

        Yields:
            The partial `extract_schema` instance from the current buffer.

        Raises:
            AttributeError: if there is no tool in the call creation.
            ValidationError: if the schema cannot be instantiated from the completion.
            OpenAIError: raises any OpenAI errors, see:
                https://platform.openai.com/docs/guides/error-codes/api-errors
        """
        async for partial_tool in self._stream_async(
            OpenAICall, OpenAITool, OpenAIToolStream, retries, **kwargs
        ):
            yield partial_tool

extract(retries=0, **kwargs)

Extracts extract_schema from the OpenAI call response.

The extract_schema is converted into an OpenAITool, complete with a description of the tool, all of the fields, and their types. This allows us to take advantage of OpenAI's tool/function calling functionality to extract information from a prompt according to the context provided by the BaseModel schema.

Parameters:

retries (Union[int, Retrying], default: 0)
    The maximum number of times to retry the query on validation error.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Returns:

T
    The extract_schema instance extracted from the completion.

Raises:

AttributeError
    If there is no tool in the call creation.

ValidationError
    If the schema cannot be instantiated from the completion.

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/extractors.py
def extract(self, retries: Union[int, Retrying] = 0, **kwargs: Any) -> T:
    """Extracts `extract_schema` from the OpenAI call response.

    The `extract_schema` is converted into an `OpenAITool`, complete with a
    description of the tool, all of the fields, and their types. This allows us to
    take advantage of OpenAI's tool/function calling functionality to extract
    information from a prompt according to the context provided by the `BaseModel`
    schema.

    Args:
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        The `extract_schema` instance extracted from the completion.

    Raises:
        AttributeError: if there is no tool in the call creation.
        ValidationError: if the schema cannot be instantiated from the completion.
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    return self._extract(OpenAICall, OpenAITool, retries, **kwargs)

extract_async(retries=0, **kwargs) async

Asynchronously extracts extract_schema from the OpenAI call response.

The extract_schema is converted into an OpenAITool, complete with a description of the tool, all of the fields, and their types. This allows us to take advantage of OpenAI's tool/function calling functionality to extract information from a prompt according to the context provided by the BaseModel schema.

Parameters:

retries (Union[int, AsyncRetrying], default: 0)
    The maximum number of times to retry the query on validation error.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Returns:

T
    The extract_schema instance extracted from the completion.

Raises:

AttributeError
    If there is no tool in the call creation.

ValidationError
    If the schema cannot be instantiated from the completion.

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/extractors.py
async def extract_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> T:
    """Asynchronously extracts `extract_schema` from the OpenAI call response.

    The `extract_schema` is converted into an `OpenAITool`, complete with a
    description of the tool, all of the fields, and their types. This allows us to
    take advantage of OpenAI's tool/function calling functionality to extract
    information from a prompt according to the context provided by the `BaseModel`
    schema.

    Args:
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        The `extract_schema` instance extracted from the completion.

    Raises:
        AttributeError: if there is no tool in the call creation.
        ValidationError: if the schema cannot be instantiated from the completion.
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    return await self._extract_async(OpenAICall, OpenAITool, retries, **kwargs)

stream(retries=0, **kwargs)

Streams partial instances of extract_schema as the schema is streamed.

The extract_schema is converted into a partial(OpenAITool), which allows for any field (i.e., function argument) in the tool to be None. This allows us to stream partial results as we construct the tool from the streamed chunks.

Parameters:

retries (Union[int, Retrying], default: 0)
    The maximum number of times to retry the query on validation error.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Yields:

T
    The partial extract_schema instance from the current buffer.

Raises:

AttributeError
    If there is no tool in the call creation.

ValidationError
    If the schema cannot be instantiated from the completion.

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/extractors.py
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[T, None, None]:
    """Streams partial instances of `extract_schema` as the schema is streamed.

    The `extract_schema` is converted into a `partial(OpenAITool)`, which allows for
    any field (i.e., function argument) in the tool to be `None`. This allows us to
    stream partial results as we construct the tool from the streamed chunks.

    Args:
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword argument parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Yields:
        The partial `extract_schema` instance from the current buffer.

    Raises:
        AttributeError: if there is no tool in the call creation.
        ValidationError: if the schema cannot be instantiated from the completion.
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    yield from self._stream(
        OpenAICall, OpenAITool, OpenAIToolStream, retries, **kwargs
    )
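
A minimal sketch of consuming the partial extractions, assuming the TaskDetails and TaskExtractor classes from the example above; fields remain None until their chunks arrive:

for partial_task in TaskExtractor(task="Submit report by Friday.").stream():
    # Each yield is a TaskDetails instance with progressively more fields set.
    print(partial_task)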

stream_async(retries=0, **kwargs) async

Asynchronously streams partial instances of extract_schema as the schema is streamed.

The extract_schema is converted into a partial(OpenAITool), which allows for any field (i.e., function argument) in the tool to be None. This allows us to stream partial results as we construct the tool from the streamed chunks.

Parameters:

retries (Union[int, AsyncRetrying], default: 0)
    The maximum number of times to retry the query on validation error.

**kwargs (Any, default: {})
    Additional keyword argument parameters to pass to the call. These will override any existing arguments in call_params.

Yields:

AsyncGenerator[T, None]
    The partial extract_schema instance from the current buffer.

Raises:

AttributeError
    If there is no tool in the call creation.

ValidationError
    If the schema cannot be instantiated from the completion.

OpenAIError
    Any OpenAI errors; see: https://platform.openai.com/docs/guides/error-codes/api-errors

Source code in mirascope/openai/extractors.py
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[T, None]:
    """Asynchronously streams partial instances of `extract_schema` as streamed.

    The `extract_schema` is converted into a `partial(OpenAITool)`, which allows for
    any field (i.e.function argument) in the tool to be `None`. This allows us to
    stream partial results as we construct the tool from the streamed chunks.

    Args:
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword arguments parameters to pass to the call. These
            will override any existing arguments in `call_params`.

    Yields:
        The partial `extract_schema` instance from the current buffer.

    Raises:
        AttributeError: if there is no tool in the call creation.
        ValidationError: if the schema cannot be instantiated from the completion.
        OpenAIError: raises any OpenAI errors, see:
            https://platform.openai.com/docs/guides/error-codes/api-errors
    """
    async for partial_tool in self._stream_async(
        OpenAICall, OpenAITool, OpenAIToolStream, retries, **kwargs
    ):
        yield partial_tool
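
The async variant follows the same pattern. A brief sketch, reusing the hypothetical BookExtractor from the sketch above:

```python
import asyncio


async def stream_books() -> None:
    # Consume partial Book instances as the tool arguments stream in.
    async for partial_book in BookExtractor().stream_async():
        print(partial_book)


asyncio.run(stream_books())
```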

OpenAITool

Bases: BaseTool[ChatCompletionMessageToolCall]

A base class for easy use of tools with the OpenAI Chat client.

OpenAITool internally handles the logic that allows you to use tools with simple calls such as OpenAICallResponse.tool or OpenAITool.fn, as seen in the examples below.

Example:

from mirascope.openai import OpenAICall, OpenAICallParams


def animal_matcher(fav_food: str, fav_color: str) -> str:
    """Tells you your most likely favorite animal from personality traits.

    Args:
        fav_food: your favorite food.
        fav_color: your favorite color.

    Returns:
        The animal most likely to be your favorite based on traits.
    """
    return "Your favorite animal is the best one, a frog."


class AnimalMatcher(OpenAICall):
    prompt_template = """
    Tell me my favorite animal if my favorite food is {food} and my
    favorite color is {color}.
    """

    food: str
    color: str

    call_params = OpenAICallParams(tools=[animal_matcher])


response = AnimalMatcher(food="pizza", color="red").call()
tool = response.tool
print(tool.fn(**tool.args))
#> Your favorite animal is the best one, a frog.
Source code in mirascope/openai/tools.py
class OpenAITool(BaseTool[ChatCompletionMessageToolCall]):
    '''A base class for easy use of tools with the OpenAI Chat client.

    `OpenAITool` internally handles the logic that allows you to use tools with simple
    calls such as `OpenAICallResponse.tool` or `OpenAITool.fn`, as seen in the
    examples below.

    Example:

    ```python
    from mirascope.openai import OpenAICall, OpenAICallParams


    def animal_matcher(fav_food: str, fav_color: str) -> str:
        """Tells you your most likely favorite animal from personality traits.

        Args:
            fav_food: your favorite food.
            fav_color: your favorite color.

        Returns:
            The animal most likely to be your favorite based on traits.
        """
        return "Your favorite animal is the best one, a frog."


    class AnimalMatcher(OpenAICall):
        prompt_template = """
        Tell me my favorite animal if my favorite food is {food} and my
        favorite color is {color}.
        """

        food: str
        color: str

        call_params = OpenAICallParams(tools=[animal_matcher])


    response = AnimalMatcher(food="pizza", color="red").call()
    tool = response.tool
    print(tool.fn(**tool.args))
    #> Your favorite animal is the best one, a frog.
    ```
    '''

    @classmethod
    def tool_schema(cls) -> ChatCompletionToolParam:
        """Constructs a tool schema for use with the OpenAI Chat client.

        A Mirascope `OpenAITool` is deconstructed into a JSON schema, and relevant keys
        are renamed to match the OpenAI `ChatCompletionToolParam` schema used to make
        function/tool calls in the OpenAI API.

        Returns:
            The constructed `ChatCompletionToolParam` schema.
        """
        fn = super().tool_schema()
        return cast(ChatCompletionToolParam, {"type": "function", "function": fn})

    @classmethod
    def from_tool_call(
        cls,
        tool_call: ChatCompletionMessageToolCall,
        allow_partial: bool = False,
    ) -> OpenAITool:
        """Extracts an instance of the tool constructed from a tool call response.

        Given `ChatCompletionMessageToolCall` from an OpenAI chat completion response,
        takes its function arguments and creates an `OpenAITool` instance from it.

        Args:
            tool_call: The `ChatCompletionMessageToolCall` to extract the tool from.

        Returns:
            An instance of the tool constructed from the tool call.

        Raises:
            ValidationError: if the tool call doesn't match the tool schema.
            ValueError: if the tool call arguments are not valid JSON.
        """
        if allow_partial:
            model_json = from_json(tool_call.function.arguments, allow_partial=True)
        else:
            try:
                model_json = json.loads(tool_call.function.arguments)
            except json.JSONDecodeError as e:
                raise ValueError("tool call arguments are not valid JSON") from e

        model_json["tool_call"] = tool_call.model_dump()
        return cls.model_validate(model_json)

    @classmethod
    def from_model(cls, model: Type[BaseModel]) -> Type[OpenAITool]:
        """Constructs a `OpenAITool` type from a `BaseModel` type."""
        return convert_base_model_to_tool(model, OpenAITool)

    @classmethod
    def from_fn(cls, fn: Callable) -> Type[OpenAITool]:
        """Constructs a `OpenAITool` type from a function."""
        return convert_function_to_tool(fn, OpenAITool)

    @classmethod
    def from_base_type(cls, base_type: Type[BaseType]) -> Type[OpenAITool]:
        """Constructs a `OpenAITool` type from a `BaseType` type."""
        return convert_base_type_to_tool(base_type, OpenAITool)

from_base_type(base_type) classmethod

Constructs a OpenAITool type from a BaseType type.

Source code in mirascope/openai/tools.py
@classmethod
def from_base_type(cls, base_type: Type[BaseType]) -> Type[OpenAITool]:
    """Constructs a `OpenAITool` type from a `BaseType` type."""
    return convert_base_type_to_tool(base_type, OpenAITool)

from_fn(fn) classmethod

Constructs a OpenAITool type from a function.

Source code in mirascope/openai/tools.py
@classmethod
def from_fn(cls, fn: Callable) -> Type[OpenAITool]:
    """Constructs a `OpenAITool` type from a function."""
    return convert_function_to_tool(fn, OpenAITool)
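
A quick sketch of the function path (the get_weather function here is hypothetical); the function's signature and docstring drive the generated schema, which tool_schema() then renders in the OpenAI format:

```python
from mirascope.openai import OpenAITool


def get_weather(city: str) -> str:
    """Returns the current weather for a city.

    Args:
        city: The name of the city to look up.
    """
    return f"It is sunny in {city}."


GetWeather = OpenAITool.from_fn(get_weather)
print(GetWeather.tool_schema())
#> {'type': 'function', 'function': {'name': 'get_weather', ...}}
```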

from_model(model) classmethod

Constructs a OpenAITool type from a BaseModel type.

Source code in mirascope/openai/tools.py
@classmethod
def from_model(cls, model: Type[BaseModel]) -> Type[OpenAITool]:
    """Constructs a `OpenAITool` type from a `BaseModel` type."""
    return convert_base_model_to_tool(model, OpenAITool)
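
Similarly for the BaseModel path, a minimal sketch with a hypothetical model; the model's class name and docstring typically become the tool's name and description:

```python
from pydantic import BaseModel

from mirascope.openai import OpenAITool


class Book(BaseModel):
    """A book recommendation."""

    title: str
    author: str


BookTool = OpenAITool.from_model(Book)
print(BookTool.tool_schema()["function"]["name"])
#> Book
```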

from_tool_call(tool_call, allow_partial=False) classmethod

Extracts an instance of the tool constructed from a tool call response.

Given ChatCompletionMessageToolCall from an OpenAI chat completion response, takes its function arguments and creates an OpenAITool instance from it.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `tool_call` | `ChatCompletionMessageToolCall` | The `ChatCompletionMessageToolCall` to extract the tool from. | required |

Returns:

| Type | Description |
| --- | --- |
| `OpenAITool` | An instance of the tool constructed from the tool call. |

Raises:

| Type | Description |
| --- | --- |
| `ValidationError` | if the tool call doesn't match the tool schema. |
| `ValueError` | if the tool call arguments are not valid JSON. |

Source code in mirascope/openai/tools.py
@classmethod
def from_tool_call(
    cls,
    tool_call: ChatCompletionMessageToolCall,
    allow_partial: bool = False,
) -> OpenAITool:
    """Extracts an instance of the tool constructed from a tool call response.

    Given `ChatCompletionMessageToolCall` from an OpenAI chat completion response,
    takes its function arguments and creates an `OpenAITool` instance from it.

    Args:
        tool_call: The `ChatCompletionMessageToolCall` to extract the tool from.

    Returns:
        An instance of the tool constructed from the tool call.

    Raises:
        ValidationError: if the tool call doesn't match the tool schema.
        ValueError: if the tool call arguments are not valid JSON.
    """
    if allow_partial:
        model_json = from_json(tool_call.function.arguments, allow_partial=True)
    else:
        try:
            model_json = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError as e:
            raise ValueError("tool call arguments are not valid JSON") from e

    model_json["tool_call"] = tool_call.model_dump()
    return cls.model_validate(model_json)
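
To see this outside of a live completion, here is a sketch that hand-builds a tool call (the id and arguments are placeholders, and GetWeather is the hypothetical tool type from the from_fn sketch above):

```python
import json

from openai.types.chat import ChatCompletionMessageToolCall
from openai.types.chat.chat_completion_message_tool_call import Function

tool_call = ChatCompletionMessageToolCall(
    id="call_123",  # placeholder call id
    type="function",
    function=Function(
        name="GetWeather",
        arguments=json.dumps({"city": "Tokyo"}),
    ),
)
tool = GetWeather.from_tool_call(tool_call)
print(tool.args)
#> {'city': 'Tokyo'}
```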

tool_schema() classmethod

Constructs a tool schema for use with the OpenAI Chat client.

A Mirascope OpenAITool is deconstructed into a JSON schema, and relevant keys are renamed to match the OpenAI ChatCompletionToolParam schema used to make function/tool calls in the OpenAI API.

Returns:

| Type | Description |
| --- | --- |
| `ChatCompletionToolParam` | The constructed `ChatCompletionToolParam` schema. |

Source code in mirascope/openai/tools.py
@classmethod
def tool_schema(cls) -> ChatCompletionToolParam:
    """Constructs a tool schema for use with the OpenAI Chat client.

    A Mirascope `OpenAITool` is deconstructed into a JSON schema, and relevant keys
    are renamed to match the OpenAI `ChatCompletionToolParam` schema used to make
    function/tool calls in the OpenAI API.

    Returns:
        The constructed `ChatCompletionToolParam` schema.
    """
    fn = super().tool_schema()
    return cast(ChatCompletionToolParam, {"type": "function", "function": fn})

OpenAIToolStream

Bases: BaseToolStream[OpenAICallResponseChunk, OpenAITool]

A base class for streaming tools from response chunks.

Source code in mirascope/openai/tool_streams.py
class OpenAIToolStream(BaseToolStream[OpenAICallResponseChunk, OpenAITool]):
    """A base class for streaming tools from response chunks."""

    @classmethod
    @overload
    def from_stream(
        cls,
        stream: Generator[OpenAICallResponseChunk, None, None],
        allow_partial: Literal[True],
    ) -> Generator[Optional[OpenAITool], None, None]:
        yield ...  # type: ignore  # pragma: no cover

    @classmethod
    @overload
    def from_stream(
        cls,
        stream: Generator[OpenAICallResponseChunk, None, None],
        allow_partial: Literal[False],
    ) -> Generator[OpenAITool, None, None]:
        yield ...  # type: ignore  # pragma: no cover

    @classmethod
    @overload
    def from_stream(
        cls,
        stream: Generator[OpenAICallResponseChunk, None, None],
        allow_partial: bool = False,
    ) -> Generator[Optional[OpenAITool], None, None]:
        yield ...  # type: ignore  # pragma: no cover

    @classmethod
    def from_stream(cls, stream, allow_partial=False):
        """Yields partial tools from the given stream of chunks.

        Args:
            stream: The generator of chunks from which to stream tools.
            allow_partial: Whether to allow partial tools.

        Raises:
            RuntimeError: if a tool in the stream is of an unknown type.
        """
        cls._check_version_for_partial(allow_partial)
        current_tool_call = ChatCompletionMessageToolCall(
            id="", function=Function(arguments="", name=""), type="function"
        )
        current_tool_type = None
        for chunk in stream:
            tool, current_tool_call, current_tool_type, starting_new = _handle_chunk(
                chunk, current_tool_call, current_tool_type, allow_partial
            )
            if tool is not None:
                yield tool
            if starting_new:
                yield None
        if current_tool_type:
            yield current_tool_type.from_tool_call(current_tool_call)

    @classmethod
    @overload
    async def from_async_stream(
        cls,
        stream: AsyncGenerator[OpenAICallResponseChunk, None],
        allow_partial: Literal[True],
    ) -> AsyncGenerator[Optional[OpenAITool], None]:
        yield ...  # type: ignore  # pragma: no cover

    @classmethod
    @overload
    async def from_async_stream(
        cls,
        stream: AsyncGenerator[OpenAICallResponseChunk, None],
        allow_partial: Literal[False],
    ) -> AsyncGenerator[OpenAITool, None]:
        yield ...  # type: ignore  # pragma: no cover

    @classmethod
    @overload
    async def from_async_stream(
        cls,
        stream: AsyncGenerator[OpenAICallResponseChunk, None],
        allow_partial: bool = False,
    ) -> AsyncGenerator[Optional[OpenAITool], None]:
        yield ...  # type: ignore  # pragma: no cover

    @classmethod
    async def from_async_stream(cls, async_stream, allow_partial=False):
        """Yields partial tools from the given stream of chunks asynchronously.

        Args:
            async_stream: The async generator of chunks from which to stream tools.
            allow_partial: Whether to allow partial tools.

        Raises:
            RuntimeError: if a tool in the stream is of an unknown type.
        """
        cls._check_version_for_partial(allow_partial)
        current_tool_call = ChatCompletionMessageToolCall(
            id="", function=Function(arguments="", name=""), type="function"
        )
        current_tool_type = None
        async for chunk in async_stream:
            tool, current_tool_call, current_tool_type, starting_new = _handle_chunk(
                chunk, current_tool_call, current_tool_type, allow_partial
            )
            if tool is not None:
                yield tool
            if starting_new:
                yield None
        if current_tool_type:
            yield current_tool_type.from_tool_call(current_tool_call)

from_async_stream(async_stream, allow_partial=False) async classmethod

Yields partial tools from the given stream of chunks asynchronously.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `async_stream` | | The async generator of chunks from which to stream tools. | required |
| `allow_partial` | | Whether to allow partial tools. | `False` |

Raises:

| Type | Description |
| --- | --- |
| `RuntimeError` | if a tool in the stream is of an unknown type. |

Source code in mirascope/openai/tool_streams.py
@classmethod
async def from_async_stream(cls, async_stream, allow_partial=False):
    """Yields partial tools from the given stream of chunks asynchronously.

    Args:
        async_stream: The async generator of chunks from which to stream tools.
        allow_partial: Whether to allow partial tools.

    Raises:
        RuntimeError: if a tool in the stream is of an unknown type.
    """
    cls._check_version_for_partial(allow_partial)
    current_tool_call = ChatCompletionMessageToolCall(
        id="", function=Function(arguments="", name=""), type="function"
    )
    current_tool_type = None
    async for chunk in async_stream:
        tool, current_tool_call, current_tool_type, starting_new = _handle_chunk(
            chunk, current_tool_call, current_tool_type, allow_partial
        )
        if tool is not None:
            yield tool
        if starting_new:
            yield None
    if current_tool_type:
        yield current_tool_type.from_tool_call(current_tool_call)

from_stream(stream, allow_partial=False) classmethod

Yields partial tools from the given stream of chunks.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `stream` | | The generator of chunks from which to stream tools. | required |
| `allow_partial` | | Whether to allow partial tools. | `False` |

Raises:

| Type | Description |
| --- | --- |
| `RuntimeError` | if a tool in the stream is of an unknown type. |

Source code in mirascope/openai/tool_streams.py
@classmethod
def from_stream(cls, stream, allow_partial=False):
    """Yields partial tools from the given stream of chunks.

    Args:
        stream: The generator of chunks from which to stream tools.
        allow_partial: Whether to allow partial tools.

    Raises:
        RuntimeError: if a tool in the stream is of an unknown type.
    """
    cls._check_version_for_partial(allow_partial)
    current_tool_call = ChatCompletionMessageToolCall(
        id="", function=Function(arguments="", name=""), type="function"
    )
    current_tool_type = None
    for chunk in stream:
        tool, current_tool_call, current_tool_type, starting_new = _handle_chunk(
            chunk, current_tool_call, current_tool_type, allow_partial
        )
        if tool is not None:
            yield tool
        if starting_new:
            yield None
    if current_tool_type:
        yield current_tool_type.from_tool_call(current_tool_call)
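
As a usage sketch, assuming the AnimalMatcher call class from the OpenAITool example above, from_stream consumes the chunk stream directly; with allow_partial=True it yields partially-built tools as arguments arrive and a None separator when a new tool begins:

```python
from mirascope.openai import OpenAIToolStream

stream = AnimalMatcher(food="pizza", color="red").stream()
for tool in OpenAIToolStream.from_stream(stream, allow_partial=True):
    if tool is None:
        print("--- new tool starting ---")
    else:
        print(tool.args)  # arguments fill in as chunks arrive
```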

azure_client_wrapper(azure_endpoint, azure_deployment=None, api_version=None, api_key=None, azure_ad_token=None, azure_ad_token_provider=None, organization=None)

Returns a client wrapper for using OpenAI models on Microsoft Azure.

Source code in mirascope/openai/utils.py
def azure_client_wrapper(
    azure_endpoint: str,
    azure_deployment: Optional[str] = None,
    api_version: Optional[str] = None,
    api_key: Optional[str] = None,
    azure_ad_token: Optional[str] = None,
    azure_ad_token_provider: Optional[
        Union[AzureADTokenProvider, AsyncAzureADTokenProvider]
    ] = None,
    organization: Optional[str] = None,
) -> Callable[[Union[OpenAI, AsyncOpenAI]], Union[AzureOpenAI, AsyncAzureOpenAI]]:
    """Returns a client wrapper for using OpenAI models on Microsoft Azure."""

    def inner_wrapper(client: Union[OpenAI, AsyncOpenAI]):
        """Returns matching `AzureOpenAI` or `AsyncAzureOpenAI` client."""
        kwargs = {
            "azure_endpoint": azure_endpoint,
            "azure_deployment": azure_deployment,
            "api_version": api_version,
            "api_key": api_key,
            "azure_ad_token": azure_ad_token,
            "azure_ad_token_provider": azure_ad_token_provider,
            "organization": organization,
        }
        if isinstance(client, OpenAI):
            client = AzureOpenAI(**kwargs)  # type: ignore
        elif isinstance(client, AsyncOpenAI):
            client = AsyncAzureOpenAI(**kwargs)  # type: ignore
        return client

    return inner_wrapper
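
A brief sketch of what the wrapper does with a client (the endpoint, deployment, and key below are placeholders): calling the returned function on an OpenAI or AsyncOpenAI instance swaps in the matching Azure client built from the captured settings:

```python
from openai import OpenAI

from mirascope.openai.utils import azure_client_wrapper

wrapper = azure_client_wrapper(
    azure_endpoint="https://my-resource.openai.azure.com",  # placeholder
    azure_deployment="my-gpt-4o-deployment",  # placeholder
    api_version="2024-02-01",
    api_key="AZURE_OPENAI_API_KEY",  # placeholder
)
azure_client = wrapper(OpenAI(api_key="unused"))
print(type(azure_client).__name__)
#> AzureOpenAI
```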

openai_api_calculate_cost(usage, model='gpt-3.5-turbo-16k')

Calculate the cost of a completion using the OpenAI API.

https://openai.com/pricing

| Model | Input | Output |
| --- | --- | --- |
| gpt-4o | $5.00 / 1M tokens | $15.00 / 1M tokens |
| gpt-4o-2024-05-13 | $5.00 / 1M tokens | $15.00 / 1M tokens |
| gpt-4-turbo | $10.00 / 1M tokens | $30.00 / 1M tokens |
| gpt-4-turbo-2024-04-09 | $10.00 / 1M tokens | $30.00 / 1M tokens |
| gpt-3.5-turbo-0125 | $0.50 / 1M tokens | $1.50 / 1M tokens |
| gpt-3.5-turbo-1106 | $1.00 / 1M tokens | $2.00 / 1M tokens |
| gpt-4-1106-preview | $10.00 / 1M tokens | $30.00 / 1M tokens |
| gpt-4 | $30.00 / 1M tokens | $60.00 / 1M tokens |
| text-embedding-3-small | $0.02 / 1M tokens | |
| text-embedding-3-large | $0.13 / 1M tokens | |
| text-embedding-ada-002 | $0.10 / 1M tokens | |

Source code in mirascope/openai/utils.py
def openai_api_calculate_cost(
    usage: Optional[CompletionUsage], model="gpt-3.5-turbo-16k"
) -> Optional[float]:
    """Calculate the cost of a completion using the OpenAI API.

    https://openai.com/pricing

    Model                   Input               Output
    gpt-4o                  $5.00 / 1M tokens   $15.00 / 1M tokens
    gpt-4o-2024-05-13       $5.00 / 1M tokens   $15.00 / 1M tokens
    gpt-4-turbo             $10.00 / 1M tokens  $30.00 / 1M tokens
    gpt-4-turbo-2024-04-09  $10.00 / 1M tokens  $30.00 / 1M tokens
    gpt-3.5-turbo-0125      $0.50 / 1M tokens   $1.50 / 1M tokens
    gpt-3.5-turbo-1106      $1.00 / 1M tokens   $2.00 / 1M tokens
    gpt-4-1106-preview      $10.00 / 1M tokens  $30.00 / 1M tokens
    gpt-4                   $30.00 / 1M tokens  $60.00 / 1M tokens
    text-embedding-3-small  $0.02 / 1M tokens
    text-embedding-3-large  $0.13 / 1M tokens
    text-embedding-ada-002  $0.10 / 1M tokens
    """
    pricing = {
        "gpt-4o": {
            "prompt": 0.000_005,
            "completion": 0.000_015,
        },
        "gpt-4o-2024-05-13": {
            "prompt": 0.000_005,
            "completion": 0.000_015,
        },
        "gpt-4-turbo": {
            "prompt": 0.000_01,
            "completion": 0.000_03,
        },
        "gpt-4-turbo-2024-04-09": {
            "prompt": 0.000_01,
            "completion": 0.000_03,
        },
        "gpt-3.5-turbo-0125": {
            "prompt": 0.000_000_5,
            "completion": 0.000_001_5,
        },
        "gpt-3.5-turbo-1106": {
            "prompt": 0.000_001,
            "completion": 0.000_002,
        },
        "gpt-4-1106-preview": {
            "prompt": 0.000_01,
            "completion": 0.000_03,
        },
        "gpt-4": {
            "prompt": 0.000_003,
            "completion": 0.000_006,
        },
        "gpt-3.5-turbo-4k": {
            "prompt": 0.000_015,
            "completion": 0.000_02,
        },
        "gpt-3.5-turbo-16k": {
            "prompt": 0.000_003,
            "completion": 0.000_004,
        },
        "gpt-4-8k": {
            "prompt": 0.000_003,
            "completion": 0.000_006,
        },
        "gpt-4-32k": {
            "prompt": 0.000_006,
            "completion": 0.000_012,
        },
        "text-embedding-3-small": {
            "prompt": 0.000_000_02,
            "completion": 0.000_000_02,
        },
        "text-embedding-ada-002": {
            "prompt": 0.000_000_1,
            "completion": 0.000_000_1,
        },
        "text-embedding-3-large": {
            "prompt": 0.000_000_13,
            "completion": 0.000_000_13,
        },
    }
    if usage is None:
        return None
    try:
        model_pricing = pricing[model]
    except KeyError:
        return None

    prompt_cost = usage.prompt_tokens * model_pricing["prompt"]
    completion_cost = usage.completion_tokens * model_pricing["completion"]
    total_cost = prompt_cost + completion_cost

    return total_cost
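
A worked sketch of the arithmetic (the usage numbers are arbitrary): for gpt-4o, 1,000 prompt tokens at $5.00/1M plus 500 completion tokens at $15.00/1M comes to $0.0125.

```python
from openai.types.completion_usage import CompletionUsage

from mirascope.openai.utils import openai_api_calculate_cost

usage = CompletionUsage(prompt_tokens=1_000, completion_tokens=500, total_tokens=1_500)
cost = openai_api_calculate_cost(usage, model="gpt-4o")
print(f"${cost:.4f}")  # 1_000 * 0.000_005 + 500 * 0.000_015
#> $0.0125
```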