mistral

A module for interacting with Mistral models.

MistralAsyncStream

Bases: BaseAsyncStream[MistralCallResponseChunk, UserMessage, AssistantMessage, MistralTool]

A class for streaming responses from Mistral's API.

Source code in mirascope/mistral/types.py
class MistralAsyncStream(
    BaseAsyncStream[
        MistralCallResponseChunk,
        UserMessage,
        AssistantMessage,
        MistralTool,
    ]
):
    """A class for streaming responses from Mistral's API."""

    def __init__(self, stream: AsyncGenerator[MistralCallResponseChunk, None]):
        """Initializes an instance of `MistralAsyncStream`."""
        super().__init__(stream, AssistantMessage)
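
A construction sketch (assuming the wrapped async generator comes from MistralCall.stream_async(), whose chunk type matches; iteration behavior comes from BaseAsyncStream, which isn't shown here):

from mirascope.mistral import MistralAsyncStream, MistralCall

class BookRecommender(MistralCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

# Wrap the async generator of `MistralCallResponseChunk`s from `stream_async()`.
stream = MistralAsyncStream(BookRecommender(genre="fantasy").stream_async())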

MistralCall

Bases: BaseCall[MistralCallResponse, MistralCallResponseChunk, MistralTool, UserMessage]

A class for" prompting Mistral's chat API.

Example:

from mirascope.mistral import MistralCall

class BookRecommender(MistralCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

response = BookRecommender(genre="fantasy").call()
print(response.content)
#> There are many great books to read, it ultimately depends...
Source code in mirascope/mistral/calls.py
class MistralCall(
    BaseCall[MistralCallResponse, MistralCallResponseChunk, MistralTool, UserMessage]
):
    """A class for" prompting Mistral's chat API.

    Example:

    ```python
    from mirascope.mistral import MistralCall

    class BookRecommender(MistralCall):
        prompt_template = "Please recommend a {genre} book"

        genre: str

    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> There are many great books to read, it ultimately depends...
    ```
    """

    call_params: ClassVar[MistralCallParams] = MistralCallParams()
    _provider: ClassVar[str] = "mistral"

    def messages(self) -> list[Message]:
        """Returns the template as a formatted list of messages."""
        return self._parse_messages(
            [MessageRole.SYSTEM, MessageRole.USER, MessageRole.ASSISTANT]
        )

    @retry
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> MistralCallResponse:
        """Makes a call to the model using this `MistralCall` instance.

        Args:
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            A `MistralCallResponse` instance.

        Raises:
            MistralException: raises any Mistral errors, see:
                https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
        """
        kwargs, tool_types = self._setup(kwargs, MistralTool)
        client = get_wrapped_client(
            MistralClient(
                api_key=self.api_key,
                endpoint=self.base_url if self.base_url else ENDPOINT,
            ),
            self,
        )
        chat = get_wrapped_call(
            client.chat,
            self,
            response_type=MistralCallResponse,
            tool_types=tool_types,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        start_time = datetime.datetime.now().timestamp() * 1000
        completion = chat(messages=messages, **kwargs)
        return MistralCallResponse(
            response=completion,
            user_message_param=user_message_param,
            tool_types=tool_types,
            start_time=start_time,
            cost=mistral_api_calculate_cost(completion.usage, completion.model),
            end_time=datetime.datetime.now().timestamp() * 1000,
        )

    @retry
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> MistralCallResponse:
        """Makes an asynchronous call to the model using this `MistralCall` instance.

        Args:
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            A `MistralCallResponse` instance.

        Raises:
            MistralException: raises any Mistral errors, see:
                https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
        """
        kwargs, tool_types = self._setup(kwargs, MistralTool)
        client = get_wrapped_async_client(
            MistralAsyncClient(
                api_key=self.api_key,
                endpoint=self.base_url if self.base_url else ENDPOINT,
            ),
            self,
        )
        chat = get_wrapped_call(
            client.chat,
            self,
            is_async=True,
            response_type=MistralCallResponse,
            tool_types=tool_types,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        start_time = datetime.datetime.now().timestamp() * 1000
        completion = await chat(messages=messages, **kwargs)
        return MistralCallResponse(
            response=completion,
            user_message_param=user_message_param,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=mistral_api_calculate_cost(completion.usage, completion.model),
        )

    @retry
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[MistralCallResponseChunk, None, None]:
        """Streams the response for a call using this `MistralCall` instance.

        Args:
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            A `MistralCallResponseChunk` for each chunk of the response.

        Raises:
            MistralException: raises any Mistral errors, see:
                https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
        """
        kwargs, tool_types = self._setup(kwargs, MistralTool)
        client = get_wrapped_client(
            MistralClient(
                api_key=self.api_key,
                endpoint=self.base_url if self.base_url else ENDPOINT,
            ),
            self,
        )
        chat_stream = get_wrapped_call(
            client.chat_stream,
            self,
            response_chunk_type=MistralCallResponseChunk,
            tool_types=tool_types,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        for chunk in chat_stream(messages=messages, **kwargs):
            yield MistralCallResponseChunk(
                chunk=chunk,
                user_message_param=user_message_param,
                tool_types=tool_types,
            )

    @retry
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[MistralCallResponseChunk, None]:
        """Streams the response for an asynchronous call using this `MistralCall`.

        Args:
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            A `MistralCallResponseChunk` for each chunk of the response.

        Raises:
            MistralException: raises any Mistral errors, see:
                https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
        """
        kwargs, tool_types = self._setup(kwargs, MistralTool)
        client = get_wrapped_async_client(
            MistralAsyncClient(
                api_key=self.api_key,
                endpoint=self.base_url if self.base_url else ENDPOINT,
            ),
            self,
        )
        chat_stream = get_wrapped_call(
            client.chat_stream,
            self,
            is_async=True,
            response_chunk_type=MistralCallResponseChunk,
            tool_types=tool_types,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        async for chunk in chat_stream(messages=messages, **kwargs):
            yield MistralCallResponseChunk(
                chunk=chunk,
                user_message_param=user_message_param,
                tool_types=tool_types,
            )

call(retries=0, **kwargs)

Makes a call to the model using this MistralCall instance.

Parameters:

    **kwargs (Any): Additional keyword arguments to pass to the call; these will override any existing arguments in call_params. Defaults to {}.

Returns:

    MistralCallResponse: A MistralCallResponse instance.

Raises:

    MistralException: Any Mistral errors, see:
        https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
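
For instance, keyword arguments are forwarded to the underlying chat request, so sampling parameters can be overridden for a single call; a minimal sketch reusing the BookRecommender example from above:

from mirascope.mistral import MistralCall

class BookRecommender(MistralCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

# `temperature` here overrides any value set in `call_params` for this call only.
response = BookRecommender(genre="fantasy").call(temperature=0.2)
print(response.content)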

Source code in mirascope/mistral/calls.py
@retry
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> MistralCallResponse:
    """Makes a call to the model using this `MistralCall` instance.

    Args:
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        A `MistralCallResponse` instance.

    Raises:
        MistralException: raises any Mistral errors, see:
            https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
    """
    kwargs, tool_types = self._setup(kwargs, MistralTool)
    client = get_wrapped_client(
        MistralClient(
            api_key=self.api_key,
            endpoint=self.base_url if self.base_url else ENDPOINT,
        ),
        self,
    )
    chat = get_wrapped_call(
        client.chat,
        self,
        response_type=MistralCallResponse,
        tool_types=tool_types,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    start_time = datetime.datetime.now().timestamp() * 1000
    completion = chat(messages=messages, **kwargs)
    return MistralCallResponse(
        response=completion,
        user_message_param=user_message_param,
        tool_types=tool_types,
        start_time=start_time,
        cost=mistral_api_calculate_cost(completion.usage, completion.model),
        end_time=datetime.datetime.now().timestamp() * 1000,
    )

call_async(retries=0, **kwargs) async

Makes an asynchronous call to the model using this MistralCall instance.

Parameters:

    **kwargs (Any): Additional keyword arguments to pass to the call; these will override any existing arguments in call_params. Defaults to {}.

Returns:

    MistralCallResponse: A MistralCallResponse instance.

Raises:

    MistralException: Any Mistral errors, see:
        https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
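
Since call_async is a coroutine, it must be awaited; a sketch reusing the BookRecommender class defined in the call example above:

import asyncio

async def main():
    response = await BookRecommender(genre="fantasy").call_async()
    print(response.content)

asyncio.run(main())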

Source code in mirascope/mistral/calls.py
@retry
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> MistralCallResponse:
    """Makes an asynchronous call to the model using this `MistralCall` instance.

    Args:
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        A `MistralCallResponse` instance.

    Raises:
        MistralException: raises any Mistral errors, see:
            https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
    """
    kwargs, tool_types = self._setup(kwargs, MistralTool)
    client = get_wrapped_async_client(
        MistralAsyncClient(
            api_key=self.api_key,
            endpoint=self.base_url if self.base_url else ENDPOINT,
        ),
        self,
    )
    chat = get_wrapped_call(
        client.chat,
        self,
        is_async=True,
        response_type=MistralCallResponse,
        tool_types=tool_types,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    start_time = datetime.datetime.now().timestamp() * 1000
    completion = await chat(messages=messages, **kwargs)
    return MistralCallResponse(
        response=completion,
        user_message_param=user_message_param,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=mistral_api_calculate_cost(completion.usage, completion.model),
    )

messages()

Returns the template as a formatted list of messages.

Source code in mirascope/mistral/calls.py
def messages(self) -> list[Message]:
    """Returns the template as a formatted list of messages."""
    return self._parse_messages(
        [MessageRole.SYSTEM, MessageRole.USER, MessageRole.ASSISTANT]
    )

stream(retries=0, **kwargs)

Streams the response for a call using this MistralCall instance.

Parameters:

    **kwargs (Any): Additional keyword arguments to pass to the call; these will override any existing arguments in call_params. Defaults to {}.

Returns:

    Generator[MistralCallResponseChunk, None, None]: A MistralCallResponseChunk for each chunk of the response.

Raises:

    MistralException: Any Mistral errors, see:
        https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
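
A sketch of consuming the stream chunk by chunk, again with the BookRecommender class from the call example above; each chunk's content property holds the text delta:

for chunk in BookRecommender(genre="fantasy").stream():
    print(chunk.content, end="", flush=True)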

Source code in mirascope/mistral/calls.py
@retry
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[MistralCallResponseChunk, None, None]:
    """Streams the response for a call using this `MistralCall` instance.

    Args:
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        A `MistralCallResponseChunk` for each chunk of the response.

    Raises:
        MistralException: raises any Mistral errors, see:
            https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
    """
    kwargs, tool_types = self._setup(kwargs, MistralTool)
    client = get_wrapped_client(
        MistralClient(
            api_key=self.api_key,
            endpoint=self.base_url if self.base_url else ENDPOINT,
        ),
        self,
    )
    chat_stream = get_wrapped_call(
        client.chat_stream,
        self,
        response_chunk_type=MistralCallResponseChunk,
        tool_types=tool_types,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    for chunk in chat_stream(messages=messages, **kwargs):
        yield MistralCallResponseChunk(
            chunk=chunk,
            user_message_param=user_message_param,
            tool_types=tool_types,
        )

stream_async(retries=0, **kwargs) async

Streams the response for an asynchronous call using this MistralCall.

Parameters:

    **kwargs (Any): Additional keyword arguments to pass to the call; these will override any existing arguments in call_params. Defaults to {}.

Returns:

    AsyncGenerator[MistralCallResponseChunk, None]: A MistralCallResponseChunk for each chunk of the response.

Raises:

    MistralException: Any Mistral errors, see:
        https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
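
And the asynchronous counterpart, with the same BookRecommender class:

import asyncio

async def main():
    async for chunk in BookRecommender(genre="fantasy").stream_async():
        print(chunk.content, end="", flush=True)

asyncio.run(main())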

Source code in mirascope/mistral/calls.py
@retry
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[MistralCallResponseChunk, None]:
    """Streams the response for an asynchronous call using this `MistralCall`.

    Args:
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        A `MistralCallResponseChunk` for each chunk of the response.

    Raises:
        MistralException: raises any Mistral errors, see:
            https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
    """
    kwargs, tool_types = self._setup(kwargs, MistralTool)
    client = get_wrapped_async_client(
        MistralAsyncClient(
            api_key=self.api_key,
            endpoint=self.base_url if self.base_url else ENDPOINT,
        ),
        self,
    )
    chat_stream = get_wrapped_call(
        client.chat_stream,
        self,
        is_async=True,
        response_chunk_type=MistralCallResponseChunk,
        tool_types=tool_types,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    async for chunk in chat_stream(messages=messages, **kwargs):
        yield MistralCallResponseChunk(
            chunk=chunk,
            user_message_param=user_message_param,
            tool_types=tool_types,
        )

MistralCallParams

Bases: BaseCallParams[MistralTool]

The parameters to use when calling the Mistral API.

Source code in mirascope/mistral/types.py
class MistralCallParams(BaseCallParams[MistralTool]):
    """The parameters to use when calling the Mistral API."""

    model: str = "open-mixtral-8x7b"
    endpoint: Optional[str] = None
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    top_p: Optional[float] = None
    random_seed: Optional[int] = None
    safe_mode: Optional[bool] = None
    safe_prompt: Optional[bool] = None
    tool_choice: Optional[ToolChoice] = None
    model_config = ConfigDict(arbitrary_types_allowed=True)
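
A sketch of setting these parameters on a call class; the specific values here are illustrative:

from mirascope.mistral import MistralCall, MistralCallParams

class BookRecommender(MistralCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

    # Overrides the default `open-mixtral-8x7b` model and sets sampling parameters.
    call_params = MistralCallParams(
        model="mistral-large-latest",
        temperature=0.7,
        max_tokens=256,
    )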

MistralCallResponse

Bases: BaseCallResponse[ChatCompletionResponse, MistralTool]

Convenience wrapper for Mistral's chat model completions.

When using Mirascope's convenience wrappers to interact with Mistral models via MistralCall, responses using MistralCall.call() will return a MistralCallResponse, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.mistral import MistralCall

class BookRecommender(MistralCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

response = BookRecommender(genre="fantasy").call()
print(response.content)
#> The Name of the Wind

print(response.message)
#> ChatMessage(content='The Name of the Wind', role='assistant',
#  function_call=None, tool_calls=None)

print(response.choices)
#> [Choice(finish_reason='stop', index=0, logprobs=None,
#  message=ChatMessage(content='The Name of the Wind', role='assistant',
#  function_call=None, tool_calls=None))]
Source code in mirascope/mistral/types.py
class MistralCallResponse(BaseCallResponse[ChatCompletionResponse, MistralTool]):
    """Convenience wrapper for Mistral's chat model completions.

    When using Mirascope's convenience wrappers to interact with Mistral models via
    `MistralCall`, responses using `MistralCall.call()` will return a
    `MistralCallResponse`, whereby the implemented properties allow for simpler syntax
    and a convenient developer experience.

    Example:

    ```python
    from mirascope.mistral import MistralCall

    class BookRecommender(MistralCall):
        prompt_template = "Please recommend a {genre} book"

        genre: str

    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> The Name of the Wind

    print(response.message)
    #> ChatMessage(content='The Name of the Wind', role='assistant',
    #  function_call=None, tool_calls=None)

    print(response.choices)
    #> [Choice(finish_reason='stop', index=0, logprobs=None,
    #  message=ChatMessage(content='The Name of the Wind', role='assistant',
    #  function_call=None, tool_calls=None))]
    ```

    """

    user_message_param: Optional[Message] = None

    @property
    def message_param(self) -> Message:
        """Returns the assistants's response as a message parameter."""
        return self.message.model_dump()  # type: ignore

    @property
    def choices(self) -> list[ChatCompletionResponseChoice]:
        """Returns the array of chat completion choices."""
        return self.response.choices

    @property
    def choice(self) -> ChatCompletionResponseChoice:
        """Returns the 0th choice."""
        return self.choices[0]

    @property
    def message(self) -> ChatMessage:
        """Returns the message of the chat completion for the 0th choice."""
        return self.choice.message

    @property
    def content(self) -> str:
        """The content of the chat completion for the 0th choice."""
        content = self.message.content
        # We haven't seen the `list[str]` response type in practice, so for now we
        # return the first item in the list
        return content if isinstance(content, str) else content[0]

    @property
    def model(self) -> str:
        """Returns the name of the response model."""
        return self.response.model

    @property
    def id(self) -> str:
        """Returns the id of the response."""
        return self.response.id

    @property
    def finish_reasons(self) -> list[str]:
        """Returns the finish reasons of the response."""
        return [
            choice.finish_reason if choice.finish_reason else ""
            for choice in self.choices
        ]

    @property
    def tool_calls(self) -> Optional[list[ToolCall]]:
        """Returns the tool calls for the 0th choice message."""
        return self.message.tool_calls

    @property
    def tools(self) -> Optional[list[MistralTool]]:
        """Returns the tools for the 0th choice message.

        Raises:
            ValidationError: if the tool call doesn't match the tool's schema.
        """
        if not self.tool_types or not self.tool_calls or len(self.tool_calls) == 0:
            return None

        if self.choice.finish_reason in ["length", "error"]:
            raise RuntimeError(
                f"Finish reason was {self.choice.finish_reason}, indicating the model "
                "ran out of token or failed (and could not complete the tool call if "
                "trying to)."
            )

        extracted_tools = []
        for tool_call in self.tool_calls:
            for tool_type in self.tool_types:
                if tool_call.function.name == tool_type.name():
                    extracted_tools.append(tool_type.from_tool_call(tool_call))
                    break

        return extracted_tools

    @property
    def tool(self) -> Optional[MistralTool]:
        """Returns the 0th tool for the 0th choice message.

        Raises:
            ValidationError: if the tool call doesn't match the tool's schema.
        """
        tools = self.tools
        if tools:
            return tools[0]
        return None

    @classmethod
    def tool_message_params(
        cls, tools_and_outputs: list[tuple[MistralTool, str]]
    ) -> list[ToolMessage]:
        """Returns the tool message parameters for tool call results."""
        return [
            {
                "role": "tool",
                "content": output,
                "tool_call_id": tool.tool_call.id,
                "name": tool.name(),
            }  # type: ignore
            for tool, output in tools_and_outputs
        ]

    @property
    def usage(self) -> UsageInfo:
        """Returns the usage of the chat completion."""
        return self.response.usage

    @property
    def input_tokens(self) -> int:
        """Returns the number of input tokens."""
        return self.usage.prompt_tokens

    @property
    def output_tokens(self) -> Optional[int]:
        """Returns the number of output tokens."""
        return self.usage.completion_tokens

    def dump(self) -> dict[str, Any]:
        """Dumps the response to a dictionary."""
        return {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "output": self.response.model_dump(),
            "cost": self.cost,
        }

choice: ChatCompletionResponseChoice property

Returns the 0th choice.

choices: list[ChatCompletionResponseChoice] property

Returns the array of chat completion choices.

content: str property

The content of the chat completion for the 0th choice.

finish_reasons: list[str] property

Returns the finish reasons of the response.

id: str property

Returns the id of the response.

input_tokens: int property

Returns the number of input tokens.

message: ChatMessage property

Returns the message of the chat completion for the 0th choice.

message_param: Message property

Returns the assistant's response as a message parameter.

model: str property

Returns the name of the response model.

output_tokens: Optional[int] property

Returns the number of output tokens.

tool: Optional[MistralTool] property

Returns the 0th tool for the 0th choice message.

Raises:

    ValidationError: If the tool call doesn't match the tool's schema.

tool_calls: Optional[list[ToolCall]] property

Returns the tool calls for the 0th choice message.

tools: Optional[list[MistralTool]] property

Returns the tools for the 0th choice message.

Raises:

    ValidationError: If the tool call doesn't match the tool's schema.

usage: UsageInfo property

Returns the usage of the chat completion.

dump()

Dumps the response to a dictionary.

Source code in mirascope/mistral/types.py
def dump(self) -> dict[str, Any]:
    """Dumps the response to a dictionary."""
    return {
        "start_time": self.start_time,
        "end_time": self.end_time,
        "output": self.response.model_dump(),
        "cost": self.cost,
    }

tool_message_params(tools_and_outputs) classmethod

Returns the tool message parameters for tool call results.

Source code in mirascope/mistral/types.py
@classmethod
def tool_message_params(
    cls, tools_and_outputs: list[tuple[MistralTool, str]]
) -> list[ToolMessage]:
    """Returns the tool message parameters for tool call results."""
    return [
        {
            "role": "tool",
            "content": output,
            "tool_call_id": tool.tool_call.id,
            "name": tool.name(),
        }  # type: ignore
        for tool, output in tools_and_outputs
    ]
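
A hedged sketch of running a returned tool and packaging its output for a follow-up message; the Forecast class and get_current_weather function here are hypothetical:

from mirascope.mistral import MistralCall, MistralCallParams, MistralCallResponse

def get_current_weather(city: str) -> str:
    """Returns the current weather in a given city."""
    return f"It is sunny in {city}."

class Forecast(MistralCall):
    prompt_template = "What's the weather in {city}?"

    city: str

    call_params = MistralCallParams(
        model="mistral-large-latest", tools=[get_current_weather]
    )

response = Forecast(city="Tokyo").call()
if tool := response.tool:
    output = tool.fn(**tool.args)
    # Message dicts suitable for appending to a follow-up call's message history.
    tool_messages = MistralCallResponse.tool_message_params([(tool, output)])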

MistralCallResponseChunk

Bases: BaseCallResponseChunk[ChatCompletionStreamResponse, MistralTool]

Convenience wrapper around chat completion streaming chunks.

When using Mirascope's convenience wrappers to interact with Mistral models via MistralCall.stream, responses will return a MistralCallResponseChunk, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.mistral import MistralCall


class Math(MistralCall):
    prompt_template = "What is 1 + 2?"


content = ""
for chunk in Math().stream():
    content += chunk.content
    print(content)
#> 1
#  1 +
#  1 + 2
#  1 + 2 equals
#  1 + 2 equals
#  1 + 2 equals 3
#  1 + 2 equals 3.
Source code in mirascope/mistral/types.py
class MistralCallResponseChunk(
    BaseCallResponseChunk[ChatCompletionStreamResponse, MistralTool]
):
    """Convenience wrapper around chat completion streaming chunks.

    When using Mirascope's convenience wrappers to interact with Mistral models via
    `MistralCall.stream`, responses will return a `MistralCallResponseChunk`, whereby
    the implemented properties allow for simpler syntax and a convenient developer
    experience.

    Example:

    ```python
    from mirascope.mistral import MistralCall


    class Math(MistralCall):
        prompt_template = "What is 1 + 2?"


    content = ""
    for chunk in Math().stream():
        content += chunk.content
        print(content)
    #> 1
    #  1 +
    #  1 + 2
    #  1 + 2 equals
    #  1 + 2 equals
    #  1 + 2 equals 3
    #  1 + 2 equals 3.
    ```
    """

    user_message_param: Optional[Message] = None

    @property
    def choices(self) -> list[ChatCompletionResponseStreamChoice]:
        """Returns the array of chat completion choices."""
        return self.chunk.choices

    @property
    def choice(self) -> ChatCompletionResponseStreamChoice:
        """Returns the 0th choice."""
        return self.choices[0]

    @property
    def delta(self) -> DeltaMessage:
        """Returns the delta of the 0th choice."""
        return self.choice.delta

    @property
    def content(self) -> str:
        """Returns the content of the delta."""
        return self.delta.content if self.delta.content is not None else ""

    @property
    def model(self) -> str:
        """Returns the name of the response model."""
        return self.chunk.model

    @property
    def id(self) -> str:
        """Returns the id of the response."""
        return self.chunk.id

    @property
    def finish_reasons(self) -> list[str]:
        """Returns the finish reasons of the response."""
        return [
            choice.finish_reason if choice.finish_reason else ""
            for choice in self.choices
        ]

    @property
    def tool_calls(self) -> Optional[list[ToolCall]]:
        """Returns the partial tool calls for the 0th choice message."""
        return self.delta.tool_calls

    @property
    def usage(self) -> Optional[UsageInfo]:
        """Returns the usage of the chat completion."""
        return self.chunk.usage

    @property
    def input_tokens(self) -> Optional[int]:
        """Returns the number of input tokens."""
        if self.usage:
            return self.usage.prompt_tokens
        return None

    @property
    def output_tokens(self) -> Optional[int]:
        """Returns the number of output tokens."""
        if self.usage:
            return self.usage.completion_tokens
        return None

choice: ChatCompletionResponseStreamChoice property

Returns the 0th choice.

choices: list[ChatCompletionResponseStreamChoice] property

Returns the array of chat completion choices.

content: str property

Returns the content of the delta.

delta: DeltaMessage property

Returns the delta of the 0th choice.

finish_reasons: list[str] property

Returns the finish reasons of the response.

id: str property

Returns the id of the response.

input_tokens: Optional[int] property

Returns the number of input tokens.

model: str property

Returns the name of the response model.

output_tokens: Optional[int] property

Returns the number of output tokens.

tool_calls: Optional[list[ToolCall]] property

Returns the partial tool calls for the 0th choice message.

usage: Optional[UsageInfo] property

Returns the usage of the chat completion.

MistralExtractor

Bases: BaseExtractor[MistralCall, MistralTool, Any, T], Generic[T]

A class for extracting structured information using Mistral Chat models.

Example:

from mirascope.mistral import MistralExtractor
from pydantic import BaseModel
from typing import Literal, Type

class TaskDetails(BaseModel):
    title: str
    priority: Literal["low", "normal", "high"]
    due_date: str

class TaskExtractor(MistralExtractor[TaskDetails]):
    extract_schema: Type[TaskDetails] = TaskDetails
    call_params = MistralCallParams(model="mistral-large-latest")

    prompt_template = """
    Prepare the budget report by next Monday. It's a high priority task.
    """


task = TaskExtractor().extract(retries=3)
assert isinstance(task, TaskDetails)
print(task)
# > title='Prepare the budget report' priority='high' due_date='next Monday'
Source code in mirascope/mistral/extractors.py
class MistralExtractor(BaseExtractor[MistralCall, MistralTool, Any, T], Generic[T]):
    '''A class for extracting structured information using Mistral Chat models.

    Example:

    ```python
    from mirascope.mistral import MistralExtractor
    from pydantic import BaseModel
    from typing import Literal, Type

    class TaskDetails(BaseModel):
        title: str
        priority: Literal["low", "normal", "high"]
        due_date: str

    class TaskExtractor(MistralExtractor[TaskDetails]):
        extract_schema: Type[TaskDetails] = TaskDetails
        call_params = MistralCallParams(model="mistral-large-latest")

        prompt_template = """
        Prepare the budget report by next Monday. It's a high priority task.
        """


    task = TaskExtractor().extract(retries=3)
    assert isinstance(task, TaskDetails)
    print(task)
    # > title='Prepare the budget report' priority='high' due_date='next Monday'
    ```
    '''

    call_params: ClassVar[MistralCallParams] = MistralCallParams()
    _provider: ClassVar[str] = "mistral"

    def extract(self, retries: Union[int, Retrying] = 0, **kwargs: Any) -> T:
        """Extracts `extract_schema` from the Mistral call response.

        The `extract_schema` is converted into a `MistralTool`, complete with a
        description of the tool, all of the fields, and their types. This allows us to
        take advantage of Mistral's tool/function calling functionality to extract
        information from a prompt according to the context provided by the `BaseModel`
        schema.

        Args:
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            The `Schema` instance extracted from the completion.

        Raises:
            AttributeError: if there is no tool in the call creation.
            ValidationError: if the schema cannot be instantiated from the completion.
            MistralException: raises any Mistral exceptions, see:
                https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
        """
        return self._extract(MistralCall, MistralTool, retries, **kwargs)

    async def extract_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> T:
        """Asynchronously extracts `extract_schema` from the Mistral call response.

        The `extract_schema` is converted into a `MistralTool`, complete with a
        description of the tool, all of the fields, and their types. This allows us to
        take advantage of Mistral's tool/function calling functionality to extract
        information from a prompt according to the context provided by the `BaseModel`
        schema.

        Args:
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            The `Schema` instance extracted from the completion.

        Raises:
            AttributeError: if there is no tool in the call creation.
            ValidationError: if the schema cannot be instantiated from the completion.
            MistralException: raises any Mistral exceptions, see:
                https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
        """
        return await self._extract_async(MistralCall, MistralTool, retries, **kwargs)

extract(retries=0, **kwargs)

Extracts extract_schema from the Mistral call response.

The extract_schema is converted into a MistralTool, complete with a description of the tool, all of the fields, and their types. This allows us to take advantage of Mistral's tool/function calling functionality to extract information from a prompt according to the context provided by the BaseModel schema.

Parameters:

    retries (Union[int, Retrying]): The maximum number of times to retry the query on validation error. Defaults to 0.
    **kwargs (Any): Additional keyword arguments to pass to the call; these will override any existing arguments in call_params. Defaults to {}.

Returns:

    T: The Schema instance extracted from the completion.

Raises:

    AttributeError: If there is no tool in the call creation.
    ValidationError: If the schema cannot be instantiated from the completion.
    MistralException: Any Mistral exceptions, see:
        https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
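
For example, reusing the TaskExtractor class from the example above, retries accepts either an int or a tenacity Retrying instance; the latter usage is a sketch based on the Union[int, Retrying] signature:

from tenacity import Retrying, stop_after_attempt

# Retry up to three times on validation error.
task = TaskExtractor().extract(retries=Retrying(stop=stop_after_attempt(3)))
print(task)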

Source code in mirascope/mistral/extractors.py
def extract(self, retries: Union[int, Retrying] = 0, **kwargs: Any) -> T:
    """Extracts `extract_schema` from the Mistral call response.

    The `extract_schema` is converted into a `MistralTool`, complete with a
    description of the tool, all of the fields, and their types. This allows us to
    take advantage of Mistral's tool/function calling functionality to extract
    information from a prompt according to the context provided by the `BaseModel`
    schema.

    Args:
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        The `Schema` instance extracted from the completion.

    Raises:
        AttributeError: if there is no tool in the call creation.
        ValidationError: if the schema cannot be instantiated from the completion.
        MistralException: raises any Mistral exceptions, see:
            https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
    """
    return self._extract(MistralCall, MistralTool, retries, **kwargs)

extract_async(retries=0, **kwargs) async

Asynchronously extracts extract_schema from the Mistral call response.

The extract_schema is converted into a MistralTool, complete with a description of the tool, all of the fields, and their types. This allows us to take advantage of Mistral's tool/function calling functionality to extract information from a prompt according to the context provided by the BaseModel schema.

Parameters:

    retries (Union[int, AsyncRetrying]): The maximum number of times to retry the query on validation error. Defaults to 0.
    **kwargs (Any): Additional keyword arguments to pass to the call; these will override any existing arguments in call_params. Defaults to {}.

Returns:

    T: The Schema instance extracted from the completion.

Raises:

    AttributeError: If there is no tool in the call creation.
    ValidationError: If the schema cannot be instantiated from the completion.
    MistralException: Any Mistral exceptions, see:
        https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
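
And the asynchronous counterpart, again reusing the TaskExtractor example above:

import asyncio

async def main():
    task = await TaskExtractor().extract_async(retries=3)
    print(task)

asyncio.run(main())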

Source code in mirascope/mistral/extractors.py
async def extract_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> T:
    """Asynchronously extracts `extract_schema` from the Mistral call response.

    The `extract_schema` is converted into a `MistralTool`, complete with a
    description of the tool, all of the fields, and their types. This allows us to
    take advantage of Mistral's tool/function calling functionality to extract
    information from a prompt according to the context provided by the `BaseModel`
    schema.

    Args:
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        The `Schema` instance extracted from the completion.

    Raises:
        AttributeError: if there is no tool in the call creation.
        ValidationError: if the schema cannot be instantiated from the completion.
        MistralException: raises any Mistral exceptions, see:
            https://github.com/mistralai/client-python/blob/main/src/mistralai/exceptions.py
    """
    return await self._extract_async(MistralCall, MistralTool, retries, **kwargs)

MistralStream

Bases: BaseStream[MistralCallResponseChunk, UserMessage, AssistantMessage, MistralTool]

A class for streaming responses from Mistral's API.

Source code in mirascope/mistral/types.py
class MistralStream(
    BaseStream[
        MistralCallResponseChunk,
        UserMessage,
        AssistantMessage,
        MistralTool,
    ]
):
    """A class for streaming responses from Mistral's API."""

    def __init__(self, stream: Generator[MistralCallResponseChunk, None, None]):
        """Initializes an instance of `MistralStream`."""
        super().__init__(stream, AssistantMessage)
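
A construction sketch: MistralStream wraps the generator of chunks that MistralCall.stream() returns (iteration behavior comes from BaseStream, which isn't shown here):

from mirascope.mistral import MistralCall, MistralStream

class BookRecommender(MistralCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

# Wrap the generator of `MistralCallResponseChunk`s produced by `stream()`.
stream = MistralStream(BookRecommender(genre="fantasy").stream())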

MistralTool

Bases: BaseTool[ToolCall]

A base class for easy use of tools with the Mistral client.

MistralTool internally handles the logic that allows you to use tools with simple calls such as MistralCallResponse.tool or MistralTool.fn, as seen in the examples below.

Example:

import os

from mirascope.mistral import MistralCall, MistralCallParams


def animal_matcher(fav_food: str, fav_color: str) -> str:
    """Tells you your most likely favorite animal from personality traits.

    Args:
        fav_food: your favorite food.
        fav_color: your favorite color.

    Returns:
        The animal most likely to be your favorite based on traits.
    """
    return "Your favorite animal is the best one, a frog."


class AnimalMatcher(MistralCall):
    prompt_template = """\
        Tell me my favorite animal if my favorite food is {food} and my
        favorite color is {color}.
    """

    food: str
    color: str

    api_key = os.getenv("MISTRAL_API_KEY")
    call_params = MistralCallParams(
        model="mistral-large-latest", tools=[animal_matcher]
    )


prompt = AnimalMatcher(food="pizza", color="green")
response = prompt.call()

if tools := response.tools:
    for tool in tools:
        print(tool.fn(**tool.args))
#> Your favorite animal is the best one, a frog.

Source code in mirascope/mistral/tools.py
class MistralTool(BaseTool[ToolCall]):
    '''A base class for easy use of tools with the Mistral client.

    `MistralTool` internally handles the logic that allows you to use tools with simple
    calls such as `MistralCallResponse.tool` or `MistralTool.fn`, as seen in the 
    examples below.

    Example:

    ```python
    import os

    from mirascope.mistral import MistralCall, MistralCallParams


    def animal_matcher(fav_food: str, fav_color: str) -> str:
        """Tells you your most likely favorite animal from personality traits.

        Args:
            fav_food: your favorite food.
            fav_color: your favorite color.

        Returns:
            The animal most likely to be your favorite based on traits.
        """
        return "Your favorite animal is the best one, a frog."


    class AnimalMatcher(MistralCall):
        prompt_template = """\\
            Tell me my favorite animal if my favorite food is {food} and my
            favorite color is {color}.
        """

        food: str
        color: str

        api_key = os.getenv("MISTRAL_API_KEY")
        call_params = MistralCallParams(
            model="mistral-large-latest", tools=[animal_matcher]
        )


    prompt = AnimalMatcher(food="pizza", color="green")
    response = prompt.call()

    if tools := response.tools:
        for tool in tools:
            print(tool.fn(**tool.args))
    #> Your favorite animal is the best one, a frog.
    '''

    @classmethod
    def tool_schema(cls) -> dict[str, Any]:
        """Constructs a tool schema for use with the Mistral Chat client.

        A Mirascope `MistralTool` is deconstructed into a JSON schema, and relevant keys
        are renamed to match the schema the Mistral API uses for function/tool calls.

        Returns:
            The constructed tool schema.
        """
        fn = super().tool_schema()
        return {"type": "function", "function": fn}

    @classmethod
    def from_tool_call(cls, tool_call: ToolCall) -> MistralTool:
        """Extracts an instance of the tool constructed from a tool call response.

        Given `ToolCall` from a Mistral chat completion response, takes its function
        arguments and creates a `MistralTool` instance from it.

        Args:
            tool_call: The Mistral `ToolCall` to extract the tool from.

        Returns:
            An instance of the tool constructed from the tool call.

        Raises:
            ValueError: if the tool call doesn't match the tool schema.
        """
        try:
            model_json = json.loads(tool_call.function.arguments)
        except json.JSONDecodeError as e:
            raise ValueError() from e

        model_json["tool_call"] = tool_call
        return cls.model_validate(model_json)

    @classmethod
    def from_model(cls, model: Type[BaseModel]) -> Type[MistralTool]:
        """Constructs a `MistralTool` type from a `BaseModel` type."""
        return convert_base_model_to_tool(model, MistralTool)

    @classmethod
    def from_fn(cls, fn: Callable) -> Type[MistralTool]:
        """Constructs a `MistralTool` type from a function."""
        return convert_function_to_tool(fn, MistralTool)

    @classmethod
    def from_base_type(cls, base_type: Type[BaseType]) -> Type[MistralTool]:
        """Constructs a `MistralTool` type from a `BaseType` type."""
        return convert_base_type_to_tool(base_type, MistralTool)

from_base_type(base_type) classmethod

Constructs a MistralTool type from a BaseType type.

Source code in mirascope/mistral/tools.py
@classmethod
def from_base_type(cls, base_type: Type[BaseType]) -> Type[MistralTool]:
    """Constructs a `MistralTool` type from a `BaseType` type."""
    return convert_base_type_to_tool(base_type, MistralTool)

from_fn(fn) classmethod

Constructs a MistralTool type from a function.

Source code in mirascope/mistral/tools.py
@classmethod
def from_fn(cls, fn: Callable) -> Type[MistralTool]:
    """Constructs a `MistralTool` type from a function."""
    return convert_function_to_tool(fn, MistralTool)
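
A short sketch of building a tool type from a plain function; get_current_weather is hypothetical:

from mirascope.mistral import MistralTool

def get_current_weather(city: str) -> str:
    """Returns the current weather in a given city."""
    return f"It is sunny in {city}."

WeatherTool = MistralTool.from_fn(get_current_weather)
# Per `tool_schema` below, the schema is wrapped as {"type": "function", "function": ...}.
print(WeatherTool.tool_schema())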

from_model(model) classmethod

Constructs a MistralTool type from a BaseModel type.

Source code in mirascope/mistral/tools.py
@classmethod
def from_model(cls, model: Type[BaseModel]) -> Type[MistralTool]:
    """Constructs a `MistralTool` type from a `BaseModel` type."""
    return convert_base_model_to_tool(model, MistralTool)

from_tool_call(tool_call) classmethod

Extracts an instance of the tool constructed from a tool call response.

Given ToolCall from a Mistral chat completion response, takes its function arguments and creates a MistralTool instance from it.

Parameters:

    tool_call (ToolCall): The Mistral ToolCall to extract the tool from. Required.

Returns:

    MistralTool: An instance of the tool constructed from the tool call.

Raises:

    ValueError: If the tool call doesn't match the tool schema.

Source code in mirascope/mistral/tools.py
@classmethod
def from_tool_call(cls, tool_call: ToolCall) -> MistralTool:
    """Extracts an instance of the tool constructed from a tool call response.

    Given `ToolCall` from a Mistral chat completion response, takes its function
    arguments and creates a `MistralTool` instance from it.

    Args:
        tool_call: The Mistral `ToolCall` to extract the tool from.

    Returns:
        An instance of the tool constructed from the tool call.

    Raises:
        ValueError: if the tool call doesn't match the tool schema.
    """
    try:
        model_json = json.loads(tool_call.function.arguments)
    except json.JSONDecodeError as e:
        raise ValueError() from e

    model_json["tool_call"] = tool_call
    return cls.model_validate(model_json)

tool_schema() classmethod

Constructs a tool schema for use with the Mistral Chat client.

A Mirascope MistralTool is deconstructed into a JSON schema, and relevant keys are renamed to match the schema the Mistral API uses for function/tool calls.

Returns:

    dict[str, Any]: The constructed tool schema.

Source code in mirascope/mistral/tools.py
@classmethod
def tool_schema(cls) -> dict[str, Any]:
    """Constructs a tool schema for use with the Mistral Chat client.

    A Mirascope `MistralTool` is deconstructed into a JSON schema, and relevant keys
    are renamed to match the schema the Mistral API uses for function/tool calls.

    Returns:
        The constructed tool schema.
    """
    fn = super().tool_schema()
    return {"type": "function", "function": fn}

mistral_api_calculate_cost(usage, model='open-mistral-7b')

Calculate the cost of a completion using the Mistral API.

https://mistral.ai/technology/#pricing

Model                 Input                Output
open-mistral-7b       $0.25/1M tokens      $0.25/1M tokens
open-mixtral-8x7b     $0.7/1M tokens       $0.7/1M tokens
open-mixtral-8x22b    $2/1M tokens         $6/1M tokens
mistral-small         $2/1M tokens         $6/1M tokens
mistral-medium        $2.7/1M tokens       $8.1/1M tokens
mistral-large         $8/1M tokens         $24/1M tokens
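
As a worked example, a mistral-large completion with 2,000 prompt tokens and 1,000 completion tokens costs 2,000 × $0.000008 + 1,000 × $0.000024 = $0.04. A quick sketch (the UsageInfo import path is an assumption about the legacy mistralai client):

from mistralai.models.common import UsageInfo  # import path assumed

from mirascope.mistral.utils import mistral_api_calculate_cost

usage = UsageInfo(prompt_tokens=2000, completion_tokens=1000, total_tokens=3000)
print(mistral_api_calculate_cost(usage, model="mistral-large"))
#> 0.04 (modulo floating-point rounding)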

Source code in mirascope/mistral/utils.py
def mistral_api_calculate_cost(
    usage: UsageInfo, model="open-mistral-7b"
) -> Optional[float]:
    """Calculate the cost of a completion using the Mistral API.

    https://mistral.ai/technology/#pricing

    Model                     Input               Output
    open-mistral-7b	          $0.25/1M tokens	  $0.25/1M tokens
    open-mixtral-8x7b	      $0.7/1M tokens	  $0.7/1M tokens
    open-mixtral-8x22b	      $2/1M tokens	      $6/1M tokens
    mistral-small		      $2/1M tokens	      $6/1M tokens
    mistral-medium		      $2.7/1M tokens	  $8.1/1M tokens
    mistral-large		      $8/1M tokens	      $24/1M tokens
    """
    pricing = {
        "open-mistral-7b": {"prompt": 0.000_000_25, "completion": 0.000_000_25},
        "open-mixtral-8x7b": {"prompt": 0.000_000_7, "completion": 0.000_000_7},
        "open-mixtral-8x22b": {"prompt": 0.000_002, "completion": 0.000_006},
        "mistral-small": {"prompt": 0.000_002, "completion": 0.000_006},
        "mistral-medium": {"prompt": 0.000_002_7, "completion": 0.000_008_1},
        "mistral-large": {"prompt": 0.000_008, "completion": 0.000_024},
    }

    try:
        model_pricing = pricing[model]
    except KeyError:
        return None

    completion_tokens = usage.completion_tokens or 0
    prompt_cost = usage.prompt_tokens * model_pricing["prompt"]
    completion_cost = completion_tokens * model_pricing["completion"]
    total_cost = prompt_cost + completion_cost

    return total_cost