gemini.calls

A module for calling Google's Gemini Chat API.

BaseCall

Bases: BasePrompt, Generic[BaseCallResponseT, BaseCallResponseChunkT, BaseToolT, MessageParamT], ABC

The abstract base class interface for calling LLMs.

Source code in mirascope/base/calls.py
class BaseCall(
    BasePrompt,
    Generic[BaseCallResponseT, BaseCallResponseChunkT, BaseToolT, MessageParamT],
    ABC,
):
    """The base class abstract interface for calling LLMs."""

    api_key: ClassVar[Optional[str]] = None
    base_url: ClassVar[Optional[str]] = None
    call_params: ClassVar[BaseCallParams] = BaseCallParams[BaseToolT](
        model="gpt-3.5-turbo-0125"
    )
    configuration: ClassVar[BaseConfig] = BaseConfig(llm_ops=[], client_wrappers=[])
    _provider: ClassVar[str] = "base"

    @abstractmethod
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> BaseCallResponseT:
        """A call to an LLM.

        An implementation of this function must return a response that extends
        `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> BaseCallResponseT:
        """An asynchronous call to an LLM.

        An implementation of this function must return a response that extends
        `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
        different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[BaseCallResponseChunkT, None, None]:
        """A call to an LLM that streams the response in chunks.

        An implementation of this function must yield response chunks that extend
        `BaseCallResponseChunk`. This ensures a consistent API and convenience across
        e.g. different model providers.
        """
        ...  # pragma: no cover

    @abstractmethod
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[BaseCallResponseChunkT, None]:
        """A asynchronous call to an LLM that streams the response in chunks.

        An implementation of this function must yield response chunks that extend
        `BaseCallResponseChunk`. This ensures a consistent API and convenience across
        e.g. different model providers."""
        yield ...  # type: ignore # pragma: no cover

    @classmethod
    def from_prompt(
        cls, prompt_type: type[BasePromptT], call_params: BaseCallParams
    ) -> type[BasePromptT]:
        """Returns a call_type generated dynamically from this base call.

        Args:
            prompt_type: The prompt class to use for the call. Properties and class
                variables of this class will be used to create the new call class. Must
                be a class that can be instantiated.
            call_params: The call params to use for the call.

        Returns:
            A new call class with a new call_type.
        """

        fields: dict[str, Any] = {
            name: (field.annotation, field.default)
            for name, field in prompt_type.model_fields.items()
        }

        class_vars = {
            name: value
            for name, value in prompt_type.__dict__.items()
            if name not in prompt_type.model_fields
        }
        new_call = create_model(prompt_type.__name__, __base__=cls, **fields)

        for var_name, var_value in class_vars.items():
            setattr(new_call, var_name, var_value)
        setattr(new_call, "call_params", call_params)

        return cast(type[BasePromptT], new_call)

    ############################## PRIVATE METHODS ###################################

    def _setup(
        self,
        kwargs: dict[str, Any],
        base_tool_type: Optional[Type[BaseToolT]] = None,
    ) -> tuple[dict[str, Any], Optional[list[Type[BaseToolT]]]]:
        """Returns the call params kwargs and tool types.

        The tools in the call params first get converted into BaseToolT types. We then
        need both the converted tools for the response (so it can construct actual tool
        instances if present in the response) as well as the actual schemas injected
        through kwargs. This function handles that setup.
        """
        call_params = self.call_params.model_copy(update=kwargs)
        kwargs = call_params.kwargs(tool_type=base_tool_type)
        tool_types = None
        if "tools" in kwargs and base_tool_type is not None:
            tool_types = kwargs.pop("tools")
            kwargs["tools"] = [tool_type.tool_schema() for tool_type in tool_types]
        return kwargs, tool_types

    def _get_possible_user_message(
        self, messages: list[Any]
    ) -> Optional[MessageParamT]:
        """Returns the most recent message if it's a user message, otherwise `None`."""
        return messages[-1] if messages[-1]["role"] == "user" else None

call(retries=0, **kwargs) abstractmethod

A call to an LLM.

An implementation of this function must return a response that extends BaseCallResponse. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> BaseCallResponseT:
    """A call to an LLM.

    An implementation of this function must return a response that extends
    `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
    different model providers.
    """
    ...  # pragma: no cover

call_async(retries=0, **kwargs) abstractmethod async

An asynchronous call to an LLM.

An implementation of this function must return a response that extends BaseCallResponse. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> BaseCallResponseT:
    """An asynchronous call to an LLM.

    An implementation of this function must return a response that extends
    `BaseCallResponse`. This ensures a consistent API and convenience across e.g.
    different model providers.
    """
    ...  # pragma: no cover

from_prompt(prompt_type, call_params) classmethod

Returns a call_type generated dynamically from this base call.

Parameters:

    prompt_type (type[BasePromptT], required): The prompt class to use for the call. Properties and class variables of this class will be used to create the new call class. Must be a class that can be instantiated.
    call_params (BaseCallParams, required): The call params to use for the call.

Returns:

    type[BasePromptT]: A new call class with a new call_type.

Source code in mirascope/base/calls.py
@classmethod
def from_prompt(
    cls, prompt_type: type[BasePromptT], call_params: BaseCallParams
) -> type[BasePromptT]:
    """Returns a call_type generated dynamically from this base call.

    Args:
        prompt_type: The prompt class to use for the call. Properties and class
            variables of this class will be used to create the new call class. Must
            be a class that can be instantiated.
        call_params: The call params to use for the call.

    Returns:
        A new call class with a new call_type.
    """

    fields: dict[str, Any] = {
        name: (field.annotation, field.default)
        for name, field in prompt_type.model_fields.items()
    }

    class_vars = {
        name: value
        for name, value in prompt_type.__dict__.items()
        if name not in prompt_type.model_fields
    }
    new_call = create_model(prompt_type.__name__, __base__=cls, **fields)

    for var_name, var_value in class_vars.items():
        setattr(new_call, var_name, var_value)
    setattr(new_call, "call_params", call_params)

    return cast(type[BasePromptT], new_call)
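
For illustration, here is a minimal sketch of using from_prompt to turn an existing prompt class into a Gemini call class. The BookPrompt class and the mirascope.base import path are assumptions for this sketch, not part of the documented API.

from mirascope.base import BasePrompt  # import path assumed
from mirascope.gemini import GeminiCall, GeminiCallParams


class BookPrompt(BasePrompt):  # hypothetical prompt class
    prompt_template = "Please recommend a {genre} book"

    genre: str


# Dynamically create a GeminiCall subclass from the prompt class.
BookRecommenderCall = GeminiCall.from_prompt(
    BookPrompt, GeminiCallParams(model="gemini-1.0-pro")
)
response = BookRecommenderCall(genre="fantasy").call()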

stream(retries=0, **kwargs) abstractmethod

A call to an LLM that streams the response in chunks.

An implementation of this function must yield response chunks that extend BaseCallResponseChunk. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[BaseCallResponseChunkT, None, None]:
    """A call to an LLM that streams the response in chunks.

    An implementation of this function must yield response chunks that extend
    `BaseCallResponseChunk`. This ensures a consistent API and convenience across
    e.g. different model providers.
    """
    ...  # pragma: no cover

stream_async(retries=0, **kwargs) abstractmethod async

An asynchronous call to an LLM that streams the response in chunks.

An implementation of this function must yield response chunks that extend BaseCallResponseChunk. This ensures a consistent API and convenience across e.g. different model providers.

Source code in mirascope/base/calls.py
@abstractmethod
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[BaseCallResponseChunkT, None]:
    """A asynchronous call to an LLM that streams the response in chunks.

    An implementation of this function must yield response chunks that extend
    `BaseCallResponseChunk`. This ensures a consistent API and convenience across
    e.g. different model providers."""
    yield ...  # type: ignore # pragma: no cover

GeminiCall

Bases: BaseCall[GeminiCallResponse, GeminiCallResponseChunk, GeminiTool, ContentDict]

A class for prompting Google's Gemini Chat API.

This prompt supports the message types: USER, MODEL, TOOL

Example:

from google.generativeai import configure  # type: ignore
from mirascope.gemini import GeminiCall

configure(api_key="YOUR_API_KEY")


class BookRecommender(GeminiCall):
    prompt_template = """
    USER: You're the world's greatest librarian.
    MODEL: Ok, I understand I'm the world's greatest librarian. How can I help?
    USER: Please recommend some {genre} books.

    genre: str


response = BookRecommender(genre="fantasy").call()
print(response.content)
#> As the world's greatest librarian, I am delighted to recommend...
Source code in mirascope/gemini/calls.py
class GeminiCall(
    BaseCall[GeminiCallResponse, GeminiCallResponseChunk, GeminiTool, ContentDict]
):
    '''A class for prompting Google's Gemini Chat API.

    This prompt supports the message types: USER, MODEL, TOOL

    Example:

    ```python
    from google.generativeai import configure  # type: ignore
    from mirascope.gemini import GeminiCall

    configure(api_key="YOUR_API_KEY")


    class BookRecommender(GeminiCall):
        prompt_template = """
        USER: You're the world's greatest librarian.
        MODEL: Ok, I understand I'm the world's greatest librarian. How can I help?
        USER: Please recommend some {genre} books.

        genre: str


    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> As the world's greatest librarian, I am delighted to recommend...
    ```
    '''

    call_params: ClassVar[GeminiCallParams] = GeminiCallParams()
    _provider: ClassVar[str] = "gemini"

    def messages(self) -> ContentsType:
        """Returns the `ContentsType` messages for Gemini `generate_content`.

        Raises:
            ValueError: if the docstring contains an unknown role.
        """
        return [
            {"role": message["role"], "parts": [message["content"]]}
            for message in self._parse_messages(
                [MessageRole.MODEL, MessageRole.USER, MessageRole.TOOL]
            )
        ]

    @retry
    def call(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> GeminiCallResponse:
        """Makes an call to the model using this `GeminiCall` instance.

        Args:
            **kwargs: Additional keyword arguments that will be used for generating the
                response. These will override any existing argument settings in call
                params.

        Returns:
            A `GeminiCallResponse` instance.
        """
        kwargs, tool_types = self._setup(kwargs, GeminiTool)
        model_name = kwargs.pop("model")
        gemini_pro_model = get_wrapped_client(
            GenerativeModel(model_name=model_name), self
        )
        generate_content = get_wrapped_call(
            gemini_pro_model.generate_content,
            self,
            response_type=GeminiCallResponse,
            tool_types=tool_types,
            model_name=model_name,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        start_time = datetime.datetime.now().timestamp() * 1000
        response = generate_content(
            messages,
            stream=False,
            tools=kwargs.pop("tools") if "tools" in kwargs else None,
            **kwargs,
        )
        return GeminiCallResponse(
            response=response,
            user_message_param=user_message_param,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=None,
        )

    @retry
    async def call_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> GeminiCallResponse:
        """Makes an asynchronous call to the model using this `GeminiCall` instance.

        Args:
            **kwargs: Additional keyword arguments that will be used for generating the
                response. These will override any existing argument settings in call
                params.

        Returns:
            A `GeminiCallResponse` instance.
        """
        kwargs, tool_types = self._setup(kwargs, GeminiTool)
        model_name = kwargs.pop("model")
        gemini_pro_model = get_wrapped_async_client(
            GenerativeModel(model_name=model_name), self
        )
        generate_content_async = get_wrapped_call(
            gemini_pro_model.generate_content_async,
            self,
            is_async=True,
            response_type=GeminiCallResponse,
            tool_types=tool_types,
            model_name=model_name,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        start_time = datetime.datetime.now().timestamp() * 1000
        response = await generate_content_async(
            messages,
            stream=False,
            tools=kwargs.pop("tools") if "tools" in kwargs else None,
            **kwargs,
        )
        return GeminiCallResponse(
            response=response,
            user_message_param=user_message_param,
            tool_types=tool_types,
            start_time=start_time,
            end_time=datetime.datetime.now().timestamp() * 1000,
            cost=None,
        )

    @retry
    def stream(
        self, retries: Union[int, Retrying] = 0, **kwargs: Any
    ) -> Generator[GeminiCallResponseChunk, None, None]:
        """Streams the response for a call using this `GeminiCall`.

        Args:
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Yields:
            A `GeminiCallResponseChunk` for each chunk of the response.
        """
        kwargs, tool_types = self._setup(kwargs, GeminiTool)
        model_name = kwargs.pop("model")
        gemini_pro_model = get_wrapped_client(
            GenerativeModel(model_name=model_name), self
        )
        generate_content = get_wrapped_call(
            gemini_pro_model.generate_content,
            self,
            response_chunk_type=GeminiCallResponseChunk,
            tool_types=tool_types,
            model_name=model_name,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        stream = generate_content(
            messages,
            stream=True,
            tools=kwargs.pop("tools") if "tools" in kwargs else None,
            **kwargs,
        )
        for chunk in stream:
            yield GeminiCallResponseChunk(
                chunk=chunk,
                user_message_param=user_message_param,
                tool_types=tool_types,
            )

    @retry
    async def stream_async(
        self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
    ) -> AsyncGenerator[GeminiCallResponseChunk, None]:
        """Streams the response asynchronously for a call using this `GeminiCall`.

        Args:
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Yields:
            A `GeminiCallResponseChunk` for each chunk of the response.
        """
        kwargs, tool_types = self._setup(kwargs, GeminiTool)
        model_name = kwargs.pop("model")
        gemini_pro_model = get_wrapped_async_client(
            GenerativeModel(model_name=model_name), self
        )
        generate_content_async = get_wrapped_call(
            gemini_pro_model.generate_content_async,
            self,
            is_async=True,
            response_chunk_type=GeminiCallResponseChunk,
            tool_types=tool_types,
            model_name=model_name,
        )
        messages = self.messages()
        user_message_param = self._get_possible_user_message(messages)
        stream = generate_content_async(
            messages,
            stream=True,
            tools=kwargs.pop("tools") if "tools" in kwargs else None,
            **kwargs,
        )
        if inspect.iscoroutine(stream):
            stream = await stream
        async for chunk in stream:
            yield GeminiCallResponseChunk(
                chunk=chunk,
                user_message_param=user_message_param,
                tool_types=tool_types,
            )

call(retries=0, **kwargs)

Makes a call to the model using this GeminiCall instance.

Parameters:

    **kwargs (Any, default {}): Additional keyword arguments that will be used for generating the response. These will override any existing argument settings in call params.

Returns:

    GeminiCallResponse: A GeminiCallResponse instance.

Source code in mirascope/gemini/calls.py
@retry
def call(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> GeminiCallResponse:
    """Makes an call to the model using this `GeminiCall` instance.

    Args:
        **kwargs: Additional keyword arguments that will be used for generating the
            response. These will override any existing argument settings in call
            params.

    Returns:
        A `GeminiCallResponse` instance.
    """
    kwargs, tool_types = self._setup(kwargs, GeminiTool)
    model_name = kwargs.pop("model")
    gemini_pro_model = get_wrapped_client(
        GenerativeModel(model_name=model_name), self
    )
    generate_content = get_wrapped_call(
        gemini_pro_model.generate_content,
        self,
        response_type=GeminiCallResponse,
        tool_types=tool_types,
        model_name=model_name,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    start_time = datetime.datetime.now().timestamp() * 1000
    response = generate_content(
        messages,
        stream=False,
        tools=kwargs.pop("tools") if "tools" in kwargs else None,
        **kwargs,
    )
    return GeminiCallResponse(
        response=response,
        user_message_param=user_message_param,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=None,
    )
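
Because keyword arguments passed to call override call_params, and retries accepts an int, a one-off override needs no class changes. A hedged sketch reusing the BookRecommender class from the example above:

response = BookRecommender(genre="fantasy").call(
    generation_config={"temperature": 0.7},  # overrides call_params for this call only
    retries=3,  # retry up to 3 times via tenacity
)
print(response.content)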

call_async(retries=0, **kwargs) async

Makes an asynchronous call to the model using this GeminiCall instance.

Parameters:

    **kwargs (Any, default {}): Additional keyword arguments that will be used for generating the response. These will override any existing argument settings in call params.

Returns:

    GeminiCallResponse: A GeminiCallResponse instance.

Source code in mirascope/gemini/calls.py
@retry
async def call_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> GeminiCallResponse:
    """Makes an asynchronous call to the model using this `GeminiCall` instance.

    Args:
        **kwargs: Additional keyword arguments that will be used for generating the
            response. These will override any existing argument settings in call
            params.

    Returns:
        A `GeminiCallResponse` instance.
    """
    kwargs, tool_types = self._setup(kwargs, GeminiTool)
    model_name = kwargs.pop("model")
    gemini_pro_model = get_wrapped_async_client(
        GenerativeModel(model_name=model_name), self
    )
    generate_content_async = get_wrapped_call(
        gemini_pro_model.generate_content_async,
        self,
        is_async=True,
        response_type=GeminiCallResponse,
        tool_types=tool_types,
        model_name=model_name,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    start_time = datetime.datetime.now().timestamp() * 1000
    response = await generate_content_async(
        messages,
        stream=False,
        tools=kwargs.pop("tools") if "tools" in kwargs else None,
        **kwargs,
    )
    return GeminiCallResponse(
        response=response,
        user_message_param=user_message_param,
        tool_types=tool_types,
        start_time=start_time,
        end_time=datetime.datetime.now().timestamp() * 1000,
        cost=None,
    )

messages()

Returns the ContentsType messages for Gemini generate_content.

Raises:

    ValueError: if the docstring contains an unknown role.

Source code in mirascope/gemini/calls.py
def messages(self) -> ContentsType:
    """Returns the `ContentsType` messages for Gemini `generate_content`.

    Raises:
        ValueError: if the docstring contains an unknown role.
    """
    return [
        {"role": message["role"], "parts": [message["content"]]}
        for message in self._parse_messages(
            [MessageRole.MODEL, MessageRole.USER, MessageRole.TOOL]
        )
    ]
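
For the BookRecommender template shown earlier, the parsed output would look roughly like the following (values illustrative):

messages = BookRecommender(genre="fantasy").messages()
# [
#     {"role": "user", "parts": ["You're the world's greatest librarian."]},
#     {"role": "model", "parts": ["Ok, I understand I'm the world's greatest librarian. How can I help?"]},
#     {"role": "user", "parts": ["Please recommend some fantasy books."]},
# ]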

stream(retries=0, **kwargs)

Streams the response for a call using this GeminiCall.

Parameters:

    **kwargs (Any, default {}): Additional keyword arguments to pass to the call. These will override any existing arguments in call_params.

Yields:

    GeminiCallResponseChunk: A GeminiCallResponseChunk for each chunk of the response.

Source code in mirascope/gemini/calls.py
@retry
def stream(
    self, retries: Union[int, Retrying] = 0, **kwargs: Any
) -> Generator[GeminiCallResponseChunk, None, None]:
    """Streams the response for a call using this `GeminiCall`.

    Args:
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Yields:
        A `GeminiCallResponseChunk` for each chunk of the response.
    """
    kwargs, tool_types = self._setup(kwargs, GeminiTool)
    model_name = kwargs.pop("model")
    gemini_pro_model = get_wrapped_client(
        GenerativeModel(model_name=model_name), self
    )
    generate_content = get_wrapped_call(
        gemini_pro_model.generate_content,
        self,
        response_chunk_type=GeminiCallResponseChunk,
        tool_types=tool_types,
        model_name=model_name,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    stream = generate_content(
        messages,
        stream=True,
        tools=kwargs.pop("tools") if "tools" in kwargs else None,
        **kwargs,
    )
    for chunk in stream:
        yield GeminiCallResponseChunk(
            chunk=chunk,
            user_message_param=user_message_param,
            tool_types=tool_types,
        )

stream_async(retries=0, **kwargs) async

Streams the response asynchronously for a call using this GeminiCall.

Parameters:

    **kwargs (Any, default {}): Additional keyword arguments to pass to the call. These will override any existing arguments in call_params.

Yields:

    AsyncGenerator[GeminiCallResponseChunk, None]: A GeminiCallResponseChunk for each chunk of the response.

Source code in mirascope/gemini/calls.py
@retry
async def stream_async(
    self, retries: Union[int, AsyncRetrying] = 0, **kwargs: Any
) -> AsyncGenerator[GeminiCallResponseChunk, None]:
    """Streams the response asynchronously for a call using this `GeminiCall`.

    Args:
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Yields:
        A `GeminiCallResponseChunk` for each chunk of the response.
    """
    kwargs, tool_types = self._setup(kwargs, GeminiTool)
    model_name = kwargs.pop("model")
    gemini_pro_model = get_wrapped_async_client(
        GenerativeModel(model_name=model_name), self
    )
    generate_content_async = get_wrapped_call(
        gemini_pro_model.generate_content_async,
        self,
        is_async=True,
        response_chunk_type=GeminiCallResponseChunk,
        tool_types=tool_types,
        model_name=model_name,
    )
    messages = self.messages()
    user_message_param = self._get_possible_user_message(messages)
    stream = generate_content_async(
        messages,
        stream=True,
        tools=kwargs.pop("tools") if "tools" in kwargs else None,
        **kwargs,
    )
    if inspect.iscoroutine(stream):
        stream = await stream
    async for chunk in stream:
        yield GeminiCallResponseChunk(
            chunk=chunk,
            user_message_param=user_message_param,
            tool_types=tool_types,
        )
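
A hedged sketch of consuming the asynchronous stream, reusing the BookRecommender class from the earlier example:

import asyncio


async def main():
    recommender = BookRecommender(genre="mystery")
    async for chunk in recommender.stream_async():
        print(chunk.content, end="", flush=True)


asyncio.run(main())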

GeminiCallParams

Bases: BaseCallParams[GeminiTool]

The parameters to use when making calls to the Gemini API.

Example:

from mirascope.gemini import GeminiCall, GeminiCallParams


class BookRecommender(GeminiCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str

    call_params = GeminiCallParams(
        model="gemini-1.0-pro-001",
        generation_config={"candidate_count": 2},
    )


response = BookRecommender(genre="fantasy").call()
print(response.content)
#> The Name of the Wind
Source code in mirascope/gemini/types.py
class GeminiCallParams(BaseCallParams[GeminiTool]):
    """The parameters to use when calling the Gemini API calls.

    Example:

    ```python
    from mirascope.gemini import GeminiCall, GeminiCallParams


    class BookRecommender(GeminiCall):
        prompt_template = "Please recommend a {genre} book"

        genre: str

        call_params = GeminiCallParams(
            model="gemini-1.0-pro-001",
            generation_config={"candidate_count": 2},
        )


    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> The Name of the Wind
    ```
    """

    model: str = "gemini-1.0-pro"
    generation_config: Optional[dict[str, Any]] = {"candidate_count": 1}
    safety_settings: Optional[Any] = None
    request_options: Optional[dict[str, Any]] = None
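
These fields map onto the arguments of google.generativeai's GenerativeModel.generate_content. A minimal sketch overriding several at once (the specific values are illustrative):

from mirascope.gemini import GeminiCallParams

call_params = GeminiCallParams(
    model="gemini-1.0-pro",
    generation_config={"candidate_count": 1, "max_output_tokens": 512},
    request_options={"timeout": 30},  # assumes request_options accepts a timeout
)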

GeminiCallResponse

Bases: BaseCallResponse[Union[GenerateContentResponse, AsyncGenerateContentResponse], GeminiTool]

Convenience wrapper around Gemini's GenerateContentResponse.

When using Mirascope's convenience wrappers to interact with Gemini models via GeminiCall, responses using GeminiCall.call() will return a GeminiCallResponse, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.gemini import GeminiCall


class BookRecommender(GeminiCall):
    prompt_template = "Please recommend a {genre} book"

    genre: str


response = BookRecommender(genre="fantasy").call()
print(response.content)
#> The Lord of the Rings
Source code in mirascope/gemini/types.py
class GeminiCallResponse(
    BaseCallResponse[
        Union[GenerateContentResponse, AsyncGenerateContentResponse], GeminiTool
    ]
):
    """Convenience wrapper around Gemini's `GenerateContentResponse`.

    When using Mirascope's convenience wrappers to interact with Gemini models via
    `GeminiCall`, responses using `GeminiCall.call()` will return a
    `GeminiCallResponse`, whereby the implemented properties allow for simpler syntax
    and a convenient developer experience.

    Example:

    ```python
    from mirascope.gemini import GeminiCall


    class BookRecommender(GeminiCall):
        prompt_template = "Please recommend a {genre} book"

        genre: str


    response = BookRecommender(genre="fantasy").call()
    print(response.content)
    #> The Lord of the Rings
    ```
    """

    user_message_param: Optional[ContentDict] = None

    @property
    def message_param(self) -> ContentDict:
        """Returns the models's response as a message parameter."""
        return {"role": "model", "parts": self.response.parts}

    @property
    def tools(self) -> Optional[list[GeminiTool]]:
        """Returns the list of tools for the 0th candidate's 0th content part."""
        if self.tool_types is None:
            return None

        if self.response.candidates[0].finish_reason != 1:  # STOP = 1
            raise RuntimeError(
                "Generation stopped before the stop sequence. "
                "This is likely due to a limit on output tokens that is too low. "
                "Note that this could also indicate no tool is beind called, so we "
                "recommend that you check the output of the call to confirm."
                f"Finish Reason: {self.response.candidates[0].finish_reason}"
            )

        tool_calls = [
            part.function_call for part in self.response.candidates[0].content.parts
        ]

        extracted_tools = []
        for tool_call in tool_calls:
            for tool_type in self.tool_types:
                if tool_call.name == tool_type.name():
                    extracted_tools.append(tool_type.from_tool_call(tool_call))
                    break

        return extracted_tools

    @property
    def tool(self) -> Optional[GeminiTool]:
        """Returns the 0th tool for the 0th candidate's 0th content part.

        Raises:
            ValidationError: if the tool call doesn't match the tool's schema.
        """
        tools = self.tools
        if tools:
            return tools[0]
        return None

    @classmethod
    def tool_message_params(
        cls, tools_and_outputs: list[tuple[GeminiTool, object]]
    ) -> list[FunctionResponse]:
        """Returns the tool message parameters for tool call results."""
        return [
            FunctionResponse(name=tool.name(), response={"result": output})
            for tool, output in tools_and_outputs
        ]

    @property
    def content(self) -> str:
        """Returns the contained string content for the 0th choice."""
        return self.response.candidates[0].content.parts[0].text

    @property
    def id(self) -> Optional[str]:
        """Returns the id of the response.

        google.generativeai does not return an id
        """
        return None

    @property
    def finish_reasons(self) -> list[str]:
        """Returns the finish reasons of the response."""
        finish_reasons = [
            "FINISH_REASON_UNSPECIFIED",
            "STOP",
            "MAX_TOKENS",
            "SAFETY",
            "RECITATION",
            "OTHER",
        ]

        return [
            finish_reasons[candidate.finish_reason]
            for candidate in self.response.candidates
        ]

    @property
    def model(self) -> None:
        """Returns the model name.

        google.generativeai does not return model, so we return None
        """
        return None

    @property
    def usage(self) -> None:
        """Returns the usage of the chat completion.

        google.generativeai does not have Usage, so we return None
        """
        return None

    @property
    def input_tokens(self) -> None:
        """Returns the number of input tokens."""
        return None

    @property
    def output_tokens(self) -> None:
        """Returns the number of output tokens."""
        return None

    def dump(self) -> dict[str, Any]:
        """Dumps the response to a dictionary."""
        return {
            "start_time": self.start_time,
            "end_time": self.end_time,
            "output": str(self.response),
            "cost": self.cost,
        }
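
When tool_types are configured on the call, the tools property reconstructs validated tool instances from the response so you can dispatch on them. A hedged sketch, reusing the CurrentWeather tool and WeatherForecast call defined in the GeminiTool example below:

response = WeatherForecast(city="Tokyo").call()
if response.tools:
    for tool in response.tools:
        # Each tool is a validated pydantic model instance of the matched tool type.
        print(tool.name(), tool.location)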

content: str property

Returns the contained string content for the 0th choice.

finish_reasons: list[str] property

Returns the finish reasons of the response.

id: Optional[str] property

Returns the id of the response.

google.generativeai does not return an id

input_tokens: None property

Returns the number of input tokens.

message_param: ContentDict property

Returns the model's response as a message parameter.

model: None property

Returns the model name.

google.generativeai does not return model, so we return None

output_tokens: None property

Returns the number of output tokens.

tool: Optional[GeminiTool] property

Returns the 0th tool for the 0th candidate's 0th content part.

Raises:

    ValidationError: if the tool call doesn't match the tool's schema.

tools: Optional[list[GeminiTool]] property

Returns the list of tools for the 0th candidate's content parts.

usage: None property

Returns the usage of the chat completion.

google.generativeai does not have Usage, so we return None

dump()

Dumps the response to a dictionary.

Source code in mirascope/gemini/types.py
def dump(self) -> dict[str, Any]:
    """Dumps the response to a dictionary."""
    return {
        "start_time": self.start_time,
        "end_time": self.end_time,
        "output": str(self.response),
        "cost": self.cost,
    }

tool_message_params(tools_and_outputs) classmethod

Returns the tool message parameters for tool call results.

Source code in mirascope/gemini/types.py
@classmethod
def tool_message_params(
    cls, tools_and_outputs: list[tuple[GeminiTool, object]]
) -> list[FunctionResponse]:
    """Returns the tool message parameters for tool call results."""
    return [
        FunctionResponse(name=tool.name(), response={"result": output})
        for tool, output in tools_and_outputs
    ]
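
A hedged sketch of turning an executed tool's output into FunctionResponse parts for a follow-up message (the run_tool helper is hypothetical):

tool = response.tool
if tool is not None:
    output = run_tool(tool)  # hypothetical executor for the tool call
    function_responses = GeminiCallResponse.tool_message_params([(tool, output)])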

GeminiCallResponseChunk

Bases: BaseCallResponseChunk[GenerateContentResponse, GeminiTool]

Convenience wrapper around chat completion streaming chunks.

When using Mirascope's convenience wrappers to interact with Gemini models via GeminiCall, responses using GeminiCall.stream() will return a GeminiCallResponseChunk, whereby the implemented properties allow for simpler syntax and a convenient developer experience.

Example:

from mirascope.gemini import GeminiCall


class Math(GeminiCall):
    prompt_template = "What is 1 + 2?"


content = ""
for chunk in Math().stream():
    content += chunk.content
    print(content)
#> 1
#  1 +
#  1 + 2
#  1 + 2 equals
#  1 + 2 equals
#  1 + 2 equals 3
#  1 + 2 equals 3.
Source code in mirascope/gemini/types.py
class GeminiCallResponseChunk(
    BaseCallResponseChunk[GenerateContentResponse, GeminiTool]
):
    """Convenience wrapper around chat completion streaming chunks.

    When using Mirascope's convenience wrappers to interact with Gemini models via
    `GeminiCall`, responses using `GeminiCall.stream()` will return a
    `GeminiCallResponseChunk`, whereby the implemented properties allow for simpler
    syntax and a convenient developer experience.

    Example:

    ```python
    from mirascope.gemini import GeminiCall


    class Math(GeminiCall):
        prompt_template = "What is 1 + 2?"


    content = ""
    for chunk in Math().stream():
        content += chunk.content
        print(content)
    #> 1
    #  1 +
    #  1 + 2
    #  1 + 2 equals
    #  1 + 2 equals
    #  1 + 2 equals 3
    #  1 + 2 equals 3.
    ```
    """

    user_message_param: Optional[ContentDict] = None

    @property
    def content(self) -> str:
        """Returns the chunk content for the 0th choice."""
        return self.chunk.candidates[0].content.parts[0].text

    @property
    def id(self) -> Optional[str]:
        """Returns the id of the response.

        google.generativeai does not return an id
        """
        return None

    @property
    def finish_reasons(self) -> list[str]:
        """Returns the finish reasons of the response."""
        finish_reasons = [
            "FINISH_REASON_UNSPECIFIED",
            "STOP",
            "MAX_TOKENS",
            "SAFETY",
            "RECITATION",
            "OTHER",
        ]

        return [
            finish_reasons[candidate.finish_reason]
            for candidate in self.chunk.candidates
        ]

    @property
    def model(self) -> None:
        """Returns the model name.

        google.generativeai does not return model, so we return None
        """
        return None

    @property
    def usage(self) -> None:
        """Returns the usage of the chat completion.

        google.generativeai does not have Usage, so we return None
        """
        return None

    @property
    def input_tokens(self) -> None:
        """Returns the number of input tokens."""
        return None

    @property
    def output_tokens(self) -> None:
        """Returns the number of output tokens."""
        return None

content: str property

Returns the chunk content for the 0th choice.

finish_reasons: list[str] property

Returns the finish reasons of the response.

id: Optional[str] property

Returns the id of the response.

google.generativeai does not return an id

input_tokens: None property

Returns the number of input tokens.

model: None property

Returns the model name.

google.generativeai does not return model, so we return None

output_tokens: None property

Returns the number of output tokens.

usage: None property

Returns the usage of the chat completion.

google.generativeai does not have Usage, so we return None

GeminiTool

Bases: BaseTool[FunctionCall]

A base class for easy use of tools with the Gemini API.

GeminiTool internally handles the logic that allows you to use tools with simple calls such as GeminiCallResponse.tool or GeminiTool.fn, as seen in the examples below.

Example:

from mirascope.gemini import GeminiCall, GeminiCallParams, GeminiTool


class CurrentWeather(GeminiTool):
    """A tool for getting the current weather in a location."""

    location: str


class WeatherForecast(GeminiCall):
    prompt_template = "What is the current weather in {city}?"

    city: str

    call_params = GeminiCallParams(
        model="gemini-pro",
        tools=[CurrentWeather],
    )


forecast = WeatherForecast(city="Tokyo").call().tool
print(forecast.location)
#> Tokyo
Source code in mirascope/gemini/tools.py
class GeminiTool(BaseTool[FunctionCall]):
    '''A base class for easy use of tools with the Gemini API.

    `GeminiTool` internally handles the logic that allows you to use tools with simple
    calls such as `GeminiCallResponse.tool` or `GeminiTool.fn`, as seen in the
    examples below.

    Example:

    ```python
    from mirascope.gemini import GeminiCall, GeminiCallParams, GeminiTool


    class CurrentWeather(GeminiTool):
        """A tool for getting the current weather in a location."""

        location: str


    class WeatherForecast(GeminiCall):
        prompt_template = "What is the current weather in {city}?"

        city: str

        call_params = GeminiCallParams(
            model="gemini-pro",
            tools=[CurrentWeather],
        )


    forecast = WeatherForecast(city="Tokyo").call().tool
    print(forecast.location)
    #> Tokyo
    ```
    '''

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @classmethod
    def tool_schema(cls) -> Tool:
        """Constructs a tool schema for use with the Gemini API.

        A Mirascope `GeminiTool` is deconstructed into a `Tool` schema for use with the
        Gemini API.

        Returns:
            The constructed `Tool` schema.
        """
        tool_schema = super().tool_schema()
        if "parameters" in tool_schema:
            if "$defs" in tool_schema["parameters"]:
                raise ValueError(
                    "Unfortunately Google's Gemini API cannot handle nested structures "
                    "with $defs."
                )
            tool_schema["parameters"]["properties"] = {
                prop: {
                    key: value for key, value in prop_schema.items() if key != "title"
                }
                for prop, prop_schema in tool_schema["parameters"]["properties"].items()
            }
        return Tool(function_declarations=[FunctionDeclaration(**tool_schema)])

    @classmethod
    def from_tool_call(cls, tool_call: FunctionCall) -> GeminiTool:
        """Extracts an instance of the tool constructed from a tool call response.

        Given a `FunctionCall` extracted from a Gemini chat completion response, this
        method constructs an instance of the tool.

        Args:
            tool_call: The `FunctionCall` from which to extract the tool.

        Returns:
            An instance of the tool constructed from the tool call.

        Raises:
            ValueError: if the tool call doesn't have any arguments.
            ValidationError: if the tool call doesn't match the tool schema.
        """
        if not tool_call.args:
            raise ValueError("Tool call doesn't have any arguments.")
        model_json = {key: value for key, value in tool_call.args.items()}
        model_json["tool_call"] = tool_call
        return cls.model_validate(model_json)

    @classmethod
    def from_model(cls, model: Type[BaseModel]) -> Type[GeminiTool]:
        """Constructs a `GeminiTool` type from a `BaseModel` type."""
        return convert_base_model_to_tool(model, GeminiTool)

    @classmethod
    def from_fn(cls, fn: Callable) -> Type[GeminiTool]:
        """Constructs a `GeminiTool` type from a function."""
        return convert_function_to_tool(fn, GeminiTool)

    @classmethod
    def from_base_type(cls, base_type: Type[BaseType]) -> Type[GeminiTool]:
        """Constructs a `GeminiTool` type from a `BaseType` type."""
        return convert_base_type_to_tool(base_type, GeminiTool)

from_base_type(base_type) classmethod

Constructs a GeminiTool type from a BaseType type.

Source code in mirascope/gemini/tools.py
@classmethod
def from_base_type(cls, base_type: Type[BaseType]) -> Type[GeminiTool]:
    """Constructs a `GeminiTool` type from a `BaseType` type."""
    return convert_base_type_to_tool(base_type, GeminiTool)

from_fn(fn) classmethod

Constructs a GeminiTool type from a function.

Source code in mirascope/gemini/tools.py
@classmethod
def from_fn(cls, fn: Callable) -> Type[GeminiTool]:
    """Constructs a `GeminiTool` type from a function."""
    return convert_function_to_tool(fn, GeminiTool)
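
A minimal sketch of generating a tool type from a plain function with from_fn (the function itself is illustrative):

def get_current_weather(location: str) -> str:
    """Gets the current weather for a location."""
    return f"It is sunny in {location}."


CurrentWeatherTool = GeminiTool.from_fn(get_current_weather)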

from_model(model) classmethod

Constructs a GeminiTool type from a BaseModel type.

Source code in mirascope/gemini/tools.py
@classmethod
def from_model(cls, model: Type[BaseModel]) -> Type[GeminiTool]:
    """Constructs a `GeminiTool` type from a `BaseModel` type."""
    return convert_base_model_to_tool(model, GeminiTool)

from_tool_call(tool_call) classmethod

Extracts an instance of the tool constructed from a tool call response.

Given a FunctionCall extracted from a Gemini chat completion response, this method constructs an instance of the tool.

Parameters:

    tool_call (FunctionCall, required): The FunctionCall from which to extract the tool.

Returns:

    GeminiTool: An instance of the tool constructed from the tool call.

Raises:

    ValueError: if the tool call doesn't have any arguments.
    ValidationError: if the tool call doesn't match the tool schema.

Source code in mirascope/gemini/tools.py
@classmethod
def from_tool_call(cls, tool_call: FunctionCall) -> GeminiTool:
    """Extracts an instance of the tool constructed from a tool call response.

    Given a `FunctionCall` extracted from a Gemini chat completion response, this
    method constructs an instance of the tool.

    Args:
        tool_call: The `FunctionCall` from which to extract the tool.

    Returns:
        An instance of the tool constructed from the tool call.

    Raises:
        ValueError: if the tool call doesn't have any arguments.
        ValidationError: if the tool call doesn't match the tool schema.
    """
    if not tool_call.args:
        raise ValueError("Tool call doesn't have any arguments.")
    model_json = {key: value for key, value in tool_call.args.items()}
    model_json["tool_call"] = tool_call
    return cls.model_validate(model_json)

tool_schema() classmethod

Constructs a tool schema for use with the Gemini API.

A Mirascope GeminiTool is deconstructed into a Tool schema for use with the Gemini API.

Returns:

    Tool: The constructed Tool schema.

Source code in mirascope/gemini/tools.py
@classmethod
def tool_schema(cls) -> Tool:
    """Constructs a tool schema for use with the Gemini API.

    A Mirascope `GeminiTool` is deconstructed into a `Tool` schema for use with the
    Gemini API.

    Returns:
        The constructed `Tool` schema.
    """
    tool_schema = super().tool_schema()
    if "parameters" in tool_schema:
        if "$defs" in tool_schema["parameters"]:
            raise ValueError(
                "Unfortunately Google's Gemini API cannot handle nested structures "
                "with $defs."
            )
        tool_schema["parameters"]["properties"] = {
            prop: {
                key: value for key, value in prop_schema.items() if key != "title"
            }
            for prop, prop_schema in tool_schema["parameters"]["properties"].items()
        }
    return Tool(function_declarations=[FunctionDeclaration(**tool_schema)])

MessageRole

Bases: _Enum

Roles that the BasePrompt messages parser can parse from the template.

SYSTEM: A system message.
USER: A user message.
ASSISTANT: A message response from the assistant or chat client.
MODEL: A message response from the assistant or chat client. Model is used by Google's Gemini instead of assistant, which doesn't have system messages.
CHATBOT: A message response from the chat client. Chatbot is used by Cohere instead of assistant.
TOOL: A message representing the output of calling a tool.

Source code in mirascope/enums.py
class MessageRole(_Enum):
    """Roles that the `BasePrompt` messages parser can parse from the template.

    SYSTEM: A system message.
    USER: A user message.
    ASSISTANT: A message response from the assistant or chat client.
    MODEL: A message response from the assistant or chat client. Model is used by
        Google's Gemini instead of assistant, which doesn't have system messages.
    CHATBOT: A message response from the chat client. Chatbot is used by Cohere instead
        of assistant.
    TOOL: A message representing the output of calling a tool.
    """

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    MODEL = "model"
    CHATBOT = "chatbot"
    TOOL = "tool"

get_wrapped_async_client(client, self)

Get a wrapped async client.

Source code in mirascope/base/ops_utils.py
def get_wrapped_async_client(client: T, self: Union[BaseCall, BaseEmbedder]) -> T:
    """Get a wrapped async client."""
    if self.configuration.client_wrappers:
        for op in self.configuration.client_wrappers:
            if op == "langfuse":  # pragma: no cover
                from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI

                client = LangfuseAsyncOpenAI(
                    api_key=self.api_key, base_url=self.base_url
                )
            elif op == "logfire":  # pragma: no cover
                import logfire

                if self._provider == "openai":
                    logfire.instrument_openai(client)  # type: ignore
                elif self._provider == "anthropic":
                    logfire.instrument_anthropic(client)  # type: ignore
            elif callable(op):
                client = op(client)
    return client

get_wrapped_call(call, self, **kwargs)

Wrap a call to add the llm_ops parameter if it exists.

Source code in mirascope/base/ops_utils.py
def get_wrapped_call(call: C, self: Union[BaseCall, BaseEmbedder], **kwargs) -> C:
    """Wrap a call to add the `llm_ops` parameter if it exists."""
    if self.configuration.llm_ops:
        wrapped_call = call
        for op in self.configuration.llm_ops:
            if op == "weave":  # pragma: no cover
                import weave

                wrapped_call = weave.op()(wrapped_call)
            elif callable(op):
                wrapped_call = op(
                    wrapped_call,
                    self._provider,
                    **kwargs,
                )
        return wrapped_call
    return call

get_wrapped_client(client, self)

Get a wrapped client.

Source code in mirascope/base/ops_utils.py
def get_wrapped_client(client: T, self: Union[BaseCall, BaseEmbedder]) -> T:
    """Get a wrapped client."""
    if self.configuration.client_wrappers:
        for op in self.configuration.client_wrappers:  # pragma: no cover
            if op == "langfuse":
                from langfuse.openai import OpenAI as LangfuseOpenAI

                client = LangfuseOpenAI(api_key=self.api_key, base_url=self.base_url)
            elif op == "logfire":  # pragma: no cover
                import logfire

                if self._provider == "openai":
                    logfire.instrument_openai(client)  # type: ignore
                elif self._provider == "anthropic":
                    logfire.instrument_anthropic(client)  # type: ignore
            elif callable(op):
                client = op(client)
    return client
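
Beyond the built-in "langfuse" and "logfire" integrations, client_wrappers accepts plain callables. A hedged sketch of attaching a custom wrapper through configuration (the wrapper and the mirascope.base import path are assumptions):

from mirascope.base import BaseConfig  # import path assumed


def log_client(client):
    """Illustrative wrapper that logs the client type and returns it unchanged."""
    print(f"wrapping client: {type(client).__name__}")
    return client


class LoggedBookRecommender(BookRecommender):
    configuration = BaseConfig(llm_ops=[], client_wrappers=[log_client])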

retry(fn)

Decorator for retrying a function.

Source code in mirascope/base/utils.py
def retry(fn: F) -> F:
    """Decorator for retrying a function."""

    @wraps(fn)
    def wrapper(*args, **kwargs):
        """Wrapper for retrying a function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = Retrying(stop=stop_after_attempt(retries))
            else:
                return fn(*args, **kwargs)
        try:
            for attempt in retries:
                with attempt:
                    result = fn(*args, **kwargs)
                if (
                    attempt.retry_state.outcome
                    and not attempt.retry_state.outcome.failed
                ):
                    attempt.retry_state.set_result(result)
            return result
        except RetryError:
            raise

    @wraps(fn)
    async def wrapper_async(*args, **kwargs):
        """Wrapper for retrying an async function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = AsyncRetrying(stop=stop_after_attempt(retries))
            else:
                return await fn(*args, **kwargs)
        try:
            async for attempt in retries:
                with attempt:
                    result = await fn(*args, **kwargs)
                if (
                    attempt.retry_state.outcome
                    and not attempt.retry_state.outcome.failed
                ):
                    attempt.retry_state.set_result(result)
            return result
        except RetryError:
            raise

    @wraps(fn)
    def wrapper_generator(*args, **kwargs):
        """Wrapper for retrying a generator function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = Retrying(stop=stop_after_attempt(retries))
            else:
                for value in fn(*args, **kwargs):
                    yield value
                return
        try:
            for attempt in retries:
                with attempt:
                    for value in fn(*args, **kwargs):
                        yield value
        except RetryError:
            raise

    @wraps(fn)
    async def wrapper_generator_async(*args, **kwargs):
        """Wrapper for retrying an async generator function."""
        retries = kwargs.pop("retries", 0)
        if isinstance(retries, int):
            if retries > 0:
                retries = AsyncRetrying(stop=stop_after_attempt(retries))
            else:
                async for value in fn(*args, **kwargs):
                    yield value
                return
        try:
            async for attempt in retries:
                with attempt:
                    async for value in fn(*args, **kwargs):
                        yield value
        except RetryError:
            raise

    if inspect.iscoroutinefunction(fn):
        return cast(F, wrapper_async)
    elif inspect.isgeneratorfunction(fn):
        return cast(F, wrapper_generator)
    elif inspect.isasyncgenfunction(fn):
        return cast(F, wrapper_generator_async)
    else:
        return cast(F, wrapper)
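
Since retries accepts either an int or a tenacity (Async)Retrying instance, stop and wait behavior can be configured directly. A hedged sketch using the BookRecommender class from earlier:

from tenacity import Retrying, stop_after_attempt, wait_exponential

response = BookRecommender(genre="horror").call(
    retries=Retrying(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=1, max=8),
    )
)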