mirascope.core.base.call_factory

call_factory

A factory method for creating provider-specific call decorators.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `TCallResponse` | `type[_BaseCallResponseT]` | The provider-specific `BaseCallResponse` type. | required |
| `TCallResponseChunk` | `type[_BaseCallResponseChunkT]` | The provider-specific `BaseCallResponseChunk` type. | required |
| `TDynamicConfig` | `type[_BaseDynamicConfigT]` | The provider-specific `BaseDynamicConfig` type. | required |
| `TToolType` | `type[_BaseToolT]` | The provider-specific `BaseTool` type. | required |
| `TStream` | `type[_BaseStreamT]` | The provider-specific `BaseStream` type. | required |
| `TCallParams` | `type[_BaseCallParamsT]` | The provider-specific `BaseCallParams` type. | required |
| `default_call_params` | `_BaseCallParamsT` | The default call parameters to use, which must match the `TCallParams` type if provided. | required |
| `setup_call` | `SetupCall[_BaseClientT, _BaseDynamicConfigT, _BaseCallParamsT, _ResponseT, _ResponseChunkT, _BaseToolT]` | The helper for setting up a call; it returns the configured create function, the prompt template, the provider-specific messages, the provider-specific tool types, and the finalized `call_kwargs` used to make the API call via the create function. | required |
| `get_json_output` | `GetJsonOutput[_BaseCallResponseT \| _BaseCallResponseChunkT]` | The helper for getting JSON output from a call response. | required |
| `handle_stream` | `HandleStream[_ResponseChunkT, _BaseCallResponseChunkT, _BaseToolT]` | The helper that converts a provider's original stream generator into a generator of `(chunk, tool)` tuples, where `chunk` and `tool` are provider-specific `BaseCallResponseChunk` and `BaseTool` instances, respectively. | required |
| `handle_stream_async` | `HandleStreamAsync[_ResponseChunkT, _BaseCallResponseChunkT, _BaseToolT]` | The same as `handle_stream`, but for asynchronous streaming. | required |
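
For orientation, here is roughly how a provider module wires these arguments together. The OpenAI types are public exports, but the private `_utils` import path and the exact wiring are assumptions for this sketch, not a verbatim copy of the provider's source:

```python
from mirascope.core.base import call_factory
from mirascope.core.openai import (
    OpenAICallParams,
    OpenAICallResponse,
    OpenAICallResponseChunk,
    OpenAIDynamicConfig,
    OpenAIStream,
    OpenAITool,
)

# Assumed location of the provider's SetupCall / GetJsonOutput / HandleStream
# helper implementations; real providers keep these in private modules.
from mirascope.core.openai._utils import (
    get_json_output,
    handle_stream,
    handle_stream_async,
    setup_call,
)

# The returned `base_call` becomes the provider's `call` decorator.
openai_call = call_factory(
    TCallResponse=OpenAICallResponse,
    TCallResponseChunk=OpenAICallResponseChunk,
    TDynamicConfig=OpenAIDynamicConfig,
    TToolType=OpenAITool,
    TStream=OpenAIStream,
    TCallParams=OpenAICallParams,
    default_call_params=OpenAICallParams(),
    setup_call=setup_call,
    get_json_output=get_json_output,
    handle_stream=handle_stream,
    handle_stream_async=handle_stream_async,
)
```
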
Source code in `mirascope/core/base/_call_factory.py`:

```python
def call_factory(  # noqa: ANN202
    *,
    TCallResponse: type[_BaseCallResponseT],
    TCallResponseChunk: type[_BaseCallResponseChunkT],
    TDynamicConfig: type[_BaseDynamicConfigT],
    TToolType: type[_BaseToolT],
    TStream: type[_BaseStreamT],
    TCallParams: type[_BaseCallParamsT],
    default_call_params: _BaseCallParamsT,
    setup_call: SetupCall[
        _BaseClientT,
        _BaseDynamicConfigT,
        _BaseCallParamsT,
        _ResponseT,
        _ResponseChunkT,
        _BaseToolT,
    ],
    get_json_output: GetJsonOutput[_BaseCallResponseT | _BaseCallResponseChunkT],
    handle_stream: HandleStream[_ResponseChunkT, _BaseCallResponseChunkT, _BaseToolT],
    handle_stream_async: HandleStreamAsync[
        _ResponseChunkT, _BaseCallResponseChunkT, _BaseToolT
    ],
):
    """A factory method for creating provider-specific call decorators.

    Args:
        TCallResponse: The provider-specific `BaseCallResponse` type.
        TCallResponseChunk: The provider-specific `BaseCallResponseChunk` type.
        TDynamicConfig: The provider-specific `BaseDynamicConfig` type.
        TToolType: The provider-specific `BaseTool` type.
        TStream: The provider-specific `BaseStream` type.
        TCallParams: The provider-specific `BaseCallParams` type.
        default_call_params: The default call parameters to use, which must match the
            `TCallParams` type if provided.
        setup_call: The helper method for setting up a call, which returns the
            configured create function, the prompt template, the list of
            provider-specific messages, the list of provider-specific tool types, and
            the finalized `call_kwargs` used to make the API call via the create
            function.
        get_json_output: The helper method for getting JSON output from a call response.
        handle_stream: The helper method for converting a provider's original stream
            generator into a generator that returns tuples of `(chunk, tool)` where
            `chunk` and `tool` are provider-specific `BaseCallResponseChunk` and
            `BaseTool` instances, respectively.
        handle_stream_async: The same helper method as `handle_stream` except for
            handling asynchronous streaming.
    """

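    # The overloads below enumerate the supported combinations of `stream`,
    # `response_model`, and `output_parser`; unsupported combinations are
    # annotated `NoReturn` so type checkers reject them.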
    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[False] = False,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: None = None,
        output_parser: None = None,
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[TDynamicConfig, TCallResponse, TCallResponse]: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[False] = False,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: None = None,
        output_parser: Callable[[TCallResponse], _ParsedOutputT],
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[TDynamicConfig, _ParsedOutputT, _ParsedOutputT]: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[False] = False,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: None = None,
        output_parser: Callable[[TCallResponseChunk], _ParsedOutputT],
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> NoReturn: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[True] = True,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: None = None,
        output_parser: None = None,
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[TDynamicConfig, TStream, TStream]: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[True] = True,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: None = None,
        output_parser: Callable[[TCallResponseChunk], _ParsedOutputT],
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> NoReturn: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[True] = True,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: None = None,
        output_parser: Callable[[TCallResponse], _ParsedOutputT],
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> NoReturn: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[False] = False,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: type[_ResponseModelT],
        output_parser: None = None,
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[TDynamicConfig, _ResponseModelT, _ResponseModelT]: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[False] = False,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: type[_ResponseModelT],
        output_parser: Callable[[_ResponseModelT], _ParsedOutputT],
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[TDynamicConfig, _ParsedOutputT, _ParsedOutputT]: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[True],
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: type[_ResponseModelT],
        output_parser: None = None,
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[
        TDynamicConfig, Iterable[_ResponseModelT], AsyncIterable[_ResponseModelT]
    ]: ...

    @overload
    def base_call(
        model: str,
        *,
        stream: Literal[True],
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: type[_ResponseModelT],
        output_parser: Callable[[TCallResponse], _ParsedOutputT]
        | Callable[[TCallResponseChunk], _ParsedOutputT]
        | Callable[[_ResponseModelT], _ParsedOutputT]
        | None,
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> NoReturn: ...

    def base_call(
        model: str,
        *,
        stream: bool = False,
        tools: list[type[BaseTool] | Callable] | None = None,
        response_model: type[_ResponseModelT] | None = None,
        output_parser: Callable[[TCallResponse], _ParsedOutputT]
        | Callable[[TCallResponseChunk], _ParsedOutputT]
        | Callable[[_ResponseModelT], _ParsedOutputT]
        | None = None,
        json_mode: bool = False,
        client: _BaseClientT | None = None,
        call_params: TCallParams | None = None,
    ) -> LLMFunctionDecorator[
        TDynamicConfig,
        TCallResponse
        | _ParsedOutputT
        | TStream
        | _ResponseModelT
        | Iterable[_ResponseModelT],
        TCallResponse
        | _ParsedOutputT
        | TStream
        | _ResponseModelT
        | AsyncIterable[_ResponseModelT],
    ]:
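        # Output parsers consume a complete response, so they cannot be
        # combined with streaming.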
        if stream and output_parser:
            raise ValueError("Cannot use `output_parser` with `stream=True`.")

        if call_params is None:
            call_params = default_call_params

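        # Structured outputs: delegate to the structured-stream or extraction
        # factory to produce `response_model` instances.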
        if response_model:
            if stream:
                return partial(
                    structured_stream_factory(
                        TCallResponse=TCallResponse,
                        TCallResponseChunk=TCallResponseChunk,
                        TStream=TStream,
                        TToolType=TToolType,
                        setup_call=setup_call,
                        get_json_output=get_json_output,
                    ),
                    model=model,
                    response_model=response_model,
                    json_mode=json_mode,
                    client=client,
                    call_params=call_params,
                )  # pyright: ignore [reportReturnType, reportCallIssue]
            else:
                return partial(
                    extract_factory(
                        TCallResponse=TCallResponse,
                        TToolType=TToolType,
                        setup_call=setup_call,
                        get_json_output=get_json_output,
                    ),
                    model=model,
                    response_model=response_model,
                    output_parser=output_parser,
                    json_mode=json_mode,
                    client=client,
                    call_params=call_params,
                )  # pyright: ignore [reportCallIssue]

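        # Unstructured streaming: wrap the provider's stream in `TStream`.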
        if stream:
            return partial(
                stream_factory(
                    TCallResponse=TCallResponse,
                    TStream=TStream,
                    setup_call=setup_call,
                    handle_stream=handle_stream,
                    handle_stream_async=handle_stream_async,
                ),
                model=model,
                tools=tools,
                json_mode=json_mode,
                client=client,
                call_params=call_params,
            )  # pyright: ignore [reportReturnType]
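        # Default: a standard non-streaming call via the create factory.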
        return partial(
            create_factory(TCallResponse=TCallResponse, setup_call=setup_call),
            model=model,
            tools=tools,
            output_parser=output_parser,
            json_mode=json_mode,
            client=client,
            call_params=call_params,
        )  # pyright: ignore [reportReturnType, reportCallIssue]

    return base_call
```
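
The decorator returned by the factory is what users ultimately apply to their prompt functions. A minimal usage sketch, assuming the OpenAI provider and an example model name:

```python
from mirascope.core import openai


@openai.call("gpt-4o-mini")
def recommend_book(genre: str) -> str:
    return f"Recommend a {genre} book"


# With stream=False, no response_model, and no output_parser, this returns the
# provider-specific `OpenAICallResponse`, per the first overload above.
response = recommend_book("fantasy")
print(response.content)
```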