mirascope.wandb.wandb

Call and extractor mixins integrated with Weights & Biases (W&B) to support trace logging.

BaseCallResponse

Bases: BaseModel, Generic[ResponseT, BaseToolT], ABC

A base abstract interface for LLM call responses.

Attributes:

    response (ResponseT): The original response from whichever model response this wraps.

Source code in mirascope/base/types.py
class BaseCallResponse(BaseModel, Generic[ResponseT, BaseToolT], ABC):
    """A base abstract interface for LLM call responses.

    Attributes:
        response: The original response from whichever model response this wraps.
    """

    response: ResponseT
    user_message_param: Optional[Any] = None
    tool_types: Optional[list[Type[BaseToolT]]] = None
    start_time: float  # The start time of the completion in ms
    end_time: float  # The end time of the completion in ms
    cost: Optional[float] = None  # The cost of the completion in dollars

    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)

    @property
    @abstractmethod
    def message_param(self) -> Any:
        """Returns the assistant's response as a message parameter."""
        ...  # pragma: no cover

    @property
    @abstractmethod
    def tools(self) -> Optional[list[BaseToolT]]:
        """Returns the tools for the 0th choice message."""
        ...  # pragma: no cover

    @property
    @abstractmethod
    def tool(self) -> Optional[BaseToolT]:
        """Returns the 0th tool for the 0th choice message."""
        ...  # pragma: no cover

    @classmethod
    @abstractmethod
    def tool_message_params(
        cls, tools_and_outputs: list[tuple[BaseToolT, Any]]
    ) -> list[Any]:
        """Returns the tool message parameters for tool call results."""
        ...  # pragma: no cover

    @property
    @abstractmethod
    def content(self) -> str:
        """Should return the string content of the response.

        If there are multiple choices in a response, this method should select the 0th
        choice and return its string content.

        If there is no string content (e.g. when using tools), this method must return
        the empty string.
        """
        ...  # pragma: no cover

    @property
    @abstractmethod
    def finish_reasons(self) -> Union[None, list[str]]:
        """Should return the finish reasons of the response.

        If there is no finish reason, this method must return None.
        """
        ...  # pragma: no cover

    @property
    @abstractmethod
    def model(self) -> Optional[str]:
        """Should return the name of the response model."""
        ...  # pragma: no cover

    @property
    @abstractmethod
    def id(self) -> Optional[str]:
        """Should return the id of the response."""
        ...  # pragma: no cover

    @property
    @abstractmethod
    def usage(self) -> Any:
        """Should return the usage of the response.

        If there is no usage, this method must return None.
        """
        ...  # pragma: no cover

    @property
    @abstractmethod
    def input_tokens(self) -> Optional[Union[int, float]]:
        """Should return the number of input tokens.

        If there is no input_tokens, this method must return None.
        """
        ...  # pragma: no cover

    @property
    @abstractmethod
    def output_tokens(self) -> Optional[Union[int, float]]:
        """Should return the number of output tokens.

        If there is no output_tokens, this method must return None.
        """
        ...  # pragma: no cover

content: str abstractmethod property

Should return the string content of the response.

If there are multiple choices in a response, this method should select the 0th choice and return its string content.

If there is no string content (e.g. when using tools), this method must return the empty string.
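
For illustration, here is a hedged sketch of how a provider-specific subclass might implement content against an OpenAI-style response, where the 0th choice's message content may be None when the model calls a tool (the choices/message field names follow the OpenAI response shape and are an assumption, not a confirmed implementation):

# inside a hypothetical provider-specific BaseCallResponse subclass
@property
def content(self) -> str:
    """Returns the 0th choice's string content, or "" when there is none."""
    message = self.response.choices[0].message
    return message.content if message.content is not None else ""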

finish_reasons: Union[None, list[str]] abstractmethod property

Should return the finish reasons of the response.

If there is no finish reason, this method must return None.

id: Optional[str] abstractmethod property

Should return the id of the response.

input_tokens: Optional[Union[int, float]] abstractmethod property

Should return the number of input tokens.

If there is no input_tokens, this method must return None.

message_param: Any abstractmethod property

Returns the assistant's response as a message parameter.

model: Optional[str] abstractmethod property

Should return the name of the response model.

output_tokens: Optional[Union[int, float]] abstractmethod property

Should return the number of output tokens.

If there is no output_tokens, this method must return None.

tool: Optional[BaseToolT] abstractmethod property

Returns the 0th tool for the 0th choice message.

tools: Optional[list[BaseToolT]] abstractmethod property

Returns the tools for the 0th choice message.

usage: Any abstractmethod property

Should return the usage of the response.

If there is no usage, this method must return None.

tool_message_params(tools_and_outputs) abstractmethod classmethod

Returns the tool message parameters for tool call results.

Source code in mirascope/base/types.py
@classmethod
@abstractmethod
def tool_message_params(
    cls, tools_and_outputs: list[tuple[BaseToolT, Any]]
) -> list[Any]:
    """Returns the tool message parameters for tool call results."""
    ...  # pragma: no cover
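
As a usage sketch, tool call results can be fed back into a follow-up call by pairing each tool with its output (assumes messages is the running list of message parameters and response is a provider call response whose tool property returned a tool instance):

if response.tool:
    output = response.tool.call()  # executes the function attached to the tool
    messages += [
        response.message_param,  # the assistant message containing the tool call
        *response.tool_message_params([(response.tool, output)]),
    ]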

BasePrompt

Bases: BaseModel

The base class for working with prompts.

This class is implemented as the base for all prompting needs across various model providers.

Example:

from mirascope import BasePrompt


class BookRecommendationPrompt(BasePrompt):
    """A prompt for recommending a book."""

    prompt_template = """
    SYSTEM: You are the world's greatest librarian.
    USER: Please recommend a {genre} book.
    """

    genre: str


prompt = BookRecommendationPrompt(genre="fantasy")
print(prompt.messages())
#> [{"role": "user", "content": "Please recommend a fantasy book."}]

print(prompt)
#> SYSTEM: You are the world's greatest librarian.
#> USER: Please recommend a fantasy book.
Source code in mirascope/base/prompts.py
class BasePrompt(BaseModel):
    '''The base class for working with prompts.

    This class is implemented as the base for all prompting needs across various model
    providers.

    Example:

    ```python
    from mirascope import BasePrompt


    class BookRecommendationPrompt(BasePrompt):
        """A prompt for recommending a book."""

        prompt_template = """
        SYSTEM: You are the world's greatest librarian.
        USER: Please recommend a {genre} book.
        """

        genre: str


    prompt = BookRecommendationPrompt(genre="fantasy")
    print(prompt.messages())
    #> [{"role": "user", "content": "Please recommend a fantasy book."}]

    print(prompt)
    #> SYSTEM: You are the world's greatest librarian.
    #> USER: Please recommend a fantasy book.
    ```
    '''

    tags: ClassVar[list[str]] = []
    prompt_template: ClassVar[str] = ""

    def __str__(self) -> str:
        """Returns the formatted template."""
        return self._format_template(self.prompt_template)

    def messages(self) -> Union[list[Message], Any]:
        """Returns the template as a formatted list of messages."""
        message_type_by_role = {
            MessageRole.SYSTEM: SystemMessage,
            MessageRole.USER: UserMessage,
            MessageRole.ASSISTANT: AssistantMessage,
            MessageRole.MODEL: ModelMessage,
            MessageRole.TOOL: ToolMessage,
        }
        return [
            message_type_by_role[MessageRole(message["role"])](**message)
            for message in self._parse_messages(list(message_type_by_role.keys()))
        ]

    def dump(
        self,
    ) -> dict[str, Any]:
        """Dumps the contents of the prompt into a dictionary."""
        return {
            "tags": self.tags,
            "template": dedent(self.prompt_template).strip("\n"),
            "inputs": self.model_dump(),
        }

    ############################## PRIVATE METHODS ###################################

    def _format_template(self, template: str):
        """Formats the given `template` with attributes matching template variables."""
        dedented_template = dedent(template).strip()
        template_vars = [
            var
            for _, var, _, _ in Formatter().parse(dedented_template)
            if var is not None
        ]

        values = {}
        for var in template_vars:
            attr = getattr(self, var)
            if attr and isinstance(attr, list):
                if isinstance(attr[0], list):
                    values[var] = "\n\n".join(
                        ["\n".join([str(subitem) for subitem in item]) for item in attr]
                    )
                else:
                    values[var] = "\n".join([str(item) for item in attr])
            else:
                values[var] = str(attr)

        return dedented_template.format(**values)

    def _parse_messages(self, roles: list[str]) -> list[Message]:
        """Returns messages parsed from the `template` ClassVar.

        Raises:
            ValueError: if the template contains an unknown role.
        """
        messages = []
        re_roles = "|".join([role.upper() for role in roles] + ["MESSAGES"])
        for match in re.finditer(
            rf"({re_roles}):((.|\n)+?)(?=({re_roles}):|\Z)",
            self.prompt_template,
        ):
            role = match.group(1).lower()
            if role == "messages":
                template_var = [
                    var
                    for _, var, _, _ in Formatter().parse(match.group(2))
                    if var is not None
                ][0]
                attribute = getattr(self, template_var)
                if attribute is None or not isinstance(attribute, list):
                    raise ValueError(
                        f"MESSAGES keyword used with attribute `{template_var}`, which "
                        "is not a `list` of messages."
                    )
                messages += attribute
            else:
                content = self._format_template(match.group(2))
                if content:
                    messages.append({"role": role, "content": content})
        if len(messages) == 0:
            messages.append(
                {
                    "role": "user",
                    "content": self._format_template(self.prompt_template),
                }
            )
        return messages
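
The MESSAGES keyword handled by _parse_messages lets a template splice in a list of prior messages stored on an attribute; if the attribute is not a list, a ValueError is raised. A minimal sketch (the ChatPrompt class and its history field are hypothetical):

from mirascope import BasePrompt


class ChatPrompt(BasePrompt):
    prompt_template = """
    SYSTEM: You are the world's greatest librarian.
    MESSAGES: {history}
    USER: {question}
    """

    history: list[dict]  # spliced in verbatim by the MESSAGES keyword
    question: str


prompt = ChatPrompt(
    history=[
        {"role": "user", "content": "Hi!"},
        {"role": "assistant", "content": "Hello! How can I help?"},
    ],
    question="Please recommend a fantasy book.",
)
# prompt.messages() -> the system message, the two history messages, then the user message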

dump()

Dumps the contents of the prompt into a dictionary.

Source code in mirascope/base/prompts.py
def dump(
    self,
) -> dict[str, Any]:
    """Dumps the contents of the prompt into a dictionary."""
    return {
        "tags": self.tags,
        "template": dedent(self.prompt_template).strip("\n"),
        "inputs": self.model_dump(),
    }
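
For example, the BookRecommendationPrompt defined above would dump to something like (exact dict formatting may vary):

prompt = BookRecommendationPrompt(genre="fantasy")
print(prompt.dump())
#> {
#>     "tags": [],
#>     "template": "SYSTEM: You are the world's greatest librarian.\nUSER: Please recommend a {genre} book.",
#>     "inputs": {"genre": "fantasy"},
#> }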

messages()

Returns the template as a formatted list of messages.

Source code in mirascope/base/prompts.py
def messages(self) -> Union[list[Message], Any]:
    """Returns the template as a formatted list of messages."""
    message_type_by_role = {
        MessageRole.SYSTEM: SystemMessage,
        MessageRole.USER: UserMessage,
        MessageRole.ASSISTANT: AssistantMessage,
        MessageRole.MODEL: ModelMessage,
        MessageRole.TOOL: ToolMessage,
    }
    return [
        message_type_by_role[MessageRole(message["role"])](**message)
        for message in self._parse_messages(list(message_type_by_role.keys()))
    ]

BaseTool

Bases: BaseModel, Generic[ToolCallT], ABC

A base class for easy use of tools with prompts.

BaseTool is an abstract class interface and should not be used directly. When implementing a class that extends BaseTool, you must include the original tool_call from which the tool was instantiated. Make sure to skip tool_call when generating the schema by annotating it with SkipJsonSchema.

Source code in mirascope/base/tools.py
class BaseTool(BaseModel, Generic[ToolCallT], ABC):
    """A base class for easy use of tools with prompts.

    `BaseTool` is an abstract class interface and should not be used directly. When
    implementing a class that extends `BaseTool`, you must include the original
    `tool_call` from which the tool was instantiated. Make sure to skip `tool_call`
    when generating the schema by annotating it with `SkipJsonSchema`.
    """

    tool_call: SkipJsonSchema[ToolCallT]

    model_config = ConfigDict(arbitrary_types_allowed=True)

    @classmethod
    def name(cls) -> str:
        """Returns the name of the tool."""
        return cls.__name__

    @classmethod
    def description(cls) -> str:
        """Returns the description of the tool."""
        return inspect.cleandoc(cls.__doc__) if cls.__doc__ else DEFAULT_TOOL_DOCSTRING

    @property
    def args(self) -> dict[str, Any]:
        """The arguments of the tool as a dictionary."""
        return {
            field: getattr(self, field)
            for field in self.model_fields
            if field != "tool_call"
        }

    @property
    def fn(self) -> Callable[..., str]:
        """Returns the function that the tool describes."""
        raise RuntimeError("Tool does not have an attached function.")

    def call(self) -> str:
        """Calls the tool's `fn` with the tool's `args`."""
        return self.fn(**self.args)

    @classmethod
    def tool_schema(cls) -> Any:
        """Constructs a JSON Schema tool schema from the `BaseModel` schema defined."""
        model_schema = cls.model_json_schema()
        model_schema.pop("title", None)
        model_schema.pop("description", None)

        fn = {"name": cls.name(), "description": cls.description()}
        if model_schema["properties"]:
            fn["parameters"] = model_schema  # type: ignore

        return fn

    @classmethod
    @abstractmethod
    def from_tool_call(cls, tool_call: ToolCallT) -> BaseTool:
        """Extracts an instance of the tool constructed from a tool call response."""
        ...  # pragma: no cover

    @classmethod
    @abstractmethod
    def from_model(cls, model: type[BaseModel]) -> type[BaseTool]:
        """Constructs a `BaseTool` type from a `BaseModel` type."""
        ...  # pragma: no cover

    @classmethod
    @abstractmethod
    def from_fn(cls, fn: Callable) -> type[BaseTool]:
        """Constructs a `BaseTool` type from a function."""
        ...  # pragma: no cover

    @classmethod
    @abstractmethod
    def from_base_type(cls, base_type: type[BaseType]) -> type[BaseTool]:
        """Constructs a `BaseTool` type from a `BaseType` type."""
        ...  # pragma: no cover

args: dict[str, Any] property

The arguments of the tool as a dictionary.

fn: Callable[..., str] property

Returns the function that the tool describes.

call()

Calls the tool's fn with the tool's args.

Source code in mirascope/base/tools.py
def call(self) -> str:
    """Calls the tool's `fn` with the tool's `args`."""
    return self.fn(**self.args)
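
By default fn raises a RuntimeError, so call() only works for tools that have a function attached (e.g. one constructed via from_fn). A hedged sketch of overriding fn directly in a subclass (GetWeather, get_weather, and SomeProviderTool are hypothetical; SomeProviderTool stands in for a provider-specific BaseTool subclass):

from typing import Callable


def get_weather(location: str) -> str:
    """Returns a (fake) weather report for `location`."""
    return f"It is sunny in {location}."


class GetWeather(SomeProviderTool):  # hypothetical provider-specific BaseTool subclass
    location: str

    @property
    def fn(self) -> Callable[..., str]:
        return get_weather


# tool = GetWeather.from_tool_call(tool_call)  # built from an LLM tool call
# tool.call()  #> "It is sunny in Paris."  (given location="Paris")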

description() classmethod

Returns the description of the tool.

Source code in mirascope/base/tools.py
@classmethod
def description(cls) -> str:
    """Returns the description of the tool."""
    return inspect.cleandoc(cls.__doc__) if cls.__doc__ else DEFAULT_TOOL_DOCSTRING

from_base_type(base_type) abstractmethod classmethod

Constructs a BaseTool type from a BaseType type.

Source code in mirascope/base/tools.py
@classmethod
@abstractmethod
def from_base_type(cls, base_type: type[BaseType]) -> type[BaseTool]:
    """Constructs a `BaseTool` type from a `BaseType` type."""
    ...  # pragma: no cover

from_fn(fn) abstractmethod classmethod

Constructs a BaseTool type from a function.

Source code in mirascope/base/tools.py
@classmethod
@abstractmethod
def from_fn(cls, fn: Callable) -> type[BaseTool]:
    """Constructs a `BaseTool` type from a function."""
    ...  # pragma: no cover

from_model(model) abstractmethod classmethod

Constructs a BaseTool type from a BaseModel type.

Source code in mirascope/base/tools.py
@classmethod
@abstractmethod
def from_model(cls, model: type[BaseModel]) -> type[BaseTool]:
    """Constructs a `BaseTool` type from a `BaseModel` type."""
    ...  # pragma: no cover

from_tool_call(tool_call) abstractmethod classmethod

Extracts an instance of the tool constructed from a tool call response.

Source code in mirascope/base/tools.py
@classmethod
@abstractmethod
def from_tool_call(cls, tool_call: ToolCallT) -> BaseTool:
    """Extracts an instance of the tool constructed from a tool call response."""
    ...  # pragma: no cover

name() classmethod

Returns the name of the tool.

Source code in mirascope/base/tools.py
@classmethod
def name(cls) -> str:
    """Returns the name of the tool."""
    return cls.__name__

tool_schema() classmethod

Constructs a JSON Schema tool schema from the BaseModel schema defined.

Source code in mirascope/base/tools.py
@classmethod
def tool_schema(cls) -> Any:
    """Constructs a JSON Schema tool schema from the `BaseModel` schema defined."""
    model_schema = cls.model_json_schema()
    model_schema.pop("title", None)
    model_schema.pop("description", None)

    fn = {"name": cls.name(), "description": cls.description()}
    if model_schema["properties"]:
        fn["parameters"] = model_schema  # type: ignore

    return fn
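
To illustrate the shape of the generated schema, a sketch using a hypothetical tool (SomeProviderTool stands in for a provider-specific BaseTool subclass; the exact parameters payload comes from Pydantic's model_json_schema(), with the tool_call field skipped):

class FormatBook(SomeProviderTool):
    """Returns a nicely formatted book recommendation."""

    title: str
    author: str


print(FormatBook.tool_schema())
#> {
#>     "name": "FormatBook",
#>     "description": "Returns a nicely formatted book recommendation.",
#>     "parameters": {
#>         "properties": {
#>             "title": {"title": "Title", "type": "string"},
#>             "author": {"title": "Author", "type": "string"},
#>         },
#>         "required": ["title", "author"],
#>         "type": "object",
#>     },
#> }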

WandbCallMixin

Bases: _WandbBaseCall, Generic[BaseCallResponseT]

A mixin for integrating a call with Weights & Biases.

Use this class's built-in call_with_trace method to log traces to WandB along with your calls to the LLM. These calls will include additional metadata such as the prompt template, template variables, and more.

Example:

import os

from mirascope.openai import OpenAICall, OpenAICallResponse
from mirascope.wandb import WandbCallMixin
import wandb

wandb.login(key="YOUR_WANDB_API_KEY")
wandb.init(project="wandb_logged_chain")

os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"


class BookRecommender(OpenAICall, WandbCallMixin[OpenAICallResponse]):
    prompt_template = """
    SYSTEM:
    You are the world's greatest librarian.

    USER:
    Please recommend a {genre} book.
    """

    genre: str


recommender = BookRecommender(span_type="llm", genre="fantasy")
response, span = recommender.call_with_trace()
#           ^ this is a `Span` returned from the trace (or trace error).
Source code in mirascope/wandb/wandb.py
class WandbCallMixin(_WandbBaseCall, Generic[BaseCallResponseT]):
    '''A mixin for integrating a call with Weights & Biases.

    Use this class's built-in `call_with_trace` method to log traces to WandB along
    with your calls to the LLM. These calls will include additional metadata such as
    the prompt template, template variables, and more.

    Example:

    ```python
    import os

    from mirascope.openai import OpenAICall, OpenAICallResponse
    from mirascope.wandb import WandbCallMixin
    import wandb

    wandb.login(key="YOUR_WANDB_API_KEY")
    wandb.init(project="wandb_logged_chain")

    os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"


    class BookRecommender(OpenAICall, WandbCallMixin[OpenAICallResponse]):
        prompt_template = """
        SYSTEM:
        You are the world's greatest librarian.

        USER:
        Please recommend a {genre} book.
        """

        genre: str


    recommender = BookRecommender(span_type="llm", genre="fantasy")
    response, span = recommender.call_with_trace()
    #           ^ this is a `Span` returned from the trace (or trace error).
    ```
    '''

    span_type: Literal["tool", "llm", "chain", "agent"]

    def call_with_trace(
        self,
        parent: Optional[Trace] = None,
        **kwargs: Any,
    ) -> tuple[Optional[BaseCallResponseT], Trace]:
        """Creates an LLM response and logs it via a W&B `Trace`.

        Args:
            parent: The parent trace to connect to.

        Returns:
            A tuple containing the completion and its trace (which has been connected
                to the parent).
        """
        try:
            start_time = datetime.datetime.now().timestamp() * 1000
            response = self.call(**kwargs)
            tool_type = None
            if response.tool_types and len(response.tool_types) > 0:
                tool_type = response.tool_types[0].__bases__[0]  # type: ignore
            span = trace(self, response, tool_type, parent, **kwargs)
            return response, span  # type: ignore
        except Exception as e:
            return None, trace_error(self, e, parent, start_time, **kwargs)

call_with_trace(parent=None, **kwargs)

Creates an LLM response and logs it via a W&B Trace.

Parameters:

    parent (Optional[Trace], default None): The parent trace to connect to.

Returns:

    tuple[Optional[BaseCallResponseT], Trace]: A tuple containing the completion and its trace (which has been connected to the parent).

Source code in mirascope/wandb/wandb.py
def call_with_trace(
    self,
    parent: Optional[Trace] = None,
    **kwargs: Any,
) -> tuple[Optional[BaseCallResponseT], Trace]:
    """Creates an LLM response and logs it via a W&B `Trace`.

    Args:
        parent: The parent trace to connect to.

    Returns:
        A tuple containing the completion and its trace (which has been connected
            to the parent).
    """
    try:
        start_time = datetime.datetime.now().timestamp() * 1000
        response = self.call(**kwargs)
        tool_type = None
        if response.tool_types and len(response.tool_types) > 0:
            tool_type = response.tool_types[0].__bases__[0]  # type: ignore
        span = trace(self, response, tool_type, parent, **kwargs)
        return response, span  # type: ignore
    except Exception as e:
        return None, trace_error(self, e, parent, start_time, **kwargs)
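
A hedged sketch of connecting a call's span to a parent trace, building on the BookRecommender example above (assumes an active wandb run; Trace is W&B's trace-tree type):

from wandb.sdk.data_types.trace_tree import Trace

root = Trace(name="recommendation_chain", kind="chain")
recommender = BookRecommender(span_type="llm", genre="fantasy")
response, span = recommender.call_with_trace(parent=root)  # span is added as a child of root
root.log(name="mirascope_trace")  # logs the full trace tree to the active run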

WandbExtractorMixin

Bases: _WandbBaseExtractor, Generic[T]

An extractor mixin for integrating with Weights & Biases.

Use this class's built-in extract_with_trace method to log traces to WandB along with your calls to the LLM. These calls will include additional metadata such as the prompt template, template variables, and more.

Example:

import os
from typing import Type

from mirascope.openai import OpenAIExtractor
from mirascope.wandb import WandbExtractorMixin
from pydantic import BaseModel
import wandb

wandb.login(key="YOUR_WANDB_API_KEY")
wandb.init(project="wandb_logged_chain")

os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"


class Book(BaseModel):
    title: str
    author: str


class BookRecommender(OpenAIExtractor[Book], WandbExtractorMixin[Book]):
    extract_schema: Type[Book] = Book
    prompt_template = """
    SYSTEM:
    You are the world's greatest librarian.

    USER:
    Please recommend a {genre} book.
    """

    genre: str


recommender = BookRecommender(span_type="tool", genre="fantasy")
book, span = recommender.extract_with_trace()
#       ^ this is a `Span` returned from the trace (or trace error).
Source code in mirascope/wandb/wandb.py
class WandbExtractorMixin(_WandbBaseExtractor, Generic[T]):
    '''An extractor mixin for integrating with Weights & Biases.

    Use this class's built-in `extract_with_trace` method to log traces to WandB along
    with your calls to the LLM. These calls will include additional metadata such as
    the prompt template, template variables, and more.

    Example:

    ```python
    import os
    from typing import Type

    from mirascope.openai import OpenAIExtractor
    from mirascope.wandb import WandbExtractorMixin
    from pydantic import BaseModel
    import wandb

    wandb.login(key="YOUR_WANDB_API_KEY")
    wandb.init(project="wandb_logged_chain")

    os.environ["OPENAI_API_KEY"] = "YOUR_OPENAI_API_KEY"


    class Book(BaseModel):
        title: str
        author: str


    class BookRecommender(OpenAIExtractor[Book], WandbExtractorMixin[Book]):
        extract_schema: Type[Book] = Book
        prompt_template = """
        SYSTEM:
        You are the world's greatest librarian.

        USER:
        Please recommend a {genre} book.
        """

        genre: str


    recommender = BookRecommender(span_type="tool", genre="fantasy")
    book, span = recommender.extract_with_trace()
    #       ^ this is a `Span` returned from the trace (or trace error).
    ```
    '''

    span_type: Literal["tool", "llm", "chain", "agent"]

    def extract_with_trace(
        self,
        parent: Optional[Trace] = None,
        retries: int = 0,
        **kwargs: Any,
    ) -> tuple[Optional[T], Trace]:
        """Extracts `extract_schema` from the LLM call response and traces it.

        The `extract_schema` is converted into a tool, complete with a description of
        the tool, all of the fields, and their types. This allows us to take advantage
        of tool/function calling functionality to extract information from a response
        according to the context provided by the `BaseModel` schema.

        Args:
            parent: The parent trace to connect to.
            retries: The maximum number of times to retry the query on validation error.
            **kwargs: Additional keyword arguments to pass to the call. These
                will override any existing arguments in `call_params`.

        Returns:
            The `Schema` instance extracted from the response and its trace.
        """
        try:
            start_time = datetime.datetime.now().timestamp() * 1000
            model = self.extract(retries=retries, **kwargs)
            span = trace(self, model._response, None, parent, **kwargs)  # type: ignore
            return model, span  # type: ignore
        except Exception as e:
            return None, trace_error(self, e, parent, start_time, **kwargs)

extract_with_trace(parent=None, retries=0, **kwargs)

Extracts extract_schema from the LLM call response and traces it.

The extract_schema is converted into a tool, complete with a description of the tool, all of the fields, and their types. This allows us to take advantage of tool/function calling functionality to extract information from a response according to the context provided by the BaseModel schema.

Parameters:

    parent (Optional[Trace], default None): The parent trace to connect to.
    retries (int, default 0): The maximum number of times to retry the query on validation error.
    **kwargs (Any): Additional keyword arguments to pass to the call. These will override any existing arguments in call_params.

Returns:

    tuple[Optional[T], Trace]: The Schema instance extracted from the response and its trace.

Source code in mirascope/wandb/wandb.py
def extract_with_trace(
    self,
    parent: Optional[Trace] = None,
    retries: int = 0,
    **kwargs: Any,
) -> tuple[Optional[T], Trace]:
    """Extracts `extract_schema` from the LLM call response and traces it.

    The `extract_schema` is converted into a tool, complete with a description of
    the tool, all of the fields, and their types. This allows us to take advantage
    of tool/function calling functionality to extract information from a response
    according to the context provided by the `BaseModel` schema.

    Args:
        parent: The parent trace to connect to.
        retries: The maximum number of times to retry the query on validation error.
        **kwargs: Additional keyword arguments to pass to the call. These
            will override any existing arguments in `call_params`.

    Returns:
        The `Schema` instance extracted from the response and its trace.
    """
    try:
        start_time = datetime.datetime.now().timestamp() * 1000
        model = self.extract(retries=retries, **kwargs)
        span = trace(self, model._response, None, parent, **kwargs)  # type: ignore
        return model, span  # type: ignore
    except Exception as e:
        return None, trace_error(self, e, parent, start_time, **kwargs)

trace(call, response, tool_type, parent, **kwargs)

Returns a trace connected to parent.

Parameters:

    response (BaseCallResponse, required): The response to trace. Handles BaseCallResponse for call/stream, and BaseModel for extractions.
    tool_type (Optional[Type[BaseTool]], required): The BaseTool provider-specific tool type, e.g. OpenAITool.
    parent (Optional[Trace], required): The parent trace to connect to.

Returns:

    Trace: The created trace, connected to the parent.

Source code in mirascope/wandb/wandb.py
def trace(
    call: Union[_WandbBaseCall, _WandbBaseExtractor],
    response: BaseCallResponse,
    tool_type: Optional[Type[BaseTool]],
    parent: Optional[Trace],
    **kwargs: Any,
) -> Trace:
    """Returns a trace connected to parent.

    Args:
        response: The response to trace. Handles `BaseCallResponse` for call/stream, and
            `BaseModel` for extractions.
        tool_type: The `BaseTool` provider-specific tool type, e.g. `OpenAITool`.
        parent: The parent trace to connect to.

    Returns:
        The created trace, connected to the parent.
    """
    tool = response.tool
    if tool is not None:
        outputs = {
            "assistant": tool.model_dump(),
            "tool_output": tool.fn(**tool.args),
        }
    else:
        outputs = {"assistant": response.content}

    metadata = {
        "call_params": call.call_params.model_copy(update=kwargs).kwargs(tool_type)
    }
    if response.response.usage is not None:
        metadata["usage"] = response.response.usage.model_dump()
    span = Trace(
        name=call.__class__.__name__,
        kind=call.span_type,
        status_code="success",
        status_message=None,
        metadata=metadata,
        start_time_ms=round(response.start_time),
        end_time_ms=round(response.end_time),
        inputs={message["role"]: message["content"] for message in call.messages()},
        outputs=outputs,
    )
    if parent:
        parent.add_child(span)
    return span

trace_error(call, error, parent, start_time, **kwargs)

Returns an error trace connected to parent.

Start time is set to the time of prompt creation, and end time is set to the time the function is called.

Parameters:

    error (Exception, required): The error to trace.
    parent (Optional[Trace], required): The parent trace to connect to.
    start_time (float, required): The time the call to OpenAI was started.

Returns:

    Trace: The created error trace, connected to the parent.

Source code in mirascope/wandb/wandb.py
def trace_error(
    call: Union[_WandbBaseCall, _WandbBaseExtractor],
    error: Exception,
    parent: Optional[Trace],
    start_time: float,
    **kwargs: Any,
) -> Trace:
    """Returns an error trace connected to parent.

    Start time is set to the time of prompt creation, and end time is set to the
    time the function is called.

    Args:
        error: The error to trace.
        parent: The parent trace to connect to.
        start_time: The time the call to OpenAI was started.

    Returns:
        The created error trace, connected to the parent.
    """
    span = Trace(
        name=call.__class__.__name__,
        kind=call.span_type,
        status_code="error",
        status_message=str(error),
        metadata={"call_params": call.call_params.model_copy(update=kwargs).kwargs()},
        start_time_ms=round(start_time),
        end_time_ms=round(datetime.datetime.now().timestamp() * 1000),
        inputs={message["role"]: message["content"] for message in call.messages()},
        outputs=None,
    )
    if parent:
        parent.add_child(span)
    return span