Skip to content

Converters

Various conversion classes and methods to convert between Conatus's standardized output and the native format of the AI provider.

For more information, see the Models internals page.

Converters are not part of the public API

The converters are not part of the public API. They are subject to change without warning. Do not be surprised if you find this page particularly messy.

OpenAI Converters

Chat Completions API

conatus.models.open_ai.cc_conversion

Conversion functions for OpenAI's ChatCompletion API.

OpenAIChatCompletionConverters

Conversion functions for OpenAI's ChatCompletion API.

assistant_message_to_openai_assistant_message staticmethod

assistant_message_to_openai_assistant_message(
    message: AssistantAIMessage,
) -> ChatCompletionAssistantMessageParam

Convert an AssistantAIMessage to an OpenAI assistant message.

PARAMETER DESCRIPTION
message

The AssistantAIMessage to convert.

TYPE: AssistantAIMessage

RETURNS DESCRIPTION
ChatCompletionAssistantMessageParam

The OpenAI assistant message.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def assistant_message_to_openai_assistant_message(
    message: AssistantAIMessage,
) -> ChatCompletionAssistantMessageParam:
    """Convert an AssistantAIMessage to an OpenAI assistant message.

    Text, refusal and reasoning parts become message content; tool-call
    parts are collected separately and attached as ``tool_calls``.

    Args:
        message: The AssistantAIMessage to convert.

    Returns:
        The OpenAI assistant message.
    """
    converted_parts: list[ContentArrayOfContentPart] = []
    converted_calls: list[ChatCompletionMessageToolCallParam] = []
    for part in message.content:
        if isinstance(part, AssistantAIMessageContentTextPart):
            converted_parts.append(
                ChatCompletionContentPartTextParam(
                    text=part.text,
                    type="text",
                )
            )
        elif isinstance(part, AssistantAIMessageContentRefusalPart):
            converted_parts.append(
                ChatCompletionContentPartRefusalParam(
                    refusal=part.refusal,
                    type="refusal",
                )
            )
        elif isinstance(part, AssistantAIMessageContentReasoningPart):
            # Chat Completions has no native reasoning part, so the
            # reasoning text travels as a tagged text part.
            converted_parts.append(
                ChatCompletionContentPartTextParam(
                    text=f"<thinking>{part.reasoning}</thinking>",
                    type="text",
                )
            )
        elif isinstance(
            part, AssistantAIMessageContentToolCallPart
        ):  # pragma: no branch
            converted_calls.append(
                ChatCompletionMessageToolCallParam(
                    id=part.tool_call.call_id or "",
                    type="function",
                    function=Function(
                        name=part.tool_call.name,
                        arguments=part.tool_call.arguments_as_str,
                    ),
                )
            )
    openai_message = ChatCompletionAssistantMessageParam(
        role="assistant",
        # OpenAI expects None rather than an empty content list.
        content=converted_parts or None,
    )
    if converted_calls:
        openai_message["tool_calls"] = converted_calls
    return openai_message

user_message_to_openai_user_message staticmethod

user_message_to_openai_user_message(
    message: UserAIMessage,
) -> ChatCompletionUserMessageParam

Convert a UserAIMessage to an OpenAI user message.

PARAMETER DESCRIPTION
message

The UserAIMessage to convert.

TYPE: UserAIMessage

RETURNS DESCRIPTION
ChatCompletionUserMessageParam

The OpenAI user message.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def user_message_to_openai_user_message(
    message: UserAIMessage,
) -> ChatCompletionUserMessageParam:
    """Convert a UserAIMessage to an OpenAI user message.

    Args:
        message: The UserAIMessage to convert.

    Returns:
        The OpenAI user message.
    """
    # Plain-string content needs no part-by-part conversion.
    if isinstance(message.content, str):
        return ChatCompletionUserMessageParam(
            role="user",
            content=message.content,
        )
    converted: list[ChatCompletionContentPartParam] = []
    for part in message.content:
        if isinstance(part, UserAIMessageContentTextPart):
            converted.append(
                ChatCompletionContentPartTextParam(
                    text=part.text,
                    type="text",
                )
            )
        elif isinstance(
            part, UserAIMessageContentImagePart
        ):  # pragma: no branch
            # Base64 payloads must be shipped as a data URL.
            url_prefix = (
                "data:image/jpeg;base64," if part.is_base64 else ""
            )
            converted.append(
                ChatCompletionContentPartImageParam(
                    image_url=ImageURL(
                        url=url_prefix + part.image_data,
                        # "auto" lets the model pick the best detail
                        # level when none was requested.
                        detail=part.detail or "auto",
                    ),
                    type="image_url",
                )
            )
    return ChatCompletionUserMessageParam(
        role="user",
        content=converted,
    )

system_message_to_openai_system_message staticmethod

system_message_to_openai_system_message(
    message: SystemAIMessage, model_name: str | None = None
) -> (
    ChatCompletionSystemMessageParam
    | ChatCompletionDeveloperMessageParam
)

Convert a SystemAIMessage to an OpenAI system message.

PARAMETER DESCRIPTION
message

The SystemAIMessage to convert.

TYPE: SystemAIMessage

model_name

The model name.

TYPE: str | None DEFAULT: None

RETURNS DESCRIPTION
ChatCompletionSystemMessageParam | ChatCompletionDeveloperMessageParam

The OpenAI system message.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def system_message_to_openai_system_message(
    message: SystemAIMessage,
    model_name: str | None = None,
) -> ChatCompletionSystemMessageParam | ChatCompletionDeveloperMessageParam:
    """Convert a SystemAIMessage to an OpenAI system message.

    Args:
        message: The SystemAIMessage to convert.
        model_name: The model name.

    Returns:
        The OpenAI system message.
    """
    # Models whose name starts with "o", and computer-use-preview,
    # take the "developer" role instead of "system".
    name_as_str = str(model_name)
    if name_as_str.startswith("o") or name_as_str == "computer-use-preview":
        return ChatCompletionDeveloperMessageParam(
            role="developer",
            content=message.content,
        )
    return ChatCompletionSystemMessageParam(
        role="system",
        content=message.content,
    )

ai_message_to_openai_message staticmethod

ai_message_to_openai_message(
    message: ConversationAIMessage,
) -> ChatCompletionMessageParam

Convert an AI message to an OpenAI message.

PARAMETER DESCRIPTION
message

The AI message to convert.

TYPE: ConversationAIMessage

RETURNS DESCRIPTION
ChatCompletionMessageParam

The OpenAI message.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def ai_message_to_openai_message(
    message: ConversationAIMessage,
) -> ChatCompletionMessageParam:
    """Convert an AI message to an OpenAI message.

    Args:
        message: The AI message to convert.

    Returns:
        The OpenAI message.
    """
    # Short alias to keep lines under the length limit.
    converters = OpenAIChatCompletionConverters

    if isinstance(message, ToolResponseAIMessage):
        return ChatCompletionToolMessageParam(
            role="tool",
            content=message.content_as_string,
            # Fall back to an empty string when there is no tool call
            # ID (in general this shouldn't happen unless you change
            # AI provider mid-session).
            tool_call_id=message.tool_call_id or "",
        )
    if isinstance(message, AssistantAIMessage):
        return converters.assistant_message_to_openai_assistant_message(
            message
        )
    if isinstance(message, UserAIMessage):  # pragma: no branch
        return converters.user_message_to_openai_user_message(message)

ai_prompt_messages_to_openai_messages staticmethod

ai_prompt_messages_to_openai_messages(
    messages: Iterable[ConversationAIMessage],
) -> list[ChatCompletionMessageParam]

Convert AI prompt messages to OpenAI messages.

PARAMETER DESCRIPTION
messages

The AI prompt messages to convert.

TYPE: Iterable[ConversationAIMessage]

RETURNS DESCRIPTION
list[ChatCompletionMessageParam]

The OpenAI messages.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def ai_prompt_messages_to_openai_messages(
    messages: Iterable[ConversationAIMessage],
) -> list[ChatCompletionMessageParam]:
    """Convert AI prompt messages to OpenAI messages.

    Args:
        messages: The AI prompt messages to convert.

    Returns:
        The OpenAI messages.
    """
    convert = OpenAIChatCompletionConverters.ai_message_to_openai_message
    return [convert(message) for message in messages]

ai_prompt_tools_to_openai_tools staticmethod

ai_prompt_tools_to_openai_tools(
    tools: Iterable[AIToolSpecification] | None,
) -> list[ChatCompletionToolParam] | None

Convert AI prompt tools to OpenAI tools.

This leverages the generate_openai_json_schema function to convert the Pydantic model to an OpenAI-compatible JSON schema.

PARAMETER DESCRIPTION
tools

The tools to convert to OpenAI tools.

TYPE: Iterable[AIToolSpecification] | None

RETURNS DESCRIPTION
list[ChatCompletionToolParam] | None

The OpenAI tools.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def ai_prompt_tools_to_openai_tools(
    tools: Iterable[AIToolSpecification] | None,
) -> list[ChatCompletionToolParam] | None:
    """Convert AI prompt tools to OpenAI tools.

    This leverages the `generate_openai_json_schema` function to convert the
    Pydantic model to an OpenAI-compatible JSON schema.

    Args:
        tools: The tools to convert to OpenAI tools.

    Returns:
        The OpenAI tools.
    """
    if tools is None:
        return None
    converted: list[ChatCompletionToolParam] = []
    for tool in tools:
        # Build the OpenAI-compatible JSON schema for this tool.
        schema = generate_openai_json_schema(
            json_schema_pydantic_model=tool.json_schema_pydantic_model,
            strict_mode=tool.strict_mode,
        )
        function_payload = {
            "name": schema["name"],
            "description": schema["description"],
            "parameters": schema["parameters"],
            "strict": schema["strict"],
        }
        converted.append(
            ChatCompletionToolParam(
                function=function_payload,
                type="function",
            )
        )
    return converted

convert_output_schema_to_openai_output_schema staticmethod

convert_output_schema_to_openai_output_schema(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = True
) -> (
    tuple[ResponseFormatJSONSchema, bool]
    | tuple[None, Literal[False]]
)

Convert an output schema to an OpenAI output schema.

Essentially, it's just creating a "structured output" JSON schema.

PARAMETER DESCRIPTION
output_schema

The output schema to convert.

TYPE: TypeAdapter[ParamType] | None

strict_mode

Whether to use strict mode.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
tuple[ResponseFormatJSONSchema, bool] | tuple[None, Literal[False]]

If the output schema is not provided, returns None and False. Otherwise, returns the OpenAI output schema and a boolean indicating whether the schema has been converted from a non-object type to an object type.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def convert_output_schema_to_openai_output_schema(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = True,
) -> tuple[ResponseFormatJSONSchema, bool] | tuple[None, Literal[False]]:
    """Convert an output schema to an OpenAI output schema.

    Essentially, it's just creating a "structured output" JSON schema.

    Args:
        output_schema: The output schema to convert.
        strict_mode: Whether to use strict mode.

    Returns:
        If the output schema is not provided, returns None and False.
            Otherwise, returns the OpenAI output schema and a boolean
            indicating whether the schema has been converted from a
            non-object type to an object type.
    """
    if output_schema is None:
        return None, False
    schema, was_converted = generate_structured_output_openai_json_schema(
        output_schema,
        strict_mode=strict_mode,
    )
    response_format = ResponseFormatJSONSchema(
        type="json_schema",
        json_schema={
            "name": schema["name"],
            "description": schema["description"],
            "schema": schema["schema"],
            "strict": schema["strict"],
        },
    )
    return response_format, was_converted

openai_response_to_ai_response staticmethod

openai_response_to_ai_response(
    raw_response: ChatCompletion,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
    *,
    output_schema_was_converted_to_item_object: bool = False
) -> AIResponse[OutputSchemaType]

Convert an OpenAI response to an AIResponse.

PARAMETER DESCRIPTION
raw_response

The OpenAI response to convert.

TYPE: ChatCompletion

prompt

The prompt that was used to generate the response.

TYPE: AIPrompt[OutputSchemaType]

model_name

The model name of the response.

TYPE: str | None DEFAULT: None

output_schema_was_converted_to_item_object

Whether the output schema was converted to an item object.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
AIResponse[OutputSchemaType]

The AIResponse.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def openai_response_to_ai_response(
    raw_response: ChatCompletion,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
    *,
    output_schema_was_converted_to_item_object: bool = False,
) -> AIResponse[OutputSchemaType]:
    """Convert an OpenAI response to an AIResponse.

    Args:
        raw_response: The OpenAI response to convert.
        prompt: The prompt that was used to generate the response.
        model_name: The model name of the response.
        output_schema_was_converted_to_item_object: Whether the output
            schema was converted to an item object.

    Returns:
        The AIResponse.
    """
    converters = OpenAIChatCompletionConverters
    # Only the first choice is ever consumed.
    first_choice = raw_response.choices[0]
    received = converters.openai_assistant_message_to_assistant_ai_message(
        first_choice.message
    ).complete()
    finish_reason = converters.openai_finish_reason_to_finish_reason(
        first_choice.finish_reason
    )
    usage = converters.openai_usage_to_completion_usage(
        raw_response.usage,
        model_name=model_name,
    )
    return AIResponse(
        prompt=prompt,
        message_received=received,
        finish_reason=finish_reason,
        usage=usage,
        uid=raw_response.id,
        output_schema_was_converted_to_item_object=output_schema_was_converted_to_item_object,
    )

openai_assistant_message_to_assistant_ai_message staticmethod

openai_assistant_message_to_assistant_ai_message(
    raw_assistant_message: (
        ChatCompletionMessage | ChoiceDelta
    ),
) -> IncompleteAssistantAIMessage

Convert an OpenAI assistant message to an AssistantAIMessage.

PARAMETER DESCRIPTION
raw_assistant_message

The OpenAI assistant message to convert.

TYPE: ChatCompletionMessage | ChoiceDelta

RETURNS DESCRIPTION
IncompleteAssistantAIMessage

The AssistantAIMessage.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def openai_assistant_message_to_assistant_ai_message(
    raw_assistant_message: ChatCompletionMessage | ChoiceDelta,
) -> IncompleteAssistantAIMessage:
    """Convert an OpenAI assistant message to an AssistantAIMessage.

    Args:
        raw_assistant_message: The OpenAI assistant message to convert.

    Returns:
        The AssistantAIMessage.
    """
    parts: list[IncompleteAssistantAIMessageContentPart] = []
    # Text content first, when present.
    if raw_assistant_message.content:
        parts.append(
            IncompleteAssistantAIMessageContentTextPart(
                text=raw_assistant_message.content,
                type="text",
            )
        )
    raw_tool_calls = raw_assistant_message.tool_calls
    if raw_tool_calls and len(raw_tool_calls) > 0:
        for raw_call in raw_tool_calls:
            # On streaming deltas the function payload may be absent.
            function = raw_call.function
            parts.append(
                IncompleteAssistantAIMessageContentToolCallPart(
                    tool_call=IncompleteAIToolCall(
                        call_id=raw_call.id,
                        name=function.name if function else None,
                        returned_arguments=(
                            function.arguments if function else None
                        ),
                    ),
                )
            )
    return IncompleteAssistantAIMessage(
        content=parts,
        refusal=raw_assistant_message.refusal,
        role="assistant",
    )

openai_finish_reason_to_finish_reason staticmethod

openai_finish_reason_to_finish_reason(
    finish_reason: (
        Literal[
            "stop",
            "length",
            "tool_calls",
            "content_filter",
            "function_call",
        ]
        | None
    ),
) -> FinishReasons | None

Convert an OpenAI finish reason to a FinishReasons.

PARAMETER DESCRIPTION
finish_reason

The OpenAI finish reason to convert.

TYPE: Literal['stop', 'length', 'tool_calls', 'content_filter', 'function_call'] | None

RETURNS DESCRIPTION
FinishReasons | None

The FinishReasons.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def openai_finish_reason_to_finish_reason(
    finish_reason: Literal[
        "stop", "length", "tool_calls", "content_filter", "function_call"
    ]
    | None,
) -> FinishReasons | None:
    """Convert an OpenAI finish reason to a `FinishReasons`.

    Args:
        finish_reason: The OpenAI finish reason to convert.

    Returns:
        The FinishReasons.
    """
    if not finish_reason:
        return None
    # The legacy "function_call" reason is folded into "tool_calls";
    # every other reason maps onto itself.
    translation: dict[str, FinishReasons] = {
        "stop": "stop",
        "length": "length",
        "tool_calls": "tool_calls",
        "content_filter": "content_filter",
        "function_call": "tool_calls",
    }
    return translation[finish_reason]

openai_chunk_to_ai_response staticmethod

openai_chunk_to_ai_response(
    chunk: ChatCompletionChunk,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
) -> IncompleteAIResponse[OutputSchemaType]

Convert an OpenAI chunk to an IncompleteAIResponse.

PARAMETER DESCRIPTION
chunk

The OpenAI chunk to convert.

TYPE: ChatCompletionChunk

prompt

The prompt that was used to generate the chunk.

TYPE: AIPrompt[OutputSchemaType]

model_name

The model name of the chunk.

TYPE: str | None DEFAULT: None

RETURNS DESCRIPTION
IncompleteAIResponse[OutputSchemaType]

The IncompleteAIResponse.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def openai_chunk_to_ai_response(
    chunk: ChatCompletionChunk,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
) -> IncompleteAIResponse[OutputSchemaType]:
    """Convert an OpenAI chunk to an IncompleteAIResponse.

    Args:
        chunk: The OpenAI chunk to convert.
        prompt: The prompt that was used to generate the chunk.
        model_name: The model name of the chunk.

    Returns:
        The IncompleteAIResponse.
    """
    converters = OpenAIChatCompletionConverters
    # Some chunks (e.g. a final usage-only chunk) carry no choices.
    if len(chunk.choices) > 0:
        first_choice = chunk.choices[0]
        received = (
            converters.openai_assistant_message_to_assistant_ai_message(
                first_choice.delta
            )
        )
        finish_reason = converters.openai_finish_reason_to_finish_reason(
            first_choice.finish_reason
        )
    else:
        received = IncompleteAssistantAIMessage()
        finish_reason = None
    return IncompleteAIResponse(
        prompt=prompt,
        message_received=received,
        finish_reason=finish_reason,
        usage=converters.openai_usage_to_completion_usage(
            chunk.usage,
            model_name=model_name,
        ),
    )

openai_usage_to_completion_usage staticmethod

openai_usage_to_completion_usage(
    usage: CompletionUsage | None,
    model_name: str | None = None,
) -> CompletionUsage

Convert an OpenAI usage to a CompletionUsage.

We default to 0 if the usage is None.

PARAMETER DESCRIPTION
usage

The OpenAI usage to convert to a CompletionUsage.

TYPE: CompletionUsage | None

model_name

The model name of the completion usage.

TYPE: str | None DEFAULT: None

RETURNS DESCRIPTION
CompletionUsage

The CompletionUsage.

Source code in conatus/models/open_ai/cc_conversion.py
@staticmethod
def openai_usage_to_completion_usage(
    usage: OpenAICompletionUsage | None,
    model_name: str | None = None,
) -> CompletionUsage:
    """Convert an OpenAI usage to a CompletionUsage.

    We default to 0 if the usage is None.

    Args:
        usage: The OpenAI usage to convert to a CompletionUsage.
        model_name: The model name of the completion usage.

    Returns:
        The CompletionUsage.
    """
    extra_fields: dict[str, int | None] | None = None
    if usage is None:
        # No usage reported: every counter defaults to zero.
        prompt_tokens = completion_tokens = total_tokens = 0
    else:
        prompt_tokens = usage.prompt_tokens
        completion_tokens = usage.completion_tokens
        total_tokens = usage.total_tokens
        completion_details = usage.completion_tokens_details
        if completion_details:  # pragma: no branch
            extra_fields = {
                "accepted_prediction_tokens": (
                    completion_details.accepted_prediction_tokens or 0
                ),
                "rejected_prediction_tokens": (
                    completion_details.rejected_prediction_tokens or 0
                ),
                "reasoning_tokens": (
                    completion_details.reasoning_tokens or 0
                ),
            }
        prompt_details = usage.prompt_tokens_details
        if prompt_details:  # pragma: no branch
            extra_fields = extra_fields or {}
            extra_fields["cached_tokens"] = (
                prompt_details.cached_tokens or 0
            )

    return CompletionUsage(
        model_name=model_name,
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=total_tokens,
        cached_used_tokens=(
            extra_fields.get("cached_tokens") if extra_fields else None
        ),
        cached_created_tokens=None,
        extra_fields=extra_fields,
        usage_was_never_given=usage is None,
    )

Responses API

conatus.models.open_ai.response_conversion

Conversion functions for OpenAI responses API.

OpenAIResponseConverters

Conversion functions for OpenAI responses API.

AccidentalComputerCallError

Bases: Exception

Raised when OpenAI sends a computer call in ResponseOutputMessage.

This is a bug in OpenAI's API, and they will fix it at some point.

system_message_to_openai_message staticmethod

system_message_to_openai_message(
    message: SystemAIMessage, model_name: str | None = None
) -> EasyInputMessageParam

Convert a SystemAIMessage to an OpenAI message.

PARAMETER DESCRIPTION
message

The SystemAIMessage to convert.

TYPE: SystemAIMessage

model_name

The model name of the message.

TYPE: str | None DEFAULT: None

RETURNS DESCRIPTION
EasyInputMessageParam

The OpenAI message.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def system_message_to_openai_message(
    message: SystemAIMessage,
    model_name: str | None = None,
) -> rt.EasyInputMessageParam:
    """Convert a SystemAIMessage to an OpenAI message.

    Args:
        message: The SystemAIMessage to convert.
        model_name: The model name of the message.

    Returns:
        The OpenAI message.
    """
    # Models whose name starts with "o", and computer-use-preview,
    # take the "developer" role instead of "system".
    name_as_str = str(model_name)
    if name_as_str.startswith("o") or name_as_str == "computer-use-preview":
        role = "developer"
    else:
        role = "system"
    return rt.EasyInputMessageParam(
        content=message.content,
        role=role,
        type="message",
    )

ai_prompt_messages_to_openai_messages staticmethod

ai_prompt_messages_to_openai_messages(
    messages: Iterable[ConversationAIMessage],
) -> ResponseInputParam

Convert AI prompt messages to OpenAI messages.

PARAMETER DESCRIPTION
messages

The AI prompt messages to convert.

TYPE: Iterable[ConversationAIMessage]

RETURNS DESCRIPTION
ResponseInputParam

The OpenAI messages.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def ai_prompt_messages_to_openai_messages(
    messages: Iterable[ConversationAIMessage],
) -> rt.ResponseInputParam:
    """Convert AI prompt messages to OpenAI messages.

    Args:
        messages: The AI prompt messages to convert.

    Returns:
        The OpenAI messages.
    """
    convert = OpenAIResponseConverters.ai_message_to_openai_message
    # One message may expand into several input items, hence the
    # flattening comprehension.
    return [
        input_item
        for message in messages
        for input_item in convert(message)
    ]

ai_message_to_openai_message staticmethod

ai_message_to_openai_message(
    message: ConversationAIMessage,
) -> Sequence[ResponseInputItemParam]

Convert an AI message to an OpenAI message.

PARAMETER DESCRIPTION
message

The AI message to convert.

TYPE: ConversationAIMessage

RETURNS DESCRIPTION
Sequence[ResponseInputItemParam]

The OpenAI message.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def ai_message_to_openai_message(
    message: ConversationAIMessage,
) -> Sequence[rt.ResponseInputItemParam]:
    """Convert an AI message to an OpenAI message.

    Args:
        message: The AI message to convert.

    Returns:
        The OpenAI message.
    """
    converters = OpenAIResponseConverters
    if isinstance(message, AssistantAIMessage):
        return converters.assistant_message_to_openai_message(message)
    if isinstance(message, UserAIMessage):
        # User messages always convert to exactly one input item.
        return [converters.user_message_to_openai_message(message)]
    if isinstance(message, ToolResponseAIMessage):  # pragma: no branch
        return converters.tool_response_message_to_openai_message(message)

assistant_message_to_openai_message staticmethod

assistant_message_to_openai_message(
    message: AssistantAIMessage,
) -> list[ResponseInputItemParam]

Convert an AssistantAIMessage to an OpenAI message.

PARAMETER DESCRIPTION
message

The AssistantAIMessage to convert.

TYPE: AssistantAIMessage

RETURNS DESCRIPTION
list[ResponseInputItemParam]

The OpenAI message.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def assistant_message_to_openai_message(
    message: AssistantAIMessage,
) -> list[rt.ResponseInputItemParam]:
    """Convert an AssistantAIMessage to an OpenAI message.

    Each content part of the message becomes its own top-level item in
    the Responses API input list (output message, reasoning item, or
    tool call).

    Args:
        message: The AssistantAIMessage to convert.

    Returns:
        The OpenAI message.
    """
    self_ = OpenAIResponseConverters
    list_of_inputs: list[rt.ResponseInputItemParam] = []
    for content_part in message.content:
        match content_part:
            case AssistantAIMessageContentTextPart():
                # Text is echoed back as a completed assistant output
                # message with a single output_text part.
                list_of_inputs.append(
                    rt.ResponseOutputMessageParam(
                        content=[
                            rt.ResponseOutputTextParam(
                                text=content_part.text,
                                type="output_text",
                                annotations=[],
                            )
                        ],
                        role="assistant",
                        type="message",
                        id=content_part.uid or "",
                        status="completed",
                    )
                )
            case AssistantAIMessageContentRefusalPart():
                # Refusals use the same output-message envelope with a
                # refusal part inside.
                list_of_inputs.append(
                    rt.ResponseOutputMessageParam(
                        content=[
                            rt.ResponseOutputRefusalParam(
                                refusal=content_part.refusal,
                                type="refusal",
                            )
                        ],
                        role="assistant",
                        type="message",
                        id=content_part.uid or "",
                        status="completed",
                    )
                )
            case AssistantAIMessageContentReasoningPart():
                # Reasoning becomes a dedicated reasoning item whose
                # text is carried in the summary.
                list_of_inputs.append(
                    rt.ResponseReasoningItemParam(
                        summary=[
                            rt.response_reasoning_item_param.Summary(
                                text=content_part.reasoning,
                                type="summary_text",
                            )
                        ],
                        id=content_part.uid or "",
                        type="reasoning",
                    )
                )
            # content_part -> AssistantAIMessageContentToolCallPart
            case AssistantAIMessageContentToolCallPart():  # pragma: no branch # noqa: E501
                # Plain function calls and computer-use calls are
                # encoded differently by the Responses API.
                if isinstance(content_part.tool_call, AIToolCall):
                    list_of_inputs.append(
                        rt.response_function_tool_call_param.ResponseFunctionToolCallParam(
                            call_id=content_part.tool_call.call_id or "",
                            type="function_call",
                            id=content_part.uid or "",
                            arguments=content_part.tool_call.arguments_as_str,
                            name=content_part.tool_call.name,
                            status="completed",
                        )
                    )
                else:
                    oai_computer_tool_call = self_.computer_tool_call_to_openai_computer_tool_call(  # noqa: E501
                        content_part.tool_call,
                        content_part.uid,
                    )
                    # The conversion may decline (return None); only
                    # forward actual computer tool calls.
                    if (
                        oai_computer_tool_call is not None
                    ):  # pragma: no branch
                        list_of_inputs.append(oai_computer_tool_call)
    return list_of_inputs

user_message_to_openai_message staticmethod

user_message_to_openai_message(
    message: UserAIMessage,
) -> EasyInputMessageParam

Convert a UserAIMessage to an OpenAI message.

PARAMETER DESCRIPTION
message

The UserAIMessage to convert.

TYPE: UserAIMessage

RETURNS DESCRIPTION
EasyInputMessageParam

The OpenAI message.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def user_message_to_openai_message(
    message: UserAIMessage,
) -> rt.EasyInputMessageParam:
    """Convert a UserAIMessage to an OpenAI message.

    Args:
        message: The UserAIMessage to convert.

    Returns:
        The OpenAI message.
    """
    # Normalize plain-string content to a one-element part list.
    raw_parts = (
        [message.content]
        if isinstance(message.content, str)
        else message.content
    )
    converted: list[ResponseInputContentParam] = []
    for raw_part in raw_parts:
        if isinstance(raw_part, UserAIMessageContentTextPart):
            converted.append(
                rt.ResponseInputTextParam(
                    text=raw_part.text, type="input_text"
                )
            )
        elif isinstance(raw_part, UserAIMessageContentImagePart):
            # Base64 payloads must be shipped as a data URL.
            url_prefix = (
                "data:image/jpeg;base64,"
                if raw_part.is_base64
                else ""
            )
            converted.append(
                rt.ResponseInputImageParam(
                    image_url=url_prefix + raw_part.image_data,
                    type="input_image",
                    detail=raw_part.detail or "auto",
                )
            )
        elif isinstance(raw_part, str):  # pragma: no branch
            converted.append(
                rt.ResponseInputTextParam(
                    text=raw_part, type="input_text"
                )
            )
    return rt.EasyInputMessageParam(
        content=converted,
        role="user",
        type="message",
    )

tool_response_message_to_openai_message staticmethod

tool_response_message_to_openai_message(
    message: ToolResponseAIMessage,
) -> list[FunctionCallOutput | ComputerCallOutput]

Convert a ToolResponseAIMessage to an OpenAI message.

PARAMETER DESCRIPTION
message

The ToolResponseAIMessage to convert.

TYPE: ToolResponseAIMessage

RETURNS DESCRIPTION
list[FunctionCallOutput | ComputerCallOutput]

The OpenAI message.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def tool_response_message_to_openai_message(
    message: ToolResponseAIMessage,
) -> list[
    rt.response_input_param.FunctionCallOutput
    | rt.response_input_param.ComputerCallOutput
]:
    """Convert a ToolResponseAIMessage to an OpenAI message.

    Args:
        message: The ToolResponseAIMessage to convert.

    Returns:
        The OpenAI message, as a single-element list.
    """
    # Looked up unconditionally; only used for computer-use responses.
    screenshot_b64 = message.content.get(
        "computer_use_output", ""
    )
    if message.for_computer_use:
        # Computer-use results go back to OpenAI as a screenshot image.
        return [
            rt.response_input_param.ComputerCallOutput(
                call_id=message.tool_call_id or "",
                output=ResponseComputerToolCallOutputScreenshotParam(
                    type="computer_screenshot",
                    image_url=f"data:image/jpeg;base64,{screenshot_b64}",
                ),
                type="computer_call_output",
                status="completed",
            )
        ]
    return [
        rt.response_input_param.FunctionCallOutput(
            call_id=message.tool_call_id or "",
            output=message.content_as_string,
            type="function_call_output",
            status="completed",
        )
    ]

computer_tool_call_to_openai_computer_tool_call staticmethod

computer_tool_call_to_openai_computer_tool_call(
    tool_call: ComputerUseAction, uid: str | None
) -> ResponseComputerToolCallParam | None

Convert a ComputerUseAction to an OpenAI computer tool call.

Note that not all ComputerUseActions can be converted to an OpenAI computer tool call. Most of the time, this will not matter, since we convert the ComputerUseAction to an OpenAI computer tool call and vice versa. But if you want to convert computer use actions from Anthropic to OpenAI, some stuff might be lost in translation.

PARAMETER DESCRIPTION
tool_call

The ComputerUseAction to convert.

TYPE: ComputerUseAction

uid

The ID of the tool call.

TYPE: str | None

RETURNS DESCRIPTION
ResponseComputerToolCallParam | None

The OpenAI computer tool call.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def computer_tool_call_to_openai_computer_tool_call(  # noqa: C901, PLR0912
    tool_call: ComputerUseAction,
    uid: str | None,
) -> rt.ResponseComputerToolCallParam | None:
    """Convert a ComputerUseAction to an OpenAI computer tool call.

    Note that not all ComputerUseActions can be converted to an OpenAI
    computer tool call. Most of the time, this will not matter, since
    we convert the ComputerUseAction to an OpenAI computer tool call
    and vice versa. But if you want to convert computer use actions from
    Anthropic to OpenAI, some stuff might be lost in translation.

    Args:
        tool_call: The ComputerUseAction to convert.
        uid: The ID of the tool call.

    Returns:
        The OpenAI computer tool call, or ``None`` when the action has
        no OpenAI equivalent.
    """
    action: oai_cu_param.Action
    # NOTE(review): the arm order below matters if any CU* classes
    # subclass each other (e.g. triple-click is ruled out before the
    # single-click arm) — confirm the hierarchy before reordering.
    match tool_call:
        # The following four actions have no counterpart in OpenAI's
        # action vocabulary, so they are dropped (converted to None).
        case CULeftMouseDown():
            return None
        case CULeftMouseUp():
            return None
        case CULeftMouseTripleClick():
            return None
        case CUCursorPosition():
            return None
        case CULeftMouseClick():
            # Missing coordinates default to the origin.
            x, y = tool_call.xy or (0, 0)
            action = oai_cu_param.ActionClick(
                button="left", type="click", x=x, y=y
            )
        case CULeftMouseDoubleClick():
            x, y = tool_call.xy or (0, 0)
            action = oai_cu_param.ActionDoubleClick(
                type="double_click", x=x, y=y
            )
        case CURightClick():
            x, y = tool_call.xy or (0, 0)
            action = oai_cu_param.ActionClick(
                button="right", type="click", x=x, y=y
            )
        case CUMiddleClick():
            x, y = tool_call.xy or (0, 0)
            # "wheel" is OpenAI's name for the middle mouse button.
            action = oai_cu_param.ActionClick(
                button="wheel", type="click", x=x, y=y
            )
        case CUBackClick():
            x, y = tool_call.xy or (0, 0)
            action = oai_cu_param.ActionClick(
                button="back", type="click", x=x, y=y
            )
        case CUForwardClick():
            x, y = tool_call.xy or (0, 0)
            action = oai_cu_param.ActionClick(
                button="forward", type="click", x=x, y=y
            )
        case CUDrag():
            action = oai_cu_param.ActionDrag(
                path=[{"x": xy[0], "y": xy[1]} for xy in tool_call.xy],
                type="drag",
            )
        # Keypress and keypress-hold both map to a plain keypress:
        # the "hold" distinction is not representable on OpenAI's side.
        case CUKeypress() | CUKeypressHold():
            action = oai_cu_param.ActionKeypress(
                keys=tool_call.keys, type="keypress"
            )
        case CUMove():
            action = oai_cu_param.ActionMove(
                x=tool_call.xy[0], y=tool_call.xy[1], type="move"
            )
        case CUScreenshot():
            action = oai_cu_param.ActionScreenshot(
                type="screenshot",
            )
        case CUScroll():
            # Missing start/distance coordinates default to the origin.
            start_xy = tool_call.start_xy or (0, 0)
            distance_xy = tool_call.distance_xy or (0, 0)
            action = oai_cu_param.ActionScroll(
                x=start_xy[0],
                y=start_xy[1],
                scroll_x=distance_xy[0],
                scroll_y=distance_xy[1],
                type="scroll",
            )
        case CUType():
            action = oai_cu_param.ActionType(
                text=tool_call.text,
                type="type",
            )
        case CUWait():  # pragma: no branch
            action = oai_cu_param.ActionWait(
                type="wait",
            )

    return oai_cu_param.ResponseComputerToolCallParam(
        id=uid or "",
        call_id=tool_call.call_id or "",
        action=action,
        pending_safety_checks=[],
        status="completed",
        type="computer_call",
    )

ai_prompt_tools_to_openai_tools staticmethod

ai_prompt_tools_to_openai_tools(
    prompt: AIPrompt,
) -> list[ToolParam] | None

Convert an AI prompt tools to OpenAI tools.

PARAMETER DESCRIPTION
prompt

The AI prompt to convert.

TYPE: AIPrompt

RETURNS DESCRIPTION
list[ToolParam] | None

The OpenAI tools.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def ai_prompt_tools_to_openai_tools(
    prompt: AIPrompt,
) -> list[rt.ToolParam] | None:
    """Convert an AI prompt tools to OpenAI tools.

    Args:
        prompt: The AI prompt to convert.

    Returns:
        The OpenAI tools, or ``None`` when the prompt has no tools.
    """
    # NOTE(review): when ``prompt.tools`` is None, the computer-use tool
    # is never emitted either, even if ``computer_use_config`` is set —
    # presumably intentional; confirm against callers.
    if prompt.tools is None:
        return None
    oai_tools: list[rt.ToolParam] = []
    for ai_tool in prompt.tools:
        schema = generate_openai_json_schema(
            json_schema_pydantic_model=ai_tool.json_schema_pydantic_model,
            strict_mode=ai_tool.strict_mode,
        )
        oai_tools.append(
            rt.FunctionToolParam(
                name=schema["name"],
                parameters=schema.get("parameters", {}),
                strict=schema.get("strict", False) or False,
                type="function",
                description=schema.get("description", ""),
            )
        )
    cu_config = prompt.computer_use_config
    if cu_config is not None:
        # Computer use is exposed to OpenAI as one extra built-in tool.
        oai_tools.append(
            rt.ComputerToolParam(
                type="computer_use_preview",
                environment=cu_config["environment"],
                display_width=cu_config["display_width"],
                display_height=cu_config["display_height"],
            )
        )
    return oai_tools

convert_output_schema_to_openai_output_schema staticmethod

convert_output_schema_to_openai_output_schema(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = True
) -> (
    tuple[ResponseTextConfigParam, bool]
    | tuple[None, Literal[False]]
)

Convert an output schema to an OpenAI output schema.

Essentially, it's just creating a "structured output" JSON schema.

PARAMETER DESCRIPTION
output_schema

The output schema to convert.

TYPE: TypeAdapter[ParamType] | None

strict_mode

Whether to use strict mode.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
tuple[ResponseTextConfigParam, bool] | tuple[None, Literal[False]]

If the output schema is not provided, returns None and False. Otherwise, returns the OpenAI output schema and a boolean indicating whether the schema has been converted from a non-object type to an object type.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def convert_output_schema_to_openai_output_schema(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = True,
) -> tuple[ResponseTextConfigParam, bool] | tuple[None, Literal[False]]:
    """Convert an output schema to an OpenAI output schema.

    Essentially, it's just creating a "structured output" JSON schema.

    Args:
        output_schema: The output schema to convert.
        strict_mode: Whether to use strict mode.

    Returns:
        ``(None, False)`` when no output schema is provided. Otherwise,
            the OpenAI output schema and a boolean indicating whether
            the schema had to be converted from a non-object type to an
            object type.
    """
    if output_schema is None:
        return None, False
    schema, was_wrapped = generate_structured_output_openai_json_schema(
        output_schema, strict_mode=strict_mode
    )
    text_config = ResponseTextConfigParam(
        format=ResponseFormatTextJSONSchemaConfigParam(
            type="json_schema",
            **schema,
        ),
    )
    return text_config, was_wrapped

openai_response_to_ai_response staticmethod

openai_response_to_ai_response(
    raw_response: Response,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
    *,
    return_incomplete: Literal[True],
    output_schema_was_converted_to_item_object: bool = False
) -> IncompleteAIResponse[OutputSchemaType]
openai_response_to_ai_response(
    raw_response: Response,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
    *,
    return_incomplete: Literal[False],
    output_schema_was_converted_to_item_object: bool = False
) -> AIResponse[OutputSchemaType]
openai_response_to_ai_response(
    raw_response: Response,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
    *,
    return_incomplete: bool = False,
    output_schema_was_converted_to_item_object: bool = False
) -> (
    AIResponse[OutputSchemaType]
    | IncompleteAIResponse[OutputSchemaType]
)

Convert an OpenAI response to an AIResponse.

PARAMETER DESCRIPTION
raw_response

The OpenAI response to convert.

TYPE: Response

prompt

The prompt to convert.

TYPE: AIPrompt[OutputSchemaType]

model_name

The model name of the response.

TYPE: str | None DEFAULT: None

return_incomplete

Whether to return an IncompleteAIResponse instead of an AIResponse

TYPE: bool DEFAULT: False

output_schema_was_converted_to_item_object

Whether the output schema was converted to an item object. This will only be honored if the method returns an AIResponse .

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
AIResponse[OutputSchemaType] | IncompleteAIResponse[OutputSchemaType]

The AIResponse or IncompleteAIResponse.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_response_to_ai_response(
    raw_response: Response,
    prompt: AIPrompt[OutputSchemaType],
    model_name: str | None = None,
    *,
    return_incomplete: bool = False,
    output_schema_was_converted_to_item_object: bool = False,
) -> AIResponse[OutputSchemaType] | IncompleteAIResponse[OutputSchemaType]:
    """Convert an OpenAI response to an AIResponse.

    Args:
        raw_response: The OpenAI response to convert.
        prompt: The prompt to convert.
        model_name: The model name of the response.
        return_incomplete: Whether to return an
            [`IncompleteAIResponse`][conatus.models.inputs_outputs.response.IncompleteAIResponse]
            instead of an [`AIResponse`
            ][conatus.models.inputs_outputs.response.AIResponse]
        output_schema_was_converted_to_item_object: Whether the output
            schema was converted to an item object. This will only be
            honored if the method returns an [`AIResponse`
            ][conatus.models.inputs_outputs.response.AIResponse].

    Returns:
        The AIResponse or IncompleteAIResponse.
    """
    converters = OpenAIResponseConverters
    assistant_msg = (
        converters.openai_assistant_message_to_assistant_ai_message(
            raw_response.output,
            prompt.computer_use_config,
        )
    )
    reason = converters.openai_finish_reason_to_finish_reason(
        raw_response, assistant_msg
    )
    usage_info = converters.openai_usage_to_completion_usage(
        raw_response.usage, model_name
    )
    if return_incomplete:
        return IncompleteAIResponse(
            prompt=prompt,
            message_received=assistant_msg,
            finish_reason=reason,
            usage=usage_info,
            uid=raw_response.id,
        )
    # A full AIResponse requires completing the (possibly partial) message.
    return AIResponse(
        prompt=prompt,
        message_received=assistant_msg.complete(),
        finish_reason=reason,
        usage=usage_info,
        uid=raw_response.id,
        output_schema_was_converted_to_item_object=output_schema_was_converted_to_item_object,
    )

openai_assistant_message_to_assistant_ai_message staticmethod

openai_assistant_message_to_assistant_ai_message(
    oai_response: list[ResponseOutputItem],
    cu_config: ComputerUseConfig | None = None,
) -> IncompleteAssistantAIMessage

Convert an OpenAI Response to an IncompleteAssistantAIMessage.

PARAMETER DESCRIPTION
oai_response

The OpenAI response to convert.

TYPE: list[ResponseOutputItem]

cu_config

The environment in which the action is executed.

TYPE: ComputerUseConfig | None DEFAULT: None

RETURNS DESCRIPTION
IncompleteAssistantAIMessage

The IncompleteAssistantAIMessage.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_assistant_message_to_assistant_ai_message(
    oai_response: list[rt.ResponseOutputItem],
    cu_config: ComputerUseConfig | None = None,
) -> IncompleteAssistantAIMessage:
    """Convert an OpenAI Response to an IncompleteAssistantAIMessage.

    Args:
        oai_response: The OpenAI response to convert.
        cu_config: The computer-use configuration, forwarded to the
            per-item conversion.

    Returns:
        The IncompleteAssistantAIMessage.
    """
    converters = OpenAIResponseConverters
    parts: list[
        IncompleteAssistantAIMessageContentPart
        | AssistantAIMessageContentToolCallPart
    ] = []
    # One OpenAI output item may expand to several content parts.
    for output_item in oai_response:
        parts.extend(
            converters.openai_response_output_item_to_assistant_ai_msg_parts(
                output_item,
                cu_config,
            )
        )
    # Aggregate every refusal text into one newline-separated string.
    refusal_texts = [
        part.refusal or ""
        for part in parts
        if isinstance(
            part,
            IncompleteAssistantAIMessageContentRefusalPart,
        )
    ]
    joined_refusals = "\n".join(refusal_texts)
    return IncompleteAssistantAIMessage(
        content=parts,
        refusal=joined_refusals or None,
        role="assistant",
    )

openai_finish_reason_to_finish_reason staticmethod

openai_finish_reason_to_finish_reason(
    raw_response: Response,
    message_received: (
        AssistantAIMessage | IncompleteAssistantAIMessage
    ),
) -> FinishReasons

Convert an OpenAI finish reason to a FinishReasons.

PARAMETER DESCRIPTION
raw_response

The OpenAI response to convert.

TYPE: Response

message_received

The assistant message received from the OpenAI response.

TYPE: AssistantAIMessage | IncompleteAssistantAIMessage

RETURNS DESCRIPTION
FinishReasons

The FinishReasons.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_finish_reason_to_finish_reason(
    raw_response: Response,
    message_received: AssistantAIMessage | IncompleteAssistantAIMessage,
) -> FinishReasons:
    """Convert an OpenAI finish reason to a FinishReasons.

    Args:
        raw_response: The OpenAI response to convert.
        message_received: The assistant message received from the OpenAI
            response.

    Returns:
        The FinishReasons.
    """
    # Locally-executable tool calls take precedence over any other reason.
    if message_received.tool_call_content_parts_local_execution:
        return "tool_calls"
    details = raw_response.incomplete_details
    if details is None:
        return "stop"
    if details.reason == "max_output_tokens":
        return "length"
    return "content_filter"

openai_usage_to_completion_usage staticmethod

openai_usage_to_completion_usage(
    raw_usage: ResponseUsage | None,
    model_name: str | None = None,
) -> CompletionUsage

Convert an OpenAI usage to a CompletionUsage.

PARAMETER DESCRIPTION
raw_usage

The OpenAI usage to convert.

TYPE: ResponseUsage | None

model_name

The model name of the completion usage.

TYPE: str | None DEFAULT: None

RETURNS DESCRIPTION
CompletionUsage

The CompletionUsage.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_usage_to_completion_usage(
    raw_usage: rt.ResponseUsage | None,
    model_name: str | None = None,
) -> CompletionUsage:
    """Convert an OpenAI usage to a CompletionUsage.

    Args:
        raw_usage: The OpenAI usage to convert.
        model_name: The model name of the completion usage.

    Returns:
        The CompletionUsage.
    """
    if raw_usage is None:
        # No usage reported: record zeros and flag the absence.
        return CompletionUsage(
            model_name=model_name,
            prompt_tokens=0,
            completion_tokens=0,
            total_tokens=0,
            cached_used_tokens=None,
            extra_fields={
                "reasoning_tokens": None,
            },
            usage_was_never_given=True,
        )
    return CompletionUsage(
        model_name=model_name,
        prompt_tokens=raw_usage.input_tokens,
        completion_tokens=raw_usage.output_tokens,
        total_tokens=raw_usage.total_tokens,
        cached_used_tokens=raw_usage.input_tokens_details.cached_tokens,
        extra_fields={
            # Reasoning tokens have no first-class field; keep them in
            # the extra-fields bag.
            "reasoning_tokens": (
                raw_usage.output_tokens_details.reasoning_tokens
            ),
        },
        usage_was_never_given=False,
    )

oai_output_msg_to_assistant_ai_msg_parts staticmethod

oai_output_msg_to_assistant_ai_msg_parts(
    oai_output_msg: ResponseOutputMessage,
) -> list[IncompleteAssistantAIMessageContentPart]

Convert an OpenAI msg to an IncompleteAssistantAIMessageContentPart.

PARAMETER DESCRIPTION
oai_output_msg

The OpenAI output message to convert.

TYPE: ResponseOutputMessage

RETURNS DESCRIPTION
list[IncompleteAssistantAIMessageContentPart]

The IncompleteAssistantAIMessageContentPart.

RAISES DESCRIPTION
AccidentalComputerCallError

If OpenAI returns a computer call in a ResponseOutputMessage object. This is a bug in OpenAI's API, and they will fix it at some point. Note that we're catching this error in other methods, so end users shouldn't see this error.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def oai_output_msg_to_assistant_ai_msg_parts(
    oai_output_msg: rt.ResponseOutputMessage,
) -> list[IncompleteAssistantAIMessageContentPart]:
    """Convert an OpenAI msg to an IncompleteAssistantAIMessageContentPart.

    Args:
        oai_output_msg: The OpenAI output message to convert.

    Returns:
        The IncompleteAssistantAIMessageContentPart.

    Raises:
        AccidentalComputerCallError: If OpenAI returns a computer call
            in a `ResponseOutputMessage` object. This is a bug in
            OpenAI's API, and they will fix it at some point. Note that
            we're catching this error in other methods, so end users
            shouldn't see this error.
    """
    # NOTE: OpenAI is being nasty here and returning objects they shouldn't
    # be!
    if (
        oai_output_msg.content is None  # pyright: ignore[reportUnnecessaryComparison]
        and oai_output_msg.type == "computer_call"
    ):  # pragma: no cover
        raise OpenAIResponseConverters.AccidentalComputerCallError  # pyright: ignore[reportUnreachable]
    parts: list[IncompleteAssistantAIMessageContentPart] = []
    for oai_part in oai_output_msg.content:
        if isinstance(oai_part, rt.ResponseOutputText):
            parts.append(
                IncompleteAssistantAIMessageContentTextPart(
                    text=oai_part.text,
                    type="text",
                    uid=oai_output_msg.id,
                )
            )
        elif isinstance(  # pragma: no branch
            oai_part, rt.ResponseOutputRefusal
        ):
            parts.append(
                IncompleteAssistantAIMessageContentRefusalPart(
                    refusal=oai_part.refusal,
                    type="refusal",
                    uid=oai_output_msg.id,
                )
            )
    return parts

openai_computer_tool_click_to_computer_use_action staticmethod

openai_computer_tool_click_to_computer_use_action(
    action: ActionClick,
    cu_config: ComputerUseConfig,
    call_id: str,
) -> ComputerUseAction

Convert an OpenAI computer tool click to a ComputerUseAction.

PARAMETER DESCRIPTION
action

The OpenAI computer tool click to convert.

TYPE: ActionClick

cu_config

The environment in which the action is executed.

TYPE: ComputerUseConfig

call_id

The ID of the tool call.

TYPE: str

RETURNS DESCRIPTION
ComputerUseAction

The ComputerUseAction.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_computer_tool_click_to_computer_use_action(
    action: oai_cu_action.ActionClick,
    cu_config: ComputerUseConfig,
    call_id: str,
) -> ComputerUseAction:
    """Convert an OpenAI computer tool click to a ComputerUseAction.

    Args:
        action: The OpenAI computer tool click to convert.
        cu_config: The environment in which the action is executed.
        call_id: The ID of the tool call.

    Returns:
        The ComputerUseAction.
    """

    # Little hack for type hinting
    class Kwargs(TypedDict):
        environment_variable: RuntimeVariable
        environment: ComputerUseEnvironment
        call_id: str

    kwargs: Kwargs = {
        "environment_variable": cu_config["environment_variable"],
        "environment": cu_config["environment"],
        "call_id": call_id,
    }
    match action.button:
        case "left":
            return CULeftMouseClick(xy=(action.x, action.y), **kwargs)
        case "right":
            return CURightClick(xy=(action.x, action.y), **kwargs)
        case "wheel":
            return CUMiddleClick(xy=(action.x, action.y), **kwargs)
        case "back":
            return CUBackClick(xy=(action.x, action.y), **kwargs)
        case "forward":  # pragma: no branch
            return CUForwardClick(xy=(action.x, action.y), **kwargs)

openai_computer_tool_call_to_computer_tool_call staticmethod

openai_computer_tool_call_to_computer_tool_call(
    oai_computer_tool_call: ResponseComputerToolCall,
    cu_config: ComputerUseConfig,
    call_id: str,
) -> ComputerUseAction

Convert an OpenAI computer tool call to a ComputerUseAction.

PARAMETER DESCRIPTION
oai_computer_tool_call

The OpenAI computer tool call to convert.

TYPE: ResponseComputerToolCall

cu_config

The environment in which the action is executed.

TYPE: ComputerUseConfig

call_id

The ID of the tool call.

TYPE: str

RETURNS DESCRIPTION
ComputerUseAction

The ComputerUseAction.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_computer_tool_call_to_computer_tool_call(  # noqa: PLR0911
    oai_computer_tool_call: rt.ResponseComputerToolCall,
    cu_config: ComputerUseConfig,
    call_id: str,
) -> ComputerUseAction:
    """Convert an OpenAI computer tool call to a ComputerUseAction.

    Args:
        oai_computer_tool_call: The OpenAI computer tool call to convert.
        cu_config: The environment in which the action is executed.
        call_id: The ID of the tool call.

    Returns:
        The ComputerUseAction.
    """

    # Little hack for type hinting
    class Kwargs(TypedDict):
        environment_variable: RuntimeVariable
        environment: ComputerUseEnvironment
        call_id: str

    kwargs: Kwargs = {
        "environment_variable": cu_config["environment_variable"],
        "environment": cu_config["environment"],
        "call_id": call_id,
    }
    self_ = OpenAIResponseConverters
    action = oai_computer_tool_call.action
    match action:
        case oai_cu_action.ActionClick():
            return self_.openai_computer_tool_click_to_computer_use_action(
                action,
                cu_config,
                call_id,
            )
        case oai_cu_action.ActionDoubleClick():
            return CULeftMouseDoubleClick(xy=(action.x, action.y), **kwargs)
        case oai_cu_action.ActionDrag():
            return CUDrag(
                xy=[(segment.x, segment.y) for segment in action.path],
                **kwargs,
            )
        case oai_cu_action.ActionKeypress():
            return CUKeypress(keys=action.keys, **kwargs)
        case oai_cu_action.ActionMove():
            return CUMove(xy=(action.x, action.y), **kwargs)
        case oai_cu_action.ActionScreenshot():
            return CUScreenshot(**kwargs)
        case oai_cu_action.ActionScroll():
            return CUScroll(
                start_xy=(action.x, action.y),
                distance_xy=(action.scroll_x, action.scroll_y),
                **kwargs,
            )
        case oai_cu_action.ActionType():
            return CUType(text=action.text, **kwargs)
        case oai_cu_action.ActionWait():  # pragma: no branch
            return CUWait(**kwargs)

openai_response_output_item_to_assistant_ai_msg_parts staticmethod

openai_response_output_item_to_assistant_ai_msg_parts(
    oai_output_item: ResponseOutputItem,
    cu_config: ComputerUseConfig | None = None,
) -> Sequence[
    IncompleteAssistantAIMessageContentPart
    | AssistantAIMessageContentToolCallPart
]

Convert an OpenAI output item to an incomplete content part.

Note that we're returning a list, because one OpenAI output item can be converted to multiple content parts (e.g. text and refusal)

PARAMETER DESCRIPTION
oai_output_item

The OpenAI output item to convert.

TYPE: ResponseOutputItem

cu_config

The environment in which the action is executed.

TYPE: ComputerUseConfig | None DEFAULT: None

RETURNS DESCRIPTION
Sequence[IncompleteAssistantAIMessageContentPart | AssistantAIMessageContentToolCallPart]

The IncompleteAssistantAIMessageContentPart list

RAISES DESCRIPTION
ValueError

If no Computer Use environment is provided, but a computer use output item is nevertheless encountered.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_response_output_item_to_assistant_ai_msg_parts(  # noqa: PLR0911
    oai_output_item: rt.ResponseOutputItem,
    cu_config: ComputerUseConfig | None = None,
) -> Sequence[
    IncompleteAssistantAIMessageContentPart
    | AssistantAIMessageContentToolCallPart
]:
    """Convert an OpenAI output item to an incomplete content part.

    Note that we're returning a list, because one OpenAI output item
    can be converted to multiple content parts (e.g. text and refusal).
    Unsupported item types (image generation, file search, code
    interpreter, shell, MCP) are silently dropped by returning an
    empty list.

    Args:
        oai_output_item: The OpenAI output item to convert.
        cu_config: The environment in which the action is executed.

    Returns:
        The IncompleteAssistantAIMessageContentPart list

    Raises:
        ValueError: If no Computer Use environment is provided, but
            a computer use output item is nevertheless encountered.
    """
    # Alias the class so this static method can call sibling converters.
    self_ = OpenAIResponseConverters
    match oai_output_item:
        case rt.ResponseOutputMessage():
            try:
                return self_.oai_output_msg_to_assistant_ai_msg_parts(
                    oai_output_item
                )
            except (
                OpenAIResponseConverters.AccidentalComputerCallError
            ):  # pragma: no cover
                # Known upstream quirk: drop the empty message entirely.
                logger.warning(
                    "OpenAI returned a computer call in a "
                    "`ResponseOutputMessage` object. This means that the "
                    "model is returning an empty `ResponseOutputMessage` "
                    "object, which is a bug in OpenAI's API. We're "
                    "ignoring it."
                )
                return []
        case rt.ResponseFunctionToolCall():
            # Regular (client-executed) function tool call.
            return [
                IncompleteAssistantAIMessageContentToolCallPart(
                    tool_call=IncompleteAIToolCall(
                        call_id=oai_output_item.call_id,
                        name=oai_output_item.name,
                        returned_arguments=oai_output_item.arguments,
                    ),
                    uid=oai_output_item.id,
                )
            ]
        case rt.ResponseFunctionWebSearch():
            # Server-side web search: surfaced under a synthetic tool
            # name and flagged as not requiring local execution.
            return [
                IncompleteAssistantAIMessageContentToolCallPart(
                    tool_call=IncompleteAIToolCall(
                        call_id=None,
                        name="openai$web_search",
                        returned_arguments=None,
                        requires_local_execution=False,
                    ),
                    uid=oai_output_item.id,
                )
            ]
        case rt.ResponseComputerToolCall():
            # Computer-use actions need an environment to be meaningful.
            if cu_config is None:
                msg = (
                    "Environment is required to execute computer tool calls"
                )
                raise ValueError(msg)
            return [
                AssistantAIMessageContentToolCallPart(
                    tool_call=self_.openai_computer_tool_call_to_computer_tool_call(
                        oai_output_item,
                        cu_config,
                        call_id=oai_output_item.call_id,
                    ),
                    uid=oai_output_item.id,
                )
            ]
        case rt.ResponseReasoningItem():  # pragma: no branch
            # Join all reasoning summary fragments into one part.
            return [
                IncompleteAssistantAIMessageContentReasoningPart(
                    reasoning=(
                        "\n".join(
                            content.text
                            for content in oai_output_item.summary
                        )
                    ),
                    uid=oai_output_item.id,
                )
            ]
        case (
            ImageGenerationCall()
            | rt.ResponseFileSearchToolCall()
            | rt.ResponseCodeInterpreterToolCall()
            | LocalShellCall()
            | McpCall()
            | McpListTools()
            | McpApprovalRequest()
        ):  # pragma: no branch
            # Deliberately unsupported item types: fall through to [].
            pass

    return []

openai_chunk_to_ai_response staticmethod

openai_chunk_to_ai_response(
    chunk: ResponseStreamEvent,
    prompt: AIPrompt[OutputSchemaType],
    *,
    model_name: str | None = None,
    cu_config: ComputerUseConfig | None = None
) -> IncompleteAIResponse[OutputSchemaType] | None

Convert an OpenAI chunk to an AIResponse.

PARAMETER DESCRIPTION
chunk

The OpenAI chunk to convert.

TYPE: ResponseStreamEvent

prompt

The prompt to convert.

TYPE: AIPrompt[OutputSchemaType]

model_name

The model name of the response.

TYPE: str | None DEFAULT: None

cu_config

The environment in which the action is executed.

TYPE: ComputerUseConfig | None DEFAULT: None

RETURNS DESCRIPTION
IncompleteAIResponse[OutputSchemaType] | None

The IncompleteAIResponse or None.

Source code in conatus/models/open_ai/response_conversion.py
@staticmethod
def openai_chunk_to_ai_response(
    chunk: rt.ResponseStreamEvent,
    prompt: AIPrompt[OutputSchemaType],
    *,
    model_name: str | None = None,
    cu_config: ComputerUseConfig | None = None,
) -> IncompleteAIResponse[OutputSchemaType] | None:
    """Convert an OpenAI chunk to an AIResponse.

    Stream events that carry no usable content (any event type not
    matched below) yield ``None``.

    Args:
        chunk: The OpenAI chunk to convert.
        prompt: The prompt to convert.
        model_name: The model name of the response.
        cu_config: The environment in which the action is executed.

    Returns:
        The IncompleteAIResponse or None.
    """
    self_ = OpenAIResponseConverters
    # The single content part extracted from this chunk, if any.
    content_part: (
        IncompleteAssistantAIMessageContentPart
        | AssistantAIMessageContentToolCallPart
        | None
    ) = None
    match chunk:
        # Lifecycle events carry a full Response snapshot; convert it
        # wholesale instead of extracting a single content part.
        case rt.ResponseCreatedEvent() | rt.ResponseInProgressEvent():
            return self_.openai_response_to_ai_response(
                chunk.response,
                prompt,
                model_name,
                return_incomplete=True,
            )
        case rt.ResponseOutputItemAddedEvent():
            content_parts = (
                self_.openai_response_output_item_to_assistant_ai_msg_parts(
                    chunk.item,
                    cu_config,
                )
            )
            # NOTE(review): only the first converted part is kept;
            # presumably "item added" events never yield more than one.
            if len(content_parts) > 0:
                content_part = content_parts[0]
            else:
                content_part = None
        case rt.ResponseFunctionCallArgumentsDeltaEvent():
            # Incremental JSON arguments for an in-flight tool call.
            content_part = IncompleteAssistantAIMessageContentToolCallPart(
                tool_call=IncompleteAIToolCall(
                    call_id=chunk.item_id,
                    returned_arguments=chunk.delta,
                    type="function",
                ),
                type="tool_call",
                uid=chunk.item_id,
            )
        case rt.ResponseTextDeltaEvent():
            # Incremental assistant text.
            content_part = IncompleteAssistantAIMessageContentTextPart(
                text=chunk.delta,
                type="text",
                uid=chunk.item_id,
            )
        case rt.ResponseReasoningSummaryTextDeltaEvent():
            # Incremental reasoning-summary text.
            content_part = IncompleteAssistantAIMessageContentReasoningPart(
                reasoning=chunk.delta,
                type="reasoning",
                uid=chunk.item_id,
                index=chunk.output_index,
            )
        case _:  # pragma: no branch
            # All other stream events carry nothing we surface.
            return None
    return IncompleteAIResponse(
        prompt=prompt,
        message_received=IncompleteAssistantAIMessage(
            content=[content_part] if content_part else [],
            role="assistant",
        ),
        finish_reason=None,
        usage=None,
    )

Anthropic Converters

conatus.models.anthropic.conversion

Conversion functions for Anthropic.

AnthropicKeypress

Bases: BaseModel

Anthropic keypress action.

AnthropicKeypressHold

Bases: BaseModel

Anthropic keypress hold action.

AnthropicType

Bases: BaseModel

Anthropic type action.

AnthropicCursorPosition

Bases: BaseModel

Anthropic cursor position action.

AnthropicMouseMove

Bases: BaseModel

Anthropic mouse move action.

AnthropicLeftMouseDown

Bases: BaseModel

Anthropic left mouse down action.

AnthropicLeftMouseUp

Bases: BaseModel

Anthropic left mouse up action.

AnthropicLeftMouseClick

Bases: BaseModel

Anthropic left mouse click action.

AnthropicLeftMouseDrag

Bases: BaseModel

Anthropic left mouse drag action.

AnthropicRightClick

Bases: BaseModel

Anthropic right click action.

AnthropicMiddleClick

Bases: BaseModel

Anthropic middle click action.

AnthropicLeftMouseDoubleClick

Bases: BaseModel

Anthropic left mouse double click action.

AnthropicLeftMouseTripleClick

Bases: BaseModel

Anthropic left mouse triple click action.

AnthropicScroll

Bases: BaseModel

Anthropic scroll action.

AnthropicWait

Bases: BaseModel

Anthropic wait action.

AnthropicScreenshot

Bases: BaseModel

Anthropic screenshot action.

AnthropicComputerUse

Bases: BaseModel

The computer use config for Anthropic.

AnthropicConverters

Conversion functions for Anthropic.

CUActionKwargs

Bases: TypedDict

The kwargs for the computer use action.

anthropic_computer_use_block_to_tool_call staticmethod

anthropic_computer_use_block_to_tool_call(
    block: BetaToolUseBlock, cu_config: ComputerUseConfig
) -> ComputerUseAction

Convert an Anthropic computer use block to a tool call.

PARAMETER DESCRIPTION
block

The Anthropic computer use block to convert.

TYPE: BetaToolUseBlock

cu_config

The computer use config to use.

TYPE: ComputerUseConfig

RETURNS DESCRIPTION
ComputerUseAction

The tool call.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_computer_use_block_to_tool_call(  # noqa: C901, PLR0911, PLR0912
    block: BetaToolUseBlock,
    cu_config: ComputerUseConfig,
) -> ComputerUseAction:
    """Convert an Anthropic computer use block to a tool call.

    Args:
        block: The Anthropic computer use block to convert.
        cu_config: The computer use config to use.

    Returns:
        The tool call.
    """
    # Re-validate the raw SDK block as our own tagged model so the match
    # below can dispatch on the concrete action type in `input`.
    tool_call = AnthropicComputerUse(**block.to_dict())  # type: ignore[arg-type]  # pyright: ignore[reportArgumentType]
    # Fields shared by every computer-use action we emit.
    kwargs: AnthropicConverters.CUActionKwargs = {
        "environment_variable": cu_config["environment_variable"],
        "environment": cu_config["environment"],
        "call_id": block.id,
    }
    match tool_call.input:
        case AnthropicKeypress():
            # Anthropic encodes chords as e.g. "ctrl+shift+p".
            return CUKeypress(
                keys=tool_call.input.text.split("+"), **kwargs
            )
        case AnthropicKeypressHold():
            return CUKeypressHold(
                keys=tool_call.input.text.split("+"),
                duration=tool_call.input.duration,
                **kwargs,
            )
        case AnthropicType():
            return CUType(text=tool_call.input.text, **kwargs)
        case AnthropicCursorPosition():
            return CUCursorPosition(**kwargs)
        case AnthropicMouseMove():
            return CUMove(xy=tool_call.input.coordinate, **kwargs)
        case AnthropicLeftMouseDown():
            return CULeftMouseDown(xy=tool_call.input.coordinate, **kwargs)
        case AnthropicLeftMouseUp():
            return CULeftMouseUp(xy=tool_call.input.coordinate, **kwargs)
        case AnthropicLeftMouseClick():
            return CULeftMouseClick(xy=tool_call.input.coordinate, **kwargs)
        case AnthropicLeftMouseDrag():
            # A drag is modeled as a two-point path: start then end.
            return CUDrag(
                xy=[
                    tool_call.input.start_coordinate,
                    tool_call.input.coordinate,
                ],
                **kwargs,
            )
        case AnthropicLeftMouseDoubleClick():
            return CULeftMouseDoubleClick(
                xy=tool_call.input.coordinate, **kwargs
            )
        case AnthropicLeftMouseTripleClick():
            return CULeftMouseTripleClick(
                xy=tool_call.input.coordinate, **kwargs
            )
        case AnthropicRightClick():
            return CURightClick(xy=tool_call.input.coordinate, **kwargs)
        case AnthropicMiddleClick():
            return CUMiddleClick(xy=tool_call.input.coordinate, **kwargs)
        case AnthropicScroll():
            return CUScroll(
                start_xy=tool_call.input.coordinate,
                direction=tool_call.input.scroll_direction,
                magnitude=tool_call.input.scroll_amount,
                **kwargs,
            )
        case AnthropicWait():
            return CUWait(duration=tool_call.input.duration, **kwargs)
        case AnthropicScreenshot():  # pragma: no branch
            return CUScreenshot(**kwargs)

anthropic_block_to_assistant_msg_parts staticmethod

anthropic_block_to_assistant_msg_parts(
    block: BetaContentBlock,
    cu_config: ComputerUseConfig | None = None,
    *,
    model: Model | None = None,
    index: int | None = None
) -> list[
    IncompleteAssistantAIMessageContentPart
    | AssistantAIMessageContentToolCallPart
]

Convert an Anthropic block to an assistant message part.

PARAMETER DESCRIPTION
block

The Anthropic block to convert.

TYPE: BetaContentBlock

cu_config

The computer use config to use.

TYPE: ComputerUseConfig | None DEFAULT: None

model

The model that produced the block.

TYPE: Model | None DEFAULT: None

index

The index of the block.

TYPE: int | None DEFAULT: None

RETURNS DESCRIPTION
list[IncompleteAssistantAIMessageContentPart | AssistantAIMessageContentToolCallPart]

The assistant message part.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_block_to_assistant_msg_parts(
    block: BetaContentBlock,
    cu_config: ComputerUseConfig | None = None,
    *,
    model: Model | None = None,
    index: int | None = None,
) -> list[
    IncompleteAssistantAIMessageContentPart
    | AssistantAIMessageContentToolCallPart
]:
    """Convert an Anthropic block to an assistant message part.

    Args:
        block: The Anthropic block to convert.
        cu_config: The computer use config to use.
        model: The model that produced the block.
        index: The index of the block.

    Returns:
        The assistant message part.

    Raises:
        NotImplementedError: If the block is a server-side search
            block, which Conatus does not support.
    """
    self_: type[AnthropicConverters] = AnthropicConverters
    match block:
        case BetaTextBlock():
            return [
                IncompleteAssistantAIMessageContentTextPart(
                    text=block.text,
                )
            ]
        case BetaThinkingBlock():
            # The thinking signature doubles as the part's uid.
            return [
                IncompleteAssistantAIMessageContentReasoningPart(
                    reasoning=block.thinking,
                    uid=block.signature,
                    model_name=model,
                    index=index,
                )
            ]
        case BetaRedactedThinkingBlock():
            # Redacted thinking carries opaque `data`, not readable text.
            return [
                IncompleteAssistantAIMessageContentReasoningPart(
                    reasoning=block.data,
                    is_redacted=True,
                    model_name=model,
                    index=index,
                )
            ]
        case BetaToolUseBlock():
            # Only treat this as a computer-use action when the tool is
            # named "computer" AND a config was provided; everything
            # else becomes a regular (incomplete) tool call.
            if cu_config is None or block.name != "computer":
                # Normalize an empty input dict to an empty string.
                if str(block.input) == "{}":
                    block_input = ""
                else:
                    block_input = str(block.input)
                return [
                    IncompleteAssistantAIMessageContentToolCallPart(
                        tool_call=IncompleteAIToolCall(
                            call_id=block.id,
                            name=block.name,
                            returned_arguments=block_input,
                            could_be_structured_output=True,
                            index=index,
                        ),
                        uid=block.id,
                    )
                ]
            return [
                AssistantAIMessageContentToolCallPart(
                    tool_call=self_.anthropic_computer_use_block_to_tool_call(
                        block, cu_config
                    ),
                    uid=block.id,
                )
            ]
        case _:  # pragma: no branch
            msg = "Server-side search is not supported in Conatus."
            raise NotImplementedError(msg)

anthropic_delta_to_assistant_msg_parts staticmethod

anthropic_delta_to_assistant_msg_parts(
    delta: BetaRawContentBlockDelta,
    index: int,
    model_name: Model | None = None,
) -> IncompleteAssistantAIMessageContentPart | None

Convert an Anthropic delta to an assistant message part.

PARAMETER DESCRIPTION
delta

The Anthropic delta to convert.

TYPE: BetaRawContentBlockDelta

model_name

The name of the model that produced the delta.

TYPE: Model | None DEFAULT: None

index

The index of the delta.

TYPE: int

RETURNS DESCRIPTION
IncompleteAssistantAIMessageContentPart | None

The assistant message part.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_delta_to_assistant_msg_parts(
    delta: BetaRawContentBlockDelta,
    index: int,
    model_name: Model | None = None,
) -> IncompleteAssistantAIMessageContentPart | None:
    """Convert an Anthropic delta to an assistant message part.

    Citation deltas are ignored (``None`` is returned).

    Args:
        delta: The Anthropic delta to convert.
        model_name: The name of the model that produced the delta.
        index: The index of the delta.

    Returns:
        The assistant message part.
    """
    match delta:
        case BetaTextDelta():
            return IncompleteAssistantAIMessageContentTextPart(
                text=delta.text,
            )
        case BetaInputJSONDelta():
            # Partial JSON tool arguments; call_id/name are not known
            # at delta time and get filled in elsewhere.
            return IncompleteAssistantAIMessageContentToolCallPart(
                tool_call=IncompleteAIToolCall(
                    call_id=None,
                    name=None,
                    returned_arguments=delta.partial_json,
                    index=index,
                    could_be_structured_output=True,
                ),
                uid=None,
            )
        case BetaCitationsDelta():
            # Citations are not surfaced as content parts.
            return None
        case BetaThinkingDelta():
            return IncompleteAssistantAIMessageContentReasoningPart(
                reasoning=delta.thinking,
                uid=None,
                model_name=model_name,
                index=index,
            )
        case BetaSignatureDelta():  # pragma: no branch
            # Signature deltas carry no text, only the uid (signature).
            return IncompleteAssistantAIMessageContentReasoningPart(
                reasoning=None,
                uid=delta.signature,
                model_name=model_name,
                index=index,
            )

anthropic_message_to_ai_message staticmethod

anthropic_message_to_ai_message(
    message: BetaMessage,
    cu_config: ComputerUseConfig | None = None,
) -> IncompleteAssistantAIMessage

Convert an Anthropic message to an AI message.

PARAMETER DESCRIPTION
message

The Anthropic message to convert.

TYPE: BetaMessage

cu_config

The computer use config to use.

TYPE: ComputerUseConfig | None DEFAULT: None

RETURNS DESCRIPTION
IncompleteAssistantAIMessage

The AI message.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_message_to_ai_message(
    message: BetaMessage,
    cu_config: ComputerUseConfig | None = None,
) -> IncompleteAssistantAIMessage:
    """Build an AI message from an Anthropic message.

    Every Anthropic content block is converted (one block may yield
    several parts) and the results are flattened into a single list.

    Args:
        message: The Anthropic message to convert.
        cu_config: The computer use config to use.

    Returns:
        The AI message.
    """
    converters = AnthropicConverters
    collected_parts: list[
        IncompleteAssistantAIMessageContentPart
        | AssistantAIMessageContentToolCallPart
    ] = [
        part
        for anthropic_block in message.content
        for part in converters.anthropic_block_to_assistant_msg_parts(
            block=anthropic_block,
            cu_config=cu_config,
            model=message.model,
        )
    ]
    return IncompleteAssistantAIMessage(content=collected_parts)

anthropic_response_to_ai_response staticmethod

anthropic_response_to_ai_response(
    response: BetaMessage,
    prompt: AIPrompt[OutputSchemaType],
) -> IncompleteAIResponse[OutputSchemaType]

Convert an Anthropic response to an AI response.

PARAMETER DESCRIPTION
response

The Anthropic response to convert.

TYPE: BetaMessage

prompt

The prompt that was used to generate the response.

TYPE: AIPrompt[OutputSchemaType]

RETURNS DESCRIPTION
IncompleteAIResponse[OutputSchemaType]

The AI response.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_response_to_ai_response(
    response: BetaMessage,
    prompt: AIPrompt[OutputSchemaType],
) -> IncompleteAIResponse[OutputSchemaType]:
    """Build an AI response from a raw Anthropic response.

    Args:
        response: The Anthropic response to convert.
        prompt: The prompt that was used to generate the response.

    Returns:
        The AI response.
    """
    converters = AnthropicConverters
    # Convert each piece separately, then assemble the response object.
    message_received = converters.anthropic_message_to_ai_message(
        response,
        prompt.computer_use_config,
    )
    finish_reason = converters.anthropic_stop_reason_to_finish_reason(
        response.stop_reason,
    )
    usage = converters.anthropic_usage_to_completion_usage(
        response.usage,
        model_name=response.model,
    )
    return IncompleteAIResponse(
        prompt=prompt,
        message_received=message_received,
        finish_reason=finish_reason,
        usage=usage,
        uid=response.id,
    )

anthropic_stop_reason_to_finish_reason staticmethod

anthropic_stop_reason_to_finish_reason(
    stop_reason: (
        Literal[
            "end_turn",
            "max_tokens",
            "stop_sequence",
            "tool_use",
            "pause_turn",
            "refusal",
        ]
        | None
    ),
) -> FinishReasons | None

Convert an Anthropic stop reason to a finish reason.

PARAMETER DESCRIPTION
stop_reason

The Anthropic stop reason to convert.

TYPE: Literal['end_turn', 'max_tokens', 'stop_sequence', 'tool_use', 'pause_turn', 'refusal'] | None

RETURNS DESCRIPTION
FinishReasons | None

The finish reason.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_stop_reason_to_finish_reason(
    stop_reason: Literal[
        "end_turn",
        "max_tokens",
        "stop_sequence",
        "tool_use",
        "pause_turn",
        "refusal",
    ]
    | None,
) -> FinishReasons | None:
    """Map an Anthropic stop reason onto a Conatus finish reason.

    Args:
        stop_reason: The Anthropic stop reason to convert.

    Returns:
        The finish reason, or ``None`` when no stop reason was given.
    """
    if stop_reason is None:  # pragma: no cover
        return None
    # Both "end_turn" and "stop_sequence" collapse to a plain "stop".
    finish_reason_by_stop_reason: dict[str, FinishReasons] = {
        "end_turn": "stop",
        "stop_sequence": "stop",
        "max_tokens": "length",
        "tool_use": "tool_calls",
        "pause_turn": "timeout",
        "refusal": "content_filter",
    }
    return finish_reason_by_stop_reason[stop_reason]

anthropic_usage_to_completion_usage staticmethod

anthropic_usage_to_completion_usage(
    usage: BetaUsage, model_name: Model | None = None
) -> CompletionUsage

Convert an Anthropic usage to a completion usage.

PARAMETER DESCRIPTION
usage

The Anthropic usage to convert.

TYPE: BetaUsage

model_name

The name of the model that produced the usage.

TYPE: Model | None DEFAULT: None

RETURNS DESCRIPTION
CompletionUsage

The completion usage.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_usage_to_completion_usage(
    usage: BetaUsage,
    model_name: Model | None = None,
) -> CompletionUsage:
    """Translate Anthropic token accounting into a CompletionUsage.

    Args:
        usage: The Anthropic usage to convert.
        model_name: The name of the model that produced the usage.

    Returns:
        The completion usage.
    """
    prompt_tokens = usage.input_tokens
    completion_tokens = usage.output_tokens
    return CompletionUsage(
        model_name=model_name,
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        # Anthropic reports input/output separately; derive the total.
        total_tokens=prompt_tokens + completion_tokens,
        cached_used_tokens=usage.cache_read_input_tokens,
        cached_created_tokens=usage.cache_creation_input_tokens,
        usage_was_never_given=False,
    )

ai_prompt_to_anthropic_messages staticmethod

ai_prompt_to_anthropic_messages(
    messages: Iterable[ConversationAIMessage],
) -> list[BetaMessageParam]

Convert an AI prompt to a list of Anthropic messages.

PARAMETER DESCRIPTION
messages

The messages to convert.

TYPE: Iterable[ConversationAIMessage]

RETURNS DESCRIPTION
list[BetaMessageParam]

The list of Anthropic messages.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def ai_prompt_to_anthropic_messages(
    messages: Iterable[ConversationAIMessage],
) -> list[BetaMessageParam]:
    """Convert an AI prompt to a list of Anthropic messages.

    Args:
        messages: The messages to convert.

    Returns:
        The list of Anthropic messages.
    """
    return list(
        map(AnthropicConverters.ai_message_to_anthropic_message, messages)
    )

ai_message_to_anthropic_message staticmethod

ai_message_to_anthropic_message(
    message: ConversationAIMessage,
) -> BetaMessageParam

Convert an AI message to an Anthropic message.

PARAMETER DESCRIPTION
message

The AI message to convert.

TYPE: ConversationAIMessage

RETURNS DESCRIPTION
BetaMessageParam

The Anthropic message.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def ai_message_to_anthropic_message(
    message: ConversationAIMessage,
) -> BetaMessageParam:
    """Convert an AI message to an Anthropic message.

    Args:
        message: The AI message to convert.

    Returns:
        The Anthropic message.
    """
    converters = AnthropicConverters
    if isinstance(message, UserAIMessage):
        return converters.user_ai_message_to_anthropic_message(message)
    if isinstance(message, AssistantAIMessage):
        return converters.assistant_ai_message_to_anthropic_message(
            message
        )
    # Tool responses are wrapped as user-role messages with a single
    # tool-result block, per Anthropic's message format.
    if isinstance(message, ToolResponseAIMessage):  # pragma: no branch
        return BetaMessageParam(
            content=[
                converters.tool_response_ai_message_to_anthropic_message(
                    message
                )
            ],
            role="user",
        )

user_ai_message_to_anthropic_message staticmethod

user_ai_message_to_anthropic_message(
    message: UserAIMessage,
) -> BetaMessageParam

Convert a user AI message to an Anthropic message.

PARAMETER DESCRIPTION
message

The user AI message to convert.

TYPE: UserAIMessage

RETURNS DESCRIPTION
BetaMessageParam

The Anthropic message.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def user_ai_message_to_anthropic_message(
    message: UserAIMessage,
) -> BetaMessageParam:
    """Convert a user AI message to an Anthropic message.

    Plain-string content passes through untouched; structured content
    parts are converted to Anthropic block params first.

    Args:
        message: The user AI message to convert.

    Returns:
        The Anthropic message.
    """
    converters = AnthropicConverters
    if isinstance(message.content, str):
        content = message.content
    else:
        content = (
            converters.user_ai_msg_content_parts_to_anthropic_content_parts(
                list(message.content)
            )
        )
    return BetaMessageParam(content=content, role="user")

user_ai_msg_content_parts_to_anthropic_content_parts staticmethod

user_ai_msg_content_parts_to_anthropic_content_parts(
    content_parts: list[UserAIMessageContentPart],
) -> list[BetaTextBlockParam | BetaImageBlockParam]

Convert to Anthropic content parts.

PARAMETER DESCRIPTION
content_parts

The list of user AI message content parts to convert.

TYPE: list[UserAIMessageContentPart]

RETURNS DESCRIPTION
list[BetaTextBlockParam | BetaImageBlockParam]

The list of Anthropic content parts.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def user_ai_msg_content_parts_to_anthropic_content_parts(
    content_parts: list[UserAIMessageContentPart],
) -> list[BetaTextBlockParam | BetaImageBlockParam]:
    """Convert to Anthropic content parts.

    Args:
        content_parts: The list of user AI message content parts to convert.

    Returns:
        The list of Anthropic content parts.
    """
    converted: list[BetaTextBlockParam | BetaImageBlockParam] = []
    for part in content_parts:
        if isinstance(part, UserAIMessageContentTextPart):
            converted.append(
                BetaTextBlockParam(text=part.text, type="text")
            )
        elif isinstance(
            part, UserAIMessageContentImagePart
        ):  # pragma: no branch
            # Images arrive either as base64 payloads or plain URLs;
            # only the source param differs between the two.
            if part.is_base64:
                source = BetaBase64ImageSourceParam(
                    data=part.image_data,
                    media_type="image/jpeg",
                    type="base64",
                )
            else:
                source = BetaURLImageSourceParam(
                    url=part.image_data,
                    type="url",
                )
            converted.append(
                BetaImageBlockParam(source=source, type="image")
            )
    return converted

assistant_ai_message_to_anthropic_message staticmethod

assistant_ai_message_to_anthropic_message(
    message: AssistantAIMessage,
) -> BetaMessageParam

Convert an assistant AI message to an Anthropic message.

PARAMETER DESCRIPTION
message

The assistant AI message to convert.

TYPE: AssistantAIMessage

RETURNS DESCRIPTION
BetaMessageParam

The Anthropic message.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def assistant_ai_message_to_anthropic_message(
    message: AssistantAIMessage,
) -> BetaMessageParam:
    """Convert an assistant AI message to an Anthropic message.

    Args:
        message: The assistant AI message to convert.

    Returns:
        The Anthropic message.
    """
    new_content_parts: list[
        BetaTextBlockParam
        | BetaToolUseBlockParam
        | BetaThinkingBlockParam
        | BetaRedactedThinkingBlockParam
    ] = []
    for content_part in message.content:
        match content_part:
            case AssistantAIMessageContentTextPart():
                new_content_parts.append(
                    BetaTextBlockParam(text=content_part.text, type="text")
                )
            case AssistantAIMessageContentToolCallPart():
                # A missing call_id degrades to "" rather than failing.
                new_content_parts.append(
                    BetaToolUseBlockParam(
                        id=content_part.tool_call.call_id or "",
                        name=content_part.tool_call.name,
                        input=content_part.tool_call.arguments_as_dict,
                        type="tool_use",
                    )
                )
            case AssistantAIMessageContentRefusalPart():
                # Anthropic has no refusal block; send it as plain text.
                new_content_parts.append(
                    BetaTextBlockParam(
                        text=content_part.refusal,
                        type="text",
                    )
                )
            case AssistantAIMessageContentReasoningPart():  # pragma: no branch # noqa: E501
                if content_part.is_redacted:
                    new_content_parts.append(
                        BetaRedactedThinkingBlockParam(
                            data=content_part.reasoning,
                            type="redacted_thinking",
                        )
                    )
                else:
                    # The part uid carries Anthropic's thinking signature.
                    new_content_parts.append(
                        BetaThinkingBlockParam(
                            thinking=content_part.reasoning,
                            type="thinking",
                            signature=content_part.uid or "",
                        )
                    )
    return BetaMessageParam(content=new_content_parts, role="assistant")

tool_response_ai_message_to_anthropic_message staticmethod

tool_response_ai_message_to_anthropic_message(
    message: ToolResponseAIMessage,
) -> BetaToolResultBlockParam

Convert a tool response AI message to an Anthropic message.

PARAMETER DESCRIPTION
message

The tool response AI message to convert.

TYPE: ToolResponseAIMessage

RETURNS DESCRIPTION
BetaToolResultBlockParam

The Anthropic message.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def tool_response_ai_message_to_anthropic_message(
    message: ToolResponseAIMessage,
) -> BetaToolResultBlockParam:
    """Convert a tool response AI message to an Anthropic message.

    Args:
        message: The tool response AI message to convert.

    Returns:
        The Anthropic message.
    """
    call_id = message.tool_call_id or ""
    error_flag = not message.success
    if message.for_computer_use:
        # Computer-use results are shipped as a single base64-encoded
        # JPEG image block rather than plain text.
        image_block = BetaImageBlockParam(
            source=BetaBase64ImageSourceParam(
                data=str(message.content.get("computer_use_output", "")),
                media_type="image/jpeg",
                type="base64",
            ),
            type="image",
        )
        return BetaToolResultBlockParam(
            tool_use_id=call_id,
            content=[image_block],
            type="tool_result",
            is_error=error_flag,
        )
    # Regular tool results are passed through as a plain string.
    return BetaToolResultBlockParam(
        tool_use_id=call_id,
        content=message.content_as_string,
        type="tool_result",
        is_error=error_flag,
    )

ai_prompt_tools_to_anthropic_tools staticmethod

ai_prompt_tools_to_anthropic_tools(
    prompt: AIPrompt,
) -> list[BetaToolUnionParam]

Convert an AI prompt to a list of Anthropic tools.

PARAMETER DESCRIPTION
prompt

The AI prompt to convert.

TYPE: AIPrompt

RETURNS DESCRIPTION
list[BetaToolUnionParam]

The list of Anthropic tools.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def ai_prompt_tools_to_anthropic_tools(
    prompt: AIPrompt,
) -> list[BetaToolUnionParam]:
    """Convert an AI prompt to a list of Anthropic tools.

    Args:
        prompt: The AI prompt to convert.

    Returns:
        The list of Anthropic tools.
    """
    if prompt.tools is None:
        return []
    anthropic_tools: list[BetaToolUnionParam] = []
    for tool in prompt.tools:
        # Reuse the OpenAI-style schema generator; Anthropic accepts
        # the same JSON-schema parameters under "input_schema".
        definition = generate_openai_json_schema(
            json_schema_pydantic_model=tool.json_schema_pydantic_model,
            strict_mode=True,
        )
        anthropic_tools.append(
            BetaToolParam(
                name=definition["name"],
                description=definition["description"],
                input_schema=definition["parameters"],
            )
        )
    cu_config = prompt.computer_use_config
    if cu_config is not None:
        # Computer use is a dedicated built-in tool type, appended last.
        anthropic_tools.append(
            BetaToolComputerUse20250124Param(
                name="computer",
                display_height_px=cu_config["display_height"],
                display_width_px=cu_config["display_width"],
                type="computer_20250124",
            )
        )
    return anthropic_tools

anthropic_chunk_to_incomplete_ai_response staticmethod

anthropic_chunk_to_incomplete_ai_response(
    chunk: BetaRawMessageStreamEvent,
    prompt: AIPrompt[OutputSchemaType],
    model_name: Model,
) -> IncompleteAIResponse[OutputSchemaType] | None

Convert an Anthropic chunk to an incomplete AI response.

PARAMETER DESCRIPTION
chunk

The Anthropic chunk to convert.

TYPE: BetaRawMessageStreamEvent

prompt

The prompt that was used to generate the response.

TYPE: AIPrompt[OutputSchemaType]

model_name

The name of the model that produced the chunk.

TYPE: Model

RETURNS DESCRIPTION
IncompleteAIResponse[OutputSchemaType] | None

The incomplete AI response.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def anthropic_chunk_to_incomplete_ai_response(
    chunk: BetaRawMessageStreamEvent,
    prompt: AIPrompt[OutputSchemaType],
    model_name: Model,
) -> IncompleteAIResponse[OutputSchemaType] | None:
    """Convert an Anthropic chunk to an incomplete AI response.

    Each streaming event type is handled separately: message-start
    events are converted through the full-response converter, delta
    events contribute usage/finish-reason or content fragments, and
    stop events yield ``None`` (they carry nothing to report).

    Args:
        chunk: The Anthropic chunk to convert.
        prompt: The prompt that was used to generate the response.
        model_name: The name of the model that produced the chunk.

    Returns:
        The incomplete AI response.
    """
    # Alias the class so this staticmethod can call sibling converters.
    self_ = AnthropicConverters
    content_parts: list[
        IncompleteAssistantAIMessageContentPart
        | AssistantAIMessageContentToolCallPart
    ] = []
    finish_reason: FinishReasons | None = None
    usage: CompletionUsage | None = None
    match chunk:
        case BetaRawMessageStartEvent():
            # The start event embeds a full message object; delegate to
            # the non-streaming converter and return its result as-is.
            return self_.anthropic_response_to_ai_response(
                chunk.message, prompt
            )
        case BetaRawMessageDeltaEvent():
            # Message-level delta: output-token usage plus stop reason.
            usage = CompletionUsage(
                completion_tokens=chunk.usage.output_tokens,
                usage_was_never_given=False,
            )
            finish_reason = self_.anthropic_stop_reason_to_finish_reason(
                chunk.delta.stop_reason
            )
        case BetaRawMessageStopEvent():
            # End of the stream; nothing new to convert.
            return None
        case BetaRawContentBlockStartEvent():
            # A new content block opened; convert it into message parts.
            content_parts.extend(
                self_.anthropic_block_to_assistant_msg_parts(
                    block=chunk.content_block,
                    cu_config=prompt.computer_use_config,
                    model=model_name,
                    index=chunk.index,
                )
            )
        case BetaRawContentBlockDeltaEvent():
            # An in-progress block grew; append the fragment, if any.
            content_part = self_.anthropic_delta_to_assistant_msg_parts(
                chunk.delta,
                chunk.index,
                model_name,
            )
            if content_part is not None:
                content_parts.append(content_part)
        case BetaRawContentBlockStopEvent():  # pragma: no branch
            # Block boundaries carry no content of their own.
            return None

    return IncompleteAIResponse(
        prompt=prompt,
        message_received=IncompleteAssistantAIMessage(
            content=content_parts,
        ),
        finish_reason=finish_reason,
        usage=usage,
    )

convert_output_schema_to_anthropic_tool staticmethod

convert_output_schema_to_anthropic_tool(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = True
) -> (
    tuple[BetaToolParam, bool] | tuple[None, Literal[False]]
)

Convert an output schema to an Anthropic output schema.

PARAMETER DESCRIPTION
output_schema

The output schema to convert.

TYPE: TypeAdapter[ParamType] | None

strict_mode

Whether to use strict mode.

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
tuple[BetaToolParam, bool] | tuple[None, Literal[False]]

If the output schema is not provided, returns None and False. Otherwise, returns the Anthropic output schema and a boolean indicating whether the schema has been converted from a non-object type to an item object.

Source code in conatus/models/anthropic/conversion.py
@staticmethod
def convert_output_schema_to_anthropic_tool(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = True,
) -> tuple[BetaToolParam, bool] | tuple[None, Literal[False]]:
    """Convert an output schema to an Anthropic output schema.

    Args:
        output_schema: The output schema to convert.
        strict_mode: Whether to use strict mode.

    Returns:
        If the output schema is not provided, returns None and False.
            Otherwise, returns the Anthropic output schema and a boolean
            indicating whether the schema has been converted from a
            non-object type to an item object.
    """
    if output_schema is None:
        return None, False
    # Non-object schemas get wrapped into an object; the flag records
    # whether that wrapping happened.
    schema, was_wrapped = generate_openai_json_schema(
        json_schema_pydantic_model=output_schema,
        strict_mode=strict_mode,
        convert_non_objects_to_objects=True,
    )
    tool = BetaToolParam(
        name=schema["name"],
        description=schema["description"],
        input_schema=schema["parameters"],
    )
    return tool, was_wrapped

Google Converters

conatus.models.google.conversion

Conversion functions for Google.

GoogleConverters

Conversion functions for Google GenAI.

ai_prompt_to_google_messages staticmethod

ai_prompt_to_google_messages(
    messages: Iterable[ConversationAIMessage],
) -> list[Content]

Convert messages to the Google AI format.

PARAMETER DESCRIPTION
messages

The messages to convert.

TYPE: Iterable[ConversationAIMessage]

RETURNS DESCRIPTION
list[Content]

The converted messages.

Source code in conatus/models/google/conversion.py
@staticmethod
def ai_prompt_to_google_messages(
    messages: Iterable[ConversationAIMessage],
) -> list[types.Content]:
    """Convert messages to the Google AI format.

    Args:
        messages: The messages to convert.

    Returns:
        The converted messages.
    """
    converters = GoogleConverters
    converted: list[types.Content] = []
    for msg in messages:
        if isinstance(msg, UserAIMessage):
            converted.append(converters.user_ai_message_to_google_msg(msg))
        elif isinstance(msg, AssistantAIMessage):
            converted.append(
                converters.assistant_ai_message_to_google_msg(msg)
            )
        elif isinstance(msg, ToolResponseAIMessage):  # pragma: no branch
            # Tool results become a user-role message carrying a single
            # function_response part.
            response_part = types.Part(
                function_response=types.FunctionResponse(
                    id=msg.tool_call_id,
                    name=msg.tool_name,
                    response=msg.content,
                )
            )
            converted.append(
                types.Content(role="user", parts=[response_part])
            )
    return converted

assistant_ai_message_to_google_msg staticmethod

assistant_ai_message_to_google_msg(
    message: AssistantAIMessage,
) -> Content

Convert an assistant message to the Google AI format.

PARAMETER DESCRIPTION
message

The message to convert.

TYPE: AssistantAIMessage

RETURNS DESCRIPTION
Content

The converted message.

Source code in conatus/models/google/conversion.py
@staticmethod
def assistant_ai_message_to_google_msg(
    message: AssistantAIMessage,
) -> types.Content:
    """Convert an assistant message to the Google AI format.

    Args:
        message: The message to convert.

    Returns:
        The converted message.
    """
    converted_parts: list[types.Part] = []
    for content_part in message.content:
        if isinstance(content_part, AssistantAIMessageContentTextPart):
            converted_parts.append(types.Part(text=content_part.text))
        elif isinstance(
            content_part, AssistantAIMessageContentToolCallPart
        ):
            converted_parts.append(
                types.Part(
                    function_call=types.FunctionCall(
                        id=content_part.tool_call.call_id,
                        name=content_part.tool_call.name,
                        args=content_part.tool_call.arguments_as_dict,
                    )
                )
            )
        elif isinstance(
            content_part, AssistantAIMessageContentReasoningPart
        ):
            # Reasoning is carried as a text part flagged as "thought".
            converted_parts.append(
                types.Part(text=content_part.reasoning, thought=True)
            )
        elif isinstance(
            content_part, AssistantAIMessageContentRefusalPart
        ):  # pragma: no branch
            # No refusal equivalent on Google's side; drop it.
            pass
    return types.Content(
        role="model",
        parts=converted_parts,
    )

user_ai_message_to_google_msg staticmethod

user_ai_message_to_google_msg(
    message: UserAIMessage,
) -> Content

Convert a user message to the Google AI format.

PARAMETER DESCRIPTION
message

The message to convert.

TYPE: UserAIMessage

RETURNS DESCRIPTION
Content

The converted message.

Source code in conatus/models/google/conversion.py
@staticmethod
def user_ai_message_to_google_msg(
    message: UserAIMessage,
) -> types.Content:
    """Convert a user message to the Google AI format.

    Args:
        message: The message to convert.

    Returns:
        The converted message.
    """
    content = message.content
    converted_parts: list[types.Part] = []
    if isinstance(content, str):
        # A bare string becomes a single text part.
        converted_parts.append(types.Part(text=content))
    else:
        for content_part in content:
            if isinstance(content_part, UserAIMessageContentTextPart):
                converted_parts.append(types.Part(text=content_part.text))
            elif isinstance(
                content_part, UserAIMessageContentImagePart
            ):  # pragma: no branch
                # Images arrive base64-encoded; Google wants raw bytes.
                converted_parts.append(
                    types.Part.from_bytes(
                        data=base64.b64decode(content_part.image_data),
                        mime_type="image/jpeg",
                    )
                )
    return types.Content(
        role="user",
        parts=converted_parts,
    )

ai_prompt_tools_to_google_tools staticmethod

ai_prompt_tools_to_google_tools(
    prompt: AIPrompt,
) -> list[FunctionDeclarationDict] | None

Convert tools to the Google AI format.

PARAMETER DESCRIPTION
prompt

The prompt to convert.

TYPE: AIPrompt

RETURNS DESCRIPTION
list[FunctionDeclarationDict] | None

The converted tools.

Source code in conatus/models/google/conversion.py
@staticmethod
def ai_prompt_tools_to_google_tools(
    prompt: AIPrompt,
) -> list[types.FunctionDeclarationDict] | None:
    """Convert tools to the Google AI format.

    Args:
        prompt: The prompt to convert.

    Returns:
        The converted tools.
    """
    if prompt.tools is None:
        return None
    declarations: list[types.FunctionDeclarationDict] = []
    for tool in prompt.tools:
        definition = generate_openai_json_schema(
            json_schema_pydantic_model=tool.json_schema_pydantic_model,
            strict_mode=True,
            remove_refs=True,
        )
        # The generated schema carries "additionalProperties"; strip it
        # before building the Google declaration.
        del definition["parameters"]["additionalProperties"]
        declarations.append(
            types.FunctionDeclarationDict(
                name=definition["name"],
                description=definition["description"],
                parameters=definition["parameters"],  # type: ignore[typeddict-item]  # pyright: ignore[reportArgumentType]
            )
        )
    return declarations

convert_output_schema_to_google_tool staticmethod

convert_output_schema_to_google_tool(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = False
) -> tuple[SchemaDict, bool] | tuple[None, Literal[False]]

Convert an output schema to a Google AI tool.

PARAMETER DESCRIPTION
output_schema

The output schema to convert.

TYPE: TypeAdapter[ParamType] | None

strict_mode

Whether to use strict mode.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
tuple[SchemaDict, bool] | tuple[None, Literal[False]]

A tuple of the converted tool and a boolean indicating whether the conversion was necessary.

Source code in conatus/models/google/conversion.py
@staticmethod
def convert_output_schema_to_google_tool(
    output_schema: TypeAdapter[ParamType] | None,
    *,
    strict_mode: bool = False,
) -> tuple[types.SchemaDict, bool] | tuple[None, Literal[False]]:
    """Convert an output schema to a Google AI tool.

    Args:
        output_schema: The output schema to convert.
        strict_mode: Whether to use strict mode.

    Returns:
        A tuple of the converted tool and a boolean indicating whether
            the conversion was necessary.
    """
    if output_schema is None:
        return None, False
    # Non-object schemas get wrapped into an object; the flag records
    # whether that wrapping happened.
    schema, was_converted = generate_openai_json_schema(
        json_schema_pydantic_model=output_schema,
        strict_mode=strict_mode,
        convert_non_objects_to_objects=True,
        remove_refs=True,
    )
    google_schema = types.SchemaDict(
        **schema["parameters"]  # type: ignore[typeddict-item]  # pyright: ignore[reportArgumentType]
    )
    return google_schema, was_converted

google_response_to_ai_response staticmethod

google_response_to_ai_response(
    response: GenerateContentResponse,
    prompt: AIPrompt[OutputSchemaType],
) -> IncompleteAIResponse[OutputSchemaType]

Convert a Google response to an AI response.

PARAMETER DESCRIPTION
response

The response to convert.

TYPE: GenerateContentResponse

prompt

The prompt used to generate the response.

TYPE: AIPrompt[OutputSchemaType]

RETURNS DESCRIPTION
IncompleteAIResponse[OutputSchemaType]

The converted response.

RAISES DESCRIPTION
ValueError

If the response is invalid (e.g. no candidates found, no content found in the candidate, no parts found in the content, or no content found in the candidate).

Source code in conatus/models/google/conversion.py
@staticmethod
def google_response_to_ai_response(
    response: types.GenerateContentResponse,
    prompt: AIPrompt[OutputSchemaType],
) -> IncompleteAIResponse[OutputSchemaType]:
    """Convert a Google response to an AI response.

    Args:
        response: The response to convert.
        prompt: The prompt used to generate the response.

    Returns:
        The converted response.

    Raises:
        ValueError: If the response is invalid (e.g. no candidates found,
            no content found in the candidate, no parts found in the
            content, or no content found in the candidate).
    """
    if response.candidates is None:
        msg = "No candidates found in the response"
        raise ValueError(msg)
    if len(response.candidates) != 1:
        msg = "Expected exactly one candidate in the response"
        raise ValueError(msg)
    candidate = response.candidates[0]
    if candidate.content is None:
        msg = "No content found in the candidate"
        raise ValueError(msg)
    if candidate.content.parts is None:
        msg = "No parts found in the content"
        raise ValueError(msg)
    message_received = GoogleConverters.goog_response_to_assistant_msg(
        candidate.content.parts
    )
    if response.prompt_feedback is not None:
        # Prompt feedback indicates a (partial) refusal; surface it as
        # the feedback object's JSON dump.
        # BUG FIX: the previous code wrapped the JSON string in a
        # one-element tuple (stray trailing comma), so the refusal text
        # embedded a tuple repr instead of the JSON itself.
        refusal_as_json = response.prompt_feedback.model_dump_json(
            exclude_none=True
        )
        message_received = IncompleteAssistantAIMessage(
            content=message_received.content,
            refusal=f"As JSON: {refusal_as_json}\n",
        )
    return IncompleteAIResponse[OutputSchemaType](
        prompt=prompt,
        uid=response.response_id,
        message_received=message_received,
        usage=GoogleConverters.google_response_usage_to_completion_usage(
            response.usage_metadata,
            model_name=response.model_version,
        ),
    )

goog_response_to_assistant_msg staticmethod

goog_response_to_assistant_msg(
    parts: list[Part],
) -> IncompleteAssistantAIMessage

Convert a Google content to an AI message.

PARAMETER DESCRIPTION
parts

The parts to convert.

TYPE: list[Part]

RETURNS DESCRIPTION
IncompleteAssistantAIMessage

The converted message.

RAISES DESCRIPTION
ValueError

If a function call name is missing.

Source code in conatus/models/google/conversion.py
@staticmethod
def goog_response_to_assistant_msg(
    parts: list[types.Part],
) -> IncompleteAssistantAIMessage:
    """Convert a Google content to an AI message.

    Args:
        parts: The parts to convert.

    Returns:
        The converted message.

    Raises:
        ValueError: If a function call name is missing.
    """
    new_parts: list[
        IncompleteAssistantAIMessageContentPart
        | AssistantAIMessageContentToolCallPart
    ] = []
    # NOTE: The Google API theoretically returns only one attribute per
    # part, and this is how we're supposed to distinguish between the
    # different part types. I don't trust that guarantee, so we assume
    # that multiple attributes can be present (hence independent `if`s
    # rather than `elif`s).
    for part in parts:
        if part.text is not None:
            # Text flagged as a "thought" is reasoning, not output.
            if part.thought is not None and part.thought:
                new_parts.append(
                    IncompleteAssistantAIMessageContentReasoningPart(
                        reasoning=part.text
                    )
                )
            else:
                new_parts.append(
                    IncompleteAssistantAIMessageContentTextPart(
                        text=part.text
                    )
                )
        if part.function_call is not None:
            if part.function_call.name is None:
                msg = "Function call name is required"
                raise ValueError(msg)
            # NOTE(review): the `or ""` below is unreachable after the
            # None check above; kept for type-narrowing purposes.
            new_parts.append(
                IncompleteAssistantAIMessageContentToolCallPart(
                    tool_call=IncompleteAIToolCall(
                        call_id=part.function_call.id,
                        name=part.function_call.name or "",
                        returned_arguments=json.dumps(
                            part.function_call.args or {}
                        ),
                    )
                )
            )
        if part.executable_code is not None:
            # Surface model-generated code as a fenced code block; only
            # Python gets a language tag.
            language = (
                "python"
                if part.executable_code.language is types.Language.PYTHON
                else ""
            )
            new_parts.append(
                IncompleteAssistantAIMessageContentTextPart(
                    text=(
                        f"```{language}\n{part.executable_code.code}\n```\n"
                    )
                )
            )
        if part.code_execution_result is not None:
            # Surface execution results as a fenced block with the
            # outcome noted on the first line.
            new_parts.append(
                IncompleteAssistantAIMessageContentTextPart(
                    text=(
                        f"```\n"
                        f"# Execution status: "
                        f"{part.code_execution_result.outcome}\n"
                        f"{part.code_execution_result.output}\n"
                        f"```\n"
                    )
                )
            )
    return IncompleteAssistantAIMessage(content=new_parts)

google_response_usage_to_completion_usage staticmethod

google_response_usage_to_completion_usage(
    usage_metadata: (
        GenerateContentResponseUsageMetadata | None
    ),
    model_name: str | None = None,
) -> CompletionUsage | None

Convert a Google response usage to a completion usage.

PARAMETER DESCRIPTION
usage_metadata

The usage metadata to convert.

TYPE: GenerateContentResponseUsageMetadata | None

model_name

The model name.

TYPE: str | None DEFAULT: None

RETURNS DESCRIPTION
CompletionUsage | None

The converted usage.

Source code in conatus/models/google/conversion.py
@staticmethod
def google_response_usage_to_completion_usage(
    usage_metadata: types.GenerateContentResponseUsageMetadata | None,
    model_name: str | None = None,
) -> CompletionUsage | None:
    """Convert a Google response usage to a completion usage.

    Args:
        usage_metadata: The usage metadata to convert.
        model_name: The model name.

    Returns:
        The converted usage.
    """
    if usage_metadata is None:
        return None
    # Tool-use prompt tokens have no dedicated field on CompletionUsage,
    # so they ride along in extra_fields when present.
    extra_fields: dict[str, int | None] | None = None
    tool_tokens = usage_metadata.tool_use_prompt_token_count
    if tool_tokens is not None:
        extra_fields = {"tools_token_count": tool_tokens}
    return CompletionUsage(
        model_name=model_name,
        prompt_tokens=usage_metadata.prompt_token_count or 0,
        completion_tokens=usage_metadata.candidates_token_count or 0,
        total_tokens=usage_metadata.total_token_count or 0,
        cached_used_tokens=usage_metadata.cached_content_token_count
        or None,
        cached_created_tokens=None,
        extra_fields=extra_fields,
        usage_was_never_given=False,
        always_override_previous_usage=True,
    )