
ccproxy.claude_sdk.converter

Message format converter for Claude SDK interactions.

MessageConverter

Handles conversion between Anthropic API format and Claude SDK format.

format_messages_to_prompt staticmethod

format_messages_to_prompt(messages)

Convert Anthropic messages format to a single prompt string.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `messages` | `list[dict[str, Any]]` | List of messages in Anthropic format | *required* |

Returns:

| Type | Description |
| --- | --- |
| `str` | Single prompt string formatted for Claude SDK |

Source code in ccproxy/claude_sdk/converter.py
@staticmethod
def format_messages_to_prompt(messages: list[dict[str, Any]]) -> str:
    """
    Convert Anthropic messages format to a single prompt string.

    Args:
        messages: List of messages in Anthropic format

    Returns:
        Single prompt string formatted for Claude SDK
    """
    prompt_parts = []

    for message in messages:
        role = message.get("role", "")
        content = message.get("content", "")

        if isinstance(content, list):
            # Handle content blocks
            text_parts = []
            for block in content:
                if block.get("type") == "text":
                    text_parts.append(block.get("text", ""))
            content = " ".join(text_parts)

        if role == "user":
            prompt_parts.append(f"Human: {content}")
        elif role == "assistant":
            prompt_parts.append(f"Assistant: {content}")
        elif role == "system":
            # System messages are handled via options
            continue

    return "\n\n".join(prompt_parts)

convert_to_anthropic_response staticmethod

convert_to_anthropic_response(
    assistant_message,
    result_message,
    model,
    mode=FORWARD,
    pretty_format=True,
)

Convert Claude SDK messages to Anthropic API response format.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `assistant_message` | `AssistantMessage` | The assistant message from Claude SDK | *required* |
| `result_message` | `ResultMessage` | The result message from Claude SDK | *required* |
| `model` | `str` | The model name used | *required* |
| `mode` | `SDKMessageMode` | System message handling mode (forward, ignore, formatted) | `FORWARD` |
| `pretty_format` | `bool` | Whether to use pretty formatting (true: indented JSON with newlines, false: compact with escaped content) | `True` |

Returns:

| Type | Description |
| --- | --- |
| `MessageResponse` | Response in Anthropic API format |

Source code in ccproxy/claude_sdk/converter.py
@staticmethod
def convert_to_anthropic_response(
    assistant_message: sdk_models.AssistantMessage,
    result_message: sdk_models.ResultMessage,
    model: str,
    mode: SDKMessageMode = SDKMessageMode.FORWARD,
    pretty_format: bool = True,
) -> "MessageResponse":
    """
    Convert Claude SDK messages to Anthropic API response format.

    Args:
        assistant_message: The assistant message from Claude SDK
        result_message: The result message from Claude SDK
        model: The model name used
        mode: System message handling mode (forward, ignore, formatted)
        pretty_format: Whether to use pretty formatting (true: indented JSON with newlines, false: compact with escaped content)

    Returns:
        Response in Anthropic API format
    """
    # Extract token usage from result message
    usage = result_message.usage_model

    # Log token extraction for debugging
    # logger.debug(
    #     "assistant_message_content",
    #     content_blocks=[block.type for block in assistant_message.content],
    #     content_count=len(assistant_message.content),
    # )

    logger.debug(
        "token_usage_extracted",
        input_tokens=usage.input_tokens,
        output_tokens=usage.output_tokens,
        cache_read_tokens=usage.cache_read_input_tokens,
        cache_write_tokens=usage.cache_creation_input_tokens,
        source="claude_sdk",
    )

    # Build usage information
    usage_info = usage.model_dump(mode="json")

    # Add cost information if available
    if result_message.total_cost_usd is not None:
        usage_info["cost_usd"] = result_message.total_cost_usd

    # Convert content blocks to Anthropic format, preserving thinking blocks
    content_blocks = []

    for block in assistant_message.content:
        if isinstance(block, sdk_models.TextBlock):
            # Handle text content directly without thinking block parsing
            text = block.text
            if mode == SDKMessageMode.FORMATTED:
                escaped_text = MessageConverter._escape_content_for_xml(
                    text, pretty_format
                )
                formatted_text = (
                    f"<text>\n{escaped_text}\n</text>\n"
                    if pretty_format
                    else f"<text>{escaped_text}</text>"
                )
                content_blocks.append({"type": "text", "text": formatted_text})
            else:
                content_blocks.append({"type": "text", "text": text})

        elif isinstance(block, sdk_models.ToolUseBlock):
            if mode == SDKMessageMode.FORWARD:
                content_blocks.append(block.to_sdk_block())
            elif mode == SDKMessageMode.FORMATTED:
                tool_data = block.model_dump(mode="json")
                formatted_json = MessageConverter._format_json_data(
                    tool_data, pretty_format
                )
                escaped_json = MessageConverter._escape_content_for_xml(
                    formatted_json, pretty_format
                )
                formatted_text = (
                    f"<tool_use_sdk>\n{escaped_json}\n</tool_use_sdk>\n"
                    if pretty_format
                    else f"<tool_use_sdk>{escaped_json}</tool_use_sdk>"
                )
                content_blocks.append({"type": "text", "text": formatted_text})

        elif isinstance(block, sdk_models.ToolResultBlock):
            if mode == SDKMessageMode.FORWARD:
                content_blocks.append(block.to_sdk_block())
            elif mode == SDKMessageMode.FORMATTED:
                tool_result_data = block.model_dump(mode="json")
                formatted_json = MessageConverter._format_json_data(
                    tool_result_data, pretty_format
                )
                escaped_json = MessageConverter._escape_content_for_xml(
                    formatted_json, pretty_format
                )
                formatted_text = (
                    f"<tool_result_sdk>\n{escaped_json}\n</tool_result_sdk>\n"
                    if pretty_format
                    else f"<tool_result_sdk>{escaped_json}</tool_result_sdk>"
                )
                content_blocks.append({"type": "text", "text": formatted_text})

        elif isinstance(block, sdk_models.ThinkingBlock):
            if mode == SDKMessageMode.FORWARD:
                thinking_block = {
                    "type": "thinking",
                    "thinking": block.thinking,
                }
                if block.signature is not None:
                    thinking_block["signature"] = block.signature
                content_blocks.append(thinking_block)
            elif mode == SDKMessageMode.FORMATTED:
                # Format thinking block with signature in XML tag attribute
                signature_attr = (
                    f' signature="{block.signature}"' if block.signature else ""
                )
                if pretty_format:
                    escaped_text = MessageConverter._escape_content_for_xml(
                        block.thinking, pretty_format
                    )
                    formatted_text = (
                        f"<thinking{signature_attr}>\n{escaped_text}\n</thinking>\n"
                    )
                else:
                    escaped_text = MessageConverter._escape_content_for_xml(
                        block.thinking, pretty_format
                    )
                    formatted_text = (
                        f"<thinking{signature_attr}>{escaped_text}</thinking>"
                    )
                content_blocks.append({"type": "text", "text": formatted_text})

    return MessageResponse.model_validate(
        {
            "id": f"msg_{result_message.session_id}",
            "type": "message",
            "role": "assistant",
            "content": content_blocks,
            "model": model,
            "stop_reason": result_message.stop_reason,
            "stop_sequence": None,
            "usage": usage_info,
        }
    )
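
Example: a sketch of converting a completed exchange, assuming assistant_msg and result_msg were obtained from an earlier Claude SDK query (their construction is not shown) and that SDKMessageMode is importable from this module; the model name is illustrative.

from ccproxy.claude_sdk.converter import MessageConverter, SDKMessageMode

def to_anthropic_response(assistant_msg, result_msg):
    # FORMATTED mode renders tool_use/tool_result/thinking blocks as
    # <tool_use_sdk>/<tool_result_sdk>/<thinking> text blocks instead of
    # forwarding them as native blocks.
    return MessageConverter.convert_to_anthropic_response(
        assistant_message=assistant_msg,
        result_message=result_msg,
        model="claude-sonnet-4",  # illustrative model name
        mode=SDKMessageMode.FORMATTED,
        pretty_format=True,
    )

# Per the source above, the response id is "msg_" + result_msg.session_id, and
# usage includes cost_usd when result_msg.total_cost_usd is set.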

create_streaming_start_chunks staticmethod

create_streaming_start_chunks(
    message_id, model, input_tokens=0
)

Create the initial streaming chunks for Anthropic API format.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `message_id` | `str` | The message ID | *required* |
| `model` | `str` | The model name | *required* |
| `input_tokens` | `int` | Number of input tokens for the request | `0` |

Returns:

| Type | Description |
| --- | --- |
| `list[tuple[str, dict[str, Any]]]` | List of tuples (event_type, chunk) for initial streaming chunks |

Source code in ccproxy/claude_sdk/converter.py
@staticmethod
def create_streaming_start_chunks(
    message_id: str, model: str, input_tokens: int = 0
) -> list[tuple[str, dict[str, Any]]]:
    """
    Create the initial streaming chunks for Anthropic API format.

    Args:
        message_id: The message ID
        model: The model name
        input_tokens: Number of input tokens for the request

    Returns:
        List of tuples (event_type, chunk) for initial streaming chunks
    """
    return [
        # First, send message_start with event type
        (
            "message_start",
            {
                "type": "message_start",
                "message": {
                    "id": message_id,
                    "type": "message",
                    "role": "assistant",
                    "model": model,
                    "content": [],
                    "stop_reason": None,
                    "stop_sequence": None,
                    "usage": {
                        "input_tokens": input_tokens,
                        "cache_creation_input_tokens": 0,
                        "cache_read_input_tokens": 0,
                        "output_tokens": 1,
                        "service_tier": "standard",
                    },
                },
            },
        ),
    ]
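
Example: a sketch of emitting the start chunk; the SSE-style serialization shown here is an assumption for illustration, not necessarily the transport ccproxy uses.

import json

from ccproxy.claude_sdk.converter import MessageConverter

start_chunks = MessageConverter.create_streaming_start_chunks(
    message_id="msg_example",  # illustrative id
    model="claude-sonnet-4",   # illustrative model name
    input_tokens=42,
)
for event_type, chunk in start_chunks:
    # Render each (event_type, chunk) pair as a server-sent event.
    print(f"event: {event_type}")
    print(f"data: {json.dumps(chunk)}")
    print()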

create_streaming_delta_chunk staticmethod

create_streaming_delta_chunk(text)

Create a streaming delta chunk for Anthropic API format.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `text` | `str` | The text content to include | *required* |

Returns:

| Type | Description |
| --- | --- |
| `tuple[str, dict[str, Any]]` | Tuple of (event_type, chunk) |

Source code in ccproxy/claude_sdk/converter.py
@staticmethod
def create_streaming_delta_chunk(text: str) -> tuple[str, dict[str, Any]]:
    """
    Create a streaming delta chunk for Anthropic API format.

    Args:
        text: The text content to include

    Returns:
        Tuple of (event_type, chunk)
    """
    return (
        "content_block_delta",
        {
            "type": "content_block_delta",
            "index": 0,
            "delta": {"type": "text_delta", "text": text},
        },
    )
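
Example: each delta targets content block index 0, as shown by the returned chunk.

from ccproxy.claude_sdk.converter import MessageConverter

event_type, chunk = MessageConverter.create_streaming_delta_chunk("Hello")
# event_type == "content_block_delta"
# chunk == {
#     "type": "content_block_delta",
#     "index": 0,
#     "delta": {"type": "text_delta", "text": "Hello"},
# }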

create_streaming_end_chunks staticmethod

create_streaming_end_chunks(
    stop_reason="end_turn", stop_sequence=None
)

Create the final streaming chunks for Anthropic API format.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `stop_reason` | `str` | The reason for stopping | `'end_turn'` |
| `stop_sequence` | `str \| None` | The stop sequence used (if any) | `None` |

Returns:

| Type | Description |
| --- | --- |
| `list[tuple[str, dict[str, Any]]]` | List of tuples (event_type, chunk) for final streaming chunks |

Source code in ccproxy/claude_sdk/converter.py
@staticmethod
def create_streaming_end_chunks(
    stop_reason: str = "end_turn", stop_sequence: str | None = None
) -> list[tuple[str, dict[str, Any]]]:
    """
    Create the final streaming chunks for Anthropic API format.

    Args:
        stop_reason: The reason for stopping
        stop_sequence: The stop sequence used (if any)

    Returns:
        List of tuples (event_type, chunk) for final streaming chunks
    """
    return [
        # Then, send message_delta with stop reason and usage
        (
            "message_delta",
            {
                "type": "message_delta",
                "delta": {
                    "stop_reason": stop_reason,
                    "stop_sequence": stop_sequence,
                },
                "usage": {"output_tokens": 0},
            },
        ),
        # Finally, send message_stop
        ("message_stop", {"type": "message_stop"}),
    ]
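
Example: closing a stream with a non-default stop reason.

from ccproxy.claude_sdk.converter import MessageConverter

end_chunks = MessageConverter.create_streaming_end_chunks(stop_reason="max_tokens")
# end_chunks[0] == (
#     "message_delta",
#     {
#         "type": "message_delta",
#         "delta": {"stop_reason": "max_tokens", "stop_sequence": None},
#         "usage": {"output_tokens": 0},
#     },
# )
# end_chunks[1] == ("message_stop", {"type": "message_stop"})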

create_ping_chunk staticmethod

create_ping_chunk()

Create a ping chunk for keeping the connection alive.

Returns:

| Type | Description |
| --- | --- |
| `tuple[str, dict[str, Any]]` | Tuple of (event_type, chunk) |

Source code in ccproxy/claude_sdk/converter.py
@staticmethod
def create_ping_chunk() -> tuple[str, dict[str, Any]]:
    """
    Create a ping chunk for keeping the connection alive.

    Returns:
        Tuple of (event_type, chunk)
    """
    return ("ping", {"type": "ping"})