ccproxy.adapters

Adapter modules for API format conversion.

APIAdapter

Bases: ABC

Abstract base class for API format adapters.

Combines all transformation interfaces to provide a complete adapter for converting between different API formats.

adapt_request abstractmethod async

adapt_request(request)

Convert a request from one API format to another.

Parameters:

    request (dict[str, Any], required): The request data to convert

Returns:

    dict[str, Any]: The converted request data

Raises:

    ValueError: If the request format is invalid or unsupported

Source code in ccproxy/adapters/base.py
@abstractmethod
async def adapt_request(self, request: dict[str, Any]) -> dict[str, Any]:
    """Convert a request from one API format to another.

    Args:
        request: The request data to convert

    Returns:
        The converted request data

    Raises:
        ValueError: If the request format is invalid or unsupported
    """
    pass

adapt_response abstractmethod async

adapt_response(response)

Convert a response from one API format to another.

Parameters:

    response (dict[str, Any], required): The response data to convert

Returns:

    dict[str, Any]: The converted response data

Raises:

    ValueError: If the response format is invalid or unsupported

Source code in ccproxy/adapters/base.py
@abstractmethod
async def adapt_response(self, response: dict[str, Any]) -> dict[str, Any]:
    """Convert a response from one API format to another.

    Args:
        response: The response data to convert

    Returns:
        The converted response data

    Raises:
        ValueError: If the response format is invalid or unsupported
    """
    pass

adapt_stream abstractmethod async

adapt_stream(stream)

Convert a streaming response from one API format to another.

Parameters:

    stream (AsyncIterator[dict[str, Any]], required): The streaming response data to convert

Yields:

    AsyncIterator[dict[str, Any]]: The converted streaming response chunks

Raises:

    ValueError: If the stream format is invalid or unsupported

Source code in ccproxy/adapters/base.py
@abstractmethod
async def adapt_stream(
    self, stream: AsyncIterator[dict[str, Any]]
) -> AsyncIterator[dict[str, Any]]:
    """Convert a streaming response from one API format to another.

    Args:
        stream: The streaming response data to convert

    Yields:
        The converted streaming response chunks

    Raises:
        ValueError: If the stream format is invalid or unsupported
    """
    # This should be implemented as an async generator
    # async def adapt_stream(self, stream):
    #     async for item in stream:
    #         yield transformed_item
    raise NotImplementedError
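
For illustration, a minimal concrete subclass could look like the sketch below. PassthroughAdapter is a hypothetical name, not part of ccproxy; the import path follows the source location noted above.

from collections.abc import AsyncIterator
from typing import Any

from ccproxy.adapters.base import APIAdapter


class PassthroughAdapter(APIAdapter):
    """Identity adapter that returns requests and responses unchanged."""

    async def adapt_request(self, request: dict[str, Any]) -> dict[str, Any]:
        return request

    async def adapt_response(self, response: dict[str, Any]) -> dict[str, Any]:
        return response

    async def adapt_stream(
        self, stream: AsyncIterator[dict[str, Any]]
    ) -> AsyncIterator[dict[str, Any]]:
        # Written as an async generator, as the base class docstring suggests.
        async for chunk in stream:
            yield chunk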

BaseAPIAdapter

BaseAPIAdapter(name)

Bases: APIAdapter

Base implementation with common functionality.

Source code in ccproxy/adapters/base.py
def __init__(self, name: str):
    self.name = name

OpenAIAdapter

OpenAIAdapter()

Bases: APIAdapter

OpenAI API adapter for converting between OpenAI and Anthropic formats.

Source code in ccproxy/adapters/openai/adapter.py
def __init__(self) -> None:
    """Initialize the OpenAI adapter."""
    pass

adapt_request

adapt_request(request)

Convert OpenAI request format to Anthropic format.

Parameters:

    request (dict[str, Any], required): OpenAI format request

Returns:

    dict[str, Any]: Anthropic format request

Raises:

    ValueError: If the request format is invalid or unsupported

Source code in ccproxy/adapters/openai/adapter.py
def adapt_request(self, request: dict[str, Any]) -> dict[str, Any]:
    """Convert OpenAI request format to Anthropic format.

    Args:
        request: OpenAI format request

    Returns:
        Anthropic format request

    Raises:
        ValueError: If the request format is invalid or unsupported
    """
    try:
        # Parse OpenAI request
        openai_req = OpenAIChatCompletionRequest(**request)
    except Exception as e:
        raise ValueError(f"Invalid OpenAI request format: {e}") from e

    # Map OpenAI model to Claude model
    model = map_openai_model_to_claude(openai_req.model)

    # Convert messages
    messages, system_prompt = self._convert_messages_to_anthropic(
        openai_req.messages
    )

    # Build Anthropic request
    anthropic_request = {
        "model": model,
        "messages": messages,
        "max_tokens": openai_req.max_tokens or 4096,
    }

    # Add system prompt if present
    if system_prompt:
        anthropic_request["system"] = system_prompt

    # Add optional parameters
    if openai_req.temperature is not None:
        anthropic_request["temperature"] = openai_req.temperature

    if openai_req.top_p is not None:
        anthropic_request["top_p"] = openai_req.top_p

    if openai_req.stream is not None:
        anthropic_request["stream"] = openai_req.stream

    if openai_req.stop is not None:
        if isinstance(openai_req.stop, str):
            anthropic_request["stop_sequences"] = [openai_req.stop]
        else:
            anthropic_request["stop_sequences"] = openai_req.stop

    # Handle metadata - combine user field and metadata
    metadata = {}
    if openai_req.user:
        metadata["user_id"] = openai_req.user
    if openai_req.metadata:
        metadata.update(openai_req.metadata)
    if metadata:
        anthropic_request["metadata"] = metadata

    # Handle response format - add to system prompt for JSON mode
    if openai_req.response_format:
        format_type = (
            openai_req.response_format.type if openai_req.response_format else None
        )

        if format_type == "json_object" and system_prompt is not None:
            system_prompt += "\nYou must respond with valid JSON only."
            anthropic_request["system"] = system_prompt
        elif format_type == "json_schema" and system_prompt is not None:
            # For JSON schema, we can add more specific instructions
            if openai_req.response_format and hasattr(
                openai_req.response_format, "json_schema"
            ):
                system_prompt += f"\nYou must respond with valid JSON that conforms to this schema: {openai_req.response_format.json_schema}"
            anthropic_request["system"] = system_prompt

    # Handle reasoning_effort (o1 models) -> thinking configuration
    # Automatically enable thinking for o1 models even without explicit reasoning_effort
    if (
        openai_req.reasoning_effort
        or openai_req.model.startswith("o1")
        or openai_req.model.startswith("o3")
    ):
        # Map reasoning effort to thinking tokens
        thinking_tokens_map = {
            "low": 1000,
            "medium": 5000,
            "high": 10000,
        }

        # Default thinking tokens based on model if reasoning_effort not specified
        default_thinking_tokens = 5000  # medium by default
        if openai_req.model.startswith("o3"):
            default_thinking_tokens = 10000  # high for o3 models
        elif openai_req.model == "o1-mini":
            default_thinking_tokens = 3000  # lower for mini model

        thinking_tokens = (
            thinking_tokens_map.get(
                openai_req.reasoning_effort, default_thinking_tokens
            )
            if openai_req.reasoning_effort
            else default_thinking_tokens
        )

        anthropic_request["thinking"] = {
            "type": "enabled",
            "budget_tokens": thinking_tokens,
        }

        # Ensure max_tokens is greater than budget_tokens
        current_max_tokens = cast(int, anthropic_request.get("max_tokens", 4096))
        if current_max_tokens <= thinking_tokens:
            # Set max_tokens to be 2x thinking tokens + some buffer for response
            anthropic_request["max_tokens"] = thinking_tokens + max(
                thinking_tokens, 4096
            )
            logger.debug(
                "max_tokens_adjusted_for_thinking",
                original_max_tokens=current_max_tokens,
                thinking_tokens=thinking_tokens,
                new_max_tokens=anthropic_request["max_tokens"],
                operation="adapt_request",
            )

        # When thinking is enabled, temperature must be 1.0
        if (
            anthropic_request.get("temperature") is not None
            and anthropic_request["temperature"] != 1.0
        ):
            logger.debug(
                "temperature_adjusted_for_thinking",
                original_temperature=anthropic_request["temperature"],
                new_temperature=1.0,
                operation="adapt_request",
            )
            anthropic_request["temperature"] = 1.0
        elif "temperature" not in anthropic_request:
            # Set default temperature to 1.0 for thinking mode
            anthropic_request["temperature"] = 1.0

        logger.debug(
            "thinking_enabled",
            reasoning_effort=openai_req.reasoning_effort,
            model=openai_req.model,
            thinking_tokens=thinking_tokens,
            temperature=anthropic_request["temperature"],
            operation="adapt_request",
        )

    # Note: seed, logprobs, top_logprobs, and store don't have direct Anthropic equivalents
    if openai_req.seed is not None:
        logger.debug(
            "unsupported_parameter_ignored",
            parameter="seed",
            value=openai_req.seed,
            operation="adapt_request",
        )
    if openai_req.logprobs or openai_req.top_logprobs:
        logger.debug(
            "unsupported_parameters_ignored",
            parameters=["logprobs", "top_logprobs"],
            logprobs=openai_req.logprobs,
            top_logprobs=openai_req.top_logprobs,
            operation="adapt_request",
        )
    if openai_req.store:
        logger.debug(
            "unsupported_parameter_ignored",
            parameter="store",
            value=openai_req.store,
            operation="adapt_request",
        )

    # Handle tools/functions
    if openai_req.tools:
        anthropic_request["tools"] = self._convert_tools_to_anthropic(
            openai_req.tools
        )
    elif openai_req.functions:
        # Convert deprecated functions to tools
        anthropic_request["tools"] = self._convert_functions_to_anthropic(
            openai_req.functions
        )

    if openai_req.tool_choice:
        # Convert tool choice - can be string or OpenAIToolChoice object
        if isinstance(openai_req.tool_choice, str):
            anthropic_request["tool_choice"] = (
                self._convert_tool_choice_to_anthropic(openai_req.tool_choice)
            )
        else:
            # Convert OpenAIToolChoice object to dict
            tool_choice_dict = {
                "type": openai_req.tool_choice.type,
                "function": openai_req.tool_choice.function,
            }
            anthropic_request["tool_choice"] = (
                self._convert_tool_choice_to_anthropic(tool_choice_dict)
            )
    elif openai_req.function_call:
        # Convert deprecated function_call to tool_choice
        anthropic_request["tool_choice"] = self._convert_function_call_to_anthropic(
            openai_req.function_call
        )

    logger.debug(
        "format_conversion_completed",
        from_format="openai",
        to_format="anthropic",
        original_model=openai_req.model,
        anthropic_model=anthropic_request.get("model"),
        has_tools=bool(anthropic_request.get("tools")),
        has_system=bool(anthropic_request.get("system")),
        message_count=len(cast(list[Any], anthropic_request["messages"])),
        operation="adapt_request",
    )
    return anthropic_request
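
A usage sketch, assuming the import path from the source location above; the request values are illustrative, and the expected thinking and temperature values follow from the o1 handling shown in the code:

from ccproxy.adapters.openai.adapter import OpenAIAdapter

adapter = OpenAIAdapter()

openai_request = {
    "model": "o1-mini",
    "messages": [{"role": "user", "content": "Summarize this repository."}],
    "temperature": 0.2,
}

anthropic_request = adapter.adapt_request(openai_request)

# Per the logic above: an o1 model enables thinking with its default budget
# (3000 tokens for o1-mini), and temperature is forced to 1.0 while
# thinking is enabled.
assert anthropic_request["thinking"] == {"type": "enabled", "budget_tokens": 3000}
assert anthropic_request["temperature"] == 1.0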

adapt_response

adapt_response(response)

Convert Anthropic response format to OpenAI format.

Parameters:

    response (dict[str, Any], required): Anthropic format response

Returns:

    dict[str, Any]: OpenAI format response

Raises:

    ValueError: If the response format is invalid or unsupported

Source code in ccproxy/adapters/openai/adapter.py
def adapt_response(self, response: dict[str, Any]) -> dict[str, Any]:
    """Convert Anthropic response format to OpenAI format.

    Args:
        response: Anthropic format response

    Returns:
        OpenAI format response

    Raises:
        ValueError: If the response format is invalid or unsupported
    """
    try:
        # Extract original model from response metadata if available
        original_model = response.get("model", "gpt-4")

        # Generate response ID
        request_id = generate_openai_response_id()

        # Convert content
        content = ""
        tool_calls = []

        if "content" in response and response["content"]:
            for block in response["content"]:
                if block.get("type") == "text":
                    content += block.get("text", "")
                elif block.get("type") == "thinking":
                    # Handle thinking blocks - we can include them with a marker
                    thinking_text = block.get("thinking", "")
                    signature = block.get("signature")
                    if thinking_text:
                        content += f'<thinking signature="{signature}">{thinking_text}</thinking>'
                elif block.get("type") == "tool_use":
                    tool_calls.append(format_openai_tool_call(block))
                else:
                    logger.warning(
                        "unsupported_content_block_type", type=block.get("type")
                    )

        # Create OpenAI message
        # When there are tool calls but no content, use empty string instead of None
        # Otherwise, if content is empty string, convert to None
        final_content: str | None = content
        if tool_calls and not content:
            final_content = ""
        elif content == "":
            final_content = None

        message = OpenAIResponseMessage(
            role="assistant",
            content=final_content,
            tool_calls=tool_calls if tool_calls else None,
        )

        # Map stop reason
        finish_reason = self._convert_stop_reason_to_openai(
            response.get("stop_reason")
        )

        # Ensure finish_reason is a valid literal type
        if finish_reason not in ["stop", "length", "tool_calls", "content_filter"]:
            finish_reason = "stop"

        # Cast to proper literal type
        valid_finish_reason = cast(
            Literal["stop", "length", "tool_calls", "content_filter"], finish_reason
        )

        # Create choice
        choice = OpenAIChoice(
            index=0,
            message=message,
            finish_reason=valid_finish_reason,
            logprobs=None,  # Anthropic doesn't support logprobs
        )

        # Create usage
        usage_info = response.get("usage", {})
        usage = OpenAIUsage(
            prompt_tokens=usage_info.get("input_tokens", 0),
            completion_tokens=usage_info.get("output_tokens", 0),
            total_tokens=usage_info.get("input_tokens", 0)
            + usage_info.get("output_tokens", 0),
        )

        # Create OpenAI response
        openai_response = OpenAIChatCompletionResponse(
            id=request_id,
            object="chat.completion",
            created=int(time.time()),
            model=original_model,
            choices=[choice],
            usage=usage,
            system_fingerprint=generate_openai_system_fingerprint(),
        )

        logger.debug(
            "format_conversion_completed",
            from_format="anthropic",
            to_format="openai",
            response_id=request_id,
            original_model=original_model,
            finish_reason=valid_finish_reason,
            content_length=len(content) if content else 0,
            tool_calls_count=len(tool_calls),
            input_tokens=usage_info.get("input_tokens", 0),
            output_tokens=usage_info.get("output_tokens", 0),
            operation="adapt_response",
            choice=choice,
        )
        return openai_response.model_dump()

    except Exception as e:
        raise ValueError(f"Invalid Anthropic response format: {e}") from e
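
A usage sketch with a hand-written Anthropic payload (the model id is illustrative); the assertions follow from the content and usage handling shown above:

from ccproxy.adapters.openai.adapter import OpenAIAdapter

anthropic_response = {
    "model": "claude-3-5-sonnet-20241022",
    "content": [{"type": "text", "text": "Hello!"}],
    "stop_reason": "end_turn",
    "usage": {"input_tokens": 12, "output_tokens": 3},
}

openai_response = OpenAIAdapter().adapt_response(anthropic_response)

# The result is an OpenAI chat.completion-shaped dict.
assert openai_response["choices"][0]["message"]["content"] == "Hello!"
assert openai_response["usage"]["total_tokens"] == 15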

adapt_stream async

adapt_stream(stream)

Convert Anthropic streaming response to OpenAI streaming format.

Parameters:

    stream (AsyncIterator[dict[str, Any]], required): Anthropic streaming response

Yields:

    AsyncIterator[dict[str, Any]]: OpenAI format streaming chunks

Raises:

    ValueError: If the stream format is invalid or unsupported

Source code in ccproxy/adapters/openai/adapter.py
async def adapt_stream(
    self, stream: AsyncIterator[dict[str, Any]]
) -> AsyncIterator[dict[str, Any]]:
    """Convert Anthropic streaming response to OpenAI streaming format.

    Args:
        stream: Anthropic streaming response

    Yields:
        OpenAI format streaming chunks

    Raises:
        ValueError: If the stream format is invalid or unsupported
    """
    # Create stream processor
    processor = OpenAIStreamProcessor(
        enable_usage=True,
        enable_tool_calls=True,
        enable_text_chunking=False,  # Keep text as-is for compatibility
    )

    try:
        # Process the stream and parse SSE format back to dict objects
        async for sse_chunk in processor.process_stream(stream):
            if sse_chunk.startswith("data: "):
                data_str = sse_chunk[6:].strip()
                if data_str and data_str != "[DONE]":
                    try:
                        yield json.loads(data_str)
                    except json.JSONDecodeError:
                        logger.warning(
                            "streaming_chunk_parse_failed",
                            chunk_data=data_str[:100] + "..."
                            if len(data_str) > 100
                            else data_str,
                            operation="adapt_stream",
                        )
                        continue
    except Exception as e:
        raise ValueError(f"Error processing streaming response: {e}") from e
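
A consumption sketch; anthropic_events is a hypothetical stand-in, and a real stream would carry the full Anthropic event sequence (message_start through message_stop), which the stream processor may require:

import asyncio
from collections.abc import AsyncIterator
from typing import Any

from ccproxy.adapters.openai.adapter import OpenAIAdapter


async def demo() -> None:
    async def anthropic_events() -> AsyncIterator[dict[str, Any]]:
        # Stand-in for a real Anthropic event stream.
        yield {
            "type": "content_block_delta",
            "index": 0,
            "delta": {"type": "text_delta", "text": "Hi"},
        }

    adapter = OpenAIAdapter()
    async for chunk in adapter.adapt_stream(anthropic_events()):
        print(chunk)  # OpenAI chat.completion.chunk-shaped dicts


asyncio.run(demo())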

adapt_error

adapt_error(error_body)

Convert Anthropic error format to OpenAI error format.

Parameters:

    error_body (dict[str, Any], required): Anthropic error response

Returns:

    dict[str, Any]: OpenAI-formatted error response

Source code in ccproxy/adapters/openai/adapter.py
def adapt_error(self, error_body: dict[str, Any]) -> dict[str, Any]:
    """Convert Anthropic error format to OpenAI error format.

    Args:
        error_body: Anthropic error response

    Returns:
        OpenAI-formatted error response
    """
    # Extract error details from Anthropic format
    anthropic_error = error_body.get("error", {})
    error_type = anthropic_error.get("type", "internal_server_error")
    error_message = anthropic_error.get("message", "An error occurred")

    # Map Anthropic error types to OpenAI error types
    error_type_mapping = {
        "invalid_request_error": "invalid_request_error",
        "authentication_error": "invalid_request_error",
        "permission_error": "invalid_request_error",
        "not_found_error": "invalid_request_error",
        "rate_limit_error": "rate_limit_error",
        "internal_server_error": "internal_server_error",
        "overloaded_error": "server_error",
    }

    openai_error_type = error_type_mapping.get(error_type, "invalid_request_error")

    # Return OpenAI-formatted error
    return {
        "error": {
            "message": error_message,
            "type": openai_error_type,
            "code": error_type,  # Preserve original error type as code
        }
    }
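
The mapping is fully determined by the table above, so the behavior can be checked directly (a small sketch, assuming the import path from the source location above):

from ccproxy.adapters.openai.adapter import OpenAIAdapter

openai_error = OpenAIAdapter().adapt_error(
    {"error": {"type": "overloaded_error", "message": "Overloaded"}}
)

assert openai_error == {
    "error": {
        "message": "Overloaded",
        "type": "server_error",      # mapped from overloaded_error
        "code": "overloaded_error",  # original Anthropic type kept as code
    }
}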