ccproxy.adapters.openai

OpenAI adapter module for API format conversion.

This module provides the OpenAI adapter implementation for converting between OpenAI and Anthropic API formats.
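
A minimal usage sketch (the import path is inferred from the source file locations shown on this page and may differ in practice):

from ccproxy.adapters.openai.adapter import OpenAIAdapter

adapter = OpenAIAdapter()

# OpenAI Chat Completions style request
openai_request = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}],
    "temperature": 0.2,
}

# Convert to Anthropic Messages format before forwarding upstream
anthropic_request = adapter.adapt_request(openai_request)

# ...call the Anthropic API with anthropic_request, then convert back:
# openai_response = adapter.adapt_response(anthropic_response)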

OpenAIAdapter

OpenAIAdapter()

Bases: APIAdapter

OpenAI API adapter for converting between OpenAI and Anthropic formats.

Source code in ccproxy/adapters/openai/adapter.py
def __init__(self) -> None:
    """Initialize the OpenAI adapter."""
    pass

adapt_request

adapt_request(request)

Convert OpenAI request format to Anthropic format.

Parameters:

    request (dict[str, Any]): OpenAI format request. Required.

Returns:

    dict[str, Any]: Anthropic format request.

Raises:

    ValueError: If the request format is invalid or unsupported.

Source code in ccproxy/adapters/openai/adapter.py
def adapt_request(self, request: dict[str, Any]) -> dict[str, Any]:
    """Convert OpenAI request format to Anthropic format.

    Args:
        request: OpenAI format request

    Returns:
        Anthropic format request

    Raises:
        ValueError: If the request format is invalid or unsupported
    """
    try:
        # Parse OpenAI request
        openai_req = OpenAIChatCompletionRequest(**request)
    except Exception as e:
        raise ValueError(f"Invalid OpenAI request format: {e}") from e

    # Map OpenAI model to Claude model
    model = map_openai_model_to_claude(openai_req.model)

    # Convert messages
    messages, system_prompt = self._convert_messages_to_anthropic(
        openai_req.messages
    )

    # Build Anthropic request
    anthropic_request = {
        "model": model,
        "messages": messages,
        "max_tokens": openai_req.max_tokens or 4096,
    }

    # Add system prompt if present
    if system_prompt:
        anthropic_request["system"] = system_prompt

    # Add optional parameters
    if openai_req.temperature is not None:
        anthropic_request["temperature"] = openai_req.temperature

    if openai_req.top_p is not None:
        anthropic_request["top_p"] = openai_req.top_p

    if openai_req.stream is not None:
        anthropic_request["stream"] = openai_req.stream

    if openai_req.stop is not None:
        if isinstance(openai_req.stop, str):
            anthropic_request["stop_sequences"] = [openai_req.stop]
        else:
            anthropic_request["stop_sequences"] = openai_req.stop

    # Handle metadata - combine user field and metadata
    metadata = {}
    if openai_req.user:
        metadata["user_id"] = openai_req.user
    if openai_req.metadata:
        metadata.update(openai_req.metadata)
    if metadata:
        anthropic_request["metadata"] = metadata

    # Handle response format - add to system prompt for JSON mode
    if openai_req.response_format:
        format_type = (
            openai_req.response_format.type if openai_req.response_format else None
        )

        if format_type == "json_object" and system_prompt is not None:
            system_prompt += "\nYou must respond with valid JSON only."
            anthropic_request["system"] = system_prompt
        elif format_type == "json_schema" and system_prompt is not None:
            # For JSON schema, we can add more specific instructions
            if openai_req.response_format and hasattr(
                openai_req.response_format, "json_schema"
            ):
                system_prompt += f"\nYou must respond with valid JSON that conforms to this schema: {openai_req.response_format.json_schema}"
            anthropic_request["system"] = system_prompt

    # Handle reasoning_effort (o1 models) -> thinking configuration
    # Automatically enable thinking for o1 models even without explicit reasoning_effort
    if (
        openai_req.reasoning_effort
        or openai_req.model.startswith("o1")
        or openai_req.model.startswith("o3")
    ):
        # Map reasoning effort to thinking tokens
        thinking_tokens_map = {
            "low": 1000,
            "medium": 5000,
            "high": 10000,
        }

        # Default thinking tokens based on model if reasoning_effort not specified
        default_thinking_tokens = 5000  # medium by default
        if openai_req.model.startswith("o3"):
            default_thinking_tokens = 10000  # high for o3 models
        elif openai_req.model == "o1-mini":
            default_thinking_tokens = 3000  # lower for mini model

        thinking_tokens = (
            thinking_tokens_map.get(
                openai_req.reasoning_effort, default_thinking_tokens
            )
            if openai_req.reasoning_effort
            else default_thinking_tokens
        )

        anthropic_request["thinking"] = {
            "type": "enabled",
            "budget_tokens": thinking_tokens,
        }

        # Ensure max_tokens is greater than budget_tokens
        current_max_tokens = cast(int, anthropic_request.get("max_tokens", 4096))
        if current_max_tokens <= thinking_tokens:
            # Set max_tokens to be 2x thinking tokens + some buffer for response
            anthropic_request["max_tokens"] = thinking_tokens + max(
                thinking_tokens, 4096
            )
            logger.debug(
                "max_tokens_adjusted_for_thinking",
                original_max_tokens=current_max_tokens,
                thinking_tokens=thinking_tokens,
                new_max_tokens=anthropic_request["max_tokens"],
                operation="adapt_request",
            )

        # When thinking is enabled, temperature must be 1.0
        if (
            anthropic_request.get("temperature") is not None
            and anthropic_request["temperature"] != 1.0
        ):
            logger.debug(
                "temperature_adjusted_for_thinking",
                original_temperature=anthropic_request["temperature"],
                new_temperature=1.0,
                operation="adapt_request",
            )
            anthropic_request["temperature"] = 1.0
        elif "temperature" not in anthropic_request:
            # Set default temperature to 1.0 for thinking mode
            anthropic_request["temperature"] = 1.0

        logger.debug(
            "thinking_enabled",
            reasoning_effort=openai_req.reasoning_effort,
            model=openai_req.model,
            thinking_tokens=thinking_tokens,
            temperature=anthropic_request["temperature"],
            operation="adapt_request",
        )

    # Note: seed, logprobs, top_logprobs, and store don't have direct Anthropic equivalents
    if openai_req.seed is not None:
        logger.debug(
            "unsupported_parameter_ignored",
            parameter="seed",
            value=openai_req.seed,
            operation="adapt_request",
        )
    if openai_req.logprobs or openai_req.top_logprobs:
        logger.debug(
            "unsupported_parameters_ignored",
            parameters=["logprobs", "top_logprobs"],
            logprobs=openai_req.logprobs,
            top_logprobs=openai_req.top_logprobs,
            operation="adapt_request",
        )
    if openai_req.store:
        logger.debug(
            "unsupported_parameter_ignored",
            parameter="store",
            value=openai_req.store,
            operation="adapt_request",
        )

    # Handle tools/functions
    if openai_req.tools:
        anthropic_request["tools"] = self._convert_tools_to_anthropic(
            openai_req.tools
        )
    elif openai_req.functions:
        # Convert deprecated functions to tools
        anthropic_request["tools"] = self._convert_functions_to_anthropic(
            openai_req.functions
        )

    if openai_req.tool_choice:
        # Convert tool choice - can be string or OpenAIToolChoice object
        if isinstance(openai_req.tool_choice, str):
            anthropic_request["tool_choice"] = (
                self._convert_tool_choice_to_anthropic(openai_req.tool_choice)
            )
        else:
            # Convert OpenAIToolChoice object to dict
            tool_choice_dict = {
                "type": openai_req.tool_choice.type,
                "function": openai_req.tool_choice.function,
            }
            anthropic_request["tool_choice"] = (
                self._convert_tool_choice_to_anthropic(tool_choice_dict)
            )
    elif openai_req.function_call:
        # Convert deprecated function_call to tool_choice
        anthropic_request["tool_choice"] = self._convert_function_call_to_anthropic(
            openai_req.function_call
        )

    logger.debug(
        "format_conversion_completed",
        from_format="openai",
        to_format="anthropic",
        original_model=openai_req.model,
        anthropic_model=anthropic_request.get("model"),
        has_tools=bool(anthropic_request.get("tools")),
        has_system=bool(anthropic_request.get("system")),
        message_count=len(cast(list[Any], anthropic_request["messages"])),
        operation="adapt_request",
    )
    return anthropic_request
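
Illustrative values for the thinking-mode branch above (a sketch; it assumes "o1" is handled by the pattern fallback in map_openai_model_to_claude rather than a different entry in OPENAI_TO_CLAUDE_MODEL_MAPPING):

request = {
    "model": "o1",
    "messages": [{"role": "user", "content": "Plan a trip"}],
    "reasoning_effort": "high",
    "temperature": 0.3,
}
result = OpenAIAdapter().adapt_request(request)
# result["thinking"] == {"type": "enabled", "budget_tokens": 10000}
# result["max_tokens"] == 20000  (budget_tokens + max(budget_tokens, 4096))
# result["temperature"] == 1.0   (forced to 1.0 while thinking is enabled)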

adapt_response

adapt_response(response)

Convert Anthropic response format to OpenAI format.

Parameters:

    response (dict[str, Any]): Anthropic format response. Required.

Returns:

    dict[str, Any]: OpenAI format response.

Raises:

    ValueError: If the response format is invalid or unsupported.

Source code in ccproxy/adapters/openai/adapter.py
def adapt_response(self, response: dict[str, Any]) -> dict[str, Any]:
    """Convert Anthropic response format to OpenAI format.

    Args:
        response: Anthropic format response

    Returns:
        OpenAI format response

    Raises:
        ValueError: If the response format is invalid or unsupported
    """
    try:
        # Extract original model from response metadata if available
        original_model = response.get("model", "gpt-4")

        # Generate response ID
        request_id = generate_openai_response_id()

        # Convert content
        content = ""
        tool_calls = []

        if "content" in response and response["content"]:
            for block in response["content"]:
                if block.get("type") == "text":
                    content += block.get("text", "")
                elif block.get("type") == "thinking":
                    # Handle thinking blocks - we can include them with a marker
                    thinking_text = block.get("thinking", "")
                    signature = block.get("signature")
                    if thinking_text:
                        content += f'<thinking signature="{signature}">{thinking_text}</thinking>'
                elif block.get("type") == "tool_use":
                    tool_calls.append(format_openai_tool_call(block))
                else:
                    logger.warning(
                        "unsupported_content_block_type", type=block.get("type")
                    )

        # Create OpenAI message
        # When there are tool calls but no content, use empty string instead of None
        # Otherwise, if content is empty string, convert to None
        final_content: str | None = content
        if tool_calls and not content:
            final_content = ""
        elif content == "":
            final_content = None

        message = OpenAIResponseMessage(
            role="assistant",
            content=final_content,
            tool_calls=tool_calls if tool_calls else None,
        )

        # Map stop reason
        finish_reason = self._convert_stop_reason_to_openai(
            response.get("stop_reason")
        )

        # Ensure finish_reason is a valid literal type
        if finish_reason not in ["stop", "length", "tool_calls", "content_filter"]:
            finish_reason = "stop"

        # Cast to proper literal type
        valid_finish_reason = cast(
            Literal["stop", "length", "tool_calls", "content_filter"], finish_reason
        )

        # Create choice
        choice = OpenAIChoice(
            index=0,
            message=message,
            finish_reason=valid_finish_reason,
            logprobs=None,  # Anthropic doesn't support logprobs
        )

        # Create usage
        usage_info = response.get("usage", {})
        usage = OpenAIUsage(
            prompt_tokens=usage_info.get("input_tokens", 0),
            completion_tokens=usage_info.get("output_tokens", 0),
            total_tokens=usage_info.get("input_tokens", 0)
            + usage_info.get("output_tokens", 0),
        )

        # Create OpenAI response
        openai_response = OpenAIChatCompletionResponse(
            id=request_id,
            object="chat.completion",
            created=int(time.time()),
            model=original_model,
            choices=[choice],
            usage=usage,
            system_fingerprint=generate_openai_system_fingerprint(),
        )

        logger.debug(
            "format_conversion_completed",
            from_format="anthropic",
            to_format="openai",
            response_id=request_id,
            original_model=original_model,
            finish_reason=valid_finish_reason,
            content_length=len(content) if content else 0,
            tool_calls_count=len(tool_calls),
            input_tokens=usage_info.get("input_tokens", 0),
            output_tokens=usage_info.get("output_tokens", 0),
            operation="adapt_response",
            choice=choice,
        )
        return openai_response.model_dump()

    except Exception as e:
        raise ValueError(f"Invalid Anthropic response format: {e}") from e

adapt_stream async

adapt_stream(stream)

Convert Anthropic streaming response to OpenAI streaming format.

Parameters:

    stream (AsyncIterator[dict[str, Any]]): Anthropic streaming response. Required.

Yields:

    AsyncIterator[dict[str, Any]]: OpenAI format streaming chunks.

Raises:

    ValueError: If the stream format is invalid or unsupported.

Source code in ccproxy/adapters/openai/adapter.py
async def adapt_stream(
    self, stream: AsyncIterator[dict[str, Any]]
) -> AsyncIterator[dict[str, Any]]:
    """Convert Anthropic streaming response to OpenAI streaming format.

    Args:
        stream: Anthropic streaming response

    Yields:
        OpenAI format streaming chunks

    Raises:
        ValueError: If the stream format is invalid or unsupported
    """
    # Create stream processor
    processor = OpenAIStreamProcessor(
        enable_usage=True,
        enable_tool_calls=True,
        enable_text_chunking=False,  # Keep text as-is for compatibility
    )

    try:
        # Process the stream and parse SSE format back to dict objects
        async for sse_chunk in processor.process_stream(stream):
            if sse_chunk.startswith("data: "):
                data_str = sse_chunk[6:].strip()
                if data_str and data_str != "[DONE]":
                    try:
                        yield json.loads(data_str)
                    except json.JSONDecodeError:
                        logger.warning(
                            "streaming_chunk_parse_failed",
                            chunk_data=data_str[:100] + "..."
                            if len(data_str) > 100
                            else data_str,
                            operation="adapt_stream",
                        )
                        continue
    except Exception as e:
        raise ValueError(f"Error processing streaming response: {e}") from e

adapt_error

adapt_error(error_body)

Convert Anthropic error format to OpenAI error format.

Parameters:

    error_body (dict[str, Any]): Anthropic error response. Required.

Returns:

    dict[str, Any]: OpenAI-formatted error response.

Source code in ccproxy/adapters/openai/adapter.py
def adapt_error(self, error_body: dict[str, Any]) -> dict[str, Any]:
    """Convert Anthropic error format to OpenAI error format.

    Args:
        error_body: Anthropic error response

    Returns:
        OpenAI-formatted error response
    """
    # Extract error details from Anthropic format
    anthropic_error = error_body.get("error", {})
    error_type = anthropic_error.get("type", "internal_server_error")
    error_message = anthropic_error.get("message", "An error occurred")

    # Map Anthropic error types to OpenAI error types
    error_type_mapping = {
        "invalid_request_error": "invalid_request_error",
        "authentication_error": "invalid_request_error",
        "permission_error": "invalid_request_error",
        "not_found_error": "invalid_request_error",
        "rate_limit_error": "rate_limit_error",
        "internal_server_error": "internal_server_error",
        "overloaded_error": "server_error",
    }

    openai_error_type = error_type_mapping.get(error_type, "invalid_request_error")

    # Return OpenAI-formatted error
    return {
        "error": {
            "message": error_message,
            "type": openai_error_type,
            "code": error_type,  # Preserve original error type as code
        }
    }
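
Example of the mapping above:

anthropic_error = {
    "error": {"type": "rate_limit_error", "message": "Too many requests"}
}
OpenAIAdapter().adapt_error(anthropic_error)
# -> {"error": {"message": "Too many requests",
#               "type": "rate_limit_error",
#               "code": "rate_limit_error"}}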

OpenAIChatCompletionResponse

Bases: BaseModel

OpenAI chat completion response.

OpenAIChoice

Bases: BaseModel

OpenAI choice in response.

OpenAIMessage

Bases: BaseModel

OpenAI message model.

OpenAIMessageContent

Bases: BaseModel

OpenAI message content block.

OpenAIResponseMessage

Bases: BaseModel

OpenAI response message.

OpenAIStreamingChatCompletionResponse

Bases: BaseModel

OpenAI streaming chat completion response.

OpenAIToolCall

Bases: BaseModel

OpenAI tool call.

OpenAIUsage

Bases: BaseModel

OpenAI usage information.

OpenAISSEFormatter

Formats streaming responses to match OpenAI's SSE format.

format_data_event staticmethod

format_data_event(data)

Format a data event for OpenAI-compatible Server-Sent Events.

Parameters:

    data (dict[str, Any]): Event data dictionary. Required.

Returns:

    str: Formatted SSE string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_data_event(data: dict[str, Any]) -> str:
    """Format a data event for OpenAI-compatible Server-Sent Events.

    Args:
        data: Event data dictionary

    Returns:
        Formatted SSE string
    """
    json_data = json.dumps(data, separators=(",", ":"))
    return f"data: {json_data}\n\n"

format_first_chunk staticmethod

format_first_chunk(
    message_id, model, created, role="assistant"
)

Format the first chunk with role and basic metadata.

Parameters:

    message_id (str): Unique identifier for the completion. Required.
    model (str): Model name being used. Required.
    created (int): Unix timestamp when the completion was created. Required.
    role (str): Role of the assistant. Default: 'assistant'.

Returns:

    str: Formatted SSE string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_first_chunk(
    message_id: str, model: str, created: int, role: str = "assistant"
) -> str:
    """Format the first chunk with role and basic metadata.

    Args:
        message_id: Unique identifier for the completion
        model: Model name being used
        created: Unix timestamp when the completion was created
        role: Role of the assistant

    Returns:
        Formatted SSE string
    """
    data = {
        "id": message_id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"role": role},
                "logprobs": None,
                "finish_reason": None,
            }
        ],
    }
    return OpenAISSEFormatter.format_data_event(data)

format_content_chunk staticmethod

format_content_chunk(
    message_id, model, created, content, choice_index=0
)

Format a content chunk with text delta.

Parameters:

    message_id (str): Unique identifier for the completion. Required.
    model (str): Model name being used. Required.
    created (int): Unix timestamp when the completion was created. Required.
    content (str): Text content to include in the delta. Required.
    choice_index (int): Index of the choice (usually 0). Default: 0.

Returns:

    str: Formatted SSE string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_content_chunk(
    message_id: str, model: str, created: int, content: str, choice_index: int = 0
) -> str:
    """Format a content chunk with text delta.

    Args:
        message_id: Unique identifier for the completion
        model: Model name being used
        created: Unix timestamp when the completion was created
        content: Text content to include in the delta
        choice_index: Index of the choice (usually 0)

    Returns:
        Formatted SSE string
    """
    data = {
        "id": message_id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model,
        "choices": [
            {
                "index": choice_index,
                "delta": {"content": content},
                "logprobs": None,
                "finish_reason": None,
            }
        ],
    }
    return OpenAISSEFormatter.format_data_event(data)

format_tool_call_chunk staticmethod

format_tool_call_chunk(
    message_id,
    model,
    created,
    tool_call_id,
    function_name=None,
    function_arguments=None,
    tool_call_index=0,
    choice_index=0,
)

Format a tool call chunk.

Parameters:

    message_id (str): Unique identifier for the completion. Required.
    model (str): Model name being used. Required.
    created (int): Unix timestamp when the completion was created. Required.
    tool_call_id (str): ID of the tool call. Required.
    function_name (str | None): Name of the function being called. Default: None.
    function_arguments (str | None): Arguments for the function. Default: None.
    tool_call_index (int): Index of the tool call. Default: 0.
    choice_index (int): Index of the choice (usually 0). Default: 0.

Returns:

    str: Formatted SSE string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_tool_call_chunk(
    message_id: str,
    model: str,
    created: int,
    tool_call_id: str,
    function_name: str | None = None,
    function_arguments: str | None = None,
    tool_call_index: int = 0,
    choice_index: int = 0,
) -> str:
    """Format a tool call chunk.

    Args:
        message_id: Unique identifier for the completion
        model: Model name being used
        created: Unix timestamp when the completion was created
        tool_call_id: ID of the tool call
        function_name: Name of the function being called
        function_arguments: Arguments for the function
        tool_call_index: Index of the tool call
        choice_index: Index of the choice (usually 0)

    Returns:
        Formatted SSE string
    """
    tool_call: dict[str, Any] = {
        "index": tool_call_index,
        "id": tool_call_id,
        "type": "function",
        "function": {},
    }

    if function_name is not None:
        tool_call["function"]["name"] = function_name

    if function_arguments is not None:
        tool_call["function"]["arguments"] = function_arguments

    data = {
        "id": message_id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model,
        "choices": [
            {
                "index": choice_index,
                "delta": {"tool_calls": [tool_call]},
                "logprobs": None,
                "finish_reason": None,
            }
        ],
    }
    return OpenAISSEFormatter.format_data_event(data)

format_final_chunk staticmethod

format_final_chunk(
    message_id,
    model,
    created,
    finish_reason="stop",
    choice_index=0,
    usage=None,
)

Format the final chunk with finish_reason.

Parameters:

    message_id (str): Unique identifier for the completion. Required.
    model (str): Model name being used. Required.
    created (int): Unix timestamp when the completion was created. Required.
    finish_reason (str): Reason for completion (stop, length, tool_calls, etc.). Default: 'stop'.
    choice_index (int): Index of the choice (usually 0). Default: 0.
    usage (dict[str, int] | None): Optional usage information to include. Default: None.

Returns:

    str: Formatted SSE string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_final_chunk(
    message_id: str,
    model: str,
    created: int,
    finish_reason: str = "stop",
    choice_index: int = 0,
    usage: dict[str, int] | None = None,
) -> str:
    """Format the final chunk with finish_reason.

    Args:
        message_id: Unique identifier for the completion
        model: Model name being used
        created: Unix timestamp when the completion was created
        finish_reason: Reason for completion (stop, length, tool_calls, etc.)
        choice_index: Index of the choice (usually 0)
        usage: Optional usage information to include

    Returns:
        Formatted SSE string
    """
    data = {
        "id": message_id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model,
        "choices": [
            {
                "index": choice_index,
                "delta": {},
                "logprobs": None,
                "finish_reason": finish_reason,
            }
        ],
    }

    # Add usage if provided
    if usage:
        data["usage"] = usage

    return OpenAISSEFormatter.format_data_event(data)
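
For example, a terminating chunk that also reports token usage:

OpenAISSEFormatter.format_final_chunk(
    message_id="chatcmpl-abc123",
    model="claude-3-5-sonnet-20241022",
    created=1700000000,
    finish_reason="stop",
    usage={"prompt_tokens": 12, "completion_tokens": 5, "total_tokens": 17},
)
# -> 'data: {"id":"chatcmpl-abc123","object":"chat.completion.chunk",...,"usage":{...}}\n\n'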

format_error_chunk staticmethod

format_error_chunk(
    message_id, model, created, error_type, error_message
)

Format an error chunk.

Parameters:

    message_id (str): Unique identifier for the completion. Required.
    model (str): Model name being used. Required.
    created (int): Unix timestamp when the completion was created. Required.
    error_type (str): Type of error. Required.
    error_message (str): Error message. Required.

Returns:

    str: Formatted SSE string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_error_chunk(
    message_id: str, model: str, created: int, error_type: str, error_message: str
) -> str:
    """Format an error chunk.

    Args:
        message_id: Unique identifier for the completion
        model: Model name being used
        created: Unix timestamp when the completion was created
        error_type: Type of error
        error_message: Error message

    Returns:
        Formatted SSE string
    """
    data = {
        "id": message_id,
        "object": "chat.completion.chunk",
        "created": created,
        "model": model,
        "choices": [
            {"index": 0, "delta": {}, "logprobs": None, "finish_reason": "error"}
        ],
        "error": {"type": error_type, "message": error_message},
    }
    return OpenAISSEFormatter.format_data_event(data)

format_done staticmethod

format_done()

Format the final DONE event.

Returns:

    str: Formatted SSE termination string.

Source code in ccproxy/adapters/openai/streaming.py
@staticmethod
def format_done() -> str:
    """Format the final DONE event.

    Returns:
        Formatted SSE termination string
    """
    return "data: [DONE]\n\n"

OpenAIStreamProcessor

OpenAIStreamProcessor(
    message_id=None,
    model="claude-3-5-sonnet-20241022",
    created=None,
    enable_usage=True,
    enable_tool_calls=True,
    enable_text_chunking=True,
    chunk_size_words=3,
)

Processes Anthropic/Claude streaming responses into OpenAI format.

Parameters:

    message_id (str | None): Response ID, generated if not provided. Default: None.
    model (str): Model name for responses. Default: 'claude-3-5-sonnet-20241022'.
    created (int | None): Creation timestamp, current time if not provided. Default: None.
    enable_usage (bool): Whether to include usage information. Default: True.
    enable_tool_calls (bool): Whether to process tool calls. Default: True.
    enable_text_chunking (bool): Whether to chunk text content. Default: True.
    chunk_size_words (int): Number of words per text chunk. Default: 3.
Source code in ccproxy/adapters/openai/streaming.py
def __init__(
    self,
    message_id: str | None = None,
    model: str = "claude-3-5-sonnet-20241022",
    created: int | None = None,
    enable_usage: bool = True,
    enable_tool_calls: bool = True,
    enable_text_chunking: bool = True,
    chunk_size_words: int = 3,
):
    """Initialize the stream processor.

    Args:
        message_id: Response ID, generated if not provided
        model: Model name for responses
        created: Creation timestamp, current time if not provided
        enable_usage: Whether to include usage information
        enable_tool_calls: Whether to process tool calls
        enable_text_chunking: Whether to chunk text content
        chunk_size_words: Number of words per text chunk
    """
    self.message_id = message_id or generate_openai_response_id()
    self.model = model
    self.created = created or int(time.time())
    self.enable_usage = enable_usage
    self.enable_tool_calls = enable_tool_calls
    self.enable_text_chunking = enable_text_chunking
    self.chunk_size_words = chunk_size_words
    self.formatter = OpenAISSEFormatter()

    # State tracking
    self.role_sent = False
    self.accumulated_content = ""
    self.tool_calls: dict[str, dict[str, Any]] = {}
    self.usage_info: dict[str, int] | None = None
    # Thinking block tracking
    self.current_thinking_text = ""
    self.current_thinking_signature: str | None = None
    self.thinking_block_active = False

process_stream async

process_stream(claude_stream)

Process a Claude/Anthropic stream into OpenAI format.

Parameters:

    claude_stream (AsyncIterator[dict[str, Any]]): Async iterator of Claude response chunks. Required.

Yields:

    AsyncIterator[str]: OpenAI-formatted SSE strings.

Source code in ccproxy/adapters/openai/streaming.py
async def process_stream(
    self, claude_stream: AsyncIterator[dict[str, Any]]
) -> AsyncIterator[str]:
    """Process a Claude/Anthropic stream into OpenAI format.

    Args:
        claude_stream: Async iterator of Claude response chunks

    Yields:
        OpenAI-formatted SSE strings
    """
    try:
        async for chunk in claude_stream:
            async for sse_chunk in self._process_chunk(chunk):
                yield sse_chunk

        # Send final chunk
        if self.usage_info and self.enable_usage:
            yield self.formatter.format_final_chunk(
                self.message_id,
                self.model,
                self.created,
                finish_reason="stop",
                usage=self.usage_info,
            )
        else:
            yield self.formatter.format_final_chunk(
                self.message_id, self.model, self.created, finish_reason="stop"
            )

        # Send DONE event
        yield self.formatter.format_done()

    except Exception as e:
        # Send error chunk
        yield self.formatter.format_error_chunk(
            self.message_id, self.model, self.created, "error", str(e)
        )
        yield self.formatter.format_done()
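
A wiring sketch; claude_events stands for any async iterator of Claude streaming event dicts (hypothetical name):

processor = OpenAIStreamProcessor(
    model="claude-3-5-sonnet-20241022",
    enable_text_chunking=False,  # pass text deltas through unchanged
)

async def to_sse(claude_events):
    # Yields "data: ..." strings, ending with the final "data: [DONE]\n\n".
    async for sse in processor.process_stream(claude_events):
        yield sse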

map_openai_model_to_claude

map_openai_model_to_claude(openai_model)

Map OpenAI model name to Claude model name.

Parameters:

    openai_model (str): OpenAI model identifier. Required.

Returns:

    str: Claude model identifier.

Source code in ccproxy/adapters/openai/adapter.py
def map_openai_model_to_claude(openai_model: str) -> str:
    """Map OpenAI model name to Claude model name.

    Args:
        openai_model: OpenAI model identifier

    Returns:
        Claude model identifier
    """
    # Direct mapping first
    claude_model = OPENAI_TO_CLAUDE_MODEL_MAPPING.get(openai_model)
    if claude_model:
        return claude_model

    # Pattern matching for versioned models
    if openai_model.startswith("gpt-4o-mini"):
        return "claude-3-5-haiku-latest"
    elif openai_model.startswith("gpt-4o") or openai_model.startswith("gpt-4"):
        return "claude-3-7-sonnet-20250219"
    elif openai_model.startswith("gpt-3.5"):
        return "claude-3-5-haiku-latest"
    elif openai_model.startswith("o1"):
        return "claude-sonnet-4-20250514"
    elif openai_model.startswith("o3"):
        return "claude-opus-4-20250514"
    elif openai_model.startswith("gpt"):
        return "claude-sonnet-4-20250514"

    # If it's already a Claude model, pass through unchanged
    if openai_model.startswith("claude-"):
        return openai_model

    # For unknown models, pass through unchanged; we may change
    # this to a default model in the future
    return openai_model
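
Assuming the inputs below are not already present in OPENAI_TO_CLAUDE_MODEL_MAPPING, the pattern fallbacks give:

map_openai_model_to_claude("gpt-4o-mini-2024-07-18")      # "claude-3-5-haiku-latest"
map_openai_model_to_claude("o3-mini")                     # "claude-opus-4-20250514"
map_openai_model_to_claude("claude-3-5-sonnet-20241022")  # passed through unchanged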

format_openai_tool_call

format_openai_tool_call(tool_use)

Convert Anthropic tool use to OpenAI tool call format.

Source code in ccproxy/adapters/openai/models.py
def format_openai_tool_call(tool_use: dict[str, Any]) -> OpenAIToolCall:
    """Convert Anthropic tool use to OpenAI tool call format."""
    tool_input = tool_use.get("input", {})
    if isinstance(tool_input, dict):
        arguments_str = json.dumps(tool_input)
    else:
        arguments_str = str(tool_input)

    return OpenAIToolCall(
        id=tool_use.get("id", ""),
        type="function",
        function=OpenAIFunctionCall(
            name=tool_use.get("name", ""),
            arguments=arguments_str,
        ),
    )
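
For example:

tool_use = {
    "type": "tool_use",
    "id": "toolu_01ABC",
    "name": "get_weather",
    "input": {"city": "Paris"},
}
call = format_openai_tool_call(tool_use)
# call.id == "toolu_01ABC"
# call.function.name == "get_weather"
# call.function.arguments == '{"city": "Paris"}'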

generate_openai_response_id

generate_openai_response_id()

Generate an OpenAI-compatible response ID.

Source code in ccproxy/adapters/openai/models.py
def generate_openai_response_id() -> str:
    """Generate an OpenAI-compatible response ID."""
    return f"chatcmpl-{uuid.uuid4().hex[:29]}"

generate_openai_system_fingerprint

generate_openai_system_fingerprint()

Generate an OpenAI-compatible system fingerprint.

Source code in ccproxy/adapters/openai/models.py
def generate_openai_system_fingerprint() -> str:
    """Generate an OpenAI-compatible system fingerprint."""
    return f"fp_{uuid.uuid4().hex[:8]}"