
ccproxy.testing.content_generation

Content generation utilities for testing requests and responses.

MessageContentGenerator

MessageContentGenerator()

Generate realistic message content for testing.

Source code in ccproxy/testing/content_generation.py
def __init__(self) -> None:
    self.response_templates = self._load_response_templates()
    self.request_templates = self._load_request_templates()

get_request_message_content

get_request_message_content(message_type)

Get request message content based on type.

Source code in ccproxy/testing/content_generation.py
def get_request_message_content(self, message_type: str) -> str:
    """Get request message content based on type."""
    if message_type in self.request_templates:
        return random.choice(self.request_templates[message_type])
    else:
        # Fallback to short message for unknown types
        return random.choice(self.request_templates["short"])

get_response_content

get_response_content(message_type, model)

Generate response content with realistic token counts.

Source code in ccproxy/testing/content_generation.py
def get_response_content(
    self, message_type: str, model: str
) -> tuple[str, int, int]:
    """Generate response content with realistic token counts."""
    # Select base template
    if message_type == "tool_use":
        base_content = random.choice(self.response_templates["tool_use"])
        # Add calculation result
        result = random.randint(1, 1000)
        content = f"{base_content} The result is {result}."
    elif message_type in self.response_templates:
        content = random.choice(self.response_templates[message_type])
    else:
        # Mix of different lengths for unknown types
        template_type = random.choice(["short", "medium", "long"])
        content = random.choice(self.response_templates[template_type])

    # Calculate realistic token counts based on content
    # Rough estimate: ~4 characters per token
    estimated_output_tokens = max(1, len(content) // 4)

    # Add some randomness but keep it realistic
    output_tokens = random.randint(
        max(1, estimated_output_tokens - 10), estimated_output_tokens + 20
    )

    # Input tokens based on typical request sizes (10-500 range)
    input_tokens = random.randint(10, 500)

    return content, input_tokens, output_tokens
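
The method returns a (content, input_tokens, output_tokens) tuple. A brief sketch of consuming it; the model name below is illustrative, since the model argument does not currently change the generated content:

generator = MessageContentGenerator()
content, input_tokens, output_tokens = generator.get_response_content(
    "tool_use", "claude-3-5-sonnet-20241022"
)
# output_tokens is derived from len(content) // 4 plus some random jitter;
# input_tokens is drawn uniformly from the 10-500 range.
print(content, input_tokens, output_tokens)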

PayloadBuilder

PayloadBuilder()

Build request payloads for different API formats.

Source code in ccproxy/testing/content_generation.py
def __init__(self) -> None:
    self.content_generator = MessageContentGenerator()

build_anthropic_payload

build_anthropic_payload(scenario)

Build Anthropic format payload.

Source code in ccproxy/testing/content_generation.py
def build_anthropic_payload(self, scenario: RequestScenario) -> dict[str, Any]:
    """Build Anthropic format payload."""
    payload = {
        "model": scenario.model,
        "messages": [
            {
                "role": "user",
                "content": self.content_generator.get_request_message_content(
                    scenario.message_type
                ),
            }
        ],
        "stream": scenario.streaming,
        "max_tokens": random.randint(100, 4000),  # Realistic token limits
    }

    if scenario.message_type == "tool_use":
        payload["tools"] = [
            {
                "name": "calculator",
                "description": "Perform basic calculations",
                "input_schema": {
                    "type": "object",
                    "properties": {
                        "expression": {
                            "type": "string",
                            "description": "Math expression to evaluate",
                        }
                    },
                    "required": ["expression"],
                },
            }
        ]

    return payload
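
A hedged sketch of building an Anthropic-format payload. It assumes RequestScenario exposes at least model, message_type, streaming, and api_format fields; the import path and field values shown here are illustrative:

from ccproxy.testing import RequestScenario  # adjust to the actual location

builder = PayloadBuilder()
scenario = RequestScenario(
    model="claude-3-5-sonnet-20241022",
    message_type="tool_use",
    streaming=False,
    api_format="anthropic",
)
payload = builder.build_anthropic_payload(scenario)
# The calculator tool definition is attached because message_type == "tool_use".
assert "tools" in payload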

build_openai_payload

build_openai_payload(scenario)

Build OpenAI format payload.

Source code in ccproxy/testing/content_generation.py
def build_openai_payload(self, scenario: RequestScenario) -> dict[str, Any]:
    """Build OpenAI format payload."""
    messages = [
        {
            "role": "user",
            "content": self.content_generator.get_request_message_content(
                scenario.message_type
            ),
        }
    ]

    payload = {
        "model": scenario.model,
        "messages": messages,
        "stream": scenario.streaming,
        "max_tokens": random.randint(100, 4000),  # Realistic token limits
    }

    if scenario.message_type == "tool_use":
        payload["tools"] = [
            {
                "type": "function",
                "function": {
                    "name": "calculator",
                    "description": "Perform basic calculations",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "expression": {
                                "type": "string",
                                "description": "Math expression to evaluate",
                            }
                        },
                        "required": ["expression"],
                    },
                },
            }
        ]

    return payload
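
The OpenAI variant mirrors the Anthropic builder but wraps the tool definition in the "type": "function" envelope used by the OpenAI chat completions format. A short sketch under the same RequestScenario assumptions as above:

openai_scenario = RequestScenario(
    model="gpt-4o",
    message_type="tool_use",
    streaming=True,
    api_format="openai",
)
payload = builder.build_openai_payload(openai_scenario)
assert payload["tools"][0]["type"] == "function"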

build_payload

build_payload(scenario)

Build request payload based on scenario format.

Source code in ccproxy/testing/content_generation.py
def build_payload(self, scenario: RequestScenario) -> dict[str, Any]:
    """Build request payload based on scenario format."""
    # Use custom payload if provided
    if scenario.custom_payload:
        return scenario.custom_payload

    # Build format-specific payload
    if scenario.api_format == "openai":
        return self.build_openai_payload(scenario)
    else:
        return self.build_anthropic_payload(scenario)
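
build_payload is the usual entry point: a non-empty custom_payload on the scenario is returned verbatim, "openai" dispatches to build_openai_payload, and every other api_format falls through to the Anthropic builder. A short sketch, under the same RequestScenario assumptions as above:

builder = PayloadBuilder()

# A custom payload short-circuits the format-specific builders entirely.
custom = RequestScenario(
    model="claude-3-5-sonnet-20241022",
    message_type="short",
    streaming=False,
    api_format="anthropic",
    custom_payload={"model": "claude-3-5-sonnet-20241022", "messages": []},
)
assert builder.build_payload(custom) == custom.custom_payload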