import { type DocEntry } from "./openai";
/**
 * Static reference documentation for the Anthropic (Claude) API.
 *
 * Each entry's `content` is a markdown document rendered/searched verbatim,
 * so the template-literal text is runtime data — edit it only to correct
 * factual errors. Keyed by stable `id`; `keywords` drive search matching.
 */
export const anthropicDocs: DocEntry[] = [
  {
    id: "anthropic-messages",
    title: "Messages API (Claude)",
    category: "chat",
    provider: "anthropic",
    keywords: ["messages", "claude", "chat", "conversation", "anthropic"],
    content: `# Anthropic Messages API (Claude)
## Endpoint
POST https://api.anthropic.com/v1/messages
## Headers
- x-api-key: YOUR_API_KEY
- anthropic-version: 2023-06-01
- content-type: application/json
## Request Body
\`\`\`json
{
"model": "claude-sonnet-4-20250514" | "claude-3-5-sonnet-20241022" | "claude-3-5-haiku-20241022" | "claude-3-opus-20240229" | "claude-3-haiku-20240307",
"max_tokens": number (required, max 8192 for most, 64000 for some),
"messages": [
{
"role": "user" | "assistant",
"content": "string" | [
{"type": "text", "text": "string"},
{"type": "image", "source": {"type": "base64", "media_type": "image/jpeg"|"image/png"|"image/gif"|"image/webp", "data": "base64string"}},
{"type": "document", "source": {"type": "base64", "media_type": "application/pdf", "data": "base64string"}},
{"type": "tool_result", "tool_use_id": "string", "content": "string"}
]
}
],
"system": "string" | [{"type": "text", "text": "string"}] (text blocks only),
"temperature": number (0-1, default varies),
"top_p": number (0-1),
"top_k": number (positive integer, only sample from top K options),
"stop_sequences": string[],
"stream": boolean,
"tools": [
{
"name": "string",
"description": "string",
"input_schema": {JSON Schema object}
}
],
"tool_choice": {"type": "auto" | "any" | "tool", "name": "string (if type=tool)"},
"metadata": {"user_id": "string"}
}
\`\`\`
## Response
\`\`\`json
{
"id": "msg_01xxx",
"type": "message",
"role": "assistant",
"content": [
{"type": "text", "text": "Hello! How can I help you?"},
{
"type": "tool_use",
"id": "toolu_01xxx",
"name": "get_weather",
"input": {"location": "San Francisco"}
}
],
"model": "claude-sonnet-4-20250514",
"stop_reason": "end_turn" | "max_tokens" | "stop_sequence" | "tool_use",
"stop_sequence": null,
"usage": {
"input_tokens": 10,
"output_tokens": 25,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0
}
}
\`\`\`
## Key Differences from OpenAI
1. "system" is a top-level field, not a message role
2. Messages can only be "user" or "assistant" (no "system" role)
3. Tool definitions use "input_schema" instead of "parameters"
4. Tool results use "tool_result" type in content array
5. Response content is an array (can have text + tool_use)
6. Max tokens is REQUIRED in every request
## Available Models (2024-2025)
- claude-sonnet-4-20250514: Latest flagship, best overall
- claude-3-5-sonnet-20241022: Previous flagship, great coding
- claude-3-5-haiku-20241022: Fast, cost-effective
- claude-3-opus-20240229: Most capable (older)
- claude-3-haiku-20240307: Fastest, cheapest
## Python SDK Example
\`\`\`python
import anthropic
client = anthropic.Anthropic(api_key="sk-ant-...")
message = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
system="You are a helpful assistant",
messages=[
{"role": "user", "content": "Hello, Claude!"}
],
temperature=0.7
)
print(message.content[0].text)
\`\`\`
## Node.js SDK Example
\`\`\`javascript
import Anthropic from "@anthropic-ai/sdk";
const client = new Anthropic({ apiKey: "sk-ant-..." });
const message = await client.messages.create({
model: "claude-sonnet-4-20250514",
max_tokens: 1024,
system: "You are a helpful assistant",
messages: [
{ role: "user", content: "Hello, Claude!" }
],
temperature: 0.7
});
console.log(message.content[0].text);
\`\`\`
## cURL Example
\`\`\`bash
curl https://api.anthropic.com/v1/messages \\
-H "x-api-key: $ANTHROPIC_API_KEY" \\
-H "anthropic-version: 2023-06-01" \\
-H "content-type: application/json" \\
-d '{
"model": "claude-sonnet-4-20250514",
"max_tokens": 1024,
"messages": [{"role": "user", "content": "Hello!"}]
}'
\`\`\``
  },
  {
    id: "anthropic-tools",
    title: "Tool Use (Function Calling)",
    category: "tools",
    provider: "anthropic",
    keywords: ["tools", "function", "tool use", "function calling"],
    content: `# Anthropic Tool Use
## Tool Definition Format
\`\`\`json
{
"name": "get_weather",
"description": "Get current weather for a location",
"input_schema": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
\`\`\`
## Tool Choice Options
- {"type": "auto"}: Model decides (default)
- {"type": "any"}: Model MUST use at least one tool
- {"type": "tool", "name": "specific_tool"}: Force specific tool
## Handling Tool Calls
\`\`\`python
import anthropic
client = anthropic.Anthropic()
tools = [
{
"name": "get_weather",
"description": "Get weather for location",
"input_schema": {
"type": "object",
"properties": {
"location": {"type": "string"},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
},
"required": ["location"]
}
}
]
response = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[{"role": "user", "content": "What's the weather in London?"}],
tools=tools
)
# Check for tool use
for content in response.content:
if content.type == "tool_use":
tool_name = content.name
tool_input = content.input
tool_id = content.id
# Execute tool
result = get_weather(tool_input["location"])
# Send result back
response2 = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[
{"role": "user", "content": "What's the weather in London?"},
{"role": "assistant", "content": response.content},
{
"role": "user",
"content": [
{
"type": "tool_result",
"tool_use_id": tool_id,
"content": str(result)
}
]
}
],
tools=tools
)
\`\`\`
## Important Notes
- Tool results are sent as "user" role messages
- Tool result content is an array with "tool_result" type
- Each tool_result must include tool_use_id from the tool_use block
- Model can call multiple tools in one response`
  },
  {
    id: "anthropic-vision",
    title: "Vision (Image Input)",
    category: "multimodal",
    provider: "anthropic",
    keywords: ["vision", "image", "picture", "photo", "multimodal"],
    content: `# Anthropic Vision (Image Input)
## Supported Models
All Claude 3+ models support images:
- claude-sonnet-4-20250514
- claude-3-5-sonnet-20241022
- claude-3-5-haiku-20241022
- claude-3-opus-20240229
- claude-3-haiku-20240307
## Image Format
\`\`\`json
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg" | "image/png" | "image/gif" | "image/webp",
"data": "base64encodedstring..."
}
}
]
}
\`\`\`
## Multiple Images
\`\`\`json
{
"role": "user",
"content": [
{"type": "text", "text": "Compare these images"},
{"type": "image", "source": {"type": "base64", "media_type": "image/jpeg", "data": "..."}},
{"type": "image", "source": {"type": "base64", "media_type": "image/png", "data": "..."}}
]
}
\`\`\`
## Python Example
\`\`\`python
import anthropic
import base64
client = anthropic.Anthropic()
with open("image.jpg", "rb") as f:
image_data = base64.b64encode(f.read()).decode()
message = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
messages=[{
"role": "user",
"content": [
{"type": "text", "text": "What is in this image?"},
{
"type": "image",
"source": {
"type": "base64",
"media_type": "image/jpeg",
"data": image_data
}
}
]
}]
)
\`\`\`
## Image Limits
- Max 20 images per request (via API)
- Max image size: 5MB each (via API)`
  },
  {
    id: "anthropic-prompt-caching",
    title: "Prompt Caching",
    category: "optimization",
    provider: "anthropic",
    keywords: ["cache", "caching", "optimization", "cost"],
    content: `# Anthropic Prompt Caching
## How It Works
Cache prefixes of your prompt (system prompt + early messages) for faster, cheaper responses.
## Usage
Add "cache_control": {"type": "ephemeral"} to content blocks:
\`\`\`python
message = client.messages.create(
model="claude-sonnet-4-20250514",
max_tokens=1024,
system=[
{
"type": "text",
"text": "Long system prompt that repeats...",
"cache_control": {"type": "ephemeral"}
}
],
messages=[
{"role": "user", "content": "Hello"}
]
)
\`\`\`
## Pricing with Caching
- Cache creation (write): 125% of base input token price (one-time 25% surcharge)
- Cache read: 10% of input token price (subsequent)
- Regular input: 100%
## Requirements
- Minimum 1024 tokens for cache prefix (2048 for Haiku models)
- Cache expires after 5 minutes of inactivity
- Supported on Claude 3 Haiku, Claude 3 Opus, and all Claude 3.5 and newer models
## Response Usage Fields
- cache_creation_input_tokens: Tokens cached this request
- cache_read_input_tokens: Tokens read from cache`
  },
  {
    id: "anthropic-rate-limits",
    title: "Rate Limits & Pricing",
    category: "limits",
    provider: "anthropic",
    keywords: ["rate limit", "pricing", "cost", "tokens", "limits"],
    content: `# Anthropic Rate Limits & Pricing
## Rate Limits (Tier 1)
- Requests per minute: 50 RPM
- Tokens per minute: 40,000 TPM
- Tokens per day: 1,000,000
## Higher Tiers
Increase automatically with usage and payment method.
## Pricing (per 1M tokens)
- claude-sonnet-4: Input $3.00, Output $15.00
- claude-3-5-sonnet: Input $3.00, Output $15.00
- claude-3-5-haiku: Input $0.80, Output $4.00
- claude-3-opus: Input $15.00, Output $75.00
- claude-3-haiku: Input $0.25, Output $1.25
## Error Codes
- 400: Bad Request
- 401: Invalid API key
- 403: Permission denied
- 429: Rate limit exceeded
- 500: Internal server error
- 529: Overloaded (temporary)
## Context Windows
- claude-sonnet-4: 200K tokens
- claude-3-5-sonnet: 200K tokens
- claude-3-5-haiku: 200K tokens
- claude-3-opus: 200K tokens
- claude-3-haiku: 200K tokens`
  }
];