import { type DocEntry } from "./openai";
/**
 * Static documentation corpus for the xAI (Grok) provider.
 *
 * Each entry's `content` is a complete Markdown document held in a template
 * literal (embedded ``` code fences are escaped as \`\`\` so they stay inside
 * the string).
 *
 * NOTE(review): the `keywords`, `category`, and `provider` fields presumably
 * drive keyword/tag-based lookup by whatever consumes `DocEntry` — confirm
 * against the consumer in ./openai (not visible from this file).
 */
export const xaiDocs: DocEntry[] = [
// Entry 1 — Chat Completions API: endpoint, headers, request/response JSON
// schema, available models, and Python/Node.js/cURL usage examples
// (OpenAI-compatible surface, per the content itself).
{
id: "xai-chat",
title: "xAI (Grok) Chat API",
category: "chat",
provider: "xai",
keywords: ["xai", "grok", "chat", "completions", "elon", "twitter"],
content: `# xAI (Grok) Chat API
xAI provides the Grok model series via OpenAI-compatible API.
## Endpoint
POST https://api.x.ai/v1/chat/completions
## Headers
- Authorization: Bearer YOUR_API_KEY
- Content-Type: application/json
## Request Body
\`\`\`json
{
"model": "grok-3" | "grok-3-fast" | "grok-3-mini" | "grok-3-mini-fast" | "grok-2-1212",
"messages": [
{
"role": "system" | "user" | "assistant",
"content": "string"
}
],
"max_tokens": number,
"temperature": number (0-2, default 1),
"top_p": number (0-1),
"stream": boolean,
"stop": string[],
"tools": [
{
"type": "function",
"function": {
"name": "string",
"description": "string",
"parameters": {JSON Schema}
}
}
],
"tool_choice": "auto" | "none" | "required"
}
\`\`\`
## Response
OpenAI-compatible format:
\`\`\`json
{
"id": "xxx",
"object": "chat.completion",
"created": 1234567890,
"model": "grok-3",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "string",
"tool_calls": [...]
},
"finish_reason": "stop" | "length" | "tool_calls"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
}
}
\`\`\`
## Available Models
- grok-3: Latest flagship model
- grok-3-fast: Optimized for speed
- grok-3-mini: Smaller, cost-effective
- grok-3-mini-fast: Fast mini variant
- grok-2-1212: Previous generation
## Python SDK Example
\`\`\`python
from openai import OpenAI
client = OpenAI(
base_url="https://api.x.ai/v1",
api_key="xai-..."
)
response = client.chat.completions.create(
model="grok-3",
messages=[
{"role": "user", "content": "Hello!"}
],
max_tokens=1024
)
print(response.choices[0].message.content)
\`\`\`
## Node.js Example
\`\`\`javascript
import OpenAI from "openai";
const client = new OpenAI({
baseURL: "https://api.x.ai/v1",
apiKey: "xai-..."
});
const response = await client.chat.completions.create({
model: "grok-3",
messages: [{ role: "user", content: "Hello!" }],
max_tokens: 1024
});
console.log(response.choices[0].message.content);
\`\`\`
## cURL Example
\`\`\`bash
curl https://api.x.ai/v1/chat/completions \\
-H "Authorization: Bearer $XAI_API_KEY" \\
-H "Content-Type: application/json" \\
-d '{
"model": "grok-3",
"messages": [{"role": "user", "content": "Hello!"}],
"max_tokens": 1024
}'
\`\`\``
},
// Entry 2 — rate limits, pricing, and context-window notes for the Grok
// models. Intentionally vague figures ("varies by plan", "check api.x.ai")
// rather than hard numbers, so the text does not go stale.
{
id: "xai-rate-limits",
title: "xAI Rate Limits & Pricing",
category: "limits",
provider: "xai",
keywords: ["xai", "grok", "rate limit", "pricing", "cost", "limits"],
content: `# xAI Rate Limits & Pricing
## Rate Limits
- Varies by plan and model
- Contact xAI for specific limits
## Pricing
- grok-3: Competitive with other flagship models
- grok-3-mini: Lower cost
- Check api.x.ai for current pricing
## Context Windows
- grok-3: 128K+ tokens
- grok-3-mini: Large context window
## Features
- OpenAI-compatible API
- Real-time knowledge (X/Twitter integration)
- Function calling support
- Streaming support`
}
];