import { type DocEntry } from "./openai";
/**
 * Static documentation entries for the Mistral AI platform (chat, embeddings,
 * function calling, rate limits). Each entry's `content` is markdown served
 * verbatim to consumers; `keywords` drive lookup/search.
 *
 * NOTE(review): model names, pricing, and context windows are point-in-time
 * snapshots of Mistral's published docs — verify against the official
 * documentation before relying on them.
 */
export const mistralDocs: DocEntry[] = [
{
id: "mistral-chat",
title: "Mistral Chat API",
category: "chat",
provider: "mistral",
keywords: ["mistral", "chat", "completions", "chat completion"],
content: `# Mistral Chat API
## Endpoint
POST https://api.mistral.ai/v1/chat/completions
## Headers
- Authorization: Bearer YOUR_API_KEY
- Content-Type: application/json
## Request Body
\`\`\`json
{
"model": "mistral-large-latest" | "mistral-small-latest" | "codestral-latest" | "ministral-8b-latest" | "ministral-3b-latest" | "open-mistral-nemo" | "pixtral-large-latest",
"messages": [
{
"role": "system" | "user" | "assistant" | "tool",
"content": "string" | [{"type": "text", "text": "string"} | {"type": "image_url", "image_url": "string"}]
}
],
"temperature": number (0-1, default 0.7),
"top_p": number (0-1, default 1),
"max_tokens": number,
"stream": boolean,
"safe_prompt": boolean (enable content filtering),
"random_seed": number,
"response_format": {"type": "text" | "json_object"},
"tools": [
{
"type": "function",
"function": {
"name": "string",
"description": "string",
"parameters": {JSON Schema}
}
}
],
"tool_choice": "auto" | "any" | "none"
}
\`\`\`
## Response
\`\`\`json
{
"id": "cmpl-xxx",
"object": "chat.completion",
"model": "mistral-large-latest",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "string",
"tool_calls": [
{
"id": "xxx",
"type": "function",
"function": {"name": "string", "arguments": "JSON string"}
}
]
},
"finish_reason": "stop" | "length" | "tool_calls"
}
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
}
}
\`\`\`
## Available Models
- mistral-large-latest: Most capable
- mistral-small-latest: Fast, cost-effective
- codestral-latest: Code-specialized (256K context)
- ministral-8b-latest: Small, fast
- ministral-3b-latest: Smallest, fastest
- open-mistral-nemo: Open-weight model
- pixtral-large-latest: Multimodal (vision)
## Python SDK Example
\`\`\`python
from mistralai import Mistral
client = Mistral(api_key="YOUR_API_KEY")
response = client.chat.complete(
model="mistral-large-latest",
messages=[
{"role": "system", "content": "You are helpful"},
{"role": "user", "content": "Hello!"}
],
temperature=0.7,
max_tokens=1024
)
print(response.choices[0].message.content)
\`\`\`
## Node.js Example
\`\`\`javascript
import { Mistral } from "@mistralai/mistralai";
const client = new Mistral({ apiKey: "YOUR_API_KEY" });
const response = await client.chat.complete({
model: "mistral-large-latest",
messages: [
{ role: "user", content: "Hello!" }
],
maxTokens: 1024
});
console.log(response.choices[0].message.content);
\`\`\`
## cURL Example
\`\`\`bash
curl https://api.mistral.ai/v1/chat/completions \\
-H "Authorization: Bearer $MISTRAL_API_KEY" \\
-H "Content-Type: application/json" \\
-d '{
"model": "mistral-large-latest",
"messages": [{"role": "user", "content": "Hello!"}],
"temperature": 0.7
}'
\`\`\``
},
{
id: "mistral-embeddings",
title: "Mistral Embeddings API",
category: "embeddings",
provider: "mistral",
keywords: ["mistral", "embeddings", "vector", "semantic"],
content: `# Mistral Embeddings API
## Endpoint
POST https://api.mistral.ai/v1/embeddings
## Request
\`\`\`json
{
"model": "mistral-embed",
"input": ["text1", "text2"],
"encoding_format": "float"
}
\`\`\`
## Response
\`\`\`json
{
"id": "xxx",
"object": "list",
"data": [
{
"object": "embedding",
"index": 0,
"embedding": [0.1, 0.2, ...]
}
],
"model": "mistral-embed",
"usage": {"prompt_tokens": 10, "total_tokens": 10}
}
\`\`\`
## Model Details
- mistral-embed: 1024 dimensions
- Optimized for retrieval tasks
- Max input: 8192 tokens per request
## Python Example
\`\`\`python
from mistralai import Mistral
client = Mistral(api_key="YOUR_API_KEY")
response = client.embeddings.create(
model="mistral-embed",
inputs=["Hello world"]
)
embedding = response.data[0].embedding
\`\`\``
},
{
id: "mistral-function-calling",
title: "Mistral Function Calling",
category: "tools",
provider: "mistral",
keywords: ["mistral", "function", "tools", "tool calling"],
content: `# Mistral Function Calling
## Tool Definition
Same format as OpenAI:
\`\`\`json
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}
}
}
\`\`\`
## Tool Choice
- "auto": Model decides (default)
- "any": Model must call at least one tool
- "none": No tool calls
## Supported Models
- mistral-large-latest
- mistral-small-latest
- codestral-latest
- open-mistral-nemo
## Python Example
\`\`\`python
from mistralai import Mistral
client = Mistral(api_key="YOUR_API_KEY")
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get weather",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}
}
}
]
response = client.chat.complete(
model="mistral-large-latest",
messages=[{"role": "user", "content": "Weather in Paris?"}],
tools=tools,
tool_choice="auto"
)
# Check for tool calls
if response.choices[0].message.tool_calls:
for tc in response.choices[0].message.tool_calls:
result = execute(tc.function.name, tc.function.arguments)
response2 = client.chat.complete(
model="mistral-large-latest",
messages=[
{"role": "user", "content": "Weather in Paris?"},
{"role": "assistant", "content": "", "tool_calls": [tc]},
{"role": "tool", "tool_call_id": tc.id, "content": str(result)}
],
tools=tools
)
\`\`\``
},
{
id: "mistral-rate-limits",
title: "Mistral Rate Limits & Pricing",
category: "limits",
provider: "mistral",
keywords: ["mistral", "rate limit", "pricing", "cost"],
content: `# Mistral Rate Limits & Pricing
## Rate Limits
- Varies by plan
- Free tier: Limited requests
- Paid tier: Higher limits
## Pricing (per 1M tokens)
- mistral-large-latest: Input $2.00, Output $6.00
- mistral-small-latest: Input $0.20, Output $0.60
- codestral-latest: Input $0.30, Output $0.90
- ministral-8b-latest: Input $0.10, Output $0.10
- ministral-3b-latest: Input $0.04, Output $0.04
- open-mistral-nemo: Input $0.15, Output $0.15
- mistral-embed: $0.10
- pixtral-large-latest: Input $2.00, Output $6.00
## Context Windows
- mistral-large: 128K tokens
- mistral-small: 32K tokens
- codestral: 256K tokens
- ministral-8b: 128K tokens
- ministral-3b: 128K tokens
- open-mistral-nemo: 128K tokens
## Error Codes
- 400: Bad Request
- 401: Unauthorized
- 429: Rate limit exceeded
- 500: Internal error`
}
];