// Compiled CommonJS module (TypeScript build output — see sourceMappingURL at the bottom).
// Exports `geminiDocs`: a static array of reference-documentation entries for the
// Google Gemini API. Each entry is a plain object with:
//   id       - unique slug identifying the entry
//   title    - human-readable title
//   category - topical bucket; values used here: "chat", "tools", "multimodal", "limits", "output"
//   provider - always "gemini" in this file
//   keywords - search terms, presumably for matching queries to entries — TODO confirm against consumer
//   content  - the documentation text itself, a Markdown template literal
//              (endpoint URLs, request/response JSON shapes, SDK and cURL examples)
// NOTE(review): the `content` strings are runtime data — editing them changes program
// output. Model names, rate limits, and pricing are a point-in-time snapshot and may
// need refreshing against Google's current published docs.
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.geminiDocs = void 0;
exports.geminiDocs = [
// Core generateContent / streamGenerateContent endpoint: request & response JSON
// shape, available models, Python/Node SDK examples, cURL example.
{
id: "gemini-generate-content",
title: "Gemini Generate Content API",
category: "chat",
provider: "gemini",
keywords: ["gemini", "generate", "content", "chat", "google", "bard"],
content: `# Google Gemini Generate Content API
## Endpoint
POST https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key=YOUR_API_KEY
For streaming:
POST https://generativelanguage.googleapis.com/v1beta/models/{model}:streamGenerateContent?key=YOUR_API_KEY&alt=sse
## Request Body
\`\`\`json
{
"contents": [
{
"role": "user" | "model",
"parts": [
{"text": "string"},
{
"inline_data": {
"mime_type": "image/jpeg" | "image/png" | "audio/wav" | "video/mp4" | "application/pdf",
"data": "base64string"
}
},
{
"file_data": {
"mime_type": "application/pdf",
"file_uri": "https://..."
}
}
]
}
],
"system_instruction": {
"parts": [{"text": "You are a helpful assistant"}]
},
"generationConfig": {
"temperature": number (0-2),
"topP": number (0-1),
"topK": number,
"maxOutputTokens": number,
"stopSequences": string[],
"responseMimeType": "text/plain" | "application/json",
"responseSchema": {JSON Schema}
},
"tools": [
{
"functionDeclarations": [
{
"name": "string",
"description": "string",
"parameters": {JSON Schema}
}
]
},
{"googleSearch": {}},
{"codeExecution": {}}
],
"toolConfig": {
"functionCallingConfig": {
"mode": "AUTO" | "ANY" | "NONE"
}
},
"safetySettings": [
{
"category": "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_DANGEROUS_CONTENT",
"threshold": "BLOCK_LOW_AND_ABOVE" | "BLOCK_MEDIUM_AND_ABOVE" | "BLOCK_ONLY_HIGH" | "BLOCK_NONE"
}
]
}
\`\`\`
## Response
\`\`\`json
{
"candidates": [
{
"content": {
"role": "model",
"parts": [{"text": "Hello! How can I help you?"}]
},
"finishReason": "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION",
"safetyRatings": [...],
"citationMetadata": {...}
}
],
"usageMetadata": {
"promptTokenCount": 0,
"candidatesTokenCount": 0,
"totalTokenCount": 0,
"cachedContentTokenCount": 0
},
"promptFeedback": {
"safetyRatings": [...]
}
}
\`\`\`
## Available Models
- gemini-2.5-pro-preview: Most capable, multimodal
- gemini-2.0-flash: Fast, versatile, multimodal
- gemini-2.0-flash-lite: Fastest, cheapest
- gemini-1.5-pro: Previous gen, 2M context
- gemini-1.5-flash: Fast, 1M context
- gemini-embedding-exp: Embeddings model
## Python SDK Example
\`\`\`python
import google.generativeai as genai
genai.configure(api_key="AIza...")
model = genai.GenerativeModel("gemini-2.0-flash")
response = model.generate_content(
"What is Python?",
generation_config={
"temperature": 0.7,
"max_output_tokens": 1024
}
)
print(response.text)
\`\`\`
## Node.js SDK Example
\`\`\`javascript
import { GoogleGenerativeAI } from "@google/generative-ai";
const genai = new GoogleGenerativeAI("AIza...");
const model = genai.getGenerativeModel({ model: "gemini-2.0-flash" });
const result = await model.generateContent("What is Python?");
console.log(result.response.text());
\`\`\`
## cURL Example
\`\`\`bash
curl "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=$GEMINI_API_KEY" \\
-H "Content-Type: application/json" \\
-d '{
"contents": [{
"parts": [{"text": "What is Python?"}]
}]
}'
\`\`\``
},
// Function calling: declaration schema, calling modes, Python round-trip example
// (detect call -> execute -> send FunctionResponse back), built-in tools.
{
id: "gemini-function-calling",
title: "Gemini Function Calling",
category: "tools",
provider: "gemini",
keywords: ["gemini", "function", "tools", "tool calling", "function calling"],
content: `# Gemini Function Calling
## Tool Definition
\`\`\`json
{
"functionDeclarations": [
{
"name": "get_weather",
"description": "Get current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
]
}
\`\`\`
## Function Calling Modes
- "AUTO": Model decides (default)
- "ANY": Model must call at least one function
- "NONE": Model cannot call functions
- "ANY" with allowedFunctionNames: Restrict to specific functions
## Handling Function Calls
\`\`\`python
import google.generativeai as genai
model = genai.GenerativeModel(
"gemini-2.0-flash",
tools=[{
"function_declarations": [
{
"name": "get_weather",
"description": "Get weather",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"}
},
"required": ["location"]
}
}
]
}]
)
response = model.generate_content("What's weather in London?")
# Check for function calls
for part in response.candidates[0].content.parts:
if part.function_call:
name = part.function_call.name
args = dict(part.function_call.args)
result = execute_function(name, args)
# Send result back
response2 = model.generate_content(
genai.protos.Content(
parts=[genai.protos.Part(
function_response=genai.protos.FunctionResponse(
name=name,
response={"result": result}
)
)],
role="function"
)
)
\`\`\`
## Built-in Tools
- googleSearch: Ground responses in Google Search
- codeExecution: Execute Python code
- retriever: Search your Vertex AI Search data`
},
// Multimodal input: supported media formats and the inline_data / file_data part
// shapes for image, audio, and video, plus a Python image example.
{
id: "gemini-multimodal",
title: "Gemini Multimodal (Images, Audio, Video)",
category: "multimodal",
provider: "gemini",
keywords: ["gemini", "image", "audio", "video", "multimodal", "vision"],
content: `# Gemini Multimodal Input
## Supported Input Types
- Text
- Images: JPEG, PNG, WEBP, HEIC, HEIF
- Audio: WAV, MP3, AIFF, AAC, OGG, FLAC
- Video: MP4, MPEG, MOV, AVI, FLV, MPG, WEBM, WMV, 3GPP
- PDF documents
## Image Input (Inline)
\`\`\`json
{
"contents": [{
"parts": [
{"text": "Describe this image"},
{
"inline_data": {
"mime_type": "image/jpeg",
"data": "base64encoded..."
}
}
]
}]
}
\`\`\`
## Image Input (URL via File Data)
\`\`\`json
{
"contents": [{
"parts": [
{"text": "Describe this image"},
{
"file_data": {
"mime_type": "image/jpeg",
"file_uri": "https://example.com/image.jpg"
}
}
]
}]
}
\`\`\`
## Audio Input
\`\`\`json
{
"contents": [{
"parts": [
{"text": "Transcribe this audio"},
{
"inline_data": {
"mime_type": "audio/wav",
"data": "base64encoded..."
}
}
]
}]
}
\`\`\`
## Video Input
\`\`\`json
{
"contents": [{
"parts": [
{"text": "Summarize this video"},
{
"inline_data": {
"mime_type": "video/mp4",
"data": "base64encoded..."
}
}
]
}]
}
\`\`\`
## Python Example with Image
\`\`\`python
import google.generativeai as genai
import base64
model = genai.GenerativeModel("gemini-2.0-flash")
with open("image.jpg", "rb") as f:
image_data = base64.b64encode(f.read()).decode()
response = model.generate_content([
"Describe this image in detail",
{
"inline_data": {
"mime_type": "image/jpeg",
"data": image_data
}
}
])
print(response.text)
\`\`\``
},
// Rate limits, pricing, context windows, and safety categories.
// NOTE(review): snapshot numbers — verify against current Google pricing before relying on them.
{
id: "gemini-rate-limits",
title: "Gemini Rate Limits & Pricing",
category: "limits",
provider: "gemini",
keywords: ["gemini", "rate limit", "pricing", "cost", "free"],
content: `# Gemini Rate Limits & Pricing
## Free Tier Limits
- gemini-2.0-flash: 15 RPM, 1M TPM, 1500 RPD
- gemini-2.0-flash-lite: 15 RPM, 1M TPM, 1500 RPD
- gemini-1.5-flash: 15 RPM, 1M TPM, 1500 RPD
- gemini-1.5-pro: 2 RPM, 32K TPM, 50 RPD
## Paid Tier (Pay-as-you-go)
- gemini-2.0-flash: 1000 RPM, 4M TPM
- gemini-2.0-flash-lite: Higher limits
- gemini-1.5-pro: 1000 RPM, 4M TPM
## Pricing (per 1M tokens)
### gemini-2.0-flash
- Input (<=128K): $0.10
- Output (<=128K): $0.40
- Input (>128K): $0.20
- Output (>128K): $0.80
### gemini-2.0-flash-lite
- Input: $0.075
- Output: $0.30
### gemini-1.5-pro
- Input (<=128K): $1.25
- Output (<=128K): $5.00
- Input (>128K): $2.50
- Output (>128K): $10.00
## Context Windows
- gemini-2.0-flash: 1M tokens
- gemini-2.0-flash-lite: 1M tokens
- gemini-1.5-pro: 2M tokens
- gemini-1.5-flash: 1M tokens
## Safety Categories
- HARM_CATEGORY_HARASSMENT
- HARM_CATEGORY_HATE_SPEECH
- HARM_CATEGORY_SEXUALLY_EXPLICIT
- HARM_CATEGORY_DANGEROUS_CONTENT
Thresholds: BLOCK_NONE, BLOCK_LOW_AND_ABOVE, BLOCK_MEDIUM_AND_ABOVE, BLOCK_ONLY_HIGH`
},
// Structured output: responseMimeType JSON mode, responseSchema strict mode,
// Pydantic-backed Python example, and supported schema types.
{
id: "gemini-structured-output",
title: "Gemini Structured Output (JSON)",
category: "output",
provider: "gemini",
keywords: ["gemini", "json", "structured", "schema", "format"],
content: `# Gemini Structured Output
## JSON Mode (Simple)
\`\`\`json
{
"generationConfig": {
"responseMimeType": "application/json"
}
}
\`\`\`
## JSON Schema Mode (Strict)
\`\`\`json
{
"generationConfig": {
"responseMimeType": "application/json",
"responseSchema": {
"type": "object",
"properties": {
"name": {"type": "string"},
"age": {"type": "integer"},
"skills": {
"type": "array",
"items": {"type": "string"}
}
},
"required": ["name", "age"]
}
}
}
\`\`\`
## Python SDK with Pydantic
\`\`\`python
import google.generativeai as genai
from pydantic import BaseModel
class Person(BaseModel):
name: str
age: int
skills: list[str]
model = genai.GenerativeModel("gemini-2.0-flash")
response = model.generate_content(
"Create a profile for a fictional developer",
generation_config={
"response_mime_type": "application/json",
"response_schema": Person
}
)
import json
person = json.loads(response.text)
\`\`\`
## Schema Types Supported
- string, number, integer, boolean
- array (with items)
- object (with properties)
- enum
- nullable (nullable: true)`
}
];
//# sourceMappingURL=gemini.js.map