mirror of
https://github.com/ksyasuda/dotfiles.git
synced 2026-03-22 06:11:27 -07:00
update skills
This commit is contained in:
133
.agents/skills/cloudflare-deploy/references/d1/README.md
Normal file
133
.agents/skills/cloudflare-deploy/references/d1/README.md
Normal file
@@ -0,0 +1,133 @@
|
||||
# Cloudflare D1 Database
|
||||
|
||||
Expert guidance for Cloudflare D1, a serverless SQLite database designed for horizontal scale-out across multiple databases.
|
||||
|
||||
## Overview
|
||||
|
||||
D1 is Cloudflare's managed, serverless database with:
|
||||
- SQLite SQL semantics and compatibility
|
||||
- Built-in disaster recovery via Time Travel point-in-time recovery (7 days on the free tier, 30 days on paid plans)
|
||||
- Horizontal scale-out architecture (10 GB per database)
|
||||
- Worker and HTTP API access
|
||||
- Pricing based on query and storage costs only
|
||||
|
||||
**Architecture Philosophy**: D1 is optimized for per-user, per-tenant, or per-entity database patterns rather than single large databases.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Create database
|
||||
wrangler d1 create <database-name>
|
||||
|
||||
# Execute migration
|
||||
wrangler d1 migrations apply <db-name> --remote
|
||||
|
||||
# Local development
|
||||
wrangler dev
|
||||
```
|
||||
|
||||
## Core Query Methods
|
||||
|
||||
```typescript
|
||||
// .all() - Returns all rows; .first() - First row or null; .first(col) - Single column value
|
||||
// .run() - INSERT/UPDATE/DELETE; .raw() - Array of arrays (efficient)
|
||||
const { results, success, meta } = await env.DB.prepare('SELECT * FROM users WHERE active = ?').bind(true).all();
|
||||
const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
|
||||
```
|
||||
|
||||
## Batch Operations
|
||||
|
||||
```typescript
|
||||
// Multiple queries in single round trip (atomic transaction)
|
||||
const results = await env.DB.batch([
|
||||
env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(1),
|
||||
env.DB.prepare('SELECT * FROM posts WHERE author_id = ?').bind(1),
|
||||
env.DB.prepare('UPDATE users SET last_access = ? WHERE id = ?').bind(Date.now(), 1)
|
||||
]);
|
||||
```
|
||||
|
||||
## Sessions API (Paid Plans)
|
||||
|
||||
```typescript
|
||||
// Create long-running session for analytics/migrations (up to 15 minutes)
|
||||
const session = env.DB.withSession();
|
||||
try {
|
||||
await session.prepare('CREATE INDEX idx_heavy ON large_table(column)').run();
|
||||
await session.prepare('ANALYZE').run();
|
||||
} finally {
|
||||
session.close(); // Always close to release resources
|
||||
}
|
||||
```
|
||||
|
||||
## Read Replication (Paid Plans)
|
||||
|
||||
```typescript
|
||||
// Read from nearest replica for lower latency (automatic failover)
|
||||
const user = await env.DB_REPLICA.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
|
||||
|
||||
// Writes always go to primary
|
||||
await env.DB.prepare('UPDATE users SET last_login = ? WHERE id = ?').bind(Date.now(), userId).run();
|
||||
```
|
||||
|
||||
## Platform Limits
|
||||
|
||||
| Limit | Free Tier | Paid Plans |
|
||||
|-------|-----------|------------|
|
||||
| Database size | 500 MB | 10 GB per database |
|
||||
| Row size | 1 MB max | 1 MB max |
|
||||
| Query timeout | 30 seconds | 30 seconds (up to 15 min with Sessions API) |
|
||||
| Batch size | 1,000 statements | 10,000 statements |
|
||||
| Time Travel retention | 7 days | 30 days |
|
||||
| Read replicas | Not available | Yes (paid add-on) |
|
||||
|
||||
**Pricing**: $5/month per database beyond free tier + $0.001 per 1K reads + $1 per 1M writes + $0.75/GB storage/month
|
||||
|
||||
## CLI Commands
|
||||
|
||||
```bash
|
||||
# Database management
|
||||
wrangler d1 create <db-name>
|
||||
wrangler d1 list
|
||||
wrangler d1 delete <db-name>
|
||||
|
||||
# Migrations
|
||||
wrangler d1 migrations create <db-name> <migration-name> # Create new migration file
|
||||
wrangler d1 migrations apply <db-name> --remote # Apply pending migrations
|
||||
wrangler d1 migrations apply <db-name> --local # Apply locally
|
||||
wrangler d1 migrations list <db-name> --remote # Show applied migrations
|
||||
|
||||
# Direct SQL execution
|
||||
wrangler d1 execute <db-name> --remote --command="SELECT * FROM users"
|
||||
wrangler d1 execute <db-name> --local --file=./schema.sql
|
||||
|
||||
# Backups & Import/Export
|
||||
wrangler d1 export <db-name> --remote --output=./backup.sql # Full export with schema
|
||||
wrangler d1 export <db-name> --remote --no-schema --output=./data.sql # Data only
|
||||
wrangler d1 time-travel restore <db-name> --timestamp="2024-01-15T14:30:00Z" # Point-in-time recovery
|
||||
|
||||
# Development
|
||||
wrangler dev --persist-to=./.wrangler/state
|
||||
```
|
||||
|
||||
## Reading Order
|
||||
|
||||
**Start here**: Quick Start above → configuration.md (setup) → api.md (queries)
|
||||
|
||||
**Common tasks**:
|
||||
- First time setup: configuration.md → Run migrations
|
||||
- Adding queries: api.md → Prepared statements
|
||||
- Pagination/caching: patterns.md
|
||||
- Production optimization: Read Replication + Sessions API (this file)
|
||||
- Debugging: gotchas.md
|
||||
|
||||
## In This Reference
|
||||
|
||||
- [configuration.md](./configuration.md) - wrangler.jsonc setup, migrations, TypeScript types, ORMs, local dev
|
||||
- [api.md](./api.md) - Query methods (.all/.first/.run/.raw), batch, sessions, read replicas, error handling
|
||||
- [patterns.md](./patterns.md) - Pagination, bulk operations, caching, multi-tenant, sessions, analytics
|
||||
- [gotchas.md](./gotchas.md) - SQL injection, limits by plan tier, performance, common errors
|
||||
|
||||
## See Also
|
||||
|
||||
- [workers](../workers/) - Worker runtime and fetch handler patterns
|
||||
- [hyperdrive](../hyperdrive/) - Connection pooling for external databases
|
||||
196
.agents/skills/cloudflare-deploy/references/d1/api.md
Normal file
196
.agents/skills/cloudflare-deploy/references/d1/api.md
Normal file
@@ -0,0 +1,196 @@
|
||||
# D1 API Reference
|
||||
|
||||
## Prepared Statements (Required for Security)
|
||||
|
||||
```typescript
|
||||
// ❌ NEVER: Direct string interpolation (SQL injection risk)
|
||||
const result = await env.DB.prepare(`SELECT * FROM users WHERE id = ${userId}`).all();
|
||||
|
||||
// ✅ CORRECT: Prepared statements with bind()
|
||||
const result = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).all();
|
||||
|
||||
// Multiple parameters
|
||||
const result = await env.DB.prepare('SELECT * FROM users WHERE email = ? AND active = ?').bind(email, true).all();
|
||||
```
|
||||
|
||||
## Query Execution Methods
|
||||
|
||||
```typescript
|
||||
// .all() - Returns all rows
|
||||
const { results, success, meta } = await env.DB.prepare('SELECT * FROM users WHERE active = ?').bind(true).all();
|
||||
// results: Array of row objects; success: boolean
|
||||
// meta: { duration: number, rows_read: number, rows_written: number }
|
||||
|
||||
// .first() - Returns first row or null
|
||||
const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
|
||||
|
||||
// .first(columnName) - Returns single column value
|
||||
const email = await env.DB.prepare('SELECT email FROM users WHERE id = ?').bind(userId).first('email');
|
||||
// Returns string | number | null
|
||||
|
||||
// .run() - For INSERT/UPDATE/DELETE (no row data returned)
|
||||
const result = await env.DB.prepare('UPDATE users SET last_login = ? WHERE id = ?').bind(Date.now(), userId).run();
|
||||
// result.meta: { duration, rows_read, rows_written, last_row_id, changes }
|
||||
|
||||
// .raw() - Returns array of arrays (efficient for large datasets)
|
||||
const rawResults = await env.DB.prepare('SELECT id, name FROM users').raw();
|
||||
// [[1, 'Alice'], [2, 'Bob']]
|
||||
```
|
||||
|
||||
## Batch Operations
|
||||
|
||||
```typescript
|
||||
// Execute multiple queries in single round trip (atomic transaction)
|
||||
const results = await env.DB.batch([
|
||||
env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(1),
|
||||
env.DB.prepare('SELECT * FROM posts WHERE author_id = ?').bind(1),
|
||||
env.DB.prepare('UPDATE users SET last_access = ? WHERE id = ?').bind(Date.now(), 1)
|
||||
]);
|
||||
// results is array: [result1, result2, result3]
|
||||
|
||||
// Batch with same prepared statement, different params
|
||||
const userIds = [1, 2, 3];
|
||||
const stmt = env.DB.prepare('SELECT * FROM users WHERE id = ?');
|
||||
const results = await env.DB.batch(userIds.map(id => stmt.bind(id)));
|
||||
```
|
||||
|
||||
## Transactions (via batch)
|
||||
|
||||
```typescript
|
||||
// D1 executes batch() as atomic transaction - all succeed or all fail
|
||||
const results = await env.DB.batch([
|
||||
env.DB.prepare('INSERT INTO accounts (id, balance) VALUES (?, ?)').bind(1, 100),
|
||||
env.DB.prepare('INSERT INTO accounts (id, balance) VALUES (?, ?)').bind(2, 200),
|
||||
env.DB.prepare('UPDATE accounts SET balance = balance - ? WHERE id = ?').bind(50, 1),
|
||||
env.DB.prepare('UPDATE accounts SET balance = balance + ? WHERE id = ?').bind(50, 2)
|
||||
]);
|
||||
```
|
||||
|
||||
## Sessions API (Paid Plans)
|
||||
|
||||
Long-running sessions for operations exceeding 30s timeout (up to 15 min).
|
||||
|
||||
```typescript
|
||||
const session = env.DB.withSession({ timeout: 600 }); // 10 min (1-900s)
|
||||
try {
|
||||
await session.prepare('CREATE INDEX idx_large ON big_table(column)').run();
|
||||
await session.prepare('ANALYZE').run();
|
||||
} finally {
|
||||
session.close(); // CRITICAL: always close to prevent leaks
|
||||
}
|
||||
```
|
||||
|
||||
**Use cases**: Migrations, ANALYZE, large index creation, bulk transformations
|
||||
|
||||
## Read Replication (Paid Plans)
|
||||
|
||||
Routes queries to nearest replica for lower latency. Writes always go to primary.
|
||||
|
||||
```typescript
|
||||
interface Env {
|
||||
DB: D1Database; // Primary (writes)
|
||||
DB_REPLICA: D1Database; // Replica (reads)
|
||||
}
|
||||
|
||||
// Reads: use replica
|
||||
const user = await env.DB_REPLICA.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
|
||||
|
||||
// Writes: use primary
|
||||
await env.DB.prepare('UPDATE users SET last_login = ? WHERE id = ?').bind(Date.now(), userId).run();
|
||||
|
||||
// Read-after-write: use primary for consistency (replication lag is typically 100 ms–2 s)
|
||||
await env.DB.prepare('INSERT INTO posts (title) VALUES (?)').bind(title).run();
|
||||
const post = await env.DB.prepare('SELECT * FROM posts WHERE title = ?').bind(title).first(); // Primary
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
```typescript
|
||||
async function getUser(userId: number, env: Env): Promise<Response> {
|
||||
try {
|
||||
const result = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).all();
|
||||
if (!result.success) return new Response('Database error', { status: 500 });
|
||||
if (result.results.length === 0) return new Response('User not found', { status: 404 });
|
||||
return Response.json(result.results[0]);
|
||||
} catch (error) {
|
||||
return new Response('Internal error', { status: 500 });
|
||||
}
|
||||
}
|
||||
|
||||
// Constraint violations
|
||||
try {
|
||||
await env.DB.prepare('INSERT INTO users (email, name) VALUES (?, ?)').bind(email, name).run();
|
||||
} catch (error) {
|
||||
if (error.message?.includes('UNIQUE constraint failed')) return new Response('Email exists', { status: 409 });
|
||||
throw error;
|
||||
}
|
||||
```
|
||||
|
||||
## REST API (HTTP) Access
|
||||
|
||||
Access D1 from external services (non-Worker contexts) using Cloudflare API.
|
||||
|
||||
```typescript
|
||||
// Single query
|
||||
const response = await fetch(
|
||||
`https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/query`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${CLOUDFLARE_API_TOKEN}`,
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
sql: 'SELECT * FROM users WHERE id = ?',
|
||||
params: [userId]
|
||||
})
|
||||
}
|
||||
);
|
||||
|
||||
const { result, success, errors } = await response.json();
|
||||
// result: [{ results: [...], success: true, meta: {...} }]
|
||||
|
||||
// Batch queries via HTTP
|
||||
const response = await fetch(
|
||||
`https://api.cloudflare.com/client/v4/accounts/${ACCOUNT_ID}/d1/database/${DATABASE_ID}/query`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${CLOUDFLARE_API_TOKEN}`,
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify([
|
||||
{ sql: 'SELECT * FROM users WHERE id = ?', params: [1] },
|
||||
{ sql: 'SELECT * FROM posts WHERE author_id = ?', params: [1] }
|
||||
])
|
||||
}
|
||||
);
|
||||
```
|
||||
|
||||
**Use cases**: Server-side scripts, CI/CD migrations, administrative tools, non-Worker integrations
|
||||
|
||||
## Testing & Debugging
|
||||
|
||||
```typescript
|
||||
// Vitest with unstable_dev
|
||||
import { unstable_dev } from 'wrangler';
|
||||
describe('D1', () => {
|
||||
let worker: Awaited<ReturnType<typeof unstable_dev>>;
|
||||
beforeAll(async () => { worker = await unstable_dev('src/index.ts'); });
|
||||
afterAll(async () => { await worker.stop(); });
|
||||
it('queries users', async () => { expect((await worker.fetch('/users')).status).toBe(200); });
|
||||
});
|
||||
|
||||
// Debug query performance
|
||||
const result = await env.DB.prepare('SELECT * FROM users').all();
|
||||
console.log('Duration:', result.meta.duration, 'ms');
|
||||
|
||||
// Query plan analysis
|
||||
const plan = await env.DB.prepare('EXPLAIN QUERY PLAN SELECT * FROM users WHERE email = ?').bind(email).all();
|
||||
```
|
||||
|
||||
```bash
|
||||
# Inspect local database
|
||||
sqlite3 .wrangler/state/v3/d1/<database-id>.sqlite
|
||||
.tables
.schema users
PRAGMA table_info(users);
|
||||
```
|
||||
188
.agents/skills/cloudflare-deploy/references/d1/configuration.md
Normal file
188
.agents/skills/cloudflare-deploy/references/d1/configuration.md
Normal file
@@ -0,0 +1,188 @@
|
||||
# D1 Configuration
|
||||
|
||||
## wrangler.jsonc Setup
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"name": "your-worker-name",
|
||||
"main": "src/index.ts",
|
||||
"compatibility_date": "2025-01-01", // Use current date for new projects
|
||||
"d1_databases": [
|
||||
{
|
||||
"binding": "DB", // Env variable name
|
||||
"database_name": "your-db-name", // Human-readable name
|
||||
"database_id": "your-database-id", // UUID from dashboard/CLI
|
||||
"migrations_dir": "migrations" // Optional: default is "migrations"
|
||||
},
|
||||
// Read replica (paid plans only)
|
||||
{
|
||||
"binding": "DB_REPLICA",
|
||||
"database_name": "your-db-name",
|
||||
"database_id": "your-database-id" // Same ID, different binding
|
||||
},
|
||||
// Multiple databases
|
||||
{
|
||||
"binding": "ANALYTICS_DB",
|
||||
"database_name": "analytics-db",
|
||||
"database_id": "yyy-yyy-yyy"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## TypeScript Types
|
||||
|
||||
```typescript
|
||||
interface Env { DB: D1Database; ANALYTICS_DB?: D1Database; }
|
||||
|
||||
export default {
|
||||
async fetch(request: Request, env: Env, ctx: ExecutionContext): Promise<Response> {
|
||||
const result = await env.DB.prepare('SELECT * FROM users').all();
|
||||
return Response.json(result.results);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Migrations
|
||||
|
||||
File structure: `migrations/0001_initial_schema.sql`, `0002_add_posts.sql`, etc.
|
||||
|
||||
### Example Migration
|
||||
|
||||
```sql
|
||||
-- migrations/0001_initial_schema.sql
|
||||
CREATE TABLE IF NOT EXISTS users (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
email TEXT UNIQUE NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
created_at TEXT DEFAULT CURRENT_TIMESTAMP,
|
||||
updated_at TEXT DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
CREATE INDEX idx_users_email ON users(email);
|
||||
|
||||
CREATE TABLE IF NOT EXISTS posts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
user_id INTEGER NOT NULL,
|
||||
title TEXT NOT NULL,
|
||||
content TEXT,
|
||||
published BOOLEAN DEFAULT 0,
|
||||
created_at TEXT DEFAULT CURRENT_TIMESTAMP,
|
||||
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX idx_posts_user_id ON posts(user_id);
|
||||
CREATE INDEX idx_posts_published ON posts(published);
|
||||
```
|
||||
|
||||
### Running Migrations
|
||||
|
||||
```bash
|
||||
# Create new migration file
|
||||
wrangler d1 migrations create <db-name> add_users_table
|
||||
# Creates: migrations/0001_add_users_table.sql
|
||||
|
||||
# Apply migrations
|
||||
wrangler d1 migrations apply <db-name> --local # Apply to local DB
|
||||
wrangler d1 migrations apply <db-name> --remote # Apply to production DB
|
||||
|
||||
# List applied migrations
|
||||
wrangler d1 migrations list <db-name> --remote
|
||||
|
||||
# Direct SQL execution (bypasses migration tracking)
|
||||
wrangler d1 execute <db-name> --remote --command="SELECT * FROM users"
|
||||
wrangler d1 execute <db-name> --local --file=./schema.sql
|
||||
```
|
||||
|
||||
**Migration tracking**: Wrangler creates `d1_migrations` table automatically to track applied migrations
|
||||
|
||||
## Indexing Strategy
|
||||
|
||||
```sql
|
||||
-- Index frequently queried columns
|
||||
CREATE INDEX idx_users_email ON users(email);
|
||||
|
||||
-- Composite indexes for multi-column queries
|
||||
CREATE INDEX idx_posts_user_published ON posts(user_id, published);
|
||||
|
||||
-- Covering indexes (include queried columns)
|
||||
CREATE INDEX idx_users_email_name ON users(email, name);
|
||||
|
||||
-- Partial indexes for filtered queries
|
||||
CREATE INDEX idx_active_users ON users(email) WHERE active = 1;
|
||||
|
||||
-- Check if query uses index
|
||||
EXPLAIN QUERY PLAN SELECT * FROM users WHERE email = ?;
|
||||
```
|
||||
|
||||
## Drizzle ORM
|
||||
|
||||
```typescript
|
||||
// drizzle.config.ts
|
||||
export default {
|
||||
schema: './src/schema.ts', out: './migrations', dialect: 'sqlite', driver: 'd1-http',
|
||||
dbCredentials: { accountId: process.env.CLOUDFLARE_ACCOUNT_ID!, databaseId: process.env.D1_DATABASE_ID!, token: process.env.CLOUDFLARE_API_TOKEN! }
|
||||
} satisfies Config;
|
||||
|
||||
// schema.ts
|
||||
import { sqliteTable, text, integer } from 'drizzle-orm/sqlite-core';
|
||||
export const users = sqliteTable('users', {
|
||||
id: integer('id').primaryKey({ autoIncrement: true }),
|
||||
email: text('email').notNull().unique(),
|
||||
name: text('name').notNull()
|
||||
});
|
||||
|
||||
// worker.ts
|
||||
import { drizzle } from 'drizzle-orm/d1';
|
||||
import { users } from './schema';
|
||||
export default {
|
||||
async fetch(request: Request, env: Env) {
|
||||
const db = drizzle(env.DB);
|
||||
return Response.json(await db.select().from(users));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Import & Export
|
||||
|
||||
```bash
|
||||
# Export full database (schema + data)
|
||||
wrangler d1 export <db-name> --remote --output=./backup.sql
|
||||
|
||||
# Export data only (no schema)
|
||||
wrangler d1 export <db-name> --remote --no-schema --output=./data-only.sql
|
||||
|
||||
# Export with foreign key constraints preserved
|
||||
# (Default: foreign keys are disabled during export for import compatibility)
|
||||
|
||||
# Import SQL file
|
||||
wrangler d1 execute <db-name> --remote --file=./backup.sql
|
||||
|
||||
# Limitations
|
||||
# - BLOB data may not export correctly (use R2 for binary files)
|
||||
# - Very large exports (>1GB) may timeout (split into chunks)
|
||||
# - Import is NOT atomic (use batch() for transactional imports in Workers)
|
||||
```
|
||||
|
||||
## Plan Tiers
|
||||
|
||||
| Feature | Free | Paid |
|
||||
|---------|------|------|
|
||||
| Database size | 500 MB | 10 GB |
|
||||
| Batch size | 1,000 statements | 10,000 statements |
|
||||
| Time Travel | 7 days | 30 days |
|
||||
| Read replicas | ❌ | ✅ |
|
||||
| Sessions API | ❌ | ✅ (up to 15 min) |
|
||||
| Pricing | Free | $5/mo + usage |
|
||||
|
||||
**Usage pricing** (paid plans): $0.001 per 1K reads + $1 per 1M writes + $0.75/GB storage/month
|
||||
|
||||
## Local Development
|
||||
|
||||
```bash
|
||||
wrangler dev --persist-to=./.wrangler/state # Persist across restarts
|
||||
# Local DB: .wrangler/state/v3/d1/<database-id>.sqlite
|
||||
sqlite3 .wrangler/state/v3/d1/<database-id>.sqlite # Inspect
|
||||
|
||||
# Local dev uses free tier limits by default
|
||||
```
|
||||
98
.agents/skills/cloudflare-deploy/references/d1/gotchas.md
Normal file
98
.agents/skills/cloudflare-deploy/references/d1/gotchas.md
Normal file
@@ -0,0 +1,98 @@
|
||||
# D1 Gotchas & Troubleshooting
|
||||
|
||||
## Common Errors
|
||||
|
||||
### "SQL Injection Vulnerability"
|
||||
|
||||
**Cause:** Using string interpolation instead of prepared statements with bind()
|
||||
**Solution:** ALWAYS use prepared statements: `env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).all()` instead of string interpolation, which allows attackers to inject malicious SQL
|
||||
|
||||
### "no such table"
|
||||
|
||||
**Cause:** Table doesn't exist because migrations haven't been run, or using wrong database binding
|
||||
**Solution:** Run migrations using `wrangler d1 migrations apply <db-name> --remote` and verify binding name in wrangler.jsonc matches code
|
||||
|
||||
### "UNIQUE constraint failed"
|
||||
|
||||
**Cause:** Attempting to insert duplicate value in column with UNIQUE constraint
|
||||
**Solution:** Catch error and return 409 Conflict status code
|
||||
|
||||
### "Query Timeout (30s exceeded)"
|
||||
|
||||
**Cause:** Query execution exceeds 30 second timeout limit
|
||||
**Solution:** Break into smaller queries, add indexes to speed up queries, or reduce dataset size
|
||||
|
||||
### "N+1 Query Problem"
|
||||
|
||||
**Cause:** Making multiple individual queries in a loop instead of single optimized query
|
||||
**Solution:** Use JOIN to fetch related data in single query or use `batch()` method for multiple queries
|
||||
|
||||
### "Missing Indexes"
|
||||
|
||||
**Cause:** Queries performing full table scans without indexes
|
||||
**Solution:** Use `EXPLAIN QUERY PLAN` to check if index is used, then create index with `CREATE INDEX idx_users_email ON users(email)`
|
||||
|
||||
### "Boolean Type Issues"
|
||||
|
||||
**Cause:** SQLite uses INTEGER (0/1) not native boolean type
|
||||
**Solution:** Bind 1 or 0 instead of true/false when working with boolean values
|
||||
|
||||
### "Date/Time Type Issues"
|
||||
|
||||
**Cause:** SQLite doesn't have native DATE/TIME types
|
||||
**Solution:** Use TEXT (ISO 8601 format) or INTEGER (unix timestamp) for date/time values
|
||||
|
||||
## Plan Tier Limits
|
||||
|
||||
| Limit | Free Tier | Paid Plans | Notes |
|
||||
|-------|-----------|------------|-------|
|
||||
| Database size | 500 MB | 10 GB | Design for multiple per-tenant databases on paid plans |
|
||||
| Row size | 1 MB | 1 MB | Store large files in R2, not D1 |
|
||||
| Query timeout | 30s | 30s (900s with sessions) | Use sessions API for migrations |
|
||||
| Batch size | 1,000 statements | 10,000 statements | Split large batches accordingly |
|
||||
| Time Travel | 7 days | 30 days | Point-in-time recovery window |
|
||||
| Read replicas | ❌ Not available | ✅ Available | Paid add-on for lower latency |
|
||||
| Sessions API | ❌ Not available | ✅ Up to 15 min | For migrations and heavy operations |
|
||||
| Concurrent requests | 10,000/min | Higher | Contact support for custom limits |
|
||||
|
||||
## Production Gotchas
|
||||
|
||||
### "Batch size exceeded"
|
||||
|
||||
**Cause:** Attempting to send >1,000 statements on free tier or >10,000 on paid
|
||||
**Solution:** Chunk batches: `for (let i = 0; i < stmts.length; i += MAX_BATCH) await env.DB.batch(stmts.slice(i, i + MAX_BATCH))`
|
||||
|
||||
### "Session not closed / resource leak"
|
||||
|
||||
**Cause:** Forgot to call `session.close()` after using sessions API
|
||||
**Solution:** Always use try/finally block: `try { await session.prepare(...) } finally { session.close() }`
|
||||
|
||||
### "Replication lag causing stale reads"
|
||||
|
||||
**Cause:** Reading from replica immediately after write - replication lag can be 100ms-2s
|
||||
**Solution:** Use primary for read-after-write: `await env.DB.prepare(...)` not `env.DB_REPLICA`
|
||||
|
||||
### "Migration applied to local but not remote"
|
||||
|
||||
**Cause:** Forgot `--remote` flag when applying migrations
|
||||
**Solution:** Always run `wrangler d1 migrations apply <db-name> --remote` for production
|
||||
|
||||
### "Foreign key constraint failed"
|
||||
|
||||
**Cause:** Inserting row with FK to non-existent parent, or deleting parent before children
|
||||
**Solution:** Enable FK enforcement: `PRAGMA foreign_keys = ON;` and use ON DELETE CASCADE in schema
|
||||
|
||||
### "BLOB data corrupted on export"
|
||||
|
||||
**Cause:** D1 export may not handle BLOB correctly
|
||||
**Solution:** Store binary files in R2, only store R2 URLs/keys in D1
|
||||
|
||||
### "Database size approaching limit"
|
||||
|
||||
**Cause:** Storing too much data in single database
|
||||
**Solution:** Horizontal scale-out: create per-tenant/per-user databases, archive old data, or upgrade to paid plan
|
||||
|
||||
### "Local dev vs production behavior differs"
|
||||
|
||||
**Cause:** Local uses SQLite file, production uses distributed D1 - different performance/limits
|
||||
**Solution:** Always test migrations on remote with `--remote` flag before production rollout
|
||||
189
.agents/skills/cloudflare-deploy/references/d1/patterns.md
Normal file
189
.agents/skills/cloudflare-deploy/references/d1/patterns.md
Normal file
@@ -0,0 +1,189 @@
|
||||
# D1 Patterns & Best Practices
|
||||
|
||||
## Pagination
|
||||
|
||||
```typescript
|
||||
async function getUsers({ page, pageSize }: { page: number; pageSize: number }, env: Env) {
|
||||
const offset = (page - 1) * pageSize;
|
||||
const [countResult, dataResult] = await env.DB.batch([
|
||||
env.DB.prepare('SELECT COUNT(*) as total FROM users'),
|
||||
env.DB.prepare('SELECT * FROM users ORDER BY created_at DESC LIMIT ? OFFSET ?').bind(pageSize, offset)
|
||||
]);
|
||||
return { data: dataResult.results, total: countResult.results[0].total, page, pageSize, totalPages: Math.ceil(countResult.results[0].total / pageSize) };
|
||||
}
|
||||
```
|
||||
|
||||
## Conditional Queries
|
||||
|
||||
```typescript
|
||||
async function searchUsers(filters: { name?: string; email?: string; active?: boolean }, env: Env) {
|
||||
const conditions: string[] = [], params: (string | number | boolean | null)[] = [];
|
||||
if (filters.name) { conditions.push('name LIKE ?'); params.push(`%${filters.name}%`); }
|
||||
if (filters.email) { conditions.push('email = ?'); params.push(filters.email); }
|
||||
if (filters.active !== undefined) { conditions.push('active = ?'); params.push(filters.active ? 1 : 0); }
|
||||
const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(' AND ')}` : '';
|
||||
return await env.DB.prepare(`SELECT * FROM users ${whereClause}`).bind(...params).all();
|
||||
}
|
||||
```
|
||||
|
||||
## Bulk Insert
|
||||
|
||||
```typescript
|
||||
async function bulkInsertUsers(users: Array<{ name: string; email: string }>, env: Env) {
|
||||
const stmt = env.DB.prepare('INSERT INTO users (name, email) VALUES (?, ?)');
|
||||
const batch = users.map(user => stmt.bind(user.name, user.email));
|
||||
return await env.DB.batch(batch);
|
||||
}
|
||||
```
|
||||
|
||||
## Caching with KV
|
||||
|
||||
```typescript
|
||||
async function getCachedUser(userId: number, env: { DB: D1Database; CACHE: KVNamespace }) {
|
||||
const cacheKey = `user:${userId}`;
|
||||
const cached = await env.CACHE?.get(cacheKey, 'json');
|
||||
if (cached) return cached;
|
||||
const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId).first();
|
||||
if (user) await env.CACHE?.put(cacheKey, JSON.stringify(user), { expirationTtl: 300 });
|
||||
return user;
|
||||
}
|
||||
```
|
||||
|
||||
## Query Optimization
|
||||
|
||||
```typescript
|
||||
// ✅ Use indexes in WHERE clauses
|
||||
const users = await env.DB.prepare('SELECT * FROM users WHERE email = ?').bind(email).all();
|
||||
|
||||
// ✅ Limit result sets
|
||||
const recentPosts = await env.DB.prepare('SELECT * FROM posts ORDER BY created_at DESC LIMIT 100').all();
|
||||
|
||||
// ✅ Use batch() for multiple independent queries
|
||||
const [user, posts, comments] = await env.DB.batch([
|
||||
env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(userId),
|
||||
env.DB.prepare('SELECT * FROM posts WHERE user_id = ?').bind(userId),
|
||||
env.DB.prepare('SELECT * FROM comments WHERE user_id = ?').bind(userId)
|
||||
]);
|
||||
|
||||
// ❌ Avoid N+1 queries
|
||||
for (const post of posts) {
|
||||
const author = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(post.user_id).first(); // Bad: multiple round trips
|
||||
}
|
||||
|
||||
// ✅ Use JOINs instead
|
||||
const postsWithAuthors = await env.DB.prepare(`
|
||||
SELECT posts.*, users.name as author_name
|
||||
FROM posts
|
||||
JOIN users ON posts.user_id = users.id
|
||||
`).all();
|
||||
```
|
||||
|
||||
## Multi-Tenant SaaS
|
||||
|
||||
```typescript
|
||||
// Each tenant gets own database
|
||||
export default {
|
||||
async fetch(request: Request, env: { [key: `TENANT_${string}`]: D1Database }) {
|
||||
const tenantId = request.headers.get('X-Tenant-ID');
|
||||
const data = await env[`TENANT_${tenantId}`].prepare('SELECT * FROM records').all();
|
||||
return Response.json(data.results);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Session Storage
|
||||
|
||||
```typescript
|
||||
async function createSession(userId: number, token: string, env: Env) {
|
||||
const expiresAt = new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString();
|
||||
return await env.DB.prepare('INSERT INTO sessions (user_id, token, expires_at) VALUES (?, ?, ?)').bind(userId, token, expiresAt).run();
|
||||
}
|
||||
|
||||
async function validateSession(token: string, env: Env) {
|
||||
return await env.DB.prepare('SELECT s.*, u.email FROM sessions s JOIN users u ON s.user_id = u.id WHERE s.token = ? AND s.expires_at > CURRENT_TIMESTAMP').bind(token).first();
|
||||
}
|
||||
```
|
||||
|
||||
## Analytics/Events
|
||||
|
||||
```typescript
|
||||
async function logEvent(event: { type: string; userId?: number; metadata: object }, env: Env) {
|
||||
return await env.DB.prepare('INSERT INTO events (type, user_id, metadata) VALUES (?, ?, ?)').bind(event.type, event.userId || null, JSON.stringify(event.metadata)).run();
|
||||
}
|
||||
|
||||
async function getEventStats(startDate: string, endDate: string, env: Env) {
|
||||
return await env.DB.prepare('SELECT type, COUNT(*) as count FROM events WHERE timestamp BETWEEN ? AND ? GROUP BY type ORDER BY count DESC').bind(startDate, endDate).all();
|
||||
}
|
||||
```
|
||||
|
||||
## Read Replication Pattern (Paid Plans)
|
||||
|
||||
```typescript
|
||||
interface Env { DB: D1Database; DB_REPLICA: D1Database; }
|
||||
|
||||
export default {
|
||||
async fetch(request: Request, env: Env) {
|
||||
if (request.method === 'GET') {
|
||||
// Reads: use replica for lower latency
|
||||
const users = await env.DB_REPLICA.prepare('SELECT * FROM users WHERE active = 1').all();
|
||||
return Response.json(users.results);
|
||||
}
|
||||
|
||||
if (request.method === 'POST') {
|
||||
const { name, email } = await request.json();
|
||||
const result = await env.DB.prepare('INSERT INTO users (name, email) VALUES (?, ?)').bind(name, email).run();
|
||||
|
||||
// Read-after-write: use primary for consistency (replication lag is typically 100 ms–2 s)
|
||||
const user = await env.DB.prepare('SELECT * FROM users WHERE id = ?').bind(result.meta.last_row_id).first();
|
||||
return Response.json(user, { status: 201 });
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Use replicas for**: Analytics dashboards, search results, public queries (eventual consistency OK)
|
||||
**Use primary for**: Read-after-write, financial transactions, authentication (consistency required)
|
||||
|
||||
## Sessions API Pattern (Paid Plans)
|
||||
|
||||
```typescript
|
||||
// Migration with long-running session (up to 15 min)
|
||||
async function runMigration(env: Env) {
|
||||
const session = env.DB.withSession({ timeout: 600 }); // 10 min
|
||||
try {
|
||||
await session.prepare('CREATE INDEX idx_users_email ON users(email)').run();
|
||||
await session.prepare('CREATE INDEX idx_posts_user ON posts(user_id)').run();
|
||||
await session.prepare('ANALYZE').run();
|
||||
} finally {
|
||||
session.close(); // Always close to prevent leaks
|
||||
}
|
||||
}
|
||||
|
||||
// Bulk transformation with batching
|
||||
async function transformLargeDataset(env: Env) {
|
||||
const session = env.DB.withSession({ timeout: 900 }); // 15 min max
|
||||
try {
|
||||
const BATCH_SIZE = 1000;
|
||||
let offset = 0;
|
||||
while (true) {
|
||||
const rows = await session.prepare('SELECT id, data FROM legacy LIMIT ? OFFSET ?').bind(BATCH_SIZE, offset).all();
|
||||
if (rows.results.length === 0) break;
|
||||
const updates = rows.results.map(row =>
|
||||
session.prepare('UPDATE legacy SET new_data = ? WHERE id = ?').bind(transform(row.data), row.id)
|
||||
);
|
||||
await session.batch(updates);
|
||||
offset += BATCH_SIZE;
|
||||
}
|
||||
} finally { session.close(); }
|
||||
}
|
||||
```
|
||||
|
||||
## Time Travel & Backups
|
||||
|
||||
```bash
|
||||
wrangler d1 time-travel restore <db-name> --timestamp="2024-01-15T14:30:00Z" # Point-in-time
|
||||
wrangler d1 time-travel info <db-name> # List restore points (7 days free, 30 days paid)
|
||||
wrangler d1 export <db-name> --remote --output=./backup.sql # Full export
|
||||
wrangler d1 export <db-name> --remote --no-schema --output=./data.sql # Data only
|
||||
wrangler d1 execute <db-name> --remote --file=./backup.sql # Import
|
||||
```
|
||||
Reference in New Issue
Block a user