mirror of
https://github.com/ksyasuda/dotfiles.git
synced 2026-03-21 18:11:27 -07:00
update skills
This commit is contained in:
@@ -0,0 +1,75 @@
|
||||
# Cloudflare Durable Objects Storage
|
||||
|
||||
Persistent storage API for Durable Objects with SQLite and KV backends, PITR, and automatic concurrency control.
|
||||
|
||||
## Overview
|
||||
|
||||
DO Storage provides:
|
||||
- SQLite-backed (recommended) or KV-backed
|
||||
- SQL API + synchronous/async KV APIs
|
||||
- Automatic input/output gates (race-free)
|
||||
- 30-day point-in-time recovery (PITR)
|
||||
- Transactions and alarms
|
||||
|
||||
**Use cases:** Stateful coordination, real-time collaboration, counters, sessions, rate limiters
|
||||
|
||||
**Billing:** Charged by request, GB-month storage, and rowsRead/rowsWritten for SQL operations
|
||||
|
||||
## Quick Start
|
||||
|
||||
```typescript
|
||||
export class Counter extends DurableObject {
|
||||
sql: SqlStorage;
|
||||
|
||||
constructor(ctx: DurableObjectState, env: Env) {
|
||||
super(ctx, env);
|
||||
this.sql = ctx.storage.sql;
|
||||
this.sql.exec('CREATE TABLE IF NOT EXISTS data(key TEXT PRIMARY KEY, value INTEGER)');
|
||||
}
|
||||
|
||||
async increment(): Promise<number> {
|
||||
const result = this.sql.exec(
|
||||
'INSERT INTO data VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = value + 1 RETURNING value',
|
||||
'counter', 1
|
||||
).one();
|
||||
return result?.value || 1;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Storage Backends
|
||||
|
||||
| Backend | Create Method | APIs | PITR |
|
||||
|---------|---------------|------|------|
|
||||
| SQLite (recommended) | `new_sqlite_classes` | SQL + sync KV + async KV | ✅ |
|
||||
| KV (legacy) | `new_classes` | async KV only | ❌ |
|
||||
|
||||
## Core APIs
|
||||
|
||||
- **SQL API** (`ctx.storage.sql`): Full SQLite with extensions (FTS5, JSON, math)
|
||||
- **Sync KV** (`ctx.storage.kv`): Synchronous key-value (SQLite only)
|
||||
- **Async KV** (`ctx.storage`): Asynchronous key-value (both backends)
|
||||
- **Transactions** (`transactionSync()`, `transaction()`)
|
||||
- **PITR** (`getBookmarkForTime()`, `onNextSessionRestoreBookmark()`)
|
||||
- **Alarms** (`setAlarm()`, `alarm()` handler)
|
||||
|
||||
## Reading Order
|
||||
|
||||
**New to DO storage:** configuration.md → api.md → patterns.md → gotchas.md
|
||||
**Building features:** patterns.md → api.md → gotchas.md
|
||||
**Debugging issues:** gotchas.md → api.md
|
||||
**Writing tests:** testing.md
|
||||
|
||||
## In This Reference
|
||||
|
||||
- [configuration.md](./configuration.md) - wrangler.jsonc migrations, SQLite vs KV setup, RPC binding
|
||||
- [api.md](./api.md) - SQL exec/cursors, KV methods, storage options, transactions, alarms, PITR
|
||||
- [patterns.md](./patterns.md) - Schema migrations, caching, rate limiting, batch processing, parent-child coordination
|
||||
- [gotchas.md](./gotchas.md) - Concurrency gates, INTEGER precision, transaction rules, SQL limits
|
||||
- [testing.md](./testing.md) - vitest-pool-workers setup, testing DOs with SQL/alarms/PITR
|
||||
|
||||
## See Also
|
||||
|
||||
- [durable-objects](../durable-objects/) - DO fundamentals and coordination patterns
|
||||
- [workers](../workers/) - Worker runtime for DO stubs
|
||||
- [d1](../d1/) - Shared database alternative to per-DO storage
|
||||
102
.agents/skills/cloudflare-deploy/references/do-storage/api.md
Normal file
102
.agents/skills/cloudflare-deploy/references/do-storage/api.md
Normal file
@@ -0,0 +1,102 @@
|
||||
# DO Storage API Reference
|
||||
|
||||
## SQL API
|
||||
|
||||
```typescript
|
||||
const cursor = this.sql.exec('SELECT * FROM users WHERE email = ?', email);
|
||||
for (let row of cursor) {} // Objects: { id, name, email }
|
||||
cursor.toArray(); cursor.one(); // Single row (throws if != 1)
|
||||
for (let row of cursor.raw()) {} // Arrays: [1, "Alice", "..."]
|
||||
|
||||
// Manual iteration
|
||||
const iter = cursor[Symbol.iterator]();
|
||||
const first = iter.next(); // { value: {...}, done: false }
|
||||
|
||||
cursor.columnNames; // ["id", "name", "email"]
|
||||
cursor.rowsRead; cursor.rowsWritten; // Billing
|
||||
|
||||
type User = { id: number; name: string; email: string };
|
||||
const user = this.sql.exec<User>('...', userId).one();
|
||||
```
|
||||
|
||||
## Sync KV API (SQLite only)
|
||||
|
||||
```typescript
|
||||
this.ctx.storage.kv.get("counter"); // undefined if missing
|
||||
this.ctx.storage.kv.put("counter", 42);
|
||||
this.ctx.storage.kv.put("user", { name: "Alice", age: 30 });
|
||||
this.ctx.storage.kv.delete("counter"); // true if existed
|
||||
|
||||
for (let [key, value] of this.ctx.storage.kv.list()) {}
|
||||
|
||||
// List options: start, prefix, reverse, limit
|
||||
this.ctx.storage.kv.list({ start: "user:", prefix: "user:", reverse: true, limit: 100 });
|
||||
```
|
||||
|
||||
## Async KV API (Both backends)
|
||||
|
||||
```typescript
|
||||
await this.ctx.storage.get("key"); // Single
|
||||
await this.ctx.storage.get(["key1", "key2"]); // Multiple (max 128)
|
||||
await this.ctx.storage.put("key", value); // Single
|
||||
await this.ctx.storage.put({ "key1": "v1", "key2": { nested: true } }); // Multiple (max 128)
|
||||
await this.ctx.storage.delete("key");
|
||||
await this.ctx.storage.delete(["key1", "key2"]);
|
||||
await this.ctx.storage.list({ prefix: "user:", limit: 100 });
|
||||
|
||||
// Options: allowConcurrency, noCache, allowUnconfirmed
|
||||
await this.ctx.storage.get("key", { allowConcurrency: true, noCache: true });
|
||||
await this.ctx.storage.put("key", value, { allowUnconfirmed: true, noCache: true });
|
||||
```
|
||||
|
||||
### Storage Options
|
||||
|
||||
| Option | Methods | Effect | Use Case |
|
||||
|--------|---------|--------|----------|
|
||||
| `allowConcurrency` | get, list | Skip input gate; allow concurrent requests during read | Read-heavy metrics that don't need strict consistency |
|
||||
| `noCache` | get, put, list | Skip in-memory cache; always read from disk | Rarely-accessed data or testing storage directly |
|
||||
| `allowUnconfirmed` | put, delete | Return before write confirms (still protected by output gate) | Non-critical writes where latency matters more than confirmation |
|
||||
|
||||
## Transactions
|
||||
|
||||
```typescript
|
||||
// Sync (SQL/sync KV only)
|
||||
this.ctx.storage.transactionSync(() => {
|
||||
this.sql.exec('UPDATE accounts SET balance = balance - ? WHERE id = ?', 100, 1);
|
||||
this.sql.exec('UPDATE accounts SET balance = balance + ? WHERE id = ?', 100, 2);
|
||||
return "result";
|
||||
});
|
||||
|
||||
// Async
|
||||
await this.ctx.storage.transaction(async () => {
|
||||
const value = await this.ctx.storage.get("counter");
|
||||
await this.ctx.storage.put("counter", value + 1);
|
||||
if (value > 100) this.ctx.storage.rollback(); // Explicit rollback
|
||||
});
|
||||
```
|
||||
|
||||
## Point-in-Time Recovery
|
||||
|
||||
```typescript
|
||||
await this.ctx.storage.getCurrentBookmark();
|
||||
await this.ctx.storage.getBookmarkForTime(Date.now() - 2 * 24 * 60 * 60 * 1000);
|
||||
await this.ctx.storage.onNextSessionRestoreBookmark(bookmark);
|
||||
this.ctx.abort(); // Restart to apply; bookmarks lexically comparable (earlier < later)
|
||||
```
|
||||
|
||||
## Alarms
|
||||
|
||||
```typescript
|
||||
await this.ctx.storage.setAlarm(Date.now() + 60000); // Timestamp or Date
|
||||
await this.ctx.storage.getAlarm();
|
||||
await this.ctx.storage.deleteAlarm();
|
||||
|
||||
async alarm() { await this.doScheduledWork(); }
|
||||
```
|
||||
|
||||
## Misc
|
||||
|
||||
```typescript
|
||||
await this.ctx.storage.deleteAll(); // Atomic for SQLite; alarm NOT included
|
||||
this.ctx.storage.sql.databaseSize; // Bytes
|
||||
```
|
||||
@@ -0,0 +1,112 @@
|
||||
# DO Storage Configuration
|
||||
|
||||
## SQLite-backed (Recommended)
|
||||
|
||||
**wrangler.jsonc:**
|
||||
```jsonc
|
||||
{
|
||||
"migrations": [
|
||||
{
|
||||
"tag": "v1",
|
||||
"new_sqlite_classes": ["Counter", "Session", "RateLimiter"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
**Migration lifecycle:** Migrations run once per deployment. Existing DO instances get new storage backend on next invocation. Renaming/removing classes requires `renamed_classes` or `deleted_classes` entries.
|
||||
|
||||
## KV-backed (Legacy)
|
||||
|
||||
**wrangler.jsonc:**
|
||||
```jsonc
|
||||
{
|
||||
"migrations": [
|
||||
{
|
||||
"tag": "v1",
|
||||
"new_classes": ["OldCounter"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## TypeScript Setup
|
||||
|
||||
```typescript
|
||||
export class MyDurableObject extends DurableObject {
|
||||
sql: SqlStorage;
|
||||
|
||||
constructor(ctx: DurableObjectState, env: Env) {
|
||||
super(ctx, env);
|
||||
this.sql = ctx.storage.sql;
|
||||
|
||||
// Initialize schema
|
||||
this.sql.exec(`
|
||||
CREATE TABLE IF NOT EXISTS users(
|
||||
id INTEGER PRIMARY KEY,
|
||||
name TEXT NOT NULL,
|
||||
email TEXT UNIQUE
|
||||
);
|
||||
`);
|
||||
}
|
||||
}
|
||||
|
||||
// Binding
|
||||
interface Env {
|
||||
MY_DO: DurableObjectNamespace;
|
||||
}
|
||||
|
||||
export default {
|
||||
async fetch(request: Request, env: Env): Promise<Response> {
|
||||
const id = env.MY_DO.idFromName('singleton');
|
||||
const stub = env.MY_DO.get(id);
|
||||
|
||||
// Modern RPC: call methods directly (recommended)
|
||||
const result = await stub.someMethod();
|
||||
return Response.json(result);
|
||||
|
||||
// Legacy: forward request (still works)
|
||||
// return stub.fetch(request);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## CPU Limits
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"limits": {
|
||||
"cpu_ms": 300000 // 5 minutes (default 30s)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Location Control
|
||||
|
||||
```typescript
|
||||
// Jurisdiction (GDPR/FedRAMP)
|
||||
const euNamespace = env.MY_DO.jurisdiction("eu");
|
||||
const id = euNamespace.newUniqueId();
|
||||
const stub = euNamespace.get(id);
|
||||
|
||||
// Location hint (best effort)
|
||||
const stub = env.MY_DO.get(id, { locationHint: "enam" });
|
||||
// Hints: wnam, enam, sam, weur, eeur, apac, oc, afr, me
|
||||
```
|
||||
|
||||
## Initialization
|
||||
|
||||
```typescript
|
||||
export class Counter extends DurableObject {
|
||||
value: number;
|
||||
|
||||
constructor(ctx: DurableObjectState, env: Env) {
|
||||
super(ctx, env);
|
||||
|
||||
// Block concurrent requests during init
|
||||
ctx.blockConcurrencyWhile(async () => {
|
||||
this.value = (await ctx.storage.get("value")) || 0;
|
||||
});
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,150 @@
|
||||
# DO Storage Gotchas & Troubleshooting
|
||||
|
||||
## Concurrency Model (CRITICAL)
|
||||
|
||||
Durable Objects use **input/output gates** to prevent race conditions:
|
||||
|
||||
### Input Gates
|
||||
While a storage read issued by the CURRENT request is in flight, the input gate blocks new incoming requests:
|
||||
|
||||
```typescript
|
||||
// SAFE: Input gate active during await
|
||||
async increment() {
|
||||
const val = await this.ctx.storage.get("counter"); // Input gate blocks other requests
|
||||
await this.ctx.storage.put("counter", val + 1);
|
||||
return val;
|
||||
}
|
||||
```
|
||||
|
||||
### Output Gates
|
||||
Hold response until ALL writes from current request confirm:
|
||||
|
||||
```typescript
|
||||
// SAFE: Output gate waits for put() to confirm before returning response
|
||||
async increment() {
|
||||
const val = await this.ctx.storage.get("counter");
|
||||
this.ctx.storage.put("counter", val + 1); // No await
|
||||
return new Response(String(val)); // Response delayed until write confirms
|
||||
}
|
||||
```
|
||||
|
||||
### Write Coalescing
|
||||
Multiple writes to same key = atomic (last write wins):
|
||||
|
||||
```typescript
|
||||
// SAFE: All three writes coalesce atomically
|
||||
this.ctx.storage.put("key", 1);
|
||||
this.ctx.storage.put("key", 2);
|
||||
this.ctx.storage.put("key", 3); // Final value: 3
|
||||
```
|
||||
|
||||
### Breaking Gates (DANGER)
|
||||
|
||||
**fetch() breaks input/output gates** → allows request interleaving:
|
||||
|
||||
```typescript
|
||||
// UNSAFE: fetch() allows another request to interleave
|
||||
async unsafe() {
|
||||
const val = await this.ctx.storage.get("counter");
|
||||
await fetch("https://api.example.com"); // Gate broken!
|
||||
await this.ctx.storage.put("counter", val + 1); // Race condition possible
|
||||
}
|
||||
```
|
||||
|
||||
**Solution:** Use `blockConcurrencyWhile()` or `transaction()`:
|
||||
|
||||
```typescript
|
||||
// SAFE: Block concurrent requests explicitly
|
||||
async safe() {
|
||||
return await this.ctx.blockConcurrencyWhile(async () => {
|
||||
const val = await this.ctx.storage.get("counter");
|
||||
await fetch("https://api.example.com");
|
||||
await this.ctx.storage.put("counter", val + 1);
|
||||
return val;
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
### allowConcurrency Option
|
||||
|
||||
Opt out of input gate for reads that don't need protection:
|
||||
|
||||
```typescript
|
||||
// Allow concurrent reads (no consistency guarantee)
|
||||
const val = await this.ctx.storage.get("metrics", { allowConcurrency: true });
|
||||
```
|
||||
|
||||
## Common Errors
|
||||
|
||||
### "Race Condition in Concurrent Calls"
|
||||
|
||||
**Cause:** Multiple concurrent storage operations initiated from same event (e.g., `Promise.all()`) are not protected by input gate
|
||||
**Solution:** Avoid issuing concurrent storage operations within a single event (e.g. via `Promise.all()`); the input gate only serializes requests arriving from different events — it does not serialize operations started within the same event
|
||||
|
||||
### "Direct SQL Transaction Statements"
|
||||
|
||||
**Cause:** Using `BEGIN TRANSACTION` directly instead of transaction methods
|
||||
**Solution:** Use `this.ctx.storage.transactionSync()` for sync operations or `this.ctx.storage.transaction()` for async operations
|
||||
|
||||
### "Async in transactionSync"
|
||||
|
||||
**Cause:** Using async operations inside `transactionSync()` callback
|
||||
**Solution:** Use async `transaction()` method instead of `transactionSync()` when async operations needed
|
||||
|
||||
### "TypeScript Type Mismatch at Runtime"
|
||||
|
||||
**Cause:** Query doesn't return all fields specified in TypeScript type
|
||||
**Solution:** Ensure SQL query selects all columns that match the TypeScript type definition
|
||||
|
||||
### "Silent Data Corruption with Large IDs"
|
||||
|
||||
**Cause:** JavaScript numbers have 53-bit precision; SQLite INTEGER is 64-bit
|
||||
**Symptom:** IDs > 9007199254740991 (Number.MAX_SAFE_INTEGER) silently truncate/corrupt
|
||||
**Solution:** Store large IDs as TEXT:
|
||||
|
||||
```typescript
|
||||
// BAD: Snowflake/Twitter IDs will corrupt
|
||||
this.sql.exec("CREATE TABLE events(id INTEGER PRIMARY KEY)");
|
||||
this.sql.exec("INSERT INTO events VALUES (?)", 1234567890123456789n); // Corrupts!
|
||||
|
||||
// GOOD: Store as TEXT
|
||||
this.sql.exec("CREATE TABLE events(id TEXT PRIMARY KEY)");
|
||||
this.sql.exec("INSERT INTO events VALUES (?)", "1234567890123456789");
|
||||
```
|
||||
|
||||
### "Alarm Not Deleted with deleteAll()"
|
||||
|
||||
**Cause:** `deleteAll()` doesn't delete alarms automatically
|
||||
**Solution:** Call `deleteAlarm()` explicitly before `deleteAll()` to remove the alarm
|
||||
|
||||
### "Slow Performance"
|
||||
|
||||
**Cause:** Using async KV API instead of sync API
|
||||
**Solution:** Use sync KV API (`ctx.storage.kv`) for better performance with simple key-value operations
|
||||
|
||||
### "High Billing from Storage Operations"
|
||||
|
||||
**Cause:** Excessive `rowsRead`/`rowsWritten` or unused objects not cleaned up
|
||||
**Solution:** Monitor `rowsRead`/`rowsWritten` metrics and ensure unused objects call `deleteAll()`
|
||||
|
||||
### "Durable Object Overloaded"
|
||||
|
||||
**Cause:** Single DO exceeding ~1K req/sec soft limit
|
||||
**Solution:** Shard across multiple DOs with random IDs or other distribution strategy
|
||||
|
||||
## Limits
|
||||
|
||||
| Limit | Value | Notes |
|
||||
|-------|-------|-------|
|
||||
| Max columns per table | 100 | SQL limitation |
|
||||
| Max string/BLOB per row | 2 MB | SQL limitation |
|
||||
| Max row size | 2 MB | SQL limitation |
|
||||
| Max SQL statement size | 100 KB | SQL limitation |
|
||||
| Max SQL parameters | 100 | SQL limitation |
|
||||
| Max LIKE/GLOB pattern | 50 B | SQL limitation |
|
||||
| SQLite storage per object | 10 GB | SQLite-backed storage |
|
||||
| SQLite key+value size | 2 MB | SQLite-backed storage |
|
||||
| KV storage per object | Unlimited | KV-style storage |
|
||||
| KV key size | 2 KiB | KV-style storage |
|
||||
| KV value size | 128 KiB | KV-style storage |
|
||||
| Request throughput | ~1K req/sec | Soft limit per DO |
|
||||
@@ -0,0 +1,182 @@
|
||||
# DO Storage Patterns & Best Practices
|
||||
|
||||
## Schema Migration
|
||||
|
||||
```typescript
|
||||
export class MyDurableObject extends DurableObject {
|
||||
constructor(ctx: DurableObjectState, env: Env) {
|
||||
super(ctx, env);
|
||||
this.sql = ctx.storage.sql;
|
||||
|
||||
// Use SQLite's built-in user_version pragma
|
||||
const ver = this.sql.exec("PRAGMA user_version").one()?.user_version || 0;
|
||||
|
||||
if (ver === 0) {
|
||||
this.sql.exec(`CREATE TABLE users(id INTEGER PRIMARY KEY, name TEXT)`);
|
||||
this.sql.exec("PRAGMA user_version = 1");
|
||||
}
|
||||
if (ver === 1) {
|
||||
this.sql.exec(`ALTER TABLE users ADD COLUMN email TEXT`);
|
||||
this.sql.exec("PRAGMA user_version = 2");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## In-Memory Caching
|
||||
|
||||
```typescript
|
||||
export class UserCache extends DurableObject {
|
||||
cache = new Map<string, User>();
|
||||
async getUser(id: string): Promise<User | undefined> {
|
||||
if (this.cache.has(id)) {
|
||||
const cached = this.cache.get(id);
|
||||
if (cached) return cached;
|
||||
}
|
||||
const user = await this.ctx.storage.get<User>(`user:${id}`);
|
||||
if (user) this.cache.set(id, user);
|
||||
return user;
|
||||
}
|
||||
async updateUser(id: string, data: Partial<User>) {
|
||||
const updated = { ...await this.getUser(id), ...data };
|
||||
this.cache.set(id, updated);
|
||||
await this.ctx.storage.put(`user:${id}`, updated);
|
||||
return updated;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
```typescript
|
||||
export class RateLimiter extends DurableObject {
|
||||
async checkLimit(key: string, limit: number, window: number): Promise<boolean> {
|
||||
const now = Date.now();
|
||||
this.sql.exec('DELETE FROM requests WHERE key = ? AND timestamp < ?', key, now - window);
|
||||
const count = this.sql.exec('SELECT COUNT(*) as count FROM requests WHERE key = ?', key).one().count;
|
||||
if (count >= limit) return false;
|
||||
this.sql.exec('INSERT INTO requests (key, timestamp) VALUES (?, ?)', key, now);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Batch Processing with Alarms
|
||||
|
||||
```typescript
|
||||
export class BatchProcessor extends DurableObject {
|
||||
pending: string[] = [];
|
||||
async addItem(item: string) {
|
||||
this.pending.push(item);
|
||||
if (!await this.ctx.storage.getAlarm()) await this.ctx.storage.setAlarm(Date.now() + 5000);
|
||||
}
|
||||
async alarm() {
|
||||
const items = [...this.pending];
|
||||
this.pending = [];
|
||||
this.sql.exec(`INSERT INTO processed_items (item, timestamp) VALUES ${items.map(() => "(?, ?)").join(", ")}`, ...items.flatMap(item => [item, Date.now()]));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Initialization Pattern
|
||||
|
||||
```typescript
|
||||
export class Counter extends DurableObject {
|
||||
value: number;
|
||||
constructor(ctx: DurableObjectState, env: Env) {
|
||||
super(ctx, env);
|
||||
ctx.blockConcurrencyWhile(async () => { this.value = (await ctx.storage.get("value")) || 0; });
|
||||
}
|
||||
async increment() {
|
||||
this.value++;
|
||||
this.ctx.storage.put("value", this.value); // Don't await (output gate protects)
|
||||
return this.value;
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Safe Counter / Optimized Write
|
||||
|
||||
```typescript
|
||||
// Input gate blocks other requests
|
||||
async getUniqueNumber(): Promise<number> {
|
||||
let val = await this.ctx.storage.get("counter");
|
||||
await this.ctx.storage.put("counter", val + 1);
|
||||
return val;
|
||||
}
|
||||
|
||||
// No await on write - output gate delays response until write confirms
|
||||
async increment(): Promise<Response> {
|
||||
let val = await this.ctx.storage.get("counter");
|
||||
this.ctx.storage.put("counter", val + 1);
|
||||
return new Response(String(val));
|
||||
}
|
||||
```
|
||||
|
||||
## Parent-Child Coordination
|
||||
|
||||
Hierarchical DO pattern where parent manages child DOs:
|
||||
|
||||
```typescript
|
||||
// Parent DO coordinates children
|
||||
export class Workspace extends DurableObject {
|
||||
async createDocument(name: string): Promise<string> {
|
||||
const docId = crypto.randomUUID();
|
||||
const childId = this.env.DOCUMENT.idFromName(`${this.ctx.id.toString()}:${docId}`);
|
||||
const childStub = this.env.DOCUMENT.get(childId);
|
||||
await childStub.initialize(name);
|
||||
|
||||
// Track child in parent storage
|
||||
this.sql.exec('INSERT INTO documents (id, name, created) VALUES (?, ?, ?)',
|
||||
docId, name, Date.now());
|
||||
return docId;
|
||||
}
|
||||
|
||||
async listDocuments(): Promise<string[]> {
|
||||
return this.sql.exec('SELECT id FROM documents').toArray().map(r => r.id);
|
||||
}
|
||||
}
|
||||
|
||||
// Child DO
|
||||
export class Document extends DurableObject {
|
||||
async initialize(name: string) {
|
||||
this.sql.exec('CREATE TABLE IF NOT EXISTS content(key TEXT PRIMARY KEY, value TEXT)');
|
||||
this.sql.exec('INSERT INTO content VALUES (?, ?)', 'name', name);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Write Coalescing Pattern
|
||||
|
||||
Multiple writes to same key coalesce atomically (last write wins):
|
||||
|
||||
```typescript
|
||||
async updateMetrics(userId: string, actions: Action[]) {
|
||||
// All writes coalesce - no await needed
|
||||
for (const action of actions) {
|
||||
this.ctx.storage.put(`user:${userId}:lastAction`, action.type);
|
||||
this.ctx.storage.put(`user:${userId}:count`,
|
||||
await this.ctx.storage.get(`user:${userId}:count`) + 1);
|
||||
}
|
||||
// Output gate ensures all writes confirm before response
|
||||
return new Response("OK");
|
||||
}
|
||||
|
||||
// Atomic batch with SQL
|
||||
async batchUpdate(items: Item[]) {
|
||||
this.sql.exec('BEGIN');
|
||||
for (const item of items) {
|
||||
this.sql.exec('INSERT OR REPLACE INTO items VALUES (?, ?)', item.id, item.value);
|
||||
}
|
||||
this.sql.exec('COMMIT');
|
||||
}
|
||||
```
|
||||
|
||||
## Cleanup
|
||||
|
||||
```typescript
|
||||
async cleanup() {
|
||||
await this.ctx.storage.deleteAlarm(); // Separate from deleteAll
|
||||
await this.ctx.storage.deleteAll();
|
||||
}
|
||||
```
|
||||
@@ -0,0 +1,183 @@
|
||||
# DO Storage Testing
|
||||
|
||||
Testing Durable Objects with storage using `vitest-pool-workers`.
|
||||
|
||||
## Setup
|
||||
|
||||
**vitest.config.ts:**
|
||||
```typescript
|
||||
import { defineWorkersConfig } from "@cloudflare/vitest-pool-workers/config";
|
||||
|
||||
export default defineWorkersConfig({
|
||||
test: {
|
||||
poolOptions: {
|
||||
workers: { wrangler: { configPath: "./wrangler.toml" } }
|
||||
}
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
**package.json:** Add `@cloudflare/vitest-pool-workers` and `vitest` to devDependencies
|
||||
|
||||
## Basic Testing
|
||||
|
||||
```typescript
|
||||
import { env, runInDurableObject } from "cloudflare:test";
|
||||
import { describe, it, expect } from "vitest";
|
||||
|
||||
describe("Counter DO", () => {
|
||||
it("increments counter", async () => {
|
||||
const id = env.COUNTER.idFromName("test");
|
||||
const result = await runInDurableObject(env.COUNTER, id, async (instance, state) => {
|
||||
const val1 = await instance.increment();
|
||||
const val2 = await instance.increment();
|
||||
return { val1, val2 };
|
||||
});
|
||||
expect(result.val1).toBe(1);
|
||||
expect(result.val2).toBe(2);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Testing SQL Storage
|
||||
|
||||
```typescript
|
||||
it("creates and queries users", async () => {
|
||||
const id = env.USER_MANAGER.idFromName("test");
|
||||
await runInDurableObject(env.USER_MANAGER, id, async (instance, state) => {
|
||||
await instance.createUser("alice@example.com", "Alice");
|
||||
const user = await instance.getUser("alice@example.com");
|
||||
expect(user).toEqual({ email: "alice@example.com", name: "Alice" });
|
||||
});
|
||||
});
|
||||
|
||||
it("handles schema migrations", async () => {
|
||||
const id = env.USER_MANAGER.idFromName("migration-test");
|
||||
await runInDurableObject(env.USER_MANAGER, id, async (instance, state) => {
|
||||
const version = state.storage.sql.exec(
|
||||
"SELECT value FROM _meta WHERE key = 'schema_version'"
|
||||
).one()?.value;
|
||||
expect(version).toBe("1");
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Testing Alarms
|
||||
|
||||
```typescript
|
||||
import { runDurableObjectAlarm } from "cloudflare:test";
|
||||
|
||||
it("processes batch on alarm", async () => {
|
||||
const id = env.BATCH_PROCESSOR.idFromName("test");
|
||||
|
||||
// Add items
|
||||
await runInDurableObject(env.BATCH_PROCESSOR, id, async (instance) => {
|
||||
await instance.addItem("item1");
|
||||
await instance.addItem("item2");
|
||||
});
|
||||
|
||||
// Trigger alarm
|
||||
await runDurableObjectAlarm(env.BATCH_PROCESSOR, id);
|
||||
|
||||
// Verify processed
|
||||
await runInDurableObject(env.BATCH_PROCESSOR, id, async (instance, state) => {
|
||||
const count = state.storage.sql.exec(
|
||||
"SELECT COUNT(*) as count FROM processed_items"
|
||||
).one().count;
|
||||
expect(count).toBe(2);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Testing Concurrency
|
||||
|
||||
```typescript
|
||||
it("handles concurrent increments safely", async () => {
|
||||
const id = env.COUNTER.idFromName("concurrent-test");
|
||||
|
||||
// Parallel increments
|
||||
const results = await Promise.all([
|
||||
runInDurableObject(env.COUNTER, id, (i) => i.increment()),
|
||||
runInDurableObject(env.COUNTER, id, (i) => i.increment()),
|
||||
runInDurableObject(env.COUNTER, id, (i) => i.increment())
|
||||
]);
|
||||
|
||||
// All should get unique values
|
||||
expect(new Set(results).size).toBe(3);
|
||||
expect(Math.max(...results)).toBe(3);
|
||||
});
|
||||
```
|
||||
|
||||
## Test Isolation
|
||||
|
||||
```typescript
|
||||
// Per-test unique IDs
|
||||
let testId: string;
|
||||
beforeEach(() => { testId = crypto.randomUUID(); });
|
||||
|
||||
it("isolated test", async () => {
|
||||
const id = env.MY_DO.idFromName(testId);
|
||||
// Uses unique DO instance
|
||||
});
|
||||
|
||||
// Cleanup pattern
|
||||
it("with cleanup", async () => {
|
||||
const id = env.MY_DO.idFromName("cleanup-test");
|
||||
try {
|
||||
await runInDurableObject(env.MY_DO, id, async (instance) => {});
|
||||
} finally {
|
||||
await runInDurableObject(env.MY_DO, id, async (instance, state) => {
|
||||
await state.storage.deleteAll();
|
||||
});
|
||||
}
|
||||
});
|
||||
```
|
||||
|
||||
## Testing PITR
|
||||
|
||||
```typescript
|
||||
it("restores from bookmark", async () => {
|
||||
const id = env.MY_DO.idFromName("pitr-test");
|
||||
|
||||
// Create checkpoint
|
||||
const bookmark = await runInDurableObject(env.MY_DO, id, async (instance, state) => {
|
||||
await state.storage.put("value", 1);
|
||||
return await state.storage.getCurrentBookmark();
|
||||
});
|
||||
|
||||
// Modify and restore
|
||||
await runInDurableObject(env.MY_DO, id, async (instance, state) => {
|
||||
await state.storage.put("value", 2);
|
||||
await state.storage.onNextSessionRestoreBookmark(bookmark);
|
||||
state.abort();
|
||||
});
|
||||
|
||||
// Verify restored
|
||||
await runInDurableObject(env.MY_DO, id, async (instance, state) => {
|
||||
const value = await state.storage.get("value");
|
||||
expect(value).toBe(1);
|
||||
});
|
||||
});
|
||||
```
|
||||
|
||||
## Testing Transactions
|
||||
|
||||
```typescript
|
||||
it("rolls back on error", async () => {
|
||||
const id = env.BANK.idFromName("transaction-test");
|
||||
|
||||
await runInDurableObject(env.BANK, id, async (instance, state) => {
|
||||
await state.storage.put("balance", 100);
|
||||
|
||||
await expect(
|
||||
state.storage.transaction(async () => {
|
||||
await state.storage.put("balance", 50);
|
||||
throw new Error("Cancel");
|
||||
})
|
||||
).rejects.toThrow("Cancel");
|
||||
|
||||
const balance = await state.storage.get("balance");
|
||||
expect(balance).toBe(100); // Rolled back
|
||||
});
|
||||
});
|
||||
```
|
||||
Reference in New Issue
Block a user