Payload CMS Capacity Planning
Payload CMS Capacity Planning TypeScript Node.js MongoDB PostgreSQL CDN Redis Caching Sizing API Performance Production
| Size | CPU/RAM | Database | Instances | RPS Target |
|---|---|---|---|---|
| Small | 1 vCPU / 1GB | MongoDB 1GB | 1 Node.js | 50-100 |
| Medium | 2 vCPU / 4GB | MongoDB 10GB RS | 2 + LB | 500-1,000 |
| Large | 4 vCPU / 8GB | MongoDB 50GB RS | 4 + LB + CDN | 5,000-10,000 |
| Enterprise | 8+ vCPU / 16GB+ | MongoDB Sharded | Auto-scale + CDN | 10,000+ |
Sizing Calculator
# === Payload CMS Capacity Calculator ===
from dataclasses import dataclass
@dataclass
class SizingInput:
    """User-supplied workload parameters for a Payload CMS deployment."""
    content_count: int        # total number of content documents
    avg_doc_size_kb: float    # average document size, in kilobytes
    media_count: int          # total number of uploaded media files
    avg_media_size_mb: float  # average media file size, in megabytes
    api_rps: int              # expected peak API requests per second
    admin_users: int          # concurrent admin-panel users (not used by the sizing math yet)


@dataclass
class SizingResult:
    """Computed infrastructure recommendation produced by calculate_sizing()."""
    db_storage_gb: float      # database storage including replica copies
    media_storage_gb: float   # file storage including variant overhead
    node_instances: int       # number of Node.js application instances
    cpu_per_instance: int     # vCPUs per instance
    ram_per_instance_gb: int  # RAM per instance, in GB
    db_connections: int       # total database connection-pool size
    redis_memory_gb: float    # recommended Redis cache memory, in GB


def calculate_sizing(
    inp: SizingInput,
    *,
    replication_factor: float = 3.0,
    media_overhead: float = 1.5,
    connections_per_instance: int = 10,
) -> SizingResult:
    """Estimate infrastructure sizing from the expected workload.

    Args:
        inp: Expected workload characteristics.
        replication_factor: Storage multiplier for database replica copies
            (default 3.0, matching a 3-member MongoDB replica set).
        media_overhead: Storage multiplier for generated media variants
            such as thumbnails (default 1.5).
        connections_per_instance: DB connection-pool size granted to each
            Node.js instance (default 10).

    Returns:
        A SizingResult with storage, instance, and cache recommendations.
    """
    # KB -> GB, multiplied by the number of replicated copies.
    db_gb = (inp.content_count * inp.avg_doc_size_kb * replication_factor) / (1024 * 1024)
    # MB -> GB, with headroom for derived image sizes.
    media_gb = (inp.media_count * inp.avg_media_size_mb * media_overhead) / 1024
    # RPS tier -> (instances, vCPU, RAM GB); thresholds are inclusive upper bounds.
    if inp.api_rps <= 100:
        instances, cpu, ram = 1, 1, 1
    elif inp.api_rps <= 1000:
        instances, cpu, ram = 2, 2, 4
    elif inp.api_rps <= 10000:
        instances, cpu, ram = 4, 4, 8
    else:
        instances, cpu, ram = 8, 8, 16
    # Rule of thumb: ~1 MB of Redis per sustained RPS, never below 512 MB.
    redis_gb = max(0.5, inp.api_rps * 0.001)
    return SizingResult(
        db_storage_gb=round(db_gb, 2),
        media_storage_gb=round(media_gb, 2),
        node_instances=instances,
        cpu_per_instance=cpu,
        ram_per_instance_gb=ram,
        db_connections=instances * connections_per_instance,
        redis_memory_gb=round(redis_gb, 2),
    )
# Demo: run the sizing calculator over four representative deployments.
scenarios = [
    ("Blog (Personal)", SizingInput(500, 5, 200, 2, 50, 2)),
    ("Corporate Site", SizingInput(5000, 10, 2000, 3, 500, 10)),
    ("E-commerce", SizingInput(50000, 15, 20000, 5, 5000, 50)),
    ("Enterprise Platform", SizingInput(500000, 20, 100000, 4, 20000, 200)),
]

print("=== Capacity Planning ===")
for label, workload in scenarios:
    result = calculate_sizing(workload)
    print(f"\n [{label}]")
    print(f" Content: {workload.content_count:,} docs | Media: {workload.media_count:,} files")
    print(f" DB: {result.db_storage_gb} GB | Media: {result.media_storage_gb} GB")
    print(f" Instances: {result.node_instances}x ({result.cpu_per_instance} vCPU {result.ram_per_instance_gb}GB)")
    print(f" DB Connections: {result.db_connections} | Redis: {result.redis_memory_gb} GB")
Caching Architecture
# === Caching Strategy ===
# Payload Hook for Cache Invalidation
# // collections/Posts.ts
# const Posts: CollectionConfig = {
# slug: 'posts',
# hooks: {
# afterChange: [
# async ({ doc }) => {
# // Purge CDN cache
# await fetch(`https://api.cloudflare.com/client/v4/zones/${ZONE_ID}/purge_cache`, {
# method: 'POST',
# headers: { Authorization: `Bearer ${CF_API_TOKEN}` },
# body: JSON.stringify({ files: [`https://example.com/api/posts/${doc.id}`] })
# });
# // Invalidate Redis cache
# await redis.del(`posts:${doc.id}`);
# await redis.del('posts:list');
# // Trigger Next.js revalidation
# await fetch(`https://frontend.com/api/revalidate?secret=${REVALIDATE_SECRET}&path=/blog/${doc.slug}`);
# }
# ]
# }
# };
@dataclass
class CacheLayer:
    """Describes one tier of the multi-layer caching architecture."""
    layer: str         # tier name (e.g. "CDN Edge")
    tool: str          # technology implementing the tier
    ttl: str           # typical time-to-live policy
    invalidation: str  # how stale entries are purged
    hit_rate: str      # expected hit rate for the tier


# Cache tiers, ordered from closest-to-user (CDN) down to the database.
layers = [
    CacheLayer(
        layer="CDN Edge",
        tool="Cloudflare / CloudFront",
        ttl="60s (API) 1y (Media)",
        invalidation="Webhook Purge on afterChange",
        hit_rate="70-90% (Static content)",
    ),
    CacheLayer(
        layer="Application (Redis)",
        tool="Redis 6+",
        ttl="60-300s per content type",
        invalidation="redis.del on afterChange Hook",
        hit_rate="50-80% (Dynamic queries)",
    ),
    CacheLayer(
        layer="Full Page (ISR)",
        tool="Next.js ISR / Vercel",
        ttl="revalidate: 60-3600",
        invalidation="On-demand revalidation API",
        hit_rate="90%+ (Static pages)",
    ),
    CacheLayer(
        layer="Browser",
        tool="HTTP Cache-Control ETag",
        ttl="60s (API) 1y (Assets)",
        invalidation="ETag mismatch → 200 else 304",
        hit_rate="High (repeat visitors)",
    ),
    CacheLayer(
        layer="Database",
        tool="MongoDB WiredTiger / PG shared_buffers",
        ttl="Automatic (LRU)",
        invalidation="Automatic on write",
        hit_rate="Depends on working set vs RAM",
    ),
]

print("=== Caching Layers ===")
for entry in layers:
    print(f" [{entry.layer}] Tool: {entry.tool}")
    print(f" TTL: {entry.ttl}")
    print(f" Invalidation: {entry.invalidation}")
    print(f" Hit Rate: {entry.hit_rate}")
Monitoring
# === Production Monitoring ===
@dataclass
class MonitorMetric:
    """A production health metric with its source, target, and alert rule."""
    metric: str  # metric name
    source: str  # where the value is collected from
    target: str  # healthy operating range
    alert: str   # breach threshold and recommended response


# Key production metrics, each paired with an alerting rule.
monitoring = [
    MonitorMetric(
        metric="API Response Time P99",
        source="Prometheus / APM (Datadog NewRelic)",
        target="< 200ms (cached) < 500ms (uncached)",
        alert="> 1s → P2 Check DB Cache Instances",
    ),
    MonitorMetric(
        metric="API Throughput (RPS)",
        source="Prometheus http_requests_total",
        target="ตาม Sizing Target",
        alert="< 50% target → Check if underprovisioned",
    ),
    MonitorMetric(
        metric="Error Rate",
        source="Prometheus http_errors_total / total",
        target="< 0.1%",
        alert="> 1% → P1 Check Logs DB Connection",
    ),
    MonitorMetric(
        metric="Database Connection Pool",
        source="MongoDB/PG metrics",
        target="< 80% pool usage",
        alert="> 90% → P2 Increase Pool or Scale",
    ),
    MonitorMetric(
        metric="Cache Hit Rate",
        source="Redis INFO stats / CDN Analytics",
        target="> 60% Redis > 80% CDN",
        alert="< 30% → Review Cache Keys TTL",
    ),
    MonitorMetric(
        metric="Media Storage Usage",
        source="S3/Disk metrics",
        target="< 80% allocated",
        alert="> 90% → P3 Expand Storage Archive Old",
    ),
]

print("=== Monitoring ===")
for item in monitoring:
    print(f" [{item.metric}] Target: {item.target}")
    print(f" Source: {item.source}")
    print(f" Alert: {item.alert}")
เคล็ดลับ
- CDN: ใช้ CDN Cache API Response ลด Load 70-90%
- Redis: Cache Popular Queries ใน Redis TTL 60-300s
- Hook: ใช้ afterChange Hook Invalidate Cache อัตโนมัติ
- ISR: ใช้ Next.js ISR + On-demand Revalidation ลด API Load 90%+
- Index: สร้าง Database Index ตาม Query Pattern ทุกตัว
Payload CMS คืออะไร
Open Source Headless CMS TypeScript Node.js MongoDB PostgreSQL REST GraphQL Admin Panel Auth Upload Versioning Localization Hooks
Capacity Planning ทำอย่างไร
RPS Estimate Small Medium Large Enterprise CPU RAM DB Media Instances LB CDN Connection Pool Sizing Calculator
Database Optimization ทำอย่างไร
MongoDB Index Replica Set Connection Pool PostgreSQL PgBouncer Partitioning Projection Pagination Cache Query Population Join
Caching Strategy มีอะไร
CDN Cloudflare Redis Application ISR Next.js Browser ETag Database WiredTiger afterChange Hook Invalidation Purge Revalidate
สรุป
Payload CMS Capacity Planning TypeScript MongoDB PostgreSQL CDN Redis ISR Caching Hook Invalidation Sizing Monitoring Production
