SiamCafe.net Blog
Cybersecurity

Uptime Kuma Monitoring Edge Deployment

uptime kuma monitoring edge deployment
Uptime Kuma Monitoring Edge Deployment | SiamCafe Blog
2025-06-12 · อ. บอม — SiamCafe.net · 1,613 คำ

Uptime Kuma Monitoring Edge Deployment คืออะไร

Uptime Kuma เป็น open source self-hosted monitoring tool ที่ใช้ตรวจสอบ uptime ของเว็บไซต์ API และบริการต่างๆ มี UI สวยงาม ติดตั้งง่าย รองรับ notifications หลายช่องทาง Edge Deployment คือการ deploy แอปพลิเคชันไปยัง edge locations ใกล้กับผู้ใช้งาน เพื่อลด latency และเพิ่ม availability การรวมสองแนวคิดนี้ช่วยสร้างระบบ monitoring แบบ distributed ที่ตรวจสอบบริการจากหลาย locations ทั่วโลก ให้ภาพรวมที่แม่นยำเรื่อง uptime และ performance จากมุมมองของผู้ใช้จริง

Uptime Kuma Architecture

# kuma_arch.py — Uptime Kuma architecture
import json

class UptimeKumaArch:
    """Reference catalog of Uptime Kuma capabilities plus a Docker setup snippet.

    Pure presentation helper: holds static data and prints it to stdout.
    """

    # Capability areas of Uptime Kuma; the values below are printed verbatim.
    FEATURES = {
        "monitors": {
            "name": "Monitor Types",
            "types": [
                "HTTP(s)",
                "TCP Port",
                "Ping (ICMP)",
                "DNS",
                "Docker Container",
                "Steam Game Server",
                "MQTT",
                "gRPC",
                "Keyword check",
            ],
        },
        "notifications": {
            "name": "Notification Channels (90+)",
            "channels": [
                "Slack",
                "Discord",
                "Telegram",
                "LINE",
                "Email (SMTP)",
                "Webhook",
                "PagerDuty",
                "Opsgenie",
                "Microsoft Teams",
            ],
        },
        "status_page": {
            "name": "Status Page",
            "description": "Public status page สำหรับแจ้ง users เรื่อง service status",
            "features": [
                "Custom domain",
                "Multiple pages",
                "Incident management",
                "Maintenance windows",
            ],
        },
        "dashboard": {
            "name": "Dashboard",
            "description": "Real-time dashboard แสดง uptime %, response time, certificate expiry",
        },
    }

    # Verbatim installation reference; show_setup() prints its first 400 chars.
    SETUP = """
# Docker setup (recommended)
docker run -d --name uptime-kuma \\
  -p 3001:3001 \\
  -v uptime-kuma:/app/data \\
  --restart=unless-stopped \\
  louislam/uptime-kuma:latest

# Docker Compose
version: '3'
services:
  uptime-kuma:
    image: louislam/uptime-kuma:latest
    container_name: uptime-kuma
    ports:
      - "3001:3001"
    volumes:
      - uptime-kuma-data:/app/data
    restart: unless-stopped

volumes:
  uptime-kuma-data:

# Access: http://localhost:3001
"""

    def show_features(self):
        """Print each feature group with a short sample of its entries."""
        print("=== Uptime Kuma Features ===\n")
        for feature in self.FEATURES.values():
            print(f"[{feature['name']}]")
            # Show up to five examples, or fall back to the prose description
            # when the group carries no example list.
            if 'types' in feature:
                sample = ', '.join(feature['types'][:5])
                print(f"  Types: {sample}")
            elif 'channels' in feature:
                sample = ', '.join(feature['channels'][:5])
                print(f"  Channels: {sample}")
            elif 'description' in feature:
                print(f"  {feature['description']}")
            print()

    def show_setup(self):
        """Print the beginning of the Docker installation reference."""
        print("=== Docker Setup ===")
        print(self.SETUP[:400])

# Demo: render the feature matrix and the Docker setup reference.
kuma_arch = UptimeKumaArch()
kuma_arch.show_features()
kuma_arch.show_setup()

Edge Deployment Strategy

# edge_deploy.py — Edge deployment for monitoring
import json

class EdgeDeployment:
    """Distributed monitoring layout: a central hub plus regional edge probes.

    Holds static reference configs (Terraform, Docker Compose) for running
    Uptime Kuma instances in several regions, and helpers that print them.
    """

    # Logical components of the edge monitoring architecture.
    ARCHITECTURE = {
        "central": {
            "name": "Central Hub",
            "role": "รวม data จาก edge probes, dashboard, alerting, status page",
            "location": "Primary cloud region (e.g., Singapore)",
        },
        "edge_probes": {
            "name": "Edge Probes (Distributed Monitors)",
            "role": "ส่ง health checks จากหลาย locations",
            "locations": ["Singapore", "Tokyo", "Sydney", "Frankfurt", "US-East", "US-West"],
        },
    }

    # Terraform reference deploying one Uptime Kuma container per region.
    # Fixed: restored the ${each.key} / ${each.value.name} interpolations that
    # had been stripped from the template — without them every container shares
    # the literal name "kuma-edge-" (a hard Terraform error under for_each),
    # all probes write to the same host path, and the probe name env is empty.
    TERRAFORM_EDGE = """
# edge_kuma.tf — Terraform for edge Uptime Kuma deployment
terraform {
  required_providers {
    docker = { source = "kreuzwerker/docker" }
  }
}

variable "regions" {
  default = {
    "sgp" = { name = "Singapore", port = 3001 }
    "tyo" = { name = "Tokyo",     port = 3002 }
    "syd" = { name = "Sydney",    port = 3003 }
    "fra" = { name = "Frankfurt", port = 3004 }
  }
}

resource "docker_container" "kuma_edge" {
  for_each = var.regions

  name  = "kuma-edge-${each.key}"
  image = "louislam/uptime-kuma:latest"

  ports {
    internal = 3001
    external = each.value.port
  }

  volumes {
    container_path = "/app/data"
    host_path      = "/opt/kuma-${each.key}/data"
  }

  env = [
    "UPTIME_KUMA_PROBE_NAME=${each.value.name}",
  ]

  restart = "unless-stopped"
}
"""

    # Docker Compose alternative: one central instance plus regional probes.
    DOCKER_COMPOSE_MULTI = """
# docker-compose-edge.yml — Multi-region monitoring
version: '3'
services:
  kuma-central:
    image: louislam/uptime-kuma:latest
    ports: ["3001:3001"]
    volumes: ["kuma-central:/app/data"]
    restart: unless-stopped
    labels:
      - "role=central"
      - "region=sgp"

  kuma-probe-tyo:
    image: louislam/uptime-kuma:latest
    ports: ["3002:3001"]
    volumes: ["kuma-tyo:/app/data"]
    restart: unless-stopped
    labels:
      - "role=probe"
      - "region=tyo"

  kuma-probe-syd:
    image: louislam/uptime-kuma:latest
    ports: ["3003:3001"]
    volumes: ["kuma-syd:/app/data"]
    restart: unless-stopped

volumes:
  kuma-central:
  kuma-tyo:
  kuma-syd:
"""

    def show_architecture(self):
        """Print each architecture component with its role and locations."""
        print("=== Edge Architecture ===\n")
        for key, comp in self.ARCHITECTURE.items():
            print(f"[{comp['name']}]")
            print(f"  Role: {comp['role']}")
            if 'locations' in comp:
                print(f"  Locations: {', '.join(comp['locations'])}")
            print()

    def show_terraform(self):
        """Print the first 500 characters of the Terraform reference."""
        print("=== Terraform Edge ===")
        print(self.TERRAFORM_EDGE[:500])

# Demo: print the edge architecture and the Terraform reference.
edge_deployment = EdgeDeployment()
edge_deployment.show_architecture()
edge_deployment.show_terraform()

API Automation

# api_automation.py — Uptime Kuma API automation
import json
import random

class KumaAPI:
    """Sample API client listing plus a simulated service-health dashboard."""

    # Verbatim example client; show_client() prints its first 600 chars.
    PYTHON_CLIENT = """
# kuma_client.py — Uptime Kuma API client
import requests

class UptimeKumaClient:
    def __init__(self, base_url, username, password):
        self.base_url = base_url.rstrip('/')
        self.session = requests.Session()
        self._login(username, password)
    
    def _login(self, username, password):
        resp = self.session.post(
            f"{self.base_url}/login/access-token",
            json={"username": username, "password": password},
        )
        token = resp.json().get("access_token")
        self.session.headers["Authorization"] = f"Bearer {token}"
    
    def get_monitors(self):
        resp = self.session.get(f"{self.base_url}/api/monitors")
        return resp.json()
    
    def add_monitor(self, name, url, monitor_type="http", interval=60):
        data = {
            "name": name,
            "url": url,
            "type": monitor_type,
            "interval": interval,
            "retryInterval": 30,
            "maxretries": 3,
            "accepted_statuscodes": ["200-299"],
        }
        resp = self.session.post(f"{self.base_url}/api/monitors", json=data)
        return resp.json()
    
    def get_heartbeats(self, monitor_id, hours=24):
        resp = self.session.get(
            f"{self.base_url}/api/monitors/{monitor_id}/beats",
            params={"hours": hours},
        )
        return resp.json()
    
    def get_uptime(self, monitor_id, duration=24):
        beats = self.get_heartbeats(monitor_id, duration)
        if not beats:
            return 0
        up = sum(1 for b in beats if b.get("status") == 1)
        return (up / len(beats)) * 100
    
    def pause_monitor(self, monitor_id):
        return self.session.post(f"{self.base_url}/api/monitors/{monitor_id}/pause").json()
    
    def resume_monitor(self, monitor_id):
        return self.session.post(f"{self.base_url}/api/monitors/{monitor_id}/resume").json()

# Usage
client = UptimeKumaClient("http://kuma.example.com:3001", "admin", "password")

# Add monitors for all services
services = [
    {"name": "Main Website", "url": "https://example.com"},
    {"name": "API Server", "url": "https://api.example.com/health"},
    {"name": "Blog", "url": "https://blog.example.com"},
]
for svc in services:
    client.add_monitor(svc["name"], svc["url"])

# Check uptime
monitors = client.get_monitors()
for m in monitors:
    uptime = client.get_uptime(m["id"])
    print(f"  [{m['name']}] Uptime: {uptime:.2f}%")
"""

    def show_client(self):
        """Print the beginning of the sample API client listing."""
        print("=== Python API Client ===")
        print(self.PYTHON_CLIENT[:600])

    def dashboard_sim(self):
        """Print a randomly generated snapshot of service health."""
        print("\n=== Monitoring Dashboard ===")
        services = [
            {"name": "Main Website", "uptime": random.uniform(99.5, 100), "latency": random.randint(50, 200)},
            {"name": "API Server", "uptime": random.uniform(99.0, 100), "latency": random.randint(30, 150)},
            {"name": "Database", "uptime": random.uniform(99.8, 100), "latency": random.randint(5, 30)},
            {"name": "CDN", "uptime": random.uniform(99.9, 100), "latency": random.randint(10, 50)},
            {"name": "Auth Service", "uptime": random.uniform(99.5, 100), "latency": random.randint(40, 120)},
        ]
        for svc in services:
            # Anything strictly above 99% uptime is reported as healthy.
            if svc["uptime"] > 99.0:
                status = "UP"
            else:
                status = "DEGRADED"
            print(f"  [{status:>8}] {svc['name']:<20} Uptime: {svc['uptime']:.2f}% | Latency: {svc['latency']}ms")

# Demo: print the sample client and a simulated dashboard snapshot.
kuma_api = KumaAPI()
kuma_api.show_client()
kuma_api.dashboard_sim()

Alerting & Incident Response

# alerting.py — Alerting and incident response
import json
import random

class AlertingSetup:
    """Notification configuration reference plus an incident-response playbook."""

    # Verbatim notification-setup example; show_notification() prints 500 chars.
    NOTIFICATION_CONFIG = """
# notification_setup.py — Configure notifications
import requests

class NotificationSetup:
    def __init__(self, kuma_client):
        self.client = kuma_client
    
    def setup_slack(self, webhook_url, channel="#alerts"):
        return self.client.session.post(
            f"{self.client.base_url}/api/notifications",
            json={
                "name": "Slack Alerts",
                "type": "slack",
                "slackwebhookURL": webhook_url,
                "slackchannel": channel,
                "isDefault": True,
                "applyExisting": True,
            },
        ).json()
    
    def setup_line(self, channel_token):
        return self.client.session.post(
            f"{self.client.base_url}/api/notifications",
            json={
                "name": "LINE Notify",
                "type": "LineNotify",
                "lineNotifyAccessToken": channel_token,
                "isDefault": True,
            },
        ).json()
    
    def setup_telegram(self, bot_token, chat_id):
        return self.client.session.post(
            f"{self.client.base_url}/api/notifications",
            json={
                "name": "Telegram",
                "type": "telegram",
                "telegramBotToken": bot_token,
                "telegramChatID": chat_id,
                "isDefault": True,
            },
        ).json()
"""

    # Six-step incident-response flow with a target completion time per step.
    INCIDENT_PLAYBOOK = {
        "detect": {"name": "1. Detect", "action": "Uptime Kuma alerts ส่ง notification", "time": "0-1 min"},
        "acknowledge": {"name": "2. Acknowledge", "action": "On-call engineer acknowledge alert", "time": "< 5 min"},
        "diagnose": {"name": "3. Diagnose", "action": "Check logs, metrics, edge probe results", "time": "5-15 min"},
        "mitigate": {"name": "4. Mitigate", "action": "Failover, restart, rollback", "time": "15-30 min"},
        "resolve": {"name": "5. Resolve", "action": "Fix root cause, verify all probes green", "time": "30-60 min"},
        "postmortem": {"name": "6. Post-mortem", "action": "Document incident, action items, update runbook", "time": "24-48h"},
    }

    def show_notification(self):
        """Print the beginning of the notification-setup example."""
        print("=== Notification Setup ===")
        print(self.NOTIFICATION_CONFIG[:500])

    def show_playbook(self):
        """Print every playbook step with its target completion time."""
        print("\n=== Incident Response Playbook ===")
        for step in self.INCIDENT_PLAYBOOK.values():
            print(f"  [{step['name']}] {step['action']} (Target: {step['time']})")

    def recent_incidents(self):
        """Print a randomly generated list of recent (resolved) incidents."""
        print("\n=== Recent Incidents ===")
        incidents = [
            {"time": "2h ago", "service": "API Server", "duration": f"{random.randint(2, 15)}min", "cause": "Memory leak", "status": "Resolved"},
            {"time": "1d ago", "service": "CDN", "duration": f"{random.randint(5, 30)}min", "cause": "SSL cert renewal failed", "status": "Resolved"},
            {"time": "3d ago", "service": "Database", "duration": f"{random.randint(1, 10)}min", "cause": "Connection pool exhausted", "status": "Resolved"},
        ]
        for incident in incidents:
            print(f"  [{incident['status']:>8}] {incident['time']:>5} | {incident['service']:<15} Duration: {incident['duration']} | Cause: {incident['cause']}")

# Demo: print notification setup, the playbook, and recent incidents.
alerting_setup = AlertingSetup()
alerting_setup.show_notification()
alerting_setup.show_playbook()
alerting_setup.recent_incidents()

Kubernetes Edge Deployment

# k8s_edge.py — Kubernetes edge deployment
import json

class K8sEdgeDeployment:
    """Kubernetes manifests and operational tips for edge monitoring."""

    # Verbatim Deployment/Service/Ingress manifest; show_manifest() prints
    # its first 600 characters.
    MANIFEST = """
# kuma-edge.yaml — Kubernetes deployment for edge monitoring
apiVersion: apps/v1
kind: Deployment
metadata:
  name: uptime-kuma
  namespace: monitoring
  labels:
    app: uptime-kuma
spec:
  replicas: 1
  selector:
    matchLabels:
      app: uptime-kuma
  template:
    metadata:
      labels:
        app: uptime-kuma
    spec:
      containers:
        - name: uptime-kuma
          image: louislam/uptime-kuma:latest
          ports:
            - containerPort: 3001
          volumeMounts:
            - name: data
              mountPath: /app/data
          resources:
            requests:
              memory: "256Mi"
              cpu: "100m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /
              port: 3001
            initialDelaySeconds: 30
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              port: 3001
            initialDelaySeconds: 5
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: kuma-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: uptime-kuma
  namespace: monitoring
spec:
  selector:
    app: uptime-kuma
  ports:
    - port: 3001
      targetPort: 3001
  type: ClusterIP
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: uptime-kuma
  namespace: monitoring
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  tls:
    - hosts: [status.example.com]
      secretName: kuma-tls
  rules:
    - host: status.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: uptime-kuma
                port:
                  number: 3001
"""

    def show_manifest(self):
        """Print the beginning of the Kubernetes manifest."""
        print("=== Kubernetes Manifest ===")
        print(self.MANIFEST[:600])

    def edge_tips(self):
        """Print practical guidelines for running probes at the edge."""
        print("\n=== Edge Deployment Tips ===")
        tips = [
            "ใช้ lightweight instance (512MB RAM พอ) สำหรับ edge probes",
            "ตั้ง probe interval ให้เหมาะสม (60s สำหรับทั่วไป, 30s สำหรับ critical)",
            "Sync monitor configs ข้าม edge instances ด้วย API automation",
            "ใช้ VPN/WireGuard สำหรับ secure communication ระหว่าง probes",
            "เก็บ data retention ให้เหมาะสม (90 days สำหรับ edge, 1 year สำหรับ central)",
        ]
        # One joined print emits the same bullet-per-line output as a loop.
        print("\n".join(f"  • {tip}" for tip in tips))

# Demo: print the Kubernetes manifest and edge deployment tips.
k8s_edge = K8sEdgeDeployment()
k8s_edge.show_manifest()
k8s_edge.edge_tips()

FAQ - คำถามที่พบบ่อย

Q: Uptime Kuma กับ Grafana/Prometheus ต่างกันอย่างไร?

A: Uptime Kuma: external monitoring (เช็คจากภายนอกว่า service ยัง up ไหม), ติดตั้งง่าย, มี status page built-in | Grafana/Prometheus: internal monitoring (metrics จาก application เอง), complex setup, powerful dashboards | แนะนำให้ใช้ทั้งคู่: Uptime Kuma สำหรับ uptime/external checks + Prometheus/Grafana สำหรับ internal metrics

Q: ทำไมต้อง monitor จากหลาย locations?

A: เพราะ: 1) Service อาจ down เฉพาะบาง region (CDN, DNS issues) 2) Latency ต่างกันตาม location 3) ให้ภาพที่แม่นยำกว่า monitor จากจุดเดียว 4) ลด false positive (ถ้า 1 probe fail แต่ 3 อื่น OK = network issue ไม่ใช่ service down) แนะนำ: อย่างน้อย 3 locations (Asia, Europe, US)

Q: Uptime Kuma ใช้ resources เท่าไหร่?

A: น้อยมาก: RAM 150-300MB, CPU < 5% (idle) สำหรับ monitors 50-100 ตัว | Recommended: 512MB RAM, 1 vCPU | สำหรับ edge probe: 256MB RAM ก็เพียงพอ | Docker image: ~150MB | Storage: ขึ้นกับ data retention (ประมาณ 1GB สำหรับ 6 เดือน ที่ ~50 monitors)

Q: ใช้ Uptime Kuma ฟรีได้ไหม?

A: ฟรีทั้งหมด — Uptime Kuma เป็น open source (MIT license) | Self-hosted: ฟรี (จ่ายแค่ค่า server/VPS) | ค่า VPS สำหรับ edge probe: ~$3-5/month ต่อ location (DigitalOcean, Vultr, Hetzner) | ไม่มี premium tier, ไม่มี feature lock — ได้ทุก feature ฟรี

📖 บทความที่เกี่ยวข้อง

Uptime Kuma Monitoring Pub Sub Architectureอ่านบทความ → Uptime Kuma Monitoring Disaster Recovery Planอ่านบทความ → Uptime Kuma Monitoring Post-mortem Analysisอ่านบทความ → Uptime Kuma Monitoring Site Reliability SREอ่านบทความ →

📚 ดูบทความทั้งหมด →