Fedora CoreOS Tuning
Fedora CoreOS Immutable OS Performance Tuning Ignition Kernel Container Runtime Network Storage Boot Kubernetes Edge Production
| Tuning Area | Default | Optimized | Impact | Risk |
|---|---|---|---|---|
| TCP Buffer | 4096 87380 6291456 | 4096 262144 16777216 | Network throughput +30% | Low |
| Conntrack Max | 65536 | 1048576 | More concurrent connections | Low (more RAM) |
| File Descriptors | 1024 | 1048576 | More open files/sockets | Low |
| Huge Pages | Disabled | Enabled (2MB pages) | Memory access +10-20% | Medium |
| Swappiness | 60 | 10 | Less swapping, more RAM for apps | Low |
| I/O Scheduler | mq-deadline | none (NVMe) / mq-deadline (SSD) | I/O latency -20% | Low |
Ignition and Kernel Tuning
# === Butane Config for Performance ===
# config.bu (Butane YAML)
# variant: fcos
# version: 1.5.0
# storage:
# files:
# - path: /etc/sysctl.d/99-performance.conf
# mode: 0644
# contents:
# inline: |
# # Network
# net.core.rmem_max=16777216
# net.core.wmem_max=16777216
# net.ipv4.tcp_rmem=4096 262144 16777216
# net.ipv4.tcp_wmem=4096 262144 16777216
# net.core.somaxconn=65535
# net.core.netdev_max_backlog=65536
# net.ipv4.tcp_max_syn_backlog=65536
# net.nf_conntrack_max=1048576
# # Memory
# vm.swappiness=10
# vm.overcommit_memory=1
# vm.max_map_count=262144
# # File System
# fs.file-max=2097152
# fs.inotify.max_user_watches=524288
# - path: /etc/security/limits.d/99-nofile.conf
# mode: 0644
# contents:
# inline: |
# * soft nofile 1048576
# * hard nofile 1048576
# kernel_arguments:
# should_exist:
# - mitigations=auto
# - transparent_hugepage=madvise
# systemd:
# units:
# - name: performance-tuning.service
# enabled: true
# contents: |
# [Unit]
# Description=Apply Performance Tuning
# After=network.target
# [Service]
# Type=oneshot
# ExecStart=/usr/sbin/sysctl --system
# [Install]
# WantedBy=multi-user.target
# Convert to Ignition
# butane --pretty --strict config.bu > config.ign
from dataclasses import dataclass


@dataclass
class KernelParam:
    """One kernel sysctl parameter: its stock default, tuned value, and rationale."""
    param: str     # sysctl key, e.g. "vm.swappiness"
    default: str   # typical out-of-the-box value
    tuned: str     # recommended tuned value
    purpose: str   # what tuning this key achieves
    category: str  # grouping label: Network / Memory / FileSystem


# (key, default, tuned, purpose, category) rows; kept as plain tuples so the
# table is easy to scan and extend.
_PARAM_ROWS = [
    ("net.core.rmem_max", "212992", "16777216", "Max receive buffer", "Network"),
    ("net.core.wmem_max", "212992", "16777216", "Max send buffer", "Network"),
    ("net.core.somaxconn", "4096", "65535", "Max listen backlog", "Network"),
    ("net.nf_conntrack_max", "65536", "1048576", "Max tracked connections", "Network"),
    ("vm.swappiness", "60", "10", "Reduce swap usage", "Memory"),
    ("vm.max_map_count", "65530", "262144", "Max memory map areas", "Memory"),
    ("fs.file-max", "524288", "2097152", "Max open files system-wide", "FileSystem"),
    ("fs.inotify.max_user_watches", "8192", "524288", "Max inotify watches", "FileSystem"),
]
params = [KernelParam(*row) for row in _PARAM_ROWS]

print("=== Kernel Parameters ===")
for entry in params:
    # Emit the three detail lines in a single write; output is identical to
    # three separate print() calls.
    detail = (
        f" [{entry.category}] {entry.param}",
        f" Default: {entry.default} → Tuned: {entry.tuned}",
        f" Purpose: {entry.purpose}",
    )
    print("\n".join(detail))
Container Runtime Optimization
# === Container and Podman Tuning ===
# Podman/CRI-O config for CoreOS
# /etc/containers/storage.conf
# [storage]
# driver = "overlay"
# [storage.options.overlay]
# mountopt = "nodev,metacopy=on"
# /etc/containers/containers.conf
# [engine]
# cgroup_manager = "systemd"
# events_logger = "journald"
# runtime = "crun" # Faster than runc
# [engine.runtimes]
# crun = ["/usr/bin/crun"]
# CPU Pinning for critical containers
# podman run --cpuset-cpus="0-3" --memory="4g" --memory-swap="4g" myapp
# Kubernetes kubelet config on CoreOS
# /etc/kubernetes/kubelet-config.yaml
# apiVersion: kubelet.config.k8s.io/v1beta1
# kind: KubeletConfiguration
# cgroupDriver: systemd
# maxPods: 250
# kubeReserved:
# cpu: "500m"
# memory: "512Mi"
# systemReserved:
# cpu: "500m"
# memory: "512Mi"
# evictionHard:
# memory.available: "200Mi"
# nodefs.available: "10%"
@dataclass
class ContainerTuning:
    """A container-runtime tuning knob: the setting, its value, and where it applies."""
    setting: str     # short name of the knob
    value: str       # concrete value or CLI flag
    impact: str      # expected effect of applying it
    applies_to: str  # which runtimes/workloads it targets


# (setting, value, impact, applies_to) rows, expanded into dataclass instances.
_TUNING_ROWS = [
    ("Runtime: crun", "crun instead of runc",
     "Container startup 50% faster, less memory",
     "Podman / CRI-O"),
    ("Overlay metacopy", "metacopy=on",
     "Faster layer operations, less disk I/O",
     "Storage driver"),
    ("CPU Pinning", "--cpuset-cpus=0-3",
     "Dedicated CPU cores, no context switching",
     "Latency-sensitive containers"),
    ("Memory Limit = Swap", "--memory=4g --memory-swap=4g",
     "No swap for container, predictable performance",
     "All production containers"),
    ("cgroup v2", "systemd cgroup driver",
     "Better resource accounting, pressure stall info",
     "CoreOS default"),
    ("Read-only rootfs", "--read-only",
     "Security + slight performance improvement",
     "Stateless containers"),
]
tunings = [ContainerTuning(*row) for row in _TUNING_ROWS]

print("=== Container Tuning ===")
for knob in tunings:
    print(f" [{knob.setting}] Value: {knob.value}")
    print(f" Impact: {knob.impact}")
    print(f" Applies to: {knob.applies_to}")
Monitoring and Updates
# === CoreOS Update and Monitoring ===
# Auto-update with Zincati
# /etc/zincati/config.d/55-updates-strategy.toml
# [updates]
# strategy = "periodic"
# [[updates.periodic.window]]
# days = [ "Sun" ]
# start_time = "02:00"
# length_minutes = 120
# rpm-ostree commands
# rpm-ostree status # Current deployment status
# rpm-ostree upgrade # Manual upgrade
# rpm-ostree rollback # Rollback to previous
# rpm-ostree install htop # Layer package (avoid if possible)
# rpm-ostree kargs --append=key=value # Add kernel argument
# Monitoring with Prometheus Node Exporter
# podman run -d --name node-exporter \
# --net=host --pid=host \
# -v /:/host:ro,rslave \
# quay.io/prometheus/node-exporter --path.rootfs=/host
@dataclass
class MonitorMetric:
    """A node metric to monitor, with its Prometheus series, alert threshold, and response."""
    metric: str           # human-readable metric name
    source: str           # Prometheus series / exporter metric name
    alert_threshold: str  # condition that should trigger an alert
    action: str           # operator response when the alert fires


metrics = [
    MonitorMetric("CPU Usage", "node_cpu_seconds_total",
                  "> 80% for 5 min", "Scale up or optimize workload"),
    MonitorMetric("Memory Usage", "node_memory_MemAvailable_bytes",
                  "< 500 MB available", "Check container limits, OOM risk"),
    MonitorMetric("Disk Usage", "node_filesystem_avail_bytes",
                  "< 15% free", "Clean images, expand disk"),
    MonitorMetric("Network Errors", "node_network_receive_errs_total",
                  "> 0 for 5 min", "Check NIC, cable, driver"),
    # Fixed metric name: kube-state-metrics exposes this counter with the
    # Prometheus counter convention `_total` suffix.
    MonitorMetric("Container Restarts", "kube_pod_container_status_restarts_total",
                  "> 3 in 1 hour", "Check logs, fix crash loop"),
    # NOTE(review): not a standard exporter series — presumably a custom
    # rpm-ostree/Zincati-derived metric; confirm the exact name in your stack.
    MonitorMetric("OS Update Pending", "rpm_ostree_pending_update",
                  "Update available > 7 days", "Schedule maintenance window"),
    MonitorMetric("Boot Time", "node_boot_time_seconds",
                  "Unexpected reboot", "Check crash, kernel panic"),
]

print("=== Monitoring Metrics ===")
for m in metrics:
    print(f" [{m.metric}] Source: {m.source}")
    print(f" Alert: {m.alert_threshold}")
    print(f" Action: {m.action}")
เคล็ดลับ
- crun: ใช้ crun แทน runc Container Start เร็วขึ้น 50%
- Layer: อย่า rpm-ostree install มากเกินไป ใช้ Container แทน
- Rollback: ทดสอบ Rollback ก่อน Deploy จริง rpm-ostree rollback
- Ignition: เก็บ Ignition Config ใน Git เป็น Infrastructure as Code
- Zincati: ตั้ง Update Window นอกเวลาใช้งาน ป้องกัน Downtime
Fedora CoreOS คืออะไร
Immutable OS Container rpm-ostree Atomic Update Rollback Ignition Provisioning Zincati Auto-update Kubernetes Edge IoT Boot เร็ว ขนาดเล็ก
Performance Tuning ทำอะไรได้บ้าง
Kernel sysctl Network TCP Buffer Conntrack Memory Huge Pages Swappiness Container crun CPU Pinning Storage Scheduler Boot Service GRUB
Ignition Config ตั้งค่าอย่างไร
Butane YAML Ignition JSON Storage Files Systemd Units User SSH Network Static IP Kernel Arguments butane --pretty PXE Cloud-init Bare Metal
เหมาะกับงานแบบไหน
Kubernetes Worker Container Host Edge IoT CI/CD Runner High Security Auto-scaling Bare Metal ไม่เหมาะ Desktop Traditional Package
สรุป
Fedora CoreOS Immutable OS Performance Tuning Ignition Kernel sysctl Container crun Network TCP Storage rpm-ostree Zincati Kubernetes Production
