Stable Diffusion and ComfyUI: running a remote AI image-generation workstation
Stable Diffusion is an open source text-to-image AI model that generates images from text prompts. It uses a diffusion process that iteratively denoises random noise until an image emerges. It supports img2img, inpainting, ControlNet, and LoRA extensions, and newer model families such as SDXL and SD3 produce noticeably higher-quality results.
ComfyUI is a node-based GUI for Stable Diffusion in which you build a workflow graph by wiring nodes together. Compared with AUTOMATIC1111 it is more flexible for complex workflows — multi-model pipelines, upscaling chains, batch processing — and has a large community of custom nodes.
Remote work setup: run ComfyUI on a server with a GPU (a cloud GPU instance or a workstation at home/office) and access it from anywhere through a web browser. The heavy GPU work happens on the server, so you can drive demanding workflows from a lightweight laptop.
Installing ComfyUI on a Remote Server
Set up ComfyUI on a GPU server
# === ComfyUI Remote Installation ===

# 1. Server Requirements
# - GPU: NVIDIA RTX 3060+ (12GB VRAM minimum)
# - RAM: 16GB+ (32GB recommended)
# - Storage: 100GB+ SSD (models are large)
# - OS: Ubuntu 22.04 LTS
# - CUDA: 12.x
# - Python: 3.11+

# 2. Install NVIDIA drivers and CUDA
sudo apt update && sudo apt upgrade -y
sudo apt install -y nvidia-driver-535 nvidia-cuda-toolkit

# Verify the driver is loaded — should print GPU info and CUDA version
nvidia-smi

# 3. Clone ComfyUI
# sudo: /opt is root-owned; hand the tree back to the current user afterwards
cd /opt
sudo git clone https://github.com/comfyanonymous/ComfyUI.git
sudo chown -R "$USER": ComfyUI
cd ComfyUI

# 4. Create a virtual environment
python3.11 -m venv venv
source venv/bin/activate

# 5. Install dependencies (cu121 wheels to match the CUDA 12.x toolkit above)
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121
pip install -r requirements.txt

# 6. Download models
# SDXL Base
wget -P models/checkpoints/ \
  "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors"
# SDXL Refiner
wget -P models/checkpoints/ \
  "https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors"
# VAE
wget -P models/vae/ \
  "https://huggingface.co/stabilityai/sdxl-vae/resolve/main/sdxl_vae.safetensors"

# 7. Install popular custom nodes
cd custom_nodes/
git clone https://github.com/ltdrdata/ComfyUI-Manager.git
git clone https://github.com/Fannovel16/comfyui_controlnet_aux.git
git clone https://github.com/cubiq/ComfyUI_IPAdapter_plus.git
git clone https://github.com/ssitu/ComfyUI_UltimateSDUpscale.git
cd ..

# Install any per-node Python dependencies
for d in custom_nodes/*/; do
    if [ -f "$d/requirements.txt" ]; then
        pip install -r "$d/requirements.txt"
    fi
done

# 8. Start ComfyUI (listen on all interfaces)
python main.py --listen 0.0.0.0 --port 8188

# 9. Systemd service
# sudo tee: /etc/systemd is root-owned — the original plain `cat >` redirection
# would fail with "Permission denied" for a non-root admin
sudo tee /etc/systemd/system/comfyui.service > /dev/null << 'EOF'
[Unit]
Description=ComfyUI Server
After=network.target

[Service]
Type=simple
User=comfyui
WorkingDirectory=/opt/ComfyUI
Environment=PATH=/opt/ComfyUI/venv/bin:/usr/bin
ExecStart=/opt/ComfyUI/venv/bin/python main.py --listen 0.0.0.0 --port 8188
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF

sudo systemctl enable --now comfyui
echo "ComfyUI installed"
Custom Workflows and Nodes
Build workflows and use them in production
#!/usr/bin/env python3
# comfyui_workflows.py ??? ComfyUI Workflow Management
import json
import logging
from typing import Dict, List
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("workflows")
class ComfyUIWorkflowManager:
    """Factory for ComfyUI workflow definitions.

    Each method returns a plain dict: either a full API-format node graph
    (txt2img) or a human-readable summary/config template. Node inputs that
    reference another node use the ``["node_id", output_index]`` form that
    the ComfyUI API expects.
    """

    def __init__(self) -> None:
        # Registry for named workflows (reserved for future use; the
        # builder methods below do not populate it).
        self.workflows: Dict[str, dict] = {}

    def txt2img_sdxl_workflow(self) -> dict:
        """Return a standard SDXL text-to-image workflow graph."""
        return {
            "name": "SDXL txt2img",
            "nodes": {
                "3": {
                    "class_type": "KSampler",
                    "inputs": {
                        "seed": 42,
                        "steps": 30,
                        "cfg": 7.5,
                        "sampler_name": "euler_ancestral",
                        "scheduler": "normal",
                        "denoise": 1.0,
                        "model": ["4", 0],
                        "positive": ["6", 0],
                        "negative": ["7", 0],
                        "latent_image": ["5", 0],
                    },
                },
                "4": {
                    "class_type": "CheckpointLoaderSimple",
                    "inputs": {
                        "ckpt_name": "sd_xl_base_1.0.safetensors",
                    },
                },
                "5": {
                    "class_type": "EmptyLatentImage",
                    "inputs": {
                        "width": 1024,
                        "height": 1024,
                        "batch_size": 1,
                    },
                },
                "6": {
                    "class_type": "CLIPTextEncode",
                    "inputs": {
                        "text": "beautiful landscape, mountains, sunset, 8k, detailed",
                        "clip": ["4", 1],
                    },
                },
                "7": {
                    "class_type": "CLIPTextEncode",
                    "inputs": {
                        "text": "ugly, blurry, low quality, watermark",
                        "clip": ["4", 1],
                    },
                },
                "8": {
                    "class_type": "VAEDecode",
                    "inputs": {
                        "samples": ["3", 0],
                        "vae": ["4", 2],
                    },
                },
                "9": {
                    "class_type": "SaveImage",
                    "inputs": {
                        "filename_prefix": "sdxl_output",
                        "images": ["8", 0],
                    },
                },
            },
        }

    def upscale_workflow(self) -> dict:
        """Return a summary of a 4x image-upscale pipeline.

        NOTE: the arrows below were mojibake ("???") in the original source
        (an encoding-mangled "→"); repaired to ASCII "->".
        """
        return {
            "name": "4x Upscale Pipeline",
            "description": "Load image -> Upscale 4x with model -> Save",
            "nodes_summary": [
                "LoadImage -> input image",
                "UpscaleModelLoader -> RealESRGAN_x4plus",
                "ImageUpscaleWithModel -> 4x upscale",
                "SaveImage -> output",
            ],
        }

    def batch_processing(self) -> dict:
        """Return a batch image-generation configuration template."""
        return {
            "name": "Batch Generation",
            "config": {
                "prompts_file": "prompts.txt",
                "output_dir": "output/batch/",
                "settings": {
                    "width": 1024,
                    "height": 1024,
                    "steps": 30,
                    "cfg": 7.5,
                    "batch_size": 4,
                    "seeds": "random",
                },
                "estimated_time_per_image": "15-30 seconds (SDXL on RTX 4090)",
            },
        }
# Demo: build each workflow and print a short report.
mgr = ComfyUIWorkflowManager()

workflow = mgr.txt2img_sdxl_workflow()
print(f"Workflow: {workflow['name']}")
print(f"Nodes: {len(workflow['nodes'])}")

upscale = mgr.upscale_workflow()
print(f"\nUpscale: {upscale['name']}")
print("\n".join(f" {step}" for step in upscale["nodes_summary"]))

batch = mgr.batch_processing()
print(f"\nBatch: {batch['config']['estimated_time_per_image']}")
Remote Access via SSH Tunnel and Other Methods
Securely reach ComfyUI from outside the server's network
# === Secure Remote Access ===

# Method 1: SSH Tunnel (recommended - most secure)
# From your local machine:
ssh -L 8188:localhost:8188 -p 22 user@gpu-server.example.com
# Then open http://localhost:8188 in browser

# Persistent tunnel with autossh:
sudo apt install autossh
autossh -M 0 -f -N -L 8188:localhost:8188 user@gpu-server.example.com

# Method 2: Nginx reverse proxy with auth
# apache2-utils provides the htpasswd tool used below (the original steps
# never installed it); nginx writes below need sudo (/etc/nginx is root-owned)
sudo apt install -y nginx apache2-utils
sudo tee /etc/nginx/sites-available/comfyui > /dev/null << 'EOF'
server {
    listen 443 ssl http2;
    server_name comfyui.example.com;

    ssl_certificate /etc/letsencrypt/live/comfyui.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/comfyui.example.com/privkey.pem;

    # Basic Auth
    auth_basic "ComfyUI";
    auth_basic_user_file /etc/nginx/.htpasswd;

    # WebSocket support (required for ComfyUI)
    location / {
        proxy_pass http://127.0.0.1:8188;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_read_timeout 300s;
        proxy_send_timeout 300s;
        client_max_body_size 100m;
    }

    # Upload endpoint (larger body limit for image uploads)
    location /upload/ {
        proxy_pass http://127.0.0.1:8188;
        client_max_body_size 500m;
    }
}
EOF

# Create password file
sudo htpasswd -c /etc/nginx/.htpasswd comfyuser
sudo ln -s /etc/nginx/sites-available/comfyui /etc/nginx/sites-enabled/
sudo certbot --nginx -d comfyui.example.com
sudo nginx -t && sudo systemctl reload nginx

# Method 3: Tailscale (zero-config VPN)
# Install on server:
curl -fsSL https://tailscale.com/install.sh | sh
sudo tailscale up
# Install on client: download from tailscale.com, join the same Tailnet,
# then access the server's Tailscale IP: http://100.x.y.z:8188

# Method 4: Cloudflare Tunnel (no port forwarding needed)
# Install cloudflared on server:
wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64
chmod +x cloudflared-linux-amd64
sudo mv cloudflared-linux-amd64 /usr/local/bin/cloudflared
cloudflared tunnel login
cloudflared tunnel create comfyui
cloudflared tunnel route dns comfyui comfyui.example.com

# Replace <TUNNEL-ID> with the UUID printed by `tunnel create`.
# The original config left the tunnel id blank and pointed credentials-file
# at "/root/.cloudflared/.json" (missing the UUID) — cloudflared rejects both.
cat > ~/.cloudflared/config.yml << 'EOF'
tunnel: <TUNNEL-ID>
credentials-file: /root/.cloudflared/<TUNNEL-ID>.json
ingress:
  - hostname: comfyui.example.com
    service: http://localhost:8188
  - service: http_status:404
EOF

cloudflared tunnel run comfyui
echo "Remote access configured"
API Integration and Automation
Use the ComfyUI API for automated generation
#!/usr/bin/env python3
# comfyui_api.py ??? ComfyUI API Client
import json
import logging
import urllib.request
import urllib.parse
import uuid
import time
from typing import Dict, Optional
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("api")
class ComfyUIClient:
    """Minimal stdlib-only HTTP client for the ComfyUI REST API.

    Fixes over the original: every ``urlopen`` response is now closed via a
    context manager (the original leaked the HTTP connections), and all
    requests honour a configurable socket ``timeout`` so a dead server
    cannot hang the caller forever.
    """

    def __init__(self, server_url: str = "http://localhost:8188",
                 timeout: float = 60.0) -> None:
        self.server_url = server_url
        # Unique id so the server can associate queued prompts / websocket
        # events with this client.
        self.client_id = str(uuid.uuid4())
        # Per-request socket timeout in seconds (new, backward-compatible).
        self.timeout = timeout

    def queue_prompt(self, workflow: Dict) -> Dict:
        """Queue a workflow graph for execution.

        Returns the server's JSON reply (contains ``prompt_id``).
        """
        payload = {
            "prompt": workflow,
            "client_id": self.client_id,
        }
        data = json.dumps(payload).encode("utf-8")
        req = urllib.request.Request(
            f"{self.server_url}/prompt",
            data=data,
            headers={"Content-Type": "application/json"},
        )
        with urllib.request.urlopen(req, timeout=self.timeout) as response:
            return json.loads(response.read())

    def get_history(self, prompt_id: str) -> Dict:
        """Fetch generation history/results for a queued prompt."""
        url = f"{self.server_url}/history/{prompt_id}"
        with urllib.request.urlopen(url, timeout=self.timeout) as response:
            return json.loads(response.read())

    def get_image(self, filename: str, subfolder: str = "",
                  folder_type: str = "output") -> bytes:
        """Download a generated image; returns the raw image bytes."""
        params = urllib.parse.urlencode({
            "filename": filename,
            "subfolder": subfolder,
            "type": folder_type,
        })
        url = f"{self.server_url}/view?{params}"
        with urllib.request.urlopen(url, timeout=self.timeout) as response:
            return response.read()

    def wait_for_completion(self, prompt_id: str, timeout: float = 300) -> Dict:
        """Poll every 2s until the prompt appears in history or we time out."""
        start = time.time()
        while time.time() - start < timeout:
            history = self.get_history(prompt_id)
            if prompt_id in history:
                return history[prompt_id]
            time.sleep(2)
        return {"error": "Timeout waiting for completion"}

    def generate_image(self, prompt_text: str, negative: str = "ugly, blurry",
                       width: int = 1024, height: int = 1024,
                       steps: int = 30) -> Dict:
        """High-level helper: queue an SDXL txt2img workflow for *prompt_text*.

        Returns ``{"prompt_id": ..., "status": "queued"}``; use
        :meth:`wait_for_completion` / :meth:`get_image` to collect results.
        """
        workflow = {
            "3": {
                "class_type": "KSampler",
                "inputs": {
                    # Time-derived seed, clamped to the 32-bit range ComfyUI expects.
                    "seed": int(time.time()) % 2**32,
                    "steps": steps,
                    "cfg": 7.5,
                    "sampler_name": "euler_ancestral",
                    "scheduler": "normal",
                    "denoise": 1.0,
                    "model": ["4", 0],
                    "positive": ["6", 0],
                    "negative": ["7", 0],
                    "latent_image": ["5", 0],
                },
            },
            "4": {"class_type": "CheckpointLoaderSimple", "inputs": {"ckpt_name": "sd_xl_base_1.0.safetensors"}},
            "5": {"class_type": "EmptyLatentImage", "inputs": {"width": width, "height": height, "batch_size": 1}},
            "6": {"class_type": "CLIPTextEncode", "inputs": {"text": prompt_text, "clip": ["4", 1]}},
            "7": {"class_type": "CLIPTextEncode", "inputs": {"text": negative, "clip": ["4", 1]}},
            "8": {"class_type": "VAEDecode", "inputs": {"samples": ["3", 0], "vae": ["4", 2]}},
            "9": {"class_type": "SaveImage", "inputs": {"filename_prefix": "api_output", "images": ["8", 0]}},
        }
        result = self.queue_prompt(workflow)
        prompt_id = result.get("prompt_id")
        logger.info(f"Queued: {prompt_id}")
        return {"prompt_id": prompt_id, "status": "queued"}
# Demo: instantiate the client and list its public methods.
client = ComfyUIClient("http://localhost:8188")
for line in (
    "ComfyUI API Client initialized",
    "Methods: queue_prompt, get_history, get_image, generate_image",
):
    print(line)
Performance Tuning and Multi-GPU
Optimize ComfyUI for production workloads
# === ComfyUI Performance Optimization ===

# 1. GPU memory optimization
# Use --lowvram for GPUs with < 8GB VRAM
python main.py --listen 0.0.0.0 --lowvram
# Use --gpu-only to keep everything on GPU (fastest, needs more VRAM)
python main.py --listen 0.0.0.0 --gpu-only
# FP16 mode (default, good balance)
python main.py --listen 0.0.0.0 --force-fp16

# 2. Multi-GPU setup: run one instance per GPU
CUDA_VISIBLE_DEVICES=0 python main.py --port 8188 &
CUDA_VISIBLE_DEVICES=1 python main.py --port 8189 &

# Nginx load balancer
# sudo tee: /etc/nginx is root-owned, so a plain `cat >` would fail
sudo tee /etc/nginx/conf.d/comfyui-lb.conf > /dev/null << 'EOF'
upstream comfyui_backend {
    least_conn;
    server 127.0.0.1:8188;
    server 127.0.0.1:8189;
}
server {
    listen 8080;
    location / {
        proxy_pass http://comfyui_backend;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
EOF

# 3. Model caching
# ComfyUI caches models in RAM/VRAM automatically; budget RAM/VRAM for
# frequently used models:
#   SDXL base:    ~6.5GB VRAM
#   SDXL refiner: ~6.5GB VRAM
#   ControlNet:   ~2.5GB per model
#   LoRA:         ~100-500MB per model

# 4. Storage optimization
# Use an NVMe SSD for the models directory; symlink if needed:
ln -s /nvme/models /opt/ComfyUI/models
# Pre-download frequently used models to avoid first-load delays.

# 5. Batch generation script
cat > batch_generate.sh << 'BASH'
#!/bin/bash
# Batch generate images from a prompts file (one prompt per line)
SERVER="http://localhost:8188"
PROMPTS_FILE="prompts.txt"
OUTPUT_DIR="output/batch_$(date +%Y%m%d)"
mkdir -p "$OUTPUT_DIR"

while IFS= read -r prompt; do
    echo "Generating: $prompt"
    # Escape backslashes and double quotes so arbitrary prompt text cannot
    # break (or inject into) the JSON payload — the original interpolated
    # $prompt verbatim, so any quote in a prompt produced invalid JSON.
    esc=$(printf '%s' "$prompt" | sed -e 's/\\/\\\\/g' -e 's/"/\\"/g')
    curl -s -X POST "$SERVER/prompt" \
        -H "Content-Type: application/json" \
        -d "{
            \"prompt\": {
                \"3\": {\"class_type\": \"KSampler\", \"inputs\": {\"seed\": $RANDOM, \"steps\": 30, \"cfg\": 7.5, \"sampler_name\": \"euler_ancestral\", \"scheduler\": \"normal\", \"denoise\": 1.0, \"model\": [\"4\",0], \"positive\": [\"6\",0], \"negative\": [\"7\",0], \"latent_image\": [\"5\",0]}},
                \"4\": {\"class_type\": \"CheckpointLoaderSimple\", \"inputs\": {\"ckpt_name\": \"sd_xl_base_1.0.safetensors\"}},
                \"5\": {\"class_type\": \"EmptyLatentImage\", \"inputs\": {\"width\": 1024, \"height\": 1024, \"batch_size\": 1}},
                \"6\": {\"class_type\": \"CLIPTextEncode\", \"inputs\": {\"text\": \"$esc\", \"clip\": [\"4\",1]}},
                \"7\": {\"class_type\": \"CLIPTextEncode\", \"inputs\": {\"text\": \"ugly, blurry\", \"clip\": [\"4\",1]}},
                \"8\": {\"class_type\": \"VAEDecode\", \"inputs\": {\"samples\": [\"3\",0], \"vae\": [\"4\",2]}},
                \"9\": {\"class_type\": \"SaveImage\", \"inputs\": {\"filename_prefix\": \"batch\", \"images\": [\"8\",0]}}
            }
        }"
    sleep 2
done < "$PROMPTS_FILE"
echo "Batch generation complete"
BASH
chmod +x batch_generate.sh
echo "Performance tuning complete"
FAQ — Frequently Asked Questions
Q: How do ComfyUI and AUTOMATIC1111 differ, and which should I choose?
A: ComfyUI uses a node-based interface where you wire nodes into a graph yourself. It has a steeper learning curve but excels at complex workflows, uses VRAM more efficiently, can mix multiple models in one pipeline, and has a first-class API — well suited to power users and production. AUTOMATIC1111 (WebUI) uses a form-based interface that is much easier to start with and has a large extension ecosystem, but its workflows are harder to reproduce exactly. For a remote work setup, ComfyUI is the better fit: its API makes automation easy, workflows are reproducible and shareable as JSON, and it is lighter on server resources.
Q: How much GPU VRAM do I need?
A: It depends on the model. SD 1.5 needs about 4GB+ VRAM (an RTX 3060 is plenty). SDXL needs about 8GB+ (RTX 3070 or better). SDXL with ControlNet and LoRA needs 12GB+ (RTX 3060 12GB, RTX 4070). SD3 Medium needs about 12GB+, and FLUX.1 about 16GB+ (RTX 4080, RTX 4090). For production batch generation, an RTX 4090 (24GB) or A100 (40/80GB) is recommended. If VRAM is tight, the --lowvram flag or model quantization (FP8) reduces usage.
Q: Which remote-access method should I use?
A: It depends on your needs. An SSH tunnel is the most secure default: fully encrypted, no port exposed to the internet, and it only needs an SSH client. Tailscale VPN gives WireGuard-encrypted, zero-config access but requires installing a client on every device. Cloudflare Tunnel exposes no inbound port and can add auth via Cloudflare Access, at the cost of some extra latency. Nginx with Basic Auth is convenient for browser access, but you must use HTTPS, pick a strong password against brute force, and ideally add fail2ban. Never expose ComfyUI directly to the internet without auth — attackers will find and abuse your GPU.
Q: How much does running ComfyUI on a cloud GPU cost?
A: It varies by provider and GPU. RunPod: RTX 4090 around $0.44/hr, A100 around $1.64/hr, with a serverless option billed per compute. Lambda Labs: RTX 4090 around $0.50/hr, A100 around $1.10/hr. Vast.ai (marketplace): RTX 4090 around $0.20-0.40/hr (spot). AWS p4d.24xlarge (8x A100) around $32/hr (enterprise scale). For heavy personal use, buying an RTX 4090 (~$1,600) pays for itself if you generate more than roughly 3,000 images per month; for occasional use, RunPod serverless or Vast.ai spot instances are more economical.
