Postman และ Newman คืออะไร
Postman เป็น API development platform ที่ใช้มากที่สุดในโลก สำหรับ design, test, document และ monitor APIs มีผู้ใช้มากกว่า 30 ล้านคน Newman เป็น command-line collection runner สำหรับ Postman ทำให้รัน Postman collections ใน CI/CD pipelines ได้โดยไม่ต้องเปิด Postman GUI
Feature Flag Management เป็นเทคนิคที่ให้ enable หรือ disable features ใน production โดยไม่ต้อง deploy code ใหม่ ใช้สำหรับ gradual rollout, A/B testing, canary releases และ kill switches การทดสอบ feature flags ด้วย Postman/Newman ช่วยให้มั่นใจว่า API ทำงานถูกต้องทั้งเมื่อ flag เปิดและปิด
ประโยชน์ของการรวม Newman กับ Feature Flag Testing ได้แก่ Automated testing ทดสอบทุก flag combinations อัตโนมัติ, CI/CD integration ทดสอบก่อนและหลัง flag changes, Regression prevention ป้องกันไม่ให้ flag changes break existing features, Documentation Postman collections เป็น living documentation ของ API behavior
ติดตั้งและตั้งค่า Newman
วิธีติดตั้ง Newman และเตรียม environment
# === Newman Installation ===
# 1. Install Newman (CLI runner for Postman collections)
npm install -g newman
newman --version

# 2. Install Newman reporters (HTML report + JSON summary)
npm install -g newman-reporter-htmlextra
npm install -g newman-reporter-json-summary

# 3. Create Postman environment for feature flags
# FIX: these directories must exist before writing the env file / HTML report
mkdir -p environments reports
cat > environments/staging.json << 'EOF'
{
  "id": "staging-env",
  "name": "Staging Environment",
  "values": [
    { "key": "base_url", "value": "https://api-staging.example.com", "enabled": true },
    { "key": "api_key", "value": "{{API_KEY}}", "enabled": true },
    { "key": "ff_new_checkout", "value": "true", "enabled": true },
    { "key": "ff_dark_mode", "value": "false", "enabled": true },
    { "key": "ff_ai_recommendations", "value": "true", "enabled": true },
    { "key": "ff_beta_dashboard", "value": "false", "enabled": true }
  ]
}
EOF

# 4. Create feature-flag config
cat > feature_flags.json << 'EOF'
{
  "flags": {
    "new_checkout": {
      "enabled": true,
      "rollout_percentage": 50,
      "description": "New checkout flow with Stripe",
      "affected_endpoints": ["/api/checkout", "/api/cart/summary"]
    },
    "dark_mode": {
      "enabled": false,
      "rollout_percentage": 0,
      "description": "Dark mode UI theme",
      "affected_endpoints": ["/api/settings/theme"]
    },
    "ai_recommendations": {
      "enabled": true,
      "rollout_percentage": 100,
      "description": "AI-powered product recommendations",
      "affected_endpoints": ["/api/recommendations", "/api/products/similar"]
    }
  }
}
EOF

# 5. Basic Newman run (CLI output + standalone HTML report)
newman run collection.json \
  -e environments/staging.json \
  --reporters cli,htmlextra \
  --reporter-htmlextra-export reports/test-report.html

# 6. Newman run with a secret injected via environment variable
API_KEY=your-api-key newman run collection.json \
  -e environments/staging.json \
  --env-var "api_key=$API_KEY" \
  --timeout-request 10000

echo "Newman installed and configured"
สร้าง API Tests ด้วย Postman
เขียน test collections สำหรับ feature flags
// === Postman Collection for Feature Flag Testing ===
// collection.json structure:
{
"info": {
"name": "Feature Flag API Tests",
"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
},
"item": [
{
"name": "Feature Flag Management",
"item": [
{
"name": "Get All Feature Flags",
"request": {
"method": "GET",
"url": "{{base_url}}/api/feature-flags",
"header": [
{ "key": "Authorization", "value": "Bearer {{api_key}}" }
]
},
"event": [
{
"listen": "test",
"script": {
"exec": [
"pm.test('Status code is 200', () => {",
" pm.response.to.have.status(200);",
"});",
"",
"pm.test('Response contains flags array', () => {",
" const json = pm.response.json();",
" pm.expect(json).to.have.property('flags');",
" pm.expect(json.flags).to.be.an('array');",
"});",
"",
"pm.test('Each flag has required fields', () => {",
" const json = pm.response.json();",
" json.flags.forEach(flag => {",
" pm.expect(flag).to.have.property('name');",
" pm.expect(flag).to.have.property('enabled');",
" pm.expect(flag).to.have.property('rollout_percentage');",
" });",
"});",
"",
"// Store flags for subsequent tests",
"const flags = pm.response.json().flags;",
"pm.collectionVariables.set('all_flags', JSON.stringify(flags));"
]
}
}
]
},
{
"name": "Toggle Feature Flag",
"request": {
"method": "PUT",
"url": "{{base_url}}/api/feature-flags/{{flag_name}}",
"header": [
{ "key": "Authorization", "value": "Bearer {{api_key}}" },
{ "key": "Content-Type", "value": "application/json" }
],
"body": {
"mode": "raw",
"raw": "{ \"enabled\": {{flag_value}}, \"rollout_percentage\": {{rollout_pct}} }"
}
},
"event": [
{
"listen": "test",
"script": {
"exec": [
"pm.test('Flag updated successfully', () => {",
" pm.response.to.have.status(200);",
" const json = pm.response.json();",
" pm.expect(json.flag.name).to.equal(pm.variables.get('flag_name'));",
"});",
"",
"pm.test('Flag value matches request', () => {",
" const json = pm.response.json();",
" const expected = pm.variables.get('flag_value') === 'true';",
" pm.expect(json.flag.enabled).to.equal(expected);",
"});"
]
}
}
]
}
]
},
{
"name": "Feature: New Checkout (Flag ON)",
"item": [
{
"name": "Checkout API returns new flow",
"request": {
"method": "POST",
"url": "{{base_url}}/api/checkout",
"header": [
{ "key": "Authorization", "value": "Bearer {{api_key}}" },
{ "key": "X-Feature-Flags", "value": "new_checkout=true" }
]
},
"event": [
{
"listen": "test",
"script": {
"exec": [
"pm.test('New checkout flow active', () => {",
" pm.response.to.have.status(200);",
" const json = pm.response.json();",
" pm.expect(json.checkout_version).to.equal('v2');",
" pm.expect(json).to.have.property('stripe_session_id');",
"});",
"",
"pm.test('Response time < 500ms', () => {",
" pm.expect(pm.response.responseTime).to.be.below(500);",
"});"
]
}
}
]
}
]
}
]
}
// Pre-request script for dynamic flag testing:
// const flagName = pm.iterationData.get('flag_name') || 'new_checkout';
// const flagValue = pm.iterationData.get('flag_value') || 'true';
// pm.request.headers.add({
//   key: 'X-Feature-Flags',
//   value: `${flagName}=${flagValue}`
// });
Feature Flag Testing Strategies
กลยุทธ์ทดสอบ feature flags
#!/usr/bin/env python3
# ff_test_matrix.py — Feature Flag Test Matrix Generator
import json
import itertools
import logging
from typing import Dict, List
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("ff_test")
class FeatureFlagTestMatrix:
    """Builds feature-flag test matrices (full and reduced) and Newman data files."""

    def __init__(self):
        # flag name -> list of states to test (usually [True, False])
        self.flags = {}
        # reserved for collected run results
        self.test_results = []

    def add_flag(self, name, states=None):
        """Register a flag; by default both True and False states are tested."""
        self.flags[name] = states or [True, False]

    def generate_full_matrix(self):
        """Generate all possible flag combinations (2^n for n boolean flags)."""
        flag_names = list(self.flags.keys())
        flag_values = list(self.flags.values())
        combinations = [
            dict(zip(flag_names, combo))
            for combo in itertools.product(*flag_values)
        ]
        return {
            "total_combinations": len(combinations),
            "flags": flag_names,
            "combinations": combinations,
        }

    def generate_pairwise_matrix(self):
        """Generate a reduced testing matrix (fewer tests, good coverage).

        Simplified strategy: all-off, all-on, each flag individually toggled
        against both all-off and all-on baselines, with duplicates removed.
        """
        flag_names = list(self.flags.keys())
        total_flags = len(flag_names)
        tests = []
        # All flags default (off)
        tests.append({name: False for name in flag_names})
        # All flags on
        tests.append({name: True for name in flag_names})
        # Each flag individually on (others off)
        for flag in flag_names:
            test = {name: False for name in flag_names}
            test[flag] = True
            tests.append(test)
        # Each flag individually off (others on)
        for flag in flag_names:
            test = {name: True for name in flag_names}
            test[flag] = False
            tests.append(test)
        # Remove duplicates (with one or two flags the cases above overlap)
        unique_tests = []
        seen = set()
        for test in tests:
            key = tuple(sorted(test.items()))
            if key not in seen:
                seen.add(key)
                unique_tests.append(test)
        return {
            "strategy": "pairwise",
            "total_tests": len(unique_tests),
            "reduction_vs_full": f"{(1 - len(unique_tests) / 2**total_flags) * 100:.0f}%",
            "tests": unique_tests,
        }

    def generate_newman_data_file(self, tests, output_file="test_data.json"):
        """Build a Newman iteration-data file from flag-combination dicts.

        Each row gets a "test_id" plus "ff_<flag>" string values ("true"/"false")
        matching Newman's --env-var convention. BUG FIX: the original accepted
        output_file but never wrote it; the file is now written (pass
        output_file=None to skip writing). Returns the row list either way.
        """
        data = []
        for i, test in enumerate(tests):
            row = {"test_id": f"test_{i+1}"}
            for flag_name, flag_value in test.items():
                row[f"ff_{flag_name}"] = str(flag_value).lower()
            data.append(row)
        if output_file:
            with open(output_file, "w") as fh:
                json.dump(data, fh, indent=2)
        return data

    def analyze_results(self, results):
        """Analyze test results for flag-related failures.

        results: rows carrying a "status" key ("fail" marks a failure) and
        "ff_<flag>" string values as produced by generate_newman_data_file.
        Returns totals plus per-flag on/off failure rates (percent).
        """
        failures = [r for r in results if r.get("status") == "fail"]
        # Find flags whose state correlates with failures
        flag_failure_rate = {}
        for flag_name in self.flags:
            ff_key = f"ff_{flag_name}"
            on_failures = sum(1 for f in failures if f.get(ff_key) == "true")
            off_failures = sum(1 for f in failures if f.get(ff_key) == "false")
            on_total = sum(1 for r in results if r.get(ff_key) == "true")
            off_total = sum(1 for r in results if r.get(ff_key) == "false")
            # max(..., 1) avoids division by zero when a state was never run
            flag_failure_rate[flag_name] = {
                "on_failure_rate": round(on_failures / max(on_total, 1) * 100, 1),
                "off_failure_rate": round(off_failures / max(off_total, 1) * 100, 1),
            }
        return {
            "total_tests": len(results),
            "passed": len(results) - len(failures),
            "failed": len(failures),
            "flag_analysis": flag_failure_rate,
        }
# Example usage: build both matrices for four flags and preview Newman data rows.
demo_matrix = FeatureFlagTestMatrix()
for demo_flag in ("new_checkout", "dark_mode", "ai_recommendations", "beta_dashboard"):
    demo_matrix.add_flag(demo_flag)
full_report = demo_matrix.generate_full_matrix()
print(f"Full matrix: {full_report['total_combinations']} combinations")
pairwise_report = demo_matrix.generate_pairwise_matrix()
print(f"Pairwise: {pairwise_report['total_tests']} tests ({pairwise_report['reduction_vs_full']} reduction)")
newman_rows = demo_matrix.generate_newman_data_file(pairwise_report["tests"])
print(json.dumps(newman_rows[:3], indent=2))
CI/CD Integration กับ Newman
รวม Newman เข้ากับ CI/CD pipeline
# === CI/CD Integration ===
# FIX: create target directories before writing files into them
mkdir -p .github/workflows scripts test-data reports

# 1. GitHub Actions workflow.
#    FIX: restored the ${{ matrix.environment }} / ${{ secrets.API_KEY }}
#    expressions that were lost, plus proper YAML indentation. The heredoc
#    delimiter is quoted, so the shell leaves ${{ ... }} untouched.
cat > .github/workflows/api-tests.yml << 'EOF'
name: API Feature Flag Tests
on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]
  schedule:
    - cron: '0 */6 * * *' # Every 6 hours
jobs:
  api-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        environment: [staging, production]
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
      - name: Install Newman
        run: |
          npm install -g newman newman-reporter-htmlextra
      - name: Run Feature Flag Tests
        run: |
          newman run collections/feature-flags.json \
            -e environments/${{ matrix.environment }}.json \
            --env-var "api_key=$API_KEY" \
            -d test-data/flag-combinations.json \
            --reporters cli,htmlextra,json \
            --reporter-htmlextra-export reports/report.html \
            --reporter-json-export reports/results.json \
            --delay-request 100 \
            --timeout-request 15000 \
            --bail failure
        env:
          API_KEY: ${{ secrets.API_KEY }}
      - name: Check Results
        if: always()
        run: |
          if [ -f reports/results.json ]; then
            FAILURES=$(cat reports/results.json | python3 -c "
          import json, sys
          data = json.load(sys.stdin)
          stats = data.get('run', {}).get('stats', {}).get('assertions', {})
          print(stats.get('failed', 0))
          ")
            echo "Failed assertions: $FAILURES"
            if [ "$FAILURES" -gt 0 ]; then
              echo "::error::$FAILURES API tests failed"
            fi
          fi
      - name: Upload Report
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: api-test-report-${{ matrix.environment }}
          path: reports/
EOF

# 2. Pre-deployment flag test script.
#    FIX: restored the $FLAG_NAME references in --env-var and the usage text.
cat > scripts/test_before_flag_change.sh << 'SHEOF'
#!/bin/bash
set -e

FLAG_NAME=$1
FLAG_VALUE=$2

if [ -z "$FLAG_NAME" ] || [ -z "$FLAG_VALUE" ]; then
  echo "Usage: $0 <flag_name> <flag_value>"
  exit 1
fi

echo "Testing flag change: $FLAG_NAME = $FLAG_VALUE"

# Test with flag OFF (current state)
echo "=== Testing with flag OFF ==="
newman run collections/feature-flags.json \
  -e environments/staging.json \
  --env-var "ff_${FLAG_NAME}=false" \
  --reporters cli \
  --bail failure

# Test with flag ON (new state)
echo "=== Testing with flag ON ==="
newman run collections/feature-flags.json \
  -e environments/staging.json \
  --env-var "ff_${FLAG_NAME}=true" \
  --reporters cli \
  --bail failure

echo "All tests passed for flag: $FLAG_NAME"
SHEOF
chmod +x scripts/test_before_flag_change.sh

# 3. Run with a data file (one Newman iteration per flag combination)
cat > test-data/flag-combinations.json << 'EOF'
[
  {"test_id": "all_off", "ff_new_checkout": "false", "ff_dark_mode": "false", "ff_ai_recs": "false"},
  {"test_id": "all_on", "ff_new_checkout": "true", "ff_dark_mode": "true", "ff_ai_recs": "true"},
  {"test_id": "checkout_only", "ff_new_checkout": "true", "ff_dark_mode": "false", "ff_ai_recs": "false"},
  {"test_id": "dark_only", "ff_new_checkout": "false", "ff_dark_mode": "true", "ff_ai_recs": "false"}
]
EOF

newman run collections/feature-flags.json \
  -e environments/staging.json \
  -d test-data/flag-combinations.json \
  --reporters cli,htmlextra

echo "CI/CD integration configured"
Monitoring และ Reporting
Monitor API tests และ feature flag health
#!/usr/bin/env python3
# ff_monitor.py — Feature Flag Test Monitor
import json
import logging
from datetime import datetime
from typing import Dict, List
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("monitor")
class FFTestMonitor:
    """Parses Newman JSON reports and summarizes feature-flag test health."""

    def __init__(self):
        # Collected per-run summaries (callers may append parse results here).
        self.test_runs = []

    def parse_newman_results(self, results_json):
        """Parse Newman's JSON reporter output into a compact summary.

        results_json: path to a file produced by --reporter-json-export, or an
        already-loaded report dict. BUG FIX: the original ignored the argument
        and always used hard-coded sample data; the file (or dict) is now used
        when available, falling back to the embedded sample so the script stays
        runnable as a self-contained demo.
        """
        from datetime import timezone  # local import: module header imports only datetime

        data = None
        if isinstance(results_json, dict):
            data = results_json
        else:
            try:
                with open(results_json) as fh:
                    data = json.load(fh)
            except (OSError, TypeError, ValueError):
                logging.getLogger("monitor").warning(
                    "Could not read %r; using embedded sample report", results_json
                )
        if data is None:
            # Embedded sample shaped like a real Newman report (demo fallback).
            data = {
                "run": {
                    "stats": {
                        "requests": {"total": 24, "failed": 1},
                        "assertions": {"total": 72, "failed": 2},
                    },
                    "timings": {
                        "responseAverage": 185,
                        "responseMin": 45,
                        "responseMax": 890,
                        "started": 1705305600000,
                        "completed": 1705305660000,
                    },
                    "executions": [
                        {"item": {"name": "Get Flags"}, "assertions": [{"error": None}]},
                        {"item": {"name": "New Checkout ON"}, "assertions": [{"error": None}]},
                        {"item": {"name": "Dark Mode ON"}, "assertions": [{"error": {"message": "Expected 200 got 500"}}]},
                    ],
                }
            }
        stats = data["run"]["stats"]
        timings = data["run"]["timings"]
        # Collect each failed assertion together with its test item name.
        failures = []
        for execution in data["run"].get("executions", []):
            for assertion in execution.get("assertions", []):
                if assertion.get("error"):
                    failures.append({
                        "test": execution["item"]["name"],
                        "error": assertion["error"]["message"],
                    })
        return {
            # FIX: timezone-aware timestamp (datetime.utcnow() is deprecated).
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "requests": {"total": stats["requests"]["total"], "failed": stats["requests"]["failed"]},
            "assertions": {"total": stats["assertions"]["total"], "failed": stats["assertions"]["failed"]},
            "response_time": {
                "avg_ms": timings["responseAverage"],
                "min_ms": timings["responseMin"],
                "max_ms": timings["responseMax"],
            },
            # Newman timings are epoch milliseconds.
            "duration_sec": round((timings["completed"] - timings["started"]) / 1000, 1),
            # max(..., 1) guards against a report with zero assertions.
            "pass_rate": round((1 - stats["assertions"]["failed"] / max(stats["assertions"]["total"], 1)) * 100, 1),
            "failures": failures,
        }

    def trend_analysis(self, runs):
        """Analyze pass-rate / response-time trends over the last five runs."""
        if len(runs) < 2:
            return {"trend": "insufficient_data"}
        recent = runs[-5:]
        pass_rates = [r["pass_rate"] for r in recent]
        avg_response = [r["response_time"]["avg_ms"] for r in recent]
        if pass_rates[-1] > pass_rates[0]:
            pass_trend = "improving"
        elif pass_rates[-1] < pass_rates[0]:
            pass_trend = "degrading"
        else:
            pass_trend = "stable"
        return {
            "pass_rate_trend": pass_trend,
            "avg_pass_rate": round(sum(pass_rates) / len(pass_rates), 1),
            "avg_response_time_ms": round(sum(avg_response) / len(avg_response)),
            "last_5_runs": [{"pass_rate": r, "response_ms": t} for r, t in zip(pass_rates, avg_response)],
        }
# Demo: parse a Newman report file (falls back to embedded sample data).
ff_monitor = FFTestMonitor()
parsed_report = ff_monitor.parse_newman_results("results.json")
print(json.dumps(parsed_report, indent=2))
FAQ คำถามที่พบบ่อย
Q: Newman กับ Postman ต่างกันอย่างไร?
A: Postman เป็น GUI application สำหรับ interactive API testing เหมาะสำหรับ development และ manual testing Newman เป็น CLI tool ที่รัน Postman collections ใน terminal หรือ CI/CD pipeline ใช้ collection เดียวกันทั้ง Postman และ Newman ทำงานร่วมกัน develop tests ใน Postman แล้ว automate ด้วย Newman ใน CI/CD
Q: ควรทดสอบ feature flag combinations อย่างไร?
A: Full matrix testing (ทุก combinations) ดีที่สุดแต่ไม่ practical เมื่อมีหลาย flags (n flags = 2^n combinations) ใช้ pairwise testing ลด combinations 60-80% ยังครอบคลุม interactions ระหว่าง flags ได้ดี ทดสอบ critical paths ทุก combination ที่สำคัญ ใช้ data-driven testing กับ Newman data files เปลี่ยน flag values อัตโนมัติ ทดสอบ flag ON และ OFF สำหรับทุก flag อย่างน้อยที่สุด
Q: Feature flag tools ที่แนะนำมีอะไรบ้าง?
A: LaunchDarkly เป็น leader ใน feature flag management มี targeting, experiments, progressive rollout ราคาสูง ($10/seat/month+) Flagsmith open source self-host ได้ มี REST API ที่ integrate กับ Newman ได้ดี Unleash open source feature toggle system รองรับ gradual rollout Flipt open source lightweight feature flag สำหรับ team เล็กเริ่มจาก Flagsmith หรือ Unleash (open source ฟรี) สำหรับ enterprise ใช้ LaunchDarkly
Q: วิธี handle feature flag cleanup?
A: Feature flags ที่ไม่ cleanup จะกลายเป็น technical debt ควร set expiration date สำหรับทุก flag, review flags ทุก sprint ลบ flags ที่ rollout 100% แล้ว, ใช้ Newman tests ตรวจสอบว่า API ยังทำงานหลังลบ flag, maintain flag inventory document ทุก flags กับ owner, automated alerts เตือนเมื่อ flag อายุเกิน threshold (เช่น 90 วัน), tag flags ใน code ให้หาง่าย ใช้ consistent naming convention