Part 15 – Advanced Performance Optimization

Optimizing Node.js applications requires a systematic approach. This guide covers profiling, benchmarking, and optimization techniques for finding real bottlenecks and measurably improving application performance.

1. Performance Metrics to Monitor

Key Server Metrics

  • Response Time: Time to first byte (TTFB)
  • Throughput: Requests per second (RPS)
  • Error Rate: Failed requests percentage
  • CPU Usage: Process CPU time, plus event loop utilization (ELU) — a distinct metric that shows how busy the event loop is

Memory Metrics

  • Heap Usage: V8 memory allocation
  • RSS: Resident Set Size
  • Garbage Collection: Frequency and duration
  • Memory Leaks: Growing memory over time

2. Profiling Tools

Built-in Node.js Profiler

# Start with profiling
node --prof app.js

# Process the log
node --prof-process isolate-0xnnnnnnnnnnnn-v8.log > processed.txt

# Key sections in output:
# [JavaScript]: Application code
# [C++]: Native bindings
# [GC]: Garbage collection

Chrome DevTools

  1. Start Node.js with inspect flag: node --inspect app.js
  2. Open chrome://inspect in Chrome
  3. Use Memory and CPU profilers

3. CPU Optimization

Event Loop Monitoring

// Track event-loop scheduling delay using perf_hooks' built-in histogram.
const { monitorEventLoopDelay } = require('perf_hooks');

const loopDelay = monitorEventLoopDelay();
loopDelay.enable();

// Report avg/max/min delay every 10 s, then reset the sampling window
// so each report covers only the most recent interval.
setInterval(() => {
    console.log(`Event loop delay:
    Avg: ${loopDelay.mean / 1e6}ms
    Max: ${loopDelay.max / 1e6}ms
    Min: ${loopDelay.min / 1e6}ms`);
    loopDelay.reset();
}, 10000);

Worker Threads

const { Worker } = require('worker_threads');

/**
 * Run ./worker.js in a worker thread and resolve with its first message.
 *
 * @param {*} workerData - Cloneable payload passed to the worker.
 * @returns {Promise<*>} Resolves with the worker's first posted message;
 *   rejects on a worker error or a non-zero exit.
 */
function runWorker(workerData) {
    return new Promise((resolve, reject) => {
        const worker = new Worker('./worker.js', { workerData });
        worker.on('message', resolve);
        worker.on('error', reject);
        // Without an 'exit' handler, a worker that dies before posting a
        // message (e.g. process.exit(1), uncaught throw after startup)
        // leaves this promise pending forever.
        worker.on('exit', (code) => {
            if (code !== 0) {
                reject(new Error(`Worker stopped with exit code ${code}`));
            }
        });
    });
}

// worker.js
// Contents of the worker file: runs on a separate thread, receives the
// `workerData` passed to the Worker constructor, and posts the result
// back to the spawning thread via the message channel.
const { parentPort, workerData } = require('worker_threads');
// NOTE(review): heavyComputation is a placeholder — it must be defined
// or imported in the real worker file.
const result = heavyComputation(workerData);
parentPort.postMessage(result);

4. Memory Optimization

Heap Snapshots

# Take heap snapshot
node --heapsnapshot-signal=SIGUSR2 app.js

# Send signal to process
kill -USR2 [pid]

# Analyze in Chrome DevTools
# Look for:
# - Retained size (memory held)
# - Shallow size (object itself)
# - Dominators (what's keeping objects alive)

Memory Leak Detection

const heapdump = require('heapdump');

// Demo of leak detection: deliberately retain memory, sample heap usage
// once per second, and dump a heap snapshot when usage crosses 500 MB
// so the retained objects can be inspected offline in DevTools.
let retained = [];

setInterval(() => {
    // Simulate leak
    retained.push(new Array(1000).fill('*'));

    // Current V8 heap usage, converted from bytes to MB.
    const heapUsedMb = process.memoryUsage().heapUsed / 1024 / 1024;
    console.log(`Memory used: ${Math.round(heapUsedMb * 100) / 100} MB`);

    if (heapUsedMb > 500) { // 500MB threshold
        heapdump.writeSnapshot(`leak-${Date.now()}.heapsnapshot`);
        retained = []; // Clear leaks
    }
}, 1000);

5. Database Optimization

MongoDB Indexing

// Create index
// Single-field ascending index; `unique` also enforces that no two
// documents share the same email.
await User.createIndex({ email: 1 }, { unique: true });

// Compound index
// Supports queries filtering by userId and sorting newest-first.
await Order.createIndex({ userId: 1, createdAt: -1 });

// View query execution stats
// .explain() returns the query plan instead of the documents.
const result = await User.find({ email: 'test@test.com' })
    .explain('executionStats');

// Key metrics:
// - executionTimeMillis
// - totalDocsExamined (should be close to nReturned when indexed)
// - index vs collection scan (IXSCAN vs COLLSCAN in the plan)

SQL Query Optimization

// PostgreSQL EXPLAIN ANALYZE
// (This line is SQL — run it in psql, not in Node.js.)
EXPLAIN ANALYZE SELECT * FROM users WHERE email = 'test@test.com';

// Sequelize logging
// `...` is a placeholder for the usual connection arguments.
const sequelize = new Sequelize(..., {
    logging: (sql, timing) => {
        // NOTE(review): Sequelize only passes `timing` when the
        // `benchmark: true` option is enabled — confirm in real config.
        if (timing > 100) { // Log slow queries
            console.warn(`Slow query (${timing}ms): ${sql}`);
        }
    }
});

// Add indexes
// Migration-style index creation via the query interface.
await queryInterface.addIndex('Users', ['email'], {
    unique: true
});

6. Caching Strategies

Redis Caching

const redis = require('redis');
const client = redis.createClient();

/**
 * Fetch a product by id through a Redis read-through cache.
 *
 * @param {string} id - Product identifier.
 * @returns {Promise<Object|null>} The product, or null/undefined when
 *   the database has no match.
 */
async function getProduct(id) {
    const cacheKey = `product:${id}`;

    // Try cache first
    const cached = await client.get(cacheKey);
    if (cached) return JSON.parse(cached);

    // Fallback to DB
    const product = await Product.findById(id);

    // Only cache real hits: JSON.stringify(undefined) is undefined (an
    // invalid cache value) and stringifying null wastes a cache write
    // that could never be served (the `if (cached)` check skips "null").
    if (product) {
        // Cache for 1 hour
        // NOTE(review): node-redis v4 renames this method to setEx and
        // requires an explicit client.connect() — verify client version.
        await client.setex(cacheKey, 3600, JSON.stringify(product));
    }

    return product;
}

In-Memory Caching

const NodeCache = require('node-cache');
const cache = new NodeCache({ stdTTL: 600 }); // 10 min TTL

/**
 * Fetch JSON from `url`, memoized in the in-process cache for the
 * configured TTL.
 *
 * @param {string} url - Request URL (also the cache key).
 * @returns {Promise<*>} Parsed JSON body, from cache when available.
 */
function cachedFetch(url) {
    // Use has() so falsy cached payloads (0, '', null, false) still
    // count as hits; the original truthiness check on the value would
    // re-fetch those on every call.
    if (cache.has(url)) return Promise.resolve(cache.get(url));

    return fetch(url)
        .then(res => res.json())
        .then(data => {
            cache.set(url, data);
            return data;
        });
}

7. Load Testing

Artillery

# Install
npm install -g artillery

# test.yml
config:
  target: "http://localhost:3000"
  phases:
    - duration: 60
      arrivalRate: 10
      name: "Warm up"
    - duration: 120
      arrivalRate: 20
      rampTo: 100
      name: "Spike test"

scenarios:
  - flow:
    - get:
        url: "/api/products"
    - post:
        url: "/api/checkout"
        json:
          productId: "123"

# Run test
artillery run test.yml

Autocannon

const autocannon = require('autocannon');

// Programmatic load test: 100 concurrent connections for 30 s across a
// GET and a POST endpoint.
autocannon({
    url: 'http://localhost:3000',
    connections: 100, // Concurrent connections
    duration: 30,     // Test duration (seconds)
    requests: [
        {
            method: 'GET',
            path: '/api/products'
        },
        {
            method: 'POST',
            path: '/api/checkout',
            body: JSON.stringify({ productId: '123' }),
            headers: {
                'Content-Type': 'application/json'
            }
        }
    ]
}, (err, result) => {
    // The original ignored `err`; when the run fails, `result` is
    // undefined and `result.requests.average` would throw.
    if (err) {
        console.error('Load test failed:', err);
        return;
    }
    console.log('Requests per second:', result.requests.average);
    console.log('Latency:', result.latency.average);
});

Next: WebSockets with Socket.io →

Performance Checklist

  • ✅ Profile before optimizing
  • ✅ Monitor event loop latency
  • ✅ Implement caching for repeated operations
  • ✅ Optimize database queries with indexes
  • ✅ Use worker threads for CPU-intensive tasks
  • ✅ Load test with realistic scenarios

Leave a Comment

Your email address will not be published. Required fields are marked *