Speed up slow PostgreSQL queries — understand indexes (B-tree, GIN, partial), read EXPLAIN ANALYZE output, avoid N+1 queries, and tune server configuration.
Slow database queries are one of the most common causes of slow web applications, and PostgreSQL gives you powerful tools to diagnose and fix them. This guide walks through the full process, from finding the culprits to tuning the server.
-- Enable the pg_stat_statements extension
-- (requires shared_preload_libraries = 'pg_stat_statements' in postgresql.conf, then a restart)
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
-- Find the slowest queries by total time
SELECT
round(total_exec_time::numeric, 2) AS total_ms,
round(mean_exec_time::numeric, 2) AS mean_ms,
calls,
round(rows / calls::numeric, 1) AS avg_rows,
left(query, 120) AS query
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 20;
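Counters accumulate until reset, so after changing a query it helps to clear them and measure fresh:
-- Reset pg_stat_statements counters for a clean baseline
SELECT pg_stat_statements_reset();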
Once pg_stat_statements has flagged a suspect, run it under EXPLAIN: ANALYZE executes the query and reports actual timings, and BUFFERS adds I/O counts.
EXPLAIN (ANALYZE, BUFFERS, FORMAT TEXT)
SELECT u.name, COUNT(o.id) AS order_count
FROM users u
JOIN orders o ON o.user_id = u.id
WHERE u.created_at > '2025-01-01'
GROUP BY u.id
ORDER BY order_count DESC
LIMIT 10;
Sample output (abridged), and how to read it:
Limit (cost=1234.56..1234.61 rows=10 width=40) (actual time=45.2..45.3 rows=10)
  -> Sort (cost=1234.56..1256.78 rows=3600 width=40) (actual time=45.1..45.2 rows=10)
       Sort Key: (count(o.id)) DESC
       Sort Method: top-N heapsort  Memory: 25kB
       -> HashAggregate (cost=876.00..987.00 rows=3600 width=40)
            -> Hash Join (cost=234.00..765.00 rows=22200 width=16)
                 Hash Cond: (o.user_id = u.id)
                 -> Seq Scan on orders o (cost=0.00..456.00 rows=22200 width=8) ← ⚠️ sequential scan!
                 -> Hash (cost=189.00..189.00 rows=3600 width=12)
                      -> Seq Scan on users u
                           Filter: (created_at > '2025-01-01')
                           Rows Removed by Filter: 6400
Key signals:
Seq Scan → full table scan, often needs an index
Index Scan → using an index (good)
Index Only Scan → best, no heap access needed
Hash Join → good for large joins
Nested Loop → fast when the outer side is small and the inner side is indexed; slow when both sides are large
Rows Removed by Filter: N → high N = index opportunity
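One more check worth making in any plan: compare the planner's estimated rows against the actual rows on each node. When they diverge by orders of magnitude, the table statistics are stale and the planner is guessing; refreshing them is cheap:
-- Refresh planner statistics (autovacuum also does this, but can lag on fast-changing tables)
ANALYZE orders;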
-- Default index type, good for =, <, >, BETWEEN, ORDER BY
CREATE INDEX idx_orders_user_id ON orders(user_id);
CREATE INDEX idx_users_email ON users(email);
-- Composite index (column order matters!)
-- Useful for queries filtering on (status, created_at) together
CREATE INDEX idx_orders_status_created ON orders(status, created_at DESC);
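Because only the leading column(s) of a composite index can anchor a scan, it pays to check which query shapes actually match. A quick sketch against the index above (the 'shipped' value is illustrative):
-- Uses the index: leading column (status) is constrained
SELECT * FROM orders WHERE status = 'shipped' AND created_at > now() - interval '7 days';
-- Also uses it: leading column alone
SELECT * FROM orders WHERE status = 'shipped';
-- Cannot use it efficiently: skips the leading column
SELECT * FROM orders WHERE created_at > now() - interval '7 days';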
-- Covering index — includes extra columns to enable Index Only Scan
CREATE INDEX idx_users_email_cover ON users(email) INCLUDE (id, name);
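With that covering index, a query touching only the indexed and INCLUDEd columns can be answered from the index alone, provided the visibility map is reasonably current (regular vacuuming keeps it so):
-- Eligible for an Index Only Scan: every referenced column lives in the index
SELECT id, name FROM users WHERE email = 'x@y.com';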
-- Index only active users (much smaller, faster)
CREATE INDEX idx_users_active_email ON users(email)
WHERE status = 'active';
-- Index only unprocessed orders
CREATE INDEX idx_orders_pending ON orders(created_at)
WHERE status = 'pending';
-- The query's WHERE clause must imply the index predicate, or the planner skips the index
SELECT * FROM users WHERE status = 'active' AND email = 'x@y.com';
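The flip side: drop the status filter and the planner can no longer prove the partial index applies, so it falls back to another index or a sequential scan:
-- Does NOT use idx_users_active_email: nothing implies status = 'active'
SELECT * FROM users WHERE email = 'x@y.com';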
-- Index a JSONB column for key/value search
CREATE INDEX idx_products_metadata ON products USING gin(metadata);
-- Now fast:
SELECT * FROM products WHERE metadata @> '{"color": "red"}';
-- Index an array column
CREATE INDEX idx_posts_tags ON posts USING gin(tags);
SELECT * FROM posts WHERE tags @> ARRAY['javascript', 'typescript'];
-- Full-text search index
CREATE INDEX idx_posts_search ON posts USING gin(to_tsvector('english', title || ' ' || body));
SELECT * FROM posts WHERE to_tsvector('english', title || ' ' || body) @@ to_tsquery('javascript & async');
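Repeating that long expression in every query is error-prone. On PostgreSQL 12+, one common alternative is a stored generated column that stays in sync automatically; a sketch, with the column and index names chosen for illustration:
-- The generated column is recomputed on every write (PostgreSQL 12+)
ALTER TABLE posts ADD COLUMN search tsvector
    GENERATED ALWAYS AS (to_tsvector('english', coalesce(title, '') || ' ' || coalesce(body, ''))) STORED;
CREATE INDEX idx_posts_search_col ON posts USING gin(search);
-- Queries stay short and always match the indexed expression
SELECT * FROM posts WHERE search @@ to_tsquery('javascript & async');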
-- ✗ N+1: 1 query for users + N queries for orders
-- In application code:
-- users = SELECT * FROM users LIMIT 20
-- for each user: SELECT * FROM orders WHERE user_id = ?
-- ✓ Fix: JOIN in one query
SELECT
    u.id, u.name,
    -- FILTER keeps users with no orders from getting [{"id": null, "total": null}]
    coalesce(json_agg(json_build_object('id', o.id, 'total', o.total))
             FILTER (WHERE o.id IS NOT NULL), '[]') AS orders
FROM users u
LEFT JOIN orders o ON o.user_id = u.id
WHERE u.id = ANY(ARRAY[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])
GROUP BY u.id, u.name;
-- ✓ Or use a subquery
SELECT
u.*,
(SELECT count(*) FROM orders WHERE user_id = u.id) AS order_count
FROM users u
WHERE u.id = ANY($1::int[]);
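When the application needs the recent orders themselves rather than a count, a LATERAL join still does it in one round trip. A sketch, with the per-user LIMIT chosen arbitrarily:
-- The subquery runs once per user; an index on (user_id, created_at) makes the LIMIT cheap
SELECT u.id, u.name, recent.order_id, recent.total
FROM users u
LEFT JOIN LATERAL (
    SELECT o.id AS order_id, o.total
    FROM orders o
    WHERE o.user_id = u.id
    ORDER BY o.created_at DESC
    LIMIT 3
) recent ON true
WHERE u.id = ANY($1::int[]);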
-- CTE: readable multi-step query
WITH monthly_revenue AS (
SELECT
date_trunc('month', created_at) AS month,
SUM(total) AS revenue
FROM orders
WHERE status = 'completed'
GROUP BY 1
),
ranked AS (
SELECT
month, revenue,
LAG(revenue) OVER (ORDER BY month) AS prev_revenue
FROM monthly_revenue
)
SELECT
month,
revenue,
    round(100.0 * (revenue - prev_revenue) / nullif(prev_revenue, 0), 1) AS growth_pct
FROM ranked
ORDER BY month;
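One performance note: since PostgreSQL 12, a CTE referenced only once is inlined into the outer query, so it no longer acts as an optimization fence. If you want it computed once and reused, say so explicitly:
-- Force one-time materialization (PostgreSQL 12+; before 12, CTEs were always materialized)
WITH monthly_revenue AS MATERIALIZED (
    SELECT date_trunc('month', created_at) AS month, SUM(total) AS revenue
    FROM orders
    WHERE status = 'completed'
    GROUP BY 1
)
SELECT * FROM monthly_revenue ORDER BY month;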
-- Window functions: running totals, ranks, percentiles
SELECT
user_id,
order_total,
SUM(order_total) OVER (PARTITION BY user_id ORDER BY created_at) AS running_total,
RANK() OVER (PARTITION BY DATE_TRUNC('month', created_at) ORDER BY order_total DESC) AS rank_in_month
FROM orders;
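Frames extend the same idea to moving aggregates. A sketch of a three-order moving average per user (the window size is arbitrary):
SELECT
    user_id,
    created_at,
    -- Average over the current order and the two before it, per user
    AVG(order_total) OVER (
        PARTITION BY user_id
        ORDER BY created_at
        ROWS BETWEEN 2 PRECEDING AND CURRENT ROW
    ) AS moving_avg_3
FROM orders;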
# postgresql.conf — tune for your server's RAM
# (these values assume a dedicated 8 GB RAM database server)
shared_buffers = 2GB # 25% of RAM — Postgres buffer pool
effective_cache_size = 6GB # 75% of RAM — hint to query planner
work_mem = 64MB                  # per sort/hash node, not per connection; a complex query under many connections can allocate many multiples of this
maintenance_work_mem = 512MB # for VACUUM, CREATE INDEX
max_connections = 100 # use PgBouncer if you need more
checkpoint_completion_target = 0.9
wal_buffers = 16MB               # the auto-tuned default caps at 16MB; more rarely helps
random_page_cost = 1.1           # for SSDs; the default 4.0 assumes spinning disks
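Config file changes only take effect after a reload or restart, so verify what the running server actually uses:
-- pg_settings shows live values, including ones still pending a restart
SELECT name, setting, unit, pending_restart
FROM pg_settings
WHERE name IN ('shared_buffers', 'work_mem', 'effective_cache_size', 'random_page_cost');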
-- Check for bloated tables (dead tuples)
SELECT relname, n_live_tup, n_dead_tup,
round(n_dead_tup * 100.0 / nullif(n_live_tup + n_dead_tup, 0), 1) AS dead_pct
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC
LIMIT 10;
-- Manual VACUUM + ANALYZE (autovacuum normally handles this in the background)
VACUUM ANALYZE orders;
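If a large, high-churn table keeps showing a high dead_pct, the default autovacuum trigger (roughly 20% of the table changed) is too lax for it. Per-table overrides are the standard fix; the thresholds below are illustrative:
-- Vacuum at ~5% dead rows instead of the default ~20%
ALTER TABLE orders SET (autovacuum_vacuum_scale_factor = 0.05);
ALTER TABLE orders SET (autovacuum_analyze_scale_factor = 0.02);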
-- Check index usage — find unused indexes
SELECT indexrelname, idx_scan, pg_size_pretty(pg_relation_size(indexrelid)) AS size
FROM pg_stat_user_indexes
ORDER BY idx_scan ASC;
-- Unused indexes slow every write and waste storage, so drop them, but check replicas first: idx_scan counts are per-node
DROP INDEX CONCURRENTLY idx_unused_index; -- CONCURRENTLY avoids blocking reads/writes (cannot run inside a transaction)