I came across http://bonesmoses.org/2014/05/14/foreign-keys-are-not-free/,
which seems to indicate that foreign keys add measurable overhead to
updates on the referenced table.
When I run the following test script, the updates take about twice as
long with 50 foreign keys referencing the table as with one. Is there a
reason for that? It seems like the RI triggers shouldn't have to run on
an update when the key value doesn't change. (A small way to check that
is sketched after the script.)
begin;
set client_min_messages='warning';

CREATE OR REPLACE FUNCTION fnc_check_fk_overhead(key_count INT)
RETURNS INTERVAL AS
$$
DECLARE
  i INT;
  start_time TIMESTAMP;
  end_time TIMESTAMP;
BEGIN
  DROP TABLE IF EXISTS test_fk CASCADE;

  CREATE TABLE test_fk
  (
    id   BIGINT PRIMARY KEY,
    junk VARCHAR
  );

  INSERT INTO test_fk
  SELECT generate_series(1, 100000), repeat(' ', 20);

  CLUSTER test_fk USING test_fk_pkey;

  -- Create key_count tables, each with a FK back to test_fk (id).
  FOR i IN 1..key_count LOOP
    EXECUTE 'CREATE TABLE test_fk_ref_' || i ||
            ' (test_fk_id BIGINT REFERENCES test_fk (id) ON UPDATE NO ACTION)';
  END LOOP;

  -- Time 100,000 single-row updates that never touch the referenced key.
  start_time := clock_timestamp();

  FOR i IN 1..100000 LOOP
    UPDATE test_fk SET junk = ' '
     WHERE id = i;
  END LOOP;

  end_time := clock_timestamp();

  FOR i IN 1..key_count LOOP
    EXECUTE 'DROP TABLE test_fk_ref_' || i;
  END LOOP;

  RETURN end_time - start_time;
END;
$$ LANGUAGE plpgsql VOLATILE;
SELECT fnc_check_fk_overhead(1);
SELECT fnc_check_fk_overhead(50);

rollback;
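
In case it's useful, here is a minimal sketch of how I'd check that
directly (throwaway tables; the names are made up for this example).
pg_trigger lists the internal RI triggers that a foreign key puts on
the referenced table, and EXPLAIN (ANALYZE) reports per-trigger time,
which should show whether those triggers get charged to an update that
leaves the key alone:

begin;

-- Throwaway parent/child pair with one FK (names invented for this sketch).
CREATE TABLE fk_demo_parent (id BIGINT PRIMARY KEY, junk VARCHAR);
CREATE TABLE fk_demo_child (parent_id BIGINT REFERENCES fk_demo_parent (id));
INSERT INTO fk_demo_parent VALUES (1, 'x');

-- The FK shows up as internal RI triggers on the *referenced* table.
SELECT tgname, tgisinternal
FROM pg_trigger
WHERE tgrelid = 'fk_demo_parent'::regclass;

-- EXPLAIN (ANALYZE) actually runs the update and reports time spent in
-- each trigger, so an update that never touches id should show whether
-- the RI machinery still costs anything.
EXPLAIN (ANALYZE)
UPDATE fk_demo_parent SET junk = 'y' WHERE id = 1;

rollback;

If the triggers were skipped entirely, I'd expect no trigger time to
show up in that EXPLAIN output.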