From 9a8495d413a3fff8edc76ff84ee905a49262a1b5 Mon Sep 17 00:00:00 2001 From: Michael Paquier Date: Wed, 25 Nov 2015 22:06:17 +0900 Subject: [PATCH 2/2] Add recovery test suite Using the infrastructure put in place by last commit, this commit adds a new set of tests dedicated to nodes in recovery, standbys, backup and more advanced cluster operations. A couple of tests showing how to use those routines is given as well. --- src/bin/pg_rewind/RewindTest.pm | 32 ----- src/test/Makefile | 2 +- src/test/perl/TestLib.pm | 32 +++++ src/test/recovery/.gitignore | 3 + src/test/recovery/Makefile | 17 +++ src/test/recovery/README | 19 +++ src/test/recovery/RecoveryTest.pm | 179 ++++++++++++++++++++++++++++ src/test/recovery/t/001_stream_rep.pl | 67 +++++++++++ src/test/recovery/t/002_archiving.pl | 51 ++++++++ src/test/recovery/t/003_recovery_targets.pl | 135 +++++++++++++++++++++ src/test/recovery/t/004_timeline_switch.pl | 76 ++++++++++++ src/test/recovery/t/005_replay_delay.pl | 49 ++++++++ 12 files changed, 629 insertions(+), 33 deletions(-) create mode 100644 src/test/recovery/.gitignore create mode 100644 src/test/recovery/Makefile create mode 100644 src/test/recovery/README create mode 100644 src/test/recovery/RecoveryTest.pm create mode 100644 src/test/recovery/t/001_stream_rep.pl create mode 100644 src/test/recovery/t/002_archiving.pl create mode 100644 src/test/recovery/t/003_recovery_targets.pl create mode 100644 src/test/recovery/t/004_timeline_switch.pl create mode 100644 src/test/recovery/t/005_replay_delay.pl diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm index 5124df8..660b03f 100644 --- a/src/bin/pg_rewind/RewindTest.pm +++ b/src/bin/pg_rewind/RewindTest.pm @@ -119,38 +119,6 @@ sub check_query } } -# Run a query once a second, until it returns 't' (i.e. SQL boolean true). 
-sub poll_query_until -{ - my ($query, $connstr) = @_; - - my $max_attempts = 30; - my $attempts = 0; - my ($stdout, $stderr); - - while ($attempts < $max_attempts) - { - my $cmd = [ 'psql', '-At', '-c', "$query", '-d', "$connstr" ]; - my $result = run $cmd, '>', \$stdout, '2>', \$stderr; - - chomp($stdout); - $stdout =~ s/\r//g if $Config{osname} eq 'msys'; - if ($stdout eq "t") - { - return 1; - } - - # Wait a second before retrying. - sleep 1; - $attempts++; - } - - # The query result didn't change in 30 seconds. Give up. Print the stderr - # from the last attempt, hopefully that's useful for debugging. - diag $stderr; - return 0; -} - sub append_to_file { my ($filename, $str) = @_; diff --git a/src/test/Makefile b/src/test/Makefile index b713c2c..7f7754f 100644 --- a/src/test/Makefile +++ b/src/test/Makefile @@ -12,7 +12,7 @@ subdir = src/test top_builddir = ../.. include $(top_builddir)/src/Makefile.global -SUBDIRS = regress isolation modules +SUBDIRS = regress isolation modules recovery # We don't build or execute examples/, locale/, or thread/ by default, # but we do want "make clean" etc to recurse into them. Likewise for ssl/, diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index a1473c6..091043e 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -8,6 +8,7 @@ use Exporter 'import'; our @EXPORT = qw( get_new_node teardown_node + poll_query_until psql slurp_dir slurp_file @@ -110,6 +111,37 @@ sub teardown_node @active_nodes = grep { $_ ne $node } @active_nodes; } +sub poll_query_until +{ + my ($query, $connstr) = @_; + + my $max_attempts = 30; + my $attempts = 0; + my ($stdout, $stderr); + + while ($attempts < $max_attempts) + { + my $cmd = [ 'psql', '-At', '-c', "$query", '-d', "$connstr" ]; + my $result = run $cmd, '>', \$stdout, '2>', \$stderr; + + chomp($stdout); + $stdout =~ s/\r//g if $Config{osname} eq 'msys'; + if ($stdout eq "t") + { + return 1; + } + + # Wait a second before retrying. 
+ sleep 1; + $attempts++; + } + + # The query result didn't change in 30 seconds. Give up. Print the stderr + # from the last attempt, hopefully that's useful for debugging. + diag $stderr; + return 0; +} + END { foreach my $node (@active_nodes) diff --git a/src/test/recovery/.gitignore b/src/test/recovery/.gitignore new file mode 100644 index 0000000..499fa7d --- /dev/null +++ b/src/test/recovery/.gitignore @@ -0,0 +1,3 @@ +# Generated by test suite +/regress_log/ +/tmp_check/ diff --git a/src/test/recovery/Makefile b/src/test/recovery/Makefile new file mode 100644 index 0000000..16c063a --- /dev/null +++ b/src/test/recovery/Makefile @@ -0,0 +1,17 @@ +#------------------------------------------------------------------------- +# +# Makefile for src/test/recovery +# +# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group +# Portions Copyright (c) 1994, Regents of the University of California +# +# src/test/recovery/Makefile +# +#------------------------------------------------------------------------- + +subdir = src/test/recovery +top_builddir = ../../.. +include $(top_builddir)/src/Makefile.global + +check: + $(prove_check) diff --git a/src/test/recovery/README b/src/test/recovery/README new file mode 100644 index 0000000..20b98e0 --- /dev/null +++ b/src/test/recovery/README @@ -0,0 +1,19 @@ +src/test/recovery/README + +Regression tests for recovery and replication +============================================= + +This directory contains a test suite for recovery and replication, +testing mainly the interactions of recovery.conf with cluster +instances by providing a simple set of routines that can be used +to define a custom cluster for a test, including backup, archiving, +and streaming configuration. + +Running the tests +================= + + make check + +NOTE: This creates a temporary installation, and some tests may +create one or multiple nodes, be they master or standby(s) for the +purpose of the tests. 
diff --git a/src/test/recovery/RecoveryTest.pm b/src/test/recovery/RecoveryTest.pm new file mode 100644 index 0000000..2b03233 --- /dev/null +++ b/src/test/recovery/RecoveryTest.pm @@ -0,0 +1,179 @@ +# Set of common routines for recovery regression tests for a PostgreSQL +# cluster. This includes methods that can be used by the various set of +# tests present to set up cluster nodes and configure them according to +# the test scenario wanted. +# +# This module makes use of PostgresNode for node manipulation, performing +# higher-level operations to create standby nodes or setting them up +# for archiving and replication. +# +# Nodes are identified by their port number and have one allocated when +# created, hence it is unique for each node of the cluster as it is run +# locally. PGHOST is equally set to a unique value for the duration of +# each test. + +package RecoveryTest; + +use strict; +use warnings; + +use Cwd; +use PostgresNode; +use RecursiveCopy; +use TestBase; +use TestLib; +use Test::More; + +use IPC::Run qw(run start); + +use Exporter 'import'; + +our @EXPORT = qw( + enable_archiving + enable_restoring + enable_streaming + make_master + make_archive_standby + make_stream_standby +); + +# Set of handy routines able to set up a node with different characteristics +# Enable streaming replication +sub enable_streaming +{ + my $node_root = shift; # Instance to link to + my $node_standby = shift; + my $root_connstr = $node_root->getConnStr(); + my $applname = $node_standby->getApplName(); + + $node_standby->appendConf('recovery.conf', qq( +primary_conninfo='$root_connstr application_name=$applname' +standby_mode=on +recovery_target_timeline='latest' +)); +} + +# Enable the use of restore_command from a node +sub enable_restoring +{ + my $node_root = shift; # Instance to link to + my $node_standby = shift; + my $path = $node_root->getArchiveDir(); + + # Switch path to use slashes on Windows + $path =~ tr#\\#/# if ($windows_os); + my $copy_command = $windows_os 
? + "copy \"$path\\\\%f\" \"%p\"" : + "cp -i $path/%f %p"; + $node_standby->appendConf('recovery.conf', qq( +restore_command='$copy_command' +standby_mode=on +)); +} + +# Enable WAL archiving on a node +sub enable_archiving +{ + my $node = shift; + my $path = $node->getArchiveDir(); + + # Switch path to use slashes on Windows + $path =~ tr#\\#/# if ($windows_os); + my $copy_command = $windows_os ? + "copy \"%p\" \"$path\\\\%f\"" : + "cp %p $path/%f"; + + # Enable archive_mode and archive_command on node + $node->appendConf('postgresql.conf', qq( +archive_mode = on +archive_command = '$copy_command' +)); +} + +# Master node initialization. +sub make_master +{ + my $node_master = get_new_node(); + my $port_master = $node_master->getPort(); + print "# Initializing master node with port $port_master\n"; + $node_master->initNode(); + configure_base_node($node_master); + return $node_master; +} + +sub configure_base_node +{ + my $node = shift; + + $node->appendConf('postgresql.conf', qq( +wal_level = hot_standby +max_wal_senders = 5 +wal_keep_segments = 20 +max_wal_size = 128MB +shared_buffers = 1MB +wal_log_hints = on +hot_standby = on +autovacuum = off +)); +} + +# Standby node initializations +# Node only streaming. 
+sub make_stream_standby +{ + my $node_master = shift; + my $backup_name = shift; + my $node_standby = get_new_node(); + my $master_port = $node_master->getPort(); + my $standby_port = $node_standby->getPort(); + + print "# Initializing streaming mode for node $standby_port from node $master_port\n"; + $node_standby->initNodeFromBackup($node_master, $backup_name); + configure_base_node($node_standby); + + # Start second node, streaming from first one + enable_streaming($node_master, $node_standby); + return $node_standby; +} + +# Node getting WAL only from archives +sub make_archive_standby +{ + my $node_master = shift; + my $backup_name = shift; + my $node_standby = get_new_node(); + my $master_port = $node_master->getPort(); + my $standby_port = $node_standby->getPort(); + + print "# Initializing archive mode for node $standby_port from node $master_port\n"; + $node_standby->initNodeFromBackup($node_master, $backup_name); + configure_base_node($node_standby); + + # Start second node, restoring from first one + enable_restoring($node_master, $node_standby); + return $node_standby; +} + +# Wait until a node is able to accept queries. Useful when putting a node +# in recovery and wait for it to be able to work particularly on slow +# machines. +sub wait_for_node +{ + my $node = shift; + my $max_attempts = 30; + my $attempts = 0; + while ($attempts < $max_attempts) + { + if (run_log(['pg_isready', '-p', $node->getPort()])) + { + return 1; + } + + # Wait a second before retrying. 
+ sleep 1; + $attempts++; + } + return 0; +} + +1; diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl new file mode 100644 index 0000000..aae1026 --- /dev/null +++ b/src/test/recovery/t/001_stream_rep.pl @@ -0,0 +1,67 @@ +# Minimal test testing streaming replication +use strict; +use warnings; +use TestLib; +use Test::More tests => 4; + +use RecoveryTest; + +# Initialize master node +my $node_master = make_master(); +$node_master->startNode(); +my $backup_name = 'my_backup'; + +# Take backup +$node_master->backupNode($backup_name); + +# Create streaming standby linking to master +my $node_standby_1 = make_stream_standby($node_master, $backup_name); +$node_standby_1->startNode(); + +# Take backup of standby 1 (not mandatory, but useful to check if +# pg_basebackup works on a standby). +$node_standby_1->backupNode($backup_name); + +# Create second standby node linking to standby 1 +my $node_standby_2 = make_stream_standby($node_standby_1, $backup_name); +$node_standby_2->startNode(); +$node_standby_2->backupNode($backup_name); + +# Create some content on master and check its presence in standby 1 and 2 +psql $node_master->getConnStr(), + "CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a"; + +# Wait for standbys to catch up +my $applname_1 = $node_standby_1->getApplName(); +my $applname_2 = $node_standby_2->getApplName(); +my $caughtup_query = "SELECT pg_current_xlog_location() = write_location FROM pg_stat_replication WHERE application_name = '$applname_1';"; +poll_query_until($caughtup_query, $node_master->getConnStr()) + or die "Timed out while waiting for standby 1 to catch up"; +$caughtup_query = "SELECT pg_last_xlog_replay_location() = write_location FROM pg_stat_replication WHERE application_name = '$applname_2';"; +poll_query_until($caughtup_query, $node_standby_1->getConnStr()) + or die "Timed out while waiting for standby 2 to catch up"; + +my $result = psql $node_standby_1->getConnStr(), + "SELECT count(*) 
FROM tab_int"; +print "standby 1: $result\n"; +is($result, qq(1002), 'check streamed content on standby 1'); + +$result = psql $node_standby_2->getConnStr(), + "SELECT count(*) FROM tab_int"; +print "standby 2: $result\n"; +is($result, qq(1002), 'check streamed content on standby 2'); + +# Check that only READ-only queries can run on standbys +command_fails(['psql', '-A', '-t', '--no-psqlrc', + '-d', $node_standby_1->getConnStr(), '-c', + "INSERT INTO tab_int VALUES (1)"], + 'Read-only queries on standby 1'); +command_fails(['psql', '-A', '-t', '--no-psqlrc', + '-d', $node_standby_2->getConnStr(), '-c', + "INSERT INTO tab_int VALUES (1)"], + 'Read-only queries on standby 2'); + +# Cleanup nodes +teardown_node($node_standby_2); +teardown_node($node_standby_1); +teardown_node($node_master); diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl new file mode 100644 index 0000000..c3e8465 --- /dev/null +++ b/src/test/recovery/t/002_archiving.pl @@ -0,0 +1,51 @@ +# test for archiving with warm standby +use strict; +use warnings; +use TestLib; +use Test::More tests => 1; +use File::Copy; +use RecoveryTest; + +# Initialize master node, doing archives +my $node_master = make_master(); +my $backup_name = 'my_backup'; +enable_archiving($node_master); + +# Start it +$node_master->startNode(); + +# Take backup for slave +$node_master->backupNode($backup_name); + +# Initialize standby node from backup, fetching WAL from archives +my $node_standby = make_archive_standby($node_master, $backup_name); +$node_standby->appendConf('postgresql.conf', qq( +wal_retrieve_retry_interval = '100ms' +)); +$node_standby->startNode(); + +# Create some content on master +psql $node_master->getConnStr(), + "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"; +my $current_lsn = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; + +# Force archiving of WAL file to make it present on master +psql $node_master->getConnStr(), "SELECT 
pg_switch_xlog()"; + +# Add some more content, it should not be present on standby +psql $node_master->getConnStr(), + "INSERT INTO tab_int VALUES (generate_series(1001,2000))"; + +# Wait until necessary replay has been done on standby +my $caughtup_query = "SELECT '$current_lsn'::pg_lsn <= pg_last_xlog_replay_location()"; +poll_query_until($caughtup_query, $node_standby->getConnStr()) + or die "Timed out while waiting for standby to catch up"; + +my $result = psql $node_standby->getConnStr(), + "SELECT count(*) FROM tab_int"; +is($result, qq(1000), 'check content from archives'); + +# Cleanup nodes +teardown_node($node_standby); +teardown_node($node_master); diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl new file mode 100644 index 0000000..995e8e4 --- /dev/null +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -0,0 +1,135 @@ +# Test for recovery targets: name, timestamp, XID +use strict; +use warnings; +use TestLib; +use Test::More tests => 7; + +use RecoveryTest; + +# Create and test a standby from given backup, with a certain +# recovery target. 
+sub test_recovery_standby +{ + my $test_name = shift; + my $node_master = shift; + my $recovery_params = shift; + my $num_rows = shift; + my $until_lsn = shift; + + my $node_standby = make_archive_standby($node_master, 'my_backup'); + + foreach my $param_item (@$recovery_params) + { + $node_standby->appendConf('recovery.conf', + qq($param_item +)); + } + + $node_standby->startNode(); + + # Wait until standby has replayed enough data + my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()"; + poll_query_until($caughtup_query, $node_standby->getConnStr()) + or die "Timed out while waiting for standby to catch up"; + + # Create some content on master and check its presence in standby + my $result = psql $node_standby->getConnStr(), + "SELECT count(*) FROM tab_int"; + is($result, qq($num_rows), "check standby content for $test_name"); + + # Stop standby node + teardown_node($node_standby); +} + +# Initialize master node +my $node_master = make_master(); +enable_archiving($node_master); + +# Start it +$node_master->startNode(); + +# Create data before taking the backup, aimed at testing +# recovery_target = 'immediate' +psql $node_master->getConnStr(), + "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"; +my $lsn1 = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; + +# Take backup from which all operations will be run +$node_master->backupNode('my_backup'); + +# Insert some data to be used as a replay reference, with a recovery +# target TXID. 
+psql $node_master->getConnStr(), + "INSERT INTO tab_int VALUES (generate_series(1001,2000))"; +my $recovery_txid = psql $node_master->getConnStr(), + "SELECT txid_current()"; +my $lsn2 = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; + +# More data, with recovery target timestamp +psql $node_master->getConnStr(), + "INSERT INTO tab_int VALUES (generate_series(2001,3000))"; +my $recovery_time = psql $node_master->getConnStr(), "SELECT now()"; +my $lsn3 = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; + +# Even more data, this time with a recovery target name +psql $node_master->getConnStr(), + "INSERT INTO tab_int VALUES (generate_series(3001,4000))"; +my $recovery_name = "my_target"; +my $lsn4 = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; +psql $node_master->getConnStr(), + "SELECT pg_create_restore_point('$recovery_name')"; + +# Force archiving of WAL file +psql $node_master->getConnStr(), "SELECT pg_switch_xlog()"; + +# Test recovery targets +my @recovery_params = ( "recovery_target = 'immediate'" ); +test_recovery_standby('immediate target', $node_master, + \@recovery_params, + "1000", $lsn1); +@recovery_params = ( "recovery_target_xid = '$recovery_txid'" ); +test_recovery_standby('XID', $node_master, + \@recovery_params, + "2000", $lsn2); +@recovery_params = ( "recovery_target_time = '$recovery_time'" ); +test_recovery_standby('Time', $node_master, + \@recovery_params, + "3000", $lsn3); +@recovery_params = ( "recovery_target_name = '$recovery_name'" ); +test_recovery_standby('Name', $node_master, + \@recovery_params, + "4000", $lsn4); + +# Multiple targets +# Last entry has priority (note that an array respects the order of items +# not hashes). 
+@recovery_params = ( + "recovery_target_name = '$recovery_name'", + "recovery_target_xid = '$recovery_txid'", + "recovery_target_time = '$recovery_time'" +); +test_recovery_standby('Name + XID + Time', $node_master, + \@recovery_params, + "3000", $lsn3); +@recovery_params = ( + "recovery_target_time = '$recovery_time'", + "recovery_target_name = '$recovery_name'", + "recovery_target_xid = '$recovery_txid'" +); +test_recovery_standby('Time + Name + XID', $node_master, + \@recovery_params, + "2000", $lsn2); +@recovery_params = ( + "recovery_target_xid = '$recovery_txid'", + "recovery_target_time = '$recovery_time'", + "recovery_target_name = '$recovery_name'" +); +test_recovery_standby('XID + Time + Name', $node_master, + \@recovery_params, + "4000", $lsn4); + +teardown_node($node_master); diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl new file mode 100644 index 0000000..c78c92a --- /dev/null +++ b/src/test/recovery/t/004_timeline_switch.pl @@ -0,0 +1,76 @@ +# Test for timeline switch +# Ensure that a standby is able to follow a newly-promoted standby +# on a new timeline. 
+use strict; +use warnings; +use File::Path qw(remove_tree); +use PostgresNode; +use TestBase; +use TestLib; +use Test::More tests => 1; + +use RecoveryTest; + +$ENV{PGDATABASE} = 'postgres'; + +# Initialize master node +my $node_master = make_master(); +$node_master->startNode(); + +# Take backup +my $backup_name = 'my_backup'; +$node_master->backupNode($backup_name); + +# Create two standbys linking to it +my $node_standby_1 = make_stream_standby($node_master, $backup_name); +$node_standby_1->startNode(); +my $node_standby_2 = make_stream_standby($node_master, $backup_name); +$node_standby_2->startNode(); + +# Create some content on master +psql $node_master->getConnStr(), + "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"; +my $until_lsn = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; + +# Wait until standby has replayed enough data on standby 1 +my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()"; +poll_query_until($caughtup_query, $node_standby_1->getConnStr()) + or die "Timed out while waiting for standby to catch up"; + +# Stop and remove master, and promote standby 1, switching it to a new timeline +teardown_node($node_master); +system_or_bail('pg_ctl', '-w', '-D', $node_standby_1->getDataDir(), + 'promote'); +print "# Promoted standby 1\n"; + +# Switch standby 2 to replay from standby 1 +remove_tree($node_standby_2->getDataDir() . '/recovery.conf'); +my $connstr_1 = $node_standby_1->getConnStr(); +$node_standby_2->appendConf('recovery.conf', qq( +primary_conninfo='$connstr_1' +standby_mode=on +recovery_target_timeline='latest' +)); +$node_standby_2->restartNode(); + +# Insert some data in standby 1 and check its presence in standby 2 +# to ensure that the timeline switch has been done. Standby 1 needs +# to exit recovery first before moving on with the test. 
+poll_query_until("SELECT pg_is_in_recovery() <> true", + $node_standby_1->getConnStr()); +psql $node_standby_1->getConnStr(), + "INSERT INTO tab_int VALUES (generate_series(1001,2000))"; +$until_lsn = psql $node_standby_1->getConnStr(), + "SELECT pg_current_xlog_location();"; +$caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()"; +poll_query_until($caughtup_query, $node_standby_2->getConnStr()) + or die "Timed out while waiting for standby to catch up"; + +my $result = psql $node_standby_2->getConnStr(), + "SELECT count(*) FROM tab_int"; +is($result, qq(2000), 'check content of standby 2'); + +# Stop nodes +teardown_node($node_standby_2); +teardown_node($node_standby_1); diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl new file mode 100644 index 0000000..c209b55 --- /dev/null +++ b/src/test/recovery/t/005_replay_delay.pl @@ -0,0 +1,49 @@ +# Checks for recovery_min_apply_delay +use strict; +use warnings; +use TestLib; +use Test::More tests => 2; + +use RecoveryTest; + +# Initialize master node +my $node_master = make_master(); +$node_master->startNode(); + +# And some content +psql $node_master->getConnStr(), + "CREATE TABLE tab_int AS SELECT generate_series(1,10) AS a"; + +# Take backup +my $backup_name = 'my_backup'; +$node_master->backupNode($backup_name); + +# Create streaming standby from backup +my $node_standby = make_stream_standby($node_master, $backup_name); +$node_standby->appendConf('recovery.conf', qq( +recovery_min_apply_delay = '2s' +)); +$node_standby->startNode(); + +# Make new content on master and check its presence in standby +# depending on the delay of 2s applied above. 
+psql $node_master->getConnStr(), + "INSERT INTO tab_int VALUES (generate_series(11,20))"; +sleep 1; +# Here we should have only 10 rows +my $result = psql $node_standby->getConnStr(), + "SELECT count(*) FROM tab_int"; +is($result, qq(10), 'check content with delay of 1s'); + +# Now wait for replay to complete on standby +my $until_lsn = psql $node_master->getConnStr(), + "SELECT pg_current_xlog_location();"; +my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()"; +poll_query_until($caughtup_query, $node_standby->getConnStr()) + or die "Timed out while waiting for standby to catch up"; +$result = psql $node_standby->getConnStr(), "SELECT count(*) FROM tab_int"; +is($result, qq(20), 'check content with delay of 2s'); + +# Stop nodes +teardown_node($node_standby); +teardown_node($node_master); -- 2.6.3