* add for watchdog
*/
bool use_watchdog; /* Enables watchdog */
- bool failover_when_quorum_exists; /* Do failover only when wd cluster holds he quorum */
+ bool failover_when_quorum_exists; /* Do failover only when wd cluster holds the quorum */
bool failover_require_consensus; /* Only do failover when majority aggrees */
bool allow_multiple_failover_requests_from_node; /* One Pgpool-II node can send multiple
* failover requests to build consensus
--- /dev/null
+# master watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = false
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+wd_hostname = 'localhost'
+wd_port = 21004
+wd_heartbeat_port = 21005
+
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21105
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11100
+other_wd_port0 = 21104
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
--- /dev/null
+# standby watchdog
+num_init_children = 4
+port = 11100
+pcp_port = 11105
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = false
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 3
+wd_hostname = 'localhost'
+wd_port = 21104
+wd_heartbeat_port = 21105
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21005
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11000
+other_wd_port0 = 21004
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
+
--- /dev/null
+# standby2 watchdog
+num_init_children = 4
+port = 11200
+pcp_port = 11205
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = false
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 1
+wd_hostname = 'localhost'
+wd_port = 21204
+wd_heartbeat_port = 21205
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21005
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21105
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11000
+other_wd_port0 = 21004
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11100
+other_wd_port1 = 21104
+
--- /dev/null
+#!/usr/bin/env bash
+#-------------------------------------------------------------------
+# test script for watchdog
+#
+# Please note that to successfully run the test, "HEALTHCHECK_DEBUG"
+# must be defined before compiling main/health_check.c.
+#
+# test failover_when_quorum_exists
+#
+source $TESTLIBS
+MASTER_DIR=master
+STANDBY_DIR=standby
+STANDBY2_DIR=standby2
+num_tests=5
+success_count=0
+PSQL=$PGBIN/psql
+PG_CTL=$PGBIN/pg_ctl
+
+rm -fr $MASTER_DIR
+rm -fr $STANDBY_DIR
+rm -fr $STANDBY2_DIR
+
+mkdir $MASTER_DIR
+mkdir $STANDBY_DIR
+mkdir $STANDBY2_DIR
+
+
+# dir in master directory
+cd $MASTER_DIR
+
+# create master environment
+echo -n "creating master pgpool and PostgreSQL clusters..."
+$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
+echo "master setup done."
+
+
+# copy the configurations from master to standby
+cp -r etc ../$STANDBY_DIR/
+
+# copy the configurations from master to standby2
+cp -r etc ../$STANDBY2_DIR/
+
+source ./bashrc.ports
+cat ../master.conf >> etc/pgpool.conf
+
+./startall
+wait_for_pgpool_startup
+
+
+# back to test root dir
+cd ..
+
+
+# create standby environment but do not start pgpool
+
+mkdir $STANDBY_DIR/log
+echo -n "creating standby pgpool..."
+cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
+# since we are using the same pgpool-II conf as the master, change the pid file path in the standby pgpool conf
+echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
+echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
+# start the standby pgpool-II by hand
+#$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY_DIR/etc/pcp.conf -a $STANDBY_DIR/etc/pool_hba.conf > $STANDBY_DIR/log/pgpool.log 2>&1 &
+
+# create standby2 environment but do not start pgpool
+
+mkdir $STANDBY2_DIR/log
+echo -n "creating standby2 pgpool..."
+cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
+# since we are using the same pgpool-II conf as the master, change the pid file path in the standby2 pgpool conf
+echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
+echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
+# start the standby2 pgpool-II by hand
+#$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
+
+# First test check if both pgpool-II have found their correct place in watchdog cluster.
+echo "Waiting for the pgpool master..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Master brought up successfully."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+
+# raise an artificial communication error on master for DB node 1
+echo "1 down" > $MASTER_DIR/log/backend_down_request
+echo "Checking if the Master rejects the failover because quorum is not present..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep -i "Rejecting the failover request" $MASTER_DIR/log/pgpool.log
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Fake DB error detected. and Failover rejected because of absence of quorum"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# now start other Pgpool-II nodes to complete the quorum
+
+# start the standby pgpool-II by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY_DIR/etc/pcp.conf -a $STANDBY_DIR/etc/pool_hba.conf > $STANDBY_DIR/log/pgpool.log 2>&1 &
+# start the second standby pgpool-II by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
+
+# now check if standby1 has successfully joined and connected to the master.
+echo "Waiting for the standby1 to join cluster..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "successfully joined the watchdog cluster as standby node" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby successfully connected."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# now check if standby2 has successfully joined and connected to the master.
+echo "Waiting for the standby2 to join cluster..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "successfully joined the watchdog cluster as standby node" $STANDBY2_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby2 successfully connected."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# raise an artificial communication error again to check if failover is executed this time
+echo "1 down" > $MASTER_DIR/log/backend_down_request
+#give some time to pgpool-II to execute failover
+sleep 5
+# check to see if all Pgpool-II agrees that the failover request is
+# executed
+echo "Checking if all Pgpool-II agrees that the failover request is executed"
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ n=0
+ for p in 11000 11100 11200
+ do
+ $PSQL -p $p -c "show pool_nodes" test|grep standby|grep down >/dev/null 2>&1
+ if [ $? = 0 ];then
+ n=$(( n + 1))
+ fi
+ done
+ if [ $n -eq 3 ];then
+ success_count=$(( success_count + 1 ))
+ echo "All Pgpool-II agrees that the failover request is executed"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+
+# we are done. Just stop the standby pgpool-II
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
+cd master
+./shutdownall
+
+echo "$success_count out of $num_tests successfull";
+
+if test $success_count -eq $num_tests
+then
+ exit 0
+fi
+
+exit 1
--- /dev/null
+# master watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+wd_hostname = 'localhost'
+wd_port = 21004
+wd_heartbeat_port = 21005
+
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21105
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11100
+other_wd_port0 = 21104
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
--- /dev/null
+# standby watchdog
+num_init_children = 4
+port = 11100
+pcp_port = 11105
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 3
+wd_hostname = 'localhost'
+wd_port = 21104
+wd_heartbeat_port = 21105
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21005
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11000
+other_wd_port0 = 21004
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
+
--- /dev/null
+# standby2 watchdog
+num_init_children = 4
+port = 11200
+pcp_port = 11205
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 1
+wd_hostname = 'localhost'
+wd_port = 21204
+wd_heartbeat_port = 21205
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21005
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21105
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11000
+other_wd_port0 = 21004
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11100
+other_wd_port1 = 21104
+
--- /dev/null
+#!/usr/bin/env bash
+#-------------------------------------------------------------------
+# test script for watchdog
+#
+# Please note that to successfully run the test, "HEALTHCHECK_DEBUG"
+# must be defined before compiling main/health_check.c.
+#
+# test failover_require_consensus
+#
+source $TESTLIBS
+MASTER_DIR=master
+STANDBY_DIR=standby
+STANDBY2_DIR=standby2
+num_tests=7
+success_count=0
+PSQL=$PGBIN/psql
+PG_CTL=$PGBIN/pg_ctl
+
+rm -fr $MASTER_DIR
+rm -fr $STANDBY_DIR
+rm -fr $STANDBY2_DIR
+
+mkdir $MASTER_DIR
+mkdir $STANDBY_DIR
+mkdir $STANDBY2_DIR
+
+
+# dir in master directory
+cd $MASTER_DIR
+
+# create master environment
+echo -n "creating master pgpool and PostgreSQL clusters..."
+$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
+echo "master setup done."
+
+
+# copy the configurations from master to standby
+cp -r etc ../$STANDBY_DIR/
+
+# copy the configurations from master to standby2
+cp -r etc ../$STANDBY2_DIR/
+
+source ./bashrc.ports
+cat ../master.conf >> etc/pgpool.conf
+
+./startall
+wait_for_pgpool_startup
+
+
+# back to test root dir
+cd ..
+
+
+# create standby environment and start pgpool
+
+mkdir $STANDBY_DIR/log
+echo -n "creating standby pgpool..."
+cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
+# since we are using the same pgpool-II conf as the master, change the pid file path in the standby pgpool conf
+echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
+echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
+# start the standby pgpool-II by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY_DIR/etc/pcp.conf -a $STANDBY_DIR/etc/pool_hba.conf > $STANDBY_DIR/log/pgpool.log 2>&1 &
+
+# create standby2 environment but do not start pgpool
+mkdir $STANDBY2_DIR/log
+echo -n "creating standby2 pgpool..."
+cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
+# since we are using the same pgpool-II conf as the master, change the pid file path in the standby2 pgpool conf
+echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
+echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
+# start the standby2 pgpool-II by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
+
+# First test check if both pgpool-II have found their correct place in watchdog cluster.
+echo "Waiting for the pgpool master..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Master brought up successfully."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# now check if standby1 has successfully joined and connected to the master.
+echo "Waiting for the standby1 to join cluster..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "successfully joined the watchdog cluster as standby node" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby successfully connected."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# now check if standby2 has successfully joined and connected to the master.
+echo "Waiting for the standby2 to join cluster..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "successfully joined the watchdog cluster as standby node" $STANDBY2_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby2 successfully connected."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# raise an artificial communication error on standby for DB node 1
+echo "1 down" > $STANDBY_DIR/log/backend_down_request
+echo "Checking if the standby successfuly process the failover request..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep -i "building consensus for request" $STANDBY_DIR/log/pgpool.log
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Fake DB error generated and master is waiting for consensus"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+echo "Checking if the Master receives the failover request and waiting for consensus..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep -i "failover request noted" $MASTER_DIR/log/pgpool.log
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Fake DB error delivered to master. and master is waiting for consensus"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# raise an artificial communication error again on the standby for DB node 1
+echo "1 down" > $STANDBY_DIR/log/backend_down_request
+# standby should quarantine the node now
+echo "Checking if the node is quarantined on standby..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep -i "changed to quarantine node request" $STANDBY_DIR/log/pgpool.log
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Node is quarantined because of second error but no consensus"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+
+# raise an artificial communication error on the second standby to check if failover is executed this time
+echo "1 down" > $STANDBY2_DIR/log/backend_down_request
+
+#give some time to pgpool-II to execute failover
+sleep 5
+# check to see if all Pgpool-II agrees that the failover request is
+# executed
+echo "Checking if all Pgpool-II agrees that the failover request is executed"
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ n=0
+ for p in 11000 11100 11200
+ do
+ $PSQL -p $p -c "show pool_nodes" test|grep standby|grep down >/dev/null 2>&1
+ if [ $? = 0 ];then
+ n=$(( n + 1))
+ fi
+ done
+ if [ $n -eq 3 ];then
+ success_count=$(( success_count + 1 ))
+ echo "All Pgpool-II agrees that the failover request is executed"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+
+# we are done. Just stop the standby pgpool-II
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
+cd master
+./shutdownall
+
+echo "$success_count out of $num_tests successfull";
+
+if test $success_count -eq $num_tests
+then
+ exit 0
+fi
+
+exit 1
--- /dev/null
+# master watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+wd_hostname = 'localhost'
+wd_port = 21004
+wd_heartbeat_port = 21005
+
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21105
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11100
+other_wd_port0 = 21104
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
--- /dev/null
+#!/usr/bin/env bash
+#-------------------------------------------------------------------
+# test script for watchdog
+#
+# Please note that to successfully run the test, "HEALTHCHECK_DEBUG"
+# must be defined before compiling main/health_check.c.
+#
+# test pcp_detach bypass failover_when_quorum_exists and failover_require_consensus
+#
+source $TESTLIBS
+MASTER_DIR=master
+num_tests=2
+success_count=0
+PSQL=$PGBIN/psql
+PG_CTL=$PGBIN/pg_ctl
+
+rm -fr $MASTER_DIR
+rm -fr $STANDBY_DIR
+rm -fr $STANDBY2_DIR
+
+mkdir $MASTER_DIR
+mkdir $STANDBY_DIR
+mkdir $STANDBY2_DIR
+
+
+# dir in master directory
+cd $MASTER_DIR
+
+# create master environment
+echo -n "creating master pgpool and PostgreSQL clusters..."
+$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
+echo "master setup done."
+
+
+source ./bashrc.ports
+cat ../master.conf >> etc/pgpool.conf
+
+./startall
+wait_for_pgpool_startup
+
+
+# back to test root dir
+cd ..
+
+
+# First test check if pgpool-II became a master.
+echo "Waiting for the pgpool master..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Master brought up successfully."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+#export PCPPASSFILE=/home/usama/work/community/pgpool2/src/test/regression/tests/067.bug231/testdir/pcppass
+$PGPOOL_INSTALL_DIR/bin/pcp_detach_node -w -h localhost -p $PCP_PORT 1 2>&1
+
+$PSQL -p 11000 -c "show pool_nodes" 2>&1
+
+$PSQL -p 11000 -c "show pool_nodes" test|grep standby|grep down >/dev/null 2>&1
+if [ $? = 0 ];then
+ echo "Failover was successfuly executed"
+ success_count=$(( success_count + 1 ))
+fi
+
+cd master
+./shutdownall
+
+echo "$success_count out of $num_tests successfull";
+
+if test $success_count -eq $num_tests
+then
+ exit 0
+fi
+
+exit 1
--- /dev/null
+# master watchdog
+num_init_children = 4
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 5
+wd_hostname = 'localhost'
+wd_port = 21004
+wd_heartbeat_port = 21005
+
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21105
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11100
+other_wd_port0 = 21104
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
--- /dev/null
+# standby watchdog
+num_init_children = 4
+port = 11100
+pcp_port = 11105
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 3
+wd_hostname = 'localhost'
+wd_port = 21104
+wd_heartbeat_port = 21105
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21005
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21205
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11000
+other_wd_port0 = 21004
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11200
+other_wd_port1 = 21204
+
--- /dev/null
+# standby2 watchdog
+num_init_children = 4
+port = 11200
+pcp_port = 11205
+use_watchdog = on
+failover_when_quorum_exists = true
+failover_require_consensus = true
+allow_multiple_failover_requests_from_node = false
+wd_interval = 1
+wd_priority = 1
+wd_hostname = 'localhost'
+wd_port = 21204
+wd_heartbeat_port = 21205
+heartbeat_destination0 = 'localhost'
+heartbeat_destination_port0 = 21005
+heartbeat_destination1 = 'localhost'
+heartbeat_destination_port1 = 21105
+
+other_pgpool_hostname0 = 'localhost'
+other_pgpool_port0 = 11000
+other_wd_port0 = 21004
+
+other_pgpool_hostname1 = 'localhost'
+other_pgpool_port1 = 11100
+other_wd_port1 = 21104
+
--- /dev/null
+#!/usr/bin/env bash
+#-------------------------------------------------------------------
+# test script for watchdog
+#
+# Please note that to successfully run the test, "HEALTHCHECK_DEBUG"
+# must be defined before compiling main/health_check.c.
+#
+# test if master and backend goes down at same time Pgpool-II behaves as expected
+#
+source $TESTLIBS
+MASTER_DIR=master
+STANDBY_DIR=standby
+STANDBY2_DIR=standby2
+num_tests=6
+success_count=0
+PSQL=$PGBIN/psql
+PG_CTL=$PGBIN/pg_ctl
+
+rm -fr $MASTER_DIR
+rm -fr $STANDBY_DIR
+rm -fr $STANDBY2_DIR
+
+mkdir $MASTER_DIR
+mkdir $STANDBY_DIR
+mkdir $STANDBY2_DIR
+
+
+# dir in master directory
+cd $MASTER_DIR
+
+# create master environment
+echo -n "creating master pgpool and PostgreSQL clusters..."
+$PGPOOL_SETUP -m s -n 2 -p 11000|| exit 1
+echo "master setup done."
+
+
+# copy the configurations from master to standby
+cp -r etc ../$STANDBY_DIR/
+
+# copy the configurations from master to standby2
+cp -r etc ../$STANDBY2_DIR/
+
+source ./bashrc.ports
+cat ../master.conf >> etc/pgpool.conf
+
+./startall
+wait_for_pgpool_startup
+
+
+# back to test root dir
+cd ..
+
+
+# create standby environment and start pgpool
+
+mkdir $STANDBY_DIR/log
+echo -n "creating standby pgpool..."
+cat standby.conf >> $STANDBY_DIR/etc/pgpool.conf
+# since we are using the same pgpool-II conf as the master, change the pid file path in the standby pgpool conf
+echo "pid_file_name = '$PWD/pgpool2.pid'" >> $STANDBY_DIR/etc/pgpool.conf
+echo "logdir = $STANDBY_DIR/log" >> $STANDBY_DIR/etc/pgpool.conf
+# start the standby pgpool-II by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY_DIR/etc/pgpool.conf -F $STANDBY_DIR/etc/pcp.conf -a $STANDBY_DIR/etc/pool_hba.conf > $STANDBY_DIR/log/pgpool.log 2>&1 &
+
+# create standby2 environment but do not start pgpool
+mkdir $STANDBY2_DIR/log
+echo -n "creating standby2 pgpool..."
+cat standby2.conf >> $STANDBY2_DIR/etc/pgpool.conf
+# since we are using the same pgpool-II conf as the master, change the pid file path in the standby2 pgpool conf
+echo "pid_file_name = '$PWD/pgpool3.pid'" >> $STANDBY2_DIR/etc/pgpool.conf
+echo "logdir = $STANDBY2_DIR/log" >> $STANDBY2_DIR/etc/pgpool.conf
+# start the standby2 pgpool-II by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $STANDBY2_DIR/etc/pgpool.conf -F $STANDBY2_DIR/etc/pcp.conf -a $STANDBY2_DIR/etc/pool_hba.conf > $STANDBY2_DIR/log/pgpool.log 2>&1 &
+
+# First test check if both pgpool-II have found their correct place in watchdog cluster.
+echo "Waiting for the pgpool master..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "I am the cluster leader node" $MASTER_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Master brought up successfully."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# now check if standby1 has successfully joined and connected to the master.
+echo "Waiting for the standby1 to join cluster..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "successfully joined the watchdog cluster as standby node" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby successfully connected."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+# now check if standby2 has successfully joined and connected to the master.
+echo "Waiting for the standby2 to join cluster..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "successfully joined the watchdog cluster as standby node" $STANDBY2_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby2 successfully connected."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+#shutdown master and one PG server by hand
+$PGPOOL_INSTALL_DIR/bin/pgpool -D -n -f $MASTER_DIR/etc/pgpool.conf -m f stop
+$PG_CTL -D $MASTER_DIR/data1 -m f stop
+
+# Check if the standby has taken over as the new master after the old master was shut down.
+echo "Waiting for the standby to become new master..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep "I am the cluster leader node" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby became new master successfully."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+#give some time to execute failover
+sleep 5
+
+# Check if the standby (new master) has executed the failover for the downed backend.
+echo "Waiting for the standby to execute failover..."
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ grep " failover done" $STANDBY_DIR/log/pgpool.log > /dev/null 2>&1
+ if [ $? = 0 ];then
+ success_count=$(( success_count + 1 ))
+ echo "Standby became new master successfully."
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+# check to see if all Pgpool-II agrees that the failover request is
+# executed
+echo "Checking if all Pgpool-II agrees that the failover request was executed"
+for i in 1 2 3 4 5 6 7 8 9 10
+do
+ n=0
+ for p in 11100 11200
+ do
+ $PSQL -p $p -c "show pool_nodes" test|grep standby|grep down >/dev/null 2>&1
+ if [ $? = 0 ];then
+ n=$(( n + 1))
+ fi
+ done
+ if [ $n -eq 2 ];then
+ success_count=$(( success_count + 1 ))
+ echo "All Pgpool-II agrees that the failover request is executed"
+ break;
+ fi
+ echo "[check] $i times"
+ sleep 2
+done
+
+
+# we are done. Just stop the standby pgpool-II
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY_DIR/etc/pgpool.conf -m f stop
+$PGPOOL_INSTALL_DIR/bin/pgpool -f $STANDBY2_DIR/etc/pgpool.conf -m f stop
+cd master
+./shutdownall
+
+echo "$success_count out of $num_tests successfull";
+
+if test $success_count -eq $num_tests
+then
+ exit 0
+fi
+
+exit 1
{
case WD_FAILOVER_WAITING_FOR_CONSENSUS:
ereport(LOG,
- (errmsg("remote node \"%s\" is asking to degenerate quarantined backend node",wdNode->nodeName)));
+ (errmsg("remote node \"%s\" is asking to inform about quarantined backend nodes",wdNode->nodeName)));
register_inform_quarantine_nodes_req();
break;
}
else if (WD_MASTER_NODE == wdNode && oldQuorumStatus != wdNode->quorum_status)
{
- /* inform Pgpool man about quorum status changes */
+ /* inform Pgpool main about quorum status changes */
register_watchdog_quorum_change_interupt();
}
}