From 935be3d669a57c2b0ee36fcbbceb9bc138d515e7 Mon Sep 17 00:00:00 2001 From: Ian Barwick Date: Wed, 28 Aug 2019 15:52:14 +0900 Subject: [PATCH] doc: update example PostgreSQL version references to Pg12 --- doc/configuration-file.xml | 2 +- doc/repmgr-node-rejoin.xml | 2 +- doc/repmgr-node-service.xml | 26 ++++++------- doc/repmgrd-configuration.xml | 6 +-- doc/repmgrd-operation.xml | 2 +- doc/repmgrd-overview.xml | 70 +++++++++++++++++------------------ 6 files changed, 54 insertions(+), 54 deletions(-) diff --git a/doc/configuration-file.xml b/doc/configuration-file.xml index a919aa86..3b39b4e6 100644 --- a/doc/configuration-file.xml +++ b/doc/configuration-file.xml @@ -56,7 +56,7 @@ node_id=1 node_name= node1 conninfo ='host=node1 dbname=repmgr user=repmgr connect_timeout=2' -data_directory = /var/lib/pgsql/11/data +data_directory = /var/lib/pgsql/12/data diff --git a/doc/repmgr-node-rejoin.xml b/doc/repmgr-node-rejoin.xml index e5ddf11e..69427a4f 100644 --- a/doc/repmgr-node-rejoin.xml +++ b/doc/repmgr-node-rejoin.xml @@ -356,7 +356,7 @@ repmgr node rejoin attempts to determine whether it will succeed by comparing the timelines and relative WAL positions of the local node (rejoin candidate) and primary (rejoin target). This is particularly important if planning to use pg_rewind, - which currently (as of PostgreSQL 11) may appear to succeed (or indicate there is no action + which currently (as of PostgreSQL 12) may appear to succeed (or indicate there is no action needed) but potentially allow an impossible action, such as trying to rejoin a standby to a primary which is behind the standby. &repmgr; will prevent this situation from occurring. 
diff --git a/doc/repmgr-node-service.xml b/doc/repmgr-node-service.xml index 86542587..a4eec443 100644 --- a/doc/repmgr-node-service.xml +++ b/doc/repmgr-node-service.xml @@ -114,38 +114,38 @@ See what action would be taken for a restart: -[postgres@node1 ~]$ repmgr -f /etc/repmgr/11/repmgr.conf node service --action=restart --checkpoint --dry-run +[postgres@node1 ~]$ repmgr -f /etc/repmgr/12/repmgr.conf node service --action=restart --checkpoint --dry-run INFO: a CHECKPOINT would be issued here -INFO: would execute server command "sudo service postgresql-11 restart" +INFO: would execute server command "sudo service postgresql-12 restart" Restart the PostgreSQL instance: -[postgres@node1 ~]$ repmgr -f /etc/repmgr/11/repmgr.conf node service --action=restart --checkpoint +[postgres@node1 ~]$ repmgr -f /etc/repmgr/12/repmgr.conf node service --action=restart --checkpoint NOTICE: issuing CHECKPOINT -DETAIL: executing server command "sudo service postgresql-11 restart" -Redirecting to /bin/systemctl restart postgresql-11.service +DETAIL: executing server command "sudo service postgresql-12 restart" +Redirecting to /bin/systemctl restart postgresql-12.service List all commands: -[postgres@node1 ~]$ repmgr -f /etc/repmgr/11/repmgr.conf node service --list-actions +[postgres@node1 ~]$ repmgr -f /etc/repmgr/12/repmgr.conf node service --list-actions Following commands would be executed for each action: - start: "sudo service postgresql-11 start" - stop: "sudo service postgresql-11 stop" - restart: "sudo service postgresql-11 restart" - reload: "sudo service postgresql-11 reload" - promote: "/usr/pgsql-11/bin/pg_ctl -w -D '/var/lib/pgsql/11/data' promote" + start: "sudo service postgresql-12 start" + stop: "sudo service postgresql-12 stop" + restart: "sudo service postgresql-12 restart" + reload: "sudo service postgresql-12 reload" + promote: "/usr/pgsql-12/bin/pg_ctl -w -D '/var/lib/pgsql/12/data' promote" List a single command: -[postgres@node1 ~]$ repmgr -f 
/etc/repmgr/11/repmgr.conf node service --list-actions --action=promote -/usr/pgsql-11/bin/pg_ctl -w -D '/var/lib/pgsql/11/data' promote +[postgres@node1 ~]$ repmgr -f /etc/repmgr/12/repmgr.conf node service --list-actions --action=promote +/usr/pgsql-12/bin/pg_ctl -w -D '/var/lib/pgsql/12/data' promote diff --git a/doc/repmgrd-configuration.xml b/doc/repmgrd-configuration.xml index dd28650a..e1fe04f8 100644 --- a/doc/repmgrd-configuration.xml +++ b/doc/repmgrd-configuration.xml @@ -536,10 +536,10 @@ - Example (for &repmgr; with PostgreSQL 11 on CentOS 7): + Example (for &repmgr; with PostgreSQL 12 on CentOS 7): -repmgrd_service_start_command='sudo systemctl repmgr11 start' -repmgrd_service_stop_command='sudo systemctl repmgr11 stop' +repmgrd_service_start_command='sudo systemctl repmgr12 start' +repmgrd_service_stop_command='sudo systemctl repmgr12 stop' diff --git a/doc/repmgrd-operation.xml b/doc/repmgrd-operation.xml index c61bb166..8b259dfd 100644 --- a/doc/repmgrd-operation.xml +++ b/doc/repmgrd-operation.xml @@ -47,7 +47,7 @@ - For major PostgreSQL upgrades, e.g. from PostgreSQL 10 to PostgreSQL 11, + For major PostgreSQL upgrades, e.g. from PostgreSQL 11 to PostgreSQL 12, &repmgrd; should be shut down completely and only started up once the &repmgr; packages for the new PostgreSQL major version have been installed. 
diff --git a/doc/repmgrd-overview.xml b/doc/repmgrd-overview.xml index 53728bc3..970f0594 100644 --- a/doc/repmgrd-overview.xml +++ b/doc/repmgrd-overview.xml @@ -100,11 +100,11 @@ Start &repmgrd; on each standby and verify that it's running by examining the log output, which at log level INFO will look like this: - [2019-03-15 06:32:05] [NOTICE] repmgrd (repmgrd 4.3) starting up - [2019-03-15 06:32:05] [INFO] connecting to database "host=node2 dbname=repmgr user=repmgr connect_timeout=2" - INFO: set_repmgrd_pid(): provided pidfile is /var/run/repmgr/repmgrd-11.pid - [2019-03-15 06:32:05] [NOTICE] starting monitoring of node "node2" (ID: 2) - [2019-03-15 06:32:05] [INFO] monitoring connection to upstream node "node1" (ID: 1) + [2019-08-15 07:14:42] [NOTICE] repmgrd (repmgrd 5.0) starting up + [2019-08-15 07:14:42] [INFO] connecting to database "host=node2 dbname=repmgr user=repmgr connect_timeout=2" + INFO: set_repmgrd_pid(): provided pidfile is /var/run/repmgr/repmgrd-12.pid + [2019-08-15 07:14:42] [NOTICE] starting monitoring of node "node2" (ID: 2) + [2019-08-15 07:14:42] [INFO] monitoring connection to upstream node "node1" (ID: 1) Each &repmgrd; should also have recorded its successful startup as an event: @@ -112,9 +112,9 @@ $ repmgr -f /etc/repmgr.conf cluster event --event=repmgrd_start Node ID | Name | Event | OK | Timestamp | Details ---------+-------+---------------+----+---------------------+-------------------------------------------------------- - 3 | node3 | repmgrd_start | t | 2019-03-14 04:17:30 | monitoring connection to upstream node "node1" (ID: 1) - 2 | node2 | repmgrd_start | t | 2019-03-14 04:11:47 | monitoring connection to upstream node "node1" (ID: 1) - 1 | node1 | repmgrd_start | t | 2019-03-14 04:04:31 | monitoring cluster primary "node1" (ID: 1) + 3 | node3 | repmgrd_start | t | 2019-08-15 07:14:42 | monitoring connection to upstream node "node1" (ID: 1) + 2 | node2 | repmgrd_start | t | 2019-08-15 07:14:41 | monitoring connection to 
upstream node "node1" (ID: 1) + 1 | node1 | repmgrd_start | t | 2019-08-15 07:14:39 | monitoring cluster primary "node1" (ID: 1) Now stop the current primary server with e.g.: @@ -128,33 +128,33 @@ decision is made. This is an extract from the log of a standby server (node2) which has promoted to new primary after failure of the original primary (node1). - [2019-03-15 06:37:50] [WARNING] unable to connect to upstream node "node1" (ID: 1) - [2019-03-15 06:37:50] [INFO] checking state of node 1, 1 of 3 attempts - [2019-03-15 06:37:50] [INFO] sleeping 5 seconds until next reconnection attempt - [2019-03-15 06:37:55] [INFO] checking state of node 1, 2 of 3 attempts - [2019-03-15 06:37:55] [INFO] sleeping 5 seconds until next reconnection attempt - [2019-03-15 06:38:00] [INFO] checking state of node 1, 3 of 3 attempts - [2019-03-15 06:38:00] [WARNING] unable to reconnect to node 1 after 3 attempts - [2019-03-15 06:38:00] [INFO] primary and this node have the same location ("default") - [2019-03-15 06:38:00] [INFO] local node's last receive lsn: 0/900CBF8 - [2019-03-15 06:38:00] [INFO] node 3 last saw primary node 12 second(s) ago - [2019-03-15 06:38:00] [INFO] last receive LSN for sibling node "node3" (ID: 3) is: 0/900CBF8 - [2019-03-15 06:38:00] [INFO] node "node3" (ID: 3) has same LSN as current candidate "node2" (ID: 2) - [2019-03-15 06:38:00] [INFO] visible nodes: 2; total nodes: 2; no nodes have seen the primary within the last 4 seconds - [2019-03-15 06:38:00] [NOTICE] promotion candidate is "node2" (ID: 2) - [2019-03-15 06:38:00] [NOTICE] this node is the winner, will now promote itself and inform other nodes - [2019-03-15 06:38:00] [INFO] promote_command is: - "/usr/pgsql-11/bin/repmgr -f /etc/repmgr/11/repmgr.conf standby promote" + [2019-08-15 07:27:50] [WARNING] unable to connect to upstream node "node1" (ID: 1) + [2019-08-15 07:27:50] [INFO] checking state of node 1, 1 of 3 attempts + [2019-08-15 07:27:50] [INFO] sleeping 5 seconds until next reconnection 
attempt + [2019-08-15 07:27:55] [INFO] checking state of node 1, 2 of 3 attempts + [2019-08-15 07:27:55] [INFO] sleeping 5 seconds until next reconnection attempt + [2019-08-15 07:28:00] [INFO] checking state of node 1, 3 of 3 attempts + [2019-08-15 07:28:00] [WARNING] unable to reconnect to node 1 after 3 attempts + [2019-08-15 07:28:00] [INFO] primary and this node have the same location ("default") + [2019-08-15 07:28:00] [INFO] local node's last receive lsn: 0/900CBF8 + [2019-08-15 07:28:00] [INFO] node 3 last saw primary node 12 second(s) ago + [2019-08-15 07:28:00] [INFO] last receive LSN for sibling node "node3" (ID: 3) is: 0/900CBF8 + [2019-08-15 07:28:00] [INFO] node "node3" (ID: 3) has same LSN as current candidate "node2" (ID: 2) + [2019-08-15 07:28:00] [INFO] visible nodes: 2; total nodes: 2; no nodes have seen the primary within the last 4 seconds + [2019-08-15 07:28:00] [NOTICE] promotion candidate is "node2" (ID: 2) + [2019-08-15 07:28:00] [NOTICE] this node is the winner, will now promote itself and inform other nodes + [2019-08-15 07:28:00] [INFO] promote_command is: + "/usr/pgsql-12/bin/repmgr -f /etc/repmgr/12/repmgr.conf standby promote" NOTICE: promoting standby to primary - DETAIL: promoting server "node2" (ID: 2) using "/usr/pgsql-11/bin/pg_ctl -w -D '/var/lib/pgsql/11/data' promote" + DETAIL: promoting server "node2" (ID: 2) using "/usr/pgsql-12/bin/pg_ctl -w -D '/var/lib/pgsql/12/data' promote" NOTICE: waiting up to 60 seconds (parameter "promote_check_timeout") for promotion to complete NOTICE: STANDBY PROMOTE successful DETAIL: server "node2" (ID: 2) was successfully promoted to primary - [2019-03-15 06:38:01] [INFO] 3 followers to notify - [2019-03-15 06:38:01] [NOTICE] notifying node "node3" (ID: 3) to follow node 2 + [2019-08-15 07:28:01] [INFO] 3 followers to notify + [2019-08-15 07:28:01] [NOTICE] notifying node "node3" (ID: 3) to follow node 2 INFO: node 3 received notification to follow node 2 - [2019-03-15 06:38:01] [INFO] 
switching to primary monitoring mode - [2019-03-15 06:38:01] [NOTICE] monitoring cluster primary "node2" (ID: 2) + [2019-08-15 07:28:01] [INFO] switching to primary monitoring mode + [2019-08-15 07:28:01] [NOTICE] monitoring cluster primary "node2" (ID: 2) The cluster status will now look like this, with the original primary (node1) @@ -176,11 +176,11 @@ $ repmgr -f /etc/repmgr.conf cluster event Node ID | Name | Event | OK | Timestamp | Details ---------+-------+----------------------------+----+---------------------+------------------------------------------------------------- - 3 | node3 | repmgrd_failover_follow | t | 2019-03-15 06:38:03 | node 3 now following new upstream node 2 - 3 | node3 | standby_follow | t | 2019-03-15 06:38:02 | standby attached to upstream node "node2" (ID: 2) - 2 | node2 | repmgrd_reload | t | 2019-03-15 06:38:01 | monitoring cluster primary "node2" (ID: 2) - 2 | node2 | repmgrd_failover_promote | t | 2019-03-15 06:38:01 | node 2 promoted to primary; old primary 1 marked as failed - 2 | node2 | standby_promote | t | 2019-03-15 06:38:01 | server "node2" (ID: 2) was successfully promoted to primary + 3 | node3 | repmgrd_failover_follow | t | 2019-08-15 07:28:03 | node 3 now following new upstream node 2 + 3 | node3 | standby_follow | t | 2019-08-15 07:28:02 | standby attached to upstream node "node2" (ID: 2) + 2 | node2 | repmgrd_reload | t | 2019-08-15 07:28:01 | monitoring cluster primary "node2" (ID: 2) + 2 | node2 | repmgrd_failover_promote | t | 2019-08-15 07:28:01 | node 2 promoted to primary; old primary 1 marked as failed + 2 | node2 | standby_promote | t | 2019-08-15 07:28:01 | server "node2" (ID: 2) was successfully promoted to primary