diff --git a/.gitignore b/.gitignore
index 732cd41a..b6109a38 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,6 +40,8 @@ lib*.pc
# test output
/results/
+/doc/Makefile
+
# other
/.lineno
*.dSYM
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ce1eb340..4faeb503 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -28,4 +28,3 @@ project. For more details see:
Contributors should reformat their code similarly before submitting code to
the project, in order to minimize merge conflicts with other work.
->>>>>>> Add further documentation files
diff --git a/Makefile.in b/Makefile.in
index 4c22a86f..f5aeb7ce 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -63,6 +63,12 @@ Makefile: Makefile.in config.status configure
Makefile.global: Makefile.global.in config.status configure
./config.status $@
+doc:
+ $(MAKE) -C doc all
+
+install-doc:
+ $(MAKE) -C doc install
+
clean: additional-clean
maintainer-clean: additional-maintainer-clean
diff --git a/README.md b/README.md
index 16ecabcb..1403da9d 100644
--- a/README.md
+++ b/README.md
@@ -176,6 +176,12 @@ is not required, but is necessary in the following cases:
### Packages
+* * *
+
+> *NOTE*: packages are currently being prepared for the repmgr 4.0beta release.
+
+* * *
+
We recommend installing `repmgr` using the available packages for your
system.
@@ -985,10 +991,10 @@ active primary, the previous warning will not be displayed:
$ repmgr -f /etc/repmgr.conf cluster show
ID | Name | Role | Status | Upstream | Location | Connection string
- ----+-------+---------+-----------+----------+----------+----------------------------------------------------
- 1 | node1 | primary | - failed | | default | host=node1 dbname=repmgr user=repmgr port=5501
- 2 | node2 | primary | * running | | default | host=node2 dbname=repmgr user=repmgr port=5502
- 3 | node3 | standby | running | node1 | default | host=node3 dbname=repmgr user=repmgr port=5503
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | - failed | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | primary | * running | | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node1 | default | host=node3 dbname=repmgr user=repmgr
However the sole remaining standby is still trying to replicate from the failed
primary; `repmgr standby follow` must now be executed to rectify this situation.
diff --git a/configure b/configure
index 7f679cad..f56ada50 100755
--- a/configure
+++ b/configure
@@ -1871,6 +1871,8 @@ ac_config_files="$ac_config_files Makefile"
ac_config_files="$ac_config_files Makefile.global"
+ac_config_files="$ac_config_files doc/Makefile"
+
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
@@ -2564,6 +2566,7 @@ do
"config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"Makefile.global") CONFIG_FILES="$CONFIG_FILES Makefile.global" ;;
+ "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;;
*) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
esac
diff --git a/configure.in b/configure.in
index ee3448f7..8a99cb8a 100644
--- a/configure.in
+++ b/configure.in
@@ -65,5 +65,6 @@ AC_SUBST(vpath_build)
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES([Makefile.global])
+AC_CONFIG_FILES([doc/Makefile])
AC_OUTPUT
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 00000000..d33f6032
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,5 @@
+HTML.index
+bookindex.sgml
+html-stamp
+html/
+version.sgml
diff --git a/doc/Makefile.in b/doc/Makefile.in
new file mode 100644
index 00000000..0a824f37
--- /dev/null
+++ b/doc/Makefile.in
@@ -0,0 +1,71 @@
+repmgr_subdir = doc
+repmgr_top_builddir = ..
+include $(repmgr_top_builddir)/Makefile.global
+
+ifndef JADE
+JADE = $(missing) jade
+endif
+
+SGMLINCLUDE = -D . -D ${srcdir}
+
+SPFLAGS += -wall -wno-unused-param -wno-empty -wfully-tagged
+
+JADE.html.call = $(JADE) $(JADEFLAGS) $(SPFLAGS) $(SGMLINCLUDE) $(CATALOG) -d stylesheet.dsl -t sgml -i output-html
+
+ALLSGML := $(wildcard $(srcdir)/*.sgml)
+# to build bookindex
+ALMOSTALLSGML := $(filter-out %bookindex.sgml,$(ALLSGML))
+GENERATED_SGML = version.sgml bookindex.sgml
+
+Makefile: Makefile.in
+ cd $(repmgr_top_builddir) && ./config.status doc/Makefile
+
+all: html
+
+html: html-stamp
+
+html-stamp: repmgr.sgml $(ALLSGML) $(GENERATED_SGML) stylesheet.dsl stylesheet.css website-docs.css
+ $(MKDIR_P) html
+ $(JADE.html.call) -i include-index $<
+ cp $(srcdir)/stylesheet.css $(srcdir)/website-docs.css html/
+ touch $@
+
+version.sgml: ${repmgr_top_builddir}/repmgr_version.h
+ { \
+ echo ""; \
+ } > $@
+
+HTML.index: repmgr.sgml $(ALMOSTALLSGML) stylesheet.dsl
+ @$(MKDIR_P) html
+ $(JADE.html.call) -V html-index $<
+
+website-docs.css:
+ @$(MKDIR_P) html
+	curl -f https://www.postgresql.org/media/css/docs.css > ${srcdir}/website-docs.css
+
+bookindex.sgml: HTML.index
+ifdef COLLATEINDEX
+ LC_ALL=C $(PERL) $(COLLATEINDEX) -f -g -i 'bookindex' -o $@ $<
+else
+ @$(missing) collateindex.pl $< $@
+endif
+
+clean:
+ rm -f html-stamp
+ rm -f HTML.index $(GENERATED_SGML)
+
+maintainer-clean:
+ rm -rf html
+ rm -rf Makefile
+
+zip: html
+ cp -r html repmgr-docs-$(REPMGR_VERSION)
+ zip -r repmgr-docs-$(REPMGR_VERSION).zip repmgr-docs-$(REPMGR_VERSION)
+ rm -rf repmgr-docs-$(REPMGR_VERSION)
+
+install: html
+ @$(MKDIR_P) $(DESTDIR)$(docdir)/$(docmoduledir)/repmgr
+ @$(INSTALL_DATA) $(wildcard html/*.html) $(wildcard html/*.css) $(DESTDIR)$(docdir)/$(docmoduledir)/repmgr
+ @echo Installed docs to $(DESTDIR)$(docdir)/$(docmoduledir)/repmgr
+
+.PHONY: all html clean maintainer-clean zip install
diff --git a/doc/appendix-signatures.sgml b/doc/appendix-signatures.sgml
new file mode 100644
index 00000000..30a8ef1b
--- /dev/null
+++ b/doc/appendix-signatures.sgml
@@ -0,0 +1,5 @@
+
+ Verifying digital signatures
+
+ WIP
+
diff --git a/doc/cloning-standbys.sgml b/doc/cloning-standbys.sgml
new file mode 100644
index 00000000..2c1fe095
--- /dev/null
+++ b/doc/cloning-standbys.sgml
@@ -0,0 +1,403 @@
+
+ Cloning standbys
+
+
+
+
+
+ cloning
+ from Barman
+
+ Cloning a standby from Barman
+
+ can use
+ 2ndQuadrant's
+ Barman application
+ to clone a standby (and also as a fallback source for WAL files).
+
+
+
+ Barman (aka PgBarman) should be considered as an integral part of any
+ PostgreSQL replication cluster. For more details see:
+ https://www.pgbarman.org/.
+
+
+
+ Barman support provides the following advantages:
+
+
+
+ the primary node does not need to perform a new backup every time a
+ new standby is cloned
+
+
+
+
+ a standby node can be disconnected for longer periods without losing
+ the ability to catch up, and without causing accumulation of WAL
+ files on the primary node
+
+
+
+
+ WAL management on the primary becomes much easier as there's no need
+ to use replication slots, and wal_keep_segments
+ does not need to be set.
+
+
+
+
+
+
+ Prerequisites for cloning from Barman
+
+ In order to enable Barman support for repmgr standby clone, the following
+ prerequisites must be met:
+
+
+
+ the barman_server setting in repmgr.conf is the same as the
+ server configured in Barman;
+
+
+
+
+ the barman_host setting in repmgr.conf is set to the SSH
+ hostname of the Barman server;
+
+
+
+
+ the restore_command setting in repmgr.conf is configured to
+ use a copy of the barman-wal-restore script shipped with the
+ barman-cli package (see below);
+
+
+
+
+ the Barman catalogue includes at least one valid backup for this server.
+
+
+
+
+
+
+ Barman support is automatically enabled if barman_server
+ is set. Normally it is good practice to use Barman, for instance
+ when fetching a base backup while cloning a standby; in any case,
+ Barman mode can be disabled using the --without-barman
+ command line option.
+
+
+
+
+ If you have a non-default SSH configuration on the Barman
+ server, e.g. using a port other than 22, then you can set those
+ parameters in a dedicated Host section in ~/.ssh/config
+ corresponding to the value of barman_host in
+ repmgr.conf. See the Host
+ section in man 5 ssh_config for more details.
+
+
+
+ It's now possible to clone a standby from Barman, e.g.:
+
+ NOTICE: using configuration file "/etc/repmgr.conf"
+ NOTICE: destination directory "/var/lib/postgresql/data" provided
+ INFO: connecting to Barman server to verify backup for test_cluster
+ INFO: checking and correcting permissions on existing directory "/var/lib/postgresql/data"
+ INFO: creating directory "/var/lib/postgresql/data/repmgr"...
+ INFO: connecting to Barman server to fetch server parameters
+ INFO: connecting to upstream node
+ INFO: connected to source node, checking its state
+ INFO: successfully connected to source node
+ DETAIL: current installation size is 29 MB
+ NOTICE: retrieving backup from Barman...
+ receiving file list ...
+ (...)
+ NOTICE: standby clone (from Barman) complete
+ NOTICE: you can now start your PostgreSQL server
+ HINT: for example: pg_ctl -D /var/lib/postgresql/data start
+
+
+
+
+ Using Barman as a WAL file source
+
+ As a fallback in case streaming replication is interrupted, PostgreSQL can optionally
+ retrieve WAL files from an archive, such as that provided by Barman. This is done by
+ setting restore_command in recovery.conf to
+ a valid shell command which can retrieve a specified WAL file from the archive.
+
+
+ barman-wal-restore is a Python script provided as part of the barman-cli
+ package (Barman 2.0 and later; for Barman 1.x the script is provided separately as
+ barman-wal-restore.py) which performs this function for Barman.
+
+
+ To use barman-wal-restore with &repmgr;
+ and assuming Barman is located on the barmansrv host
+ and that barman-wal-restore is located as an executable at
+ /usr/bin/barman-wal-restore,
+ repmgr.conf should include the following lines:
+
+ barman_host=barmansrv
+ barman_server=somedb
+ restore_command=/usr/bin/barman-wal-restore barmansrv somedb %f %p
+
+
+
+ barman-wal-restore supports command line switches to
+ control parallelism (--parallel=N) and compression (
+ --bzip2, --gzip).
+
+
+
+
+ To use a non-default Barman configuration file on the Barman server,
+ specify this in repmgr.conf with barman_config:
+
+ barman_config=/path/to/barman.conf
+
+
+
+
+
+
+
+ cloning
+ replication slots
+
+
+
+ replication slots
+ cloning
+
+ Cloning and replication slots
+
+ Replication slots were introduced with PostgreSQL 9.4 and are designed to ensure
+ that any standby connected to the primary using a replication slot will always
+ be able to retrieve the required WAL files. This removes the need to manually
+ manage WAL file retention by estimating the number of WAL files that need to
+ be maintained on the primary using wal_keep_segments.
+ Do however be aware that if a standby is disconnected, WAL will continue to
+ accumulate on the primary until either the standby reconnects or the replication
+ slot is dropped.
+
+
+ To enable &repmgr; to use replication slots, set the boolean parameter
+ use_replication_slots in repmgr.conf:
+
+ use_replication_slots=true
+
+
+
+ Replication slots must be enabled in postgresql.conf by
+ setting the parameter max_replication_slots to at least the
+ number of expected standbys (changes to this parameter require a server restart).
+
+
+ When cloning a standby, &repmgr; will automatically generate an appropriate
+ slot name, which is stored in the repmgr.nodes table, and create the slot
+ on the upstream node:
+
+ repmgr=# SELECT node_id, upstream_node_id, active, node_name, type, priority, slot_name
+ FROM repmgr.nodes ORDER BY node_id;
+ node_id | upstream_node_id | active | node_name | type | priority | slot_name
+ ---------+------------------+--------+-----------+---------+----------+---------------
+ 1 | | t | node1 | primary | 100 | repmgr_slot_1
+ 2 | 1 | t | node2 | standby | 100 | repmgr_slot_2
+ 3 | 1 | t | node3 | standby | 100 | repmgr_slot_3
+ (3 rows)
+
+
+ repmgr=# SELECT slot_name, slot_type, active, active_pid FROM pg_replication_slots ;
+ slot_name | slot_type | active | active_pid
+ ---------------+-----------+--------+------------
+ repmgr_slot_2 | physical | t | 23658
+ repmgr_slot_3 | physical | t | 23687
+ (2 rows)
+
+
+ Note that a slot name will be created by default for the primary but not
+ actually used unless the primary is converted to a standby using e.g.
+ repmgr standby switchover.
+
+
+ Further information on replication slots in the PostgreSQL documentation:
+ https://www.postgresql.org/docs/current/interactive/warm-standby.html#STREAMING-REPLICATION-SLOTS
+
+
+
+ While replication slots can be useful for streaming replication, it's
+ recommended to monitor for inactive slots as these will cause WAL files to
+ build up indefinitely, possibly leading to server failure.
+
+
+ As an alternative we recommend using 2ndQuadrant's Barman,
+ which offloads WAL management to a separate server, negating the need to use replication
+ slots to reserve WAL. See section
+ for more details on using &repmgr; together with Barman.
+
+
+
+
+
+
+ cloning
+ cascading replication
+
+ Cloning and cascading replication
+
+ Cascading replication, introduced with PostgreSQL 9.2, enables a standby server
+ to replicate from another standby server rather than directly from the primary,
+ meaning replication changes "cascade" down through a hierarchy of servers. This
+ can be used to reduce load on the primary and minimize bandwidth usage between
+ sites. For more details, see the
+
+ PostgreSQL cascading replication documentation.
+
+
+ &repmgr; supports cascading replication. When cloning a standby,
+ set the command-line parameter --upstream-node-id to the
+ node_id of the server the standby should connect to, and
+ &repmgr; will create recovery.conf to point to it. Note
+ that if --upstream-node-id is not explicitly provided,
+ &repmgr; will set the standby's recovery.conf to
+ point to the primary node.
+
+
+ To demonstrate cascading replication, first ensure you have a primary and standby
+ set up as shown in the .
+ Then create an additional standby server with repmgr.conf looking
+ like this:
+
+ node_id=3
+ node_name=node3
+ conninfo='host=node3 user=repmgr dbname=repmgr'
+ data_directory='/var/lib/postgresql/data'
+
+
+ Clone this standby (using the connection parameters for the existing standby),
+ ensuring --upstream-node-id is provided with the node_id
+ of the previously created standby (if following the example, this will be 2):
+
+ $ repmgr -h node2 -U repmgr -d repmgr -f /etc/repmgr.conf standby clone --upstream-node-id=2
+ NOTICE: using configuration file "/etc/repmgr.conf"
+ NOTICE: destination directory "/var/lib/postgresql/data" provided
+ INFO: connecting to upstream node
+ INFO: connected to source node, checking its state
+ NOTICE: checking for available walsenders on upstream node (2 required)
+ INFO: sufficient walsenders available on upstream node (2 required)
+ INFO: successfully connected to source node
+ DETAIL: current installation size is 29 MB
+ INFO: creating directory "/var/lib/postgresql/data"...
+ NOTICE: starting backup (using pg_basebackup)...
+ HINT: this may take some time; consider using the -c/--fast-checkpoint option
+ INFO: executing: 'pg_basebackup -l "repmgr base backup" -D /var/lib/postgresql/data -h node2 -U repmgr -X stream '
+ NOTICE: standby clone (using pg_basebackup) complete
+ NOTICE: you can now start your PostgreSQL server
+ HINT: for example: pg_ctl -D /var/lib/postgresql/data start
+
+ then register it (note that --upstream-node-id must be provided here
+ too):
+
+ $ repmgr -f /etc/repmgr.conf standby register --upstream-node-id=2
+ NOTICE: standby node "node3" (ID: 3) successfully registered
+
+
+
+ After starting the standby, the cluster will look like this, showing that node3
+ is attached to node2, not the primary (node1).
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | * running | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | standby | running | node1 | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node2 | default | host=node3 dbname=repmgr user=repmgr
+
+
+
+
+ Under some circumstances when setting up a cascading replication
+ cluster, you may wish to clone a downstream standby whose upstream node
+ does not yet exist. In this case you can clone from the primary (or
+ another upstream node); provide the parameter --upstream-conninfo
+ to explicitly set the upstream's primary_conninfo string
+ in recovery.conf.
+
+
+
+
+
+
+ cloning
+ advanced options
+
+ Advanced cloning options
+
+
+ pg_basebackup options when cloning a standby
+
+ By default, pg_basebackup performs a checkpoint before beginning the backup
+ process. However, a normal checkpoint may take some time to complete;
+ a fast checkpoint can be forced with the -c/--fast-checkpoint option.
+ However this may impact performance of the server being cloned from
+ so should be used with care.
+
+
+ Further options can be passed to the pg_basebackup utility via
+ the setting pg_basebackup_options in repmgr.conf.
+ See the PostgreSQL pg_basebackup documentation
+ for more details of available options.
+
+
+
+
+ Managing passwords
+
+ If replication connections to a standby's upstream server are password-protected,
+ the standby must be able to provide the password so it can begin streaming
+ replication.
+
+
+ The recommended way to do this is to store the password in the postgres system
+ user's ~/.pgpass file. It's also possible to store the password in the
+ environment variable PGPASSWORD, however this is not recommended for
+ security reasons. For more details see the
+ PostgreSQL password file documentation.
+
+
+ If, for whatever reason, you wish to include the password in recovery.conf,
+ set use_primary_conninfo_password to true in
+ repmgr.conf. This will read a password set in PGPASSWORD
+ (but not ~/.pgpass) and place it into the primary_conninfo
+ string in recovery.conf. Note that PGPASSWORD
+ will need to be set during any action which causes recovery.conf to be
+ rewritten, e.g. .
+
+
+ It is of course also possible to include the password value in the conninfo
+ string for each node, but this is obviously a security risk and should be
+ avoided.
+
+
+
+
+ Separate replication user
+
+ In some circumstances it might be desirable to create a dedicated replication-only
+ user (in addition to the user who manages the &repmgr; metadata). In this case,
+ the replication user should be set in repmgr.conf via the parameter
+ replication_user; &repmgr; will use this value when making
+ replication connections and generating recovery.conf. This
+ value will also be stored in the repmgr.nodes
+ table for each node; it no longer needs to be explicitly specified when
+ cloning a node or executing .
+
+
+
+
+
+
diff --git a/doc/configuration-file-settings.sgml b/doc/configuration-file-settings.sgml
new file mode 100644
index 00000000..7d868dde
--- /dev/null
+++ b/doc/configuration-file-settings.sgml
@@ -0,0 +1,114 @@
+
+ Configuration file settings
+
+ Each repmgr.conf file must contain the following parameters:
+
+
+
+
+ node_id (int)
+
+ node_id configuration file parameter
+
+
+
+
+ A unique integer greater than zero which identifies the node.
+
+
+
+
+
+ node_name (string)
+
+ node_name configuration file parameter
+
+
+
+
+ An arbitrary (but unique) string; we recommend using the server's hostname
+ or another identifier unambiguously associated with the server to avoid
+ confusion. Avoid choosing names which reflect the node's current role,
+ e.g. primary or standby1
+ as roles can change and if you end up in a solution where the current primary is
+ called standby1 (for example), things will be confusing
+ to say the least.
+
+
+
+
+
+ conninfo (string)
+
+ conninfo configuration file parameter
+
+
+
+
+ Database connection information as a conninfo string.
+ All servers in the cluster must be able to connect to
+ the local node using this string.
+
+
+ For details on conninfo strings, see section Connection Strings>
+ in the PostgreSQL documentation.
+
+
+ If repmgrd is in use, consider explicitly setting
+ connect_timeout in the conninfo
+ string to determine the length of time which elapses before a network
+ connection attempt is abandoned; for details see
+ the PostgreSQL documentation>.
+
+
+
+
+
+ data_directory (string)
+
+ data_directory configuration file parameter
+
+
+
+
+ The node's data directory. This is needed by repmgr
+ when performing operations when the PostgreSQL instance
+ is not running and there's no other way of determining
+ the data directory.
+
+
+
+
+
+
+
+
+
+ For a full list of annotated configuration items, see the file
+ repmgr.conf.sample>.
+
+
+
+
+ The following parameters in the configuration file can be overridden with
+ command line options:
+
+
+
+ -L/--log-level overrides log_level in
+ repmgr.conf
+
+
+
+
+ -b/--pg_bindir overrides pg_bindir in
+ repmgr.conf
+
+
+
+
+
+
+
diff --git a/doc/configuration-file.sgml b/doc/configuration-file.sgml
new file mode 100644
index 00000000..5b87dbcc
--- /dev/null
+++ b/doc/configuration-file.sgml
@@ -0,0 +1,46 @@
+
+ Configuration file location
+
+ repmgr and repmgrd
+ use a common configuration file, by default called
+ repmgr.conf (although any name can be used if explicitly specified).
+ repmgr.conf must contain a number of required parameters, including
+ the database connection string for the local node and the location
+ of its data directory; other values will be inferred from defaults if
+ not explicitly supplied. See section `configuration file parameters`
+ for more details.
+
+
+
+ The configuration file will be searched for in the following locations:
+
+
+ a configuration file specified by the `-f/--config-file` command line option
+
+
+
+ a location specified by the package maintainer (if repmgr
+ was installed from a package and the package maintainer has specified the configuration
+ file location)
+
+
+
+ repmgr.conf in the local directory
+
+
+ /etc/repmgr.conf
+
+
+ the directory reported by pg_config --sysconfdir
+
+
+
+
+
+ Note that if a file is explicitly specified with -f/--config-file,
+ an error will be raised if it is not found or not readable and no attempt will be made to
+ check default locations; this is to prevent repmgr unexpectedly
+ reading the wrong file.
+
+
+
diff --git a/doc/configuration.sgml b/doc/configuration.sgml
new file mode 100644
index 00000000..5ee77f07
--- /dev/null
+++ b/doc/configuration.sgml
@@ -0,0 +1,7 @@
+
+ repmgr configuration
+
+ &configuration-file;
+ &configuration-file-settings;
+
+
diff --git a/doc/event-notifications.sgml b/doc/event-notifications.sgml
new file mode 100644
index 00000000..46b327d3
--- /dev/null
+++ b/doc/event-notifications.sgml
@@ -0,0 +1,181 @@
+
+ Event Notifications
+
+ Each time `repmgr` or `repmgrd` perform a significant event, a record
+ of that event is written into the `repmgr.events` table together with
+ a timestamp, an indication of failure or success, and further details
+ if appropriate. This is useful for gaining an overview of events
+ affecting the replication cluster. However note that this table has
+ advisory character and should be used in combination with the `repmgr`
+ and PostgreSQL logs to obtain details of any events.
+
+
+ Example output after a primary was registered and a standby cloned
+ and registered:
+
+ repmgr=# SELECT * from repmgr.events ;
+ node_id | event | successful | event_timestamp | details
+ ---------+------------------+------------+-------------------------------+-------------------------------------------------------------------------------------
+ 1 | primary_register | t | 2016-01-08 15:04:39.781733+09 |
+ 2 | standby_clone | t | 2016-01-08 15:04:49.530001+09 | Cloned from host 'repmgr_node1', port 5432; backup method: pg_basebackup; --force: N
+ 2 | standby_register | t | 2016-01-08 15:04:50.621292+09 |
+ (3 rows)
+
+
+ Alternatively, use to output a
+ formatted list of events.
+
+
+ Additionally, event notifications can be passed to a user-defined program
+ or script which can take further action, e.g. send email notifications.
+ This is done by setting the `event_notification_command` parameter in
+ `repmgr.conf`.
+
+
+ This parameter accepts the following format placeholders:
+
+
+
+
+
+
+
+ node ID
+
+
+
+
+
+
+
+
+ event type
+
+
+
+
+
+
+
+
+ success (1 or 0)
+
+
+
+
+
+
+
+ timestamp
+
+
+
+
+
+
+
+
+ details
+
+
+
+
+
+ The values provided for %t and %d
+ will probably contain spaces, so should be quoted in the provided command
+ configuration, e.g.:
+
+ event_notification_command='/path/to/some/script %n %e %s "%t" "%d"'
+
+
+
+ Additionally the following format placeholders are available for the event
+ type bdr_failover and optionally bdr_recovery:
+
+
+
+
+
+
+ conninfo string of the next available node
+
+
+
+
+
+
+
+ name of the next available node
+
+
+
+
+
+ These should always be quoted.
+
+
+ By default, all notification types will be passed to the designated script;
+ the notification types can be filtered to explicitly named ones:
+
+
+
+ primary_register
+
+
+ standby_register
+
+
+ standby_unregister
+
+
+ standby_clone
+
+
+ standby_promote
+
+
+ standby_follow
+
+
+ standby_disconnect_manual
+
+
+ repmgrd_start
+
+
+ repmgrd_shutdown
+
+
+ repmgrd_failover_promote
+
+
+ repmgrd_failover_follow
+
+
+ bdr_failover
+
+
+ bdr_reconnect
+
+
+ bdr_recovery
+
+
+ bdr_register
+
+
+ bdr_unregister
+
+
+
+
+
+ Note that under some circumstances (e.g. when no replication cluster primary
+ could be located), it will not be possible to write an entry into the
+ repmgr.events
+ table, in which case executing a script via event_notification_command
+ can serve as a fallback by generating some form of notification.
+
+
+
+
diff --git a/doc/filelist.sgml b/doc/filelist.sgml
new file mode 100644
index 00000000..29fe40d3
--- /dev/null
+++ b/doc/filelist.sgml
@@ -0,0 +1,77 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/follow-new-primary.sgml b/doc/follow-new-primary.sgml
new file mode 100644
index 00000000..d1e8f65f
--- /dev/null
+++ b/doc/follow-new-primary.sgml
@@ -0,0 +1,42 @@
+
+ Following a new primary
+
+ Following the failure or removal of the replication cluster's existing primary
+ server, can be used to make 'orphaned' standbys
+ follow the new primary and catch up to its current state.
+
+
+ To demonstrate this, assuming a replication cluster in the same state as the
+ end of the preceding section (),
+ execute this:
+
+ $ repmgr -f /etc/repmgr.conf standby follow
+ INFO: changing node 3's primary to node 2
+ NOTICE: restarting server using "pg_ctl -l /var/log/postgresql/startup.log -w -D '/var/lib/postgresql/data' restart"
+ waiting for server to shut down......... done
+ server stopped
+ waiting for server to start.... done
+ server started
+ NOTICE: STANDBY FOLLOW successful
+ DETAIL: node 3 is now attached to node 2
+
+
+
+ The standby is now replicating from the new primary and `repmgr cluster show`
+ output reflects this:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | - failed | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | primary | * running | | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node2 | default | host=node3 dbname=repmgr user=repmgr
+
+
+ Note that with cascading replication, repmgr standby follow can also be
+ used to detach a standby from its current upstream server and follow the
+ primary. However it's currently not possible to have it follow another standby;
+ we hope to improve this in a future release.
+
+
+
diff --git a/doc/install-packages.sgml b/doc/install-packages.sgml
new file mode 100644
index 00000000..727a5009
--- /dev/null
+++ b/doc/install-packages.sgml
@@ -0,0 +1,32 @@
+
+ Installing &repmgr; from packages
+
+We recommend installing `repmgr` using the available packages for your
+system.
+
+
+
+ RedHat/Fedora/CentOS
+
+ RPM packages for `repmgr` are available via Yum through
+ the PostgreSQL Global Development Group RPM repository
+ ( http://yum.postgresql.org/>).
+ Follow the instructions for your distribution (RedHat, CentOS,
+ Fedora, etc.) and architecture as detailed at yum.postgresql.org.
+
+
+ 2ndQuadrant also provides its own RPM packages which are made available
+ at the same time as each `repmgr` release, as it can take some days for
+ them to become available via the main PGDG repository. See here for details:
+ http://repmgr.org/yum-repository.html>
+
+
+
+ Debian/Ubuntu
+ .deb packages for `repmgr` are available from the
+ PostgreSQL Community APT repository (http://apt.postgresql.org/> ).
+ Instructions can be found in the APT section of the PostgreSQL Wiki
+ (https://wiki.postgresql.org/wiki/Apt> ).
+
+
+
diff --git a/doc/install-requirements.sgml b/doc/install-requirements.sgml
new file mode 100644
index 00000000..d9cd6191
--- /dev/null
+++ b/doc/install-requirements.sgml
@@ -0,0 +1,67 @@
+
+ Requirements for installing repmgr
+
+ repmgr is developed and tested on Linux and OS X, but should work on any
+ UNIX-like system supported by PostgreSQL itself. There is no support for
+ Microsoft Windows.
+
+
+
+ From version 4.0, repmgr is compatible with all PostgreSQL versions from 9.4, including PostgreSQL 10.
+
+
+ PostgreSQL 9.3 is supported by repmgr 3.3.
+
+
+
+
+If upgrading from `repmgr 3`, please see the separate upgrade guide
+`doc/upgrading-from-repmgr3.md`.
+
+
+
+
+ All servers in the replication cluster must be running the same major version of
+ PostgreSQL, and we recommend that they also run the same minor version.
+
+
+
+ `repmgr` must be installed on each server in the replication cluster.
+ If installing repmgr from packages, the package version must match the PostgreSQL
+ version. If installing from source, repmgr must be compiled against the same
+ major version.
+
+
+
+ A dedicated system user for `repmgr` is *not* required; as many `repmgr` and
+ `repmgrd` actions require direct access to the PostgreSQL data directory,
+ these commands should be executed by the `postgres` user.
+
+
+
+
+ Passwordless `ssh` connectivity between all servers in the replication cluster
+ is not required, but is necessary in the following cases:
+
+
+ if you need `repmgr` to copy configuration files from outside the PostgreSQL
+ data directory (in which case `rsync` is also required)
+
+
+ to perform switchover operations
+
+
+ when executing `repmgr cluster matrix` and `repmgr cluster crosscheck`
+
+
+
+
+
+
+ We recommend using a session multiplexer utility such as `screen` or
+ `tmux` when performing long-running actions (such as cloning a database)
+ on a remote server - this will ensure the `repmgr` action won't be prematurely
+ terminated if your `ssh` session to the server is interrupted or closed.
+
+
+
diff --git a/doc/install-source.sgml b/doc/install-source.sgml
new file mode 100644
index 00000000..b21d6aa8
--- /dev/null
+++ b/doc/install-source.sgml
@@ -0,0 +1,139 @@
+
+ Installing &repmgr; from source
+
+
+ Prerequisites for installing from source
+
+ To install &repmgr; the prerequisites for compiling
+ &postgres; must be installed. These are described in &postgres;'s
+ documentation
+ on build requirements
+ and build requirements for documentation.
+
+
+
+ Most mainstream Linux distributions and other UNIX variants provide simple
+ ways to install the prerequisites from packages.
+
+
+
+ Debian and Ubuntu: First
+ add the apt.postgresql.org
+ repository to your sources.list if you
+ have not already done so. Then install the pre-requisites for
+ building PostgreSQL with:
+
+ sudo apt-get update
+ sudo apt-get build-dep postgresql-9.6
+
+
+
+
+
+ RHEL or CentOS 6.x or 7.x: install the appropriate repository RPM
+ for your system from
+ yum.postgresql.org. Then install the prerequisites for building
+ PostgreSQL with:
+
+ sudo yum check-update
+ sudo yum groupinstall "Development Tools"
+ sudo yum install yum-utils openjade docbook-dtds docbook-style-dsssl docbook-style-xsl
+ sudo yum-builddep postgresql96
+
+
+
+
+
+
+
+
+ Select the appropriate PostgreSQL versions for your target repmgr version.
+
+
+
+
+
+
+ Getting &repmgr; source code
+
+
+ There are two ways to get the &repmgr; source code: with git, or by downloading tarballs of released versions.
+
+
+
+ Using git to get the &repmgr; sources
+
+
+ Use git if you expect
+ to update often, you want to keep track of development or if you want to contribute
+ changes to &repmgr;. There is no reason not to use git
+ if you're familiar with it.
+
+
+
+ The source for &repmgr; is maintained at
+ https://github.com/2ndQuadrant/repmgr.
+
+
+
+ There are also tags for each &repmgr; release, e.g. REL4_0_STABLE.
+
+
+
+ Clone the source code using git:
+
+ git clone https://github.com/2ndQuadrant/repmgr
+
+
+
+
+ For more information on using git see
+ git-scm.com.
+
+
+
+
+
+ Downloading release source tarballs
+
+
+ Official release source code is uploaded as tarballs to the
+ &repmgr; website along with a tarball checksum and a matching GnuPG
+ signature. See
+ http://repmgr.org/
+ for the download information. See
+ for information on verifying digital signatures.
+
+
+
+ You will need to download the repmgr source, e.g. repmgr-4.0.tar.gz.
+ You may optionally verify the package checksums from the
+ .md5 files and/or verify the GnuPG signatures
+ per .
+
+
+
+ After you unpack the source code archives using tar xf
+ the installation process is the same as if you were installing from a git
+ clone.
+
+
+
+
+
+
+
+ Installation of &repmgr; from source
+
+ To install &repmgr; from source, simply execute:
+
+
+ ./configure && make install
+
+
+ Ensure `pg_config` for the target PostgreSQL version is in `$PATH`.
+
+
+
+
diff --git a/doc/install.sgml b/doc/install.sgml
new file mode 100644
index 00000000..52b827e1
--- /dev/null
+++ b/doc/install.sgml
@@ -0,0 +1,24 @@
+
+ Installation
+
+
+ &repmgr; can be installed from binary packages provided by your operating
+ system's packaging system, or from source.
+
+
+ In general we recommend using binary packages, unless unavailable for your operating system.
+
+
+ Source installs are mainly useful if you want to keep track of the very
+ latest repmgr development and contribute to development. They're also the
+ only option if there are no packages for your operating system yet.
+
+
+ Before installing &repmgr; make sure you satisfy the .
+
+
+ &install-requirements;
+ &install-packages;
+ &install-source;
+
+
diff --git a/doc/legal.sgml b/doc/legal.sgml
new file mode 100644
index 00000000..a3fefe35
--- /dev/null
+++ b/doc/legal.sgml
@@ -0,0 +1,20 @@
+
+
+2017
+
+
+ 2010-2017
+ 2ndQuadrant, Ltd.
+
+
+
+ Legal Notice
+
+
+ repmgr is Copyright © 2010-2017
+ by 2ndQuadrant, Ltd.
+
+
+ add license
+
+
diff --git a/doc/overview.sgml b/doc/overview.sgml
new file mode 100644
index 00000000..a0f511a5
--- /dev/null
+++ b/doc/overview.sgml
@@ -0,0 +1,209 @@
+
+ repmgr overview
+
+
+ This chapter provides a high-level overview of repmgr's components and functionality.
+
+
+ Concepts
+
+
+ This guide assumes that you are familiar with PostgreSQL administration and
+ streaming replication concepts. For further details on streaming
+ replication, see the PostgreSQL documentation section on
+ streaming replication.
+
+
+ The following terms are used throughout the `repmgr` documentation.
+
+
+ replication cluster
+
+
+ In the `repmgr` documentation, "replication cluster" refers to the network
+ of PostgreSQL servers connected by streaming replication.
+
+
+
+
+
+ node
+
+
+ A node is a server within a replication cluster.
+
+
+
+
+
+ upstream node
+
+
+ The node a standby server connects to, in order to receive streaming replication.
+ This is either the primary server or in the case of cascading replication, another
+ standby.
+
+
+
+
+
+ failover
+
+
+ This is the action which occurs if a primary server fails and a suitable standby
+ is promoted as the new primary. The `repmgrd` daemon supports automatic failover
+ to minimise downtime.
+
+
+
+
+
+ switchover
+
+
+ In certain circumstances, such as hardware or operating system maintenance,
+ it's necessary to take a primary server offline; in this case a controlled
+ switchover is necessary, whereby a suitable standby is promoted and the
+ existing primary removed from the replication cluster in a controlled manner.
+ The `repmgr` command line client provides this functionality.
+
+
+
+
+
+ fencing
+
+
+ In a failover situation, following the promotion of a new standby, it's
+ essential that the previous primary does not unexpectedly come back on
+ line, which would result in a split-brain situation. To prevent this,
+ the failed primary should be isolated from applications, i.e. "fenced off".
+
+
+
+
+
+
+
+ Components
+
+ `repmgr` is a suite of open-source tools to manage replication and failover
+ within a cluster of PostgreSQL servers. It supports and enhances PostgreSQL's
+ built-in streaming replication, which provides a single read/write primary server
+ and one or more read-only standbys containing near-real time copies of the primary
+ server's database. It provides two main tools:
+
+
+ repmgr
+
+
+ A command-line tool used to perform administrative tasks such as:
+
+
+ setting up standby servers
+
+
+ promoting a standby server to primary
+
+
+ switching over primary and standby servers
+
+
+ displaying the status of servers in the replication cluster
+
+
+
+
+
+
+
+ repmgrd
+
+
+ A daemon which actively monitors servers in a replication cluster
+ and performs the following tasks:
+
+
+ monitoring and recording replication performance
+
+
+ performing failover by detecting failure of the primary and
+ promoting the most suitable standby server
+
+
+
+ provide notifications about events in the cluster to a user-defined
+ script which can perform tasks such as sending alerts by email
+
+
+
+
+
+
+
+
+
+
+ Repmgr user and metadata
+
+ In order to effectively manage a replication cluster, `repmgr` needs to store
+ information about the servers in the cluster in a dedicated database schema.
+ This schema is automatically created by the `repmgr` extension, which is installed
+ during the first step in initialising a `repmgr`-administered cluster
+ (`repmgr primary register`) and contains the following objects:
+
+
+ Tables
+
+
+
+
+ repmgr.events: records events of interest
+
+
+ repmgr.nodes: connection and status information for each server in the
+ replication cluster
+
+
+ repmgr.monitoring_history: historical standby monitoring information written by `repmgrd`
+
+
+
+
+
+
+ Views
+
+
+
+
+ repmgr.show_nodes: based on the table `repl_nodes`, additionally showing the
+ name of the server's upstream node
+
+
+ repmgr.replication_status: when `repmgrd`'s monitoring is enabled, shows current monitoring
+ status for each standby.
+
+
+
+
+
+
+
+
+
+ The `repmgr` metadata schema can be stored in an existing database or in its own
+ dedicated database. Note that the `repmgr` metadata schema cannot reside on a database
+ server which is not part of the replication cluster managed by `repmgr`.
+
+
+ A database user must be available for `repmgr` to access this database and perform
+ necessary changes. This user does not need to be a superuser, however some operations
+ such as initial installation of the `repmgr` extension will require a superuser
+ connection (this can be specified where required with the command line option
+ `--superuser`).
+
+
+
+
diff --git a/doc/promoting-standby.sgml b/doc/promoting-standby.sgml
new file mode 100644
index 00000000..95cf1158
--- /dev/null
+++ b/doc/promoting-standby.sgml
@@ -0,0 +1,75 @@
+
+ Promoting a standby server with repmgr
+
+ If a primary server fails or needs to be removed from the replication cluster,
+ a new primary server must be designated, to ensure the cluster continues
+ to function correctly. This can be done with ,
+ which promotes the standby on the current server to primary.
+
+
+
+ To demonstrate this, set up a replication cluster with a primary and two attached
+ standby servers so that the cluster looks like this:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | * running | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | standby | running | node1 | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node1 | default | host=node3 dbname=repmgr user=repmgr
+
+
+ Stop the current primary with e.g.:
+
+ $ pg_ctl -D /var/lib/postgresql/data -m fast stop
+
+
+ At this point the replication cluster will be in a partially disabled state, with
+ both standbys accepting read-only connections while attempting to connect to the
+ stopped primary. Note that the &repmgr; metadata table will not yet have been updated;
+ executing will note the discrepancy:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+---------------+----------+----------+--------------------------------------
+ 1 | node1 | primary | ? unreachable | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | standby | running | node1 | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node1 | default | host=node3 dbname=repmgr user=repmgr
+
+ WARNING: following issues were detected
+ node "node1" (ID: 1) is registered as an active primary but is unreachable
+
+
+ Now promote the first standby with:
+
+ $ repmgr -f /etc/repmgr.conf standby promote
+
+
+ This will produce output similar to the following:
+
+ INFO: connecting to standby database
+ NOTICE: promoting standby
+ DETAIL: promoting server using "pg_ctl -l /var/log/postgresql/startup.log -w -D '/var/lib/postgresql/data' promote"
+ server promoting
+ INFO: reconnecting to promoted server
+ NOTICE: STANDBY PROMOTE successful
+ DETAIL: node 2 was successfully promoted to primary
+
+
+ Executing will show the current state; as there is now an
+ active primary, the previous warning will not be displayed:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | - failed | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | primary | * running | | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node1 | default | host=node3 dbname=repmgr user=repmgr
+
+
+ However the sole remaining standby (node3) is still trying to replicate from the failed
+ primary; must now be executed to rectify this situation
+ (see for example).
+
+
+
diff --git a/doc/quickstart.sgml b/doc/quickstart.sgml
new file mode 100644
index 00000000..98df606b
--- /dev/null
+++ b/doc/quickstart.sgml
@@ -0,0 +1,446 @@
+
+ Quick-start guide
+
+
+ This section gives a quick introduction to &repmgr;, including setting up a
+ sample &repmgr; installation and a basic replication cluster.
+
+
+
+ These instructions are not suitable for a production install, as they may not
+ take into account security considerations, proper system administration
+ procedures, etc.
+
+
+
+ Prerequisites for setting up a basic replication cluster with &repmgr;
+
+ The following section will describe how to set up a basic replication cluster
+ with a primary and a standby server using the repmgr
+ command line tool.
+
+
+ We'll assume the primary is called node1 with IP address
+ 192.168.1.11, and the standby is called node2
+ with IP address 192.168.1.12
+
+
+ The following software must be installed on both servers:
+
+
+ PostgreSQL
+
+
+
+ repmgr (matching the installed
+ PostgreSQL major version)
+
+
+
+
+
+
+ At network level, connections between the PostgreSQL port (default: 5432)
+ must be possible in both directions.
+
+
+ If you want repmgr to copy configuration files which are
+ located outside the PostgreSQL data directory, and/or to test switchover
+ functionality, you will also need passwordless SSH connections between both servers, and
+ rsync should be installed.
+
+
+
+ For testing repmgr, it's possible to use multiple PostgreSQL
+ instances running on different ports on the same computer, with
+ passwordless SSH access to localhost enabled.
+
+
+
+
+
+ PostgreSQL configuration
+
+ On the primary server, a PostgreSQL instance must be initialised and running.
+ The following replication settings may need to be adjusted:
+
+
+
+ # Enable replication connections; set this figure to at least one more
+ # than the number of standbys which will connect to this server
+ # (note that repmgr will execute `pg_basebackup` in WAL streaming mode,
+ # which requires two free WAL senders)
+
+ max_wal_senders = 10
+
+ # Ensure WAL files contain enough information to enable read-only queries
+ # on the standby.
+ #
+ # PostgreSQL 9.5 and earlier: one of 'hot_standby' or 'logical'
+ # PostgreSQL 9.6 and later: one of 'replica' or 'logical'
+ # ('hot_standby' will still be accepted as an alias for 'replica')
+ #
+ # See: https://www.postgresql.org/docs/current/static/runtime-config-wal.html#GUC-WAL-LEVEL
+
+ wal_level = 'hot_standby'
+
+ # Enable read-only queries on a standby
+ # (Note: this will be ignored on a primary but we recommend including
+ # it anyway)
+
+ hot_standby = on
+
+ # Enable WAL file archiving
+ archive_mode = on
+
+ # Set archive command to a script or application that will safely store
+ # your WALs in a secure place. /bin/true is an example of a command that
+ # ignores archiving. Use something more sensible.
+ archive_command = '/bin/true'
+
+ # If you have configured `pg_basebackup_options`
+ # in `repmgr.conf` to include the setting `--xlog-method=fetch` (from
+ # PostgreSQL 10 `--wal-method=fetch`), *and* you have not set
+ # `restore_command` in `repmgr.conf`to fetch WAL files from another
+ # source such as Barman, you'll need to set `wal_keep_segments` to a
+ # high enough value to ensure that all WAL files generated while
+ # the standby is being cloned are retained until the standby starts up.
+ #
+ # wal_keep_segments = 5000
+
+
+
+ Rather than editing these settings in the default postgresql.conf
+ file, create a separate file such as postgresql.replication.conf and
+ include it from the end of the main configuration file with:
+ include 'postgresql.replication.conf'.
+
+
+
+
+
+ Create the repmgr user and database
+
+ Create a dedicated PostgreSQL superuser account and a database for
+ the &repmgr; metadata, e.g.
+
+
+ createuser -s repmgr
+ createdb repmgr -O repmgr
+
+
+
+ For the examples in this document, the name repmgr will be
+ used for both user and database, but any names can be used.
+
+
+
+ For the sake of simplicity, the repmgr user is created
+ as a superuser. If desired, it's possible to create the repmgr
+ user as a normal user. However for certain operations superuser permissions
+ are required; in this case the command line option --superuser
+ can be provided to specify a superuser.
+
+
+ It's also assumed that the repmgr user will be used to make the
+ replication connection from the standby to the primary; again this can be
+ overridden by specifying a separate replication user when registering each node.
+
+
+
+
+
+ &repmgr; will install the repmgr extension, which creates a
+ repmgr schema containing the &repmgr;'s metadata tables as
+ well as other functions and views. We also recommend that you set the
+ repmgr user's search path to include this schema name, e.g.
+
+ ALTER USER repmgr SET search_path TO repmgr, "$user", public;
+
+
+
+
+
+
+ Configuring authentication in pg_hba.conf
+
+ Ensure the repmgr user has appropriate permissions in pg_hba.conf and
+ can connect in replication mode; pg_hba.conf should contain entries
+ similar to the following:
+
+
+ local replication repmgr trust
+ host replication repmgr 127.0.0.1/32 trust
+ host replication repmgr 192.168.1.0/24 trust
+
+ local repmgr repmgr trust
+ host repmgr repmgr 127.0.0.1/32 trust
+ host repmgr repmgr 192.168.1.0/24 trust
+
+
+ Note that these are simple settings for testing purposes.
+ Adjust according to your network environment and authentication requirements.
+
+
+
+
+ Preparing the standby
+
+ On the standby, do not create a PostgreSQL instance, but do ensure the destination
+ data directory (and any other directories which you want PostgreSQL to use)
+ exist and are owned by the postgres system user. Permissions
+ must be set to 0700 (drwx------).
+
+
+ Check the primary database is reachable from the standby using psql:
+
+
+ psql 'host=node1 user=repmgr dbname=repmgr connect_timeout=2'
+
+
+
+ &repmgr; stores connection information as libpq
+ connection strings throughout. This documentation refers to them as conninfo
+ strings; an alternative name is DSN (data source name).
+ We'll use these in place of the -h hostname -d databasename -U username syntax.
+
+
+
+
+
+ repmgr configuration file
+
+ Create a repmgr.conf file on the primary server. The file must
+ contain at least the following parameters:
+
+
+ node_id=1
+ node_name=node1
+ conninfo='host=node1 user=repmgr dbname=repmgr connect_timeout=2'
+ data_directory='/var/lib/postgresql/data'
+
+
+
+ repmgr.conf should not be stored inside the PostgreSQL data directory,
+ as it could be overwritten when setting up or reinitialising the PostgreSQL
+ server. See sections on and
+ for further details about repmgr.conf.
+
+
+
+ For Debian-based distributions we recommend explicitly setting
+ pg_bindir to the directory where pg_ctl and other binaries
+ not in the standard path are located. For PostgreSQL 9.6 this would be /usr/lib/postgresql/9.6/bin/.
+
+
+
+
+ See the file
+ repmgr.conf.sample
+ for details of all available configuration parameters.
+
+
+
+
+
+
+ Register the primary server
+
+ To enable &repmgr; to support a replication cluster, the primary node must
+ be registered with &repmgr;. This installs the repmgr
+ extension and metadata objects, and adds a metadata record for the primary server:
+
+
+
+ $ repmgr -f /etc/repmgr.conf primary register
+ INFO: connecting to primary database...
+ NOTICE: attempting to install extension "repmgr"
+ NOTICE: "repmgr" extension successfully installed
+ NOTICE: primary node record (id: 1) registered
+
+
+ Verify status of the cluster like this:
+
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Connection string
+ ----+-------+---------+-----------+----------+--------------------------------------------------------
+ 1 | node1 | primary | * running | | host=node1 dbname=repmgr user=repmgr connect_timeout=2
+
+
+ The record in the repmgr metadata table will look like this:
+
+
+ repmgr=# SELECT * FROM repmgr.nodes;
+ -[ RECORD 1 ]----+-------------------------------------------------------
+ node_id | 1
+ upstream_node_id |
+ active | t
+ node_name | node1
+ type | primary
+ location | default
+ priority | 100
+ conninfo | host=node1 dbname=repmgr user=repmgr connect_timeout=2
+ repluser | repmgr
+ slot_name |
+ config_file | /etc/repmgr.conf
+
+ Each server in the replication cluster will have its own record. If repmgrd
+ is in use, the fields upstream_node_id, active and
+ type will be updated when the node's status or role changes.
+
+
+
+
+ Clone the standby server
+
+ Create a repmgr.conf file on the standby server. It must contain at
+ least the same parameters as the primary's repmgr.conf, but with
+ the mandatory values node_id, node_name, conninfo
+ (and possibly data_directory) adjusted accordingly, e.g.:
+
+
+ node_id=2
+ node_name=node2
+ conninfo='host=node2 user=repmgr dbname=repmgr connect_timeout=2'
+ data_directory='/var/lib/postgresql/data'
+
+
+ Use the --dry-run option to check the standby can be cloned:
+
+
+ $ repmgr -h node1 -U repmgr -d repmgr -f /etc/repmgr.conf standby clone --dry-run
+ NOTICE: using provided configuration file "/etc/repmgr.conf"
+ NOTICE: destination directory "/var/lib/postgresql/data" provided
+ INFO: connecting to source node
+ NOTICE: checking for available walsenders on source node (2 required)
+ INFO: sufficient walsenders available on source node (2 required)
+ NOTICE: standby will attach to upstream node 1
+ HINT: consider using the -c/--fast-checkpoint option
+ INFO: all prerequisites for "standby clone" are met
+
+ If no problems are reported, the standby can then be cloned with:
+
+
+ $ repmgr -h node1 -U repmgr -d repmgr -f /etc/repmgr.conf standby clone
+
+ NOTICE: using configuration file "/etc/repmgr.conf"
+ NOTICE: destination directory "/var/lib/postgresql/data" provided
+ INFO: connecting to source node
+ NOTICE: checking for available walsenders on source node (2 required)
+ INFO: sufficient walsenders available on source node (2 required)
+ INFO: creating directory "/var/lib/postgresql/data"...
+ NOTICE: starting backup (using pg_basebackup)...
+ HINT: this may take some time; consider using the -c/--fast-checkpoint option
+ INFO: executing:
+ pg_basebackup -l "repmgr base backup" -D /var/lib/postgresql/data -h node1 -U repmgr -X stream
+ NOTICE: standby clone (using pg_basebackup) complete
+ NOTICE: you can now start your PostgreSQL server
+ HINT: for example: pg_ctl -D /var/lib/postgresql/data start
+
+
+ This has cloned the PostgreSQL data directory files from the primary node1
+ using PostgreSQL's pg_basebackup utility. A recovery.conf
+ file containing the correct parameters to start streaming from this primary server will be created
+ automatically.
+
+
+
+ By default, any configuration files in the primary's data directory will be
+ copied to the standby. Typically these will be postgresql.conf,
+ postgresql.auto.conf, pg_hba.conf and
+ pg_ident.conf. These may require modification before the standby
+ is started.
+
+
+
+ Make any adjustments to the standby's PostgreSQL configuration files now,
+ then start the server.
+
+
+ For more details on repmgr standby clone, see the
+ command reference.
+ A more detailed overview of cloning options is available in the
+ administration manual.
+
+
+
+
+ Verify replication is functioning
+
+ Connect to the primary server and execute:
+
+ repmgr=# SELECT * FROM pg_stat_replication;
+ -[ RECORD 1 ]----+------------------------------
+ pid | 19111
+ usesysid | 16384
+ usename | repmgr
+ application_name | node2
+ client_addr | 192.168.1.12
+ client_hostname |
+ client_port | 50378
+ backend_start | 2017-08-28 15:14:19.851581+09
+ backend_xmin |
+ state | streaming
+ sent_location | 0/7000318
+ write_location | 0/7000318
+ flush_location | 0/7000318
+ replay_location | 0/7000318
+ sync_priority | 0
+ sync_state | async
+ This shows that the previously cloned standby (node2 shown in the field
+ application_name) has connected to the primary from IP address
+ 192.168.1.12.
+
+
+ From PostgreSQL 9.6 you can also use the view
+
+ pg_stat_wal_receiver to check the replication status from the standby.
+
+
+ repmgr=# SELECT * FROM pg_stat_wal_receiver;
+ Expanded display is on.
+ -[ RECORD 1 ]---------+--------------------------------------------------------------------------------
+ pid | 18236
+ status | streaming
+ receive_start_lsn | 0/3000000
+ receive_start_tli | 1
+ received_lsn | 0/7000538
+ received_tli | 1
+ last_msg_send_time | 2017-08-28 15:21:26.465728+09
+ last_msg_receipt_time | 2017-08-28 15:21:26.465774+09
+ latest_end_lsn | 0/7000538
+ latest_end_time | 2017-08-28 15:20:56.418735+09
+ slot_name |
+ conninfo | user=repmgr dbname=replication host=node1 application_name=node2
+
+ Note that the conninfo value is that generated in recovery.conf
+ and will differ slightly from the primary's conninfo as set in repmgr.conf -
+ among others it will contain the connecting node's name as application_name.
+
+
+
+
+ Register the standby
+
+ Register the standby server with:
+
+ $ repmgr -f /etc/repmgr.conf standby register
+ NOTICE: standby node "node2" (ID: 2) successfully registered
+
+
+ Check the node is registered by executing repmgr cluster show on the standby:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | * running | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | standby | running | node1 | default | host=node2 dbname=repmgr user=repmgr
+
+
+ Both nodes are now registered with &repmgr; and the records have been copied to the standby server.
+
+
+
+
diff --git a/doc/repmgr-cluster-cleanup.sgml b/doc/repmgr-cluster-cleanup.sgml
new file mode 100644
index 00000000..df207d0c
--- /dev/null
+++ b/doc/repmgr-cluster-cleanup.sgml
@@ -0,0 +1,23 @@
+
+
+ repmgr cluster cleanup
+
+ repmgr cluster cleanup
+
+ Purges monitoring history from the repmgr.monitoring_history table to
+ prevent excessive table growth. Use the -k/--keep-history option to specify the
+ number of days of monitoring history to retain. This command can be used
+ manually or as a cronjob.
+
+
+ This command requires a valid repmgr.conf file for the node on which it is
+ executed; no additional arguments are required.
+
+
+
+ Monitoring history will only be written if repmgrd is active, and
+ monitoring_history is set to true in
+ repmgr.conf.
+
+
+
diff --git a/doc/repmgr-cluster-crosscheck.sgml b/doc/repmgr-cluster-crosscheck.sgml
new file mode 100644
index 00000000..dd361883
--- /dev/null
+++ b/doc/repmgr-cluster-crosscheck.sgml
@@ -0,0 +1,28 @@
+
+
+ repmgr cluster crosscheck
+
+ repmgr cluster crosscheck
+
+ repmgr cluster crosscheck is similar to ,
+ but cross-checks connections between each combination of nodes. In "Example 3" in
+ we have no information about the state of node3.
+ However by running repmgr cluster crosscheck it's possible to get a better
+ overview of the cluster situation:
+
+ $ repmgr -f /etc/repmgr.conf cluster crosscheck
+
+ Name | Id | 1 | 2 | 3
+ -------+----+----+----+----
+ node1 | 1 | * | * | x
+ node2 | 2 | * | * | *
+ node3 | 3 | * | * | *
+
+
+ What happened is that repmgr cluster crosscheck merged its own
+ repmgr cluster matrix with the repmgr cluster matrix
+ output from node2; the latter is able to connect to node3
+ and therefore determine the state of outbound connections from that node.
+
+
+
diff --git a/doc/repmgr-cluster-event.sgml b/doc/repmgr-cluster-event.sgml
new file mode 100644
index 00000000..f1f24fb7
--- /dev/null
+++ b/doc/repmgr-cluster-event.sgml
@@ -0,0 +1,37 @@
+
+
+ repmgr cluster event
+
+ repmgr cluster event
+
+ This outputs a formatted list of cluster events, as stored in the
+ repmgr.events table. Output is in reverse chronological order, and
+ can be filtered with the following options:
+
+
+ --all: outputs all entries
+
+
+ --limit: set the maximum number of entries to output (default: 20)
+
+
+ --node-id: restrict entries to node with this ID
+
+
+ --node-name: restrict entries to node with this name
+
+
+ --event: filter specific event
+
+
+
+
+ Example:
+
+ $ repmgr -f /etc/repmgr.conf cluster event --event=standby_register
+ Node ID | Name | Event | OK | Timestamp | Details
+ ---------+-------+------------------+----+---------------------+--------------------------------
+ 3 | node3 | standby_register | t | 2017-08-17 10:28:55 | standby registration succeeded
+ 2 | node2 | standby_register | t | 2017-08-17 10:28:53 | standby registration succeeded
+
+
diff --git a/doc/repmgr-cluster-matrix.sgml b/doc/repmgr-cluster-matrix.sgml
new file mode 100644
index 00000000..d37d1406
--- /dev/null
+++ b/doc/repmgr-cluster-matrix.sgml
@@ -0,0 +1,83 @@
+
+
+ repmgr cluster matrix
+
+ repmgr cluster matrix
+
+ repmgr cluster matrix runs repmgr cluster show on each
+ node and arranges the results in a matrix, recording success or failure.
+
+
+ repmgr cluster matrix requires a valid repmgr.conf
+ file on each node. Additionally passwordless `ssh` connections are required between
+ all nodes.
+
+
+ Example 1 (all nodes up):
+
+ $ repmgr -f /etc/repmgr.conf cluster matrix
+
+ Name | Id | 1 | 2 | 3
+ -------+----+----+----+----
+ node1 | 1 | * | * | *
+ node2 | 2 | * | * | *
+ node3 | 3 | * | * | *
+
+
+ Example 2 (node1 and node2 up, node3 down):
+
+ $ repmgr -f /etc/repmgr.conf cluster matrix
+
+ Name | Id | 1 | 2 | 3
+ -------+----+----+----+----
+ node1 | 1 | * | * | x
+ node2 | 2 | * | * | x
+ node3 | 3 | ? | ? | ?
+
+
+
+ Each row corresponds to one server, and indicates the result of
+ testing an outbound connection from that server.
+
+
+ Since node3 is down, all the entries in its row are filled with
+ ?, meaning that we cannot test outbound connections.
+
+
+ The other two nodes are up; the corresponding rows have x in the
+ column corresponding to node3, meaning that inbound connections to
+ that node have failed, and `*` in the columns corresponding to
+ node1 and node2, meaning that inbound connections
+ to these nodes have succeeded.
+
+
+ Example 3 (all nodes up, firewall dropping packets originating
+ from node1 and directed to port 5432 on node3) -
+ running repmgr cluster matrix from node1 gives the following output:
+
+ $ repmgr -f /etc/repmgr.conf cluster matrix
+
+ Name | Id | 1 | 2 | 3
+ -------+----+----+----+----
+ node1 | 1 | * | * | x
+ node2 | 2 | * | * | *
+ node3 | 3 | ? | ? | ?
+
+
+ Note this may take some time depending on the connect_timeout
+ setting in the node conninfo strings; the default is
+ 1 minute, which means without modification the above
+ command would take around 2 minutes to run (see comment elsewhere about setting
+ connect_timeout).
+
+
+ The matrix tells us that we cannot connect from node1 to node3,
+ and that (therefore) we don't know the state of any outbound
+ connection from node3.
+
+
+ In this case, the command will produce a more
+ useful result.
+
+
+
diff --git a/doc/repmgr-cluster-show.sgml b/doc/repmgr-cluster-show.sgml
new file mode 100644
index 00000000..f80e3dfa
--- /dev/null
+++ b/doc/repmgr-cluster-show.sgml
@@ -0,0 +1,67 @@
+
+
+ repmgr cluster show
+
+ repmgr cluster show
+
+ Displays information about each active node in the replication cluster. This
+ command polls each registered server and shows its role (primary /
+ standby / bdr) and status. It polls each server
+ directly and can be run on any node in the cluster; this is also useful when analyzing
+ connectivity from a particular node.
+
+
+ This command requires either a valid repmgr.conf file or a database
+ connection string to one of the registered nodes; no additional arguments are needed.
+
+
+
+ Example:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+-----------------------------------------
+ 1 | node1 | primary | * running | | default | host=db_node1 dbname=repmgr user=repmgr
+ 2 | node2 | standby | running | node1 | default | host=db_node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node1 | default | host=db_node3 dbname=repmgr user=repmgr
+
+
+
+ To show database connection errors when polling nodes, run the command in
+ --verbose mode.
+
+
+ The `cluster show` command accepts an optional parameter --csv, which
+ outputs the replication cluster's status in a simple CSV format, suitable for
+ parsing by scripts:
+
+ $ repmgr -f /etc/repmgr.conf cluster show --csv
+ 1,-1,-1
+ 2,0,0
+ 3,0,1
+
+
+ The columns have following meanings:
+
+
+
+ node ID
+
+
+ availability (0 = available, -1 = unavailable)
+
+
+ recovery state (0 = not in recovery, 1 = in recovery, -1 = unknown)
+
+
+
+
+
+
+ Note that the availability is tested by connecting from the node where
+ repmgr cluster show is executed, and does not necessarily imply the node
+ is down. See and to get
+ a better overview of connections between nodes.
+
+
diff --git a/doc/repmgr-node-check.sgml b/doc/repmgr-node-check.sgml
new file mode 100644
index 00000000..b9a5b5f2
--- /dev/null
+++ b/doc/repmgr-node-check.sgml
@@ -0,0 +1,70 @@
+
+
+ repmgr node check
+
+ repmgr node check
+
+ Performs some health checks on a node from a replication perspective.
+ This command must be run on the local node.
+
+
+ Sample output (execute repmgr node check):
+
+ Node "node1":
+ Server role: OK (node is primary)
+ Replication lag: OK (N/A - node is primary)
+ WAL archiving: OK (0 pending files)
+ Downstream servers: OK (2 of 2 downstream nodes attached)
+ Replication slots: OK (node has no replication slots)
+
+
+
+ Additionally each check can be performed individually by supplying
+ an additional command line parameter, e.g.:
+
+ $ repmgr node check --role
+ OK (node is primary)
+
+
+
+ Parameters for individual checks are as follows:
+
+
+
+
+ --role: checks if the node has the expected role
+
+
+
+
+
+ --replication-lag: checks if the node is lagging by more than
+ replication_lag_warning or replication_lag_critical
+
+
+
+
+
+ --archive-ready: checks for WAL files which have not yet been archived
+
+
+
+
+
+ --downstream: checks that the expected downstream nodes are attached
+
+
+
+
+
+ --slots: checks there are no inactive replication slots
+
+
+
+
+
+
+ Individual checks can also be output in a Nagios-compatible format by additionally
+ providing the option --nagios.
+
+
diff --git a/doc/repmgr-node-rejoin.sgml b/doc/repmgr-node-rejoin.sgml
new file mode 100644
index 00000000..14d9f8b7
--- /dev/null
+++ b/doc/repmgr-node-rejoin.sgml
@@ -0,0 +1,13 @@
+
+
+ repmgr node rejoin
+
+ repmgr node rejoin
+
+ Enables a dormant (stopped) node to be rejoined to the replication cluster.
+
+
+ This can optionally use pg_rewind to re-integrate a node which has diverged
+ from the rest of the cluster, typically a failed primary.
+
+
diff --git a/doc/repmgr-node-status.sgml b/doc/repmgr-node-status.sgml
new file mode 100644
index 00000000..8789b1ca
--- /dev/null
+++ b/doc/repmgr-node-status.sgml
@@ -0,0 +1,29 @@
+
+
+
+ repmgr node status
+
+ repmgr node status
+
+ Displays an overview of a node's basic information and replication
+ status. This command must be run on the local node.
+
+
+ Sample output (execute repmgr node status):
+
+ Node "node1":
+ PostgreSQL version: 10beta1
+ Total data size: 30 MB
+ Conninfo: host=node1 dbname=repmgr user=repmgr connect_timeout=2
+ Role: primary
+ WAL archiving: off
+ Archive command: (none)
+ Replication connections: 2 (of maximal 10)
+ Replication slots: 0 (of maximal 10)
+ Replication lag: n/a
+
+
+
+ See to diagnose issues.
+
+
diff --git a/doc/repmgr-primary-register.sgml b/doc/repmgr-primary-register.sgml
new file mode 100644
index 00000000..08208e79
--- /dev/null
+++ b/doc/repmgr-primary-register.sgml
@@ -0,0 +1,18 @@
+
+ repmgr primary register
+ repmgr primary register
+
+ repmgr primary register registers a primary node in a
+ streaming replication cluster, and configures it for use with repmgr, including
+ installing the &repmgr; extension. This command needs to be executed before any
+ standby nodes are registered.
+
+
+ Execute with the --dry-run option to check what would happen without
+ actually registering the primary.
+
+
+ repmgr master register can be used as an alias for
+ repmgr primary register.
+
+
diff --git a/doc/repmgr-primary-unregister.sgml b/doc/repmgr-primary-unregister.sgml
new file mode 100644
index 00000000..c09d05cb
--- /dev/null
+++ b/doc/repmgr-primary-unregister.sgml
@@ -0,0 +1,18 @@
+
+ repmgr primary unregister
+ repmgr primary unregister
+
+ repmgr primary unregister unregisters an inactive primary node
+ from the &repmgr; metadata. This is typically done when the primary has failed and is
+ being removed from the cluster after a new primary has been promoted.
+
+
+ Execute with the --dry-run option to check what would happen without
+ actually unregistering the node.
+
+
+
+ repmgr master unregister can be used as an alias for
+ repmgr primary unregister.
+
+
diff --git a/doc/repmgr-standby-clone.sgml b/doc/repmgr-standby-clone.sgml
new file mode 100644
index 00000000..76f7e52b
--- /dev/null
+++ b/doc/repmgr-standby-clone.sgml
@@ -0,0 +1,91 @@
+
+
+ repmgr standby clone
+ cloning
+
+ repmgr standby clone
+
+ repmgr standby clone clones a PostgreSQL node from another
+ PostgreSQL node, typically the primary, but optionally from any other node in
+ the cluster or from Barman. It creates the recovery.conf file required
+ to attach the cloned node to the primary node (or another standby, if cascading replication
+ is in use).
+
+
+
+ repmgr standby clone does not start the standby, and after cloning
+ repmgr standby register must be executed to notify &repmgr; of its presence.
+
+
+
+
+
+ Handling configuration files
+
+
+ Note that by default, all configuration files in the source node's data
+ directory will be copied to the cloned node. Typically these will be
+ postgresql.conf, postgresql.auto.conf,
+ pg_hba.conf and pg_ident.conf.
+ These may require modification before the standby is started.
+
+
+ In some cases (e.g. on Debian or Ubuntu Linux installations), PostgreSQL's
+ configuration files are located outside of the data directory and will
+ not be copied by default. &repmgr; can copy these files, either to the same
+ location on the standby server (provided appropriate directory and file permissions
+ are available), or into the standby's data directory. This requires passwordless
+ SSH access to the primary server. Add the option --copy-external-config-files
+ to the repmgr standby clone command; by default files will be copied to
+ the same path as on the upstream server. Note that the user executing repmgr
+ must have write access to those directories.
+
+
+ To have the configuration files placed in the standby's data directory, specify
+ --copy-external-config-files=pgdata, but note that
+ any include directives in the copied files may need to be updated.
+
+
+
+ For reliable configuration file management we recommend using a
+ configuration management tool such as Ansible, Chef, Puppet or Salt.
+
+
+
+
+
+ Managing WAL during the cloning process
+
+ When initially cloning a standby, you will need to ensure
+ that all required WAL files remain available while the cloning is taking
+ place. To ensure this happens when using the default `pg_basebackup` method,
+ &repmgr; will set pg_basebackup's --xlog-method
+ parameter to stream,
+ which will ensure all WAL files generated during the cloning process are
+ streamed in parallel with the main backup. Note that this requires two
+ replication connections to be available (&repmgr; will verify sufficient
+ connections are available before attempting to clone, and this can be checked
+ before performing the clone using the --dry-run option).
+
+
+ To override this behaviour, in repmgr.conf set
+ pg_basebackup's --xlog-method
+ parameter to fetch:
+
+ pg_basebackup_options='--xlog-method=fetch'
+
+ and ensure that wal_keep_segments is set to an appropriately high value.
+ See the
+ pg_basebackup documentation for details.
+
+
+
+
+ From PostgreSQL 10, pg_basebackup's
+ --xlog-method parameter has been renamed to
+ --wal-method.
+
+
+
+
+
diff --git a/doc/repmgr-standby-follow.sgml b/doc/repmgr-standby-follow.sgml
new file mode 100644
index 00000000..3181cf77
--- /dev/null
+++ b/doc/repmgr-standby-follow.sgml
@@ -0,0 +1,21 @@
+
+
+ repmgr standby follow
+
+ repmgr standby follow
+
+ Attaches the standby to a new primary. This command requires a valid
+ repmgr.conf file for the standby, either specified
+ explicitly with -f/--config-file or located in a
+ default location; no additional arguments are required.
+
+
+ This command will force a restart of the standby server, which must be
+ running. It can only be used to attach a standby to a new primary node.
+
+
+ To re-add an inactive node to the replication cluster, see
+
+
+
+
diff --git a/doc/repmgr-standby-promote.sgml b/doc/repmgr-standby-promote.sgml
new file mode 100644
index 00000000..1c95d763
--- /dev/null
+++ b/doc/repmgr-standby-promote.sgml
@@ -0,0 +1,18 @@
+
+
+ repmgr standby promote
+
+ repmgr standby promote
+
+ Promotes a standby to a primary if the current primary has failed. This
+ command requires a valid repmgr.conf file for the standby, either
+ specified explicitly with -f/--config-file or located in a
+ default location; no additional arguments are required.
+
+
+ If the standby promotion succeeds, the server will not need to be
+ restarted. However any other standbys will need to follow the new server,
+ by using ; if repmgrd
+ is active, it will handle this automatically.
+
+
diff --git a/doc/repmgr-standby-register.sgml b/doc/repmgr-standby-register.sgml
new file mode 100644
index 00000000..b4c77ce5
--- /dev/null
+++ b/doc/repmgr-standby-register.sgml
@@ -0,0 +1,50 @@
+
+ repmgr standby register
+ repmgr standby register
+
+ repmgr standby register adds a standby's information to
+ the &repmgr; metadata. This command needs to be executed to enable
+ promote/follow operations and to allow repmgrd to work with the node.
+ An existing standby can be registered using this command. Execute with the
+ --dry-run option to check what would happen without actually registering the
+ standby.
+
+
+
+ Waiting for the registration to propagate to the standby
+
+ Depending on your environment and workload, it may take some time for
+ the standby's node record to propagate from the primary to the standby. Some
+ actions (such as starting repmgrd) require that the standby's node record
+ is present and up-to-date to function correctly.
+
+
+ By providing the option --wait-sync to the
+ repmgr standby register command, &repmgr; will wait
+ until the record is synchronised before exiting. An optional timeout (in
+ seconds) can be added to this option (e.g. --wait-sync=60).
+
+
+
+
+ Registering an inactive node
+
+ Under some circumstances you may wish to register a standby which is not
+ yet running; this can be the case when using provisioning tools to create
+ a complex replication cluster. In this case, by using the -F/--force
+ option and providing the connection parameters to the primary server,
+ the standby can be registered.
+
+
+ Similarly, with cascading replication it may be necessary to register
+ a standby whose upstream node has not yet been registered - in this case,
+ using -F/--force will result in the creation of an inactive placeholder
+ record for the upstream node, which will however later need to be registered
+ with the -F/--force option too.
+
+
+ When used with repmgr standby register, care should be taken that use of the
+ -F/--force option does not result in an incorrectly configured cluster.
+
+
+
diff --git a/doc/repmgr-standby-switchover.sgml b/doc/repmgr-standby-switchover.sgml
new file mode 100644
index 00000000..102d6311
--- /dev/null
+++ b/doc/repmgr-standby-switchover.sgml
@@ -0,0 +1,27 @@
+
+
+ repmgr standby switchover
+
+ repmgr standby switchover
+
+ Promotes a standby to primary and demotes the existing primary to a standby.
+ This command must be run on the standby to be promoted, and requires a
+ passwordless SSH connection to the current primary.
+
+
+ If other standbys are connected to the demotion candidate, &repmgr; can instruct
+ these to follow the new primary if the option --siblings-follow
+ is specified.
+
+
+ Execute with the --dry-run option to test the switchover as far as
+ possible without actually changing the status of either node.
+
+
+ repmgrd should not be active on any nodes while a switchover is being
+ executed. This restriction may be lifted in a later version.
+
+
+ For more details see the section .
+
+
diff --git a/doc/repmgr-standby-unregister.sgml b/doc/repmgr-standby-unregister.sgml
new file mode 100644
index 00000000..7fd6e2f9
--- /dev/null
+++ b/doc/repmgr-standby-unregister.sgml
@@ -0,0 +1,29 @@
+
+ repmgr standby unregister
+ repmgr standby unregister
+
+ Unregisters a standby with `repmgr`. This command does not affect the actual
+ replication, just removes the standby's entry from the &repmgr; metadata.
+
+
+ To unregister a running standby, execute:
+
+ repmgr standby unregister -f /etc/repmgr.conf
+
+
+ This will remove the standby record from &repmgr;'s internal metadata
+ table (repmgr.nodes). A standby_unregister
+ event notification will be recorded in the repmgr.events table.
+
+
+ If the standby is not running, the command can be executed on another
+ node by providing the id of the node to be unregistered using
+ the command line parameter --node-id, e.g. executing the following
+ command on the primary server will unregister the standby with
+ id 3:
+
+ repmgr standby unregister -f /etc/repmgr.conf --node-id=3
+
+
+
+
diff --git a/doc/repmgr.sgml b/doc/repmgr.sgml
new file mode 100644
index 00000000..ffa1baa0
--- /dev/null
+++ b/doc/repmgr.sgml
@@ -0,0 +1,117 @@
+
+
+
+ %version;
+
+
+ %filelist;
+
+ repmgr">
+ PostgreSQL">
+]>
+
+
+ repmgr &repmgrversion; Documentation
+
+
+ 2ndQuadrant Ltd
+ repmgr
+ &repmgrversion;
+ &legal;
+
+
+
+ This is the official documentation of &repmgr; &repmgrversion; for
+ use with PostgreSQL 9.3 - PostgreSQL 10.
+ It describes the functionality supported by the current version of &repmgr;.
+
+
+
+ repmgr was developed by
+ 2ndQuadrant
+ along with contributions from other individuals and companies.
+ Contributions from the community are appreciated and welcome - get
+ in touch via github>
+ or the mailing list/forum>.
+ Multiple 2ndQuadrant customers contribute funding
+ to make repmgr development possible.
+
+
+
+ 2ndQuadrant, a Platinum sponsor of the PostgreSQL project,
+ continues to develop repmgr to meet internal needs and those of customers.
+ Other companies as well as individual developers
+ are welcome to participate in the efforts.
+
+
+
+
+ repmgr
+ PostgreSQL
+ replication
+ asynchronous
+ HA
+ high-availability
+
+
+
+
+
+ Getting started
+ &overview;
+ &install;
+ &quickstart;
+
+
+
+ repmgr administration manual
+
+ &configuration;
+ &cloning-standbys;
+ &promoting-standby;
+ &follow-new-primary;
+ &switchover;
+ &event-notifications;
+ &upgrading-repmgr;
+
+
+
+ Using repmgrd
+ &repmgrd-automatic-failover;
+ &repmgrd-configuration;
+ &repmgrd-demonstration;
+ &repmgrd-cascading-replication;
+ &repmgrd-network-split;
+ &repmgrd-degraded-monitoring;
+ &repmgrd-monitoring;
+
+
+
+ repmgr command reference
+
+ &repmgr-primary-register;
+ &repmgr-primary-unregister;
+ &repmgr-standby-clone;
+ &repmgr-standby-register;
+ &repmgr-standby-unregister;
+ &repmgr-standby-promote;
+ &repmgr-standby-follow;
+ &repmgr-standby-switchover;
+ &repmgr-node-status;
+ &repmgr-node-check;
+ &repmgr-node-rejoin;
+ &repmgr-cluster-show;
+ &repmgr-cluster-matrix;
+ &repmgr-cluster-crosscheck;
+ &repmgr-cluster-event;
+ &repmgr-cluster-cleanup;
+
+
+ &appendix-signatures;
+
+
+ ]]>
+
+
diff --git a/doc/repmgrd-automatic-failover.sgml b/doc/repmgrd-automatic-failover.sgml
new file mode 100644
index 00000000..8c28deca
--- /dev/null
+++ b/doc/repmgrd-automatic-failover.sgml
@@ -0,0 +1,12 @@
+
+ Automatic failover with repmgrd
+
+
+ repmgrd is a management and monitoring daemon which runs
+ on each node in a replication cluster. It can automate actions such as
+ failover and updating standbys to follow the new primary, as well as
+ providing monitoring information about the state of each standby.
+
+
+
+
diff --git a/doc/repmgrd-cascading-replication.sgml b/doc/repmgrd-cascading-replication.sgml
new file mode 100644
index 00000000..b8e00514
--- /dev/null
+++ b/doc/repmgrd-cascading-replication.sgml
@@ -0,0 +1,17 @@
+
+ repmgrd and cascading replication
+
+ Cascading replication - where a standby can connect to an upstream node and not
+ the primary server itself - was introduced in PostgreSQL 9.2. &repmgr; and
+ repmgrd support cascading replication by keeping track of the relationship
+ between standby servers - each node record is stored with the node id of its
+ upstream ("parent") server (except of course the primary server).
+
+
+ In a failover situation where the primary node fails and a top-level standby
+ is promoted, a standby connected to another standby will not be affected
+ and continue working as normal (even if the upstream standby it's connected
+ to becomes the primary node). If however the node's direct upstream fails,
+ the "cascaded standby" will attempt to reconnect to that node's parent.
+
+
diff --git a/doc/repmgrd-configuration.sgml b/doc/repmgrd-configuration.sgml
new file mode 100644
index 00000000..6ae80a94
--- /dev/null
+++ b/doc/repmgrd-configuration.sgml
@@ -0,0 +1,94 @@
+
+ repmgrd configuration
+
+ To use repmgrd, its associated function library must be
+ included in postgresql.conf with:
+
+
+ shared_preload_libraries = 'repmgr'
+
+
+ Changing this setting requires a restart of PostgreSQL; for more details see
+ the PostgreSQL documentation.
+
+
+ Additionally the following repmgrd options *must* be set in
+ repmgr.conf (adjust configuration file locations as appropriate):
+
+ failover=automatic
+ promote_command='repmgr standby promote -f /etc/repmgr.conf --log-to-file'
+ follow_command='repmgr standby follow -f /etc/repmgr.conf --log-to-file --upstream-node-id=%n'
+
+
+ Note that the --log-to-file option will cause
+ output generated by the &repmgr; command, when executed by repmgrd,
+ to be logged to the same destination configured to receive log output for repmgrd.
+ See repmgr.conf.sample for further repmgrd-specific settings.
+
+
+ When failover is set to automatic, upon detecting failure
+ of the current primary, repmgrd will execute one of
+ promote_command or follow_command,
+ depending on whether the current server is to become the new primary, or
+ needs to follow another server which has become the new primary. Note that
+ these commands can be any valid shell script which results in one of these
+ two actions happening, but if &repmgr;'s standby follow or
+ standby promote
+ commands are not executed (either directly as shown here, or from a script which
+ performs other actions), the &repmgr; metadata will not be updated and
+ &repmgr; will no longer function reliably.
+
+
+ The follow_command should provide the --upstream-node-id=%n
+ option to repmgr standby follow; the %n will be replaced by
+ repmgrd with the ID of the new primary node. If this is not provided, &repmgr;
+ will attempt to determine the new primary by itself, but if the
+ original primary comes back online after the new primary is promoted, there is a risk that
+ repmgr standby follow will result in the node continuing to follow
+ the original primary.
+
+
+ repmgrd connection settings
+
+ In addition to the &repmgr; configuration settings, parameters in the
+ conninfo string influence how &repmgr; makes a network connection to
+ PostgreSQL. In particular, if another server in the replication cluster
+ is unreachable at network level, system network settings will influence
+ the length of time it takes to determine that the connection is not possible.
+
+
+ In particular explicitly setting a parameter for connect_timeout
+ should be considered; the effective minimum value of 2
+ (seconds) will ensure that a connection failure at network level is reported
+ as soon as possible, otherwise depending on the system settings (e.g.
+ tcp_syn_retries in Linux) a delay of a minute or more
+ is possible.
+
+
+ For further details on conninfo network connection
+ parameters, see the
+ PostgreSQL documentation.
+
+
+
+ repmgrd log rotation
+
+ To ensure the current repmgrd logfile does not grow
+ indefinitely, configure your system's logrotate to
+ regularly rotate it.
+
+
+ Sample configuration to rotate logfiles weekly with retention for
+ up to 52 weeks and rotation forced if a file grows beyond 100Mb:
+
+ /var/log/postgresql/repmgr-9.6.log {
+ missingok
+ compress
+ rotate 52
+ maxsize 100M
+ weekly
+ create 0600 postgres postgres
+ }
+
+
+
diff --git a/doc/repmgrd-degraded-monitoring.sgml b/doc/repmgrd-degraded-monitoring.sgml
new file mode 100644
index 00000000..5a4f5c16
--- /dev/null
+++ b/doc/repmgrd-degraded-monitoring.sgml
@@ -0,0 +1,70 @@
+
+ "degraded monitoring" mode
+
+ In certain circumstances, repmgrd is not able to fulfill its primary mission
+ of monitoring the nodes' upstream server. In these cases it enters "degraded
+ monitoring" mode, where repmgrd remains active but is waiting for the situation
+ to be resolved.
+
+
+ Situations where this happens are:
+
+
+
+ a failover situation has occurred, no nodes in the primary node's location are visible
+
+
+
+ a failover situation has occurred, but no promotion candidate is available
+
+
+
+ a failover situation has occurred, but the promotion candidate could not be promoted
+
+
+
+ a failover situation has occurred, but the node was unable to follow the new primary
+
+
+
+ a failover situation has occurred, but no primary has become available
+
+
+
+ a failover situation has occurred, but automatic failover is not enabled for the node
+
+
+
+ repmgrd is monitoring the primary node, but it is not available
+
+
+
+
+
+ Example output in a situation where there is only one standby with failover=manual,
+ and the primary node is unavailable (but is later restarted):
+
+ [2017-08-29 10:59:19] [INFO] node "node2" (node ID: 2) monitoring upstream node "node1" (node ID: 1) in normal state (automatic failover disabled)
+ [2017-08-29 10:59:33] [WARNING] unable to connect to upstream node "node1" (node ID: 1)
+ [2017-08-29 10:59:33] [INFO] checking state of node 1, 1 of 5 attempts
+ [2017-08-29 10:59:33] [INFO] sleeping 1 seconds until next reconnection attempt
+ (...)
+ [2017-08-29 10:59:37] [INFO] checking state of node 1, 5 of 5 attempts
+ [2017-08-29 10:59:37] [WARNING] unable to reconnect to node 1 after 5 attempts
+ [2017-08-29 10:59:37] [NOTICE] this node is not configured for automatic failover so will not be considered as promotion candidate
+ [2017-08-29 10:59:37] [NOTICE] no other nodes are available as promotion candidate
+ [2017-08-29 10:59:37] [HINT] use "repmgr standby promote" to manually promote this node
+ [2017-08-29 10:59:37] [INFO] node "node2" (node ID: 2) monitoring upstream node "node1" (node ID: 1) in degraded state (automatic failover disabled)
+ [2017-08-29 10:59:53] [INFO] node "node2" (node ID: 2) monitoring upstream node "node1" (node ID: 1) in degraded state (automatic failover disabled)
+ [2017-08-29 11:00:45] [NOTICE] reconnected to upstream node 1 after 68 seconds, resuming monitoring
+ [2017-08-29 11:00:57] [INFO] node "node2" (node ID: 2) monitoring upstream node "node1" (node ID: 1) in normal state (automatic failover disabled)
+
+
+
+ By default, repmgrd will continue in degraded monitoring mode indefinitely.
+ However a timeout (in seconds) can be set with degraded_monitoring_timeout,
+ after which repmgrd will terminate.
+
+
+
+
diff --git a/doc/repmgrd-demonstration.sgml b/doc/repmgrd-demonstration.sgml
new file mode 100644
index 00000000..401c2db5
--- /dev/null
+++ b/doc/repmgrd-demonstration.sgml
@@ -0,0 +1,96 @@
+
+ repmgrd demonstration
+
+ To demonstrate automatic failover, set up a 3-node replication cluster (one primary
+ and two standbys streaming directly from the primary) so that the cluster looks
+ something like this:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | primary | * running | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | standby | running | node1 | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node1 | default | host=node3 dbname=repmgr user=repmgr
+
+
+ Start repmgrd on each standby and verify that it's running by examining the
+ log output, which at log level INFO will look like this:
+
+ [2017-08-24 17:31:00] [NOTICE] using configuration file "/etc/repmgr.conf"
+ [2017-08-24 17:31:00] [INFO] connecting to database "host=node2 dbname=repmgr user=repmgr"
+ [2017-08-24 17:31:00] [NOTICE] starting monitoring of node node2 (ID: 2)
+ [2017-08-24 17:31:00] [INFO] monitoring connection to upstream node "node1" (node ID: 1)
+
+
+ Each repmgrd should also have recorded its successful startup as an event:
+
+ $ repmgr -f /etc/repmgr.conf cluster event --event=repmgrd_start
+ Node ID | Name | Event | OK | Timestamp | Details
+ ---------+-------+---------------+----+---------------------+-------------------------------------------------------------
+ 3 | node3 | repmgrd_start | t | 2017-08-24 17:35:54 | monitoring connection to upstream node "node1" (node ID: 1)
+ 2 | node2 | repmgrd_start | t | 2017-08-24 17:35:50 | monitoring connection to upstream node "node1" (node ID: 1)
+ 1 | node1 | repmgrd_start | t | 2017-08-24 17:35:46 | monitoring cluster primary "node1" (node ID: 1)
+
+
+ Now stop the current primary server with e.g.:
+
+ pg_ctl -D /var/lib/postgresql/data -m immediate stop
+
+
+ This will force the primary to shut down straight away, aborting all processes
+ and transactions. This will cause a flurry of activity in the repmgrd log
+ files as each repmgrd detects the failure of the primary and a failover
+ decision is made. This is an extract from the log of a standby server (node2)
+ which has promoted to new primary after failure of the original primary (node1).
+
+ [2017-08-24 23:32:01] [INFO] node "node2" (node ID: 2) monitoring upstream node "node1" (node ID: 1) in normal state
+ [2017-08-24 23:32:08] [WARNING] unable to connect to upstream node "node1" (node ID: 1)
+ [2017-08-24 23:32:08] [INFO] checking state of node 1, 1 of 5 attempts
+ [2017-08-24 23:32:08] [INFO] sleeping 1 seconds until next reconnection attempt
+ [2017-08-24 23:32:09] [INFO] checking state of node 1, 2 of 5 attempts
+ [2017-08-24 23:32:09] [INFO] sleeping 1 seconds until next reconnection attempt
+ [2017-08-24 23:32:10] [INFO] checking state of node 1, 3 of 5 attempts
+ [2017-08-24 23:32:10] [INFO] sleeping 1 seconds until next reconnection attempt
+ [2017-08-24 23:32:11] [INFO] checking state of node 1, 4 of 5 attempts
+ [2017-08-24 23:32:11] [INFO] sleeping 1 seconds until next reconnection attempt
+ [2017-08-24 23:32:12] [INFO] checking state of node 1, 5 of 5 attempts
+ [2017-08-24 23:32:12] [WARNING] unable to reconnect to node 1 after 5 attempts
+ INFO: setting voting term to 1
+ INFO: node 2 is candidate
+ INFO: node 3 has received request from node 2 for electoral term 1 (our term: 0)
+ [2017-08-24 23:32:12] [NOTICE] this node is the winner, will now promote self and inform other nodes
+ INFO: connecting to standby database
+ NOTICE: promoting standby
+ DETAIL: promoting server using '/home/barwick/devel/builds/HEAD/bin/pg_ctl -l /tmp/postgres.5602.log -w -D '/tmp/repmgr-test/node_2/data' promote'
+ INFO: reconnecting to promoted server
+ NOTICE: STANDBY PROMOTE successful
+ DETAIL: node 2 was successfully promoted to primary
+ INFO: node 3 received notification to follow node 2
+ [2017-08-24 23:32:13] [INFO] switching to primary monitoring mode
+
+
+ The cluster status will now look like this, with the original primary (node1)
+ marked as inactive, and standby node3 now following the new primary
+ (node2):
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+----------------------------------------------------
+ 1 | node1 | primary | - failed | | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | primary | * running | | default | host=node2 dbname=repmgr user=repmgr
+ 3 | node3 | standby | running | node2 | default | host=node3 dbname=repmgr user=repmgr
+
+
+
+ repmgr cluster event will display a summary of what happened to each server
+ during the failover:
+
+ $ repmgr -f /etc/repmgr.conf cluster event
+ Node ID | Name | Event | OK | Timestamp | Details
+ ---------+-------+--------------------------+----+---------------------+-----------------------------------------------------------------------------------
+ 3 | node3 | repmgrd_failover_follow | t | 2017-08-24 23:32:16 | node 3 now following new upstream node 2
+ 3 | node3 | standby_follow | t | 2017-08-24 23:32:16 | node 3 is now attached to node 2
+ 2 | node2 | repmgrd_failover_promote | t | 2017-08-24 23:32:13 | node 2 promoted to primary; old primary 1 marked as failed
+ 2 | node2 | standby_promote | t | 2017-08-24 23:32:13 | node 2 was successfully promoted to primary
+
+
diff --git a/doc/repmgrd-monitoring.sgml b/doc/repmgrd-monitoring.sgml
new file mode 100644
index 00000000..7daaac0a
--- /dev/null
+++ b/doc/repmgrd-monitoring.sgml
@@ -0,0 +1,71 @@
+
+ Monitoring with repmgrd
+
+ When `repmgrd` is running with the option monitoring_history=true,
+ it will constantly write standby node status information to the
+ monitoring_history table, providing a near-real time
+ overview of replication status on all nodes
+ in the cluster.
+
+
+ The view replication_status shows the most recent state
+ for each node, e.g.:
+
+ repmgr=# select * from repmgr.replication_status;
+ -[ RECORD 1 ]-------------+------------------------------
+ primary_node_id | 1
+ standby_node_id | 2
+ standby_name | node2
+ node_type | standby
+ active | t
+ last_monitor_time | 2017-08-24 16:28:41.260478+09
+ last_wal_primary_location | 0/6D57A00
+ last_wal_standby_location | 0/5000000
+ replication_lag | 29 MB
+ replication_time_lag | 00:00:11.736163
+ apply_lag | 15 MB
+ communication_time_lag | 00:00:01.365643
+
+
+ The interval in which monitoring history is written is controlled by the
+ configuration parameter monitor_interval_secs;
+ default is 2.
+
+
+ As this can generate a large amount of monitoring data in the table
+ repmgr.monitoring_history, it's advisable to regularly
+ purge historical data using the
+ command; use the -k/--keep-history option to
+ specify how many days' worth of data should be retained.
+
+
+ It's possible to use repmgrd to run in monitoring
+ mode only (without automatic failover capability) for some or all
+ nodes by setting failover=manual in the node's
+ repmgr.conf file. In the event of the node's upstream failing,
+ no failover action will be taken and the node will require manual intervention to
+ be reattached to replication. If this occurs, an
+ event notification
+ standby_disconnect_manual will be created.
+
+
+ Note that when a standby node is not streaming directly from its upstream
+ node, e.g. recovering WAL from an archive, apply_lag will always appear as
+ 0 bytes.
+
+
+
+ If monitoring history is enabled, the contents of the repmgr.monitoring_history
+ table will be replicated to attached standbys. This means there will be a small but
+ constant stream of replication activity which may not be desirable. To prevent
+ this, convert the table to an UNLOGGED one with:
+
+ ALTER TABLE repmgr.monitoring_history SET UNLOGGED;
+
+
+ This will however mean that monitoring history will not be available on
+ another node following a failover, and the view repmgr.replication_status
+ will not work on standbys.
+
+
+
diff --git a/doc/repmgrd-network-split.sgml b/doc/repmgrd-network-split.sgml
new file mode 100644
index 00000000..934bf0b8
--- /dev/null
+++ b/doc/repmgrd-network-split.sgml
@@ -0,0 +1,43 @@
+
+ Handling network splits with repmgrd
+
+ A common pattern for replication cluster setups is to spread servers over
+ more than one datacentre. This can provide benefits such as geographically-
+ distributed read replicas and DR (disaster recovery) capability. However
+ this also means there is a risk of disconnection at network level between
+ datacentre locations, which would result in a split-brain scenario if
+ servers in a secondary data centre were no longer able to see the primary
+ in the main data centre and promoted a standby among themselves.
+
+
+ Previous &repmgr; versions used the concept of a "witness server" to
+ artificially create a quorum of servers in a particular location, ensuring
+ that nodes in another location will not elect a new primary if they
+ are unable to see the majority of nodes. However this approach does not
+ scale well, particularly with more complex replication setups, e.g.
+ where the majority of nodes are located outside of the primary datacentre.
+ It also means the witness node needs to be managed as an
+ extra PostgreSQL instance outside of the main replication cluster, which
+ adds administrative and programming complexity.
+
+
+ repmgr4 introduces the concept of location:
+ each node is associated with an arbitrary location string (default is
+ default); this is set in repmgr.conf, e.g.:
+
+ node_id=1
+ node_name=node1
+ conninfo='host=node1 user=repmgr dbname=repmgr connect_timeout=2'
+ data_directory='/var/lib/postgresql/data'
+ location='dc1'
+
+
+ In a failover situation, repmgrd will check if any servers in the
+ same location as the current primary node are visible. If not, repmgrd
+ will assume a network interruption and not promote any node in any
+ other location (it will however enter mode until
+ a primary becomes visible).
+
+
+
+
diff --git a/doc/stylesheet.css b/doc/stylesheet.css
new file mode 100644
index 00000000..0fd0f017
--- /dev/null
+++ b/doc/stylesheet.css
@@ -0,0 +1,96 @@
+/* doc/src/sgml/stylesheet.css */
+
+/* color scheme similar to www.postgresql.org */
+
+BODY {
+ color: #000000;
+ background: #FFFFFF;
+ font-family: verdana, sans-serif;
+}
+
+A:link { color:#0066A2; }
+A:visited { color:#004E66; }
+A:active { color:#0066A2; }
+A:hover { color:#000000; }
+
+H1 {
+ font-size: 1.4em;
+ font-weight: bold;
+ margin-top: 0em;
+ margin-bottom: 0em;
+ color: #EC5800;
+}
+
+H2 {
+ font-size: 1.2em;
+ margin: 1.2em 0em 1.2em 0em;
+ font-weight: bold;
+ color: #666;
+}
+
+H3 {
+ font-size: 1.1em;
+ margin: 1.2em 0em 1.2em 0em;
+ font-weight: bold;
+ color: #666;
+}
+
+H4 {
+ font-size: 0.95em;
+ margin: 1.2em 0em 1.2em 0em;
+ font-weight: normal;
+ color: #666;
+}
+
+H5 {
+ font-size: 0.9em;
+ margin: 1.2em 0em 1.2em 0em;
+ font-weight: normal;
+}
+
+H6 {
+ font-size: 0.85em;
+ margin: 1.2em 0em 1.2em 0em;
+ font-weight: normal;
+}
+
+/* center some titles */
+
+.BOOK .TITLE, .BOOK .CORPAUTHOR, .BOOK .COPYRIGHT {
+ text-align: center;
+}
+
+/* decoration for formal examples */
+
+DIV.EXAMPLE {
+ padding-left: 15px;
+ border-style: solid;
+ border-width: 0px;
+ border-left-width: 2px;
+ border-color: black;
+ margin: 0.5ex;
+}
+
+/* less dense spacing of TOC */
+
+.BOOK .TOC DL DT {
+ padding-top: 1.5ex;
+ padding-bottom: 1.5ex;
+}
+
+.BOOK .TOC DL DL DT {
+ padding-top: 0ex;
+ padding-bottom: 0ex;
+}
+
+/* miscellaneous */
+
+PRE.LITERALLAYOUT, .SCREEN, .SYNOPSIS, .PROGRAMLISTING {
+ margin-left: 4ex;
+}
+
+.COMMENT { color: red; }
+
+VAR { font-family: monospace; font-style: italic; }
+/* Konqueror's standard style for ACRONYM is italic. */
+ACRONYM { font-style: inherit; }
diff --git a/doc/stylesheet.dsl b/doc/stylesheet.dsl
new file mode 100644
index 00000000..ba96a888
--- /dev/null
+++ b/doc/stylesheet.dsl
@@ -0,0 +1,851 @@
+
+
+
+
+
+
+
+]]>
+
+
+]]>
+
+
+]]>
+
+]>
+
+
+
+
+
+
+
+
+
+(define draft-mode #f)
+
+;; Don't show manpage volume numbers
+(define %refentry-xref-manvolnum% #f)
+
+;; Don't use graphics for callouts. (We could probably do that, but
+;; it needs extra work.)
+(define %callout-graphics% #f)
+
+;; Show comments during the development stage.
+(define %show-comments% draft-mode)
+
+;; Force a chapter TOC even if it includes only a single entry
+(define %force-chapter-toc% #t)
+
+;; Don't append period if run-in title ends with any of these
+;; characters. We had to add the colon here. This is fixed in
+;; stylesheets version 1.71, so it can be removed sometime.
+(define %content-title-end-punct%
+ '(#\. #\! #\? #\:))
+
+;; No automatic punctuation after honorific name parts
+(define %honorific-punctuation% "")
+
+;; Change display of some elements
+(element command ($mono-seq$))
+(element envar ($mono-seq$))
+(element lineannotation ($italic-seq$))
+(element literal ($mono-seq$))
+(element option ($mono-seq$))
+(element parameter ($mono-seq$))
+(element structfield ($mono-seq$))
+(element structname ($mono-seq$))
+(element symbol ($mono-seq$))
+(element token ($mono-seq$))
+(element type ($mono-seq$))
+(element varname ($mono-seq$))
+(element (programlisting emphasis) ($bold-seq$)) ;; to highlight sections of code
+
+;; Special support for Tcl synopses
+(element optional
+ (if (equal? (attribute-string (normalize "role")) "tcl")
+ (make sequence
+ (literal "?")
+ ($charseq$)
+ (literal "?"))
+ (make sequence
+ (literal %arg-choice-opt-open-str%)
+ ($charseq$)
+ (literal %arg-choice-opt-close-str%))))
+
+;; Avoid excessive cross-reference labels
+(define (auto-xref-indirect? target ancestor)
+ (cond
+; ;; Always add indirect references to another book
+; ((member (gi ancestor) (book-element-list))
+; #t)
+ ;; Add indirect references to the section or component a block
+ ;; is in iff chapters aren't autolabelled. (Otherwise "Figure 1-3"
+ ;; is sufficient)
+ ((and (member (gi target) (block-element-list))
+ (not %chapter-autolabel%))
+ #t)
+ ;; Add indirect references to the component a section is in if
+ ;; the sections are not autolabelled
+ ((and (member (gi target) (section-element-list))
+ (member (gi ancestor) (component-element-list))
+ (not %section-autolabel%))
+ #t)
+ (else #f)))
+
+
+;; Bibliography things
+
+;; Use the titles of bibliography entries in cross-references
+(define biblio-xref-title #t)
+
+;; Process bibliography entry components in the order shown below, not
+;; in the order they appear in the document. (I suppose this should
+;; be made to fit some publishing standard.)
+(define %biblioentry-in-entry-order% #f)
+
+(define (biblioentry-inline-elements)
+ (list
+ (normalize "author")
+ (normalize "authorgroup")
+ (normalize "title")
+ (normalize "subtitle")
+ (normalize "volumenum")
+ (normalize "edition")
+ (normalize "othercredit")
+ (normalize "contrib")
+ (normalize "editor")
+ (normalize "publishername")
+ (normalize "confgroup")
+ (normalize "publisher")
+ (normalize "isbn")
+ (normalize "issn")
+ (normalize "pubsnumber")
+ (normalize "date")
+ (normalize "pubdate")
+ (normalize "pagenums")
+ (normalize "bibliomisc")))
+
+(mode biblioentry-inline-mode
+
+ (element confgroup
+ (make sequence
+ (literal "Proc. ")
+ (next-match)))
+
+ (element isbn
+ (make sequence
+ (literal "ISBN ")
+ (process-children)))
+
+ (element issn
+ (make sequence
+ (literal "ISSN ")
+ (process-children))))
+
+
+;; The rules in the default stylesheet for productname format it as a
+;; paragraph. This may be suitable for productname directly within
+;; *info, but it's nonsense when productname is used inline, as we do.
+(mode book-titlepage-recto-mode
+ (element (para productname) ($charseq$)))
+(mode book-titlepage-verso-mode
+ (element (para productname) ($charseq$)))
+;; Add more here if needed...
+
+
+;; Replace a sequence of whitespace in a string by a single space
+(define (normalize-whitespace str #!optional (whitespace '(#\space #\U-000D)))
+ (let loop ((characters (string->list str))
+ (result '())
+ (prev-was-space #f))
+ (if (null? characters)
+ (list->string (reverse result))
+ (let ((c (car characters))
+ (rest (cdr characters)))
+ (if (member c whitespace)
+ (if prev-was-space
+ (loop rest result #t)
+ (loop rest (cons #\space result) #t))
+ (loop rest (cons c result) #f))))))
+
+
+
+
+string (time) #t)))))
+
+
+;; Block elements are allowed in PARA in DocBook, but not in P in
+;; HTML. With %fix-para-wrappers% turned on, the stylesheets attempt
+;; to avoid putting block elements in HTML P tags by outputting
+;; additional end/begin P pairs around them.
+(define %fix-para-wrappers% #t)
+
+;; ...but we need to do some extra work to make the above apply to PRE
+;; as well. (mostly pasted from dbverb.dsl)
+(define ($verbatim-display$ indent line-numbers?)
+ (let ((content (make element gi: "PRE"
+ attributes: (list
+ (list "CLASS" (gi)))
+ (if (or indent line-numbers?)
+ ($verbatim-line-by-line$ indent line-numbers?)
+ (process-children)))))
+ (if %shade-verbatim%
+ (make element gi: "TABLE"
+ attributes: ($shade-verbatim-attr$)
+ (make element gi: "TR"
+ (make element gi: "TD"
+ content)))
+ (make sequence
+ (para-check)
+ content
+ (para-check 'restart)))))
+
+;; ...and for notes.
+(element note
+ (make sequence
+ (para-check)
+ ($admonition$)
+ (para-check 'restart)))
+
+;;; XXX The above is very ugly. It might be better to run 'tidy' on
+;;; the resulting *.html files.
+
+
+;; Format multiple terms in varlistentry vertically, instead
+;; of comma-separated.
+(element (varlistentry term)
+ (make sequence
+ (process-children-trim)
+ (if (not (last-sibling?))
+ (make empty-element gi: "BR")
+ (empty-sosofo))))
+
+
+;; Customization of header
+;; - make title a link to the home page
+;; - add tool tips to Prev/Next links
+;; - add Up link
+;; (overrides dbnavig.dsl)
+(define (default-header-nav-tbl-noff elemnode prev next prevsib nextsib)
+ (let* ((r1? (nav-banner? elemnode))
+ (r1-sosofo (make element gi: "TR"
+ (make element gi: "TH"
+ attributes: (list
+ (list "COLSPAN" "4")
+ (list "ALIGN" "center")
+ (list "VALIGN" "bottom"))
+ (make element gi: "A"
+ attributes: (list
+ (list "HREF" (href-to (nav-home elemnode))))
+ (nav-banner elemnode)))))
+ (r2? (or (not (node-list-empty? prev))
+ (not (node-list-empty? next))
+ (nav-context? elemnode)))
+ (r2-sosofo (make element gi: "TR"
+ (make element gi: "TD"
+ attributes: (list
+ (list "WIDTH" "10%")
+ (list "ALIGN" "left")
+ (list "VALIGN" "top"))
+ (if (node-list-empty? prev)
+ (make entity-ref name: "nbsp")
+ (make element gi: "A"
+ attributes: (list
+ (list "TITLE" (element-title-string prev))
+ (list "HREF"
+ (href-to
+ prev))
+ (list "ACCESSKEY"
+ "P"))
+ (gentext-nav-prev prev))))
+ (make element gi: "TD"
+ attributes: (list
+ (list "WIDTH" "10%")
+ (list "ALIGN" "left")
+ (list "VALIGN" "top"))
+ (if (nav-up? elemnode)
+ (nav-up elemnode)
+ (nav-home-link elemnode)))
+ (make element gi: "TD"
+ attributes: (list
+ (list "WIDTH" "60%")
+ (list "ALIGN" "center")
+ (list "VALIGN" "bottom"))
+ (nav-context elemnode))
+ (make element gi: "TD"
+ attributes: (list
+ (list "WIDTH" "20%")
+ (list "ALIGN" "right")
+ (list "VALIGN" "top"))
+ (if (node-list-empty? next)
+ (make entity-ref name: "nbsp")
+ (make element gi: "A"
+ attributes: (list
+ (list "TITLE" (element-title-string next))
+ (list "HREF"
+ (href-to
+ next))
+ (list "ACCESSKEY"
+ "N"))
+ (gentext-nav-next next)))))))
+ (if (or r1? r2?)
+ (make element gi: "DIV"
+ attributes: '(("CLASS" "NAVHEADER"))
+ (make element gi: "TABLE"
+ attributes: (list
+ (list "SUMMARY" "Header navigation table")
+ (list "WIDTH" %gentext-nav-tblwidth%)
+ (list "BORDER" "0")
+ (list "CELLPADDING" "0")
+ (list "CELLSPACING" "0"))
+ (if r1? r1-sosofo (empty-sosofo))
+ (if r2? r2-sosofo (empty-sosofo)))
+ (make empty-element gi: "HR"
+ attributes: (list
+ (list "ALIGN" "LEFT")
+ (list "WIDTH" %gentext-nav-tblwidth%))))
+ (empty-sosofo))))
+
+
+;; Put index "quicklinks" (A | B | C | ...) at the top of the bookindex page.
+
+(element index
+ (let ((preamble (node-list-filter-by-not-gi
+ (children (current-node))
+ (list (normalize "indexentry"))))
+ (indexdivs (node-list-filter-by-gi
+ (children (current-node))
+ (list (normalize "indexdiv"))))
+ (entries (node-list-filter-by-gi
+ (children (current-node))
+ (list (normalize "indexentry")))))
+ (html-document
+ (with-mode head-title-mode
+ (literal (element-title-string (current-node))))
+ (make element gi: "DIV"
+ attributes: (list (list "CLASS" (gi)))
+ ($component-separator$)
+ ($component-title$)
+ (if (node-list-empty? indexdivs)
+ (empty-sosofo)
+ (make element gi: "P"
+ attributes: (list (list "CLASS" "INDEXDIV-QUICKLINKS"))
+ (with-mode indexdiv-quicklinks-mode
+ (process-node-list indexdivs))))
+ (process-node-list preamble)
+ (if (node-list-empty? entries)
+ (empty-sosofo)
+ (make element gi: "DL"
+ (process-node-list entries)))))))
+
+
+(mode indexdiv-quicklinks-mode
+ (element indexdiv
+ (make sequence
+ (make element gi: "A"
+ attributes: (list (list "HREF" (href-to (current-node))))
+ (element-title-sosofo))
+ (if (not (last-sibling?))
+ (literal " | ")
+ (literal "")))))
+
+
+;; Changed to strip and normalize index term content (overrides
+;; dbindex.dsl)
+(define (htmlindexterm)
+ (let* ((attr (gi (current-node)))
+ (content (data (current-node)))
+ (string (strip (normalize-whitespace content))) ;; changed
+ (sortas (attribute-string (normalize "sortas"))))
+ (make sequence
+ (make formatting-instruction data: attr)
+ (if sortas
+ (make sequence
+ (make formatting-instruction data: "[")
+ (make formatting-instruction data: sortas)
+ (make formatting-instruction data: "]"))
+ (empty-sosofo))
+ (make formatting-instruction data: " ")
+ (make formatting-instruction data: string)
+ (htmlnewline))))
+
+(define ($html-body-start$)
+ (if website-build
+ (make empty-element gi: "!--#include virtual=\"/resources/docs-header.html\"--")
+ (empty-sosofo)))
+
+(define ($html-body-end$)
+ (if website-build
+ (make empty-element gi: "!--#include virtual=\"/resources/docs-footer.html\"--")
+ (empty-sosofo)))
+
+]]>
+
+
+
+
+ (string->number (attribute-string (normalize "columns"))) 0)
+ (string->number (attribute-string (normalize "columns")))
+ 1)
+ 1))
+ (members (select-elements (children (current-node)) (normalize "member"))))
+ (cond
+ ((equal? type (normalize "inline"))
+ (if (equal? (gi (parent (current-node)))
+ (normalize "para"))
+ (process-children)
+ (make paragraph
+ space-before: %para-sep%
+ space-after: %para-sep%
+ start-indent: (inherited-start-indent))))
+ ((equal? type (normalize "vert"))
+ (my-simplelist-vert members))
+ ((equal? type (normalize "horiz"))
+ (simplelist-table 'row cols members)))))
+
+(element member
+ (let ((type (inherited-attribute-string (normalize "type"))))
+ (cond
+ ((equal? type (normalize "inline"))
+ (make sequence
+ (process-children)
+ (if (not (last-sibling?))
+ (literal ", ")
+ (literal ""))))
+ ((equal? type (normalize "vert"))
+ (make paragraph
+ space-before: 0pt
+ space-after: 0pt))
+ ((equal? type (normalize "horiz"))
+ (make paragraph
+ quadding: 'start
+ (process-children))))))
+
+
+;; Jadetex doesn't handle links to the content of tables, so
+;; indexterms that point to table entries will go nowhere. We fix
+;; this by pointing the index entry to the table itself instead, which
+;; should be equally useful in practice.
+
+(define (find-parent-table nd)
+ (let ((table (ancestor-member nd ($table-element-list$))))
+ (if (node-list-empty? table)
+ nd
+ table)))
+
+;; (The function below overrides the one in print/dbindex.dsl.)
+
+(define (indexentry-link nd)
+ (let* ((id (attribute-string (normalize "role") nd))
+ (prelim-target (find-indexterm id))
+ (target (find-parent-table prelim-target))
+ (preferred (not (node-list-empty?
+ (select-elements (children (current-node))
+ (normalize "emphasis")))))
+ (sosofo (if (node-list-empty? target)
+ (literal "?")
+ (make link
+ destination: (node-list-address target)
+ (with-mode toc-page-number-mode
+ (process-node-list target))))))
+ (if preferred
+ (make sequence
+ font-weight: 'bold
+ sosofo)
+ sosofo)))
+
+
+;; By default, the part and reference title pages get wrong page
+;; numbers: The first title page gets roman numerals carried over from
+;; preface/toc -- we want Arabic numerals. We also need to make sure
+;; that page-number-restart is set of #f explicitly, because otherwise
+;; it will carry over from the previous component, which is not good.
+;;
+;; (This looks worse than it is. It's copied from print/dbttlpg.dsl
+;; and common/dbcommon.dsl and modified in minor detail.)
+
+(define (first-part?)
+ (let* ((book (ancestor (normalize "book")))
+ (nd (ancestor-member (current-node)
+ (append
+ (component-element-list)
+ (division-element-list))))
+ (bookch (children book)))
+ (let loop ((nl bookch))
+ (if (node-list-empty? nl)
+ #f
+ (if (equal? (gi (node-list-first nl)) (normalize "part"))
+ (if (node-list=? (node-list-first nl) nd)
+ #t
+ #f)
+ (loop (node-list-rest nl)))))))
+
+(define (first-reference?)
+ (let* ((book (ancestor (normalize "book")))
+ (nd (ancestor-member (current-node)
+ (append
+ (component-element-list)
+ (division-element-list))))
+ (bookch (children book)))
+ (let loop ((nl bookch))
+ (if (node-list-empty? nl)
+ #f
+ (if (equal? (gi (node-list-first nl)) (normalize "reference"))
+ (if (node-list=? (node-list-first nl) nd)
+ #t
+ #f)
+ (loop (node-list-rest nl)))))))
+
+
+(define (part-titlepage elements #!optional (side 'recto))
+ (let ((nodelist (titlepage-nodelist
+ (if (equal? side 'recto)
+ (reference-titlepage-recto-elements)
+ (reference-titlepage-verso-elements))
+ elements))
+ ;; partintro is a special case...
+ (partintro (node-list-first
+ (node-list-filter-by-gi elements (list (normalize "partintro"))))))
+ (if (part-titlepage-content? elements side)
+ (make simple-page-sequence
+ page-n-columns: %titlepage-n-columns%
+ ;; Make sure that page number format is correct.
+ page-number-format: ($page-number-format$)
+ ;; Make sure that the page number is set to 1 if this is the
+ ;; first part in the book
+ page-number-restart?: (first-part?)
+ input-whitespace-treatment: 'collapse
+ use: default-text-style
+
+ ;; This hack is required for the RTF backend. If an external-graphic
+ ;; is the first thing on the page, RTF doesn't seem to do the right
+ ;; thing (the graphic winds up on the baseline of the first line
+ ;; of the page, left justified). This "one point rule" fixes
+ ;; that problem.
+ (make paragraph
+ line-spacing: 1pt
+ (literal ""))
+
+ (let loop ((nl nodelist) (lastnode (empty-node-list)))
+ (if (node-list-empty? nl)
+ (empty-sosofo)
+ (make sequence
+ (if (or (node-list-empty? lastnode)
+ (not (equal? (gi (node-list-first nl))
+ (gi lastnode))))
+ (part-titlepage-before (node-list-first nl) side)
+ (empty-sosofo))
+ (cond
+ ((equal? (gi (node-list-first nl)) (normalize "subtitle"))
+ (part-titlepage-subtitle (node-list-first nl) side))
+ ((equal? (gi (node-list-first nl)) (normalize "title"))
+ (part-titlepage-title (node-list-first nl) side))
+ (else
+ (part-titlepage-default (node-list-first nl) side)))
+ (loop (node-list-rest nl) (node-list-first nl)))))
+
+ (if (and %generate-part-toc%
+ %generate-part-toc-on-titlepage%
+ (equal? side 'recto))
+ (make display-group
+ (build-toc (current-node)
+ (toc-depth (current-node))))
+ (empty-sosofo))
+
+ ;; PartIntro is a special case
+ (if (and (equal? side 'recto)
+ (not (node-list-empty? partintro))
+ %generate-partintro-on-titlepage%)
+ ($process-partintro$ partintro #f)
+ (empty-sosofo)))
+
+ (empty-sosofo))))
+
+
+(define (reference-titlepage elements #!optional (side 'recto))
+ (let ((nodelist (titlepage-nodelist
+ (if (equal? side 'recto)
+ (reference-titlepage-recto-elements)
+ (reference-titlepage-verso-elements))
+ elements))
+ ;; partintro is a special case...
+ (partintro (node-list-first
+ (node-list-filter-by-gi elements (list (normalize "partintro"))))))
+ (if (reference-titlepage-content? elements side)
+ (make simple-page-sequence
+ page-n-columns: %titlepage-n-columns%
+ ;; Make sure that page number format is correct.
+ page-number-format: ($page-number-format$)
+ ;; Make sure that the page number is set to 1 if this is the
+ ;; first part in the book
+ page-number-restart?: (first-reference?)
+ input-whitespace-treatment: 'collapse
+ use: default-text-style
+
+ ;; This hack is required for the RTF backend. If an external-graphic
+ ;; is the first thing on the page, RTF doesn't seem to do the right
+ ;; thing (the graphic winds up on the baseline of the first line
+ ;; of the page, left justified). This "one point rule" fixes
+ ;; that problem.
+ (make paragraph
+ line-spacing: 1pt
+ (literal ""))
+
+ (let loop ((nl nodelist) (lastnode (empty-node-list)))
+ (if (node-list-empty? nl)
+ (empty-sosofo)
+ (make sequence
+ (if (or (node-list-empty? lastnode)
+ (not (equal? (gi (node-list-first nl))
+ (gi lastnode))))
+ (reference-titlepage-before (node-list-first nl) side)
+ (empty-sosofo))
+ (cond
+ ((equal? (gi (node-list-first nl)) (normalize "author"))
+ (reference-titlepage-author (node-list-first nl) side))
+ ((equal? (gi (node-list-first nl)) (normalize "authorgroup"))
+ (reference-titlepage-authorgroup (node-list-first nl) side))
+ ((equal? (gi (node-list-first nl)) (normalize "corpauthor"))
+ (reference-titlepage-corpauthor (node-list-first nl) side))
+ ((equal? (gi (node-list-first nl)) (normalize "editor"))
+ (reference-titlepage-editor (node-list-first nl) side))
+ ((equal? (gi (node-list-first nl)) (normalize "subtitle"))
+ (reference-titlepage-subtitle (node-list-first nl) side))
+ ((equal? (gi (node-list-first nl)) (normalize "title"))
+ (reference-titlepage-title (node-list-first nl) side))
+ (else
+ (reference-titlepage-default (node-list-first nl) side)))
+ (loop (node-list-rest nl) (node-list-first nl)))))
+
+ (if (and %generate-reference-toc%
+ %generate-reference-toc-on-titlepage%
+ (equal? side 'recto))
+ (make display-group
+ (build-toc (current-node)
+ (toc-depth (current-node))))
+ (empty-sosofo))
+
+ ;; PartIntro is a special case
+ (if (and (equal? side 'recto)
+ (not (node-list-empty? partintro))
+ %generate-partintro-on-titlepage%)
+ ($process-partintro$ partintro #f)
+ (empty-sosofo)))
+
+ (empty-sosofo))))
+
+]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/doc/switchover.sgml b/doc/switchover.sgml
new file mode 100644
index 00000000..4a07a4a1
--- /dev/null
+++ b/doc/switchover.sgml
@@ -0,0 +1,204 @@
+
+ Performing a switchover with repmgr
+
+ A typical use-case for replication is a combination of primary and standby
+ server, with the standby serving as a backup which can easily be activated
+ in case of a problem with the primary. Such an unplanned failover would
+ normally be handled by promoting the standby, after which an appropriate
+ action must be taken to restore the old primary.
+
+
+ In some cases however it's desirable to promote the standby in a planned
+ way, e.g. so maintenance can be performed on the primary; this kind of switchover
+ is supported by the command.
+
+
+ repmgr standby switchover differs from other &repmgr;
+ actions in that it also performs actions on another server (the demotion
+ candidate), which means passwordless SSH access is required to that server
+ from the one where repmgr standby switchover is executed.
+
+
+
+ repmgr standby switchover performs a relatively complex
+ series of operations on two servers, and should therefore be performed after
+ careful preparation and with adequate attention. In particular you should
+ be confident that your network environment is stable and reliable.
+
+
+ Additionally you should be sure that the current primary can be shut down
+ quickly and cleanly. In particular, access from applications should be
+ minimized or preferably blocked completely. Also be aware that if there
+ is a backlog of files waiting to be archived, PostgreSQL will not shut
+ down until archiving completes.
+
+
+ We recommend running repmgr standby switchover at the
+ most verbose logging level (--log-level=DEBUG --verbose)
+ and capturing all output to assist troubleshooting any problems.
+
+
+ Please also read carefully the sections and
+ `Caveats` below.
+
+
+
+
+
+ switchover
+ preparation
+
+ Preparing for switchover
+
+ As mentioned above, success of the switchover operation depends on &repmgr;
+ being able to shut down the current primary server quickly and cleanly.
+
+
+ Double-check which commands will be used to stop/start/restart the current
+ primary; on the primary execute:
+
+ repmgr -f /etc/repmgr.conf node service --list --action=stop
+ repmgr -f /etc/repmgr.conf node service --list --action=start
+ repmgr -f /etc/repmgr.conf node service --list --action=restart
+
+
+
+
+ On systemd systems we strongly recommend using the appropriate
+ systemctl commands (typically run via sudo) to ensure
+ systemd is informed about the status of the PostgreSQL service.
+
+
+
+ Check that access from applications is minimized or preferably blocked
+ completely, so applications are not unexpectedly interrupted.
+
+
+ Check there is no significant replication lag on standbys attached to the
+ current primary.
+
+
+ If WAL file archiving is set up, check that there is no backlog of files waiting
+ to be archived, as PostgreSQL will not finally shut down until all these have been
+ archived. If there is a backlog exceeding archive_ready_warning WAL files,
+ `repmgr` will emit a warning before attempting to perform a switchover; you can also check
+ manually with repmgr node check --archive-ready.
+
+
+ Ensure that repmgrd is *not* running anywhere to prevent it unintentionally
+ promoting a node.
+
+
+ Finally, consider executing repmgr standby switchover with the
+ --dry-run option; this will perform any necessary checks and inform you about
+ success/failure, and stop before the first actual command is run (which would be the shutdown of the
+ current primary). Example output:
+
+ $ repmgr standby switchover -f /etc/repmgr.conf --siblings-follow --dry-run
+ NOTICE: checking switchover on node "node2" (ID: 2) in --dry-run mode
+ INFO: SSH connection to host "localhost" succeeded
+ INFO: archive mode is "off"
+ INFO: replication lag on this standby is 0 seconds
+ INFO: all sibling nodes are reachable via SSH
+ NOTICE: local node "node2" (ID: 2) will be promoted to primary; current primary "node1" (ID: 1) will be demoted to standby
+ INFO: following shutdown command would be run on node "node1":
+ "pg_ctl -l /var/log/postgresql/startup.log -D '/var/lib/postgresql/data' -m fast -W stop"
+
+
+
+
+
+
+ switchover
+ execution
+
+ Executing the switchover command
+
+ To demonstrate switchover, we will assume a replication cluster with a
+ primary (node1) and one standby (node2);
+ after the switchover node2 should become the primary with
+ node1 following it.
+
+
+ The switchover command must be run from the standby which is to be promoted,
+ and in its simplest form looks like this:
+
+ $ repmgr -f /etc/repmgr.conf standby switchover
+ NOTICE: executing switchover on node "node2" (ID: 2)
+ INFO: searching for primary node
+ INFO: checking if node 1 is primary
+ INFO: current primary node is 1
+ INFO: SSH connection to host "localhost" succeeded
+ INFO: archive mode is "off"
+ INFO: replication lag on this standby is 0 seconds
+ NOTICE: local node "node2" (ID: 2) will be promoted to primary; current primary "node1" (ID: 1) will be demoted to standby
+ NOTICE: stopping current primary node "node1" (ID: 1)
+ NOTICE: issuing CHECKPOINT
+ DETAIL: executing server command "pg_ctl -l /var/log/postgres/startup.log -D '/var/lib/pgsql/data' -m fast -W stop"
+ INFO: checking primary status; 1 of 6 attempts
+ NOTICE: current primary has been cleanly shut down at location 0/3001460
+ NOTICE: promoting standby to primary
+ DETAIL: promoting server "node2" (ID: 2) using "pg_ctl -l /var/log/postgres/startup.log -w -D '/var/lib/pgsql/data' promote"
+ server promoting
+ NOTICE: STANDBY PROMOTE successful
+ DETAIL: server "node2" (ID: 2) was successfully promoted to primary
+ INFO: setting node 1's primary to node 2
+ NOTICE: starting server using "pg_ctl -l /var/log/postgres/startup.log -w -D '/var/lib/pgsql/data' restart"
+ NOTICE: NODE REJOIN successful
+ DETAIL: node 1 is now attached to node 2
+ NOTICE: switchover was successful
+ DETAIL: node "node2" is now primary
+ NOTICE: STANDBY SWITCHOVER is complete
+
+
+
+ The old primary is now replicating as a standby from the new primary, and the
+ cluster status will now look like this:
+
+ $ repmgr -f /etc/repmgr.conf cluster show
+ ID | Name | Role | Status | Upstream | Location | Connection string
+ ----+-------+---------+-----------+----------+----------+--------------------------------------
+ 1 | node1 | standby | running | node2 | default | host=node1 dbname=repmgr user=repmgr
+ 2 | node2 | primary | * running | | default | host=node2 dbname=repmgr user=repmgr
+
+
+
+
+
+ switchover
+ caveats
+
+ Caveats
+
+
+
+
+ If using PostgreSQL 9.3 or 9.4, you should ensure that the shutdown command
+ is configured to use PostgreSQL's fast shutdown mode (the default in 9.5
+ and later). If relying on pg_ctl to perform database server operations,
+ you should include -m fast in pg_ctl_options
+ in repmgr.conf.
+
+
+
+
+ pg_rewind *requires* that either wal_log_hints is enabled, or that
+ data checksums were enabled when the cluster was initialized. See the
+ pg_rewind documentation
+ for details.
+
+
+
+
+ repmgrd should not be running with failover=automatic set
+ in repmgr.conf when a switchover is carried out, otherwise the
+ repmgrd daemon may try to promote a standby by itself.
+
+
+
+
+
+ We hope to remove some of these restrictions in future versions of `repmgr`.
+
+
+
diff --git a/doc/upgrading-from-repmgr3.md b/doc/upgrading-from-repmgr3.md
index 04ec2c1f..da3eb1c9 100644
--- a/doc/upgrading-from-repmgr3.md
+++ b/doc/upgrading-from-repmgr3.md
@@ -4,10 +4,14 @@ Upgrading from repmgr 3
The upgrade process consists of two steps:
1) converting the repmgr.conf configuration files
- 2) upgrading the repmgr schema.
+ 2) upgrading the repmgr schema
+
+A script is provided to assist with converting `repmgr.conf`.
+
+The schema upgrade (which converts the `repmgr` metadata into
+a packaged PostgreSQL extension) is normally carried out
+automatically when the `repmgr` extension is created.
-Scripts are provided to assist both with converting repmgr.conf
-and upgrading the schema.
Converting repmgr.conf configuration files
------------------------------------------
@@ -57,11 +61,10 @@ is provided in `contrib/convert-config.pl`. Use like this:
$ ./convert-config.pl /etc/repmgr.conf
node_id=2
node_name=node2
- conninfo=host=localhost dbname=repmgr user=repmgr port=5602
- pg_ctl_options='-l /tmp/postgres.5602.log'
- pg_bindir=/home/barwick/devel/builds/HEAD/bin
+ conninfo=host=node2 dbname=repmgr user=repmgr connect_timeout=2
+ pg_ctl_options='-l /var/log/postgres/startup.log'
rsync_options=--exclude=postgresql.local.conf --archive
- log_level=DEBUG
+ log_level=INFO
pg_basebackup_options=--no-slot
data_directory=
@@ -80,7 +83,23 @@ Ensure `repmgrd` is not running, or any cron jobs which execute the
`repmgr` binary.
Install `repmgr4`; any `repmgr3` packages should be uninstalled
-(if not automatically installed already).
+(if not automatically uninstalled already).
+
+### Upgrading from repmgr 3.1.1 or earlier
+
+If your repmgr version is 3.1.1 or earlier, you will need to update
+the schema to the latest version in the 3.x series (3.3.2) before
+converting the installation to repmgr 4.
+
+To do this, apply the following upgrade scripts as appropriate for
+your current version:
+
+ - repmgr3.0_repmgr3.1.sql
+ - repmgr3.1.1_repmgr3.1.2.sql
+
+For more details see:
+
+ https://repmgr.org/release-notes-3.3.2.html#upgrading
### Manually create the repmgr extension
diff --git a/doc/upgrading-repmgr.sgml b/doc/upgrading-repmgr.sgml
new file mode 100644
index 00000000..d3c9ef09
--- /dev/null
+++ b/doc/upgrading-repmgr.sgml
@@ -0,0 +1,245 @@
+
+ Upgrading repmgr
+
+ &repmgr; is updated regularly with point releases (e.g. 4.0.1 to 4.0.2)
+ containing bugfixes and other minor improvements. Any substantial new
+ functionality will be included in a feature release (e.g. 4.0.x to 4.1.x).
+
+
+ &repmgr; is implemented as a PostgreSQL extension; to upgrade it, first
+ install the updated package (or compile the updated source), then in the
+ database where the &repmgr; extension is installed, execute
+ ALTER EXTENSION repmgr UPDATE.
+
+
+ If repmgrd is running, it may be necessary to restart
+ the PostgreSQL server if the upgrade contains changes to the shared object
+ file used by repmgrd; check the release notes for details.
+
+
+
+ Upgrading from repmgr 3
+
+ The upgrade process consists of two steps:
+
+
+
+ converting the repmgr.conf configuration files
+
+
+
+
+ upgrading the repmgr schema
+
+
+
+
+
+ A script is provided to assist with converting repmgr.conf.
+
+
+ The schema upgrade (which converts the &repmgr; metadata into
+ a packaged PostgreSQL extension) is normally carried out
+ automatically when the &repmgr; extension is created.
+
+
+ Converting repmgr.conf configuration files
+
+ With a completely new repmgr version, we've taken the opportunity
+ to rename some configuration items for
+ clarity and consistency, both between the configuration file and
+ the column names in repmgr.nodes
+ (e.g. node to node_id), and
+ also for consistency with PostgreSQL naming conventions
+ (e.g. loglevel to log_level).
+
+
+ Other configuration items have been changed to command line options,
+ and vice-versa, e.g. to avoid hard-coding items such as a node's
+ upstream ID, which might change over time.
+
+
+ &repmgr; will issue a warning about deprecated/altered options.
+
+
+ Changed parameters in "repmgr.conf"
+
+ The following parameters have been added:
+
+
+ data_directory: this is mandatory and must
+ contain the path to the node's data directory
+
+
+ monitoring_history: this replaces the
+ repmgrd command line option
+ --monitoring-history
+
+
+
+
+ The following parameters have been renamed:
+
+
+ Parameters renamed in repmgr4
+
+
+
+ repmgr3
+ repmgr4
+
+
+
+
+ node
+ node_id
+
+
+ loglevel
+ log_level
+
+
+ logfacility
+ log_facility
+
+
+ logfile
+ log_file
+
+
+ master_response_timeout
+ async_query_timeout
+
+
+
+
+
+ The following parameters have been removed:
+
+
+ cluster: is no longer required and will
+ be ignored.
+
+
+ upstream_node_id: is replaced by the
+ command-line parameter --upstream-node-id
+
+
+
+
+
+ Conversion script
+
+ To assist with conversion of repmgr.conf files, a Perl script
+ is provided in contrib/convert-config.pl.
+ Use like this:
+
+ $ ./convert-config.pl /etc/repmgr.conf
+ node_id=2
+ node_name=node2
+ conninfo=host=localhost dbname=repmgr user=repmgr connect_timeout=2
+ pg_ctl_options='-l /var/log/postgres/startup.log'
+ rsync_options=--exclude=postgresql.local.conf --archive
+ log_level=INFO
+ pg_basebackup_options=--no-slot
+ data_directory=
+
+
+ The converted file is printed to STDOUT and the original file is not
+ changed.
+
+
+ Please note that the parameter data_directory must
+ be provided; if not already present, the conversion script will add an empty
+ placeholder parameter.
+
+
+
+
+ Upgrading the repmgr schema
+
+ Ensure repmgrd is not running, or any cron jobs which execute the
+ repmgr binary.
+
+
+ Install repmgr4; any repmgr3 packages should be uninstalled
+ (if not automatically uninstalled already).
+
+
+ Upgrading from repmgr 3.1.1 or earlier
+
+ If your repmgr version is 3.1.1 or earlier, you will need to update
+ the schema to the latest version in the 3.x series (3.3.2) before
+ converting the installation to repmgr 4.
+
+
+ To do this, apply the following upgrade scripts as appropriate for
+ your current version:
+
+
+
+ repmgr3.0_repmgr3.1.sql
+
+
+ repmgr3.1.1_repmgr3.1.2.sql
+
+
+
+
+ For more details see the
+ repmgr 3 upgrade notes.
+
+
+
+ Manually create the repmgr extension
+
+ In the database used by the existing &repmgr; installation, execute:
+
+ CREATE EXTENSION repmgr FROM unpackaged;
+
+
+ This will move and convert all objects from the existing schema
+ into the new, standard repmgr schema.
+
+
+ there must be only one schema matching repmgr_% in the
+ database, otherwise this step may not work.
+
+
+
+ Re-register each node
+
+ This is necessary to update the repmgr metadata with some additional items.
+
+
+ On the primary node, execute e.g.
+
+ repmgr primary register -f /etc/repmgr.conf --force
+
+
+ On each standby node, execute e.g.
+
+ repmgr standby register -f /etc/repmgr.conf --force
+
+
+ Check the data is updated as expected by examining the repmgr.nodes
+ table; restart repmgrd if required.
+
+
+ The original repmgr_$cluster schema can be dropped at any time.
+
+
+
+ If you don't care about any data from the existing &repmgr; installation,
+ (e.g. the contents of the events and monitoring
+ tables), the manual CREATE EXTENSION step can be skipped; just re-register
+ each node, starting with the primary node, and the repmgr extension will be
+ automatically created.
+
+
+
+
+
+
+
+
diff --git a/doc/version.sgml b/doc/version.sgml
new file mode 100644
index 00000000..f26be77a
--- /dev/null
+++ b/doc/version.sgml
@@ -0,0 +1 @@
+
diff --git a/doc/website-docs.css b/doc/website-docs.css
new file mode 100644
index 00000000..8a707c2d
--- /dev/null
+++ b/doc/website-docs.css
@@ -0,0 +1,469 @@
+/* PostgreSQL.org Documentation Style */
+
+/* requires global.css, table.css and text.css to be loaded before this file! */
+body {
+ font-family: verdana, sans-serif;
+ font-size: 76%;
+ background: url("/resources/background.png") repeat-x scroll left top transparent;
+ padding: 15px 4%;
+ margin: 0;
+}
+
+/* monospace font size fix */
+pre, code, kbd, samp, tt {
+ font-family: monospace,monospace;
+ font-size: 1em;
+}
+
+div.NAVHEADER table {
+ margin-left: 0;
+}
+
+/* Container Definitions */
+
+#docContainerWrap {
+ text-align: center; /* Win IE5 */
+}
+
+#docContainer {
+ margin: 0 auto;
+ width: 90%;
+ padding-bottom: 2em;
+ display: block;
+ text-align: left; /* Win IE5 */
+}
+
+#docHeader {
+ background-image: url("/media/img/docs/bg_hdr.png");
+ height: 83px;
+ margin: 0px;
+ padding: 0px;
+ display: block;
+}
+
+#docHeaderLogo {
+ position: relative;
+ width: 206px;
+ height: 83px;
+ border: 0px;
+ padding: 0px;
+ margin: 0 0 0 20px;
+}
+
+#docHeaderLogo img {
+ border: 0px;
+}
+
+#docNavSearchContainer {
+ padding-bottom: 2px;
+}
+
+#docNav, #docVersions {
+ position: relative;
+ text-align: left;
+ margin-left: 10px;
+ margin-top: 5px;
+ color: #666;
+ font-size: 0.95em;
+}
+
+#docSearch {
+ position: relative;
+ text-align: right;
+ padding: 0;
+ margin: 0;
+ color: #666;
+}
+
+#docTextSize {
+ text-align: right;
+ white-space: nowrap;
+ margin-top: 7px;
+ font-size: 0.95em;
+}
+
+#docSearch form {
+ position: relative;
+ top: 5px;
+ right: 0;
+ margin: 0; /* need for IE 5.5 OSX */
+ text-align: right; /* need for IE 5.5 OSX */
+ white-space: nowrap; /* for Opera */
+}
+
+#docSearch form label {
+ color: #666;
+ font-size: 0.95em;
+}
+
+#docSearch form input {
+ font-size: 0.95em;
+}
+
+#docSearch form #submit {
+ font-size: 0.95em;
+ background: #7A7A7A;
+ color: #fff;
+ border: 1px solid #7A7A7A;
+ padding: 1px 4px;
+}
+
+#docSearch form #q {
+ width: 170px;
+ font-size: 0.95em;
+ border: 1px solid #7A7A7A;
+ background: #E1E1E1;
+ color: #000000;
+ padding: 2px;
+}
+
+.frmDocSearch {
+ padding: 0;
+ margin: 0;
+ display: inline;
+}
+
+.inpDocSearch {
+ padding: 0;
+ margin: 0;
+ color: #000;
+}
+
+#docContent {
+ position: relative;
+ margin-left: 10px;
+ margin-right: 10px;
+ margin-top: 40px;
+}
+
+#docFooter {
+ position: relative;
+ font-size: 0.9em;
+ color: #666;
+ line-height: 1.3em;
+ margin-left: 10px;
+ margin-right: 10px;
+}
+
+#docComments {
+ margin-top: 10px;
+}
+
+#docClear {
+ clear: both;
+ margin: 0;
+ padding: 0;
+}
+
+/* Heading Definitions */
+
+h1, h2, h3 {
+ font-weight: bold;
+ margin-top: 2ex;
+ color: #444;
+}
+
+h1 {
+ font-size: 1.4em;
+}
+
+h2 {
+ font-size: 1.2em !important;
+}
+
+h3 {
+ font-size: 1.1em;
+}
+
+h1 a:hover,
+h2 a:hover,
+h3 a:hover,
+h4 a:hover {
+ color: #444;
+ text-decoration: none;
+}
+
+/* Text Styles */
+
+div.SECT2 {
+ margin-top: 4ex;
+}
+
+div.SECT3 {
+ margin-top: 3ex;
+ margin-left: 3ex;
+}
+
+.txtCurrentLocation {
+ font-weight: bold;
+}
+
+p, ol, ul, li {
+ line-height: 1.5em;
+}
+
+.txtCommentsWrap {
+ border: 2px solid #F5F5F5;
+ width: 100%;
+}
+
+.txtCommentsContent {
+ background: #F5F5F5;
+ padding: 3px;
+}
+
+.txtCommentsPoster {
+ float: left;
+}
+
+.txtCommentsDate {
+ float: right;
+}
+
+.txtCommentsComment {
+ padding: 3px;
+}
+
+#docContainer pre code,
+#docContainer pre tt,
+#docContainer pre pre,
+#docContainer tt tt,
+#docContainer tt code,
+#docContainer tt pre {
+ font-size: 1em;
+}
+
+pre.LITERALLAYOUT,
+.SCREEN,
+.SYNOPSIS,
+.PROGRAMLISTING,
+.REFSYNOPSISDIV p,
+table.CAUTION,
+table.WARNING,
+blockquote.NOTE,
+blockquote.TIP,
+table.CALSTABLE {
+ -moz-box-shadow: 3px 3px 5px #DFDFDF;
+ -webkit-box-shadow: 3px 3px 5px #DFDFDF;
+ -khtml-box-shadow: 3px 3px 5px #DFDFDF;
+ -o-box-shadow: 3px 3px 5px #DFDFDF;
+ box-shadow: 3px 3px 5px #DFDFDF;
+}
+
+pre.LITERALLAYOUT,
+.SCREEN,
+.SYNOPSIS,
+.PROGRAMLISTING,
+.REFSYNOPSISDIV p,
+table.CAUTION,
+table.WARNING,
+blockquote.NOTE,
+blockquote.TIP {
+ color: black;
+ border-width: 1px;
+ border-style: solid;
+ padding: 2ex;
+ margin: 2ex 0 2ex 2ex;
+ overflow: auto;
+ -moz-border-radius: 8px;
+ -webkit-border-radius: 8px;
+ -khtml-border-radius: 8px;
+ border-radius: 8px;
+}
+
+pre.LITERALLAYOUT,
+pre.SYNOPSIS,
+pre.PROGRAMLISTING,
+.REFSYNOPSISDIV p,
+.SCREEN {
+ border-color: #CFCFCF;
+ background-color: #F7F7F7;
+}
+
+blockquote.NOTE,
+blockquote.TIP {
+ border-color: #DBDBCC;
+ background-color: #EEEEDD;
+ padding: 14px;
+ width: 572px;
+}
+
+blockquote.NOTE,
+blockquote.TIP,
+table.CAUTION,
+table.WARNING {
+ margin: 4ex auto;
+}
+
+blockquote.NOTE p,
+blockquote.TIP p {
+ margin: 0;
+}
+
+blockquote.NOTE pre,
+blockquote.NOTE code,
+blockquote.TIP pre,
+blockquote.TIP code {
+ margin-left: 0;
+ margin-right: 0;
+ -moz-box-shadow: none;
+ -webkit-box-shadow: none;
+ -khtml-box-shadow: none;
+ -o-box-shadow: none;
+ box-shadow: none;
+}
+
+.emphasis,
+.c2 {
+ font-weight: bold;
+}
+
+.REPLACEABLE {
+ font-style: italic;
+}
+
+/* Table Styles */
+
+table {
+ margin-left: 2ex;
+}
+
+table.CALSTABLE td,
+table.CALSTABLE th,
+table.CAUTION td,
+table.CAUTION th,
+table.WARNING td,
+table.WARNING th {
+ border-style: solid;
+}
+
+table.CALSTABLE,
+table.CAUTION,
+table.WARNING {
+ border-spacing: 0;
+ border-collapse: collapse;
+}
+
+table.CALSTABLE
+{
+ margin: 2ex 0 2ex 2ex;
+ background-color: #E0ECEF;
+ border: 2px solid #A7C6DF;
+}
+
+table.CALSTABLE tr:hover td
+{
+ background-color: #EFEFEF;
+}
+
+table.CALSTABLE td {
+ background-color: #FFF;
+}
+
+table.CALSTABLE td,
+table.CALSTABLE th {
+ border: 1px solid #A7C6DF;
+ padding: 0.5ex 0.5ex;
+}
+
+table.CAUTION,
+table.WARNING {
+ border-collapse: separate;
+ display: block;
+ padding: 0;
+ max-width: 600px;
+}
+
+table.CAUTION {
+ background-color: #F5F5DC;
+ border-color: #DEDFA7;
+}
+
+table.WARNING {
+ background-color: #FFD7D7;
+ border-color: #DF421E;
+}
+
+table.CAUTION td,
+table.CAUTION th,
+table.WARNING td,
+table.WARNING th {
+ border-width: 0;
+ padding-left: 2ex;
+ padding-right: 2ex;
+}
+
+table.CAUTION td,
+table.CAUTION th {
+ border-color: #F3E4D5
+}
+
+table.WARNING td,
+table.WARNING th {
+ border-color: #FFD7D7;
+}
+
+td.c1,
+td.c2,
+td.c3,
+td.c4,
+td.c5,
+td.c6 {
+ font-size: 1.1em;
+ font-weight: bold;
+ border-bottom: 0px solid #FFEFEF;
+ padding: 1ex 2ex 0;
+}
+
+/* Link Styles */
+
+#docNav a {
+ font-weight: bold;
+}
+
+a:link,
+a:visited,
+a:active,
+a:hover {
+ text-decoration: underline;
+}
+
+a:link,
+a:active {
+ color:#0066A2;
+}
+
+a:visited {
+ color:#004E66;
+}
+
+a:hover {
+ color:#000000;
+}
+
+#docFooter a:link,
+#docFooter a:visited,
+#docFooter a:active {
+ color:#666;
+}
+
+#docContainer code.FUNCTION tt {
+ font-size: 1em;
+}
+
+div.header {
+ color: #444;
+ margin-top: 5px;
+}
+
+div.footer {
+ text-align: center;
+ background-image: url("/resources/footerl.png"), url("/resources/footerr.png"), url("/resources/footerc.png");
+ background-position: left top, right top, center top;
+ background-repeat: no-repeat, no-repeat, repeat-x;
+ padding-top: 45px;
+}
+
+img {
+ border-style: none;
+}
diff --git a/repmgr_version.h.in b/repmgr_version.h.in
index e3979e5c..4047f56b 100644
--- a/repmgr_version.h.in
+++ b/repmgr_version.h.in
@@ -1,25 +1,3 @@
-/*
- * repmgr_version.h
- * Copyright (c) 2ndQuadrant, 2010-2017
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see .
- */
-
-#ifndef _VERSION_H_
-#define _VERSION_H_
-
#define REPMGR_VERSION_DATE ""
#define REPMGR_VERSION "4.0beta1"
-#endif
diff --git a/sql/.gitignore b/sql/.gitignore
new file mode 100644
index 00000000..2cf3f9b8
--- /dev/null
+++ b/sql/.gitignore
@@ -0,0 +1,2 @@
+# Might be created by repmgr3
+/repmgr_funcs.sql