Compare commits

..

1 Commits

Author SHA1 Message Date
Gianni Ciolli
d52b045113 Factoring out switchover sanity checks
We refactor switchover sanity checks into a separate function, using a
dedicated structure to pass variables across.

Signed-off-by: Gianni Ciolli <gianni.ciolli@enterprisedb.com>
2022-11-09 21:10:17 +00:00
22 changed files with 504 additions and 1016 deletions

View File

@@ -1,77 +1,37 @@
### name: SonarQube Scan
# Foundation-security SonarQube workflow
# version: 2.1
###
name: Foundation-Security/SonarQube Scan
on: on:
push:
tags:
- "**"
branches:
- "*main*"
- "*master*"
- "*STABLE*"
pull_request: pull_request:
types: [opened, synchronize, reopened] push:
branches: branches: [ master ]
- "**" workflow_dispatch:
workflow_dispatch:
inputs:
ref:
description: "Branch to scan"
required: true
default: "main"
jobs: jobs:
SonarQube-Scan: sonarQube:
name: SonarQube Scan Job name: SonarQube-Job
if: ${{ github.actor != 'dependabot[bot]' }} runs-on: ubuntu-latest
permissions:
id-token: write
contents: read
runs-on: ubuntu-22.04
steps: steps:
- name: Checkout source repository for dispatch runs
id: checkout-source-dispatch - name: Checkout source repo
if: github.event_name == 'workflow_dispatch' uses: actions/checkout@v1
uses: actions/checkout@v4
with: with:
repository: ${{ github.repository }} ref: '${{ github.head_ref }}'
ref: ${{ inputs.ref }}
path: source - name: Checkout GitHub Action Repo
token: ${{ secrets.GH_SLONIK }} uses: actions/checkout@master
- name: Checkout source repository for non-dispatch runs
id: checkout-source
if: github.event_name != 'workflow_dispatch'
uses: actions/checkout@v4
with: with:
repository: ${{ github.repository }} repository: EnterpriseDB/edb-github-actions.git
ref: ${{ github.ref }} ref: master
path: source
token: ${{ secrets.GH_SLONIK }} token: ${{ secrets.GH_SLONIK }}
path: .github/actions/edb-github-actions
- name: Checkout foundation-security repository
id: checkout-foundation-security
uses: actions/checkout@v4
with:
repository: EnterpriseDB/foundation-security
ref: v2
path: foundation-security
token: ${{ secrets.GH_SLONIK }}
- name: SonarQube Scan - name: SonarQube Scan
id: call-sq-composite uses: ./.github/actions/edb-github-actions/sonarqube
uses: ./foundation-security/actions/sonarqube
with: with:
github-token: ${{ secrets.GH_SLONIK }} REPO_NAME: '${{github.event.repository.name}}'
github-ref: ${{ github.ref_name }} SONAR_PROJECT_KEY: EnterpriseDB_repmgr
sonarqube-url: ${{ vars.SQ_URL }} SONAR_URL: '${{secrets.SONARQUBE_URL}}'
sonarqube-token: ${{ secrets.SONARQUBE_TOKEN }} SONAR_LOGIN: '${{secrets.SONARQUBE_LOGIN}}'
project-name: ${{ github.event.repository.name }} PULL_REQUEST_KEY: '${{github.event.number}}'
pull-request-key: ${{ github.event.number }} PULL_REQUEST_BRANCH: '${{github.head_ref}}'
pull-request-branch: ${{ github.head_ref }} PULL_REQUEST_BASE_BRANCH: '${{github.base_ref}}'
pull-request-base-branch: ${{ github.base_ref }} REPO_DEFAULT_BRANCH: '${{github.event.repository.default_branch}}'
foundation-security-sonarqube-token: ${{ secrets.FOUNDATION_SECURITY_SONARQUBE_TOKEN }} REPO_EXCLUDE_FILES: '*properties*,**/src/test/**/*,**/*.sql,**/docs/**/*,**/*/*.java'
cloudsmith-token: ${{ secrets.CLOUDSMITH_READ_ALL }}

18
.github/workflows/sonarqube/configure-env.sh vendored Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Prepare the CI environment for a SonarQube build-wrapper scan:
# install build dependencies and the PostgreSQL 13 development packages,
# then configure and build repmgr under the SonarQube build wrapper.
#
# Fail fast: every step below is required for the scan to succeed.
set -e

DEBIAN_FRONTEND=noninteractive sudo apt-get -y install debhelper curl autoconf zlib1g-dev \
libedit-dev libxml2-dev libxslt1-dev libkrb5-dev libssl-dev libpam0g-dev systemtap-sdt-dev \
libselinux1-dev build-essential bison apt-utils lsb-release devscripts \
software-properties-common git shellcheck flex

# Add the PGDG repository so the postgresql-13 packages are available.
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install libpq-dev postgresql-13 postgresql-server-dev-13

# Export PG_CONFIG *before* running ./configure so the build is pinned to the
# intended PostgreSQL installation. (Previously it was exported after
# ./configure had already run, where it could only influence the later make.)
export PG_CONFIG=/usr/bin/pg_config
./configure

# NOTE(review): hardcoded build-wrapper path — assumes the runner image ships
# it at this location; confirm against the CI image definition.
/home/buildfarm/sonar/depends/build-wrapper-linux-x86/build-wrapper-linux-x86-64 --out-dir build_wrapper_output_directory make

View File

@@ -1,9 +1,3 @@
5.4.1 2023-??-??
repmgrd: ensure witness node metadata is updated (Ian)
5.4.0 2023-03-16
Support cloning replicas using pg-backup-api
5.3.3 2022-10-17 5.3.3 2022-10-17
Support for PostgreSQL added Support for PostgreSQL added
repmgrd: ensure event notification script is called for event repmgrd: ensure event notification script is called for event

View File

@@ -22,7 +22,7 @@ GIT_WORK_TREE=${repmgr_abs_srcdir}
GIT_DIR=${repmgr_abs_srcdir}/.git GIT_DIR=${repmgr_abs_srcdir}/.git
export GIT_DIR export GIT_DIR
export GIT_WORK_TREE export GIT_WORK_TREE
PG_LDFLAGS=-lcurl -ljson-c
include $(PGXS) include $(PGXS)
-include ${repmgr_abs_srcdir}/Makefile.custom -include ${repmgr_abs_srcdir}/Makefile.custom

View File

@@ -66,7 +66,7 @@ REPMGR_CLIENT_OBJS = repmgr-client.o \
repmgr-action-primary.o repmgr-action-standby.o repmgr-action-witness.o \ repmgr-action-primary.o repmgr-action-standby.o repmgr-action-witness.o \
repmgr-action-cluster.o repmgr-action-node.o repmgr-action-service.o repmgr-action-daemon.o \ repmgr-action-cluster.o repmgr-action-node.o repmgr-action-service.o repmgr-action-daemon.o \
configdata.o configfile.o configfile-scan.o log.o strutil.o controldata.o dirutil.o compat.o \ configdata.o configfile.o configfile-scan.o log.o strutil.o controldata.o dirutil.o compat.o \
dbutils.o sysutils.o pgbackupapi.o dbutils.o sysutils.o
REPMGRD_OBJS = repmgrd.o repmgrd-physical.o configdata.o configfile.o configfile-scan.o log.o \ REPMGRD_OBJS = repmgrd.o repmgrd-physical.o configdata.o configfile.o configfile-scan.o log.o \
dbutils.o strutil.o controldata.o compat.o sysutils.o dbutils.o strutil.o controldata.o compat.o sysutils.o

View File

@@ -8,7 +8,7 @@ replication, and perform administrative tasks such as failover or switchover
operations. operations.
The most recent `repmgr` version (5.3.2) supports all PostgreSQL versions from The most recent `repmgr` version (5.3.2) supports all PostgreSQL versions from
9.5 to 15. PostgreSQL 9.4 is also supported, with some restrictions. 9.5 to 14. PostgreSQL 9.4 is also supported, with some restrictions.
`repmgr` is distributed under the GNU GPL 3 and maintained by EnterpriseDB. `repmgr` is distributed under the GNU GPL 3 and maintained by EnterpriseDB.
@@ -40,6 +40,7 @@ Directories
- `contrib/`: additional utilities - `contrib/`: additional utilities
- `doc/`: DocBook-based documentation files - `doc/`: DocBook-based documentation files
- `expected/`: expected regression test output - `expected/`: expected regression test output
- `scripts/`: example scripts
- `sql/`: regression test input - `sql/`: regression test input
@@ -69,6 +70,7 @@ news are always welcome.
Thanks from the repmgr core team. Thanks from the repmgr core team.
* Ian Barwick
* Jaime Casanova * Jaime Casanova
* Abhijit Menon-Sen * Abhijit Menon-Sen
* Simon Riggs * Simon Riggs

View File

@@ -291,46 +291,6 @@ struct ConfigFileSetting config_file_settings[] =
{}, {},
{} {}
}, },
/* pg_backupapi_backup_id*/
{
"pg_backupapi_backup_id",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_backup_id },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_backup_id) },
{}
},
/* pg_backupapi_host*/
{
"pg_backupapi_host",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_host },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_host) },
{}
},
/* pg_backupapi_node_name */
{
"pg_backupapi_node_name",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_node_name },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_node_name) },
{}
},
/* pg_backupapi_remote_ssh_command */
{
"pg_backupapi_remote_ssh_command",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_remote_ssh_command },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_remote_ssh_command) },
{}
},
/* ======================= /* =======================
* standby follow settings * standby follow settings

View File

@@ -164,10 +164,6 @@ typedef struct
char archive_cleanup_command[MAXLEN]; char archive_cleanup_command[MAXLEN];
bool use_primary_conninfo_password; bool use_primary_conninfo_password;
char passfile[MAXPGPATH]; char passfile[MAXPGPATH];
char pg_backupapi_backup_id[NAMEDATALEN];
char pg_backupapi_host[NAMEDATALEN];
char pg_backupapi_node_name[NAMEDATALEN];
char pg_backupapi_remote_ssh_command[MAXLEN];
/* standby promote settings */ /* standby promote settings */
int promote_check_timeout; int promote_check_timeout;

View File

@@ -60,7 +60,6 @@ AC_SUBST(vpath_build)
AC_CHECK_PROG(HAVE_GNUSED,gnused,yes,no) AC_CHECK_PROG(HAVE_GNUSED,gnused,yes,no)
AC_CHECK_PROG(HAVE_GSED,gsed,yes,no) AC_CHECK_PROG(HAVE_GSED,gsed,yes,no)
AC_CHECK_PROG(HAVE_SED,sed,yes,no) AC_CHECK_PROG(HAVE_SED,sed,yes,no)
AC_CHECK_PROG(HAVE_FLEX,flex,yes,no)
if test "$HAVE_GNUSED" = yes; then if test "$HAVE_GNUSED" = yes; then
SED=gnused SED=gnused
@@ -73,25 +72,6 @@ else
fi fi
AC_SUBST(SED) AC_SUBST(SED)
AS_IF([test x"$HAVE_FLEX" != x"yes"], AC_MSG_ERROR([flex should be installed first]))
#Checking libraries
GENERIC_LIB_FAILED_MSG="library should be installed"
AC_CHECK_LIB(selinux, is_selinux_enabled, [],
[AC_MSG_ERROR(['selinux' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(lz4, LZ4_compress_default, [],
[AC_MSG_ERROR(['Z4' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(xslt, xsltCleanupGlobals, [],
[AC_MSG_ERROR(['xslt' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(pam, pam_start, [],
[AC_MSG_ERROR(['pam' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(gssapi_krb5, gss_init_sec_context, [],
[AC_MSG_ERROR([gssapi_krb5 $GENERIC_LIB_FAILED_MSG])])
AC_CONFIG_FILES([Makefile]) AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES([Makefile.global]) AC_CONFIG_FILES([Makefile.global])

View File

@@ -16,44 +16,8 @@
</para> </para>
<!-- remember to update the release date in ../repmgr_version.h.in --> <!-- remember to update the release date in ../repmgr_version.h.in -->
<sect1 id="release-5.4.1">
<title id="release-current">Release 5.4.1</title>
<para><emphasis>??? ?? ??????, 202?</emphasis></para>
<para>
&repmgr; 5.4.1 is a minor release providing ...
</para>
<sect2>
<title>Bug fixes</title>
<para>
<itemizedlist>
<listitem>
<para>
&repmgrd;: ensure witness node metadata is updated if the primary
node changed while the witness &repmgrd; was not running.
</para>
</listitem>
</itemizedlist>
</para>
</sect2>
</sect1>
<sect1 id="release-5.4.0">
<title>Release 5.4.0</title>
<para><emphasis>Thu 15 March, 2023</emphasis></para>
<para>
&repmgr; 5.4.0 is a major release.
</para>
<para>
This release provides support for cloning standbys using backups taken with <ulink url="http://www.pgbarman.org">barman</ulink>
with the use of <ulink url="https://github.com/EnterpriseDB/pg-backup-api">pg-backup-api</ulink>.
</para>
<para>
Minor fixes to the documentation.
</para>
</sect1>
<sect1 id="release-5.3.3"> <sect1 id="release-5.3.3">
<title>Release 5.3.3</title> <title id="release-current">Release 5.3.3</title>
<para><emphasis>Mon 17 October, 2022</emphasis></para> <para><emphasis>Mon 17 October, 2022</emphasis></para>
<para> <para>
&repmgr; 5.3.3 is a minor release providing support for &repmgr; 5.3.3 is a minor release providing support for

View File

@@ -225,109 +225,6 @@ description = "Main cluster"
</note> </note>
</sect2> </sect2>
<sect2 id="cloning-from-barman-pg_backupapi-mode" xreflabel="Using Barman through its API (pg-backup-api)">
<title>Using Barman through its API (pg-backup-api)</title>
<indexterm>
<primary>cloning</primary>
<secondary>pg-backup-api</secondary>
</indexterm>
<para>
You can find information on how to install and setup pg-backup-api in
<ulink url="https://www.enterprisedb.com/docs/supported-open-source/barman/pg-backup-api/">the pg-backup-api
documentation</ulink>.
</para>
<para>
This mode (`pg-backupapi`) was introduced in v5.4.0 as a way to further integrate with Barman letting Barman
handle the restore. This also reduces the number of ssh keys that need to be shared between the backup and postgres nodes.
As long as you have access to the API service by HTTP calls, you could perform recoveries right away.
You just need to instruct Barman through the API which backup you need and on which node the backup needs
to be restored.
</para>
<para>
In order to enable <literal>pg_backupapi mode</literal> support for <command>repmgr standby clone</command>,
you need the following lines in repmgr.conf:
<itemizedlist spacing="compact" mark="bullet">
<listitem><para>pg_backupapi_host: Where pg-backup-api is hosted</para></listitem>
<listitem><para>pg_backupapi_node_name: Name of the server as understood by Barman</para></listitem>
<listitem><para>pg_backupapi_remote_ssh_command: How Barman will be connecting as to the node</para></listitem>
<listitem><para>pg_backupapi_backup_id: ID of the existing backup you need to restore</para></listitem>
</itemizedlist>
This is an example of how repmgr.conf would look like:
<programlisting>
pg_backupapi_host = '192.168.122.154'
pg_backupapi_node_name = 'burrito'
pg_backupapi_remote_ssh_command = 'ssh john_doe@192.168.122.1'
pg_backupapi_backup_id = '20230223T093201'
</programlisting>
</para>
<para>
<literal>pg_backupapi_host</literal> is the variable name that enables this mode, and when you set it,
all the rest of the above variables are required. Also, remember that this service is just an interface
between Barman and repmgr, hence if something fails during a recovery, you should check Barman's logs upon
why the process couldn't finish properly.
</para>
<note>
<simpara>
Although Barman lets you define shortcuts such as "latest" or "oldest", they are not supported for the
time being in pg-backup-api. These shortcuts will be supported in a future release.
</simpara>
</note>
<para>
This is a real example of repmgr's output cloning with the API. Note that during this operation, we stopped
the service for a little while and repmgr had to retry but that doesn't affect the final outcome. The primary
is listening on localhost's port 6001:
<programlisting>
$ repmgr -f ~/nodes/node_3/repmgr.conf standby clone -U repmgr -p 6001 -h localhost
NOTICE: destination directory "/home/mario/nodes/node_3/data" provided
INFO: Attempting to use `pg_backupapi` new restore mode
INFO: connecting to source node
DETAIL: connection string is: user=repmgr port=6001 host=localhost
DETAIL: current installation size is 8541 MB
DEBUG: 1 node records returned by source node
DEBUG: connecting to: "user=repmgr dbname=repmgr host=localhost port=6001 connect_timeout=2 fallback_application_name=repmgr options=-csearch_path="
DEBUG: upstream_node_id determined as 1
INFO: Attempting to use `pg_backupapi` new restore mode
INFO: replication slot usage not requested; no replication slot will be set up for this standby
NOTICE: starting backup (using pg_backupapi)...
INFO: Success creating the task: operation id '20230309T150647'
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
Incorrect reply received for that operation ID.
INFO: Retrying...
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status IN_PROGRESS
INFO: status DONE
NOTICE: standby clone (from pg_backupapi) complete
NOTICE: you can now start your PostgreSQL server
HINT: for example: pg_ctl -D /home/mario/nodes/node_3/data start
HINT: after starting the server, you need to register this standby with "repmgr standby register"
</programlisting>
</para>
</sect2> <!--END cloning-from-barman-pg_backupapi-mode !-->
</sect1> </sect1>
<sect1 id="cloning-replication-slots" xreflabel="Cloning and replication slots"> <sect1 id="cloning-replication-slots" xreflabel="Cloning and replication slots">

View File

@@ -10,8 +10,8 @@
<note> <note>
<simpara> <simpara>
This section documents a subset of optional configuration settings; for a full This section documents a subset of optional configuration settings; for a full
and annotated view of all configuration options see the for a full and annotated view of all configuration options see the
<ulink url="https://raw.githubusercontent.com/EnterpriseDB/repmgr/master/repmgr.conf.sample">sample repmgr.conf file</ulink> see the <ulink url="https://raw.githubusercontent.com/EnterpriseDB/repmgr/master/repmgr.conf.sample">sample repmgr.conf file</ulink>
</simpara> </simpara>
</note> </note>

View File

@@ -29,7 +29,7 @@
<listitem> <listitem>
<simpara> <simpara>
option to execute custom scripts (&quot;<link linkend="event-notifications">event notifications</link>&quot;) option to execute custom scripts (&quot;<link linkend="event-notifications">event notifications</link>
at different points in the failover sequence at different points in the failover sequence
</simpara> </simpara>
</listitem> </listitem>

View File

@@ -49,6 +49,5 @@
#define ERR_NODE_STATUS 25 #define ERR_NODE_STATUS 25
#define ERR_REPMGRD_PAUSE 26 #define ERR_REPMGRD_PAUSE 26
#define ERR_REPMGRD_SERVICE 27 #define ERR_REPMGRD_SERVICE 27
#define ERR_PGBACKUPAPI_SERVICE 28
#endif /* _ERRCODE_H_ */ #endif /* _ERRCODE_H_ */

View File

@@ -1,147 +0,0 @@
/*
* pgbackupapi.c
* Copyright (c) EnterpriseDB Corporation, 2010-2021
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
#include <curl/curl.h>
#include <json-c/json.h>
#include "repmgr.h"
#include "pgbackupapi.h"
/*
 * libcurl write callback for "list operations on a server".
 *
 * Parses the JSON body returned by pg-backup-api, copies the raw text of the
 * "operations" array into `buffer` (at most MAX_BUFFER_LENGTH - 2 characters,
 * always NUL-terminated) and prints each operation to stdout.
 *
 * Returns size * nmemb so libcurl treats the data as consumed.
 *
 * NOTE(review): libcurl does not NUL-terminate `content`; this assumes the
 * reply arrives as one terminated chunk, as the original code did — confirm.
 * Fixes vs. original: NULL parse / missing-key results are no longer
 * dereferenced (was UB), the copy is explicitly NUL-terminated (strncpy does
 * not terminate on truncation), and the parsed object is released
 * (json_object_put) instead of leaking per call.
 */
size_t receive_operations_cb(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t max_chars_to_copy = MAX_BUFFER_LENGTH - 2;
	json_object *root = json_tokener_parse(content);
	json_object *operations = NULL;

	if (root != NULL)
		operations = json_object_object_get(root, "operations");

	if (operations != NULL)
	{
		const char *ops_text = json_object_get_string(operations);
		size_t ops_len = strlen(ops_text);
		size_t i;

		if (ops_len < max_chars_to_copy)
			max_chars_to_copy = ops_len;

		memcpy(buffer, ops_text, max_chars_to_copy);
		buffer[max_chars_to_copy] = '\0';

		fprintf(stdout, "Success! The following operations were found\n");

		for (i = 0; i < (size_t) json_object_array_length(operations); i++)
		{
			json_object *value = json_object_array_get_idx(operations, i);

			printf("%s\n", json_object_get_string(value));
		}
	}

	if (root != NULL)
		json_object_put(root);

	return size * nmemb;
}
/*
 * Build the base URL for the pg-backup-api "operations" endpoint of the
 * server named in `task`, e.g. "http://<host>:7480/servers/<node>/operations".
 *
 * Returns a heap-allocated string which the caller must free(), or NULL if
 * allocation fails (the original dereferenced an unchecked malloc result).
 */
char * define_base_url(operation_task *task) {
	const char *format = "http://%s:7480/servers/%s/operations";
	char *url = malloc(MAX_BUFFER_LENGTH);

	if (url == NULL)
		return NULL;

	/* snprintf's size argument already reserves room for the terminator */
	snprintf(url, MAX_BUFFER_LENGTH, format, task->host, task->node_name);

	return url;
}
/*
 * Request the list of operations registered for this server on pg-backup-api.
 * The reply is handled (parsed and printed) by receive_operations_cb; the
 * local buffer only receives a truncated copy and is otherwise unused here.
 *
 * Returns the CURLcode from curl_easy_perform, or CURLE_OUT_OF_MEMORY if the
 * URL could not be allocated.
 */
CURLcode get_operations_on_server(CURL *curl, operation_task *task) {
	char buffer[MAX_BUFFER_LENGTH] = "";	/* zeroed: callback may not write */
	char *url = define_base_url(task);
	CURLcode ret;

	if (url == NULL)
		return CURLE_OUT_OF_MEMORY;

	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operations_cb);
	/* pass the array itself (decays to char *), not &buffer (char (*)[N]) */
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, buffer);
	curl_easy_setopt(curl, CURLOPT_URL, url);

	ret = curl_easy_perform(curl);

	free(url);
	return ret;
}
/*
 * libcurl write callback: extract "operation_id" from the JSON reply and copy
 * it into `buffer`, always NUL-terminated.
 *
 * NOTE(review): assumes `buffer` holds at least MAX_BUFFER_LENGTH - 1 bytes,
 * matching the original's implicit bound — confirm against callers.
 * Fixes vs. original: guards a NULL parse result, terminates the copy
 * (strncpy does not on truncation), and releases the parsed object.
 */
size_t receive_operation_id(void *content, size_t size, size_t nmemb, char *buffer) {
	json_object *root = json_tokener_parse(content);
	json_object *operation = NULL;

	if (root != NULL)
		operation = json_object_object_get(root, "operation_id");

	if (operation != NULL)
	{
		/* bounded, always-terminated copy */
		snprintf(buffer, MAX_BUFFER_LENGTH - 1, "%s", json_object_get_string(operation));
	}

	if (root != NULL)
		json_object_put(root);

	return size * nmemb;
}
/*
 * POST a new operation (e.g. a recovery) to pg-backup-api.
 *
 * The JSON payload is built from the fields of `task`; the operation id
 * assigned by the server is written into task->operation_id by the
 * receive_operation_id callback.
 *
 * NOTE(review): assumes task->operation_type / backup_id /
 * remote_ssh_command / destination_directory are non-NULL — confirm callers.
 *
 * Returns the CURLcode from curl_easy_perform, or CURLE_OUT_OF_MEMORY if the
 * URL could not be allocated. Fixes vs. original: the JSON object and the
 * curl header list are now released (both leaked on every call).
 */
CURLcode create_new_task(CURL *curl, operation_task *task) {
	PQExpBufferData payload;
	char *url = define_base_url(task);
	CURLcode ret;
	json_object *root;
	struct curl_slist *chunk = NULL;

	if (url == NULL)
		return CURLE_OUT_OF_MEMORY;

	root = json_object_new_object();
	json_object_object_add(root, "operation_type", json_object_new_string(task->operation_type));
	json_object_object_add(root, "backup_id", json_object_new_string(task->backup_id));
	json_object_object_add(root, "remote_ssh_command", json_object_new_string(task->remote_ssh_command));
	json_object_object_add(root, "destination_directory", json_object_new_string(task->destination_directory));

	/* payload takes its own copy, so `root` need not outlive the transfer */
	initPQExpBuffer(&payload);
	appendPQExpBufferStr(&payload, json_object_to_json_string(root));

	chunk = curl_slist_append(chunk, "Content-type: application/json");
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload.data);
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operation_id);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, task->operation_id);

	ret = curl_easy_perform(curl);

	curl_slist_free_all(chunk);
	json_object_put(root);
	free(url);
	termPQExpBuffer(&payload);

	return ret;
}
/*
 * libcurl write callback: extract "status" from the JSON reply and copy it
 * into `buffer`, always NUL-terminated. If the reply does not carry a status
 * (unknown operation id, malformed body), report it on stderr and leave an
 * empty string in `buffer`.
 *
 * Fixes vs. original: guards a NULL parse result, terminates the truncated
 * copy (strncpy does not), and releases the parsed object.
 */
size_t receive_operation_status(void *content, size_t size, size_t nmemb, char *buffer) {
	json_object *root = json_tokener_parse(content);
	json_object *status = NULL;

	if (root != NULL)
		status = json_object_object_get(root, "status");

	if (status != NULL)
	{
		snprintf(buffer, MAX_BUFFER_LENGTH - 1, "%s", json_object_get_string(status));
	}
	else
	{
		fprintf(stderr, "Incorrect reply received for that operation ID.\n");
		buffer[0] = '\0';
	}

	if (root != NULL)
		json_object_put(root);

	return size * nmemb;
}
/*
 * Query pg-backup-api for the status of one operation; the status string is
 * written into task->operation_status by the receive_operation_status
 * callback.
 *
 * Returns the CURLcode from curl_easy_perform, or CURLE_OUT_OF_MEMORY on
 * allocation failure.
 *
 * Fixes vs. original: the URL was built by strcat'ing "/" and the operation
 * id onto the fixed MAX_BUFFER_LENGTH allocation from define_base_url, which
 * overflows the heap buffer whenever base + id exceed it. The URL is now
 * sized from the actual string lengths.
 */
CURLcode get_status_of_operation(CURL *curl, operation_task *task) {
	CURLcode ret;
	char *base = define_base_url(task);
	char *url;
	size_t url_size;

	if (base == NULL)
		return CURLE_OUT_OF_MEMORY;

	/* base + '/' + operation_id + NUL */
	url_size = strlen(base) + 1 + strlen(task->operation_id) + 1;
	url = malloc(url_size);
	if (url == NULL)
	{
		free(base);
		return CURLE_OUT_OF_MEMORY;
	}
	snprintf(url, url_size, "%s/%s", base, task->operation_id);
	free(base);

	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operation_status);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, task->operation_status);

	ret = curl_easy_perform(curl);

	free(url);
	return ret;
}

View File

@@ -1,46 +0,0 @@
/*
 * pgbackupapi.h
 * Copyright (c) EnterpriseDB Corporation, 2010-2021
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* include guard added: the original header had none */
#ifndef PGBACKUPAPI_H
#define PGBACKUPAPI_H

#include <curl/curl.h>
#include <json-c/json.h>

/* Everything needed to describe one pg-backup-api operation request */
typedef struct operation_task {
	char *backup_id;             /* ID of the backup to restore */
	char *destination_directory; /* where the backup is restored to */
	char *operation_type;        /* e.g. "recovery" */
	char *operation_id;          /* filled in by the server on creation */
	char *operation_status;      /* filled in by status polling */
	char *remote_ssh_command;    /* how Barman connects to the target node */
	char *host;                  /* host running pg-backup-api */
	char *node_name;             /* server name as known to Barman */
} operation_task;

/* Default buffer size used by most operations */
#define MAX_BUFFER_LENGTH 72

/* Callbacks used by libcurl to receive data from pg-backup-api endpoints */
size_t receive_operations_cb(void *content, size_t size, size_t nmemb, char *buffer);
size_t receive_operation_id(void *content, size_t size, size_t nmemb, char *buffer);
size_t receive_operation_status(void *content, size_t size, size_t nmemb, char *buffer);

/* High-level helpers implementing the communication with the API */
CURLcode get_operations_on_server(CURL *curl, operation_task *task);
CURLcode create_new_task(CURL *curl, operation_task *task);
CURLcode get_status_of_operation(CURL *curl, operation_task *task);

/* Helper building the base endpoint URL; caller frees the result */
char * define_base_url(operation_task *task);

#endif /* PGBACKUPAPI_H */

File diff suppressed because it is too large Load Diff

View File

@@ -19,6 +19,77 @@
#ifndef _REPMGR_ACTION_STANDBY_H_ #ifndef _REPMGR_ACTION_STANDBY_H_
#define _REPMGR_ACTION_STANDBY_H_ #define _REPMGR_ACTION_STANDBY_H_
/*
 * Aggregate counters describing the state of a demotion candidate's sibling
 * nodes, gathered during switchover sanity checks.
 */
typedef struct
{
	int reachable_sibling_node_count;            /* siblings we could connect to */
	int reachable_sibling_nodes_with_slot_count; /* reachable siblings using a replication slot */
	int unreachable_sibling_node_count;          /* siblings we could not connect to */
	int min_required_wal_senders;                /* wal senders needed on the promotion candidate */
	int min_required_free_slots;                 /* free replication slots needed on the promotion candidate */
} SiblingNodeStats;

/* Positional zero-initializer matching the field order above */
#define T_SIBLING_NODES_STATS_INITIALIZER { \
	0, \
	0, \
	0, \
	0, \
	0 \
}
/*
 * State shared across the switchover sanity-check phases, factored out of
 * do_standby_switchover() so it can be passed to helper functions as a unit.
 */
typedef struct
{
	RepmgrdInfo **repmgrd_info;          /* per-node repmgrd state */
	int			repmgrd_running_count;   /* number of nodes where repmgrd is running */
	bool		dry_run_success;         /* overall --dry-run outcome */
	/* store list of configuration files on the demotion candidate */
	KeyValueList remote_config_files;
	/* used for handling repmgrd pause/unpause */
	NodeInfoList all_nodes;
	NodeInfoList sibling_nodes;
	SiblingNodeStats sibling_nodes_stats;
	t_event_info event_info;
	int			remote_node_id;
	t_node_info remote_node_record;
	t_node_info local_node_record;
	char		remote_conninfo[MAXCONNINFO];
	bool		switchover_success;
	RecoveryType recovery_type;
	PGconn	   *superuser_conn;
	/* the remote server is the primary to be demoted */
	char		remote_host[MAXLEN];
	int			remote_repmgr_version;
	PGconn	   *remote_conn;
	PGconn	   *local_conn;
} t_standby_switchover_rec;

/*
 * Positional initializer: entries MUST stay in field order.
 *
 * Fix: the original omitted an entry for repmgrd_running_count, shifting
 * every subsequent value by one field (`true` landed on the int counter and
 * the brace-enclosed KeyValueList initializer on the bool dry_run_success,
 * which is a constraint violation).
 */
#define T_STANDBY_SWITCHOVER_INITIALIZER { \
	NULL, \
	0, \
	true, \
	{NULL, NULL}, \
	T_NODE_INFO_LIST_INITIALIZER, \
	T_NODE_INFO_LIST_INITIALIZER, \
	T_SIBLING_NODES_STATS_INITIALIZER, \
	T_EVENT_INFO_INITIALIZER, \
	UNKNOWN_NODE_ID, \
	T_NODE_INFO_INITIALIZER, \
	T_NODE_INFO_INITIALIZER, \
	"", \
	true, \
	RECTYPE_UNKNOWN, \
	NULL, \
	"", \
	UNKNOWN_REPMGR_VERSION_NUM, \
	NULL, \
	NULL \
}
extern void do_standby_clone(void); extern void do_standby_clone(void);
extern void do_standby_register(void); extern void do_standby_register(void);
extern void do_standby_unregister(void); extern void do_standby_unregister(void);
@@ -30,6 +101,6 @@ extern void do_standby_help(void);
extern bool do_standby_follow_internal(PGconn *primary_conn, PGconn *follow_target_conn, t_node_info *follow_target_node_record, PQExpBufferData *output, int general_error_code, int *error_code); extern bool do_standby_follow_internal(PGconn *primary_conn, PGconn *follow_target_conn, t_node_info *follow_target_node_record, PQExpBufferData *output, int general_error_code, int *error_code);
void check_pg_backupapi_standby_clone_options(void);
#endif /* _REPMGR_ACTION_STANDBY_H_ */ #endif /* _REPMGR_ACTION_STANDBY_H_ */

View File

@@ -193,8 +193,7 @@ typedef struct
typedef enum typedef enum
{ {
barman, barman,
pg_basebackup, pg_basebackup
pg_backupapi
} standy_clone_mode; } standy_clone_mode;
typedef enum typedef enum

View File

@@ -3096,14 +3096,9 @@ get_standby_clone_mode(void)
if (*config_file_options.barman_host != '\0' && runtime_options.without_barman == false) if (*config_file_options.barman_host != '\0' && runtime_options.without_barman == false)
mode = barman; mode = barman;
else { else
if (*config_file_options.pg_backupapi_host != '\0') { mode = pg_basebackup;
log_info("Attempting to use `pg_backupapi` new restore mode");
mode = pg_backupapi;
}
else
mode = pg_basebackup;
}
return mode; return mode;
} }

View File

@@ -116,7 +116,6 @@
#define DEFAULT_STANDBY_FOLLOW_TIMEOUT 30 /* seconds */ #define DEFAULT_STANDBY_FOLLOW_TIMEOUT 30 /* seconds */
#define DEFAULT_STANDBY_FOLLOW_RESTART false #define DEFAULT_STANDBY_FOLLOW_RESTART false
#define DEFAULT_SHUTDOWN_CHECK_TIMEOUT 60 /* seconds */ #define DEFAULT_SHUTDOWN_CHECK_TIMEOUT 60 /* seconds */
#define DEFAULT_STANDBY_PG_BACKUPAPI_OP_TYPE "recovery"
#define DEFAULT_STANDBY_RECONNECT_TIMEOUT 60 /* seconds */ #define DEFAULT_STANDBY_RECONNECT_TIMEOUT 60 /* seconds */
#define DEFAULT_NODE_REJOIN_TIMEOUT 60 /* seconds */ #define DEFAULT_NODE_REJOIN_TIMEOUT 60 /* seconds */
#define DEFAULT_ARCHIVE_READY_WARNING 16 /* WAL files */ #define DEFAULT_ARCHIVE_READY_WARNING 16 /* WAL files */

View File

@@ -2394,17 +2394,6 @@ monitor_streaming_witness(void)
terminate(ERR_BAD_CONFIG); terminate(ERR_BAD_CONFIG);
} }
/*
* It's possible that the primary changed while the witness repmgrd was not
* running. This does not affect the functionality of the witness repmgrd, but
* does mean outdated node metadata will be displayed, so update that.
*/
if (local_node_info.upstream_node_id != primary_node_id)
{
update_node_record_set_upstream(primary_conn, local_node_info.node_id, primary_node_id);
local_node_info.upstream_node_id = primary_node_id;
}
initPQExpBuffer(&event_details); initPQExpBuffer(&event_details);
appendPQExpBuffer(&event_details, appendPQExpBuffer(&event_details,