Compare commits

..

1 Commit

Author SHA1 Message Date
Gianni Ciolli
d52b045113 Factoring out switchover sanity checks
We refactor switchover sanity checks into a separate function, using a
dedicated structure to pass variables across.

Signed-off-by: Gianni Ciolli <gianni.ciolli@enterprisedb.com>
2022-11-09 21:10:17 +00:00
16 changed files with 507 additions and 788 deletions

37
.github/workflows/sonarqube-scan.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
# SonarQube static-analysis workflow: runs on PRs, pushes to master,
# and manual dispatch. Indentation restored to valid YAML structure.
name: SonarQube Scan
on:
  pull_request:
  push:
    branches: [ master ]
  workflow_dispatch:
jobs:
  sonarQube:
    name: SonarQube-Job
    runs-on: ubuntu-latest
    steps:
      - name: Checkout source repo
        uses: actions/checkout@v1
        with:
          ref: '${{ github.head_ref }}'
      - name: Checkout GitHub Action Repo
        uses: actions/checkout@master
        with:
          repository: EnterpriseDB/edb-github-actions.git
          ref: master
          token: ${{ secrets.GH_SLONIK }}
          path: .github/actions/edb-github-actions
      - name: SonarQube Scan
        uses: ./.github/actions/edb-github-actions/sonarqube
        with:
          REPO_NAME: '${{github.event.repository.name}}'
          SONAR_PROJECT_KEY: EnterpriseDB_repmgr
          SONAR_URL: '${{secrets.SONARQUBE_URL}}'
          SONAR_LOGIN: '${{secrets.SONARQUBE_LOGIN}}'
          PULL_REQUEST_KEY: '${{github.event.number}}'
          PULL_REQUEST_BRANCH: '${{github.head_ref}}'
          PULL_REQUEST_BASE_BRANCH: '${{github.base_ref}}'
          REPO_DEFAULT_BRANCH: '${{github.event.repository.default_branch}}'
          REPO_EXCLUDE_FILES: '*properties*,**/src/test/**/*,**/docs/**/*,**/*.sql,**/*/*.java'

18
.github/workflows/sonarqube/configure-env.sh vendored Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Prepare an Ubuntu runner for the SonarQube build-wrapper scan:
# install build dependencies, the PGDG apt repository, PostgreSQL 13
# dev packages, then configure and build repmgr under the wrapper.

# Build toolchain and library dependencies required by ./configure.
DEBIAN_FRONTEND=noninteractive sudo apt-get -y install debhelper curl autoconf zlib1g-dev \
libedit-dev libxml2-dev libxslt1-dev libkrb5-dev libssl-dev libpam0g-dev systemtap-sdt-dev \
libselinux1-dev build-essential bison apt-utils lsb-release devscripts \
software-properties-common git shellcheck flex
# Add the PostgreSQL Global Development Group apt repository and its key.
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
# PostgreSQL 13 server headers are needed to build repmgr as an extension.
sudo apt-get -y install libpq-dev postgresql-13 postgresql-server-dev-13
./configure
export PG_CONFIG=/usr/bin/pg_config
# Run make under the Sonar build wrapper so the scan can observe compilation.
/home/buildfarm/sonar/depends/build-wrapper-linux-x86/build-wrapper-linux-x86-64 --out-dir build_wrapper_output_directory make

View File

@@ -22,7 +22,7 @@ GIT_WORK_TREE=${repmgr_abs_srcdir}
GIT_DIR=${repmgr_abs_srcdir}/.git
export GIT_DIR
export GIT_WORK_TREE
PG_LDFLAGS=-lcurl -ljson-c
include $(PGXS)
-include ${repmgr_abs_srcdir}/Makefile.custom

View File

@@ -66,7 +66,7 @@ REPMGR_CLIENT_OBJS = repmgr-client.o \
repmgr-action-primary.o repmgr-action-standby.o repmgr-action-witness.o \
repmgr-action-cluster.o repmgr-action-node.o repmgr-action-service.o repmgr-action-daemon.o \
configdata.o configfile.o configfile-scan.o log.o strutil.o controldata.o dirutil.o compat.o \
dbutils.o sysutils.o pgbackupapi.o
dbutils.o sysutils.o
REPMGRD_OBJS = repmgrd.o repmgrd-physical.o configdata.o configfile.o configfile-scan.o log.o \
dbutils.o strutil.o controldata.o compat.o sysutils.o

View File

@@ -291,46 +291,6 @@ struct ConfigFileSetting config_file_settings[] =
{},
{}
},
/* pg_backupapi_backup_id*/
{
"pg_backupapi_backup_id",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_backup_id },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_backup_id) },
{}
},
/* pg_backupapi_host*/
{
"pg_backupapi_host",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_host },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_host) },
{}
},
/* pg_backupapi_node_name */
{
"pg_backupapi_node_name",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_node_name },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_node_name) },
{}
},
/* pg_backupapi_remote_ssh_command */
{
"pg_backupapi_remote_ssh_command",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_remote_ssh_command },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_remote_ssh_command) },
{}
},
/* =======================
* standby follow settings

View File

@@ -164,10 +164,6 @@ typedef struct
char archive_cleanup_command[MAXLEN];
bool use_primary_conninfo_password;
char passfile[MAXPGPATH];
char pg_backupapi_backup_id[NAMEDATALEN];
char pg_backupapi_host[NAMEDATALEN];
char pg_backupapi_node_name[NAMEDATALEN];
char pg_backupapi_remote_ssh_command[MAXLEN];
/* standby promote settings */
int promote_check_timeout;

View File

@@ -60,7 +60,6 @@ AC_SUBST(vpath_build)
AC_CHECK_PROG(HAVE_GNUSED,gnused,yes,no)
AC_CHECK_PROG(HAVE_GSED,gsed,yes,no)
AC_CHECK_PROG(HAVE_SED,sed,yes,no)
AC_CHECK_PROG(HAVE_FLEX,flex,yes,no)
if test "$HAVE_GNUSED" = yes; then
SED=gnused
@@ -73,25 +72,6 @@ else
fi
AC_SUBST(SED)
AS_IF([test x"$HAVE_FLEX" != x"yes"], AC_MSG_ERROR([flex should be installed first]))
#Checking libraries
GENERIC_LIB_FAILED_MSG="library should be installed"
AC_CHECK_LIB(selinux, is_selinux_enabled, [],
[AC_MSG_ERROR(['selinux' $GENERIC_LIB_FAILED_MSG])])
dnl Fix: error message previously said 'Z4' instead of the library name 'lz4'
AC_CHECK_LIB(lz4, LZ4_compress_default, [],
[AC_MSG_ERROR(['lz4' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(xslt, xsltCleanupGlobals, [],
[AC_MSG_ERROR(['xslt' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(pam, pam_start, [],
[AC_MSG_ERROR(['pam' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(gssapi_krb5, gss_init_sec_context, [],
[AC_MSG_ERROR([gssapi_krb5 $GENERIC_LIB_FAILED_MSG])])
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES([Makefile.global])

View File

@@ -29,7 +29,7 @@
<listitem>
<simpara>
option to execute custom scripts (&quot;<link linkend="event-notifications">event notifications</link>&quot;)
option to execute custom scripts (&quot;<link linkend="event-notifications">event notifications</link>
at different points in the failover sequence
</simpara>
</listitem>

View File

@@ -49,6 +49,5 @@
#define ERR_NODE_STATUS 25
#define ERR_REPMGRD_PAUSE 26
#define ERR_REPMGRD_SERVICE 27
#define ERR_PGBACKUPAPI_SERVICE 28
#endif /* _ERRCODE_H_ */

View File

@@ -1,147 +0,0 @@
/*
* pgbackupapi.c
* Copyright (c) EnterpriseDB Corporation, 2010-2021
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
#include <curl/curl.h>
#include <json-c/json.h>
#include "repmgr.h"
#include "pgbackupapi.h"
/*
 * libcurl write callback: parse the JSON reply listing the operations
 * available on the server, copy the raw "operations" value into "buffer"
 * (bounded by MAX_BUFFER_LENGTH) and print each entry to stdout.
 *
 * Returns the number of bytes consumed (size * nmemb), or 0 on error so
 * that libcurl aborts the transfer.
 */
size_t receive_operations_cb(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t content_length = size * nmemb;
	size_t max_chars_to_copy = MAX_BUFFER_LENGTH - 2;
	size_t operation_length;
	size_t i;
	const char *operations_str;
	char *terminated;
	json_object *root;
	json_object *operations;

	/*
	 * libcurl does not NUL-terminate the data passed to write callbacks,
	 * but json_tokener_parse() expects a C string, so make a terminated
	 * copy first.
	 */
	terminated = malloc(content_length + 1);
	if (terminated == NULL)
		return 0;
	memcpy(terminated, content, content_length);
	terminated[content_length] = '\0';

	root = json_tokener_parse(terminated);
	free(terminated);
	if (root == NULL) {
		fprintf(stderr, "Unable to parse server reply as JSON\n");
		return 0;
	}

	operations = json_object_object_get(root, "operations");
	if (operations == NULL) {
		fprintf(stderr, "No \"operations\" entry found in server reply\n");
		json_object_put(root);
		return 0;
	}

	operations_str = json_object_get_string(operations);
	operation_length = strlen(operations_str);
	if (operation_length < max_chars_to_copy)
		max_chars_to_copy = operation_length;

	strncpy(buffer, operations_str, max_chars_to_copy);
	/* strncpy() does not guarantee NUL-termination */
	buffer[max_chars_to_copy] = '\0';

	fprintf(stdout, "Success! The following operations were found\n");
	for (i = 0; i < json_object_array_length(operations); i++) {
		json_object *value = json_object_array_get_idx(operations, i);

		printf("%s\n", json_object_get_string(value));
	}

	/* json_tokener_parse() returns an owned reference; release it */
	json_object_put(root);

	return content_length;
}
/*
 * Build the base URL of the pg-backup-api "operations" endpoint for the
 * given host and node name.
 *
 * Returns a malloc'd string the caller must free(), or NULL if memory
 * allocation fails.
 */
char * define_base_url(operation_task *task) {
	const char *format = "http://%s:7480/servers/%s/operations";
	char *url = malloc(MAX_BUFFER_LENGTH);

	if (url == NULL)
		return NULL;

	/* snprintf() already reserves room for the terminating NUL */
	snprintf(url, MAX_BUFFER_LENGTH, format, task->host, task->node_name);

	return url;
}
/*
 * Query the pg-backup-api server for the list of available operations.
 * The reply is handled by receive_operations_cb(), which also prints the
 * operations to stdout.
 */
CURLcode get_operations_on_server(CURL *curl, operation_task *task) {
	char buffer[MAX_BUFFER_LENGTH] = "";
	char *url = define_base_url(task);
	CURLcode ret;

	if (url == NULL)
		return CURLE_OUT_OF_MEMORY;

	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operations_cb);
	/* the callback writes the raw "operations" string into this buffer */
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, buffer);
	curl_easy_setopt(curl, CURLOPT_URL, url);

	ret = curl_easy_perform(curl);
	free(url);

	return ret;
}
/*
 * libcurl write callback: extract "operation_id" from the JSON reply and
 * copy it into "buffer" (bounded by MAX_BUFFER_LENGTH).
 *
 * Returns the number of bytes consumed, or 0 on error so libcurl aborts
 * the transfer.
 */
size_t receive_operation_id(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t content_length = size * nmemb;
	char *terminated;
	json_object *root;
	json_object *operation;

	/* curl data is not NUL-terminated; json_tokener_parse() needs a C string */
	terminated = malloc(content_length + 1);
	if (terminated == NULL)
		return 0;
	memcpy(terminated, content, content_length);
	terminated[content_length] = '\0';

	root = json_tokener_parse(terminated);
	free(terminated);
	if (root == NULL)
		return 0;

	operation = json_object_object_get(root, "operation_id");
	if (operation != NULL) {
		strncpy(buffer, json_object_get_string(operation), MAX_BUFFER_LENGTH - 2);
		/* strncpy() does not guarantee NUL-termination */
		buffer[MAX_BUFFER_LENGTH - 2] = '\0';
	}

	/* release the owned reference returned by json_tokener_parse() */
	json_object_put(root);

	return content_length;
}
/*
 * Start a new operation on the pg-backup-api server by POSTing a JSON
 * payload built from the task's fields.  On success the server's reply is
 * processed by receive_operation_id(), which stores the new operation ID
 * into task->operation_id.
 */
CURLcode create_new_task(CURL *curl, operation_task *task) {
	PQExpBufferData payload;
	char *url = define_base_url(task);
	CURLcode ret;
	json_object *root;
	struct curl_slist *chunk = NULL;

	if (url == NULL)
		return CURLE_OUT_OF_MEMORY;

	root = json_object_new_object();
	json_object_object_add(root, "operation_type", json_object_new_string(task->operation_type));
	json_object_object_add(root, "backup_id", json_object_new_string(task->backup_id));
	json_object_object_add(root, "remote_ssh_command", json_object_new_string(task->remote_ssh_command));
	json_object_object_add(root, "destination_directory", json_object_new_string(task->destination_directory));

	initPQExpBuffer(&payload);
	appendPQExpBufferStr(&payload, json_object_to_json_string(root));
	/* the payload buffer now owns a copy of the JSON text; drop the object */
	json_object_put(root);

	chunk = curl_slist_append(chunk, "Content-type: application/json");
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload.data);
	/* make HTTP error statuses (>= 400) fail the transfer */
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operation_id);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, task->operation_id);

	ret = curl_easy_perform(curl);

	/* release everything acquired above: header list, URL, payload buffer */
	curl_slist_free_all(chunk);
	free(url);
	termPQExpBuffer(&payload);

	return ret;
}
/*
 * libcurl write callback: extract "status" from the JSON reply and copy
 * it into "buffer" (bounded by MAX_BUFFER_LENGTH).  An empty string is
 * stored when the reply does not contain a status.
 *
 * Returns the number of bytes consumed, or 0 on error so libcurl aborts
 * the transfer.
 */
size_t receive_operation_status(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t content_length = size * nmemb;
	char *terminated;
	json_object *root;
	json_object *status;

	/* curl data is not NUL-terminated; json_tokener_parse() needs a C string */
	terminated = malloc(content_length + 1);
	if (terminated == NULL)
		return 0;
	memcpy(terminated, content, content_length);
	terminated[content_length] = '\0';

	root = json_tokener_parse(terminated);
	free(terminated);
	if (root == NULL)
		return 0;

	status = json_object_object_get(root, "status");
	if (status != NULL) {
		strncpy(buffer, json_object_get_string(status), MAX_BUFFER_LENGTH - 2);
		/* strncpy() does not guarantee NUL-termination */
		buffer[MAX_BUFFER_LENGTH - 2] = '\0';
	}
	else {
		fprintf(stderr, "Incorrect reply received for that operation ID.\n");
		buffer[0] = '\0';
	}

	/* release the owned reference returned by json_tokener_parse() */
	json_object_put(root);

	return content_length;
}
/*
 * Query the status of a previously created operation.  The reply is
 * handled by receive_operation_status(), which stores the status string
 * into task->operation_status.
 */
CURLcode get_status_of_operation(CURL *curl, operation_task *task) {
	CURLcode ret;
	char url[MAX_BUFFER_LENGTH * 2];
	char *base_url = define_base_url(task);

	if (base_url == NULL)
		return CURLE_OUT_OF_MEMORY;

	/*
	 * The previous code strcat()ed "/<operation_id>" onto the
	 * MAX_BUFFER_LENGTH-byte allocation returned by define_base_url(),
	 * which could overflow the heap buffer; build the full URL with a
	 * bounded snprintf() instead.  CURLOPT_URL copies the string, so a
	 * stack buffer is safe here.
	 */
	snprintf(url, sizeof(url), "%s/%s", base_url, task->operation_id);
	free(base_url);

	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operation_status);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, task->operation_status);

	ret = curl_easy_perform(curl);

	return ret;
}

View File

@@ -1,46 +0,0 @@
/*
* pgbackupapi.h
* Copyright (c) EnterpriseDB Corporation, 2010-2021
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <curl/curl.h>
#include <json-c/json.h>
/*
 * Parameters describing one pg-backup-api operation; all fields are plain
 * C strings supplied (and owned) by the caller.
 */
typedef struct operation_task {
char *backup_id;
char *destination_directory;
char *operation_type;
char *operation_id;
char *operation_status;
char *remote_ssh_command;
char *host;
char *node_name;
} operation_task;
/* Default buffer size used for URLs and copied JSON values */
#define MAX_BUFFER_LENGTH 72
/* libcurl write callbacks that receive data from pg-backup-api endpoints */
size_t receive_operations_cb(void *content, size_t size, size_t nmemb, char *buffer);
size_t receive_operation_id(void *content, size_t size, size_t nmemb, char *buffer);
size_t receive_operation_status(void *content, size_t size, size_t nmemb, char *buffer);
/* Functions implementing the logic of communicating with the API */
CURLcode get_operations_on_server(CURL *curl, operation_task *task);
CURLcode create_new_task(CURL *curl, operation_task *task);
CURLcode get_status_of_operation(CURL *curl, operation_task *task);
/* Helper building the base URL used by the functions above; caller frees */
char * define_base_url(operation_task *task);

File diff suppressed because it is too large Load Diff

View File

@@ -19,6 +19,77 @@
#ifndef _REPMGR_ACTION_STANDBY_H_
#define _REPMGR_ACTION_STANDBY_H_
/*
 * Counters gathered about the sibling nodes of a switchover candidate,
 * used by the switchover sanity checks.
 */
typedef struct
{
int reachable_sibling_node_count;
int reachable_sibling_nodes_with_slot_count;
int unreachable_sibling_node_count;
int min_required_wal_senders;
int min_required_free_slots;
} SiblingNodeStats;
/* One zero per member of SiblingNodeStats, in declaration order */
#define T_SIBLING_NODES_STATS_INITIALIZER { \
0, \
0, \
0, \
0, \
0 \
}
/*
 * State shared between the "repmgr standby switchover" sanity-check
 * helpers; passed as a single struct instead of many variables.
 */
typedef struct
{
	RepmgrdInfo **repmgrd_info;
	int repmgrd_running_count;
	bool dry_run_success;
	/* store list of configuration files on the demotion candidate */
	KeyValueList remote_config_files;
	/* used for handling repmgrd pause/unpause */
	NodeInfoList all_nodes;
	NodeInfoList sibling_nodes;
	SiblingNodeStats sibling_nodes_stats;
	t_event_info event_info;
	int remote_node_id;
	t_node_info remote_node_record;
	t_node_info local_node_record;
	char remote_conninfo[MAXCONNINFO];
	bool switchover_success;
	RecoveryType recovery_type;
	PGconn *superuser_conn;
	/* the remote server is the primary to be demoted */
	char remote_host[MAXLEN];
	int remote_repmgr_version;
	PGconn *remote_conn;
	PGconn *local_conn;
} t_standby_switchover_rec;
/*
 * One initializer entry per struct member, in declaration order.
 * NOTE(review): the previous macro had no entry for repmgrd_running_count,
 * so every subsequent member was initialized from the wrong entry; the
 * "0" below restores the alignment.
 */
#define T_STANDBY_SWITCHOVER_INITIALIZER { \
	NULL, \
	0, \
	true, \
	{NULL, NULL}, \
	T_NODE_INFO_LIST_INITIALIZER, \
	T_NODE_INFO_LIST_INITIALIZER, \
	T_SIBLING_NODES_STATS_INITIALIZER, \
	T_EVENT_INFO_INITIALIZER, \
	UNKNOWN_NODE_ID, \
	T_NODE_INFO_INITIALIZER, \
	T_NODE_INFO_INITIALIZER, \
	"", \
	true, \
	RECTYPE_UNKNOWN, \
	NULL, \
	"", \
	UNKNOWN_REPMGR_VERSION_NUM, \
	NULL, \
	NULL \
}
extern void do_standby_clone(void);
extern void do_standby_register(void);
extern void do_standby_unregister(void);
@@ -30,6 +101,6 @@ extern void do_standby_help(void);
extern bool do_standby_follow_internal(PGconn *primary_conn, PGconn *follow_target_conn, t_node_info *follow_target_node_record, PQExpBufferData *output, int general_error_code, int *error_code);
void check_pg_backupapi_standby_clone_options(void);
#endif /* _REPMGR_ACTION_STANDBY_H_ */

View File

@@ -193,8 +193,7 @@ typedef struct
typedef enum
{
barman,
pg_basebackup,
pg_backupapi
pg_basebackup
} standy_clone_mode;
typedef enum

View File

@@ -3096,14 +3096,9 @@ get_standby_clone_mode(void)
if (*config_file_options.barman_host != '\0' && runtime_options.without_barman == false)
mode = barman;
else {
if (*config_file_options.pg_backupapi_host != '\0') {
log_info("Attempting to use `pg_backupapi` new restore mode");
mode = pg_backupapi;
}
else
mode = pg_basebackup;
}
else
mode = pg_basebackup;
return mode;
}

View File

@@ -116,7 +116,6 @@
#define DEFAULT_STANDBY_FOLLOW_TIMEOUT 30 /* seconds */
#define DEFAULT_STANDBY_FOLLOW_RESTART false
#define DEFAULT_SHUTDOWN_CHECK_TIMEOUT 60 /* seconds */
#define DEFAULT_STANDBY_PG_BACKUPAPI_OP_TYPE "recovery"
#define DEFAULT_STANDBY_RECONNECT_TIMEOUT 60 /* seconds */
#define DEFAULT_NODE_REJOIN_TIMEOUT 60 /* seconds */
#define DEFAULT_ARCHIVE_READY_WARNING 16 /* WAL files */