Compare commits

..

1 Commits

Author SHA1 Message Date
Dee Dee Rothery
9a4bec5496 Changed v14 package name in install example
Per comment from Ton Machielsen in docs Slack channel: https://edb.slack.com/archives/CU5QEU5L7/p1669116950090219
2022-11-22 09:45:15 -05:00
16 changed files with 70 additions and 369 deletions

37
.github/workflows/sonarqube-scan.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
# CI workflow: run a SonarQube static-analysis scan on every pull request,
# on pushes to master, and on manual dispatch.
name: SonarQube Scan
on:
  pull_request:
  push:
    branches: [ master ]
  workflow_dispatch:
jobs:
  sonarQube:
    name: SonarQube-Job
    runs-on: ubuntu-latest
    steps:
      # Check out the branch under test.
      - name: Checkout source repo
        uses: actions/checkout@v1
        with:
          ref: '${{ github.head_ref }}'
      # Fetch the shared EnterpriseDB actions repository (private; requires
      # the GH_SLONIK token) into a local path so it can be invoked below.
      # NOTE(review): actions/checkout's `repository` input is normally
      # "owner/repo" without a ".git" suffix — confirm this value works.
      - name: Checkout GitHub Action Repo
        uses: actions/checkout@master
        with:
          repository: EnterpriseDB/edb-github-actions.git
          ref: master
          token: ${{ secrets.GH_SLONIK }}
          path: .github/actions/edb-github-actions
      # Run the shared SonarQube action with repmgr-specific parameters.
      - name: SonarQube Scan
        uses: ./.github/actions/edb-github-actions/sonarqube
        with:
          REPO_NAME: '${{github.event.repository.name}}'
          SONAR_PROJECT_KEY: EnterpriseDB_repmgr
          SONAR_URL: '${{secrets.SONARQUBE_URL}}'
          SONAR_LOGIN: '${{secrets.SONARQUBE_LOGIN}}'
          PULL_REQUEST_KEY: '${{github.event.number}}'
          PULL_REQUEST_BRANCH: '${{github.head_ref}}'
          PULL_REQUEST_BASE_BRANCH: '${{github.base_ref}}'
          REPO_DEFAULT_BRANCH: '${{github.event.repository.default_branch}}'
          # Paths/patterns excluded from analysis (tests, SQL, docs, Java)
          REPO_EXCLUDE_FILES: '*properties*,**/src/test/**/*,**/*.sql,**/docs/**/*,**/*/*.java'

18
.github/workflows/sonarqube/configure-env.sh vendored Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Prepare the CI environment for a SonarQube build-wrapper run:
# install build dependencies, add the PGDG apt repository, install the
# PostgreSQL 13 development packages, then configure and build repmgr under
# the Sonar build wrapper so the scanner can observe the compilation.
# NOTE(review): the script does not `set -e`; a failed step (e.g. apt-get)
# is silently ignored and later steps may fail confusingly — confirm intent.
DEBIAN_FRONTEND=noninteractive sudo apt-get -y install debhelper curl autoconf zlib1g-dev \
libedit-dev libxml2-dev libxslt1-dev libkrb5-dev libssl-dev libpam0g-dev systemtap-sdt-dev \
libselinux1-dev build-essential bison apt-utils lsb-release devscripts \
software-properties-common git shellcheck flex
# Add the PostgreSQL Global Development Group package repository and its key.
sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get -y install libpq-dev postgresql-13 postgresql-server-dev-13
# Configure repmgr against the system pg_config, then build under the Sonar
# build wrapper; its output directory is consumed by the SonarQube scanner.
./configure
export PG_CONFIG=/usr/bin/pg_config
/home/buildfarm/sonar/depends/build-wrapper-linux-x86/build-wrapper-linux-x86-64 --out-dir build_wrapper_output_directory make

View File

@@ -22,7 +22,7 @@ GIT_WORK_TREE=${repmgr_abs_srcdir}
GIT_DIR=${repmgr_abs_srcdir}/.git
export GIT_DIR
export GIT_WORK_TREE
PG_LDFLAGS=-lcurl -ljson-c
include $(PGXS)
-include ${repmgr_abs_srcdir}/Makefile.custom

View File

@@ -66,7 +66,7 @@ REPMGR_CLIENT_OBJS = repmgr-client.o \
repmgr-action-primary.o repmgr-action-standby.o repmgr-action-witness.o \
repmgr-action-cluster.o repmgr-action-node.o repmgr-action-service.o repmgr-action-daemon.o \
configdata.o configfile.o configfile-scan.o log.o strutil.o controldata.o dirutil.o compat.o \
dbutils.o sysutils.o pgbackupapi.o
dbutils.o sysutils.o
REPMGRD_OBJS = repmgrd.o repmgrd-physical.o configdata.o configfile.o configfile-scan.o log.o \
dbutils.o strutil.o controldata.o compat.o sysutils.o

View File

@@ -291,46 +291,6 @@ struct ConfigFileSetting config_file_settings[] =
{},
{}
},
/* pg_backupapi_backup_id*/
{
"pg_backupapi_backup_id",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_backup_id },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_backup_id) },
{}
},
/* pg_backupapi_host*/
{
"pg_backupapi_host",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_host },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_host) },
{}
},
/* pg_backupapi_node_name */
{
"pg_backupapi_node_name",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_node_name },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_node_name) },
{}
},
/* pg_backupapi_remote_ssh_command */
{
"pg_backupapi_remote_ssh_command",
CONFIG_STRING,
{ .strptr = config_file_options.pg_backupapi_remote_ssh_command },
{ .strdefault = "" },
{},
{ .strmaxlen = sizeof(config_file_options.pg_backupapi_remote_ssh_command) },
{}
},
/* =======================
* standby follow settings

View File

@@ -164,10 +164,6 @@ typedef struct
char archive_cleanup_command[MAXLEN];
bool use_primary_conninfo_password;
char passfile[MAXPGPATH];
char pg_backupapi_backup_id[NAMEDATALEN];
char pg_backupapi_host[NAMEDATALEN];
char pg_backupapi_node_name[NAMEDATALEN];
char pg_backupapi_remote_ssh_command[MAXLEN];
/* standby promote settings */
int promote_check_timeout;

View File

@@ -60,7 +60,6 @@ AC_SUBST(vpath_build)
AC_CHECK_PROG(HAVE_GNUSED,gnused,yes,no)
AC_CHECK_PROG(HAVE_GSED,gsed,yes,no)
AC_CHECK_PROG(HAVE_SED,sed,yes,no)
AC_CHECK_PROG(HAVE_FLEX,flex,yes,no)
if test "$HAVE_GNUSED" = yes; then
SED=gnused
@@ -73,25 +72,6 @@ else
fi
AC_SUBST(SED)
AS_IF([test x"$HAVE_FLEX" != x"yes"], AC_MSG_ERROR([flex should be installed first]))
#Checking libraries
GENERIC_LIB_FAILED_MSG="library should be installed"
AC_CHECK_LIB(selinux, is_selinux_enabled, [],
[AC_MSG_ERROR(['selinux' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(lz4, LZ4_compress_default, [],
[AC_MSG_ERROR(['Z4' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(xslt, xsltCleanupGlobals, [],
[AC_MSG_ERROR(['xslt' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(pam, pam_start, [],
[AC_MSG_ERROR(['pam' $GENERIC_LIB_FAILED_MSG])])
AC_CHECK_LIB(gssapi_krb5, gss_init_sec_context, [],
[AC_MSG_ERROR([gssapi_krb5 $GENERIC_LIB_FAILED_MSG])])
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES([Makefile.global])

View File

@@ -112,7 +112,7 @@ sudo dnf repolist</programlisting>
<para>
Install the &repmgr; version appropriate for your PostgreSQL version (e.g. <literal>repmgr14</literal>):
<programlisting>
sudo dnf install repmgr14</programlisting>
sudo dnf install repmgr_14</programlisting>
</para>
<tip>
<para>

View File

@@ -29,7 +29,7 @@
<listitem>
<simpara>
option to execute custom scripts (&quot;<link linkend="event-notifications">event notifications</link>&quot;)
option to execute custom scripts (&quot;<link linkend="event-notifications">event notifications</link>
at different points in the failover sequence
</simpara>
</listitem>

View File

@@ -49,6 +49,5 @@
#define ERR_NODE_STATUS 25
#define ERR_REPMGRD_PAUSE 26
#define ERR_REPMGRD_SERVICE 27
#define ERR_PGBACKUPAPI_SERVICE 28
#endif /* _ERRCODE_H_ */

View File

@@ -1,147 +0,0 @@
/*
* pgbackupapi.c
* Copyright (c) EnterpriseDB Corporation, 2010-2021
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <string.h>
#include <curl/curl.h>
#include <json-c/json.h>
#include "repmgr.h"
#include "pgbackupapi.h"
/*
 * libcurl write callback: parse the JSON reply listing the operations known
 * to the pg-backup-api server, copy the raw "operations" value into `buffer`
 * (a caller-supplied char array of MAX_BUFFER_LENGTH bytes) and print each
 * operation to stdout.
 *
 * Returns the number of bytes consumed (size * nmemb) on success, or 0 to
 * make libcurl abort the transfer on allocation/parse failure.
 *
 * NOTE(review): libcurl may deliver the body in several chunks; parsing each
 * chunk independently only works for small, single-chunk replies — confirm.
 */
size_t receive_operations_cb(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t content_len = size * nmemb;
	size_t max_chars_to_copy = MAX_BUFFER_LENGTH - 2;
	size_t operation_length;
	size_t i;
	const char *operations_str;
	json_object *value;
	json_object *root;
	json_object *operations;
	char *tmp;

	/* libcurl does not NUL-terminate the payload; make a terminated copy */
	tmp = malloc(content_len + 1);
	if (tmp == NULL)
		return 0;
	memcpy(tmp, content, content_len);
	tmp[content_len] = '\0';

	root = json_tokener_parse(tmp);
	free(tmp);
	if (root == NULL)
		return 0;

	operations = json_object_object_get(root, "operations");
	if (operations == NULL) {
		json_object_put(root);
		return 0;
	}

	operations_str = json_object_get_string(operations);
	operation_length = strlen(operations_str);
	if (operation_length < max_chars_to_copy)
		max_chars_to_copy = operation_length;

	/* strncpy alone would not guarantee termination; terminate explicitly */
	memcpy(buffer, operations_str, max_chars_to_copy);
	buffer[max_chars_to_copy] = '\0';

	fprintf(stdout, "Success! The following operations were found\n");
	for (i = 0; i < (size_t) json_object_array_length(operations); i++) {
		value = json_object_array_get_idx(operations, i);
		printf("%s\n", json_object_get_string(value));
	}

	/* drop the parse tree; `operations`/`value` were borrowed references */
	json_object_put(root);
	return content_len;
}
/*
 * Build the base URL for the pg-backup-api operations endpoint:
 * http://<host>:80/servers/<node_name>/operations
 *
 * Returns a heap-allocated string of at most MAX_BUFFER_LENGTH bytes which
 * the caller must free(), or NULL on allocation failure (the original
 * crashed inside snprintf() in that case).
 */
char * define_base_url(operation_task *task) {
	const char *format = "http://%s:80/servers/%s/operations";
	char *url = malloc(MAX_BUFFER_LENGTH);

	if (url == NULL)
		return NULL;

	snprintf(url, MAX_BUFFER_LENGTH - 1, format, task->host, task->node_name);
	return url;
}
/*
 * List the operations registered on the pg-backup-api server for this task's
 * node. The reply is handled (and printed) by receive_operations_cb(); the
 * local buffer receiving the copy is discarded afterwards.
 *
 * Returns the libcurl result code of the transfer, or CURLE_OUT_OF_MEMORY
 * if the URL could not be allocated.
 */
CURLcode get_operations_on_server(CURL *curl, operation_task *task) {
	char buffer[MAX_BUFFER_LENGTH];
	char *url = define_base_url(task);
	CURLcode ret;

	if (url == NULL)
		return CURLE_OUT_OF_MEMORY;

	/* ensure the callback sees a valid (empty) string even if never called */
	buffer[0] = '\0';

	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operations_cb);
	/* pass the array itself, not its address-of (same address, right type) */
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, buffer);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	ret = curl_easy_perform(curl);

	free(url);
	return ret;
}
/*
 * libcurl write callback: extract "operation_id" from the JSON reply to a
 * task-creation POST and copy it into `buffer` (MAX_BUFFER_LENGTH bytes,
 * caller-supplied). Leaves `buffer` untouched when the key is absent.
 *
 * Returns size * nmemb on success, 0 to abort the transfer on
 * allocation/parse failure.
 */
size_t receive_operation_id(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t content_len = size * nmemb;
	json_object *root;
	json_object *operation;
	char *tmp;

	/* libcurl does not NUL-terminate the payload; make a terminated copy */
	tmp = malloc(content_len + 1);
	if (tmp == NULL)
		return 0;
	memcpy(tmp, content, content_len);
	tmp[content_len] = '\0';

	root = json_tokener_parse(tmp);
	free(tmp);
	if (root == NULL)
		return 0;

	operation = json_object_object_get(root, "operation_id");
	if (operation != NULL) {
		const char *id = json_object_get_string(operation);
		size_t len = strlen(id);

		/* strncpy did not guarantee termination; bound and terminate */
		if (len > MAX_BUFFER_LENGTH - 2)
			len = MAX_BUFFER_LENGTH - 2;
		memcpy(buffer, id, len);
		buffer[len] = '\0';
	}

	json_object_put(root);
	return content_len;
}
/*
 * POST a new operation (recovery) to the pg-backup-api server. The request
 * body is built from the task's operation_type / backup_id /
 * remote_ssh_command / destination_directory fields; on success the server's
 * operation id is written into task->operation_id by receive_operation_id().
 *
 * Returns the libcurl result code, or CURLE_OUT_OF_MEMORY if the URL could
 * not be allocated.
 */
CURLcode create_new_task(CURL *curl, operation_task *task) {
	PQExpBufferData payload;
	char *url = define_base_url(task);
	CURLcode ret;
	json_object *root;
	struct curl_slist *chunk = NULL;

	if (url == NULL)
		return CURLE_OUT_OF_MEMORY;

	root = json_object_new_object();
	json_object_object_add(root, "operation_type", json_object_new_string(task->operation_type));
	json_object_object_add(root, "backup_id", json_object_new_string(task->backup_id));
	json_object_object_add(root, "remote_ssh_command", json_object_new_string(task->remote_ssh_command));
	json_object_object_add(root, "destination_directory", json_object_new_string(task->destination_directory));

	/* copy the serialized JSON; its backing storage belongs to `root` */
	initPQExpBuffer(&payload);
	appendPQExpBufferStr(&payload, json_object_to_json_string(root));

	chunk = curl_slist_append(chunk, "Content-type: application/json");
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, chunk);
	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload.data);
	curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operation_id);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, task->operation_id);
	ret = curl_easy_perform(curl);

	/* release everything allocated here (header list and JSON tree were
	 * previously leaked) */
	curl_slist_free_all(chunk);
	json_object_put(root);
	termPQExpBuffer(&payload);
	free(url);
	return ret;
}
/*
 * libcurl write callback: extract "status" from the JSON reply to a status
 * poll and copy it into `buffer` (MAX_BUFFER_LENGTH bytes, caller-supplied).
 * On a reply without a "status" key (or unparseable JSON) an error is
 * reported and `buffer` is set to the empty string, as before.
 *
 * Returns size * nmemb on success, 0 to abort the transfer on
 * allocation failure.
 */
size_t receive_operation_status(void *content, size_t size, size_t nmemb, char *buffer) {
	size_t content_len = size * nmemb;
	json_object *root;
	json_object *status = NULL;
	char *tmp;

	/* libcurl does not NUL-terminate the payload; make a terminated copy */
	tmp = malloc(content_len + 1);
	if (tmp == NULL)
		return 0;
	memcpy(tmp, content, content_len);
	tmp[content_len] = '\0';

	root = json_tokener_parse(tmp);
	free(tmp);
	if (root != NULL)
		status = json_object_object_get(root, "status");

	if (status != NULL) {
		const char *status_str = json_object_get_string(status);
		size_t len = strlen(status_str);

		/* strncpy did not guarantee termination; bound and terminate */
		if (len > MAX_BUFFER_LENGTH - 2)
			len = MAX_BUFFER_LENGTH - 2;
		memcpy(buffer, status_str, len);
		buffer[len] = '\0';
	}
	else {
		fprintf(stderr, "Incorrect reply received for that operation ID.\n");
		buffer[0] = '\0';
	}

	if (root != NULL)
		json_object_put(root);
	return content_len;
}
/*
 * Poll the pg-backup-api server for the status of task->operation_id; the
 * reply is written into task->operation_status by receive_operation_status().
 *
 * Returns the libcurl result code, or CURLE_OUT_OF_MEMORY on allocation
 * failure.
 *
 * Fix: the original strcat()ed "/" and the operation id onto the
 * MAX_BUFFER_LENGTH-byte allocation returned by define_base_url(), a
 * guaranteed heap buffer overflow for non-trivial ids. Build the full URL
 * in a correctly-sized buffer instead.
 */
CURLcode get_status_of_operation(CURL *curl, operation_task *task) {
	CURLcode ret;
	char *base = define_base_url(task);
	char *url;
	size_t url_size;

	if (base == NULL)
		return CURLE_OUT_OF_MEMORY;

	/* base + '/' + operation_id + NUL */
	url_size = strlen(base) + 1 + strlen(task->operation_id) + 1;
	url = malloc(url_size);
	if (url == NULL) {
		free(base);
		return CURLE_OUT_OF_MEMORY;
	}
	snprintf(url, url_size, "%s/%s", base, task->operation_id);
	free(base);

	curl_easy_setopt(curl, CURLOPT_URL, url);
	curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, receive_operation_status);
	curl_easy_setopt(curl, CURLOPT_WRITEDATA, task->operation_status);
	ret = curl_easy_perform(curl);

	free(url);
	return ret;
}

View File

@@ -1,46 +0,0 @@
/*
* pgbackupapi.h
* Copyright (c) EnterpriseDB Corporation, 2010-2021
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <curl/curl.h>
#include <json-c/json.h>

/*
 * Describes one pg-backup-api operation: the connection target, the request
 * parameters sent to the server, and the id/status strings that the curl
 * write callbacks fill in.
 */
typedef struct operation_task {
char *backup_id;             /* Barman backup id the operation acts on */
char *destination_directory; /* directory the backup is restored into */
char *operation_type;        /* operation kind, e.g. "recovery" */
char *operation_id;          /* filled in by receive_operation_id() */
char *operation_status;      /* filled in by receive_operation_status() */
char *remote_ssh_command;    /* ssh command forwarded to the API */
char *host;                  /* pg-backup-api host to contact */
char *node_name;             /* server name as known to Barman */
} operation_task;

/* Default buffer size used by most operations */
#define MAX_BUFFER_LENGTH 72

/* libcurl write callbacks that receive data from pg-backup-api endpoints */
size_t receive_operations_cb(void *content, size_t size, size_t nmemb, char *buffer);
size_t receive_operation_id(void *content, size_t size, size_t nmemb, char *buffer);
size_t receive_operation_status(void *content, size_t size, size_t nmemb, char *buffer);

/* Functions that implement the logic of communicating with the API */
CURLcode get_operations_on_server(CURL *curl, operation_task *task);
CURLcode create_new_task(CURL *curl, operation_task *task);
CURLcode get_status_of_operation(CURL *curl, operation_task *task);

/* Helper that builds the base URL for an operation_task; caller frees it */
char * define_base_url(operation_task *task);

View File

@@ -21,7 +21,6 @@
#include <sys/stat.h>
#include <time.h>
#include <unistd.h>
#include "repmgr.h"
#include "dirutil.h"
@@ -30,7 +29,7 @@
#include "repmgr-client-global.h"
#include "repmgr-action-standby.h"
#include "pgbackupapi.h"
typedef struct TablespaceDataListCell
{
@@ -114,7 +113,6 @@ static void check_recovery_type(PGconn *conn);
static void initialise_direct_clone(t_node_info *local_node_record, t_node_info *upstream_node_record);
static int run_basebackup(t_node_info *node_record);
static int run_file_backup(t_node_info *node_record);
static int run_pg_backupapi(t_node_info *node_record);
static void copy_configuration_files(bool delete_after_copy);
@@ -689,18 +687,19 @@ do_standby_clone(void)
exit(SUCCESS);
}
if (mode != barman)
{
initialise_direct_clone(&local_node_record, &upstream_node_record);
}
switch (mode)
{
case pg_basebackup:
initialise_direct_clone(&local_node_record, &upstream_node_record);
log_notice(_("starting backup (using pg_basebackup)..."));
break;
case barman:
log_notice(_("retrieving backup from Barman..."));
break;
case pg_backupapi:
log_notice(_("starting backup (using pg_backupapi)..."));
break;
default:
/* should never reach here */
log_error(_("unknown clone mode"));
@@ -722,9 +721,6 @@ do_standby_clone(void)
case barman:
r = run_file_backup(&local_node_record);
break;
case pg_backupapi:
r = run_pg_backupapi(&local_node_record);
break;
default:
/* should never reach here */
log_error(_("unknown clone mode"));
@@ -818,6 +814,7 @@ do_standby_clone(void)
}
/* Write the recovery.conf file */
if (create_recovery_file(&local_node_record,
&recovery_conninfo,
source_server_version_num,
@@ -849,9 +846,6 @@ do_standby_clone(void)
case barman:
log_notice(_("standby clone (from Barman) complete"));
break;
case pg_backupapi:
log_notice(_("standby clone (from pg_backupapi) complete"));
break;
}
/*
@@ -943,9 +937,6 @@ do_standby_clone(void)
case barman:
appendPQExpBufferStr(&event_details, "barman");
break;
case pg_backupapi:
appendPQExpBufferStr(&event_details, "pg_backupapi");
break;
}
appendPQExpBuffer(&event_details,
@@ -7779,86 +7770,6 @@ stop_backup:
}
/*
 * Perform a call to the pg_backupapi endpoint to ask Barman to write the
 * backup for us, then poll the service until the operation finishes. This
 * ensures that whatever the on-disk format of new backups, Barman will know
 * how to read and write them.
 *
 * `local_node_record` is accepted for signature parity with the other
 * run_* backends; the configuration values are read from
 * config_file_options and local_data_directory instead.
 *
 * Returns SUCCESS when the operation reaches status "DONE", otherwise
 * ERR_PGBACKUPAPI_SERVICE.
 */
static int
run_pg_backupapi(t_node_info *local_node_record)
{
	int r = ERR_PGBACKUPAPI_SERVICE;
	long http_return_code = 0;
	const short seconds_to_sleep = 3;
	operation_task *task = NULL;
	CURL *curl;
	CURLcode ret;

	curl = curl_easy_init();
	if (curl == NULL)
	{
		log_error(_("unable to initialise libcurl"));
		return r;
	}

	/* zero-filled so the cleanup path can free() members unconditionally */
	task = calloc(1, sizeof(operation_task));
	if (task == NULL)
	{
		log_error(_("out of memory"));
		goto cleanup;
	}

	task->host = strdup(config_file_options.pg_backupapi_host);
	task->remote_ssh_command = strdup(config_file_options.pg_backupapi_remote_ssh_command);
	task->node_name = strdup(config_file_options.pg_backupapi_node_name);
	task->operation_type = strdup(DEFAULT_STANDBY_PG_BACKUPAPI_OP_TYPE);
	task->backup_id = strdup(config_file_options.pg_backupapi_backup_id);
	task->destination_directory = strdup(local_data_directory);
	task->operation_id = calloc(1, MAX_BUFFER_LENGTH);
	task->operation_status = calloc(1, MAX_BUFFER_LENGTH);

	if (task->host == NULL || task->remote_ssh_command == NULL ||
		task->node_name == NULL || task->operation_type == NULL ||
		task->backup_id == NULL || task->destination_directory == NULL ||
		task->operation_id == NULL || task->operation_status == NULL)
	{
		log_error(_("out of memory"));
		goto cleanup;
	}

	ret = create_new_task(curl, task);
	if ((ret != CURLE_OK) || (strlen(task->operation_id) == 0))
	{
		curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_return_code);
		if (499 > http_return_code && http_return_code >= 400)
		{
			log_error("Cannot find backup '%s' for node '%s'.", task->backup_id, task->node_name);
		}
		else
		{
			log_error("whilst reaching out pg_backup service: %s\n", curl_easy_strerror(ret));
		}
	}
	else
	{
		log_info("Success creating the task: operation id '%s'", task->operation_id);
		/* re-initialise the handle: the previous one carries POST state */
		curl_easy_cleanup(curl);
		curl = curl_easy_init();
		if (curl == NULL)
		{
			log_error(_("unable to initialise libcurl"));
			goto cleanup;
		}

		/*
		 * Poll until the operation is reported DONE or FAILED.
		 * NOTE(review): as in the original, there is no overall timeout —
		 * a server that never resolves the operation blocks forever.
		 */
		while (true)
		{
			ret = get_status_of_operation(curl, task);
			if (strlen(task->operation_status) == 0)
			{
				log_info("Retrying...");
			}
			else
			{
				log_info("status %s", task->operation_status);
			}
			if (strcmp(task->operation_status, "FAILED") == 0)
			{
				break;
			}
			if (strcmp(task->operation_status, "DONE") == 0)
			{
				r = SUCCESS;
				break;
			}
			sleep(seconds_to_sleep);
		}
	}

cleanup:
	if (curl != NULL)
		curl_easy_cleanup(curl);
	if (task != NULL)
	{
		/* the original freed only `task`, leaking all eight members */
		free(task->host);
		free(task->remote_ssh_command);
		free(task->node_name);
		free(task->operation_type);
		free(task->backup_id);
		free(task->destination_directory);
		free(task->operation_id);
		free(task->operation_status);
		free(task);
	}
	return r;
}
static char *
make_barman_ssh_command(char *buf)
{

View File

@@ -193,8 +193,7 @@ typedef struct
typedef enum
{
barman,
pg_basebackup,
pg_backupapi
pg_basebackup
} standy_clone_mode;
typedef enum

View File

@@ -3096,14 +3096,9 @@ get_standby_clone_mode(void)
if (*config_file_options.barman_host != '\0' && runtime_options.without_barman == false)
mode = barman;
else {
if (*config_file_options.pg_backupapi_host != '\0') {
log_info("Attempting to use `pg_backupapi` new restore mode");
mode = pg_backupapi;
}
else
mode = pg_basebackup;
}
else
mode = pg_basebackup;
return mode;
}

View File

@@ -116,7 +116,6 @@
#define DEFAULT_STANDBY_FOLLOW_TIMEOUT 30 /* seconds */
#define DEFAULT_STANDBY_FOLLOW_RESTART false
#define DEFAULT_SHUTDOWN_CHECK_TIMEOUT 60 /* seconds */
#define DEFAULT_STANDBY_PG_BACKUPAPI_OP_TYPE "recovery"
#define DEFAULT_STANDBY_RECONNECT_TIMEOUT 60 /* seconds */
#define DEFAULT_NODE_REJOIN_TIMEOUT 60 /* seconds */
#define DEFAULT_ARCHIVE_READY_WARNING 16 /* WAL files */