mirror of https://github.com/postgresml/pgcat.git
synced 2026-03-22 17:06:29 +00:00

Initial gh-pages commit

.circleci/config.yml
@@ -1,87 +0,0 @@
# Use the latest 2.1 version of CircleCI pipeline process engine.
# See: https://circleci.com/docs/2.0/configuration-reference
version: 2.1

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
  build:
    # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
    # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
    docker:
      - image: ghcr.io/postgresml/pgcat-ci:latest
        environment:
          RUST_LOG: info
          LLVM_PROFILE_FILE: /tmp/pgcat-%m-%p.profraw
          RUSTC_BOOTSTRAP: 1
          CARGO_INCREMENTAL: 0
          RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Cinstrument-coverage"
          RUSTDOCFLAGS: "-Cpanic=abort"
      - image: postgres:14
        command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
        environment:
          POSTGRES_USER: postgres
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
      - image: postgres:14
        command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
        environment:
          POSTGRES_USER: postgres
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      - image: postgres:14
        command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
        environment:
          POSTGRES_USER: postgres
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      - image: postgres:14
        command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
        environment:
          POSTGRES_USER: postgres
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256

      - image: postgres:14
        command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements"]
        environment:
          POSTGRES_USER: postgres
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5

    # Add steps to the job
    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
    steps:
      - checkout
      - restore_cache:
          key: cargo-lock-2-{{ checksum "Cargo.lock" }}
      - run:
          name: "Lint"
          command: "cargo fmt --check"
      - run:
          name: "Clippy"
          command: "cargo clippy --all --all-targets -- -Dwarnings"
      - run:
          name: "Tests"
          command: "cargo clean && cargo build && cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"
      - store_artifacts:
          path: /tmp/cov
          destination: coverage-data
      - save_cache:
          key: cargo-lock-2-{{ checksum "Cargo.lock" }}
          paths:
            - target
            - ~/.cargo

# Invoke jobs via workflows
# See: https://circleci.com/docs/2.0/configuration-reference/#workflows
workflows:
  build:
    jobs:
      - build
.circleci/generate_coverage.sh
@@ -1,15 +0,0 @@
#!/bin/bash

# inspired by https://doc.rust-lang.org/rustc/instrument-coverage.html#tips-for-listing-the-binaries-automatically
TEST_OBJECTS=$( \
    for file in $(cargo test --no-run 2>&1 | grep "target/debug/deps/pgcat-[[:alnum:]]\+" -o); \
    do \
        printf "%s %s " --object $file; \
    done \
)

rust-profdata merge -sparse /tmp/pgcat-*.profraw -o /tmp/pgcat.profdata

bash -c "rust-cov export -ignore-filename-regex='rustc|registry' -Xdemangler=rustfilt -instr-profile=/tmp/pgcat.profdata $TEST_OBJECTS --object ./target/debug/pgcat --format lcov > ./lcov.info"

genhtml lcov.info --title "PgCat Code Coverage" --css-file ./cov-style.css --no-function-coverage --highlight --ignore-errors source --legend --output-directory /tmp/cov --prefix $(pwd)
.circleci/pgcat.toml
@@ -1,158 +0,0 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 1000

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep connection available for immediate re-use, without running a healthcheck query on it
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# If we should log client connections
log_client_connections = false

# If we should log client disconnections
log_client_disconnections = false

# Reload config automatically if it changes.
autoreload = 15000

# TLS
tls_certificate = ".circleci/server.cert"
tls_private_key = ".circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat)
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
admin_password = "admin_pass"

# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as database name when connecting
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"
prepared_statements_cache_size = 500

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
query_parser_read_write_splitting = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"
prepared_statements_cache_size = 500

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
.circleci/run_tests.sh
@@ -1,183 +0,0 @@
#!/bin/bash

set -e
set -o xtrace

# non-zero exit code if we provide bad configs
(! ./target/debug/pgcat "fake_configs" 2>/dev/null)

# Start PgCat with a particular log level
# for inspection.
function start_pgcat() {
    kill -s SIGINT $(pgrep pgcat) || true
    RUST_LOG=${1} ./target/debug/pgcat .circleci/pgcat.toml &
    sleep 1
}

# Setup the database with shards and user
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f tests/sharding/query_routing_setup.sql

PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard0 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i

# Start Toxiproxy
kill -9 $(pgrep toxiproxy) || true
LOG_LEVEL=error toxiproxy-server &
sleep 1

# Create a database at port 5433, forward it to Postgres
toxiproxy-cli create -l 127.0.0.1:5433 -u 127.0.0.1:5432 postgres_replica

start_pgcat "info"

# Check that prometheus is running
curl --fail localhost:9930/metrics

export PGPASSWORD=sharding_user
export PGDATABASE=sharded_db

# pgbench test
pgbench -U sharding_user -i -h 127.0.0.1 -p 6432
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol simple -f tests/pgbench/simple.sql
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended

# COPY TO STDOUT test
psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'COPY (SELECT * FROM pgbench_accounts LIMIT 15) TO STDOUT;' > /dev/null

# Query cancellation test
(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Pause/resume test.
# Running benches before, during, and after pause/resume.
pgbench -U sharding_user -t 500 -c 2 -h 127.0.0.1 -p 6432 --protocol extended &
BENCH_ONE=$!
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'PAUSE sharded_db,sharding_user'
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended &
BENCH_TWO=$!
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RESUME sharded_db,sharding_user'
wait ${BENCH_ONE}
wait ${BENCH_TWO}

# Reload pool (closing unused server connections)
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD'

(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Sharding insert
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_insert.sql

# Sharding select
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_select.sql > /dev/null

# Replica/primary selection & more sharding tests
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_primary_replica.sql > /dev/null

# Statement timeout tests
sed -i 's/statement_timeout = 0/statement_timeout = 100/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config
sleep 0.2

# This should timeout
(! psql -U sharding_user -e -h 127.0.0.1 -p 6432 -c 'select pg_sleep(0.5)')

# Disable statement timeout
sed -i 's/statement_timeout = 100/statement_timeout = 0/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config again

#
# Integration tests and ActiveRecord tests
#
cd tests/ruby
sudo bundle install
bundle exec ruby tests.rb --format documentation || exit 1
bundle exec rspec *_spec.rb --format documentation || exit 1
cd ../..

#
# Python tests
# These tests will start and stop the pgcat server so it will need to be restarted after the tests
#
pip3 install -r tests/python/requirements.txt
pytest || exit 1


#
# Go tests
# Starts its own pgcat server
#
pushd tests/go
/usr/local/go/bin/go test || exit 1
popd

start_pgcat "info"

#
# Rust tests
#
cd tests/rust
cargo run
cd ../../

# Admin tests
export PGPASSWORD=admin_pass
psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW CONFIG' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW LISTS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW POOLS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW VERSION' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c "SET client_encoding TO 'utf8'" > /dev/null # will ignore
(! psql -U admin_user -e -h 127.0.0.1 -p 6432 -d random_db -c 'SHOW STATS' > /dev/null)
export PGPASSWORD=sharding_user

# Start PgCat in debug to demonstrate failover better
start_pgcat "trace"

# Add latency to the replica at port 5433 slightly above the healthcheck timeout
toxiproxy-cli toxic add -t latency -a latency=300 postgres_replica
sleep 1

# Note the failover in the logs
timeout 5 psql -U sharding_user -e -h 127.0.0.1 -p 6432 <<-EOF
	SELECT 1;
	SELECT 1;
	SELECT 1;
EOF

# Remove latency
toxiproxy-cli toxic remove --toxicName latency_downstream postgres_replica

start_pgcat "info"

# Test session mode (and config reload)
sed -i '0,/simple_db/s/pool_mode = "transaction"/pool_mode = "session"/' .circleci/pgcat.toml

# Reload config test
kill -SIGHUP $(pgrep pgcat)

# Revert settings after reload. Makes test runs idempotent
sed -i '0,/simple_db/s/pool_mode = "session"/pool_mode = "transaction"/' .circleci/pgcat.toml

sleep 1

# Prepared statements that will only work in session mode
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol prepared

# Attempt clean shut down
killall pgcat -s SIGINT

# Allow for graceful shutdown
sleep 1

kill -9 $(pgrep toxiproxy)
sleep 1
.circleci/server.cert
@@ -1,21 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUChIvUGFJGJe5EDch32rchqoxER0wDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA2MjcyMjI2MDZaFw0yMjA3
MjcyMjI2MDZaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDdTwrBzV1v79faVckFvIn/9V4fypYs4vDi3X+h3wGn
AjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzSioOv5zsOAvObwrnzbtKSwfs3aP5g
eEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwvGn3A+SVCLyPMTNwnem48+ONh2F9u
FHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHYM5Hx9tzc+NVYdERPtaVcX8ycw1Eh
9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe72loreiwrECUOLAnAfp9Xqc+rMPP
aLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE9nQvksjnAgMBAAGjUzBRMB0GA1Ud
DgQWBBQLDtzexqjx7xPtUZuZB/angU9oSDAfBgNVHSMEGDAWgBQLDtzexqjx7xPt
UZuZB/angU9oSDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC/
mxY/a/WeLENVj2Gg9EUH0CKzfqeTey1mb6YfPGxzrD7oq1m0Vn2MmTbjZrJgh/Ob
QckO3ElF4kC9+6XP+iDPmabGpjeLgllBboT5l2aqnD1syMrf61WPLzgRzRfplYGy
cjBQDDKPu8Lu0QRMWU28tHYN0bMxJoCuXysGGX5WsuFnKCA6f/V+nycJJXxJH3eB
eLjTueD9/RE3OXhi6m8A29Q1E9AE5EF4uRxYXrr91BmYnk4aFvSmBxhUEzE12eSN
lHB/uSc0+Dp+UVmVr6wW8AQfd16UBA0BUf3kSW3aSvirYPYH0rXiOOpEJgOwOMnR
f5+XAbN1Y+3OsFz/ZmP9
-----END CERTIFICATE-----
.circleci/server.key
@@ -1,28 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDdTwrBzV1v79fa
VckFvIn/9V4fypYs4vDi3X+h3wGnAjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzS
ioOv5zsOAvObwrnzbtKSwfs3aP5geEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwv
Gn3A+SVCLyPMTNwnem48+ONh2F9uFHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHY
M5Hx9tzc+NVYdERPtaVcX8ycw1Eh9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe
72loreiwrECUOLAnAfp9Xqc+rMPPaLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE
9nQvksjnAgMBAAECggEAbnvddO9frFhivJ+DIhgEFQKcIOb0nigV9kx6QYehvYy8
lp/+aMb0Lk7d9r8rFQdL/icMK5GwZALg2KNKJvEbbF1Q3PwT9VHoUlgBYKJMDEFA
e9GKu7ASuVBjTZzdUUItwkkbe5eS/aQGeSWSjlpTnX0HNCFS72qRymK+scRhsAQf
ZoHyZHDslkvPR3Pos+sndWBYCDHag5/KoPhsMt1+5S9NQcOUHx9Ac0gLHjau3N+P
0FhODHFFGnnpyQvLvj6u3ZOR34ladMgoBglE0O3vPFhckn92EK4teeTWOsUMotiz
qM3QIJTOJjtiY6VDGY93bIa4pFvt7Zi4vIerenKt0QKBgQD/UMFqfevTAMrk10AC
bOa4+cM07ORY4ZwVj5ILhZn+8crDEEtBsUyuEU2FTINtnoEq1yGc/IXpsyS1BHjL
L1xSml5LN3jInbi8z5XQfY5Sj3VOMtwY6yD20jcdeDC44rz3nStXdkcMWxbTMapx
iOPsap5ciUKOMS7LyMidPEG/LQKBgQDd5vHgrLN0FBIIm+vZg6MEm4QyobstVp4l
7V/GZsdL+M8AQv1Rx+5wSUSWKomOIv5lglis7f6g0c9O7Qkr78/wzoyoKC2RRqPp
I90GjY2Iv22N4GIkRrDAgMZbkTitzIB6tbXEVeLAOh3frFJ8IwauRCOiXIjrZdJ4
FvV86+nU4wKBgQDdWTP2kWkMrBk7QOp7r9Jv+AmnLuHhtOdPQgOJ/bA++X2ik9PL
Bl3GY7XjpSwks1CkxZKcucmXjPp7/X6EGXFfI/owF82dkDADca0e7lufdERtIWb0
K5WOpz2lTPhgsiLGQfq7fw2lxqsJOnvcpqOD6gOVkmKjSDyb7F0RBJazmQKBgQDD
a8PQTcesjpBjLI3EfX1vbVY7ENu6zfFxDV+vZoxVh8UlQdm90AlYse3JIaUKnB7W
Xrihcucv0hZ0N6RAIW5LcFvHK7sVmdR4WbEpODhRGeTtcZJ8yBSZM898jKQRy2vK
pYRyaADNsWDlvujVkjMr/a40KrIaPQ3h3LZNUaYYaQKBgQD1x8A5S5SiE1cN1vFr
aACkmA2WqEDKKhUsUigJdwW6WB/B9kWlIlz/iV1H9uwBXtSIYG4VqCSTAvh0z4gX
Qu2SrdPm5PYnKzpdynpz78OnGdflD1RKWFGHItR6GN6tj/VmulO6mlFvT4jzBQ7j
+Hf8m2TcD4U3ksz3xw+YOD+cmA==
-----END RSA PRIVATE KEY-----
.dockerignore
@@ -1,6 +0,0 @@
target/
tests/
tracing/
.circleci/
.git/
dev/
.editorconfig
@@ -1,14 +0,0 @@
root = true

[*]
trim_trailing_whitespace = true
insert_final_newline = true

[*.rs]
indent_style = space
indent_size = 4
max_line_length = 120

[*.toml]
indent_style = space
indent_size = 2
.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,38 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome, safari]
- Version [e.g. 22]

**Smartphone (please complete the following information):**
- Device: [e.g. iPhone6]
- OS: [e.g. iOS8.1]
- Browser [e.g. stock browser, safari]
- Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md
@@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/dependabot.yml
@@ -1,16 +0,0 @@
version: 2
updates:
  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "daily"
      time: "04:00" # UTC
    labels:
      - "dependencies"
    commit-message:
      prefix: "chore(deps)"
    open-pull-requests-limit: 10
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
.github/workflows/build-and-push.yaml
@@ -1,68 +0,0 @@
name: Build and Push

on:
  push:
    paths:
      - '!charts/**.md'
    branches:
      - main
    tags:
      - v*

env:
  registry: ghcr.io
  image-name: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest

    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Determine tags
        id: metadata
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.registry }}/${{ env.image-name }}
          tags: |
            type=sha,prefix=,format=long
            type=schedule
            type=ref,event=tag
            type=ref,event=branch
            type=ref,event=pr
            type=raw,value=latest,enable={{ is_default_branch }}

      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.registry }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push ${{ env.image-name }}
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          provenance: false
          push: true
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true
.github/workflows/chart-lint-test.yaml
@@ -1,50 +0,0 @@
name: Lint and Test Charts

on:
  pull_request:
    paths:
      - charts/**
      - '!charts/**.md'
jobs:
  lint-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3.1.0
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.8.1

      # Python is required because `ct lint` runs Yamale (https://github.com/23andMe/Yamale) and
      # yamllint (https://github.com/adrienverge/yamllint) which require Python
      - name: Set up Python
        uses: actions/setup-python@v5.1.0
        with:
          python-version: 3.7

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.2.1
        with:
          version: v3.5.1

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config ct.yaml)
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> $GITHUB_OUTPUT
          fi

      - name: Run chart-testing (lint)
        run: ct lint --config ct.yaml

      - name: Create kind cluster
        uses: helm/kind-action@v1.10.0
        if: steps.list-changed.outputs.changed == 'true'

      - name: Run chart-testing (install)
        run: ct install --config ct.yaml
.github/workflows/chart-release.yaml
@@ -1,40 +0,0 @@
name: Release Charts

on:
  push:
    paths:
      - charts/**
      - '!**.md'
    branches:
      - main

jobs:
  release:
    runs-on: ubuntu-latest

    permissions:
      contents: write

    steps:
      - name: Checkout
        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
        with:
          version: v3.13.0

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0
        with:
          charts_dir: charts
          config: cr.yaml
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/generate-chart-readme.yaml
@@ -1,48 +0,0 @@
name: '[CI/CD] Update README metadata'

on:
  pull_request_target:
    branches:
      - main
    paths:
      - 'charts/*/values.yaml'
# Remove all permissions by default
permissions: {}
jobs:
  update-readme-metadata:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Install readme-generator-for-helm
        run: npm install -g @bitnami/readme-generator-for-helm
      - name: Checkout
        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
        with:
          path: charts
          ref: ${{github.event.pull_request.head.ref}}
          repository: ${{github.event.pull_request.head.repo.full_name}}
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Execute readme-generator-for-helm
        env:
          DIFF_URL: "${{github.event.pull_request.diff_url}}"
          TEMP_FILE: "${{runner.temp}}/pr-${{github.event.number}}.diff"
        run: |
          # This request doesn't consume API calls.
          curl -Lkso $TEMP_FILE $DIFF_URL
          files_changed="$(sed -nr 's/[\-\+]{3} [ab]\/(.*)/\1/p' $TEMP_FILE | sort | uniq)"
          # Adding || true to avoid "Process exited with code 1" errors
          charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "pgcat/[^/]*" | sort | uniq || true)"
          for chart in ${charts_dirs_changed}; do
            echo "Updating README.md for ${chart}"
            readme-generator --values "charts/${chart}/values.yaml" --readme "charts/${chart}/README.md" --schema "/tmp/schema.json"
          done
      - name: Push changes
        run: |
          # Push all the changes
          cd charts
          if git status -s | grep pgcat; then
            git config user.name "$GITHUB_ACTOR"
            git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
            git add . && git commit -am "Update README.md with readme-generator-for-helm" --signoff && git push
          fi
.github/workflows/publish-ci-docker-image.yml
@@ -1,20 +0,0 @@
name: publish-ci-docker-image
on:
  push:
    branches: [ main ]
jobs:
  publish-ci-docker-image:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build CI Docker image
        run: |
          docker build . -f Dockerfile.ci --tag ghcr.io/postgresml/pgcat-ci:latest
          docker run ghcr.io/postgresml/pgcat-ci:latest
          docker push ghcr.io/postgresml/pgcat-ci:latest
.github/workflows/publish-deb-package.yml
@@ -1,59 +0,0 @@
name: pgcat package (deb)

on:
  push:
    tags:
      - v*
  workflow_dispatch:
    inputs:
      packageVersion:
        default: "1.1.2-dev1"
jobs:
  build:
    strategy:
      max-parallel: 1
      fail-fast: false # Let the other job finish, or they can lock each other out
      matrix:
        os: ["buildjet-4vcpu-ubuntu-2204", "buildjet-4vcpu-ubuntu-2204-arm"]

    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - name: Set package version
        if: github.event_name == 'push' # For push event
        run: |
          TAG=${{ github.ref_name }}
          echo "packageVersion=${TAG#v}" >> "$GITHUB_ENV"
      - name: Set package version (manual dispatch)
        if: github.event_name == 'workflow_dispatch' # For manual dispatch
        run: echo "packageVersion=${{ github.event.inputs.packageVersion }}" >> "$GITHUB_ENV"
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
      - name: Install dependencies
        env:
          DEBIAN_FRONTEND: noninteractive
          TZ: Etc/UTC
        run: |
          curl -sLO https://github.com/deb-s3/deb-s3/releases/download/0.11.4/deb-s3-0.11.4.gem
          sudo gem install deb-s3-0.11.4.gem
          dpkg-deb --version
      - name: Build and release package
        env:
          AWS_ACCESS_KEY_ID: ${{ vars.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: ${{ vars.AWS_DEFAULT_REGION }}
        run: |
          if [[ $(arch) == "x86_64" ]]; then
            export ARCH=amd64
          else
            export ARCH=arm64
          fi

          bash utilities/deb.sh ${{ env.packageVersion }}

          deb-s3 upload \
            --lock \
            --bucket apt.postgresml.org \
            pgcat-${{ env.packageVersion }}-ubuntu22.04-${ARCH}.deb \
            --codename $(lsb_release -cs)
@@ -1,2 +0,0 @@
edition = "2021"
hard_tabs = false
CONFIG.md
@@ -1,523 +0,0 @@
# PgCat Configurations

## `general` Section

### host
```
path: general.host
default: "0.0.0.0"
```

What IP to run on, 0.0.0.0 means accessible from everywhere.

### port
```
path: general.port
default: 6432
```

Port to run on, same as PgBouncer used in this example.

### enable_prometheus_exporter
```
path: general.enable_prometheus_exporter
default: true
```

Whether to enable the prometheus exporter or not.

### prometheus_exporter_port
```
path: general.prometheus_exporter_port
default: 9930
```

Port that the prometheus exporter listens on.

### connect_timeout
```
path: general.connect_timeout
default: 1000 # milliseconds
```

How long the client waits to obtain a server connection before aborting (ms).
This is similar to PgBouncer's `query_wait_timeout`.

### idle_timeout
```
path: general.idle_timeout
default: 30000 # milliseconds
```

How long an idle connection with a server is left open (ms).

### server_lifetime
```
path: general.server_lifetime
default: 86400000 # 24 hours
```

Max connection lifetime before it's closed, even if actively used.

### server_round_robin
```
path: general.server_round_robin
default: false
```

Whether to use round robin for server selection or not.

### server_tls
```
path: general.server_tls
default: false
```

Whether to use TLS for server connections or not.

### verify_server_certificate
```
path: general.verify_server_certificate
default: false
```

Whether to verify the server certificate or not.

### verify_config
```
path: general.verify_config
default: true
```

Whether to verify the config or not.

### idle_client_in_transaction_timeout
```
path: general.idle_client_in_transaction_timeout
default: 0 # milliseconds
```

How long a client is allowed to be idle while in a transaction (ms).

### healthcheck_timeout
```
path: general.healthcheck_timeout
default: 1000 # milliseconds
```

How much time to give the health check query to return with a result (ms).

### healthcheck_delay
```
path: general.healthcheck_delay
default: 30000 # milliseconds
```

How long to keep a connection available for immediate re-use without running a healthcheck query on it (ms).

### shutdown_timeout
```
path: general.shutdown_timeout
default: 60000 # milliseconds
```

How much time to give clients during shutdown before forcibly killing client connections (ms).

### ban_time
```
path: general.ban_time
default: 60 # seconds
```

How long to ban a server if it fails a health check (seconds).

### log_client_connections
```
path: general.log_client_connections
default: false
```

Whether to log client connections.

### log_client_disconnections
```
path: general.log_client_disconnections
default: false
```

Whether to log client disconnections.

### autoreload
```
path: general.autoreload
default: 15000 # milliseconds
```

When set, PgCat automatically reloads its configuration at the specified interval (in milliseconds) if it detects changes in the configuration file. The default interval is 15000 milliseconds (15 seconds).

### worker_threads
```
path: general.worker_threads
default: 5
```

Number of worker threads the runtime will use.

### tcp_keepalives_idle
```
path: general.tcp_keepalives_idle
default: 5
```

Number of seconds of connection idleness to wait before sending a keepalive packet to the server.

### tcp_keepalives_count
```
path: general.tcp_keepalives_count
default: 5
```

Number of unacknowledged keepalive packets allowed before giving up and closing the connection.

### tcp_keepalives_interval
```
path: general.tcp_keepalives_interval
default: 5
```

Number of seconds between individual keepalive probes.

### tcp_user_timeout
```
path: general.tcp_user_timeout
default: 10000
```
A Linux-only parameter that defines the amount of time in milliseconds that transmitted data may remain unacknowledged, or buffered data may remain untransmitted (due to zero window size), before TCP forcibly disconnects.

### tls_certificate
```
path: general.tls_certificate
default: <UNSET>
example: "server.cert"
```

Path to the TLS certificate file to use for TLS connections.

### tls_private_key
```
path: general.tls_private_key
default: <UNSET>
example: "server.key"
```

Path to the TLS private key file to use for TLS connections.

### admin_username
```
path: general.admin_username
default: "admin_user"
```

User name to access the virtual administrative database (pgbouncer or pgcat).
Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.

### admin_password
```
path: general.admin_password
default: "admin_pass"
```

Password to access the virtual administrative database.

### auth_query
```
path: general.auth_query
default: <UNSET>
example: "SELECT $1"
```

Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
established using the database configured in the pool. This parameter is inherited by every pool
and can be redefined in pool configuration.

### auth_query_user
```
path: general.auth_query_user
default: <UNSET>
example: "sharding_user"
```

User to be used for connecting to servers to obtain the hash used for md5 authentication, by sending the query
specified in `auth_query`. The connection will be established using the database configured in the pool.
This parameter is inherited by every pool and can be redefined in pool configuration.

### auth_query_password
```
path: general.auth_query_password
default: <UNSET>
example: "sharding_user"
```

Password to be used for connecting to servers to obtain the hash used for md5 authentication, by sending the query
specified in `auth_query`. The connection will be established using the database configured in the pool.
This parameter is inherited by every pool and can be redefined in pool configuration.
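
Taken together, a minimal sketch of auth passthrough might look like the following. The user and query shown here are illustrative assumptions, not defaults; the exact query depends on how your server exposes password hashes:

```
[general]
# Hypothetical maintenance user allowed to read password hashes on the server.
auth_query_user = "maintenance_user"
auth_query_password = "maintenance_pass"
# Hypothetical query returning the md5 hash for the connecting user.
auth_query = "SELECT usename, passwd FROM pg_shadow WHERE usename = $1"
```

With something like this in place, the pooler can authenticate clients without cleartext passwords in the config file (per the Auth passthrough feature described in the README).
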
### dns_cache_enabled
```
path: general.dns_cache_enabled
default: false
```
When enabled, IP resolutions for server connections specified using hostnames will be cached
and rechecked for changes every `dns_max_ttl` seconds. If a change in the host's resolution is found,
old IP connections are closed (gracefully) and new connections start using the new IP.

### dns_max_ttl
```
path: general.dns_max_ttl
default: 30
```
Specifies how often (in seconds) cached IP addresses for servers are rechecked (see `dns_cache_enabled`).
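
For example, a sketch enabling the DNS cache with a one-minute recheck interval:

```
[general]
dns_cache_enabled = true
dns_max_ttl = 60 # recheck cached server IPs every 60 seconds
```
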
## `pools.<pool_name>` Section

### pool_mode
```
path: pools.<pool_name>.pool_mode
default: "transaction"
```

Pool mode (see PgBouncer docs for more).
`session`: one server connection per connected client.
`transaction`: one server connection per client transaction.

### load_balancing_mode
```
path: pools.<pool_name>.load_balancing_mode
default: "random"
```

Load balancing mode.
`random`: selects the server at random.
`loc`: selects the server with the least outstanding busy connections.

### default_role
```
path: pools.<pool_name>.default_role
default: "any"
```

If the client doesn't specify, PgCat routes traffic to this role by default.
`any`: round-robin between primary and replicas.
`replica`: round-robin between replicas only, without touching the primary.
`primary`: all queries go to the primary unless otherwise specified.

### prepared_statements_cache_size
```
path: general.prepared_statements_cache_size
default: 0
```

Size of the prepared statements cache. 0 means disabled.
TODO: update documentation

### query_parser_enabled
```
path: pools.<pool_name>.query_parser_enabled
default: true
```

If the query parser is enabled, we'll attempt to parse
every incoming query to determine if it's a read or a write.
If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
we'll direct it to the primary.

### primary_reads_enabled
```
path: pools.<pool_name>.primary_reads_enabled
default: true
```

If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
load balancing of read queries. Otherwise, the primary will only be used for write
queries. The primary can always be explicitly selected with our custom protocol.

### sharding_key_regex
```
path: pools.<pool_name>.sharding_key_regex
default: <UNSET>
example: '/\* sharding_key: (\d+) \*/'
```

Allows sharding commands to be passed as statement comments instead of
separate commands. If unset, this functionality is disabled.
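
As an illustrative sketch using the example regex above, a client can select a shard by embedding the key in a comment; the query text below is hypothetical:

```
[pools.sharded_db]
sharding_key_regex = '/\* sharding_key: (\d+) \*/'
# A client query such as:
#   /* sharding_key: 1234 */ SELECT * FROM users WHERE id = 1234;
# is routed to the shard that the key 1234 hashes to.
```
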
### sharding_function
```
path: pools.<pool_name>.sharding_function
default: "pg_bigint_hash"
```

So what if you wanted to implement a different hashing function,
or you've already built one and you want this pooler to use it?
Current options:
`pg_bigint_hash`: PARTITION BY HASH (Postgres hashing function)
`sha1`: A hashing function based on SHA1

### auth_query
```
path: pools.<pool_name>.auth_query
default: <UNSET>
example: "SELECT $1"
```

Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
established using the database configured in the pool. This parameter is inherited by every pool
and can be redefined in pool configuration.

### auth_query_user
```
path: pools.<pool_name>.auth_query_user
default: <UNSET>
example: "sharding_user"
```

User to be used for connecting to servers to obtain the hash used for md5 authentication, by sending the query
specified in `auth_query`. The connection will be established using the database configured in the pool.
This parameter is inherited by every pool and can be redefined in pool configuration.

### auth_query_password
```
path: pools.<pool_name>.auth_query_password
default: <UNSET>
example: "sharding_user"
```

Password to be used for connecting to servers to obtain the hash used for md5 authentication, by sending the query
specified in `auth_query`. The connection will be established using the database configured in the pool.
This parameter is inherited by every pool and can be redefined in pool configuration.

### automatic_sharding_key
```
path: pools.<pool_name>.automatic_sharding_key
default: <UNSET>
example: "data.id"
```

Automatically parse this from queries and route queries to the right shard!
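
As a sketch using the example value above (the table and column names are illustrative), queries filtering on that column can be routed without any comment annotations:

```
[pools.sharded_db]
automatic_sharding_key = "data.id"
# With this set, a query like:
#   SELECT * FROM data WHERE id = 5;
# is parsed and routed to the shard that the value 5 hashes to.
```
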
### idle_timeout
```
path: pools.<pool_name>.idle_timeout
default: 40000
```

Idle timeout can be overridden in the pool.

### connect_timeout
```
path: pools.<pool_name>.connect_timeout
default: 3000
```

Connect timeout can be overridden in the pool.

## `pools.<pool_name>.users.<user_index>` Section

### username
```
path: pools.<pool_name>.users.<user_index>.username
default: "sharding_user"
```

PostgreSQL username used to authenticate the user and connect to the server
if `server_username` is not set.

### password
```
path: pools.<pool_name>.users.<user_index>.password
default: "sharding_user"
```

PostgreSQL password used to authenticate the user and connect to the server
if `server_password` is not set.

### server_username
```
path: pools.<pool_name>.users.<user_index>.server_username
default: <UNSET>
example: "another_user"
```

PostgreSQL username used to connect to the server.

### server_password
```
path: pools.<pool_name>.users.<user_index>.server_password
default: <UNSET>
example: "another_password"
```

PostgreSQL password used to connect to the server.

### pool_size
```
path: pools.<pool_name>.users.<user_index>.pool_size
default: 9
```

Maximum number of server connections that can be established for this user.
The maximum number of connections from a single PgCat process to any database in the cluster
is the sum of `pool_size` across all users.
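
For example, with the two users sketched below (credentials are placeholders), a single PgCat process opens at most 9 + 21 = 30 connections to any one database in the cluster:

```
[pools.sharded_db.users.0]
username = "app_user"     # placeholder
password = "app_pass"     # placeholder
pool_size = 9

[pools.sharded_db.users.1]
username = "report_user"  # placeholder
password = "report_pass"  # placeholder
pool_size = 21
# Maximum server connections per database from this process: 9 + 21 = 30.
```
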
### min_pool_size
```
path: pools.<pool_name>.users.<user_index>.min_pool_size
default: 0
```

Minimum number of idle server connections to retain for this pool.

### statement_timeout
```
path: pools.<pool_name>.users.<user_index>.statement_timeout
default: 0
```

Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
0 means it is disabled.

### connect_timeout
```
path: pools.<pool_name>.users.<user_index>.connect_timeout
default: <UNSET> # milliseconds
```

How long the client waits to obtain a server connection before aborting (ms).
This is similar to PgBouncer's `query_wait_timeout`.
If unset, uses the `connect_timeout` defined globally.

## `pools.<pool_name>.shards.<shard_index>` Section

### servers
```
path: pools.<pool_name>.shards.<shard_index>.servers
default: [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]
```

Array of servers in the shard; each server entry is an array of `[host, port, role]`.

### mirrors
```
path: pools.<pool_name>.shards.<shard_index>.mirrors
default: <UNSET>
example: [["1.2.3.4", 5432, 0], ["1.2.3.4", 5432, 1]]
```

Array of mirrors for the shard; each mirror entry is an array of `[host, port, index of server in servers array]`.
Traffic hitting the server identified by the index will be sent to the mirror.
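
A minimal sketch combining `servers` and `mirrors` (the mirror address is a placeholder): traffic aimed at server index 0 is also sent to the mirror host.

```
[pools.sharded_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Mirror traffic hitting servers[0] (the primary) to a second host.
mirrors = [
    [ "1.2.3.4", 5432, 0 ]
]
database = "shard0"
```
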
### database
```
path: pools.<pool_name>.shards.<shard_index>.database
default: "shard0"
```

Database name (e.g. "postgres").
CONTRIBUTING.md
@@ -1,39 +0,0 @@
## Introduction

Thank you for contributing! Just a few tips here:

1. `cargo fmt` and `cargo clippy` your code before opening up a PR.
2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`.
3. Performance is important; make sure there are no regressions in your branch vs. `main`.

## How to run the integration tests locally and iterate on them
We have integration tests written in Ruby, Python, Go and Rust.
Below are the steps to run them in a developer-friendly way that allows iterating and quick turnaround.
Hear me out, this should be easy: you open a shell into a container with all the necessary dependencies available, modify the test code, and immediately rerun your test in the interactive shell.

Quite simply, make sure you have Docker installed, then run
`./start_test_env.sh`

That is it!

Within this test environment you can modify the files in your favorite IDE and rerun the tests without having to bootstrap the entire environment again.

Once the environment is ready, you can run the tests by running
Ruby: `cd /app/tests/ruby && bundle exec ruby <test_name>.rb --format documentation`
Python: `cd /app/ && pytest`
Rust: `cd /app/tests/rust && cargo run`
Go: `cd /app/tests/go && /usr/local/go/bin/go test`

You can also rebuild PgCat directly within the environment, and the tests will run against the newly built binary.
To rebuild PgCat, just run `cargo build` within the container under `/app`.



Happy hacking!

## TODOs

See [Issues](https://github.com/levkk/pgcat/issues).
Cargo.lock (generated)
File diff suppressed because it is too large
Cargo.toml
@@ -1,60 +0,0 @@
[package]
name = "pgcat"
version = "1.2.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1", features = ["full"] }
bytes = "1"
md-5 = "0.10"
bb8 = "=0.8.6"
async-trait = "0.1"
rand = "0.8"
chrono = "0.4"
sha-1 = "0.10"
toml = "0.7"
serde = { version = "1", features = ["derive"] }
serde_derive = "1"
regex = "1"
num_cpus = "1"
once_cell = "1"
sqlparser = { version = "0.41", features = ["visitor"] }
log = "0.4"
arc-swap = "1"
parking_lot = "0.12.1"
hmac = "0.12"
sha2 = "0.10"
base64 = "0.21"
stringprep = "0.1"
tokio-rustls = "0.24"
rustls-pemfile = "1"
http-body-util = "0.1.2"
hyper = { version = "1.4.1", features = ["full"] }
hyper-util = { version = "0.1.7", features = ["tokio"] }
phf = { version = "0.11.1", features = ["macros"] }
exitcode = "1.1.2"
futures = "0.3"
socket2 = { version = "0.4.7", features = ["all"] }
nix = "0.26.2"
atomic_enum = "0.2.0"
postgres-protocol = "0.6.5"
fallible-iterator = "0.2"
pin-project = "1"
webpki-roots = "0.23"
rustls = { version = "0.21", features = ["dangerous_configuration"] }
trust-dns-resolver = "0.22.0"
tokio-test = "0.4.2"
serde_json = "1"
itertools = "0.10"
clap = { version = "4.3.1", features = ["derive", "env"] }
tracing = "0.1.37"
tracing-subscriber = { version = "0.3.17", features = [
    "json",
    "env-filter",
    "std",
] }
lru = "0.12.0"

[target.'cfg(not(target_env = "msvc"))'.dependencies]
jemallocator = "0.5.0"
Dockerfile
@@ -1,22 +0,0 @@
FROM rust:1.79.0-slim-bookworm AS builder

RUN apt-get update && \
    apt-get install -y build-essential

COPY . /app
WORKDIR /app
RUN cargo build --release

FROM debian:bookworm-slim
RUN apt-get update && apt-get install -o Dpkg::Options::=--force-confdef -yq --no-install-recommends \
    postgresql-client \
    # Clean up layer
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
    && truncate -s 0 /var/log/*log
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
CMD ["pgcat"]
STOPSIGNAL SIGINT
Dockerfile.ci
@@ -1,17 +0,0 @@
FROM cimg/rust:1.79.0
COPY --from=sclevine/yj /bin/yj /bin/yj
RUN /bin/yj -h
RUN sudo apt-get update && \
    sudo apt-get install -y \
    psmisc postgresql-contrib-14 postgresql-client-14 libpq-dev \
    ruby ruby-dev python3 python3-pip \
    lcov llvm-11 iproute2 && \
    sudo apt-get upgrade curl && \
    cargo install cargo-binutils rustfilt && \
    rustup component add llvm-tools-preview && \
    pip3 install psycopg2 && sudo gem install bundler && \
    wget -O /tmp/toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i /tmp/toxiproxy-2.4.0.deb
RUN wget -O /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    sudo tar -C /usr/local -xzf /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    rm /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz
@@ -1,25 +0,0 @@
FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef

RUN apt-get update && \
    apt-get install -y build-essential

WORKDIR /app

FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
# Build dependencies - this is the caching Docker layer!
RUN cargo chef cook --release --recipe-path recipe.json
# Build application (release mode, so the COPY from target/release below succeeds)
COPY . .
RUN cargo build --release

FROM debian:bookworm-slim
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
CMD ["pgcat"]
||||
LICENSE
@@ -1,20 +0,0 @@
Copyright (c) 2023 PgCat Contributors

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
README.md
@@ -1,292 +1 @@
## PgCat: Nextgen PostgreSQL Pooler

[](https://circleci.com/gh/postgresml/pgcat/tree/main)
<a href="https://discord.gg/DmyJP3qJ7U" target="_blank">
  <img src="https://img.shields.io/discord/1013868243036930099" alt="Join our Discord!" />
</a>

PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.

## Features

| **Feature** | **Status** | **Comments** |
|-------------|------------|--------------|
| Transaction pooling | **Stable** | Identical to PgBouncer, with notable improvements for handling bad clients and abandoned transactions. |
| Session pooling | **Stable** | Identical to PgBouncer. |
| Multi-threaded runtime | **Stable** | Using the Tokio asynchronous runtime, the pooler takes advantage of multicore machines. |
| Load balancing of read queries | **Stable** | Queries are automatically load balanced between replicas and the primary. |
| Failover | **Stable** | Queries are automatically rerouted around broken replicas, validated by regular health checks. |
| Admin database statistics | **Stable** | Pooler statistics and administration via the `pgbouncer` and `pgcat` databases. |
| Prometheus statistics | **Stable** | Statistics are reported via an HTTP endpoint for Prometheus. |
| SSL/TLS | **Stable** | Clients can connect to the pooler using TLS. The pooler can connect to Postgres servers using TLS. |
| Client/Server authentication | **Stable** | Clients can connect using MD5 authentication, supported by `libpq` and all Postgres client drivers. PgCat can connect to Postgres using MD5 and SCRAM-SHA-256. |
| Live configuration reloading | **Stable** | Identical to PgBouncer; all settings can be reloaded dynamically (except `host` and `port`). |
| Auth passthrough | **Stable** | MD5 password authentication can be configured to use an `auth_query` so no cleartext passwords are needed in the config file. |
| Sharding using extended SQL syntax | **Experimental** | Clients can dynamically configure the pooler to route queries to specific shards. |
| Sharding using comment parsing/regex | **Experimental** | Clients can include shard information (sharding key, shard ID) in query comments. |
| Automatic sharding | **Experimental** | PgCat can parse queries, detect sharding keys automatically, and route queries to the correct shard. |
| Mirroring | **Experimental** | Mirror queries between multiple databases in order to test servers with realistic production traffic. |


## Status

PgCat is stable and used in production to serve hundreds of thousands of queries per second.

<table>
  <tr>
    <td>
      <a href="https://tech.instacart.com/adopting-pgcat-a-nextgen-postgres-proxy-3cf284e68c2f">
        <img src="./images/instacart.webp" height="70" width="auto">
      </a>
    </td>
    <td>
      <a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
        <img src="./images/postgresml.webp" height="70" width="auto">
      </a>
    </td>
    <td>
      <a href="https://onesignal.com">
        <img src="./images/one_signal.webp" height="70" width="auto">
      </a>
    </td>
  </tr>
  <tr>
    <td>
      <a href="https://tech.instacart.com/adopting-pgcat-a-nextgen-postgres-proxy-3cf284e68c2f">
        Instacart
      </a>
    </td>
    <td>
      <a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
        PostgresML
      </a>
    </td>
    <td>
      OneSignal
    </td>
  </tr>
</table>

Some features remain experimental and are being actively developed. They are optional and can be enabled through configuration.

## Deployment

See `Dockerfile` for an example deployment using Docker. The pooler is configured to spawn 4 workers, so 4 CPUs are recommended for optimal performance. That setting can be adjusted to spawn as many (or as few) workers as needed.

A Docker image is available via `docker pull ghcr.io/postgresml/pgcat:latest`. See our [GitHub Packages repository](https://github.com/postgresml/pgcat/pkgs/container/pgcat).

For a quick local example, use the Docker Compose environment provided:

```bash
docker-compose up

# In a new terminal:
PGPASSWORD=postgres psql -h 127.0.0.1 -p 6432 -U postgres -c 'SELECT 1'
```

### Config

See **[Configuration](https://github.com/levkk/pgcat/blob/main/CONFIG.md)**.

## Contributing

The project is being actively developed and is looking for additional contributors and production deployments.

### Local development

1. Install Rust (latest stable will work great).
2. `cargo build --release` (to get better benchmarks).
3. Change the config in `pgcat.toml` to fit your setup (optional given the next step).
4. Install Postgres and run `psql -f tests/sharding/query_routing_setup.sql` (user/password may be required depending on your setup).
5. `RUST_LOG=info cargo run --release` and you're ready to go!

### Tests

When making substantial modifications to the protocol implementation, make sure to test them with pgbench:

```bash
pgbench -i -h 127.0.0.1 -p 6432 && \
pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol simple && \
pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol extended
```

See the [sharding README](./tests/sharding/README.md) for sharding logic testing.

Additionally, all features are tested with Ruby, Python, and Rust unit and integration tests.

Run `cargo test` to run the Rust unit tests.

Run the following commands to run the Ruby and Python integration tests:

```bash
cd tests/docker/
docker compose up --exit-code-from main # This will also produce a coverage report under ./cov/
```

### Docker-based local development

You can open a Docker development environment where you can debug tests more easily. Run the following command to spin it up:

```bash
./dev/script/console
```

This will open a terminal in an environment similar to the one used in tests. There you can compile the pooler, run tests, debug against the test environment, etc. Objects compiled inside the container (and bundled gems) are placed in `dev/cache` so they don't interfere with what you have on your machine.

## Usage

### Session mode

In session mode, a client talks to one server for the duration of the connection. Prepared statements, `SET`, and advisory locks are supported. In terms of supported features, there is very little, if any, difference between session mode and talking directly to the server; the example below illustrates this.

To use session mode, change `pool_mode = "session"`.
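
For instance, all of the following work in session mode because the client keeps the same server connection throughout (the `users` table is illustrative, not part of PgCat):

```sql
SET statement_timeout = '5s';  -- session-level SET sticks to this connection
PREPARE get_user (BIGINT) AS SELECT * FROM users WHERE id = $1;
EXECUTE get_user(1);
SELECT pg_advisory_lock(42);   -- session-level advisory lock, held until unlocked
```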

### Transaction mode

In transaction mode, a client talks to one server for the duration of a single transaction; once it's over, the server is returned to the pool. Prepared statements, `SET`, and advisory locks are not supported; the alternatives are `SET LOCAL` and `pg_advisory_xact_lock`, which are scoped to a single transaction (see the example below).

This mode is enabled by default.
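
A minimal sketch of the transaction-scoped alternatives (the `accounts` table is illustrative):

```sql
BEGIN;
SET LOCAL statement_timeout = '5s';  -- reverts automatically at COMMIT/ROLLBACK
SELECT pg_advisory_xact_lock(42);    -- released automatically at COMMIT/ROLLBACK
UPDATE accounts SET balance = balance - 100 WHERE id = 1;
COMMIT;
```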

### Load balancing of read queries

All queries are load balanced against the configured servers using either the random or the least open connections algorithm. The most straightforward configuration example would be to put this pooler in front of several replicas and let it load balance all queries.

If the configuration includes a primary and replicas, queries can be separated with the built-in query parser. The query parser, implemented with the `sqlparser` crate, will interpret the query and route all `SELECT` queries to a replica, while all other queries, including explicit transactions, will be routed to the primary.

#### Query parser

The query parser will do its best to determine where the query should go, but sometimes that's not possible. In that case, the client can select which server it wants using this custom SQL syntax:

```sql
-- To talk to the primary for the duration of the next transaction:
SET SERVER ROLE TO 'primary';

-- To talk to the replica for the duration of the next transaction:
SET SERVER ROLE TO 'replica';

-- Let the query parser decide
SET SERVER ROLE TO 'auto';

-- Pick any server at random
SET SERVER ROLE TO 'any';

-- Reset to default configured settings
SET SERVER ROLE TO 'default';
```

The setting will persist until it's changed again or the client disconnects.

By default, all queries are routed to the first available server; the `default_role` setting controls this behavior.

### Failover

All servers are checked with a `;` (very fast) query before being given to a client. Additionally, server health is monitored with every client query it processes. If a server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban; queries are routed to the remaining servers. If all servers become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.

The ban time can be changed with `ban_time`. The default is 60 seconds.
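
To observe server state in practice, the pgbouncer-style admin commands can be used against the admin database; a sketch (exact output columns vary by version):

```sql
-- psql -h 127.0.0.1 -p 6432 -U postgres -d pgcat
SHOW SERVERS;  -- server connections and their current state
SHOW POOLS;    -- per-pool client and server counters
```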

### Sharding

We use the `PARTITION BY HASH` hashing function, the same one Postgres uses for declarative partitioning. This allows sharding the database using Postgres partitions and placing the partitions on different servers (shards); see the sketch below. Both read and write queries can be routed to the shards using this pooler.
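
A minimal sketch of the matching Postgres side (table name and shard count are illustrative): a hash-partitioned table whose partitions correspond to shards, plus Postgres' built-in helper to check which shard a key hashes to:

```sql
-- Three shards, keyed by a bigint id:
CREATE TABLE users (id BIGINT, email TEXT) PARTITION BY HASH (id);
CREATE TABLE users_shard_0 PARTITION OF users FOR VALUES WITH (MODULUS 3, REMAINDER 0);
CREATE TABLE users_shard_1 PARTITION OF users FOR VALUES WITH (MODULUS 3, REMAINDER 1);
CREATE TABLE users_shard_2 PARTITION OF users FOR VALUES WITH (MODULUS 3, REMAINDER 2);

-- True if id = 1234 lands on shard 1, i.e. hash(1234) % 3 == 1:
SELECT satisfies_hash_partition('users'::regclass, 3, 1, 1234::bigint);
```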

#### Extended syntax

To route queries to a particular shard, we use this custom SQL syntax:

```sql
-- To talk to a shard explicitly
SET SHARD TO '1';

-- To let the pooler choose based on a value
SET SHARDING KEY TO '1234';
```

The active shard will last until it's changed again or the client disconnects. By default, queries are routed to shard 0.

For the hash function implementation, see `src/sharding.rs` and `tests/sharding/partition_hash_test_setup.sql`.


##### ActiveRecord/Rails

```ruby
class User < ActiveRecord::Base
end

# Metadata will be fetched from shard 0
ActiveRecord::Base.establish_connection

# Grab a bunch of users from shard 1
User.connection.execute "SET SHARD TO '1'"
User.take(10)

# Using id as the sharding key
User.connection.execute "SET SHARDING KEY TO '1234'"
User.find_by_id(1234)

# Using geographical sharding
User.connection.execute "SET SERVER ROLE TO 'primary'"
User.connection.execute "SET SHARDING KEY TO '85'"
User.create(name: "test user", email: "test@example.com", zone_id: 85)

# Let the query parser figure out where the query should go.
# We are still on shard = hash(85) % shards.
User.connection.execute "SET SERVER ROLE TO 'auto'"
User.find_by_email("test@example.com")
```

##### Raw SQL

```sql
-- Grab a bunch of users from shard 1
SET SHARD TO '1';
SELECT * FROM users LIMIT 10;

-- Find by id
SET SHARDING KEY TO '1234';
SELECT * FROM users WHERE id = 1234;

-- Writing in a primary/replicas configuration.
SET SERVER ROLE TO 'primary';
SET SHARDING KEY TO '85';
INSERT INTO users (name, email, zone_id) VALUES ('test user', 'test@example.com', 85);

SET SERVER ROLE TO 'auto'; -- let the query router figure out where the query should go
SELECT * FROM users WHERE email = 'test@example.com'; -- shard setting lasts until set again; we are reading from the primary
```

#### With comments

Issuing extra queries to the pooler adds latency. To reduce that impact, sharding information can be included in SQL comments sent along with the query. This is reasonably easy to implement with ORMs like [ActiveRecord](https://api.rubyonrails.org/classes/ActiveRecord/QueryMethods.html#method-i-annotate) and [SQLAlchemy](https://docs.sqlalchemy.org/en/20/core/events.html#sql-execution-and-connection-events).

```sql
/* shard_id: 5 */ SELECT * FROM foo WHERE id = 1234;

/* sharding_key: 1234 */ SELECT * FROM foo WHERE id = 1234;
```

#### Automatic query parsing

PgCat can use the `sqlparser` crate to parse SQL queries, detect sharding keys automatically, and route queries to the correct shard. This is configurable with the `automatic_sharding_key` setting. The feature is still experimental, but it's the ideal implementation for sharding, requiring no client modifications.

### Statistics reporting

The stats are very similar to what PgBouncer reports, and the names are kept the same to stay comparable. They are accessible by querying the admin database `pgcat` (or `pgbouncer`, for compatibility):

```bash
psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'
```
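
Beyond `SHOW DATABASES`, the usual pgbouncer-style commands work on the admin database; a sketch (output columns may differ between versions):

```sql
SHOW STATS;    -- query and transaction counts, bytes sent/received
SHOW CLIENTS;  -- currently connected clients
```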

Additionally, Prometheus statistics are available at `/metrics` via HTTP.

We also have a [basic Grafana dashboard](https://github.com/postgresml/pgcat/blob/main/grafana_dashboard.json) based on the Prometheus metrics that you can import into Grafana and build on, or use as-is for monitoring.

### Live configuration reloading

The config can be reloaded by sending `SIGHUP` to the process (`kill -s SIGHUP`) or by issuing `RELOAD` on the admin database. All settings except `host` and `port` can be reloaded without restarting the pooler, including the sharding and replica configurations.
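
A sketch of both reload paths (assuming a single pgcat process and the example connection settings above):

```sql
-- On the admin database, e.g. psql -h 127.0.0.1 -p 6432 -U postgres -d pgcat:
RELOAD;
-- Or, from a shell: kill -s SIGHUP $(pgrep pgcat)
```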

### Mirroring

Mirroring routes queries to multiple databases at the same time. This is useful for prewarming replicas before placing them into the active configuration, or for testing different versions of Postgres with live traffic.

## License

PgCat is free and open source, released under the MIT license.

## Contributors

Many thanks to our amazing contributors!

<a href="https://github.com/postgresml/pgcat/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=postgresml/pgcat"/>
</a>

## PgCat: Nextgen PostgreSQL Pooler Helm Charts

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
@@ -1,8 +0,0 @@
apiVersion: v2
name: pgcat
description: A Helm chart for PgCat, a PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
maintainers:
  - name: Wildcard
    email: support@w6d.io
appVersion: "1.2.0"
version: 0.2.1
@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "pgcat.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "pgcat.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "pgcat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "pgcat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}
@@ -1,3 +0,0 @@
{{/*
Configuration template definition
*/}}
@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "pgcat.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "pgcat.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "pgcat.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "pgcat.labels" -}}
helm.sh/chart: {{ include "pgcat.chart" . }}
{{ include "pgcat.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "pgcat.selectorLabels" -}}
app.kubernetes.io/name: {{ include "pgcat.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "pgcat.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "pgcat.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "pgcat.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
      {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "pgcat.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.image.pullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "pgcat.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.containerSecurityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: pgcat
              containerPort: {{ .Values.configuration.general.port }}
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: pgcat
          readinessProbe:
            tcpSocket:
              port: pgcat
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - mountPath: /etc/pgcat
              name: config
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - secret:
            defaultMode: 420
            secretName: {{ include "pgcat.fullname" . }}
          name: config
@@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "pgcat.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
  {{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
  ingressClassName: {{ .Values.ingress.className }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $svcPort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
              {{- end }}
          {{- end }}
    {{- end }}
{{- end }}
@@ -1,97 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
type: Opaque
stringData:
  pgcat.toml: |
    [general]
    host = {{ .Values.configuration.general.host | quote }}
    port = {{ .Values.configuration.general.port }}
    enable_prometheus_exporter = {{ .Values.configuration.general.enable_prometheus_exporter }}
    prometheus_exporter_port = {{ .Values.configuration.general.prometheus_exporter_port }}
    connect_timeout = {{ .Values.configuration.general.connect_timeout }}
    idle_timeout = {{ .Values.configuration.general.idle_timeout | int }}
    server_lifetime = {{ .Values.configuration.general.server_lifetime | int }}
    server_tls = {{ .Values.configuration.general.server_tls }}
    idle_client_in_transaction_timeout = {{ .Values.configuration.general.idle_client_in_transaction_timeout | int }}
    healthcheck_timeout = {{ .Values.configuration.general.healthcheck_timeout }}
    healthcheck_delay = {{ .Values.configuration.general.healthcheck_delay }}
    shutdown_timeout = {{ .Values.configuration.general.shutdown_timeout }}
    ban_time = {{ .Values.configuration.general.ban_time }}
    log_client_connections = {{ .Values.configuration.general.log_client_connections }}
    log_client_disconnections = {{ .Values.configuration.general.log_client_disconnections }}
    tcp_keepalives_idle = {{ .Values.configuration.general.tcp_keepalives_idle }}
    tcp_keepalives_count = {{ .Values.configuration.general.tcp_keepalives_count }}
    tcp_keepalives_interval = {{ .Values.configuration.general.tcp_keepalives_interval }}
    {{- if and (ne .Values.configuration.general.tls_certificate "-") (ne .Values.configuration.general.tls_private_key "-") }}
    tls_certificate = "{{ .Values.configuration.general.tls_certificate }}"
    tls_private_key = "{{ .Values.configuration.general.tls_private_key }}"
    {{- end }}
    admin_username = {{ .Values.configuration.general.admin_username | quote }}
    admin_password = {{ .Values.configuration.general.admin_password | quote }}
    {{- if and .Values.configuration.general.auth_query_user .Values.configuration.general.auth_query_password .Values.configuration.general.auth_query }}
    auth_query = {{ .Values.configuration.general.auth_query | quote }}
    auth_query_user = {{ .Values.configuration.general.auth_query_user | quote }}
    auth_query_password = {{ .Values.configuration.general.auth_query_password | quote }}
    {{- end }}

    {{- range $pool := .Values.configuration.pools }}

    ##
    ## pool for {{ $pool.name }}
    ##
    [pools.{{ $pool.name | quote }}]
    pool_mode = {{ default "transaction" $pool.pool_mode | quote }}
    load_balancing_mode = {{ default "random" $pool.load_balancing_mode | quote }}
    default_role = {{ default "any" $pool.default_role | quote }}
    prepared_statements_cache_size = {{ default 500 $pool.prepared_statements_cache_size }}
    query_parser_enabled = {{ default true $pool.query_parser_enabled }}
    query_parser_read_write_splitting = {{ default true $pool.query_parser_read_write_splitting }}
    primary_reads_enabled = {{ default true $pool.primary_reads_enabled }}
    sharding_function = {{ default "pg_bigint_hash" $pool.sharding_function | quote }}

    {{- range $index, $user := $pool.users }}

    ## pool {{ $pool.name }} user {{ $user.username | quote }}
    ##
    [pools.{{ $pool.name | quote }}.users.{{ $index }}]
    username = {{ $user.username | quote }}
    {{- if $user.password }}
    password = {{ $user.password | quote }}
    {{- else if and $user.passwordSecret.name $user.passwordSecret.key }}
    {{- $secret := (lookup "v1" "Secret" $.Release.Namespace $user.passwordSecret.name) }}
    {{- if $secret }}
    {{- $password := index $secret.data $user.passwordSecret.key | b64dec }}
    password = {{ $password | quote }}
    {{- end }}
    {{- end }}
    pool_size = {{ $user.pool_size }}
    statement_timeout = {{ default 0 $user.statement_timeout }}
    min_pool_size = {{ default 3 $user.min_pool_size }}
    {{- if $user.server_lifetime }}
    server_lifetime = {{ $user.server_lifetime }}
    {{- end }}
    {{- if and $user.server_username $user.server_password }}
    server_username = {{ $user.server_username | quote }}
    server_password = {{ $user.server_password | quote }}
    {{- end }}
    {{- end }}

    {{- range $index, $shard := $pool.shards }}

    ## pool {{ $pool.name }} database {{ $shard.database }}
    ##
    [pools.{{ $pool.name | quote }}.shards.{{ $index }}]
    {{- if gt (len $shard.servers) 0}}
    servers = [
      {{- range $server := $shard.servers }}
      [ {{ $server.host | quote }}, {{ $server.port }}, {{ $server.role | quote }} ],
      {{- end }}
    ]
    {{- end }}
    database = {{ $shard.database | quote }}
    {{- end }}
    {{- end }}
@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: pgcat
      protocol: TCP
      name: pgcat
  selector:
    {{- include "pgcat.selectorLabels" . | nindent 4 }}
@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "pgcat.serviceAccountName" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
@@ -1,374 +0,0 @@
## String to partially override the pgcat.fullname template (will maintain the release name)
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""

## String to fully override the pgcat.fullname template
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""

## Number of PgCat replicas to deploy
## @param replicaCount Number of PgCat replicas to deploy
replicaCount: 1

## PgCat image version
##
## @param image.registry PgCat image registry
## @param image.repository PgCat image name
## @param image.tag PgCat image tag
## @param image.pullPolicy PgCat image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
image:
  repository: ghcr.io/postgresml/pgcat
  # Overrides the image tag whose default is the chart appVersion.
  tag: "main"
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Example:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []

## Specifies whether a ServiceAccount should be created
##
## @param serviceAccount.create Enable the creation of a ServiceAccount for PgCat pods
## @param serviceAccount.name Name of the created ServiceAccount
##
serviceAccount:
  ## Specifies whether a service account should be created
  create: true
  ## Annotations to add to the service account
  annotations: {}
  ## The name of the service account to use.
  ## If not set and create is true, a name is generated using the fullname template
  name: ""

## Annotations for server pods.
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
## @param podAnnotations Annotations for PgCat pods
##
podAnnotations: {}

## PgCat pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
## @param podSecurityContext.enabled Enabled PgCat pods' Security Context
## @param podSecurityContext.fsGroup Set PgCat pod's Security Context fsGroup
##
podSecurityContext: {}
  # fsGroup: 2000

## PgCat containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
##
## @param containerSecurityContext.enabled Enabled PgCat containers' Security Context
## @param containerSecurityContext.runAsUser Set PgCat container's Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set PgCat container's Security Context runAsNonRoot
##
containerSecurityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

## PgCat service
##
## @param service.type PgCat service type
## @param service.port PgCat service port
service:
  type: ClusterIP
  port: 6432

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

## PgCat resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## @skip resources Optional description
## @disabled-param resources.limits The resources limits for the PgCat container
## @disabled-param resources.requests The requested resources for the PgCat container
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
  #   cpu: 100m
  #   memory: 128Mi
  requests: {}
  #   cpu: 100m
  #   memory: 128Mi

## Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
## @param nodeSelector Node labels for pod assignment
##
nodeSelector: {}

## Tolerations for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
## @param tolerations Tolerations for pod assignment
##
tolerations: []

## Affinity for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
## @param affinity Affinity for pod assignment
##
affinity: {}

## PgCat configuration
## @param configuration [object]
configuration:
  ## General pooler settings
  ## @param [object]
  general:
    ## @param configuration.general.host What IP to run on; 0.0.0.0 means accessible from everywhere.
    host: "0.0.0.0"

    ## @param configuration.general.port Port to run on, same as PgBouncer uses in this example.
    port: 6432

    ## @param configuration.general.enable_prometheus_exporter Whether to enable the Prometheus exporter or not.
    enable_prometheus_exporter: false

    ## @param configuration.general.prometheus_exporter_port Port the Prometheus exporter listens on.
    prometheus_exporter_port: 9930

    # @param configuration.general.connect_timeout How long to wait before aborting a server connection (ms).
    connect_timeout: 5000

    # How long an idle connection with a server is left open (ms).
    idle_timeout: 30000 # milliseconds

    # Max connection lifetime before it's closed, even if actively used.
    server_lifetime: 86400000 # 24 hours

    # Whether to use TLS for server connections or not.
    server_tls: false

    # How long a client is allowed to be idle while in a transaction (ms).
    idle_client_in_transaction_timeout: 0 # milliseconds

    # @param configuration.general.healthcheck_timeout How much time to give the `SELECT 1` health check query to return with a result (ms).
    healthcheck_timeout: 1000

    # @param configuration.general.healthcheck_delay How long to keep a connection available for immediate re-use without running a healthcheck query on it (ms).
    healthcheck_delay: 30000

    # @param configuration.general.shutdown_timeout How much time to give clients during shutdown before forcibly killing client connections (ms).
    shutdown_timeout: 60000

    # @param configuration.general.ban_time For how long to ban a server if it fails a health check (seconds).
    ban_time: 60 # seconds

    # @param configuration.general.log_client_connections Whether to log client connections.
    log_client_connections: false

    # @param configuration.general.log_client_disconnections Whether to log client disconnections.
    log_client_disconnections: false

    # TLS
    # tls_certificate: "server.cert"
    # tls_private_key: "server.key"
    tls_certificate: "-"
    tls_private_key: "-"

    # Credentials to access the virtual administrative database (pgbouncer or pgcat).
    # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
    admin_username: "postgres"
    admin_password: "postgres"

    # Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
    # established using the database configured in the pool. This parameter is inherited by every pool and
    # can be redefined in the pool configuration.
    auth_query: null

    # User to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in the pool configuration.
    #
    # @param configuration.general.auth_query_user
    auth_query_user: null

    # Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in the pool configuration.
    #
    # @param configuration.general.auth_query_password
    auth_query_password: null

    # Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
    tcp_keepalives_idle: 5

    # Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
    tcp_keepalives_count: 5

    # Number of seconds between keepalive packets.
    tcp_keepalives_interval: 5

  ## pool
  ## configs are structured as pool.<pool_name>
  ## the pool_name is what clients use as the database name when connecting.
  ## For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded"
  ## @param [object]
  pools:
    [{
      name: "simple", pool_mode: "transaction",
      users: [{username: "user", password: "pass", pool_size: 5, statement_timeout: 0}],
      shards: [{
        servers: [{host: "postgres", port: 5432, role: "primary"}],
        database: "postgres"
      }]
    }]
  # - ## default values
  #   ##
  #   ##
  #   ##
  #   name: "db"

  #   ## Pool mode (see PgBouncer docs for more).
  #   ## session: one server connection per connected client
  #   ## transaction: one server connection per client transaction
  #   ## @param configuration.poolsPostgres.pool_mode
  #   pool_mode: "transaction"

  #   ## Load balancing mode
  #   ## `random` selects the server at random
  #   ## `loc` selects the server with the least outstanding busy connections
  #   ##
  #   ## @param configuration.poolsPostgres.load_balancing_mode
  #   load_balancing_mode: "random"

  #   ## Prepared statements cache size.
  #   ## TODO: update documentation
  #   ##
  #   ## @param configuration.poolsPostgres.prepared_statements_cache_size
  #   prepared_statements_cache_size: 500

  #   ## If the client doesn't specify, route traffic to
  #   ## this role by default.
  #   ##
  #   ## any: round-robin between primary and replicas,
  #   ## replica: round-robin between replicas only without touching the primary,
  #   ## primary: all queries go to the primary unless otherwise specified.
  #   ## @param configuration.poolsPostgres.default_role
  #   default_role: "any"

  #   ## Query parser. If enabled, we'll attempt to parse
  #   ## every incoming query to determine if it's a read or a write.
  #   ## If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
  #   ## we'll direct it to the primary.
  #   ## @param configuration.poolsPostgres.query_parser_enabled
  #   query_parser_enabled: true

  #   ## If the query parser is enabled and this setting is enabled, we'll attempt to
  #   ## infer the role from the query itself.
  #   ## @param configuration.poolsPostgres.query_parser_read_write_splitting
  #   query_parser_read_write_splitting: true

  #   ## If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
  #   ## load balancing of read queries. Otherwise, the primary will only be used for write
  #   ## queries. The primary can always be explicitly selected with our custom protocol.
  #   ## @param configuration.poolsPostgres.primary_reads_enabled
  #   primary_reads_enabled: true

  #   ## So what if you wanted to implement a different hashing function,
  #   ## or you've already built one and you want this pooler to use it?
  #   ##
  #   ## Current options:
  #   ##
  #   ## pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
  #   ## sha1: A hashing function based on SHA1
  #   ##
  #   ## @param configuration.poolsPostgres.sharding_function
  #   sharding_function: "pg_bigint_hash"

  #   ## Credentials for users that may connect to this cluster
  #   ## @param users [array]
  #   ## @param users[0].username Username used to connect to PgCat (required)
  #   ## @param users[0].password Password (required); leave empty to use an existing secret, see passwordSecret.name and passwordSecret.key
  #   ## @param users[0].passwordSecret.name Name of the secret containing the password
  #   ## @param users[0].passwordSecret.key Key in the secret containing the password
  #   ## @param users[0].pool_size Maximum number of server connections that can be established for this user
  #   ## @param users[0].statement_timeout Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
  #   users: []
  #   # - username: "user"
  #   #   password: "pass"
  #   #
  #   #   # The maximum number of connections from a single PgCat process to any database in the cluster
  #   #   # is the sum of pool_size across all users.
  #   #   pool_size: 9
  #   #
  #   #   # Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
  #   #   statement_timeout: 0
  #   #
  #   #   # PostgreSQL username used to connect to the server.
  #   #   server_username: "postgres"
  #   #
  #   #   # PostgreSQL password used to connect to the server.
  #   #   server_password: "postgres"

  #   ## @param shards [array]
  #   ## @param shards[0].server[0].host Host for this shard
  #   ## @param shards[0].server[0].port Port for this shard
  #   ## @param shards[0].server[0].role Role for this shard
  #   shards: []
  #   #   [ host, port, role ]
  #   # - servers:
  #   #   - host: "postgres"
  #   #     port: 5432
  #   #     role: "primary"
  #   #   - host: "postgres"
  #   #     port: 5432
  #   #     role: "replica"
  #   #   database: "postgres"
  #   #   # [ host, port, role ]
  #   # - servers:
  #   #   - host: "postgres"
  #   #     port: 5432
  #   #     role: "primary"
  #   #   - host: "postgres"
  #   #     port: 5432
  #   #     role: "replica"
  #   #   database: "postgres"
  #   #   # [ host, port, role ]
  #   # - servers:
  #   #   - host: "postgres"
  #   #     port: 5432
  #   #     role: "primary"
  #   #   - host: "postgres"
  #   #     port: 5432
  #   #     role: "replica"
  #   #   database: "postgres"
control
@@ -1,9 +0,0 @@
Package: pgcat
Version: ${PACKAGE_VERSION}
Section: database
Priority: optional
Architecture: ${ARCH}
Maintainer: PostgresML <team@postgresml.org>
Homepage: https://postgresml.org
Description: PgCat - NextGen PostgreSQL Pooler
 PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
cov-style.css
@@ -1,158 +0,0 @@
/*
 * Copyright 2021 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

body {
        background-color: #f2f2f2;
        font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
                "Noto Sans", Ubuntu, Cantarell, "Helvetica Neue", sans-serif,
                "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol",
                "Noto Color Emoji";
}

.sourceHeading, .source, .coverFn,
.testName, .testPer, .testNum,
.coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo,
.coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed,
.coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi,
.coverFile {
        font-family: "Menlo", "DejaVu Sans Mono", "Liberation Mono",
                "Consolas", "Ubuntu Mono", "Courier New", "andale mono",
                "lucida console", monospace;
}

pre {
        font-size: 0.7875rem;
}

.headerCovTableEntry, .testPer, .testNum, .testName,
.coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo,
.coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed,
.coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi {
        text-align: right;
        white-space: nowrap;
}

.coverPerLo, .coverPerMed, .coverPerHi, .testPer {
        /* font-weight: bold;*/
}

.coverNumLo, .coverNumMed, .coverNumHi, .testNum {
        font-style: italic;
        font-size: 90%;
        padding-left: 1em;
}

.title {
        font-size: 200%;
}

.tableHead {
        text-align: center;
        font-weight: bold;
        background-color: #bfbfbf;
}

.coverFile, .coverBar, .coverFn {
        background-color: #d9d9d9;
}

.headerCovTableHead {
        font-weight: bold;
        text-align: right;
}

.headerCovTableEntry {
        background-color: #d9d9d9;
}

.coverFnLo,
.coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo {
        background-color: #f2dada;
}

.coverFnHi,
.coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed {
        background-color: #add9ad;
}

.coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi {
        background-color: #59b359;
}

.coverBarOutline {
        border-style: solid;
        border-width: 1px;
        border-color: black;
        padding: 0px;
}

.coverFnLo, .coverFnHi {
        text-align: right;
}

.lineNum {
        background-color: #d9d9d9;
}

.coverLegendCov, .lineCov, .branchCov {
background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAABCAIAAABsYngUAAADAXpUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjazZVbktwgDEX/WUWWgCSExHIwj6rsIMvPxcY9PY9MzVTyEVMNtCwkoYNwGL9+zvADDxHHkNQ8l5wjnlRS4YqJx+upZ08xnf313O/otTw8FBgzwShbP2/5gJyhz1vetp0KuT4ZKmO/OF6/qNsQ+3ZwO9yOhC4HcRsOdRsS3p7T9f+4thVzcXveQtv6sz5t1dfW0CUxzprJEvrE0SwXzJ1jMuStr0CPvhfqdvTmf7hVGTHxEJKI3leEsn4kFWNCT/CGfUnBXDEuyd4yaHGIhnm58/r581nk4Q59Y32N+p69Qc3xPelwJvRWkTeE8mP8UE76Ig/PSE9uT55z3jN+LZ/pJaibXLjxzdl9znHtrqaMLee9qXuL5wx6x8rWuSqjGX4afSV7tYLmKImGc9RxyA60RoUYGCcl6lRp0jjHRg0hJh4MjszcALcFCB0wCjcgJYBGo8kGzF0cB6DhOAik/IiFTrfldNfI4biTB5wegjHCkr9q4StKc66CIlq55CtXiItXwhHFIkeE6ocaiNDcSdUzwXd7+yyuAoJ6ptmxwRqPZQH4D6WXwyUnaIGiYrwKmKxvA0gRIlAEQwICMZMoZYrGHIwIiXQAqgidJfEBLKTKHUFyEsmAgyqAb6wxOlVZ+RLjIgQIlRzEwAaFCFgpKc6PJccZqiqaVDWrqWvRmiWvCsvZ8rpRq4klU8tm5lasBhdPrp7d3L14LVwEN64W1GPxUkqtcFphuWJ1hUKtBx9ypEOPfNjhRzlq49CkpaYtN2veSqudu3TUcc/duvfS66CBozTS0JGHDR9l1ImjNmWmqTNPmz5LmPVBbWN9175BjTY1PkktRXtQg9TsNkHrOtHFDMQ4EYDbIkASmBez6JQSL3KLWSyMqlBGkLrgdFrEQDANYp30YPdCToPkf8MtAAT/C3JhofsCuffcPqLW6/mhk5PQKsOV1CiovpHgnx3LcCvhwlnz9dF8P4Y/vfju+J8aQpZK+A373P3XzDqcKwAAAAZiS0dEAAAAAAAA+UO7fwAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB+UEEQYyDQA04tUAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAADklEQVQI12PULVBlwAYAEagAxGHRDdwAAAAASUVORK5CYII=');
        background-repeat: repeat-y;
        background-position: left top;
        background-color: #c6ffb8;
}

.coverLegendNoCov, .lineNoCov, .branchNoCov, .branchNoExec {
background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAABCAIAAABsYngUAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAB3RJTUUH5QMUCiMidNgp2gAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAAPSURBVAjXY/wZIcWADQAAIa4BbZaExr0AAAAASUVORK5CYII=');
        background-repeat: repeat-y;
        background-position: left top;
        background-color: #ffcfbb;
}

.coverLegendCov, .coverLegendNoCov {
        padding: 0em 1em 0em 1em;
}

.headerItem, .headerValue, .headerValueLeg {
        white-space: nowrap;
}

.headerItem {
        text-align: right;
        font-weight: bold;
}

.ruler {
        background-color: #d9d9d9;
}

.detail {
        font-size: 80%;
}

.versionInfo {
        font-size: 80%;
        text-align: right;
}

@@ -1,35 +0,0 @@
FROM rust:bullseye

# Dependencies
COPY --from=sclevine/yj /bin/yj /bin/yj
RUN /bin/yj -h
RUN apt-get update -y \
    && apt-get install -y \
    llvm-11 psmisc postgresql-contrib postgresql-client \
    ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 \
    strace ngrep iproute2 dnsutils lsof net-tools telnet

# Rust
RUN cargo install cargo-binutils rustfilt
RUN rustup component add llvm-tools-preview

# Ruby
RUN sudo gem install bundler

# Toxiproxy
RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i toxiproxy-2.4.0.deb

# Config
ENV APP_ROOT=/app
ARG APP_USER=pgcat
COPY dev_bashrc /etc/bash.bashrc

RUN useradd -m -o -u 999 ${APP_USER} || exit 0 && mkdir ${APP_ROOT} && chown ${APP_USER} ${APP_ROOT}
RUN adduser ${APP_USER} sudo \
    && echo "${APP_USER} ALL=NOPASSWD: ALL" > /etc/sudoers.d/${APP_USER} \
    && chmod ugo+s /usr/sbin/usermod /usr/sbin/groupmod
ENV HOME=${APP_ROOT}
WORKDIR ${APP_ROOT}

ENTRYPOINT ["/bin/bash"]
dev/dev_bashrc
@@ -1,120 +0,0 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# FIX USER NEEDED SO WE CAN SHARE UID BETWEEN HOST AND DEV ENV
usermod -o -u $(id -u) pgcat
groupmod -o -g $(id -g) pgcat

# We fix the setuid in those commands as we now have sudo
sudo chmod ugo-s /usr/sbin/usermod /usr/sbin/groupmod

# Environment customization
export DEV_ROOT="${APP_ROOT}/dev"
export HISTFILE="${DEV_ROOT}/.bash_history"
export CARGO_TARGET_DIR="${DEV_ROOT}/cache/target"
export CARGO_HOME="${DEV_ROOT}/cache/target/.cargo"
export BUNDLE_PATH="${DEV_ROOT}/cache/bundle"

# Regular bashrc
# If not running interactively, don't do anything
case $- in
    *i*) ;;
      *) return;;
esac

# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color|*-256color) color_prompt=yes;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi

PS1='\[\e]0;pgcat@dev-container\h: \w\a\]${debian_chroot:+($debian_chroot)}\[\033[01;32m\]pgcat\[\033[00m\]@\[\033[01;32m\]dev-container\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\[\033[01;31m\]$(git branch &>/dev/null; if [ $? -eq 0 ]; then echo " ($(git branch | grep ^* |sed s/\*\ //))"; fi)\[\033[00m\]\$ '

unset color_prompt force_color_prompt

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'

    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'

# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# Add an "alert" alias for long running commands. Use like so:
#   sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'

# Alias definitions.
# You may want to put all your additions into a separate file like
|
||||
# ~/.bash_aliases, instead of adding them here directly.
|
||||
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
|
||||
|
||||
if [ -f ~/.bash_aliases ]; then
|
||||
. ~/.bash_aliases
|
||||
fi
|
||||
|
||||
# enable programmable completion features (you don't need to enable
|
||||
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
|
||||
# sources /etc/bash.bashrc).
|
||||
if ! shopt -oq posix; then
|
||||
if [ -f /usr/share/bash-completion/bash_completion ]; then
|
||||
. /usr/share/bash-completion/bash_completion
|
||||
elif [ -f /etc/bash_completion ]; then
|
||||
. /etc/bash_completion
|
||||
fi
|
||||
fi
|
||||
@@ -1,94 +0,0 @@
version: "3"

x-common-definition-pg:
  &common-definition-pg
  image: postgres:14
  network_mode: "service:main"
  healthcheck:
    test: [ "CMD-SHELL", "pg_isready -U postgres -d postgres" ]
    interval: 5s
    timeout: 5s
    retries: 5
  volumes:
    - type: bind
      source: ../tests/sharding/query_routing_setup.sql
      target: /docker-entrypoint-initdb.d/query_routing_setup.sql
    - type: bind
      source: ../tests/sharding/partition_hash_test_setup.sql
      target: /docker-entrypoint-initdb.d/partition_hash_test_setup.sql

x-common-env-pg:
  &common-env-pg
  POSTGRES_USER: postgres
  POSTGRES_DB: postgres
  POSTGRES_PASSWORD: postgres

services:
  main:
    image: gcr.io/google_containers/pause:3.2
    ports:
      - 6432

  pg1:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
      PGPORT: 5432
    command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  pg2:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      PGPORT: 7432
    command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg3:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      PGPORT: 8432
    command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg4:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      PGPORT: 9432
    command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg5:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
      PGPORT: 10432
    command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  toxiproxy:
    build: .
    network_mode: "service:main"
    container_name: toxiproxy
    environment:
      LOG_LEVEL: info
    entrypoint: toxiproxy-server
    depends_on:
      - pg1
      - pg2
      - pg3
      - pg4
      - pg5

  pgcat-shell:
    stdin_open: true
    user: "${HOST_UID}:${HOST_GID}"
    build: .
    network_mode: "service:main"
    depends_on:
      - toxiproxy
    volumes:
      - ../:/app/
    entrypoint:
      - /bin/bash
      - -i
@@ -1,12 +0,0 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export HOST_UID="$(id -u)"
export HOST_GID="$(id -g)"

if [[ "${1}" == "down" ]]; then
    docker-compose -f "${DIR}/../docker-compose.yaml" down
    exit 0
else
    docker-compose -f "${DIR}/../docker-compose.yaml" run --rm pgcat-shell
fi
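Hypothetical invocation of the wrapper above (its path in the repository is not shown here, so the name below is illustrative):

    ./dev/up.sh         # drop into the pgcat-shell dev container
    ./dev/up.sh down    # tear the compose stack down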
@@ -1,17 +0,0 @@
version: "3"
services:
  postgres:
    image: postgres:14
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_HOST_AUTH_METHOD: md5
  pgcat:
    build: .
    command:
      - "pgcat"
      - "/etc/pgcat/pgcat.toml"
    volumes:
      - "${PWD}/examples/docker/pgcat.toml:/etc/pgcat/pgcat.toml"
    ports:
      - "6432:6432"
      - "9930:9930"
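With the example stack above running, PgCat listens on the published port 6432; a sketch of a smoke test (compose file path assumed, credentials come from the pgcat.toml mounted above):

    docker-compose -f examples/docker/docker-compose.yml up -d
    psql "postgres://postgres:postgres@localhost:6432/postgres" -c 'SELECT 1'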
@@ -1,127 +0,0 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 5000

# How much time to give the `SELECT 1` health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep a connection available for immediate re-use, without running a healthcheck query on it (ms).
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 60000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # seconds

# If we should log client connections
log_client_connections = false

# If we should log client disconnections
log_client_disconnections = false

# TLS
# tls_certificate = "server.cert"
# tls_private_key = "server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "postgres"
admin_password = "postgres"

# Pool
# Configs are structured as pools.<pool_name>.
# The pool_name is what clients use as the database name when connecting.
# For the example below, a client can connect using
# "postgres://postgres:postgres@pgcat_host:pgcat_port/postgres".
[pools.postgres]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Credentials for users that may connect to this cluster
[pools.postgres.users.0]
username = "postgres"
password = "postgres"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9

# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
statement_timeout = 0

# Shard 0
[pools.postgres.shards.0]
# [ host, port, role ]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "postgres"

[pools.postgres.shards.1]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ],
]
database = "postgres"

[pools.postgres.shards.2]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ],
]
database = "postgres"
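Given the admin credentials in the example above, the virtual admin database can be queried for pool state (host and port assumed local):

    psql "postgres://postgres:postgres@localhost:6432/pgbouncer" -c 'SHOW POOLS'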
[File diff suppressed because it is too large; not shown.]
[Binary file not shown: image, 3.4 KiB]
[Binary file not shown: image, 16 KiB]
[Binary file not shown: image, 4.7 KiB]
@@ -1,22 +0,0 @@
# This is an example of the most basic config
# that will mimic what PgBouncer does in transaction mode with one server.

[general]

host = "0.0.0.0"
port = 6433
admin_username = "pgcat"
admin_password = "pgcat"

[pools.pgml.users.0]
username = "postgres"
password = "postgres"
pool_size = 10
min_pool_size = 1
pool_mode = "transaction"

[pools.pgml.shards.0]
servers = [
    ["127.0.0.1", 28815, "primary"]
]
database = "postgres"
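A sketch of connecting through this minimal pool, assuming PgCat is running locally with the config above:

    psql "postgres://postgres:postgres@localhost:6433/pgml" -c 'SELECT 1'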
@@ -1,17 +0,0 @@
[Unit]
Description=PgCat pooler
After=network.target
StartLimitIntervalSec=0

[Service]
User=pgcat
Type=simple
Restart=always
RestartSec=1
Environment=RUST_LOG=info
LimitNOFILE=65536
ExecStart=/usr/bin/pgcat /etc/pgcat.toml
ExecReload=/bin/kill -SIGHUP $MAINPID

[Install]
WantedBy=multi-user.target
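The usual lifecycle for the unit above, assuming it is installed as /etc/systemd/system/pgcat.service:

    sudo systemctl daemon-reload
    sudo systemctl enable --now pgcat
    sudo systemctl reload pgcat   # sends SIGHUP via ExecReload to re-read /etc/pgcat.toml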
pgcat.toml
@@ -1,350 +0,0 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 5000 # milliseconds

# How long an idle connection with a server is left open (ms).
idle_timeout = 30000 # milliseconds

# Max connection lifetime before it's closed, even if actively used.
server_lifetime = 86400000 # 24 hours

# How long a client is allowed to be idle while in a transaction (ms).
idle_client_in_transaction_timeout = 0 # milliseconds

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000 # milliseconds

# How long to keep a connection available for immediate re-use, without running a healthcheck query on it (ms).
healthcheck_delay = 30000 # milliseconds

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 60000 # milliseconds

# How long to ban a server if it fails a health check (seconds).
ban_time = 60 # seconds

# If we should log client connections
log_client_connections = false

# If we should log client disconnections
log_client_disconnections = false

# When set, PgCat reloads the config if it detects a change in the config file,
# checking at this interval (ms).
autoreload = 15000

# Number of worker threads the Runtime will use (4 by default).
worker_threads = 5

# Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
tcp_keepalives_idle = 5
# Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
tcp_keepalives_count = 5
# Number of seconds between keepalive packets.
tcp_keepalives_interval = 5

# Path to TLS Certificate file to use for TLS connections
# tls_certificate = ".circleci/server.cert"
# Path to TLS private key file to use for TLS connections
# tls_private_key = ".circleci/server.key"

# Enable/disable server TLS
server_tls = false

# Verify server certificate is completely authentic.
verify_server_certificate = false

# User name to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
# Password to access the virtual administrative database
admin_password = "admin_pass"

# Default plugins that are configured on all pools.
[plugins]

# Prewarmer plugin that runs queries on server startup, before giving the connection
# to the client.
[plugins.prewarmer]
enabled = false
queries = [
    "SELECT pg_prewarm('pgbench_accounts')",
]

# Log all queries to stdout.
[plugins.query_logger]
enabled = false

# Block access to tables that Postgres does not allow us to control.
[plugins.table_access]
enabled = false
tables = [
    "pg_user",
    "pg_roles",
    "pg_database",
]

# Intercept user queries and give a fake reply.
[plugins.intercept]
enabled = true

[plugins.intercept.queries.0]

query = "select current_database() as a, current_schemas(false) as b"
schema = [
    ["a", "text"],
    ["b", "text"],
]
result = [
    ["${DATABASE}", "{public}"],
]

[plugins.intercept.queries.1]

query = "select current_database(), current_schema(), current_user"
schema = [
    ["current_database", "text"],
    ["current_schema", "text"],
    ["current_user", "text"],
]
result = [
    ["${DATABASE}", "public", "${USER}"],
]


# Pool configs are structured as pools.<pool_name>.
# The pool_name is what clients use as the database name when connecting.
# For a pool named `sharded_db`, clients access that pool using a connection string like
# `postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db`
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# `session`: one server connection per connected client
# `transaction`: one server connection per client transaction
pool_mode = "transaction"

# Load balancing mode
# `random` selects the server at random
# `loc` selects the server with the least outstanding busy connections
load_balancing_mode = "random"

# If the client doesn't specify, PgCat routes traffic to this role by default.
# `any`: round-robin between primary and replicas,
# `replica`: round-robin between replicas only without touching the primary,
# `primary`: all queries go to the primary unless otherwise specified.
default_role = "any"

# Prepared statements cache size.
# TODO: update documentation
prepared_statements_cache_size = 500

# If the query parser is enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# Allow sharding commands to be passed as statement comments instead of
# separate commands. If these are unset, this functionality is disabled.
# sharding_key_regex = '/\* sharding_key: (\d+) \*/'
# shard_id_regex = '/\* shard_id: (\d+) \*/'
# regex_search_limit = 1000 # only look at the first 1000 characters of SQL statements

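# Illustration (not part of the shipped config): with the regexes above
# uncommented, a client could route inline with a statement comment, e.g.
#   /* sharding_key: 11 */ SELECT * FROM data WHERE id = 11;
#   /* shard_id: 2 */ SELECT 1;
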
# Defines the behavior when no shard is selected in a sharded system.
# `random`: picks a shard at random
# `random_healthy`: picks a shard at random, favoring shards with the least number of recent errors
# `shard_<number>`: e.g. shard_0, shard_4, etc. picks a specific shard, every time
# no_shard_specified_behavior = "shard_0"

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
# Current options:
# `pg_bigint_hash`: PARTITION BY HASH (Postgres hashing function)
# `sha1`: A hashing function based on SHA1
sharding_function = "pg_bigint_hash"

# Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
# established using the database configured in the pool. This parameter is inherited by every pool
# and can be redefined in pool configuration.
# auth_query = "SELECT usename, passwd FROM pg_shadow WHERE usename='$1'"

# User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
# specified in `auth_query`. The connection will be established using the database configured in the pool.
# This parameter is inherited by every pool and can be redefined in pool configuration.
# auth_query_user = "sharding_user"

# Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
# specified in `auth_query`. The connection will be established using the database configured in the pool.
# This parameter is inherited by every pool and can be redefined in pool configuration.
# auth_query_password = "sharding_user"

# Automatically parse this from queries and route queries to the right shard!
# automatic_sharding_key = "data.id"

# Idle timeout can be overwritten in the pool
idle_timeout = 40000

# Connect timeout can be overwritten in the pool
connect_timeout = 3000

# When enabled, IP resolutions for server connections specified using hostnames will be cached
# and checked for changes every `dns_max_ttl` seconds. If a change in the host resolution is found,
# old IP connections are closed (gracefully) and new connections will start using the new IP.
# dns_cache_enabled = false

# Specifies how often (in seconds) cached IP addresses for servers are rechecked (see `dns_cache_enabled`).
# dns_max_ttl = 30

# Plugins can be configured on a pool-per-pool basis. This overrides the global plugins setting,
# so all plugins have to be configured here again.
[pools.sharded_db.plugins]

[pools.sharded_db.plugins.prewarmer]
enabled = true
queries = [
    "SELECT pg_prewarm('pgbench_accounts')",
]

[pools.sharded_db.plugins.query_logger]
enabled = false

[pools.sharded_db.plugins.table_access]
enabled = false
tables = [
    "pg_user",
    "pg_roles",
    "pg_database",
]

[pools.sharded_db.plugins.intercept]
enabled = true

[pools.sharded_db.plugins.intercept.queries.0]

query = "select current_database() as a, current_schemas(false) as b"
schema = [
    ["a", "text"],
    ["b", "text"],
]
result = [
    ["${DATABASE}", "{public}"],
]

[pools.sharded_db.plugins.intercept.queries.1]

query = "select current_database(), current_schema(), current_user"
schema = [
    ["current_database", "text"],
    ["current_schema", "text"],
    ["current_user", "text"],
]
result = [
    ["${DATABASE}", "public", "${USER}"],
]

# User configs are structured as pools.<pool_name>.users.<user_index>.
# This section holds the credentials for users that may connect to this cluster.
[pools.sharded_db.users.0]
# PostgreSQL username used to authenticate the user and connect to the server
# if `server_username` is not set.
username = "sharding_user"

# PostgreSQL password used to authenticate the user and connect to the server
# if `server_password` is not set.
password = "sharding_user"

pool_mode = "transaction"

# PostgreSQL username used to connect to the server.
# server_username = "another_user"

# PostgreSQL password used to connect to the server.
# server_password = "another_password"

# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9

# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
# 0 means it is disabled.
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 15000
connect_timeout = 1000
idle_timeout = 1000

# Shard configs are structured as pools.<pool_name>.shards.<shard_id>.
# Each shard config contains a list of servers that make up the shard
# and the database name to use.
[pools.sharded_db.shards.0]
# Array of servers in the shard, each server entry is an array of `[host, port, role]`
servers = [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]

# Array of mirrors for the shard, each mirror entry is an array of `[host, port, index of server in servers array]`.
# Traffic hitting the server identified by the index will be sent to the mirror.
# mirrors = [["1.2.3.4", 5432, 0], ["1.2.3.4", 5432, 1]]

# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
min_pool_size = 3
server_lifetime = 60000
statement_timeout = 0

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
postinst
@@ -1,13 +0,0 @@
#!/bin/bash
set -e

systemctl daemon-reload
systemctl enable pgcat

if ! id pgcat 2> /dev/null; then
    useradd -s /usr/bin/false pgcat
fi

if [ -f /etc/pgcat.toml ]; then
    systemctl start pgcat
fi
src/admin.rs
@@ -1,999 +0,0 @@
use crate::pool::BanReason;
use crate::server::ServerParameters;
use crate::stats::pool::PoolStats;
use bytes::{Buf, BufMut, BytesMut};
use log::{error, info, trace};
use nix::sys::signal::{self, Signal};
use nix::unistd::Pid;
use std::collections::HashMap;
/// Admin database.
use std::sync::atomic::Ordering;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::time::Instant;

use crate::config::{get_config, reload_config, VERSION};
use crate::errors::Error;
use crate::messages::*;
use crate::pool::ClientServerMap;
use crate::pool::{get_all_pools, get_pool};
use crate::stats::{get_client_stats, get_server_stats, ClientState, ServerState};

pub fn generate_server_parameters_for_admin() -> ServerParameters {
    let mut server_parameters = ServerParameters::new();

    server_parameters.set_param("application_name".to_string(), "".to_string(), true);
    server_parameters.set_param("client_encoding".to_string(), "UTF8".to_string(), true);
    server_parameters.set_param("server_encoding".to_string(), "UTF8".to_string(), true);
    server_parameters.set_param("server_version".to_string(), VERSION.to_string(), true);
    server_parameters.set_param("DateStyle".to_string(), "ISO, MDY".to_string(), true);

    server_parameters
}

/// Handle admin client.
pub async fn handle_admin<T>(
    stream: &mut T,
    mut query: BytesMut,
    client_server_map: ClientServerMap,
) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let code = query.get_u8() as char;

    if code != 'Q' {
        return Err(Error::ProtocolSyncError(format!(
            "Invalid code, expected 'Q' but got '{}'",
            code
        )));
    }

    let len = query.get_i32() as usize;
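    // The 4-byte length prefix counts itself plus the NUL-terminated query
    // string, so the query text itself is len - 5 bytes.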
    let query = String::from_utf8_lossy(&query[..len - 5]).to_string();

    trace!("Admin query: {}", query);

    let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();

    match query_parts
        .first()
        .unwrap_or(&"")
        .to_ascii_uppercase()
        .as_str()
    {
        "BAN" => {
            trace!("BAN");
            ban(stream, query_parts).await
        }
        "UNBAN" => {
            trace!("UNBAN");
            unban(stream, query_parts).await
        }
        "RELOAD" => {
            trace!("RELOAD");
            reload(stream, client_server_map).await
        }
        "SET" => {
            trace!("SET");
            ignore_set(stream).await
        }
        "PAUSE" => {
            trace!("PAUSE");
            pause(stream, query_parts).await
        }
        "RESUME" => {
            trace!("RESUME");
            resume(stream, query_parts).await
        }
        "SHUTDOWN" => {
            trace!("SHUTDOWN");
            shutdown(stream).await
        }
        "SHOW" => match query_parts
            .get(1)
            .unwrap_or(&"")
            .to_ascii_uppercase()
            .as_str()
        {
            "HELP" => {
                trace!("SHOW HELP");
                show_help(stream).await
            }
            "BANS" => {
                trace!("SHOW BANS");
                show_bans(stream).await
            }
            "CONFIG" => {
                trace!("SHOW CONFIG");
                show_config(stream).await
            }
            "DATABASES" => {
                trace!("SHOW DATABASES");
                show_databases(stream).await
            }
            "LISTS" => {
                trace!("SHOW LISTS");
                show_lists(stream).await
            }
            "POOLS" => {
                trace!("SHOW POOLS");
                show_pools(stream).await
            }
            "CLIENTS" => {
                trace!("SHOW CLIENTS");
                show_clients(stream).await
            }
            "SERVERS" => {
                trace!("SHOW SERVERS");
                show_servers(stream).await
            }
            "STATS" => {
                trace!("SHOW STATS");
                show_stats(stream).await
            }
            "VERSION" => {
                trace!("SHOW VERSION");
                show_version(stream).await
            }
            "USERS" => {
                trace!("SHOW USERS");
                show_users(stream).await
            }
            _ => error_response(stream, "Unsupported SHOW query against the admin database").await,
        },
        _ => error_response(stream, "Unsupported query against the admin database").await,
    }
}

/// Column-oriented statistics.
async fn show_lists<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let client_stats = get_client_stats();
    let server_stats = get_server_stats();

    let columns = vec![("list", DataType::Text), ("items", DataType::Int4)];

    let mut users = 1;
    let mut databases = 1;
    for (_, pool) in get_all_pools() {
        databases += pool.databases();
        users += 1; // One user per pool
    }
    let mut res = BytesMut::new();
    res.put(row_description(&columns));
    res.put(data_row(&vec![
        "databases".to_string(),
        databases.to_string(),
    ]));
    res.put(data_row(&vec!["users".to_string(), users.to_string()]));
    res.put(data_row(&vec!["pools".to_string(), databases.to_string()]));
    res.put(data_row(&vec![
        "free_clients".to_string(),
        client_stats
            .keys()
            .filter(|client_id| {
                client_stats
                    .get(client_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ClientState::Idle
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "used_clients".to_string(),
        client_stats
            .keys()
            .filter(|client_id| {
                client_stats
                    .get(client_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ClientState::Active
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "login_clients".to_string(),
        "0".to_string(),
    ]));
    res.put(data_row(&vec![
        "free_servers".to_string(),
        server_stats
            .keys()
            .filter(|server_id| {
                server_stats
                    .get(server_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ServerState::Idle
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "used_servers".to_string(),
        server_stats
            .keys()
            .filter(|server_id| {
                server_stats
                    .get(server_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ServerState::Active
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec!["dns_names".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_zones".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_queries".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_pending".to_string(), "0".to_string()]));

    res.put(command_complete("SHOW"));

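    // ReadyForQuery: message type 'Z', length 5, transaction status 'I' (idle).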
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show PgCat version.
|
||||
async fn show_version<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(row_description(&vec![("version", DataType::Text)]));
|
||||
res.put(data_row(&vec![format!("PgCat {}", VERSION)]));
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show utilization of connection pools for each shard and replicas.
|
||||
async fn show_pools<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let pool_lookup = PoolStats::construct_pool_lookup();
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&PoolStats::generate_header()));
|
||||
pool_lookup.iter().for_each(|(_identifier, pool_stats)| {
|
||||
res.put(data_row(&pool_stats.generate_row()));
|
||||
});
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show all available options.
|
||||
async fn show_help<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
let detail_msg = [
|
||||
"",
|
||||
"SHOW HELP|CONFIG|DATABASES|POOLS|CLIENTS|SERVERS|USERS|VERSION",
|
||||
// "SHOW PEERS|PEER_POOLS", // missing PEERS|PEER_POOLS
|
||||
// "SHOW FDS|SOCKETS|ACTIVE_SOCKETS|LISTS|MEM|STATE", // missing FDS|SOCKETS|ACTIVE_SOCKETS|MEM|STATE
|
||||
"SHOW LISTS",
|
||||
// "SHOW DNS_HOSTS|DNS_ZONES", // missing DNS_HOSTS|DNS_ZONES
|
||||
"SHOW STATS", // missing STATS_TOTALS|STATS_AVERAGES|TOTALS
|
||||
"SET key = arg",
|
||||
"RELOAD",
|
||||
"PAUSE [<db>, <user>]",
|
||||
"RESUME [<db>, <user>]",
|
||||
// "DISABLE <db>", // missing
|
||||
// "ENABLE <db>", // missing
|
||||
// "RECONNECT [<db>]", missing
|
||||
// "KILL <db>",
|
||||
// "SUSPEND",
|
||||
"SHUTDOWN",
|
||||
];
|
||||
|
||||
res.put(notify("Console usage", detail_msg.join("\n\t")));
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show shards and replicas.
|
||||
async fn show_databases<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
// Columns
|
||||
let columns = vec![
|
||||
("name", DataType::Text),
|
||||
("host", DataType::Text),
|
||||
("port", DataType::Text),
|
||||
("database", DataType::Text),
|
||||
("force_user", DataType::Text),
|
||||
("pool_size", DataType::Int4),
|
||||
("min_pool_size", DataType::Int4),
|
||||
("reserve_pool", DataType::Int4),
|
||||
("pool_mode", DataType::Text),
|
||||
("max_connections", DataType::Int4),
|
||||
("current_connections", DataType::Int4),
|
||||
("paused", DataType::Int4),
|
||||
("disabled", DataType::Int4),
|
||||
];
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (_, pool) in get_all_pools() {
|
||||
let pool_config = pool.settings.clone();
|
||||
for shard in 0..pool.shards() {
|
||||
let database_name = &pool.address(shard, 0).database;
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
let pool_state = pool.pool_state(shard, server);
|
||||
let banned = pool.is_banned(address);
|
||||
let paused = pool.paused();
|
||||
|
||||
res.put(data_row(&vec![
|
||||
address.name(), // name
|
||||
address.host.to_string(), // host
|
||||
address.port.to_string(), // port
|
||||
database_name.to_string(), // database
|
||||
pool_config.user.username.to_string(), // force_user
|
||||
pool_config.user.pool_size.to_string(), // pool_size
|
||||
pool_config.user.min_pool_size.unwrap_or(0).to_string(), // min_pool_size
|
||||
"0".to_string(), // reserve_pool
|
||||
pool_config.pool_mode.to_string(), // pool_mode
|
||||
pool_config.user.pool_size.to_string(), // max_connections
|
||||
pool_state.connections.to_string(), // current_connections
|
||||
match paused {
|
||||
// paused
|
||||
true => "1".to_string(),
|
||||
false => "0".to_string(),
|
||||
},
|
||||
match banned {
|
||||
// disabled
|
||||
true => "1".to_string(),
|
||||
false => "0".to_string(),
|
||||
},
|
||||
]));
|
||||
}
|
||||
}
|
||||
}
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Ignore any SET commands the client sends.
|
||||
/// This is common initialization done by ORMs.
|
||||
async fn ignore_set<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
custom_protocol_response_ok(stream, "SET").await
|
||||
}
|
||||
|
||||
/// Bans a host from being used
|
||||
async fn ban<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let host = match tokens.get(1) {
|
||||
Some(host) => host,
|
||||
None => return error_response(stream, "usage: BAN hostname duration_seconds").await,
|
||||
};
|
||||
|
||||
let duration_seconds = match tokens.get(2) {
|
||||
Some(duration_seconds) => match duration_seconds.parse::<i64>() {
|
||||
Ok(duration_seconds) => duration_seconds,
|
||||
Err(_) => {
|
||||
return error_response(stream, "duration_seconds must be an integer").await;
|
||||
}
|
||||
},
|
||||
None => return error_response(stream, "usage: BAN hostname duration_seconds").await,
|
||||
};
|
||||
|
||||
if duration_seconds <= 0 {
|
||||
return error_response(stream, "duration_seconds must be >= 0").await;
|
||||
}
|
||||
|
||||
let columns = vec![
|
||||
("db", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("role", DataType::Text),
|
||||
("host", DataType::Text),
|
||||
];
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (id, pool) in get_all_pools().iter() {
|
||||
for address in pool.get_addresses_from_host(host) {
|
||||
if !pool.is_banned(&address) {
|
||||
pool.ban(&address, BanReason::AdminBan(duration_seconds), None);
|
||||
res.put(data_row(&vec![
|
||||
id.db.clone(),
|
||||
id.user.clone(),
|
||||
address.role.to_string(),
|
||||
address.host,
|
||||
]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.put(command_complete("BAN"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Clear a host for use
|
||||
async fn unban<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let host = match tokens.get(1) {
|
||||
Some(host) => host,
|
||||
None => return error_response(stream, "UNBAN command requires a hostname to unban").await,
|
||||
};
|
||||
|
||||
let columns = vec![
|
||||
("db", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("role", DataType::Text),
|
||||
("host", DataType::Text),
|
||||
];
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (id, pool) in get_all_pools().iter() {
|
||||
for address in pool.get_addresses_from_host(host) {
|
||||
if pool.is_banned(&address) {
|
||||
pool.unban(&address);
|
||||
res.put(data_row(&vec![
|
||||
id.db.clone(),
|
||||
id.user.clone(),
|
||||
address.role.to_string(),
|
||||
address.host,
|
||||
]));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.put(command_complete("UNBAN"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Shows all the bans
|
||||
async fn show_bans<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let columns = vec![
|
||||
("db", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("role", DataType::Text),
|
||||
("host", DataType::Text),
|
||||
("reason", DataType::Text),
|
||||
("ban_time", DataType::Text),
|
||||
("ban_duration_seconds", DataType::Text),
|
||||
("ban_remaining_seconds", DataType::Text),
|
||||
];
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
// The block should be pretty quick so we cache the time outside
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("Time went backwards")
|
||||
.as_secs() as i64;
|
||||
|
||||
for (id, pool) in get_all_pools().iter() {
|
||||
for (address, (ban_reason, ban_time)) in pool.get_bans().iter() {
|
||||
let ban_duration = match ban_reason {
|
||||
BanReason::AdminBan(duration) => *duration,
|
||||
_ => pool.settings.ban_time,
|
||||
};
|
||||
let remaining = ban_duration - (now - ban_time.timestamp());
|
||||
if remaining <= 0 {
|
||||
continue;
|
||||
}
|
||||
res.put(data_row(&vec![
|
||||
id.db.clone(),
|
||||
id.user.clone(),
|
||||
address.role.to_string(),
|
||||
address.host.clone(),
|
||||
format!("{:?}", ban_reason),
|
||||
ban_time.to_string(),
|
||||
ban_duration.to_string(),
|
||||
remaining.to_string(),
|
||||
]));
|
||||
}
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW BANS"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Reload the configuration file without restarting the process.
|
||||
async fn reload<T>(stream: &mut T, client_server_map: ClientServerMap) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
info!("Reloading config");
|
||||
|
||||
reload_config(client_server_map).await?;
|
||||
|
||||
get_config().show();
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(command_complete("RELOAD"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Shows current configuration.
|
||||
async fn show_config<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let config = &get_config();
|
||||
let config: HashMap<String, String> = config.into();
|
||||
|
||||
// Configs that cannot be changed without restarting.
|
||||
let immutables = ["host", "port", "connect_timeout"];
|
||||
|
||||
// Columns
|
||||
let columns = vec![
|
||||
("key", DataType::Text),
|
||||
("value", DataType::Text),
|
||||
("default", DataType::Text),
|
||||
("changeable", DataType::Text),
|
||||
];
|
||||
|
||||
// Response data
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
// DataRow rows
|
||||
for (key, value) in config {
|
||||
let changeable = if immutables.iter().filter(|col| *col == &key).count() == 1 {
|
||||
"no".to_string()
|
||||
} else {
|
||||
"yes".to_string()
|
||||
};
|
||||
|
||||
let row = vec![key, value, "-".to_string(), changeable];
|
||||
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show shard and replicas statistics.
|
||||
async fn show_stats<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let columns = vec![
|
||||
("instance", DataType::Text),
|
||||
("database", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("total_xact_count", DataType::Numeric),
|
||||
("total_query_count", DataType::Numeric),
|
||||
("total_received", DataType::Numeric),
|
||||
("total_sent", DataType::Numeric),
|
||||
("total_xact_time", DataType::Numeric),
|
||||
("total_query_time", DataType::Numeric),
|
||||
("total_wait_time", DataType::Numeric),
|
||||
("total_errors", DataType::Numeric),
|
||||
("avg_xact_count", DataType::Numeric),
|
||||
("avg_query_count", DataType::Numeric),
|
||||
("avg_recv", DataType::Numeric),
|
||||
("avg_sent", DataType::Numeric),
|
||||
("avg_errors", DataType::Numeric),
|
||||
("avg_xact_time", DataType::Numeric),
|
||||
("avg_query_time", DataType::Numeric),
|
||||
("avg_wait_time", DataType::Numeric),
|
||||
];
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (user_pool, pool) in get_all_pools() {
|
||||
for shard in 0..pool.shards() {
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
|
||||
let mut row = vec![address.name(), user_pool.db.clone(), user_pool.user.clone()];
|
||||
let stats = address.stats.clone();
|
||||
stats.populate_row(&mut row);
|
||||
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show currently connected clients
|
||||
async fn show_clients<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let columns = vec![
|
||||
("client_id", DataType::Text),
|
||||
("database", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("application_name", DataType::Text),
|
||||
("state", DataType::Text),
|
||||
("transaction_count", DataType::Numeric),
|
||||
("query_count", DataType::Numeric),
|
||||
("error_count", DataType::Numeric),
|
||||
("age_seconds", DataType::Numeric),
|
||||
("maxwait", DataType::Numeric),
|
||||
("maxwait_us", DataType::Numeric),
|
||||
];
|
||||
|
||||
let new_map = get_client_stats();
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (_, client) in new_map {
|
||||
let max_wait = client.max_wait_time.load(Ordering::Relaxed);
|
||||
let row = vec![
|
||||
format!("{:#010X}", client.client_id()),
|
||||
client.pool_name(),
|
||||
client.username(),
|
||||
client.application_name(),
|
||||
client.state.load(Ordering::Relaxed).to_string(),
|
||||
client.transaction_count.load(Ordering::Relaxed).to_string(),
|
||||
client.query_count.load(Ordering::Relaxed).to_string(),
|
||||
client.error_count.load(Ordering::Relaxed).to_string(),
|
||||
Instant::now()
|
||||
.duration_since(client.connect_time())
|
||||
.as_secs()
|
||||
.to_string(),
|
||||
(max_wait / 1_000_000).to_string(),
|
||||
(max_wait % 1_000_000).to_string(),
|
||||
];
|
||||
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Show currently connected servers
|
||||
async fn show_servers<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let columns = vec![
|
||||
("server_id", DataType::Text),
|
||||
("database_name", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("address_id", DataType::Text),
|
||||
("application_name", DataType::Text),
|
||||
("state", DataType::Text),
|
||||
("transaction_count", DataType::Numeric),
|
||||
("query_count", DataType::Numeric),
|
||||
("bytes_sent", DataType::Numeric),
|
||||
("bytes_received", DataType::Numeric),
|
||||
("age_seconds", DataType::Numeric),
|
||||
("prepare_cache_hit", DataType::Numeric),
|
||||
("prepare_cache_miss", DataType::Numeric),
|
||||
("prepare_cache_eviction", DataType::Numeric),
|
||||
("prepare_cache_size", DataType::Numeric),
|
||||
];
|
||||
|
||||
let new_map = get_server_stats();
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (_, server) in new_map {
|
||||
let application_name = server.application_name.read();
|
||||
let row = vec![
|
||||
format!("{:#010X}", server.server_id()),
|
||||
server.pool_name(),
|
||||
server.username(),
|
||||
server.address_name(),
|
||||
application_name.clone(),
|
||||
server.state.load(Ordering::Relaxed).to_string(),
|
||||
server.transaction_count.load(Ordering::Relaxed).to_string(),
|
||||
server.query_count.load(Ordering::Relaxed).to_string(),
|
||||
server.bytes_sent.load(Ordering::Relaxed).to_string(),
|
||||
server.bytes_received.load(Ordering::Relaxed).to_string(),
|
||||
Instant::now()
|
||||
.duration_since(server.connect_time())
|
||||
.as_secs()
|
||||
.to_string(),
|
||||
server
|
||||
.prepared_hit_count
|
||||
.load(Ordering::Relaxed)
|
||||
.to_string(),
|
||||
server
|
||||
.prepared_miss_count
|
||||
.load(Ordering::Relaxed)
|
||||
.to_string(),
|
||||
server
|
||||
.prepared_eviction_count
|
||||
.load(Ordering::Relaxed)
|
||||
.to_string(),
|
||||
server
|
||||
.prepared_cache_size
|
||||
.load(Ordering::Relaxed)
|
||||
.to_string(),
|
||||
];
|
||||
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
/// Pause a pool. It won't pass any more queries to the backends.
|
||||
async fn pause<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let parts: Vec<&str> = match tokens.len() == 2 {
|
||||
true => tokens[1].split(',').map(|part| part.trim()).collect(),
|
||||
false => Vec::new(),
|
||||
};
|
||||
|
||||
match parts.len() {
|
||||
0 => {
|
||||
for (_, pool) in get_all_pools() {
|
||||
pool.pause();
|
||||
}
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(command_complete("PAUSE"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
2 => {
|
||||
let database = parts[0];
|
||||
let user = parts[1];
|
||||
|
||||
match get_pool(database, user) {
|
||||
Some(pool) => {
|
||||
pool.pause();
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(command_complete(&format!("PAUSE {},{}", database, user)));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, &res).await
|
||||
}
|
||||
|
||||
None => {
|
||||
error_response(
|
||||
stream,
|
||||
&format!(
|
||||
"No pool configured for database: {}, user: {}",
|
||||
database, user
|
||||
),
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => error_response(stream, "usage: PAUSE [db, user]").await,
|
||||
}
|
||||
}
|
||||
|
||||
/// Resume a pool. Queries are allowed again.
async fn resume<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let parts: Vec<&str> = match tokens.len() == 2 {
        true => tokens[1].split(',').map(|part| part.trim()).collect(),
        false => Vec::new(),
    };

    match parts.len() {
        0 => {
            for (_, pool) in get_all_pools() {
                pool.resume();
            }

            let mut res = BytesMut::new();

            res.put(command_complete("RESUME"));

            // ReadyForQuery
            res.put_u8(b'Z');
            res.put_i32(5);
            res.put_u8(b'I');

            write_all_half(stream, &res).await
        }
        2 => {
            let database = parts[0];
            let user = parts[1];

            match get_pool(database, user) {
                Some(pool) => {
                    pool.resume();

                    let mut res = BytesMut::new();

                    res.put(command_complete(&format!("RESUME {},{}", database, user)));

                    // ReadyForQuery
                    res.put_u8(b'Z');
                    res.put_i32(5);
                    res.put_u8(b'I');

                    write_all_half(stream, &res).await
                }

                None => {
                    error_response(
                        stream,
                        &format!(
                            "No pool configured for database: {}, user: {}",
                            database, user
                        ),
                    )
                    .await
                }
            }
        }
        _ => error_response(stream, "usage: RESUME [db, user]").await,
    }
}

/// Send response packets for shutdown.
async fn shutdown<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    res.put(row_description(&vec![("success", DataType::Text)]));

    let mut shutdown_success = "t";

    let pid = std::process::id();
    if signal::kill(Pid::from_raw(pid.try_into().unwrap()), Signal::SIGINT).is_err() {
        error!("Unable to send SIGINT to PID: {}", pid);
        shutdown_success = "f";
    }

    res.put(data_row(&vec![shutdown_success.to_string()]));

    res.put(command_complete("SHUTDOWN"));

    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

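// The three writes above ('Z', 5, 'I') form the ReadyForQuery message of the
// PostgreSQL wire protocol: tag byte 'Z', an i32 length of 5 (the length
// field counts itself), and 'I' for "idle, not in a transaction". A hedged
// sketch of a helper that could fold the repeated pattern (hypothetical, not
// in the original source):
//
// fn ready_for_query(res: &mut BytesMut) {
//     res.put_u8(b'Z'); // message tag: ReadyForQuery
//     res.put_i32(5);   // message length, including the length field itself
//     res.put_u8(b'I'); // backend transaction status: idle
// }
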
/// Show Users.
async fn show_users<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    res.put(row_description(&vec![
        ("name", DataType::Text),
        ("pool_mode", DataType::Text),
    ]));

    for (user_pool, pool) in get_all_pools() {
        let pool_config = &pool.settings;
        res.put(data_row(&vec![
            user_pool.user.clone(),
            pool_config.pool_mode.to_string(),
        ]));
    }

    res.put(command_complete("SHOW"));

    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}
@@ -1,138 +0,0 @@
use crate::config::AuthType;
use crate::errors::Error;
use crate::pool::ConnectionPool;
use crate::server::Server;
use log::debug;

#[derive(Clone, Debug)]
pub struct AuthPassthrough {
    password: String,
    query: String,
    user: String,
}

impl AuthPassthrough {
    /// Initializes an AuthPassthrough.
    pub fn new(query: &str, user: &str, password: &str) -> Self {
        AuthPassthrough {
            password: password.to_string(),
            query: query.to_string(),
            user: user.to_string(),
        }
    }

    /// Returns an AuthPassthrough given the pool configuration.
    /// If any of the required values is not set, None is returned.
    pub fn from_pool_config(pool_config: &crate::config::Pool) -> Option<Self> {
        if pool_config.is_auth_query_configured() {
            return Some(AuthPassthrough::new(
                pool_config.auth_query.as_ref().unwrap(),
                pool_config.auth_query_user.as_ref().unwrap(),
                pool_config.auth_query_password.as_ref().unwrap(),
            ));
        }

        None
    }

    /// Returns an AuthPassthrough given the pool settings.
    /// If any of the required values is not set, None is returned.
    pub fn from_pool_settings(pool_settings: &crate::pool::PoolSettings) -> Option<Self> {
        let pool_config = crate::config::Pool {
            auth_query: pool_settings.auth_query.clone(),
            auth_query_password: pool_settings.auth_query_password.clone(),
            auth_query_user: pool_settings.auth_query_user.clone(),
            ..Default::default()
        };

        AuthPassthrough::from_pool_config(&pool_config)
    }

    /// Connects to the server and executes the auth_query for the specified address.
    /// If the response is a row with two columns containing the username set in the address
    /// and its MD5 hash, the MD5 hash is returned.
    ///
    /// Note that the query is executed after substituting $1 with the name of the user,
    /// so we only hold in memory (and transfer) the least amount of 'sensitive' data.
    /// This also keeps it compatible with pgbouncer.
    ///
    /// # Arguments
    ///
    /// * `address` - An Address of the server we want to connect to. The username for the hash will be obtained from this value.
    ///
    /// # Examples
    ///
    /// ```
    /// use pgcat::auth_passthrough::AuthPassthrough;
    /// use pgcat::config::Address;
    /// let auth_passthrough = AuthPassthrough::new("SELECT * FROM public.user_lookup('$1');", "postgres", "postgres");
    /// auth_passthrough.fetch_hash(&Address::default());
    /// ```
    ///
    pub async fn fetch_hash(&self, address: &crate::config::Address) -> Result<String, Error> {
        let auth_user = crate::config::User {
            username: self.user.clone(),
            auth_type: AuthType::MD5,
            password: Some(self.password.clone()),
            server_username: None,
            server_password: None,
            pool_size: 1,
            statement_timeout: 0,
            pool_mode: None,
            server_lifetime: None,
            min_pool_size: None,
            connect_timeout: None,
            idle_timeout: None,
        };

        let user = &address.username;

        debug!("Connecting to server to obtain auth hashes");

        let auth_query = self.query.replace("$1", user);

        match Server::exec_simple_query(address, &auth_user, &auth_query).await {
            Ok(password_data) => {
                if password_data.len() == 2 && password_data.first().unwrap() == user {
                    if let Some(stripped_hash) = password_data
                        .last()
                        .unwrap()
                        .to_string()
                        .strip_prefix("md5")
                    {
                        Ok(stripped_hash.to_string())
                    } else {
                        Err(Error::AuthPassthroughError(
                            "Obtained hash from auth_query does not seem to be in md5 format.".to_string(),
                        ))
                    }
                } else {
                    Err(Error::AuthPassthroughError(
                        "Data obtained from query does not follow the scheme 'user','hash'."
                            .to_string(),
                    ))
                }
            }
            Err(err) => Err(Error::AuthPassthroughError(format!(
                "Error trying to obtain password from auth_query, ignoring hash for user '{}'. Error: {:?}",
                user, err
            ))),
        }
    }
}

pub async fn refetch_auth_hash(pool: &ConnectionPool) -> Result<String, Error> {
    let address = pool.address(0, 0);
    if let Some(apt) = AuthPassthrough::from_pool_settings(&pool.settings) {
        let hash = apt.fetch_hash(address).await?;

        return Ok(hash);
    }

    Err(Error::ClientError(format!(
        "Could not obtain hash for {{ username: {:?}, database: {:?} }}. Auth passthrough not enabled.",
        address.username, address.database
    )))
}
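// For reference (an assumption spelled out, not code from this file): the
// "md5"-prefixed value that fetch_hash() strips is PostgreSQL's standard
// format, "md5" followed by hex(md5(password || username)). A sketch using
// the md5 crate, which pgcat already depends on:
//
// fn pg_md5_hash(username: &str, password: &str) -> String {
//     let digest = md5::compute(format!("{}{}", password, username));
//     format!("md5{:x}", digest)
// }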
2079 src/client.rs (file diff suppressed because it is too large)
@@ -1,36 +0,0 @@
use clap::{Parser, ValueEnum};
use tracing::Level;

/// PgCat: Nextgen PostgreSQL Pooler
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
    #[arg(default_value_t = String::from("pgcat.toml"), env)]
    pub config_file: String,

    #[arg(short, long, default_value_t = tracing::Level::INFO, env)]
    pub log_level: Level,

    #[clap(short='F', long, value_enum, default_value_t=LogFormat::Text, env)]
    pub log_format: LogFormat,

    #[arg(
        short,
        long,
        default_value_t = false,
        env,
        help = "disable colors in the log output"
    )]
    pub no_color: bool,
}

pub fn parse() -> Args {
    Args::parse()
}

#[derive(ValueEnum, Clone, Debug)]
pub enum LogFormat {
    Text,
    Structured,
    Debug,
}
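// Illustrative invocations (a sketch; values are examples). Because of the
// `env` attributes above, clap also reads each option from an environment
// variable derived from the field name:
//
//   pgcat /etc/pgcat/pgcat.toml --log-level DEBUG --log-format structured
//   CONFIG_FILE=pgcat.toml LOG_LEVEL=debug pgcat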
1607 src/config.rs (file diff suppressed because it is too large)
@@ -1,33 +0,0 @@
/// Various protocol constants, as defined in
/// <https://www.postgresql.org/docs/12/protocol-message-formats.html>
/// and elsewhere in the source code.

// Used in the StartupMessage to indicate a regular handshake.
pub const PROTOCOL_VERSION_NUMBER: i32 = 196608;

// SSLRequest: used to indicate we want an SSL connection.
pub const SSL_REQUEST_CODE: i32 = 80877103;

// CancelRequest: the cancel request code.
pub const CANCEL_REQUEST_CODE: i32 = 80877102;

// AuthenticationMD5Password
pub const MD5_ENCRYPTED_PASSWORD: i32 = 5;

// SASL
pub const SASL: i32 = 10;
pub const SASL_CONTINUE: i32 = 11;
pub const SASL_FINAL: i32 = 12;
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256";
pub const NONCE_LENGTH: usize = 24;

// AuthenticationOk
pub const AUTHENTICATION_SUCCESSFUL: i32 = 0;

// ErrorResponse: A code identifying the field type; if zero, this is the message terminator and no string follows.
pub const MESSAGE_TERMINATOR: u8 = 0;

//
// Data types
//
pub const _OID_INT8: i32 = 20; // bigint
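// PROTOCOL_VERSION_NUMBER packs the protocol's major and minor versions into
// the two 16-bit halves of an i32: 196608 == (3 << 16) | 0, i.e. version 3.0.
// A quick check (illustrative):
//
// assert_eq!(PROTOCOL_VERSION_NUMBER, (3 << 16) | 0);
// assert_eq!(PROTOCOL_VERSION_NUMBER >> 16, 3);    // major version
// assert_eq!(PROTOCOL_VERSION_NUMBER & 0xffff, 0); // minor version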
410 src/dns_cache.rs
@@ -1,410 +0,0 @@
use crate::config::get_config;
use crate::errors::Error;
use arc_swap::ArcSwap;
use log::{debug, error, info, warn};
use once_cell::sync::Lazy;
use std::collections::{HashMap, HashSet};
use std::io;
use std::net::IpAddr;
use std::sync::Arc;
use std::sync::RwLock;
use tokio::time::{sleep, Duration};
use trust_dns_resolver::error::{ResolveError, ResolveResult};
use trust_dns_resolver::lookup_ip::LookupIp;
use trust_dns_resolver::TokioAsyncResolver;

/// Cached Resolver, globally available.
pub static CACHED_RESOLVER: Lazy<ArcSwap<CachedResolver>> =
    Lazy::new(|| ArcSwap::from_pointee(CachedResolver::default()));

// IP addresses are returned as a set of addresses
// so we can compare them.
#[derive(Clone, PartialEq, Debug)]
pub struct AddrSet {
    set: HashSet<IpAddr>,
}

impl AddrSet {
    fn new() -> AddrSet {
        AddrSet {
            set: HashSet::new(),
        }
    }
}

impl From<LookupIp> for AddrSet {
    fn from(lookup_ip: LookupIp) -> Self {
        let mut addr_set = AddrSet::new();
        for address in lookup_ip.iter() {
            addr_set.set.insert(address);
        }
        addr_set
    }
}

///
/// A CachedResolver is a DNS resolution cache mechanism with customizable expiration time.
///
/// The system works as follows:
///
/// When a host is to be resolved, if we have not resolved it before, a new resolution is
/// executed and stored in the internal cache. Concurrently, every `dns_max_ttl` time, the
/// cache is refreshed.
///
/// # Example:
///
/// ```
/// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
///
/// # tokio_test::block_on(async {
/// let config = CachedResolverConfig::default();
/// let resolver = CachedResolver::new(config, None).await.unwrap();
/// let addrset = resolver.lookup_ip("www.example.com.").await.unwrap();
/// # })
/// ```
///
/// // Now the IP resolution is stored in the local cache and subsequent
/// // calls will be returned from cache. Also, the cache is refreshed
/// // and updated every 10 seconds.
///
/// // You can now check if an 'old' lookup differs from what is currently
/// // stored in the cache by using `has_changed`.
/// resolver.has_changed("www.example.com.", addrset)
#[derive(Default)]
pub struct CachedResolver {
    // The configuration of the cached_resolver.
    config: CachedResolverConfig,

    // The hash map that holds the cached lookups.
    data: Option<RwLock<HashMap<String, AddrSet>>>,

    // The resolver to be used for DNS queries.
    resolver: Option<TokioAsyncResolver>,

    // The RefreshLoop
    refresh_loop: RwLock<Option<tokio::task::JoinHandle<()>>>,
}

///
/// Configuration
#[derive(Clone, Debug, Default, PartialEq)]
pub struct CachedResolverConfig {
    /// Amount of time in seconds that a resolved DNS address is considered stale.
    dns_max_ttl: u64,

    /// Enabled or disabled? (this is so we can reload config)
    enabled: bool,
}

impl CachedResolverConfig {
    fn new(dns_max_ttl: u64, enabled: bool) -> Self {
        CachedResolverConfig {
            dns_max_ttl,
            enabled,
        }
    }
}

impl From<crate::config::Config> for CachedResolverConfig {
    fn from(config: crate::config::Config) -> Self {
        CachedResolverConfig::new(config.general.dns_max_ttl, config.general.dns_cache_enabled)
    }
}

impl CachedResolver {
    ///
    /// Returns a new Arc<CachedResolver> based on the passed configuration.
    /// It also starts the loop that will refresh cache entries.
    ///
    /// # Arguments:
    ///
    /// * `config` - The `CachedResolverConfig` to be used to create the resolver.
    ///
    /// # Example:
    ///
    /// ```
    /// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
    ///
    /// # tokio_test::block_on(async {
    /// let config = CachedResolverConfig::default();
    /// let resolver = CachedResolver::new(config, None).await.unwrap();
    /// # })
    /// ```
    ///
    pub async fn new(
        config: CachedResolverConfig,
        data: Option<HashMap<String, AddrSet>>,
    ) -> Result<Arc<Self>, io::Error> {
        // Construct a new Resolver with default configuration options
        let resolver = Some(TokioAsyncResolver::tokio_from_system_conf()?);

        let data = if let Some(hash) = data {
            Some(RwLock::new(hash))
        } else {
            Some(RwLock::new(HashMap::new()))
        };

        let instance = Arc::new(Self {
            config,
            resolver,
            data,
            refresh_loop: RwLock::new(None),
        });

        if instance.enabled() {
            info!("Scheduling DNS refresh loop");
            let refresh_loop = tokio::task::spawn({
                let instance = instance.clone();
                async move {
                    instance.refresh_dns_entries_loop().await;
                }
            });
            *(instance.refresh_loop.write().unwrap()) = Some(refresh_loop);
        }

        Ok(instance)
    }

    pub fn enabled(&self) -> bool {
        self.config.enabled
    }

    // Schedules the refresher
    async fn refresh_dns_entries_loop(&self) {
        let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap();
        let interval = Duration::from_secs(self.config.dns_max_ttl);
        loop {
            debug!("Begin refreshing cached DNS addresses.");
            // To minimize the time we hold the lock, we first create
            // an array with keys.
            let mut hostnames: Vec<String> = Vec::new();
            {
                if let Some(ref data) = self.data {
                    for hostname in data.read().unwrap().keys() {
                        hostnames.push(hostname.clone());
                    }
                }
            }

            for hostname in hostnames.iter() {
                let addrset = self
                    .fetch_from_cache(hostname.as_str())
                    .expect("Could not obtain expected address from cache, this should not happen");

                match resolver.lookup_ip(hostname).await {
                    Ok(lookup_ip) => {
                        let new_addrset = AddrSet::from(lookup_ip);
                        debug!(
                            "Obtained address for host ({}) -> ({:?})",
                            hostname, new_addrset
                        );

                        if addrset != new_addrset {
                            debug!(
                                "Addr changed from {:?} to {:?}, updating cache.",
                                addrset, new_addrset
                            );
                            self.store_in_cache(hostname, new_addrset);
                        }
                    }
                    Err(err) => {
                        error!(
                            "There was an error trying to resolve {}: ({}).",
                            hostname, err
                        );
                    }
                }
            }
            debug!("Finished refreshing cached DNS addresses.");
            sleep(interval).await;
        }
    }

    /// Returns an `AddrSet` given the specified hostname.
    ///
    /// This method first tries to fetch the value from the cache; if it misses,
    /// then it is resolved and stored in the cache. TTL from records is ignored.
    ///
    /// # Arguments
    ///
    /// * `host` - A string slice referencing the hostname to be resolved.
    ///
    /// # Example:
    ///
    /// ```
    /// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
    ///
    /// # tokio_test::block_on(async {
    /// let config = CachedResolverConfig::default();
    /// let resolver = CachedResolver::new(config, None).await.unwrap();
    /// let response = resolver.lookup_ip("www.google.com.");
    /// # })
    /// ```
    ///
    pub async fn lookup_ip(&self, host: &str) -> ResolveResult<AddrSet> {
        debug!("Looking up {} in cache", host);
        match self.fetch_from_cache(host) {
            Some(addr_set) => {
                debug!("Cache hit!");
                Ok(addr_set)
            }
            None => {
                debug!("Not found, executing a DNS query!");
                if let Some(ref resolver) = self.resolver {
                    let addr_set = AddrSet::from(resolver.lookup_ip(host).await?);
                    debug!("Obtained: {:?}", addr_set);
                    self.store_in_cache(host, addr_set.clone());
                    Ok(addr_set)
                } else {
                    Err(ResolveError::from("No resolver available"))
                }
            }
        }
    }

    //
    // Returns true if the stored host resolution differs from the AddrSet passed.
    pub fn has_changed(&self, host: &str, addr_set: &AddrSet) -> bool {
        if let Some(fetched_addr_set) = self.fetch_from_cache(host) {
            return fetched_addr_set != *addr_set;
        }
        false
    }

    // Fetches an AddrSet from the inner cache, acquiring the read lock.
    fn fetch_from_cache(&self, key: &str) -> Option<AddrSet> {
        if let Some(ref hash) = self.data {
            if let Some(addr_set) = hash.read().unwrap().get(key) {
                return Some(addr_set.clone());
            }
        }
        None
    }

    // Sets up the global CACHED_RESOLVER static variable so we can use the
    // DNS cache globally.
    pub async fn from_config() -> Result<(), Error> {
        let cached_resolver = CACHED_RESOLVER.load();
        let desired_config = CachedResolverConfig::from(get_config());

        if cached_resolver.config != desired_config {
            if let Some(ref refresh_loop) = *(cached_resolver.refresh_loop.write().unwrap()) {
                warn!("Killing DNS cache refresh loop as its configuration is being reloaded");
                refresh_loop.abort()
            }
            let new_resolver = if let Some(ref data) = cached_resolver.data {
                let data = Some(data.read().unwrap().clone());
                CachedResolver::new(desired_config, data).await
            } else {
                CachedResolver::new(desired_config, None).await
            };

            match new_resolver {
                Ok(ok) => {
                    CACHED_RESOLVER.store(ok);
                    Ok(())
                }
                Err(err) => {
                    let message = format!("Error setting up cached_resolver. Error: {:?}, will continue without this feature.", err);
                    Err(Error::DNSCachedError(message))
                }
            }
        } else {
            Ok(())
        }
    }

    // Stores the AddrSet in the cache, acquiring the write lock.
    fn store_in_cache(&self, host: &str, addr_set: AddrSet) {
        if let Some(ref data) = self.data {
            data.write().unwrap().insert(host.to_string(), addr_set);
        } else {
            error!("Could not insert, Hash not initialized");
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use trust_dns_resolver::error::ResolveError;

    #[tokio::test]
    async fn new() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await;
        assert!(resolver.is_ok());
    }

    #[tokio::test]
    async fn lookup_ip() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let response = resolver.lookup_ip("www.google.com.").await;
        assert!(response.is_ok());
    }

    #[tokio::test]
    async fn has_changed() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.google.com.";
        let response = resolver.lookup_ip(hostname).await;
        let addr_set = response.unwrap();
        assert!(!resolver.has_changed(hostname, &addr_set));
    }

    #[tokio::test]
    async fn unknown_host() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.idontexists.";
        let response = resolver.lookup_ip(hostname).await;
        assert!(matches!(response, Err(ResolveError { .. })));
    }

    #[tokio::test]
    async fn incorrect_address() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "w ww.idontexists.";
        let response = resolver.lookup_ip(hostname).await;
        assert!(matches!(response, Err(ResolveError { .. })));
        assert!(!resolver.has_changed(hostname, &AddrSet::new()));
    }

    #[tokio::test]
    // This test relies on the fact that Google does DNS round-robin
    // and does not respond with every available IP every time, so
    // a cached entry here will go stale after an iteration or two.
    async fn thread() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.google.com.";
        let response = resolver.lookup_ip(hostname).await;
        let addr_set = response.unwrap();
        assert!(!resolver.has_changed(hostname, &addr_set));
        let resolver_for_refresher = resolver.clone();
        let _thread_handle = tokio::task::spawn(async move {
            resolver_for_refresher.refresh_dns_entries_loop().await;
        });
        assert!(!resolver.has_changed(hostname, &addr_set));
    }
}
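// Hedged usage sketch for the global resolver (call sites and hostname are
// illustrative; the names are as defined above):
//
// let resolver = dns_cache::CACHED_RESOLVER.load();
// if resolver.enabled() {
//     let addrs = resolver.lookup_ip("db.internal.example.").await?;
//     // Later, detect a DNS change without waiting on a fresh lookup:
//     if resolver.has_changed("db.internal.example.", &addrs) {
//         // reconnect / refresh pooled connections
//     }
// }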
133 src/errors.rs
@@ -1,133 +0,0 @@
//! Errors.

/// Various errors.
#[derive(Debug, PartialEq, Clone)]
pub enum Error {
    SocketError(String),
    ClientSocketError(String, ClientIdentifier),
    ClientGeneralError(String, ClientIdentifier),
    ClientAuthImpossible(String),
    ClientAuthPassthroughError(String, ClientIdentifier),
    ClientBadStartup,
    ProtocolSyncError(String),
    BadQuery(String),
    ServerError,
    ServerMessageParserError(String),
    ServerStartupError(String, ServerIdentifier),
    ServerAuthError(String, ServerIdentifier),
    BadConfig,
    AllServersDown,
    ClientError(String),
    TlsError,
    StatementTimeout,
    DNSCachedError(String),
    ShuttingDown,
    ParseBytesError(String),
    AuthError(String),
    AuthPassthroughError(String),
    UnsupportedStatement,
    QueryRouterParserError(String),
    QueryRouterError(String),
    InvalidShardId(usize),
    PreparedStatementError,
}

#[derive(Clone, PartialEq, Debug)]
pub struct ClientIdentifier {
    pub application_name: String,
    pub username: String,
    pub pool_name: String,
}

impl ClientIdentifier {
    pub fn new(application_name: &str, username: &str, pool_name: &str) -> ClientIdentifier {
        ClientIdentifier {
            application_name: application_name.into(),
            username: username.into(),
            pool_name: pool_name.into(),
        }
    }
}

impl std::fmt::Display for ClientIdentifier {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "{{ application_name: {}, username: {}, pool_name: {} }}",
            self.application_name, self.username, self.pool_name
        )
    }
}

#[derive(Clone, PartialEq, Debug)]
pub struct ServerIdentifier {
    pub username: String,
    pub database: String,
}

impl ServerIdentifier {
    pub fn new(username: &str, database: &str) -> ServerIdentifier {
        ServerIdentifier {
            username: username.into(),
            database: database.into(),
        }
    }
}

impl std::fmt::Display for ServerIdentifier {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(
            f,
            "{{ username: {}, database: {} }}",
            self.username, self.database
        )
    }
}

impl std::fmt::Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match &self {
            &Error::ClientSocketError(error, client_identifier) => write!(
                f,
                "Error reading {} from client {}",
                error, client_identifier
            ),
            &Error::ClientGeneralError(error, client_identifier) => {
                write!(f, "{} {}", error, client_identifier)
            }
            &Error::ClientAuthImpossible(username) => write!(
                f,
                "Client auth not possible, \
                no cleartext password set for username: {} \
                in config and auth passthrough (query_auth) \
                is not set up.",
                username
            ),
            &Error::ClientAuthPassthroughError(error, client_identifier) => write!(
                f,
                "No cleartext password set, \
                and auth passthrough could not \
                obtain the hash from the server for {}, \
                the error was: {}",
                client_identifier, error
            ),
            &Error::ServerStartupError(error, server_identifier) => write!(
                f,
                "Error reading {} on server startup {}",
                error, server_identifier,
            ),
            &Error::ServerAuthError(error, server_identifier) => {
                write!(f, "{} for {}", error, server_identifier,)
            }

            // The rest can use Debug.
            err => write!(f, "{:?}", err),
        }
    }
}

impl From<std::ffi::NulError> for Error {
    fn from(err: std::ffi::NulError) -> Self {
        Error::QueryRouterError(err.to_string())
    }
}
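// Example of the Display impl above (values illustrative):
//
// let err = Error::ClientSocketError(
//     "startup packet".into(),
//     ClientIdentifier::new("psql", "alice", "main"),
// );
// assert_eq!(
//     err.to_string(),
//     "Error reading startup packet from client \
//      { application_name: psql, username: alice, pool_name: main }"
// );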
42 src/lib.rs
@@ -1,42 +0,0 @@
pub mod admin;
pub mod auth_passthrough;
pub mod client;
pub mod cmd_args;
pub mod config;
pub mod constants;
pub mod dns_cache;
pub mod errors;
pub mod logger;
pub mod messages;
pub mod mirrors;
pub mod plugins;
pub mod pool;
pub mod prometheus;
pub mod query_router;
pub mod scram;
pub mod server;
pub mod sharding;
pub mod stats;
pub mod tls;

/// Format chrono::Duration to be more human-friendly.
///
/// # Arguments
///
/// * `duration` - A duration of time
pub fn format_duration(duration: &chrono::Duration) -> String {
    let milliseconds = format!("{:0>3}", duration.num_milliseconds() % 1000);

    let seconds = format!("{:0>2}", duration.num_seconds() % 60);

    let minutes = format!("{:0>2}", duration.num_minutes() % 60);

    let hours = format!("{:0>2}", duration.num_hours() % 24);

    let days = duration.num_days().to_string();

    format!(
        "{}d {}:{}:{}.{}",
        days, hours, minutes, seconds, milliseconds
    )
}
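// Worked example for format_duration (a sketch; the expected string follows
// directly from the format above):
//
// let d = chrono::Duration::milliseconds(90_061_001); // 1 day, 1 h, 1 min, 1 s, 1 ms
// assert_eq!(format_duration(&d), "1d 01:01:01.001");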
@@ -1,20 +0,0 @@
use crate::cmd_args::{Args, LogFormat};
use tracing_subscriber;
use tracing_subscriber::EnvFilter;

pub fn init(args: &Args) {
    // Initialize a default filter, then override the built-in default of "warning"
    // with the level from the command line (default: "info").
    let filter = EnvFilter::from_default_env().add_directive(args.log_level.into());

    let trace_sub = tracing_subscriber::fmt()
        .with_thread_ids(true)
        .with_env_filter(filter)
        .with_ansi(!args.no_color);

    match args.log_format {
        LogFormat::Structured => trace_sub.json().init(),
        LogFormat::Debug => trace_sub.pretty().init(),
        _ => trace_sub.init(),
    };
}
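// Illustrative interaction with RUST_LOG (a sketch, not from the original
// source): directives from the environment are kept and the command-line
// level is added on top of them, so both of these should enable debug-level
// output for pgcat while keeping tokio quieter:
//
//   RUST_LOG="tokio=warn" pgcat --log-level DEBUG
//   RUST_LOG="tokio=warn,debug" pgcat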
340 src/main.rs
@@ -1,340 +0,0 @@
// Copyright (c) 2022 Lev Kokotov <hi@levthe.dev>

// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:

// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

extern crate arc_swap;
extern crate async_trait;
extern crate bb8;
extern crate bytes;
extern crate exitcode;
extern crate log;
extern crate md5;
extern crate num_cpus;
extern crate once_cell;
extern crate rustls_pemfile;
extern crate serde;
extern crate serde_derive;
extern crate sqlparser;
extern crate tokio;
extern crate tokio_rustls;
extern crate toml;
extern crate trust_dns_resolver;

#[cfg(not(target_env = "msvc"))]
use jemallocator::Jemalloc;

#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

use log::{debug, error, info, warn};
use parking_lot::Mutex;
use pgcat::format_duration;
use tokio::net::TcpListener;
#[cfg(not(windows))]
use tokio::signal::unix::{signal as unix_signal, SignalKind};
#[cfg(windows)]
use tokio::signal::windows as win_signal;
use tokio::{runtime::Builder, sync::mpsc};

use std::collections::HashMap;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::broadcast;

use pgcat::cmd_args;
use pgcat::config::{get_config, reload_config, VERSION};
use pgcat::dns_cache;
use pgcat::logger;
use pgcat::messages::configure_socket;
use pgcat::pool::{ClientServerMap, ConnectionPool};
use pgcat::prometheus::start_metric_server;
use pgcat::stats::{Collector, Reporter, REPORTER};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = cmd_args::parse();
    logger::init(&args);

    info!("Welcome to PgCat! Meow. (Version {})", VERSION);

    if !pgcat::query_router::QueryRouter::setup() {
        error!("Could not setup query router");
        std::process::exit(exitcode::CONFIG);
    }

    // Create a transient runtime for loading the config for the first time.
    {
        let runtime = Builder::new_multi_thread().worker_threads(1).build()?;

        runtime.block_on(async {
            match pgcat::config::parse(args.config_file.as_str()).await {
                Ok(_) => (),
                Err(err) => {
                    error!("Config parse error: {:?}", err);
                    std::process::exit(exitcode::CONFIG);
                }
            };
        });
    }

    let config = get_config();

    // Create the runtime now that we know the required worker_threads.
    let runtime = Builder::new_multi_thread()
        .worker_threads(config.general.worker_threads)
        .enable_all()
        .build()?;

    runtime.block_on(async move {
        if let Some(true) = config.general.enable_prometheus_exporter {
            let http_addr_str = format!(
                "{}:{}",
                config.general.host, config.general.prometheus_exporter_port
            );

            let http_addr = match SocketAddr::from_str(&http_addr_str) {
                Ok(addr) => addr,
                Err(err) => {
                    error!("Invalid http address: {}", err);
                    std::process::exit(exitcode::CONFIG);
                }
            };

            tokio::task::spawn(async move {
                start_metric_server(http_addr).await;
            });
        }

        let addr = format!("{}:{}", config.general.host, config.general.port);

        let listener = match TcpListener::bind(&addr).await {
            Ok(sock) => sock,
            Err(err) => {
                error!("Listener socket error: {:?}", err);
                std::process::exit(exitcode::CONFIG);
            }
        };

        info!("Running on {}", addr);

        config.show();

        // Tracks which client is connected to which server for query cancellation.
        let client_server_map: ClientServerMap = Arc::new(Mutex::new(HashMap::new()));

        // Statistics reporting.
        REPORTER.store(Arc::new(Reporter::default()));

        // Start the DNS cache (if enabled) before pool initialization.
        match dns_cache::CachedResolver::from_config().await {
            Ok(_) => (),
            Err(err) => error!("DNS cache initialization error: {:?}", err),
        };

        // Connection pool that allows querying all shards and replicas.
        match ConnectionPool::from_config(client_server_map.clone()).await {
            Ok(_) => (),
            Err(err) => {
                error!("Pool error: {:?}", err);
                std::process::exit(exitcode::CONFIG);
            }
        };

        tokio::task::spawn(async move {
            let mut stats_collector = Collector::default();
            stats_collector.collect().await;
        });

        info!("Config autoreloader: {}", match config.general.autoreload {
            Some(interval) => format!("{} ms", interval),
            None => "disabled".into(),
        });

        if let Some(interval) = config.general.autoreload {
            let mut autoreload_interval = tokio::time::interval(tokio::time::Duration::from_millis(interval));
            let autoreload_client_server_map = client_server_map.clone();

            tokio::task::spawn(async move {
                loop {
                    autoreload_interval.tick().await;
                    debug!("Automatically reloading config");

                    if let Ok(changed) = reload_config(autoreload_client_server_map.clone()).await {
                        if changed {
                            get_config().show()
                        }
                    };
                }
            });
        };

        #[cfg(windows)]
        let mut term_signal = win_signal::ctrl_close().unwrap();
        #[cfg(windows)]
        let mut interrupt_signal = win_signal::ctrl_c().unwrap();
        #[cfg(windows)]
        let mut sighup_signal = win_signal::ctrl_shutdown().unwrap();

        #[cfg(not(windows))]
        let mut term_signal = unix_signal(SignalKind::terminate()).unwrap();
        #[cfg(not(windows))]
        let mut interrupt_signal = unix_signal(SignalKind::interrupt()).unwrap();
        #[cfg(not(windows))]
        let mut sighup_signal = unix_signal(SignalKind::hangup()).unwrap();
        let (shutdown_tx, _) = broadcast::channel::<()>(1);
        let (drain_tx, mut drain_rx) = mpsc::channel::<i32>(2048);
        let (exit_tx, mut exit_rx) = mpsc::channel::<()>(1);
        let mut admin_only = false;
        let mut total_clients = 0;

        info!("Waiting for clients");

        loop {
            tokio::select! {
                // Reload config:
                // kill -SIGHUP $(pgrep pgcat)
                _ = sighup_signal.recv() => {
                    info!("Reloading config");

                    _ = reload_config(client_server_map.clone()).await;

                    get_config().show();
                },

                // Initiate graceful shutdown sequence on sig int
                _ = interrupt_signal.recv() => {
                    info!("Got SIGINT");

                    // Don't want this to happen more than once
                    if admin_only {
                        continue;
                    }

                    admin_only = true;

                    // Broadcast that client tasks need to finish
                    let _ = shutdown_tx.send(());
                    let exit_tx = exit_tx.clone();
                    let _ = drain_tx.send(0).await;

                    tokio::task::spawn(async move {
                        let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(config.general.shutdown_timeout));

                        // First tick fires immediately.
                        interval.tick().await;

                        // Second one fires after the interval time.
                        interval.tick().await;

                        // We're done waiting.
                        error!("Graceful shutdown timed out. {} active clients being closed", total_clients);

                        let _ = exit_tx.send(()).await;
                    });
                },

                _ = term_signal.recv() => {
                    info!("Got SIGTERM, closing with {} clients active", total_clients);
                    break;
                },

                new_client = listener.accept() => {
                    let (socket, addr) = match new_client {
                        Ok((socket, addr)) => (socket, addr),
                        Err(err) => {
                            error!("{:?}", err);
                            continue;
                        }
                    };

                    let shutdown_rx = shutdown_tx.subscribe();
                    let drain_tx = drain_tx.clone();
                    let client_server_map = client_server_map.clone();

                    let tls_certificate = get_config().general.tls_certificate.clone();

                    configure_socket(&socket);

                    tokio::task::spawn(async move {
                        let start = chrono::offset::Utc::now().naive_utc();

                        match pgcat::client::client_entrypoint(
                            socket,
                            client_server_map,
                            shutdown_rx,
                            drain_tx,
                            admin_only,
                            tls_certificate,
                            config.general.log_client_connections,
                        )
                        .await
                        {
                            Ok(()) => {
                                let duration = chrono::offset::Utc::now().naive_utc() - start;

                                if get_config().general.log_client_disconnections {
                                    info!(
                                        "Client {:?} disconnected, session duration: {}",
                                        addr,
                                        format_duration(&duration)
                                    );
                                } else {
                                    debug!(
                                        "Client {:?} disconnected, session duration: {}",
                                        addr,
                                        format_duration(&duration)
                                    );
                                }
                            }

                            Err(err) => {
                                match err {
                                    pgcat::errors::Error::ClientBadStartup => debug!("Client disconnected with error {:?}", err),
                                    _ => warn!("Client disconnected with error {:?}", err),
                                }
                            }
                        };
                    });
                }

                _ = exit_rx.recv() => {
                    break;
                }

                client_ping = drain_rx.recv() => {
                    let client_ping = client_ping.unwrap();
                    total_clients += client_ping;

                    if total_clients == 0 && admin_only {
                        let _ = exit_tx.send(()).await;
                    }
                }
            }
        }

        info!("Shutting down...");
    });
    Ok(())
}
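// Operational summary of the select! loop above (commands illustrative):
//
//   kill -HUP  $(pgrep pgcat)   # reload the config in place
//   kill -INT  $(pgrep pgcat)   # graceful shutdown: drain clients, then exit
//   kill -TERM $(pgrep pgcat)   # immediate shutdown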
1548 src/messages.rs (file diff suppressed because it is too large)
190 src/mirrors.rs
@@ -1,190 +0,0 @@
use std::sync::Arc;

/// A mirrored PostgreSQL client.
/// Packets arrive to us through a channel from the main client and we send them to the server.
use bb8::Pool;
use bytes::{Bytes, BytesMut};
use parking_lot::RwLock;

use crate::config::{get_config, Address, Role, User};
use crate::pool::{ClientServerMap, ServerPool};
use log::{error, info, trace, warn};
use tokio::sync::mpsc::{channel, Receiver, Sender};

pub struct MirroredClient {
    address: Address,
    user: User,
    database: String,
    bytes_rx: Receiver<Bytes>,
    disconnect_rx: Receiver<()>,
}

impl MirroredClient {
    async fn create_pool(&self) -> Pool<ServerPool> {
        let config = get_config();
        let default = std::time::Duration::from_millis(10_000).as_millis() as u64;
        let (connection_timeout, idle_timeout, _cfg, prepared_statement_cache_size) =
            match config.pools.get(&self.address.pool_name) {
                Some(cfg) => (
                    cfg.connect_timeout.unwrap_or(default),
                    cfg.idle_timeout.unwrap_or(default),
                    cfg.clone(),
                    cfg.prepared_statements_cache_size,
                ),
                None => (default, default, crate::config::Pool::default(), 0),
            };

        let manager = ServerPool::new(
            self.address.clone(),
            self.user.clone(),
            self.database.as_str(),
            ClientServerMap::default(),
            Arc::new(RwLock::new(None)),
            None,
            true,
            false,
            prepared_statement_cache_size,
        );

        Pool::builder()
            .max_size(1)
            .connection_timeout(std::time::Duration::from_millis(connection_timeout))
            .idle_timeout(Some(std::time::Duration::from_millis(idle_timeout)))
            .test_on_check_out(false)
            .build(manager)
            .await
            .unwrap()
    }

    pub fn start(mut self) {
        tokio::spawn(async move {
            let pool = self.create_pool().await;
            let address = self.address.clone();
            loop {
                let mut server = match pool.get().await {
                    Ok(server) => server,
                    Err(err) => {
                        error!(
                            "Failed to get connection from pool, discarding message {:?}, {:?}",
                            err,
                            address.clone()
                        );
                        continue;
                    }
                };

                tokio::select! {
                    // Exit channel events
                    _ = self.disconnect_rx.recv() => {
                        info!("Got mirror exit signal, exiting {:?}", address.clone());
                        break;
                    }

                    // Incoming data from server (we read to clear the socket buffer and discard the data)
                    recv_result = server.recv(None) => {
                        match recv_result {
                            Ok(message) => trace!("Received from mirror: {} {:?}", String::from_utf8_lossy(&message[..]), address.clone()),
                            Err(err) => {
                                server.mark_bad(
                                    format!("Failed to receive from mirror, discarding message {:?}, {:?}", err, address.clone()).as_str()
                                );
                            }
                        }
                    }

                    // Messages to send to the server
                    message = self.bytes_rx.recv() => {
                        match message {
                            Some(bytes) => {
                                match server.send(&BytesMut::from(&bytes[..])).await {
                                    Ok(_) => trace!("Sent to mirror: {} {:?}", String::from_utf8_lossy(&bytes[..]), address.clone()),
                                    Err(err) => {
                                        server.mark_bad(
                                            format!("Failed to send to mirror {:?} {:?}", err, address.clone()).as_str()
                                        );
                                    }
                                }
                            }
                            None => {
                                info!("Mirror channel closed, exiting {:?}", address.clone());
                                break;
                            },
                        }
                    }
                }
            }
        });
    }
}
pub struct MirroringManager {
    pub byte_senders: Vec<Sender<Bytes>>,
    pub disconnect_senders: Vec<Sender<()>>,
}
impl MirroringManager {
    pub fn from_addresses(
        user: User,
        database: String,
        addresses: Vec<Address>,
    ) -> MirroringManager {
        let mut byte_senders: Vec<Sender<Bytes>> = vec![];
        let mut exit_senders: Vec<Sender<()>> = vec![];

        addresses.iter().for_each(|mirror| {
            let (bytes_tx, bytes_rx) = channel::<Bytes>(10);
            let (exit_tx, exit_rx) = channel::<()>(1);
            let mut addr = mirror.clone();
            addr.role = Role::Mirror;
            let client = MirroredClient {
                user: user.clone(),
                database: database.to_owned(),
                address: addr,
                bytes_rx,
                disconnect_rx: exit_rx,
            };
            exit_senders.push(exit_tx);
            byte_senders.push(bytes_tx);
            client.start();
        });

        Self {
            byte_senders,
            disconnect_senders: exit_senders,
        }
    }

    pub fn send(&mut self, bytes: &BytesMut) {
        // We want to avoid performing an allocation if we won't be able to send the message.
        // There is a possibility of a race here where we check the capacity and then the channel is
        // closed or the capacity is reduced to 0, but mirroring is best effort anyway.
        if self
            .byte_senders
            .iter()
            .all(|sender| sender.capacity() == 0 || sender.is_closed())
        {
            return;
        }
        let immutable_bytes = bytes.clone().freeze();
        self.byte_senders.iter_mut().for_each(|sender| {
            match sender.try_send(immutable_bytes.clone()) {
                Ok(_) => {}
                Err(err) => {
                    warn!("Failed to send bytes to a mirror channel {}", err);
                }
            }
        });
    }

    pub fn disconnect(&mut self) {
        self.disconnect_senders
            .iter_mut()
            .for_each(|sender| match sender.try_send(()) {
                Ok(_) => {}
                Err(err) => {
                    warn!(
                        "Failed to send disconnect signal to a mirror channel {}",
                        err
                    );
                }
            });
    }
}
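// Hedged usage sketch (variable names illustrative): a pool with `mirrors`
// configured wires mirroring up roughly like this:
//
// let mut manager = MirroringManager::from_addresses(user, database, mirror_addresses);
// manager.send(&client_packet); // best-effort fan-out; drops when channels are full
// manager.disconnect();         // on client disconnect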
@@ -1,120 +0,0 @@
//! The intercept plugin.
//!
//! It intercepts queries and returns fake results.

use async_trait::async_trait;
use bytes::{BufMut, BytesMut};
use serde::{Deserialize, Serialize};
use sqlparser::ast::Statement;

use log::debug;

use crate::{
    config::Intercept as InterceptConfig,
    errors::Error,
    messages::{command_complete, data_row_nullable, row_description, DataType},
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

// TODO: use these structs for deserialization
#[derive(Serialize, Deserialize)]
pub struct Rule {
    query: String,
    schema: Vec<Column>,
    result: Vec<Vec<String>>,
}

#[derive(Serialize, Deserialize)]
pub struct Column {
    name: String,
    data_type: String,
}

/// The intercept plugin.
pub struct Intercept<'a> {
    pub enabled: bool,
    pub config: &'a InterceptConfig,
}

#[async_trait]
impl<'a> Plugin for Intercept<'a> {
    async fn run(
        &mut self,
        query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled || ast.is_empty() {
            return Ok(PluginOutput::Allow);
        }

        let mut config = self.config.clone();
        config.substitute(
            &query_router.pool_settings().db,
            &query_router.pool_settings().user.username,
        );

        let mut result = BytesMut::new();

        for q in ast {
            // Normalization
            let q = q.to_string().to_ascii_lowercase();

            for (_, target) in config.queries.iter() {
                if target.query.as_str() == q {
                    debug!("Intercepting query: {}", q);

                    let rd = target
                        .schema
                        .iter()
                        .map(|row| {
                            let name = &row[0];
                            let data_type = &row[1];
                            (
                                name.as_str(),
                                match data_type.as_str() {
                                    "text" => DataType::Text,
                                    "anyarray" => DataType::AnyArray,
                                    "oid" => DataType::Oid,
                                    "bool" => DataType::Bool,
                                    "int4" => DataType::Int4,
                                    _ => DataType::Any,
                                },
                            )
                        })
                        .collect::<Vec<(&str, DataType)>>();

                    result.put(row_description(&rd));

                    target.result.iter().for_each(|row| {
                        let row = row
                            .iter()
                            .map(|s| {
                                let s = s.as_str().to_string();

                                if s.is_empty() {
                                    None
                                } else {
                                    Some(s)
                                }
                            })
                            .collect::<Vec<Option<String>>>();
                        result.put(data_row_nullable(&row));
                    });

                    result.put(command_complete("SELECT"));
                }
            }
        }

        if !result.is_empty() {
            result.put_u8(b'Z');
            result.put_i32(5);
            result.put_u8(b'I');

            return Ok(PluginOutput::Intercept(result));
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
@@ -1,45 +0,0 @@
//! The plugin ecosystem.
//!
//! Currently plugins only grant or deny access to the database for a particular query.
//! Example use cases:
//! - block known bad queries
//! - block access to system catalogs
//! - block dangerous modifications like `DROP TABLE`
//! - etc
//!

pub mod intercept;
pub mod prewarmer;
pub mod query_logger;
pub mod table_access;

use crate::{errors::Error, query_router::QueryRouter};
use async_trait::async_trait;
use bytes::BytesMut;
use sqlparser::ast::Statement;

pub use intercept::Intercept;
pub use query_logger::QueryLogger;
pub use table_access::TableAccess;

#[derive(Clone, Debug, PartialEq)]
pub enum PluginOutput {
    Allow,
    Deny(String),
    Overwrite(Vec<Statement>),
    Intercept(BytesMut),
}

#[async_trait]
pub trait Plugin {
    // Run before the query is sent to the server.
    #[allow(clippy::ptr_arg)]
    async fn run(
        &mut self,
        query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error>;

    // TODO: run after the result is returned
    // async fn callback(&mut self, query_router: &QueryRouter);
}
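// Minimal plugin sketch against the trait above (hypothetical, for
// illustration only):
//
// pub struct DenyAll;
//
// #[async_trait]
// impl Plugin for DenyAll {
//     async fn run(
//         &mut self,
//         _query_router: &QueryRouter,
//         _ast: &Vec<Statement>,
//     ) -> Result<PluginOutput, Error> {
//         Ok(PluginOutput::Deny("all queries are blocked".into()))
//     }
// }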
@@ -1,28 +0,0 @@
//! Prewarm new connections before giving them to the client.
use crate::{errors::Error, server::Server};
use log::info;

pub struct Prewarmer<'a> {
    pub enabled: bool,
    pub server: &'a mut Server,
    pub queries: &'a Vec<String>,
}

impl<'a> Prewarmer<'a> {
    pub async fn run(&mut self) -> Result<(), Error> {
        if !self.enabled {
            return Ok(());
        }

        for query in self.queries {
            info!(
                "{} Prewarming with query: `{}`",
                self.server.address(),
                query
            );
            self.server.query(query).await?;
        }

        Ok(())
    }
}
@@ -1,38 +0,0 @@
//! Log all queries to stdout (or somewhere else, why not).

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};
use async_trait::async_trait;
use log::info;
use sqlparser::ast::Statement;

pub struct QueryLogger<'a> {
    pub enabled: bool,
    pub user: &'a str,
    pub db: &'a str,
}

#[async_trait]
impl<'a> Plugin for QueryLogger<'a> {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled {
            return Ok(PluginOutput::Allow);
        }

        let query = ast
            .iter()
            .map(|q| q.to_string())
            .collect::<Vec<String>>()
            .join("; ");
        info!("[pool: {}][user: {}] {}", self.db, self.user, query);

        Ok(PluginOutput::Allow)
    }
}
@@ -1,59 +0,0 @@
//! This query router plugin will check if the user can access a particular
//! table as part of their query. If they can't, the query will not be routed.

use async_trait::async_trait;
use sqlparser::ast::{visit_relations, Statement};

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

use log::debug;

use core::ops::ControlFlow;

pub struct TableAccess<'a> {
    pub enabled: bool,
    pub tables: &'a Vec<String>,
}

#[async_trait]
impl<'a> Plugin for TableAccess<'a> {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled {
            return Ok(PluginOutput::Allow);
        }

        let mut found = None;

        visit_relations(ast, |relation| {
            let relation = relation.to_string();
            let parts = relation.split('.').collect::<Vec<&str>>();
            let table_name = parts.last().unwrap();

            if self.tables.contains(&table_name.to_string()) {
                found = Some(table_name.to_string());
                ControlFlow::<()>::Break(())
            } else {
                ControlFlow::<()>::Continue(())
            }
        });

        if let Some(found) = found {
            debug!("Blocking access to table \"{}\"", found);

            Ok(PluginOutput::Deny(format!(
                "permission for table \"{}\" denied",
                found
            )))
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
1223 src/pool.rs (file diff suppressed because it is too large)
@@ -1,530 +0,0 @@
use http_body_util::Full;
use hyper::body;
use hyper::body::Bytes;

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use log::{debug, error, info};
use phf::phf_map;
use std::collections::HashMap;
use std::fmt;
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use tokio::net::TcpListener;

use crate::config::Address;
use crate::pool::{get_all_pools, PoolIdentifier};
use crate::stats::get_server_stats;
use crate::stats::pool::PoolStats;

struct MetricHelpType {
    help: &'static str,
    ty: &'static str,
}

struct ServerPrometheusStats {
    bytes_received: u64,
    bytes_sent: u64,
    transaction_count: u64,
    query_count: u64,
    error_count: u64,
    active_count: u64,
    idle_count: u64,
    login_count: u64,
    tested_count: u64,
}

// reference for metric types: https://prometheus.io/docs/concepts/metric_types/
// counters only increase
// gauges can arbitrarily increase or decrease
static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = phf_map! {
    "stats_total_query_count" => MetricHelpType {
        help: "Number of queries sent by all clients",
        ty: "counter",
    },
    "stats_total_query_time" => MetricHelpType {
        help: "Total amount of time for queries to execute",
        ty: "counter",
    },
    "stats_total_received" => MetricHelpType {
        help: "Number of bytes received from the server",
        ty: "counter",
    },
    "stats_total_sent" => MetricHelpType {
        help: "Number of bytes sent to the server",
        ty: "counter",
    },
    "stats_total_xact_count" => MetricHelpType {
        help: "Total number of transactions started by the client",
        ty: "counter",
    },
    "stats_total_xact_time" => MetricHelpType {
        help: "Total amount of time for all transactions to execute",
        ty: "counter",
    },
    "stats_total_wait_time" => MetricHelpType {
        help: "Total time client waited for a server connection",
        ty: "counter",
    },
    "stats_avg_query_count" => MetricHelpType {
        help: "Average of total_query_count every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_query_time" => MetricHelpType {
        help: "Average time taken for queries to execute every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_recv" => MetricHelpType {
        help: "Average of total_received bytes every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_sent" => MetricHelpType {
        help: "Average of total_sent bytes every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_errors" => MetricHelpType {
        help: "Average number of errors every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_xact_count" => MetricHelpType {
        help: "Average of total_xact_count every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_xact_time" => MetricHelpType {
        help: "Average of total_xact_time every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_wait_time" => MetricHelpType {
        help: "Average of total_wait_time every 15 seconds",
        ty: "gauge",
    },
    "pools_maxwait_us" => MetricHelpType {
        help: "The time a client waited for a server connection in microseconds",
        ty: "gauge",
    },
    "pools_maxwait" => MetricHelpType {
        help: "The time a client waited for a server connection in seconds",
        ty: "gauge",
    },
    "pools_cl_waiting" => MetricHelpType {
        help: "How many clients are waiting for a connection from the pool",
        ty: "gauge",
    },
    "pools_cl_active" => MetricHelpType {
        help: "How many clients are actively communicating with a server",
        ty: "gauge",
    },
    "pools_cl_idle" => MetricHelpType {
        help: "How many clients are idle",
        ty: "gauge",
    },
    "pools_sv_idle" => MetricHelpType {
        help: "How many server connections are idle",
        ty: "gauge",
    },
    "pools_sv_active" => MetricHelpType {
        help: "How many server connections are actively communicating with a client",
        ty: "gauge",
    },
    "pools_sv_login" => MetricHelpType {
        help: "How many server connections are currently being created",
        ty: "gauge",
    },
    "pools_sv_tested" => MetricHelpType {
        help: "How many server connections are currently waiting on a health check to succeed",
        ty: "gauge",
    },
    "servers_bytes_received" => MetricHelpType {
        help: "Volume in bytes of network traffic received by server",
        ty: "counter",
    },
    "servers_bytes_sent" => MetricHelpType {
        help: "Volume in bytes of network traffic sent by server",
        ty: "counter",
    },
    "servers_transaction_count" => MetricHelpType {
        help: "Number of transactions executed by server",
        ty: "counter",
    },
    "servers_query_count" => MetricHelpType {
        help: "Number of queries executed by server",
        ty: "counter",
    },
    "servers_error_count" => MetricHelpType {
        help: "Number of errors",
        ty: "counter",
    },
    "servers_idle_count" => MetricHelpType {
        help: "Number of server connections in idle state",
        ty: "gauge",
    },
    "servers_active_count" => MetricHelpType {
        help: "Number of server connections in active state",
        ty: "gauge",
    },
    "servers_tested_count" => MetricHelpType {
        help: "Number of server connections in tested state",
        ty: "gauge",
    },
    "servers_login_count" => MetricHelpType {
        help: "Number of server connections in login state",
        ty: "gauge",
    },
    "servers_is_banned" => MetricHelpType {
        help: "0 if server is not banned, 1 if server is banned",
        ty: "gauge",
    },
    "servers_is_paused" => MetricHelpType {
        help: "0 if server is not paused, 1 if server is paused",
        ty: "gauge",
    },
    "databases_pool_size" => MetricHelpType {
        help: "Maximum number of server connections",
        ty: "gauge",
    },
    "databases_current_connections" => MetricHelpType {
        help: "Current number of connections for this database",
        ty: "gauge",
    },
};

struct PrometheusMetric<Value: fmt::Display> {
    name: String,
    help: String,
    ty: String,
    labels: HashMap<&'static str, String>,
    value: Value,
}

impl<Value: fmt::Display> fmt::Display for PrometheusMetric<Value> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut sorted_labels: Vec<_> = self.labels.iter().collect();
        sorted_labels.sort_by_key(|&(key, _)| key);
        let formatted_labels = sorted_labels
            .iter()
            .map(|(key, value)| format!("{}=\"{}\"", key, value))
            .collect::<Vec<_>>()
            .join(",");
        write!(
            f,
            "{name}{{{formatted_labels}}} {value}",
            name = format_args!("pgcat_{}", self.name),
            formatted_labels = formatted_labels,
            value = self.value
        )
    }
}

impl<Value: fmt::Display> PrometheusMetric<Value> {
    fn from_name<V: fmt::Display>(
        name: &str,
        value: V,
        labels: HashMap<&'static str, String>,
    ) -> Option<PrometheusMetric<V>> {
        METRIC_HELP_AND_TYPES_LOOKUP
            .get(name)
            .map(|metric| PrometheusMetric::<V> {
                name: name.to_owned(),
                help: metric.help.to_owned(),
                ty: metric.ty.to_owned(),
                value,
                labels,
            })
    }

    fn from_database_info(
        address: &Address,
        name: &str,
        value: u32,
    ) -> Option<PrometheusMetric<u32>> {
        let mut labels = HashMap::new();
        labels.insert("host", address.host.clone());
        labels.insert("shard", address.shard.to_string());
        labels.insert("role", address.role.to_string());
        labels.insert("pool", address.pool_name.clone());
        labels.insert("index", address.address_index.to_string());
        labels.insert("database", address.database.to_string());
        labels.insert("username", address.username.clone());

        Self::from_name(&format!("databases_{}", name), value, labels)
    }

    fn from_server_info(
        address: &Address,
        name: &str,
        value: u64,
    ) -> Option<PrometheusMetric<u64>> {
        let mut labels = HashMap::new();
        labels.insert("host", address.host.clone());
        labels.insert("shard", address.shard.to_string());
        labels.insert("role", address.role.to_string());
        labels.insert("pool", address.pool_name.clone());
        labels.insert("index", address.address_index.to_string());
        labels.insert("database", address.database.to_string());
        labels.insert("username", address.username.clone());

        Self::from_name(&format!("servers_{}", name), value, labels)
    }

    fn from_address(address: &Address, name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
        let mut labels = HashMap::new();
        labels.insert("host", address.host.clone());
        labels.insert("shard", address.shard.to_string());
        labels.insert("pool", address.pool_name.clone());
        labels.insert("role", address.role.to_string());
        labels.insert("index", address.address_index.to_string());
        labels.insert("database", address.database.to_string());
        labels.insert("username", address.username.clone());

        Self::from_name(&format!("stats_{}", name), value, labels)
    }

    fn from_pool(pool_id: PoolIdentifier, name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
        let mut labels = HashMap::new();
        labels.insert("pool", pool_id.db);
        labels.insert("user", pool_id.user);

        Self::from_name(&format!("pools_{}", name), value, labels)
    }

    fn get_header(&self) -> String {
        format!(
            "\n# HELP {name} {help}\n# TYPE {name} {ty}",
            name = format_args!("pgcat_{}", self.name),
            help = self.help,
            ty = self.ty,
        )
    }
}

async fn prometheus_stats(
    request: Request<body::Incoming>,
) -> Result<Response<Full<Bytes>>, hyper::http::Error> {
    match (request.method(), request.uri().path()) {
        (&Method::GET, "/metrics") => {
            let mut lines = Vec::new();
            push_address_stats(&mut lines);
            push_pool_stats(&mut lines);
            push_server_stats(&mut lines);
            push_database_stats(&mut lines);
            lines.push("".to_string()); // End the stats with a line terminator, as required by the specification.

            Response::builder()
                .header("content-type", "text/plain; version=0.0.4")
                .body(lines.join("\n").into())
        }
        _ => Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body("".into()),
    }
}

// Adds metrics shown in a SHOW STATS admin command.
fn push_address_stats(lines: &mut Vec<String>) {
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
    for (_, pool) in get_all_pools() {
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let stats = &*address.stats;
                for (key, value) in stats.clone() {
                    if let Some(prometheus_metric) =
                        PrometheusMetric::<u64>::from_address(address, &key, value)
                    {
                        grouped_metrics
                            .entry(key)
                            .or_default()
                            .push(prometheus_metric);
                    } else {
                        debug!("Metric {} not implemented for {}", key, address.name());
                    }
                }
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

// Adds relevant metrics shown in a SHOW POOLS admin command.
fn push_pool_stats(lines: &mut Vec<String>) {
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
    let pool_stats = PoolStats::construct_pool_lookup();
    for (pool_id, stats) in pool_stats.iter() {
        for (name, value) in stats.clone() {
            if let Some(prometheus_metric) =
                PrometheusMetric::<u64>::from_pool(pool_id.clone(), &name, value)
            {
                grouped_metrics
                    .entry(name)
                    .or_default()
                    .push(prometheus_metric);
            } else {
                debug!("Metric {} not implemented for ({})", name, *pool_id);
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

// Adds relevant metrics shown in a SHOW DATABASES admin command.
fn push_database_stats(lines: &mut Vec<String>) {
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u32>>> = HashMap::new();
    for (_, pool) in get_all_pools() {
        let pool_config = pool.settings.clone();
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let pool_state = pool.pool_state(shard, server);
                let metrics = vec![
                    ("pool_size", pool_config.user.pool_size),
                    ("current_connections", pool_state.connections),
                ];
                for (key, value) in metrics {
                    if let Some(prometheus_metric) =
                        PrometheusMetric::<u32>::from_database_info(address, key, value)
                    {
                        grouped_metrics
                            .entry(key.to_string())
                            .or_default()
                            .push(prometheus_metric);
                    } else {
                        debug!("Metric {} not implemented for {}", key, address.name());
                    }
                }
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

// Adds relevant metrics shown in a SHOW SERVERS admin command.
fn push_server_stats(lines: &mut Vec<String>) {
    let server_stats = get_server_stats();
    let mut prom_stats = HashMap::<String, ServerPrometheusStats>::new();
    for (_, stats) in server_stats {
        let entry = prom_stats
            .entry(stats.address_name())
            .or_insert(ServerPrometheusStats {
                bytes_received: 0,
                bytes_sent: 0,
                transaction_count: 0,
                query_count: 0,
                error_count: 0,
                active_count: 0,
                idle_count: 0,
                login_count: 0,
                tested_count: 0,
            });
        entry.bytes_received += stats.bytes_received.load(Ordering::Relaxed);
        entry.bytes_sent += stats.bytes_sent.load(Ordering::Relaxed);
        entry.transaction_count += stats.transaction_count.load(Ordering::Relaxed);
        entry.query_count += stats.query_count.load(Ordering::Relaxed);
        entry.error_count += stats.error_count.load(Ordering::Relaxed);
        match stats.state.load(Ordering::Relaxed) {
            crate::stats::ServerState::Login => entry.login_count += 1,
            crate::stats::ServerState::Active => entry.active_count += 1,
            crate::stats::ServerState::Tested => entry.tested_count += 1,
            crate::stats::ServerState::Idle => entry.idle_count += 1,
        }
    }
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
    for (_, pool) in get_all_pools() {
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                if let Some(server_info) = prom_stats.get(&address.name()) {
                    let metrics = [
                        ("bytes_received", server_info.bytes_received),
                        ("bytes_sent", server_info.bytes_sent),
                        ("transaction_count", server_info.transaction_count),
                        ("query_count", server_info.query_count),
                        ("error_count", server_info.error_count),
                        ("idle_count", server_info.idle_count),
                        ("active_count", server_info.active_count),
                        ("login_count", server_info.login_count),
                        ("tested_count", server_info.tested_count),
                        ("is_banned", if pool.is_banned(address) { 1 } else { 0 }),
                        ("is_paused", if pool.paused() { 1 } else { 0 }),
                    ];
                    for (key, value) in metrics {
                        if let Some(prometheus_metric) =
                            PrometheusMetric::<u64>::from_server_info(address, key, value)
                        {
                            grouped_metrics
                                .entry(key.to_string())
                                .or_default()
                                .push(prometheus_metric);
                        } else {
                            debug!("Metric {} not implemented for {}", key, address.name());
                        }
                    }
                }
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

pub async fn start_metric_server(http_addr: SocketAddr) {
    let listener = TcpListener::bind(http_addr);
    let listener = match listener.await {
        Ok(listener) => listener,
        Err(e) => {
            error!("Failed to bind prometheus server to HTTP address: {}.", e);
            return;
        }
    };
    info!(
        "Exposing prometheus metrics on http://{}/metrics.",
        http_addr
    );
    loop {
        let stream = match listener.accept().await {
            Ok((stream, _)) => stream,
            Err(e) => {
                error!("Error accepting connection: {}", e);
                continue;
            }
        };
        let io = TokioIo::new(stream);

        tokio::task::spawn(async move {
            if let Err(err) = http1::Builder::new()
                .serve_connection(io, service_fn(prometheus_stats))
                .await
            {
                eprintln!("Error serving HTTP connection for metrics: {:?}", err);
            }
        });
    }
}
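To make the output of `get_header()` and the `Display` impl above concrete, here is a standalone sketch of the Prometheus exposition format they emit: a `# HELP`/`# TYPE` header followed by one `pgcat_<name>{labels} <value>` sample per metric, with labels sorted by key so the output is deterministic. The pool and user names below are made up:

```rust
// Standalone sketch of the text exposition format produced above.
use std::collections::HashMap;

fn render(name: &str, help: &str, ty: &str, labels: &HashMap<&str, String>, value: u64) -> String {
    // Sort labels by key, exactly like the Display impl above.
    let mut sorted: Vec<_> = labels.iter().collect();
    sorted.sort_by_key(|&(k, _)| k);
    let labels = sorted
        .iter()
        .map(|(k, v)| format!("{}=\"{}\"", k, v))
        .collect::<Vec<_>>()
        .join(",");
    format!(
        "# HELP pgcat_{name} {help}\n# TYPE pgcat_{name} {ty}\npgcat_{name}{{{labels}}} {value}"
    )
}

fn main() {
    let mut labels = HashMap::new();
    labels.insert("pool", "mydb".to_string());
    labels.insert("user", "app".to_string());
    // Prints:
    // # HELP pgcat_pools_cl_active How many clients are actively communicating with a server
    // # TYPE pgcat_pools_cl_active gauge
    // pgcat_pools_cl_active{pool="mydb",user="app"} 3
    println!(
        "{}",
        render(
            "pools_cl_active",
            "How many clients are actively communicating with a server",
            "gauge",
            &labels,
            3
        )
    );
}
```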
src/query_router.rs (1968 lines): file diff suppressed because it is too large
src/scram.rs (325 lines)
@@ -1,325 +0,0 @@
// SCRAM-SHA-256 authentication. Heavily inspired by
// https://github.com/sfackler/rust-postgres/
// SASL implementation.

use base64::{engine::general_purpose, Engine as _};
use bytes::BytesMut;
use hmac::{Hmac, Mac};
use rand::{self, Rng};
use sha2::digest::FixedOutput;
use sha2::{Digest, Sha256};

use std::fmt::Write;

use crate::constants::*;
use crate::errors::Error;

/// Normalize a password string. Postgres
/// passwords don't have to be UTF-8.
fn normalize(pass: &[u8]) -> Vec<u8> {
    let pass = match std::str::from_utf8(pass) {
        Ok(pass) => pass,
        Err(_) => return pass.to_vec(),
    };

    match stringprep::saslprep(pass) {
        Ok(pass) => pass.into_owned().into_bytes(),
        Err(_) => pass.as_bytes().to_vec(),
    }
}

/// Keep the SASL state through the exchange.
/// It takes 3 messages to complete the authentication.
pub struct ScramSha256 {
    password: String,
    salted_password: [u8; 32],
    auth_message: String,
    message: BytesMut,
    nonce: String,
}

impl ScramSha256 {
    /// Create the Scram state from a password. It'll automatically
    /// generate a nonce.
    pub fn new(password: &str) -> ScramSha256 {
        let mut rng = rand::thread_rng();
        let nonce = (0..NONCE_LENGTH)
            .map(|_| {
                let mut v = rng.gen_range(0x21u8..0x7e);
                if v == 0x2c {
                    v = 0x7e
                }
                v as char
            })
            .collect::<String>();

        Self::from_nonce(password, &nonce)
    }

    /// Used for testing.
    pub fn from_nonce(password: &str, nonce: &str) -> ScramSha256 {
        let message = BytesMut::from(format!("{}n=,r={}", "n,,", nonce).as_bytes());

        ScramSha256 {
            password: password.to_string(),
            nonce: String::from(nonce),
            message,
            salted_password: [0u8; 32],
            auth_message: String::new(),
        }
    }

    /// Get the current state of the SASL authentication.
    pub fn message(&mut self) -> BytesMut {
        self.message.clone()
    }

    /// Update the state with a message received from the server.
    pub fn update(&mut self, message: &BytesMut) -> Result<BytesMut, Error> {
        let server_message = Message::parse(message)?;

        if !server_message.nonce.starts_with(&self.nonce) {
            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
        }

        let salt = match general_purpose::STANDARD.decode(&server_message.salt) {
            Ok(salt) => salt,
            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
        };

        let salted_password = Self::hi(
            &normalize(self.password.as_bytes()),
            &salt,
            server_message.iterations,
        );

        // Save for verification of final server message.
        self.salted_password = salted_password;

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&salted_password) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };

        hmac.update(b"Client Key");

        let client_key = hmac.finalize().into_bytes();

        let mut hash = Sha256::default();
        hash.update(client_key.as_slice());

        let stored_key = hash.finalize_fixed();
        let mut cbind_input = vec![];
        cbind_input.extend("n,,".as_bytes());

        let cbind_input = general_purpose::STANDARD.encode(&cbind_input);

        self.message.clear();

        // Start writing the client reply.
        match write!(
            &mut self.message,
            "c={},r={}",
            cbind_input, server_message.nonce
        ) {
            Ok(_) => (),
            Err(_) => return Err(Error::ServerError),
        };

        let auth_message = format!(
            "n=,r={},{},{}",
            self.nonce,
            String::from_utf8_lossy(&message[..]),
            String::from_utf8_lossy(&self.message[..])
        );

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&stored_key) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };
        hmac.update(auth_message.as_bytes());

        // Save the auth message for server final message verification.
        self.auth_message = auth_message;

        let client_signature = hmac.finalize().into_bytes();

        // Sign the client proof.
        let mut client_proof = client_key;
        for (proof, signature) in client_proof.iter_mut().zip(client_signature) {
            *proof ^= signature;
        }

        match write!(
            &mut self.message,
            ",p={}",
            general_purpose::STANDARD.encode(&*client_proof)
        ) {
            Ok(_) => (),
            Err(_) => return Err(Error::ServerError),
        };

        Ok(self.message.clone())
    }

    /// Verify final server message.
    pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
        let final_message = FinalMessage::parse(message)?;

        let verifier = match general_purpose::STANDARD.decode(final_message.value) {
            Ok(verifier) => verifier,
            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
        };

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };
        hmac.update(b"Server Key");
        let server_key = hmac.finalize().into_bytes();

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&server_key) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };
        hmac.update(self.auth_message.as_bytes());

        match hmac.verify_slice(&verifier) {
            Ok(_) => Ok(()),
            Err(_) => Err(Error::ServerError),
        }
    }

    /// Hash the password with the salt `i` times.
    fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] {
        let mut hmac =
            Hmac::<Sha256>::new_from_slice(str).expect("HMAC is able to accept all key sizes");
        hmac.update(salt);
        hmac.update(&[0, 0, 0, 1]);
        let mut prev = hmac.finalize().into_bytes();

        let mut hi = prev;

        for _ in 1..i {
            let mut hmac = Hmac::<Sha256>::new_from_slice(str).expect("already checked above");
            hmac.update(&prev);
            prev = hmac.finalize().into_bytes();

            for (hi, prev) in hi.iter_mut().zip(prev) {
                *hi ^= prev;
            }
        }

        hi.into()
    }
}

/// Parse the server challenge.
struct Message {
    nonce: String,
    salt: String,
    iterations: u32,
}

impl Message {
    /// Parse the server SASL challenge.
    fn parse(message: &BytesMut) -> Result<Message, Error> {
        let parts = String::from_utf8_lossy(&message[..])
            .split(',')
            .map(|s| s.to_string())
            .collect::<Vec<String>>();

        if parts.len() != 3 {
            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
        }

        let nonce = str::replace(&parts[0], "r=", "");
        let salt = str::replace(&parts[1], "s=", "");
        let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
            Ok(iterations) => iterations,
            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
        };

        Ok(Message {
            nonce,
            salt,
            iterations,
        })
    }
}

/// Parse server final validation message.
struct FinalMessage {
    value: String,
}

impl FinalMessage {
    /// Parse the server final validation message.
    pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
        if !message.starts_with(b"v=") || message.len() < 4 {
            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
        }

        Ok(FinalMessage {
            value: String::from_utf8_lossy(&message[2..]).to_string(),
        })
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn parse_server_first_message() {
        let message = BytesMut::from(
            "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096".as_bytes(),
        );
        let message = Message::parse(&message).unwrap();
        assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j");
        assert_eq!(message.salt, "QSXCR+Q6sek8bf92");
        assert_eq!(message.iterations, 4096);
    }

    #[test]
    fn parse_server_last_message() {
        let f = FinalMessage::parse(&BytesMut::from(
            "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".as_bytes(),
        ))
        .unwrap();
        assert_eq!(
            f.value,
            "U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".to_string()
        );
    }

    // recorded auth exchange from psql
    #[test]
    fn exchange() {
        let password = "foobar";
        let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB";

        let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB";
        let server_first =
            "r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\
             =4096";
        let client_final =
            "c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\
             1NTlQYNs5BTeQjdHdk7lOflDo5re2an8=";
        let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw=";

        let mut scram = ScramSha256::from_nonce(password, nonce);

        let message = scram.message();
        assert_eq!(std::str::from_utf8(&message).unwrap(), client_first);

        let result = scram
            .update(&BytesMut::from(server_first.as_bytes()))
            .unwrap();
        assert_eq!(std::str::from_utf8(&result).unwrap(), client_final);

        scram
            .finish(&BytesMut::from(server_final.as_bytes()))
            .unwrap();
    }
}
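The core of `update()` above is the RFC 5802 client-proof arithmetic. A standalone sketch, using the same `hmac` and `sha2` crates as the file; the salted password and auth message below are placeholders, not real test vectors:

```rust
// Sketch of the client-proof arithmetic in `update()` (RFC 5802):
//   ClientKey       = HMAC(SaltedPassword, "Client Key")
//   StoredKey       = SHA-256(ClientKey)
//   ClientSignature = HMAC(StoredKey, AuthMessage)
//   ClientProof     = ClientKey XOR ClientSignature
use hmac::{Hmac, Mac};
use sha2::{Digest, Sha256};

fn client_proof(salted_password: &[u8; 32], auth_message: &str) -> Vec<u8> {
    let mut mac = Hmac::<Sha256>::new_from_slice(salted_password).unwrap();
    mac.update(b"Client Key");
    let client_key = mac.finalize().into_bytes();

    let stored_key = Sha256::digest(&client_key);

    let mut mac = Hmac::<Sha256>::new_from_slice(&stored_key).unwrap();
    mac.update(auth_message.as_bytes());
    let client_signature = mac.finalize().into_bytes();

    // XOR the key with the signature to produce the proof sent in `p=`.
    client_key
        .iter()
        .zip(client_signature.iter())
        .map(|(k, s)| k ^ s)
        .collect()
}

fn main() {
    // Placeholder inputs purely to exercise the shape of the computation.
    let proof = client_proof(&[0u8; 32], "n=,r=...,c=biws,r=...");
    assert_eq!(proof.len(), 32);
}
```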
src/server.rs (1537 lines): file diff suppressed because it is too large
src/sharding.rs (216 lines)
@@ -1,216 +0,0 @@
use serde_derive::{Deserialize, Serialize};
/// Implements various sharding functions.
use sha1::{Digest, Sha1};

/// See: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20>.
const PARTITION_HASH_SEED: u64 = 0x7A5B22367996DCFD;

/// The sharding functions we support.
#[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize, Hash, std::cmp::Eq)]
pub enum ShardingFunction {
    #[serde(alias = "pg_bigint_hash", alias = "PgBigintHash")]
    PgBigintHash,
    #[serde(alias = "sha1", alias = "Sha1")]
    Sha1,
}

impl std::fmt::Display for ShardingFunction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"),
            ShardingFunction::Sha1 => write!(f, "sha1"),
        }
    }
}

/// The sharder.
pub struct Sharder {
    /// Number of shards in the cluster.
    shards: usize,

    /// The sharding function in use.
    sharding_function: ShardingFunction,
}

impl Sharder {
    /// Create a new instance of the sharder.
    pub fn new(shards: usize, sharding_function: ShardingFunction) -> Sharder {
        Sharder {
            shards,
            sharding_function,
        }
    }

    /// Compute the shard given a sharding key.
    pub fn shard(&self, key: i64) -> usize {
        match self.sharding_function {
            ShardingFunction::PgBigintHash => self.pg_bigint_hash(key),
            ShardingFunction::Sha1 => self.sha1(key),
        }
    }

    /// Hash function used by Postgres to determine which partition
    /// to put the row in when using HASH(column) partitioning.
    /// Source: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631>.
    /// Supports only 1 bigint at the moment, but we can add more later.
    fn pg_bigint_hash(&self, key: i64) -> usize {
        let mut lohalf = key as u32;
        let hihalf = (key >> 32) as u32;
        lohalf ^= if key >= 0 { hihalf } else { !hihalf };
        Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards
    }

    /// Example of a hashing function based on SHA1.
    fn sha1(&self, key: i64) -> usize {
        let mut hasher = Sha1::new();

        hasher.update(key.to_string().as_bytes());

        let result = hasher.finalize();

        // Convert the SHA1 hash into hex so we can parse it as a large integer.
        let hex = format!("{:x}", result);

        // Parse the last 8 bytes as an integer (8 bytes = bigint).
        let key = i64::from_str_radix(&hex[hex.len() - 8..], 16).unwrap() as usize;

        key % self.shards
    }

    #[inline]
    fn rot(x: u32, k: u32) -> u32 {
        (x << k) | (x >> (32 - k))
    }

    #[inline]
    fn mix(mut a: u32, mut b: u32, mut c: u32) -> (u32, u32, u32) {
        a = a.wrapping_sub(c);
        a ^= Self::rot(c, 4);
        c = c.wrapping_add(b);

        b = b.wrapping_sub(a);
        b ^= Self::rot(a, 6);
        a = a.wrapping_add(c);

        c = c.wrapping_sub(b);
        c ^= Self::rot(b, 8);
        b = b.wrapping_add(a);

        a = a.wrapping_sub(c);
        a ^= Self::rot(c, 16);
        c = c.wrapping_add(b);

        b = b.wrapping_sub(a);
        b ^= Self::rot(a, 19);
        a = a.wrapping_add(c);

        c = c.wrapping_sub(b);
        c ^= Self::rot(b, 4);
        b = b.wrapping_add(a);

        (a, b, c)
    }

    #[inline]
    fn _final(mut a: u32, mut b: u32, mut c: u32) -> (u32, u32, u32) {
        c ^= b;
        c = c.wrapping_sub(Self::rot(b, 14));
        a ^= c;
        a = a.wrapping_sub(Self::rot(c, 11));
        b ^= a;
        b = b.wrapping_sub(Self::rot(a, 25));
        c ^= b;
        c = c.wrapping_sub(Self::rot(b, 16));
        a ^= c;
        a = a.wrapping_sub(Self::rot(c, 4));
        b ^= a;
        b = b.wrapping_sub(Self::rot(a, 14));
        c ^= b;
        c = c.wrapping_sub(Self::rot(b, 24));
        (a, b, c)
    }

    #[inline]
    fn combine(mut a: u64, b: u64) -> u64 {
        a ^= b
            .wrapping_add(0x49a0f4dd15e5a8e3_u64)
            .wrapping_add(a << 54)
            .wrapping_add(a >> 7);
        a
    }

    #[inline]
    fn pg_u32_hash(k: u32) -> u64 {
        let mut a: u32 = 0x9e3779b9_u32 + std::mem::size_of::<u32>() as u32 + 3923095_u32;
        let mut b = a;
        let c = a;

        a = a.wrapping_add((PARTITION_HASH_SEED >> 32) as u32);
        b = b.wrapping_add(PARTITION_HASH_SEED as u32);
        let (mut a, b, c) = Self::mix(a, b, c);

        a = a.wrapping_add(k);

        let (_a, b, c) = Self::_final(a, b, c);

        ((b as u64) << 32) | (c as u64)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    // See tests/sharding/partition_hash_test_setup.sql
    // The output of those SELECT statements will match this test,
    // confirming that we implemented Postgres BIGINT hashing correctly.
    #[test]
    fn test_pg_bigint_hash() {
        let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);

        let shard_0 = vec![1, 4, 5, 14, 19, 39, 40, 46, 47, 53];

        for v in shard_0 {
            assert_eq!(sharder.shard(v), 0);
        }

        let shard_1 = vec![2, 3, 11, 17, 21, 23, 30, 49, 51, 54];

        for v in shard_1 {
            assert_eq!(sharder.shard(v), 1);
        }

        let shard_2 = vec![6, 7, 15, 16, 18, 20, 25, 28, 34, 35];

        for v in shard_2 {
            assert_eq!(sharder.shard(v), 2);
        }

        let shard_3 = vec![8, 12, 13, 22, 29, 31, 33, 36, 41, 43];

        for v in shard_3 {
            assert_eq!(sharder.shard(v), 3);
        }

        let shard_4 = vec![9, 10, 24, 26, 27, 32, 37, 38, 42, 45];

        for v in shard_4 {
            assert_eq!(sharder.shard(v), 4);
        }
    }

    #[test]
    fn test_sha1_hash() {
        let sharder = Sharder::new(12, ShardingFunction::Sha1);
        let ids = [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        ];
        let shards = [
            4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
        ];

        for (i, id) in ids.iter().enumerate() {
            assert_eq!(sharder.shard(*id), shards[i]);
        }
    }
}
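A short usage sketch of the `Sharder` API above, written as crate-internal code; the `route` helper and the shard count are illustrative, not part of the module:

```rust
// Usage sketch (inside this crate): route a sharding key to one of 5 shards,
// the same way the test above does.
use crate::sharding::{Sharder, ShardingFunction};

fn route(customer_id: i64) -> usize {
    let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
    // PgBigintHash matches Postgres HASH partitioning for BIGINT, so keys
    // routed through the pooler land on the same shard Postgres itself
    // would pick for a HASH(column) partition.
    sharder.shard(customer_id)
}
```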
src/stats.rs (130 lines)
@@ -1,130 +0,0 @@
/// Statistics and reporting.
use arc_swap::ArcSwap;

use log::{info, warn};
use once_cell::sync::Lazy;
use parking_lot::RwLock;
use std::collections::HashMap;

use std::sync::Arc;

// Structs that hold stats for different resources
pub mod address;
pub mod client;
pub mod pool;
pub mod server;
pub use address::AddressStats;
pub use client::{ClientState, ClientStats};
pub use server::{ServerState, ServerStats};

/// Convenience types for various stats
type ClientStatesLookup = HashMap<i32, Arc<ClientStats>>;
type ServerStatesLookup = HashMap<i32, Arc<ServerStats>>;

/// Stats for individual client connections.
/// Used in SHOW CLIENTS.
static CLIENT_STATS: Lazy<Arc<RwLock<ClientStatesLookup>>> =
    Lazy::new(|| Arc::new(RwLock::new(ClientStatesLookup::default())));

/// Stats for individual server connections.
/// Used in SHOW SERVERS.
static SERVER_STATS: Lazy<Arc<RwLock<ServerStatesLookup>>> =
    Lazy::new(|| Arc::new(RwLock::new(ServerStatesLookup::default())));

/// The statistics reporter. An instance is given to each possible source of statistics,
/// e.g. client stats, server stats, connection pool stats.
pub static REPORTER: Lazy<ArcSwap<Reporter>> =
    Lazy::new(|| ArcSwap::from_pointee(Reporter::default()));

/// Statistics period used for average calculations.
/// 15 seconds.
static STAT_PERIOD: u64 = 15000;

/// The statistics reporter. An instance is given
/// to each possible source of statistics,
/// e.g. clients, servers, connection pool.
#[derive(Clone, Debug, Default)]
pub struct Reporter {}

impl Reporter {
    /// Register a client with the stats system. The stats system uses client_id
    /// to track and aggregate statistics from all sources that relate to that client.
    fn client_register(&self, client_id: i32, stats: Arc<ClientStats>) {
        if CLIENT_STATS.read().get(&client_id).is_some() {
            warn!("Client {:?} was double registered!", client_id);
            return;
        }

        CLIENT_STATS.write().insert(client_id, stats);
    }

    /// Reports a client is disconnecting from the pooler.
    fn client_disconnecting(&self, client_id: i32) {
        CLIENT_STATS.write().remove(&client_id);
    }

    /// Register a server connection with the stats system. The stats system uses server_id
    /// to track and aggregate statistics from all sources that relate to that server.
    fn server_register(&self, server_id: i32, stats: Arc<ServerStats>) {
        SERVER_STATS.write().insert(server_id, stats);
    }
    /// Reports a server connection is disconnecting from the pooler.
    fn server_disconnecting(&self, server_id: i32) {
        SERVER_STATS.write().remove(&server_id);
    }
}

/// The statistics collector, used for calculating averages.
/// There is only one collector (kind of like a singleton);
/// it updates averages every 15 seconds.
#[derive(Default)]
pub struct Collector {}

impl Collector {
    /// The statistics collection handler. It will collect statistics
    /// for `address_id`s starting at 0 up to `addresses`.
    pub async fn collect(&mut self) {
        info!("Events reporter started");

        tokio::task::spawn(async move {
            let mut interval =
                tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD));
            loop {
                interval.tick().await;

                // Hold the read lock for the duration of the update to retain all server stats
                let server_stats = SERVER_STATS.read();

                for stats in server_stats.values() {
                    if !stats.check_address_stat_average_is_updated_status() {
                        stats.address_stats().update_averages();
                        stats.address_stats().reset_current_counts();
                        stats.set_address_stat_average_is_updated_status(true);
                    }
                }

                // Reset to false for the next update
                for stats in server_stats.values() {
                    stats.set_address_stat_average_is_updated_status(false);
                }
            }
        });
    }
}

/// Get a snapshot of client statistics
/// collected by the `Collector`.
pub fn get_client_stats() -> ClientStatesLookup {
    CLIENT_STATS.read().clone()
}

/// Get a snapshot of server statistics
/// collected by the `Collector`.
pub fn get_server_stats() -> ServerStatesLookup {
    SERVER_STATS.read().clone()
}

/// Get the statistics reporter used to update stats across the pools/clients.
pub fn get_reporter() -> Reporter {
    (*(*REPORTER.load())).clone()
}
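A sketch of how the pieces above fit together, as crate-internal code: every connection registers its stats handle in the global map keyed by its id, SHOW commands read snapshots of the same map, and the singleton `Collector` folds per-interval counters into averages every `STAT_PERIOD` milliseconds. The `example` function is illustrative only:

```rust
// Crate-internal sketch of the register / snapshot / disconnect lifecycle.
use crate::stats::{get_client_stats, ClientStats};
use std::sync::Arc;

fn example() {
    let stats = Arc::new(ClientStats::default());
    stats.register(stats.clone()); // inserts into CLIENT_STATS under client_id

    // SHOW CLIENTS reads a snapshot of the same map.
    let snapshot = get_client_stats();
    assert!(snapshot.len() >= 1);

    stats.disconnect(); // removes the entry again
}
```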
src/stats/address.rs
@@ -1,226 +0,0 @@
use std::sync::atomic::*;
use std::sync::Arc;

#[derive(Debug, Clone, Default)]
struct AddressStatFields {
    xact_count: Arc<AtomicU64>,
    query_count: Arc<AtomicU64>,
    bytes_received: Arc<AtomicU64>,
    bytes_sent: Arc<AtomicU64>,
    xact_time: Arc<AtomicU64>,
    query_time: Arc<AtomicU64>,
    wait_time: Arc<AtomicU64>,
    errors: Arc<AtomicU64>,
}

/// Internal address stats
#[derive(Debug, Clone, Default)]
pub struct AddressStats {
    total: AddressStatFields,

    current: AddressStatFields,

    averages: AddressStatFields,

    // Determines if the averages have been updated since the last time they were reported
    pub averages_updated: Arc<AtomicBool>,
}

impl IntoIterator for AddressStats {
    type Item = (String, u64);
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        vec![
            (
                "total_xact_count".to_string(),
                self.total.xact_count.load(Ordering::Relaxed),
            ),
            (
                "total_query_count".to_string(),
                self.total.query_count.load(Ordering::Relaxed),
            ),
            (
                "total_received".to_string(),
                self.total.bytes_received.load(Ordering::Relaxed),
            ),
            (
                "total_sent".to_string(),
                self.total.bytes_sent.load(Ordering::Relaxed),
            ),
            (
                "total_xact_time".to_string(),
                self.total.xact_time.load(Ordering::Relaxed),
            ),
            (
                "total_query_time".to_string(),
                self.total.query_time.load(Ordering::Relaxed),
            ),
            (
                "total_wait_time".to_string(),
                self.total.wait_time.load(Ordering::Relaxed),
            ),
            (
                "total_errors".to_string(),
                self.total.errors.load(Ordering::Relaxed),
            ),
            (
                "avg_xact_count".to_string(),
                self.averages.xact_count.load(Ordering::Relaxed),
            ),
            (
                "avg_query_count".to_string(),
                self.averages.query_count.load(Ordering::Relaxed),
            ),
            (
                "avg_recv".to_string(),
                self.averages.bytes_received.load(Ordering::Relaxed),
            ),
            (
                "avg_sent".to_string(),
                self.averages.bytes_sent.load(Ordering::Relaxed),
            ),
            (
                "avg_errors".to_string(),
                self.averages.errors.load(Ordering::Relaxed),
            ),
            (
                "avg_xact_time".to_string(),
                self.averages.xact_time.load(Ordering::Relaxed),
            ),
            (
                "avg_query_time".to_string(),
                self.averages.query_time.load(Ordering::Relaxed),
            ),
            (
                "avg_wait_time".to_string(),
                self.averages.wait_time.load(Ordering::Relaxed),
            ),
        ]
        .into_iter()
    }
}

impl AddressStats {
    pub fn xact_count_add(&self) {
        self.total.xact_count.fetch_add(1, Ordering::Relaxed);
        self.current.xact_count.fetch_add(1, Ordering::Relaxed);
    }

    pub fn query_count_add(&self) {
        self.total.query_count.fetch_add(1, Ordering::Relaxed);
        self.current.query_count.fetch_add(1, Ordering::Relaxed);
    }

    pub fn bytes_received_add(&self, bytes: u64) {
        self.total
            .bytes_received
            .fetch_add(bytes, Ordering::Relaxed);
        self.current
            .bytes_received
            .fetch_add(bytes, Ordering::Relaxed);
    }

    pub fn bytes_sent_add(&self, bytes: u64) {
        self.total.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
        self.current.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
    }

    pub fn xact_time_add(&self, time: u64) {
        self.total.xact_time.fetch_add(time, Ordering::Relaxed);
        self.current.xact_time.fetch_add(time, Ordering::Relaxed);
    }

    pub fn query_time_add(&self, time: u64) {
        self.total.query_time.fetch_add(time, Ordering::Relaxed);
        self.current.query_time.fetch_add(time, Ordering::Relaxed);
    }

    pub fn wait_time_add(&self, time: u64) {
        self.total.wait_time.fetch_add(time, Ordering::Relaxed);
        self.current.wait_time.fetch_add(time, Ordering::Relaxed);
    }

    pub fn error(&self) {
        self.total.errors.fetch_add(1, Ordering::Relaxed);
        self.current.errors.fetch_add(1, Ordering::Relaxed);
    }

    pub fn update_averages(&self) {
        let stat_period_per_second = crate::stats::STAT_PERIOD / 1_000;

        // xact_count
        let current_xact_count = self.current.xact_count.load(Ordering::Relaxed);
        let current_xact_time = self.current.xact_time.load(Ordering::Relaxed);
        self.averages.xact_count.store(
            current_xact_count / stat_period_per_second,
            Ordering::Relaxed,
        );
        if current_xact_count == 0 {
            self.averages.xact_time.store(0, Ordering::Relaxed);
        } else {
            self.averages
                .xact_time
                .store(current_xact_time / current_xact_count, Ordering::Relaxed);
        }

        // query_count
        let current_query_count = self.current.query_count.load(Ordering::Relaxed);
        let current_query_time = self.current.query_time.load(Ordering::Relaxed);
        self.averages.query_count.store(
            current_query_count / stat_period_per_second,
            Ordering::Relaxed,
        );
        if current_query_count == 0 {
            self.averages.query_time.store(0, Ordering::Relaxed);
        } else {
            self.averages
                .query_time
                .store(current_query_time / current_query_count, Ordering::Relaxed);
        }

        // bytes_received
        let current_bytes_received = self.current.bytes_received.load(Ordering::Relaxed);
        self.averages.bytes_received.store(
            current_bytes_received / stat_period_per_second,
            Ordering::Relaxed,
        );

        // bytes_sent
        let current_bytes_sent = self.current.bytes_sent.load(Ordering::Relaxed);
        self.averages.bytes_sent.store(
            current_bytes_sent / stat_period_per_second,
            Ordering::Relaxed,
        );

        // wait_time
        let current_wait_time = self.current.wait_time.load(Ordering::Relaxed);
        self.averages.wait_time.store(
            current_wait_time / stat_period_per_second,
            Ordering::Relaxed,
        );

        // errors
        let current_errors = self.current.errors.load(Ordering::Relaxed);
        self.averages
            .errors
            .store(current_errors / stat_period_per_second, Ordering::Relaxed);
    }

    pub fn reset_current_counts(&self) {
        self.current.xact_count.store(0, Ordering::Relaxed);
        self.current.xact_time.store(0, Ordering::Relaxed);
        self.current.query_count.store(0, Ordering::Relaxed);
        self.current.query_time.store(0, Ordering::Relaxed);
        self.current.bytes_received.store(0, Ordering::Relaxed);
        self.current.bytes_sent.store(0, Ordering::Relaxed);
        self.current.wait_time.store(0, Ordering::Relaxed);
        self.current.errors.store(0, Ordering::Relaxed);
    }

    pub fn populate_row(&self, row: &mut Vec<String>) {
        for (_key, value) in self.clone() {
            row.push(value.to_string());
        }
    }
}
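A worked example of the arithmetic in `update_averages()` above, using the 15,000 ms `STAT_PERIOD` from stats.rs: the count-style averages are per-second rates over the window, while the time averages are per-item (total time divided by count). The counter values below are made up:

```rust
// Worked example of the averaging math in `update_averages()`.
fn main() {
    let stat_period_ms: u64 = 15_000;
    let window_secs = stat_period_ms / 1_000; // 15

    let current_query_count: u64 = 3_000; // queries seen in this window
    let current_query_time: u64 = 600_000; // total microseconds spent on them

    let avg_query_count = current_query_count / window_secs; // 200 queries/sec
    let avg_query_time = current_query_time / current_query_count; // 200 us/query

    assert_eq!(avg_query_count, 200);
    assert_eq!(avg_query_time, 200);
}
```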
src/stats/client.rs
@@ -1,204 +0,0 @@
use super::{get_reporter, Reporter};
use atomic_enum::atomic_enum;
use std::sync::atomic::*;
use std::sync::Arc;
use tokio::time::Instant;
/// The various states that a client can be in
#[atomic_enum]
#[derive(PartialEq)]
pub enum ClientState {
    Idle = 0,
    Waiting,
    Active,
}
impl std::fmt::Display for ClientState {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            ClientState::Idle => write!(f, "idle"),
            ClientState::Waiting => write!(f, "waiting"),
            ClientState::Active => write!(f, "active"),
        }
    }
}

#[derive(Debug, Clone)]
/// Information we keep track of which can be queried by SHOW CLIENTS
pub struct ClientStats {
    /// A random integer assigned to the client and used by stats to track the client
    client_id: i32,

    /// Data associated with the client, not writable, only set when we construct the ClientStat
    application_name: String,
    username: String,
    pool_name: String,
    connect_time: Instant,

    reporter: Reporter,

    /// Total time spent waiting for a connection from pool, measured in microseconds
    pub total_wait_time: Arc<AtomicU64>,

    /// Maximum time spent waiting for a connection from pool, measured in microseconds
    pub max_wait_time: Arc<AtomicU64>,

    // Time when the client started waiting for a connection from pool, measured in microseconds
    // We use connect_time as the reference point for this value
    // U64 can represent ~5850 centuries in microseconds, so we should be fine
    pub wait_start_us: Arc<AtomicU64>,

    /// Current state of the client
    pub state: Arc<AtomicClientState>,

    /// Number of transactions executed by this client
    pub transaction_count: Arc<AtomicU64>,

    /// Number of queries executed by this client
    pub query_count: Arc<AtomicU64>,

    /// Number of errors made by this client
    pub error_count: Arc<AtomicU64>,
}

impl Default for ClientStats {
    fn default() -> Self {
        ClientStats {
            client_id: 0,
            connect_time: Instant::now(),
            application_name: String::new(),
            username: String::new(),
            pool_name: String::new(),
            total_wait_time: Arc::new(AtomicU64::new(0)),
            max_wait_time: Arc::new(AtomicU64::new(0)),
            wait_start_us: Arc::new(AtomicU64::new(0)),
            state: Arc::new(AtomicClientState::new(ClientState::Idle)),
            transaction_count: Arc::new(AtomicU64::new(0)),
            query_count: Arc::new(AtomicU64::new(0)),
            error_count: Arc::new(AtomicU64::new(0)),
            reporter: get_reporter(),
        }
    }
}

impl ClientStats {
    pub fn new(
        client_id: i32,
        application_name: &str,
        username: &str,
        pool_name: &str,
        connect_time: Instant,
    ) -> Self {
        Self {
            client_id,
            connect_time,
            application_name: application_name.to_string(),
            username: username.to_string(),
            pool_name: pool_name.to_string(),
            ..Default::default()
        }
    }

    /// Reports a client is disconnecting from the pooler and
    /// update metrics on the corresponding pool.
    pub fn disconnect(&self) {
        self.reporter.client_disconnecting(self.client_id);
    }

    /// Register a client with the stats system. The stats system uses client_id
    /// to track and aggregate statistics from all sources that relate to that client.
    pub fn register(&self, stats: Arc<ClientStats>) {
        self.reporter.client_register(self.client_id, stats);
        self.state.store(ClientState::Idle, Ordering::Relaxed);
    }

    /// Reports a client is done querying the server and is no longer assigned a server connection
    pub fn idle(&self) {
        self.state.store(ClientState::Idle, Ordering::Relaxed);
    }

    /// Reports a client is waiting for a connection
    pub fn waiting(&self) {
        let wait_start = self.connect_time.elapsed().as_micros() as u64;

        self.wait_start_us.store(wait_start, Ordering::Relaxed);
        self.state.store(ClientState::Waiting, Ordering::Relaxed);
    }

    /// Reports a client is done waiting for a connection and is about to query the server.
    pub fn active(&self) {
        self.state.store(ClientState::Active, Ordering::Relaxed);
    }

    /// Reports a client has failed to obtain a connection from a connection pool
    pub fn checkout_error(&self) {
        self.state.store(ClientState::Idle, Ordering::Relaxed);
        self.update_wait_times();
    }

    /// Reports a client has succeeded in obtaining a connection from a connection pool
    pub fn checkout_success(&self) {
        self.state.store(ClientState::Active, Ordering::Relaxed);
        self.update_wait_times();
    }

    /// Reports a client has had the server assigned to it be banned
    pub fn ban_error(&self) {
        self.state.store(ClientState::Idle, Ordering::Relaxed);
        self.error_count.fetch_add(1, Ordering::Relaxed);
    }

    fn update_wait_times(&self) {
        if self.wait_start_us.load(Ordering::Relaxed) == 0 {
            return;
        }

        let wait_time_us = self.get_current_wait_time_us();
        self.total_wait_time
            .fetch_add(wait_time_us, Ordering::Relaxed);
        self.max_wait_time
            .fetch_max(wait_time_us, Ordering::Relaxed);
        self.wait_start_us.store(0, Ordering::Relaxed);
    }

    pub fn get_current_wait_time_us(&self) -> u64 {
        let wait_start_us = self.wait_start_us.load(Ordering::Relaxed);
        let microseconds_since_connection_epoch = self.connect_time.elapsed().as_micros() as u64;
        if wait_start_us == 0 || microseconds_since_connection_epoch < wait_start_us {
            return 0;
        }
        microseconds_since_connection_epoch - wait_start_us
    }

    /// Report a query executed by a client against a server
    pub fn query(&self) {
        self.query_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Report a transaction executed by a client against a server.
    /// Each individual query outside a transaction is reported as a transaction.
    /// Only the initial BEGIN is counted as a transaction; the queries within it
    /// do not count as transactions.
    pub fn transaction(&self) {
        self.transaction_count.fetch_add(1, Ordering::Relaxed);
    }

    // Helper methods for show clients
    pub fn connect_time(&self) -> Instant {
        self.connect_time
    }

    pub fn client_id(&self) -> i32 {
        self.client_id
    }

    pub fn application_name(&self) -> String {
        self.application_name.clone()
    }

    pub fn username(&self) -> String {
        self.username.clone()
    }

    pub fn pool_name(&self) -> String {
        self.pool_name.clone()
    }
}
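The wait-time bookkeeping in `waiting()` and `get_current_wait_time_us()` above stores timestamps as microsecond offsets from the client's connect time, so a single `AtomicU64` can mean both "not waiting" (0) and "waiting since this offset". A standalone sketch of that trick, using `std` types in place of the tokio `Instant`:

```rust
// Standalone sketch of the connect-time-epoch trick used above.
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};

fn main() {
    let connect_time = Instant::now();
    let wait_start_us = AtomicU64::new(0); // 0 means "not waiting"

    // Client starts waiting for a pooled connection: record the offset
    // from the connect-time epoch.
    std::thread::sleep(Duration::from_millis(1));
    wait_start_us.store(connect_time.elapsed().as_micros() as u64, Ordering::Relaxed);

    std::thread::sleep(Duration::from_millis(5));

    // Later: how long has it been waiting?
    let start = wait_start_us.load(Ordering::Relaxed);
    let now = connect_time.elapsed().as_micros() as u64;
    let waited_us = if start == 0 || now < start { 0 } else { now - start };
    println!("waited {} us", waited_us);
}
```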
src/stats/pool.rs
@@ -1,154 +0,0 @@
|
||||
use log::debug;
|
||||
|
||||
use super::{ClientState, ServerState};
|
||||
use crate::{config::PoolMode, messages::DataType, pool::PoolIdentifier};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::atomic::*;
|
||||
|
||||
use crate::pool::get_all_pools;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
/// A struct that holds information about a Pool .
|
||||
pub struct PoolStats {
|
||||
pub identifier: PoolIdentifier,
|
||||
pub mode: PoolMode,
|
||||
pub cl_idle: u64,
|
||||
pub cl_active: u64,
|
||||
pub cl_waiting: u64,
|
||||
pub cl_cancel_req: u64,
|
||||
pub sv_active: u64,
|
||||
pub sv_idle: u64,
|
||||
pub sv_used: u64,
|
||||
pub sv_tested: u64,
|
||||
pub sv_login: u64,
|
||||
pub maxwait: u64,
|
||||
}
|
||||
impl PoolStats {
|
||||
pub fn new(identifier: PoolIdentifier, mode: PoolMode) -> Self {
|
||||
PoolStats {
|
||||
identifier,
|
||||
mode,
|
||||
cl_idle: 0,
|
||||
cl_active: 0,
|
||||
cl_waiting: 0,
|
||||
cl_cancel_req: 0,
|
||||
sv_active: 0,
|
||||
sv_idle: 0,
|
||||
sv_used: 0,
|
||||
sv_tested: 0,
|
||||
sv_login: 0,
|
||||
maxwait: 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn construct_pool_lookup() -> HashMap<PoolIdentifier, PoolStats> {
|
||||
let mut map: HashMap<PoolIdentifier, PoolStats> = HashMap::new();
|
||||
let client_map = super::get_client_stats();
|
||||
let server_map = super::get_server_stats();
|
||||
|
||||
for (identifier, pool) in get_all_pools() {
|
||||
map.insert(
|
||||
identifier.clone(),
|
||||
PoolStats::new(identifier, pool.settings.pool_mode),
|
||||
);
|
||||
}
|
||||
|
||||
for client in client_map.values() {
|
||||
match map.get_mut(&PoolIdentifier {
|
||||
db: client.pool_name(),
|
||||
user: client.username(),
|
||||
}) {
|
||||
Some(pool_stats) => {
|
||||
match client.state.load(Ordering::Relaxed) {
|
||||
ClientState::Active => pool_stats.cl_active += 1,
|
||||
ClientState::Idle => pool_stats.cl_idle += 1,
|
||||
ClientState::Waiting => pool_stats.cl_waiting += 1,
|
||||
}
|
||||
let wait_start_us = client.wait_start_us.load(Ordering::Relaxed);
|
||||
if wait_start_us > 0 {
|
||||
let wait_time_us = client.get_current_wait_time_us();
|
||||
pool_stats.maxwait = std::cmp::max(pool_stats.maxwait, wait_time_us);
|
||||
}
|
||||
}
|
||||
None => debug!("Client from an obselete pool"),
|
||||
}
|
||||
}
|
||||
|
||||
for server in server_map.values() {
|
||||
match map.get_mut(&PoolIdentifier {
|
||||
db: server.pool_name(),
|
||||
user: server.username(),
|
||||
}) {
|
||||
Some(pool_stats) => match server.state.load(Ordering::Relaxed) {
|
||||
ServerState::Active => pool_stats.sv_active += 1,
|
||||
ServerState::Idle => pool_stats.sv_idle += 1,
|
||||
ServerState::Login => pool_stats.sv_login += 1,
|
||||
ServerState::Tested => pool_stats.sv_tested += 1,
|
||||
},
|
||||
None => debug!("Server from an obselete pool"),
|
||||
}
|
||||
}
|
||||
|
||||
map
|
||||
}
|
||||
|
||||
pub fn generate_header() -> Vec<(&'static str, DataType)> {
|
||||
vec![
|
||||
("database", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("pool_mode", DataType::Text),
|
||||
("cl_idle", DataType::Numeric),
|
||||
("cl_active", DataType::Numeric),
|
||||
("cl_waiting", DataType::Numeric),
|
||||
("cl_cancel_req", DataType::Numeric),
|
||||
("sv_active", DataType::Numeric),
|
||||
("sv_idle", DataType::Numeric),
|
||||
("sv_used", DataType::Numeric),
|
||||
("sv_tested", DataType::Numeric),
|
||||
("sv_login", DataType::Numeric),
|
||||
("maxwait", DataType::Numeric),
|
||||
("maxwait_us", DataType::Numeric),
|
||||
]
|
||||
}
|
||||
|
||||
pub fn generate_row(&self) -> Vec<String> {
|
||||
vec![
|
||||
self.identifier.db.clone(),
|
||||
self.identifier.user.clone(),
|
||||
self.mode.to_string(),
|
||||
self.cl_idle.to_string(),
|
||||
self.cl_active.to_string(),
|
||||
self.cl_waiting.to_string(),
|
||||
self.cl_cancel_req.to_string(),
|
||||
self.sv_active.to_string(),
|
||||
self.sv_idle.to_string(),
|
||||
self.sv_used.to_string(),
|
||||
self.sv_tested.to_string(),
|
||||
self.sv_login.to_string(),
|
||||
(self.maxwait / 1_000_000).to_string(),
|
||||
(self.maxwait % 1_000_000).to_string(),
|
||||
]
|
||||
}
|
||||
}

impl IntoIterator for PoolStats {
    type Item = (String, u64);
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        vec![
            ("cl_idle".to_string(), self.cl_idle),
            ("cl_active".to_string(), self.cl_active),
            ("cl_waiting".to_string(), self.cl_waiting),
            ("cl_cancel_req".to_string(), self.cl_cancel_req),
            ("sv_active".to_string(), self.sv_active),
            ("sv_idle".to_string(), self.sv_idle),
            ("sv_used".to_string(), self.sv_used),
            ("sv_tested".to_string(), self.sv_tested),
            ("sv_login".to_string(), self.sv_login),
            ("maxwait".to_string(), self.maxwait / 1_000_000),
            ("maxwait_us".to_string(), self.maxwait % 1_000_000),
        ]
        .into_iter()
    }
}
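
The IntoIterator impl flattens the numeric counters into (name, value) pairs, which is handy for any consumer that wants to walk all gauges uniformly, such as an admin command or a metrics exporter. A small sketch, assuming only what the impl above provides:

// Sketch: print every counter; `stats` is consumed via the IntoIterator impl.
fn dump_pool_counters(stats: PoolStats) {
    for (name, value) in stats {
        println!("{}: {}", name, value);
    }
}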

@@ -1,229 +0,0 @@
use super::AddressStats;
use super::{get_reporter, Reporter};
use crate::config::Address;
use atomic_enum::atomic_enum;
use parking_lot::RwLock;
use std::sync::atomic::*;
use std::sync::Arc;
use tokio::time::Instant;

/// The various states that a server can be in
#[atomic_enum]
#[derive(PartialEq)]
pub enum ServerState {
    Login = 0,
    Active,
    Tested,
    Idle,
}

impl std::fmt::Display for ServerState {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            ServerState::Login => write!(f, "login"),
            ServerState::Active => write!(f, "active"),
            ServerState::Tested => write!(f, "tested"),
            ServerState::Idle => write!(f, "idle"),
        }
    }
}
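
The #[atomic_enum] attribute generates an AtomicServerState type so the state can be shared and mutated across tasks without a lock; the atomic_enum crate exposes new/load/store mirroring the std atomics. A short sketch under that assumption:

// Sketch: atomic state transitions via the generated AtomicServerState.
fn demo_state_transitions() {
    use std::sync::atomic::Ordering;

    let state = AtomicServerState::new(ServerState::Login);
    state.store(ServerState::Active, Ordering::Relaxed);
    assert!(state.load(Ordering::Relaxed) == ServerState::Active);
    // The Display impl above renders this as "active" in SHOW SERVERS.
    println!("{}", state.load(Ordering::Relaxed));
}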

/// Information we keep track of which can be queried by SHOW SERVERS
#[derive(Debug, Clone)]
pub struct ServerStats {
    /// A random integer assigned to the server and used by stats to track the server
    server_id: i32,

    /// Context information, only to be read
    address: Address,
    connect_time: Instant,

    reporter: Reporter,

    /// Data
    pub application_name: Arc<RwLock<String>>,
    pub state: Arc<AtomicServerState>,
    pub bytes_sent: Arc<AtomicU64>,
    pub bytes_received: Arc<AtomicU64>,
    pub transaction_count: Arc<AtomicU64>,
    pub query_count: Arc<AtomicU64>,
    pub error_count: Arc<AtomicU64>,
    pub prepared_hit_count: Arc<AtomicU64>,
    pub prepared_miss_count: Arc<AtomicU64>,
    pub prepared_eviction_count: Arc<AtomicU64>,
    pub prepared_cache_size: Arc<AtomicU64>,
}

impl Default for ServerStats {
    fn default() -> Self {
        ServerStats {
            server_id: 0,
            application_name: Arc::new(RwLock::new(String::new())),
            address: Address::default(),
            connect_time: Instant::now(),
            state: Arc::new(AtomicServerState::new(ServerState::Login)),
            bytes_sent: Arc::new(AtomicU64::new(0)),
            bytes_received: Arc::new(AtomicU64::new(0)),
            transaction_count: Arc::new(AtomicU64::new(0)),
            query_count: Arc::new(AtomicU64::new(0)),
            error_count: Arc::new(AtomicU64::new(0)),
            reporter: get_reporter(),
            prepared_hit_count: Arc::new(AtomicU64::new(0)),
            prepared_miss_count: Arc::new(AtomicU64::new(0)),
            prepared_eviction_count: Arc::new(AtomicU64::new(0)),
            prepared_cache_size: Arc::new(AtomicU64::new(0)),
        }
    }
}

impl ServerStats {
    pub fn new(address: Address, connect_time: Instant) -> Self {
        Self {
            address,
            connect_time,
            server_id: rand::random::<i32>(),
            ..Default::default()
        }
    }

    pub fn server_id(&self) -> i32 {
        self.server_id
    }

    /// Register a server connection with the stats system. The stats system uses server_id
    /// to track and aggregate statistics from all sources that relate to that server.
    // Delegates to reporter
    pub fn register(&self, stats: Arc<ServerStats>) {
        self.reporter.server_register(self.server_id, stats);
        self.login();
    }

    /// Reports a server connection is no longer assigned to a client
    /// and is available for the next client to pick it up
    pub fn idle(&self) {
        self.state.store(ServerState::Idle, Ordering::Relaxed);
    }

    /// Reports a server connection is disconnecting from the pooler.
    /// Also updates metrics on the pool regarding server usage.
    pub fn disconnect(&self) {
        self.reporter.server_disconnecting(self.server_id);
    }

    /// Reports a server connection is being tested before being given to a client.
    pub fn tested(&self) {
        self.set_undefined_application();
        self.state.store(ServerState::Tested, Ordering::Relaxed);
    }

    /// Reports a server connection is attempting to login.
    pub fn login(&self) {
        self.state.store(ServerState::Login, Ordering::Relaxed);
        self.set_undefined_application();
    }

    /// Reports a server connection has been assigned to a client that
    /// is about to query the server
    pub fn active(&self, application_name: String) {
        self.state.store(ServerState::Active, Ordering::Relaxed);
        self.set_application(application_name);
    }

    pub fn address_stats(&self) -> Arc<AddressStats> {
        self.address.stats.clone()
    }

    pub fn check_address_stat_average_is_updated_status(&self) -> bool {
        self.address.stats.averages_updated.load(Ordering::Relaxed)
    }

    pub fn set_address_stat_average_is_updated_status(&self, is_checked: bool) {
        self.address
            .stats
            .averages_updated
            .store(is_checked, Ordering::Relaxed);
    }

    // Helper methods for show_servers
    pub fn pool_name(&self) -> String {
        self.address.pool_name.clone()
    }

    pub fn username(&self) -> String {
        self.address.username.clone()
    }

    pub fn address_name(&self) -> String {
        self.address.name()
    }

    pub fn connect_time(&self) -> Instant {
        self.connect_time
    }

    fn set_application(&self, name: String) {
        let mut application_name = self.application_name.write();
        *application_name = name;
    }

    fn set_undefined_application(&self) {
        self.set_application(String::from("Undefined"))
    }

    pub fn checkout_time(&self, microseconds: u64, application_name: String) {
        // Update server stats and address aggregation stats
        self.set_application(application_name);
        self.address.stats.wait_time_add(microseconds);
    }

    /// Report a query executed by a client against a server
    pub fn query(&self, milliseconds: u64, application_name: &str) {
        self.set_application(application_name.to_string());
        self.address.stats.query_count_add();
        self.address.stats.query_time_add(milliseconds);
        self.query_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Report a transaction executed by a client against a server.
    /// Each individual query executed outside a transaction is reported as its
    /// own transaction. Within an explicit transaction, only the initial BEGIN
    /// is counted; the queries inside it are not counted as transactions.
    pub fn transaction(&self, application_name: &str) {
        self.set_application(application_name.to_string());

        self.transaction_count.fetch_add(1, Ordering::Relaxed);
        self.address.stats.xact_count_add();
    }

    /// Report data sent to a server
    pub fn data_sent(&self, amount_bytes: usize) {
        self.bytes_sent
            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
        self.address.stats.bytes_sent_add(amount_bytes as u64);
    }

    /// Report data received from a server
    pub fn data_received(&self, amount_bytes: usize) {
        self.bytes_received
            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
        self.address.stats.bytes_received_add(amount_bytes as u64);
    }

    /// Report a prepared statement that already exists on the server.
    pub fn prepared_cache_hit(&self) {
        self.prepared_hit_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Report a prepared statement that does not exist on the server yet.
    pub fn prepared_cache_miss(&self) {
        self.prepared_miss_count.fetch_add(1, Ordering::Relaxed);
    }

    pub fn prepared_cache_add(&self) {
        self.prepared_cache_size.fetch_add(1, Ordering::Relaxed);
    }

    pub fn prepared_cache_remove(&self) {
        self.prepared_eviction_count.fetch_add(1, Ordering::Relaxed);
        self.prepared_cache_size.fetch_sub(1, Ordering::Relaxed);
    }
}
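
Pieced together, the methods above trace the lifecycle of one server connection. A sketch of the typical call sequence (the address, timings, and application name are placeholder values, not taken from the original code):

// Sketch: how a server connection might drive its stats over its lifetime.
fn demo_server_stats_lifecycle() {
    let stats = Arc::new(ServerStats::new(Address::default(), Instant::now()));
    stats.register(stats.clone());    // state -> Login, now visible to SHOW SERVERS
    stats.active("psql".to_string()); // checked out by a client
    stats.query(12, "psql");          // one query that took 12 ms
    stats.transaction("psql");        // counted once per BEGIN
    stats.idle();                     // returned to the pool
    stats.disconnect();               // connection closed, reporter notified
}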
src/tls.rs
@@ -1,87 +0,0 @@
// Stream wrapper.

use rustls_pemfile::{certs, read_one, Item};
use std::iter;
use std::path::Path;
use std::sync::Arc;
use std::time::SystemTime;
use tokio_rustls::rustls::{
    self,
    client::{ServerCertVerified, ServerCertVerifier},
    Certificate, PrivateKey, ServerName,
};
use tokio_rustls::TlsAcceptor;

use crate::config::get_config;
use crate::errors::Error;

// TLS
pub fn load_certs(path: &Path) -> std::io::Result<Vec<Certificate>> {
    certs(&mut std::io::BufReader::new(std::fs::File::open(path)?))
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid cert"))
        .map(|mut certs| certs.drain(..).map(Certificate).collect())
}

pub fn load_keys(path: &Path) -> std::io::Result<Vec<PrivateKey>> {
    let mut rd = std::io::BufReader::new(std::fs::File::open(path)?);

    iter::from_fn(|| read_one(&mut rd).transpose())
        .filter_map(|item| match item {
            Err(err) => Some(Err(err)),
            Ok(Item::RSAKey(key)) => Some(Ok(PrivateKey(key))),
            Ok(Item::ECKey(key)) => Some(Ok(PrivateKey(key))),
            Ok(Item::PKCS8Key(key)) => Some(Ok(PrivateKey(key))),
            _ => None,
        })
        .collect()
}

pub struct Tls {
    pub acceptor: TlsAcceptor,
}

impl Tls {
    pub fn new() -> Result<Self, Error> {
        let config = get_config();

        let certs = match load_certs(Path::new(&config.general.tls_certificate.unwrap())) {
            Ok(certs) => certs,
            Err(_) => return Err(Error::TlsError),
        };

        let mut keys = match load_keys(Path::new(&config.general.tls_private_key.unwrap())) {
            Ok(keys) => keys,
            Err(_) => return Err(Error::TlsError),
        };

        let config = match rustls::ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_single_cert(certs, keys.remove(0))
            .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidInput, err))
        {
            Ok(c) => c,
            Err(_) => return Err(Error::TlsError),
        };

        Ok(Tls {
            acceptor: TlsAcceptor::from(Arc::new(config)),
        })
    }
}
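
The acceptor is what the connection path would use to upgrade an accepted TCP socket once a client asks for TLS. A minimal sketch, assuming a tokio TcpStream and the Error type above; the accept_tls function itself is illustrative, not part of the original file:

// Sketch: upgrade an accepted connection to TLS (error detail elided).
async fn accept_tls(tls: &Tls, stream: tokio::net::TcpStream) -> Result<(), Error> {
    let _tls_stream = tls.acceptor.accept(stream).await.map_err(|_| Error::TlsError)?;
    // ... continue the Postgres startup sequence over the encrypted stream
    Ok(())
}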

pub struct NoCertificateVerification;

impl ServerCertVerifier for NoCertificateVerification {
    fn verify_server_cert(
        &self,
        _end_entity: &Certificate,
        _intermediates: &[Certificate],
        _server_name: &ServerName,
        _scts: &mut dyn Iterator<Item = &[u8]>,
        _ocsp_response: &[u8],
        _now: SystemTime,
    ) -> Result<ServerCertVerified, rustls::Error> {
        Ok(ServerCertVerified::assertion())
    }
}
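
NoCertificateVerification is a deliberate no-op verifier: every server certificate is accepted, which is useful for test setups with self-signed certificates but unsafe against man-in-the-middle attacks in production. A sketch of how such a verifier is typically plugged into a rustls client config (API shape as of rustls 0.20/0.21; this helper is an assumption, not part of the original file):

// Sketch only: a client config that skips certificate verification.
fn insecure_client_config() -> rustls::ClientConfig {
    rustls::ClientConfig::builder()
        .with_safe_defaults()
        .with_custom_certificate_verifier(Arc::new(NoCertificateVerification))
        .with_no_client_auth()
}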

@@ -1,34 +0,0 @@
GREEN="\033[0;32m"
RED="\033[0;31m"
BLUE="\033[0;34m"
RESET="\033[0m"

cd tests/docker/
docker compose kill main || true
docker compose build main
docker compose down
docker compose up -d

# Wait for the container to start
while ! docker compose exec main ls; do
  echo "Waiting for test environment to start"
  sleep 1
done

echo "==================================="
docker compose exec -e LOG_LEVEL=error -d main toxiproxy-server
docker compose exec --workdir /app main cargo build
docker compose exec -d --workdir /app main ./target/debug/pgcat ./.circleci/pgcat.toml
docker compose exec --workdir /app/tests/ruby main bundle install
docker compose exec --workdir /app/tests/python main pip3 install -r requirements.txt
echo "Interactive test environment ready"
echo "To run integration tests, you can use the following commands:"
echo -e "  ${BLUE}Ruby:   ${RED}cd /app/tests/ruby && bundle exec ruby tests.rb --format documentation${RESET}"
echo -e "  ${BLUE}Python: ${RED}cd /app/ && pytest${RESET}"
echo -e "  ${BLUE}Rust:   ${RED}cd /app/tests/rust && cargo run${RESET}"
echo -e "  ${BLUE}Go:     ${RED}cd /app/tests/go && /usr/local/go/bin/go test${RESET}"
echo "The test source code is linked directly to the source code in the container, so you can modify the code and run the tests again."
echo "You can rebuild PgCat from within the container by running"
echo -e "  ${GREEN}cargo build${RESET}"
echo "and then run the tests again"
echo "==================================="
docker compose exec --workdir /app/tests main bash
@@ -1,13 +0,0 @@
FROM rust:bullseye

COPY --from=sclevine/yj /bin/yj /bin/yj
RUN /bin/yj -h
RUN apt-get update && apt-get install llvm-11 psmisc postgresql-contrib postgresql-client ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 -y
RUN cargo install cargo-binutils rustfilt
RUN rustup component add llvm-tools-preview
RUN sudo gem install bundler
RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i toxiproxy-2.4.0.deb
RUN wget -O go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    sudo tar -C /usr/local -xzf go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    rm go1.21.3.linux-$(dpkg --print-architecture).tar.gz
@@ -1,54 +0,0 @@
services:
  pg1:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
    command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg2:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
    command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg3:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
    command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg4:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
    command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg5:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
    command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-p", "10432"]
  main:
    build: .
    command: ["bash", "/app/tests/docker/run.sh"]
    environment:
      - INTERACTIVE_TEST_ENVIRONMENT=true
    volumes:
      - ../../:/app/
      - /app/target/
@@ -1,69 +0,0 @@
#!/bin/bash

rm -rf /app/target/ || true
rm /app/*.profraw || true
rm /app/pgcat.profdata || true
rm -rf /app/cov || true

# Prepares the interactive test environment
#
if [ -n "$INTERACTIVE_TEST_ENVIRONMENT" ]; then
  ports=(5432 7432 8432 9432 10432)
  for port in "${ports[@]}"; do
    is_it_up=0
    attempts=0
    while [ $is_it_up -eq 0 ]; do
      PGPASSWORD=postgres psql -h 127.0.0.1 -p $port -U postgres -c '\q' > /dev/null 2>&1
      if [ $? -eq 0 ]; then
        echo "PostgreSQL on port $port is up."
        is_it_up=1
      else
        attempts=$((attempts+1))
        if [ $attempts -gt 10 ]; then
          echo "PostgreSQL on port $port is down, giving up."
          exit 1
        fi
        echo "PostgreSQL on port $port is down, waiting for it to start."
        sleep 1
      fi
    done
  done
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  sleep 100000000000000000
  exit 0
fi

export LLVM_PROFILE_FILE="/app/pgcat-%m-%p.profraw"
export RUSTC_BOOTSTRAP=1
export CARGO_INCREMENTAL=0
export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Cinstrument-coverage"
export RUSTDOCFLAGS="-Cpanic=abort"

cd /app/
cargo clean
cargo build
cargo test --tests

bash .circleci/run_tests.sh

TEST_OBJECTS=$( \
  for file in $(cargo test --no-run 2>&1 | grep "target/debug/deps/pgcat-[[:alnum:]]\+" -o); \
  do \
    printf "%s %s " --object $file; \
  done \
)

echo "Generating coverage report"

rust-profdata merge -sparse /app/pgcat-*.profraw -o /app/pgcat.profdata

bash -c "rust-cov export -ignore-filename-regex='rustc|registry' -Xdemangler=rustfilt -instr-profile=/app/pgcat.profdata $TEST_OBJECTS --object ./target/debug/pgcat --format lcov > ./lcov.info"

genhtml lcov.info --title "PgCat Code Coverage" --css-file ./cov-style.css --highlight --no-function-coverage --ignore-errors source --legend --output-directory cov --prefix $(pwd)

rm /app/*.profraw
rm /app/pgcat.profdata
@@ -1,5 +0,0 @@
module pgcat

go 1.21

require github.com/lib/pq v1.10.9
@@ -1,2 +0,0 @@
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
@@ -1,162 +0,0 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = "${PORT}"

# Whether to enable the Prometheus exporter or not.
enable_prometheus_exporter = true

# Port the Prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 1000

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep connections available for immediate re-use, without running a healthcheck query on them (ms).
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# If we should log client connections
log_client_connections = false

# If we should log client disconnections
log_client_disconnections = false

# Reload config automatically if it changes.
autoreload = 15000

server_round_robin = false

# TLS
tls_certificate = "../../.circleci/server.cert"
tls_private_key = "../../.circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
admin_password = "admin_pass"

# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as the database name when connecting.
# For the example below, a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db".
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only, without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Prepared statements cache size.
prepared_statements_cache_size = 500

# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 5
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"

[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
query_parser_read_write_splitting = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
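
With this config loaded, a client connects to PgCat exactly as it would to Postgres, using the pool name as the database name. A hedged sketch using the tokio-postgres crate, with credentials taken from the example above and the port left as a parameter:

// Sketch: connect through PgCat to the sharded_db pool (TLS omitted for brevity).
use tokio_postgres::NoTls;

async fn connect_through_pgcat(port: u16) -> Result<(), tokio_postgres::Error> {
    let (client, connection) = tokio_postgres::connect(
        &format!(
            "host=127.0.0.1 port={} user=sharding_user password=sharding_user dbname=sharded_db",
            port
        ),
        NoTls,
    )
    .await?;
    tokio::spawn(connection); // drive the connection in the background

    let row = client.query_one("SELECT 1", &[]).await?;
    assert_eq!(row.get::<_, i32>(0), 1);
    Ok(())
}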
@@ -1,52 +0,0 @@
package pgcat

import (
	"context"
	"database/sql"
	"fmt"
	_ "github.com/lib/pq"
	"testing"
)

func Test(t *testing.T) {
	t.Cleanup(setup(t))
	t.Run("Named parameterized prepared statement works", namedParameterizedPreparedStatement)
	t.Run("Unnamed parameterized prepared statement works", unnamedParameterizedPreparedStatement)
}

func namedParameterizedPreparedStatement(t *testing.T) {
	db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
	if err != nil {
		t.Fatalf("could not open connection: %+v", err)
	}

	stmt, err := db.Prepare("SELECT $1")

	if err != nil {
		t.Fatalf("could not prepare: %+v", err)
	}

	for i := 0; i < 100; i++ {
		rows, err := stmt.Query(1)
		if err != nil {
			t.Fatalf("could not query: %+v", err)
		}
		_ = rows.Close()
	}
}

func unnamedParameterizedPreparedStatement(t *testing.T) {
	db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
	if err != nil {
		t.Fatalf("could not open connection: %+v", err)
	}

	for i := 0; i < 100; i++ {
		// Under the hood QueryContext generates an unnamed parameterized prepared statement
		rows, err := db.QueryContext(context.Background(), "SELECT $1", 1)
		if err != nil {
			t.Fatalf("could not query: %+v", err)
		}
		_ = rows.Close()
	}
}
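
The same prepared-statement behavior can be exercised from Rust: tokio-postgres issues named prepared statements under the hood, so the equivalent of the Go loop above looks roughly like this (sketch only; connection setup as in the earlier example):

// Sketch: prepared statements through PgCat with tokio-postgres.
async fn prepared_statement_roundtrip(
    client: &tokio_postgres::Client,
) -> Result<(), tokio_postgres::Error> {
    let stmt = client.prepare("SELECT $1::INT4").await?; // parsed once, reused below
    for i in 0..100i32 {
        let row = client.query_one(&stmt, &[&i]).await?;
        assert_eq!(row.get::<_, i32>(0), i);
    }
    Ok(())
}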
@@ -1,81 +0,0 @@
package pgcat

import (
	"context"
	"database/sql"
	_ "embed"
	"fmt"
	"math/rand"
	"os"
	"os/exec"
	"strings"
	"testing"
	"time"
)

//go:embed pgcat.toml
var pgcatCfg string

var port = rand.Intn(32760-20000) + 20000

func setup(t *testing.T) func() {
	cfg, err := os.CreateTemp("/tmp", "pgcat_cfg_*.toml")
	if err != nil {
		t.Fatalf("could not create temp file: %+v", err)
	}

	pgcatCfg = strings.Replace(pgcatCfg, "\"${PORT}\"", fmt.Sprintf("%d", port), 1)

	_, err = cfg.Write([]byte(pgcatCfg))
	if err != nil {
		t.Fatalf("could not write temp file: %+v", err)
	}

	commandPath := "../../target/debug/pgcat"
	if os.Getenv("CARGO_TARGET_DIR") != "" {
		commandPath = os.Getenv("CARGO_TARGET_DIR") + "/debug/pgcat"
	}

	cmd := exec.Command(commandPath, cfg.Name())
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	go func() {
		err = cmd.Run()
		if err != nil {
			t.Errorf("could not run pgcat: %+v", err)
		}
	}()

	deadline, cancelFunc := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer cancelFunc()
	for {
		select {
		case <-deadline.Done():
			break
		case <-time.After(50 * time.Millisecond):
			db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=pgcat user=admin_user password=admin_pass sslmode=disable", port))
			if err != nil {
				continue
			}
			rows, err := db.QueryContext(deadline, "SHOW STATS")
			if err != nil {
				continue
			}
			_ = rows.Close()
			_ = db.Close()
			break
		}
		break
	}

	return func() {
		err := cmd.Process.Signal(os.Interrupt)
		if err != nil {
			t.Fatalf("could not interrupt pgcat: %+v", err)
		}
		err = os.Remove(cfg.Name())
		if err != nil {
			t.Fatalf("could not remove temp file: %+v", err)
		}
	}
}
@@ -1,39 +0,0 @@

-- \setrandom aid 1 :naccounts
\set aid random(1, 100000)
-- \setrandom bid 1 :nbranches
\set bid random(1, 100000)
-- \setrandom tid 1 :ntellers
\set tid random(1, 100000)
-- \setrandom delta -5000 5000
\set delta random(-5000,5000)

\set shard random(0, 2)

SET SHARD TO :shard;

SET SERVER ROLE TO 'auto';

BEGIN;

UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;

SELECT abalance FROM pgbench_accounts WHERE aid = :aid;

UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;

UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;

INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);

END;

SET SHARDING KEY TO :aid;

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;

SET SERVER ROLE TO 'replica';

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
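
The SET SHARD, SET SHARDING KEY, and SET SERVER ROLE statements in this benchmark are PgCat's custom routing commands; any driver can send them as plain SQL before the queries they should affect. A sketch in Rust, continuing the earlier tokio-postgres examples (simple_query is used because these commands return no rows; the key value is a placeholder):

// Sketch: steer subsequent queries to a shard, then to a replica.
async fn route_queries(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> {
    client.simple_query("SET SHARDING KEY TO '1234'").await?; // shard chosen by hashing the key
    client.simple_query("SET SERVER ROLE TO 'replica'").await?; // read from a replica
    client
        .simple_query("SELECT abalance FROM pgbench_accounts WHERE aid = 1234")
        .await?;
    Ok(())
}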
tests/python/.gitignore (vendored)
@@ -1 +0,0 @@
venv/
@@ -1,3 +0,0 @@
pytest
psycopg2==2.9.3
psutil==5.9.1