Mirror of https://github.com/postgresml/pgcat.git (synced 2026-03-23 01:16:30 +00:00)

Compare commits: v0.1.0-alp...levkk-drop (80 commits)
| SHA1 |
|---|
| d6a13d047d |
| 5948fef6cf |
| 790898c20e |
| d64f6793c1 |
| cea35db35c |
| a3aefabb47 |
| 3285006440 |
| 52303cc808 |
| be254cedd9 |
| a5db6881b8 |
| f963b12821 |
| a262337ba5 |
| 014628d6e0 |
| 65c32ad9fb |
| 1b166b462d |
| 7592339092 |
| 3719c22322 |
| 106ebee71c |
| b79f55abd6 |
| b828e62408 |
| 499612dd76 |
| 5ac85eaadd |
| 20e8f9d74c |
| 1b648ca00e |
| 35381ba8fd |
| e591865d78 |
| 48cff1f955 |
| 8a06fc4047 |
| 14d4dc45f5 |
| 2ae4b438e3 |
| c5be5565a5 |
| eff8e3e229 |
| ae3db111ac |
| 8bcfbed574 |
| 773602dedf |
| 21bf07258c |
| 186f8be5b3 |
| 7667fefead |
| c11d595ac7 |
| 8f3202ed92 |
| eb58920870 |
| b974aacd71 |
| 7dfe59a91a |
| 5bcd3bf9c3 |
| f06f64119c |
| b93303eb83 |
| d865d9f9d8 |
| d3310a62c2 |
| d412238f47 |
| 7782933f59 |
| bac4e1f52c |
| 37e3a86881 |
| 61db13f614 |
| fe32b5ef17 |
| 54699222f8 |
| ccbca66e7a |
| df85139281 |
| 509e4815a3 |
| 5338ff2323 |
| 1ea0a7f332 |
| d1b86d363d |
| b309ead58f |
| 341ebf4123 |
| 35828a0a8c |
| 1e8fa110ae |
| d4186b7815 |
| aaeef69d59 |
| b21e0f4a7e |
| eb1473060e |
| 26f75f8d5d |
| 99d65fc475 |
| 206fdc9769 |
| f74101cdfe |
| 8e0682482d |
| 6db51b4a11 |
| a784883611 |
| 5972b6fa52 |
| b3c8ca4b8a |
| dce72ba262 |
| af1716bcd7 |
.circleci/config.yml
@@ -9,16 +9,20 @@ jobs:
    # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
    # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
    docker:
      - image: cimg/rust:1.58.1
      - image: levkk/pgcat-ci:latest
        environment:
          RUST_LOG: info
      - image: cimg/postgres:14.0
        auth:
          username: mydockerhub-user
          password: $DOCKERHUB_PASSWORD
          RUSTFLAGS: "-C instrument-coverage"
          LLVM_PROFILE_FILE: "pgcat-%m.profraw"
      - image: postgres:14
        # auth:
        #   username: mydockerhub-user
        #   password: $DOCKERHUB_PASSWORD
        environment:
          POSTGRES_USER: postgres
          POSTGRES_DB: postgres
          POSTGRES_PASSWORD: postgres
          POSTGRES_HOST_AUTH_METHOD: scram-sha-256
    # Add steps to the job
    # See: https://circleci.com/docs/2.0/configuration-reference/#steps
    steps:
@@ -30,16 +34,19 @@ jobs:
          command: "cargo fmt --check"
      - run:
          name: "Install dependencies"
          command: "sudo apt-get update && sudo apt-get install -y psmisc postgresql-contrib-12 postgresql-client-12 ruby ruby-dev libpq-dev"
          command: "sudo apt-get update && sudo apt-get install -y psmisc postgresql-contrib-12 postgresql-client-12 ruby ruby-dev libpq-dev python3 python3-pip lcov llvm-11 && sudo apt-get upgrade curl"
      - run:
          name: "Build"
          name: "Install rust tools"
          command: "cargo install cargo-binutils rustfilt && rustup component add llvm-tools-preview"
      - run:
          name: "Build"
          command: "cargo build"
      - run:
          name: "Test"
          command: "cargo test"
      - run:
          name: "Test end-to-end"
          command: "bash .circleci/run_tests.sh"
          name: "Tests"
          command: "cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"
      - store_artifacts:
          path: /tmp/cov
          destination: coverage-data
      - save_cache:
          key: cargo-lock-2-{{ checksum "Cargo.lock" }}
          paths:
.circleci/generate_coverage.sh (new executable file, 7 lines)
@@ -0,0 +1,7 @@
#!/bin/bash

rust-profdata merge -sparse pgcat-*.profraw -o pgcat.profdata

rust-cov export -ignore-filename-regex="rustc|registry" -Xdemangler=rustfilt -instr-profile=pgcat.profdata --object ./target/debug/pgcat --format lcov > ./lcov.info

genhtml lcov.info --output-directory /tmp/cov --prefix $(pwd)
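A minimal sketch of reproducing this coverage pipeline locally, assuming cargo-binutils, rustfilt, lcov, and the llvm-tools-preview component are installed as in Dockerfile.ci below; the flags mirror the environment set in the CircleCI config above:

```bash
# Build and test with LLVM source-based coverage instrumentation;
# %m in the profile name expands per instrumented binary, so parallel
# test processes write separate pgcat-*.profraw files.
export RUSTFLAGS="-C instrument-coverage"
export LLVM_PROFILE_FILE="pgcat-%m.profraw"
cargo build
cargo test

# Merge the raw profiles, export lcov data, and render HTML under /tmp/cov.
bash .circleci/generate_coverage.sh
```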
.circleci/pgcat.toml (new file, 145 lines)
@@ -0,0 +1,145 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable the prometheus exporter or not.
enable_prometheus_exporter = true

# Port on which the prometheus exporter listens.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 100

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 100

# How long to keep a connection available for immediate re-use without running a healthcheck query on it (ms).
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# Reload config automatically if it changes.
autoreload = true

# TLS
tls_certificate = ".circleci/server.cert"
tls_private_key = ".circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
admin_password = "admin_pass"

# Pool configs are structured as pool.<pool_name>.
# The pool_name is what clients use as the database name when connecting.
# For the example below, a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db".
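# For instance, with the host and port configured above (illustrative invocation):
#   psql "postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db"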
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only, without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"
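# A sketch of the custom protocol mentioned above, as a client would issue it
# (shard number and key are illustrative):
#   SET SHARDING KEY TO '1234'; -- routes by hashing the key with sharding_function
#   SET SHARD TO '1';           -- pins the transaction to shard 1 directly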

# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000
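# Worked example: with just the two users above, this PgCat process opens at most
# 9 + 21 = 30 server connections to any one database in the cluster.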

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
.circleci/run_tests.sh
@@ -3,54 +3,141 @@
set -e
set -o xtrace

psql -e -h 127.0.0.1 -p 5432 -U postgres -f tests/sharding/query_routing_setup.sql
# Start PgCat with a particular log level
# for inspection.
function start_pgcat() {
    kill -s SIGINT $(pgrep pgcat) || true
    RUST_LOG=${1} ./target/debug/pgcat .circleci/pgcat.toml &
    sleep 1
}

./target/debug/pgcat &
# Setup the database with shards and user
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f tests/sharding/query_routing_setup.sql

PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard0 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i

# Install Toxiproxy to simulate a downed/slow database
wget -O toxiproxy-2.1.4.deb https://github.com/Shopify/toxiproxy/releases/download/v2.1.4/toxiproxy_2.1.4_amd64.deb
sudo dpkg -i toxiproxy-2.1.4.deb

# Start Toxiproxy
toxiproxy-server &
sleep 1

# Setup PgBench
pgbench -i -h 127.0.0.1 -p 6432
# Create a database at port 5433, forward it to Postgres
toxiproxy-cli create -l 127.0.0.1:5433 -u 127.0.0.1:5432 postgres_replica

# Run it
pgbench -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol simple
start_pgcat "info"

# Extended protocol
pgbench -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended
# Check that prometheus is running
curl --fail localhost:9930/metrics

export PGPASSWORD=sharding_user
export PGDATABASE=sharded_db

# pgbench test
pgbench -U sharding_user -i -h 127.0.0.1 -p 6432
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol simple -f tests/pgbench/simple.sql
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended

# COPY TO STDOUT test
psql -h 127.0.0.1 -p 6432 -c 'COPY (SELECT * FROM pgbench_accounts LIMIT 15) TO STDOUT;' > /dev/null
psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'COPY (SELECT * FROM pgbench_accounts LIMIT 15) TO STDOUT;' > /dev/null

# Query cancellation test
(psql -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(5)' || true) &
(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Reload pool (closing unused server connections)
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD'

(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Sharding insert
psql -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_insert.sql
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_insert.sql

# Sharding select
psql -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_select.sql > /dev/null
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_select.sql > /dev/null

# Replica/primary selection & more sharding tests
psql -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_primary_replica.sql > /dev/null
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_primary_replica.sql > /dev/null

# Statement timeout tests
sed -i 's/statement_timeout = 0/statement_timeout = 100/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config
sleep 0.2

# This should timeout
(! psql -U sharding_user -e -h 127.0.0.1 -p 6432 -c 'select pg_sleep(0.5)')

# Disable statement timeout
sed -i 's/statement_timeout = 100/statement_timeout = 0/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config again

#
# ActiveRecord tests!
# ActiveRecord tests
#
cd tests/ruby
sudo gem install bundler
bundle install
ruby tests.rb
cd ../..

#
# Python tests
# These tests will start and stop the pgcat server so it will need to be restarted after the tests
#
pip3 install -r tests/python/requirements.txt
python3 tests/python/tests.py

start_pgcat "info"

# Admin tests
export PGPASSWORD=admin_pass
psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW CONFIG' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW LISTS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW POOLS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW VERSION' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c "SET client_encoding TO 'utf8'" > /dev/null # will ignore
(! psql -U admin_user -e -h 127.0.0.1 -p 6432 -d random_db -c 'SHOW STATS' > /dev/null)
export PGPASSWORD=sharding_user

# Start PgCat in debug to demonstrate failover better
start_pgcat "trace"

# Add latency to the replica at port 5433, above the healthcheck timeout
toxiproxy-cli toxic add -t latency -a latency=300 postgres_replica
sleep 1
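# For context (values from .circleci/pgcat.toml above): healthcheck_timeout is 100 ms,
# so 300 ms of injected latency makes the replica's `SELECT 1` health check time out;
# PgCat then bans the replica for ban_time (60 s) and falls back to the remaining targets.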

# Note the failover in the logs
timeout 5 psql -U sharding_user -e -h 127.0.0.1 -p 6432 <<-EOF
SELECT 1;
SELECT 1;
SELECT 1;
EOF

# Remove latency
toxiproxy-cli toxic remove --toxicName latency_downstream postgres_replica

start_pgcat "info"

cd ../../
# Test session mode (and config reload)
sed -i 's/pool_mode = "transaction"/pool_mode = "session"/' pgcat.toml
sed -i 's/pool_mode = "transaction"/pool_mode = "session"/' .circleci/pgcat.toml

# Reload config
# Reload config test
kill -SIGHUP $(pgrep pgcat)

sleep 1

# Prepared statements that will only work in session mode
pgbench -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol prepared
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol prepared

# Attempt clean shut down
killall pgcat -s SIGINT
.circleci/server.cert (new file, 21 lines)
@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUChIvUGFJGJe5EDch32rchqoxER0wDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA2MjcyMjI2MDZaFw0yMjA3
MjcyMjI2MDZaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDdTwrBzV1v79faVckFvIn/9V4fypYs4vDi3X+h3wGn
AjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzSioOv5zsOAvObwrnzbtKSwfs3aP5g
eEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwvGn3A+SVCLyPMTNwnem48+ONh2F9u
FHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHYM5Hx9tzc+NVYdERPtaVcX8ycw1Eh
9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe72loreiwrECUOLAnAfp9Xqc+rMPP
aLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE9nQvksjnAgMBAAGjUzBRMB0GA1Ud
DgQWBBQLDtzexqjx7xPtUZuZB/angU9oSDAfBgNVHSMEGDAWgBQLDtzexqjx7xPt
UZuZB/angU9oSDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC/
mxY/a/WeLENVj2Gg9EUH0CKzfqeTey1mb6YfPGxzrD7oq1m0Vn2MmTbjZrJgh/Ob
QckO3ElF4kC9+6XP+iDPmabGpjeLgllBboT5l2aqnD1syMrf61WPLzgRzRfplYGy
cjBQDDKPu8Lu0QRMWU28tHYN0bMxJoCuXysGGX5WsuFnKCA6f/V+nycJJXxJH3eB
eLjTueD9/RE3OXhi6m8A29Q1E9AE5EF4uRxYXrr91BmYnk4aFvSmBxhUEzE12eSN
lHB/uSc0+Dp+UVmVr6wW8AQfd16UBA0BUf3kSW3aSvirYPYH0rXiOOpEJgOwOMnR
f5+XAbN1Y+3OsFz/ZmP9
-----END CERTIFICATE-----
.circleci/server.key (new file, 28 lines)
@@ -0,0 +1,28 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDdTwrBzV1v79fa
VckFvIn/9V4fypYs4vDi3X+h3wGnAjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzS
ioOv5zsOAvObwrnzbtKSwfs3aP5geEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwv
Gn3A+SVCLyPMTNwnem48+ONh2F9uFHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHY
M5Hx9tzc+NVYdERPtaVcX8ycw1Eh9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe
72loreiwrECUOLAnAfp9Xqc+rMPPaLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE
9nQvksjnAgMBAAECggEAbnvddO9frFhivJ+DIhgEFQKcIOb0nigV9kx6QYehvYy8
lp/+aMb0Lk7d9r8rFQdL/icMK5GwZALg2KNKJvEbbF1Q3PwT9VHoUlgBYKJMDEFA
e9GKu7ASuVBjTZzdUUItwkkbe5eS/aQGeSWSjlpTnX0HNCFS72qRymK+scRhsAQf
ZoHyZHDslkvPR3Pos+sndWBYCDHag5/KoPhsMt1+5S9NQcOUHx9Ac0gLHjau3N+P
0FhODHFFGnnpyQvLvj6u3ZOR34ladMgoBglE0O3vPFhckn92EK4teeTWOsUMotiz
qM3QIJTOJjtiY6VDGY93bIa4pFvt7Zi4vIerenKt0QKBgQD/UMFqfevTAMrk10AC
bOa4+cM07ORY4ZwVj5ILhZn+8crDEEtBsUyuEU2FTINtnoEq1yGc/IXpsyS1BHjL
L1xSml5LN3jInbi8z5XQfY5Sj3VOMtwY6yD20jcdeDC44rz3nStXdkcMWxbTMapx
iOPsap5ciUKOMS7LyMidPEG/LQKBgQDd5vHgrLN0FBIIm+vZg6MEm4QyobstVp4l
7V/GZsdL+M8AQv1Rx+5wSUSWKomOIv5lglis7f6g0c9O7Qkr78/wzoyoKC2RRqPp
I90GjY2Iv22N4GIkRrDAgMZbkTitzIB6tbXEVeLAOh3frFJ8IwauRCOiXIjrZdJ4
FvV86+nU4wKBgQDdWTP2kWkMrBk7QOp7r9Jv+AmnLuHhtOdPQgOJ/bA++X2ik9PL
Bl3GY7XjpSwks1CkxZKcucmXjPp7/X6EGXFfI/owF82dkDADca0e7lufdERtIWb0
K5WOpz2lTPhgsiLGQfq7fw2lxqsJOnvcpqOD6gOVkmKjSDyb7F0RBJazmQKBgQDD
a8PQTcesjpBjLI3EfX1vbVY7ENu6zfFxDV+vZoxVh8UlQdm90AlYse3JIaUKnB7W
Xrihcucv0hZ0N6RAIW5LcFvHK7sVmdR4WbEpODhRGeTtcZJ8yBSZM898jKQRy2vK
pYRyaADNsWDlvujVkjMr/a40KrIaPQ3h3LZNUaYYaQKBgQD1x8A5S5SiE1cN1vFr
aACkmA2WqEDKKhUsUigJdwW6WB/B9kWlIlz/iV1H9uwBXtSIYG4VqCSTAvh0z4gX
Qu2SrdPm5PYnKzpdynpz78OnGdflD1RKWFGHItR6GN6tj/VmulO6mlFvT4jzBQ7j
+Hf8m2TcD4U3ksz3xw+YOD+cmA==
-----END RSA PRIVATE KEY-----
.gitignore (vendored, 2 lines changed)
@@ -1,2 +1,4 @@
.idea
/target
*.deb
.vscode
CONTRIBUTING.md
@@ -10,10 +10,4 @@ Happy hacking!

## TODOs

A non-exhaustive list of things that would be useful to implement:

#### Client authentication
MD5 is probably sufficient, but maybe others too.

#### Admin
Admin database for stats collection and pooler administration. PgBouncer gives us a nice example of how to do that, specifically how to implement `RowDescription` and `DataRow` messages, [example here](https://github.com/pgbouncer/pgbouncer/blob/4f9ced8e63d317a6ff45c8b0efa876b32161f6db/src/admin.c#L813).
See [Issues](https://github.com/levkk/pgcat/issues).
Cargo.lock (generated, 519 lines changed)
@@ -45,6 +45,12 @@ version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"

[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"

[[package]]
name = "bb8"
version = "0.7.1"
@@ -73,12 +79,24 @@ dependencies = [
 "generic-array",
]

[[package]]
name = "bumpalo"
version = "3.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3"

[[package]]
name = "bytes"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"

[[package]]
name = "cc"
version = "1.0.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"

[[package]]
name = "cfg-if"
version = "1.0.0"
@@ -109,22 +127,23 @@ dependencies = [

[[package]]
name = "crypto-common"
version = "0.1.1"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0"
checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"
dependencies = [
 "generic-array",
 "typenum",
]

[[package]]
name = "digest"
version = "0.10.1"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b"
checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"
dependencies = [
 "block-buffer",
 "crypto-common",
 "generic-array",
 "subtle",
]

[[package]]
@@ -140,6 +159,12 @@ dependencies = [
 "termcolor",
]

[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"

[[package]]
name = "futures-channel"
version = "0.3.19"
@@ -155,6 +180,12 @@ version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7"

[[package]]
name = "futures-sink"
version = "0.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868"

[[package]]
name = "futures-task"
version = "0.3.19"
@@ -196,6 +227,31 @@ dependencies = [
 "wasi",
]

[[package]]
name = "h2"
version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
dependencies = [
 "bytes",
 "fnv",
 "futures-core",
 "futures-sink",
 "futures-util",
 "http",
 "indexmap",
 "slab",
 "tokio",
 "tokio-util",
 "tracing",
]

[[package]]
name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"

[[package]]
name = "hermit-abi"
version = "0.1.19"
@@ -205,12 +261,89 @@ dependencies = [
 "libc",
]

[[package]]
name = "hmac"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
dependencies = [
 "digest",
]

[[package]]
name = "http"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
dependencies = [
 "bytes",
 "fnv",
 "itoa",
]

[[package]]
name = "http-body"
version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
dependencies = [
 "bytes",
 "http",
 "pin-project-lite",
]

[[package]]
name = "httparse"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c"

[[package]]
name = "httpdate"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"

[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"

[[package]]
name = "hyper"
version = "0.14.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac"
dependencies = [
 "bytes",
 "futures-channel",
 "futures-core",
 "futures-util",
 "h2",
 "http",
 "http-body",
 "httparse",
 "httpdate",
 "itoa",
 "pin-project-lite",
 "socket2",
 "tokio",
 "tower-service",
 "tracing",
 "want",
]

[[package]]
name = "indexmap"
version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
dependencies = [
 "autocfg",
 "hashbrown",
]

[[package]]
name = "instant"
version = "0.1.12"
@@ -221,10 +354,31 @@ dependencies = [
]

[[package]]
name = "libc"
version = "0.2.117"
name = "itoa"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c"
checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"

[[package]]
name = "js-sys"
version = "0.3.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27"
dependencies = [
 "wasm-bindgen",
]

[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"

[[package]]
name = "libc"
version = "0.2.126"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"

[[package]]
name = "lock_api"
@@ -352,29 +506,81 @@ dependencies = [

[[package]]
name = "pgcat"
version = "0.1.0"
version = "0.6.0-alpha1"
dependencies = [
 "arc-swap",
 "async-trait",
 "base64",
 "bb8",
 "bytes",
 "chrono",
 "env_logger",
 "hmac",
 "hyper",
 "log",
 "md-5",
 "num_cpus",
 "once_cell",
 "parking_lot",
 "phf",
 "rand",
 "regex",
 "rustls-pemfile",
 "serde",
 "serde_derive",
 "sha-1",
 "sha2",
 "sqlparser",
 "statsd",
 "stringprep",
 "tokio",
 "tokio-rustls",
 "toml",
]

[[package]]
name = "phf"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259"
dependencies = [
 "phf_macros",
 "phf_shared",
 "proc-macro-hack",
]

[[package]]
name = "phf_generator"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6"
dependencies = [
 "phf_shared",
 "rand",
]

[[package]]
name = "phf_macros"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58fdf3184dd560f160dd73922bea2d5cd6e8f064bf4b13110abd81b03697b4e0"
dependencies = [
 "phf_generator",
 "phf_shared",
 "proc-macro-hack",
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "phf_shared"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
dependencies = [
 "siphasher",
]

[[package]]
name = "pin-project-lite"
version = "0.2.8"
@@ -393,6 +599,12 @@ version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"

[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"

[[package]]
name = "proc-macro2"
version = "1.0.36"
@@ -462,9 +674,9 @@ dependencies = [

[[package]]
name = "regex"
version = "1.5.4"
version = "1.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
dependencies = [
 "aho-corasick",
 "memchr",
@@ -477,12 +689,58 @@ version = "0.6.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"

[[package]]
name = "ring"
version = "0.16.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
dependencies = [
 "cc",
 "libc",
 "once_cell",
 "spin",
 "untrusted",
 "web-sys",
 "winapi",
]

[[package]]
name = "rustls"
version = "0.20.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033"
dependencies = [
 "log",
 "ring",
 "sct",
 "webpki",
]

[[package]]
name = "rustls-pemfile"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9"
dependencies = [
 "base64",
]

[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"

[[package]]
name = "sct"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4"
dependencies = [
 "ring",
 "untrusted",
]

[[package]]
name = "serde"
version = "1.0.136"
@@ -511,6 +769,17 @@ dependencies = [
 "digest",
]

[[package]]
name = "sha2"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676"
dependencies = [
 "cfg-if",
 "cpufeatures",
 "digest",
]

[[package]]
name = "signal-hook-registry"
version = "1.4.0"
@@ -520,6 +789,12 @@ dependencies = [
 "libc",
]

[[package]]
name = "siphasher"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"

[[package]]
name = "slab"
version = "0.4.5"
@@ -532,6 +807,22 @@ version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"

[[package]]
name = "socket2"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
dependencies = [
 "libc",
 "winapi",
]

[[package]]
name = "spin"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"

[[package]]
name = "sqlparser"
version = "0.14.0"
@@ -542,14 +833,21 @@ dependencies = [
]

[[package]]
name = "statsd"
version = "0.15.0"
name = "stringprep"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df1efceb4bf2c0b5ebec94354285a43bbbed1375605bdf2ebe4132299434a330"
checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1"
dependencies = [
 "rand",
 "unicode-bidi",
 "unicode-normalization",
]

[[package]]
name = "subtle"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"

[[package]]
name = "syn"
version = "1.0.86"
@@ -581,6 +879,21 @@ dependencies = [
 "winapi",
]

[[package]]
name = "tinyvec"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
dependencies = [
 "tinyvec_macros",
]

[[package]]
name = "tinyvec_macros"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"

[[package]]
name = "tokio"
version = "1.16.1"
@@ -611,6 +924,31 @@ dependencies = [
 "syn",
]

[[package]]
name = "tokio-rustls"
version = "0.23.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59"
dependencies = [
 "rustls",
 "tokio",
 "webpki",
]

[[package]]
name = "tokio-util"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c"
dependencies = [
 "bytes",
 "futures-core",
 "futures-sink",
 "pin-project-lite",
 "tokio",
 "tracing",
]

[[package]]
name = "toml"
version = "0.5.8"
@@ -620,30 +958,179 @@ dependencies = [
 "serde",
]

[[package]]
name = "tower-service"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"

[[package]]
name = "tracing"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09"
dependencies = [
 "cfg-if",
 "pin-project-lite",
 "tracing-attributes",
 "tracing-core",
]

[[package]]
name = "tracing-attributes"
version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "tracing-core"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f"
dependencies = [
 "lazy_static",
]

[[package]]
name = "try-lock"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"

[[package]]
name = "typenum"
version = "1.15.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"

[[package]]
name = "unicode-bidi"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"

[[package]]
name = "unicode-normalization"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9"
dependencies = [
 "tinyvec",
]

[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"

[[package]]
name = "untrusted"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"

[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"

[[package]]
name = "want"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0"
dependencies = [
 "log",
 "try-lock",
]

[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"

[[package]]
name = "wasm-bindgen"
version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994"
dependencies = [
 "cfg-if",
 "wasm-bindgen-macro",
]

[[package]]
name = "wasm-bindgen-backend"
version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a"
dependencies = [
 "bumpalo",
 "lazy_static",
 "log",
 "proc-macro2",
 "quote",
 "syn",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-macro"
version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa"
dependencies = [
 "quote",
 "wasm-bindgen-macro-support",
]

[[package]]
name = "wasm-bindgen-macro-support"
version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
 "wasm-bindgen-backend",
 "wasm-bindgen-shared",
]

[[package]]
name = "wasm-bindgen-shared"
version = "0.2.81"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be"

[[package]]
name = "web-sys"
version = "0.3.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90"
dependencies = [
 "js-sys",
 "wasm-bindgen",
]

[[package]]
name = "webpki"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd"
dependencies = [
 "ring",
 "untrusted",
]

[[package]]
name = "winapi"
version = "0.3.9"
Cargo.toml (12 lines changed)
@@ -1,6 +1,6 @@
[package]
name = "pgcat"
version = "0.1.0"
version = "0.6.0-alpha1"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -20,8 +20,16 @@ serde_derive = "1"
regex = "1"
num_cpus = "1"
once_cell = "1"
statsd = "0.15"
sqlparser = "0.14"
log = "0.4"
arc-swap = "1"
env_logger = "0.9"
parking_lot = "0.11"
hmac = "0.12"
sha2 = "0.10"
base64 = "0.13"
stringprep = "0.1"
tokio-rustls = "0.23"
rustls-pemfile = "1"
hyper = { version = "0.14", features = ["full"] }
phf = { version = "0.10", features = ["macros"] }
Dockerfile
@@ -1,11 +1,11 @@
FROM rust:1.58-slim-buster AS builder
FROM rust:1 AS builder
COPY . /app
WORKDIR /app
RUN cargo build --release

FROM debian:buster-slim
FROM debian:bullseye-slim
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
ENTRYPOINT ["/usr/bin/pgcat"]
CMD ["pgcat"]
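A sketch of building and running the image above. The published port comes from pgcat.toml, and passing `pgcat.toml` as the argument assumes PgCat takes the config path relative to the `WORKDIR`, as the CI invocation `./target/debug/pgcat .circleci/pgcat.toml` suggests:

```bash
# Build the release image from the repository root.
docker build -t pgcat .

# Run it with the config baked into the image at /etc/pgcat/pgcat.toml.
docker run -p 6432:6432 pgcat pgcat.toml
```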
Dockerfile.ci (new file, 8 lines)
@@ -0,0 +1,8 @@
FROM cimg/rust:1.62.0
RUN sudo apt-get update && \
    sudo apt-get install -y psmisc postgresql-contrib-12 postgresql-client-12 ruby ruby-dev libpq-dev python3 python3-pip lcov llvm-11 && \
    sudo apt-get upgrade curl
RUN cargo install cargo-binutils rustfilt && \
    rustup component add llvm-tools-preview
RUN pip3 install psycopg2 && \
    sudo gem install bundler
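The CircleCI config above pulls this image as `levkk/pgcat-ci:latest`; a sketch of how it could be built and published (the tag is taken from that config, and the push assumes Docker Hub credentials are already set up):

```bash
docker build -f Dockerfile.ci -t levkk/pgcat-ci:latest .
docker push levkk/pgcat-ci:latest
```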
README.md (146 lines changed)
@@ -4,54 +4,68 @@
|
||||
|
||||

|
||||
|
||||
Meow. PgBouncer rewritten in Rust, with sharding, load balancing and failover support.
|
||||
PostgreSQL pooler (like PgBouncer) with sharding, load balancing and failover support.
|
||||
|
||||
**Alpha**: looking for alpha testers, see [#35](https://github.com/levkk/pgcat/issues/35).
|
||||
**Beta**: looking for beta testers, see [#35](https://github.com/levkk/pgcat/issues/35).
|
||||
|
||||
## Features
|
||||
| **Feature** | **Status** | **Comments** |
|
||||
|--------------------------------|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Transaction pooling | :heavy_check_mark: | Identical to PgBouncer. |
|
||||
| Session pooling | :heavy_check_mark: | Identical to PgBouncer. |
|
||||
| `COPY` support | :heavy_check_mark: | Both `COPY TO` and `COPY FROM` are supported. |
|
||||
| Query cancellation | :heavy_check_mark: | Supported both in transaction and session pooling modes. |
|
||||
| Load balancing of read queries | :heavy_check_mark: | Using round-robin between replicas. Primary is included when `primary_reads_enabled` is enabled (default). |
|
||||
| Sharding | :heavy_check_mark: | Transactions are sharded using `SET SHARD TO` and `SET SHARDING KEY TO` syntax extensions; see examples below. |
|
||||
| Failover | :heavy_check_mark: | Replicas are tested with a health check. If a health check fails, remaining replicas are attempted; see below for algorithm description and examples. |
|
||||
| Statistics reporting | :heavy_check_mark: | Statistics similar to PgBouncers are reported via StatsD. |
|
||||
| Live configuration reloading | :construction_worker: | Reload config with a `SIGHUP` to the process, e.g. `kill -s SIGHUP $(pgrep pgcat)`. Not all settings can be reloaded without a restart. |
|
||||
| Client authentication | :x: :wrench: | On the roadmap; currently all clients are allowed to connect and one user is used to connect to Postgres. |
|
||||
| **Feature** | **Status** | **Comments** |
|
||||
|--------------------------------|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| Transaction pooling | :white_check_mark: | Identical to PgBouncer. |
|
||||
| Session pooling | :white_check_mark: | Identical to PgBouncer. |
|
||||
| `COPY` support | :white_check_mark: | Both `COPY TO` and `COPY FROM` are supported. |
|
||||
| Query cancellation | :white_check_mark: | Supported both in transaction and session pooling modes. |
|
||||
| Load balancing of read queries | :white_check_mark: | Using round-robin between replicas. Primary is included when `primary_reads_enabled` is enabled (default). |
|
||||
| Sharding | :white_check_mark: | Transactions are sharded using `SET SHARD TO` and `SET SHARDING KEY TO` syntax extensions; see examples below. |
|
||||
| Failover | :white_check_mark: | Replicas are tested with a health check. If a health check fails, remaining replicas are attempted; see below for algorithm description and examples. |
|
||||
| Statistics | :white_check_mark: | Statistics available in the admin database (`pgcat` and `pgbouncer`) with `SHOW STATS`, `SHOW POOLS` and others. |
|
||||
| Live configuration reloading | :white_check_mark: | Reload supported settings with a `SIGHUP` to the process, e.g. `kill -s SIGHUP $(pgrep pgcat)` or `RELOAD` query issued to the admin database. |
|
||||
| Client authentication | :white_check_mark: :wrench: | MD5 password authentication is supported, SCRAM is on the roadmap; one user is used to connect to Postgres with both SCRAM and MD5 supported. |
|
||||
| Admin database | :white_check_mark: | The admin database, similar to PgBouncer's, allows to query for statistics and reload the configuration. |
|
||||
|
||||
## Deployment
|
||||
|
||||
See `Dockerfile` for example deployment using Docker. The pooler is configured to spawn 4 workers so 4 CPUs are recommended for optimal performance.
|
||||
That setting can be adjusted to spawn as many (or as little) workers as needed.
|
||||
See `Dockerfile` for example deployment using Docker. The pooler is configured to spawn 4 workers so 4 CPUs are recommended for optimal performance. That setting can be adjusted to spawn as many (or as little) workers as needed.
|
||||
|
||||
For quick local example, use the Docker Compose environment provided:
|
||||
|
||||
```bash
|
||||
docker-compose up
|
||||
|
||||
# In a new terminal:
|
||||
psql -h 127.0.0.1 -p 6432 -c 'SELECT 1'
|
||||
```
|
||||
|
||||
### Config
|
||||
|
||||
| **Name** | **Description** | **Examples** |
|
||||
|-------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|
|
||||
| **`general`** | | |
|
||||
| `host` | The pooler will run on this host, 0.0.0.0 means accessible from everywhere. | `0.0.0.0` |
|
||||
| `port` | The pooler will run on this port. | `6432` |
|
||||
| `pool_size` | Maximum allowed server connections per pool. Pools are separated for each user/shard/server role. The connections are allocated as needed. | `15` |
|
||||
| `pool_mode` | The pool mode to use, i.e. `session` or `transaction`. | `transaction` |
|
||||
| `connect_timeout` | Maximum time to establish a connection to a server (milliseconds). If reached, the server is banned and the next target is attempted. | `5000` |
|
||||
| `healthcheck_timeout` | Maximum time to pass a health check (`SELECT 1`, milliseconds). If reached, the server is banned and the next target is attempted. | `1000` |
|
||||
| `ban_time` | Ban time for a server (seconds). It won't be allowed to serve transactions until the ban expires; failover targets will be used instead. | `60` |
|
||||
| `statsd_address` | StatsD host and port. Statistics will be sent there every 15 seconds. | `127.0.0.1:8125` |
|
||||
| | | |
|
||||
| **`user`** | | |
|
||||
| `name` | The user name. | `sharding_user` |
|
||||
| `password` | The user password in plaintext. | `hunter2` |
|
||||
| | | |
|
||||
| **`shards`** | Shards are numerically numbered starting from 0; the order in the config is preserved by the pooler to route queries accordingly. | `[shards.0]` |
|
||||
| `servers` | List of servers to connect to and their roles. A server is: `[host, port, role]`, where `role` is either `primary` or `replica`. | `["127.0.0.1", 5432, "primary"]` |
|
||||
| `database` | The name of the database to connect to. This is the same on all servers that are part of one shard. | |
|
||||
| **`query_router`** | | |
|
||||
| `default_role` | Traffic is routed to this role by default (round-robin), unless the client specifies otherwise. Default is `any`, for any role available. | `any`, `primary`, `replica` |
|
||||
| `query_parser_enabled` | Enable the query parser which will inspect incoming queries and route them to a primary or replicas. | `false` |
|
||||
| `primary_reads_enabled` | Enable this to allow read queries on the primary; otherwise read queries are routed to the replicas. | `true` |
|
||||
| **Name** | **Description** | **Examples** |
|
||||
|------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|
|
||||
| **`general`** | | |
|
||||
| `host` | The pooler will run on this host, 0.0.0.0 means accessible from everywhere. | `0.0.0.0` |
|
||||
| `port` | The pooler will run on this port. | `6432` |
|
||||
| `enable_prometheus_exporter` | Enable prometheus exporter which will export metrics in prometheus exposition format. | `true` |
|
||||
| `prometheus_exporter_port` | Port at which prometheus exporter listens on. | `9930` |
|
||||
| `pool_size` | Maximum allowed server connections per pool. Pools are separated for each user/shard/server role. The connections are allocated as needed. | `15` |
|
||||
| `pool_mode` | The pool mode to use, i.e. `session` or `transaction`. | `transaction` |
|
||||
| `connect_timeout` | Maximum time to establish a connection to a server (milliseconds). If reached, the server is banned and the next target is attempted. | `5000` |
|
||||
| `healthcheck_timeout` | Maximum time to pass a health check (`SELECT 1`, milliseconds). If reached, the server is banned and the next target is attempted. | `1000` |
|
||||
| `shutdown_timeout` | Maximum time to give clients during shutdown before forcibly killing client connections (ms). | `60000` |
|
||||
| `healthcheck_delay` | How long to keep connection available for immediate re-use, without running a healthcheck query on it | `30000` |
|
||||
| `ban_time` | Ban time for a server (seconds). It won't be allowed to serve transactions until the ban expires; failover targets will be used instead. | `60` |
|
||||
| `autoreload` | Enable auto-reload of config after fixed time-interval. | `false` |
|
||||
| | | |
|
||||
| **`user`** | | |
|
||||
| `name` | The user name. | `sharding_user` |
|
||||
| `password` | The user password in plaintext. | `hunter2` |
|
||||
| | | |
|
||||
| **`shards`** | Shards are numerically numbered starting from 0; the order in the config is preserved by the pooler to route queries accordingly. | `[shards.0]` |
|
||||
| `servers` | List of servers to connect to and their roles. A server is: `[host, port, role]`, where `role` is either `primary` or `replica`. | `["127.0.0.1", 5432, "primary"]` |
|
||||
| `database` | The name of the database to connect to. This is the same on all servers that are part of one shard. | |
|
||||
| | | |
|
||||
| **`query_router`** | | |
|
||||
| `default_role` | Traffic is routed to this role by default (round-robin), unless the client specifies otherwise. Default is `any`, for any role available. | `any`, `primary`, `replica` |
|
||||
| `query_parser_enabled` | Enable the query parser which will inspect incoming queries and route them to a primary or replicas. | `false` |
|
||||
| `primary_reads_enabled` | Enable this to allow read queries on the primary; otherwise read queries are routed to the replicas. | `true` |
|
||||
|
||||
## Local development
|
||||
|
||||
@@ -75,15 +89,15 @@ See [sharding README](./tests/sharding/README.md) for sharding logic testing.
|
||||
|
||||
| **Feature** | **Tested in CI** | **Tested manually** | **Comments** |
|-----------------------|--------------------|---------------------|--------------------------------------------------------------------------------------------------------------------------|
| Transaction pooling | :heavy_check_mark: | :heavy_check_mark: | Used by default for all tests. |
| Session pooling | :heavy_check_mark: | :heavy_check_mark: | Tested by running pgbench with `--protocol prepared`, which only works in session mode. |
| `COPY` | :heavy_check_mark: | :heavy_check_mark: | `pgbench -i` uses `COPY`. `COPY FROM` is tested as well. |
| Query cancellation | :heavy_check_mark: | :heavy_check_mark: | `psql -c 'SELECT pg_sleep(1000);'` and press `Ctrl-C`. |
| Load balancing | :x: | :heavy_check_mark: | We could test this by emitting statistics for each replica and comparing them. |
| Failover | :x: | :heavy_check_mark: | Misconfigure a replica in `pgcat.toml` and watch it forward queries to spares. CI testing could include using Toxiproxy. |
| Sharding | :heavy_check_mark: | :heavy_check_mark: | See `tests/sharding` and `tests/ruby` for a Rails/ActiveRecord example. |
| Statistics reporting | :x: | :heavy_check_mark: | Run `nc -l -u 8125` and watch the stats come in every 15 seconds. |
| Live config reloading | :heavy_check_mark: | :heavy_check_mark: | Run `kill -s SIGHUP $(pgrep pgcat)` and watch the config reload. |
| Transaction pooling | :white_check_mark: | :white_check_mark: | Used by default for all tests. |
| Session pooling | :white_check_mark: | :white_check_mark: | Tested by running pgbench with `--protocol prepared`, which only works in session mode. |
| `COPY` | :white_check_mark: | :white_check_mark: | `pgbench -i` uses `COPY`. `COPY FROM` is tested as well. |
| Query cancellation | :white_check_mark: | :white_check_mark: | `psql -c 'SELECT pg_sleep(1000);'` and press `Ctrl-C`. |
| Load balancing | :white_check_mark: | :white_check_mark: | We could test this by emitting statistics for each replica and comparing them. |
| Failover | :white_check_mark: | :white_check_mark: | Misconfigure a replica in `pgcat.toml` and watch it forward queries to spares. CI testing is using Toxiproxy. |
| Sharding | :white_check_mark: | :white_check_mark: | See `tests/sharding` and `tests/ruby` for a Rails/ActiveRecord example. |
| Statistics | :white_check_mark: | :white_check_mark: | Query the admin database with `psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS'`. |
| Live config reloading | :white_check_mark: | :white_check_mark: | Run `kill -s SIGHUP $(pgrep pgcat)` and watch the config reload. |
## Usage

@@ -133,6 +147,31 @@ All servers are checked with a `SELECT 1` query before being given to a client.
The ban time can be changed with `ban_time`. The default is 60 seconds.

Failover behavior can get pretty interesting (read: complex) when multiple configurations and factors are involved. The table below explains what PgCat does in each scenario:

| **Query** | **`SET SERVER ROLE TO`** | **`query_parser_enabled`** | **`primary_reads_enabled`** | **Target state** | **Outcome** |
|---------------------------|--------------------------|----------------------------|-----------------------------|------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Read query, i.e. `SELECT` | unset (any) | false | false | up | Query is routed to the first instance in the round-robin loop. |
| Read query | unset (any) | true | false | up | Query is routed to the first replica instance in the round-robin loop. |
| Read query | unset (any) | true | true | up | Query is routed to the first instance in the round-robin loop. |
| Read query | replica | false | false | up | Query is routed to the first replica instance in the round-robin loop. |
| Read query | primary | false | false | up | Query is routed to the primary. |
| Read query | unset (any) | false | false | down | First instance is banned for reads. Next target in the round-robin loop is attempted. |
| Read query | unset (any) | true | false | down | First replica instance is banned. Next replica instance is attempted in the round-robin loop. |
| Read query | unset (any) | true | true | down | First instance (even if primary) is banned for reads. Next instance is attempted in the round-robin loop. |
| Read query | replica | false | false | down | First replica instance is banned. Next replica instance is attempted in the round-robin loop. |
| Read query | primary | false | false | down | The query is attempted against the primary and fails. The client receives an error. |
| | | | | | |
| Write query, e.g. `INSERT` | unset (any) | false | false | up | The query is attempted against the first available instance in the round-robin loop. If the instance is a replica, the query fails and the client receives an error. |
| Write query | unset (any) | true | false | up | The query is routed to the primary. |
| Write query | unset (any) | true | true | up | The query is routed to the primary. |
| Write query | primary | false | false | up | The query is routed to the primary. |
| Write query | replica | false | false | up | The query is routed to the replica and fails. The client receives an error. |
| Write query | unset (any) | true | false | down | The query is routed to the primary and fails. The client receives an error. |
| Write query | unset (any) | true | true | down | The query is routed to the primary and fails. The client receives an error. |
| Write query | primary | false | false | down | The query is routed to the primary and fails. The client receives an error. |
| | | | | | |
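To make the read-path rules concrete, here is a minimal Rust sketch, written for this README rather than taken from PgCat's source (the `Role` enum and `pick_role` helper are illustrative names), of how the two flags and a client override combine:

```rust
/// Illustrative only: how `query_parser_enabled` and `primary_reads_enabled`
/// combine with a `SET SERVER ROLE TO ...` override to pick a target for a read.
#[derive(Debug, Clone, Copy, PartialEq)]
enum Role {
    Primary,
    Replica,
    Any, // round-robin across primary and replicas
}

fn pick_role(
    client_override: Option<Role>, // set via `SET SERVER ROLE TO ...`
    query_parser_enabled: bool,
    primary_reads_enabled: bool,
) -> Role {
    match client_override {
        Some(role) => role, // an explicit client choice always wins
        None if query_parser_enabled && !primary_reads_enabled => Role::Replica,
        None => Role::Any,
    }
}

fn main() {
    assert_eq!(pick_role(None, true, false), Role::Replica);
    assert_eq!(pick_role(None, true, true), Role::Any);
    assert_eq!(pick_role(Some(Role::Primary), true, false), Role::Primary);
    println!("read-routing sketch matches the table");
}
```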
### Sharding

We use the `PARTITION BY HASH` hashing function, the same one Postgres uses for declarative partitioning. This makes it possible to shard the database using Postgres partitions and place the partitions on different servers (shards). Both read and write queries can be routed to the shards using this pooler.
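As a rough sketch of the mechanics (illustrative only: the hash below is a stand-in, while PgCat's `pg_bigint_hash` reuses Postgres' own 64-bit partition hash so results agree with `PARTITION BY HASH`), shard selection boils down to hashing the sharding key and reducing it modulo the shard count:

```rust
/// Illustrative stand-in for shard selection; not Postgres' actual hash.
fn shard_for_key(key: i64, shard_count: u64) -> u64 {
    // Mix the key with a large odd constant (Fibonacci hashing) so nearby
    // keys spread out, then reduce modulo the number of shards.
    let hash = (key as u64).wrapping_mul(0x9E37_79B9_7F4A_7C15);
    hash % shard_count
}

fn main() {
    for key in [1_i64, 42, 100_000] {
        println!("key {:>6} -> shard {}", key, shard_for_key(key, 3));
    }
}
```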
@@ -200,11 +239,15 @@ SELECT * FROM users WHERE email = 'test@example.com'; -- shard setting lasts unt
### Statistics reporting

Stats are reported using StatsD every 15 seconds. The address is configurable with `statsd_address`; the default is `127.0.0.1:8125`. The stats are very similar to what PgBouncer reports and the names are kept the same to be comparable.
The stats are very similar to what PgBouncer reports and the names are kept the same to be comparable. They are accessible by querying the admin database `pgcat` (or `pgbouncer`, for compatibility).

```
psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'
```
### Live configuration reloading

The config can be reloaded by sending a `kill -s SIGHUP` to the process. Not all settings are currently supported by live reload:
The config can be reloaded by sending a `kill -s SIGHUP` to the process or by issuing `RELOAD` on the admin database. Not all settings are currently supported by live reload:

| **Config** | **Requires restart** |
|-------------------------|----------------------|
@@ -213,8 +256,9 @@ The config can be reloaded by sending a `kill -s SIGHUP` to the process. Not all
| `pool_mode` | no |
| `connect_timeout` | yes |
| `healthcheck_timeout` | no |
| `shutdown_timeout` | no |
| `healthcheck_delay` | no |
| `ban_time` | no |
| `statsd_address` | yes |
| `user` | yes |
| `shards` | yes |
| `default_role` | no |
docker-compose.yml (new file)
@@ -0,0 +1,17 @@
version: "3"
services:
  postgres:
    image: postgres:14
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_HOST_AUTH_METHOD: md5
  pgcat:
    build: .
    command:
      - "pgcat"
      - "/etc/pgcat/pgcat.toml"
    volumes:
      - "${PWD}/examples/docker/pgcat.toml:/etc/pgcat/pgcat.toml"
    ports:
      - "6432:6432"
      - "9930:9930"
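A quick illustrative smoke test (the `postgres`/`postgres` credentials and the `sharded` pool name are taken from the example config that follows):

```
docker-compose up -d
psql "postgres://postgres:postgres@127.0.0.1:6432/sharded" -c 'SELECT 1'
```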
examples/docker/pgcat.toml (new file)
@@ -0,0 +1,147 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 5000

# How much time to give `SELECT 1` health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep a connection available for immediate re-use without running a health check query on it (ms).
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 60000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # seconds

# Reload config automatically if it changes.
autoreload = false

# TLS
# tls_certificate = "server.cert"
# tls_private_key = "server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "postgres"
admin_password = "postgres"

# Pool configs are structured as pools.<pool_name>.
# The pool_name is what clients use as the database name when connecting.
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded".
[pools.sharded]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Credentials for users that may connect to this cluster
[pools.sharded.users.0]
username = "postgres"
password = "postgres"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9

# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
statement_timeout = 0

[pools.sharded.users.1]
username = "postgres"
password = "postgres"
pool_size = 21
statement_timeout = 15000

# Shard 0
[pools.sharded.shards.0]
# [ host, port, role ]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "postgres"

[pools.sharded.shards.1]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ],
]
database = "postgres"

[pools.sharded.shards.2]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ],
]
database = "postgres"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "postgres"
password = "postgres"
pool_size = 5
statement_timeout = 0

[pools.simple_db.shards.0]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ]
]
database = "postgres"
pgcat.toml
@@ -5,77 +5,54 @@
#
# General pooler settings
[general]

# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# How many connections to allocate per server.
pool_size = 15
# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"
# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 5000

# How much time to give `SELECT 1` health check query to return with a result (ms).
# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep connection available for immediate re-use, without running a healthcheck query on it
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 60000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds
ban_time = 60 # seconds

# Stats will be sent here
statsd_address = "127.0.0.1:8125"
# Reload config automatically if it changes.
autoreload = false

#
# User to use for authentication against the server.
[user]
name = "sharding_user"
password = "sharding_user"
# TLS
# tls_certificate = "server.cert"
# tls_private_key = "server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
admin_password = "admin_pass"

#
# Shards in the cluster
[shards]

# Shard 0
[shards.0]

# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
    # [ "127.0.1.1", 5432, "replica" ],
]
# Database name (e.g. "postgres")
database = "shard0"

[shards.1]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
    # [ "127.0.1.1", 5432, "replica" ],
]
database = "shard1"

[shards.2]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
    # [ "127.0.1.1", 5432, "replica" ],
]
database = "shard2"


# Settings for our query routing layer.
[query_router]
# Pool configs are structured as pools.<pool_name>.
# The pool_name is what clients use as the database name when connecting.
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db".
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
@@ -85,14 +62,86 @@ database = "shard2"
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"


# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = false
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitely selected with our custom protocol.
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9

# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 15000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 0

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
src/admin.rs (new file)
@@ -0,0 +1,441 @@
/// Admin database.
use bytes::{Buf, BufMut, BytesMut};
use log::{info, trace};
use std::collections::HashMap;

use crate::config::{get_config, reload_config, VERSION};
use crate::errors::Error;
use crate::messages::*;
use crate::pool::get_all_pools;
use crate::stats::get_stats;
use crate::ClientServerMap;

pub fn generate_server_info_for_admin() -> BytesMut {
    let mut server_info = BytesMut::new();

    server_info.put(server_paramater_message("application_name", ""));
    server_info.put(server_paramater_message("client_encoding", "UTF8"));
    server_info.put(server_paramater_message("server_encoding", "UTF8"));
    server_info.put(server_paramater_message("server_version", VERSION));
    server_info.put(server_paramater_message("DateStyle", "ISO, MDY"));

    return server_info;
}

/// Handle admin client.
pub async fn handle_admin<T>(
    stream: &mut T,
    mut query: BytesMut,
    client_server_map: ClientServerMap,
) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let code = query.get_u8() as char;

    if code != 'Q' {
        return Err(Error::ProtocolSyncError);
    }

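    // Simple query wire format: a 'Q' tag, then an i32 length that counts
    // itself and the trailing NUL terminator, then the query text — hence
    // the `len - 5` slice below.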
    let len = query.get_i32() as usize;
    let query = String::from_utf8_lossy(&query[..len - 5])
        .to_string()
        .to_ascii_uppercase();

    trace!("Admin query: {}", query);

    let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();

    match query_parts[0] {
        "RELOAD" => {
            trace!("RELOAD");
            reload(stream, client_server_map).await
        }
        "SET" => {
            trace!("SET");
            ignore_set(stream).await
        }
        "SHOW" => match query_parts[1] {
            "CONFIG" => {
                trace!("SHOW CONFIG");
                show_config(stream).await
            }
            "DATABASES" => {
                trace!("SHOW DATABASES");
                show_databases(stream).await
            }
            "LISTS" => {
                trace!("SHOW LISTS");
                show_lists(stream).await
            }
            "POOLS" => {
                trace!("SHOW POOLS");
                show_pools(stream).await
            }
            "STATS" => {
                trace!("SHOW STATS");
                show_stats(stream).await
            }
            "VERSION" => {
                trace!("SHOW VERSION");
                show_version(stream).await
            }
            _ => error_response(stream, "Unsupported SHOW query against the admin database").await,
        },
        _ => error_response(stream, "Unsupported query against the admin database").await,
    }
}

/// Column-oriented statistics.
async fn show_lists<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let stats = get_stats();

    let columns = vec![("list", DataType::Text), ("items", DataType::Int4)];

    let mut users = 1;
    let mut databases = 1;
    for (_, pool) in get_all_pools() {
        databases += pool.databases();
        users += 1; // One user per pool
    }
    let mut res = BytesMut::new();
    res.put(row_description(&columns));
    res.put(data_row(&vec![
        "databases".to_string(),
        databases.to_string(),
    ]));
    res.put(data_row(&vec!["users".to_string(), users.to_string()]));
    res.put(data_row(&vec!["pools".to_string(), databases.to_string()]));
    res.put(data_row(&vec![
        "free_clients".to_string(),
        stats
            .keys()
            .map(|address_id| stats[&address_id]["cl_idle"])
            .sum::<i64>()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "used_clients".to_string(),
        stats
            .keys()
            .map(|address_id| stats[&address_id]["cl_active"])
            .sum::<i64>()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "login_clients".to_string(),
        "0".to_string(),
    ]));
    res.put(data_row(&vec![
        "free_servers".to_string(),
        stats
            .keys()
            .map(|address_id| stats[&address_id]["sv_idle"])
            .sum::<i64>()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "used_servers".to_string(),
        stats
            .keys()
            .map(|address_id| stats[&address_id]["sv_active"])
            .sum::<i64>()
            .to_string(),
    ]));
    res.put(data_row(&vec!["dns_names".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_zones".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_queries".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_pending".to_string(), "0".to_string()]));

    res.put(command_complete("SHOW"));

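    // ReadyForQuery: 'Z' tag, length 5, transaction status 'I' (idle).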
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}

/// Show PgCat version.
async fn show_version<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    res.put(row_description(&vec![("version", DataType::Text)]));
    res.put(data_row(&vec![format!("PgCat {}", VERSION).to_string()]));
    res.put(command_complete("SHOW"));

    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}

/// Show utilization of connection pools for each shard and replicas.
async fn show_pools<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let stats = get_stats();

    let columns = vec![
        ("database", DataType::Text),
        ("user", DataType::Text),
        ("cl_idle", DataType::Numeric),
        ("cl_active", DataType::Numeric),
        ("cl_waiting", DataType::Numeric),
        ("cl_cancel_req", DataType::Numeric),
        ("sv_active", DataType::Numeric),
        ("sv_idle", DataType::Numeric),
        ("sv_used", DataType::Numeric),
        ("sv_tested", DataType::Numeric),
        ("sv_login", DataType::Numeric),
        ("maxwait", DataType::Numeric),
        ("maxwait_us", DataType::Numeric),
        ("pool_mode", DataType::Text),
    ];

    let mut res = BytesMut::new();
    res.put(row_description(&columns));
    for (_, pool) in get_all_pools() {
        let pool_config = &pool.settings;
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let stats = match stats.get(&address.id) {
                    Some(stats) => stats.clone(),
                    None => HashMap::new(),
                };

                let mut row = vec![address.name(), pool_config.user.username.clone()];

                for column in &columns[2..columns.len() - 1] {
                    let value = stats.get(column.0).unwrap_or(&0).to_string();
                    row.push(value);
                }

                row.push(pool_config.pool_mode.to_string());
                res.put(data_row(&row));
            }
        }
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}

/// Show shards and replicas.
async fn show_databases<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    // Columns
    let columns = vec![
        ("name", DataType::Text),
        ("host", DataType::Text),
        ("port", DataType::Text),
        ("database", DataType::Text),
        ("force_user", DataType::Text),
        ("pool_size", DataType::Int4),
        ("min_pool_size", DataType::Int4),
        ("reserve_pool", DataType::Int4),
        ("pool_mode", DataType::Text),
        ("max_connections", DataType::Int4),
        ("current_connections", DataType::Int4),
        ("paused", DataType::Int4),
        ("disabled", DataType::Int4),
    ];

    let mut res = BytesMut::new();

    res.put(row_description(&columns));

    for (_, pool) in get_all_pools() {
        let pool_config = pool.settings.clone();
        for shard in 0..pool.shards() {
            let database_name = &pool_config.shards[&shard.to_string()].database;
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let pool_state = pool.pool_state(shard, server);
                let banned = pool.is_banned(address, shard, Some(address.role));

                res.put(data_row(&vec![
                    address.name(),                         // name
                    address.host.to_string(),               // host
                    address.port.to_string(),               // port
                    database_name.to_string(),              // database
                    pool_config.user.username.to_string(),  // force_user
                    pool_config.user.pool_size.to_string(), // pool_size
                    "0".to_string(),                        // min_pool_size
                    "0".to_string(),                        // reserve_pool
                    pool_config.pool_mode.to_string(),      // pool_mode
                    pool_config.user.pool_size.to_string(), // max_connections
                    pool_state.connections.to_string(),     // current_connections
                    "0".to_string(),                        // paused
                    match banned {
                        // disabled
                        true => "1".to_string(),
                        false => "0".to_string(),
                    },
                ]));
            }
        }
    }
    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}

/// Ignore any SET commands the client sends.
/// This is common initialization done by ORMs.
async fn ignore_set<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    custom_protocol_response_ok(stream, "SET").await
}

/// Reload the configuration file without restarting the process.
async fn reload<T>(stream: &mut T, client_server_map: ClientServerMap) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    info!("Reloading config");

    reload_config(client_server_map).await?;

    get_config().show();

    let mut res = BytesMut::new();

    res.put(command_complete("RELOAD"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}

/// Shows current configuration.
async fn show_config<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let config = &get_config();
    let config: HashMap<String, String> = config.into();

    // Configs that cannot be changed without restarting.
    let immutables = ["host", "port", "connect_timeout"];

    // Columns
    let columns = vec![
        ("key", DataType::Text),
        ("value", DataType::Text),
        ("default", DataType::Text),
        ("changeable", DataType::Text),
    ];

    // Response data
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    // DataRow rows
    for (key, value) in config {
        let changeable = if immutables.iter().filter(|col| *col == &key).count() == 1 {
            "no".to_string()
        } else {
            "yes".to_string()
        };

        let row = vec![key, value, "-".to_string(), changeable];

        res.put(data_row(&row));
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}

/// Show shard and replicas statistics.
async fn show_stats<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let columns = vec![
        ("database", DataType::Text),
        ("user", DataType::Text),
        ("total_xact_count", DataType::Numeric),
        ("total_query_count", DataType::Numeric),
        ("total_received", DataType::Numeric),
        ("total_sent", DataType::Numeric),
        ("total_xact_time", DataType::Numeric),
        ("total_query_time", DataType::Numeric),
        ("total_wait_time", DataType::Numeric),
        ("avg_xact_count", DataType::Numeric),
        ("avg_query_count", DataType::Numeric),
        ("avg_recv", DataType::Numeric),
        ("avg_sent", DataType::Numeric),
        ("avg_xact_time", DataType::Numeric),
        ("avg_query_time", DataType::Numeric),
        ("avg_wait_time", DataType::Numeric),
    ];

    let stats = get_stats();
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    for ((_db_name, username), pool) in get_all_pools() {
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let stats = match stats.get(&address.id) {
                    Some(stats) => stats.clone(),
                    None => HashMap::new(),
                };

                let mut row = vec![address.name()];
                row.push(username.clone());

                for column in &columns[2..] {
                    row.push(stats.get(column.0).unwrap_or(&0).to_string());
                }

                res.put(data_row(&row));
            }
        }
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
}
src/client.rs (diff suppressed because it is too large)

src/config.rs
@@ -1,24 +1,41 @@
use arc_swap::{ArcSwap, Guard};
/// Parse the configuration file.
use arc_swap::ArcSwap;
use log::{error, info};
use once_cell::sync::Lazy;
use serde_derive::Deserialize;
use serde_derive::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::hash::Hash;
use std::path::Path;
use std::sync::Arc;
use tokio::fs::File;
use tokio::io::AsyncReadExt;
use toml;

use std::collections::{HashMap, HashSet};
use std::sync::Arc;

use crate::errors::Error;
use crate::tls::{load_certs, load_keys};
use crate::{ClientServerMap, ConnectionPool};

pub const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Globally available configuration.
static CONFIG: Lazy<ArcSwap<Config>> = Lazy::new(|| ArcSwap::from_pointee(Config::default()));

#[derive(Clone, PartialEq, Deserialize, Hash, std::cmp::Eq, Debug, Copy)]
/// Server role: primary or replica.
#[derive(Clone, PartialEq, Serialize, Deserialize, Hash, std::cmp::Eq, Debug, Copy)]
pub enum Role {
    Primary,
    Replica,
}

impl ToString for Role {
    fn to_string(&self) -> String {
        match *self {
            Role::Primary => "primary".to_string(),
            Role::Replica => "replica".to_string(),
        }
    }
}

impl PartialEq<Option<Role>> for Role {
    fn eq(&self, other: &Option<Role>) -> bool {
        match other {
@@ -37,50 +54,87 @@ impl PartialEq<Role> for Option<Role> {
    }
}

/// Address identifying a PostgreSQL server uniquely.
#[derive(Clone, PartialEq, Hash, std::cmp::Eq, Debug)]
pub struct Address {
    pub id: usize,
    pub host: String,
    pub port: String,
    pub shard: usize,
    pub database: String,
    pub role: Role,
    pub replica_number: usize,
    pub username: String,
    pub poolname: String,
}

impl Default for Address {
    fn default() -> Address {
        Address {
            id: 0,
            host: String::from("127.0.0.1"),
            port: String::from("5432"),
            shard: 0,
            replica_number: 0,
            database: String::from("database"),
            role: Role::Replica,
            username: String::from("username"),
            poolname: String::from("poolname"),
        }
    }
}

#[derive(Clone, PartialEq, Hash, std::cmp::Eq, Deserialize, Debug)]
impl Address {
    /// Address name (aka database) used in `SHOW STATS`, `SHOW DATABASES`, and `SHOW POOLS`.
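    /// For example: "sharded_db_shard_0_primary" or "sharded_db_shard_0_replica_0".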
    pub fn name(&self) -> String {
        match self.role {
            Role::Primary => format!("{}_shard_{}_primary", self.poolname, self.shard),

            Role::Replica => format!(
                "{}_shard_{}_replica_{}",
                self.poolname, self.shard, self.replica_number
            ),
        }
    }
}

/// PostgreSQL user.
#[derive(Clone, PartialEq, Hash, std::cmp::Eq, Serialize, Deserialize, Debug)]
pub struct User {
    pub name: String,
    pub username: String,
    pub password: String,
    pub pool_size: u32,
    pub statement_timeout: u64,
}

impl Default for User {
    fn default() -> User {
        User {
            name: String::from("postgres"),
            username: String::from("postgres"),
            password: String::new(),
            pool_size: 15,
            statement_timeout: 0,
        }
    }
}

#[derive(Deserialize, Debug, Clone)]
/// General configuration.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct General {
    pub host: String,
    pub port: i16,
    pub pool_size: u32,
    pub pool_mode: String,
    pub enable_prometheus_exporter: Option<bool>,
    pub prometheus_exporter_port: i16,
    pub connect_timeout: u64,
    pub healthcheck_timeout: u64,
    pub shutdown_timeout: u64,
    pub healthcheck_delay: u64,
    pub ban_time: i64,
    pub statsd_address: String,
    pub autoreload: bool,
    pub tls_certificate: Option<String>,
    pub tls_private_key: Option<String>,
    pub admin_username: String,
    pub admin_password: String,
}

impl Default for General {
@@ -88,20 +142,50 @@ impl Default for General {
        General {
            host: String::from("localhost"),
            port: 5432,
            pool_size: 15,
            pool_mode: String::from("transaction"),
            enable_prometheus_exporter: Some(false),
            prometheus_exporter_port: 9930,
            connect_timeout: 5000,
            healthcheck_timeout: 1000,
            shutdown_timeout: 60000,
            healthcheck_delay: 30000,
            ban_time: 60,
            statsd_address: String::from("127.0.0.1:8125"),
            autoreload: false,
            tls_certificate: None,
            tls_private_key: None,
            admin_username: String::from("admin"),
            admin_password: String::from("admin"),
        }
    }
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Pool {
    pub pool_mode: String,
    pub default_role: String,
    pub query_parser_enabled: bool,
    pub primary_reads_enabled: bool,
    pub sharding_function: String,
    pub shards: HashMap<String, Shard>,
    pub users: HashMap<String, User>,
}
impl Default for Pool {
    fn default() -> Pool {
        Pool {
            pool_mode: String::from("transaction"),
            shards: HashMap::from([(String::from("1"), Shard::default())]),
            users: HashMap::default(),
            default_role: String::from("any"),
            query_parser_enabled: false,
            primary_reads_enabled: true,
            sharding_function: "pg_bigint_hash".to_string(),
        }
    }
}

#[derive(Deserialize, Debug, Clone)]
/// Shard configuration.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Shard {
    pub servers: Vec<(String, u16, String)>,
    pub database: String,
    pub servers: Vec<(String, u16, String)>,
}

impl Default for Shard {
@@ -113,60 +197,185 @@ impl Default for Shard {
    }
}

#[derive(Deserialize, Debug, Clone)]
pub struct QueryRouter {
    pub default_role: String,
    pub query_parser_enabled: bool,
    pub primary_reads_enabled: bool,
fn default_path() -> String {
    String::from("pgcat.toml")
}

impl Default for QueryRouter {
    fn default() -> QueryRouter {
        QueryRouter {
            default_role: String::from("any"),
            query_parser_enabled: false,
            primary_reads_enabled: true,
        }
    }
}

#[derive(Deserialize, Debug, Clone)]
/// Configuration wrapper.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct Config {
    // The serializer maintains the order of fields in the struct,
    // so we should always put simple fields before nested fields
    // in all serializable structs to avoid ValueAfterTable errors.
    // These errors occur when the toml serializer is about to produce
    // an ambiguous toml structure like the one below:
    // [main]
    // field1_under_main = 1
    // field2_under_main = 2
    // [main.subconf]
    // field1_under_subconf = 1
    // field3_under_main = 3 # This field will be interpreted as being under subconf and not under main
    #[serde(default = "default_path")]
    pub path: String,

    pub general: General,
    pub user: User,
    pub shards: HashMap<String, Shard>,
    pub query_router: QueryRouter,
    pub pools: HashMap<String, Pool>,
}

impl Default for Config {
    fn default() -> Config {
        Config {
            path: String::from("pgcat.toml"),
            general: General::default(),
            user: User::default(),
            shards: HashMap::from([(String::from("1"), Shard::default())]),
            query_router: QueryRouter::default(),
            pools: HashMap::default(),
        }
    }
}

impl From<&Config> for std::collections::HashMap<String, String> {
    fn from(config: &Config) -> HashMap<String, String> {
        let mut r: Vec<(String, String)> = config
            .pools
            .iter()
            .flat_map(|(pool_name, pool)| {
                [
                    (
                        format!("pools.{}.pool_mode", pool_name),
                        pool.pool_mode.clone(),
                    ),
                    (
                        format!("pools.{}.primary_reads_enabled", pool_name),
                        pool.primary_reads_enabled.to_string(),
                    ),
                    (
                        format!("pools.{}.query_parser_enabled", pool_name),
                        pool.query_parser_enabled.to_string(),
                    ),
                    (
                        format!("pools.{}.default_role", pool_name),
                        pool.default_role.clone(),
                    ),
                    (
                        format!("pools.{}.sharding_function", pool_name),
                        pool.sharding_function.clone(),
                    ),
                    (
                        format!("pools.{:?}.shard_count", pool_name),
                        pool.shards.len().to_string(),
                    ),
                    (
                        format!("pools.{:?}.users", pool_name),
                        pool.users
                            .iter()
                            .map(|(_username, user)| &user.username)
                            .cloned()
                            .collect::<Vec<String>>()
                            .join(", "),
                    ),
                ]
            })
            .collect();

        let mut static_settings = vec![
            ("host".to_string(), config.general.host.to_string()),
            ("port".to_string(), config.general.port.to_string()),
            (
                "prometheus_exporter_port".to_string(),
                config.general.prometheus_exporter_port.to_string(),
            ),
            (
                "connect_timeout".to_string(),
                config.general.connect_timeout.to_string(),
            ),
            (
                "healthcheck_timeout".to_string(),
                config.general.healthcheck_timeout.to_string(),
            ),
            (
                "shutdown_timeout".to_string(),
                config.general.shutdown_timeout.to_string(),
            ),
            (
                "healthcheck_delay".to_string(),
                config.general.healthcheck_delay.to_string(),
            ),
            ("ban_time".to_string(), config.general.ban_time.to_string()),
        ];

        r.append(&mut static_settings);
        return r.iter().cloned().collect();
    }
}

impl Config {
    /// Print current configuration.
    pub fn show(&self) {
        info!("Pool size: {}", self.general.pool_size);
        info!("Pool mode: {}", self.general.pool_mode);
        info!("Ban time: {}s", self.general.ban_time);
        info!(
            "Healthcheck timeout: {}ms",
            self.general.healthcheck_timeout
        );
        info!("Connection timeout: {}ms", self.general.connect_timeout);
        info!("Shutdown timeout: {}ms", self.general.shutdown_timeout);
        info!("Healthcheck delay: {}ms", self.general.healthcheck_delay);
        match self.general.tls_certificate.clone() {
            Some(tls_certificate) => {
                info!("TLS certificate: {}", tls_certificate);

                match self.general.tls_private_key.clone() {
                    Some(tls_private_key) => {
                        info!("TLS private key: {}", tls_private_key);
                        info!("TLS support is enabled");
                    }

                    None => (),
                }
            }

            None => {
                info!("TLS support is disabled");
            }
        };

        for (pool_name, pool_config) in &self.pools {
            // TODO: Make this output prettier (maybe a table?)
            info!("--- Settings for pool {} ---", pool_name);
            info!(
                "Pool size from all users: {}",
                pool_config
                    .users
                    .iter()
                    .map(|(_, user_cfg)| user_cfg.pool_size)
                    .sum::<u32>()
                    .to_string()
            );
            info!("Pool mode: {}", pool_config.pool_mode);
            info!("Sharding function: {}", pool_config.sharding_function);
            info!("Primary reads: {}", pool_config.primary_reads_enabled);
            info!("Query router: {}", pool_config.query_parser_enabled);

            // TODO: Make this prettier.
            info!("Number of shards: {}", pool_config.shards.len());
            info!("Number of users: {}", pool_config.users.len());

            for user in &pool_config.users {
                info!(
                    "{} pool size: {}, statement timeout: {}",
                    user.1.username, user.1.pool_size, user.1.statement_timeout
                );
            }
        }
    }
}

pub fn get_config() -> Guard<Arc<Config>> {
    CONFIG.load()
/// Get a read-only instance of the configuration
/// from anywhere in the app.
/// ArcSwap makes this cheap and quick.
pub fn get_config() -> Config {
    (*(*CONFIG.load())).clone()
}

/// Parse the config.
/// Parse the configuration file located at the path.
pub async fn parse(path: &str) -> Result<(), Error> {
    let mut contents = String::new();
    let mut file = match File::open(path).await {
@@ -185,7 +394,7 @@ pub async fn parse(path: &str) -> Result<(), Error> {
        }
    };

    let config: Config = match toml::from_str(&contents) {
    let mut config: Config = match toml::from_str(&contents) {
        Ok(config) => config,
        Err(err) => {
            error!("Could not parse config file: {}", err.to_string());
@@ -193,81 +402,151 @@ pub async fn parse(path: &str) -> Result<(), Error> {
        }
    };

    // Quick config sanity check.
    for shard in &config.shards {
        // We use addresses as unique identifiers,
        // let's make sure they are unique in the config as well.
        let mut dup_check = HashSet::new();
        let mut primary_count = 0;
    // Validate TLS!
    match config.general.tls_certificate.clone() {
        Some(tls_certificate) => {
            match load_certs(&Path::new(&tls_certificate)) {
                Ok(_) => {
                    // Cert is okay, but what about the private key?
                    match config.general.tls_private_key.clone() {
                        Some(tls_private_key) => match load_keys(&Path::new(&tls_private_key)) {
                            Ok(_) => (),
                            Err(err) => {
                                error!("tls_private_key is incorrectly configured: {:?}", err);
                                return Err(Error::BadConfig);
                            }
                        },

        match shard.0.parse::<usize>() {
            Ok(_) => (),
            Err(_) => {
                        None => {
                            error!("tls_certificate is set, but the tls_private_key is not");
                            return Err(Error::BadConfig);
                        }
                    };
                }

                Err(err) => {
                    error!("tls_certificate is incorrectly configured: {:?}", err);
                    return Err(Error::BadConfig);
                }
            }
        }
        None => (),
    };

    for (pool_name, pool) in &config.pools {
        match pool.sharding_function.as_ref() {
            "pg_bigint_hash" => (),
            "sha1" => (),
            _ => {
                error!(
                    "Shard '{}' is not a valid number, shards must be numbered starting at 0",
                    shard.0
                    "Supported sharding functions are: 'pg_bigint_hash', 'sha1', got: '{}' in pool {} settings",
                    pool.sharding_function,
                    pool_name
                );
                return Err(Error::BadConfig);
            }
        };

        if shard.1.servers.len() == 0 {
            error!("Shard {} has no servers configured", shard.0);
            return Err(Error::BadConfig);
        }
        match pool.default_role.as_ref() {
            "any" => (),
            "primary" => (),
            "replica" => (),
            other => {
                error!(
                    "Query router default_role must be 'primary', 'replica', or 'any', got: '{}'",
                    other
                );
                return Err(Error::BadConfig);
            }
        };

        for server in &shard.1.servers {
            dup_check.insert(server);
        for shard in &pool.shards {
            // We use addresses as unique identifiers,
            // let's make sure they are unique in the config as well.
            let mut dup_check = HashSet::new();
            let mut primary_count = 0;

            // Check that we define only zero or one primary.
            match server.2.as_ref() {
                "primary" => primary_count += 1,
                _ => (),
            };

            // Check role spelling.
            match server.2.as_ref() {
                "primary" => (),
                "replica" => (),
                _ => {
            match shard.0.parse::<usize>() {
                Ok(_) => (),
                Err(_) => {
                    error!(
                        "Shard {} server role must be either 'primary' or 'replica', got: '{}'",
                        shard.0, server.2
                        "Shard '{}' is not a valid number, shards must be numbered starting at 0",
                        shard.0
                    );
                    return Err(Error::BadConfig);
                }
            };
        }

        if primary_count > 1 {
            error!("Shard {} has more than one primary configured", &shard.0);
            return Err(Error::BadConfig);
        }
            if shard.1.servers.len() == 0 {
                error!("Shard {} has no servers configured", shard.0);
                return Err(Error::BadConfig);
            }

        if dup_check.len() != shard.1.servers.len() {
            error!("Shard {} contains duplicate server configs", &shard.0);
            return Err(Error::BadConfig);
            for server in &shard.1.servers {
                dup_check.insert(server);

                // Check that we define only zero or one primary.
                match server.2.as_ref() {
                    "primary" => primary_count += 1,
                    _ => (),
                };

                // Check role spelling.
                match server.2.as_ref() {
                    "primary" => (),
                    "replica" => (),
                    _ => {
                        error!(
                            "Shard {} server role must be either 'primary' or 'replica', got: '{}'",
                            shard.0, server.2
                        );
                        return Err(Error::BadConfig);
                    }
                };
            }

            if primary_count > 1 {
                error!("Shard {} has more than one primary configured", &shard.0);
                return Err(Error::BadConfig);
            }

            if dup_check.len() != shard.1.servers.len() {
                error!("Shard {} contains duplicate server configs", &shard.0);
                return Err(Error::BadConfig);
            }
        }
    }

    match config.query_router.default_role.as_ref() {
        "any" => (),
        "primary" => (),
        "replica" => (),
        other => {
            error!(
                "Query router default_role must be 'primary', 'replica', or 'any', got: '{}'",
                other
            );
            return Err(Error::BadConfig);
        }
    };
    config.path = path.to_string();

    // Update the configuration globally.
    CONFIG.store(Arc::new(config.clone()));

    Ok(())
}

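/// Reload the configuration from disk, re-creating the connection pools
/// only when the pool settings actually changed.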
pub async fn reload_config(client_server_map: ClientServerMap) -> Result<bool, Error> {
    let old_config = get_config();
    match parse(&old_config.path).await {
        Ok(()) => (),
        Err(err) => {
            error!("Config reload error: {:?}", err);
            return Err(Error::BadConfig);
        }
    };
    let new_config = get_config();

    if old_config.pools != new_config.pools {
        info!("Pool configuration changed, re-creating server pools");
        ConnectionPool::from_config(client_server_map).await?;
        Ok(true)
    } else if old_config != new_config {
        Ok(true)
    } else {
        Ok(false)
    }
}

#[cfg(test)]
mod test {
    use super::*;
@@ -275,10 +554,67 @@ mod test {
    #[tokio::test]
    async fn test_config() {
        parse("pgcat.toml").await.unwrap();
        assert_eq!(get_config().general.pool_size, 15);
        assert_eq!(get_config().shards.len(), 3);
        assert_eq!(get_config().shards["1"].servers[0].0, "127.0.0.1");
        assert_eq!(get_config().shards["0"].servers[0].2, "primary");
        assert_eq!(get_config().query_router.default_role, "any");

        assert_eq!(get_config().path, "pgcat.toml".to_string());

        assert_eq!(get_config().general.ban_time, 60);
        assert_eq!(get_config().pools.len(), 2);
        assert_eq!(get_config().pools["sharded_db"].shards.len(), 3);
        assert_eq!(get_config().pools["simple_db"].shards.len(), 1);
        assert_eq!(get_config().pools["sharded_db"].users.len(), 2);
        assert_eq!(get_config().pools["simple_db"].users.len(), 1);

        assert_eq!(
            get_config().pools["sharded_db"].shards["0"].servers[0].0,
            "127.0.0.1"
        );
        assert_eq!(
            get_config().pools["sharded_db"].shards["1"].servers[0].2,
            "primary"
        );
        assert_eq!(
            get_config().pools["sharded_db"].shards["1"].database,
            "shard1"
        );
        assert_eq!(
            get_config().pools["sharded_db"].users["0"].username,
            "sharding_user"
        );
        assert_eq!(
            get_config().pools["sharded_db"].users["1"].password,
            "other_user"
        );
        assert_eq!(get_config().pools["sharded_db"].users["1"].pool_size, 21);
        assert_eq!(get_config().pools["sharded_db"].default_role, "any");

        assert_eq!(
            get_config().pools["simple_db"].shards["0"].servers[0].0,
            "127.0.0.1"
        );
        assert_eq!(
            get_config().pools["simple_db"].shards["0"].servers[0].1,
            5432
        );
        assert_eq!(
            get_config().pools["simple_db"].shards["0"].database,
            "some_db"
        );
        assert_eq!(get_config().pools["simple_db"].default_role, "primary");

        assert_eq!(
            get_config().pools["simple_db"].users["0"].username,
            "simple_user"
        );
        assert_eq!(
            get_config().pools["simple_db"].users["0"].password,
            "simple_user"
        );
        assert_eq!(get_config().pools["simple_db"].users["0"].pool_size, 5);
    }

    #[tokio::test]
    async fn test_serialize_configs() {
        parse("pgcat.toml").await.unwrap();
        print!("{}", toml::to_string(&get_config()).unwrap());
    }
}
src/constants.rs
@@ -1,7 +1,6 @@
/// Various protocol constants, as defined in
/// https://www.postgresql.org/docs/12/protocol-message-formats.html
/// <https://www.postgresql.org/docs/12/protocol-message-formats.html>
/// and elsewhere in the source code.
/// Also other constants we use elsewhere.

// Used in the StartupMessage to indicate regular handshake.
pub const PROTOCOL_VERSION_NUMBER: i32 = 196608;
@@ -15,8 +14,20 @@ pub const CANCEL_REQUEST_CODE: i32 = 80877102;
// AuthenticationMD5Password
pub const MD5_ENCRYPTED_PASSWORD: i32 = 5;

// SASL
pub const SASL: i32 = 10;
pub const SASL_CONTINUE: i32 = 11;
pub const SASL_FINAL: i32 = 12;
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256";
pub const NONCE_LENGTH: usize = 24;

// AuthenticationOk
pub const AUTHENTICATION_SUCCESSFUL: i32 = 0;

// ErrorResponse: A code identifying the field type; if zero, this is the message terminator and no string follows.
pub const MESSAGE_TERMINATOR: u8 = 0;

//
// Data types
//
pub const _OID_INT8: i32 = 20; // bigint
@@ -1,12 +1,15 @@
/// Errors.

/// Various errors.
#[derive(Debug, PartialEq)]
pub enum Error {
    SocketError,
    // ClientDisconnected,
    ClientBadStartup,
    ProtocolSyncError,
    ServerError,
    // ServerTimeout,
    // DirtyServer,
    BadConfig,
    AllServersDown,
    ClientError,
    TlsError,
    StatementTimeout,
}
247 src/main.rs
@@ -1,4 +1,4 @@
// Copyright (c) 2022 Lev Kokotov <lev@levthe.dev>
// Copyright (c) 2022 Lev Kokotov <hi@levthe.dev>

// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
@@ -28,48 +28,53 @@ extern crate log;
extern crate md5;
extern crate num_cpus;
extern crate once_cell;
extern crate rustls_pemfile;
extern crate serde;
extern crate serde_derive;
extern crate sqlparser;
extern crate statsd;
extern crate tokio;
extern crate tokio_rustls;
extern crate toml;

use log::{error, info};
use log::{debug, error, info};
use parking_lot::Mutex;
use tokio::net::TcpListener;
use tokio::{
    signal,
    signal::unix::{signal as unix_signal, SignalKind},
    sync::mpsc,
};

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::broadcast;

mod admin;
mod client;
mod config;
mod constants;
mod errors;
mod messages;
mod pool;
mod prometheus;
mod query_router;
mod scram;
mod server;
mod sharding;
mod stats;
mod tls;

// Support for query cancellation: this maps our process_ids and
// secret keys to the backend's.
use config::get_config;
use pool::{ClientServerMap, ConnectionPool};
use stats::{Collector, Reporter};
use crate::config::{get_config, reload_config, VERSION};
use crate::pool::{ClientServerMap, ConnectionPool};
use crate::prometheus::start_metric_server;
use crate::stats::{Collector, Reporter, REPORTER};

/// Main!
#[tokio::main(worker_threads = 4)]
async fn main() {
    env_logger::init();
    info!("Welcome to PgCat! Meow.");
    info!("Welcome to PgCat! Meow. (Version {})", VERSION);

    // Prepare regexes
    if !query_router::QueryRouter::setup() {
        error!("Could not setup query router");
        return;
@@ -83,7 +88,6 @@ async fn main() {
        String::from("pgcat.toml")
    };

    // Prepare the config
    match config::parse(&config_file).await {
        Ok(_) => (),
        Err(err) => {
@@ -94,7 +98,25 @@ async fn main() {

    let config = get_config();

    if let Some(true) = config.general.enable_prometheus_exporter {
        let http_addr_str = format!(
            "{}:{}",
            config.general.host, config.general.prometheus_exporter_port
        );
        let http_addr = match SocketAddr::from_str(&http_addr_str) {
            Ok(addr) => addr,
            Err(err) => {
                error!("Invalid http address: {}", err);
                return;
            }
        };
        tokio::task::spawn(async move {
            start_metric_server(http_addr).await;
        });
    }

    let addr = format!("{}:{}", config.general.host, config.general.port);

    let listener = match TcpListener::bind(&addr).await {
        Ok(sock) => sock,
        Err(err) => {
@@ -104,112 +126,181 @@ async fn main() {
    };

    info!("Running on {}", addr);

    config.show();

    // Tracks which client is connected to which server for query cancellation.
    let client_server_map: ClientServerMap = Arc::new(Mutex::new(HashMap::new()));

    // Collect statistics and send them to StatsD
    let (tx, rx) = mpsc::channel(100);
    // Statistics reporting.
    let (tx, rx) = mpsc::channel(100_000);
    REPORTER.store(Arc::new(Reporter::new(tx.clone())));

    tokio::task::spawn(async move {
        let mut stats_collector = Collector::new(rx);
        stats_collector.collect().await;
    });

    let mut pool =
        ConnectionPool::from_config(client_server_map.clone(), Reporter::new(tx.clone())).await;

    let server_info = match pool.validate().await {
        Ok(info) => info,
    // Connection pool that allows to query all shards and replicas.
    match ConnectionPool::from_config(client_server_map.clone()).await {
        Ok(_) => (),
        Err(err) => {
            error!("Could not validate connection pool: {:?}", err);
            error!("Pool error: {:?}", err);
            return;
        }
    };

    // Statistics collector task.
    let collector_tx = tx.clone();

    // Save these for reloading
    let reload_client_server_map = client_server_map.clone();
    let autoreload_client_server_map = client_server_map.clone();

    tokio::task::spawn(async move {
        let mut stats_collector = Collector::new(rx, collector_tx);
        stats_collector.collect().await;
    });

    info!("Waiting for clients");

    // Main app runs here.
    tokio::task::spawn(async move {
        loop {
            let pool = pool.clone();
            let client_server_map = client_server_map.clone();
            let server_info = server_info.clone();
            let reporter = Reporter::new(tx.clone());
    let (shutdown_event_tx, mut shutdown_event_rx) = broadcast::channel::<()>(1);

            let (socket, addr) = match listener.accept().await {
                Ok((socket, addr)) => (socket, addr),
                Err(err) => {
                    error!("{:?}", err);
                    continue;
    let shutdown_event_tx_clone = shutdown_event_tx.clone();

    // Client connection loop.
    tokio::task::spawn(async move {
        // Create an event subscriber for the shutdown event; it is dropped when the shutdown event is broadcast.
        let mut listener_shutdown_event_rx = shutdown_event_tx_clone.subscribe();
        loop {
            let client_server_map = client_server_map.clone();

            // Listen for the shutdown event and a client connection at the same time.
            let (socket, addr) = tokio::select! {
                _ = listener_shutdown_event_rx.recv() => {
                    // Exits the client connection loop, which drops listener, listener_shutdown_event_rx and shutdown_event_tx_clone.
                    break;
                }

                listener_response = listener.accept() => {
                    match listener_response {
                        Ok((socket, addr)) => (socket, addr),
                        Err(err) => {
                            error!("{:?}", err);
                            continue;
                        }
                    }
                }
            };

            // Client goes to another thread, bye.
            // Used to signal shutdown
            let client_shutdown_handler_rx = shutdown_event_tx_clone.subscribe();

            // Used to signal that the task has completed
            let dummy_tx = shutdown_event_tx_clone.clone();

            // Handle client.
            tokio::task::spawn(async move {
                let start = chrono::offset::Utc::now().naive_utc();
                match client::Client::startup(socket, client_server_map, server_info, reporter)
                    .await

                match client::client_entrypoint(
                    socket,
                    client_server_map,
                    client_shutdown_handler_rx,
                )
                .await
                {
                    Ok(mut client) => {
                        info!("Client {:?} connected", addr);
                        match client.handle(pool).await {
                            Ok(()) => {
                                let duration = chrono::offset::Utc::now().naive_utc() - start;
                    Ok(_) => {
                        let duration = chrono::offset::Utc::now().naive_utc() - start;

                                info!(
                                    "Client {:?} disconnected, session duration: {}",
                                    addr,
                                    format_duration(&duration)
                                );
                            }

                            Err(err) => {
                                error!("Client disconnected with error: {:?}", err);
                                client.release();
                            }
                        }
                        info!(
                            "Client {:?} disconnected, session duration: {}",
                            addr,
                            format_duration(&duration)
                        );
                    }

                    Err(err) => {
                        error!("Client failed to login: {:?}", err);
                        debug!("Client disconnected with error {:?}", err);
                    }
                };
                // Drop this transmitter so the receiver knows that the task is completed.
                drop(dummy_tx);
            });
        }
    });

    // Reload config
    // Reload config:
    // kill -SIGHUP $(pgrep pgcat)
    tokio::task::spawn(async move {
        let mut stream = unix_signal(SignalKind::hangup()).unwrap();

        loop {
            stream.recv().await;

            info!("Reloading config");
            match config::parse("pgcat.toml").await {
                Ok(_) => {
                    get_config().show();
                }
                Err(err) => {
                    error!("{:?}", err);
                    return;
                }

            match reload_config(reload_client_server_map.clone()).await {
                Ok(_) => (),
                Err(_) => continue,
            };

            get_config().show();
        }
    });

    // Setup shut down sequence
    match signal::ctrl_c().await {
        Ok(()) => {
            info!("Shutting down...");
        }
    if config.general.autoreload {
        let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(15_000));

        Err(err) => {
            error!("Unable to listen for shutdown signal: {}", err);
        }
    };
        tokio::task::spawn(async move {
            info!("Config autoreloader started");

            loop {
                interval.tick().await;
                match reload_config(autoreload_client_server_map.clone()).await {
                    Ok(changed) => {
                        if changed {
                            get_config().show()
                        }
                    }
                    Err(_) => (),
                };
            }
        });
    }

    let mut term_signal = unix_signal(SignalKind::terminate()).unwrap();
    let mut interrupt_signal = unix_signal(SignalKind::interrupt()).unwrap();

    tokio::select! {
        // Initiate graceful shutdown sequence on sig int
        _ = interrupt_signal.recv() => {
            info!("Got SIGINT, waiting for client connection drain now");

            // Broadcast that client tasks need to finish
            shutdown_event_tx.send(()).unwrap();
            // Closes transmitter
            drop(shutdown_event_tx);

            // This is in a loop because the first event the receiver gets is the shutdown event itself.
            // That is not what we are waiting for; instead, we want recv() to return an error once all
            // senders are closed, which can only happen after the shutdown event has been delivered.
            loop {
                match tokio::time::timeout(
                    tokio::time::Duration::from_millis(config.general.shutdown_timeout),
                    shutdown_event_rx.recv(),
                )
                .await
                {
                    Ok(res) => match res {
                        Ok(_) => {}
                        Err(_) => break,
                    },
                    Err(_) => {
                        info!("Timed out while waiting for clients to shutdown");
                        break;
                    }
                }
            }
        },
        _ = term_signal.recv() => (),
    }

    info!("Shutting down...");
}
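
The drain sequence above relies on a property of tokio's broadcast channel: a receiver only gets Err(Closed) once every sender clone has been dropped, so each client task holding a clone doubles as a completion token. A stripped-down sketch of that pattern, with illustrative names (pgcat adds the shutdown_timeout on top of this):

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (shutdown_tx, mut shutdown_rx) = broadcast::channel::<()>(1);

    for task_id in 0..3 {
        let done_tx = shutdown_tx.clone(); // dropped when the task finishes
        let mut rx = shutdown_tx.subscribe();
        tokio::spawn(async move {
            let _ = rx.recv().await; // wait for the shutdown broadcast
            println!("task {} drained", task_id);
            drop(done_tx);
        });
    }

    shutdown_tx.send(()).unwrap(); // tell every task to wind down
    drop(shutdown_tx); // drop our own sender too

    // The first recv() yields the shutdown event itself; Err(Closed)
    // only arrives after all sender clones are gone.
    while shutdown_rx.recv().await.is_ok() {}
    println!("all tasks finished");
}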

/// Format chrono::Duration to be more human-friendly.
366 src/messages.rs
@@ -2,18 +2,36 @@
/// and handle TcpStream (TCP socket).
use bytes::{Buf, BufMut, BytesMut};
use md5::{Digest, Md5};
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
use tokio::net::{
    tcp::{OwnedReadHalf, OwnedWriteHalf},
    TcpStream,
};

use std::collections::HashMap;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

use crate::errors::Error;
use std::collections::HashMap;
use std::mem;

/// Postgres data type mappings
/// used in RowDescription ('T') message.
pub enum DataType {
    Text,
    Int4,
    Numeric,
}

impl From<&DataType> for i32 {
    fn from(data_type: &DataType) -> i32 {
        match data_type {
            DataType::Text => 25,
            DataType::Int4 => 23,
            DataType::Numeric => 1700,
        }
    }
}

/// Tell the client that authentication handshake completed successfully.
pub async fn auth_ok(stream: &mut TcpStream) -> Result<(), Error> {
pub async fn auth_ok<S>(stream: &mut S) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut auth_ok = BytesMut::with_capacity(9);

    auth_ok.put_u8(b'R');
@@ -23,13 +41,39 @@ pub async fn auth_ok(stream: &mut TcpStream) -> Result<(), Error> {
    Ok(write_all(stream, auth_ok).await?)
}
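
The recurring change in this file is swapping concrete `&mut TcpStream` (or `OwnedWriteHalf`) parameters for a generic `S: AsyncWrite + Unpin`, which lets one set of protocol helpers serve plain TCP, a TLS-wrapped stream, and in-memory test pipes alike. A minimal sketch of the idea (function name illustrative):

use tokio::io::{AsyncWrite, AsyncWriteExt};

// Accepts TcpStream, a rustls-wrapped stream, or an in-memory pipe.
async fn send_marker<S>(stream: &mut S) -> std::io::Result<()>
where
    S: AsyncWrite + Unpin,
{
    stream.write_all(b"N").await
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // An in-memory duplex stream stands in for a real socket here.
    let (mut client, _server) = tokio::io::duplex(64);
    send_marker(&mut client).await
}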

/// Generate md5 password challenge.
pub async fn md5_challenge<S>(stream: &mut S) -> Result<[u8; 4], Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    // let mut rng = rand::thread_rng();
    let salt: [u8; 4] = [
        rand::random(),
        rand::random(),
        rand::random(),
        rand::random(),
    ];

    let mut res = BytesMut::new();
    res.put_u8(b'R');
    res.put_i32(12);
    res.put_i32(5); // MD5
    res.put_slice(&salt[..]);

    write_all(stream, res).await?;
    Ok(salt)
}

/// Give the client the process_id and secret we generated
/// used in query cancellation.
pub async fn backend_key_data(
    stream: &mut TcpStream,
pub async fn backend_key_data<S>(
    stream: &mut S,
    backend_id: i32,
    secret_key: i32,
) -> Result<(), Error> {
) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut key_data = BytesMut::from(&b"K"[..]);
    key_data.put_i32(12);
    key_data.put_i32(backend_id);
@@ -38,7 +82,7 @@ pub async fn backend_key_data(
    Ok(write_all(stream, key_data).await?)
}

#[allow(dead_code)]
/// Construct a `Q`: Query message.
pub fn simple_query(query: &str) -> BytesMut {
    let mut res = BytesMut::from(&b"Q"[..]);
    let query = format!("{}\0", query);
@@ -50,8 +94,13 @@ pub fn simple_query(query: &str) -> BytesMut {
}

/// Tell the client we're ready for another query.
pub async fn ready_for_query(stream: &mut TcpStream) -> Result<(), Error> {
    let mut bytes = BytesMut::with_capacity(5);
pub async fn ready_for_query<S>(stream: &mut S) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut bytes = BytesMut::with_capacity(
        mem::size_of::<u8>() + mem::size_of::<i32>() + mem::size_of::<u8>(),
    );

    bytes.put_u8(b'Z');
    bytes.put_i32(5);
@@ -91,9 +140,8 @@ pub async fn startup(stream: &mut TcpStream, user: &str, database: &str) -> Resu
    }
}

/// Parse StartupMessage parameters.
/// e.g. user, database, application_name, etc.
pub fn parse_startup(mut bytes: BytesMut) -> Result<HashMap<String, String>, Error> {
/// Parse the params the server sends as a key/value format.
pub fn parse_params(mut bytes: BytesMut) -> Result<HashMap<String, String>, Error> {
    let mut result = HashMap::new();
    let mut buf = Vec::new();
    let mut tmp = String::new();
@@ -115,7 +163,7 @@ pub fn parse_startup(mut bytes: BytesMut) -> Result<HashMap<String, String>, Err

    // Expect pairs of name and value
    // and at least one pair to be present.
    if buf.len() % 2 != 0 && buf.len() >= 2 {
    if buf.len() % 2 != 0 || buf.len() < 2 {
        return Err(Error::ClientBadStartup);
    }

@@ -127,6 +175,14 @@ pub fn parse_startup(mut bytes: BytesMut) -> Result<HashMap<String, String>, Err
        i += 2;
    }

    Ok(result)
}

/// Parse StartupMessage parameters.
/// e.g. user, database, application_name, etc.
pub fn parse_startup(bytes: BytesMut) -> Result<HashMap<String, String>, Error> {
    let result = parse_params(bytes)?;

    // Minimum required parameters
    // I want to have the user at the very minimum, according to the protocol spec.
    if !result.contains_key("user") {
@@ -136,14 +192,8 @@ pub fn parse_startup(mut bytes: BytesMut) -> Result<HashMap<String, String>, Err
    Ok(result)
}
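
For reference, the byte layout parse_params walks (after the leading length and protocol version have been consumed) is a flat run of NUL-terminated strings alternating key and value, closed by an empty terminator. An illustrative, hand-built example; the key and value strings are invented:

use bytes::{BufMut, BytesMut};

fn main() {
    let mut body = BytesMut::new();
    body.put_slice(b"user\0alice\0"); // key, then value
    body.put_slice(b"database\0orders\0"); // key, then value
    body.put_u8(0); // message terminator
    // parse_params(body) yields {"user": "alice", "database": "orders"},
    // and parse_startup would accept it since "user" is present.
}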

/// Send password challenge response to the server.
/// This is the MD5 challenge.
pub async fn md5_password(
    stream: &mut TcpStream,
    user: &str,
    password: &str,
    salt: &[u8],
) -> Result<(), Error> {
/// Create md5 password hash given a salt.
pub fn md5_hash_password(user: &str, password: &str, salt: &[u8]) -> Vec<u8> {
    let mut md5 = Md5::new();

    // First pass
@@ -162,6 +212,22 @@ pub async fn md5_password(
        .collect::<Vec<u8>>();
    password.push(0);

    password
}
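
The two passes hinted at above implement the standard Postgres MD5 scheme: hash the password concatenated with the username, hex-encode it, then hash that hex string together with the four salt bytes, and prefix the result with "md5". A sketch of the same computation using the Digest API imported at the top of this file (the diff's md5_hash_password is equivalent, plus a trailing NUL byte for the wire):

use md5::{Digest, Md5};

fn md5_response(user: &str, password: &str, salt: &[u8]) -> String {
    // First pass: md5(password + user), hex-encoded.
    let mut hasher = Md5::new();
    hasher.update(password.as_bytes());
    hasher.update(user.as_bytes());
    let first = format!("{:x}", hasher.finalize());

    // Second pass: md5(first_hex + salt), hex-encoded, "md5"-prefixed.
    let mut hasher = Md5::new();
    hasher.update(first.as_bytes());
    hasher.update(salt);
    format!("md5{:x}", hasher.finalize())
}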

/// Send password challenge response to the server.
/// This is the MD5 challenge.
pub async fn md5_password<S>(
    stream: &mut S,
    user: &str,
    password: &str,
    salt: &[u8],
) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let password = md5_hash_password(user, password, salt);

    let mut message = BytesMut::with_capacity(password.len() as usize + 5);

    message.put_u8(b'p');
@@ -174,10 +240,10 @@ pub async fn md5_password(
/// Implements a response to our custom `SET SHARDING KEY`
/// and `SET SERVER ROLE` commands.
/// This tells the client we're ready for the next query.
pub async fn custom_protocol_response_ok(
    stream: &mut OwnedWriteHalf,
    message: &str,
) -> Result<(), Error> {
pub async fn custom_protocol_response_ok<S>(stream: &mut S, message: &str) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::with_capacity(25);

    let set_complete = BytesMut::from(&format!("{}\0", message)[..]);
@@ -188,18 +254,28 @@ pub async fn custom_protocol_response_ok(
    res.put_i32(len);
    res.put_slice(&set_complete[..]);

    // ReadyForQuery (idle)
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
    write_all_half(stream, res).await?;
    ready_for_query(stream).await
}

/// Send a custom error message to the client.
/// Tell the client we are ready for the next query and no rollback is necessary.
/// Docs on error codes: https://www.postgresql.org/docs/12/errcodes-appendix.html
pub async fn error_response(stream: &mut OwnedWriteHalf, message: &str) -> Result<(), Error> {
/// Docs on error codes: <https://www.postgresql.org/docs/12/errcodes-appendix.html>.
pub async fn error_response<S>(stream: &mut S, message: &str) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    error_response_terminal(stream, message).await?;
    ready_for_query(stream).await
}

/// Send a custom error message to the client.
/// Tell the client we are ready for the next query and no rollback is necessary.
/// Docs on error codes: <https://www.postgresql.org/docs/12/errcodes-appendix.html>.
pub async fn error_response_terminal<S>(stream: &mut S, message: &str) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut error = BytesMut::new();

    // Error level
@@ -221,110 +297,157 @@ pub async fn error_response(stream: &mut OwnedWriteHalf, message: &str) -> Resul
    // No more fields follow.
    error.put_u8(0);

    // Ready for query, no rollback needed (I = idle).
    let mut ready_for_query = BytesMut::new();
    // Compose the two message reply.
    let mut res = BytesMut::with_capacity(error.len() + 5);

    ready_for_query.put_u8(b'Z');
    ready_for_query.put_i32(5);
    ready_for_query.put_u8(b'I');
    res.put_u8(b'E');
    res.put_i32(error.len() as i32 + 4);
    res.put(error);

    Ok(write_all_half(stream, res).await?)
}

/// Tell the client their password is wrong.
pub async fn wrong_password<S>(stream: &mut S, user: &str) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut error = BytesMut::new();

    // Error level
    error.put_u8(b'S');
    error.put_slice(&b"FATAL\0"[..]);

    // Error level (non-translatable)
    error.put_u8(b'V');
    error.put_slice(&b"FATAL\0"[..]);

    // Error code: not sure how much this matters.
    error.put_u8(b'C');
    error.put_slice(&b"28P01\0"[..]); // system_error, see Appendix A.

    // The short error message.
    error.put_u8(b'M');
    error.put_slice(&format!("password authentication failed for user \"{}\"\0", user).as_bytes());

    // No more fields follow.
    error.put_u8(0);

    // Compose the two message reply.
    let mut res = BytesMut::with_capacity(error.len() + ready_for_query.len() + 5);
    let mut res = BytesMut::new();

    res.put_u8(b'E');
    res.put_i32(error.len() as i32 + 4);

    res.put(error);
    res.put(ready_for_query);

    Ok(write_all_half(stream, res).await?)
    write_all(stream, res).await
}

/// Respond to a SHOW SHARD command.
pub async fn show_response(
    stream: &mut OwnedWriteHalf,
    name: &str,
    value: &str,
) -> Result<(), Error> {
pub async fn show_response<S>(stream: &mut S, name: &str, value: &str) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    // A SELECT response consists of:
    // 1. RowDescription
    // 2. One or more DataRow
    // 3. CommandComplete
    // 4. ReadyForQuery

    // RowDescription
    let mut row_desc = BytesMut::new();

    // Number of columns: 1
    row_desc.put_i16(1);

    // Column name
    row_desc.put_slice(&format!("{}\0", name).as_bytes());

    // Doesn't belong to any table
    row_desc.put_i32(0);

    // Doesn't belong to any table
    row_desc.put_i16(0);

    // Text
    row_desc.put_i32(25);

    // Text size = variable (-1)
    row_desc.put_i16(-1);

    // Type modifier: none that I know
    row_desc.put_i32(0);

    // Format being used: text (0), binary (1)
    row_desc.put_i16(0);

    // DataRow
    let mut data_row = BytesMut::new();

    // Number of columns
    data_row.put_i16(1);

    // Size of the column content (length of the string really)
    data_row.put_i32(value.len() as i32);

    // The content
    data_row.put_slice(value.as_bytes());

    // CommandComplete
    let mut command_complete = BytesMut::new();

    // Number of rows returned (just one)
    command_complete.put_slice(&b"SELECT 1\0"[..]);

    // The final messages sent to the client
    let mut res = BytesMut::new();

    // RowDescription
    res.put(row_description(&vec![(name, DataType::Text)]));

    // DataRow
    res.put(data_row(&vec![value.to_string()]));

    // CommandComplete
    res.put(command_complete("SELECT 1"));

    write_all_half(stream, res).await?;
    ready_for_query(stream).await
}

pub fn row_description(columns: &Vec<(&str, DataType)>) -> BytesMut {
    let mut res = BytesMut::new();
    let mut row_desc = BytesMut::new();

    // How many columns we are storing.
    row_desc.put_i16(columns.len() as i16);

    for (name, data_type) in columns {
        // Column name
        row_desc.put_slice(&format!("{}\0", name).as_bytes());

        // Doesn't belong to any table
        row_desc.put_i32(0);

        // Doesn't belong to any table
        row_desc.put_i16(0);

        // Text
        row_desc.put_i32(data_type.into());

        // Text size = variable (-1)
        let type_size = match data_type {
            DataType::Text => -1,
            DataType::Int4 => 4,
            DataType::Numeric => -1,
        };

        row_desc.put_i16(type_size);

        // Type modifier: none that I know
        row_desc.put_i32(-1);

        // Format being used: text (0), binary (1)
        row_desc.put_i16(0);
    }

    res.put_u8(b'T');
    res.put_i32(row_desc.len() as i32 + 4);
    res.put(row_desc);

    // DataRow
    res
}

/// Create a DataRow message.
pub fn data_row(row: &Vec<String>) -> BytesMut {
    let mut res = BytesMut::new();
    let mut data_row = BytesMut::new();

    data_row.put_i16(row.len() as i16);

    for column in row {
        let column = column.as_bytes();
        data_row.put_i32(column.len() as i32);
        data_row.put_slice(&column);
    }

    res.put_u8(b'D');
    res.put_i32(data_row.len() as i32 + 4);
    res.put(data_row);

    // CommandComplete
    res
}

/// Create a CommandComplete message.
pub fn command_complete(command: &str) -> BytesMut {
    let cmd = BytesMut::from(format!("{}\0", command).as_bytes());
    let mut res = BytesMut::new();
    res.put_u8(b'C');
    res.put_i32(command_complete.len() as i32 + 4);
    res.put(command_complete);

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, res).await
    res.put_i32(cmd.len() as i32 + 4);
    res.put(cmd);
    res
}
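
Splitting the builders out this way means a complete single-row result is just concatenation, which is what the new show_response body does. A sketch using the functions above (illustrative composition, assuming this module's imports; not an addition to the diff):

// RowDescription + DataRow + CommandComplete for one text column;
// the caller then sends ready_for_query() separately.
fn one_row_select(name: &str, value: &str) -> BytesMut {
    let mut res = BytesMut::new();
    res.put(row_description(&vec![(name, DataType::Text)]));
    res.put(data_row(&vec![value.to_string()]));
    res.put(command_complete("SELECT 1"));
    res
}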

/// Write all data in the buffer to the TcpStream.
pub async fn write_all(stream: &mut TcpStream, buf: BytesMut) -> Result<(), Error> {
pub async fn write_all<S>(stream: &mut S, buf: BytesMut) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    match stream.write_all(&buf).await {
        Ok(_) => Ok(()),
        Err(_) => return Err(Error::SocketError),
@@ -332,7 +455,10 @@ pub async fn write_all(stream: &mut TcpStream, buf: BytesMut) -> Result<(), Erro
}

/// Write all the data in the buffer to the TcpStream, write owned half (see mpsc).
pub async fn write_all_half(stream: &mut OwnedWriteHalf, buf: BytesMut) -> Result<(), Error> {
pub async fn write_all_half<S>(stream: &mut S, buf: BytesMut) -> Result<(), Error>
where
    S: tokio::io::AsyncWrite + std::marker::Unpin,
{
    match stream.write_all(&buf).await {
        Ok(_) => Ok(()),
        Err(_) => return Err(Error::SocketError),
@@ -340,7 +466,10 @@ pub async fn write_all_half(stream: &mut OwnedWriteHalf, buf: BytesMut) -> Resul
}

/// Read a complete message from the socket.
pub async fn read_message(stream: &mut BufReader<OwnedReadHalf>) -> Result<BytesMut, Error> {
pub async fn read_message<S>(stream: &mut S) -> Result<BytesMut, Error>
where
    S: tokio::io::AsyncRead + std::marker::Unpin,
{
    let code = match stream.read_u8().await {
        Ok(code) => code,
        Err(_) => return Err(Error::SocketError),
@@ -366,3 +495,20 @@ pub async fn read_message(stream: &mut BufReader<OwnedReadHalf>) -> Result<Bytes

    Ok(bytes)
}

/// Create a ParameterStatus ('S') message with a key/value pair.
pub fn server_paramater_message(key: &str, value: &str) -> BytesMut {
    let mut server_info = BytesMut::new();

    let null_byte_size = 1;
    let len: usize =
        mem::size_of::<i32>() + key.len() + null_byte_size + value.len() + null_byte_size;

    server_info.put_slice("S".as_bytes());
    server_info.put_i32(len.try_into().unwrap());
    server_info.put_slice(key.as_bytes());
    server_info.put_bytes(0, 1);
    server_info.put_slice(value.as_bytes());
    server_info.put_bytes(0, 1);

    return server_info;
}
434 src/pool.rs
@@ -1,109 +1,197 @@
/// Pooling and failover and banlist.
use arc_swap::ArcSwap;
use async_trait::async_trait;
use bb8::{ManageConnection, Pool, PooledConnection};
use bytes::BytesMut;
use chrono::naive::NaiveDateTime;
use log::{error, info, warn};

use crate::config::{get_config, Address, Role, User};
use crate::errors::Error;
use crate::server::Server;
use crate::stats::Reporter;

use log::{debug, error, info, warn};
use once_cell::sync::Lazy;
use parking_lot::{Mutex, RwLock};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::sync::Arc;
use std::time::Instant;

// Banlist: bad servers go in here.
pub type BanList = Arc<Mutex<Vec<HashMap<Address, NaiveDateTime>>>>;
use crate::config::{get_config, Address, Role, Shard, User};
use crate::errors::Error;

use crate::server::Server;
use crate::stats::{get_reporter, Reporter};

pub type BanList = Arc<RwLock<Vec<HashMap<Address, NaiveDateTime>>>>;
pub type ClientServerMap = Arc<Mutex<HashMap<(i32, i32), (i32, i32, String, String)>>>;
pub type PoolMap = HashMap<(String, String), ConnectionPool>;
/// The connection pool, globally available.
/// This is atomic and safe and read-optimized.
/// The pool is recreated dynamically when the config is reloaded.
pub static POOLS: Lazy<ArcSwap<PoolMap>> = Lazy::new(|| ArcSwap::from_pointee(HashMap::default()));

#[derive(Clone, Debug)]
pub struct PoolSettings {
    pub pool_mode: String,
    pub shards: HashMap<String, Shard>,
    pub user: User,
    pub default_role: String,
    pub query_parser_enabled: bool,
    pub primary_reads_enabled: bool,
    pub sharding_function: String,
}
impl Default for PoolSettings {
    fn default() -> PoolSettings {
        PoolSettings {
            pool_mode: String::from("transaction"),
            shards: HashMap::from([(String::from("1"), Shard::default())]),
            user: User::default(),
            default_role: String::from("any"),
            query_parser_enabled: false,
            primary_reads_enabled: true,
            sharding_function: "pg_bigint_hash".to_string(),
        }
    }
}
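
The POOLS static is what makes config reloads cheap for clients: every lookup loads an atomic snapshot, and a reload builds a whole new map off to the side and swaps it in with one store. The same pattern in isolation (illustrative example outside pgcat):

use arc_swap::ArcSwap;
use once_cell::sync::Lazy;
use std::collections::HashMap;
use std::sync::Arc;

static SETTINGS: Lazy<ArcSwap<HashMap<String, u32>>> =
    Lazy::new(|| ArcSwap::from_pointee(HashMap::new()));

fn main() {
    // Writer: build the replacement off to the side, then swap atomically.
    let mut next = HashMap::new();
    next.insert("pool_size".to_string(), 21);
    SETTINGS.store(Arc::new(next));

    // Reader: lock-free; load() hands back a cheap Arc-like guard.
    let snapshot = SETTINGS.load();
    assert_eq!(snapshot.get("pool_size"), Some(&21));
}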

/// The globally accessible connection pool.
#[derive(Clone, Debug, Default)]
pub struct ConnectionPool {
    /// The pools handled internally by bb8.
    databases: Vec<Vec<Pool<ServerPool>>>,

    /// The addresses (host, port, role) to handle
    /// failover and load balancing deterministically.
    addresses: Vec<Vec<Address>>,
    round_robin: usize,

    /// List of banned addresses (see above)
    /// that should not be queried.
    banlist: BanList,

    /// The statistics aggregator runs in a separate task
    /// and receives stats from clients, servers, and the pool.
    stats: Reporter,

    /// The server information (K messages) have to be passed to the
    /// clients on startup. We pre-connect to all shards and replicas
    /// on pool creation and save the K messages here.
    server_info: BytesMut,

    pub settings: PoolSettings,
}

impl ConnectionPool {
    /// Construct the connection pool from a config file.
    pub async fn from_config(
        client_server_map: ClientServerMap,
        stats: Reporter,
    ) -> ConnectionPool {
    /// Construct the connection pool from the configuration.
    pub async fn from_config(client_server_map: ClientServerMap) -> Result<(), Error> {
        let config = get_config();
        let mut shards = Vec::new();
        let mut addresses = Vec::new();
        let mut banlist = Vec::new();
        let mut shard_ids = config
            .shards
            .clone()
            .into_keys()
            .map(|x| x.to_string())
            .collect::<Vec<String>>();
        shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap());
        let mut new_pools = PoolMap::default();

        for shard_idx in shard_ids {
            let shard = &config.shards[&shard_idx];
            let mut pools = Vec::new();
            let mut replica_addresses = Vec::new();
        let mut address_id = 0;
        for (pool_name, pool_config) in &config.pools {
            for (_user_index, user_info) in &pool_config.users {
                let mut shards = Vec::new();
                let mut addresses = Vec::new();
                let mut banlist = Vec::new();
                let mut shard_ids = pool_config
                    .shards
                    .clone()
                    .into_keys()
                    .map(|x| x.to_string())
                    .collect::<Vec<String>>();

            for server in shard.servers.iter() {
                let role = match server.2.as_ref() {
                    "primary" => Role::Primary,
                    "replica" => Role::Replica,
                    _ => {
                        error!("Config error: server role can be 'primary' or 'replica', have: '{}'. Defaulting to 'replica'.", server.2);
                        Role::Replica
                // Sort by shard number to ensure consistency.
                shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap());

                for shard_idx in shard_ids {
                    let shard = &pool_config.shards[&shard_idx];
                    let mut pools = Vec::new();
                    let mut servers = Vec::new();
                    let mut replica_number = 0;

                    for server in shard.servers.iter() {
                        let role = match server.2.as_ref() {
                            "primary" => Role::Primary,
                            "replica" => Role::Replica,
                            _ => {
                                error!("Config error: server role can be 'primary' or 'replica', have: '{}'. Defaulting to 'replica'.", server.2);
                                Role::Replica
                            }
                        };

                        let address = Address {
                            id: address_id,
                            database: shard.database.clone(),
                            host: server.0.clone(),
                            port: server.1.to_string(),
                            role: role,
                            replica_number,
                            shard: shard_idx.parse::<usize>().unwrap(),
                            username: user_info.username.clone(),
                            poolname: pool_name.clone(),
                        };

                        address_id += 1;

                        if role == Role::Replica {
                            replica_number += 1;
                        }

                        let manager = ServerPool::new(
                            address.clone(),
                            user_info.clone(),
                            &shard.database,
                            client_server_map.clone(),
                            get_reporter(),
                        );

                        let pool = Pool::builder()
                            .max_size(user_info.pool_size)
                            .connection_timeout(std::time::Duration::from_millis(
                                config.general.connect_timeout,
                            ))
                            .test_on_check_out(false)
                            .build(manager)
                            .await
                            .unwrap();

                        pools.push(pool);
                        servers.push(address);
                    }

                    shards.push(pools);
                    addresses.push(servers);
                    banlist.push(HashMap::new());
                }

                assert_eq!(shards.len(), addresses.len());

                let mut pool = ConnectionPool {
                    databases: shards,
                    addresses: addresses,
                    banlist: Arc::new(RwLock::new(banlist)),
                    stats: get_reporter(),
                    server_info: BytesMut::new(),
                    settings: PoolSettings {
                        pool_mode: pool_config.pool_mode.clone(),
                        shards: pool_config.shards.clone(),
                        user: user_info.clone(),
                        default_role: pool_config.default_role.clone(),
                        query_parser_enabled: pool_config.query_parser_enabled.clone(),
                        primary_reads_enabled: pool_config.primary_reads_enabled,
                        sharding_function: pool_config.sharding_function.clone(),
                    },
                };

                // Connect to the servers to make sure pool configuration is valid
                // before setting it globally.
                match pool.validate().await {
                    Ok(_) => (),
                    Err(err) => {
                        error!("Could not validate connection pool: {:?}", err);
                        return Err(err);
                    }
                };

                let address = Address {
                    host: server.0.clone(),
                    port: server.1.to_string(),
                    role: role,
                    shard: shard_idx.parse::<usize>().unwrap(),
                };

                let manager = ServerPool::new(
                    address.clone(),
                    config.user.clone(),
                    &shard.database,
                    client_server_map.clone(),
                    stats.clone(),
                );

                let pool = Pool::builder()
                    .max_size(config.general.pool_size)
                    .connection_timeout(std::time::Duration::from_millis(
                        config.general.connect_timeout,
                    ))
                    .test_on_check_out(false)
                    .build(manager)
                    .await
                    .unwrap();

                pools.push(pool);
                replica_addresses.push(address);
                new_pools.insert((pool_name.clone(), user_info.username.clone()), pool);
            }

            shards.push(pools);
            addresses.push(replica_addresses);
            banlist.push(HashMap::new());
        }

        assert_eq!(shards.len(), addresses.len());
        let address_len = addresses.len();
        POOLS.store(Arc::new(new_pools.clone()));

        ConnectionPool {
            databases: shards,
            addresses: addresses,
            round_robin: rand::random::<usize>() % address_len, // Start at a random replica
            banlist: Arc::new(Mutex::new(banlist)),
            stats: stats,
        }
        Ok(())
    }

    /// Connect to all shards and grab server information.
@@ -111,12 +199,18 @@ impl ConnectionPool {
    /// when they connect.
    /// This also warms up the pool for clients that connect when
    /// the pooler starts up.
    pub async fn validate(&mut self) -> Result<BytesMut, Error> {
    async fn validate(&mut self) -> Result<(), Error> {
        let mut server_infos = Vec::new();
        let stats = self.stats.clone();

        for shard in 0..self.shards() {
            let mut round_robin = 0;

            for _ in 0..self.servers(shard) {
                let connection = match self.get(shard, None).await {
                // To keep stats consistent.
                let fake_process_id = 0;

                let connection = match self.get(shard, None, fake_process_id, round_robin).await {
                    Ok(conn) => conn,
                    Err(err) => {
                        error!("Shard {} down or misconfigured: {:?}", shard, err);
@@ -124,11 +218,25 @@ impl ConnectionPool {
                    }
                };

                let mut proxy = connection.0;
                let _address = connection.1;
                let server = &mut *proxy;
                let proxy = connection.0;
                let address = connection.1;
                let server = &*proxy;
                let server_info = server.server_info();

                server_infos.push(server.server_info());
                stats.client_disconnecting(fake_process_id, address.id);

                if server_infos.len() > 0 {
                    // Compare against the last server checked.
                    if server_info != server_infos[server_infos.len() - 1] {
                        warn!(
                            "{:?} has different server configuration than the last server",
                            address
                        );
                    }
                }

                server_infos.push(server_info);
                round_robin += 1;
            }
        }

@@ -138,14 +246,18 @@ impl ConnectionPool {
            return Err(Error::AllServersDown);
        }

        Ok(server_infos[0].clone())
        self.server_info = server_infos[0].clone();

        Ok(())
    }

    /// Get a connection from the pool.
    pub async fn get(
        &mut self,
        shard: usize,
        role: Option<Role>,
        &self,
        shard: usize,           // shard number
        role: Option<Role>,     // primary or replica
        process_id: i32,        // client id
        mut round_robin: usize, // round robin offset
    ) -> Result<(PooledConnection<'_, ServerPool>, Address), Error> {
        let now = Instant::now();
        let addresses = &self.addresses[shard];
@@ -161,6 +273,8 @@ impl ConnectionPool {
            _ => addresses.len(),
        };

        debug!("Allowed attempts for {:?}: {}", role, allowed_attempts);

        let exists = match role {
            Some(role) => addresses.iter().filter(|addr| addr.role == role).count() > 0,
            None => true,
@@ -171,12 +285,14 @@ impl ConnectionPool {
            return Err(Error::BadConfig);
        }

        let healthcheck_timeout = get_config().general.healthcheck_timeout;
        let healthcheck_delay = get_config().general.healthcheck_delay as u128;

        while allowed_attempts > 0 {
            // Round-robin each client's queries.
            // If a client only sends one query and then disconnects, it doesn't matter
            // which replica it'll go to.
            self.round_robin += 1;
            let index = self.round_robin % addresses.len();
            // Round-robin replicas.
            round_robin += 1;

            let index = round_robin % addresses.len();
            let address = &addresses[index];

            // Make sure you're getting a primary or a replica
@@ -188,55 +304,79 @@ impl ConnectionPool {

            allowed_attempts -= 1;

            // Don't attempt to connect to banned servers.
            if self.is_banned(address, shard, role) {
                continue;
            }

            // Indicate we're waiting on a server connection from a pool.
            self.stats.client_waiting(process_id, address.id);

            // Check if we can connect
            let mut conn = match self.databases[shard][index].get().await {
                Ok(conn) => conn,
                Err(err) => {
                    error!("Banning replica {}, error: {:?}", index, err);
                    self.ban(address, shard);
                    self.ban(address, shard, process_id);
                    self.stats.client_disconnecting(process_id, address.id);
                    self.stats
                        .checkout_time(now.elapsed().as_micros(), process_id, address.id);
                    continue;
                }
            };

            // // Check if this server is alive with a health check
            // // Check if this server is alive with a health check.
            let server = &mut *conn;
            let healthcheck_timeout = get_config().general.healthcheck_timeout;

            self.stats.server_tested(server.process_id());
            // Returns an error if the timestamp is ahead of the current system time, which should never happen.
            let require_healthcheck =
                server.last_activity().elapsed().unwrap().as_millis() > healthcheck_delay;

            if !require_healthcheck {
                self.stats
                    .checkout_time(now.elapsed().as_micros(), process_id, address.id);
                self.stats.server_active(conn.process_id(), address.id);
                return Ok((conn, address.clone()));
            }

            debug!("Running health check on server {:?}", address);

            self.stats.server_tested(server.process_id(), address.id);

            match tokio::time::timeout(
                tokio::time::Duration::from_millis(healthcheck_timeout),
                server.query("SELECT 1"),
                server.query(";"),
            )
            .await
            {
                // Check if health check succeeded
                // Check if health check succeeded.
                Ok(res) => match res {
                    Ok(_) => {
                        self.stats.checkout_time(now.elapsed().as_micros());
                        self.stats.server_idle(conn.process_id());
                        self.stats
                            .checkout_time(now.elapsed().as_micros(), process_id, address.id);
                        self.stats.server_active(conn.process_id(), address.id);
                        return Ok((conn, address.clone()));
                    }

                    // Health check failed.
                    Err(_) => {
                        error!("Banning replica {} because of failed health check", index);

                        // Don't leave a bad connection in the pool.
                        server.mark_bad();

                        self.ban(address, shard);
                        self.ban(address, shard, process_id);
                        continue;
                    }
                },

                // Health check never came back, database is really really down
                // Health check timed out.
                Err(_) => {
                    error!("Banning replica {} because of health check timeout", index);
                    // Don't leave a bad connection in the pool.
                    server.mark_bad();

                    self.ban(address, shard);
                    self.ban(address, shard, process_id);
                    continue;
                }
            }
@@ -248,17 +388,21 @@ impl ConnectionPool {
    /// Ban an address (i.e. replica). It will no longer serve
    /// traffic for any new transactions. Existing transactions on that replica
    /// will finish successfully or error out to the clients.
    pub fn ban(&self, address: &Address, shard: usize) {
    pub fn ban(&self, address: &Address, shard: usize, process_id: i32) {
        self.stats.client_disconnecting(process_id, address.id);
        self.stats
            .checkout_time(Instant::now().elapsed().as_micros(), process_id, address.id);

        error!("Banning {:?}", address);
        let now = chrono::offset::Utc::now().naive_utc();
        let mut guard = self.banlist.lock().unwrap();
        let mut guard = self.banlist.write();
        guard[shard].insert(address.clone(), now);
    }

    /// Clear the replica to receive traffic again. Takes effect immediately
    /// for all new transactions.
    pub fn _unban(&self, address: &Address, shard: usize) {
        let mut guard = self.banlist.lock().unwrap();
        let mut guard = self.banlist.write();
        guard[shard].remove(address);
    }

@@ -274,12 +418,14 @@ impl ConnectionPool {
            Some(Role::Primary) => return false, // Primary cannot be banned.
        };

        // If you're not asking for the primary,
        // all databases are treated as replicas.
        let mut guard = self.banlist.lock().unwrap();
        debug!("Available targets for {:?}: {}", role, replicas_available);

        let guard = self.banlist.read();

        // Everything is banned = nothing is banned.
        if guard[shard].len() == replicas_available {
            drop(guard);
            let mut guard = self.banlist.write();
            guard[shard].clear();
            drop(guard);
            warn!("Unbanning all replicas.");
@@ -291,28 +437,63 @@ impl ConnectionPool {
            Some(timestamp) => {
                let now = chrono::offset::Utc::now().naive_utc();
                let config = get_config();

                // Ban expired.
                if now.timestamp() - timestamp.timestamp() > config.general.ban_time {
                    drop(guard);
                    warn!("Unbanning {:?}", address);
                    let mut guard = self.banlist.write();
                    guard[shard].remove(address);
                    false
                } else {
                    debug!("{:?} is banned", address);
                    true
                }
            }

            None => false,
            None => {
                debug!("{:?} is ok", address);
                false
            }
        }
    }

    /// Get the number of configured shards.
    pub fn shards(&self) -> usize {
        self.databases.len()
    }

    /// Get the number of servers (primary and replicas)
    /// configured for a shard.
    pub fn servers(&self, shard: usize) -> usize {
        self.addresses[shard].len()
    }

    /// Get the total number of servers (databases) we are connected to.
    pub fn databases(&self) -> usize {
        let mut databases = 0;
        for shard in 0..self.shards() {
            databases += self.servers(shard);
        }
        databases
    }

    /// Get pool state for a particular shard server as reported by bb8.
    pub fn pool_state(&self, shard: usize, server: usize) -> bb8::State {
        self.databases[shard][server].state()
    }

    /// Get the address information for a shard server.
    pub fn address(&self, shard: usize, server: usize) -> &Address {
        &self.addresses[shard][server]
    }

    pub fn server_info(&self) -> BytesMut {
        self.server_info.clone()
    }
}

/// Wrapper for the bb8 connection pool.
pub struct ServerPool {
    address: Address,
    user: User,
@@ -348,14 +529,16 @@ impl ManageConnection for ServerPool {
    async fn connect(&self) -> Result<Self::Connection, Self::Error> {
        info!(
            "Creating a new connection to {:?} using user {:?}",
            self.address, self.user.name
            self.address.name(),
            self.user.username
        );

        // Put a temporary process_id into the stats
        // for server login.
        let process_id = rand::random::<i32>();
        self.stats.server_login(process_id);
        self.stats.server_login(process_id, self.address.id);

        // Connect to the PostgreSQL server.
        match Server::startup(
            &self.address,
            &self.user,
@@ -367,12 +550,12 @@ impl ManageConnection for ServerPool {
        {
            Ok(conn) => {
                // Remove the temporary process_id from the stats.
                self.stats.server_disconnecting(process_id);
                self.stats.server_disconnecting(process_id, self.address.id);
                Ok(conn)
            }
            Err(err) => {
                // Remove the temporary process_id from the stats.
                self.stats.server_disconnecting(process_id);
                self.stats.server_disconnecting(process_id, self.address.id);
                Err(err)
            }
        }
@@ -388,3 +571,22 @@ impl ManageConnection for ServerPool {
        conn.is_bad()
    }
}

/// Get the connection pool
pub fn get_pool(db: String, user: String) -> Option<ConnectionPool> {
    match get_all_pools().get(&(db, user)) {
        Some(pool) => Some(pool.clone()),
        None => None,
    }
}

pub fn get_number_of_addresses() -> usize {
    get_all_pools()
        .iter()
        .map(|(_, pool)| pool.databases())
        .sum()
}

pub fn get_all_pools() -> HashMap<(String, String), ConnectionPool> {
    return (*(*POOLS.load())).clone();
}
210 src/prometheus.rs (new file)
@@ -0,0 +1,210 @@
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Method, Request, Response, Server, StatusCode};
use log::{error, info, warn};
use phf::phf_map;
use std::collections::HashMap;
use std::fmt;
use std::net::SocketAddr;

use crate::config::Address;
use crate::pool::get_all_pools;
use crate::stats::get_stats;

struct MetricHelpType {
    help: &'static str,
    ty: &'static str,
}

// reference for metric types: https://prometheus.io/docs/concepts/metric_types/
// counters only increase
// gauges can arbitrarily increase or decrease
static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = phf_map! {
    "total_query_count" => MetricHelpType {
        help: "Number of queries sent by all clients",
        ty: "counter",
    },
    "total_query_time" => MetricHelpType {
        help: "Total amount of time for queries to execute",
        ty: "counter",
    },
    "total_received" => MetricHelpType {
        help: "Number of bytes received from the server",
        ty: "counter",
    },
    "total_sent" => MetricHelpType {
        help: "Number of bytes sent to the server",
        ty: "counter",
    },
    "total_xact_count" => MetricHelpType {
        help: "Total number of transactions started by the client",
        ty: "counter",
    },
    "total_xact_time" => MetricHelpType {
        help: "Total amount of time for all transactions to execute",
        ty: "counter",
    },
    "total_wait_time" => MetricHelpType {
        help: "Total time client waited for a server connection",
        ty: "counter",
    },
    "avg_query_count" => MetricHelpType {
        help: "Average of total_query_count every 15 seconds",
        ty: "gauge",
    },
    "avg_query_time" => MetricHelpType {
        help: "Average time taken for queries to execute every 15 seconds",
        ty: "gauge",
    },
    "avg_recv" => MetricHelpType {
        help: "Average of total_received bytes every 15 seconds",
        ty: "gauge",
    },
    "avg_sent" => MetricHelpType {
        help: "Average of total_sent bytes every 15 seconds",
        ty: "gauge",
    },
    "avg_xact_count" => MetricHelpType {
        help: "Average of total_xact_count every 15 seconds",
        ty: "gauge",
    },
    "avg_xact_time" => MetricHelpType {
        help: "Average of total_xact_time every 15 seconds",
        ty: "gauge",
    },
    "avg_wait_time" => MetricHelpType {
        help: "Average of total_wait_time every 15 seconds",
        ty: "gauge",
    },
    "maxwait_us" => MetricHelpType {
        help: "The time a client waited for a server connection in microseconds",
        ty: "gauge",
    },
    "maxwait" => MetricHelpType {
        help: "The time a client waited for a server connection in seconds",
        ty: "gauge",
    },
    "cl_waiting" => MetricHelpType {
        help: "How many clients are waiting for a connection from the pool",
        ty: "gauge",
    },
    "cl_active" => MetricHelpType {
        help: "How many clients are actively communicating with a server",
        ty: "gauge",
    },
    "cl_idle" => MetricHelpType {
        help: "How many clients are idle",
        ty: "gauge",
    },
    "sv_idle" => MetricHelpType {
        help: "How many server connections are idle",
        ty: "gauge",
    },
    "sv_active" => MetricHelpType {
        help: "How many server connections are actively communicating with a client",
        ty: "gauge",
    },
    "sv_login" => MetricHelpType {
        help: "How many server connections are currently being created",
        ty: "gauge",
    },
    "sv_tested" => MetricHelpType {
        help: "How many server connections are currently waiting on a health check to succeed",
        ty: "gauge",
    },
};

struct PrometheusMetric {
    name: String,
    help: String,
    ty: String,
    labels: HashMap<&'static str, String>,
    value: i64,
}

impl fmt::Display for PrometheusMetric {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let formatted_labels = self
            .labels
            .iter()
            .map(|(key, value)| format!("{}=\"{}\"", key, value))
            .collect::<Vec<_>>()
            .join(",");
        write!(
            f,
            "# HELP {name} {help}\n# TYPE {name} {ty}\n{name}{{{formatted_labels}}} {value}\n",
            name = format_args!("pgcat_{}", self.name),
            help = self.help,
            ty = self.ty,
            formatted_labels = formatted_labels,
            value = self.value
        )
    }
}
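
Rendered through that format string, each sample comes out in the standard Prometheus text exposition format, one HELP/TYPE pair per metric. An illustrative rendering (the label values and the 42 are made up; label order is unspecified since the labels come from a HashMap):

# HELP pgcat_total_query_count Number of queries sent by all clients
# TYPE pgcat_total_query_count counter
pgcat_total_query_count{host="127.0.0.1",shard="0",role="primary",database="postgres"} 42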
|
||||
|
||||
impl PrometheusMetric {
|
||||
fn new(address: &Address, name: &str, value: i64) -> Option<PrometheusMetric> {
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("host", address.host.clone());
|
||||
labels.insert("shard", address.shard.to_string());
|
||||
labels.insert("role", address.role.to_string());
|
||||
labels.insert("database", address.database.to_string());
|
||||
|
||||
METRIC_HELP_AND_TYPES_LOOKUP
|
||||
.get(name)
|
||||
.map(|metric| PrometheusMetric {
|
||||
name: name.to_owned(),
|
||||
help: metric.help.to_owned(),
|
||||
ty: metric.ty.to_owned(),
|
||||
labels,
|
||||
value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hyper::http::Error> {
|
||||
match (request.method(), request.uri().path()) {
|
||||
(&Method::GET, "/metrics") => {
|
||||
let stats = get_stats();
|
||||
|
||||
let mut lines = Vec::new();
|
||||
for (_, pool) in get_all_pools() {
|
||||
for shard in 0..pool.shards() {
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
if let Some(address_stats) = stats.get(&address.id) {
|
||||
for (key, value) in address_stats.iter() {
|
||||
if let Some(prometheus_metric) =
|
||||
PrometheusMetric::new(address, key, *value)
|
||||
{
|
||||
lines.push(prometheus_metric.to_string());
|
||||
} else {
|
||||
warn!("Metric {} not implemented for {}", key, address.name());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Response::builder()
|
||||
.header("content-type", "text/plain; version=0.0.4")
|
||||
.body(lines.join("\n").into())
|
||||
}
|
||||
_ => Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body("".into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_metric_server(http_addr: SocketAddr) {
|
||||
let http_service_factory =
|
||||
make_service_fn(|_conn| async { Ok::<_, hyper::Error>(service_fn(prometheus_stats)) });
|
||||
let server = Server::bind(&http_addr.into()).serve(http_service_factory);
|
||||
info!(
|
||||
"Exposing prometheus metrics on http://{}/metrics.",
|
||||
http_addr
|
||||
);
|
||||
if let Err(e) = server.await {
|
||||
error!("Failed to run HTTP server: {}.", e);
|
||||
}
|
||||
}
|
||||
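For reference, each PrometheusMetric above renders in the Prometheus text exposition format (version 0.0.4, as advertised in the content-type header). A sketch of one gauge as it would appear on /metrics, with hypothetical label values:

    # HELP pgcat_sv_idle How many server connections are idle
    # TYPE pgcat_sv_idle gauge
    pgcat_sv_idle{host="10.0.0.1",shard="0",role="primary",database="postgres"} 2
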
@@ -1,23 +1,29 @@
use crate::config::{get_config, Role};
use crate::sharding::Sharder;
/// Route queries automatically based on explicitly requested
/// or implied query characteristics.
use bytes::{Buf, BytesMut};
use log::{debug, error};
use once_cell::sync::OnceCell;
use regex::RegexSet;
use regex::{Regex, RegexSet};
use sqlparser::ast::Statement::{Query, StartTransaction};
use sqlparser::dialect::PostgreSqlDialect;
use sqlparser::parser::Parser;

const CUSTOM_SQL_REGEXES: [&str; 5] = [
    r"(?i)SET SHARDING KEY TO '[0-9]+'",
    r"(?i)SET SHARD TO '[0-9]+'",
    r"(?i)SHOW SHARD",
    r"(?i)SET SERVER ROLE TO '(PRIMARY|REPLICA|ANY|AUTO|DEFAULT)'",
    r"(?i)SHOW SERVER ROLE",
use crate::config::Role;
use crate::pool::PoolSettings;
use crate::sharding::{Sharder, ShardingFunction};

/// Regexes used to parse custom commands.
const CUSTOM_SQL_REGEXES: [&str; 7] = [
    r"(?i)^ *SET SHARDING KEY TO '?([0-9]+)'? *;? *$",
    r"(?i)^ *SET SHARD TO '?([0-9]+|ANY)'? *;? *$",
    r"(?i)^ *SHOW SHARD *;? *$",
    r"(?i)^ *SET SERVER ROLE TO '(PRIMARY|REPLICA|ANY|AUTO|DEFAULT)' *;? *$",
    r"(?i)^ *SHOW SERVER ROLE *;? *$",
    r"(?i)^ *SET PRIMARY READS TO '?(on|off|default)'? *;? *$",
    r"(?i)^ *SHOW PRIMARY READS *;? *$",
];

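A minimal sketch of how the new anchored patterns behave, using the regex crate directly (the input strings are hypothetical; they mirror the test cases further down):

    use regex::Regex;

    fn main() {
        let re = Regex::new(r"(?i)^ *SET SHARDING KEY TO '?([0-9]+)'? *;? *$").unwrap();

        // The first capture group extracts the key, quoted or not,
        // and a trailing semicolon is tolerated.
        let caps = re.captures("SET SHARDING KEY TO 11235;").unwrap();
        assert_eq!(&caps[1], "11235");

        // The ^...$ anchors reject commands embedded inside a larger query.
        assert!(!re.is_match("SELECT 'set sharding key to 5'"));
    }
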
/// Custom commands.
#[derive(PartialEq, Debug)]
pub enum Command {
    SetShardingKey,
@@ -25,32 +31,35 @@ pub enum Command {
    ShowShard,
    SetServerRole,
    ShowServerRole,
    SetPrimaryReads,
    ShowPrimaryReads,
}

/// Quickly test for match when a query is received.
static CUSTOM_SQL_REGEX_SET: OnceCell<RegexSet> = OnceCell::new();

// Get the value inside the custom command.
static CUSTOM_SQL_REGEX_LIST: OnceCell<Vec<Regex>> = OnceCell::new();

/// The query router.
pub struct QueryRouter {
    // By default, queries go here, unless we have better information
    // about what the client wants.
    default_server_role: Option<Role>,

    // Number of shards in the cluster.
    shards: usize,

    // Which shard we should be talking to right now.
    /// Which shard we should be talking to right now.
    active_shard: Option<usize>,

    // Should we be talking to a primary or a replica?
    /// Which server should we be talking to.
    active_role: Option<Role>,

    // Include the primary into the replica pool?
    /// Should we try to parse queries to route them to replicas or primary automatically
    query_parser_enabled: bool,

    /// Include the primary into the replica pool for reads.
    primary_reads_enabled: bool,

    // Should we try to parse queries?
    query_parser_enabled: bool,
    pool_settings: PoolSettings,
}

impl QueryRouter {
    /// One-time initialization of regexes.
    pub fn setup() -> bool {
        let set = match RegexSet::new(&CUSTOM_SQL_REGEXES) {
            Ok(rgx) => rgx,
@@ -60,37 +69,47 @@ impl QueryRouter {
            }
        };

        let list: Vec<_> = CUSTOM_SQL_REGEXES
            .iter()
            .map(|rgx| Regex::new(rgx).unwrap())
            .collect();

        // Impossible
        if list.len() != set.len() {
            return false;
        }

        match CUSTOM_SQL_REGEX_LIST.set(list) {
            Ok(_) => true,
            Err(_) => return false,
        };

        match CUSTOM_SQL_REGEX_SET.set(set) {
            Ok(_) => true,
            Err(_) => false,
        }
    }

    /// Create a new instance of the query router. Each client gets its own.
    pub fn new() -> QueryRouter {
        let config = get_config();

        let default_server_role = match config.query_router.default_role.as_ref() {
            "any" => None,
            "primary" => Some(Role::Primary),
            "replica" => Some(Role::Replica),
            _ => unreachable!(),
        };

        QueryRouter {
            default_server_role: default_server_role,
            shards: config.shards.len(),

            active_role: default_server_role,
            active_shard: None,
            primary_reads_enabled: config.query_router.primary_reads_enabled,
            query_parser_enabled: config.query_router.query_parser_enabled,
            active_role: None,
            query_parser_enabled: false,
            primary_reads_enabled: false,
            pool_settings: PoolSettings::default(),
        }
    }

    pub fn update_pool_settings(&mut self, pool_settings: PoolSettings) {
        self.pool_settings = pool_settings;
    }

    /// Try to parse a command and execute it.
    pub fn try_execute_command(&mut self, mut buf: BytesMut) -> Option<(Command, String)> {
        let code = buf.get_u8() as char;

        // Only simple protocol supported for commands.
        if code != 'Q' {
            return None;
        }
@@ -103,24 +122,61 @@ impl QueryRouter {
            None => return None,
        };

        let regex_list = match CUSTOM_SQL_REGEX_LIST.get() {
            Some(regex_list) => regex_list,
            None => return None,
        };

        let matches: Vec<_> = regex_set.matches(&query).into_iter().collect();

        // This is not a custom query, try to infer which
        // server it'll go to if the query parser is enabled.
        if matches.len() != 1 {
            debug!("Regular query, not a command");
            return None;
        }

        let sharding_function = match self.pool_settings.sharding_function.as_ref() {
            "pg_bigint_hash" => ShardingFunction::PgBigintHash,
            "sha1" => ShardingFunction::Sha1,
            _ => unreachable!(),
        };

        let default_server_role = match self.pool_settings.default_role.as_ref() {
            "any" => None,
            "primary" => Some(Role::Primary),
            "replica" => Some(Role::Replica),
            _ => unreachable!(),
        };

        let command = match matches[0] {
            0 => Command::SetShardingKey,
            1 => Command::SetShard,
            2 => Command::ShowShard,
            3 => Command::SetServerRole,
            4 => Command::ShowServerRole,
            5 => Command::SetPrimaryReads,
            6 => Command::ShowPrimaryReads,
            _ => unreachable!(),
        };

        let mut value = match command {
            Command::SetShardingKey | Command::SetShard | Command::SetServerRole => {
                query.split("'").collect::<Vec<&str>>()[1].to_string()
            Command::SetShardingKey
            | Command::SetShard
            | Command::SetServerRole
            | Command::SetPrimaryReads => {
                // Capture value. I know this re-runs the regex engine, but I haven't
                // figured out a better way just yet. I think I can write a single Regex
                // that matches all 5 custom SQL patterns, but maybe that's not very legible?
                //
                // I think this is faster than running the Regex engine 5 times.
                match regex_list[matches[0]].captures(&query) {
                    Some(captures) => match captures.get(1) {
                        Some(value) => value.as_str().to_string(),
                        None => return None,
                    },
                    None => return None,
                }
            }

            Command::ShowShard => self.shard().to_string(),
@@ -135,18 +191,26 @@ impl QueryRouter {
                }
            }
            },

            Command::ShowPrimaryReads => match self.primary_reads_enabled {
                true => String::from("on"),
                false => String::from("off"),
            },
        };

        match command {
            Command::SetShardingKey => {
                let sharder = Sharder::new(self.shards);
                let shard = sharder.pg_bigint_hash(value.parse::<i64>().unwrap());
                let sharder = Sharder::new(self.pool_settings.shards.len(), sharding_function);
                let shard = sharder.shard(value.parse::<i64>().unwrap());
                self.active_shard = Some(shard);
                value = shard.to_string();
            }

            Command::SetShard => {
                self.active_shard = Some(value.parse::<usize>().unwrap());
                self.active_shard = match value.to_ascii_uppercase().as_ref() {
                    "ANY" => Some(rand::random::<usize>() % self.pool_settings.shards.len()),
                    _ => Some(value.parse::<usize>().unwrap()),
                };
            }

            Command::SetServerRole => {
@@ -172,9 +236,8 @@ impl QueryRouter {
                    }

                    "default" => {
                        // TODO: reset query parser to default here.
                        self.active_role = self.default_server_role;
                        self.query_parser_enabled = get_config().query_router.query_parser_enabled;
                        self.active_role = default_server_role;
                        self.query_parser_enabled = self.query_parser_enabled;
                        self.active_role
                    }

@@ -182,6 +245,19 @@ impl QueryRouter {
                };
            }

            Command::SetPrimaryReads => {
                if value == "on" {
                    debug!("Setting primary reads to on");
                    self.primary_reads_enabled = true;
                } else if value == "off" {
                    debug!("Setting primary reads to off");
                    self.primary_reads_enabled = false;
                } else if value == "default" {
                    debug!("Setting primary reads to default");
                    self.primary_reads_enabled = self.pool_settings.primary_reads_enabled;
                }
            }

            _ => (),
        }

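A usage sketch of the command path above, borrowed from the unit tests later in this diff (simple_query is the test helper that wraps a string in a simple-protocol 'Q' message):

    QueryRouter::setup();
    let mut qr = QueryRouter::new();

    // Recognized as Command::SetShard; the regex captures the value "1".
    assert!(qr.try_execute_command(simple_query("SET SHARD TO '1'")) != None);

    // Not a custom command: returns None and the query falls through
    // to the regular routing (and, if enabled, the query parser).
    assert!(qr.try_execute_command(simple_query("SELECT 1")).is_none());
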
@@ -190,11 +266,20 @@ impl QueryRouter {

    /// Try to infer which server to connect to based on the contents of the query.
    pub fn infer_role(&mut self, mut buf: BytesMut) -> bool {
        debug!("Inferring role");

        let code = buf.get_u8() as char;
        let len = buf.get_i32() as usize;

        let query = match code {
            'Q' => String::from_utf8_lossy(&buf[..len - 5]).to_string(),
            // Query
            'Q' => {
                let query = String::from_utf8_lossy(&buf[..len - 5]).to_string();
                debug!("Query: '{}'", query);
                query
            }

            // Parse (prepared statement)
            'P' => {
                let mut start = 0;
                let mut end;
@@ -213,15 +298,18 @@ impl QueryRouter {

                let query = String::from_utf8_lossy(&buf[start..end]).to_string();

                debug!("Prepared statement: '{}'", query);

                query.replace("$", "") // Remove placeholders turning them into "values"
            }

            _ => return false,
        };

        let ast = match Parser::parse_sql(&PostgreSqlDialect {}, &query) {
            Ok(ast) => ast,
            Err(err) => {
                debug!("{:?}, query: {}", err, query);
                debug!("{}", err.to_string());
                return false;
            }
        };
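The `query.replace("$", "")` trick above deserves a note: stripping the dollar signs turns a placeholder like `$1` into the numeric literal `1`, which presumably lets sqlparser accept prepared statements it would otherwise reject; the literal's value is irrelevant since only the statement's shape matters for routing. An illustration:

    // Illustration only: the placeholder number survives as a literal.
    let prepared = "SELECT * FROM items WHERE id = $1";
    assert_eq!(prepared.replace("$", ""), "SELECT * FROM items WHERE id = 1");
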
@@ -266,26 +354,21 @@ impl QueryRouter {
        }
    }

    /// Reset the router back to defaults.
    /// This must be called at the end of every transaction in transaction mode.
    pub fn _reset(&mut self) {
        self.active_role = self.default_server_role;
        self.active_shard = None;
    pub fn set_shard(&mut self, shard: usize) {
        self.active_shard = Some(shard);
    }

    /// Should we attempt to parse queries?
    #[allow(dead_code)]
    pub fn query_parser_enabled(&self) -> bool {
        self.query_parser_enabled
    }

    #[allow(dead_code)]
    pub fn toggle_primary_reads(&mut self, value: bool) {
        self.primary_reads_enabled = value;
    }
}

#[cfg(test)]
mod test {
    use std::collections::HashMap;

    use super::*;
    use crate::messages::simple_query;
    use bytes::BufMut;
@@ -304,7 +387,8 @@ mod test {
        let mut qr = QueryRouter::new();
        assert!(qr.try_execute_command(simple_query("SET SERVER ROLE TO 'auto'")) != None);
        assert_eq!(qr.query_parser_enabled(), true);
        qr.toggle_primary_reads(false);

        assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO off")) != None);

        let queries = vec![
            simple_query("SELECT * FROM items WHERE id = 5"),
@@ -345,7 +429,7 @@ mod test {
        QueryRouter::setup();
        let mut qr = QueryRouter::new();
        let query = simple_query("SELECT * FROM items WHERE id = 5");
        qr.toggle_primary_reads(true);
        assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO on")) != None);

        assert!(qr.infer_role(query));
        assert_eq!(qr.role(), None);
@@ -356,7 +440,7 @@ mod test {
        QueryRouter::setup();
        let mut qr = QueryRouter::new();
        qr.try_execute_command(simple_query("SET SERVER ROLE TO 'auto'"));
        qr.toggle_primary_reads(false);
        assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO off")) != None);

        let prepared_stmt = BytesMut::from(
            &b"WITH t AS (SELECT * FROM items WHERE name = $1) SELECT * FROM t WHERE id = $2\0"[..],
@@ -385,6 +469,10 @@ mod test {
            "SET SERVER ROLE TO 'any'",
            "SET SERVER ROLE TO 'auto'",
            "SHOW SERVER ROLE",
            "SET PRIMARY READS TO 'on'",
            "SET PRIMARY READS TO 'off'",
            "SET PRIMARY READS TO 'default'",
            "SHOW PRIMARY READS",
            // Lower case
            "set sharding key to '1'",
            "set shard to '1'",
@@ -394,14 +482,47 @@ mod test {
            "set server role to 'any'",
            "set server role to 'auto'",
            "show server role",
            "set primary reads to 'on'",
            "set primary reads to 'OFF'",
            "set primary reads to 'deFaUlt'",
            // No quotes
            "SET SHARDING KEY TO 11235",
            "SET SHARD TO 15",
            "SET PRIMARY READS TO off",
            // Spaces and semicolon
            " SET SHARDING KEY TO 11235 ; ",
            " SET SHARD TO 15; ",
            " SET SHARDING KEY TO 11235 ;",
            " SET SERVER ROLE TO 'primary'; ",
            " SET SERVER ROLE TO 'primary' ; ",
            " SET SERVER ROLE TO 'primary' ;",
            " SET PRIMARY READS TO 'off' ;",
        ];

        // Which regexes it'll match to in the list
        let matches = [
            0, 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 6, 0, 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 0, 1, 5, 0, 1, 0,
            3, 3, 3, 5,
        ];

        let list = CUSTOM_SQL_REGEX_LIST.get().unwrap();
        let set = CUSTOM_SQL_REGEX_SET.get().unwrap();

        for test in &tests {
            let matches: Vec<_> = set.matches(test).into_iter().collect();
        for (i, test) in tests.iter().enumerate() {
            if !list[matches[i]].is_match(test) {
                println!("{} does not match {}", test, list[matches[i]]);
                assert!(false);
            }
            assert_eq!(set.matches(test).into_iter().collect::<Vec<_>>().len(), 1);
        }

            assert_eq!(matches.len(), 1);
        let bad = [
            "SELECT * FROM table",
            "SELECT * FROM table WHERE value = 'set sharding key to 5'", // Don't capture things in the middle of the query
        ];

        for query in &bad {
            assert_eq!(set.matches(query).into_iter().collect::<Vec<_>>().len(), 0);
        }
    }

@@ -411,12 +532,12 @@ mod test {
        let mut qr = QueryRouter::new();

        // SetShardingKey
        let query = simple_query("SET SHARDING KEY TO '13'");
        let query = simple_query("SET SHARDING KEY TO 13");
        assert_eq!(
            qr.try_execute_command(query),
            Some((Command::SetShardingKey, String::from("1")))
            Some((Command::SetShardingKey, String::from("0")))
        );
        assert_eq!(qr.shard(), 1);
        assert_eq!(qr.shard(), 0);

        // SetShard
        let query = simple_query("SET SHARD TO '1'");
@@ -460,6 +581,26 @@ mod test {
                Some((Command::ShowServerRole, String::from(*role)))
            );
        }

        let primary_reads = ["on", "off", "default"];
        let primary_reads_enabled = ["on", "off", "on"];

        for (idx, primary_reads) in primary_reads.iter().enumerate() {
            assert_eq!(
                qr.try_execute_command(simple_query(&format!(
                    "SET PRIMARY READS TO {}",
                    primary_reads
                ))),
                Some((Command::SetPrimaryReads, String::from(*primary_reads)))
            );
            assert_eq!(
                qr.try_execute_command(simple_query("SHOW PRIMARY READS")),
                Some((
                    Command::ShowPrimaryReads,
                    String::from(primary_reads_enabled[idx])
                ))
            );
        }
    }

    #[test]
@@ -467,7 +608,7 @@ mod test {
        QueryRouter::setup();
        let mut qr = QueryRouter::new();
        let query = simple_query("SET SERVER ROLE TO 'auto'");
        qr.toggle_primary_reads(false);
        assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO off")) != None);

        assert!(qr.try_execute_command(query) != None);
        assert!(qr.query_parser_enabled());
@@ -484,6 +625,45 @@ mod test {
        assert!(qr.query_parser_enabled());
        let query = simple_query("SET SERVER ROLE TO 'default'");
        assert!(qr.try_execute_command(query) != None);
        assert!(!qr.query_parser_enabled());
        assert!(qr.query_parser_enabled());
    }

    #[test]
    fn test_update_from_pool_settings() {
        QueryRouter::setup();

        let pool_settings = PoolSettings {
            pool_mode: "transaction".to_string(),
            shards: HashMap::default(),
            user: crate::config::User::default(),
            default_role: Role::Replica.to_string(),
            query_parser_enabled: true,
            primary_reads_enabled: false,
            sharding_function: "pg_bigint_hash".to_string(),
        };
        let mut qr = QueryRouter::new();
        assert_eq!(qr.active_role, None);
        assert_eq!(qr.active_shard, None);
        assert_eq!(qr.query_parser_enabled, false);
        assert_eq!(qr.primary_reads_enabled, false);

        // Internal state must not be changed due to this, only defaults
        qr.update_pool_settings(pool_settings.clone());

        assert_eq!(qr.active_role, None);
        assert_eq!(qr.active_shard, None);
        assert_eq!(qr.query_parser_enabled, false);
        assert_eq!(qr.primary_reads_enabled, false);

        let q1 = simple_query("SET SERVER ROLE TO 'primary'");
        assert!(qr.try_execute_command(q1) != None);
        assert_eq!(qr.active_role.unwrap(), Role::Primary);

        let q2 = simple_query("SET SERVER ROLE TO 'default'");
        assert!(qr.try_execute_command(q2) != None);
        assert_eq!(
            qr.active_role.unwrap().to_string(),
            pool_settings.clone().default_role
        );
    }
}

320
src/scram.rs
Normal file
@@ -0,0 +1,320 @@
// SCRAM-SHA-256 authentication. Heavily inspired by
// https://github.com/sfackler/rust-postgres/
// SASL implementation.

use bytes::BytesMut;
use hmac::{Hmac, Mac};
use rand::{self, Rng};
use sha2::digest::FixedOutput;
use sha2::{Digest, Sha256};

use std::fmt::Write;

use crate::constants::*;
use crate::errors::Error;

/// Normalize a password string. Postgres
/// passwords don't have to be UTF-8.
fn normalize(pass: &[u8]) -> Vec<u8> {
    let pass = match std::str::from_utf8(pass) {
        Ok(pass) => pass,
        Err(_) => return pass.to_vec(),
    };

    match stringprep::saslprep(pass) {
        Ok(pass) => pass.into_owned().into_bytes(),
        Err(_) => pass.as_bytes().to_vec(),
    }
}

/// Keep the SASL state through the exchange.
/// It takes 3 messages to complete the authentication.
pub struct ScramSha256 {
    password: String,
    salted_password: [u8; 32],
    auth_message: String,
    message: BytesMut,
    nonce: String,
}

impl ScramSha256 {
    /// Create the Scram state from a password. It'll automatically
    /// generate a nonce.
    pub fn new(password: &str) -> ScramSha256 {
        let mut rng = rand::thread_rng();
        let nonce = (0..NONCE_LENGTH)
            .map(|_| {
                let mut v = rng.gen_range(0x21u8..0x7e);
                if v == 0x2c {
                    v = 0x7e
                }
                v as char
            })
            .collect::<String>();

        Self::from_nonce(password, &nonce)
    }

    /// Used for testing.
    pub fn from_nonce(password: &str, nonce: &str) -> ScramSha256 {
        let message = BytesMut::from(&format!("{}n=,r={}", "n,,", nonce).as_bytes()[..]);

        ScramSha256 {
            password: password.to_string(),
            nonce: String::from(nonce),
            message,
            salted_password: [0u8; 32],
            auth_message: String::new(),
        }
    }

    /// Get the current state of the SASL authentication.
    pub fn message(&mut self) -> BytesMut {
        self.message.clone()
    }

    /// Update the state with message received from server.
    pub fn update(&mut self, message: &BytesMut) -> Result<BytesMut, Error> {
        let server_message = Message::parse(message)?;

        if !server_message.nonce.starts_with(&self.nonce) {
            return Err(Error::ProtocolSyncError);
        }

        let salt = match base64::decode(&server_message.salt) {
            Ok(salt) => salt,
            Err(_) => return Err(Error::ProtocolSyncError),
        };

        let salted_password = Self::hi(
            &normalize(&self.password.as_bytes()[..]),
            &salt,
            server_message.iterations,
        );

        // Save for verification of final server message.
        self.salted_password = salted_password;

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&salted_password) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };

        hmac.update(b"Client Key");

        let client_key = hmac.finalize().into_bytes();

        let mut hash = Sha256::default();
        hash.update(client_key.as_slice());

        let stored_key = hash.finalize_fixed();
        let mut cbind_input = vec![];
        cbind_input.extend("n,,".as_bytes());

        let cbind_input = base64::encode(&cbind_input);

        self.message.clear();

        // Start writing the client reply.
        match write!(
            &mut self.message,
            "c={},r={}",
            cbind_input, server_message.nonce
        ) {
            Ok(_) => (),
            Err(_) => return Err(Error::ServerError),
        };

        let auth_message = format!(
            "n=,r={},{},{}",
            self.nonce,
            String::from_utf8_lossy(&message[..]),
            String::from_utf8_lossy(&self.message[..])
        );

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&stored_key) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };
        hmac.update(auth_message.as_bytes());

        // Save the auth message for server final message verification.
        self.auth_message = auth_message;

        let client_signature = hmac.finalize().into_bytes();

        // Sign the client proof.
        let mut client_proof = client_key;
        for (proof, signature) in client_proof.iter_mut().zip(client_signature) {
            *proof ^= signature;
        }

        match write!(&mut self.message, ",p={}", base64::encode(&*client_proof)) {
            Ok(_) => (),
            Err(_) => return Err(Error::ServerError),
        };

        Ok(self.message.clone())
    }

    /// Verify final server message.
    pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
        let final_message = FinalMessage::parse(message)?;

        let verifier = match base64::decode(&final_message.value) {
            Ok(verifier) => verifier,
            Err(_) => return Err(Error::ProtocolSyncError),
        };

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };
        hmac.update(b"Server Key");
        let server_key = hmac.finalize().into_bytes();

        let mut hmac = match Hmac::<Sha256>::new_from_slice(&server_key) {
            Ok(hmac) => hmac,
            Err(_) => return Err(Error::ServerError),
        };
        hmac.update(self.auth_message.as_bytes());

        match hmac.verify_slice(&verifier) {
            Ok(_) => Ok(()),
            Err(_) => return Err(Error::ServerError),
        }
    }

    /// Hash the password with the salt i-times.
    fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] {
        let mut hmac =
            Hmac::<Sha256>::new_from_slice(str).expect("HMAC is able to accept all key sizes");
        hmac.update(salt);
        hmac.update(&[0, 0, 0, 1]);
        let mut prev = hmac.finalize().into_bytes();

        let mut hi = prev;

        for _ in 1..i {
            let mut hmac = Hmac::<Sha256>::new_from_slice(str).expect("already checked above");
            hmac.update(&prev);
            prev = hmac.finalize().into_bytes();

            for (hi, prev) in hi.iter_mut().zip(prev) {
                *hi ^= prev;
            }
        }

        hi.into()
    }
}

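Taken together, the client half of the exchange is three calls on this struct. A sketch (the password here is hypothetical; the real byte-for-byte flow is recorded in the `exchange` test below):

    // Client-side SCRAM flow against a server challenge.
    let mut scram = ScramSha256::new("example-password");

    let client_first = scram.message();   // "n,,n=,r=<nonce>"
    // ...send client_first, read the server's first message (r=, s=, i=)...
    // let client_final = scram.update(&server_first)?;  // "c=biws,r=...,p=<proof>"
    // ...send client_final, read the server's final message (v=)...
    // scram.finish(&server_final)?;       // verifies the server signature
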
/// Parse the server challenge.
struct Message {
    nonce: String,
    salt: String,
    iterations: u32,
}

impl Message {
    /// Parse the server SASL challenge.
    fn parse(message: &BytesMut) -> Result<Message, Error> {
        let parts = String::from_utf8_lossy(&message[..])
            .split(",")
            .map(|s| s.to_string())
            .collect::<Vec<String>>();

        if parts.len() != 3 {
            return Err(Error::ProtocolSyncError);
        }

        let nonce = str::replace(&parts[0], "r=", "");
        let salt = str::replace(&parts[1], "s=", "");
        let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
            Ok(iterations) => iterations,
            Err(_) => return Err(Error::ProtocolSyncError),
        };

        Ok(Message {
            nonce,
            salt,
            iterations,
        })
    }
}

/// Parse server final validation message.
struct FinalMessage {
    value: String,
}

impl FinalMessage {
    /// Parse the server final validation message.
    pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
        if !message.starts_with(b"v=") || message.len() < 4 {
            return Err(Error::ProtocolSyncError);
        }

        Ok(FinalMessage {
            value: String::from_utf8_lossy(&message[2..]).to_string(),
        })
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn parse_server_first_message() {
        let message = BytesMut::from(
            &"r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096".as_bytes()[..],
        );
        let message = Message::parse(&message).unwrap();
        assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j");
        assert_eq!(message.salt, "QSXCR+Q6sek8bf92");
        assert_eq!(message.iterations, 4096);
    }

    #[test]
    fn parse_server_last_message() {
        let f = FinalMessage::parse(&BytesMut::from(
            &"v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".as_bytes()[..],
        ))
        .unwrap();
        assert_eq!(
            f.value,
            "U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".to_string()
        );
    }

    // recorded auth exchange from psql
    #[test]
    fn exchange() {
        let password = "foobar";
        let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB";

        let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB";
        let server_first =
            "r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\
             =4096";
        let client_final =
            "c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\
             1NTlQYNs5BTeQjdHdk7lOflDo5re2an8=";
        let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw=";

        let mut scram = ScramSha256::from_nonce(password, nonce);

        let message = scram.message();
        assert_eq!(std::str::from_utf8(&message).unwrap(), client_first);

        let result = scram
            .update(&BytesMut::from(&server_first.as_bytes()[..]))
            .unwrap();
        assert_eq!(std::str::from_utf8(&result).unwrap(), client_final);

        scram
            .finish(&BytesMut::from(&server_final.as_bytes()[..]))
            .unwrap();
    }
}

219
src/server.rs
@@ -1,7 +1,8 @@
/// Implementation of the PostgreSQL server (database) protocol.
/// Here we are pretending to be a Postgres client.
use bytes::{Buf, BufMut, BytesMut};
///! Implementation of the PostgreSQL server (database) protocol.
///! Here we are pretending to be a Postgres client.
use log::{error, info};
use log::{debug, error, info, trace};
use std::time::SystemTime;
use tokio::io::{AsyncReadExt, BufReader};
use tokio::net::{
    tcp::{OwnedReadHalf, OwnedWriteHalf},
@@ -12,48 +13,55 @@ use crate::config::{Address, User};
use crate::constants::*;
use crate::errors::Error;
use crate::messages::*;
use crate::scram::ScramSha256;
use crate::stats::Reporter;
use crate::ClientServerMap;

/// Server state.
pub struct Server {
    // Server host, e.g. localhost,
    // port, e.g. 5432, and role, e.g. primary or replica.
    /// Server host, e.g. localhost,
    /// port, e.g. 5432, and role, e.g. primary or replica.
    address: Address,

    // Buffered read socket.
    /// Buffered read socket.
    read: BufReader<OwnedReadHalf>,

    // Unbuffered write socket (our client code buffers).
    /// Unbuffered write socket (our client code buffers).
    write: OwnedWriteHalf,

    // Our server response buffer. We buffer data before we give it to the client.
    /// Our server response buffer. We buffer data before we give it to the client.
    buffer: BytesMut,

    // Server information the server sent us over on startup.
    /// Server information the server sent us over on startup.
    server_info: BytesMut,

    // Backend id and secret key used for query cancellation.
    /// Backend id and secret key used for query cancellation.
    process_id: i32,
    secret_key: i32,

    // Is the server inside a transaction or idle.
    /// Is the server inside a transaction or idle.
    in_transaction: bool,

    // Is there more data for the client to read.
    /// Is there more data for the client to read.
    data_available: bool,

    // Is the server broken? We'll remove it from the pool if so.
    /// Is the server broken? We'll remove it from the pool if so.
    bad: bool,

    // Mapping of clients and servers used for query cancellation.
    /// Mapping of clients and servers used for query cancellation.
    client_server_map: ClientServerMap,

    // Server connected at.
    /// Server connected at.
    connected_at: chrono::naive::NaiveDateTime,

    // Reports various metrics, e.g. data sent & received.
    /// Reports various metrics, e.g. data sent & received.
    stats: Reporter,

    /// Application name using the server at the moment.
    application_name: String,

    // Last time that a successful server send or response happened
    last_activity: SystemTime,
}

impl Server {
@@ -75,8 +83,10 @@ impl Server {
            }
        };

        // Send the startup packet telling the server we're a normal Postgres client.
        startup(&mut stream, &user.name, database).await?;
        trace!("Sending StartupMessage");

        // StartupMessage
        startup(&mut stream, &user.username, database).await?;

        let mut server_info = BytesMut::new();
        let mut process_id: i32 = 0;
@@ -84,6 +94,8 @@ impl Server {

        // We'll be handling multiple packets, but they will all be structured the same.
        // We'll loop here until this exchange is complete.
        let mut scram = ScramSha256::new(&user.password);

        loop {
            let code = match stream.read_u8().await {
                Ok(code) => code as char,
@@ -95,6 +107,8 @@ impl Server {
                Err(_) => return Err(Error::SocketError),
            };

            trace!("Message: {}", code);

            match code {
                // Authentication
                'R' => {
@@ -104,6 +118,8 @@ impl Server {
                        Err(_) => return Err(Error::SocketError),
                    };

                    trace!("Auth: {}", auth_code);

                    match auth_code {
                        MD5_ENCRYPTED_PASSWORD => {
                            // The salt is 4 bytes.
@@ -115,12 +131,97 @@ impl Server {
                                Err(_) => return Err(Error::SocketError),
                            };

                            md5_password(&mut stream, &user.name, &user.password, &salt[..])
                            md5_password(&mut stream, &user.username, &user.password, &salt[..])
                                .await?;
                        }

                        AUTHENTICATION_SUCCESSFUL => (),

                        SASL => {
                            debug!("Starting SASL authentication");
                            let sasl_len = (len - 8) as usize;
                            let mut sasl_auth = vec![0u8; sasl_len];

                            match stream.read_exact(&mut sasl_auth).await {
                                Ok(_) => (),
                                Err(_) => return Err(Error::SocketError),
                            };

                            let sasl_type = String::from_utf8_lossy(&sasl_auth[..sasl_len - 2]);

                            if sasl_type == SCRAM_SHA_256 {
                                debug!("Using {}", SCRAM_SHA_256);

                                // Generate client message.
                                let sasl_response = scram.message();

                                // SASLInitialResponse (F)
                                let mut res = BytesMut::new();
                                res.put_u8(b'p');

                                // length + String length + length + length of sasl response
                                res.put_i32(
                                    4 // i32 size
                                    + SCRAM_SHA_256.len() as i32 // length of SASL version string,
                                    + 1 // Null terminator for the SASL version string,
                                    + 4 // i32 size
                                    + sasl_response.len() as i32, // length of SASL response
                                );

                                res.put_slice(&format!("{}\0", SCRAM_SHA_256).as_bytes()[..]);
                                res.put_i32(sasl_response.len() as i32);
                                res.put(sasl_response);

                                write_all(&mut stream, res).await?;
                            } else {
                                error!("Unsupported SCRAM version: {}", sasl_type);
                                return Err(Error::ServerError);
                            }
                        }

                        SASL_CONTINUE => {
                            trace!("Continuing SASL");

                            let mut sasl_data = vec![0u8; (len - 8) as usize];

                            match stream.read_exact(&mut sasl_data).await {
                                Ok(_) => (),
                                Err(_) => return Err(Error::SocketError),
                            };

                            let msg = BytesMut::from(&sasl_data[..]);
                            let sasl_response = scram.update(&msg)?;

                            // SASLResponse
                            let mut res = BytesMut::new();
                            res.put_u8(b'p');
                            res.put_i32(4 + sasl_response.len() as i32);
                            res.put(sasl_response);

                            write_all(&mut stream, res).await?;
                        }

                        SASL_FINAL => {
                            trace!("Final SASL");

                            let mut sasl_final = vec![0u8; len as usize - 8];
                            match stream.read_exact(&mut sasl_final).await {
                                Ok(_) => (),
                                Err(_) => return Err(Error::SocketError),
                            };

                            match scram.finish(&BytesMut::from(&sasl_final[..])) {
                                Ok(_) => {
                                    debug!("SASL authentication successful");
                                }

                                Err(err) => {
                                    debug!("SASL authentication failed");
                                    return Err(err);
                                }
                            };
                        }

                        _ => {
                            error!("Unsupported authentication mechanism: {}", auth_code);
                            return Err(Error::ServerError);
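For clarity, the SASLInitialResponse frame assembled above has this wire layout (the mechanism name comes from the SCRAM_SHA_256 constant, assumed here to be the string "SCRAM-SHA-256"):

    'p' | i32 total length | "SCRAM-SHA-256\0" | i32 response length | client-first-message bytes

so, for example, a 32-byte client-first message would make the length field 4 + 13 + 1 + 4 + 32 = 54.
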
@@ -135,6 +236,8 @@ impl Server {
                        Err(_) => return Err(Error::SocketError),
                    };

                    trace!("Error: {}", error_code);

                    match error_code {
                        // No error message is present in the message.
                        MESSAGE_TERMINATOR => (),
@@ -179,7 +282,7 @@ impl Server {
                // BackendKeyData
                'K' => {
                    // The frontend must save these values if it wishes to be able to issue CancelRequest messages later.
                    // See: https://www.postgresql.org/docs/12/protocol-message-formats.html
                    // See: <https://www.postgresql.org/docs/12/protocol-message-formats.html>.
                    process_id = match stream.read_i32().await {
                        Ok(id) => id,
                        Err(_) => return Err(Error::SocketError),
@@ -200,11 +303,9 @@ impl Server {
                        Err(_) => return Err(Error::SocketError),
                    };

                    // This is the last step in the client-server connection setup,
                    // and indicates the server is ready for us to query it.
                    let (read, write) = stream.into_split();

                    return Ok(Server {
                    let mut server = Server {
                        address: address.clone(),
                        read: BufReader::new(read),
                        write: write,
@@ -218,7 +319,13 @@ impl Server {
                        client_server_map: client_server_map,
                        connected_at: chrono::offset::Utc::now().naive_utc(),
                        stats: stats,
                    });
                        application_name: String::new(),
                        last_activity: SystemTime::now(),
                    };

                    server.set_name("pgcat").await?;

                    return Ok(server);
                }

                // We have an unexpected message from the server during this exchange.
@@ -247,6 +354,8 @@ impl Server {
            }
        };

        debug!("Sending CancelRequest");

        let mut bytes = BytesMut::with_capacity(16);
        bytes.put_i32(16);
        bytes.put_i32(CANCEL_REQUEST_CODE);
@@ -258,10 +367,15 @@ impl Server {

    /// Send messages to the server from the client.
    pub async fn send(&mut self, messages: BytesMut) -> Result<(), Error> {
        self.stats.data_sent(messages.len());
        self.stats
            .data_sent(messages.len(), self.process_id, self.address.id);

        match write_all_half(&mut self.write, messages).await {
            Ok(_) => Ok(()),
            Ok(_) => {
                // Successfully sent to server
                self.last_activity = SystemTime::now();
                Ok(())
            }
            Err(err) => {
                error!("Terminating server because of: {:?}", err);
                self.bad = true;
@@ -290,6 +404,8 @@ impl Server {
            let code = message.get_u8() as char;
            let _len = message.get_i32();

            trace!("Message: {}", code);

            match code {
                // ReadyForQuery
                'Z' => {
@@ -306,7 +422,7 @@ impl Server {
                        self.in_transaction = false;
                    }

                    // Some error occured, the transaction was rolled back.
                    // Some error occurred, the transaction was rolled back.
                    'E' => {
                        self.in_transaction = true;
                    }
@@ -329,8 +445,7 @@ impl Server {
                    // More data is available after this message, this is not the end of the reply.
                    self.data_available = true;

                    // Don't flush yet, the more we buffer, the faster this goes...
                    // up to a limit of course.
                    // Don't flush yet, the more we buffer, the faster this goes...up to a limit.
                    if self.buffer.len() >= 8196 {
                        break;
                    }
@@ -362,11 +477,15 @@ impl Server {
        let bytes = self.buffer.clone();

        // Keep track of how much data we got from the server for stats.
        self.stats.data_received(bytes.len());
        self.stats
            .data_received(bytes.len(), self.process_id, self.address.id);

        // Clear the buffer for next query.
        self.buffer.clear();

        // Successfully received data from server
        self.last_activity = SystemTime::now();

        // Pass the data back to the client.
        Ok(bytes)
    }
@@ -397,13 +516,13 @@ impl Server {

    /// Indicate that this server connection cannot be re-used and must be discarded.
    pub fn mark_bad(&mut self) {
        error!("Server marked bad");
        error!("Server {:?} marked bad", self.address);
        self.bad = true;
    }

    /// Claim this server as mine for the purposes of query cancellation.
    pub fn claim(&mut self, process_id: i32, secret_key: i32) {
        let mut guard = self.client_server_map.lock().unwrap();
        let mut guard = self.client_server_map.lock();
        guard.insert(
            (process_id, secret_key),
            (
@@ -419,18 +538,9 @@ impl Server {
    /// It will use the simple query protocol.
    /// Result will not be returned, so this is useful for things like `SET` or `ROLLBACK`.
    pub async fn query(&mut self, query: &str) -> Result<(), Error> {
        let mut query = BytesMut::from(&query.as_bytes()[..]);
        query.put_u8(0); // C-string terminator (NULL character).
        let query = simple_query(query);

        let len = query.len() as i32 + 4;

        let mut msg = BytesMut::with_capacity(len as usize + 1);

        msg.put_u8(b'Q');
        msg.put_i32(len);
        msg.put_slice(&query[..]);

        self.send(msg).await?;
        self.send(query).await?;

        loop {
            let _ = self.recv().await?;
@@ -446,9 +556,14 @@ impl Server {
    /// A shorthand for `SET application_name = $1`.
    #[allow(dead_code)]
    pub async fn set_name(&mut self, name: &str) -> Result<(), Error> {
        Ok(self
            .query(&format!("SET application_name = '{}'", name))
            .await?)
        if self.application_name != name {
            self.application_name = name.to_string();
            Ok(self
                .query(&format!("SET application_name = '{}'", name))
                .await?)
        } else {
            Ok(())
        }
    }

    /// Get the server's address.
@@ -457,9 +572,15 @@ impl Server {
        self.address.clone()
    }

    /// Get the server's unique identifier.
    pub fn process_id(&self) -> i32 {
        self.process_id
    }

    // Get server's latest response timestamp
    pub fn last_activity(&self) -> SystemTime {
        self.last_activity
    }
}

impl Drop for Server {
@@ -467,7 +588,8 @@ impl Drop for Server {
    /// the socket is in non-blocking mode, so it may not be ready
    /// for a write.
    fn drop(&mut self) {
        self.stats.server_disconnecting(self.process_id());
        self.stats
            .server_disconnecting(self.process_id(), self.address.id);

        let mut bytes = BytesMut::with_capacity(4);
        bytes.put_u8(b'X');
@@ -475,9 +597,10 @@ impl Drop for Server {

        match self.write.try_write(&bytes) {
            Ok(_) => (),
            Err(_) => (),
            Err(_) => debug!("Dirty shutdown"),
        };

        // Should not matter.
        self.bad = true;

        let now = chrono::offset::Utc::now().naive_utc();

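A small protocol note on the Drop implementation above: the Terminate message it fires off is just a tag byte and a length, the standard frontend Terminate format,

    'X' | i32 length = 4

and try_write is used instead of an async write because drop() cannot await; if the non-blocking socket isn't ready, the message is simply lost, which is why the new code logs "Dirty shutdown" instead of failing.
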
@@ -1,26 +1,70 @@
// https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20
/// Implements various sharding functions.
use sha1::{Digest, Sha1};

/// See: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20>.
const PARTITION_HASH_SEED: u64 = 0x7A5B22367996DCFD;

/// The sharding functions we support.
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum ShardingFunction {
    PgBigintHash,
    Sha1,
}

/// The sharder.
pub struct Sharder {
    /// Number of shards in the cluster.
    shards: usize,

    /// The sharding function in use.
    sharding_function: ShardingFunction,
}

impl Sharder {
    pub fn new(shards: usize) -> Sharder {
        Sharder { shards: shards }
    /// Create new instance of the sharder.
    pub fn new(shards: usize, sharding_function: ShardingFunction) -> Sharder {
        Sharder {
            shards,
            sharding_function,
        }
    }

    /// Compute the shard given sharding key.
    pub fn shard(&self, key: i64) -> usize {
        match self.sharding_function {
            ShardingFunction::PgBigintHash => self.pg_bigint_hash(key),
            ShardingFunction::Sha1 => self.sha1(key),
        }
    }

    /// Hash function used by Postgres to determine which partition
    /// to put the row in when using HASH(column) partitioning.
    /// Source: https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631
    /// Source: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631>.
    /// Supports only 1 bigint at the moment, but we can add more later.
    pub fn pg_bigint_hash(&self, key: i64) -> usize {
    fn pg_bigint_hash(&self, key: i64) -> usize {
        let mut lohalf = key as u32;
        let hihalf = (key >> 32) as u32;
        lohalf ^= if key >= 0 { hihalf } else { !hihalf };
        Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards
    }

    /// Example of a hashing function based on SHA1.
    fn sha1(&self, key: i64) -> usize {
        let mut hasher = Sha1::new();

        hasher.update(&key.to_string().as_bytes());

        let result = hasher.finalize();

        // Convert the SHA1 hash into hex so we can parse it as a large integer.
        let hex = format!("{:x}", result);

        // Parse the last 8 bytes as an integer (8 bytes = bigint).
        let key = i64::from_str_radix(&hex[hex.len() - 8..], 16).unwrap() as usize;

        key % self.shards
    }

    #[inline]
    fn rot(x: u32, k: u32) -> u32 {
        (x << k) | (x >> (32 - k))
@@ -83,6 +127,7 @@ impl Sharder {
        a
    }

    #[inline]
    fn pg_u32_hash(k: u32) -> u64 {
        let mut a: u32 = 0x9e3779b9 as u32 + std::mem::size_of::<u32>() as u32 + 3923095 as u32;
        let mut b = a;
@@ -109,36 +154,51 @@ mod test {
    // confirming that we implemented Postgres BIGINT hashing correctly.
    #[test]
    fn test_pg_bigint_hash() {
        let sharder = Sharder::new(5);
        let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);

        let shard_0 = vec![1, 4, 5, 14, 19, 39, 40, 46, 47, 53];

        for v in shard_0 {
            assert_eq!(sharder.pg_bigint_hash(v), 0);
            assert_eq!(sharder.shard(v), 0);
        }

        let shard_1 = vec![2, 3, 11, 17, 21, 23, 30, 49, 51, 54];

        for v in shard_1 {
            assert_eq!(sharder.pg_bigint_hash(v), 1);
            assert_eq!(sharder.shard(v), 1);
        }

        let shard_2 = vec![6, 7, 15, 16, 18, 20, 25, 28, 34, 35];

        for v in shard_2 {
            assert_eq!(sharder.pg_bigint_hash(v), 2);
            assert_eq!(sharder.shard(v), 2);
        }

        let shard_3 = vec![8, 12, 13, 22, 29, 31, 33, 36, 41, 43];

        for v in shard_3 {
            assert_eq!(sharder.pg_bigint_hash(v), 3);
            assert_eq!(sharder.shard(v), 3);
        }

        let shard_4 = vec![9, 10, 24, 26, 27, 32, 37, 38, 42, 45];

        for v in shard_4 {
            assert_eq!(sharder.pg_bigint_hash(v), 4);
            assert_eq!(sharder.shard(v), 4);
        }
    }

    #[test]
    fn test_sha1_hash() {
        let sharder = Sharder::new(12, ShardingFunction::Sha1);
        let ids = vec![
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
        ];
        let shards = vec![
            4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
        ];

        for (i, id) in ids.iter().enumerate() {
            assert_eq!(sharder.shard(*id), shards[i]);
        }
    }
}

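A usage sketch of the new Sharder API; the expected values below come straight from the tests above:

    let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
    assert_eq!(sharder.shard(1), 0); // same bucket Postgres HASH partitioning picks
    assert_eq!(sharder.shard(2), 1);

    let sha1 = Sharder::new(12, ShardingFunction::Sha1);
    assert_eq!(sha1.shard(0), 4);
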
492
src/stats.rs
@@ -1,13 +1,27 @@
use log::info;
use statsd::Client;
/// Events collector and publisher.
use tokio::sync::mpsc::{Receiver, Sender};

use arc_swap::ArcSwap;
/// Statistics and reporting.
use log::{error, info, trace};
use once_cell::sync::Lazy;
use parking_lot::Mutex;
use std::collections::HashMap;
use std::time::Instant;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::mpsc::{channel, Receiver, Sender};

use crate::config::get_config;
use crate::pool::get_number_of_addresses;

pub static REPORTER: Lazy<ArcSwap<Reporter>> =
    Lazy::new(|| ArcSwap::from_pointee(Reporter::default()));

/// Latest stats updated every second; used in SHOW STATS and other admin commands.
static LATEST_STATS: Lazy<Mutex<HashMap<usize, HashMap<String, i64>>>> =
    Lazy::new(|| Mutex::new(HashMap::new()));

/// Statistics period used for average calculations.
/// 15 seconds.
static STAT_PERIOD: u64 = 15000;

/// The names for the events reported
/// to the statistics collector.
#[derive(Debug, Clone, Copy)]
enum EventName {
    CheckoutTime,
@@ -24,188 +38,293 @@ enum EventName {
    ServerTested,
    ServerLogin,
    ServerDisconnecting,
    UpdateStats,
    UpdateAverages,
}

#[derive(Debug)]
/// Event data sent to the collector
/// from clients and servers.
#[derive(Debug, Clone)]
pub struct Event {
    /// The name of the event being reported.
    name: EventName,

    /// The value being reported. Meaning differs based on event name.
    value: i64,
    process_id: Option<i32>,

    /// The client or server connection reporting the event.
    process_id: i32,

    /// The server the client is connected to.
    address_id: usize,
}

/// The statistics reporter. An instance is given
/// to each possible source of statistics,
/// e.g. clients, servers, connection pool.
#[derive(Clone, Debug)]
pub struct Reporter {
    tx: Sender<Event>,
}

impl Default for Reporter {
    fn default() -> Reporter {
        let (tx, _rx) = channel(5);
        Reporter { tx }
    }
}

impl Reporter {
    /// Create a new Reporter instance.
    pub fn new(tx: Sender<Event>) -> Reporter {
        Reporter { tx: tx }
    }

    pub fn query(&self) {
        let statistic = Event {
    /// Send statistics to the task keeping track of stats.
    fn send(&self, event: Event) {
        let name = event.name;
        let result = self.tx.try_send(event);

        match result {
            Ok(_) => trace!(
                "{:?} event reported successfully, capacity: {}",
                name,
                self.tx.capacity()
            ),

            Err(err) => match err {
                TrySendError::Full { .. } => error!("{:?} event dropped, buffer full", name),
                TrySendError::Closed { .. } => error!("{:?} event dropped, channel closed", name),
            },
        };
    }

    /// Report a query executed by a client against
    /// a server identified by the `address_id`.
    pub fn query(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::Query,
            value: 1,
            process_id: None,
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event);
    }

    pub fn transaction(&self) {
        let statistic = Event {
    /// Report a transaction executed by a client against
    /// a server identified by the `address_id`.
    pub fn transaction(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::Transaction,
            value: 1,
            process_id: None,
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn data_sent(&self, amount: usize) {
        let statistic = Event {
    /// Report data sent to a server identified by `address_id`.
    /// The `amount` is measured in bytes.
    pub fn data_sent(&self, amount: usize, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::DataSent,
            value: amount as i64,
            process_id: None,
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn data_received(&self, amount: usize) {
        let statistic = Event {
    /// Report data received from a server identified by `address_id`.
    /// The `amount` is measured in bytes.
    pub fn data_received(&self, amount: usize, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::DataReceived,
            value: amount as i64,
            process_id: None,
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

pub fn checkout_time(&self, ms: u128) {
|
||||
let statistic = Event {
|
||||
    /// Time spent waiting to get a healthy connection from the pool
    /// for a server identified by `address_id`.
    /// Measured in milliseconds.
    pub fn checkout_time(&self, ms: u128, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::CheckoutTime,
            value: ms as i64,
            process_id: None,
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn client_waiting(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a client identified by `process_id` waiting for a connection
    /// to a server identified by `address_id`.
    pub fn client_waiting(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ClientWaiting,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn client_active(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a client identified by `process_id` is done waiting for a connection
    /// to a server identified by `address_id` and is about to query the server.
    pub fn client_active(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ClientActive,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn client_idle(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a client identified by `process_id` is done querying the server
    /// identified by `address_id` and is no longer active.
    pub fn client_idle(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ClientIdle,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn client_disconnecting(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a client identified by `process_id` is disconnecting from the pooler.
    /// The last server it was connected to is identified by `address_id`.
    pub fn client_disconnecting(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ClientDisconnecting,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn server_active(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a server connection identified by `process_id` for
    /// a configured server identified by `address_id` is actively used
    /// by a client.
    pub fn server_active(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ServerActive,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn server_idle(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a server connection identified by `process_id` for
    /// a configured server identified by `address_id` is no longer
    /// actively used by a client and is now idle.
    pub fn server_idle(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ServerIdle,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn server_login(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a server connection identified by `process_id` for
    /// a configured server identified by `address_id` is attempting
    /// to login.
    pub fn server_login(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ServerLogin,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn server_tested(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a server connection identified by `process_id` for
    /// a configured server identified by `address_id` is being
    /// tested before being given to a client.
    pub fn server_tested(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ServerTested,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }

    pub fn server_disconnecting(&self, process_id: i32) {
        let statistic = Event {
    /// Reports a server connection identified by `process_id` is disconnecting from the pooler.
    /// The configured server it was connected to is identified by `address_id`.
    pub fn server_disconnecting(&self, process_id: i32, address_id: usize) {
        let event = Event {
            name: EventName::ServerDisconnecting,
            value: 1,
            process_id: Some(process_id),
            process_id: process_id,
            address_id: address_id,
        };

        let _ = self.tx.try_send(statistic);
        self.send(event)
    }
}
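A rough sketch, not part of the diff above: how the reporter methods might be driven over one client's lifecycle. The process_id and address_id values here are hypothetical, and get_reporter() is the accessor added at the bottom of this file.

fn report_client_lifecycle() {
    let reporter = get_reporter();
    let process_id = 12345; // hypothetical client process id
    let address_id = 0; // hypothetical configured server address

    reporter.client_waiting(process_id, address_id); // queued for a server connection
    reporter.checkout_time(3, process_id, address_id); // got one after 3 ms
    reporter.client_active(process_id, address_id); // querying the server
    reporter.client_idle(process_id, address_id); // done querying
    reporter.client_disconnecting(process_id, address_id); // gone
}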
/// The statistics collector which is receiving statistics
/// from clients, servers, and the connection pool. There is
/// only one collector (kind of like a singleton).
/// The collector can trigger events on its own, e.g.
/// it updates aggregates every second and averages every
/// 15 seconds.
pub struct Collector {
    rx: Receiver<Event>,
    client: Client,
    tx: Sender<Event>,
}

impl Collector {
    pub fn new(rx: Receiver<Event>) -> Collector {
        Collector {
            rx: rx,
            client: Client::new(&get_config().general.statsd_address, "pgcat").unwrap(),
        }
    /// Create a new collector instance. There should only be one instance
    /// at a time. This is ensured by mpsc which allows only one receiver.
    pub fn new(rx: Receiver<Event>, tx: Sender<Event>) -> Collector {
        Collector { rx, tx }
    }
    /// The statistics collection handler. It will collect statistics
    /// for `address_id`s starting at 0 up to `addresses`.
    pub async fn collect(&mut self) {
        info!("Events reporter started");

        let mut stats = HashMap::from([
        let stats_template = HashMap::from([
            ("total_query_count", 0),
            ("total_xact_count", 0),
            ("total_sent", 0),
            ("total_query_time", 0),
            ("total_received", 0),
            ("total_sent", 0),
            ("total_xact_count", 0),
            ("total_xact_time", 0),
            ("total_wait_time", 0),
            ("avg_query_count", 0),
            ("avg_query_time", 0),
            ("avg_recv", 0),
            ("avg_sent", 0),
            ("avg_xact_count", 0),
            ("avg_xact_time", 0),
            ("avg_wait_time", 0),
            ("maxwait_us", 0),
            ("maxwait", 0),
            ("cl_waiting", 0),
@@ -217,10 +336,53 @@ impl Collector {
            ("sv_tested", 0),
        ]);

        let mut client_server_states: HashMap<i32, EventName> = HashMap::new();
        let mut stats = HashMap::new();

        let mut now = Instant::now();
        // Stats saved after each iteration of the flush event. Used in calculation
        // of averages in the last flush period.
        let mut old_stats: HashMap<usize, HashMap<String, i64>> = HashMap::new();

        // Track which state the client and server are at any given time.
        let mut client_server_states: HashMap<usize, HashMap<i32, EventName>> = HashMap::new();

        // Flush stats to StatsD and calculate averages every 15 seconds.
        let tx = self.tx.clone();
        tokio::task::spawn(async move {
            let mut interval =
                tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD / 15));
            loop {
                interval.tick().await;
                let address_count = get_number_of_addresses();
                for address_id in 0..address_count {
                    let _ = tx.try_send(Event {
                        name: EventName::UpdateStats,
                        value: 0,
                        process_id: -1,
                        address_id: address_id,
                    });
                }
            }
        });

        let tx = self.tx.clone();
        tokio::task::spawn(async move {
            let mut interval =
                tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD));
            loop {
                interval.tick().await;
                let address_count = get_number_of_addresses();
                for address_id in 0..address_count {
                    let _ = tx.try_send(Event {
                        name: EventName::UpdateAverages,
                        value: 0,
                        process_id: -1,
                        address_id: address_id,
                    });
                }
            }
        });

        // The collector loop
        loop {
            let stat = match self.rx.recv().await {
                Some(stat) => stat,
@@ -230,6 +392,14 @@ impl Collector {
                }
            };

            let stats = stats
                .entry(stat.address_id)
                .or_insert(stats_template.clone());
            let client_server_states = client_server_states
                .entry(stat.address_id)
                .or_insert(HashMap::new());
            let old_stats = old_stats.entry(stat.address_id).or_insert(HashMap::new());

            // Some are counters, some are gauges...
            match stat.name {
                EventName::Query => {
@@ -257,10 +427,11 @@ impl Collector {
                    *counter += stat.value;

                    let counter = stats.entry("maxwait_us").or_insert(0);
                    let mic_part = stat.value % 1_000_000;

                    // Report max time here
                    if stat.value > *counter {
                        *counter = stat.value;
                    if mic_part > *counter {
                        *counter = mic_part;
                    }

                    let counter = stats.entry("maxwait").or_insert(0);
@@ -278,71 +449,116 @@ impl Collector {
                | EventName::ServerIdle
                | EventName::ServerTested
                | EventName::ServerLogin => {
                    client_server_states.insert(stat.process_id.unwrap(), stat.name);
                    client_server_states.insert(stat.process_id, stat.name);
                }

                EventName::ClientDisconnecting | EventName::ServerDisconnecting => {
                    client_server_states.remove(&stat.process_id.unwrap());
                    client_server_states.remove(&stat.process_id);
                }

                EventName::UpdateStats => {
                    // Calculate connection states
                    for (_, state) in client_server_states.iter() {
                        match state {
                            EventName::ClientActive => {
                                let counter = stats.entry("cl_active").or_insert(0);
                                *counter += 1;
                            }

                            EventName::ClientWaiting => {
                                let counter = stats.entry("cl_waiting").or_insert(0);
                                *counter += 1;
                            }

                            EventName::ServerIdle => {
                                let counter = stats.entry("sv_idle").or_insert(0);
                                *counter += 1;
                            }

                            EventName::ServerActive => {
                                let counter = stats.entry("sv_active").or_insert(0);
                                *counter += 1;
                            }

                            EventName::ServerTested => {
                                let counter = stats.entry("sv_tested").or_insert(0);
                                *counter += 1;
                            }

                            EventName::ServerLogin => {
                                let counter = stats.entry("sv_login").or_insert(0);
                                *counter += 1;
                            }

                            EventName::ClientIdle => {
                                let counter = stats.entry("cl_idle").or_insert(0);
                                *counter += 1;
                            }

                            _ => unreachable!(),
                        };
                    }

                    // Update latest stats used in SHOW STATS
                    let mut guard = LATEST_STATS.lock();
                    for (key, value) in stats.iter() {
                        let entry = guard.entry(stat.address_id).or_insert(HashMap::new());
                        entry.insert(key.to_string(), value.clone());
                    }

                    // These are re-calculated every iteration of the loop, so we don't want to add values
                    // from the last iteration.
                    for stat in &[
                        "cl_active",
                        "cl_waiting",
                        "cl_idle",
                        "sv_idle",
                        "sv_active",
                        "sv_tested",
                        "sv_login",
                        "maxwait",
                        "maxwait_us",
                    ] {
                        stats.insert(stat, 0);
                    }
                }

                EventName::UpdateAverages => {
                    // Calculate averages
                    for stat in &[
                        "avg_query_count",
                        "avg_query_time",
                        "avg_recv",
                        "avg_sent",
                        "avg_xact_time",
                        "avg_xact_count",
                        "avg_wait_time",
                    ] {
                        let total_name = match stat {
                            &"avg_recv" => "total_received".to_string(), // Because PgBouncer is saving bytes
                            _ => stat.replace("avg_", "total_"),
                        };

                        let old_value = old_stats.entry(total_name.clone()).or_insert(0);
                        let new_value = stats.get(total_name.as_str()).unwrap_or(&0).to_owned();
                        let avg = (new_value - *old_value) / (STAT_PERIOD as i64 / 1_000); // Avg / second

                        stats.insert(stat, avg);
                        *old_value = new_value;
                    }
                }
            };

            // It's been 15 seconds. If there is no traffic, it won't publish anything,
            // but it also doesn't matter then.
            if now.elapsed().as_secs() > 15 {
                for (_, state) in &client_server_states {
                    match state {
                        EventName::ClientActive => {
                            let counter = stats.entry("cl_active").or_insert(0);
                            *counter += 1;
                        }

                        EventName::ClientWaiting => {
                            let counter = stats.entry("cl_waiting").or_insert(0);
                            *counter += 1;
                        }

                        EventName::ClientIdle => {
                            let counter = stats.entry("cl_idle").or_insert(0);
                            *counter += 1;
                        }

                        EventName::ServerIdle => {
                            let counter = stats.entry("sv_idle").or_insert(0);
                            *counter += 1;
                        }

                        EventName::ServerActive => {
                            let counter = stats.entry("sv_active").or_insert(0);
                            *counter += 1;
                        }

                        EventName::ServerTested => {
                            let counter = stats.entry("sv_tested").or_insert(0);
                            *counter += 1;
                        }

                        EventName::ServerLogin => {
                            let counter = stats.entry("sv_login").or_insert(0);
                            *counter += 1;
                        }

                        _ => unreachable!(),
                    };
                }

                info!("{:?}", stats);

                let mut pipeline = self.client.pipeline();

                for (key, value) in stats.iter_mut() {
                    pipeline.gauge(key, *value as f64);
                    *value = 0;
                }

                pipeline.send(&self.client);

                now = Instant::now();
            }
        }
    }
}

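To make the UpdateAverages arithmetic concrete, here is a worked example, not part of the diff, assuming STAT_PERIOD is 15_000 milliseconds (the 15-second flush period implied by the interval tasks above):

// If total_query_count grew from 1_000 to 4_000 during one 15-second window:
let (old_value, new_value): (i64, i64) = (1_000, 4_000);
let stat_period: i64 = 15_000; // assumed value of STAT_PERIOD
let avg = (new_value - old_value) / (stat_period / 1_000);
assert_eq!(avg, 200); // 3_000 queries over 15 seconds = 200 queries/second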
/// Get a snapshot of statistics. Updated once a second
/// by the `Collector`.
pub fn get_stats() -> HashMap<usize, HashMap<String, i64>> {
    LATEST_STATS.lock().clone()
}

/// Get the statistics reporter used to update stats across the pools/clients.
pub fn get_reporter() -> Reporter {
    (*(*REPORTER.load())).clone()
}

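A rough sketch, not part of the diff, of how these pieces fit together, assuming the Receiver/Sender are tokio mpsc channels (the channel capacity is hypothetical):

async fn start_stats() {
    let (tx, rx) = tokio::sync::mpsc::channel::<Event>(100_000);
    let mut collector = Collector::new(rx, tx.clone());

    // Only one receiver can exist, so this is the single collector instance.
    tokio::task::spawn(async move {
        collector.collect().await;
    });

    // Later, e.g. while serving an admin SHOW STATS command:
    for (address_id, counters) in get_stats() {
        let queries = counters.get("total_query_count").unwrap_or(&0);
        println!("address {}: {} total queries", address_id, queries);
    }
}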
src/tls.rs (new file, 57 lines)
@@ -0,0 +1,57 @@
// Stream wrapper.

use rustls_pemfile::{certs, rsa_private_keys};
use std::path::Path;
use std::sync::Arc;
use tokio_rustls::rustls::{self, Certificate, PrivateKey};
use tokio_rustls::TlsAcceptor;

use crate::config::get_config;
use crate::errors::Error;

// TLS
pub fn load_certs(path: &Path) -> std::io::Result<Vec<Certificate>> {
    certs(&mut std::io::BufReader::new(std::fs::File::open(path)?))
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid cert"))
        .map(|mut certs| certs.drain(..).map(Certificate).collect())
}

pub fn load_keys(path: &Path) -> std::io::Result<Vec<PrivateKey>> {
    rsa_private_keys(&mut std::io::BufReader::new(std::fs::File::open(path)?))
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid key"))
        .map(|mut keys| keys.drain(..).map(PrivateKey).collect())
}

pub struct Tls {
    pub acceptor: TlsAcceptor,
}

impl Tls {
    pub fn new() -> Result<Self, Error> {
        let config = get_config();

        let certs = match load_certs(&Path::new(&config.general.tls_certificate.unwrap())) {
            Ok(certs) => certs,
            Err(_) => return Err(Error::TlsError),
        };

        let mut keys = match load_keys(&Path::new(&config.general.tls_private_key.unwrap())) {
            Ok(keys) => keys,
            Err(_) => return Err(Error::TlsError),
        };

        let config = match rustls::ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_single_cert(certs, keys.remove(0))
            .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidInput, err))
        {
            Ok(c) => c,
            Err(_) => return Err(Error::TlsError),
        };

        Ok(Tls {
            acceptor: TlsAcceptor::from(Arc::new(config)),
        })
    }
}
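A rough sketch, not part of the diff, of how the acceptor might be used to upgrade an incoming TCP connection, assuming a tokio TcpListener (the bind address is hypothetical):

use tokio::net::TcpListener;

async fn accept_one_tls_client() -> Result<(), Error> {
    let tls = Tls::new()?;
    let listener = TcpListener::bind("0.0.0.0:6432")
        .await
        .map_err(|_| Error::TlsError)?;

    let (stream, _peer) = listener.accept().await.map_err(|_| Error::TlsError)?;

    // Upgrades the plain TCP stream to TLS; the result implements
    // AsyncRead/AsyncWrite just like the raw socket did.
    let _tls_stream = tls.acceptor.accept(stream).await.map_err(|_| Error::TlsError)?;

    Ok(())
}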
tests/pgbench/simple.sql (new file, 39 lines)
@@ -0,0 +1,39 @@

-- \setrandom aid 1 :naccounts
\set aid random(1, 100000)
-- \setrandom bid 1 :nbranches
\set bid random(1, 100000)
-- \setrandom tid 1 :ntellers
\set tid random(1, 100000)
-- \setrandom delta -5000 5000
\set delta random(-5000,5000)

\set shard random(0, 2)

SET SHARD TO :shard;

SET SERVER ROLE TO 'auto';

BEGIN;

UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;

SELECT abalance FROM pgbench_accounts WHERE aid = :aid;

UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;

UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;

INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);

END;

SET SHARDING KEY TO :aid;

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;

SET SERVER ROLE TO 'replica';

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
@@ -1 +1,2 @@
psycopg2==2.9.3
psutil==5.9.1
@@ -1,11 +1,167 @@
from typing import Tuple
import psycopg2
import psutil
import os
import signal
import time

conn = psycopg2.connect("postgres://random:password@127.0.0.1:6432/db")
cur = conn.cursor()
SHUTDOWN_TIMEOUT = 5

cur.execute("SELECT 1");
res = cur.fetchall()
PGCAT_HOST = "127.0.0.1"
PGCAT_PORT = "6432"

print(res)

# conn.commit()
def pgcat_start():
    pg_cat_send_signal(signal.SIGTERM)
    os.system("./target/debug/pgcat .circleci/pgcat.toml &")


def pg_cat_send_signal(signal: signal.Signals):
    for proc in psutil.process_iter(["pid", "name"]):
        if "pgcat" == proc.name():
            os.kill(proc.pid, signal)
    if signal == signal.SIGTERM:
        # Returns 0 if pgcat process exists
        time.sleep(2)
        if not os.system('pgrep pgcat'):
            raise Exception("pgcat not closed after SIGTERM")


def connect_normal_db(
    autocommit: bool = False,
) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:
    conn = psycopg2.connect(
        f"postgres://sharding_user:sharding_user@{PGCAT_HOST}:{PGCAT_PORT}/sharded_db?application_name=testing_pgcat"
    )
    conn.autocommit = autocommit
    cur = conn.cursor()

    return (conn, cur)


def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.cursor):
    cur.close()
    conn.close()


def test_normal_db_access():
    conn, cur = connect_normal_db()
    cur.execute("SELECT 1")
    res = cur.fetchall()
    print(res)
    cleanup_conn(conn, cur)


def test_admin_db_access():
    conn = psycopg2.connect(
        f"postgres://admin_user:admin_pass@{PGCAT_HOST}:{PGCAT_PORT}/pgcat"
    )
    conn.autocommit = True  # BEGIN/COMMIT is not supported by admin db
    cur = conn.cursor()

    cur.execute("SHOW POOLS")
    res = cur.fetchall()
    print(res)
    cleanup_conn(conn, cur)


def test_shutdown_logic():

    ##### NO ACTIVE QUERIES SIGINT HANDLING #####
    # Start pgcat
    pgcat_start()

    # Wait for server to fully start up
    time.sleep(2)

    # Create client connection and send query (not in transaction)
    conn, cur = connect_normal_db(True)

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")
    cur.execute("COMMIT;")

    # Send sigint to pgcat
    pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    # Check that any new queries fail after sigint since server should close with no active transactions
    try:
        cur.execute("SELECT 1;")
    except psycopg2.OperationalError as e:
        pass
    else:
        # Fail if query execution succeeded
        raise Exception("Server not closed after sigint")

    cleanup_conn(conn, cur)
    pg_cat_send_signal(signal.SIGTERM)

    ##### END #####

    ##### HANDLE TRANSACTION WITH SIGINT #####
    # Start pgcat
    pgcat_start()

    # Wait for server to fully start up
    time.sleep(2)

    # Create client connection and begin transaction
    conn, cur = connect_normal_db(True)

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")

    # Send sigint to pgcat while still in transaction
    pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    # Check that any new queries succeed after sigint since server should still allow transaction to complete
    try:
        cur.execute("SELECT 1;")
    except psycopg2.OperationalError as e:
        # Fail if query fails since server closed
        raise Exception("Server closed while in transaction", e.pgerror)

    cleanup_conn(conn, cur)
    pg_cat_send_signal(signal.SIGTERM)

    ##### END #####

    ##### HANDLE SHUTDOWN TIMEOUT WITH SIGINT #####
    # Start pgcat
    pgcat_start()

    # Wait for server to fully start up
    time.sleep(3)

    # Create client connection and begin transaction, which should prevent server shutdown unless shutdown timeout is reached
    conn, cur = connect_normal_db(True)

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")

    # Send sigint to pgcat while still in transaction
    pg_cat_send_signal(signal.SIGINT)

    # pgcat shutdown timeout is set to SHUTDOWN_TIMEOUT seconds, so we sleep for SHUTDOWN_TIMEOUT + 1 seconds
    time.sleep(SHUTDOWN_TIMEOUT + 1)

    # Check that new queries fail after the timeout since the server should have shut down
    try:
        cur.execute("SELECT 1;")
    except psycopg2.OperationalError as e:
        pass
    else:
        # Fail if query execution succeeded
        raise Exception("Server not closed after sigint and expected timeout")

    cleanup_conn(conn, cur)
    pg_cat_send_signal(signal.SIGTERM)

    ##### END #####


test_normal_db_access()
test_admin_db_access()
test_shutdown_logic()

@@ -1 +1,2 @@
2.7.1
3.0.0

@@ -2,3 +2,5 @@ source "https://rubygems.org"

gem "pg"
gem "activerecord"
gem "rubocop"
gem "toml", "~> 0.3.0"

@@ -1,30 +1,56 @@
GEM
  remote: https://rubygems.org/
  specs:
    activemodel (7.0.2.2)
      activesupport (= 7.0.2.2)
    activerecord (7.0.2.2)
      activemodel (= 7.0.2.2)
      activesupport (= 7.0.2.2)
    activesupport (7.0.2.2)
    activemodel (7.0.3.1)
      activesupport (= 7.0.3.1)
    activerecord (7.0.3.1)
      activemodel (= 7.0.3.1)
      activesupport (= 7.0.3.1)
    activesupport (7.0.3.1)
      concurrent-ruby (~> 1.0, >= 1.0.2)
      i18n (>= 1.6, < 2)
      minitest (>= 5.1)
      tzinfo (~> 2.0)
    concurrent-ruby (1.1.9)
    i18n (1.10.0)
    ast (2.4.2)
    concurrent-ruby (1.1.10)
    i18n (1.11.0)
      concurrent-ruby (~> 1.0)
    minitest (5.15.0)
    minitest (5.16.2)
    parallel (1.22.1)
    parser (3.1.2.0)
      ast (~> 2.4.1)
    parslet (2.0.0)
    pg (1.3.2)
    rainbow (3.1.1)
    regexp_parser (2.3.1)
    rexml (3.2.5)
    rubocop (1.29.0)
      parallel (~> 1.10)
      parser (>= 3.1.0.0)
      rainbow (>= 2.2.2, < 4.0)
      regexp_parser (>= 1.8, < 3.0)
      rexml (>= 3.2.5, < 4.0)
      rubocop-ast (>= 1.17.0, < 2.0)
      ruby-progressbar (~> 1.7)
      unicode-display_width (>= 1.4.0, < 3.0)
    rubocop-ast (1.17.0)
      parser (>= 3.1.1.0)
    ruby-progressbar (1.11.0)
    toml (0.3.0)
      parslet (>= 1.8.0, < 3.0.0)
    tzinfo (2.0.4)
      concurrent-ruby (~> 1.0)
    unicode-display_width (2.1.0)

PLATFORMS
  arm64-darwin-21
  x86_64-linux

DEPENDENCIES
  activerecord
  pg
  rubocop
  toml (~> 0.3.0)

BUNDLED WITH
   2.3.7

@@ -1,6 +1,10 @@
# frozen_string_literal: true

require 'active_record'
require 'pg'
require 'toml'

$stdout.sync = true

# Uncomment these two to see all queries.
# ActiveRecord.verbose_query_logs = true
@@ -12,7 +16,8 @@ ActiveRecord::Base.establish_connection(
  port: 6432,
  username: 'sharding_user',
  password: 'sharding_user',
  database: 'rails_dev',
  database: 'sharded_db',
  application_name: 'testing_pgcat',
  prepared_statements: false, # Transaction mode
  advisory_locks: false # Same
)
@@ -110,3 +115,89 @@ begin
rescue ActiveRecord::StatementInvalid
  puts 'OK'
end

# Test evil clients
def poorly_behaved_client
  conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
  conn.async_exec 'BEGIN'
  conn.async_exec 'SELECT 1'

  conn.close
  puts 'Bad client ok'
end

25.times do
  poorly_behaved_client
end


def test_server_parameters
  server_conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
  raise StandardError, "Bad server version" if server_conn.server_version == 0
  server_conn.close

  admin_conn = PG::connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")
  raise StandardError, "Bad server version" if admin_conn.server_version == 0
  admin_conn.close

  puts 'Server parameters ok'
end


class ConfigEditor
  def initialize
    @original_config_text = File.read('../../.circleci/pgcat.toml')
    text_to_load = @original_config_text.gsub("5432", "\"5432\"")

    @original_configs = TOML.load(text_to_load)
  end

  def original_configs
    TOML.load(TOML::Generator.new(@original_configs).body)
  end

  def with_modified_configs(new_configs)
    text_to_write = TOML::Generator.new(new_configs).body
    text_to_write = text_to_write.gsub("\"5432\"", "5432")
    File.write('../../.circleci/pgcat.toml', text_to_write)
    yield
  ensure
    File.write('../../.circleci/pgcat.toml', @original_config_text)
  end
end


def test_reload_pool_recycling
  admin_conn = PG::connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")
  server_conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")

  server_conn.async_exec("BEGIN")
  conf_editor = ConfigEditor.new
  new_configs = conf_editor.original_configs

  # swap shards
  new_configs["pools"]["sharded_db"]["shards"]["0"]["database"] = "shard1"
  new_configs["pools"]["sharded_db"]["shards"]["1"]["database"] = "shard0"

  raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard0'
  conf_editor.with_modified_configs(new_configs) { admin_conn.async_exec("RELOAD") }
  raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard0'
  server_conn.async_exec("COMMIT;")

  # Transaction finished, client should get new configs
  raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard1'
  server_conn.close()

  # New connection should get new configs
  server_conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
  raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard1'

ensure
  admin_conn.async_exec("RELOAD") # Go back to old state
  admin_conn.close
  server_conn.close
  puts "Pool Recycling okay!"
end

test_reload_pool_recycling

@@ -1,11 +1,12 @@

DROP DATABASE IF EXISTS shard0;
DROP DATABASE IF EXISTS shard1;
DROP DATABASE IF EXISTS shard2;
DROP DATABASE IF EXISTS some_db;

CREATE DATABASE shard0;
CREATE DATABASE shard1;
CREATE DATABASE shard2;
CREATE DATABASE some_db;

\c shard0

@@ -41,21 +42,51 @@ CREATE TABLE data (

CREATE TABLE data_shard_2 PARTITION OF data FOR VALUES WITH (MODULUS 3, REMAINDER 2);

DROP ROLE IF EXISTS sharding_user;
CREATE ROLE sharding_user ENCRYPTED PASSWORD 'sharding_user' LOGIN;

GRANT CONNECT ON DATABASE shard0 TO sharding_user;
GRANT CONNECT ON DATABASE shard1 TO sharding_user;
GRANT CONNECT ON DATABASE shard2 TO sharding_user;
\c some_db

DROP TABLE IF EXISTS data CASCADE;

CREATE TABLE data (
    id BIGINT,
    value VARCHAR
);

DROP ROLE IF EXISTS sharding_user;
DROP ROLE IF EXISTS other_user;
DROP ROLE IF EXISTS simple_user;
CREATE ROLE sharding_user ENCRYPTED PASSWORD 'sharding_user' LOGIN;
CREATE ROLE other_user ENCRYPTED PASSWORD 'other_user' LOGIN;
CREATE ROLE simple_user ENCRYPTED PASSWORD 'simple_user' LOGIN;

GRANT CONNECT ON DATABASE shard0 TO sharding_user;
GRANT CONNECT ON DATABASE shard1 TO sharding_user;
GRANT CONNECT ON DATABASE shard2 TO sharding_user;

GRANT CONNECT ON DATABASE shard0 TO other_user;
GRANT CONNECT ON DATABASE shard1 TO other_user;
GRANT CONNECT ON DATABASE shard2 TO other_user;

GRANT CONNECT ON DATABASE some_db TO simple_user;

\c shard0
GRANT ALL ON SCHEMA public TO sharding_user;
GRANT ALL ON TABLE data TO sharding_user;
GRANT ALL ON SCHEMA public TO other_user;
GRANT ALL ON TABLE data TO other_user;

\c shard1
GRANT ALL ON SCHEMA public TO sharding_user;
GRANT ALL ON TABLE data TO sharding_user;
GRANT ALL ON SCHEMA public TO other_user;
GRANT ALL ON TABLE data TO other_user;

\c shard2
GRANT ALL ON SCHEMA public TO sharding_user;
GRANT ALL ON TABLE data TO sharding_user;
GRANT ALL ON SCHEMA public TO other_user;
GRANT ALL ON TABLE data TO other_user;

\c some_db
GRANT ALL ON SCHEMA public TO simple_user;
GRANT ALL ON TABLE data TO simple_user;

@@ -151,3 +151,12 @@ SELECT 1;
set server role to 'replica';
SeT SeRver Role TO 'PrImARY';
select 1;

SET PRIMARY READS TO 'on';
SELECT 1;

SET PRIMARY READS TO 'off';
SELECT 1;

SET PRIMARY READS TO 'default';
SELECT 1;