Mirror of https://github.com/postgresml/pgcat.git, synced 2026-03-23 09:26:30 +00:00

Compare commits: sharded...levkk-sear (156 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 5872354c3e |  |
|  | 48bb6ebeef |  |
|  | 3bc4f9351c |  |
|  | 9d84d6f131 |  |
|  | c054ff068d |  |
|  | 5a0cea6a24 |  |
|  | d0e8171b1b |  |
|  | 069d76029f |  |
|  | 902fafd8d7 |  |
|  | 5f5b5e2543 |  |
|  | 5948fef6cf |  |
|  | 790898c20e |  |
|  | d64f6793c1 |  |
|  | cea35db35c |  |
|  | a3aefabb47 |  |
|  | 3285006440 |  |
|  | 52303cc808 |  |
|  | be254cedd9 |  |
|  | a5db6881b8 |  |
|  | f963b12821 |  |
|  | a262337ba5 |  |
|  | 014628d6e0 |  |
|  | 65c32ad9fb |  |
|  | 1b166b462d |  |
|  | 7592339092 |  |
|  | 3719c22322 |  |
|  | 106ebee71c |  |
|  | b79f55abd6 |  |
|  | b828e62408 |  |
|  | 499612dd76 |  |
|  | 5ac85eaadd |  |
|  | 20e8f9d74c |  |
|  | 1b648ca00e |  |
|  | 35381ba8fd |  |
|  | e591865d78 |  |
|  | 48cff1f955 |  |
|  | 8a06fc4047 |  |
|  | 14d4dc45f5 |  |
|  | 2ae4b438e3 |  |
|  | c5be5565a5 |  |
|  | eff8e3e229 |  |
|  | ae3db111ac |  |
|  | 8bcfbed574 |  |
|  | 773602dedf |  |
|  | 21bf07258c |  |
|  | 186f8be5b3 |  |
|  | 7667fefead |  |
|  | c11d595ac7 |  |
|  | 8f3202ed92 |  |
|  | eb58920870 |  |
|  | b974aacd71 |  |
|  | 7dfe59a91a |  |
|  | 5bcd3bf9c3 |  |
|  | f06f64119c |  |
|  | b93303eb83 |  |
|  | d865d9f9d8 |  |
|  | d3310a62c2 |  |
|  | d412238f47 |  |
|  | 7782933f59 |  |
|  | bac4e1f52c |  |
|  | 37e3a86881 |  |
|  | 61db13f614 |  |
|  | fe32b5ef17 |  |
|  | 54699222f8 |  |
|  | ccbca66e7a |  |
|  | df85139281 |  |
|  | 509e4815a3 |  |
|  | 5338ff2323 |  |
|  | 1ea0a7f332 |  |
|  | d1b86d363d |  |
|  | b309ead58f |  |
|  | 341ebf4123 |  |
|  | 35828a0a8c |  |
|  | 1e8fa110ae |  |
|  | d4186b7815 |  |
|  | aaeef69d59 |  |
|  | b21e0f4a7e |  |
|  | eb1473060e |  |
|  | 26f75f8d5d |  |
|  | 99d65fc475 |  |
|  | 206fdc9769 |  |
|  | f74101cdfe |  |
|  | 8e0682482d |  |
|  | 6db51b4a11 |  |
|  | a784883611 |  |
|  | 5972b6fa52 |  |
|  | b3c8ca4b8a |  |
|  | dce72ba262 |  |
|  | af1716bcd7 |  |
|  | 3f16123cc5 |  |
|  | f6f5471aa0 |  |
|  | a6fc935040 |  |
|  | 754381fc6c |  |
|  | b1e9a406fb |  |
|  | f21a3d8d8c |  |
|  | f805b43a08 |  |
|  | 86941d62e4 |  |
|  | aceb2ace24 |  |
|  | 303fec063b |  |
|  | 64574211c6 |  |
|  | 44b5e7eeee |  |
|  | 108f5715c0 |  |
|  | 3b795464a8 |  |
|  | d4c1fc87ee |  |
|  | 4ca50b9a71 |  |
|  | a556ec1c43 |  |
|  | bbacb9cf01 |  |
|  | aa796289bf |  |
|  | 4c8a3987fe |  |
|  | 7b0ceefb96 |  |
|  | bb84dcee64 |  |
|  | 1c406e0fc6 |  |
|  | 05b4cccb97 |  |
|  | 659b1e00b8 |  |
|  | 8e5e28a139 |  |
|  | 574ebe02b8 |  |
|  | 9c521f07c1 |  |
|  | 4aa9c3d3c7 |  |
|  | 20ceb729a0 |  |
|  | eb45d65110 |  |
|  | 526b9eb666 |  |
|  | ab8573c94f |  |
|  | bc5b9e422f |  |
|  | e8263430a3 |  |
|  | 0d369ab90a |  |
|  | 595e564216 |  |
|  | 06575eae7b |  |
|  | 0bec14ba1c |  |
|  | ee9f609d4e |  |
|  | 19e9f26467 |  |
|  | 070c38ddc5 |  |
|  | 1798065b76 |  |
|  | a3b910ea72 |  |
|  | b9b89db708 |  |
|  | 604af32b94 |  |
|  | 39028282b9 |  |
|  | 9d51fe8985 |  |
|  | 12011be3ec |  |
|  | 86386c7377 |  |
|  | 66c5271453 |  |
|  | 17aed5dcee |  |
|  | 89dc33f8aa |  |
|  | c6ccc6b6ae |  |
|  | 495d6ce6c3 |  |
|  | 883b6ee793 |  |
|  | 22c6f13dc7 |  |
|  | c1476d29da |  |
|  | 8209633e05 |  |
|  | daf120aeac |  |
|  | 6d5ab79ed3 |  |
|  | fccfb40258 |  |
|  | a9b2a41a9b |  |
|  | 28c70d47b6 |  |
|  | 00f2d39446 |  |
|  | 4c16ba3848 |  |
|  | d530f30def |  |
.circleci/config.yml
@@ -9,7 +9,20 @@ jobs:
# Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
# See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
docker:
- image: cimg/rust:1.58.1
- image: levkk/pgcat-ci:latest
  environment:
    RUST_LOG: info
    RUSTFLAGS: "-C instrument-coverage"
    LLVM_PROFILE_FILE: "pgcat-%m.profraw"
- image: postgres:14
  # auth:
  #   username: mydockerhub-user
  #   password: $DOCKERHUB_PASSWORD
  environment:
    POSTGRES_USER: postgres
    POSTGRES_DB: postgres
    POSTGRES_PASSWORD: postgres
    POSTGRES_HOST_AUTH_METHOD: scram-sha-256
# Add steps to the job
# See: https://circleci.com/docs/2.0/configuration-reference/#steps
steps:
@@ -17,11 +30,23 @@ jobs:
- restore_cache:
    key: cargo-lock-2-{{ checksum "Cargo.lock" }}
- run:
    name: "Build"
    name: "Lint"
    command: "cargo fmt --check"
- run:
    name: "Install dependencies"
    command: "sudo apt-get update && sudo apt-get install -y psmisc postgresql-contrib-12 postgresql-client-12 ruby ruby-dev libpq-dev python3 python3-pip lcov llvm-11 && sudo apt-get upgrade curl"
- run:
    name: "Install rust tools"
    command: "cargo install cargo-binutils rustfilt && rustup component add llvm-tools-preview"
- run:
    name: "Build"
    command: "cargo build"
- run:
    name: "Test"
    command: "cargo test"
    name: "Tests"
    command: "cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"
- store_artifacts:
    path: /tmp/cov
    destination: coverage-data
- save_cache:
    key: cargo-lock-2-{{ checksum "Cargo.lock" }}
    paths:
@@ -34,4 +59,4 @@ jobs:
workflows:
  build:
    jobs:
      - build
      - build
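If you are editing this CI config, a quick local sanity check is possible with the CircleCI CLI. The CLI itself is an assumption here (it is not part of this change); the file path matches the repository layout shown above.

```bash
# Hypothetical local check of the updated CI config (requires the CircleCI CLI).
circleci config validate .circleci/config.yml
```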
7  .circleci/generate_coverage.sh  Executable file
@@ -0,0 +1,7 @@
#!/bin/bash

rust-profdata merge -sparse pgcat-*.profraw -o pgcat.profdata

rust-cov export -ignore-filename-regex="rustc|registry" -Xdemangler=rustfilt -instr-profile=pgcat.profdata --object ./target/debug/pgcat --format lcov > ./lcov.info

genhtml lcov.info --output-directory /tmp/cov --prefix $(pwd)
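The script above assumes the build and test runs were done with coverage instrumentation enabled, which is what the RUSTFLAGS and LLVM_PROFILE_FILE variables in the CI config provide. A rough local equivalent, assuming cargo-binutils, rustfilt, llvm-tools-preview and lcov/genhtml are installed as in Dockerfile.ci, might look like:

```bash
# Sketch of a local coverage run mirroring the CI job; not an official script.
export RUSTFLAGS="-C instrument-coverage"
export LLVM_PROFILE_FILE="pgcat-%m.profraw"
cargo build                           # instrumented debug binary
cargo test                            # unit tests write pgcat-*.profraw
bash .circleci/run_tests.sh           # integration tests add more profiles
bash .circleci/generate_coverage.sh   # merge profiles and render HTML into /tmp/cov
```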
146  .circleci/pgcat.toml  Normal file
@@ -0,0 +1,146 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 100

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 100

# How long to keep connection available for immediate re-use, without running a healthcheck query on it
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# Reload config automatically if it changes.
autoreload = true

# TLS
tls_certificate = ".circleci/server.cert"
tls_private_key = ".circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat)
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
admin_username = "admin_user"
admin_password = "admin_pass"

# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as database name when connecting
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitely selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"
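Per the comment above, pg_bigint_hash follows the same hashing scheme Postgres uses for PARTITION BY HASH. As a purely hypothetical illustration (the `events` table is made up; the modulus of 3 matches the three shards configured below), the native Postgres equivalent of that partitioning looks like this:

```bash
# Hash-partitioned table on a BIGINT key, split three ways like the sharded_db pool.
psql -h 127.0.0.1 -p 5432 -U postgres <<-EOF
CREATE TABLE events (id BIGINT NOT NULL) PARTITION BY HASH (id);
CREATE TABLE events_0 PARTITION OF events FOR VALUES WITH (MODULUS 3, REMAINDER 0);
CREATE TABLE events_1 PARTITION OF events FOR VALUES WITH (MODULUS 3, REMAINDER 1);
CREATE TABLE events_2 PARTITION OF events FOR VALUES WITH (MODULUS 3, REMAINDER 2);
EOF
```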
# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user
# The maximum number of connection from a single Pgcat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"
search_path = "\"$user\",public"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
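With pgcat running against this config, the connection string from the comment above and the admin credentials can be exercised directly; the host, port, user and database names below are taken from the file itself:

```bash
# Smoke test the sharded pool through the pooler (port 6432, user/database from pgcat.toml).
PGPASSWORD=sharding_user psql -h 127.0.0.1 -p 6432 -U sharding_user -d sharded_db -c 'SELECT 1'

# The virtual admin database answers on the same port.
PGPASSWORD=admin_pass psql -h 127.0.0.1 -p 6432 -U admin_user -d pgbouncer -c 'SHOW POOLS'
```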
152  .circleci/run_tests.sh  Normal file
@@ -0,0 +1,152 @@
#!/bin/bash

set -e
set -o xtrace

# non-zero exit code if we provide bad configs
(! ./target/debug/pgcat "fake_configs" 2>/dev/null)

# Start PgCat with a particular log level
# for inspection.
function start_pgcat() {
    kill -s SIGINT $(pgrep pgcat) || true
    RUST_LOG=${1} ./target/debug/pgcat .circleci/pgcat.toml &
    sleep 1
}

# Setup the database with shards and user
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f tests/sharding/query_routing_setup.sql

PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard0 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i

# Install Toxiproxy to simulate a downed/slow database
wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb
sudo dpkg -i toxiproxy-2.4.0.deb

# Start Toxiproxy
toxiproxy-server &
sleep 1

# Create a database at port 5433, forward it to Postgres
toxiproxy-cli create -l 127.0.0.1:5433 -u 127.0.0.1:5432 postgres_replica

start_pgcat "info"

# Check that prometheus is running
curl --fail localhost:9930/metrics

export PGPASSWORD=sharding_user
export PGDATABASE=sharded_db

# pgbench test
pgbench -U sharding_user -i -h 127.0.0.1 -p 6432
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol simple -f tests/pgbench/simple.sql
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended

# COPY TO STDOUT test
psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'COPY (SELECT * FROM pgbench_accounts LIMIT 15) TO STDOUT;' > /dev/null

# Query cancellation test
(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Reload pool (closing unused server connections)
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD'

(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Sharding insert
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_insert.sql

# Sharding select
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_select.sql > /dev/null

# Replica/primary selection & more sharding tests
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_primary_replica.sql > /dev/null

# Statement timeout tests
sed -i 's/statement_timeout = 0/statement_timeout = 100/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config
sleep 0.2

# This should timeout
(! psql -U sharding_user -e -h 127.0.0.1 -p 6432 -c 'select pg_sleep(0.5)')

# Disable statement timeout
sed -i 's/statement_timeout = 100/statement_timeout = 0/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config again

#
# ActiveRecord tests
#
cd tests/ruby
sudo gem install bundler
bundle install
ruby tests.rb
cd ../..

#
# Python tests
# These tests will start and stop the pgcat server so it will need to be restarted after the tests
#
pip3 install -r tests/python/requirements.txt
python3 tests/python/tests.py

start_pgcat "info"

# Admin tests
export PGPASSWORD=admin_pass
psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW CONFIG' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW LISTS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW POOLS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW VERSION' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c "SET client_encoding TO 'utf8'" > /dev/null # will ignore
(! psql -U admin_user -e -h 127.0.0.1 -p 6432 -d random_db -c 'SHOW STATS' > /dev/null)
export PGPASSWORD=sharding_user

# Start PgCat in debug to demonstrate failover better
start_pgcat "trace"

# Add latency to the replica at port 5433 slightly above the healthcheck timeout
toxiproxy-cli toxic add -t latency -a latency=300 postgres_replica
sleep 1

# Note the failover in the logs
timeout 5 psql -U sharding_user -e -h 127.0.0.1 -p 6432 <<-EOF
SELECT 1;
SELECT 1;
SELECT 1;
EOF

# Remove latency
toxiproxy-cli toxic remove --toxicName latency_downstream postgres_replica

start_pgcat "info"

# Test session mode (and config reload)
sed -i '0,/simple_db/s/pool_mode = "transaction"/pool_mode = "session"/' .circleci/pgcat.toml

# Reload config test
kill -SIGHUP $(pgrep pgcat)

# Revert settings after reload. Makes test runs idempotent
sed -i '0,/simple_db/s/pool_mode = "session"/pool_mode = "transaction"/' .circleci/pgcat.toml

sleep 1

# Prepared statements that will only work in session mode
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol prepared

# Attempt clean shut down
killall pgcat -s SIGINT

# Allow for graceful shutdown
sleep 1
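The script assumes a locally built debug binary at ./target/debug/pgcat and a Postgres server on 127.0.0.1:5432 with the postgres/postgres superuser, which is what the CI job provides; running it outside CI is roughly:

```bash
# Rough local invocation of the integration suite; prerequisites as described above.
cargo build
bash .circleci/run_tests.sh
```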
21  .circleci/server.cert  Normal file
@@ -0,0 +1,21 @@
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUChIvUGFJGJe5EDch32rchqoxER0wDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA2MjcyMjI2MDZaFw0yMjA3
MjcyMjI2MDZaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDdTwrBzV1v79faVckFvIn/9V4fypYs4vDi3X+h3wGn
AjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzSioOv5zsOAvObwrnzbtKSwfs3aP5g
eEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwvGn3A+SVCLyPMTNwnem48+ONh2F9u
FHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHYM5Hx9tzc+NVYdERPtaVcX8ycw1Eh
9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe72loreiwrECUOLAnAfp9Xqc+rMPP
aLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE9nQvksjnAgMBAAGjUzBRMB0GA1Ud
DgQWBBQLDtzexqjx7xPtUZuZB/angU9oSDAfBgNVHSMEGDAWgBQLDtzexqjx7xPt
UZuZB/angU9oSDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC/
mxY/a/WeLENVj2Gg9EUH0CKzfqeTey1mb6YfPGxzrD7oq1m0Vn2MmTbjZrJgh/Ob
QckO3ElF4kC9+6XP+iDPmabGpjeLgllBboT5l2aqnD1syMrf61WPLzgRzRfplYGy
cjBQDDKPu8Lu0QRMWU28tHYN0bMxJoCuXysGGX5WsuFnKCA6f/V+nycJJXxJH3eB
eLjTueD9/RE3OXhi6m8A29Q1E9AE5EF4uRxYXrr91BmYnk4aFvSmBxhUEzE12eSN
lHB/uSc0+Dp+UVmVr6wW8AQfd16UBA0BUf3kSW3aSvirYPYH0rXiOOpEJgOwOMnR
f5+XAbN1Y+3OsFz/ZmP9
-----END CERTIFICATE-----
28  .circleci/server.key  Normal file
@@ -0,0 +1,28 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDdTwrBzV1v79fa
VckFvIn/9V4fypYs4vDi3X+h3wGnAjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzS
ioOv5zsOAvObwrnzbtKSwfs3aP5geEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwv
Gn3A+SVCLyPMTNwnem48+ONh2F9uFHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHY
M5Hx9tzc+NVYdERPtaVcX8ycw1Eh9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe
72loreiwrECUOLAnAfp9Xqc+rMPPaLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE
9nQvksjnAgMBAAECggEAbnvddO9frFhivJ+DIhgEFQKcIOb0nigV9kx6QYehvYy8
lp/+aMb0Lk7d9r8rFQdL/icMK5GwZALg2KNKJvEbbF1Q3PwT9VHoUlgBYKJMDEFA
e9GKu7ASuVBjTZzdUUItwkkbe5eS/aQGeSWSjlpTnX0HNCFS72qRymK+scRhsAQf
ZoHyZHDslkvPR3Pos+sndWBYCDHag5/KoPhsMt1+5S9NQcOUHx9Ac0gLHjau3N+P
0FhODHFFGnnpyQvLvj6u3ZOR34ladMgoBglE0O3vPFhckn92EK4teeTWOsUMotiz
qM3QIJTOJjtiY6VDGY93bIa4pFvt7Zi4vIerenKt0QKBgQD/UMFqfevTAMrk10AC
bOa4+cM07ORY4ZwVj5ILhZn+8crDEEtBsUyuEU2FTINtnoEq1yGc/IXpsyS1BHjL
L1xSml5LN3jInbi8z5XQfY5Sj3VOMtwY6yD20jcdeDC44rz3nStXdkcMWxbTMapx
iOPsap5ciUKOMS7LyMidPEG/LQKBgQDd5vHgrLN0FBIIm+vZg6MEm4QyobstVp4l
7V/GZsdL+M8AQv1Rx+5wSUSWKomOIv5lglis7f6g0c9O7Qkr78/wzoyoKC2RRqPp
I90GjY2Iv22N4GIkRrDAgMZbkTitzIB6tbXEVeLAOh3frFJ8IwauRCOiXIjrZdJ4
FvV86+nU4wKBgQDdWTP2kWkMrBk7QOp7r9Jv+AmnLuHhtOdPQgOJ/bA++X2ik9PL
Bl3GY7XjpSwks1CkxZKcucmXjPp7/X6EGXFfI/owF82dkDADca0e7lufdERtIWb0
K5WOpz2lTPhgsiLGQfq7fw2lxqsJOnvcpqOD6gOVkmKjSDyb7F0RBJazmQKBgQDD
a8PQTcesjpBjLI3EfX1vbVY7ENu6zfFxDV+vZoxVh8UlQdm90AlYse3JIaUKnB7W
Xrihcucv0hZ0N6RAIW5LcFvHK7sVmdR4WbEpODhRGeTtcZJ8yBSZM898jKQRy2vK
pYRyaADNsWDlvujVkjMr/a40KrIaPQ3h3LZNUaYYaQKBgQD1x8A5S5SiE1cN1vFr
aACkmA2WqEDKKhUsUigJdwW6WB/B9kWlIlz/iV1H9uwBXtSIYG4VqCSTAvh0z4gX
Qu2SrdPm5PYnKzpdynpz78OnGdflD1RKWFGHItR6GN6tj/VmulO6mlFvT4jzBQ7j
+Hf8m2TcD4U3ksz3xw+YOD+cmA==
-----END RSA PRIVATE KEY-----
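These are throwaway self-signed test credentials referenced by tls_certificate/tls_private_key in .circleci/pgcat.toml. One way to regenerate an equivalent pair (any self-signed certificate would do here; the subject simply mirrors the one above) is:

```bash
# Regenerate a short-lived self-signed cert/key pair for local TLS testing.
openssl req -x509 -newkey rsa:2048 -nodes -days 30 \
  -subj "/C=AU/ST=Some-State/O=Internet Widgits Pty Ltd" \
  -keyout .circleci/server.key -out .circleci/server.cert
```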
4  .dockerignore  Normal file
@@ -0,0 +1,4 @@
target/
tests/
tracing/
.circleci/
38  .github/ISSUE_TEMPLATE/bug_report.md  vendored  Normal file
@@ -0,0 +1,38 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
 - OS: [e.g. iOS]
 - Browser [e.g. chrome, safari]
 - Version [e.g. 22]

**Smartphone (please complete the following information):**
 - Device: [e.g. iPhone6]
 - OS: [e.g. iOS8.1]
 - Browser [e.g. stock browser, safari]
 - Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
20  .github/ISSUE_TEMPLATE/feature_request.md  vendored  Normal file
@@ -0,0 +1,20 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
2  .gitignore  vendored
@@ -1,2 +1,4 @@
.idea
/target
*.deb
.vscode
CONTRIBUTING.md
@@ -1,6 +1,13 @@
## Introduction

Thank you for contributing! Just a few tips here:

1. `cargo fmt` your code before opening up a PR
2. Run the "test suite" (i.e. PgBench) to make sure everything still works.
2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`.
3. Performance is important, make sure there are no regressions in your branch vs. `main`.

Happy hacking!

## TODOs

See [Issues]([url](https://github.com/levkk/pgcat/issues)).
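Taken together with the CI config above, the contributing tips boil down to a short pre-PR checklist; this is a sketch of what CI runs, not an official script:

```bash
# Pre-PR checklist mirroring the CI job: lint, build, unit tests, integration tests.
cargo fmt --check
cargo build
cargo test
bash .circleci/run_tests.sh
```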
595  Cargo.lock  generated
@@ -11,6 +11,12 @@ dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "arc-swap"
|
||||
version = "1.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f"
|
||||
|
||||
[[package]]
|
||||
name = "async-trait"
|
||||
version = "0.1.52"
|
||||
@@ -22,12 +28,29 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "atty"
|
||||
version = "0.2.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
|
||||
dependencies = [
|
||||
"hermit-abi",
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
|
||||
|
||||
[[package]]
|
||||
name = "base64"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
|
||||
|
||||
[[package]]
|
||||
name = "bb8"
|
||||
version = "0.7.1"
|
||||
@@ -56,12 +79,24 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bumpalo"
|
||||
version = "3.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3"
|
||||
|
||||
[[package]]
|
||||
name = "bytes"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.0.73"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
|
||||
|
||||
[[package]]
|
||||
name = "cfg-if"
|
||||
version = "1.0.0"
|
||||
@@ -92,24 +127,50 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "crypto-common"
|
||||
version = "0.1.1"
|
||||
version = "0.1.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "683d6b536309245c849479fba3da410962a43ed8e51c26b729208ec0ac2798d0"
|
||||
checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"
|
||||
dependencies = [
|
||||
"generic-array",
|
||||
"typenum",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "digest"
|
||||
version = "0.10.1"
|
||||
version = "0.10.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b697d66081d42af4fba142d56918a3cb21dc8eb63372c6b85d14f44fb9c5979b"
|
||||
checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"
|
||||
dependencies = [
|
||||
"block-buffer",
|
||||
"crypto-common",
|
||||
"generic-array",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "env_logger"
|
||||
version = "0.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3"
|
||||
dependencies = [
|
||||
"atty",
|
||||
"humantime",
|
||||
"log",
|
||||
"regex",
|
||||
"termcolor",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "exitcode"
|
||||
version = "1.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "de853764b47027c2e862a995c34978ffa63c1501f2e15f987ba11bd4f9bba193"
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
|
||||
|
||||
[[package]]
|
||||
name = "futures-channel"
|
||||
version = "0.3.19"
|
||||
@@ -125,6 +186,12 @@ version = "0.3.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7"
|
||||
|
||||
[[package]]
|
||||
name = "futures-sink"
|
||||
version = "0.3.21"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868"
|
||||
|
||||
[[package]]
|
||||
name = "futures-task"
|
||||
version = "0.3.19"
|
||||
@@ -166,6 +233,31 @@ dependencies = [
|
||||
"wasi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "h2"
|
||||
version = "0.3.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"fnv",
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"futures-util",
|
||||
"http",
|
||||
"indexmap",
|
||||
"slab",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hashbrown"
|
||||
version = "0.12.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
|
||||
|
||||
[[package]]
|
||||
name = "hermit-abi"
|
||||
version = "0.1.19"
|
||||
@@ -175,6 +267,89 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "hmac"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
|
||||
dependencies = [
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "0.2.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"fnv",
|
||||
"itoa",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http-body"
|
||||
version = "0.4.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"http",
|
||||
"pin-project-lite",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "httparse"
|
||||
version = "1.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c"
|
||||
|
||||
[[package]]
|
||||
name = "httpdate"
|
||||
version = "1.0.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
|
||||
|
||||
[[package]]
|
||||
name = "humantime"
|
||||
version = "2.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
|
||||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "0.14.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-channel",
|
||||
"futures-core",
|
||||
"futures-util",
|
||||
"h2",
|
||||
"http",
|
||||
"http-body",
|
||||
"httparse",
|
||||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
"want",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "indexmap"
|
||||
version = "1.9.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
|
||||
dependencies = [
|
||||
"autocfg",
|
||||
"hashbrown",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "instant"
|
||||
version = "0.1.12"
|
||||
@@ -185,10 +360,31 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.117"
|
||||
name = "itoa"
|
||||
version = "1.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c"
|
||||
checksum = "6c8af84674fe1f223a982c933a0ee1086ac4d4052aa0fb8060c12c6ad838e754"
|
||||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27"
|
||||
dependencies = [
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "lazy_static"
|
||||
version = "1.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
|
||||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.126"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
@@ -316,22 +512,82 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "pgcat"
|
||||
version = "0.1.0"
|
||||
version = "0.6.0-alpha1"
|
||||
dependencies = [
|
||||
"arc-swap",
|
||||
"async-trait",
|
||||
"base64",
|
||||
"bb8",
|
||||
"bytes",
|
||||
"chrono",
|
||||
"env_logger",
|
||||
"exitcode",
|
||||
"hmac",
|
||||
"hyper",
|
||||
"log",
|
||||
"md-5",
|
||||
"num_cpus",
|
||||
"once_cell",
|
||||
"parking_lot",
|
||||
"phf",
|
||||
"rand",
|
||||
"regex",
|
||||
"rustls-pemfile",
|
||||
"serde",
|
||||
"serde_derive",
|
||||
"sha-1",
|
||||
"sha2",
|
||||
"sqlparser",
|
||||
"stringprep",
|
||||
"tokio",
|
||||
"tokio-rustls",
|
||||
"toml",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf"
|
||||
version = "0.10.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fabbf1ead8a5bcbc20f5f8b939ee3f5b0f6f281b6ad3468b84656b658b455259"
|
||||
dependencies = [
|
||||
"phf_macros",
|
||||
"phf_shared",
|
||||
"proc-macro-hack",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_generator"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6"
|
||||
dependencies = [
|
||||
"phf_shared",
|
||||
"rand",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_macros"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "58fdf3184dd560f160dd73922bea2d5cd6e8f064bf4b13110abd81b03697b4e0"
|
||||
dependencies = [
|
||||
"phf_generator",
|
||||
"phf_shared",
|
||||
"proc-macro-hack",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "phf_shared"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
|
||||
dependencies = [
|
||||
"siphasher",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.8"
|
||||
@@ -350,6 +606,12 @@ version = "0.2.16"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro-hack"
|
||||
version = "0.5.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
|
||||
|
||||
[[package]]
|
||||
name = "proc-macro2"
|
||||
version = "1.0.36"
|
||||
@@ -419,9 +681,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.5.4"
|
||||
version = "1.5.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
|
||||
checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"memchr",
|
||||
@@ -434,12 +696,58 @@ version = "0.6.25"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
|
||||
|
||||
[[package]]
|
||||
name = "ring"
|
||||
version = "0.16.20"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"spin",
|
||||
"untrusted",
|
||||
"web-sys",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustls"
|
||||
version = "0.20.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033"
|
||||
dependencies = [
|
||||
"log",
|
||||
"ring",
|
||||
"sct",
|
||||
"webpki",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustls-pemfile"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9"
|
||||
dependencies = [
|
||||
"base64",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "scopeguard"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
|
||||
|
||||
[[package]]
|
||||
name = "sct"
|
||||
version = "0.7.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4"
|
||||
dependencies = [
|
||||
"ring",
|
||||
"untrusted",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.136"
|
||||
@@ -468,6 +776,17 @@ dependencies = [
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-registry"
|
||||
version = "1.4.0"
|
||||
@@ -477,6 +796,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "siphasher"
|
||||
version = "0.3.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
|
||||
|
||||
[[package]]
|
||||
name = "slab"
|
||||
version = "0.4.5"
|
||||
@@ -489,6 +814,47 @@ version = "1.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.4.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "spin"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"
|
||||
|
||||
[[package]]
|
||||
name = "sqlparser"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b8f192f29f4aa49e57bebd0aa05858e0a1f32dd270af36efe49edb82cbfffab6"
|
||||
dependencies = [
|
||||
"log",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "stringprep"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ee348cb74b87454fff4b551cbf727025810a004f88aeacae7f85b87f4e9a1c1"
|
||||
dependencies = [
|
||||
"unicode-bidi",
|
||||
"unicode-normalization",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "subtle"
|
||||
version = "2.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601"
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "1.0.86"
|
||||
@@ -500,6 +866,15 @@ dependencies = [
|
||||
"unicode-xid",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "termcolor"
|
||||
version = "1.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
|
||||
dependencies = [
|
||||
"winapi-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.1.44"
|
||||
@@ -511,6 +886,21 @@ dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tinyvec"
|
||||
version = "1.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50"
|
||||
dependencies = [
|
||||
"tinyvec_macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tinyvec_macros"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c"
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.16.1"
|
||||
@@ -541,6 +931,31 @@ dependencies = [
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-rustls"
|
||||
version = "0.23.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59"
|
||||
dependencies = [
|
||||
"rustls",
|
||||
"tokio",
|
||||
"webpki",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio-util"
|
||||
version = "0.7.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
"futures-sink",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.5.8"
|
||||
@@ -550,30 +965,179 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tower-service"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
|
||||
|
||||
[[package]]
|
||||
name = "tracing"
|
||||
version = "0.1.34"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"pin-project-lite",
|
||||
"tracing-attributes",
|
||||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-core"
|
||||
version = "0.1.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "try-lock"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642"
|
||||
|
||||
[[package]]
|
||||
name = "typenum"
|
||||
version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-bidi"
|
||||
version = "0.3.8"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-normalization"
|
||||
version = "0.1.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9"
|
||||
dependencies = [
|
||||
"tinyvec",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
|
||||
|
||||
[[package]]
|
||||
name = "untrusted"
|
||||
version = "0.7.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
|
||||
|
||||
[[package]]
|
||||
name = "version_check"
|
||||
version = "0.9.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
|
||||
|
||||
[[package]]
|
||||
name = "want"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0"
|
||||
dependencies = [
|
||||
"log",
|
||||
"try-lock",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasi"
|
||||
version = "0.10.0+wasi-snapshot-preview1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.81"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"wasm-bindgen-macro",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-backend"
|
||||
version = "0.2.81"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.81"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.81"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn",
|
||||
"wasm-bindgen-backend",
|
||||
"wasm-bindgen-shared",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.81"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be"
|
||||
|
||||
[[package]]
|
||||
name = "web-sys"
|
||||
version = "0.3.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "webpki"
|
||||
version = "0.22.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd"
|
||||
dependencies = [
|
||||
"ring",
|
||||
"untrusted",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi"
|
||||
version = "0.3.9"
|
||||
@@ -590,6 +1154,15 @@ version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
|
||||
|
||||
[[package]]
|
||||
name = "winapi-util"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
|
||||
dependencies = [
|
||||
"winapi",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winapi-x86_64-pc-windows-gnu"
|
||||
version = "0.4.0"
|
||||
|
||||
18  Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "pgcat"
version = "0.1.0"
version = "0.6.0-alpha1"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -18,3 +18,19 @@ toml = "0.5"
serde = "1"
serde_derive = "1"
regex = "1"
num_cpus = "1"
once_cell = "1"
sqlparser = "0.14"
log = "0.4"
arc-swap = "1"
env_logger = "0.9"
parking_lot = "0.11"
hmac = "0.12"
sha2 = "0.10"
base64 = "0.13"
stringprep = "0.1"
tokio-rustls = "0.23"
rustls-pemfile = "1"
hyper = { version = "0.14", features = ["full"] }
phf = { version = "0.10", features = ["macros"] }
exitcode = "1.1.2"
11  Dockerfile  Normal file
@@ -0,0 +1,11 @@
FROM rust:1 AS builder
COPY . /app
WORKDIR /app
RUN cargo build --release

FROM debian:bullseye-slim
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
CMD ["pgcat"]
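The image bakes in the repository's example pgcat.toml at /etc/pgcat/pgcat.toml and starts pgcat from that working directory. A hypothetical build-and-run, mounting your own config over the baked-in one (ports 6432 and 9930 come from the example config), could look like:

```bash
# Build the release image and run it with a custom config mounted in.
docker build -t pgcat .
docker run -d \
  -v "$(pwd)/pgcat.toml:/etc/pgcat/pgcat.toml" \
  -p 6432:6432 -p 9930:9930 \
  pgcat
```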
8  Dockerfile.ci  Normal file
@@ -0,0 +1,8 @@
FROM cimg/rust:1.62.0
RUN sudo apt-get update && \
    sudo apt-get install -y psmisc postgresql-contrib-12 postgresql-client-12 ruby ruby-dev libpq-dev python3 python3-pip lcov llvm-11 && \
    sudo apt-get upgrade curl
RUN cargo install cargo-binutils rustfilt && \
    rustup component add llvm-tools-preview
RUN pip3 install psycopg2 && \
    sudo gem install bundler
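This is presumably the source of the levkk/pgcat-ci:latest image referenced in .circleci/config.yml; the tag is taken from that config, and pushing it naturally requires access to that Docker Hub repository:

```bash
# Build (and, with the right credentials, publish) the CI convenience image.
docker build -f Dockerfile.ci -t levkk/pgcat-ci:latest .
docker push levkk/pgcat-ci:latest
```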
694  LICENSE
@@ -1,674 +1,20 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<http://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
||||
Copyright (c) 2022 Lev Kokotov <lev@levthe.dev>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

556 README.md
@@ -4,20 +4,80 @@

Meow. PgBouncer rewritten in Rust, with sharding, load balancing and failover support.
PostgreSQL pooler (like PgBouncer) with sharding, load balancing and failover support.

**Alpha**: don't use in production just yet.
**Beta**: looking for beta testers, see [#35](https://github.com/levkk/pgcat/issues/35).

## Features
| **Feature** | **Status** | **Comments** |
|-------------|------------|--------------|
| Transaction pooling | :white_check_mark: | Identical to PgBouncer. |
| Session pooling | :white_check_mark: | Identical to PgBouncer. |
| `COPY` support | :white_check_mark: | Both `COPY TO` and `COPY FROM` are supported. |
| Query cancellation | :white_check_mark: | Supported both in transaction and session pooling modes. |
| Load balancing of read queries | :white_check_mark: | Using random selection between replicas. Primary is included when `primary_reads_enabled` is enabled (default). |
| Sharding | :white_check_mark: | Transactions are sharded using `SET SHARD TO` and `SET SHARDING KEY TO` syntax extensions; see examples below. |
| Failover | :white_check_mark: | Replicas are tested with a health check. If a health check fails, remaining replicas are attempted; see below for algorithm description and examples. |
| Statistics | :white_check_mark: | Statistics available in the admin database (`pgcat` and `pgbouncer`) with `SHOW STATS`, `SHOW POOLS` and others. |
| Live configuration reloading | :white_check_mark: | Reload supported settings with a `SIGHUP` to the process, e.g. `kill -s SIGHUP $(pgrep pgcat)`, or a `RELOAD` query issued to the admin database. |
| Client authentication | :white_check_mark: :wrench: | MD5 password authentication is supported, SCRAM is on the roadmap; one user is used to connect to Postgres, with both SCRAM and MD5 supported. |
| Admin database | :white_check_mark: | The admin database, similar to PgBouncer's, allows querying for statistics and reloading the configuration. |

## Deployment

See `Dockerfile` for an example deployment using Docker. The pooler is configured to spawn 4 workers, so 4 CPUs are recommended for optimal performance. That setting can be adjusted to spawn as many (or as few) workers as needed.

For a quick local example, use the Docker Compose environment provided:

```bash
docker-compose up

# In a new terminal:
psql -h 127.0.0.1 -p 6432 -c 'SELECT 1'
```

### Config

| **Name** | **Description** | **Examples** |
|----------|-----------------|--------------|
| **`general`** | | |
| `host` | The pooler will run on this host, 0.0.0.0 means accessible from everywhere. | `0.0.0.0` |
| `port` | The pooler will run on this port. | `6432` |
| `enable_prometheus_exporter` | Enable the Prometheus exporter, which exports metrics in the Prometheus exposition format. | `true` |
| `prometheus_exporter_port` | Port the Prometheus exporter listens on. | `9930` |
| `pool_size` | Maximum allowed server connections per pool. Pools are separated for each user/shard/server role. The connections are allocated as needed. | `15` |
| `pool_mode` | The pool mode to use, i.e. `session` or `transaction`. | `transaction` |
| `connect_timeout` | Maximum time to establish a connection to a server (milliseconds). If reached, the server is banned and the next target is attempted. | `5000` |
| `healthcheck_timeout` | Maximum time to pass a health check (`SELECT 1`, milliseconds). If reached, the server is banned and the next target is attempted. | `1000` |
| `shutdown_timeout` | Maximum time to give clients during shutdown before forcibly killing client connections (ms). | `60000` |
| `healthcheck_delay` | How long to keep a connection available for immediate re-use without running a health check query on it (ms). | `30000` |
| `ban_time` | Ban time for a server (seconds). It won't be allowed to serve transactions until the ban expires; failover targets will be used instead. | `60` |
| `autoreload` | Enable auto-reload of the config after a fixed time interval. | `false` |
| | | |
| **`user`** | | |
| `name` | The user name. | `sharding_user` |
| `password` | The user password in plaintext. | `hunter2` |
| | | |
| **`shards`** | Shards are numerically numbered starting from 0; the order in the config is preserved by the pooler to route queries accordingly. | `[shards.0]` |
| `servers` | List of servers to connect to and their roles. A server is: `[host, port, role]`, where `role` is either `primary` or `replica`. | `["127.0.0.1", 5432, "primary"]` |
| `database` | The name of the database to connect to. This is the same on all servers that are part of one shard. | |
| | | |
| **`query_router`** | | |
| `default_role` | Traffic is routed to this role by default (random), unless the client specifies otherwise. Default is `any`, for any role available. | `any`, `primary`, `replica` |
| `query_parser_enabled` | Enable the query parser which will inspect incoming queries and route them to a primary or replicas. | `false` |
| `primary_reads_enabled` | Enable this to allow read queries on the primary; otherwise read queries are routed to the replicas. | `true` |

## Local development

1. Install Rust (latest stable will work great).
2. `cargo run --release` (to get better benchmarks).
2. `cargo build --release` (to get better benchmarks).
3. Change the config in `pgcat.toml` to fit your setup (optional given next step).
4. Install Postgres and run `psql -f tests/sharding/query_routing_setup.sql`
4. Install Postgres and run `psql -f tests/sharding/query_routing_setup.sql` (user/password may be required depending on your setup)
5. `RUST_LOG=info cargo run --release` You're ready to go!

### Tests

You can just use PgBench to test your changes:
The quickest way to test your changes is to use pgbench:

```
pgbench -i -h 127.0.0.1 -p 6432 && \
@@ -27,65 +87,184 @@ pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol extended

See [sharding README](./tests/sharding/README.md) for sharding logic testing.

## Features
| **Feature** | **Tested in CI** | **Tested manually** | **Comments** |
|-------------|------------------|---------------------|--------------|
| Transaction pooling | :white_check_mark: | :white_check_mark: | Used by default for all tests. |
| Session pooling | :white_check_mark: | :white_check_mark: | Tested by running pgbench with `--protocol prepared` which only works in session mode. |
| `COPY` | :white_check_mark: | :white_check_mark: | `pgbench -i` uses `COPY`. `COPY FROM` is tested as well. |
| Query cancellation | :white_check_mark: | :white_check_mark: | `psql -c 'SELECT pg_sleep(1000);'` and press `Ctrl-C`. |
| Load balancing | :white_check_mark: | :white_check_mark: | We could test this by emitting statistics for each replica and comparing them. |
| Failover | :white_check_mark: | :white_check_mark: | Misconfigure a replica in `pgcat.toml` and watch it forward queries to spares. CI testing is using Toxiproxy. |
| Sharding | :white_check_mark: | :white_check_mark: | See `tests/sharding` and `tests/ruby` for a Rails/ActiveRecord example. |
| Statistics | :white_check_mark: | :white_check_mark: | Query the admin database with `psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS'`. |
| Live config reloading | :white_check_mark: | :white_check_mark: | Run `kill -s SIGHUP $(pgrep pgcat)` and watch the config reload. |

1. Session mode.
2. Transaction mode.
3. `COPY` protocol support.
4. Query cancellation.
5. Round-robin load balancing of replicas.
6. Banlist & failover
7. Sharding!
## Usage

### Session mode
Each client owns its own server for the duration of the session. Commands like `SET` are allowed.
This is identical to PgBouncer session mode.
In session mode, a client talks to one server for the duration of the connection. Prepared statements, `SET`, and advisory locks are supported. In terms of supported features, there is very little if any difference between session mode and talking directly to the server.

To use session mode, change `pool_mode = "session"`.

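For illustration, here is a minimal sketch of the per-connection state that is safe to rely on in session mode; the `items` table and the statement name are made up for the example:

```sql
-- Safe in session mode: the client keeps the same server connection
-- for the whole session, so prepared statements stick around.
PREPARE fetch_item (BIGINT) AS SELECT * FROM items WHERE id = $1;
EXECUTE fetch_item(42);

-- Session-level advisory locks also survive across transactions here.
SELECT pg_advisory_lock(1);
-- ... do some work ...
SELECT pg_advisory_unlock(1);
```
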
### Transaction mode
The connection is attached to the server for the duration of the transaction. `SET` will pollute the connection,
but `SET LOCAL` works great. Identical to PgBouncer transaction mode.
In transaction mode, a client talks to one server for the duration of a single transaction; once it's over, the server is returned to the pool. Prepared statements, `SET`, and advisory locks are not supported; alternatives are to use `SET LOCAL` and `pg_advisory_xact_lock` which are scoped to the transaction.

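As a small sketch of the transaction-scoped alternatives mentioned above (the `items` table and the lock key are made up for the example):

```sql
BEGIN;
-- SET LOCAL only affects the current transaction, so it cannot leak
-- onto whichever server the next transaction happens to get.
SET LOCAL statement_timeout = '5s';
-- pg_advisory_xact_lock is released automatically at COMMIT or ROLLBACK.
SELECT pg_advisory_xact_lock(1);
UPDATE items SET quantity = quantity - 1 WHERE id = 42;
COMMIT;
```
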
### COPY protocol
That one isn't particularly special, but good to mention that you can `COPY` data in and out of the server
using this pooler.
This mode is enabled by default.

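A minimal example, assuming a `users` table exists on the server:

```sql
-- Copy data out through the pooler...
COPY users TO STDOUT (FORMAT csv);

-- ...and load data back in (psql's \copy uses COPY FROM STDIN under the hood).
COPY users FROM STDIN (FORMAT csv);
```
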
### Query cancellation
Okay, this is just basic stuff, but we support cancelling queries. If you know the Postgres protocol,
this might be relevant given that this is a transactional pooler, but if you're new to Pg, don't worry about it; it works.
### Load balancing of read queries
All queries are load balanced against the configured servers using the random algorithm. The most straightforward configuration example would be to put this pooler in front of several replicas and let it load balance all queries.

### Round-robin load balancing
This is the novel part. PgBouncer doesn't support it and suggests we use DNS or a TCP proxy instead.
We prefer to have everything as part of one package; arguably, it's easier to understand and optimize.
This pooler will round-robin between multiple replicas keeping load reasonably even.
If the configuration includes a primary and replicas, the queries can be separated with the built-in query parser. The query parser will interpret the query and route all `SELECT` queries to a replica, while all other queries including explicit transactions will be routed to the primary.

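For example, with `query_parser_enabled = true` and `primary_reads_enabled = false`, a session could look like the sketch below; the `users` table is illustrative and the comments describe the expected routing, not output from the pooler:

```sql
-- A plain read: the query parser sends this to one of the replicas.
SELECT * FROM users WHERE id = 5;

-- A write (and any explicit transaction) goes to the primary.
BEGIN;
INSERT INTO users (email) VALUES ('new@example.com');
COMMIT;
```
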
### Banlist & failover
This is where it gets even more interesting. If we fail to connect to one of the replicas or it fails a health check,
we add it to a ban list. No more new transactions will be served by that replica for, in our case, 60 seconds. This
gives it the opportunity to recover while clients are happily served by the remaining replicas.
The query parser is disabled by default.

This decreases error rates substantially! Worth noting here that on busy systems, if the replicas are running too hot,
failing over could bring even more load and tip over the remaining healthy-ish replicas. In this case, a decision should be made:
either lose 1/x of your traffic or risk losing it all eventually. Ideally you overprovision your system, so you don't necessarily need
to make this choice :-).

### Sharding
We're implementing Postgres' `PARTITION BY HASH` sharding function for `BIGINT` fields. This works well for tables that use a `BIGSERIAL` primary key, which I think is common enough these days. We can also add many more functions here, but this is a good start. See `src/sharding.rs` and `tests/sharding/partition_hash_test_setup.sql` for more details on the implementation.

The biggest advantage of using this sharding function is that anyone can shard the dataset using Postgres partitions
while also accessing it for both reads and writes using this pooler. No custom obscure sharding function is needed and database sharding can be done entirely in Postgres.

To select the shard we want to talk to, we introduced special syntax:
#### Query parser
The query parser will do its best to determine where the query should go, but sometimes that's not possible. In that case, the client can select which server it wants using this custom SQL syntax:

```sql
-- To talk to the primary for the duration of the next transaction:
SET SERVER ROLE TO 'primary';

-- To talk to the replica for the duration of the next transaction:
SET SERVER ROLE TO 'replica';

-- Let the query parser decide
SET SERVER ROLE TO 'auto';

-- Pick any server at random
SET SERVER ROLE TO 'any';

-- Reset to default configured settings
SET SERVER ROLE TO 'default';
```

The setting will persist until it's changed again or the client disconnects.

By default, all queries are routed to the first available server; `default_role` setting controls this behavior.

### Failover
All servers are checked with a `SELECT 1` query before being given to a client. If the server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban. The queries are routed to the remaining servers. If all servers become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.

The ban time can be changed with `ban_time`. The default is 60 seconds.

Failover behavior can get pretty interesting (read: complex) when multiple configurations and factors are involved. The table below will try to explain what PgCat does in each scenario:

| **Query** | **`SET SERVER ROLE TO`** | **`query_parser_enabled`** | **`primary_reads_enabled`** | **Target state** | **Outcome** |
|-----------|--------------------------|----------------------------|-----------------------------|------------------|-------------|
| Read query, i.e. `SELECT` | unset (any) | false | false | up | Query is routed to the first instance in the random loop. |
| Read query | unset (any) | true | false | up | Query is routed to the first replica instance in the random loop. |
| Read query | unset (any) | true | true | up | Query is routed to the first instance in the random loop. |
| Read query | replica | false | false | up | Query is routed to the first replica instance in the random loop. |
| Read query | primary | false | false | up | Query is routed to the primary. |
| Read query | unset (any) | false | false | down | First instance is banned for reads. Next target in the random loop is attempted. |
| Read query | unset (any) | true | false | down | First replica instance is banned. Next replica instance is attempted in the random loop. |
| Read query | unset (any) | true | true | down | First instance (even if primary) is banned for reads. Next instance is attempted in the random loop. |
| Read query | replica | false | false | down | First replica instance is banned. Next replica instance is attempted in the random loop. |
| Read query | primary | false | false | down | The query is attempted against the primary and fails. The client receives an error. |
| | | | | | |
| Write query e.g. `INSERT` | unset (any) | false | false | up | The query is attempted against the first available instance in the random loop. If the instance is a replica, the query fails and the client receives an error. |
| Write query | unset (any) | true | false | up | The query is routed to the primary. |
| Write query | unset (any) | true | true | up | The query is routed to the primary. |
| Write query | primary | false | false | up | The query is routed to the primary. |
| Write query | replica | false | false | up | The query is routed to the replica and fails. The client receives an error. |
| Write query | unset (any) | true | false | down | The query is routed to the primary and fails. The client receives an error. |
| Write query | unset (any) | true | true | down | The query is routed to the primary and fails. The client receives an error. |
| Write query | primary | false | false | down | The query is routed to the primary and fails. The client receives an error. |
| | | | | | |

### Sharding
We use the `PARTITION BY HASH` hashing function, the same as used by Postgres for declarative partitioning. This allows sharding the database using Postgres partitions and placing the partitions on different servers (shards). Both read and write queries can be routed to the shards using this pooler.

To route queries to a particular shard, we use this custom SQL syntax:

```sql
-- To talk to a shard explicitly
SET SHARD TO '1';

-- To let the pooler choose based on a value
SET SHARDING KEY TO '1234';
```

This sharding key will be hashed and the pooler will select a shard to use for the next transaction. If the pooler is in session mode, this sharding key will be used until it's set again or the client disconnects.
The active shard will last until it's changed again or the client disconnects. By default, the queries are routed to shard 0.

For hash function implementation, see `src/sharding.rs` and `tests/sharding/partition_hash_test_setup.sql`.

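As a rough sketch of what the Postgres side of such a setup could look like with three shards (the table layout is made up for the example; see `tests/sharding/query_routing_setup.sql` for the setup actually used in tests):

```sql
-- Parent table hash-partitioned on a BIGINT key, the scheme the
-- pooler's sharding function is designed to line up with.
CREATE TABLE users (id BIGSERIAL, email TEXT) PARTITION BY HASH (id);

-- One partition per shard; the data for each shard lives on the
-- server configured as that shard in pgcat.toml.
CREATE TABLE users_shard_0 PARTITION OF users FOR VALUES WITH (MODULUS 3, REMAINDER 0);
CREATE TABLE users_shard_1 PARTITION OF users FOR VALUES WITH (MODULUS 3, REMAINDER 1);
CREATE TABLE users_shard_2 PARTITION OF users FOR VALUES WITH (MODULUS 3, REMAINDER 2);
```
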
## Missing
#### ActiveRecord/Rails

```ruby
class User < ActiveRecord::Base
end

# Metadata will be fetched from shard 0
ActiveRecord::Base.establish_connection

# Grab a bunch of users from shard 1
User.connection.execute "SET SHARD TO '1'"
User.take(10)

# Using id as the sharding key
User.connection.execute "SET SHARDING KEY TO '1234'"
User.find_by_id(1234)

# Using geographical sharding
User.connection.execute "SET SERVER ROLE TO 'primary'"
User.connection.execute "SET SHARDING KEY TO '85'"
User.create(name: "test user", email: "test@example.com", zone_id: 85)

# Let the query parser figure out where the query should go.
# We are still on shard = hash(85) % shards.
User.connection.execute "SET SERVER ROLE TO 'auto'"
User.find_by_email("test@example.com")
```

#### Raw SQL

```sql
-- Grab a bunch of users from shard 1
SET SHARD TO '1';
SELECT * FROM users LIMIT 10;

-- Find by id
SET SHARDING KEY TO '1234';
SELECT * FROM users WHERE id = 1234;

-- Writing in a primary/replicas configuration.
SET SERVER ROLE TO 'primary';
SET SHARDING KEY TO '85';
INSERT INTO users (name, email, zone_id) VALUES ('test user', 'test@example.com', 85);

SET SERVER ROLE TO 'auto'; -- let the query router figure out where the query should go
SELECT * FROM users WHERE email = 'test@example.com'; -- shard setting lasts until set again; we are reading from the primary
```

### Statistics reporting

The stats are very similar to what PgBouncer reports and the names are kept the same so they are comparable. They are accessible by querying the admin database `pgcat`, and `pgbouncer` for compatibility.

```
psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'
```

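The other admin commands mentioned above work the same way once you are connected to the admin database, for example:

```sql
SHOW STATS;
SHOW POOLS;
```
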
### Live configuration reloading

The config can be reloaded by sending a `kill -s SIGHUP` to the process or by issuing `RELOAD` to the admin database. Not all settings are currently supported by live reload:

| **Config** | **Requires restart** |
|------------|----------------------|
| `host` | yes |
| `port` | yes |
| `pool_mode` | no |
| `connect_timeout` | yes |
| `healthcheck_timeout` | no |
| `shutdown_timeout` | no |
| `healthcheck_delay` | no |
| `ban_time` | no |
| `user` | yes |
| `shards` | yes |
| `default_role` | no |
| `primary_reads_enabled` | no |
| `query_parser_enabled` | no |

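For example, instead of sending a signal you can issue the reload command over a regular connection to the admin database; settings marked "no" above take effect without a restart:

```sql
-- Connected to the admin database, e.g. psql -h 127.0.0.1 -p 6432 -d pgcat
RELOAD;
```
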
1. Authentication, ehem, this proxy is letting anyone in at the moment.

## Benchmarks

@@ -95,114 +274,231 @@ You can setup PgBench locally through PgCat:
pgbench -h 127.0.0.1 -p 6432 -i
```

Coincidentally, this uses `COPY` so you can test if that works.
Coincidentally, this uses `COPY` so you can test if that works. Additionally, we'll be running the following PgBench configurations:

1. 16 clients, 2 threads
2. 32 clients, 2 threads
3. 64 clients, 2 threads
4. 128 clients, 2 threads

All queries will be `SELECT` only (`-S`) just so disks don't get in the way, since the dataset will be effectively all in RAM.

My setup:

- 8 cores, 16 hyperthreaded (AMD Ryzen 5800X)
- 32GB RAM (doesn't matter for this benchmark, except to prove that Postgres will fit the whole dataset into RAM)

### PgBouncer

#### Config

```ini
[databases]
shard0 = host=localhost port=5432 user=sharding_user password=sharding_user

[pgbouncer]
pool_mode = transaction
max_client_conn = 1000
```
$ pgbench -i -h 127.0.0.1 -p 6432 && pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol simple && pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol extended
dropping old tables...
creating tables...
generating data...
100000 of 100000 tuples (100%) done (elapsed 0.01 s, remaining 0.00 s)
vacuuming...
creating primary keys...
done.

Everything else stays default.

#### Runs

```
|
||||
$ pgbench -t 1000 -c 16 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: TPC-B (sort of)>
|
||||
scaling factor: 1
|
||||
query mode: simple
|
||||
number of clients: 1
|
||||
number of threads: 1
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 1000/1000
|
||||
latency average = 1.089 ms
|
||||
tps = 918.687098 (including connections establishing)
|
||||
tps = 918.847790 (excluding connections establishing)
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: TPC-B (sort of)>
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 1
|
||||
number of threads: 1
|
||||
number of clients: 16
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 1000/1000
|
||||
latency average = 1.136 ms
|
||||
tps = 880.622009 (including connections establishing)
|
||||
tps = 880.769550 (excluding connections establishing)
|
||||
number of transactions actually processed: 16000/16000
|
||||
latency average = 0.155 ms
|
||||
tps = 103417.377469 (including connections establishing)
|
||||
tps = 103510.639935 (excluding connections establishing)
|
||||
|
||||
|
||||
$ pgbench -t 1000 -c 32 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 32
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 32000/32000
|
||||
latency average = 0.290 ms
|
||||
tps = 110325.939785 (including connections establishing)
|
||||
tps = 110386.513435 (excluding connections establishing)
|
||||
|
||||
|
||||
$ pgbench -t 1000 -c 64 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 64
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 64000/64000
|
||||
latency average = 0.692 ms
|
||||
tps = 92470.427412 (including connections establishing)
|
||||
tps = 92618.389350 (excluding connections establishing)
|
||||
|
||||
$ pgbench -t 1000 -c 128 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 128
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 128000/128000
|
||||
latency average = 1.406 ms
|
||||
tps = 91013.429985 (including connections establishing)
|
||||
tps = 91067.583928 (excluding connections establishing)
|
||||
```
|
||||
|
||||
### PgCat
|
||||
|
||||
#### Config
|
||||
|
||||
The only thing that matters here is the number of workers in the Tokio pool. Make sure to set it to fewer than the number of your CPU cores. Also account for hyper-threading: if you have it, divide that number by two so that only "real" cores serve requests.

My setup is 8 cores with 16 hardware threads (`htop` shows 16 CPUs), so I set `max_workers` in Tokio to 4. With too many workers, PgCat starts competing with PgBench, which is also running on the same system.

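For reference, capping the Tokio runtime at a fixed number of worker threads looks roughly like this (a sketch only; the exact setting name and wiring inside PgCat may differ):

```rust
// Build a multi-threaded Tokio runtime with 4 worker threads:
// half of the 8 physical cores, leaving headroom for PgBench on the same box.
fn main() {
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(4)
        .enable_all()
        .build()
        .expect("failed to build Tokio runtime");

    runtime.block_on(async {
        // pooler startup would go here
    });
}
```
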
#### Runs
|
||||
|
||||
|
||||
```
|
||||
$ pgbench -i -h 127.0.0.1 -p 6432 && pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol simple && pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol extended
|
||||
dropping old tables...
|
||||
creating tables...
|
||||
generating data...
|
||||
100000 of 100000 tuples (100%) done (elapsed 0.01 s, remaining 0.00 s)
|
||||
vacuuming...
|
||||
creating primary keys...
|
||||
done.
|
||||
$ pgbench -t 1000 -c 16 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: TPC-B (sort of)>
|
||||
scaling factor: 1
|
||||
query mode: simple
|
||||
number of clients: 1
|
||||
number of threads: 1
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 1000/1000
|
||||
latency average = 1.142 ms
|
||||
tps = 875.645437 (including connections establishing)
|
||||
tps = 875.799995 (excluding connections establishing)
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: TPC-B (sort of)>
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 1
|
||||
number of threads: 1
|
||||
number of clients: 16
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 1000/1000
|
||||
latency average = 1.181 ms
|
||||
tps = 846.539176 (including connections establishing)
|
||||
tps = 846.713636 (excluding connections establishing)
|
||||
number of transactions actually processed: 16000/16000
|
||||
latency average = 0.164 ms
|
||||
tps = 97705.088232 (including connections establishing)
|
||||
tps = 97872.216045 (excluding connections establishing)
|
||||
|
||||
|
||||
$ pgbench -t 1000 -c 32 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 32
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 32000/32000
|
||||
latency average = 0.288 ms
|
||||
tps = 111300.488119 (including connections establishing)
|
||||
tps = 111413.107800 (excluding connections establishing)
|
||||
|
||||
|
||||
$ pgbench -t 1000 -c 64 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 64
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 64000/64000
|
||||
latency average = 0.556 ms
|
||||
tps = 115190.496139 (including connections establishing)
|
||||
tps = 115247.521295 (excluding connections establishing)
|
||||
|
||||
$ pgbench -t 1000 -c 128 -j 2 -p 6432 -h 127.0.0.1 -S --protocol extended
|
||||
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 128
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 128000/128000
|
||||
latency average = 1.135 ms
|
||||
tps = 112770.562239 (including connections establishing)
|
||||
tps = 112796.502381 (excluding connections establishing)
|
||||
```
|
||||
|
||||
### Direct Postgres
|
||||
|
||||
Always good to have a baseline.
|
||||
|
||||
#### Runs
|
||||
|
||||
```
|
||||
$ pgbench -i -h 127.0.0.1 -p 5432 && pgbench -t 1000 -p 5432 -h 127.0.0.1 --protocol simple && pgbench -t 1000 -p
|
||||
5432 -h 127.0.0.1 --protocol extended
|
||||
Password:
|
||||
dropping old tables...
|
||||
creating tables...
|
||||
generating data...
|
||||
100000 of 100000 tuples (100%) done (elapsed 0.01 s, remaining 0.00 s)
|
||||
vacuuming...
|
||||
creating primary keys...
|
||||
done.
|
||||
Password:
|
||||
$ pgbench -t 1000 -c 16 -j 2 -p 5432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
Password:
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: TPC-B (sort of)>
|
||||
scaling factor: 1
|
||||
query mode: simple
|
||||
number of clients: 1
|
||||
number of threads: 1
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 1000/1000
|
||||
latency average = 0.902 ms
|
||||
tps = 1109.014867 (including connections establishing)
|
||||
tps = 1112.318595 (excluding connections establishing)
|
||||
Password:
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: TPC-B (sort of)>
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 1
|
||||
number of threads: 1
|
||||
number of clients: 16
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 1000/1000
|
||||
latency average = 0.931 ms
|
||||
tps = 1074.017747 (including connections establishing)
|
||||
tps = 1077.121752 (excluding connections establishing)
|
||||
```
|
||||
number of transactions actually processed: 16000/16000
|
||||
latency average = 0.115 ms
|
||||
tps = 139443.955722 (including connections establishing)
|
||||
tps = 142314.859075 (excluding connections establishing)
|
||||
|
||||
$ pgbench -t 1000 -c 32 -j 2 -p 5432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
Password:
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 32
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 32000/32000
|
||||
latency average = 0.212 ms
|
||||
tps = 150644.840891 (including connections establishing)
|
||||
tps = 152218.499430 (excluding connections establishing)
|
||||
|
||||
$ pgbench -t 1000 -c 64 -j 2 -p 5432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
Password:
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 64
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 64000/64000
|
||||
latency average = 0.420 ms
|
||||
tps = 152517.663404 (including connections establishing)
|
||||
tps = 153319.188482 (excluding connections establishing)
|
||||
|
||||
$ pgbench -t 1000 -c 128 -j 2 -p 5432 -h 127.0.0.1 -S --protocol extended shard0
|
||||
Password:
|
||||
starting vacuum...end.
|
||||
transaction type: <builtin: select only>
|
||||
scaling factor: 1
|
||||
query mode: extended
|
||||
number of clients: 128
|
||||
number of threads: 2
|
||||
number of transactions per client: 1000
|
||||
number of transactions actually processed: 128000/128000
|
||||
latency average = 0.854 ms
|
||||
tps = 149818.594087 (including connections establishing)
|
||||
tps = 150200.603049 (excluding connections establishing)
|
||||
```
|
||||
|
||||
17
docker-compose.yml
Normal file
17
docker-compose.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
version: "3"
|
||||
services:
|
||||
postgres:
|
||||
image: postgres:14
|
||||
environment:
|
||||
POSTGRES_PASSWORD: postgres
|
||||
POSTGRES_HOST_AUTH_METHOD: md5
|
||||
pgcat:
|
||||
build: .
|
||||
command:
|
||||
- "pgcat"
|
||||
- "/etc/pgcat/pgcat.toml"
|
||||
volumes:
|
||||
- "${PWD}/examples/docker/pgcat.toml:/etc/pgcat/pgcat.toml"
|
||||
ports:
|
||||
- "6432:6432"
|
||||
- "9930:9930"
|
||||
147
examples/docker/pgcat.toml
Normal file
147
examples/docker/pgcat.toml
Normal file
@@ -0,0 +1,147 @@
|
||||
#
|
||||
# PgCat config example.
|
||||
#
|
||||
|
||||
#
|
||||
# General pooler settings
|
||||
[general]
|
||||
# What IP to run on, 0.0.0.0 means accessible from everywhere.
|
||||
host = "0.0.0.0"
|
||||
|
||||
# Port to run on, same as PgBouncer used in this example.
|
||||
port = 6432
|
||||
|
||||
# Whether to enable prometheus exporter or not.
|
||||
enable_prometheus_exporter = true
|
||||
|
||||
# Port at which prometheus exporter listens on.
|
||||
prometheus_exporter_port = 9930
|
||||
|
||||
# How long to wait before aborting a server connection (ms).
|
||||
connect_timeout = 5000
|
||||
|
||||
# How much time to give `SELECT 1` health check query to return with a result (ms).
|
||||
healthcheck_timeout = 1000
|
||||
|
||||
# How long to keep connection available for immediate re-use, without running a healthcheck query on it
|
||||
healthcheck_delay = 30000
|
||||
|
||||
# How much time to give clients during shutdown before forcibly killing client connections (ms).
|
||||
shutdown_timeout = 60000
|
||||
|
||||
# For how long to ban a server if it fails a health check (seconds).
|
||||
ban_time = 60 # seconds
|
||||
|
||||
# Reload config automatically if it changes.
|
||||
autoreload = false
|
||||
|
||||
# TLS
|
||||
# tls_certificate = "server.cert"
|
||||
# tls_private_key = "server.key"
|
||||
|
||||
# Credentials to access the virtual administrative database (pgbouncer or pgcat)
|
||||
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
|
||||
admin_username = "postgres"
|
||||
admin_password = "postgres"
|
||||
|
||||
# pool
|
||||
# configs are structured as pool.<pool_name>
|
||||
# the pool_name is what clients use as database name when connecting
|
||||
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded"
|
||||
[pools.sharded]
|
||||
# Pool mode (see PgBouncer docs for more).
|
||||
# session: one server connection per connected client
|
||||
# transaction: one server connection per client transaction
|
||||
pool_mode = "transaction"
|
||||
|
||||
# If the client doesn't specify, route traffic to
|
||||
# this role by default.
|
||||
#
|
||||
# any: round-robin between primary and replicas,
|
||||
# replica: round-robin between replicas only without touching the primary,
|
||||
# primary: all queries go to the primary unless otherwise specified.
|
||||
default_role = "any"
|
||||
|
||||
# Query parser. If enabled, we'll attempt to parse
|
||||
# every incoming query to determine if it's a read or a write.
|
||||
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
|
||||
# we'll direct it to the primary.
|
||||
query_parser_enabled = true
|
||||
|
||||
# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
|
||||
# load balancing of read queries. Otherwise, the primary will only be used for write
|
||||
# queries. The primary can always be explicitly selected with our custom protocol.
|
||||
primary_reads_enabled = true
|
||||
|
||||
# So what if you wanted to implement a different hashing function,
|
||||
# or you've already built one and you want this pooler to use it?
|
||||
#
|
||||
# Current options:
|
||||
#
|
||||
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
|
||||
# sha1: A hashing function based on SHA1
|
||||
#
|
||||
sharding_function = "pg_bigint_hash"
|
||||
|
||||
# Credentials for users that may connect to this cluster
|
||||
[pools.sharded.users.0]
|
||||
username = "postgres"
|
||||
password = "postgres"
|
||||
# Maximum number of server connections that can be established for this user
|
||||
# The maximum number of connections from a single PgCat process to any database in the cluster
|
||||
# is the sum of pool_size across all users.
|
||||
pool_size = 9
|
||||
|
||||
# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
|
||||
statement_timeout = 0
|
||||
|
||||
[pools.sharded.users.1]
|
||||
username = "postgres"
|
||||
password = "postgres"
|
||||
pool_size = 21
|
||||
statement_timeout = 15000
|
||||
|
||||
# Shard 0
|
||||
[pools.sharded.shards.0]
|
||||
# [ host, port, role ]
|
||||
servers = [
|
||||
[ "postgres", 5432, "primary" ],
|
||||
[ "postgres", 5432, "replica" ]
|
||||
]
|
||||
# Database name (e.g. "postgres")
|
||||
database = "postgres"
|
||||
|
||||
[pools.sharded.shards.1]
|
||||
servers = [
|
||||
[ "postgres", 5432, "primary" ],
|
||||
[ "postgres", 5432, "replica" ],
|
||||
]
|
||||
database = "postgres"
|
||||
|
||||
[pools.sharded.shards.2]
|
||||
servers = [
|
||||
[ "postgres", 5432, "primary" ],
|
||||
[ "postgres", 5432, "replica" ],
|
||||
]
|
||||
database = "postgres"
|
||||
|
||||
|
||||
[pools.simple_db]
|
||||
pool_mode = "session"
|
||||
default_role = "primary"
|
||||
query_parser_enabled = true
|
||||
primary_reads_enabled = true
|
||||
sharding_function = "pg_bigint_hash"
|
||||
|
||||
[pools.simple_db.users.0]
|
||||
username = "postgres"
|
||||
password = "postgres"
|
||||
pool_size = 5
|
||||
statement_timeout = 0
|
||||
|
||||
[pools.simple_db.shards.0]
|
||||
servers = [
|
||||
[ "postgres", 5432, "primary" ],
|
||||
[ "postgres", 5432, "replica" ]
|
||||
]
|
||||
database = "postgres"
|
||||
143
pgcat.toml
143
pgcat.toml
@@ -5,64 +5,143 @@
|
||||
#
|
||||
# General pooler settings
|
||||
[general]
|
||||
|
||||
# What IP to run on, 0.0.0.0 means accessible from everywhere.
|
||||
host = "0.0.0.0"
|
||||
|
||||
# Port to run on, same as PgBouncer used in this example.
|
||||
port = 6432
|
||||
|
||||
# How many connections to allocate per server.
|
||||
pool_size = 15
|
||||
# Whether to enable prometheus exporter or not.
|
||||
enable_prometheus_exporter = true
|
||||
|
||||
# Port at which prometheus exporter listens on.
|
||||
prometheus_exporter_port = 9930
|
||||
|
||||
# How long to wait before aborting a server connection (ms).
|
||||
connect_timeout = 5000
|
||||
|
||||
# How much time to give the health check query to return with a result (ms).
|
||||
healthcheck_timeout = 1000
|
||||
|
||||
# How long to keep connection available for immediate re-use, without running a healthcheck query on it
|
||||
healthcheck_delay = 30000
|
||||
|
||||
# How much time to give clients during shutdown before forcibly killing client connections (ms).
|
||||
shutdown_timeout = 60000
|
||||
|
||||
# For how long to ban a server if it fails a health check (seconds).
|
||||
ban_time = 60 # seconds
|
||||
|
||||
# Reload config automatically if it changes.
|
||||
autoreload = false
|
||||
|
||||
# TLS
|
||||
# tls_certificate = "server.cert"
|
||||
# tls_private_key = "server.key"
|
||||
|
||||
# Credentials to access the virtual administrative database (pgbouncer or pgcat)
|
||||
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
|
||||
admin_username = "admin_user"
|
||||
admin_password = "admin_pass"
|
||||
|
||||
# pool
|
||||
# configs are structured as pool.<pool_name>
|
||||
# the pool_name is what clients use as database name when connecting
|
||||
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
|
||||
[pools.sharded_db]
|
||||
# Pool mode (see PgBouncer docs for more).
|
||||
# session: one server connection per connected client
|
||||
# transaction: one server connection per client transaction
|
||||
pool_mode = "transaction"
|
||||
|
||||
# How long to wait before aborting a server connection (ms).
|
||||
connect_timeout = 5000
|
||||
|
||||
# How much time to give `SELECT 1` health check query to return with a result (ms).
|
||||
healthcheck_timeout = 1000
|
||||
|
||||
# For how long to ban a server if it fails a health check (seconds).
|
||||
ban_time = 60 # Seconds
|
||||
|
||||
# If the client doesn't specify, route traffic to
|
||||
# this role by default.
|
||||
#
|
||||
# User to use for authentication against the server.
|
||||
[user]
|
||||
name = "sharding_user"
|
||||
# any: round-robin between primary and replicas,
|
||||
# replica: round-robin between replicas only without touching the primary,
|
||||
# primary: all queries go to the primary unless otherwise specified.
|
||||
default_role = "any"
|
||||
|
||||
# Query parser. If enabled, we'll attempt to parse
|
||||
# every incoming query to determine if it's a read or a write.
|
||||
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
|
||||
# we'll direct it to the primary.
|
||||
query_parser_enabled = true
|
||||
|
||||
# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
|
||||
# load balancing of read queries. Otherwise, the primary will only be used for write
|
||||
# queries. The primary can always be explicitly selected with our custom protocol.
|
||||
primary_reads_enabled = true
|
||||
|
||||
# So what if you wanted to implement a different hashing function,
|
||||
# or you've already built one and you want this pooler to use it?
|
||||
#
|
||||
# Current options:
|
||||
#
|
||||
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
|
||||
# sha1: A hashing function based on SHA1
|
||||
#
|
||||
sharding_function = "pg_bigint_hash"
|
||||
|
||||
# Credentials for users that may connect to this cluster
|
||||
[pools.sharded_db.users.0]
|
||||
username = "sharding_user"
|
||||
password = "sharding_user"
|
||||
# Maximum number of server connections that can be established for this user
|
||||
# The maximum number of connections from a single PgCat process to any database in the cluster
|
||||
# is the sum of pool_size across all users.
|
||||
pool_size = 9
|
||||
|
||||
# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
|
||||
statement_timeout = 0
|
||||
|
||||
#
|
||||
# Shards in the cluster
|
||||
[shards]
|
||||
[pools.sharded_db.users.1]
|
||||
username = "other_user"
|
||||
password = "other_user"
|
||||
pool_size = 21
|
||||
statement_timeout = 15000
|
||||
|
||||
# Shard 0
|
||||
[shards.0]
|
||||
|
||||
# [ host, port ]
|
||||
[pools.sharded_db.shards.0]
|
||||
# [ host, port, role ]
|
||||
servers = [
|
||||
[ "127.0.0.1", 5432 ],
|
||||
[ "localhost", 5432 ],
|
||||
[ "127.0.0.1", 5432, "primary" ],
|
||||
[ "localhost", 5432, "replica" ]
|
||||
]
|
||||
# Database name (e.g. "postgres")
|
||||
database = "shard0"
|
||||
|
||||
[shards.1]
|
||||
# [ host, port ]
|
||||
[pools.sharded_db.shards.1]
|
||||
servers = [
|
||||
[ "127.0.0.1", 5432 ],
|
||||
[ "localhost", 5432 ],
|
||||
[ "127.0.0.1", 5432, "primary" ],
|
||||
[ "localhost", 5432, "replica" ],
|
||||
]
|
||||
database = "shard1"
|
||||
|
||||
[shards.2]
|
||||
# [ host, port ]
|
||||
[pools.sharded_db.shards.2]
|
||||
servers = [
|
||||
[ "127.0.0.1", 5432 ],
|
||||
[ "localhost", 5432 ],
|
||||
[ "127.0.0.1", 5432, "primary" ],
|
||||
[ "localhost", 5432, "replica" ],
|
||||
]
|
||||
database = "shard2"
|
||||
database = "shard2"
|
||||
|
||||
|
||||
[pools.simple_db]
|
||||
pool_mode = "session"
|
||||
default_role = "primary"
|
||||
query_parser_enabled = true
|
||||
primary_reads_enabled = true
|
||||
sharding_function = "pg_bigint_hash"
|
||||
|
||||
[pools.simple_db.users.0]
|
||||
username = "simple_user"
|
||||
password = "simple_user"
|
||||
pool_size = 5
|
||||
statement_timeout = 0
|
||||
|
||||
[pools.simple_db.shards.0]
|
||||
servers = [
|
||||
[ "127.0.0.1", 5432, "primary" ],
|
||||
[ "localhost", 5432, "replica" ]
|
||||
]
|
||||
database = "some_db"
|
||||
|
||||
441
src/admin.rs
Normal file
441
src/admin.rs
Normal file
@@ -0,0 +1,441 @@
|
||||
/// Admin database.
|
||||
use bytes::{Buf, BufMut, BytesMut};
|
||||
use log::{info, trace};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::config::{get_config, reload_config, VERSION};
|
||||
use crate::errors::Error;
|
||||
use crate::messages::*;
|
||||
use crate::pool::get_all_pools;
|
||||
use crate::stats::get_stats;
|
||||
use crate::ClientServerMap;
|
||||
|
||||
pub fn generate_server_info_for_admin() -> BytesMut {
|
||||
let mut server_info = BytesMut::new();
|
||||
|
||||
server_info.put(server_paramater_message("application_name", ""));
|
||||
server_info.put(server_paramater_message("client_encoding", "UTF8"));
|
||||
server_info.put(server_paramater_message("server_encoding", "UTF8"));
|
||||
server_info.put(server_paramater_message("server_version", VERSION));
|
||||
server_info.put(server_paramater_message("DateStyle", "ISO, MDY"));
|
||||
|
||||
return server_info;
|
||||
}
|
||||
|
||||
/// Handle admin client.
|
||||
pub async fn handle_admin<T>(
|
||||
stream: &mut T,
|
||||
mut query: BytesMut,
|
||||
client_server_map: ClientServerMap,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let code = query.get_u8() as char;
|
||||
|
||||
if code != 'Q' {
|
||||
return Err(Error::ProtocolSyncError);
|
||||
}
|
||||
|
||||
let len = query.get_i32() as usize;
|
||||
let query = String::from_utf8_lossy(&query[..len - 5])
|
||||
.to_string()
|
||||
.to_ascii_uppercase();
|
||||
|
||||
trace!("Admin query: {}", query);
|
||||
|
||||
let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();
|
||||
|
||||
match query_parts[0] {
|
||||
"RELOAD" => {
|
||||
trace!("RELOAD");
|
||||
reload(stream, client_server_map).await
|
||||
}
|
||||
"SET" => {
|
||||
trace!("SET");
|
||||
ignore_set(stream).await
|
||||
}
|
||||
"SHOW" => match query_parts[1] {
|
||||
"CONFIG" => {
|
||||
trace!("SHOW CONFIG");
|
||||
show_config(stream).await
|
||||
}
|
||||
"DATABASES" => {
|
||||
trace!("SHOW DATABASES");
|
||||
show_databases(stream).await
|
||||
}
|
||||
"LISTS" => {
|
||||
trace!("SHOW LISTS");
|
||||
show_lists(stream).await
|
||||
}
|
||||
"POOLS" => {
|
||||
trace!("SHOW POOLS");
|
||||
show_pools(stream).await
|
||||
}
|
||||
"STATS" => {
|
||||
trace!("SHOW STATS");
|
||||
show_stats(stream).await
|
||||
}
|
||||
"VERSION" => {
|
||||
trace!("SHOW VERSION");
|
||||
show_version(stream).await
|
||||
}
|
||||
_ => error_response(stream, "Unsupported SHOW query against the admin database").await,
|
||||
},
|
||||
_ => error_response(stream, "Unsupported query against the admin database").await,
|
||||
}
|
||||
}
|
||||
|
||||
/// Column-oriented statistics.
|
||||
async fn show_lists<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let stats = get_stats();
|
||||
|
||||
let columns = vec![("list", DataType::Text), ("items", DataType::Int4)];
|
||||
|
||||
let mut users = 1;
|
||||
let mut databases = 1;
|
||||
for (_, pool) in get_all_pools() {
|
||||
databases += pool.databases();
|
||||
users += 1; // One user per pool
|
||||
}
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
res.put(data_row(&vec![
|
||||
"databases".to_string(),
|
||||
databases.to_string(),
|
||||
]));
|
||||
res.put(data_row(&vec!["users".to_string(), users.to_string()]));
|
||||
res.put(data_row(&vec!["pools".to_string(), databases.to_string()]));
|
||||
res.put(data_row(&vec![
|
||||
"free_clients".to_string(),
|
||||
stats
|
||||
.keys()
|
||||
.map(|address_id| stats[&address_id]["cl_idle"])
|
||||
.sum::<i64>()
|
||||
.to_string(),
|
||||
]));
|
||||
res.put(data_row(&vec![
|
||||
"used_clients".to_string(),
|
||||
stats
|
||||
.keys()
|
||||
.map(|address_id| stats[&address_id]["cl_active"])
|
||||
.sum::<i64>()
|
||||
.to_string(),
|
||||
]));
|
||||
res.put(data_row(&vec![
|
||||
"login_clients".to_string(),
|
||||
"0".to_string(),
|
||||
]));
|
||||
res.put(data_row(&vec![
|
||||
"free_servers".to_string(),
|
||||
stats
|
||||
.keys()
|
||||
.map(|address_id| stats[&address_id]["sv_idle"])
|
||||
.sum::<i64>()
|
||||
.to_string(),
|
||||
]));
|
||||
res.put(data_row(&vec![
|
||||
"used_servers".to_string(),
|
||||
stats
|
||||
.keys()
|
||||
.map(|address_id| stats[&address_id]["sv_active"])
|
||||
.sum::<i64>()
|
||||
.to_string(),
|
||||
]));
|
||||
res.put(data_row(&vec!["dns_names".to_string(), "0".to_string()]));
|
||||
res.put(data_row(&vec!["dns_zones".to_string(), "0".to_string()]));
|
||||
res.put(data_row(&vec!["dns_queries".to_string(), "0".to_string()]));
|
||||
res.put(data_row(&vec!["dns_pending".to_string(), "0".to_string()]));
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
|
||||
/// Show PgCat version.
|
||||
async fn show_version<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(row_description(&vec![("version", DataType::Text)]));
|
||||
res.put(data_row(&vec![format!("PgCat {}", VERSION).to_string()]));
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
|
||||
/// Show utilization of connection pools for each shard and replicas.
|
||||
async fn show_pools<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let stats = get_stats();
|
||||
|
||||
let columns = vec![
|
||||
("database", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("cl_idle", DataType::Numeric),
|
||||
("cl_active", DataType::Numeric),
|
||||
("cl_waiting", DataType::Numeric),
|
||||
("cl_cancel_req", DataType::Numeric),
|
||||
("sv_active", DataType::Numeric),
|
||||
("sv_idle", DataType::Numeric),
|
||||
("sv_used", DataType::Numeric),
|
||||
("sv_tested", DataType::Numeric),
|
||||
("sv_login", DataType::Numeric),
|
||||
("maxwait", DataType::Numeric),
|
||||
("maxwait_us", DataType::Numeric),
|
||||
("pool_mode", DataType::Text),
|
||||
];
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
for (_, pool) in get_all_pools() {
|
||||
let pool_config = &pool.settings;
|
||||
for shard in 0..pool.shards() {
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
let stats = match stats.get(&address.id) {
|
||||
Some(stats) => stats.clone(),
|
||||
None => HashMap::new(),
|
||||
};
|
||||
|
||||
let mut row = vec![address.name(), pool_config.user.username.clone()];
|
||||
|
||||
for column in &columns[2..columns.len() - 1] {
|
||||
let value = stats.get(column.0).unwrap_or(&0).to_string();
|
||||
row.push(value);
|
||||
}
|
||||
|
||||
row.push(pool_config.pool_mode.to_string());
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
|
||||
/// Show shards and replicas.
|
||||
async fn show_databases<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
// Columns
|
||||
let columns = vec![
|
||||
("name", DataType::Text),
|
||||
("host", DataType::Text),
|
||||
("port", DataType::Text),
|
||||
("database", DataType::Text),
|
||||
("force_user", DataType::Text),
|
||||
("pool_size", DataType::Int4),
|
||||
("min_pool_size", DataType::Int4),
|
||||
("reserve_pool", DataType::Int4),
|
||||
("pool_mode", DataType::Text),
|
||||
("max_connections", DataType::Int4),
|
||||
("current_connections", DataType::Int4),
|
||||
("paused", DataType::Int4),
|
||||
("disabled", DataType::Int4),
|
||||
];
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for (_, pool) in get_all_pools() {
|
||||
let pool_config = pool.settings.clone();
|
||||
for shard in 0..pool.shards() {
|
||||
let database_name = &pool.address(shard, 0).database;
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
let pool_state = pool.pool_state(shard, server);
|
||||
let banned = pool.is_banned(address, Some(address.role));
|
||||
|
||||
res.put(data_row(&vec![
|
||||
address.name(), // name
|
||||
address.host.to_string(), // host
|
||||
address.port.to_string(), // port
|
||||
database_name.to_string(), // database
|
||||
pool_config.user.username.to_string(), // force_user
|
||||
pool_config.user.pool_size.to_string(), // pool_size
|
||||
"0".to_string(), // min_pool_size
|
||||
"0".to_string(), // reserve_pool
|
||||
pool_config.pool_mode.to_string(), // pool_mode
|
||||
pool_config.user.pool_size.to_string(), // max_connections
|
||||
pool_state.connections.to_string(), // current_connections
|
||||
"0".to_string(), // paused
|
||||
match banned {
|
||||
// disabled
|
||||
true => "1".to_string(),
|
||||
false => "0".to_string(),
|
||||
},
|
||||
]));
|
||||
}
|
||||
}
|
||||
}
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
|
||||
/// Ignore any SET commands the client sends.
|
||||
/// This is common initialization done by ORMs.
|
||||
async fn ignore_set<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
custom_protocol_response_ok(stream, "SET").await
|
||||
}
|
||||
|
||||
/// Reload the configuration file without restarting the process.
|
||||
async fn reload<T>(stream: &mut T, client_server_map: ClientServerMap) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
info!("Reloading config");
|
||||
|
||||
reload_config(client_server_map).await?;
|
||||
|
||||
get_config().show();
|
||||
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put(command_complete("RELOAD"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
|
||||
/// Shows current configuration.
|
||||
async fn show_config<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let config = &get_config();
|
||||
let config: HashMap<String, String> = config.into();
|
||||
|
||||
// Configs that cannot be changed without restarting.
|
||||
let immutables = ["host", "port", "connect_timeout"];
|
||||
|
||||
// Columns
|
||||
let columns = vec![
|
||||
("key", DataType::Text),
|
||||
("value", DataType::Text),
|
||||
("default", DataType::Text),
|
||||
("changeable", DataType::Text),
|
||||
];
|
||||
|
||||
// Response data
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
// DataRow rows
|
||||
for (key, value) in config {
|
||||
let changeable = if immutables.iter().filter(|col| *col == &key).count() == 1 {
|
||||
"no".to_string()
|
||||
} else {
|
||||
"yes".to_string()
|
||||
};
|
||||
|
||||
let row = vec![key, value, "-".to_string(), changeable];
|
||||
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
|
||||
/// Show shard and replicas statistics.
|
||||
async fn show_stats<T>(stream: &mut T) -> Result<(), Error>
|
||||
where
|
||||
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let columns = vec![
|
||||
("database", DataType::Text),
|
||||
("user", DataType::Text),
|
||||
("total_xact_count", DataType::Numeric),
|
||||
("total_query_count", DataType::Numeric),
|
||||
("total_received", DataType::Numeric),
|
||||
("total_sent", DataType::Numeric),
|
||||
("total_xact_time", DataType::Numeric),
|
||||
("total_query_time", DataType::Numeric),
|
||||
("total_wait_time", DataType::Numeric),
|
||||
("avg_xact_count", DataType::Numeric),
|
||||
("avg_query_count", DataType::Numeric),
|
||||
("avg_recv", DataType::Numeric),
|
||||
("avg_sent", DataType::Numeric),
|
||||
("avg_xact_time", DataType::Numeric),
|
||||
("avg_query_time", DataType::Numeric),
|
||||
("avg_wait_time", DataType::Numeric),
|
||||
];
|
||||
|
||||
let stats = get_stats();
|
||||
let mut res = BytesMut::new();
|
||||
res.put(row_description(&columns));
|
||||
|
||||
for ((_db_name, username), pool) in get_all_pools() {
|
||||
for shard in 0..pool.shards() {
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
let stats = match stats.get(&address.id) {
|
||||
Some(stats) => stats.clone(),
|
||||
None => HashMap::new(),
|
||||
};
|
||||
|
||||
let mut row = vec![address.name()];
|
||||
row.push(username.clone());
|
||||
|
||||
for column in &columns[2..] {
|
||||
row.push(stats.get(column.0).unwrap_or(&0).to_string());
|
||||
}
|
||||
|
||||
res.put(data_row(&row));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
res.put(command_complete("SHOW"));
|
||||
|
||||
// ReadyForQuery
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
|
||||
write_all_half(stream, res).await
|
||||
}
|
||||
1122
src/client.rs
1122
src/client.rs
File diff suppressed because it is too large
Load Diff
649
src/config.rs
649
src/config.rs
@@ -1,55 +1,433 @@
|
||||
use serde_derive::Deserialize;
|
||||
/// Parse the configuration file.
|
||||
use arc_swap::ArcSwap;
|
||||
use log::{error, info};
|
||||
use once_cell::sync::Lazy;
|
||||
use serde_derive::{Deserialize, Serialize};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::hash::Hash;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncReadExt;
|
||||
use toml;
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::errors::Error;
|
||||
use crate::tls::{load_certs, load_keys};
|
||||
use crate::{ClientServerMap, ConnectionPool};
|
||||
|
||||
pub const VERSION: &str = env!("CARGO_PKG_VERSION");
|
||||
|
||||
/// Globally available configuration.
|
||||
static CONFIG: Lazy<ArcSwap<Config>> = Lazy::new(|| ArcSwap::from_pointee(Config::default()));
|
||||
|
||||
/// Server role: primary or replica.
|
||||
#[derive(Clone, PartialEq, Serialize, Deserialize, Hash, std::cmp::Eq, Debug, Copy)]
|
||||
pub enum Role {
|
||||
Primary,
|
||||
Replica,
|
||||
}
|
||||
|
||||
impl ToString for Role {
|
||||
fn to_string(&self) -> String {
|
||||
match *self {
|
||||
Role::Primary => "primary".to_string(),
|
||||
Role::Replica => "replica".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<Option<Role>> for Role {
|
||||
fn eq(&self, other: &Option<Role>) -> bool {
|
||||
match other {
|
||||
None => true,
|
||||
Some(role) => *self == *role,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq<Role> for Option<Role> {
|
||||
fn eq(&self, other: &Role) -> bool {
|
||||
match *self {
|
||||
None => true,
|
||||
Some(role) => role == *other,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Address identifying a PostgreSQL server uniquely.
|
||||
#[derive(Clone, PartialEq, Hash, std::cmp::Eq, Debug)]
|
||||
pub struct Address {
|
||||
/// Unique ID per addressable Postgres server.
|
||||
pub id: usize,
|
||||
|
||||
/// Server host.
|
||||
pub host: String,
|
||||
pub port: String,
|
||||
|
||||
/// Server port.
|
||||
pub port: u16,
|
||||
|
||||
/// Shard number of this Postgres server.
|
||||
pub shard: usize,
|
||||
|
||||
/// The name of the Postgres database.
|
||||
pub database: String,
|
||||
|
||||
/// Default search_path.
|
||||
pub search_path: Option<String>,
|
||||
|
||||
/// Server role: replica, primary.
|
||||
pub role: Role,
|
||||
|
||||
/// If it's a replica, number it for reference and failover.
|
||||
pub replica_number: usize,
|
||||
|
||||
/// Position of the server in the pool for failover.
|
||||
pub address_index: usize,
|
||||
|
||||
/// The name of the user configured to use this pool.
|
||||
pub username: String,
|
||||
|
||||
/// The name of this pool (i.e. database name visible to the client).
|
||||
pub pool_name: String,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Hash, std::cmp::Eq, Deserialize, Debug)]
|
||||
impl Default for Address {
|
||||
fn default() -> Address {
|
||||
Address {
|
||||
id: 0,
|
||||
host: String::from("127.0.0.1"),
|
||||
port: 5432,
|
||||
shard: 0,
|
||||
address_index: 0,
|
||||
replica_number: 0,
|
||||
database: String::from("database"),
|
||||
search_path: None,
|
||||
role: Role::Replica,
|
||||
username: String::from("username"),
|
||||
pool_name: String::from("pool_name"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Address {
|
||||
/// Address name (aka database) used in `SHOW STATS`, `SHOW DATABASES`, and `SHOW POOLS`.
|
||||
pub fn name(&self) -> String {
|
||||
match self.role {
|
||||
Role::Primary => format!("{}_shard_{}_primary", self.pool_name, self.shard),
|
||||
|
||||
Role::Replica => format!(
|
||||
"{}_shard_{}_replica_{}",
|
||||
self.pool_name, self.shard, self.replica_number
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// PostgreSQL user.
|
||||
#[derive(Clone, PartialEq, Hash, std::cmp::Eq, Serialize, Deserialize, Debug)]
|
||||
pub struct User {
|
||||
pub name: String,
|
||||
pub username: String,
|
||||
pub password: String,
|
||||
pub pool_size: u32,
|
||||
pub statement_timeout: u64,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
impl Default for User {
|
||||
fn default() -> User {
|
||||
User {
|
||||
username: String::from("postgres"),
|
||||
password: String::new(),
|
||||
pool_size: 15,
|
||||
statement_timeout: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// General configuration.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
||||
pub struct General {
|
||||
pub host: String,
|
||||
pub port: i16,
|
||||
pub pool_size: u32,
|
||||
pub pool_mode: String,
|
||||
pub enable_prometheus_exporter: Option<bool>,
|
||||
pub prometheus_exporter_port: i16,
|
||||
pub connect_timeout: u64,
|
||||
pub healthcheck_timeout: u64,
|
||||
pub shutdown_timeout: u64,
|
||||
pub healthcheck_delay: u64,
|
||||
pub ban_time: i64,
|
||||
pub autoreload: bool,
|
||||
pub tls_certificate: Option<String>,
|
||||
pub tls_private_key: Option<String>,
|
||||
pub admin_username: String,
|
||||
pub admin_password: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
pub struct Shard {
|
||||
pub servers: Vec<(String, u16)>,
|
||||
pub database: String,
|
||||
impl Default for General {
|
||||
fn default() -> General {
|
||||
General {
|
||||
host: String::from("localhost"),
|
||||
port: 5432,
|
||||
enable_prometheus_exporter: Some(false),
|
||||
prometheus_exporter_port: 9930,
|
||||
connect_timeout: 5000,
|
||||
healthcheck_timeout: 1000,
|
||||
shutdown_timeout: 60000,
|
||||
healthcheck_delay: 30000,
|
||||
ban_time: 60,
|
||||
autoreload: false,
|
||||
tls_certificate: None,
|
||||
tls_private_key: None,
|
||||
admin_username: String::from("admin"),
|
||||
admin_password: String::from("admin"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Clone)]
|
||||
pub struct Config {
|
||||
pub general: General,
|
||||
pub user: User,
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
||||
pub struct Pool {
|
||||
pub pool_mode: String,
|
||||
pub default_role: String,
|
||||
pub query_parser_enabled: bool,
|
||||
pub primary_reads_enabled: bool,
|
||||
pub sharding_function: String,
|
||||
pub shards: HashMap<String, Shard>,
|
||||
pub users: HashMap<String, User>,
|
||||
}
|
||||
impl Default for Pool {
|
||||
fn default() -> Pool {
|
||||
Pool {
|
||||
pool_mode: String::from("transaction"),
|
||||
shards: HashMap::from([(String::from("1"), Shard::default())]),
|
||||
users: HashMap::default(),
|
||||
default_role: String::from("any"),
|
||||
query_parser_enabled: false,
|
||||
primary_reads_enabled: true,
|
||||
sharding_function: "pg_bigint_hash".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn parse(path: &str) -> Result<Config, Error> {
|
||||
// let path = Path::new(path);
|
||||
/// Shard configuration.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
||||
pub struct Shard {
|
||||
pub database: String,
|
||||
pub search_path: Option<String>,
|
||||
pub servers: Vec<(String, u16, String)>,
|
||||
}
|
||||
|
||||
impl Default for Shard {
|
||||
fn default() -> Shard {
|
||||
Shard {
|
||||
servers: vec![(String::from("localhost"), 5432, String::from("primary"))],
|
||||
search_path: None,
|
||||
database: String::from("postgres"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_path() -> String {
|
||||
String::from("pgcat.toml")
|
||||
}
|
||||
|
||||
/// Configuration wrapper.
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
||||
pub struct Config {
|
||||
// Serializer maintains the order of fields in the struct
|
||||
// so we should always put simple fields before nested fields
|
||||
// in all serializable structs to avoid ValueAfterTable errors
|
||||
// These errors occur when the toml serializer is about to produce
|
||||
// ambiguous toml structure like the one below
|
||||
// [main]
|
||||
// field1_under_main = 1
|
||||
// field2_under_main = 2
|
||||
// [main.subconf]
|
||||
// field1_under_subconf = 1
|
||||
// field3_under_main = 3 # This field will be interpreted as being under subconf and not under main
|
||||
#[serde(default = "default_path")]
|
||||
pub path: String,
|
||||
|
||||
pub general: General,
|
||||
pub pools: HashMap<String, Pool>,
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Config {
|
||||
Config {
|
||||
path: String::from("pgcat.toml"),
|
||||
general: General::default(),
|
||||
pools: HashMap::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&Config> for std::collections::HashMap<String, String> {
|
||||
fn from(config: &Config) -> HashMap<String, String> {
|
||||
let mut r: Vec<(String, String)> = config
|
||||
.pools
|
||||
.iter()
|
||||
.flat_map(|(pool_name, pool)| {
|
||||
[
|
||||
(
|
||||
format!("pools.{}.pool_mode", pool_name),
|
||||
pool.pool_mode.clone(),
|
||||
),
|
||||
(
|
||||
format!("pools.{}.primary_reads_enabled", pool_name),
|
||||
pool.primary_reads_enabled.to_string(),
|
||||
),
|
||||
(
|
||||
format!("pools.{}.query_parser_enabled", pool_name),
|
||||
pool.query_parser_enabled.to_string(),
|
||||
),
|
||||
(
|
||||
format!("pools.{}.default_role", pool_name),
|
||||
pool.default_role.clone(),
|
||||
),
|
||||
(
|
||||
format!("pools.{}.sharding_function", pool_name),
|
||||
pool.sharding_function.clone(),
|
||||
),
|
||||
(
|
||||
format!("pools.{:?}.shard_count", pool_name),
|
||||
pool.shards.len().to_string(),
|
||||
),
|
||||
(
|
||||
format!("pools.{:?}.users", pool_name),
|
||||
pool.users
|
||||
.iter()
|
||||
.map(|(_username, user)| &user.username)
|
||||
.cloned()
|
||||
.collect::<Vec<String>>()
|
||||
.join(", "),
|
||||
),
|
||||
]
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut static_settings = vec![
|
||||
("host".to_string(), config.general.host.to_string()),
|
||||
("port".to_string(), config.general.port.to_string()),
|
||||
(
|
||||
"prometheus_exporter_port".to_string(),
|
||||
config.general.prometheus_exporter_port.to_string(),
|
||||
),
|
||||
(
|
||||
"connect_timeout".to_string(),
|
||||
config.general.connect_timeout.to_string(),
|
||||
),
|
||||
(
|
||||
"healthcheck_timeout".to_string(),
|
||||
config.general.healthcheck_timeout.to_string(),
|
||||
),
|
||||
(
|
||||
"shutdown_timeout".to_string(),
|
||||
config.general.shutdown_timeout.to_string(),
|
||||
),
|
||||
(
|
||||
"healthcheck_delay".to_string(),
|
||||
config.general.healthcheck_delay.to_string(),
|
||||
),
|
||||
("ban_time".to_string(), config.general.ban_time.to_string()),
|
||||
];
|
||||
|
||||
r.append(&mut static_settings);
|
||||
return r.iter().cloned().collect();
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Print current configuration.
|
||||
pub fn show(&self) {
|
||||
info!("Ban time: {}s", self.general.ban_time);
|
||||
info!(
|
||||
"Healthcheck timeout: {}ms",
|
||||
self.general.healthcheck_timeout
|
||||
);
|
||||
info!("Connection timeout: {}ms", self.general.connect_timeout);
|
||||
info!("Shutdown timeout: {}ms", self.general.shutdown_timeout);
|
||||
info!("Healthcheck delay: {}ms", self.general.healthcheck_delay);
|
||||
match self.general.tls_certificate.clone() {
|
||||
Some(tls_certificate) => {
|
||||
info!("TLS certificate: {}", tls_certificate);
|
||||
|
||||
match self.general.tls_private_key.clone() {
|
||||
Some(tls_private_key) => {
|
||||
info!("TLS private key: {}", tls_private_key);
|
||||
info!("TLS support is enabled");
|
||||
}
|
||||
|
||||
None => (),
|
||||
}
|
||||
}
|
||||
|
||||
None => {
|
||||
info!("TLS support is disabled");
|
||||
}
|
||||
};
|
||||
|
||||
for (pool_name, pool_config) in &self.pools {
|
||||
// TODO: Make this output prettier (maybe a table?)
|
||||
info!(
|
||||
"[pool: {}] Maximum user connections: {}",
|
||||
pool_name,
|
||||
pool_config
|
||||
.users
|
||||
.iter()
|
||||
.map(|(_, user_cfg)| user_cfg.pool_size)
|
||||
.sum::<u32>()
|
||||
.to_string()
|
||||
);
|
||||
info!("[pool: {}] Pool mode: {}", pool_name, pool_config.pool_mode);
|
||||
info!(
|
||||
"[pool: {}] Sharding function: {}",
|
||||
pool_name, pool_config.sharding_function
|
||||
);
|
||||
info!(
|
||||
"[pool: {}] Primary reads: {}",
|
||||
pool_name, pool_config.primary_reads_enabled
|
||||
);
|
||||
info!(
|
||||
"[pool: {}] Query router: {}",
|
||||
pool_name, pool_config.query_parser_enabled
|
||||
);
|
||||
info!(
|
||||
"[pool: {}] Number of shards: {}",
|
||||
pool_name,
|
||||
pool_config.shards.len()
|
||||
);
|
||||
info!(
|
||||
"[pool: {}] Number of users: {}",
|
||||
pool_name,
|
||||
pool_config.users.len()
|
||||
);
|
||||
|
||||
for user in &pool_config.users {
|
||||
info!(
|
||||
"[pool: {}][user: {}] Pool size: {}",
|
||||
pool_name, user.1.username, user.1.pool_size,
|
||||
);
|
||||
info!(
|
||||
"[pool: {}][user: {}] Statement timeout: {}",
|
||||
pool_name, user.1.username, user.1.statement_timeout
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a read-only instance of the configuration
|
||||
/// from anywhere in the app.
|
||||
/// ArcSwap makes this cheap and quick.
|
||||
pub fn get_config() -> Config {
|
||||
(*(*CONFIG.load())).clone()
|
||||
}
|
||||
|
||||
/// Parse the configuration file located at the path.
|
||||
pub async fn parse(path: &str) -> Result<(), Error> {
|
||||
let mut contents = String::new();
|
||||
let mut file = match File::open(path).await {
|
||||
Ok(file) => file,
|
||||
Err(err) => {
|
||||
println!("> Config error: {:?}", err);
|
||||
error!("Could not open '{}': {}", path, err.to_string());
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
@@ -57,20 +435,174 @@ pub async fn parse(path: &str) -> Result<Config, Error> {
|
||||
match file.read_to_string(&mut contents).await {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
println!("> Config error: {:?}", err);
|
||||
error!("Could not read config file: {}", err.to_string());
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
|
||||
let config: Config = match toml::from_str(&contents) {
|
||||
let mut config: Config = match toml::from_str(&contents) {
|
||||
Ok(config) => config,
|
||||
Err(err) => {
|
||||
println!("> Config error: {:?}", err);
|
||||
error!("Could not parse config file: {}", err.to_string());
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(config)
|
||||
// Validate TLS!
|
||||
match config.general.tls_certificate.clone() {
|
||||
Some(tls_certificate) => {
|
||||
match load_certs(&Path::new(&tls_certificate)) {
|
||||
Ok(_) => {
|
||||
// Cert is okay, but what about the private key?
|
||||
match config.general.tls_private_key.clone() {
|
||||
Some(tls_private_key) => match load_keys(&Path::new(&tls_private_key)) {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
error!("tls_private_key is incorrectly configured: {:?}", err);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
},
|
||||
|
||||
None => {
|
||||
error!("tls_certificate is set, but the tls_private_key is not");
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Err(err) => {
|
||||
error!("tls_certificate is incorrectly configured: {:?}", err);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
}
|
||||
}
|
||||
None => (),
|
||||
};
|
||||
|
||||
for (pool_name, pool) in &config.pools {
|
||||
match pool.sharding_function.as_ref() {
|
||||
"pg_bigint_hash" => (),
|
||||
"sha1" => (),
|
||||
_ => {
|
||||
error!(
|
||||
"Supported sharding functions are: 'pg_bigint_hash', 'sha1', got: '{}' in pool {} settings",
|
||||
pool.sharding_function,
|
||||
pool_name
|
||||
);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
|
||||
match pool.default_role.as_ref() {
|
||||
"any" => (),
|
||||
"primary" => (),
|
||||
"replica" => (),
|
||||
other => {
|
||||
error!(
|
||||
"Query router default_role must be 'primary', 'replica', or 'any', got: '{}'",
|
||||
other
|
||||
);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
|
||||
match pool.pool_mode.as_ref() {
|
||||
"transaction" => (),
|
||||
"session" => (),
|
||||
other => {
|
||||
error!(
|
||||
"pool_mode can be 'session' or 'transaction', got: '{}'",
|
||||
other
|
||||
);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
|
||||
for shard in &pool.shards {
|
||||
// We use addresses as unique identifiers,
|
||||
// let's make sure they are unique in the config as well.
|
||||
let mut dup_check = HashSet::new();
|
||||
let mut primary_count = 0;
|
||||
|
||||
match shard.0.parse::<usize>() {
|
||||
Ok(_) => (),
|
||||
Err(_) => {
|
||||
error!(
|
||||
"Shard '{}' is not a valid number, shards must be numbered starting at 0",
|
||||
shard.0
|
||||
);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
|
||||
if shard.1.servers.len() == 0 {
|
||||
error!("Shard {} has no servers configured", shard.0);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
|
||||
for server in &shard.1.servers {
|
||||
dup_check.insert(server);
|
||||
|
||||
// Check that we define only zero or one primary.
|
||||
match server.2.as_ref() {
|
||||
"primary" => primary_count += 1,
|
||||
_ => (),
|
||||
};
|
||||
|
||||
// Check role spelling.
|
||||
match server.2.as_ref() {
|
||||
"primary" => (),
|
||||
"replica" => (),
|
||||
_ => {
|
||||
error!(
|
||||
"Shard {} server role must be either 'primary' or 'replica', got: '{}'",
|
||||
shard.0, server.2
|
||||
);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if primary_count > 1 {
|
||||
error!("Shard {} has more than one primary configured", &shard.0);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
|
||||
if dup_check.len() != shard.1.servers.len() {
|
||||
error!("Shard {} contains duplicate server configs", &shard.0);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
config.path = path.to_string();
|
||||
|
||||
// Update the configuration globally.
|
||||
CONFIG.store(Arc::new(config.clone()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn reload_config(client_server_map: ClientServerMap) -> Result<bool, Error> {
|
||||
let old_config = get_config();
|
||||
match parse(&old_config.path).await {
|
||||
Ok(()) => (),
|
||||
Err(err) => {
|
||||
error!("Config reload error: {:?}", err);
|
||||
return Err(Error::BadConfig);
|
||||
}
|
||||
};
|
||||
let new_config = get_config();
|
||||
|
||||
if old_config.pools != new_config.pools {
|
||||
info!("Pool configuration changed, re-creating server pools");
|
||||
ConnectionPool::from_config(client_server_map).await?;
|
||||
Ok(true)
|
||||
} else if old_config != new_config {
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -79,9 +611,68 @@ mod test {

    #[tokio::test]
    async fn test_config() {
        let config = parse("pgcat.toml").await.unwrap();
        assert_eq!(config.general.pool_size, 15);
        assert_eq!(config.shards.len(), 3);
        assert_eq!(config.shards["1"].servers[0].0, "127.0.0.1");
        parse("pgcat.toml").await.unwrap();

        assert_eq!(get_config().path, "pgcat.toml".to_string());

        assert_eq!(get_config().general.ban_time, 60);
        assert_eq!(get_config().pools.len(), 2);
        assert_eq!(get_config().pools["sharded_db"].shards.len(), 3);
        assert_eq!(get_config().pools["simple_db"].shards.len(), 1);
        assert_eq!(get_config().pools["sharded_db"].users.len(), 2);
        assert_eq!(get_config().pools["simple_db"].users.len(), 1);

        assert_eq!(
            get_config().pools["sharded_db"].shards["0"].servers[0].0,
            "127.0.0.1"
        );
        assert_eq!(
            get_config().pools["sharded_db"].shards["1"].servers[0].2,
            "primary"
        );
        assert_eq!(
            get_config().pools["sharded_db"].shards["1"].database,
            "shard1"
        );
        assert_eq!(
            get_config().pools["sharded_db"].users["0"].username,
            "sharding_user"
        );
        assert_eq!(
            get_config().pools["sharded_db"].users["1"].password,
            "other_user"
        );
        assert_eq!(get_config().pools["sharded_db"].users["1"].pool_size, 21);
        assert_eq!(get_config().pools["sharded_db"].default_role, "any");

        assert_eq!(
            get_config().pools["simple_db"].shards["0"].servers[0].0,
            "127.0.0.1"
        );
        assert_eq!(
            get_config().pools["simple_db"].shards["0"].servers[0].1,
            5432
        );
        assert_eq!(
            get_config().pools["simple_db"].shards["0"].database,
            "some_db"
        );
        assert_eq!(get_config().pools["simple_db"].default_role, "primary");

        assert_eq!(
            get_config().pools["simple_db"].users["0"].username,
            "simple_user"
        );
        assert_eq!(
            get_config().pools["simple_db"].users["0"].password,
            "simple_user"
        );
        assert_eq!(get_config().pools["simple_db"].users["0"].pool_size, 5);
    }

    #[tokio::test]
    async fn test_serialize_configs() {
        parse("pgcat.toml").await.unwrap();
        print!("{}", toml::to_string(&get_config()).unwrap());
    }
}

33
src/constants.rs
Normal file
@@ -0,0 +1,33 @@
/// Various protocol constants, as defined in
/// <https://www.postgresql.org/docs/12/protocol-message-formats.html>
/// and elsewhere in the source code.

// Used in the StartupMessage to indicate regular handshake.
pub const PROTOCOL_VERSION_NUMBER: i32 = 196608;

// SSLRequest: used to indicate we want an SSL connection.
pub const SSL_REQUEST_CODE: i32 = 80877103;

// CancelRequest: the cancel request code.
pub const CANCEL_REQUEST_CODE: i32 = 80877102;

// AuthenticationMD5Password
pub const MD5_ENCRYPTED_PASSWORD: i32 = 5;

// SASL
pub const SASL: i32 = 10;
pub const SASL_CONTINUE: i32 = 11;
pub const SASL_FINAL: i32 = 12;
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256";
pub const NONCE_LENGTH: usize = 24;

// AuthenticationOk
pub const AUTHENTICATION_SUCCESSFUL: i32 = 0;

// ErrorResponse: A code identifying the field type; if zero, this is the message terminator and no string follows.
pub const MESSAGE_TERMINATOR: u8 = 0;

//
// Data types
//
pub const _OID_INT8: i32 = 20; // bigint
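The three request codes above are what the pooler has to tell apart when it reads the first packet from a client. A purely illustrative dispatch, not the actual client.rs code, looks like this:

// Illustrative only: branch on the i32 code found in the startup packet.
fn describe_startup_code(code: i32) -> &'static str {
    match code {
        PROTOCOL_VERSION_NUMBER => "regular StartupMessage handshake",
        SSL_REQUEST_CODE => "client asks for TLS, pooler answers 'S' or 'N'",
        CANCEL_REQUEST_CODE => "CancelRequest, look up the backend by process_id/secret_key",
        _ => "unknown protocol code",
    }
}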
@@ -1,11 +1,16 @@
/// Errors.

/// Various errors.
#[derive(Debug, PartialEq)]
pub enum Error {
    SocketError,
    // ClientDisconnected,
    ClientBadStartup,
    ProtocolSyncError,
    ServerError,
    // ServerTimeout,
    // DirtyServer,
    BadConfig,
    AllServersDown,
    ClientError,
    TlsError,
    StatementTimeout,
    ShuttingDown,
}

340
src/main.rs
@@ -1,123 +1,333 @@
// PgCat, a PostgreSQL pooler with load balancing, failover, and sharding support.
// Copyright (C) 2022 Lev Kokotov <lev@levthe.dev>
// Copyright (c) 2022 Lev Kokotov <hi@levthe.dev>

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

extern crate arc_swap;
extern crate async_trait;
extern crate bb8;
extern crate bytes;
extern crate env_logger;
extern crate exitcode;
extern crate log;
extern crate md5;
extern crate num_cpus;
extern crate once_cell;
extern crate rustls_pemfile;
extern crate serde;
extern crate serde_derive;
extern crate sqlparser;
extern crate tokio;
extern crate tokio_rustls;
extern crate toml;

use log::{debug, error, info};
use parking_lot::Mutex;
use tokio::net::TcpListener;
use tokio::{
    signal::unix::{signal as unix_signal, SignalKind},
    sync::mpsc,
};

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::broadcast;

mod admin;
mod client;
mod config;
mod constants;
mod errors;
mod messages;
mod pool;
mod prometheus;
mod query_router;
mod scram;
mod server;
mod sharding;
mod stats;
mod tls;

// Support for query cancellation: this maps our process_ids and
// secret keys to the backend's.
use pool::{ClientServerMap, ConnectionPool};
use crate::config::{get_config, reload_config, VERSION};
use crate::errors::Error;
use crate::pool::{ClientServerMap, ConnectionPool};
use crate::prometheus::start_metric_server;
use crate::stats::{Collector, Reporter, REPORTER};

/// Main!
|
||||
#[tokio::main]
|
||||
#[tokio::main(worker_threads = 4)]
|
||||
async fn main() {
|
||||
println!("> Welcome to PgCat! Meow.");
|
||||
env_logger::init();
|
||||
info!("Welcome to PgCat! Meow. (Version {})", VERSION);
|
||||
|
||||
let config = match config::parse("pgcat.toml").await {
|
||||
Ok(config) => config,
|
||||
if !query_router::QueryRouter::setup() {
|
||||
error!("Could not setup query router");
|
||||
std::process::exit(exitcode::CONFIG);
|
||||
}
|
||||
|
||||
let args = std::env::args().collect::<Vec<String>>();
|
||||
|
||||
let config_file = if args.len() == 2 {
|
||||
args[1].to_string()
|
||||
} else {
|
||||
String::from("pgcat.toml")
|
||||
};
|
||||
|
||||
match config::parse(&config_file).await {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
println!("> Config parse error: {:?}", err);
|
||||
return;
|
||||
error!("Config parse error: {:?}", err);
|
||||
std::process::exit(exitcode::CONFIG);
|
||||
}
|
||||
};
|
||||
|
||||
let config = get_config();
|
||||
|
||||
if let Some(true) = config.general.enable_prometheus_exporter {
|
||||
let http_addr_str = format!(
|
||||
"{}:{}",
|
||||
config.general.host, config.general.prometheus_exporter_port
|
||||
);
|
||||
let http_addr = match SocketAddr::from_str(&http_addr_str) {
|
||||
Ok(addr) => addr,
|
||||
Err(err) => {
|
||||
error!("Invalid http address: {}", err);
|
||||
std::process::exit(exitcode::CONFIG);
|
||||
}
|
||||
};
|
||||
tokio::task::spawn(async move {
|
||||
start_metric_server(http_addr).await;
|
||||
});
|
||||
}
|
||||
|
||||
let addr = format!("{}:{}", config.general.host, config.general.port);
|
||||
|
||||
let listener = match TcpListener::bind(&addr).await {
|
||||
Ok(sock) => sock,
|
||||
Err(err) => {
|
||||
println!("> Error: {:?}", err);
|
||||
return;
|
||||
error!("Listener socket error: {:?}", err);
|
||||
std::process::exit(exitcode::CONFIG);
|
||||
}
|
||||
};
|
||||
|
||||
println!("> Running on {}", addr);
|
||||
info!("Running on {}", addr);
|
||||
|
||||
config.show();
|
||||
|
||||
// Tracks which client is connected to which server for query cancellation.
|
||||
let client_server_map: ClientServerMap = Arc::new(Mutex::new(HashMap::new()));
|
||||
|
||||
println!("> Pool size: {}", config.general.pool_size);
|
||||
println!("> Pool mode: {}", config.general.pool_mode);
|
||||
println!("> Ban time: {}s", config.general.ban_time);
|
||||
println!(
|
||||
"> Healthcheck timeout: {}ms",
|
||||
config.general.healthcheck_timeout
|
||||
);
|
||||
// Statistics reporting.
|
||||
let (stats_tx, stats_rx) = mpsc::channel(100_000);
|
||||
REPORTER.store(Arc::new(Reporter::new(stats_tx.clone())));
|
||||
|
||||
let pool = ConnectionPool::from_config(config.clone(), client_server_map.clone()).await;
|
||||
let transaction_mode = config.general.pool_mode == "transaction";
|
||||
// Connection pool that allows to query all shards and replicas.
|
||||
match ConnectionPool::from_config(client_server_map.clone()).await {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
error!("Pool error: {:?}", err);
|
||||
std::process::exit(exitcode::CONFIG);
|
||||
}
|
||||
};
|
||||
|
||||
println!("> Waiting for clients...");
|
||||
tokio::task::spawn(async move {
|
||||
let mut stats_collector = Collector::new(stats_rx, stats_tx.clone());
|
||||
stats_collector.collect().await;
|
||||
});
|
||||
|
||||
info!("Config autoreloader: {}", config.general.autoreload);
|
||||
|
||||
let mut term_signal = unix_signal(SignalKind::terminate()).unwrap();
|
||||
let mut interrupt_signal = unix_signal(SignalKind::interrupt()).unwrap();
|
||||
let mut sighup_signal = unix_signal(SignalKind::hangup()).unwrap();
|
||||
let mut autoreload_interval = tokio::time::interval(tokio::time::Duration::from_millis(15_000));
|
||||
let (shutdown_tx, _) = broadcast::channel::<()>(1);
|
||||
let (drain_tx, mut drain_rx) = mpsc::channel::<i8>(2048);
|
||||
let (exit_tx, mut exit_rx) = mpsc::channel::<()>(1);
|
||||
|
||||
info!("Waiting for clients");
|
||||
|
||||
let mut admin_only = false;
|
||||
let mut total_clients = 0;
|
||||
|
||||
loop {
|
||||
let pool = pool.clone();
|
||||
let client_server_map = client_server_map.clone();
|
||||
tokio::select! {
|
||||
// Reload config:
|
||||
// kill -SIGHUP $(pgrep pgcat)
|
||||
_ = sighup_signal.recv() => {
|
||||
info!("Reloading config");
|
||||
|
||||
let (socket, addr) = match listener.accept().await {
|
||||
Ok((socket, addr)) => (socket, addr),
|
||||
Err(err) => {
|
||||
println!("> Listener: {:?}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
match reload_config(client_server_map.clone()).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => (),
|
||||
};
|
||||
|
||||
// Client goes to another thread, bye.
|
||||
tokio::task::spawn(async move {
|
||||
println!(
|
||||
">> Client {:?} connected, transaction pooling: {}",
|
||||
addr, transaction_mode
|
||||
);
|
||||
get_config().show();
|
||||
},
|
||||
|
||||
match client::Client::startup(socket, client_server_map, transaction_mode).await {
|
||||
Ok(mut client) => {
|
||||
println!(">> Client {:?} authenticated successfully!", addr);
|
||||
_ = autoreload_interval.tick() => {
|
||||
if config.general.autoreload {
|
||||
info!("Automatically reloading config");
|
||||
|
||||
match client.handle(pool).await {
|
||||
match reload_config(client_server_map.clone()).await {
|
||||
Ok(changed) => {
|
||||
if changed {
|
||||
get_config().show()
|
||||
}
|
||||
}
|
||||
Err(_) => (),
|
||||
};
|
||||
}
|
||||
},
|
||||
|
||||
// Initiate graceful shutdown sequence on sig int
|
||||
_ = interrupt_signal.recv() => {
|
||||
info!("Got SIGINT, waiting for client connection drain now");
|
||||
admin_only = true;
|
||||
|
||||
// Broadcast that client tasks need to finish
|
||||
let _ = shutdown_tx.send(());
|
||||
let exit_tx = exit_tx.clone();
|
||||
let _ = drain_tx.send(0).await;
|
||||
|
||||
tokio::task::spawn(async move {
|
||||
let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(config.general.shutdown_timeout));
|
||||
|
||||
// First tick fires immediately.
|
||||
interval.tick().await;
|
||||
|
||||
// Second one in the interval time.
|
||||
interval.tick().await;
|
||||
|
||||
// We're done waiting.
|
||||
error!("Timed out waiting for clients");
|
||||
|
||||
let _ = exit_tx.send(()).await;
|
||||
});
|
||||
},
|
||||
|
||||
_ = term_signal.recv() => break,
|
||||
|
||||
new_client = listener.accept() => {
|
||||
let (socket, addr) = match new_client {
|
||||
Ok((socket, addr)) => (socket, addr),
|
||||
Err(err) => {
|
||||
error!("{:?}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let shutdown_rx = shutdown_tx.subscribe();
|
||||
let drain_tx = drain_tx.clone();
|
||||
let client_server_map = client_server_map.clone();
|
||||
|
||||
tokio::task::spawn(async move {
|
||||
let start = chrono::offset::Utc::now().naive_utc();
|
||||
|
||||
match client::client_entrypoint(
|
||||
socket,
|
||||
client_server_map,
|
||||
shutdown_rx,
|
||||
drain_tx,
|
||||
admin_only,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(()) => {
|
||||
println!(">> Client {:?} disconnected.", addr);
|
||||
|
||||
let duration = chrono::offset::Utc::now().naive_utc() - start;
|
||||
|
||||
info!(
|
||||
"Client {:?} disconnected, session duration: {}",
|
||||
addr,
|
||||
format_duration(&duration)
|
||||
);
|
||||
}
|
||||
|
||||
Err(err) => {
|
||||
println!(">> Client disconnected with error: {:?}", err);
|
||||
client.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
match err {
|
||||
// Don't count the clients we rejected.
|
||||
Error::ShuttingDown => (),
|
||||
_ => {
|
||||
// drain_tx.send(-1).await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
Err(err) => {
|
||||
println!(">> Error: {:?}", err);
|
||||
debug!("Client disconnected with error {:?}", err);
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
_ = exit_rx.recv() => {
|
||||
break;
|
||||
}
|
||||
|
||||
client_ping = drain_rx.recv() => {
|
||||
let client_ping = client_ping.unwrap();
|
||||
total_clients += client_ping;
|
||||
|
||||
if total_clients == 0 && admin_only {
|
||||
let _ = exit_tx.send(()).await;
|
||||
}
|
||||
};
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("Shutting down...");
|
||||
}
|
||||
|
||||
/// Format chrono::Duration to be more human-friendly.
///
/// # Arguments
///
/// * `duration` - A duration of time
fn format_duration(duration: &chrono::Duration) -> String {
    let seconds = {
        let seconds = duration.num_seconds() % 60;
        if seconds < 10 {
            format!("0{}", seconds)
        } else {
            format!("{}", seconds)
        }
    };

    let minutes = {
        let minutes = duration.num_minutes() % 60;
        if minutes < 10 {
            format!("0{}", minutes)
        } else {
            format!("{}", minutes)
        }
    };

    let hours = {
        let hours = duration.num_hours() % 24;
        if hours < 10 {
            format!("0{}", hours)
        } else {
            format!("{}", hours)
        }
    };

    let days = duration.num_days().to_string();

    format!("{}d {}:{}:{}", days, hours, minutes, seconds)
}

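A quick example of the output format_duration produces (hours, minutes and seconds are zero-padded, days are not); the duration value is only an illustration:

let duration = chrono::Duration::seconds(90);
assert_eq!(format_duration(&duration), "0d 00:01:30");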
434
src/messages.rs
@@ -1,20 +1,37 @@
|
||||
use bytes::{BufMut, BytesMut};
|
||||
/// Helper functions to send one-off protocol messages
|
||||
/// and handle TcpStream (TCP socket).
|
||||
use bytes::{Buf, BufMut, BytesMut};
|
||||
use md5::{Digest, Md5};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, BufReader};
|
||||
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
use crate::errors::Error;
|
||||
use std::collections::HashMap;
|
||||
use std::mem;
|
||||
|
||||
// This is a funny one. `psql` parses this to figure out which
|
||||
// queries to send when using shortcuts, e.g. \d+.
|
||||
//
|
||||
// TODO: Actually get the version from the server itself.
|
||||
//
|
||||
const SERVER_VESION: &str = "12.9 (Ubuntu 12.9-0ubuntu0.20.04.1)";
|
||||
/// Postgres data type mappings
|
||||
/// used in RowDescription ('T') message.
|
||||
pub enum DataType {
|
||||
Text,
|
||||
Int4,
|
||||
Numeric,
|
||||
}
|
||||
|
||||
impl From<&DataType> for i32 {
|
||||
fn from(data_type: &DataType) -> i32 {
|
||||
match data_type {
|
||||
DataType::Text => 25,
|
||||
DataType::Int4 => 23,
|
||||
DataType::Numeric => 1700,
|
||||
}
|
||||
}
|
||||
}
|
||||
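These map to the standard Postgres type OIDs (25 = text, 23 = int4, 1700 = numeric), so the conversion can be dropped straight into a RowDescription field. For example:

// OID written into the RowDescription for a text column.
let oid: i32 = (&DataType::Text).into();
assert_eq!(oid, 25);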
|
||||
/// Tell the client that authentication handshake completed successfully.
|
||||
pub async fn auth_ok(stream: &mut TcpStream) -> Result<(), Error> {
|
||||
pub async fn auth_ok<S>(stream: &mut S) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut auth_ok = BytesMut::with_capacity(9);
|
||||
|
||||
auth_ok.put_u8(b'R');
|
||||
@@ -24,36 +41,39 @@ pub async fn auth_ok(stream: &mut TcpStream) -> Result<(), Error> {
|
||||
Ok(write_all(stream, auth_ok).await?)
|
||||
}
|
||||
|
||||
/// Send server parameters to the client. This will tell the client
|
||||
/// what server version and what's the encoding we're using.
|
||||
pub async fn server_parameters(stream: &mut TcpStream) -> Result<(), Error> {
|
||||
let client_encoding = BytesMut::from(&b"client_encoding\0UTF8\0"[..]);
|
||||
let server_version =
|
||||
BytesMut::from(&format!("server_version\0{}\0", SERVER_VESION).as_bytes()[..]);
|
||||
/// Generate md5 password challenge.
|
||||
pub async fn md5_challenge<S>(stream: &mut S) -> Result<[u8; 4], Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
// let mut rng = rand::thread_rng();
|
||||
let salt: [u8; 4] = [
|
||||
rand::random(),
|
||||
rand::random(),
|
||||
rand::random(),
|
||||
rand::random(),
|
||||
];
|
||||
|
||||
// Client encoding
|
||||
let len = client_encoding.len() as i32 + 4; // TODO: add more parameters here
|
||||
let mut res = BytesMut::with_capacity(64);
|
||||
let mut res = BytesMut::new();
|
||||
res.put_u8(b'R');
|
||||
res.put_i32(12);
|
||||
res.put_i32(5); // MD5
|
||||
res.put_slice(&salt[..]);
|
||||
|
||||
res.put_u8(b'S');
|
||||
res.put_i32(len);
|
||||
res.put_slice(&client_encoding[..]);
|
||||
|
||||
let len = server_version.len() as i32 + 4;
|
||||
res.put_u8(b'S');
|
||||
res.put_i32(len);
|
||||
res.put_slice(&server_version[..]);
|
||||
|
||||
Ok(write_all(stream, res).await?)
|
||||
write_all(stream, res).await?;
|
||||
Ok(salt)
|
||||
}
|
||||
|
||||
/// Give the client the process_id and secret we generated
|
||||
/// used in query cancellation.
|
||||
pub async fn backend_key_data(
|
||||
stream: &mut TcpStream,
|
||||
pub async fn backend_key_data<S>(
|
||||
stream: &mut S,
|
||||
backend_id: i32,
|
||||
secret_key: i32,
|
||||
) -> Result<(), Error> {
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut key_data = BytesMut::from(&b"K"[..]);
|
||||
key_data.put_i32(12);
|
||||
key_data.put_i32(backend_id);
|
||||
@@ -62,9 +82,25 @@ pub async fn backend_key_data(
|
||||
Ok(write_all(stream, key_data).await?)
|
||||
}
|
||||
|
||||
/// Construct a `Q`: Query message.
|
||||
pub fn simple_query(query: &str) -> BytesMut {
|
||||
let mut res = BytesMut::from(&b"Q"[..]);
|
||||
let query = format!("{}\0", query);
|
||||
|
||||
res.put_i32(query.len() as i32 + 4);
|
||||
res.put_slice(&query.as_bytes());
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// Tell the client we're ready for another query.
|
||||
pub async fn ready_for_query(stream: &mut TcpStream) -> Result<(), Error> {
|
||||
let mut bytes = BytesMut::with_capacity(5);
|
||||
pub async fn ready_for_query<S>(stream: &mut S) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut bytes = BytesMut::with_capacity(
|
||||
mem::size_of::<u8>() + mem::size_of::<i32>() + mem::size_of::<u8>(),
|
||||
);
|
||||
|
||||
bytes.put_u8(b'Z');
|
||||
bytes.put_i32(5);
|
||||
@@ -75,7 +111,12 @@ pub async fn ready_for_query(stream: &mut TcpStream) -> Result<(), Error> {
|
||||
|
||||
/// Send the startup packet to the server. We're pretending we're a Pg client.
|
||||
/// This tells the server which user we are and what database we want.
|
||||
pub async fn startup(stream: &mut TcpStream, user: &str, database: &str) -> Result<(), Error> {
|
||||
pub async fn startup(
|
||||
stream: &mut TcpStream,
|
||||
user: &str,
|
||||
database: &str,
|
||||
search_path: Option<&String>,
|
||||
) -> Result<(), Error> {
|
||||
let mut bytes = BytesMut::with_capacity(25);
|
||||
|
||||
bytes.put_i32(196608); // Protocol number
|
||||
@@ -89,6 +130,17 @@ pub async fn startup(stream: &mut TcpStream, user: &str, database: &str) -> Resu
|
||||
bytes.put(&b"database\0"[..]);
|
||||
bytes.put_slice(&database.as_bytes());
|
||||
bytes.put_u8(0);
|
||||
|
||||
// search_path
|
||||
match search_path {
|
||||
Some(search_path) => {
|
||||
bytes.put(&b"options\0"[..]);
|
||||
bytes.put_slice(&format!("-c search_path={}", search_path).as_bytes());
|
||||
bytes.put_u8(0);
|
||||
}
|
||||
None => (),
|
||||
};
|
||||
|
||||
bytes.put_u8(0); // Null terminator
|
||||
|
||||
let len = bytes.len() as i32 + 4i32;
|
||||
@@ -104,14 +156,60 @@ pub async fn startup(stream: &mut TcpStream, user: &str, database: &str) -> Resu
|
||||
}
|
||||
}
|
||||
|
||||
/// Send password challenge response to the server.
|
||||
/// This is the MD5 challenge.
|
||||
pub async fn md5_password(
|
||||
stream: &mut TcpStream,
|
||||
user: &str,
|
||||
password: &str,
|
||||
salt: &[u8],
|
||||
) -> Result<(), Error> {
|
||||
/// Parse the params the server sends as a key/value format.
|
||||
pub fn parse_params(mut bytes: BytesMut) -> Result<HashMap<String, String>, Error> {
|
||||
let mut result = HashMap::new();
|
||||
let mut buf = Vec::new();
|
||||
let mut tmp = String::new();
|
||||
|
||||
while bytes.has_remaining() {
|
||||
let mut c = bytes.get_u8();
|
||||
|
||||
// Null-terminated C-strings.
|
||||
while c != 0 {
|
||||
tmp.push(c as char);
|
||||
c = bytes.get_u8();
|
||||
}
|
||||
|
||||
if tmp.len() > 0 {
|
||||
buf.push(tmp.clone());
|
||||
tmp.clear();
|
||||
}
|
||||
}
|
||||
|
||||
// Expect pairs of name and value
|
||||
// and at least one pair to be present.
|
||||
if buf.len() % 2 != 0 || buf.len() < 2 {
|
||||
return Err(Error::ClientBadStartup);
|
||||
}
|
||||
|
||||
let mut i = 0;
|
||||
while i < buf.len() {
|
||||
let name = buf[i].clone();
|
||||
let value = buf[i + 1].clone();
|
||||
let _ = result.insert(name, value);
|
||||
i += 2;
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
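As an illustration, the null-terminated key/value layout this function expects parses like so (the parameter values here are made up):

let bytes = BytesMut::from(&b"user\0alice\0database\0db1\0\0"[..]);
let params = parse_params(bytes).unwrap();
assert_eq!(params["user"], "alice");
assert_eq!(params["database"], "db1");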
|
||||
/// Parse StartupMessage parameters.
|
||||
/// e.g. user, database, application_name, etc.
|
||||
pub fn parse_startup(bytes: BytesMut) -> Result<HashMap<String, String>, Error> {
|
||||
let result = parse_params(bytes)?;
|
||||
|
||||
// Minimum required parameters
|
||||
// I want to have the user at the very minimum, according to the protocol spec.
|
||||
if !result.contains_key("user") {
|
||||
return Err(Error::ClientBadStartup);
|
||||
}
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
/// Create md5 password hash given a salt.
|
||||
pub fn md5_hash_password(user: &str, password: &str, salt: &[u8]) -> Vec<u8> {
|
||||
let mut md5 = Md5::new();
|
||||
|
||||
// First pass
|
||||
@@ -130,7 +228,24 @@ pub async fn md5_password(
|
||||
.collect::<Vec<u8>>();
|
||||
password.push(0);
|
||||
|
||||
password
|
||||
}
|
||||
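The hunk elides the middle of md5_hash_password; it follows the standard Postgres MD5 scheme, "md5" + hex(md5(hex(md5(password || user)) || salt)). A self-contained sketch of that computation, assuming the same md5 crate imported above (this is not the literal function body):

use md5::{Digest, Md5};

fn hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

// Sketch of the two-pass scheme used for Postgres MD5 authentication.
fn md5_digest(user: &str, password: &str, salt: &[u8]) -> String {
    // First pass: md5(password || user), hex-encoded.
    let mut md5 = Md5::new();
    md5.update(password.as_bytes());
    md5.update(user.as_bytes());
    let first_pass = hex(&md5.finalize());

    // Second pass: "md5" + hex(md5(first_pass || salt)).
    let mut md5 = Md5::new();
    md5.update(first_pass.as_bytes());
    md5.update(salt);
    format!("md5{}", hex(&md5.finalize()))
}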
|
||||
/// Send password challenge response to the server.
|
||||
/// This is the MD5 challenge.
|
||||
pub async fn md5_password<S>(
|
||||
stream: &mut S,
|
||||
user: &str,
|
||||
password: &str,
|
||||
salt: &[u8],
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let password = md5_hash_password(user, password, salt);
|
||||
|
||||
let mut message = BytesMut::with_capacity(password.len() as usize + 5);
|
||||
|
||||
message.put_u8(b'p');
|
||||
message.put_i32(password.len() as i32 + 4);
|
||||
message.put_slice(&password[..]);
|
||||
@@ -138,25 +253,217 @@ pub async fn md5_password(
|
||||
Ok(write_all(stream, message).await?)
|
||||
}
|
||||
|
||||
pub async fn set_sharding_key(stream: &mut OwnedWriteHalf) -> Result<(), Error> {
|
||||
/// Implements a response to our custom `SET SHARDING KEY`
|
||||
/// and `SET SERVER ROLE` commands.
|
||||
/// This tells the client we're ready for the next query.
|
||||
pub async fn custom_protocol_response_ok<S>(stream: &mut S, message: &str) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut res = BytesMut::with_capacity(25);
|
||||
|
||||
let set_complete = BytesMut::from(&"SET SHARDING KEY\0"[..]);
|
||||
let set_complete = BytesMut::from(&format!("{}\0", message)[..]);
|
||||
let len = (set_complete.len() + 4) as i32;
|
||||
|
||||
// CommandComplete
|
||||
res.put_u8(b'C');
|
||||
res.put_i32(len);
|
||||
res.put_slice(&set_complete[..]);
|
||||
|
||||
res.put_u8(b'Z');
|
||||
res.put_i32(5);
|
||||
res.put_u8(b'I');
|
||||
write_all_half(stream, res).await?;
|
||||
ready_for_query(stream).await
|
||||
}
|
||||
|
||||
write_all_half(stream, res).await
|
||||
/// Send a custom error message to the client.
|
||||
/// Tell the client we are ready for the next query and no rollback is necessary.
|
||||
/// Docs on error codes: <https://www.postgresql.org/docs/12/errcodes-appendix.html>.
|
||||
pub async fn error_response<S>(stream: &mut S, message: &str) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
error_response_terminal(stream, message).await?;
|
||||
ready_for_query(stream).await
|
||||
}
|
||||
|
||||
/// Send a custom error message to the client.
|
||||
/// Tell the client we are ready for the next query and no rollback is necessary.
|
||||
/// Docs on error codes: <https://www.postgresql.org/docs/12/errcodes-appendix.html>.
|
||||
pub async fn error_response_terminal<S>(stream: &mut S, message: &str) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut error = BytesMut::new();
|
||||
|
||||
// Error level
|
||||
error.put_u8(b'S');
|
||||
error.put_slice(&b"FATAL\0"[..]);
|
||||
|
||||
// Error level (non-translatable)
|
||||
error.put_u8(b'V');
|
||||
error.put_slice(&b"FATAL\0"[..]);
|
||||
|
||||
// Error code: not sure how much this matters.
|
||||
error.put_u8(b'C');
|
||||
error.put_slice(&b"58000\0"[..]); // system_error, see Appendix A.
|
||||
|
||||
// The short error message.
|
||||
error.put_u8(b'M');
|
||||
error.put_slice(&format!("{}\0", message).as_bytes());
|
||||
|
||||
// No more fields follow.
|
||||
error.put_u8(0);
|
||||
|
||||
// Compose the two message reply.
|
||||
let mut res = BytesMut::with_capacity(error.len() + 5);
|
||||
|
||||
res.put_u8(b'E');
|
||||
res.put_i32(error.len() as i32 + 4);
|
||||
res.put(error);
|
||||
|
||||
Ok(write_all_half(stream, res).await?)
|
||||
}
|
||||
|
||||
pub async fn wrong_password<S>(stream: &mut S, user: &str) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
let mut error = BytesMut::new();
|
||||
|
||||
// Error level
|
||||
error.put_u8(b'S');
|
||||
error.put_slice(&b"FATAL\0"[..]);
|
||||
|
||||
// Error level (non-translatable)
|
||||
error.put_u8(b'V');
|
||||
error.put_slice(&b"FATAL\0"[..]);
|
||||
|
||||
// Error code: not sure how much this matters.
|
||||
error.put_u8(b'C');
|
||||
error.put_slice(&b"28P01\0"[..]); // system_error, see Appendix A.
|
||||
|
||||
// The short error message.
|
||||
error.put_u8(b'M');
|
||||
error.put_slice(&format!("password authentication failed for user \"{}\"\0", user).as_bytes());
|
||||
|
||||
// No more fields follow.
|
||||
error.put_u8(0);
|
||||
|
||||
// Compose the two message reply.
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
res.put_u8(b'E');
|
||||
res.put_i32(error.len() as i32 + 4);
|
||||
|
||||
res.put(error);
|
||||
|
||||
write_all(stream, res).await
|
||||
}
|
||||
|
||||
/// Respond to a SHOW SHARD command.
|
||||
pub async fn show_response<S>(stream: &mut S, name: &str, value: &str) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
// A SELECT response consists of:
|
||||
// 1. RowDescription
|
||||
// 2. One or more DataRow
|
||||
// 3. CommandComplete
|
||||
// 4. ReadyForQuery
|
||||
|
||||
// The final messages sent to the client
|
||||
let mut res = BytesMut::new();
|
||||
|
||||
// RowDescription
|
||||
res.put(row_description(&vec![(name, DataType::Text)]));
|
||||
|
||||
// DataRow
|
||||
res.put(data_row(&vec![value.to_string()]));
|
||||
|
||||
// CommandComplete
|
||||
res.put(command_complete("SELECT 1"));
|
||||
|
||||
write_all_half(stream, res).await?;
|
||||
ready_for_query(stream).await
|
||||
}
|
||||
|
||||
pub fn row_description(columns: &Vec<(&str, DataType)>) -> BytesMut {
|
||||
let mut res = BytesMut::new();
|
||||
let mut row_desc = BytesMut::new();
|
||||
|
||||
// how many columns we are storing
|
||||
row_desc.put_i16(columns.len() as i16);
|
||||
|
||||
for (name, data_type) in columns {
|
||||
// Column name
|
||||
row_desc.put_slice(&format!("{}\0", name).as_bytes());
|
||||
|
||||
// Doesn't belong to any table
|
||||
row_desc.put_i32(0);
|
||||
|
||||
// Doesn't belong to any table
|
||||
row_desc.put_i16(0);
|
||||
|
||||
// Text
|
||||
row_desc.put_i32(data_type.into());
|
||||
|
||||
// Text size = variable (-1)
|
||||
let type_size = match data_type {
|
||||
DataType::Text => -1,
|
||||
DataType::Int4 => 4,
|
||||
DataType::Numeric => -1,
|
||||
};
|
||||
|
||||
row_desc.put_i16(type_size);
|
||||
|
||||
// Type modifier: none that I know
|
||||
row_desc.put_i32(-1);
|
||||
|
||||
// Format being used: text (0), binary (1)
|
||||
row_desc.put_i16(0);
|
||||
}
|
||||
|
||||
res.put_u8(b'T');
|
||||
res.put_i32(row_desc.len() as i32 + 4);
|
||||
res.put(row_desc);
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// Create a DataRow message.
|
||||
pub fn data_row(row: &Vec<String>) -> BytesMut {
|
||||
let mut res = BytesMut::new();
|
||||
let mut data_row = BytesMut::new();
|
||||
|
||||
data_row.put_i16(row.len() as i16);
|
||||
|
||||
for column in row {
|
||||
let column = column.as_bytes();
|
||||
data_row.put_i32(column.len() as i32);
|
||||
data_row.put_slice(&column);
|
||||
}
|
||||
|
||||
res.put_u8(b'D');
|
||||
res.put_i32(data_row.len() as i32 + 4);
|
||||
res.put(data_row);
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
/// Create a CommandComplete message.
|
||||
pub fn command_complete(command: &str) -> BytesMut {
|
||||
let cmd = BytesMut::from(format!("{}\0", command).as_bytes());
|
||||
let mut res = BytesMut::new();
|
||||
res.put_u8(b'C');
|
||||
res.put_i32(cmd.len() as i32 + 4);
|
||||
res.put(cmd);
|
||||
res
|
||||
}
|
||||
|
||||
/// Write all data in the buffer to the TcpStream.
|
||||
pub async fn write_all(stream: &mut TcpStream, buf: BytesMut) -> Result<(), Error> {
|
||||
pub async fn write_all<S>(stream: &mut S, buf: BytesMut) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
match stream.write_all(&buf).await {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
@@ -164,7 +471,10 @@ pub async fn write_all(stream: &mut TcpStream, buf: BytesMut) -> Result<(), Erro
|
||||
}
|
||||
|
||||
/// Write all the data in the buffer to the TcpStream, write owned half (see mpsc).
|
||||
pub async fn write_all_half(stream: &mut OwnedWriteHalf, buf: BytesMut) -> Result<(), Error> {
|
||||
pub async fn write_all_half<S>(stream: &mut S, buf: BytesMut) -> Result<(), Error>
|
||||
where
|
||||
S: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||
{
|
||||
match stream.write_all(&buf).await {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
@@ -172,7 +482,10 @@ pub async fn write_all_half(stream: &mut OwnedWriteHalf, buf: BytesMut) -> Resul
|
||||
}
|
||||
|
||||
/// Read a complete message from the socket.
|
||||
pub async fn read_message(stream: &mut BufReader<OwnedReadHalf>) -> Result<BytesMut, Error> {
|
||||
pub async fn read_message<S>(stream: &mut S) -> Result<BytesMut, Error>
|
||||
where
|
||||
S: tokio::io::AsyncRead + std::marker::Unpin,
|
||||
{
|
||||
let code = match stream.read_u8().await {
|
||||
Ok(code) => code,
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
@@ -198,3 +511,20 @@ pub async fn read_message(stream: &mut BufReader<OwnedReadHalf>) -> Result<Bytes
|
||||
|
||||
Ok(bytes)
|
||||
}
|
||||
|
||||
pub fn server_paramater_message(key: &str, value: &str) -> BytesMut {
|
||||
let mut server_info = BytesMut::new();
|
||||
|
||||
let null_byte_size = 1;
|
||||
let len: usize =
|
||||
mem::size_of::<i32>() + key.len() + null_byte_size + value.len() + null_byte_size;
|
||||
|
||||
server_info.put_slice("S".as_bytes());
|
||||
server_info.put_i32(len.try_into().unwrap());
|
||||
server_info.put_slice(key.as_bytes());
|
||||
server_info.put_bytes(0, 1);
|
||||
server_info.put_slice(value.as_bytes());
|
||||
server_info.put_bytes(0, 1);
|
||||
|
||||
return server_info;
|
||||
}
|
||||
|
||||
612
src/pool.rs
@@ -1,222 +1,538 @@
|
||||
/// Pooling and failover and banlist.
|
||||
use arc_swap::ArcSwap;
|
||||
use async_trait::async_trait;
|
||||
use bb8::{ManageConnection, Pool, PooledConnection};
|
||||
use bytes::BytesMut;
|
||||
use chrono::naive::NaiveDateTime;
|
||||
|
||||
use crate::config::{Address, Config, User};
|
||||
use crate::errors::Error;
|
||||
use crate::server::Server;
|
||||
|
||||
use log::{debug, error, info, warn};
|
||||
use once_cell::sync::Lazy;
|
||||
use parking_lot::{Mutex, RwLock};
|
||||
use rand::seq::SliceRandom;
|
||||
use rand::thread_rng;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc, Mutex,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
// Banlist: bad servers go in here.
|
||||
pub type BanList = Arc<Mutex<Vec<HashMap<Address, NaiveDateTime>>>>;
|
||||
pub type Counter = Arc<AtomicUsize>;
|
||||
pub type ClientServerMap = Arc<Mutex<HashMap<(i32, i32), (i32, i32, String, String)>>>;
|
||||
use crate::config::{get_config, Address, Role, User};
|
||||
use crate::errors::Error;
|
||||
|
||||
use crate::server::Server;
|
||||
use crate::sharding::ShardingFunction;
|
||||
use crate::stats::{get_reporter, Reporter};
|
||||
|
||||
pub type BanList = Arc<RwLock<Vec<HashMap<Address, NaiveDateTime>>>>;
|
||||
pub type ClientServerMap = Arc<Mutex<HashMap<(i32, i32), (i32, i32, String, u16)>>>;
|
||||
pub type PoolMap = HashMap<(String, String), ConnectionPool>;
|
||||
/// The connection pool, globally available.
|
||||
/// This is atomic and safe and read-optimized.
|
||||
/// The pool is recreated dynamically when the config is reloaded.
|
||||
pub static POOLS: Lazy<ArcSwap<PoolMap>> = Lazy::new(|| ArcSwap::from_pointee(HashMap::default()));
|
||||
|
||||
/// Pool mode:
|
||||
/// - transaction: server serves one transaction,
|
||||
/// - session: server is attached to the client.
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub enum PoolMode {
|
||||
Session,
|
||||
Transaction,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for PoolMode {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
match *self {
|
||||
PoolMode::Session => write!(f, "session"),
|
||||
PoolMode::Transaction => write!(f, "transaction"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Pool settings.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct PoolSettings {
|
||||
/// Transaction or Session.
|
||||
pub pool_mode: PoolMode,
|
||||
|
||||
// Number of shards.
|
||||
pub shards: usize,
|
||||
|
||||
// Connecting user.
|
||||
pub user: User,
|
||||
|
||||
// Default server role to connect to.
|
||||
pub default_role: Option<Role>,
|
||||
|
||||
// Enable/disable query parser.
|
||||
pub query_parser_enabled: bool,
|
||||
|
||||
// Read from the primary as well or not.
|
||||
pub primary_reads_enabled: bool,
|
||||
|
||||
// Sharding function.
|
||||
pub sharding_function: ShardingFunction,
|
||||
}
|
||||
|
||||
impl Default for PoolSettings {
|
||||
fn default() -> PoolSettings {
|
||||
PoolSettings {
|
||||
pool_mode: PoolMode::Transaction,
|
||||
shards: 1,
|
||||
user: User::default(),
|
||||
default_role: None,
|
||||
query_parser_enabled: false,
|
||||
primary_reads_enabled: true,
|
||||
sharding_function: ShardingFunction::PgBigintHash,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The globally accessible connection pool.
|
||||
#[derive(Clone, Debug, Default)]
|
||||
pub struct ConnectionPool {
|
||||
/// The pools handled internally by bb8.
|
||||
databases: Vec<Vec<Pool<ServerPool>>>,
|
||||
|
||||
/// The addresses (host, port, role) to handle
|
||||
/// failover and load balancing deterministically.
|
||||
addresses: Vec<Vec<Address>>,
|
||||
round_robin: Counter,
|
||||
|
||||
/// List of banned addresses (see above)
|
||||
/// that should not be queried.
|
||||
banlist: BanList,
|
||||
healthcheck_timeout: u64,
|
||||
ban_time: i64,
|
||||
|
||||
/// The statistics aggregator runs in a separate task
|
||||
/// and receives stats from clients, servers, and the pool.
|
||||
stats: Reporter,
|
||||
|
||||
/// The server information (K messages) have to be passed to the
|
||||
/// clients on startup. We pre-connect to all shards and replicas
|
||||
/// on pool creation and save the K messages here.
|
||||
server_info: BytesMut,
|
||||
|
||||
/// Pool configuration.
|
||||
pub settings: PoolSettings,
|
||||
}
|
||||
|
||||
impl ConnectionPool {
|
||||
/// Construct the connection pool from a config file.
|
||||
pub async fn from_config(config: Config, client_server_map: ClientServerMap) -> ConnectionPool {
|
||||
let mut shards = Vec::new();
|
||||
let mut addresses = Vec::new();
|
||||
let mut banlist = Vec::new();
|
||||
let mut shard_ids = config
|
||||
.shards
|
||||
.clone()
|
||||
.into_keys()
|
||||
.map(|x| x.to_string())
|
||||
.collect::<Vec<String>>();
|
||||
shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap());
|
||||
/// Construct the connection pool from the configuration.
|
||||
pub async fn from_config(client_server_map: ClientServerMap) -> Result<(), Error> {
|
||||
let config = get_config();
|
||||
|
||||
for shard in shard_ids {
|
||||
let shard = &config.shards[&shard];
|
||||
let mut pools = Vec::new();
|
||||
let mut replica_addresses = Vec::new();
|
||||
let mut new_pools = HashMap::new();
|
||||
let mut address_id = 0;
|
||||
|
||||
for server in &shard.servers {
|
||||
let address = Address {
|
||||
host: server.0.clone(),
|
||||
port: server.1.to_string(),
|
||||
for (pool_name, pool_config) in &config.pools {
|
||||
// There is one pool per database/user pair.
|
||||
for (_, user) in &pool_config.users {
|
||||
let mut shards = Vec::new();
|
||||
let mut addresses = Vec::new();
|
||||
let mut banlist = Vec::new();
|
||||
let mut shard_ids = pool_config
|
||||
.shards
|
||||
.clone()
|
||||
.into_keys()
|
||||
.map(|x| x.to_string())
|
||||
.collect::<Vec<String>>();
|
||||
|
||||
// Sort by shard number to ensure consistency.
|
||||
shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap());
|
||||
|
||||
for shard_idx in &shard_ids {
|
||||
let shard = &pool_config.shards[shard_idx];
|
||||
let mut pools = Vec::new();
|
||||
let mut servers = Vec::new();
|
||||
let mut address_index = 0;
|
||||
let mut replica_number = 0;
|
||||
|
||||
for server in shard.servers.iter() {
|
||||
let role = match server.2.as_ref() {
|
||||
"primary" => Role::Primary,
|
||||
"replica" => Role::Replica,
|
||||
_ => {
|
||||
error!("Config error: server role can be 'primary' or 'replica', have: '{}'. Defaulting to 'replica'.", server.2);
|
||||
Role::Replica
|
||||
}
|
||||
};
|
||||
|
||||
let address = Address {
|
||||
id: address_id,
|
||||
database: shard.database.clone(),
|
||||
search_path: shard.search_path.clone(),
|
||||
host: server.0.clone(),
|
||||
port: server.1 as u16,
|
||||
role: role,
|
||||
address_index,
|
||||
replica_number,
|
||||
shard: shard_idx.parse::<usize>().unwrap(),
|
||||
username: user.username.clone(),
|
||||
pool_name: pool_name.clone(),
|
||||
};
|
||||
|
||||
address_id += 1;
|
||||
address_index += 1;
|
||||
|
||||
if role == Role::Replica {
|
||||
replica_number += 1;
|
||||
}
|
||||
|
||||
let manager = ServerPool::new(
|
||||
address.clone(),
|
||||
user.clone(),
|
||||
&shard.database,
|
||||
client_server_map.clone(),
|
||||
get_reporter(),
|
||||
);
|
||||
|
||||
let pool = Pool::builder()
|
||||
.max_size(user.pool_size)
|
||||
.connection_timeout(std::time::Duration::from_millis(
|
||||
config.general.connect_timeout,
|
||||
))
|
||||
.test_on_check_out(false)
|
||||
.build(manager)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
pools.push(pool);
|
||||
servers.push(address);
|
||||
}
|
||||
|
||||
shards.push(pools);
|
||||
addresses.push(servers);
|
||||
banlist.push(HashMap::new());
|
||||
}
|
||||
|
||||
assert_eq!(shards.len(), addresses.len());
|
||||
|
||||
let mut pool = ConnectionPool {
|
||||
databases: shards,
|
||||
addresses: addresses,
|
||||
banlist: Arc::new(RwLock::new(banlist)),
|
||||
stats: get_reporter(),
|
||||
server_info: BytesMut::new(),
|
||||
settings: PoolSettings {
|
||||
pool_mode: match pool_config.pool_mode.as_str() {
|
||||
"transaction" => PoolMode::Transaction,
|
||||
"session" => PoolMode::Session,
|
||||
_ => unreachable!(),
|
||||
},
|
||||
// shards: pool_config.shards.clone(),
|
||||
shards: shard_ids.len(),
|
||||
user: user.clone(),
|
||||
default_role: match pool_config.default_role.as_str() {
|
||||
"any" => None,
|
||||
"replica" => Some(Role::Replica),
|
||||
"primary" => Some(Role::Primary),
|
||||
_ => unreachable!(),
|
||||
},
|
||||
query_parser_enabled: pool_config.query_parser_enabled.clone(),
|
||||
primary_reads_enabled: pool_config.primary_reads_enabled,
|
||||
sharding_function: match pool_config.sharding_function.as_str() {
|
||||
"pg_bigint_hash" => ShardingFunction::PgBigintHash,
|
||||
"sha1" => ShardingFunction::Sha1,
|
||||
_ => unreachable!(),
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
let manager = ServerPool::new(
|
||||
address.clone(),
|
||||
config.user.clone(),
|
||||
&shard.database,
|
||||
client_server_map.clone(),
|
||||
);
|
||||
// Connect to the servers to make sure pool configuration is valid
|
||||
// before setting it globally.
|
||||
match pool.validate().await {
|
||||
Ok(_) => (),
|
||||
Err(err) => {
|
||||
error!("Could not validate connection pool: {:?}", err);
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
|
||||
let pool = Pool::builder()
|
||||
.max_size(config.general.pool_size)
|
||||
.connection_timeout(std::time::Duration::from_millis(
|
||||
config.general.connect_timeout,
|
||||
))
|
||||
.test_on_check_out(false)
|
||||
.build(manager)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
pools.push(pool);
|
||||
replica_addresses.push(address);
|
||||
// There is one pool per database/user pair.
|
||||
new_pools.insert((pool_name.clone(), user.username.clone()), pool);
|
||||
}
|
||||
|
||||
shards.push(pools);
|
||||
addresses.push(replica_addresses);
|
||||
banlist.push(HashMap::new());
|
||||
}
|
||||
|
||||
ConnectionPool {
|
||||
databases: shards,
|
||||
addresses: addresses,
|
||||
round_robin: Arc::new(AtomicUsize::new(0)),
|
||||
banlist: Arc::new(Mutex::new(banlist)),
|
||||
healthcheck_timeout: config.general.healthcheck_timeout,
|
||||
ban_time: config.general.ban_time,
|
||||
POOLS.store(Arc::new(new_pools.clone()));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Connect to all shards and grab server information.
|
||||
/// Return server information we will pass to the clients
|
||||
/// when they connect.
|
||||
/// This also warms up the pool for clients that connect when
|
||||
/// the pooler starts up.
|
||||
async fn validate(&mut self) -> Result<(), Error> {
|
||||
let mut server_infos = Vec::new();
|
||||
for shard in 0..self.shards() {
|
||||
for server in 0..self.servers(shard) {
|
||||
let connection = match self.databases[shard][server].get().await {
|
||||
Ok(conn) => conn,
|
||||
Err(err) => {
|
||||
error!("Shard {} down or misconfigured: {:?}", shard, err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let proxy = connection;
|
||||
let server = &*proxy;
|
||||
let server_info = server.server_info();
|
||||
|
||||
if server_infos.len() > 0 {
|
||||
// Compare against the last server checked.
|
||||
if server_info != server_infos[server_infos.len() - 1] {
|
||||
warn!(
|
||||
"{:?} has different server configuration than the last server",
|
||||
proxy.address()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
server_infos.push(server_info);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: compare server information to make sure
|
||||
// all shards are running identical configurations.
|
||||
if server_infos.len() == 0 {
|
||||
return Err(Error::AllServersDown);
|
||||
}
|
||||
|
||||
// We're assuming all servers are identical.
|
||||
// TODO: not true.
|
||||
self.server_info = server_infos[0].clone();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a connection from the pool.
|
||||
pub async fn get(
|
||||
&self,
|
||||
shard: Option<usize>,
|
||||
shard: usize, // shard number
|
||||
role: Option<Role>, // primary or replica
|
||||
process_id: i32, // client id
|
||||
) -> Result<(PooledConnection<'_, ServerPool>, Address), Error> {
|
||||
// Set this to false to gain ~3-4% speed.
|
||||
let with_health_check = true;
|
||||
let now = Instant::now();
|
||||
let mut candidates: Vec<&Address> = self.addresses[shard]
|
||||
.iter()
|
||||
.filter(|address| address.role == role)
|
||||
.collect();
|
||||
|
||||
let shard = match shard {
|
||||
Some(shard) => shard,
|
||||
None => 0, // TODO: pick a shard at random
|
||||
};
|
||||
// Random load balancing
|
||||
candidates.shuffle(&mut thread_rng());
|
||||
|
||||
loop {
|
||||
let index =
|
||||
self.round_robin.fetch_add(1, Ordering::SeqCst) % self.databases[shard].len();
|
||||
let address = self.addresses[shard][index].clone();
|
||||
let healthcheck_timeout = get_config().general.healthcheck_timeout;
|
||||
let healthcheck_delay = get_config().general.healthcheck_delay as u128;
|
||||
|
||||
if self.is_banned(&address, shard) {
|
||||
while !candidates.is_empty() {
|
||||
// Get the next candidate
|
||||
let address = match candidates.pop() {
|
||||
Some(address) => address,
|
||||
None => break,
|
||||
};
|
||||
|
||||
if self.is_banned(&address, role) {
|
||||
debug!("Address {:?} is banned", address);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Indicate we're waiting on a server connection from a pool.
|
||||
self.stats.client_waiting(process_id, address.id);
|
||||
|
||||
// Check if we can connect
|
||||
// TODO: implement query wait timeout, i.e. time to get a conn from the pool
|
||||
let mut conn = match self.databases[shard][index].get().await {
|
||||
let mut conn = match self.databases[address.shard][address.address_index]
|
||||
.get()
|
||||
.await
|
||||
{
|
||||
Ok(conn) => conn,
|
||||
Err(err) => {
|
||||
println!(">> Banning replica {}, error: {:?}", index, err);
|
||||
self.ban(&address, shard);
|
||||
error!("Banning instance {:?}, error: {:?}", address, err);
|
||||
self.ban(&address, process_id);
|
||||
self.stats
|
||||
.checkout_time(now.elapsed().as_micros(), process_id, address.id);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if !with_health_check {
|
||||
return Ok((conn, address));
|
||||
}
|
||||
|
||||
// // Check if this server is alive with a health check
|
||||
// // Check if this server is alive with a health check.
|
||||
let server = &mut *conn;
|
||||
|
||||
// Will return error if timestamp is greater than current system time, which it should never be set to
|
||||
let require_healthcheck =
|
||||
server.last_activity().elapsed().unwrap().as_millis() > healthcheck_delay;
|
||||
|
||||
// Do not issue a health check unless it's been a little while
|
||||
// since we last checked the server is ok.
|
||||
// Health checks are pretty expensive.
|
||||
if !require_healthcheck {
|
||||
self.stats
|
||||
.checkout_time(now.elapsed().as_micros(), process_id, address.id);
|
||||
self.stats.server_active(conn.process_id(), address.id);
|
||||
return Ok((conn, address.clone()));
|
||||
}
|
||||
|
||||
debug!("Running health check on server {:?}", address);
|
||||
|
||||
self.stats.server_tested(server.process_id(), address.id);
|
||||
|
||||
match tokio::time::timeout(
|
||||
tokio::time::Duration::from_millis(self.healthcheck_timeout),
|
||||
server.query("SELECT 1"),
|
||||
tokio::time::Duration::from_millis(healthcheck_timeout),
|
||||
server.query(";"), // Cheap query (query parser not used in PG)
|
||||
)
|
||||
.await
|
||||
{
|
||||
// Check if health check succeeded
|
||||
// Check if health check succeeded.
|
||||
Ok(res) => match res {
|
||||
Ok(_) => return Ok((conn, address)),
|
||||
Err(_) => {
|
||||
println!(
|
||||
">> Banning replica {} because of failed health check",
|
||||
index
|
||||
Ok(_) => {
|
||||
self.stats
|
||||
.checkout_time(now.elapsed().as_micros(), process_id, address.id);
|
||||
self.stats.server_active(conn.process_id(), address.id);
|
||||
return Ok((conn, address.clone()));
|
||||
}
|
||||
|
||||
// Health check failed.
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Banning instance {:?} because of failed health check, {:?}",
|
||||
address, err
|
||||
);
|
||||
self.ban(&address, shard);
|
||||
|
||||
// Don't leave a bad connection in the pool.
|
||||
server.mark_bad();
|
||||
|
||||
self.ban(&address, process_id);
|
||||
continue;
|
||||
}
|
||||
},
|
||||
// Health check never came back, database is really really down
|
||||
Err(_) => {
|
||||
println!(
|
||||
">> Banning replica {} because of health check timeout",
|
||||
index
|
||||
|
||||
// Health check timed out.
|
||||
Err(err) => {
|
||||
error!(
|
||||
"Banning instance {:?} because of health check timeout, {:?}",
|
||||
address, err
|
||||
);
|
||||
self.ban(&address, shard);
|
||||
// Don't leave a bad connection in the pool.
|
||||
server.mark_bad();
|
||||
|
||||
self.ban(&address, process_id);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(Error::AllServersDown)
|
||||
}
|
||||
|
||||
/// Ban an address (i.e. replica). It no longer will serve
|
||||
/// traffic for any new transactions. Existing transactions on that replica
|
||||
/// will finish successfully or error out to the clients.
|
||||
pub fn ban(&self, address: &Address, shard: usize) {
|
||||
println!(">> Banning {:?}", address);
|
||||
pub fn ban(&self, address: &Address, process_id: i32) {
|
||||
self.stats.client_disconnecting(process_id, address.id);
|
||||
|
||||
error!("Banning {:?}", address);
|
||||
|
||||
let now = chrono::offset::Utc::now().naive_utc();
|
||||
let mut guard = self.banlist.lock().unwrap();
|
||||
guard[shard].insert(address.clone(), now);
|
||||
let mut guard = self.banlist.write();
|
||||
guard[address.shard].insert(address.clone(), now);
|
||||
}
|
||||
|
||||
/// Clear the replica to receive traffic again. Takes effect immediately
|
||||
/// for all new transactions.
|
||||
pub fn _unban(&self, address: &Address, shard: usize) {
|
||||
let mut guard = self.banlist.lock().unwrap();
|
||||
guard[shard].remove(address);
|
||||
pub fn _unban(&self, address: &Address) {
|
||||
let mut guard = self.banlist.write();
|
||||
guard[address.shard].remove(address);
|
||||
}
|
||||
|
||||
/// Check if a replica can serve traffic. If all replicas are banned,
|
||||
/// we unban all of them. Better to try than not to.
|
||||
pub fn is_banned(&self, address: &Address, shard: usize) -> bool {
|
||||
let mut guard = self.banlist.lock().unwrap();
|
||||
pub fn is_banned(&self, address: &Address, role: Option<Role>) -> bool {
|
||||
let replicas_available = match role {
|
||||
Some(Role::Replica) => self.addresses[address.shard]
|
||||
.iter()
|
||||
.filter(|addr| addr.role == Role::Replica)
|
||||
.count(),
|
||||
None => self.addresses[address.shard].len(),
|
||||
Some(Role::Primary) => return false, // Primary cannot be banned.
|
||||
};
|
||||
|
||||
// Everything is banned, nothing is banned
|
||||
if guard[shard].len() == self.databases[shard].len() {
|
||||
guard[shard].clear();
|
||||
debug!("Available targets for {:?}: {}", role, replicas_available);
|
||||
|
||||
let guard = self.banlist.read();
|
||||
|
||||
// Everything is banned = nothing is banned.
|
||||
if guard[address.shard].len() == replicas_available {
|
||||
drop(guard);
|
||||
println!(">> Unbanning all replicas.");
|
||||
let mut guard = self.banlist.write();
|
||||
guard[address.shard].clear();
|
||||
drop(guard);
|
||||
warn!("Unbanning all replicas.");
|
||||
return false;
|
||||
}
|
||||
|
||||
// I expect this to miss 99.9999% of the time.
|
||||
match guard[shard].get(address) {
|
||||
match guard[address.shard].get(address) {
|
||||
Some(timestamp) => {
|
||||
let now = chrono::offset::Utc::now().naive_utc();
|
||||
if now.timestamp() - timestamp.timestamp() > self.ban_time {
|
||||
// 1 minute
|
||||
guard[shard].remove(address);
|
||||
let config = get_config();
|
||||
|
||||
// Ban expired.
|
||||
if now.timestamp() - timestamp.timestamp() > config.general.ban_time {
|
||||
drop(guard);
|
||||
warn!("Unbanning {:?}", address);
|
||||
let mut guard = self.banlist.write();
|
||||
guard[address.shard].remove(address);
|
||||
false
|
||||
} else {
|
||||
debug!("{:?} is banned", address);
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
None => false,
|
||||
None => {
|
||||
debug!("{:?} is ok", address);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the number of configured shards.
|
||||
pub fn shards(&self) -> usize {
|
||||
self.databases.len()
|
||||
}
|
||||
|
||||
/// Get the number of servers (primary and replicas)
|
||||
/// configured for a shard.
|
||||
pub fn servers(&self, shard: usize) -> usize {
|
||||
self.addresses[shard].len()
|
||||
}
|
||||
|
||||
/// Get the total number of servers (databases) we are connected to.
|
||||
pub fn databases(&self) -> usize {
|
||||
let mut databases = 0;
|
||||
for shard in 0..self.shards() {
|
||||
databases += self.servers(shard);
|
||||
}
|
||||
databases
|
||||
}
|
||||
|
||||
/// Get pool state for a particular shard server as reported by bb8.
|
||||
pub fn pool_state(&self, shard: usize, server: usize) -> bb8::State {
|
||||
self.databases[shard][server].state()
|
||||
}
|
||||
|
||||
/// Get the address information for a shard server.
|
||||
pub fn address(&self, shard: usize, server: usize) -> &Address {
|
||||
&self.addresses[shard][server]
|
||||
}
|
||||
|
||||
pub fn server_info(&self) -> BytesMut {
|
||||
self.server_info.clone()
|
||||
}
|
||||
}
|
||||
|
||||
/// Wrapper for the bb8 connection pool.
|
||||
pub struct ServerPool {
|
||||
address: Address,
|
||||
user: User,
|
||||
database: String,
|
||||
client_server_map: ClientServerMap,
|
||||
stats: Reporter,
|
||||
}
|
||||
|
||||
impl ServerPool {
|
||||
@@ -225,12 +541,14 @@ impl ServerPool {
|
||||
user: User,
|
||||
database: &str,
|
||||
client_server_map: ClientServerMap,
|
||||
stats: Reporter,
|
||||
) -> ServerPool {
|
||||
ServerPool {
|
||||
address: address,
|
||||
user: user,
|
||||
database: database.to_string(),
|
||||
client_server_map: client_server_map,
|
||||
stats: stats,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -242,17 +560,38 @@ impl ManageConnection for ServerPool {
|
||||
|
||||
/// Attempts to create a new connection.
|
||||
async fn connect(&self) -> Result<Self::Connection, Self::Error> {
|
||||
println!(">> Creating a new connection for the pool");
|
||||
info!(
|
||||
"Creating a new connection to {:?} using user {:?}",
|
||||
self.address.name(),
|
||||
self.user.username
|
||||
);
|
||||
|
||||
Server::startup(
|
||||
&self.address.host,
|
||||
&self.address.port,
|
||||
&self.user.name,
|
||||
&self.user.password,
|
||||
// Put a temporary process_id into the stats
|
||||
// for server login.
|
||||
let process_id = rand::random::<i32>();
|
||||
self.stats.server_login(process_id, self.address.id);
|
||||
|
||||
// Connect to the PostgreSQL server.
|
||||
match Server::startup(
|
||||
&self.address,
|
||||
&self.user,
|
||||
&self.database,
|
||||
self.client_server_map.clone(),
|
||||
self.stats.clone(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(conn) => {
|
||||
// Remove the temporary process_id from the stats.
|
||||
self.stats.server_disconnecting(process_id, self.address.id);
|
||||
Ok(conn)
|
||||
}
|
||||
Err(err) => {
|
||||
// Remove the temporary process_id from the stats.
|
||||
self.stats.server_disconnecting(process_id, self.address.id);
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Determines if the connection is still connected to the database.
|
||||
@@ -265,3 +604,24 @@ impl ManageConnection for ServerPool {
|
||||
conn.is_bad()
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the connection pool
|
||||
pub fn get_pool(db: String, user: String) -> Option<ConnectionPool> {
|
||||
match get_all_pools().get(&(db, user)) {
|
||||
Some(pool) => Some(pool.clone()),
|
||||
None => None,
|
||||
}
|
||||
}
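A minimal call-site sketch, assuming a pool was registered under the ("shard_db", "app_user") pair (both names are made up for illustration):

// Look up the shared pool for this client's database/user combination.
if let Some(pool) = get_pool("shard_db".to_string(), "app_user".to_string()) {
    println!("pool has {} shards and {} servers total", pool.shards(), pool.databases());
}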
|
||||
|
||||
/// How many total servers we have in the config.
|
||||
pub fn get_number_of_addresses() -> usize {
|
||||
get_all_pools()
|
||||
.iter()
|
||||
.map(|(_, pool)| pool.databases())
|
||||
.sum()
|
||||
}
|
||||
|
||||
/// Get a pointer to all configured pools.
|
||||
pub fn get_all_pools() -> HashMap<(String, String), ConnectionPool> {
|
||||
return (*(*POOLS.load())).clone();
|
||||
}
|
||||
|
||||
src/prometheus.rs (new file, 210 lines)
@@ -0,0 +1,210 @@
|
||||
use hyper::service::{make_service_fn, service_fn};
|
||||
use hyper::{Body, Method, Request, Response, Server, StatusCode};
|
||||
use log::{error, info, warn};
|
||||
use phf::phf_map;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
use crate::config::Address;
|
||||
use crate::pool::get_all_pools;
|
||||
use crate::stats::get_stats;
|
||||
|
||||
struct MetricHelpType {
|
||||
help: &'static str,
|
||||
ty: &'static str,
|
||||
}
|
||||
|
||||
// reference for metric types: https://prometheus.io/docs/concepts/metric_types/
|
||||
// counters only increase
|
||||
// gauges can arbitrarily increase or decrease
|
||||
static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = phf_map! {
|
||||
"total_query_count" => MetricHelpType {
|
||||
help: "Number of queries sent by all clients",
|
||||
ty: "counter",
|
||||
},
|
||||
"total_query_time" => MetricHelpType {
|
||||
help: "Total amount of time for queries to execute",
|
||||
ty: "counter",
|
||||
},
|
||||
"total_received" => MetricHelpType {
|
||||
help: "Number of bytes received from the server",
|
||||
ty: "counter",
|
||||
},
|
||||
"total_sent" => MetricHelpType {
|
||||
help: "Number of bytes sent to the server",
|
||||
ty: "counter",
|
||||
},
|
||||
"total_xact_count" => MetricHelpType {
|
||||
help: "Total number of transactions started by the client",
|
||||
ty: "counter",
|
||||
},
|
||||
"total_xact_time" => MetricHelpType {
|
||||
help: "Total amount of time for all transactions to execute",
|
||||
ty: "counter",
|
||||
},
|
||||
"total_wait_time" => MetricHelpType {
|
||||
help: "Total time client waited for a server connection",
|
||||
ty: "counter",
|
||||
},
|
||||
"avg_query_count" => MetricHelpType {
|
||||
help: "Average of total_query_count every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"avg_query_time" => MetricHelpType {
|
||||
help: "Average time taken for queries to execute every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"avg_recv" => MetricHelpType {
|
||||
help: "Average of total_received bytes every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"avg_sent" => MetricHelpType {
|
||||
help: "Average of total_sent bytes every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"avg_xact_count" => MetricHelpType {
|
||||
help: "Average of total_xact_count every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"avg_xact_time" => MetricHelpType {
|
||||
help: "Average of total_xact_time every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"avg_wait_time" => MetricHelpType {
|
||||
help: "Average of total_wait_time every 15 seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"maxwait_us" => MetricHelpType {
|
||||
help: "The time a client waited for a server connection in microseconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"maxwait" => MetricHelpType {
|
||||
help: "The time a client waited for a server connection in seconds",
|
||||
ty: "gauge",
|
||||
},
|
||||
"cl_waiting" => MetricHelpType {
|
||||
help: "How many clients are waiting for a connection from the pool",
|
||||
ty: "gauge",
|
||||
},
|
||||
"cl_active" => MetricHelpType {
|
||||
help: "How many clients are actively communicating with a server",
|
||||
ty: "gauge",
|
||||
},
|
||||
"cl_idle" => MetricHelpType {
|
||||
help: "How many clients are idle",
|
||||
ty: "gauge",
|
||||
},
|
||||
"sv_idle" => MetricHelpType {
|
||||
help: "How many server connections are idle",
|
||||
ty: "gauge",
|
||||
},
|
||||
"sv_active" => MetricHelpType {
|
||||
help: "How many server connections are actively communicating with a client",
|
||||
ty: "gauge",
|
||||
},
|
||||
"sv_login" => MetricHelpType {
|
||||
help: "How many server connections are currently being created",
|
||||
ty: "gauge",
|
||||
},
|
||||
"sv_tested" => MetricHelpType {
|
||||
help: "How many server connections are currently waiting on a health check to succeed",
|
||||
ty: "gauge",
|
||||
},
|
||||
};
|
||||
|
||||
struct PrometheusMetric {
|
||||
name: String,
|
||||
help: String,
|
||||
ty: String,
|
||||
labels: HashMap<&'static str, String>,
|
||||
value: i64,
|
||||
}
|
||||
|
||||
impl fmt::Display for PrometheusMetric {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let formatted_labels = self
|
||||
.labels
|
||||
.iter()
|
||||
.map(|(key, value)| format!("{}=\"{}\"", key, value))
|
||||
.collect::<Vec<_>>()
|
||||
.join(",");
|
||||
write!(
|
||||
f,
|
||||
"# HELP {name} {help}\n# TYPE {name} {ty}\n{name}{{{formatted_labels}}} {value}\n",
|
||||
name = format_args!("pgcat_{}", self.name),
|
||||
help = self.help,
|
||||
ty = self.ty,
|
||||
formatted_labels = formatted_labels,
|
||||
value = self.value
|
||||
)
|
||||
}
|
||||
}
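For reference, one rendered metric from the Display implementation above looks roughly like the following (the label values are invented, and label order is arbitrary because the labels come out of a HashMap):

# HELP pgcat_total_query_count Number of queries sent by all clients
# TYPE pgcat_total_query_count counter
pgcat_total_query_count{host="10.0.0.1",shard="0",role="primary",database="orders"} 42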
|
||||
|
||||
impl PrometheusMetric {
|
||||
fn new(address: &Address, name: &str, value: i64) -> Option<PrometheusMetric> {
|
||||
let mut labels = HashMap::new();
|
||||
labels.insert("host", address.host.clone());
|
||||
labels.insert("shard", address.shard.to_string());
|
||||
labels.insert("role", address.role.to_string());
|
||||
labels.insert("database", address.database.to_string());
|
||||
|
||||
METRIC_HELP_AND_TYPES_LOOKUP
|
||||
.get(name)
|
||||
.map(|metric| PrometheusMetric {
|
||||
name: name.to_owned(),
|
||||
help: metric.help.to_owned(),
|
||||
ty: metric.ty.to_owned(),
|
||||
labels,
|
||||
value,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hyper::http::Error> {
|
||||
match (request.method(), request.uri().path()) {
|
||||
(&Method::GET, "/metrics") => {
|
||||
let stats = get_stats();
|
||||
|
||||
let mut lines = Vec::new();
|
||||
for (_, pool) in get_all_pools() {
|
||||
for shard in 0..pool.shards() {
|
||||
for server in 0..pool.servers(shard) {
|
||||
let address = pool.address(shard, server);
|
||||
if let Some(address_stats) = stats.get(&address.id) {
|
||||
for (key, value) in address_stats.iter() {
|
||||
if let Some(prometheus_metric) =
|
||||
PrometheusMetric::new(address, key, *value)
|
||||
{
|
||||
lines.push(prometheus_metric.to_string());
|
||||
} else {
|
||||
warn!("Metric {} not implemented for {}", key, address.name());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Response::builder()
|
||||
.header("content-type", "text/plain; version=0.0.4")
|
||||
.body(lines.join("\n").into())
|
||||
}
|
||||
_ => Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body("".into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_metric_server(http_addr: SocketAddr) {
|
||||
let http_service_factory =
|
||||
make_service_fn(|_conn| async { Ok::<_, hyper::Error>(service_fn(prometheus_stats)) });
|
||||
let server = Server::bind(&http_addr.into()).serve(http_service_factory);
|
||||
info!(
|
||||
"Exposing prometheus metrics on http://{}/metrics.",
|
||||
http_addr
|
||||
);
|
||||
if let Err(e) = server.await {
|
||||
error!("Failed to run HTTP server: {}.", e);
|
||||
}
|
||||
}
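A sketch of how this could be wired up from the binary's entrypoint; the address and port are assumptions, and the real call site lives elsewhere (e.g. main.rs), not in this file:

// Run the exporter on its own task so it never blocks the pooler itself.
let http_addr: std::net::SocketAddr = "0.0.0.0:9930".parse().unwrap();
tokio::spawn(async move {
    crate::prometheus::start_metric_server(http_addr).await;
});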
|
||||
src/query_router.rs (new file, 657 lines)
@@ -0,0 +1,657 @@
|
||||
/// Route queries automatically based on explicitly requested
|
||||
/// or implied query characteristics.
|
||||
use bytes::{Buf, BytesMut};
|
||||
use log::{debug, error};
|
||||
use once_cell::sync::OnceCell;
|
||||
use regex::{Regex, RegexSet};
|
||||
use sqlparser::ast::Statement::{Query, StartTransaction};
|
||||
use sqlparser::dialect::PostgreSqlDialect;
|
||||
use sqlparser::parser::Parser;
|
||||
|
||||
use crate::config::Role;
|
||||
use crate::pool::PoolSettings;
|
||||
use crate::sharding::Sharder;
|
||||
|
||||
/// Regexes used to parse custom commands.
|
||||
const CUSTOM_SQL_REGEXES: [&str; 7] = [
|
||||
r"(?i)^ *SET SHARDING KEY TO '?([0-9]+)'? *;? *$",
|
||||
r"(?i)^ *SET SHARD TO '?([0-9]+|ANY)'? *;? *$",
|
||||
r"(?i)^ *SHOW SHARD *;? *$",
|
||||
r"(?i)^ *SET SERVER ROLE TO '(PRIMARY|REPLICA|ANY|AUTO|DEFAULT)' *;? *$",
|
||||
r"(?i)^ *SHOW SERVER ROLE *;? *$",
|
||||
r"(?i)^ *SET PRIMARY READS TO '?(on|off|default)'? *;? *$",
|
||||
r"(?i)^ *SHOW PRIMARY READS *;? *$",
|
||||
];
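For orientation, these are the kinds of statements the patterns above are written to recognize when a client sends them over the simple query protocol (the values are examples; the tests at the bottom of this file exercise the full matrix):

SET SHARDING KEY TO '1234';
SET SHARD TO ANY;
SET SERVER ROLE TO 'replica';
SET PRIMARY READS TO off;
SHOW SERVER ROLE;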
|
||||
|
||||
/// Custom commands.
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub enum Command {
|
||||
SetShardingKey,
|
||||
SetShard,
|
||||
ShowShard,
|
||||
SetServerRole,
|
||||
ShowServerRole,
|
||||
SetPrimaryReads,
|
||||
ShowPrimaryReads,
|
||||
}
|
||||
|
||||
/// Quickly test for match when a query is received.
|
||||
static CUSTOM_SQL_REGEX_SET: OnceCell<RegexSet> = OnceCell::new();
|
||||
|
||||
// Get the value inside the custom command.
|
||||
static CUSTOM_SQL_REGEX_LIST: OnceCell<Vec<Regex>> = OnceCell::new();
|
||||
|
||||
/// The query router.
|
||||
pub struct QueryRouter {
|
||||
/// Which shard we should be talking to right now.
|
||||
active_shard: Option<usize>,
|
||||
|
||||
/// Which server should we be talking to.
|
||||
active_role: Option<Role>,
|
||||
|
||||
/// Should we try to parse queries to route them to replicas or primary automatically
|
||||
query_parser_enabled: bool,
|
||||
|
||||
/// Include the primary into the replica pool for reads.
|
||||
primary_reads_enabled: bool,
|
||||
|
||||
/// Pool configuration.
|
||||
pool_settings: PoolSettings,
|
||||
}
|
||||
|
||||
impl QueryRouter {
|
||||
/// One-time initialization of regexes
|
||||
/// that parse our custom SQL protocol.
|
||||
pub fn setup() -> bool {
|
||||
let set = match RegexSet::new(&CUSTOM_SQL_REGEXES) {
|
||||
Ok(rgx) => rgx,
|
||||
Err(err) => {
|
||||
error!("QueryRouter::setup Could not compile regex set: {:?}", err);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
let list: Vec<_> = CUSTOM_SQL_REGEXES
|
||||
.iter()
|
||||
.map(|rgx| Regex::new(rgx).unwrap())
|
||||
.collect();
|
||||
|
||||
assert_eq!(list.len(), set.len());
|
||||
|
||||
match CUSTOM_SQL_REGEX_LIST.set(list) {
|
||||
Ok(_) => true,
|
||||
Err(_) => return false,
|
||||
};
|
||||
|
||||
match CUSTOM_SQL_REGEX_SET.set(set) {
|
||||
Ok(_) => true,
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new instance of the query router.
|
||||
/// Each client gets its own.
|
||||
pub fn new() -> QueryRouter {
|
||||
QueryRouter {
|
||||
active_shard: None,
|
||||
active_role: None,
|
||||
query_parser_enabled: false,
|
||||
primary_reads_enabled: false,
|
||||
pool_settings: PoolSettings::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Pool settings can change because of a config reload.
|
||||
pub fn update_pool_settings(&mut self, pool_settings: PoolSettings) {
|
||||
self.pool_settings = pool_settings;
|
||||
}
|
||||
|
||||
/// Try to parse a command and execute it.
|
||||
pub fn try_execute_command(&mut self, mut buf: BytesMut) -> Option<(Command, String)> {
|
||||
let code = buf.get_u8() as char;
|
||||
|
||||
// Only simple protocol supported for commands.
|
||||
if code != 'Q' {
|
||||
return None;
|
||||
}
|
||||
|
||||
let len = buf.get_i32() as usize;
|
||||
let query = String::from_utf8_lossy(&buf[..len - 5]).to_string(); // Ignore the terminating NULL.
|
||||
|
||||
let regex_set = match CUSTOM_SQL_REGEX_SET.get() {
|
||||
Some(regex_set) => regex_set,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let regex_list = match CUSTOM_SQL_REGEX_LIST.get() {
|
||||
Some(regex_list) => regex_list,
|
||||
None => return None,
|
||||
};
|
||||
|
||||
let matches: Vec<_> = regex_set.matches(&query).into_iter().collect();
|
||||
|
||||
// This is not a custom query, try to infer which
|
||||
// server it'll go to if the query parser is enabled.
|
||||
if matches.len() != 1 {
|
||||
debug!("Regular query, not a command");
|
||||
return None;
|
||||
}
|
||||
|
||||
let command = match matches[0] {
|
||||
0 => Command::SetShardingKey,
|
||||
1 => Command::SetShard,
|
||||
2 => Command::ShowShard,
|
||||
3 => Command::SetServerRole,
|
||||
4 => Command::ShowServerRole,
|
||||
5 => Command::SetPrimaryReads,
|
||||
6 => Command::ShowPrimaryReads,
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
let mut value = match command {
|
||||
Command::SetShardingKey
|
||||
| Command::SetShard
|
||||
| Command::SetServerRole
|
||||
| Command::SetPrimaryReads => {
|
||||
// Capture value. I know this re-runs the regex engine, but I haven't
|
||||
// figured out a better way just yet. I think I can write a single Regex
|
||||
// that matches all 5 custom SQL patterns, but maybe that's not very legible?
|
||||
//
|
||||
// I think this is faster than running the Regex engine 5 times.
|
||||
match regex_list[matches[0]].captures(&query) {
|
||||
Some(captures) => match captures.get(1) {
|
||||
Some(value) => value.as_str().to_string(),
|
||||
None => return None,
|
||||
},
|
||||
None => return None,
|
||||
}
|
||||
}
|
||||
|
||||
Command::ShowShard => self.shard().to_string(),
|
||||
Command::ShowServerRole => match self.active_role {
|
||||
Some(Role::Primary) => String::from("primary"),
|
||||
Some(Role::Replica) => String::from("replica"),
|
||||
None => {
|
||||
if self.query_parser_enabled {
|
||||
String::from("auto")
|
||||
} else {
|
||||
String::from("any")
|
||||
}
|
||||
}
|
||||
},
|
||||
|
||||
Command::ShowPrimaryReads => match self.primary_reads_enabled {
|
||||
true => String::from("on"),
|
||||
false => String::from("off"),
|
||||
},
|
||||
};
|
||||
|
||||
match command {
|
||||
Command::SetShardingKey => {
|
||||
let sharder = Sharder::new(
|
||||
self.pool_settings.shards,
|
||||
self.pool_settings.sharding_function,
|
||||
);
|
||||
let shard = sharder.shard(value.parse::<i64>().unwrap());
|
||||
self.active_shard = Some(shard);
|
||||
value = shard.to_string();
|
||||
}
|
||||
|
||||
Command::SetShard => {
|
||||
self.active_shard = match value.to_ascii_uppercase().as_ref() {
|
||||
"ANY" => Some(rand::random::<usize>() % self.pool_settings.shards),
|
||||
_ => Some(value.parse::<usize>().unwrap()),
|
||||
};
|
||||
}
|
||||
|
||||
Command::SetServerRole => {
|
||||
self.active_role = match value.to_ascii_lowercase().as_ref() {
|
||||
"primary" => {
|
||||
self.query_parser_enabled = false;
|
||||
Some(Role::Primary)
|
||||
}
|
||||
|
||||
"replica" => {
|
||||
self.query_parser_enabled = false;
|
||||
Some(Role::Replica)
|
||||
}
|
||||
|
||||
"any" => {
|
||||
self.query_parser_enabled = false;
|
||||
None
|
||||
}
|
||||
|
||||
"auto" => {
|
||||
self.query_parser_enabled = true;
|
||||
None
|
||||
}
|
||||
|
||||
"default" => {
|
||||
self.active_role = self.pool_settings.default_role;
|
||||
self.query_parser_enabled = self.query_parser_enabled;
|
||||
self.active_role
|
||||
}
|
||||
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
|
||||
Command::SetPrimaryReads => {
|
||||
if value == "on" {
|
||||
debug!("Setting primary reads to on");
|
||||
self.primary_reads_enabled = true;
|
||||
} else if value == "off" {
|
||||
debug!("Setting primary reads to off");
|
||||
self.primary_reads_enabled = false;
|
||||
} else if value == "default" {
|
||||
debug!("Setting primary reads to default");
|
||||
self.primary_reads_enabled = self.pool_settings.primary_reads_enabled;
|
||||
}
|
||||
}
|
||||
|
||||
_ => (),
|
||||
}
|
||||
|
||||
Some((command, value))
|
||||
}
|
||||
|
||||
/// Try to infer which server to connect to based on the contents of the query.
|
||||
pub fn infer_role(&mut self, mut buf: BytesMut) -> bool {
|
||||
debug!("Inferring role");
|
||||
|
||||
let code = buf.get_u8() as char;
|
||||
let len = buf.get_i32() as usize;
|
||||
|
||||
let query = match code {
|
||||
// Query
|
||||
'Q' => {
|
||||
let query = String::from_utf8_lossy(&buf[..len - 5]).to_string();
|
||||
debug!("Query: '{}'", query);
|
||||
query
|
||||
}
|
||||
|
||||
// Parse (prepared statement)
|
||||
'P' => {
|
||||
let mut start = 0;
|
||||
let mut end;
|
||||
|
||||
// Skip the name of the prepared statement.
|
||||
while buf[start] != 0 && start < buf.len() {
|
||||
start += 1;
|
||||
}
|
||||
start += 1; // Skip terminating null
|
||||
|
||||
// Find the end of the prepared stmt (\0)
|
||||
end = start;
|
||||
while buf[end] != 0 && end < buf.len() {
|
||||
end += 1;
|
||||
}
|
||||
|
||||
let query = String::from_utf8_lossy(&buf[start..end]).to_string();
|
||||
|
||||
debug!("Prepared statement: '{}'", query);
|
||||
|
||||
query.replace("$", "") // Remove placeholders turning them into "values"
|
||||
}
|
||||
|
||||
_ => return false,
|
||||
};
|
||||
|
||||
let ast = match Parser::parse_sql(&PostgreSqlDialect {}, &query) {
|
||||
Ok(ast) => ast,
|
||||
Err(err) => {
|
||||
debug!("{}", err.to_string());
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
if ast.len() == 0 {
|
||||
return false;
|
||||
}
|
||||
|
||||
match ast[0] {
|
||||
// All transactions go to the primary, probably a write.
|
||||
StartTransaction { .. } => {
|
||||
self.active_role = Some(Role::Primary);
|
||||
}
|
||||
|
||||
// Likely a read-only query
|
||||
Query { .. } => {
|
||||
self.active_role = match self.primary_reads_enabled {
|
||||
false => Some(Role::Replica), // If primary should not be receiving reads, use a replica.
|
||||
true => None, // Any server role is fine in this case.
|
||||
}
|
||||
}
|
||||
|
||||
// Likely a write
|
||||
_ => {
|
||||
self.active_role = Some(Role::Primary);
|
||||
}
|
||||
};
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
/// Get the current desired server role we should be talking to.
|
||||
pub fn role(&self) -> Option<Role> {
|
||||
self.active_role
|
||||
}
|
||||
|
||||
/// Get desired shard we should be talking to.
|
||||
pub fn shard(&self) -> usize {
|
||||
match self.active_shard {
|
||||
Some(shard) => shard,
|
||||
None => 0,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_shard(&mut self, shard: usize) {
|
||||
self.active_shard = Some(shard);
|
||||
}
|
||||
|
||||
/// Should we attempt to parse queries?
|
||||
#[allow(dead_code)]
|
||||
pub fn query_parser_enabled(&self) -> bool {
|
||||
self.query_parser_enabled
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::messages::simple_query;
|
||||
use crate::pool::PoolMode;
|
||||
use crate::sharding::ShardingFunction;
|
||||
use bytes::BufMut;
|
||||
|
||||
#[test]
|
||||
fn test_defaults() {
|
||||
QueryRouter::setup();
|
||||
let qr = QueryRouter::new();
|
||||
|
||||
assert_eq!(qr.role(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_infer_role_replica() {
|
||||
QueryRouter::setup();
|
||||
let mut qr = QueryRouter::new();
|
||||
assert!(qr.try_execute_command(simple_query("SET SERVER ROLE TO 'auto'")) != None);
|
||||
assert_eq!(qr.query_parser_enabled(), true);
|
||||
|
||||
assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO off")) != None);
|
||||
|
||||
let queries = vec![
|
||||
simple_query("SELECT * FROM items WHERE id = 5"),
|
||||
simple_query(
|
||||
"SELECT id, name, value FROM items INNER JOIN prices ON item.id = prices.item_id",
|
||||
),
|
||||
simple_query("WITH t AS (SELECT * FROM items) SELECT * FROM t"),
|
||||
];
|
||||
|
||||
for query in queries {
|
||||
// It's a recognized query
|
||||
assert!(qr.infer_role(query));
|
||||
assert_eq!(qr.role(), Some(Role::Replica));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_infer_role_primary() {
|
||||
QueryRouter::setup();
|
||||
let mut qr = QueryRouter::new();
|
||||
|
||||
let queries = vec![
|
||||
simple_query("UPDATE items SET name = 'pumpkin' WHERE id = 5"),
|
||||
simple_query("INSERT INTO items (id, name) VALUES (5, 'pumpkin')"),
|
||||
simple_query("DELETE FROM items WHERE id = 5"),
|
||||
simple_query("BEGIN"), // Transaction start
|
||||
];
|
||||
|
||||
for query in queries {
|
||||
// It's a recognized query
|
||||
assert!(qr.infer_role(query));
|
||||
assert_eq!(qr.role(), Some(Role::Primary));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_infer_role_primary_reads_enabled() {
|
||||
QueryRouter::setup();
|
||||
let mut qr = QueryRouter::new();
|
||||
let query = simple_query("SELECT * FROM items WHERE id = 5");
|
||||
assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO on")) != None);
|
||||
|
||||
assert!(qr.infer_role(query));
|
||||
assert_eq!(qr.role(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_infer_role_parse_prepared() {
|
||||
QueryRouter::setup();
|
||||
let mut qr = QueryRouter::new();
|
||||
qr.try_execute_command(simple_query("SET SERVER ROLE TO 'auto'"));
|
||||
assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO off")) != None);
|
||||
|
||||
let prepared_stmt = BytesMut::from(
|
||||
&b"WITH t AS (SELECT * FROM items WHERE name = $1) SELECT * FROM t WHERE id = $2\0"[..],
|
||||
);
|
||||
let mut res = BytesMut::from(&b"P"[..]);
|
||||
res.put_i32(prepared_stmt.len() as i32 + 4 + 1 + 2);
|
||||
res.put_u8(0);
|
||||
res.put(prepared_stmt);
|
||||
res.put_i16(0);
|
||||
|
||||
assert!(qr.infer_role(res));
|
||||
assert_eq!(qr.role(), Some(Role::Replica));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_regex_set() {
|
||||
QueryRouter::setup();
|
||||
|
||||
let tests = [
|
||||
// Upper case
|
||||
"SET SHARDING KEY TO '1'",
|
||||
"SET SHARD TO '1'",
|
||||
"SHOW SHARD",
|
||||
"SET SERVER ROLE TO 'replica'",
|
||||
"SET SERVER ROLE TO 'primary'",
|
||||
"SET SERVER ROLE TO 'any'",
|
||||
"SET SERVER ROLE TO 'auto'",
|
||||
"SHOW SERVER ROLE",
|
||||
"SET PRIMARY READS TO 'on'",
|
||||
"SET PRIMARY READS TO 'off'",
|
||||
"SET PRIMARY READS TO 'default'",
|
||||
"SHOW PRIMARY READS",
|
||||
// Lower case
|
||||
"set sharding key to '1'",
|
||||
"set shard to '1'",
|
||||
"show shard",
|
||||
"set server role to 'replica'",
|
||||
"set server role to 'primary'",
|
||||
"set server role to 'any'",
|
||||
"set server role to 'auto'",
|
||||
"show server role",
|
||||
"set primary reads to 'on'",
|
||||
"set primary reads to 'OFF'",
|
||||
"set primary reads to 'deFaUlt'",
|
||||
// No quotes
|
||||
"SET SHARDING KEY TO 11235",
|
||||
"SET SHARD TO 15",
|
||||
"SET PRIMARY READS TO off",
|
||||
// Spaces and semicolon
|
||||
" SET SHARDING KEY TO 11235 ; ",
|
||||
" SET SHARD TO 15; ",
|
||||
" SET SHARDING KEY TO 11235 ;",
|
||||
" SET SERVER ROLE TO 'primary'; ",
|
||||
" SET SERVER ROLE TO 'primary' ; ",
|
||||
" SET SERVER ROLE TO 'primary' ;",
|
||||
" SET PRIMARY READS TO 'off' ;",
|
||||
];
|
||||
|
||||
// Which regexes it'll match to in the list
|
||||
let matches = [
|
||||
0, 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 6, 0, 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 0, 1, 5, 0, 1, 0,
|
||||
3, 3, 3, 5,
|
||||
];
|
||||
|
||||
let list = CUSTOM_SQL_REGEX_LIST.get().unwrap();
|
||||
let set = CUSTOM_SQL_REGEX_SET.get().unwrap();
|
||||
|
||||
for (i, test) in tests.iter().enumerate() {
|
||||
if !list[matches[i]].is_match(test) {
|
||||
println!("{} does not match {}", test, list[matches[i]]);
|
||||
assert!(false);
|
||||
}
|
||||
assert_eq!(set.matches(test).into_iter().collect::<Vec<_>>().len(), 1);
|
||||
}
|
||||
|
||||
let bad = [
|
||||
"SELECT * FROM table",
|
||||
"SELECT * FROM table WHERE value = 'set sharding key to 5'", // Don't capture things in the middle of the query
|
||||
];
|
||||
|
||||
for query in &bad {
|
||||
assert_eq!(set.matches(query).into_iter().collect::<Vec<_>>().len(), 0);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_try_execute_command() {
|
||||
QueryRouter::setup();
|
||||
let mut qr = QueryRouter::new();
|
||||
|
||||
// SetShardingKey
|
||||
let query = simple_query("SET SHARDING KEY TO 13");
|
||||
assert_eq!(
|
||||
qr.try_execute_command(query),
|
||||
Some((Command::SetShardingKey, String::from("0")))
|
||||
);
|
||||
assert_eq!(qr.shard(), 0);
|
||||
|
||||
// SetShard
|
||||
let query = simple_query("SET SHARD TO '1'");
|
||||
assert_eq!(
|
||||
qr.try_execute_command(query),
|
||||
Some((Command::SetShard, String::from("1")))
|
||||
);
|
||||
assert_eq!(qr.shard(), 1);
|
||||
|
||||
// ShowShard
|
||||
let query = simple_query("SHOW SHARD");
|
||||
assert_eq!(
|
||||
qr.try_execute_command(query),
|
||||
Some((Command::ShowShard, String::from("1")))
|
||||
);
|
||||
|
||||
// SetServerRole
|
||||
let roles = ["primary", "replica", "any", "auto", "primary"];
|
||||
let verify_roles = [
|
||||
Some(Role::Primary),
|
||||
Some(Role::Replica),
|
||||
None,
|
||||
None,
|
||||
Some(Role::Primary),
|
||||
];
|
||||
let query_parser_enabled = [false, false, false, true, false];
|
||||
|
||||
for (idx, role) in roles.iter().enumerate() {
|
||||
let query = simple_query(&format!("SET SERVER ROLE TO '{}'", role));
|
||||
assert_eq!(
|
||||
qr.try_execute_command(query),
|
||||
Some((Command::SetServerRole, String::from(*role)))
|
||||
);
|
||||
assert_eq!(qr.role(), verify_roles[idx],);
|
||||
assert_eq!(qr.query_parser_enabled(), query_parser_enabled[idx],);
|
||||
|
||||
// ShowServerRole
|
||||
let query = simple_query("SHOW SERVER ROLE");
|
||||
assert_eq!(
|
||||
qr.try_execute_command(query),
|
||||
Some((Command::ShowServerRole, String::from(*role)))
|
||||
);
|
||||
}
|
||||
|
||||
let primary_reads = ["on", "off", "default"];
|
||||
let primary_reads_enabled = ["on", "off", "on"];
|
||||
|
||||
for (idx, primary_reads) in primary_reads.iter().enumerate() {
|
||||
assert_eq!(
|
||||
qr.try_execute_command(simple_query(&format!(
|
||||
"SET PRIMARY READS TO {}",
|
||||
primary_reads
|
||||
))),
|
||||
Some((Command::SetPrimaryReads, String::from(*primary_reads)))
|
||||
);
|
||||
assert_eq!(
|
||||
qr.try_execute_command(simple_query("SHOW PRIMARY READS")),
|
||||
Some((
|
||||
Command::ShowPrimaryReads,
|
||||
String::from(primary_reads_enabled[idx])
|
||||
))
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_enable_query_parser() {
|
||||
QueryRouter::setup();
|
||||
let mut qr = QueryRouter::new();
|
||||
let query = simple_query("SET SERVER ROLE TO 'auto'");
|
||||
assert!(qr.try_execute_command(simple_query("SET PRIMARY READS TO off")) != None);
|
||||
|
||||
assert!(qr.try_execute_command(query) != None);
|
||||
assert!(qr.query_parser_enabled());
|
||||
assert_eq!(qr.role(), None);
|
||||
|
||||
let query = simple_query("INSERT INTO test_table VALUES (1)");
|
||||
assert_eq!(qr.infer_role(query), true);
|
||||
assert_eq!(qr.role(), Some(Role::Primary));
|
||||
|
||||
let query = simple_query("SELECT * FROM test_table");
|
||||
assert_eq!(qr.infer_role(query), true);
|
||||
assert_eq!(qr.role(), Some(Role::Replica));
|
||||
|
||||
assert!(qr.query_parser_enabled());
|
||||
let query = simple_query("SET SERVER ROLE TO 'default'");
|
||||
assert!(qr.try_execute_command(query) != None);
|
||||
assert!(qr.query_parser_enabled());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_update_from_pool_settings() {
|
||||
QueryRouter::setup();
|
||||
|
||||
let pool_settings = PoolSettings {
|
||||
pool_mode: PoolMode::Transaction,
|
||||
shards: 0,
|
||||
user: crate::config::User::default(),
|
||||
default_role: Some(Role::Replica),
|
||||
query_parser_enabled: true,
|
||||
primary_reads_enabled: false,
|
||||
sharding_function: ShardingFunction::PgBigintHash,
|
||||
};
|
||||
let mut qr = QueryRouter::new();
|
||||
assert_eq!(qr.active_role, None);
|
||||
assert_eq!(qr.active_shard, None);
|
||||
assert_eq!(qr.query_parser_enabled, false);
|
||||
assert_eq!(qr.primary_reads_enabled, false);
|
||||
|
||||
// Internal state must not be changed due to this, only defaults
|
||||
qr.update_pool_settings(pool_settings.clone());
|
||||
|
||||
assert_eq!(qr.active_role, None);
|
||||
assert_eq!(qr.active_shard, None);
|
||||
assert_eq!(qr.query_parser_enabled, false);
|
||||
assert_eq!(qr.primary_reads_enabled, false);
|
||||
|
||||
let q1 = simple_query("SET SERVER ROLE TO 'primary'");
|
||||
assert!(qr.try_execute_command(q1) != None);
|
||||
assert_eq!(qr.active_role.unwrap(), Role::Primary);
|
||||
|
||||
let q2 = simple_query("SET SERVER ROLE TO 'default'");
|
||||
assert!(qr.try_execute_command(q2) != None);
|
||||
assert_eq!(qr.active_role.unwrap(), pool_settings.clone().default_role);
|
||||
}
|
||||
}
|
||||
src/scram.rs (new file, 320 lines)
@@ -0,0 +1,320 @@
|
||||
// SCRAM-SHA-256 authentication. Heavily inspired by
|
||||
// https://github.com/sfackler/rust-postgres/
|
||||
// SASL implementation.
|
||||
|
||||
use bytes::BytesMut;
|
||||
use hmac::{Hmac, Mac};
|
||||
use rand::{self, Rng};
|
||||
use sha2::digest::FixedOutput;
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use std::fmt::Write;
|
||||
|
||||
use crate::constants::*;
|
||||
use crate::errors::Error;
|
||||
|
||||
/// Normalize a password string. Postgres
|
||||
/// passwords don't have to be UTF-8.
|
||||
fn normalize(pass: &[u8]) -> Vec<u8> {
|
||||
let pass = match std::str::from_utf8(pass) {
|
||||
Ok(pass) => pass,
|
||||
Err(_) => return pass.to_vec(),
|
||||
};
|
||||
|
||||
match stringprep::saslprep(pass) {
|
||||
Ok(pass) => pass.into_owned().into_bytes(),
|
||||
Err(_) => pass.as_bytes().to_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Keep the SASL state through the exchange.
|
||||
/// It takes 3 messages to complete the authentication.
|
||||
pub struct ScramSha256 {
|
||||
password: String,
|
||||
salted_password: [u8; 32],
|
||||
auth_message: String,
|
||||
message: BytesMut,
|
||||
nonce: String,
|
||||
}
|
||||
|
||||
impl ScramSha256 {
|
||||
/// Create the Scram state from a password. It'll automatically
|
||||
/// generate a nonce.
|
||||
pub fn new(password: &str) -> ScramSha256 {
|
||||
let mut rng = rand::thread_rng();
|
||||
let nonce = (0..NONCE_LENGTH)
|
||||
.map(|_| {
|
||||
let mut v = rng.gen_range(0x21u8..0x7e);
|
||||
if v == 0x2c {
|
||||
v = 0x7e
|
||||
}
|
||||
v as char
|
||||
})
|
||||
.collect::<String>();
|
||||
|
||||
Self::from_nonce(password, &nonce)
|
||||
}
|
||||
|
||||
/// Used for testing.
|
||||
pub fn from_nonce(password: &str, nonce: &str) -> ScramSha256 {
|
||||
let message = BytesMut::from(&format!("{}n=,r={}", "n,,", nonce).as_bytes()[..]);
|
||||
|
||||
ScramSha256 {
|
||||
password: password.to_string(),
|
||||
nonce: String::from(nonce),
|
||||
message,
|
||||
salted_password: [0u8; 32],
|
||||
auth_message: String::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the current state of the SASL authentication.
|
||||
pub fn message(&mut self) -> BytesMut {
|
||||
self.message.clone()
|
||||
}
|
||||
|
||||
/// Update the state with message received from server.
|
||||
pub fn update(&mut self, message: &BytesMut) -> Result<BytesMut, Error> {
|
||||
let server_message = Message::parse(message)?;
|
||||
|
||||
if !server_message.nonce.starts_with(&self.nonce) {
|
||||
return Err(Error::ProtocolSyncError);
|
||||
}
|
||||
|
||||
let salt = match base64::decode(&server_message.salt) {
|
||||
Ok(salt) => salt,
|
||||
Err(_) => return Err(Error::ProtocolSyncError),
|
||||
};
|
||||
|
||||
let salted_password = Self::hi(
|
||||
&normalize(&self.password.as_bytes()[..]),
|
||||
&salt,
|
||||
server_message.iterations,
|
||||
);
|
||||
|
||||
// Save for verification of final server message.
|
||||
self.salted_password = salted_password;
|
||||
|
||||
let mut hmac = match Hmac::<Sha256>::new_from_slice(&salted_password) {
|
||||
Ok(hmac) => hmac,
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
};
|
||||
|
||||
hmac.update(b"Client Key");
|
||||
|
||||
let client_key = hmac.finalize().into_bytes();
|
||||
|
||||
let mut hash = Sha256::default();
|
||||
hash.update(client_key.as_slice());
|
||||
|
||||
let stored_key = hash.finalize_fixed();
|
||||
let mut cbind_input = vec![];
|
||||
cbind_input.extend("n,,".as_bytes());
|
||||
|
||||
let cbind_input = base64::encode(&cbind_input);
|
||||
|
||||
self.message.clear();
|
||||
|
||||
// Start writing the client reply.
|
||||
match write!(
|
||||
&mut self.message,
|
||||
"c={},r={}",
|
||||
cbind_input, server_message.nonce
|
||||
) {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
};
|
||||
|
||||
let auth_message = format!(
|
||||
"n=,r={},{},{}",
|
||||
self.nonce,
|
||||
String::from_utf8_lossy(&message[..]),
|
||||
String::from_utf8_lossy(&self.message[..])
|
||||
);
|
||||
|
||||
let mut hmac = match Hmac::<Sha256>::new_from_slice(&stored_key) {
|
||||
Ok(hmac) => hmac,
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
};
|
||||
hmac.update(auth_message.as_bytes());
|
||||
|
||||
// Save the auth message for server final message verification.
|
||||
self.auth_message = auth_message;
|
||||
|
||||
let client_signature = hmac.finalize().into_bytes();
|
||||
|
||||
// Sign the client proof.
|
||||
let mut client_proof = client_key;
|
||||
for (proof, signature) in client_proof.iter_mut().zip(client_signature) {
|
||||
*proof ^= signature;
|
||||
}
|
||||
|
||||
match write!(&mut self.message, ",p={}", base64::encode(&*client_proof)) {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
};
|
||||
|
||||
Ok(self.message.clone())
|
||||
}
|
||||
|
||||
/// Verify final server message.
|
||||
pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
|
||||
let final_message = FinalMessage::parse(message)?;
|
||||
|
||||
let verifier = match base64::decode(&final_message.value) {
|
||||
Ok(verifier) => verifier,
|
||||
Err(_) => return Err(Error::ProtocolSyncError),
|
||||
};
|
||||
|
||||
let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
|
||||
Ok(hmac) => hmac,
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
};
|
||||
hmac.update(b"Server Key");
|
||||
let server_key = hmac.finalize().into_bytes();
|
||||
|
||||
let mut hmac = match Hmac::<Sha256>::new_from_slice(&server_key) {
|
||||
Ok(hmac) => hmac,
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
};
|
||||
hmac.update(self.auth_message.as_bytes());
|
||||
|
||||
match hmac.verify_slice(&verifier) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => return Err(Error::ServerError),
|
||||
}
|
||||
}
|
||||
|
||||
/// Hash the password with the salt i-times.
|
||||
fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] {
|
||||
let mut hmac =
|
||||
Hmac::<Sha256>::new_from_slice(str).expect("HMAC is able to accept all key sizes");
|
||||
hmac.update(salt);
|
||||
hmac.update(&[0, 0, 0, 1]);
|
||||
let mut prev = hmac.finalize().into_bytes();
|
||||
|
||||
let mut hi = prev;
|
||||
|
||||
for _ in 1..i {
|
||||
let mut hmac = Hmac::<Sha256>::new_from_slice(str).expect("already checked above");
|
||||
hmac.update(&prev);
|
||||
prev = hmac.finalize().into_bytes();
|
||||
|
||||
for (hi, prev) in hi.iter_mut().zip(prev) {
|
||||
*hi ^= prev;
|
||||
}
|
||||
}
|
||||
|
||||
hi.into()
|
||||
}
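The loop above is essentially the Hi() function from RFC 5802, i.e. PBKDF2 with HMAC-SHA-256 and a single output block:

U_1 = HMAC(password, salt || INT(1))
U_k = HMAC(password, U_{k-1})            for k = 2..i
Hi(password, salt, i) = U_1 XOR U_2 XOR ... XOR U_i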
|
||||
}
|
||||
|
||||
/// Parse the server challenge.
|
||||
struct Message {
|
||||
nonce: String,
|
||||
salt: String,
|
||||
iterations: u32,
|
||||
}
|
||||
|
||||
impl Message {
|
||||
/// Parse the server SASL challenge.
|
||||
fn parse(message: &BytesMut) -> Result<Message, Error> {
|
||||
let parts = String::from_utf8_lossy(&message[..])
|
||||
.split(",")
|
||||
.map(|s| s.to_string())
|
||||
.collect::<Vec<String>>();
|
||||
|
||||
if parts.len() != 3 {
|
||||
return Err(Error::ProtocolSyncError);
|
||||
}
|
||||
|
||||
let nonce = str::replace(&parts[0], "r=", "");
|
||||
let salt = str::replace(&parts[1], "s=", "");
|
||||
let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
|
||||
Ok(iterations) => iterations,
|
||||
Err(_) => return Err(Error::ProtocolSyncError),
|
||||
};
|
||||
|
||||
Ok(Message {
|
||||
nonce,
|
||||
salt,
|
||||
iterations,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse server final validation message.
|
||||
struct FinalMessage {
|
||||
value: String,
|
||||
}
|
||||
|
||||
impl FinalMessage {
|
||||
/// Parse the server final validation message.
|
||||
pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
|
||||
if !message.starts_with(b"v=") || message.len() < 4 {
|
||||
return Err(Error::ProtocolSyncError);
|
||||
}
|
||||
|
||||
Ok(FinalMessage {
|
||||
value: String::from_utf8_lossy(&message[2..]).to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_server_first_message() {
|
||||
let message = BytesMut::from(
|
||||
&"r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096".as_bytes()[..],
|
||||
);
|
||||
let message = Message::parse(&message).unwrap();
|
||||
assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j");
|
||||
assert_eq!(message.salt, "QSXCR+Q6sek8bf92");
|
||||
assert_eq!(message.iterations, 4096);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_server_last_message() {
|
||||
let f = FinalMessage::parse(&BytesMut::from(
|
||||
&"v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".as_bytes()[..],
|
||||
))
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
f.value,
|
||||
"U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".to_string()
|
||||
);
|
||||
}
|
||||
|
||||
// recorded auth exchange from psql
|
||||
#[test]
|
||||
fn exchange() {
|
||||
let password = "foobar";
|
||||
let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB";
|
||||
|
||||
let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB";
|
||||
let server_first =
|
||||
"r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\
|
||||
=4096";
|
||||
let client_final =
|
||||
"c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\
|
||||
1NTlQYNs5BTeQjdHdk7lOflDo5re2an8=";
|
||||
let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw=";
|
||||
|
||||
let mut scram = ScramSha256::from_nonce(password, nonce);
|
||||
|
||||
let message = scram.message();
|
||||
assert_eq!(std::str::from_utf8(&message).unwrap(), client_first);
|
||||
|
||||
let result = scram
|
||||
.update(&BytesMut::from(&server_first.as_bytes()[..]))
|
||||
.unwrap();
|
||||
assert_eq!(std::str::from_utf8(&result).unwrap(), client_final);
|
||||
|
||||
scram
|
||||
.finish(&BytesMut::from(&server_final.as_bytes()[..]))
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
src/server.rs (432 lines changed)
@@ -1,81 +1,107 @@
|
||||
#![allow(dead_code)]
|
||||
#![allow(unused_variables)]
|
||||
|
||||
///! Implementation of the PostgreSQL server (database) protocol.
|
||||
///! Here we are pretending to be a Postgres client.
|
||||
/// Implementation of the PostgreSQL server (database) protocol.
|
||||
/// Here we are pretending to be a Postgres client.
|
||||
use bytes::{Buf, BufMut, BytesMut};
|
||||
use log::{debug, error, info, trace};
|
||||
use std::time::SystemTime;
|
||||
use tokio::io::{AsyncReadExt, BufReader};
|
||||
use tokio::net::tcp::{OwnedReadHalf, OwnedWriteHalf};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::net::{
|
||||
tcp::{OwnedReadHalf, OwnedWriteHalf},
|
||||
TcpStream,
|
||||
};
|
||||
|
||||
use crate::config::Address;
|
||||
use crate::config::{Address, User};
|
||||
use crate::constants::*;
|
||||
use crate::errors::Error;
|
||||
use crate::messages::*;
|
||||
use crate::scram::ScramSha256;
|
||||
use crate::stats::Reporter;
|
||||
use crate::ClientServerMap;
|
||||
|
||||
/// Server state.
|
||||
pub struct Server {
|
||||
// Server host, e.g. localhost
|
||||
host: String,
|
||||
/// Server host, e.g. localhost,
|
||||
/// port, e.g. 5432, and role, e.g. primary or replica.
|
||||
address: Address,
|
||||
|
||||
// Server port: e.g. 5432
|
||||
port: String,
|
||||
|
||||
// Buffered read socket
|
||||
/// Buffered read socket.
|
||||
read: BufReader<OwnedReadHalf>,
|
||||
|
||||
// Unbuffered write socket (our client code buffers)
|
||||
/// Unbuffered write socket (our client code buffers).
|
||||
write: OwnedWriteHalf,
|
||||
|
||||
// Our server response buffer
|
||||
/// Our server response buffer. We buffer data before we give it to the client.
|
||||
buffer: BytesMut,
|
||||
|
||||
// Server information the server sent us over on startup
|
||||
/// Server information the server sent us over on startup.
|
||||
server_info: BytesMut,
|
||||
|
||||
// Backend id and secret key used for query cancellation.
|
||||
backend_id: i32,
|
||||
/// Backend id and secret key used for query cancellation.
|
||||
process_id: i32,
|
||||
secret_key: i32,
|
||||
|
||||
// Is the server inside a transaction at the moment.
|
||||
/// Is the server inside a transaction or idle.
|
||||
in_transaction: bool,
|
||||
|
||||
// Is there more data for the client to read.
|
||||
/// Is there more data for the client to read.
|
||||
data_available: bool,
|
||||
|
||||
// Is the server broken? We'll remove it from the pool if so.
|
||||
/// Is the server broken? We'll remove it from the pool if so.
|
||||
bad: bool,
|
||||
|
||||
// Mapping of clients and servers used for query cancellation.
|
||||
/// Mapping of clients and servers used for query cancellation.
|
||||
client_server_map: ClientServerMap,
|
||||
|
||||
/// Server connected at.
|
||||
connected_at: chrono::naive::NaiveDateTime,
|
||||
|
||||
/// Reports various metrics, e.g. data sent & received.
|
||||
stats: Reporter,
|
||||
|
||||
/// Application name using the server at the moment.
|
||||
application_name: String,
|
||||
|
||||
// Last time that a successful server send or response happened
|
||||
last_activity: SystemTime,
|
||||
}
|
||||
|
||||
impl Server {
|
||||
/// Pretend to be the Postgres client and connect to the server given host, port and credentials.
|
||||
/// Perform the authentication and return the server in a ready-for-query mode.
|
||||
/// Perform the authentication and return the server in a ready for query state.
|
||||
pub async fn startup(
|
||||
host: &str,
|
||||
port: &str,
|
||||
user: &str,
|
||||
password: &str,
|
||||
address: &Address,
|
||||
user: &User,
|
||||
database: &str,
|
||||
client_server_map: ClientServerMap,
|
||||
stats: Reporter,
|
||||
) -> Result<Server, Error> {
|
||||
let mut stream = match TcpStream::connect(&format!("{}:{}", host, port)).await {
|
||||
Ok(stream) => stream,
|
||||
Err(err) => {
|
||||
println!(">> Could not connect to server: {}", err);
|
||||
return Err(Error::SocketError);
|
||||
}
|
||||
};
|
||||
let mut stream =
|
||||
match TcpStream::connect(&format!("{}:{}", &address.host, address.port)).await {
|
||||
Ok(stream) => stream,
|
||||
Err(err) => {
|
||||
error!("Could not connect to server: {}", err);
|
||||
return Err(Error::SocketError);
|
||||
}
|
||||
};
|
||||
|
||||
// Send the startup packet.
|
||||
startup(&mut stream, user, database).await?;
|
||||
trace!("Sending StartupMessage");
|
||||
|
||||
let mut server_info = BytesMut::with_capacity(25);
|
||||
let mut backend_id: i32 = 0;
|
||||
// StartupMessage
|
||||
startup(
|
||||
&mut stream,
|
||||
&user.username,
|
||||
database,
|
||||
address.search_path.as_ref(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut server_info = BytesMut::new();
|
||||
let mut process_id: i32 = 0;
|
||||
let mut secret_key: i32 = 0;
|
||||
|
||||
// We'll be handling multiple packets, but they will all be structured the same.
|
||||
// We'll loop here until this exchange is complete.
|
||||
let mut scram = ScramSha256::new(&user.password);
|
||||
|
||||
loop {
|
||||
let code = match stream.read_u8().await {
|
||||
Ok(code) => code as char,
|
||||
@@ -87,17 +113,23 @@ impl Server {
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
trace!("Message: {}", code);
|
||||
|
||||
match code {
|
||||
// Authentication
|
||||
'R' => {
|
||||
// Auth can proceed
|
||||
let code = match stream.read_i32().await {
|
||||
Ok(code) => code,
|
||||
// Determine which kind of authentication is required, if any.
|
||||
let auth_code = match stream.read_i32().await {
|
||||
Ok(auth_code) => auth_code,
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
match code {
|
||||
// MD5
|
||||
5 => {
|
||||
trace!("Auth: {}", auth_code);
|
||||
|
||||
match auth_code {
|
||||
MD5_ENCRYPTED_PASSWORD => {
|
||||
// The salt is 4 bytes.
|
||||
// See: https://www.postgresql.org/docs/12/protocol-message-formats.html
|
||||
let mut salt = vec![0u8; 4];
|
||||
|
||||
match stream.read_exact(&mut salt).await {
|
||||
@@ -105,66 +137,170 @@ impl Server {
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
md5_password(&mut stream, user, password, &salt[..]).await?;
|
||||
md5_password(&mut stream, &user.username, &user.password, &salt[..])
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Authentication handshake complete.
|
||||
0 => (),
|
||||
AUTHENTICATION_SUCCESSFUL => (),
|
||||
|
||||
SASL => {
|
||||
debug!("Starting SASL authentication");
|
||||
let sasl_len = (len - 8) as usize;
|
||||
let mut sasl_auth = vec![0u8; sasl_len];
|
||||
|
||||
match stream.read_exact(&mut sasl_auth).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
let sasl_type = String::from_utf8_lossy(&sasl_auth[..sasl_len - 2]);
|
||||
|
||||
if sasl_type == SCRAM_SHA_256 {
|
||||
debug!("Using {}", SCRAM_SHA_256);
|
||||
|
||||
// Generate client message.
|
||||
let sasl_response = scram.message();
|
||||
|
||||
// SASLInitialResponse (F)
|
||||
let mut res = BytesMut::new();
|
||||
res.put_u8(b'p');
|
||||
|
||||
// length + String length + length + length of sasl response
|
||||
res.put_i32(
|
||||
4 // i32 size
|
||||
+ SCRAM_SHA_256.len() as i32 // length of SASL version string,
|
||||
+ 1 // Null terminator for the SASL version string,
|
||||
+ 4 // i32 size
|
||||
+ sasl_response.len() as i32, // length of SASL response
|
||||
);
|
||||
|
||||
res.put_slice(&format!("{}\0", SCRAM_SHA_256).as_bytes()[..]);
|
||||
res.put_i32(sasl_response.len() as i32);
|
||||
res.put(sasl_response);
|
||||
|
||||
write_all(&mut stream, res).await?;
|
||||
} else {
|
||||
error!("Unsupported SCRAM version: {}", sasl_type);
|
||||
return Err(Error::ServerError);
|
||||
}
|
||||
}
|
||||
|
||||
SASL_CONTINUE => {
|
||||
trace!("Continuing SASL");
|
||||
|
||||
let mut sasl_data = vec![0u8; (len - 8) as usize];
|
||||
|
||||
match stream.read_exact(&mut sasl_data).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
let msg = BytesMut::from(&sasl_data[..]);
|
||||
let sasl_response = scram.update(&msg)?;
|
||||
|
||||
// SASLResponse
|
||||
let mut res = BytesMut::new();
|
||||
res.put_u8(b'p');
|
||||
res.put_i32(4 + sasl_response.len() as i32);
|
||||
res.put(sasl_response);
|
||||
|
||||
write_all(&mut stream, res).await?;
|
||||
}
|
||||
|
||||
SASL_FINAL => {
|
||||
trace!("Final SASL");
|
||||
|
||||
let mut sasl_final = vec![0u8; len as usize - 8];
|
||||
match stream.read_exact(&mut sasl_final).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
match scram.finish(&BytesMut::from(&sasl_final[..])) {
|
||||
Ok(_) => {
|
||||
debug!("SASL authentication successful");
|
||||
}
|
||||
|
||||
Err(err) => {
|
||||
debug!("SASL authentication failed");
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
_ => {
|
||||
println!(">> Unsupported authentication mechanism: {}", code);
|
||||
error!("Unsupported authentication mechanism: {}", auth_code);
|
||||
return Err(Error::ServerError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ErrorResponse
|
||||
'E' => {
|
||||
let error_code = match stream.read_u8().await {
|
||||
Ok(error_code) => error_code,
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
trace!("Error: {}", error_code);
|
||||
|
||||
match error_code {
|
||||
0 => (), // Terminator
|
||||
// No error message is present in the message.
|
||||
MESSAGE_TERMINATOR => (),
|
||||
|
||||
// An error message will be present.
|
||||
_ => {
|
||||
// Read the error message without the terminating null character.
|
||||
let mut error = vec![0u8; len as usize - 4 - 1];
|
||||
|
||||
match stream.read_exact(&mut error).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
println!(">> Server error: {}", String::from_utf8_lossy(&error));
|
||||
// TODO: the error message contains multiple fields; we can decode them and
|
||||
// present a prettier message to the user.
|
||||
// See: https://www.postgresql.org/docs/12/protocol-error-fields.html
|
||||
error!("Server error: {}", String::from_utf8_lossy(&error));
|
||||
}
|
||||
};
|
||||
|
||||
return Err(Error::ServerError);
|
||||
}
|
||||
|
||||
// ParameterStatus
|
||||
'S' => {
|
||||
// Parameter
|
||||
let mut param = vec![0u8; len as usize - 4];
|
||||
|
||||
match stream.read_exact(&mut param).await {
|
||||
Ok(_) => (),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
// Save the parameter so we can pass it to the client later.
|
||||
// These can be server_encoding, client_encoding, server timezone, Postgres version,
|
||||
// and many more interesting things we should know about the Postgres server we are talking to.
|
||||
server_info.put_u8(b'S');
|
||||
server_info.put_i32(len);
|
||||
server_info.put_slice(¶m[..]);
|
||||
}
|
||||
|
||||
// BackendKeyData
|
||||
'K' => {
|
||||
// Query cancellation data.
|
||||
backend_id = match stream.read_i32().await {
|
||||
// The frontend must save these values if it wishes to be able to issue CancelRequest messages later.
|
||||
// See: <https://www.postgresql.org/docs/12/protocol-message-formats.html>.
|
||||
process_id = match stream.read_i32().await {
|
||||
Ok(id) => id,
|
||||
Err(err) => return Err(Error::SocketError),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
secret_key = match stream.read_i32().await {
|
||||
Ok(id) => id,
|
||||
Err(err) => return Err(Error::SocketError),
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
}
|
||||
|
||||
// ReadyForQuery
|
||||
'Z' => {
|
||||
let mut idle = vec![0u8; len as usize - 4];
|
||||
|
||||
@@ -173,71 +309,88 @@ impl Server {
|
||||
Err(_) => return Err(Error::SocketError),
|
||||
};
|
||||
|
||||
// Startup finished
|
||||
let (read, write) = stream.into_split();
|
||||
|
||||
return Ok(Server {
|
||||
host: host.to_string(),
|
||||
port: port.to_string(),
|
||||
let mut server = Server {
|
||||
address: address.clone(),
|
||||
read: BufReader::new(read),
|
||||
write: write,
|
||||
buffer: BytesMut::with_capacity(8196),
|
||||
server_info: server_info,
|
||||
backend_id: backend_id,
|
||||
process_id: process_id,
|
||||
secret_key: secret_key,
|
||||
in_transaction: false,
|
||||
data_available: false,
|
||||
bad: false,
|
||||
client_server_map: client_server_map,
|
||||
});
|
||||
connected_at: chrono::offset::Utc::now().naive_utc(),
|
||||
stats: stats,
|
||||
application_name: String::new(),
|
||||
last_activity: SystemTime::now(),
|
||||
};
|
||||
|
||||
server.set_name("pgcat").await?;
|
||||
|
||||
return Ok(server);
|
||||
}
|
||||
|
||||
// We have an unexpected message from the server during this exchange.
|
||||
// Means we implemented the protocol wrong or we're not talking to a Postgres server.
|
||||
_ => {
|
||||
println!(">> Unknown code: {}", code);
|
||||
error!("Unknown code: {}", code);
|
||||
return Err(Error::ProtocolSyncError);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/// Issue a cancellation request to the server.
|
||||
/// Issue a query cancellation request to the server.
|
||||
/// Uses a separate connection that's not part of the connection pool.
|
||||
pub async fn cancel(
|
||||
host: &str,
|
||||
port: &str,
|
||||
port: u16,
|
||||
process_id: i32,
|
||||
secret_key: i32,
|
||||
) -> Result<(), Error> {
|
||||
let mut stream = match TcpStream::connect(&format!("{}:{}", host, port)).await {
|
||||
Ok(stream) => stream,
|
||||
Err(err) => {
|
||||
println!(">> Could not connect to server: {}", err);
|
||||
error!("Could not connect to server: {}", err);
|
||||
return Err(Error::SocketError);
|
||||
}
|
||||
};
|
||||
|
||||
debug!("Sending CancelRequest");
|
||||
|
||||
let mut bytes = BytesMut::with_capacity(16);
|
||||
bytes.put_i32(16);
|
||||
bytes.put_i32(80877102);
|
||||
bytes.put_i32(CANCEL_REQUEST_CODE);
|
||||
bytes.put_i32(process_id);
|
||||
bytes.put_i32(secret_key);
|
||||
|
||||
Ok(write_all(&mut stream, bytes).await?)
|
||||
}
|
||||
|
||||
/// Send data to the server from the client.
|
||||
/// Send messages to the server from the client.
|
||||
pub async fn send(&mut self, messages: BytesMut) -> Result<(), Error> {
|
||||
self.stats
|
||||
.data_sent(messages.len(), self.process_id, self.address.id);
|
||||
|
||||
match write_all_half(&mut self.write, messages).await {
|
||||
Ok(_) => Ok(()),
|
||||
Ok(_) => {
|
||||
// Successfully sent to server
|
||||
self.last_activity = SystemTime::now();
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => {
|
||||
println!(">> Terminating server because of: {:?}", err);
|
||||
error!("Terminating server because of: {:?}", err);
|
||||
self.bad = true;
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Receive data from the server in response to a client request sent previously.
|
||||
/// Receive data from the server in response to a client request.
|
||||
/// This method must be called multiple times while `self.is_data_available()` is true
|
||||
/// in order to receive all data the server has to offer.
|
||||
pub async fn recv(&mut self) -> Result<BytesMut, Error> {
|
||||
@@ -245,86 +398,101 @@ impl Server {
|
||||
let mut message = match read_message(&mut self.read).await {
|
||||
Ok(message) => message,
|
||||
Err(err) => {
|
||||
println!(">> Terminating server because of: {:?}", err);
|
||||
error!("Terminating server because of: {:?}", err);
|
||||
self.bad = true;
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
|
||||
// Buffer the message we'll forward to the client in a bit.
|
||||
// Buffer the message we'll forward to the client later.
|
||||
self.buffer.put(&message[..]);
|
||||
|
||||
let code = message.get_u8() as char;
|
||||
let _len = message.get_i32();
|
||||
|
||||
trace!("Message: {}", code);
|
||||
|
||||
match code {
|
||||
// ReadyForQuery
|
||||
'Z' => {
|
||||
// Ready for query, time to forward buffer to client.
|
||||
let transaction_state = message.get_u8() as char;
|
||||
|
||||
match transaction_state {
|
||||
// In transaction.
|
||||
'T' => {
|
||||
self.in_transaction = true;
|
||||
}
|
||||
|
||||
// Idle, transaction over.
|
||||
'I' => {
|
||||
self.in_transaction = false;
|
||||
}
|
||||
|
||||
// Error client didn't clean up!
|
||||
// We should drop this server
|
||||
// Some error occurred, the transaction was rolled back.
|
||||
'E' => {
|
||||
self.in_transaction = true;
|
||||
}
|
||||
|
||||
// Something totally unexpected, this is not a Postgres server we know.
|
||||
_ => {
|
||||
self.bad = true;
|
||||
return Err(Error::ProtocolSyncError);
|
||||
}
|
||||
};
|
||||
|
||||
// There is no more data available from the server.
|
||||
self.data_available = false;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
// DataRow
|
||||
'D' => {
|
||||
// More data is available after this message, this is not the end of the reply.
|
||||
self.data_available = true;
|
||||
|
||||
// Don't flush yet, the more we buffer, the faster this goes.
|
||||
// Up to a limit of course.
|
||||
// Don't flush yet, the more we buffer, the faster this goes...up to a limit.
|
||||
if self.buffer.len() >= 8196 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// CopyInResponse: copy is starting from client to server
|
||||
// CopyInResponse: copy is starting from client to server.
|
||||
'G' => break,
|
||||
|
||||
// CopyOutResponse: copy is starting from the server to the client
|
||||
// CopyOutResponse: copy is starting from the server to the client.
|
||||
'H' => {
|
||||
self.data_available = true;
|
||||
break;
|
||||
}
|
||||
|
||||
// CopyData
|
||||
// CopyData: we are not buffering this one because there will be many more
|
||||
// and we don't know how big this packet could be, best not to take a risk.
|
||||
'd' => break,
|
||||
|
||||
// CopyDone
|
||||
'c' => {
|
||||
self.data_available = false;
|
||||
// Buffer until ReadyForQuery shows up
|
||||
}
|
||||
// Buffer until ReadyForQuery shows up, so don't exit the loop yet.
|
||||
'c' => (),
|
||||
|
||||
_ => {
|
||||
// Keep buffering,
|
||||
}
|
||||
// Anything else, e.g. errors, notices, etc.
|
||||
// Keep buffering until ReadyForQuery shows up.
|
||||
_ => (),
|
||||
};
|
||||
}
|
||||
|
||||
let bytes = self.buffer.clone();
|
||||
|
||||
// Keep track of how much data we got from the server for stats.
|
||||
self.stats
|
||||
.data_received(bytes.len(), self.process_id, self.address.id);
|
||||
|
||||
// Clear the buffer for next query.
|
||||
self.buffer.clear();
|
||||
|
||||
// Successfully received data from server
|
||||
self.last_activity = SystemTime::now();
|
||||
|
||||
// Pass the data back to the client.
|
||||
Ok(bytes)
|
||||
}
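For orientation, a minimal caller-side sketch of the receive loop the doc comment above describes, written as if inside an async function returning Result<(), Error>; `server` is a checked-out `Server`, and `forward_to_client` is a hypothetical stand-in for writing the bytes back to the client. The `query()` method below uses the same pattern internally.

// Illustrative sketch only: drain one server reply.
loop {
    let data = server.recv().await?;
    forward_to_client(data).await?; // hypothetical helper, not part of this file
    if !server.is_data_available() {
        break;
    }
}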
|
||||
|
||||
@@ -354,42 +522,35 @@ impl Server {
|
||||
|
||||
/// Indicate that this server connection cannot be re-used and must be discarded.
|
||||
pub fn mark_bad(&mut self) {
|
||||
println!(">> Server marked bad");
|
||||
error!("Server {:?} marked bad", self.address);
|
||||
self.bad = true;
|
||||
}
|
||||
|
||||
/// Claim this server as mine for the purposes of query cancellation.
|
||||
pub fn claim(&mut self, process_id: i32, secret_key: i32) {
|
||||
let mut guard = self.client_server_map.lock().unwrap();
|
||||
let mut guard = self.client_server_map.lock();
|
||||
guard.insert(
|
||||
(process_id, secret_key),
|
||||
(
|
||||
self.backend_id,
|
||||
self.process_id,
|
||||
self.secret_key,
|
||||
self.host.clone(),
|
||||
self.port.clone(),
|
||||
self.address.host.clone(),
|
||||
self.address.port,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
/// Execute an arbitrary query against the server.
|
||||
/// It will use the Simple query protocol.
|
||||
/// It will use the simple query protocol.
|
||||
/// Result will not be returned, so this is useful for things like `SET` or `ROLLBACK`.
|
||||
pub async fn query(&mut self, query: &str) -> Result<(), Error> {
|
||||
let mut query = BytesMut::from(&query.as_bytes()[..]);
|
||||
query.put_u8(0);
|
||||
let query = simple_query(query);
|
||||
|
||||
let len = query.len() as i32 + 4;
|
||||
self.send(query).await?;
|
||||
|
||||
let mut msg = BytesMut::with_capacity(len as usize + 1);
|
||||
|
||||
msg.put_u8(b'Q');
|
||||
msg.put_i32(len);
|
||||
msg.put_slice(&query[..]);
|
||||
|
||||
self.send(msg).await?;
|
||||
loop {
|
||||
let _ = self.recv().await?;
|
||||
|
||||
if !self.data_available {
|
||||
break;
|
||||
}
|
||||
@@ -399,16 +560,61 @@ impl Server {
|
||||
}
|
||||
|
||||
/// A shorthand for `SET application_name = $1`.
|
||||
#[allow(dead_code)]
|
||||
pub async fn set_name(&mut self, name: &str) -> Result<(), Error> {
|
||||
Ok(self
|
||||
.query(&format!("SET application_name = '{}'", name))
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub fn address(&self) -> Address {
|
||||
Address {
|
||||
host: self.host.to_string(),
|
||||
port: self.port.to_string(),
|
||||
if self.application_name != name {
|
||||
self.application_name = name.to_string();
|
||||
Ok(self
|
||||
.query(&format!("SET application_name = '{}'", name))
|
||||
.await?)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the servers address.
|
||||
#[allow(dead_code)]
|
||||
pub fn address(&self) -> Address {
|
||||
self.address.clone()
|
||||
}
|
||||
|
||||
/// Get the server's unique identifier.
|
||||
pub fn process_id(&self) -> i32 {
|
||||
self.process_id
|
||||
}
|
||||
|
||||
// Get server's latest response timestamp
|
||||
pub fn last_activity(&self) -> SystemTime {
|
||||
self.last_activity
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Server {
|
||||
/// Try to do a clean shut down. Best effort because
|
||||
/// the socket is in non-blocking mode, so it may not be ready
|
||||
/// for a write.
|
||||
fn drop(&mut self) {
|
||||
self.stats
|
||||
.server_disconnecting(self.process_id(), self.address.id);
|
||||
|
||||
let mut bytes = BytesMut::with_capacity(4);
|
||||
bytes.put_u8(b'X');
|
||||
bytes.put_i32(4);
|
||||
|
||||
match self.write.try_write(&bytes) {
|
||||
Ok(_) => (),
|
||||
Err(_) => debug!("Dirty shutdown"),
|
||||
};
|
||||
|
||||
// Should not matter.
|
||||
self.bad = true;
|
||||
|
||||
let now = chrono::offset::Utc::now().naive_utc();
|
||||
let duration = now - self.connected_at;
|
||||
|
||||
info!(
|
||||
"Server connection closed, session duration: {}",
|
||||
crate::format_duration(&duration)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,24 +1,68 @@
|
||||
// https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20
|
||||
/// Implements various sharding functions.
|
||||
use sha1::{Digest, Sha1};
|
||||
|
||||
/// See: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20>.
|
||||
const PARTITION_HASH_SEED: u64 = 0x7A5B22367996DCFD;
|
||||
|
||||
/// The sharding functions we support.
|
||||
#[derive(Debug, PartialEq, Copy, Clone)]
|
||||
pub enum ShardingFunction {
|
||||
PgBigintHash,
|
||||
Sha1,
|
||||
}
|
||||
|
||||
/// The sharder.
|
||||
pub struct Sharder {
|
||||
/// Number of shards in the cluster.
|
||||
shards: usize,
|
||||
|
||||
/// The sharding function in use.
|
||||
sharding_function: ShardingFunction,
|
||||
}
|
||||
|
||||
impl Sharder {
|
||||
pub fn new(shards: usize) -> Sharder {
|
||||
Sharder { shards: shards }
|
||||
/// Create a new instance of the sharder.
|
||||
pub fn new(shards: usize, sharding_function: ShardingFunction) -> Sharder {
|
||||
Sharder {
|
||||
shards,
|
||||
sharding_function,
|
||||
}
|
||||
}
|
||||
|
||||
/// Compute the shard for a given sharding key.
|
||||
pub fn shard(&self, key: i64) -> usize {
|
||||
match self.sharding_function {
|
||||
ShardingFunction::PgBigintHash => self.pg_bigint_hash(key),
|
||||
ShardingFunction::Sha1 => self.sha1(key),
|
||||
}
|
||||
}
|
||||
|
||||
/// Hash function used by Postgres to determine which partition
|
||||
/// to put the row in when using HASH(column) partitioning.
|
||||
/// Source: https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631
|
||||
/// Source: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631>.
|
||||
/// Supports only 1 bigint at the moment, but we can add more later.
|
||||
pub fn pg_bigint_hash(&self, key: i64) -> usize {
|
||||
fn pg_bigint_hash(&self, key: i64) -> usize {
|
||||
let mut lohalf = key as u32;
|
||||
let hihalf = (key >> 32) as u32;
|
||||
lohalf ^= if key >= 0 { hihalf } else { !hihalf };
|
||||
Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards as usize
|
||||
Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards
|
||||
}
|
||||
|
||||
/// Example of a hashing function based on SHA1.
|
||||
fn sha1(&self, key: i64) -> usize {
|
||||
let mut hasher = Sha1::new();
|
||||
|
||||
hasher.update(&key.to_string().as_bytes());
|
||||
|
||||
let result = hasher.finalize();
|
||||
|
||||
// Convert the SHA1 hash into hex so we can parse it as a large integer.
|
||||
let hex = format!("{:x}", result);
|
||||
|
||||
// Parse the last 8 bytes as an integer (8 bytes = bigint).
|
||||
let key = i64::from_str_radix(&hex[hex.len() - 8..], 16).unwrap() as usize;
|
||||
|
||||
key % self.shards
|
||||
}
|
||||
|
||||
#[inline]
|
||||
@@ -83,6 +127,7 @@ impl Sharder {
|
||||
a
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn pg_u32_hash(k: u32) -> u64 {
|
||||
let mut a: u32 = 0x9e3779b9 as u32 + std::mem::size_of::<u32>() as u32 + 3923095 as u32;
|
||||
let mut b = a;
|
||||
@@ -109,36 +154,51 @@ mod test {
|
||||
// confirming that we implemented Postgres BIGINT hashing correctly.
|
||||
#[test]
|
||||
fn test_pg_bigint_hash() {
|
||||
let sharder = Sharder::new(5);
|
||||
let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
|
||||
|
||||
let shard_0 = vec![1, 4, 5, 14, 19, 39, 40, 46, 47, 53];
|
||||
|
||||
for v in shard_0 {
|
||||
assert_eq!(sharder.pg_bigint_hash(v), 0);
|
||||
assert_eq!(sharder.shard(v), 0);
|
||||
}
|
||||
|
||||
let shard_1 = vec![2, 3, 11, 17, 21, 23, 30, 49, 51, 54];
|
||||
|
||||
for v in shard_1 {
|
||||
assert_eq!(sharder.pg_bigint_hash(v), 1);
|
||||
assert_eq!(sharder.shard(v), 1);
|
||||
}
|
||||
|
||||
let shard_2 = vec![6, 7, 15, 16, 18, 20, 25, 28, 34, 35];
|
||||
|
||||
for v in shard_2 {
|
||||
assert_eq!(sharder.pg_bigint_hash(v), 2);
|
||||
assert_eq!(sharder.shard(v), 2);
|
||||
}
|
||||
|
||||
let shard_3 = vec![8, 12, 13, 22, 29, 31, 33, 36, 41, 43];
|
||||
|
||||
for v in shard_3 {
|
||||
assert_eq!(sharder.pg_bigint_hash(v), 3);
|
||||
assert_eq!(sharder.shard(v), 3);
|
||||
}
|
||||
|
||||
let shard_4 = vec![9, 10, 24, 26, 27, 32, 37, 38, 42, 45];
|
||||
|
||||
for v in shard_4 {
|
||||
assert_eq!(sharder.pg_bigint_hash(v), 4);
|
||||
assert_eq!(sharder.shard(v), 4);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sha1_hash() {
|
||||
let sharder = Sharder::new(12, ShardingFunction::Sha1);
|
||||
let ids = vec![
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
|
||||
];
|
||||
let shards = vec![
|
||||
4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
|
||||
];
|
||||
|
||||
for (i, id) in ids.iter().enumerate() {
|
||||
assert_eq!(sharder.shard(*id), shards[i]);
|
||||
}
|
||||
}
|
||||
}
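For orientation, a minimal usage sketch of the sharder; the expected shard indexes come straight from the test data above.

// Illustrative sketch: map a BIGINT sharding key to a shard index.
let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
assert_eq!(sharder.shard(1), 0); // key 1 hashes to shard 0 (see shard_0 in the tests)
assert_eq!(sharder.shard(2), 1); // key 2 hashes to shard 1 (see shard_1 in the tests)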
|
||||
|
||||
564
src/stats.rs
Normal file
@@ -0,0 +1,564 @@
|
||||
use arc_swap::ArcSwap;
|
||||
/// Statistics and reporting.
|
||||
use log::{error, info, trace};
|
||||
use once_cell::sync::Lazy;
|
||||
use parking_lot::Mutex;
|
||||
use std::collections::HashMap;
|
||||
use tokio::sync::mpsc::error::TrySendError;
|
||||
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
||||
|
||||
use crate::pool::get_number_of_addresses;
|
||||
|
||||
pub static REPORTER: Lazy<ArcSwap<Reporter>> =
|
||||
Lazy::new(|| ArcSwap::from_pointee(Reporter::default()));
|
||||
|
||||
/// Latest stats updated every second; used in SHOW STATS and other admin commands.
|
||||
static LATEST_STATS: Lazy<Mutex<HashMap<usize, HashMap<String, i64>>>> =
|
||||
Lazy::new(|| Mutex::new(HashMap::new()));
|
||||
|
||||
/// Statistics period used for average calculations.
|
||||
/// 15 seconds.
|
||||
static STAT_PERIOD: u64 = 15000;
|
||||
|
||||
/// The names for the events reported
|
||||
/// to the statistics collector.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum EventName {
|
||||
CheckoutTime,
|
||||
Query,
|
||||
Transaction,
|
||||
DataSent,
|
||||
DataReceived,
|
||||
ClientWaiting,
|
||||
ClientActive,
|
||||
ClientIdle,
|
||||
ClientDisconnecting,
|
||||
ServerActive,
|
||||
ServerIdle,
|
||||
ServerTested,
|
||||
ServerLogin,
|
||||
ServerDisconnecting,
|
||||
UpdateStats,
|
||||
UpdateAverages,
|
||||
}
|
||||
|
||||
/// Event data sent to the collector
|
||||
/// from clients and servers.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Event {
|
||||
/// The name of the event being reported.
|
||||
name: EventName,
|
||||
|
||||
/// The value being reported. Meaning differs based on event name.
|
||||
value: i64,
|
||||
|
||||
/// The client or server connection reporting the event.
|
||||
process_id: i32,
|
||||
|
||||
/// The server the client is connected to.
|
||||
address_id: usize,
|
||||
}
|
||||
|
||||
/// The statistics reporter. An instance is given
|
||||
/// to each possible source of statistics,
|
||||
/// e.g. clients, servers, connection pool.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Reporter {
|
||||
tx: Sender<Event>,
|
||||
}
|
||||
|
||||
impl Default for Reporter {
|
||||
fn default() -> Reporter {
|
||||
let (tx, _rx) = channel(5);
|
||||
Reporter { tx }
|
||||
}
|
||||
}
|
||||
|
||||
impl Reporter {
|
||||
/// Create a new Reporter instance.
|
||||
pub fn new(tx: Sender<Event>) -> Reporter {
|
||||
Reporter { tx: tx }
|
||||
}
|
||||
|
||||
/// Send statistics to the task keeping track of stats.
|
||||
fn send(&self, event: Event) {
|
||||
let name = event.name;
|
||||
let result = self.tx.try_send(event);
|
||||
|
||||
match result {
|
||||
Ok(_) => trace!(
|
||||
"{:?} event reported successfully, capacity: {}",
|
||||
name,
|
||||
self.tx.capacity()
|
||||
),
|
||||
|
||||
Err(err) => match err {
|
||||
TrySendError::Full { .. } => error!("{:?} event dropped, buffer full", name),
|
||||
TrySendError::Closed { .. } => error!("{:?} event dropped, channel closed", name),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// Report a query executed by a client against
|
||||
/// a server identified by the `address_id`.
|
||||
pub fn query(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::Query,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event);
|
||||
}
|
||||
|
||||
/// Report a transaction executed by a client against
|
||||
/// a server identified by the `address_id`.
|
||||
pub fn transaction(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::Transaction,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Report data sent to a server identified by `address_id`.
|
||||
/// The `amount` is measured in bytes.
|
||||
pub fn data_sent(&self, amount: usize, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::DataSent,
|
||||
value: amount as i64,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Report data received from a server identified by `address_id`.
|
||||
/// The `amount` is measured in bytes.
|
||||
pub fn data_received(&self, amount: usize, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::DataReceived,
|
||||
value: amount as i64,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Time spent waiting to get a healthy connection from the pool
|
||||
/// for a server identified by `address_id`.
|
||||
/// Measured in milliseconds.
|
||||
pub fn checkout_time(&self, ms: u128, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::CheckoutTime,
|
||||
value: ms as i64,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a client identified by `process_id` waiting for a connection
|
||||
/// to a server identified by `address_id`.
|
||||
pub fn client_waiting(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ClientWaiting,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a client identified by `process_id` is done waiting for a connection
|
||||
/// to a server identified by `address_id` and is about to query the server.
|
||||
pub fn client_active(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ClientActive,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a client identified by `process_id` is done querying the server
|
||||
/// identified by `address_id` and is no longer active.
|
||||
pub fn client_idle(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ClientIdle,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a client identified by `process_id` is disconnecting from the pooler.
|
||||
/// The last server it was connected to is identified by `address_id`.
|
||||
pub fn client_disconnecting(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ClientDisconnecting,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a server connection identified by `process_id` for
|
||||
/// a configured server identified by `address_id` is actively used
|
||||
/// by a client.
|
||||
pub fn server_active(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ServerActive,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a server connection identified by `process_id` for
|
||||
/// a configured server identified by `address_id` is no longer
|
||||
/// actively used by a client and is now idle.
|
||||
pub fn server_idle(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ServerIdle,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a server connection identified by `process_id` for
|
||||
/// a configured server identified by `address_id` is attempting
|
||||
/// to login.
|
||||
pub fn server_login(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ServerLogin,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a server connection identified by `process_id` for
|
||||
/// a configured server identified by `address_id` is being
|
||||
/// tested before being given to a client.
|
||||
pub fn server_tested(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ServerTested,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
|
||||
/// Reports a server connection identified by `process_id` is disconnecting from the pooler.
|
||||
/// The configured server it was connected to is identified by `address_id`.
|
||||
pub fn server_disconnecting(&self, process_id: i32, address_id: usize) {
|
||||
let event = Event {
|
||||
name: EventName::ServerDisconnecting,
|
||||
value: 1,
|
||||
process_id: process_id,
|
||||
address_id: address_id,
|
||||
};
|
||||
|
||||
self.send(event)
|
||||
}
|
||||
}
|
||||
|
||||
/// The statistics collector which is receiving statistics
|
||||
/// from clients, servers, and the connection pool. There is
|
||||
/// only one collector (kind of like a singleton).
|
||||
/// The collector can trigger events on its own, e.g.
|
||||
/// it updates aggregates every second and averages every
|
||||
/// 15 seconds.
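/// (With `STAT_PERIOD` = 15,000 ms, the `STAT_PERIOD / 15` tick below drives the per-second stats update and the full period drives the averages.)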
|
||||
pub struct Collector {
|
||||
rx: Receiver<Event>,
|
||||
tx: Sender<Event>,
|
||||
}
|
||||
|
||||
impl Collector {
|
||||
/// Create a new collector instance. There should only be one instance
|
||||
/// at a time. This is ensured by mpsc which allows only one receiver.
|
||||
pub fn new(rx: Receiver<Event>, tx: Sender<Event>) -> Collector {
|
||||
Collector { rx, tx }
|
||||
}
|
||||
|
||||
/// The statistics collection handler. It will collect statistics
|
||||
/// for `address_id`s starting at 0 up to the number of configured addresses.
|
||||
pub async fn collect(&mut self) {
|
||||
info!("Events collector started");
|
||||
|
||||
let stats_template = HashMap::from([
|
||||
("total_query_count", 0),
|
||||
("total_query_time", 0),
|
||||
("total_received", 0),
|
||||
("total_sent", 0),
|
||||
("total_xact_count", 0),
|
||||
("total_xact_time", 0),
|
||||
("total_wait_time", 0),
|
||||
("avg_query_count", 0),
|
||||
("avg_query_time", 0),
|
||||
("avg_recv", 0),
|
||||
("avg_sent", 0),
|
||||
("avg_xact_count", 0),
|
||||
("avg_xact_time", 0),
|
||||
("avg_wait_time", 0),
|
||||
("maxwait_us", 0),
|
||||
("maxwait", 0),
|
||||
("cl_waiting", 0),
|
||||
("cl_active", 0),
|
||||
("cl_idle", 0),
|
||||
("sv_idle", 0),
|
||||
("sv_active", 0),
|
||||
("sv_login", 0),
|
||||
("sv_tested", 0),
|
||||
]);
|
||||
|
||||
let mut stats = HashMap::new();
|
||||
|
||||
// Stats saved after each iteration of the flush event. Used in calculation
|
||||
// of averages in the last flush period.
|
||||
let mut old_stats: HashMap<usize, HashMap<String, i64>> = HashMap::new();
|
||||
|
||||
// Track which state the client and server are at any given time.
|
||||
let mut client_server_states: HashMap<usize, HashMap<i32, EventName>> = HashMap::new();
|
||||
|
||||
// Flush stats to StatsD and calculate averages every 15 seconds.
|
||||
let tx = self.tx.clone();
|
||||
tokio::task::spawn(async move {
|
||||
let mut interval =
|
||||
tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD / 15));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let address_count = get_number_of_addresses();
|
||||
for address_id in 0..address_count {
|
||||
let _ = tx.try_send(Event {
|
||||
name: EventName::UpdateStats,
|
||||
value: 0,
|
||||
process_id: -1,
|
||||
address_id: address_id,
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let tx = self.tx.clone();
|
||||
tokio::task::spawn(async move {
|
||||
let mut interval =
|
||||
tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD));
|
||||
loop {
|
||||
interval.tick().await;
|
||||
let address_count = get_number_of_addresses();
|
||||
for address_id in 0..address_count {
|
||||
let _ = tx.try_send(Event {
|
||||
name: EventName::UpdateAverages,
|
||||
value: 0,
|
||||
process_id: -1,
|
||||
address_id: address_id,
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// The collector loop
|
||||
loop {
|
||||
let stat = match self.rx.recv().await {
|
||||
Some(stat) => stat,
|
||||
None => {
|
||||
info!("Events collector is shutting down");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let stats = stats
|
||||
.entry(stat.address_id)
|
||||
.or_insert(stats_template.clone());
|
||||
let client_server_states = client_server_states
|
||||
.entry(stat.address_id)
|
||||
.or_insert(HashMap::new());
|
||||
let old_stats = old_stats.entry(stat.address_id).or_insert(HashMap::new());
|
||||
|
||||
// Some are counters, some are gauges...
|
||||
match stat.name {
|
||||
EventName::Query => {
|
||||
let counter = stats.entry("total_query_count").or_insert(0);
|
||||
*counter += stat.value;
|
||||
}
|
||||
|
||||
EventName::Transaction => {
|
||||
let counter = stats.entry("total_xact_count").or_insert(0);
|
||||
*counter += stat.value;
|
||||
}
|
||||
|
||||
EventName::DataSent => {
|
||||
let counter = stats.entry("total_sent").or_insert(0);
|
||||
*counter += stat.value;
|
||||
}
|
||||
|
||||
EventName::DataReceived => {
|
||||
let counter = stats.entry("total_received").or_insert(0);
|
||||
*counter += stat.value;
|
||||
}
|
||||
|
||||
EventName::CheckoutTime => {
|
||||
let counter = stats.entry("total_wait_time").or_insert(0);
|
||||
*counter += stat.value;
|
||||
|
||||
let counter = stats.entry("maxwait_us").or_insert(0);
|
||||
let mic_part = stat.value % 1_000_000;
|
||||
|
||||
// Report max time here
|
||||
if mic_part > *counter {
|
||||
*counter = mic_part;
|
||||
}
|
||||
|
||||
let counter = stats.entry("maxwait").or_insert(0);
|
||||
let seconds = *counter / 1_000_000;
|
||||
|
||||
if seconds > *counter {
|
||||
*counter = seconds;
|
||||
}
|
||||
}
|
||||
|
||||
EventName::ClientActive
|
||||
| EventName::ClientWaiting
|
||||
| EventName::ClientIdle
|
||||
| EventName::ServerActive
|
||||
| EventName::ServerIdle
|
||||
| EventName::ServerTested
|
||||
| EventName::ServerLogin => {
|
||||
client_server_states.insert(stat.process_id, stat.name);
|
||||
}
|
||||
|
||||
EventName::ClientDisconnecting | EventName::ServerDisconnecting => {
|
||||
client_server_states.remove(&stat.process_id);
|
||||
}
|
||||
|
||||
EventName::UpdateStats => {
|
||||
// Calculate connection states
|
||||
for (_, state) in client_server_states.iter() {
|
||||
match state {
|
||||
EventName::ClientActive => {
|
||||
let counter = stats.entry("cl_active").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
EventName::ClientWaiting => {
|
||||
let counter = stats.entry("cl_waiting").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
EventName::ServerIdle => {
|
||||
let counter = stats.entry("sv_idle").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
EventName::ServerActive => {
|
||||
let counter = stats.entry("sv_active").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
EventName::ServerTested => {
|
||||
let counter = stats.entry("sv_tested").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
EventName::ServerLogin => {
|
||||
let counter = stats.entry("sv_login").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
EventName::ClientIdle => {
|
||||
let counter = stats.entry("cl_idle").or_insert(0);
|
||||
*counter += 1;
|
||||
}
|
||||
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
|
||||
// Update latest stats used in SHOW STATS
|
||||
let mut guard = LATEST_STATS.lock();
|
||||
for (key, value) in stats.iter() {
|
||||
let entry = guard.entry(stat.address_id).or_insert(HashMap::new());
|
||||
entry.insert(key.to_string(), value.clone());
|
||||
}
|
||||
|
||||
// These are re-calculated every iteration of the loop, so we don't want to add values
|
||||
// from the last iteration.
|
||||
for stat in &[
|
||||
"cl_active",
|
||||
"cl_waiting",
|
||||
"cl_idle",
|
||||
"sv_idle",
|
||||
"sv_active",
|
||||
"sv_tested",
|
||||
"sv_login",
|
||||
"maxwait",
|
||||
"maxwait_us",
|
||||
] {
|
||||
stats.insert(stat, 0);
|
||||
}
|
||||
}
|
||||
|
||||
EventName::UpdateAverages => {
|
||||
// Calculate averages
|
||||
for stat in &[
|
||||
"avg_query_count",
|
||||
"avg_query_time",
|
||||
"avg_recv",
|
||||
"avg_sent",
|
||||
"avg_xact_time",
|
||||
"avg_xact_count",
|
||||
"avg_wait_time",
|
||||
] {
|
||||
let total_name = match stat {
|
||||
&"avg_recv" => "total_received".to_string(), // Because PgBouncer is saving bytes
|
||||
_ => stat.replace("avg_", "total_"),
|
||||
};
|
||||
|
||||
let old_value = old_stats.entry(total_name.clone()).or_insert(0);
|
||||
let new_value = stats.get(total_name.as_str()).unwrap_or(&0).to_owned();
|
||||
let avg = (new_value - *old_value) / (STAT_PERIOD as i64 / 1_000); // Avg / second
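// e.g. with STAT_PERIOD = 15_000 ms, a delta of 150 queries over the period averages to 150 / 15 = 10 queries per second.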
|
||||
|
||||
stats.insert(stat, avg);
|
||||
*old_value = new_value;
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a snapshot of statistics. Updated once a second
|
||||
/// by the `Collector`.
|
||||
pub fn get_stats() -> HashMap<usize, HashMap<String, i64>> {
|
||||
LATEST_STATS.lock().clone()
|
||||
}
|
||||
|
||||
/// Get the statistics reporter used to update stats across the pools/clients.
|
||||
pub fn get_reporter() -> Reporter {
|
||||
(*(*REPORTER.load())).clone()
|
||||
}
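A rough wiring sketch of how the pieces above fit together at startup; the channel capacity and the task spawning shown here are illustrative assumptions, not a copy of the pooler's actual main().

// Illustrative sketch, assuming a tokio runtime is already running.
let (tx, rx) = channel(5_000); // capacity is an assumption
REPORTER.store(std::sync::Arc::new(Reporter::new(tx.clone())));
let mut collector = Collector::new(rx, tx);
tokio::task::spawn(async move {
    collector.collect().await; // the single collector instance
});
// Clients, servers, and pools then report through get_reporter().query(...), .transaction(...), etc.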
|
||||
57
src/tls.rs
Normal file
@@ -0,0 +1,57 @@
|
||||
// Stream wrapper.
|
||||
|
||||
use rustls_pemfile::{certs, rsa_private_keys};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use tokio_rustls::rustls::{self, Certificate, PrivateKey};
|
||||
use tokio_rustls::TlsAcceptor;
|
||||
|
||||
use crate::config::get_config;
|
||||
use crate::errors::Error;
|
||||
|
||||
// TLS
|
||||
pub fn load_certs(path: &Path) -> std::io::Result<Vec<Certificate>> {
|
||||
certs(&mut std::io::BufReader::new(std::fs::File::open(path)?))
|
||||
.map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid cert"))
|
||||
.map(|mut certs| certs.drain(..).map(Certificate).collect())
|
||||
}
|
||||
|
||||
pub fn load_keys(path: &Path) -> std::io::Result<Vec<PrivateKey>> {
|
||||
rsa_private_keys(&mut std::io::BufReader::new(std::fs::File::open(path)?))
|
||||
.map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid key"))
|
||||
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
|
||||
}
|
||||
|
||||
pub struct Tls {
|
||||
pub acceptor: TlsAcceptor,
|
||||
}
|
||||
|
||||
impl Tls {
|
||||
pub fn new() -> Result<Self, Error> {
|
||||
let config = get_config();
|
||||
|
||||
let certs = match load_certs(&Path::new(&config.general.tls_certificate.unwrap())) {
|
||||
Ok(certs) => certs,
|
||||
Err(_) => return Err(Error::TlsError),
|
||||
};
|
||||
|
||||
let mut keys = match load_keys(&Path::new(&config.general.tls_private_key.unwrap())) {
|
||||
Ok(keys) => keys,
|
||||
Err(_) => return Err(Error::TlsError),
|
||||
};
|
||||
|
||||
let config = match rustls::ServerConfig::builder()
|
||||
.with_safe_defaults()
|
||||
.with_no_client_auth()
|
||||
.with_single_cert(certs, keys.remove(0))
|
||||
.map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidInput, err))
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(_) => return Err(Error::TlsError),
|
||||
};
|
||||
|
||||
Ok(Tls {
|
||||
acceptor: TlsAcceptor::from(Arc::new(config)),
|
||||
})
|
||||
}
|
||||
}
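A hedged sketch of how the acceptor above might be used on an accepted client socket; `listener` is an assumed `tokio::net::TcpListener` and is not part of this file, while `TlsAcceptor::accept` is the tokio-rustls handshake call.

// Illustrative sketch only; error handling is simplified.
let tls = Tls::new().expect("tls_certificate/tls_private_key missing or invalid in the config");
let (stream, _addr) = listener.accept().await.expect("accept failed"); // `listener` is an assumption
let mut tls_stream = tls.acceptor.accept(stream).await.expect("TLS handshake failed");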
|
||||
39
tests/pgbench/simple.sql
Normal file
@@ -0,0 +1,39 @@
|
||||
|
||||
-- \setrandom aid 1 :naccounts
|
||||
\set aid random(1, 100000)
|
||||
-- \setrandom bid 1 :nbranches
|
||||
\set bid random(1, 100000)
|
||||
-- \setrandom tid 1 :ntellers
|
||||
\set tid random(1, 100000)
|
||||
-- \setrandom delta -5000 5000
|
||||
\set delta random(-5000,5000)
|
||||
|
||||
\set shard random(0, 2)
|
||||
|
||||
SET SHARD TO :shard;
|
||||
|
||||
SET SERVER ROLE TO 'auto';
|
||||
|
||||
BEGIN;
|
||||
|
||||
UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;
|
||||
|
||||
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
|
||||
|
||||
UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;
|
||||
|
||||
UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;
|
||||
|
||||
INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);
|
||||
|
||||
END;
|
||||
|
||||
SET SHARDING KEY TO :aid;
|
||||
|
||||
-- Read load balancing
|
||||
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
|
||||
-- Read load balancing
|
||||
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
|
||||
@@ -1 +1,2 @@
|
||||
psycopg2==2.9.3
|
||||
psutil==5.9.1
|
||||
@@ -1,11 +1,252 @@
|
||||
from typing import Tuple
|
||||
import psycopg2
|
||||
import psutil
|
||||
import os
|
||||
import signal
|
||||
import time
|
||||
|
||||
conn = psycopg2.connect("postgres://random:password@127.0.0.1:6432/db")
|
||||
cur = conn.cursor()
|
||||
SHUTDOWN_TIMEOUT = 5
|
||||
|
||||
cur.execute("SELECT 1");
|
||||
res = cur.fetchall()
|
||||
PGCAT_HOST = "127.0.0.1"
|
||||
PGCAT_PORT = "6432"
|
||||
|
||||
print(res)
|
||||
|
||||
# conn.commit()
|
||||
def pgcat_start():
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
os.system("./target/debug/pgcat .circleci/pgcat.toml &")
|
||||
time.sleep(2)
|
||||
|
||||
|
||||
def pg_cat_send_signal(signal: signal.Signals):
|
||||
for proc in psutil.process_iter(["pid", "name"]):
|
||||
if "pgcat" == proc.name():
|
||||
os.kill(proc.pid, signal)
|
||||
if signal == signal.SIGTERM:
|
||||
# Returns 0 if pgcat process exists
|
||||
time.sleep(2)
|
||||
if not os.system('pgrep pgcat'):
|
||||
raise Exception("pgcat not closed after SIGTERM")
|
||||
|
||||
|
||||
def connect_db(
|
||||
autocommit: bool = True,
|
||||
admin: bool = False,
|
||||
) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:
|
||||
|
||||
if admin:
|
||||
user = "admin_user"
|
||||
password = "admin_pass"
|
||||
db = "pgcat"
|
||||
else:
|
||||
user = "sharding_user"
|
||||
password = "sharding_user"
|
||||
db = "sharded_db"
|
||||
|
||||
conn = psycopg2.connect(
|
||||
f"postgres://{user}:{password}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat",
|
||||
connect_timeout=2,
|
||||
)
|
||||
conn.autocommit = autocommit
|
||||
cur = conn.cursor()
|
||||
|
||||
return (conn, cur)
|
||||
|
||||
|
||||
def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.cursor):
|
||||
cur.close()
|
||||
conn.close()
|
||||
|
||||
|
||||
def test_normal_db_access():
|
||||
conn, cur = connect_db(autocommit=False)
|
||||
cur.execute("SELECT 1")
|
||||
res = cur.fetchall()
|
||||
print(res)
|
||||
cleanup_conn(conn, cur)
|
||||
|
||||
|
||||
def test_admin_db_access():
|
||||
conn, cur = connect_db(admin=True)
|
||||
|
||||
cur.execute("SHOW POOLS")
|
||||
res = cur.fetchall()
|
||||
print(res)
|
||||
cleanup_conn(conn, cur)
|
||||
|
||||
|
||||
def test_shutdown_logic():
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
# NO ACTIVE QUERIES SIGINT HANDLING
|
||||
|
||||
# Start pgcat
|
||||
pgcat_start()
|
||||
|
||||
# Create client connection and send query (not in transaction)
|
||||
conn, cur = connect_db()
|
||||
|
||||
cur.execute("BEGIN;")
|
||||
cur.execute("SELECT 1;")
|
||||
cur.execute("COMMIT;")
|
||||
|
||||
# Send sigint to pgcat
|
||||
pg_cat_send_signal(signal.SIGINT)
|
||||
time.sleep(1)
|
||||
|
||||
# Check that any new queries fail after sigint since server should close with no active transactions
|
||||
try:
|
||||
cur.execute("SELECT 1;")
|
||||
except psycopg2.OperationalError as e:
|
||||
pass
|
||||
else:
|
||||
# Fail if query execution succeeded
|
||||
raise Exception("Server not closed after sigint")
|
||||
|
||||
cleanup_conn(conn, cur)
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
# HANDLE TRANSACTION WITH SIGINT
|
||||
|
||||
# Start pgcat
|
||||
pgcat_start()
|
||||
|
||||
# Create client connection and begin transaction
|
||||
conn, cur = connect_db()
|
||||
|
||||
cur.execute("BEGIN;")
|
||||
cur.execute("SELECT 1;")
|
||||
|
||||
# Send sigint to pgcat while still in transaction
|
||||
pg_cat_send_signal(signal.SIGINT)
|
||||
time.sleep(1)
|
||||
|
||||
# Check that any new queries succeed after sigint since server should still allow transaction to complete
|
||||
try:
|
||||
cur.execute("SELECT 1;")
|
||||
except psycopg2.OperationalError as e:
|
||||
# Fail if query fails since server closed
|
||||
raise Exception("Server closed while in transaction", e.pgerror)
|
||||
|
||||
cleanup_conn(conn, cur)
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
# NO NEW NON-ADMIN CONNECTIONS DURING SHUTDOWN
|
||||
# Start pgcat
|
||||
pgcat_start()
|
||||
|
||||
# Create client connection and begin transaction
|
||||
transaction_conn, transaction_cur = connect_db()
|
||||
|
||||
transaction_cur.execute("BEGIN;")
|
||||
transaction_cur.execute("SELECT 1;")
|
||||
|
||||
# Send sigint to pgcat while still in transaction
|
||||
pg_cat_send_signal(signal.SIGINT)
|
||||
time.sleep(1)
|
||||
|
||||
start = time.perf_counter()
|
||||
try:
|
||||
conn, cur = connect_db()
|
||||
cur.execute("SELECT 1;")
|
||||
cleanup_conn(conn, cur)
|
||||
except psycopg2.OperationalError as e:
|
||||
time_taken = time.perf_counter() - start
|
||||
if time_taken > 0.1:
|
||||
raise Exception(
|
||||
"Failed to reject connection within 0.1 seconds, got", time_taken, "seconds")
|
||||
pass
|
||||
else:
|
||||
raise Exception("Able connect to database during shutdown")
|
||||
|
||||
cleanup_conn(transaction_conn, transaction_cur)
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
# ALLOW NEW ADMIN CONNECTIONS DURING SHUTDOWN
|
||||
# Start pgcat
|
||||
pgcat_start()
|
||||
|
||||
# Create client connection and begin transaction
|
||||
transaction_conn, transaction_cur = connect_db()
|
||||
|
||||
transaction_cur.execute("BEGIN;")
|
||||
transaction_cur.execute("SELECT 1;")
|
||||
|
||||
# Send sigint to pgcat while still in transaction
|
||||
pg_cat_send_signal(signal.SIGINT)
|
||||
time.sleep(1)
|
||||
|
||||
try:
|
||||
conn, cur = connect_db(admin=True)
|
||||
cur.execute("SHOW DATABASES;")
|
||||
cleanup_conn(conn, cur)
|
||||
except psycopg2.OperationalError as e:
|
||||
raise Exception(e)
|
||||
|
||||
cleanup_conn(transaction_conn, transaction_cur)
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
# ADMIN CONNECTIONS CONTINUING TO WORK AFTER SHUTDOWN
|
||||
# Start pgcat
|
||||
pgcat_start()
|
||||
|
||||
# Create client connection and begin transaction
|
||||
transaction_conn, transaction_cur = connect_db()
|
||||
transaction_cur.execute("BEGIN;")
|
||||
transaction_cur.execute("SELECT 1;")
|
||||
|
||||
admin_conn, admin_cur = connect_db(admin=True)
|
||||
admin_cur.execute("SHOW DATABASES;")
|
||||
|
||||
# Send sigint to pgcat while still in transaction
|
||||
pg_cat_send_signal(signal.SIGINT)
|
||||
time.sleep(1)
|
||||
|
||||
try:
|
||||
admin_cur.execute("SHOW DATABASES;")
|
||||
except psycopg2.OperationalError as e:
|
||||
raise Exception("Could not execute admin command:", e)
|
||||
|
||||
cleanup_conn(transaction_conn, transaction_cur)
|
||||
cleanup_conn(admin_conn, admin_cur)
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
# HANDLE SHUTDOWN TIMEOUT WITH SIGINT
|
||||
|
||||
# Start pgcat
|
||||
pgcat_start()
|
||||
|
||||
# Create client connection and begin transaction, which should prevent server shutdown unless shutdown timeout is reached
|
||||
conn, cur = connect_db()
|
||||
|
||||
cur.execute("BEGIN;")
|
||||
cur.execute("SELECT 1;")
|
||||
|
||||
# Send sigint to pgcat while still in transaction
|
||||
pg_cat_send_signal(signal.SIGINT)
|
||||
|
||||
# pgcat shutdown timeout is set to SHUTDOWN_TIMEOUT seconds, so we sleep for SHUTDOWN_TIMEOUT + 1 seconds
|
||||
time.sleep(SHUTDOWN_TIMEOUT + 1)
|
||||
|
||||
# Check that any new queries succeed after sigint since server should still allow transaction to complete
|
||||
try:
|
||||
cur.execute("SELECT 1;")
|
||||
except psycopg2.OperationalError as e:
|
||||
pass
|
||||
else:
|
||||
# Fail if query execution succeeded
|
||||
raise Exception("Server not closed after sigint and expected timeout")
|
||||
|
||||
cleanup_conn(conn, cur)
|
||||
pg_cat_send_signal(signal.SIGTERM)
|
||||
|
||||
# - - - - - - - - - - - - - - - - - -
|
||||
|
||||
|
||||
test_normal_db_access()
|
||||
test_admin_db_access()
|
||||
test_shutdown_logic()
|
||||
|
||||
2
tests/ruby/.ruby-version
Normal file
@@ -0,0 +1,2 @@
|
||||
3.0.0
|
||||
|
||||
6
tests/ruby/Gemfile
Normal file
@@ -0,0 +1,6 @@
|
||||
source "https://rubygems.org"
|
||||
|
||||
gem "pg"
|
||||
gem "activerecord"
|
||||
gem "rubocop"
|
||||
gem "toml", "~> 0.3.0"
|
||||
56
tests/ruby/Gemfile.lock
Normal file
@@ -0,0 +1,56 @@
|
||||
GEM
|
||||
remote: https://rubygems.org/
|
||||
specs:
|
||||
activemodel (7.0.3.1)
|
||||
activesupport (= 7.0.3.1)
|
||||
activerecord (7.0.3.1)
|
||||
activemodel (= 7.0.3.1)
|
||||
activesupport (= 7.0.3.1)
|
||||
activesupport (7.0.3.1)
|
||||
concurrent-ruby (~> 1.0, >= 1.0.2)
|
||||
i18n (>= 1.6, < 2)
|
||||
minitest (>= 5.1)
|
||||
tzinfo (~> 2.0)
|
||||
ast (2.4.2)
|
||||
concurrent-ruby (1.1.10)
|
||||
i18n (1.11.0)
|
||||
concurrent-ruby (~> 1.0)
|
||||
minitest (5.16.2)
|
||||
parallel (1.22.1)
|
||||
parser (3.1.2.0)
|
||||
ast (~> 2.4.1)
|
||||
parslet (2.0.0)
|
||||
pg (1.3.2)
|
||||
rainbow (3.1.1)
|
||||
regexp_parser (2.3.1)
|
||||
rexml (3.2.5)
|
||||
rubocop (1.29.0)
|
||||
parallel (~> 1.10)
|
||||
parser (>= 3.1.0.0)
|
||||
rainbow (>= 2.2.2, < 4.0)
|
||||
regexp_parser (>= 1.8, < 3.0)
|
||||
rexml (>= 3.2.5, < 4.0)
|
||||
rubocop-ast (>= 1.17.0, < 2.0)
|
||||
ruby-progressbar (~> 1.7)
|
||||
unicode-display_width (>= 1.4.0, < 3.0)
|
||||
rubocop-ast (1.17.0)
|
||||
parser (>= 3.1.1.0)
|
||||
ruby-progressbar (1.11.0)
|
||||
toml (0.3.0)
|
||||
parslet (>= 1.8.0, < 3.0.0)
|
||||
tzinfo (2.0.4)
|
||||
concurrent-ruby (~> 1.0)
|
||||
unicode-display_width (2.1.0)
|
||||
|
||||
PLATFORMS
|
||||
arm64-darwin-21
|
||||
x86_64-linux
|
||||
|
||||
DEPENDENCIES
|
||||
activerecord
|
||||
pg
|
||||
rubocop
|
||||
toml (~> 0.3.0)
|
||||
|
||||
BUNDLED WITH
|
||||
2.3.7
|
||||
@@ -1,11 +1,265 @@
|
||||
# frozen_string_literal: true
|
||||
|
||||
require 'active_record'
|
||||
require 'pg'
|
||||
require 'toml'
|
||||
|
||||
conn = PG.connect(host: '127.0.0.1', port: 5433, dbname: 'test')
|
||||
$stdout.sync = true
|
||||
$stderr.sync = true
|
||||
|
||||
conn.exec( "SELECT * FROM pg_stat_activity" ) do |result|
|
||||
puts " PID | User | Query"
|
||||
result.each do |row|
|
||||
puts " %7d | %-16s | %s " %
|
||||
row.values_at('pid', 'usename', 'query')
|
||||
class ConfigEditor
|
||||
def initialize
|
||||
@original_config_text = File.read('../../.circleci/pgcat.toml')
|
||||
text_to_load = @original_config_text.gsub("5432", "\"5432\"")
|
||||
|
||||
@original_configs = TOML.load(text_to_load)
|
||||
end
|
||||
end
|
||||
|
||||
def original_configs
|
||||
TOML.load(TOML::Generator.new(@original_configs).body)
|
||||
end
|
||||
|
||||
def with_modified_configs(new_configs)
|
||||
text_to_write = TOML::Generator.new(new_configs).body
|
||||
text_to_write = text_to_write.gsub("\"5432\"", "5432")
|
||||
File.write('../../.circleci/pgcat.toml', text_to_write)
|
||||
yield
|
||||
ensure
|
||||
File.write('../../.circleci/pgcat.toml', @original_config_text)
|
||||
end
|
||||
end
|
||||
|
||||
def with_captured_stdout_stderr
|
||||
sout = STDOUT.clone
|
||||
serr = STDERR.clone
|
||||
STDOUT.reopen("/tmp/out.txt", "w+")
|
||||
STDERR.reopen("/tmp/err.txt", "w+")
|
||||
STDOUT.sync = true
|
||||
STDERR.sync = true
|
||||
yield
|
||||
return File.read('/tmp/out.txt'), File.read('/tmp/err.txt')
|
||||
ensure
|
||||
STDOUT.reopen(sout)
|
||||
STDERR.reopen(serr)
|
||||
end
|
||||
|
||||
|
||||
def test_extended_protocol_pooler_errors
|
||||
admin_conn = PG::connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")
|
||||
|
||||
conf_editor = ConfigEditor.new
|
||||
new_configs = conf_editor.original_configs
|
||||
|
||||
# shorter timeouts
|
||||
new_configs["general"]["connect_timeout"] = 500
|
||||
new_configs["general"]["ban_time"] = 1
|
||||
new_configs["general"]["shutdown_timeout"] = 1
|
||||
new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = 1
|
||||
new_configs["pools"]["sharded_db"]["users"]["1"]["pool_size"] = 1
|
||||
|
||||
conf_editor.with_modified_configs(new_configs) { admin_conn.async_exec("RELOAD") }
|
||||
|
||||
conn_str = "postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db"
|
||||
10.times do
|
||||
Thread.new do
|
||||
conn = PG::connect(conn_str)
|
||||
conn.async_exec("SELECT pg_sleep(5)") rescue PG::SystemError
|
||||
ensure
|
||||
conn&.close
|
||||
end
|
||||
end
|
||||
|
||||
sleep(0.5)
|
||||
conn_under_test = PG::connect(conn_str)
|
||||
stdout, stderr = with_captured_stdout_stderr do
|
||||
5.times do |i|
|
||||
conn_under_test.async_exec("SELECT 1") rescue PG::SystemError
|
||||
conn_under_test.exec_params("SELECT #{i} + $1", [i]) rescue PG::SystemError
|
||||
sleep 1
|
||||
end
|
||||
end
|
||||
|
||||
raise StandardError, "Libpq got unexpected messages while idle" if stderr.include?("arrived from server while idle")
|
||||
puts "Pool checkout errors not breaking clients passed"
|
||||
ensure
|
||||
sleep 1
|
||||
admin_conn.async_exec("RELOAD") # Reset state
|
||||
conn_under_test&.close
|
||||
end
|
||||
|
||||
test_extended_protocol_pooler_errors
|
||||
|
||||
# Uncomment these two to see all queries.
|
||||
# ActiveRecord.verbose_query_logs = true
|
||||
# ActiveRecord::Base.logger = Logger.new(STDOUT)
|
||||
|
||||
ActiveRecord::Base.establish_connection(
|
||||
adapter: 'postgresql',
|
||||
host: '127.0.0.1',
|
||||
port: 6432,
|
||||
username: 'sharding_user',
|
||||
password: 'sharding_user',
|
||||
database: 'sharded_db',
|
||||
application_name: 'testing_pgcat',
|
||||
prepared_statements: false, # Transaction mode
|
||||
advisory_locks: false # Same
|
||||
)
|
||||
|
||||
class TestSafeTable < ActiveRecord::Base
|
||||
self.table_name = 'test_safe_table'
|
||||
end
|
||||
|
||||
class ShouldNeverHappenException < RuntimeError
|
||||
end
|
||||
|
||||
class CreateSafeShardedTable < ActiveRecord::Migration[7.0]
|
||||
# Disable transactions or things will fly out of order!
|
||||
disable_ddl_transaction!
|
||||
|
||||
SHARDS = 3
|
||||
|
||||
def up
|
||||
SHARDS.times do |x|
|
||||
# This will make this migration reversible!
|
||||
connection.execute "SET SHARD TO '#{x.to_i}'"
|
||||
connection.execute "SET SERVER ROLE TO 'primary'"
|
||||
|
||||
connection.execute <<-SQL
|
||||
CREATE TABLE test_safe_table (
|
||||
id BIGINT PRIMARY KEY,
|
||||
name VARCHAR,
|
||||
description TEXT
|
||||
) PARTITION BY HASH (id);
|
||||
|
||||
CREATE TABLE test_safe_table_data PARTITION OF test_safe_table
|
||||
FOR VALUES WITH (MODULUS #{SHARDS.to_i}, REMAINDER #{x.to_i});
|
||||
SQL
|
||||
end
|
||||
end
|
||||
|
||||
def down
|
||||
SHARDS.times do |x|
|
||||
connection.execute "SET SHARD TO '#{x.to_i}'"
|
||||
connection.execute "SET SERVER ROLE TO 'primary'"
|
||||
connection.execute 'DROP TABLE test_safe_table CASCADE'
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
SHARDS = 3
|
||||
|
||||
2.times do
|
||||
begin
|
||||
CreateSafeShardedTable.migrate(:down)
|
||||
rescue Exception
|
||||
puts "Tables don't exist yet"
|
||||
end
|
||||
|
||||
CreateSafeShardedTable.migrate(:up)
|
||||
|
||||
SHARDS.times do |x|
|
||||
TestSafeTable.connection.execute "SET SHARD TO '#{x.to_i}'"
|
||||
TestSafeTable.connection.execute "SET SERVER ROLE TO 'primary'"
|
||||
TestSafeTable.connection.execute "TRUNCATE #{TestSafeTable.table_name}"
|
||||
end
|
||||
|
||||
# Equivalent to Makara's stick_to_master! except it sticks until it's changed.
|
||||
TestSafeTable.connection.execute "SET SERVER ROLE TO 'primary'"
|
||||
|
||||
200.times do |x|
|
||||
x += 1 # Postgres ids start at 1
|
||||
TestSafeTable.connection.execute "SET SHARDING KEY TO '#{x.to_i}'"
|
||||
TestSafeTable.create(id: x, name: "something_special_#{x.to_i}", description: "It's a surprise!")
|
||||
end
|
||||
|
||||
TestSafeTable.connection.execute "SET SERVER ROLE TO 'replica'"
|
||||
|
||||
100.times do |x|
|
||||
x += 1 # 0 confuses our sharding function
|
||||
TestSafeTable.connection.execute "SET SHARDING KEY TO '#{x.to_i}'"
|
||||
TestSafeTable.find_by_id(x).id
|
||||
end
|
||||
|
||||
# Will use the query parser to direct reads to replicas
|
||||
TestSafeTable.connection.execute "SET SERVER ROLE TO 'auto'"
|
||||
|
||||
100.times do |x|
|
||||
x += 101
|
||||
TestSafeTable.connection.execute "SET SHARDING KEY TO '#{x.to_i}'"
|
||||
TestSafeTable.find_by_id(x).id
|
||||
end
|
||||
end
|
||||
|
||||
# Test wrong shard
|
||||
TestSafeTable.connection.execute "SET SHARD TO '1'"
|
||||
begin
|
||||
TestSafeTable.create(id: 5, name: 'test', description: 'test description')
|
||||
raise ShouldNeverHappenException, 'Uh oh'
|
||||
rescue ActiveRecord::StatementInvalid
|
||||
puts 'OK'
|
||||
end
|
||||
|
||||
# Test evil clients
|
||||
def poorly_behaved_client
|
||||
conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
|
||||
conn.async_exec 'BEGIN'
|
||||
conn.async_exec 'SELECT 1'
|
||||
|
||||
conn.close
|
||||
puts 'Bad client ok'
|
||||
end
|
||||
|
||||
25.times do
|
||||
poorly_behaved_client
|
||||
end
|
||||
|
||||
|
||||
def test_server_parameters
|
||||
server_conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
|
||||
raise StandardError, "Bad server version" if server_conn.server_version == 0
|
||||
server_conn.close
|
||||
|
||||
admin_conn = PG::connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")
|
||||
raise StandardError, "Bad server version" if admin_conn.server_version == 0
|
||||
admin_conn.close
|
||||
|
||||
puts 'Server parameters ok'
|
||||
end
|
||||
|
||||
|
||||
def test_reload_pool_recycling
|
||||
admin_conn = PG::connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")
|
||||
server_conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
|
||||
|
||||
server_conn.async_exec("BEGIN")
|
||||
conf_editor = ConfigEditor.new
|
||||
new_configs = conf_editor.original_configs
|
||||
|
||||
# swap shards
|
||||
new_configs["pools"]["sharded_db"]["shards"]["0"]["database"] = "shard1"
|
||||
new_configs["pools"]["sharded_db"]["shards"]["1"]["database"] = "shard0"
|
||||
|
||||
raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard0'
|
||||
conf_editor.with_modified_configs(new_configs) { admin_conn.async_exec("RELOAD") }
|
||||
raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard0'
|
||||
server_conn.async_exec("COMMIT;")
|
||||
|
||||
# Transaction finished, client should get new configs
|
||||
raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard1'
|
||||
server_conn.close()
|
||||
|
||||
# New connection should get new configs
|
||||
server_conn = PG::connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db?application_name=testing_pgcat")
|
||||
raise StandardError if server_conn.async_exec("SELECT current_database();")[0]["current_database"] != 'shard1'
|
||||
|
||||
ensure
|
||||
admin_conn.async_exec("RELOAD") # Go back to old state
|
||||
admin_conn.close
|
||||
server_conn.close
|
||||
puts "Pool Recycling okay!"
|
||||
end
|
||||
|
||||
test_reload_pool_recycling
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -23,4 +23,4 @@ SELECT * FROM shard_0 ORDER BY id LIMIT 10;
|
||||
SELECT * FROM shard_1 ORDER BY id LIMIT 10;
|
||||
SELECT * FROM shard_2 ORDER BY id LIMIT 10;
|
||||
SELECT * FROM shard_3 ORDER BY id LIMIT 10;
|
||||
SELECT * FROM shard_4 ORDER BY id LIMIT 10;
|
||||
SELECT * FROM shard_4 ORDER BY id LIMIT 10;
|
||||
|
||||
@@ -1,12 +1,19 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Setup all the shards.
|
||||
sudo service postgresql restart
|
||||
# sudo service postgresql restart
|
||||
|
||||
psql -f query_routing_setup.sql
|
||||
echo "Giving Postgres 5 seconds to start up..."
|
||||
|
||||
# sleep 5
|
||||
|
||||
# psql -f query_routing_setup.sql
|
||||
|
||||
psql -h 127.0.0.1 -p 6432 -f query_routing_test_insert.sql
|
||||
|
||||
psql -h 127.0.0.1 -p 6432 -f query_routing_test_select.sql
|
||||
|
||||
psql -f query_routing_test_validate.sql
|
||||
psql -e -h 127.0.0.1 -p 6432 -f query_routing_test_primary_replica.sql
|
||||
|
||||
psql -f query_routing_test_validate.sql
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
|
||||
DROP DATABASE IF EXISTS shard0;
|
||||
DROP DATABASE IF EXISTS shard1;
|
||||
DROP DATABASE IF EXISTS shard2;
|
||||
DROP DATABASE IF EXISTS some_db;
|
||||
|
||||
CREATE DATABASE shard0;
|
||||
CREATE DATABASE shard1;
|
||||
CREATE DATABASE shard2;
|
||||
CREATE DATABASE some_db;
|
||||
|
||||
\c shard0
|
||||
|
||||
@@ -41,21 +42,51 @@ CREATE TABLE data (
|
||||
|
||||
CREATE TABLE data_shard_2 PARTITION OF data FOR VALUES WITH (MODULUS 3, REMAINDER 2);
|
||||
|
||||
DROP ROLE IF EXISTS sharding_user;
|
||||
CREATE ROLE sharding_user ENCRYPTED PASSWORD 'sharding_user' LOGIN;
|
||||
|
||||
GRANT CONNECT ON DATABASE shard0 TO sharding_user;
|
||||
GRANT CONNECT ON DATABASE shard1 TO sharding_user;
|
||||
GRANT CONNECT ON DATABASE shard2 TO sharding_user;
|
||||
\c some_db
|
||||
|
||||
DROP TABLE IF EXISTS data CASCADE;
|
||||
|
||||
CREATE TABLE data (
|
||||
id BIGINT,
|
||||
value VARCHAR
|
||||
);
|
||||
|
||||
DROP ROLE IF EXISTS sharding_user;
|
||||
DROP ROLE IF EXISTS other_user;
|
||||
DROP ROLE IF EXISTS simple_user;
|
||||
CREATE ROLE sharding_user ENCRYPTED PASSWORD 'sharding_user' LOGIN;
|
||||
CREATE ROLE other_user ENCRYPTED PASSWORD 'other_user' LOGIN;
|
||||
CREATE ROLE simple_user ENCRYPTED PASSWORD 'simple_user' LOGIN;
|
||||
|
||||
GRANT CONNECT ON DATABASE shard0 TO sharding_user;
|
||||
GRANT CONNECT ON DATABASE shard1 TO sharding_user;
|
||||
GRANT CONNECT ON DATABASE shard2 TO sharding_user;
|
||||
|
||||
GRANT CONNECT ON DATABASE shard0 TO other_user;
|
||||
GRANT CONNECT ON DATABASE shard1 TO other_user;
|
||||
GRANT CONNECT ON DATABASE shard2 TO other_user;
|
||||
|
||||
GRANT CONNECT ON DATABASE some_db TO simple_user;
|
||||
|
||||
\c shard0
|
||||
GRANT ALL ON SCHEMA public TO sharding_user;
|
||||
GRANT ALL ON TABLE data TO sharding_user;
|
||||
GRANT ALL ON SCHEMA public TO other_user;
|
||||
GRANT ALL ON TABLE data TO other_user;
|
||||
|
||||
\c shard1
|
||||
GRANT ALL ON SCHEMA public TO sharding_user;
|
||||
GRANT ALL ON TABLE data TO sharding_user;
|
||||
GRANT ALL ON SCHEMA public TO other_user;
|
||||
GRANT ALL ON TABLE data TO other_user;
|
||||
|
||||
\c shard2
|
||||
GRANT ALL ON SCHEMA public TO sharding_user;
|
||||
GRANT ALL ON TABLE data TO sharding_user;
|
||||
GRANT ALL ON TABLE data TO sharding_user;
|
||||
GRANT ALL ON SCHEMA public TO other_user;
|
||||
GRANT ALL ON TABLE data TO other_user;
|
||||
|
||||
\c some_db
|
||||
GRANT ALL ON SCHEMA public TO simple_user;
|
||||
GRANT ALL ON TABLE data TO simple_user;
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
\set ON_ERROR_STOP on
|
||||
|
||||
SET SHARDING KEY TO '1';
|
||||
INSERT INTO data (id, value) VALUES (1, 'value_1');
|
||||
|
||||
@@ -44,4 +46,10 @@ SET SHARDING KEY TO '15';
|
||||
INSERT INTO data (id, value) VALUES (15, 'value_1');
|
||||
|
||||
SET SHARDING KEY TO '16';
|
||||
INSERT INTO data (id, value) VALUES (16, 'value_1');
|
||||
INSERT INTO data (id, value) VALUES (16, 'value_1');
|
||||
|
||||
set sharding key to '17';
|
||||
INSERT INTO data (id, value) VALUES (17, 'value_1');
|
||||
|
||||
SeT SHaRDInG KeY to '18';
|
||||
INSERT INTO data (id, value) VALUES (18, 'value_1');
|
||||
|
||||
162
tests/sharding/query_routing_test_primary_replica.sql
Normal file
@@ -0,0 +1,162 @@
|
||||
\set ON_ERROR_STOP on
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '1';
|
||||
INSERT INTO data (id, value) VALUES (1, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '1';
|
||||
SELECT * FROM data WHERE id = 1;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '2';
|
||||
INSERT INTO data (id, value) VALUES (2, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '2';
|
||||
SELECT * FROM data WHERE id = 2;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '3';
|
||||
INSERT INTO data (id, value) VALUES (3, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '3';
|
||||
SELECT * FROM data WHERE id = 3;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '4';
|
||||
INSERT INTO data (id, value) VALUES (4, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '4';
|
||||
SELECT * FROM data WHERE id = 4;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '5';
|
||||
INSERT INTO data (id, value) VALUES (5, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '5';
|
||||
SELECT * FROM data WHERE id = 5;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '6';
|
||||
INSERT INTO data (id, value) VALUES (6, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '6';
|
||||
SELECT * FROM data WHERE id = 6;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '7';
|
||||
INSERT INTO data (id, value) VALUES (7, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '7';
|
||||
SELECT * FROM data WHERE id = 7;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '8';
|
||||
INSERT INTO data (id, value) VALUES (8, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '8';
|
||||
SELECT * FROM data WHERE id = 8;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '9';
|
||||
INSERT INTO data (id, value) VALUES (9, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '9';
|
||||
SELECT * FROM data WHERE id = 9;
|
||||
|
||||
---
|
||||
|
||||
\set ON_ERROR_STOP on
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '10';
|
||||
INSERT INTO data (id, value) VALUES (10, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '10';
|
||||
SELECT * FROM data WHERE id = 10;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '11';
|
||||
INSERT INTO data (id, value) VALUES (11, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '11';
|
||||
SELECT * FROM data WHERE id = 11;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '12';
|
||||
INSERT INTO data (id, value) VALUES (12, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '12';
|
||||
SELECT * FROM data WHERE id = 12;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '13';
|
||||
INSERT INTO data (id, value) VALUES (13, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '13';
|
||||
SELECT * FROM data WHERE id = 13;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SET SHARDING KEY TO '14';
|
||||
INSERT INTO data (id, value) VALUES (14, 'value_1');
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SET SHARDING KEY TO '14';
|
||||
SELECT * FROM data WHERE id = 14;
|
||||
|
||||
---
|
||||
|
||||
SET SERVER ROLE TO 'primary';
|
||||
SELECT 1;
|
||||
|
||||
SET SERVER ROLE TO 'replica';
|
||||
SELECT 1;
|
||||
|
||||
set server role to 'replica';
|
||||
SeT SeRver Role TO 'PrImARY';
|
||||
select 1;
|
||||
|
||||
SET PRIMARY READS TO 'on';
|
||||
SELECT 1;
|
||||
|
||||
SET PRIMARY READS TO 'off';
|
||||
SELECT 1;
|
||||
|
||||
SET PRIMARY READS TO 'default';
|
||||
SELECT 1;
|
||||
@@ -1,3 +1,5 @@
|
||||
\set ON_ERROR_STOP on
|
||||
|
||||
SET SHARDING KEY TO '1';
|
||||
SELECT * FROM data WHERE id = 1;
|
||||
|
||||
@@ -44,4 +46,4 @@ SET SHARDING KEY TO '15';
|
||||
SELECT * FROM data WHERE id = 15;
|
||||
|
||||
SET SHARDING KEY TO '16';
|
||||
SELECT * FROM data WHERE id = 16;
|
||||
SELECT * FROM data WHERE id = 16;
|
||||
|
||||
@@ -8,4 +8,4 @@ SELECT * FROM data;
|
||||
|
||||
\c shard2
|
||||
|
||||
SELECT * FROM data;
|
||||
SELECT * FROM data;
|
||||
|
||||
Block a user