Mirror of https://github.com/postgresml/pgcat.git
Synced 2026-03-23 17:36:28 +00:00

Compare commits: levkk-asyn...circleci_g (156 commits)
Commits (SHA1):

2f8eb58bdb 328c703e4a da044e8fa5 3796e26402 0ee59c0c40 b61d2cc6f0 c11418c083 c9544bdff2 cdcfa99fb9 f27dc6b483
326efc22b3 01c6afb2e5 a68071dd28 c27d801abf 186e72298f 3935366d86 b575935b1d efbab1c333 9f12d7958e e6634ef461
dab2e58647 4aaa4378cf 670311daf9 b9ec7f8036 d91d23848b bbbc01a467 9bb71ede9d 88b2afb19b f0865ca616 7d047c6c19
f73d15f82c 69af6cc5e5 ca34597002 2def40ea6a c05129018d 4a7a6a8e7a 29a476e190 81933b918d 7cbc9178d8 2c8b2f0776
8f9a2b8e6f cbf4d58144 731aa047ba 88dbcc21d1 c34b15bddc 0b034a6831 966b8e093c c9270a47d4 0d94d0b90a 358724f7a9
e1e4929d43 dc4d6edf17 ec3920d60f 4c5498b915 0e8064b049 4dbef49ec9 bc07dc9c81 9b8166b313 e58d69f3de e76d720ffb
998cc16a3c 7c37da2fad b45c6b1d23 dae240d30c b52ea8e7f1 7d3003a16a d37df43a90 2c7bf52c17 de8df29ca4 c4fb72b9fc
3371c01e0e c2a483f36a 51cd13b8b5 a054b454d2 04e9814770 037d232fcd b2933762e7 df8aa888f9 7f5639c94a c0112f6f12
b7ceee2ddf 0b01d70b55 33db0dffa8 7994a661d9 9937193332 baa00ff546 ffe820497f be549f3faa 4301ab0606 5143500c9a
3255323bff bb27586758 4f0f45b576 f94ce97ebc 9ab128579d 1cde74f05e a4de6c1eb6 e14b283f0c 7c3c90c38e 2ca21b2bec
3986eaa4b2 1f2c6507f7 aefcf4281c 9d1c46a3e9 328108aeb5 4cf54a6122 2a8f3653a6 19cb8a3022 f85e5bd9e8 7bdb4e5cd9
5d87e3781e 3e08c6bd8d 15b6db8e4e b2e6dfd9bb 3c9565d351 67579c9af4 cf7f6f35ab 7205537b49 1ed6e925ed 4b78af9676
73500c0c96 b167de5aa3 473bb3d17d c7d6273037 94c781881f a8c81e5df6 1d3746ec9e b5489dc1e6 557b425fb1 aca9738821
0bc453a771 b67c33b6d0 a8a30ad43b d63be9b93a 100778670c 37e3349c24 7f57a89d75 0898461c01 52b1b43850 0907f1b77f
73260690b0 5056cbe8ed 571b02e178 159eb89bf0 389993bf3e ba5243b6dd 128ef72911 811885f464 d5e329fec5 09e54e1175
23819c8549 7dfbd993f2 3601130ba1 0d504032b2 4a87b4807d cb5ff40a59
@@ -9,7 +9,7 @@ jobs:
     # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
     # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
     docker:
-      - image: ghcr.io/levkk/pgcat-ci:1.67
+      - image: ghcr.io/postgresml/pgcat-ci:latest
     environment:
       RUST_LOG: info
       LLVM_PROFILE_FILE: /tmp/pgcat-%m-%p.profraw
@@ -63,6 +63,9 @@ jobs:
       - run:
           name: "Lint"
          command: "cargo fmt --check"
+      - run:
+          name: "Clippy"
+          command: "cargo clippy --all --all-targets -- -Dwarnings"
       - run:
           name: "Tests"
          command: "cargo clean && cargo build && cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"
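
The new Clippy step mirrors what contributors are asked to run locally. A minimal sketch of reproducing the full check sequence on a workstation, using the same commands as the CI steps above (the coverage script is CI-specific and omitted):

```sh
# Same commands as the CircleCI "Lint", "Clippy" and "Tests" steps.
cargo fmt --check                                 # formatting lint
cargo clippy --all --all-targets -- -Dwarnings    # fail on any clippy warning
cargo clean && cargo build && cargo test          # build and unit tests
bash .circleci/run_tests.sh                       # integration tests
```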
@@ -59,6 +59,7 @@ admin_password = "admin_pass"
 # session: one server connection per connected client
 # transaction: one server connection per client transaction
 pool_mode = "transaction"
+prepared_statements_cache_size = 500
 
 # If the client doesn't specify, route traffic to
 # this role by default.
@@ -74,6 +75,10 @@ default_role = "any"
 # we'll direct it to the primary.
 query_parser_enabled = true
 
+# If the query parser is enabled and this setting is enabled, we'll attempt to
+# infer the role from the query itself.
+query_parser_read_write_splitting = true
+
 # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
 # load balancing of read queries. Otherwise, the primary will only be used for write
 # queries. The primary can always be explicitely selected with our custom protocol.
@@ -134,8 +139,10 @@ database = "shard2"
 pool_mode = "session"
 default_role = "primary"
 query_parser_enabled = true
+query_parser_read_write_splitting = true
 primary_reads_enabled = true
 sharding_function = "pg_bigint_hash"
+prepared_statements_cache_size = 500
 
 [pools.simple_db.users.0]
 username = "simple_user"
@@ -26,6 +26,7 @@ PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i
 PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i
 
 # Start Toxiproxy
+kill -9 $(pgrep toxiproxy) || true
 LOG_LEVEL=error toxiproxy-server &
 sleep 1
 
@@ -106,13 +107,25 @@ cd ../..
 # These tests will start and stop the pgcat server so it will need to be restarted after the tests
 #
 pip3 install -r tests/python/requirements.txt
-python3 tests/python/tests.py || exit 1
+pytest || exit 1
 
 
+#
+# Go tests
+# Starts its own pgcat server
+#
+pushd tests/go
+/usr/local/go/bin/go test || exit 1
+popd
+
 start_pgcat "info"
 
-python3 tests/python/async_test.py
-start_pgcat "info"
+#
+# Rust tests
+#
+cd tests/rust
+cargo run
+cd ../../
 
 # Admin tests
 export PGPASSWORD=admin_pass
@@ -165,3 +178,6 @@ killall pgcat -s SIGINT
 
 # Allow for graceful shutdown
 sleep 1
+
+kill -9 $(pgrep toxiproxy)
+sleep 1
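
For reference, the per-suite commands the updated script now runs can also be invoked by hand; a sketch assuming the repo root as the working directory and the dependencies from the CI image:

```sh
# Python suite (the script switched from tests/python/tests.py to pytest).
pip3 install -r tests/python/requirements.txt
pytest

# Go suite: starts its own pgcat server.
pushd tests/go
/usr/local/go/bin/go test
popd

# Rust suite: expects pgcat to be running already (start_pgcat "info" in the script).
cd tests/rust && cargo run && cd ../..
```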
.github/dependabot.yml (vendored, +4)
@@ -10,3 +10,7 @@ updates:
     commit-message:
       prefix: "chore(deps)"
     open-pull-requests-limit: 10
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
.github/workflows/build-and-push.yaml (vendored, 26 lines changed)
@@ -1,6 +1,13 @@
 name: Build and Push
 
-on: push
+on:
+  push:
+    paths:
+      - '!charts/**.md'
+    branches:
+      - main
+    tags:
+      - v*
 
 env:
   registry: ghcr.io
@@ -16,33 +23,40 @@ jobs:
 
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
+
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
 
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3
 
       - name: Determine tags
         id: metadata
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: ${{ env.registry }}/${{ env.image-name }}
           tags: |
            type=sha,prefix=,format=long
            type=schedule
+           type=ref,event=tag
            type=ref,event=branch
            type=ref,event=pr
            type=raw,value=latest,enable={{ is_default_branch }}
 
       - name: Log in to the Container registry
-        uses: docker/login-action@v2.1.0
+        uses: docker/login-action@v3
         with:
           registry: ${{ env.registry }}
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
 
       - name: Build and push ${{ env.image-name }}
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: ${{ steps.metadata.outputs.tags }}
           labels: ${{ steps.metadata.outputs.labels }}
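
With QEMU and the extra platforms, the workflow now publishes multi-arch images tagged by long commit SHA, branch, tag, and `latest` on the default branch. A sketch of consuming one; the exact repository comes from the workflow's `image-name` env, which is not shown in this diff, so `ghcr.io/postgresml/pgcat` here is an assumption, and the version tag is hypothetical:

```sh
# Image name is an assumption; substitute the value of the workflow's image-name env.
docker pull ghcr.io/postgresml/pgcat:latest   # Docker selects linux/amd64 or linux/arm64 automatically
docker pull ghcr.io/postgresml/pgcat:v1.2.0   # tag pushes now also produce tag-named images (type=ref,event=tag)
```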
.github/workflows/chart-lint-test.yaml (new file, +50)
@@ -0,0 +1,50 @@
+name: Lint and Test Charts
+
+on:
+  pull_request:
+    paths:
+      - charts/**
+      - '!charts/**.md'
+jobs:
+  lint-test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3.1.0
+        with:
+          fetch-depth: 0
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v3
+        with:
+          version: v3.8.1
+
+      # Python is required because `ct lint` runs Yamale (https://github.com/23andMe/Yamale) and
+      # yamllint (https://github.com/adrienverge/yamllint) which require Python
+      - name: Set up Python
+        uses: actions/setup-python@v5.1.0
+        with:
+          python-version: 3.7
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2.2.1
+        with:
+          version: v3.5.1
+
+      - name: Run chart-testing (list-changed)
+        id: list-changed
+        run: |
+          changed=$(ct list-changed --config ct.yaml)
+          if [[ -n "$changed" ]]; then
+            echo "changed=true" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Run chart-testing (lint)
+        run: ct lint --config ct.yaml
+
+      - name: Create kind cluster
+        uses: helm/kind-action@v1.10.0
+        if: steps.list-changed.outputs.changed == 'true'
+
+      - name: Run chart-testing (install)
+        run: ct install --config ct.yaml
.github/workflows/chart-release.yaml (new file, +40)
@@ -0,0 +1,40 @@
+name: Release Charts
+
+on:
+  push:
+    paths:
+      - charts/**
+      - '!**.md'
+    branches:
+      - main
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+
+    permissions:
+      contents: write
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Install Helm
+        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
+        with:
+          version: v3.13.0
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0
+        with:
+          charts_dir: charts
+          config: cr.yaml
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/generate-chart-readme.yaml (new file, +48)
@@ -0,0 +1,48 @@
+name: '[CI/CD] Update README metadata'
+
+on:
+  pull_request_target:
+    branches:
+      - main
+    paths:
+      - 'charts/*/values.yaml'
+# Remove all permissions by default
+permissions: {}
+jobs:
+  update-readme-metadata:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - name: Install readme-generator-for-helm
+        run: npm install -g @bitnami/readme-generator-for-helm
+      - name: Checkout
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
+        with:
+          path: charts
+          ref: ${{github.event.pull_request.head.ref}}
+          repository: ${{github.event.pull_request.head.repo.full_name}}
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Execute readme-generator-for-helm
+        env:
+          DIFF_URL: "${{github.event.pull_request.diff_url}}"
+          TEMP_FILE: "${{runner.temp}}/pr-${{github.event.number}}.diff"
+        run: |
+          # This request doesn't consume API calls.
+          curl -Lkso $TEMP_FILE $DIFF_URL
+          files_changed="$(sed -nr 's/[\-\+]{3} [ab]\/(.*)/\1/p' $TEMP_FILE | sort | uniq)"
+          # Adding || true to avoid "Process exited with code 1" errors
+          charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "pgcat/[^/]*" | sort | uniq || true)"
+          for chart in ${charts_dirs_changed}; do
+            echo "Updating README.md for ${chart}"
+            readme-generator --values "charts/${chart}/values.yaml" --readme "charts/${chart}/README.md" --schema "/tmp/schema.json"
+          done
+      - name: Push changes
+        run: |
+          # Push all the changes
+          cd charts
+          if git status -s | grep pgcat; then
+            git config user.name "$GITHUB_ACTOR"
+            git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+            git add . && git commit -am "Update README.md with readme-generator-for-helm" --signoff && git push
+          fi
.github/workflows/publish-deb-package.yml (new file, +59)
@@ -0,0 +1,59 @@
+name: pgcat package (deb)
+
+on:
+  push:
+    tags:
+      - v*
+  workflow_dispatch:
+    inputs:
+      packageVersion:
+        default: "1.1.2-dev1"
+jobs:
+  build:
+    strategy:
+      max-parallel: 1
+      fail-fast: false # Let the other job finish, or they can lock each other out
+      matrix:
+        os: ["buildjet-4vcpu-ubuntu-2204", "buildjet-4vcpu-ubuntu-2204-arm"]
+
+    runs-on: ${{ matrix.os }}
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set package version
+        if: github.event_name == 'push' # For push event
+        run: |
+          TAG=${{ github.ref_name }}
+          echo "packageVersion=${TAG#v}" >> "$GITHUB_ENV"
+      - name: Set package version (manual dispatch)
+        if: github.event_name == 'workflow_dispatch' # For manual dispatch
+        run: echo "packageVersion=${{ github.event.inputs.packageVersion }}" >> "$GITHUB_ENV"
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+      - name: Install dependencies
+        env:
+          DEBIAN_FRONTEND: noninteractive
+          TZ: Etc/UTC
+        run: |
+          curl -sLO https://github.com/deb-s3/deb-s3/releases/download/0.11.4/deb-s3-0.11.4.gem
+          sudo gem install deb-s3-0.11.4.gem
+          dpkg-deb --version
+      - name: Build and release package
+        env:
+          AWS_ACCESS_KEY_ID: ${{ vars.AWS_ACCESS_KEY_ID }}
+          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          AWS_DEFAULT_REGION: ${{ vars.AWS_DEFAULT_REGION }}
+        run: |
+          if [[ $(arch) == "x86_64" ]]; then
+            export ARCH=amd64
+          else
+            export ARCH=arm64
+          fi
+
+          bash utilities/deb.sh ${{ env.packageVersion }}
+
+          deb-s3 upload \
+            --lock \
+            --bucket apt.postgresml.org \
+            pgcat-${{ env.packageVersion }}-ubuntu22.04-${ARCH}.deb \
+            --codename $(lsb_release -cs)
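
The packaging itself is driven by `utilities/deb.sh`; a rough local equivalent of the build step on an Ubuntu 22.04 host, leaving out the CI-only S3 upload (the version argument is illustrative):

```sh
# Mirrors the "Build and release package" step above, minus deb-s3.
if [[ $(arch) == "x86_64" ]]; then export ARCH=amd64; else export ARCH=arm64; fi
bash utilities/deb.sh 1.2.0                 # illustrative version number
ls pgcat-1.2.0-ubuntu22.04-${ARCH}.deb      # artifact name format used by the workflow
```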
.gitignore (vendored, +3)
@@ -10,3 +10,6 @@ lcov.info
 dev/.bash_history
 dev/cache
 !dev/cache/.keepme
+.venv
+**/__pycache__
+.bundle
CONFIG.md (140 lines changed)
@@ -1,4 +1,4 @@
 # PgCat Configurations
 ## `general` Section
 
 ### host
@@ -36,10 +36,11 @@ Port at which prometheus exporter listens on.
 ### connect_timeout
 ```
 path: general.connect_timeout
-default: 5000 # milliseconds
+default: 1000 # milliseconds
 ```
 
-How long to wait before aborting a server connection (ms).
+How long the client waits to obtain a server connection before aborting (ms).
+This is similar to PgBouncer's `query_wait_timeout`.
 
 ### idle_timeout
 ```
@@ -49,6 +50,46 @@ default: 30000 # milliseconds
 
 How long an idle connection with a server is left open (ms).
 
+### server_lifetime
+```
+path: general.server_lifetime
+default: 86400000 # 24 hours
+```
+
+Max connection lifetime before it's closed, even if actively used.
+
+### server_round_robin
+```
+path: general.server_round_robin
+default: false
+```
+
+Whether to use round robin for server selection or not.
+
+### server_tls
+```
+path: general.server_tls
+default: false
+```
+
+Whether to use TLS for server connections or not.
+
+### verify_server_certificate
+```
+path: general.verify_server_certificate
+default: false
+```
+
+Whether to verify server certificate or not.
+
+### verify_config
+```
+path: general.verify_config
+default: true
+```
+
+Whether to verify config or not.
+
 ### idle_client_in_transaction_timeout
 ```
 path: general.idle_client_in_transaction_timeout
@@ -108,10 +149,10 @@ If we should log client disconnections
 ### autoreload
 ```
 path: general.autoreload
-default: 15000
+default: 15000 # milliseconds
 ```
 
-When set to true, PgCat reloads configs if it detects a change in the config file.
+When set, PgCat automatically reloads its configurations at the specified interval (in milliseconds) if it detects changes in the configuration file. The default interval is 15000 milliseconds or 15 seconds.
 
 ### worker_threads
 ```
@@ -143,7 +184,13 @@ path: general.tcp_keepalives_interval
 default: 5
 ```
 
-Number of seconds between keepalive packets.
+### tcp_user_timeout
+```
+path: general.tcp_user_timeout
+default: 10000
+```
+A linux-only parameters that defines the amount of time in milliseconds that transmitted data may remain unacknowledged or buffered data may remain untransmitted (due to zero window size) before TCP will forcibly disconnect
+
 
 ### tls_certificate
 ```
@@ -180,6 +227,55 @@ default: "admin_pass"
 
 Password to access the virtual administrative database
 
+### auth_query
+```
+path: general.auth_query
+default: <UNSET>
+example: "SELECT $1"
+```
+
+Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
+established using the database configured in the pool. This parameter is inherited by every pool
+and can be redefined in pool configuration.
+
+### auth_query_user
+```
+path: general.auth_query_user
+default: <UNSET>
+example: "sharding_user"
+```
+
+User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
+specified in `auth_query_user`. The connection will be established using the database configured in the pool.
+This parameter is inherited by every pool and can be redefined in pool configuration.
+
+### auth_query_password
+```
+path: general.auth_query_password
+default: <UNSET>
+example: "sharding_user"
+```
+
+Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
+specified in `auth_query_user`. The connection will be established using the database configured in the pool.
+This parameter is inherited by every pool and can be redefined in pool configuration.
+
+### dns_cache_enabled
+```
+path: general.dns_cache_enabled
+default: false
+```
+When enabled, ip resolutions for server connections specified using hostnames will be cached
+and checked for changes every `dns_max_ttl` seconds. If a change in the host resolution is found
+old ip connections are closed (gracefully) and new connections will start using new ip.
+
+### dns_max_ttl
+```
+path: general.dns_max_ttl
+default: 30
+```
+Specifies how often (in seconds) cached ip addresses for servers are rechecked (see `dns_cache_enabled`).
+
 ## `pools.<pool_name>` Section
 
 ### pool_mode
@@ -200,7 +296,7 @@ default: "random"
 
 Load balancing mode
 `random` selects the server at random
-`loc` selects the server with the least outstanding busy conncetions
+`loc` selects the server with the least outstanding busy connections
 
 ### default_role
 ```
@@ -213,6 +309,15 @@ If the client doesn't specify, PgCat routes traffic to this role by default.
 `replica` round-robin between replicas only without touching the primary,
 `primary` all queries go to the primary unless otherwise specified.
 
+### prepared_statements_cache_size
+```
+path: general.prepared_statements_cache_size
+default: 0
+```
+
+Size of the prepared statements cache. 0 means disabled.
+TODO: update documentation
+
 ### query_parser_enabled
 ```
 path: pools.<pool_name>.query_parser_enabled
@@ -358,10 +463,18 @@ path: pools.<pool_name>.users.<user_index>.pool_size
 default: 9
 ```
 
-Maximum number of server connections that can be established for this user
+Maximum number of server connections that can be established for this user.
 The maximum number of connection from a single Pgcat process to any database in the cluster
 is the sum of pool_size across all users.
 
+### min_pool_size
+```
+path: pools.<pool_name>.users.<user_index>.min_pool_size
+default: 0
+```
+
+Minimum number of idle server connections to retain for this pool.
+
 ### statement_timeout
 ```
 path: pools.<pool_name>.users.<user_index>.statement_timeout
@@ -371,6 +484,16 @@ default: 0
 Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
 0 means it is disabled.
 
+### connect_timeout
+```
+path: pools.<pool_name>.users.<user_index>.connect_timeout
+default: <UNSET> # milliseconds
+```
+
+How long the client waits to obtain a server connection before aborting (ms).
+This is similar to PgBouncer's `query_wait_timeout`.
+If unset, uses the `connect_timeout` defined globally.
+
 ## `pools.<pool_name>.shards.<shard_index>` Section
 
 ### servers
@@ -398,4 +521,3 @@ default: "shard0"
 ```
 
 Database name (e.g. "postgres")
-
@@ -2,10 +2,36 @@
 
 Thank you for contributing! Just a few tips here:
 
-1. `cargo fmt` your code before opening up a PR
+1. `cargo fmt` and `cargo clippy` your code before opening up a PR
 2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`.
 3. Performance is important, make sure there are no regressions in your branch vs. `main`.
 
+## How to run the integration tests locally and iterate on them
+
+We have integration tests written in Ruby, Python, Go and Rust.
+Below are the steps to run them in a developer-friendly way that allows iterating and quick turnaround.
+Hear me out, this should be easy, it will involve opening a shell into a container with all the necessary dependancies available for you and you can modify the test code and immediately rerun your test in the interactive shell.
+
+Quite simply, make sure you have docker installed and then run
+`./start_test_env.sh`
+
+That is it!
+
+Within this test environment you can modify the file in your favorite IDE and rerun the tests without having to bootstrap the entire environment again.
+
+Once the environment is ready, you can run the tests by running
+Ruby: `cd /app/tests/ruby && bundle exec ruby <test_name>.rb --format documentation`
+Python: `cd /app/ && pytest`
+Rust: `cd /app/tests/rust && cargo run`
+Go: `cd /app/tests/go && /usr/local/go/bin/go test`
+
+You can also rebuild PgCat directly within the environment and the tests will run against the newly built binary
+To rebuild PgCat, just run `cargo build` within the container under `/app`
+
+
+
 Happy hacking!
 
 ## TODOs
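
Condensing the workflow above into one copy-pasteable sequence (commands taken verbatim from the new CONTRIBUTING text; `<test_name>` is a placeholder for an actual Ruby test file):

```sh
# On the host: build the test environment and open an interactive shell in it.
./start_test_env.sh

# Inside the container:
cd /app && cargo build                            # rebuild pgcat; suites run against the fresh binary
cd /app && pytest                                 # Python suite
cd /app/tests/ruby && bundle exec ruby <test_name>.rb --format documentation  # Ruby suite
cd /app/tests/rust && cargo run                   # Rust suite
cd /app/tests/go && /usr/local/go/bin/go test     # Go suite
```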
Cargo.lock (generated, 1293 lines changed): diff suppressed because it is too large.
Cargo.toml (28 lines changed)
@@ -1,6 +1,6 @@
 [package]
 name = "pgcat"
-version = "1.0.1"
+version = "1.2.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -8,21 +8,20 @@ edition = "2021"
 tokio = { version = "1", features = ["full"] }
 bytes = "1"
 md-5 = "0.10"
-bb8 = "0.8.0"
+bb8 = "=0.8.6"
 async-trait = "0.1"
 rand = "0.8"
 chrono = "0.4"
 sha-1 = "0.10"
 toml = "0.7"
-serde = "1"
+serde = { version = "1", features = ["derive"] }
 serde_derive = "1"
 regex = "1"
 num_cpus = "1"
 once_cell = "1"
-sqlparser = "0.33.0"
+sqlparser = { version = "0.52", features = ["visitor"] }
 log = "0.4"
 arc-swap = "1"
-env_logger = "0.10"
 parking_lot = "0.12.1"
 hmac = "0.12"
 sha2 = "0.10"
@@ -30,7 +29,9 @@ base64 = "0.21"
 stringprep = "0.1"
 tokio-rustls = "0.24"
 rustls-pemfile = "1"
-hyper = { version = "0.14", features = ["full"] }
+http-body-util = "0.1.2"
+hyper = { version = "1.4.1", features = ["full"] }
+hyper-util = { version = "0.1.7", features = ["tokio"] }
 phf = { version = "0.11.1", features = ["macros"] }
 exitcode = "1.1.2"
 futures = "0.3"
@@ -39,6 +40,21 @@ nix = "0.26.2"
 atomic_enum = "0.2.0"
 postgres-protocol = "0.6.5"
 fallible-iterator = "0.2"
+pin-project = "1"
+webpki-roots = "0.23"
+rustls = { version = "0.21", features = ["dangerous_configuration"] }
+trust-dns-resolver = "0.22.0"
+tokio-test = "0.4.2"
+serde_json = "1"
+itertools = "0.10"
+clap = { version = "4.3.1", features = ["derive", "env"] }
+tracing = "0.1.37"
+tracing-subscriber = { version = "0.3.17", features = [
+    "json",
+    "env-filter",
+    "std",
+] }
+lru = "0.12.0"
 
 [target.'cfg(not(target_env = "msvc"))'.dependencies]
 jemallocator = "0.5.0"
Dockerfile (15 lines changed)
@@ -1,11 +1,22 @@
-FROM rust:1 AS builder
+FROM rust:1.79.0-slim-bookworm AS builder
+
+RUN apt-get update && \
+    apt-get install -y build-essential
+
 COPY . /app
 WORKDIR /app
 RUN cargo build --release
 
-FROM debian:bullseye-slim
+FROM debian:bookworm-slim
+RUN apt-get update && apt-get install -o Dpkg::Options::=--force-confdef -yq --no-install-recommends \
+    postgresql-client \
+    # Clean up layer
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
+    && truncate -s 0 /var/log/*log
 COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
 COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
 WORKDIR /etc/pgcat
 ENV RUST_LOG=info
 CMD ["pgcat"]
+STOPSIGNAL SIGINT
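
A sketch of building and running the production image defined above; the config mount is an assumption about how one would supply a real `pgcat.toml` over the bundled example:

```sh
docker build -t pgcat .
# WORKDIR is /etc/pgcat, so pgcat reads /etc/pgcat/pgcat.toml; 6432 is the
# listen port used in the examples elsewhere in this changeset.
docker run --rm -p 6432:6432 -v "$(pwd)/pgcat.toml:/etc/pgcat/pgcat.toml" pgcat
```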
@@ -1,4 +1,6 @@
-FROM cimg/rust:1.67.1
+FROM cimg/rust:1.79.0
+COPY --from=sclevine/yj /bin/yj /bin/yj
+RUN /bin/yj -h
 RUN sudo apt-get update && \
     sudo apt-get install -y \
     psmisc postgresql-contrib-14 postgresql-client-14 libpq-dev \
@@ -7,6 +9,9 @@ RUN sudo apt-get update && \
     sudo apt-get upgrade curl && \
     cargo install cargo-binutils rustfilt && \
     rustup component add llvm-tools-preview && \
     pip3 install psycopg2 && sudo gem install bundler && \
     wget -O /tmp/toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
     sudo dpkg -i /tmp/toxiproxy-2.4.0.deb
+RUN wget -O /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
+    sudo tar -C /usr/local -xzf /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
+    rm /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz
Dockerfile.dev (new file, +25)
@@ -0,0 +1,25 @@
+FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
+
+RUN apt-get update && \
+    apt-get install -y build-essential
+
+WORKDIR /app
+
+FROM chef AS planner
+COPY . .
+RUN cargo chef prepare --recipe-path recipe.json
+
+FROM chef AS builder
+COPY --from=planner /app/recipe.json recipe.json
+# Build dependencies - this is the caching Docker layer!
+RUN cargo chef cook --release --recipe-path recipe.json
+# Build application
+COPY . .
+RUN cargo build
+
+FROM debian:bookworm-slim
+COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
+COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
+WORKDIR /etc/pgcat
+ENV RUST_LOG=info
+CMD ["pgcat"]
@@ -18,7 +18,7 @@ PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load bal
 | Failover | **Stable** | Queries are automatically rerouted around broken replicas, validated by regular health checks. |
 | Admin database statistics | **Stable** | Pooler statistics and administration via the `pgbouncer` and `pgcat` databases. |
 | Prometheus statistics | **Stable** | Statistics are reported via a HTTP endpoint for Prometheus. |
-| Client TLS | **Stable** | Clients can connect to the pooler using TLS/SSL. |
+| SSL/TLS | **Stable** | Clients can connect to the pooler using TLS. Pooler can connect to Postgres servers using TLS. |
 | Client/Server authentication | **Stable** | Clients can connect using MD5 authentication, supported by `libpq` and all Postgres client drivers. PgCat can connect to Postgres using MD5 and SCRAM-SHA-256. |
 | Live configuration reloading | **Stable** | Identical to PgBouncer; all settings can be reloaded dynamically (except `host` and `port`). |
 | Auth passthrough | **Stable** | MD5 password authentication can be configured to use an `auth_query` so no cleartext passwords are needed in the config file.|
@@ -40,7 +40,7 @@ PgCat is stable and used in production to serve hundreds of thousands of queries
 </a>
 </td>
 <td>
-<a href="https://postgresml.org/blog/scaling-postgresml-to-one-million-requests-per-second">
+<a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
 <img src="./images/postgresml.webp" height="70" width="auto">
 </a>
 </td>
@@ -57,7 +57,7 @@ PgCat is stable and used in production to serve hundreds of thousands of queries
 </a>
 </td>
 <td>
-<a href="https://postgresml.org/blog/scaling-postgresml-to-one-million-requests-per-second">
+<a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
 PostgresML
 </a>
 </td>
@@ -268,6 +268,8 @@ psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'
 
 Additionally, Prometheus statistics are available at `/metrics` via HTTP.
 
+We also have a [basic Grafana dashboard](https://github.com/postgresml/pgcat/blob/main/grafana_dashboard.json) based on Prometheus metrics that you can import into Grafana and build on it or use it for monitoring.
+
 ### Live configuration reloading
 
 The config can be reloaded by sending a `kill -s SIGHUP` to the process or by querying `RELOAD` to the admin database. All settings except the `host` and `port` can be reloaded without restarting the pooler, including sharding and replicas configurations.
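
Putting the admin pieces above together, a sketch of inspecting and reloading a running pooler; `admin_user` is an assumption (the example config in this changeset only shows `admin_password = "admin_pass"`):

```sh
# Admin console over the virtual pgbouncer database (SHOW DATABASES command from the README).
PGPASSWORD=admin_pass psql -h 127.0.0.1 -p 6432 -U admin_user -d pgbouncer -c 'SHOW DATABASES'
# Reload configuration, either through the admin database or via SIGHUP.
PGPASSWORD=admin_pass psql -h 127.0.0.1 -p 6432 -U admin_user -d pgbouncer -c 'RELOAD'
kill -s SIGHUP "$(pgrep pgcat)"
```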
charts/pgcat/.helmignore (new file, +23)
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
charts/pgcat/Chart.yaml (new file, +8)
@@ -0,0 +1,8 @@
+apiVersion: v2
+name: pgcat
+description: A Helm chart for PgCat a PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
+maintainers:
+  - name: PostgresML
+    email: team@postgresml.org
+appVersion: "1.2.0"
+version: 0.2.5
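
Given the chart files that follow, a minimal install sketch; `my-values.yaml` is a hypothetical values file supplying the `configuration.general` and `configuration.pools` keys that the secret template below renders into `pgcat.toml`:

```sh
helm lint charts/pgcat
helm install pgcat charts/pgcat --values my-values.yaml   # my-values.yaml is hypothetical
```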
charts/pgcat/templates/NOTES.txt (new file, +22)
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "pgcat.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "pgcat.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "pgcat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "pgcat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
charts/pgcat/templates/_config.tpl (new file, +3)
@@ -0,0 +1,3 @@
+{{/*
+Configuration template definition
+*/}}
charts/pgcat/templates/_helpers.tpl (new file, +62)
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "pgcat.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "pgcat.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "pgcat.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "pgcat.labels" -}}
+helm.sh/chart: {{ include "pgcat.chart" . }}
+{{ include "pgcat.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "pgcat.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "pgcat.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "pgcat.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "pgcat.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
charts/pgcat/templates/deployment.yaml (new file, +66)
@@ -0,0 +1,66 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "pgcat.fullname" . }}
+  labels:
+    {{- include "pgcat.labels" . | nindent 4 }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      {{- include "pgcat.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      labels:
+        {{- include "pgcat.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.image.pullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "pgcat.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.containerSecurityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: pgcat
+              containerPort: {{ .Values.configuration.general.port }}
+              protocol: TCP
+          livenessProbe:
+            tcpSocket:
+              port: pgcat
+          readinessProbe:
+            tcpSocket:
+              port: pgcat
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - mountPath: /etc/pgcat
+              name: config
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+        - secret:
+            defaultMode: 420
+            secretName: {{ include "pgcat.fullname" . }}
+          name: config
charts/pgcat/templates/ingress.yaml (new file, +61)
@@ -0,0 +1,61 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "pgcat.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "pgcat.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
charts/pgcat/templates/secret.yaml (new file, +97)
@@ -0,0 +1,97 @@
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "pgcat.fullname" . }}
+  labels:
+    {{- include "pgcat.labels" . | nindent 4 }}
+type: Opaque
+stringData:
+  pgcat.toml: |
+    [general]
+    host = {{ .Values.configuration.general.host | quote }}
+    port = {{ .Values.configuration.general.port }}
+    enable_prometheus_exporter = {{ .Values.configuration.general.enable_prometheus_exporter }}
+    prometheus_exporter_port = {{ .Values.configuration.general.prometheus_exporter_port }}
+    connect_timeout = {{ .Values.configuration.general.connect_timeout }}
+    idle_timeout = {{ .Values.configuration.general.idle_timeout | int }}
+    server_lifetime = {{ .Values.configuration.general.server_lifetime | int }}
+    server_tls = {{ .Values.configuration.general.server_tls }}
+    idle_client_in_transaction_timeout = {{ .Values.configuration.general.idle_client_in_transaction_timeout | int }}
+    healthcheck_timeout = {{ .Values.configuration.general.healthcheck_timeout }}
+    healthcheck_delay = {{ .Values.configuration.general.healthcheck_delay }}
+    shutdown_timeout = {{ .Values.configuration.general.shutdown_timeout }}
+    ban_time = {{ .Values.configuration.general.ban_time }}
+    log_client_connections = {{ .Values.configuration.general.log_client_connections }}
+    log_client_disconnections = {{ .Values.configuration.general.log_client_disconnections }}
+    tcp_keepalives_idle = {{ .Values.configuration.general.tcp_keepalives_idle }}
+    tcp_keepalives_count = {{ .Values.configuration.general.tcp_keepalives_count }}
+    tcp_keepalives_interval = {{ .Values.configuration.general.tcp_keepalives_interval }}
+    {{- if and (ne .Values.configuration.general.tls_certificate "-") (ne .Values.configuration.general.tls_private_key "-") }}
+    tls_certificate = "{{ .Values.configuration.general.tls_certificate }}"
+    tls_private_key = "{{ .Values.configuration.general.tls_private_key }}"
+    {{- end }}
+    admin_username = {{ .Values.configuration.general.admin_username | quote }}
+    admin_password = {{ .Values.configuration.general.admin_password | quote }}
+    {{- if and .Values.configuration.general.auth_query_user .Values.configuration.general.auth_query_password .Values.configuration.general.auth_query }}
+    auth_query = {{ .Values.configuration.general.auth_query | quote }}
+    auth_query_user = {{ .Values.configuration.general.auth_query_user | quote }}
+    auth_query_password = {{ .Values.configuration.general.auth_query_password | quote }}
+    {{- end }}
+
+    {{- range $pool := .Values.configuration.pools }}
+
+    ##
+    ## pool for {{ $pool.name }}
+    ##
+    [pools.{{ $pool.name | quote }}]
+    pool_mode = {{ default "transaction" $pool.pool_mode | quote }}
+    load_balancing_mode = {{ default "random" $pool.load_balancing_mode | quote }}
+    default_role = {{ default "any" $pool.default_role | quote }}
+    prepared_statements_cache_size = {{ default 500 $pool.prepared_statements_cache_size }}
+    query_parser_enabled = {{ default true $pool.query_parser_enabled }}
+    query_parser_read_write_splitting = {{ default true $pool.query_parser_read_write_splitting }}
+    primary_reads_enabled = {{ default true $pool.primary_reads_enabled }}
+    sharding_function = {{ default "pg_bigint_hash" $pool.sharding_function | quote }}
+
+    {{- range $index, $user := $pool.users }}
+
+    ## pool {{ $pool.name }} user {{ $user.username | quote }}
+    ##
+    [pools.{{ $pool.name | quote }}.users.{{ $index }}]
+    username = {{ $user.username | quote }}
+    {{- if $user.password }}
+    password = {{ $user.password | quote }}
+    {{- else if and $user.passwordSecret.name $user.passwordSecret.key }}
+    {{- $secret := (lookup "v1" "Secret" $.Release.Namespace $user.passwordSecret.name) }}
+    {{- if $secret }}
+    {{- $password := index $secret.data $user.passwordSecret.key | b64dec }}
+    password = {{ $password | quote }}
+    {{- end }}
+    {{- end }}
+    pool_size = {{ $user.pool_size }}
+    statement_timeout = {{ default 0 $user.statement_timeout }}
+    min_pool_size = {{ default 3 $user.min_pool_size }}
+    {{- if $user.server_lifetime }}
+    server_lifetime = {{ $user.server_lifetime }}
+    {{- end }}
+    {{- if and $user.server_username $user.server_password }}
+    server_username = {{ $user.server_username | quote }}
+    server_password = {{ $user.server_password | quote }}
+    {{- end }}
+    {{- end }}
+
+    {{- range $index, $shard := $pool.shards }}
+
+    ## pool {{ $pool.name }} database {{ $shard.database }}
+    ##
+    [pools.{{ $pool.name | quote }}.shards.{{ $index }}]
+    {{- if gt (len $shard.servers) 0}}
+    servers = [
+    {{- range $server := $shard.servers }}
+      [ {{ $server.host | quote }}, {{ $server.port }}, {{ $server.role | quote }} ],
+    {{- end }}
+    ]
+    {{- end }}
+    database = {{ $shard.database | quote }}
+    {{- end }}
+    {{- end }}
charts/pgcat/templates/service.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: pgcat
      protocol: TCP
      name: pgcat
  selector:
    {{- include "pgcat.selectorLabels" . | nindent 4 }}
charts/pgcat/templates/serviceaccount.yaml (new file, 12 lines)
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "pgcat.serviceAccountName" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
charts/pgcat/values.yaml (new file, 374 lines)
@@ -0,0 +1,374 @@
## String to partially override pgcat.fullname template (will maintain the release name)
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""

## String to fully override pgcat.fullname template
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""

## Number of PgCat replicas to deploy
## @param replicaCount Number of PgCat replicas to deploy
replicaCount: 1

## PgCat image version
##
## @param image.registry PgCat image registry
## @param image.repository PgCat image name
## @param image.tag PgCat image tag
## @param image.pullPolicy PgCat image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
image:
  repository: ghcr.io/postgresml/pgcat
  # Overrides the image tag whose default is the chart appVersion.
  tag: "main"
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Example:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []

## Specifies whether a ServiceAccount should be created
##
## @param serviceAccount.create Enable the creation of a ServiceAccount for PgCat pods
## @param serviceAccount.name Name of the created ServiceAccount
##
serviceAccount:
  ## Specifies whether a service account should be created
  create: true
  ## Annotations to add to the service account
  annotations: {}
  ## The name of the service account to use.
  ## If not set and create is true, a name is generated using the fullname template
  name: ""

## Annotations for server pods.
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
## @param podAnnotations Annotations for PgCat pods
##
podAnnotations: {}

## PgCat pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
## @param podSecurityContext.enabled Enable PgCat pods' Security Context
## @param podSecurityContext.fsGroup Set PgCat pod's Security Context fsGroup
##
podSecurityContext: {}
  # fsGroup: 2000

## PgCat containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
##
## @param containerSecurityContext.enabled Enable PgCat containers' Security Context
## @param containerSecurityContext.runAsUser Set PgCat container's Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set PgCat container's Security Context runAsNonRoot
##
containerSecurityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

## PgCat service
##
## @param service.type PgCat service type
## @param service.port PgCat service port
service:
  type: ClusterIP
  port: 6432

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

## PgCat resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## @skip resources Optional description
## @disabled-param resources.limits The resources limits for the PgCat container
## @disabled-param resources.requests The requested resources for the PgCat container
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
    # cpu: 100m
    # memory: 128Mi
  requests: {}
    # cpu: 100m
    # memory: 128Mi

## Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
## @param nodeSelector Node labels for pod assignment
##
nodeSelector: {}

## Tolerations for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
## @param tolerations Tolerations for pod assignment
##
tolerations: []

## Affinity for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
## @param affinity Affinity for pod assignment
##
affinity: {}

## PgCat configuration
## @param configuration [object]
configuration:
  ## General pooler settings
  ## @param [object]
  general:
    ## @param configuration.general.host What IP to run on, 0.0.0.0 means accessible from everywhere.
    host: "0.0.0.0"

    ## @param configuration.general.port Port to run on, same as PgBouncer used in this example.
    port: 6432

    ## @param configuration.general.enable_prometheus_exporter Whether to enable the Prometheus exporter or not.
    enable_prometheus_exporter: false

    ## @param configuration.general.prometheus_exporter_port Port the Prometheus exporter listens on.
    prometheus_exporter_port: 9930

    # @param configuration.general.connect_timeout How long to wait before aborting a server connection (ms).
    connect_timeout: 5000

    # How long an idle connection with a server is left open (ms).
    idle_timeout: 30000 # milliseconds

    # Max connection lifetime before it's closed, even if actively used.
    server_lifetime: 86400000 # 24 hours

    # Whether to use TLS for server connections or not.
    server_tls: false

    # How long a client is allowed to be idle while in a transaction (ms).
    idle_client_in_transaction_timeout: 0 # milliseconds

    # @param configuration.general.healthcheck_timeout How much time to give the `SELECT 1` health check query to return with a result (ms).
    healthcheck_timeout: 1000

    # @param configuration.general.healthcheck_delay How long to keep a connection available for immediate re-use without running a healthcheck query on it (ms).
    healthcheck_delay: 30000

    # @param configuration.general.shutdown_timeout How much time to give clients during shutdown before forcibly killing client connections (ms).
    shutdown_timeout: 60000

    # @param configuration.general.ban_time For how long to ban a server if it fails a health check (seconds).
    ban_time: 60 # seconds

    # @param configuration.general.log_client_connections Whether to log client connections.
    log_client_connections: false

    # @param configuration.general.log_client_disconnections Whether to log client disconnections.
    log_client_disconnections: false

    # TLS
    # tls_certificate: "server.cert"
    # tls_private_key: "server.key"
    tls_certificate: "-"
    tls_private_key: "-"

    # Credentials to access the virtual administrative database (pgbouncer or pgcat).
    # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
    admin_username: "postgres"
    admin_password: "postgres"

    # Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
    # established using the database configured in the pool. This parameter is inherited by every pool and
    # can be redefined in pool configuration.
    auth_query: null

    # User to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
    #
    # @param configuration.general.auth_query_user
    auth_query_user: null

    # Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
    #
    # @param configuration.general.auth_query_password
    auth_query_password: null

    # Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
    tcp_keepalives_idle: 5

    # Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
    tcp_keepalives_count: 5

    # Number of seconds between keepalive packets.
    tcp_keepalives_interval: 5

  ## pool
  ## configs are structured as pool.<pool_name>
  ## the pool_name is what clients use as the database name when connecting
  ## For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded"
  ## (see the connection sketch after this file)
  ## @param [object]
  pools:
    [{
      name: "simple", pool_mode: "transaction",
      users: [{username: "user", password: "pass", pool_size: 5, statement_timeout: 0}],
      shards: [{
        servers: [{host: "postgres", port: 5432, role: "primary"}],
        database: "postgres"
      }]
    }]
    # - ## default values
    #   ##
    #   ##
    #   ##
    #   name: "db"

    #   ## Pool mode (see PgBouncer docs for more).
    #   ## session: one server connection per connected client
    #   ## transaction: one server connection per client transaction
    #   ## @param configuration.poolsPostgres.pool_mode
    #   pool_mode: "transaction"

    #   ## Load balancing mode
    #   ## `random` selects the server at random
    #   ## `loc` selects the server with the least outstanding busy connections
    #   ##
    #   ## @param configuration.poolsPostgres.load_balancing_mode
    #   load_balancing_mode: "random"

    #   ## Prepared statements cache size.
    #   ## TODO: update documentation
    #   ##
    #   ## @param configuration.poolsPostgres.prepared_statements_cache_size
    #   prepared_statements_cache_size: 500

    #   ## If the client doesn't specify, route traffic to
    #   ## this role by default.
    #   ##
    #   ## any: round-robin between primary and replicas,
    #   ## replica: round-robin between replicas only without touching the primary,
    #   ## primary: all queries go to the primary unless otherwise specified.
    #   ## @param configuration.poolsPostgres.default_role
    #   default_role: "any"

    #   ## Query parser. If enabled, we'll attempt to parse
    #   ## every incoming query to determine if it's a read or a write.
    #   ## If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
    #   ## we'll direct it to the primary.
    #   ## @param configuration.poolsPostgres.query_parser_enabled
    #   query_parser_enabled: true

    #   ## If the query parser is enabled and this setting is enabled, we'll attempt to
    #   ## infer the role from the query itself.
    #   ## @param configuration.poolsPostgres.query_parser_read_write_splitting
    #   query_parser_read_write_splitting: true

    #   ## If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
    #   ## load balancing of read queries. Otherwise, the primary will only be used for write
    #   ## queries. The primary can always be explicitly selected with our custom protocol.
    #   ## @param configuration.poolsPostgres.primary_reads_enabled
    #   primary_reads_enabled: true

    #   ## So what if you wanted to implement a different hashing function,
    #   ## or you've already built one and you want this pooler to use it?
    #   ##
    #   ## Current options:
    #   ##
    #   ## pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
    #   ## sha1: A hashing function based on SHA1
    #   ##
    #   ## @param configuration.poolsPostgres.sharding_function
    #   sharding_function: "pg_bigint_hash"

    #   ## Credentials for users that may connect to this cluster
    #   ## @param users [array]
    #   ## @param users[0].username Username for this user (required)
    #   ## @param users[0].password Password for this user (required); leave empty to use an existing secret, see passwordSecret.name and passwordSecret.key
    #   ## @param users[0].passwordSecret.name Name of the secret containing the password
    #   ## @param users[0].passwordSecret.key Key in the secret containing the password
    #   ## @param users[0].pool_size Maximum number of server connections that can be established for this user
    #   ## @param users[0].statement_timeout Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
    #   users: []
    #   # - username: "user"
    #   #   password: "pass"
    #   #
    #   #   # The maximum number of connections from a single PgCat process to any database in the cluster
    #   #   # is the sum of pool_size across all users.
    #   #   pool_size: 9
    #   #
    #   #   # Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
    #   #   statement_timeout: 0
    #   #
    #   #   # PostgreSQL username used to connect to the server.
    #   #   server_username: "postgres"
    #   #
    #   #   # PostgreSQL password used to connect to the server.
    #   #   server_password: "postgres"

    #   ## @param shards [array]
    #   ## @param shards[0].servers[0].host Host for this shard
    #   ## @param shards[0].servers[0].port Port for this shard
    #   ## @param shards[0].servers[0].role Role for this shard
    #   shards: []
    #   # # [ host, port, role ]
    #   # - servers:
    #   #     - host: "postgres"
    #   #       port: 5432
    #   #       role: "primary"
    #   #     - host: "postgres"
    #   #       port: 5432
    #   #       role: "replica"
    #   #   database: "postgres"
    #   # # [ host, port, role ]
    #   # - servers:
    #   #     - host: "postgres"
    #   #       port: 5432
    #   #       role: "primary"
    #   #     - host: "postgres"
    #   #       port: 5432
    #   #       role: "replica"
    #   #   database: "postgres"
    #   # # [ host, port, role ]
    #   # - servers:
    #   #     - host: "postgres"
    #   #       port: 5432
    #   #       role: "primary"
    #   #     - host: "postgres"
    #   #       port: 5432
    #   #       role: "replica"
    #   #   database: "postgres"
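As the comments in values.yaml note, the pool name is what a client dials as its database name. A quick smoke test for the chart's default `pools` entry, as a sketch rather than anything shipped in this chart: it assumes the chart's Service is reachable as host `pgcat` on port 6432 (for example from another pod in the same namespace) and uses the tokio-postgres crate.

// Hypothetical smoke test for the chart's default pool: the pool name
// ("simple") is the database, and user/password come from the `pools` entry.
// Host "pgcat" and port 6432 assume the chart's Service defaults.
use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let (client, connection) = tokio_postgres::connect(
        "host=pgcat port=6432 user=user password=pass dbname=simple",
        NoTls,
    )
    .await?;

    // The connection object performs the actual I/O; drive it on its own task.
    tokio::spawn(async move {
        if let Err(e) = connection.await {
            eprintln!("connection error: {}", e);
        }
    });

    // PgCat proxies this to the "postgres" database on the primary shard server.
    let row = client.query_one("SELECT 1::INT4", &[]).await?;
    assert_eq!(row.get::<_, i32>(0), 1);
    Ok(())
}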
control (new file, 9 lines)
@@ -0,0 +1,9 @@
Package: pgcat
Version: ${PACKAGE_VERSION}
Section: database
Priority: optional
Architecture: ${ARCH}
Maintainer: PostgresML <team@postgresml.org>
Homepage: https://postgresml.org
Description: PgCat - NextGen PostgreSQL Pooler
 PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
ct.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
remote: origin
target-branch: main
chart-dirs:
  - charts
@@ -1,6 +1,8 @@
 FROM rust:bullseye
 
 # Dependencies
+COPY --from=sclevine/yj /bin/yj /bin/yj
+RUN /bin/yj -h
 RUN apt-get update -y \
     && apt-get install -y \
     llvm-11 psmisc postgresql-contrib postgresql-client \
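The new layers vendor the `yj` binary from the sclevine/yj image (a small format converter) and immediately run `yj -h` so a missing or broken binary fails the build early. For illustration only, a rough Rust equivalent of yj's YAML-to-JSON direction, assuming the serde_yaml and serde_json crates (this is not what the image does; it uses the prebuilt /bin/yj):

// Read YAML from stdin, print it as pretty JSON, roughly what `yj` does
// for the YAML -> JSON case.
use std::io::Read;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut yaml = String::new();
    std::io::stdin().read_to_string(&mut yaml)?;

    // serde_json::Value works as a lingua franca between the two formats.
    let value: serde_json::Value = serde_yaml::from_str(&yaml)?;
    println!("{}", serde_json::to_string_pretty(&value)?);
    Ok(())
}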
@@ -25,7 +25,7 @@ x-common-env-pg:
 
 services:
   main:
-    image: kubernetes/pause
+    image: gcr.io/google_containers/pause:3.2
     ports:
       - 6432
 
@@ -64,7 +64,7 @@ services:
       <<: *common-env-pg
       POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
       PGPORT: 10432
-    command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
+    command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
 
   toxiproxy:
     build: .
@@ -71,6 +71,10 @@ default_role = "any"
 # we'll direct it to the primary.
 query_parser_enabled = true
 
+# If the query parser is enabled and this setting is enabled, we'll attempt to
+# infer the role from the query itself.
+query_parser_read_write_splitting = true
+
 # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
 # load balancing of read queries. Otherwise, the primary will only be used for write
 # queries. The primary can always be explicitly selected with our custom protocol.
grafana_dashboard.json (new file, 2124 lines; diff suppressed because it is too large)
pgcat.minimal.toml (new file, 22 lines)
@@ -0,0 +1,22 @@
# This is an example of the most basic config
# that will mimic what PgBouncer does in transaction mode with one server.

[general]

host = "0.0.0.0"
port = 6433
admin_username = "pgcat"
admin_password = "pgcat"

[pools.pgml.users.0]
username = "postgres"
password = "postgres"
pool_size = 10
min_pool_size = 1
pool_mode = "transaction"

[pools.pgml.shards.0]
servers = [
    ["127.0.0.1", 28815, "primary"]
]
database = "postgres"
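Before pointing a deployment at a config like this, it can be handy to confirm it at least parses as TOML and has the sections the example relies on. A minimal sketch using the `toml` crate; this is an assumption for illustration, not the loader PgCat itself uses (that lives in src/config.rs):

// Parse pgcat.minimal.toml generically and spot-check a few keys.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let text = std::fs::read_to_string("pgcat.minimal.toml")?;
    let config: toml::Value = text.parse()?;

    // [general] must exist, and the single pool should point at one primary.
    let general = config.get("general").expect("missing [general] section");
    assert_eq!(general.get("port").and_then(|p| p.as_integer()), Some(6433));

    let servers = &config["pools"]["pgml"]["shards"]["0"]["servers"];
    assert_eq!(servers.as_array().map(|s| s.len()), Some(1));

    println!("config parses; listening port {}", general["port"]);
    Ok(())
}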
pgcat.service (new file, 17 lines)
@@ -0,0 +1,17 @@
[Unit]
Description=PgCat pooler
After=network.target
StartLimitIntervalSec=0

[Service]
User=pgcat
Type=simple
Restart=always
RestartSec=1
Environment=RUST_LOG=info
LimitNOFILE=65536
ExecStart=/usr/bin/pgcat /etc/pgcat.toml
ExecReload=/bin/kill -SIGHUP $MAINPID

[Install]
WantedBy=multi-user.target
pgcat.toml (142 changed lines)
@@ -23,6 +23,9 @@ connect_timeout = 5000 # milliseconds
 # How long an idle connection with a server is left open (ms).
 idle_timeout = 30000 # milliseconds
 
+# Max connection lifetime before it's closed, even if actively used.
+server_lifetime = 86400000 # 24 hours
+
 # How long a client is allowed to be idle while in a transaction (ms).
 idle_client_in_transaction_timeout = 0 # milliseconds
 
@@ -58,9 +61,15 @@ tcp_keepalives_count = 5
 tcp_keepalives_interval = 5
 
 # Path to TLS Certificate file to use for TLS connections
-# tls_certificate = "server.cert"
+# tls_certificate = ".circleci/server.cert"
 # Path to TLS private key file to use for TLS connections
-# tls_private_key = "server.key"
+# tls_private_key = ".circleci/server.key"
+
+# Enable/disable server TLS
+server_tls = false
+
+# Verify server certificate is completely authentic.
+verify_server_certificate = false
 
 # User name to access the virtual administrative database (pgbouncer or pgcat)
 # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
@@ -68,6 +77,58 @@ admin_username = "admin_user"
 # Password to access the virtual administrative database
 admin_password = "admin_pass"
 
+# Default plugins that are configured on all pools.
+[plugins]
+
+# Prewarmer plugin that runs queries on server startup, before giving the connection
+# to the client.
+[plugins.prewarmer]
+enabled = false
+queries = [
+  "SELECT pg_prewarm('pgbench_accounts')",
+]
+
+# Log all queries to stdout.
+[plugins.query_logger]
+enabled = false
+
+# Block access to tables that Postgres does not allow us to control.
+[plugins.table_access]
+enabled = false
+tables = [
+  "pg_user",
+  "pg_roles",
+  "pg_database",
+]
+
+# Intercept user queries and give a fake reply.
+[plugins.intercept]
+enabled = true
+
+[plugins.intercept.queries.0]
+
+query = "select current_database() as a, current_schemas(false) as b"
+schema = [
+  ["a", "text"],
+  ["b", "text"],
+]
+result = [
+  ["${DATABASE}", "{public}"],
+]
+
+[plugins.intercept.queries.1]
+
+query = "select current_database(), current_schema(), current_user"
+schema = [
+  ["current_database", "text"],
+  ["current_schema", "text"],
+  ["current_user", "text"],
+]
+result = [
+  ["${DATABASE}", "public", "${USER}"],
+]
+
+
 # pool configs are structured as pool.<pool_name>
 # the pool_name is what clients use as database name when connecting.
 # For a pool named `sharded_db`, clients access that pool using connection string like
@@ -89,12 +150,20 @@ load_balancing_mode = "random"
 # `primary` all queries go to the primary unless otherwise specified.
 default_role = "any"
 
+# Prepared statements cache size.
+# TODO: update documentation
+prepared_statements_cache_size = 500
+
 # If Query Parser is enabled, we'll attempt to parse
 # every incoming query to determine if it's a read or a write.
 # If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
 # we'll direct it to the primary.
 query_parser_enabled = true
 
+# If the query parser is enabled and this setting is enabled, we'll attempt to
+# infer the role from the query itself.
+query_parser_read_write_splitting = true
+
 # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
 # load balancing of read queries. Otherwise, the primary will only be used for write
 # queries. The primary can always be explicitly selected with our custom protocol.
@@ -106,6 +175,12 @@ primary_reads_enabled = true
 # shard_id_regex = '/\* shard_id: (\d+) \*/'
 # regex_search_limit = 1000 # only look at the first 1000 characters of SQL statements
 
+# Defines the behavior when no shard is selected in a sharded system.
+# `random`: picks a shard at random
+# `random_healthy`: picks a shard at random favoring shards with the least number of recent errors
+# `shard_<number>`: e.g. shard_0, shard_4, etc. picks a specific shard, everytime
+# default_shard = "shard_0"
+
 # So what if you wanted to implement a different hashing function,
 # or you've already built one and you want this pooler to use it?
 # Current options:
@@ -116,7 +191,7 @@ sharding_function = "pg_bigint_hash"
 # Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
 # established using the database configured in the pool. This parameter is inherited by every pool
 # and can be redefined in pool configuration.
-# auth_query = "SELECT $1"
+# auth_query="SELECT usename, passwd FROM pg_shadow WHERE usename='$1'"
 
 # User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
 # specified in `auth_query_user`. The connection will be established using the database configured in the pool.
@@ -137,6 +212,61 @@ idle_timeout = 40000
 # Connect timeout can be overwritten in the pool
 connect_timeout = 3000
 
+# When enabled, ip resolutions for server connections specified using hostnames will be cached
+# and checked for changes every `dns_max_ttl` seconds. If a change in the host resolution is found
+# old ip connections are closed (gracefully) and new connections will start using new ip.
+# dns_cache_enabled = false
+
+# Specifies how often (in seconds) cached ip addresses for servers are rechecked (see `dns_cache_enabled`).
+# dns_max_ttl = 30
+
+# Plugins can be configured on a pool-per-pool basis. This overrides the global plugins setting,
+# so all plugins have to be configured here again.
+[pool.sharded_db.plugins]
+
+[pools.sharded_db.plugins.prewarmer]
+enabled = true
+queries = [
+  "SELECT pg_prewarm('pgbench_accounts')",
+]
+
+[pools.sharded_db.plugins.query_logger]
+enabled = false
+
+[pools.sharded_db.plugins.table_access]
+enabled = false
+tables = [
+  "pg_user",
+  "pg_roles",
+  "pg_database",
+]
+
+[pools.sharded_db.plugins.intercept]
+enabled = true
+
+[pools.sharded_db.plugins.intercept.queries.0]
+
+query = "select current_database() as a, current_schemas(false) as b"
+schema = [
+  ["a", "text"],
+  ["b", "text"],
+]
+result = [
+  ["${DATABASE}", "{public}"],
+]
+
+[pools.sharded_db.plugins.intercept.queries.1]
+
+query = "select current_database(), current_schema(), current_user"
+schema = [
+  ["current_database", "text"],
+  ["current_schema", "text"],
+  ["current_user", "text"],
+]
+result = [
+  ["${DATABASE}", "public", "${USER}"],
+]
+
 # User configs are structured as pool.<pool_name>.users.<user_index>
 # This section holds the credentials for users that may connect to this cluster
 [pools.sharded_db.users.0]
@@ -148,7 +278,7 @@ username = "sharding_user"
 # if `server_password` is not set.
 password = "sharding_user"
 
-pool_mode = "session"
+pool_mode = "transaction"
 
 # PostgreSQL username used to connect to the server.
 # server_username = "another_user"
@@ -171,6 +301,8 @@ username = "other_user"
 password = "other_user"
 pool_size = 21
 statement_timeout = 15000
+connect_timeout = 1000
+idle_timeout = 1000
 
 # Shard configs are structured as pool.<pool_name>.shards.<shard_id>
 # Each shard config contains a list of servers that make up the shard
@@ -206,6 +338,8 @@ sharding_function = "pg_bigint_hash"
 username = "simple_user"
 password = "simple_user"
 pool_size = 5
+min_pool_size = 3
+server_lifetime = 60000
 statement_timeout = 0
 
 [pools.simple_db.shards.0]
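The intercept plugin added above answers matching queries from the pooler itself, substituting `${DATABASE}` and `${USER}` with the connecting client's database and user. A sketch of observing that from a client with the tokio-postgres crate; the host, port, and credentials are placeholders taken from this example config, and since this diff does not spell out the exact matching rules (for instance whether prepared statements are intercepted), the simple query protocol is used:

// Illustration only: with [plugins.intercept] enabled, this query should be
// answered by PgCat directly rather than a Postgres server.
use tokio_postgres::{NoTls, SimpleQueryMessage};

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let (client, conn) = tokio_postgres::connect(
        "host=127.0.0.1 port=6432 user=sharding_user password=sharding_user dbname=sharded_db",
        NoTls,
    )
    .await?;
    tokio::spawn(conn);

    for msg in client
        .simple_query("select current_database(), current_schema(), current_user")
        .await?
    {
        if let SimpleQueryMessage::Row(row) = msg {
            // Expected per the intercept config: sharded_db, public, sharding_user.
            println!("{:?} {:?} {:?}", row.get(0), row.get(1), row.get(2));
        }
    }
    Ok(())
}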
postinst (new file, 13 lines)
@@ -0,0 +1,13 @@
#!/bin/bash
set -e

systemctl daemon-reload
systemctl enable pgcat

if ! id pgcat 2> /dev/null; then
	useradd -s /usr/bin/false pgcat
fi

if [ -f /etc/pgcat.toml ]; then
	systemctl start pgcat
fi
prerm (new file, 5 lines)
@@ -0,0 +1,5 @@
#!/bin/bash
set -e

systemctl stop pgcat
systemctl disable pgcat
src/admin.rs (330 changed lines)
@@ -1,4 +1,6 @@
 use crate::pool::BanReason;
+use crate::server::ServerParameters;
+use crate::stats::pool::PoolStats;
 use bytes::{Buf, BufMut, BytesMut};
 use log::{error, info, trace};
 use nix::sys::signal::{self, Signal};
@@ -12,20 +14,20 @@ use tokio::time::Instant;
 use crate::config::{get_config, reload_config, VERSION};
 use crate::errors::Error;
 use crate::messages::*;
+use crate::pool::ClientServerMap;
 use crate::pool::{get_all_pools, get_pool};
-use crate::stats::{get_client_stats, get_pool_stats, get_server_stats, ClientState, ServerState};
-use crate::ClientServerMap;
+use crate::stats::{get_client_stats, get_server_stats, ClientState, ServerState};
 
-pub fn generate_server_info_for_admin() -> BytesMut {
-    let mut server_info = BytesMut::new();
+pub fn generate_server_parameters_for_admin() -> ServerParameters {
+    let mut server_parameters = ServerParameters::new();
 
-    server_info.put(server_parameter_message("application_name", ""));
-    server_info.put(server_parameter_message("client_encoding", "UTF8"));
-    server_info.put(server_parameter_message("server_encoding", "UTF8"));
-    server_info.put(server_parameter_message("server_version", VERSION));
-    server_info.put(server_parameter_message("DateStyle", "ISO, MDY"));
+    server_parameters.set_param("application_name".to_string(), "".to_string(), true);
+    server_parameters.set_param("client_encoding".to_string(), "UTF8".to_string(), true);
+    server_parameters.set_param("server_encoding".to_string(), "UTF8".to_string(), true);
+    server_parameters.set_param("server_version".to_string(), VERSION.to_string(), true);
+    server_parameters.set_param("DateStyle".to_string(), "ISO, MDY".to_string(), true);
 
-    server_info
+    server_parameters
 }
 
 /// Handle admin client.
@@ -53,7 +55,12 @@ where
 
     let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();
 
-    match query_parts[0].to_ascii_uppercase().as_str() {
+    match query_parts
+        .first()
+        .unwrap_or(&"")
+        .to_ascii_uppercase()
+        .as_str()
+    {
         "BAN" => {
             trace!("BAN");
             ban(stream, query_parts).await
@@ -72,17 +79,26 @@ where
         }
         "PAUSE" => {
             trace!("PAUSE");
-            pause(stream, query_parts[1]).await
+            pause(stream, query_parts).await
         }
         "RESUME" => {
             trace!("RESUME");
-            resume(stream, query_parts[1]).await
+            resume(stream, query_parts).await
         }
         "SHUTDOWN" => {
             trace!("SHUTDOWN");
            shutdown(stream).await
         }
-        "SHOW" => match query_parts[1].to_ascii_uppercase().as_str() {
+        "SHOW" => match query_parts
+            .get(1)
+            .unwrap_or(&"")
+            .to_ascii_uppercase()
+            .as_str()
+        {
+            "HELP" => {
+                trace!("SHOW HELP");
+                show_help(stream).await
+            }
             "BANS" => {
                 trace!("SHOW BANS");
                 show_bans(stream).await
@@ -254,39 +270,50 @@ async fn show_pools<T>(stream: &mut T) -> Result<(), Error>
 where
     T: tokio::io::AsyncWrite + std::marker::Unpin,
 {
-    let all_pool_stats = get_pool_stats();
-
-    let columns = vec![
-        ("database", DataType::Text),
-        ("user", DataType::Text),
-        ("pool_mode", DataType::Text),
-        ("cl_idle", DataType::Numeric),
-        ("cl_active", DataType::Numeric),
-        ("cl_waiting", DataType::Numeric),
-        ("cl_cancel_req", DataType::Numeric),
-        ("sv_active", DataType::Numeric),
-        ("sv_idle", DataType::Numeric),
-        ("sv_used", DataType::Numeric),
-        ("sv_tested", DataType::Numeric),
-        ("sv_login", DataType::Numeric),
-        ("maxwait", DataType::Numeric),
-        ("maxwait_us", DataType::Numeric),
-    ];
-
-    let mut res = BytesMut::new();
-    res.put(row_description(&columns));
-
-    for ((_user_pool, _pool), pool_stats) in all_pool_stats {
-        let mut row = vec![
-            pool_stats.database(),
-            pool_stats.user(),
-            pool_stats.pool_mode().to_string(),
-        ];
-        pool_stats.populate_row(&mut row);
-        pool_stats.clear_maxwait();
-        res.put(data_row(&row));
-    }
+    let pool_lookup = PoolStats::construct_pool_lookup();
+    let mut res = BytesMut::new();
+    res.put(row_description(&PoolStats::generate_header()));
+    pool_lookup.iter().for_each(|(_identifier, pool_stats)| {
+        res.put(data_row(&pool_stats.generate_row()));
+    });
+    res.put(command_complete("SHOW"));
+
+    // ReadyForQuery
+    res.put_u8(b'Z');
+    res.put_i32(5);
+    res.put_u8(b'I');
+
+    write_all_half(stream, &res).await
+}
+
+/// Show all available options.
+async fn show_help<T>(stream: &mut T) -> Result<(), Error>
+where
+    T: tokio::io::AsyncWrite + std::marker::Unpin,
+{
+    let mut res = BytesMut::new();
+
+    let detail_msg = [
+        "",
+        "SHOW HELP|CONFIG|DATABASES|POOLS|CLIENTS|SERVERS|USERS|VERSION",
+        // "SHOW PEERS|PEER_POOLS", // missing PEERS|PEER_POOLS
+        // "SHOW FDS|SOCKETS|ACTIVE_SOCKETS|LISTS|MEM|STATE", // missing FDS|SOCKETS|ACTIVE_SOCKETS|MEM|STATE
+        "SHOW LISTS",
+        // "SHOW DNS_HOSTS|DNS_ZONES", // missing DNS_HOSTS|DNS_ZONES
+        "SHOW STATS", // missing STATS_TOTALS|STATS_AVERAGES|TOTALS
+        "SET key = arg",
+        "RELOAD",
+        "PAUSE [<db>, <user>]",
+        "RESUME [<db>, <user>]",
+        // "DISABLE <db>", // missing
+        // "ENABLE <db>", // missing
+        // "RECONNECT [<db>]", missing
+        // "KILL <db>",
+        // "SUSPEND",
+        "SHUTDOWN",
+    ];
+
+    res.put(notify("Console usage", detail_msg.join("\n\t")));
 
     res.put(command_complete("SHOW"));
 
     // ReadyForQuery
@@ -334,17 +361,17 @@ where
         let paused = pool.paused();
 
         res.put(data_row(&vec![
             address.name(),                         // name
             address.host.to_string(),               // host
             address.port.to_string(),               // port
             database_name.to_string(),              // database
             pool_config.user.username.to_string(),  // force_user
             pool_config.user.pool_size.to_string(), // pool_size
-            "0".to_string(),                        // min_pool_size
+            pool_config.user.min_pool_size.unwrap_or(0).to_string(), // min_pool_size
             "0".to_string(),                        // reserve_pool
             pool_config.pool_mode.to_string(),      // pool_mode
             pool_config.user.pool_size.to_string(), // max_connections
             pool_state.connections.to_string(),     // current_connections
             match paused {                          // paused
                 true => "1".to_string(),
@@ -673,6 +700,8 @@ where
         ("query_count", DataType::Numeric),
         ("error_count", DataType::Numeric),
         ("age_seconds", DataType::Numeric),
+        ("maxwait", DataType::Numeric),
+        ("maxwait_us", DataType::Numeric),
     ];
 
     let new_map = get_client_stats();
@@ -680,6 +709,7 @@ where
     res.put(row_description(&columns));
 
     for (_, client) in new_map {
+        let max_wait = client.max_wait_time.load(Ordering::Relaxed);
         let row = vec![
             format!("{:#010X}", client.client_id()),
             client.pool_name(),
@@ -693,6 +723,8 @@ where
                 .duration_since(client.connect_time())
                 .as_secs()
                 .to_string(),
+            (max_wait / 1_000_000).to_string(),
+            (max_wait % 1_000_000).to_string(),
         ];
 
         res.put(data_row(&row));
@@ -725,6 +757,10 @@ where
         ("bytes_sent", DataType::Numeric),
         ("bytes_received", DataType::Numeric),
         ("age_seconds", DataType::Numeric),
+        ("prepare_cache_hit", DataType::Numeric),
+        ("prepare_cache_miss", DataType::Numeric),
+        ("prepare_cache_eviction", DataType::Numeric),
+        ("prepare_cache_size", DataType::Numeric),
     ];
 
     let new_map = get_server_stats();
@@ -748,6 +784,22 @@ where
                 .duration_since(server.connect_time())
                 .as_secs()
                 .to_string(),
+            server
+                .prepared_hit_count
+                .load(Ordering::Relaxed)
+                .to_string(),
+            server
+                .prepared_miss_count
+                .load(Ordering::Relaxed)
+                .to_string(),
+            server
+                .prepared_eviction_count
+                .load(Ordering::Relaxed)
+                .to_string(),
+            server
+                .prepared_cache_size
+                .load(Ordering::Relaxed)
+                .to_string(),
         ];
 
         res.put(data_row(&row));
@@ -764,96 +816,128 @@ where
 }
 
 /// Pause a pool. It won't pass any more queries to the backends.
-async fn pause<T>(stream: &mut T, query: &str) -> Result<(), Error>
+async fn pause<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
 where
     T: tokio::io::AsyncWrite + std::marker::Unpin,
 {
-    let parts: Vec<&str> = query.split(",").map(|part| part.trim()).collect();
-
-    if parts.len() != 2 {
-        error_response(
-            stream,
-            "PAUSE requires a database and a user, e.g. PAUSE my_db,my_user",
-        )
-        .await
-    } else {
-        let database = parts[0];
-        let user = parts[1];
-
-        match get_pool(database, user) {
-            Some(pool) => {
-                pool.pause();
-
-                let mut res = BytesMut::new();
-
-                res.put(command_complete(&format!("PAUSE {},{}", database, user)));
-
-                // ReadyForQuery
-                res.put_u8(b'Z');
-                res.put_i32(5);
-                res.put_u8(b'I');
-
-                write_all_half(stream, &res).await
-            }
-
-            None => {
-                error_response(
-                    stream,
-                    &format!(
-                        "No pool configured for database: {}, user: {}",
-                        database, user
-                    ),
-                )
-                .await
-            }
-        }
-    }
-}
+    let parts: Vec<&str> = match tokens.len() == 2 {
+        true => tokens[1].split(',').map(|part| part.trim()).collect(),
+        false => Vec::new(),
+    };
+
+    match parts.len() {
+        0 => {
+            for (_, pool) in get_all_pools() {
+                pool.pause();
+            }
+
+            let mut res = BytesMut::new();
+            res.put(command_complete("PAUSE"));
+
+            // ReadyForQuery
+            res.put_u8(b'Z');
+            res.put_i32(5);
+            res.put_u8(b'I');
+
+            write_all_half(stream, &res).await
+        }
+        2 => {
+            let database = parts[0];
+            let user = parts[1];
+
+            match get_pool(database, user) {
+                Some(pool) => {
+                    pool.pause();
+
+                    let mut res = BytesMut::new();
+
+                    res.put(command_complete(&format!("PAUSE {},{}", database, user)));
+
+                    // ReadyForQuery
+                    res.put_u8(b'Z');
+                    res.put_i32(5);
+                    res.put_u8(b'I');
+
+                    write_all_half(stream, &res).await
+                }
+
+                None => {
+                    error_response(
+                        stream,
+                        &format!(
+                            "No pool configured for database: {}, user: {}",
+                            database, user
+                        ),
+                    )
+                    .await
+                }
+            }
+        }
+        _ => error_response(stream, "usage: PAUSE [db, user]").await,
+    }
+}
 
 /// Resume a pool. Queries are allowed again.
-async fn resume<T>(stream: &mut T, query: &str) -> Result<(), Error>
+async fn resume<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
 where
     T: tokio::io::AsyncWrite + std::marker::Unpin,
 {
-    let parts: Vec<&str> = query.split(",").map(|part| part.trim()).collect();
-
-    if parts.len() != 2 {
-        error_response(
-            stream,
-            "RESUME requires a database and a user, e.g. RESUME my_db,my_user",
-        )
-        .await
-    } else {
-        let database = parts[0];
-        let user = parts[1];
-
-        match get_pool(database, user) {
-            Some(pool) => {
-                pool.resume();
-
-                let mut res = BytesMut::new();
-
-                res.put(command_complete(&format!("RESUME {},{}", database, user)));
-
-                // ReadyForQuery
-                res.put_u8(b'Z');
-                res.put_i32(5);
-                res.put_u8(b'I');
-
-                write_all_half(stream, &res).await
-            }
-
-            None => {
-                error_response(
-                    stream,
-                    &format!(
-                        "No pool configured for database: {}, user: {}",
-                        database, user
-                    ),
-                )
-                .await
-            }
-        }
-    }
-}
+    let parts: Vec<&str> = match tokens.len() == 2 {
+        true => tokens[1].split(',').map(|part| part.trim()).collect(),
+        false => Vec::new(),
+    };
+
+    match parts.len() {
+        0 => {
+            for (_, pool) in get_all_pools() {
+                pool.resume();
+            }
+
+            let mut res = BytesMut::new();
+            res.put(command_complete("RESUME"));
+
+            // ReadyForQuery
+            res.put_u8(b'Z');
+            res.put_i32(5);
+            res.put_u8(b'I');
+
+            write_all_half(stream, &res).await
+        }
+        2 => {
+            let database = parts[0];
+            let user = parts[1];
+
+            match get_pool(database, user) {
+                Some(pool) => {
+                    pool.resume();
+
+                    let mut res = BytesMut::new();
+
+                    res.put(command_complete(&format!("RESUME {},{}", database, user)));
+
+                    // ReadyForQuery
+                    res.put_u8(b'Z');
+                    res.put_i32(5);
+                    res.put_u8(b'I');
+
+                    write_all_half(stream, &res).await
+                }
+
+                None => {
+                    error_response(
+                        stream,
+                        &format!(
+                            "No pool configured for database: {}, user: {}",
+                            database, user
+                        ),
+                    )
+                    .await
+                }
+            }
+        }
+        _ => error_response(stream, "usage: RESUME [db, user]").await,
+    }
+}
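With this change, PAUSE and RESUME issued without a db,user pair apply to every configured pool instead of panicking on the missing token. A sketch of exercising both paths through the virtual admin database, using the credentials from the example pgcat.toml above; all connection details are placeholders for whatever a real deployment uses:

// Drive the admin console over the normal Postgres protocol.
use tokio_postgres::{NoTls, SimpleQueryMessage};

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let (client, conn) = tokio_postgres::connect(
        "host=127.0.0.1 port=6432 user=admin_user password=admin_pass dbname=pgcat",
        NoTls,
    )
    .await?;
    tokio::spawn(conn);

    // With no db,user pair, PAUSE now stops traffic on every configured pool...
    client.simple_query("PAUSE").await?;
    // ...and RESUME re-enables all of them. "PAUSE db,user" still targets one pool.
    client.simple_query("RESUME").await?;

    // SHOW POOLS is now rendered from PoolStats::generate_header()/generate_row().
    for msg in client.simple_query("SHOW POOLS").await? {
        if let SimpleQueryMessage::Row(row) = msg {
            println!("{:?}", row.get(0));
        }
    }
    Ok(())
}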
@@ -1,3 +1,4 @@
+use crate::config::AuthType;
 use crate::errors::Error;
 use crate::pool::ConnectionPool;
 use crate::server::Server;
@@ -71,12 +72,17 @@ impl AuthPassthrough {
     pub async fn fetch_hash(&self, address: &crate::config::Address) -> Result<String, Error> {
         let auth_user = crate::config::User {
             username: self.user.clone(),
+            auth_type: AuthType::MD5,
             password: Some(self.password.clone()),
             server_username: None,
             server_password: None,
             pool_size: 1,
             statement_timeout: 0,
             pool_mode: None,
+            server_lifetime: None,
+            min_pool_size: None,
+            connect_timeout: None,
+            idle_timeout: None,
         };
 
         let user = &address.username;
1414  src/client.rs  (diff suppressed because it is too large)
36  src/cmd_args.rs  (new file)
@@ -0,0 +1,36 @@
use clap::{Parser, ValueEnum};
use tracing::Level;

/// PgCat: Nextgen PostgreSQL Pooler
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
    #[arg(default_value_t = String::from("pgcat.toml"), env)]
    pub config_file: String,

    #[arg(short, long, default_value_t = tracing::Level::INFO, env)]
    pub log_level: Level,

    #[clap(short='F', long, value_enum, default_value_t=LogFormat::Text, env)]
    pub log_format: LogFormat,

    #[arg(
        short,
        long,
        default_value_t = false,
        env,
        help = "disable colors in the log output"
    )]
    pub no_color: bool,
}

pub fn parse() -> Args {
    Args::parse()
}

#[derive(ValueEnum, Clone, Debug)]
pub enum LogFormat {
    Text,
    Structured,
    Debug,
}
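Because the struct derives clap's `Parser` with `env` fall-backs, every option can come from a flag or an environment variable (clap derives the variable names from the field names, e.g. CONFIG_FILE, LOG_LEVEL, LOG_FORMAT, NO_COLOR). A hedged sketch of consuming the parsed arguments; the printout is ours, the field and type names come from the file above:

```rust
// Sketch only: exercising the Args struct defined in src/cmd_args.rs above.
use pgcat::cmd_args::{self, LogFormat};

fn main() {
    let args = cmd_args::parse();

    println!("config file: {}", args.config_file); // defaults to "pgcat.toml"
    println!("log level:   {}", args.log_level); // defaults to INFO

    match args.log_format {
        LogFormat::Structured => println!("logging as JSON"),
        LogFormat::Debug => println!("logging pretty-printed"),
        LogFormat::Text => println!("logging as plain text"),
    }
}
```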
652  src/config.rs  (diff suppressed because it is too large)
410  src/dns_cache.rs  (new file)
@@ -0,0 +1,410 @@
use crate::config::get_config;
use crate::errors::Error;
use arc_swap::ArcSwap;
use log::{debug, error, info, warn};
use once_cell::sync::Lazy;
use std::collections::{HashMap, HashSet};
use std::io;
use std::net::IpAddr;
use std::sync::Arc;
use std::sync::RwLock;
use tokio::time::{sleep, Duration};
use trust_dns_resolver::error::{ResolveError, ResolveResult};
use trust_dns_resolver::lookup_ip::LookupIp;
use trust_dns_resolver::TokioAsyncResolver;

/// Cached Resolver Globally available
pub static CACHED_RESOLVER: Lazy<ArcSwap<CachedResolver>> =
    Lazy::new(|| ArcSwap::from_pointee(CachedResolver::default()));

// Ip addresses are returned as a set of addresses
// so we can compare.
#[derive(Clone, PartialEq, Debug)]
pub struct AddrSet {
    set: HashSet<IpAddr>,
}

impl AddrSet {
    fn new() -> AddrSet {
        AddrSet {
            set: HashSet::new(),
        }
    }
}

impl From<LookupIp> for AddrSet {
    fn from(lookup_ip: LookupIp) -> Self {
        let mut addr_set = AddrSet::new();
        for address in lookup_ip.iter() {
            addr_set.set.insert(address);
        }
        addr_set
    }
}

///
/// A CachedResolver is a DNS resolution cache mechanism with customizable expiration time.
///
/// The system works as follows:
///
/// When a host is to be resolved, if we have not resolved it before, a new resolution is
/// executed and stored in the internal cache. Concurrently, every `dns_max_ttl` time, the
/// cache is refreshed.
///
/// # Example:
///
/// ```
/// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
///
/// # tokio_test::block_on(async {
/// let config = CachedResolverConfig::default();
/// let resolver = CachedResolver::new(config, None).await.unwrap();
/// let addrset = resolver.lookup_ip("www.example.com.").await.unwrap();
/// # })
/// ```
///
/// // Now the ip resolution is stored in local cache and subsequent
/// // calls will be returned from cache. Also, the cache is refreshed
/// // and updated every 10 seconds.
///
/// // You can now check if an 'old' lookup differs from what is currently
/// // stored in cache by using `has_changed`.
/// resolver.has_changed("www.example.com.", addrset)
#[derive(Default)]
pub struct CachedResolver {
    // The configuration of the cached_resolver.
    config: CachedResolverConfig,

    // The map that holds the cached lookups, keyed by hostname.
    data: Option<RwLock<HashMap<String, AddrSet>>>,

    // The resolver to be used for DNS queries.
    resolver: Option<TokioAsyncResolver>,

    // The RefreshLoop
    refresh_loop: RwLock<Option<tokio::task::JoinHandle<()>>>,
}

///
/// Configuration
#[derive(Clone, Debug, Default, PartialEq)]
pub struct CachedResolverConfig {
    /// Amount of time in seconds that a resolved dns address is considered stale.
    dns_max_ttl: u64,

    /// Enabled or disabled? (this is so we can reload config)
    enabled: bool,
}

impl CachedResolverConfig {
    fn new(dns_max_ttl: u64, enabled: bool) -> Self {
        CachedResolverConfig {
            dns_max_ttl,
            enabled,
        }
    }
}

impl From<crate::config::Config> for CachedResolverConfig {
    fn from(config: crate::config::Config) -> Self {
        CachedResolverConfig::new(config.general.dns_max_ttl, config.general.dns_cache_enabled)
    }
}

impl CachedResolver {
    ///
    /// Returns a new Arc<CachedResolver> based on passed configuration.
    /// It also starts the loop that will refresh cache entries.
    ///
    /// # Arguments:
    ///
    /// * `config` - The `CachedResolverConfig` to be used to create the resolver.
    ///
    /// # Example:
    ///
    /// ```
    /// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
    ///
    /// # tokio_test::block_on(async {
    /// let config = CachedResolverConfig::default();
    /// let resolver = CachedResolver::new(config, None).await.unwrap();
    /// # })
    /// ```
    ///
    pub async fn new(
        config: CachedResolverConfig,
        data: Option<HashMap<String, AddrSet>>,
    ) -> Result<Arc<Self>, io::Error> {
        // Construct a new Resolver with default configuration options
        let resolver = Some(TokioAsyncResolver::tokio_from_system_conf()?);

        let data = if let Some(hash) = data {
            Some(RwLock::new(hash))
        } else {
            Some(RwLock::new(HashMap::new()))
        };

        let instance = Arc::new(Self {
            config,
            resolver,
            data,
            refresh_loop: RwLock::new(None),
        });

        if instance.enabled() {
            info!("Scheduling DNS refresh loop");
            let refresh_loop = tokio::task::spawn({
                let instance = instance.clone();
                async move {
                    instance.refresh_dns_entries_loop().await;
                }
            });
            *(instance.refresh_loop.write().unwrap()) = Some(refresh_loop);
        }

        Ok(instance)
    }

    pub fn enabled(&self) -> bool {
        self.config.enabled
    }

    // Schedules the refresher
    async fn refresh_dns_entries_loop(&self) {
        let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap();
        let interval = Duration::from_secs(self.config.dns_max_ttl);
        loop {
            debug!("Begin refreshing cached DNS addresses.");
            // To minimize the time we hold the lock, we first create
            // an array with keys.
            let mut hostnames: Vec<String> = Vec::new();
            {
                if let Some(ref data) = self.data {
                    for hostname in data.read().unwrap().keys() {
                        hostnames.push(hostname.clone());
                    }
                }
            }

            for hostname in hostnames.iter() {
                let addrset = self
                    .fetch_from_cache(hostname.as_str())
                    .expect("Could not obtain expected address from cache, this should not happen");

                match resolver.lookup_ip(hostname).await {
                    Ok(lookup_ip) => {
                        let new_addrset = AddrSet::from(lookup_ip);
                        debug!(
                            "Obtained address for host ({}) -> ({:?})",
                            hostname, new_addrset
                        );

                        if addrset != new_addrset {
                            debug!(
                                "Addr changed from {:?} to {:?} updating cache.",
                                addrset, new_addrset
                            );
                            self.store_in_cache(hostname, new_addrset);
                        }
                    }
                    Err(err) => {
                        error!(
                            "There was an error trying to resolv {}: ({}).",
                            hostname, err
                        );
                    }
                }
            }
            debug!("Finished refreshing cached DNS addresses.");
            sleep(interval).await;
        }
    }

    /// Returns a `AddrSet` given the specified hostname.
    ///
    /// This method first tries to fetch the value from the cache; if it misses,
    /// then it is resolved and stored in the cache. TTL from records is ignored.
    ///
    /// # Arguments
    ///
    /// * `host` - A string slice referencing the hostname to be resolved.
    ///
    /// # Example:
    ///
    /// ```
    /// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
    ///
    /// # tokio_test::block_on(async {
    /// let config = CachedResolverConfig::default();
    /// let resolver = CachedResolver::new(config, None).await.unwrap();
    /// let response = resolver.lookup_ip("www.google.com.");
    /// # })
    /// ```
    ///
    pub async fn lookup_ip(&self, host: &str) -> ResolveResult<AddrSet> {
        debug!("Lookup up {} in cache", host);
        match self.fetch_from_cache(host) {
            Some(addr_set) => {
                debug!("Cache hit!");
                Ok(addr_set)
            }
            None => {
                debug!("Not found, executing a dns query!");
                if let Some(ref resolver) = self.resolver {
                    let addr_set = AddrSet::from(resolver.lookup_ip(host).await?);
                    debug!("Obtained: {:?}", addr_set);
                    self.store_in_cache(host, addr_set.clone());
                    Ok(addr_set)
                } else {
                    Err(ResolveError::from("No resolver available"))
                }
            }
        }
    }

    //
    // Returns true if the stored host resolution differs from the AddrSet passed.
    pub fn has_changed(&self, host: &str, addr_set: &AddrSet) -> bool {
        if let Some(fetched_addr_set) = self.fetch_from_cache(host) {
            return fetched_addr_set != *addr_set;
        }
        false
    }

    // Fetches an AddrSet from the inner cache, acquiring the read lock.
    fn fetch_from_cache(&self, key: &str) -> Option<AddrSet> {
        if let Some(ref hash) = self.data {
            if let Some(addr_set) = hash.read().unwrap().get(key) {
                return Some(addr_set.clone());
            }
        }
        None
    }

    // Sets up the global CACHED_RESOLVER static variable so we can globally use DNS
    // cache.
    pub async fn from_config() -> Result<(), Error> {
        let cached_resolver = CACHED_RESOLVER.load();
        let desired_config = CachedResolverConfig::from(get_config());

        if cached_resolver.config != desired_config {
            if let Some(ref refresh_loop) = *(cached_resolver.refresh_loop.write().unwrap()) {
                warn!("Killing Dnscache refresh loop as its configuration is being reloaded");
                refresh_loop.abort()
            }
            let new_resolver = if let Some(ref data) = cached_resolver.data {
                let data = Some(data.read().unwrap().clone());
                CachedResolver::new(desired_config, data).await
            } else {
                CachedResolver::new(desired_config, None).await
            };

            match new_resolver {
                Ok(ok) => {
                    CACHED_RESOLVER.store(ok);
                    Ok(())
                }
                Err(err) => {
                    let message = format!("Error setting up cached_resolver. Error: {:?}, will continue without this feature.", err);
                    Err(Error::DNSCachedError(message))
                }
            }
        } else {
            Ok(())
        }
    }

    // Stores the AddrSet in cache, acquiring the write lock.
    fn store_in_cache(&self, host: &str, addr_set: AddrSet) {
        if let Some(ref data) = self.data {
            data.write().unwrap().insert(host.to_string(), addr_set);
        } else {
            error!("Could not insert, Hash not initialized");
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use trust_dns_resolver::error::ResolveError;

    #[tokio::test]
    async fn new() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await;
        assert!(resolver.is_ok());
    }

    #[tokio::test]
    async fn lookup_ip() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let response = resolver.lookup_ip("www.google.com.").await;
        assert!(response.is_ok());
    }

    #[tokio::test]
    async fn has_changed() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.google.com.";
        let response = resolver.lookup_ip(hostname).await;
        let addr_set = response.unwrap();
        assert!(!resolver.has_changed(hostname, &addr_set));
    }

    #[tokio::test]
    async fn unknown_host() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.idontexists.";
        let response = resolver.lookup_ip(hostname).await;
        assert!(matches!(response, Err(ResolveError { .. })));
    }

    #[tokio::test]
    async fn incorrect_address() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "w ww.idontexists.";
        let response = resolver.lookup_ip(hostname).await;
        assert!(matches!(response, Err(ResolveError { .. })));
        assert!(!resolver.has_changed(hostname, &AddrSet::new()));
    }

    #[tokio::test]
    // Ok, this test is based on the fact that google does DNS RR
    // and does not respond with every available ip every time, so
    // if I cache here, it will miss after one cache iteration or two.
    async fn thread() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.google.com.";
        let response = resolver.lookup_ip(hostname).await;
        let addr_set = response.unwrap();
        assert!(!resolver.has_changed(hostname, &addr_set));
        let resolver_for_refresher = resolver.clone();
        let _thread_handle = tokio::task::spawn(async move {
            resolver_for_refresher.refresh_dns_entries_loop().await;
        });
        assert!(!resolver.has_changed(hostname, &addr_set));
    }
}
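The point of `has_changed` above is to let a long-lived connection notice, via the background refresher, that its hostname now resolves somewhere else (say, after a failover behind a stable DNS name). A hedged sketch of that pattern; the `ServerConn` type below is ours, not part of the diff, while `CACHED_RESOLVER`, `AddrSet` and `has_changed` come from the file above:

```rust
// Sketch only: a connection keeps the AddrSet it resolved at connect time
// and later asks the cache whether DNS has moved.
use pgcat::dns_cache::{AddrSet, CACHED_RESOLVER};

struct ServerConn {
    host: String,
    addr_set_at_connect: Option<AddrSet>,
}

impl ServerConn {
    /// True if the cached resolution for `host` no longer matches what we
    /// connected to; the background loop re-resolves every `dns_max_ttl`
    /// seconds and keeps the cache current.
    fn dns_changed(&self) -> bool {
        match &self.addr_set_at_connect {
            Some(old) => CACHED_RESOLVER.load().has_changed(&self.host, old),
            None => false,
        }
    }
}
```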
@@ -1,7 +1,7 @@
 //! Errors.
 
 /// Various errors.
-#[derive(Debug, PartialEq)]
+#[derive(Debug, PartialEq, Clone)]
 pub enum Error {
     SocketError(String),
     ClientSocketError(String, ClientIdentifier),
@@ -12,6 +12,7 @@ pub enum Error {
     ProtocolSyncError(String),
     BadQuery(String),
     ServerError,
+    ServerMessageParserError(String),
     ServerStartupError(String, ServerIdentifier),
     ServerAuthError(String, ServerIdentifier),
     BadConfig,
@@ -19,10 +20,16 @@ pub enum Error {
     ClientError(String),
     TlsError,
     StatementTimeout,
+    DNSCachedError(String),
     ShuttingDown,
     ParseBytesError(String),
     AuthError(String),
     AuthPassthroughError(String),
+    UnsupportedStatement,
+    QueryRouterParserError(String),
+    QueryRouterError(String),
+    InvalidShardId(usize),
+    PreparedStatementError,
 }
 
 #[derive(Clone, PartialEq, Debug)]
@@ -118,3 +125,9 @@ impl std::fmt::Display for Error {
         }
     }
 }
+
+impl From<std::ffi::NulError> for Error {
+    fn from(err: std::ffi::NulError) -> Self {
+        Error::QueryRouterError(err.to_string())
+    }
+}
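The new `From<std::ffi::NulError>` impl exists so that `?` can convert a failed `CString` construction straight into `Error::QueryRouterError` instead of needing a manual `map_err`. A minimal sketch of the call-site shape this enables (the helper function is ours):

```rust
use std::ffi::CString;

use pgcat::errors::Error;

// `?` works here because of the From<std::ffi::NulError> impl added above:
// a query containing an interior NUL byte becomes Error::QueryRouterError.
fn to_c_string(query: &str) -> Result<CString, Error> {
    Ok(CString::new(query)?)
}
```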
@@ -1,11 +1,18 @@
+pub mod admin;
 pub mod auth_passthrough;
+pub mod client;
+pub mod cmd_args;
 pub mod config;
 pub mod constants;
+pub mod dns_cache;
 pub mod errors;
+pub mod logger;
 pub mod messages;
 pub mod mirrors;
-pub mod multi_logger;
+pub mod plugins;
 pub mod pool;
+pub mod prometheus;
+pub mod query_router;
 pub mod scram;
 pub mod server;
 pub mod sharding;
20  src/logger.rs  (new file)
@@ -0,0 +1,20 @@
use crate::cmd_args::{Args, LogFormat};
use tracing_subscriber;
use tracing_subscriber::EnvFilter;

pub fn init(args: &Args) {
    // Initialize a default filter, and then override the builtin default "warning" with our
    // commandline (default: "info")
    let filter = EnvFilter::from_default_env().add_directive(args.log_level.into());

    let trace_sub = tracing_subscriber::fmt()
        .with_thread_ids(true)
        .with_env_filter(filter)
        .with_ansi(!args.no_color);

    match args.log_format {
        LogFormat::Structured => trace_sub.json().init(),
        LogFormat::Debug => trace_sub.pretty().init(),
        _ => trace_sub.init(),
    };
}
58  src/main.rs
@@ -23,7 +23,6 @@ extern crate arc_swap;
 extern crate async_trait;
 extern crate bb8;
 extern crate bytes;
-extern crate env_logger;
 extern crate exitcode;
 extern crate log;
 extern crate md5;
@@ -36,6 +35,7 @@ extern crate sqlparser;
 extern crate tokio;
 extern crate tokio_rustls;
 extern crate toml;
+extern crate trust_dns_resolver;
 
 #[cfg(not(target_env = "msvc"))]
 use jemallocator::Jemalloc;
@@ -60,54 +60,32 @@ use std::str::FromStr;
 use std::sync::Arc;
 use tokio::sync::broadcast;
 
-mod admin;
-mod auth_passthrough;
-mod client;
-mod config;
-mod constants;
-mod errors;
-mod messages;
-mod mirrors;
-mod multi_logger;
-mod pool;
-mod prometheus;
-mod query_router;
-mod scram;
-mod server;
-mod sharding;
-mod stats;
-mod tls;
-
-use crate::config::{get_config, reload_config, VERSION};
-use crate::messages::configure_socket;
-use crate::pool::{ClientServerMap, ConnectionPool};
-use crate::prometheus::start_metric_server;
-use crate::stats::{Collector, Reporter, REPORTER};
+use pgcat::cmd_args;
+use pgcat::config::{get_config, reload_config, VERSION};
+use pgcat::dns_cache;
+use pgcat::logger;
+use pgcat::messages::configure_socket;
+use pgcat::pool::{ClientServerMap, ConnectionPool};
+use pgcat::prometheus::start_metric_server;
+use pgcat::stats::{Collector, Reporter, REPORTER};
 
 fn main() -> Result<(), Box<dyn std::error::Error>> {
-    multi_logger::MultiLogger::init().unwrap();
+    let args = cmd_args::parse();
+    logger::init(&args);
 
     info!("Welcome to PgCat! Meow. (Version {})", VERSION);
 
-    if !query_router::QueryRouter::setup() {
+    if !pgcat::query_router::QueryRouter::setup() {
         error!("Could not setup query router");
         std::process::exit(exitcode::CONFIG);
     }
 
-    let args = std::env::args().collect::<Vec<String>>();
-
-    let config_file = if args.len() == 2 {
-        args[1].to_string()
-    } else {
-        String::from("pgcat.toml")
-    };
-
     // Create a transient runtime for loading the config for the first time.
     {
         let runtime = Builder::new_multi_thread().worker_threads(1).build()?;
 
         runtime.block_on(async {
-            match config::parse(&config_file).await {
+            match pgcat::config::parse(args.config_file.as_str()).await {
                 Ok(_) => (),
                 Err(err) => {
                     error!("Config parse error: {:?}", err);
@@ -166,6 +144,12 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     // Statistics reporting.
     REPORTER.store(Arc::new(Reporter::default()));
 
+    // Starts (if enabled) dns cache before pools initialization
+    match dns_cache::CachedResolver::from_config().await {
+        Ok(_) => (),
+        Err(err) => error!("DNS cache initialization error: {:?}", err),
+    };
+
     // Connection pool that allows to query all shards and replicas.
     match ConnectionPool::from_config(client_server_map.clone()).await {
         Ok(_) => (),
@@ -295,7 +279,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         tokio::task::spawn(async move {
             let start = chrono::offset::Utc::now().naive_utc();
 
-            match client::client_entrypoint(
+            match pgcat::client::client_entrypoint(
                 socket,
                 client_server_map,
                 shutdown_rx,
@@ -326,7 +310,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
 
             Err(err) => {
                 match err {
-                    errors::Error::ClientBadStartup => debug!("Client disconnected with error {:?}", err),
+                    pgcat::errors::Error::ClientBadStartup => debug!("Client disconnected with error {:?}", err),
                     _ => warn!("Client disconnected with error {:?}", err),
                 }
 
1009  src/messages.rs  (diff suppressed because it is too large)
@@ -7,8 +7,7 @@ use bytes::{Bytes, BytesMut};
 use parking_lot::RwLock;
 
 use crate::config::{get_config, Address, Role, User};
-use crate::pool::{ClientServerMap, PoolIdentifier, ServerPool};
-use crate::stats::PoolStats;
+use crate::pool::{ClientServerMap, ServerPool};
 use log::{error, info, trace, warn};
 use tokio::sync::mpsc::{channel, Receiver, Sender};
 
@@ -24,25 +23,27 @@ impl MirroredClient {
     async fn create_pool(&self) -> Pool<ServerPool> {
         let config = get_config();
         let default = std::time::Duration::from_millis(10_000).as_millis() as u64;
-        let (connection_timeout, idle_timeout, cfg) =
+        let (connection_timeout, idle_timeout, _cfg, prepared_statement_cache_size) =
             match config.pools.get(&self.address.pool_name) {
                 Some(cfg) => (
                     cfg.connect_timeout.unwrap_or(default),
                     cfg.idle_timeout.unwrap_or(default),
                     cfg.clone(),
+                    cfg.prepared_statements_cache_size,
                 ),
-                None => (default, default, crate::config::Pool::default()),
+                None => (default, default, crate::config::Pool::default(), 0),
             };
 
-        let identifier = PoolIdentifier::new(&self.database, &self.user.username);
-
         let manager = ServerPool::new(
             self.address.clone(),
             self.user.clone(),
             self.database.as_str(),
             ClientServerMap::default(),
-            Arc::new(PoolStats::new(identifier, cfg.clone())),
             Arc::new(RwLock::new(None)),
+            None,
+            true,
+            false,
+            prepared_statement_cache_size,
         );
 
         Pool::builder()
@@ -80,12 +81,13 @@ impl MirroredClient {
                 }
 
                 // Incoming data from server (we read to clear the socket buffer and discard the data)
-                recv_result = server.recv() => {
+                recv_result = server.recv(None) => {
                     match recv_result {
                         Ok(message) => trace!("Received from mirror: {} {:?}", String::from_utf8_lossy(&message[..]), address.clone()),
                         Err(err) => {
-                            server.mark_bad();
-                            error!("Failed to receive from mirror {:?} {:?}", err, address.clone());
+                            server.mark_bad(
+                                format!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone()).as_str()
+                            );
                         }
                     }
                 }
@@ -97,8 +99,9 @@ impl MirroredClient {
                     match server.send(&BytesMut::from(&bytes[..])).await {
                         Ok(_) => trace!("Sent to mirror: {} {:?}", String::from_utf8_lossy(&bytes[..]), address.clone()),
                         Err(err) => {
-                            server.mark_bad();
-                            error!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone())
+                            server.mark_bad(
+                                format!("Failed to receive from mirror {:?} {:?}", err, address.clone()).as_str()
+                            );
                         }
                     }
                 }
@@ -138,18 +141,18 @@ impl MirroringManager {
                 bytes_rx,
                 disconnect_rx: exit_rx,
             };
-            exit_senders.push(exit_tx.clone());
-            byte_senders.push(bytes_tx.clone());
+            exit_senders.push(exit_tx);
+            byte_senders.push(bytes_tx);
             client.start();
         });
 
         Self {
-            byte_senders: byte_senders,
+            byte_senders,
             disconnect_senders: exit_senders,
         }
     }
 
-    pub fn send(self: &mut Self, bytes: &BytesMut) {
+    pub fn send(&mut self, bytes: &BytesMut) {
         // We want to avoid performing an allocation if we won't be able to send the message
         // There is a possibility of a race here where we check the capacity and then the channel is
         // closed or the capacity is reduced to 0, but mirroring is best effort anyway
@@ -171,7 +174,7 @@ impl MirroringManager {
         });
     }
 
-    pub fn disconnect(self: &mut Self) {
+    pub fn disconnect(&mut self) {
         self.disconnect_senders
             .iter_mut()
             .for_each(|sender| match sender.try_send(()) {
src/multi_logger.rs (80 lines, deleted)
@@ -1,80 +0,0 @@
use log::{Level, Log, Metadata, Record, SetLoggerError};

// This is a special kind of logger that allows sending logs to different
// targets depending on the log level.
//
// By default, if nothing is set, it acts as a regular env_log logger,
// it sends everything to standard error.
//
// If the Env variable `STDOUT_LOG` is defined, it will be used for
// configuring the standard out logger.
//
// The behavior is:
// - If it is an error, the message is written to standard error.
// - If it is not, and it matches the log level of the standard output logger (`STDOUT_LOG` env var), it will be send to standard output.
// - If the above is not true, it is sent to the stderr logger that will log it or not depending on the value
//   of the RUST_LOG env var.
//
// So to summarize, if no `STDOUT_LOG` env var is present, the logger is the default logger. If `STDOUT_LOG` is set, everything
// but errors, that matches the log level set in the `STDOUT_LOG` env var is sent to stdout. You can have also some esoteric configuration
// where you set `RUST_LOG=debug` and `STDOUT_LOG=info`, in here, errors will go to stderr, warns and infos to stdout and debugs to stderr.
//
pub struct MultiLogger {
    stderr_logger: env_logger::Logger,
    stdout_logger: env_logger::Logger,
}

impl MultiLogger {
    fn new() -> Self {
        let stderr_logger = env_logger::builder().format_timestamp_micros().build();
        let stdout_logger = env_logger::Builder::from_env("STDOUT_LOG")
            .format_timestamp_micros()
            .target(env_logger::Target::Stdout)
            .build();

        Self {
            stderr_logger,
            stdout_logger,
        }
    }

    pub fn init() -> Result<(), SetLoggerError> {
        let logger = Self::new();

        log::set_max_level(logger.stderr_logger.filter());
        log::set_boxed_logger(Box::new(logger))
    }
}

impl Log for MultiLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        self.stderr_logger.enabled(metadata) && self.stdout_logger.enabled(metadata)
    }

    fn log(&self, record: &Record) {
        if record.level() == Level::Error {
            self.stderr_logger.log(record);
        } else {
            if self.stdout_logger.matches(record) {
                self.stdout_logger.log(record);
            } else {
                self.stderr_logger.log(record);
            }
        }
    }

    fn flush(&self) {
        self.stderr_logger.flush();
        self.stdout_logger.flush();
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test_init() {
        MultiLogger::init().unwrap();
    }
}
120  src/plugins/intercept.rs  (new file)
@@ -0,0 +1,120 @@
//! The intercept plugin.
//!
//! It intercepts queries and returns fake results.

use async_trait::async_trait;
use bytes::{BufMut, BytesMut};
use serde::{Deserialize, Serialize};
use sqlparser::ast::Statement;

use log::debug;

use crate::{
    config::Intercept as InterceptConfig,
    errors::Error,
    messages::{command_complete, data_row_nullable, row_description, DataType},
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

// TODO: use these structs for deserialization
#[derive(Serialize, Deserialize)]
pub struct Rule {
    query: String,
    schema: Vec<Column>,
    result: Vec<Vec<String>>,
}

#[derive(Serialize, Deserialize)]
pub struct Column {
    name: String,
    data_type: String,
}

/// The intercept plugin.
pub struct Intercept<'a> {
    pub enabled: bool,
    pub config: &'a InterceptConfig,
}

#[async_trait]
impl<'a> Plugin for Intercept<'a> {
    async fn run(
        &mut self,
        query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled || ast.is_empty() {
            return Ok(PluginOutput::Allow);
        }

        let mut config = self.config.clone();
        config.substitute(
            &query_router.pool_settings().db,
            &query_router.pool_settings().user.username,
        );

        let mut result = BytesMut::new();

        for q in ast {
            // Normalization
            let q = q.to_string().to_ascii_lowercase();

            for (_, target) in config.queries.iter() {
                if target.query.as_str() == q {
                    debug!("Intercepting query: {}", q);

                    let rd = target
                        .schema
                        .iter()
                        .map(|row| {
                            let name = &row[0];
                            let data_type = &row[1];
                            (
                                name.as_str(),
                                match data_type.as_str() {
                                    "text" => DataType::Text,
                                    "anyarray" => DataType::AnyArray,
                                    "oid" => DataType::Oid,
                                    "bool" => DataType::Bool,
                                    "int4" => DataType::Int4,
                                    _ => DataType::Any,
                                },
                            )
                        })
                        .collect::<Vec<(&str, DataType)>>();

                    result.put(row_description(&rd));

                    target.result.iter().for_each(|row| {
                        let row = row
                            .iter()
                            .map(|s| {
                                let s = s.as_str().to_string();

                                if s.is_empty() {
                                    None
                                } else {
                                    Some(s)
                                }
                            })
                            .collect::<Vec<Option<String>>>();
                        result.put(data_row_nullable(&row));
                    });

                    result.put(command_complete("SELECT"));
                }
            }
        }

        if !result.is_empty() {
            result.put_u8(b'Z');
            result.put_i32(5);
            result.put_u8(b'I');

            return Ok(PluginOutput::Intercept(result));
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
45  src/plugins/mod.rs  (new file)
@@ -0,0 +1,45 @@
//! The plugin ecosystem.
//!
//! Currently plugins only grant access or deny access to the database for a particular query.
//! Example use cases:
//! - block known bad queries
//! - block access to system catalogs
//! - block dangerous modifications like `DROP TABLE`
//! - etc
//!

pub mod intercept;
pub mod prewarmer;
pub mod query_logger;
pub mod table_access;

use crate::{errors::Error, query_router::QueryRouter};
use async_trait::async_trait;
use bytes::BytesMut;
use sqlparser::ast::Statement;

pub use intercept::Intercept;
pub use query_logger::QueryLogger;
pub use table_access::TableAccess;

#[derive(Clone, Debug, PartialEq)]
pub enum PluginOutput {
    Allow,
    Deny(String),
    Overwrite(Vec<Statement>),
    Intercept(BytesMut),
}

#[async_trait]
pub trait Plugin {
    // Run before the query is sent to the server.
    #[allow(clippy::ptr_arg)]
    async fn run(
        &mut self,
        query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error>;

    // TODO: run after the result is returned
    // async fn callback(&mut self, query_router: &QueryRouter);
}
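The trait above is the whole extension surface: a plugin inspects the parsed statements and returns one of the four `PluginOutput` variants. A hedged sketch of a plugin written against that trait; the plugin itself (rejecting multi-statement queries) is our invention, not part of the diff:

```rust
// Sketch only: a hypothetical plugin built against the Plugin trait above.
use async_trait::async_trait;
use sqlparser::ast::Statement;

use pgcat::errors::Error;
use pgcat::plugins::{Plugin, PluginOutput};
use pgcat::query_router::QueryRouter;

pub struct SingleStatementOnly {
    pub enabled: bool,
}

#[async_trait]
impl Plugin for SingleStatementOnly {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if self.enabled && ast.len() > 1 {
            // Deny surfaces its message to the client as an error response.
            Ok(PluginOutput::Deny(
                "multi-statement queries are not allowed".into(),
            ))
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
```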
28  src/plugins/prewarmer.rs  (new file)
@@ -0,0 +1,28 @@
//! Prewarm new connections before giving them to the client.
use crate::{errors::Error, server::Server};
use log::info;

pub struct Prewarmer<'a> {
    pub enabled: bool,
    pub server: &'a mut Server,
    pub queries: &'a Vec<String>,
}

impl<'a> Prewarmer<'a> {
    pub async fn run(&mut self) -> Result<(), Error> {
        if !self.enabled {
            return Ok(());
        }

        for query in self.queries {
            info!(
                "{} Prewarning with query: `{}`",
                self.server.address(),
                query
            );
            self.server.query(query).await?;
        }

        Ok(())
    }
}
38  src/plugins/query_logger.rs  (new file)
@@ -0,0 +1,38 @@
//! Log all queries to stdout (or somewhere else, why not).

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};
use async_trait::async_trait;
use log::info;
use sqlparser::ast::Statement;

pub struct QueryLogger<'a> {
    pub enabled: bool,
    pub user: &'a str,
    pub db: &'a str,
}

#[async_trait]
impl<'a> Plugin for QueryLogger<'a> {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled {
            return Ok(PluginOutput::Allow);
        }

        let query = ast
            .iter()
            .map(|q| q.to_string())
            .collect::<Vec<String>>()
            .join("; ");
        info!("[pool: {}][user: {}] {}", self.db, self.user, query);

        Ok(PluginOutput::Allow)
    }
}
59  src/plugins/table_access.rs  (new file)
@@ -0,0 +1,59 @@
//! This query router plugin will check if the user can access a particular
//! table as part of their query. If they can't, the query will not be routed.

use async_trait::async_trait;
use sqlparser::ast::{visit_relations, Statement};

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

use log::debug;

use core::ops::ControlFlow;

pub struct TableAccess<'a> {
    pub enabled: bool,
    pub tables: &'a Vec<String>,
}

#[async_trait]
impl<'a> Plugin for TableAccess<'a> {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled {
            return Ok(PluginOutput::Allow);
        }

        let mut found = None;

        visit_relations(ast, |relation| {
            let relation = relation.to_string();
            let parts = relation.split('.').collect::<Vec<&str>>();
            let table_name = parts.last().unwrap();

            if self.tables.contains(&table_name.to_string()) {
                found = Some(table_name.to_string());
                ControlFlow::<()>::Break(())
            } else {
                ControlFlow::<()>::Continue(())
            }
        });

        if let Some(found) = found {
            debug!("Blocking access to table \"{}\"", found);

            Ok(PluginOutput::Deny(format!(
                "permission for table \"{}\" denied",
                found
            )))
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
506  src/pool.rs
@@ -1,15 +1,18 @@
 use arc_swap::ArcSwap;
 use async_trait::async_trait;
-use bb8::{ManageConnection, Pool, PooledConnection};
-use bytes::{BufMut, BytesMut};
+use bb8::{ManageConnection, Pool, PooledConnection, QueueStrategy};
 use chrono::naive::NaiveDateTime;
 use log::{debug, error, info, warn};
+use lru::LruCache;
 use once_cell::sync::Lazy;
 use parking_lot::{Mutex, RwLock};
 use rand::seq::SliceRandom;
 use rand::thread_rng;
 use regex::Regex;
 use std::collections::HashMap;
+use std::fmt::{Display, Formatter};
+use std::num::NonZeroUsize;
+use std::sync::atomic::AtomicU64;
 use std::sync::{
     atomic::{AtomicBool, Ordering},
     Arc,
@@ -17,13 +20,17 @@ use std::sync::{
 use std::time::Instant;
 use tokio::sync::Notify;
 
-use crate::config::{get_config, Address, General, LoadBalancingMode, PoolMode, Role, User};
+use crate::config::{
+    get_config, Address, DefaultShard, General, LoadBalancingMode, Plugins, PoolMode, Role, User,
+};
 use crate::errors::Error;
 
 use crate::auth_passthrough::AuthPassthrough;
-use crate::server::Server;
+use crate::messages::Parse;
+use crate::plugins::prewarmer;
+use crate::server::{Server, ServerParameters};
 use crate::sharding::ShardingFunction;
-use crate::stats::{AddressStats, ClientStats, PoolStats, ServerStats};
+use crate::stats::{AddressStats, ClientStats, ServerStats};
 
 pub type ProcessId = i32;
 pub type SecretKey = i32;
@@ -50,6 +57,57 @@ pub enum BanReason {
     AdminBan(i64),
 }
 
+pub type PreparedStatementCacheType = Arc<Mutex<PreparedStatementCache>>;
+
+// TODO: Add stats to this cache
+// TODO: Add application name to the cache value to help identify which application is using the cache
+// TODO: Create admin command to show which statements are in the cache
+#[derive(Debug)]
+pub struct PreparedStatementCache {
+    cache: LruCache<u64, Arc<Parse>>,
+}
+
+impl PreparedStatementCache {
+    pub fn new(mut size: usize) -> Self {
+        // Cannot be zero
+        if size == 0 {
+            size = 1;
+        }
+
+        PreparedStatementCache {
+            cache: LruCache::new(NonZeroUsize::new(size).unwrap()),
+        }
+    }
+
+    /// Adds the prepared statement to the cache under a new name if it doesn't exist;
+    /// if it already exists, gives you the existing parse.
+    ///
+    /// Pass the hash to this so that we can do the compute before acquiring the lock.
+    pub fn get_or_insert(&mut self, parse: &Parse, hash: u64) -> Arc<Parse> {
+        match self.cache.get(&hash) {
+            Some(rewritten_parse) => rewritten_parse.clone(),
+            None => {
+                let new_parse = Arc::new(parse.clone().rewrite());
+                let evicted = self.cache.push(hash, new_parse.clone());
+
+                if let Some((_, evicted_parse)) = evicted {
+                    debug!(
+                        "Evicted prepared statement {} from cache",
+                        evicted_parse.name
+                    );
+                }
+
+                new_parse
+            }
+        }
+    }
+
+    /// Marks the hash as most recently used if it exists
+    pub fn promote(&mut self, hash: &u64) {
+        self.cache.promote(hash);
+    }
+}
+
 /// An identifier for a PgCat pool,
 /// a database visible to clients.
 #[derive(Hash, Debug, Clone, PartialEq, Eq, Default)]
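An aside on the cache introduced in the hunk above: its `get_or_insert` doc comment asks callers to compute the hash before taking the mutex, so the lock only covers the LRU lookup or insert itself. A hedged sketch of that calling pattern; the helper is ours, and it assumes `messages::Parse` implements `Hash`:

```rust
// Sketch only (not part of the diff). Assumes Parse: Hash.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

use crate::messages::Parse;
use crate::pool::PreparedStatementCacheType;

fn cache_parse(cache: &PreparedStatementCacheType, parse: &Parse) -> std::sync::Arc<Parse> {
    // Hash first, outside the critical section.
    let mut hasher = DefaultHasher::new();
    parse.hash(&mut hasher);
    let hash = hasher.finish();

    // parking_lot::Mutex::lock() returns the guard directly, no unwrap needed.
    cache.lock().get_or_insert(parse, hash)
}
```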
@@ -61,6 +119,8 @@ pub struct PoolIdentifier {
|
|||||||
pub user: String,
|
pub user: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static POOL_REAPER_RATE: u64 = 30_000; // 30 seconds by default
|
||||||
|
|
||||||
impl PoolIdentifier {
|
impl PoolIdentifier {
|
||||||
/// Create a new user/pool identifier.
|
/// Create a new user/pool identifier.
|
||||||
pub fn new(db: &str, user: &str) -> PoolIdentifier {
|
pub fn new(db: &str, user: &str) -> PoolIdentifier {
|
||||||
@@ -71,6 +131,12 @@ impl PoolIdentifier {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Display for PoolIdentifier {
|
||||||
|
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
|
||||||
|
write!(f, "{}@{}", self.user, self.db)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl From<&Address> for PoolIdentifier {
|
impl From<&Address> for PoolIdentifier {
|
||||||
fn from(address: &Address) -> PoolIdentifier {
|
fn from(address: &Address) -> PoolIdentifier {
|
||||||
PoolIdentifier::new(&address.database, &address.username)
|
PoolIdentifier::new(&address.database, &address.username)
|
||||||
@@ -91,6 +157,7 @@ pub struct PoolSettings {
|
|||||||
|
|
||||||
// Connecting user.
|
// Connecting user.
|
||||||
pub user: User,
|
pub user: User,
|
||||||
|
pub db: String,
|
||||||
|
|
||||||
// Default server role to connect to.
|
// Default server role to connect to.
|
||||||
pub default_role: Option<Role>,
|
pub default_role: Option<Role>,
|
||||||
@@ -98,6 +165,12 @@ pub struct PoolSettings {
|
|||||||
// Enable/disable query parser.
|
// Enable/disable query parser.
|
||||||
pub query_parser_enabled: bool,
|
pub query_parser_enabled: bool,
|
||||||
|
|
||||||
|
// Max length of query the parser will parse.
|
||||||
|
pub query_parser_max_length: Option<usize>,
|
||||||
|
|
||||||
|
// Infer role
|
||||||
|
pub query_parser_read_write_splitting: bool,
|
||||||
|
|
||||||
// Read from the primary as well or not.
|
// Read from the primary as well or not.
|
||||||
pub primary_reads_enabled: bool,
|
pub primary_reads_enabled: bool,
|
||||||
|
|
||||||
@@ -122,6 +195,9 @@ pub struct PoolSettings {
|
|||||||
// Regex for searching for the shard id in SQL statements
|
// Regex for searching for the shard id in SQL statements
|
||||||
pub shard_id_regex: Option<Regex>,
|
pub shard_id_regex: Option<Regex>,
|
||||||
|
|
||||||
|
// What to do when no shard is selected in a sharded system
|
||||||
|
pub default_shard: DefaultShard,
|
||||||
|
|
||||||
// Limit how much of each query is searched for a potential shard regex match
|
// Limit how much of each query is searched for a potential shard regex match
|
||||||
pub regex_search_limit: usize,
|
pub regex_search_limit: usize,
|
||||||
|
|
||||||
@@ -129,6 +205,9 @@ pub struct PoolSettings {
|
|||||||
pub auth_query: Option<String>,
|
pub auth_query: Option<String>,
|
||||||
pub auth_query_user: Option<String>,
|
pub auth_query_user: Option<String>,
|
||||||
pub auth_query_password: Option<String>,
|
pub auth_query_password: Option<String>,
|
||||||
|
|
||||||
|
/// Plugins
|
||||||
|
pub plugins: Option<Plugins>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for PoolSettings {
|
impl Default for PoolSettings {
|
||||||
@@ -138,8 +217,11 @@ impl Default for PoolSettings {
|
|||||||
load_balancing_mode: LoadBalancingMode::Random,
|
load_balancing_mode: LoadBalancingMode::Random,
|
||||||
shards: 1,
|
shards: 1,
|
||||||
user: User::default(),
|
user: User::default(),
|
||||||
|
db: String::default(),
|
||||||
default_role: None,
|
default_role: None,
|
||||||
query_parser_enabled: false,
|
query_parser_enabled: false,
|
||||||
|
query_parser_max_length: None,
|
||||||
|
query_parser_read_write_splitting: false,
|
||||||
primary_reads_enabled: true,
|
primary_reads_enabled: true,
|
||||||
sharding_function: ShardingFunction::PgBigintHash,
|
sharding_function: ShardingFunction::PgBigintHash,
|
||||||
automatic_sharding_key: None,
|
automatic_sharding_key: None,
|
||||||
@@ -149,9 +231,11 @@ impl Default for PoolSettings {
|
|||||||
sharding_key_regex: None,
|
sharding_key_regex: None,
|
||||||
shard_id_regex: None,
|
shard_id_regex: None,
|
||||||
regex_search_limit: 1000,
|
regex_search_limit: 1000,
|
||||||
|
default_shard: DefaultShard::Shard(0),
|
||||||
auth_query: None,
|
auth_query: None,
|
||||||
auth_query_user: None,
|
auth_query_user: None,
|
||||||
auth_query_password: None,
|
auth_query_password: None,
|
||||||
|
plugins: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -160,23 +244,23 @@ impl Default for PoolSettings {
|
|||||||
#[derive(Clone, Debug, Default)]
|
#[derive(Clone, Debug, Default)]
|
||||||
pub struct ConnectionPool {
|
pub struct ConnectionPool {
|
||||||
/// The pools handled internally by bb8.
|
/// The pools handled internally by bb8.
|
||||||
databases: Vec<Vec<Pool<ServerPool>>>,
|
databases: Arc<Vec<Vec<Pool<ServerPool>>>>,
|
||||||
|
|
||||||
/// The addresses (host, port, role) to handle
|
/// The addresses (host, port, role) to handle
|
||||||
/// failover and load balancing deterministically.
|
/// failover and load balancing deterministically.
|
||||||
addresses: Vec<Vec<Address>>,
|
addresses: Arc<Vec<Vec<Address>>>,
|
||||||
|
|
||||||
/// List of banned addresses (see above)
|
/// List of banned addresses (see above)
|
||||||
/// that should not be queried.
|
/// that should not be queried.
|
||||||
banlist: BanList,
|
banlist: BanList,
|
||||||
|
|
||||||
/// The server information (K messages) have to be passed to the
|
/// The server information has to be passed to the
|
||||||
/// clients on startup. We pre-connect to all shards and replicas
|
/// clients on startup. We pre-connect to all shards and replicas
|
||||||
/// on pool creation and save the K messages here.
|
/// on pool creation and save the startup parameters here.
|
||||||
server_info: Arc<RwLock<BytesMut>>,
|
original_server_parameters: Arc<RwLock<ServerParameters>>,
|
||||||
|
|
||||||
/// Pool configuration.
|
/// Pool configuration.
|
||||||
pub settings: PoolSettings,
|
pub settings: Arc<PoolSettings>,
|
||||||
|
|
||||||
/// If not validated, we need to double check the pool is available before allowing a client
|
/// If not validated, we need to double check the pool is available before allowing a client
|
||||||
/// to use it.
|
/// to use it.
|
||||||
@@ -191,10 +275,11 @@ pub struct ConnectionPool {
|
|||||||
paused: Arc<AtomicBool>,
|
paused: Arc<AtomicBool>,
|
||||||
paused_waiter: Arc<Notify>,
|
paused_waiter: Arc<Notify>,
|
||||||
|
|
||||||
pub stats: Arc<PoolStats>,
|
|
||||||
|
|
||||||
/// AuthInfo
|
/// AuthInfo
|
||||||
pub auth_hash: Arc<RwLock<Option<String>>>,
|
pub auth_hash: Arc<RwLock<Option<String>>>,
|
||||||
|
|
||||||
|
/// Cache
|
||||||
|
pub prepared_statement_cache: Option<PreparedStatementCacheType>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ConnectionPool {
|
impl ConnectionPool {
|
||||||
@@ -213,20 +298,17 @@ impl ConnectionPool {
         let old_pool_ref = get_pool(pool_name, &user.username);
         let identifier = PoolIdentifier::new(pool_name, &user.username);

-        match old_pool_ref {
-            Some(pool) => {
-                // If the pool hasn't changed, get existing reference and insert it into the new_pools.
-                // We replace all pools at the end, but if the reference is kept, the pool won't get re-created (bb8).
-                if pool.config_hash == new_pool_hash_value {
-                    info!(
-                        "[pool: {}][user: {}] has not changed",
-                        pool_name, user.username
-                    );
-                    new_pools.insert(identifier.clone(), pool.clone());
-                    continue;
-                }
-            }
-            None => (),
-        }
+        if let Some(pool) = old_pool_ref {
+            // If the pool hasn't changed, get existing reference and insert it into the new_pools.
+            // We replace all pools at the end, but if the reference is kept, the pool won't get re-created (bb8).
+            if pool.config_hash == new_pool_hash_value {
+                info!(
+                    "[pool: {}][user: {}] has not changed",
+                    pool_name, user.username
+                );
+                new_pools.insert(identifier.clone(), pool.clone());
+                continue;
+            }
+        }

         info!(
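
The reload path above keys pool reuse off `config_hash`: when hashing the pool's configuration yields the same value as before, the existing bb8 pool object is carried over and no server connections are rebuilt. A minimal standalone sketch of that idea (not pgcat's actual hashing code, which hashes the full pool config struct):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn config_hash<T: Hash>(config: &T) -> u64 {
        let mut hasher = DefaultHasher::new();
        config.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        // Illustrative (pool name, default role, pool size) tuples.
        let old = ("sharded_db", "primary", 10u32);
        let new = ("sharded_db", "primary", 10u32);
        // Same hash means the old pool can be inserted into new_pools as-is.
        assert_eq!(config_hash(&old), config_hash(&new));
    }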
@@ -242,10 +324,6 @@ impl ConnectionPool {
             .clone()
             .into_keys()
             .collect::<Vec<String>>();
-        let pool_stats = Arc::new(PoolStats::new(identifier, pool_config.clone()));
-
-        // Allow the pool to be seen in statistics
-        pool_stats.register(pool_stats.clone());

         // Sort by shard number to ensure consistency.
         shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap());
@@ -280,6 +358,7 @@ impl ConnectionPool {
                     pool_name: pool_name.clone(),
                     mirrors: vec![],
                     stats: Arc::new(AddressStats::default()),
+                    error_count: Arc::new(AtomicU64::new(0)),
                 });
                 address_id += 1;
             }
@@ -298,6 +377,7 @@ impl ConnectionPool {
                     pool_name: pool_name.clone(),
                     mirrors: mirror_addresses,
                     stats: Arc::new(AddressStats::default()),
+                    error_count: Arc::new(AtomicU64::new(0)),
                 };

                 address_id += 1;
@@ -311,21 +391,34 @@ impl ConnectionPool {

                 if let Some(apt) = &auth_passthrough {
                     match apt.fetch_hash(&address).await {
                         Ok(ok) => {
-                            if let Some(ref pool_auth_hash_value) = *(pool_auth_hash.read()) {
+                            if let Some(ref pool_auth_hash_value) = *(pool_auth_hash.read())
+                            {
                                 if ok != *pool_auth_hash_value {
-                                    warn!("Hash is not the same across shards of the same pool, client auth will \
-                                        be done using last obtained hash. Server: {}:{}, Database: {}", server.host, server.port, shard.database);
+                                    warn!(
+                                        "Hash is not the same across shards \
+                                        of the same pool, client auth will \
+                                        be done using last obtained hash. \
+                                        Server: {}:{}, Database: {}",
+                                        server.host, server.port, shard.database,
+                                    );
                                 }
                             }
                             debug!("Hash obtained for {:?}", address);
                             {
                                 let mut pool_auth_hash = pool_auth_hash.write();
                                 *pool_auth_hash = Some(ok.clone());
                             }
-                        },
-                        Err(err) => warn!("Could not obtain password hashes using auth_query config, ignoring. Error: {:?}", err),
+                        }
+                        Err(err) => warn!(
+                            "Could not obtain password hashes \
+                            using auth_query config, ignoring. \
+                            Error: {:?}",
+                            err,
+                        ),
                     }
                 }

                 let manager = ServerPool::new(
@@ -333,28 +426,70 @@ impl ConnectionPool {
                     user.clone(),
                     &shard.database,
                     client_server_map.clone(),
-                    pool_stats.clone(),
                     pool_auth_hash.clone(),
+                    match pool_config.plugins {
+                        Some(ref plugins) => Some(plugins.clone()),
+                        None => config.plugins.clone(),
+                    },
+                    pool_config.cleanup_server_connections,
+                    pool_config.log_client_parameter_status_changes,
+                    pool_config.prepared_statements_cache_size,
                 );

-                let connect_timeout = match pool_config.connect_timeout {
+                let connect_timeout = match user.connect_timeout {
                     Some(connect_timeout) => connect_timeout,
-                    None => config.general.connect_timeout,
+                    None => match pool_config.connect_timeout {
+                        Some(connect_timeout) => connect_timeout,
+                        None => config.general.connect_timeout,
+                    },
                 };

-                let idle_timeout = match pool_config.idle_timeout {
+                let idle_timeout = match user.idle_timeout {
                     Some(idle_timeout) => idle_timeout,
-                    None => config.general.idle_timeout,
+                    None => match pool_config.idle_timeout {
+                        Some(idle_timeout) => idle_timeout,
+                        None => config.general.idle_timeout,
+                    },
                 };

+                let server_lifetime = match user.server_lifetime {
+                    Some(server_lifetime) => server_lifetime,
+                    None => match pool_config.server_lifetime {
+                        Some(server_lifetime) => server_lifetime,
+                        None => config.general.server_lifetime,
+                    },
+                };
+
+                let reaper_rate = *[idle_timeout, server_lifetime, POOL_REAPER_RATE]
+                    .iter()
+                    .min()
+                    .unwrap();
+
+                let queue_strategy = match config.general.server_round_robin {
+                    true => QueueStrategy::Fifo,
+                    false => QueueStrategy::Lifo,
+                };
+
+                debug!(
+                    "[pool: {}][user: {}] Pool reaper rate: {}ms",
+                    pool_name, user.username, reaper_rate
+                );
+
                 let pool = Pool::builder()
                     .max_size(user.pool_size)
+                    .min_idle(user.min_pool_size)
                     .connection_timeout(std::time::Duration::from_millis(connect_timeout))
                     .idle_timeout(Some(std::time::Duration::from_millis(idle_timeout)))
-                    .test_on_check_out(false)
-                    .build(manager)
-                    .await
-                    .unwrap();
+                    .max_lifetime(Some(std::time::Duration::from_millis(server_lifetime)))
+                    .reaper_rate(std::time::Duration::from_millis(reaper_rate))
+                    .queue_strategy(queue_strategy)
+                    .test_on_check_out(false);
+
+                let pool = if config.general.validate_config {
+                    pool.build(manager).await?
+                } else {
+                    pool.build_unchecked(manager)
+                };

                 pools.push(pool);
                 servers.push(address);
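
The nested matches above implement a three-level precedence for `connect_timeout`, `idle_timeout`, and the new `server_lifetime`: a per-user value wins over the per-pool value, which wins over the global default. A minimal sketch of the same resolution over hypothetical `Option<u64>` settings; the diff spells it out with nested `match` expressions instead:

    fn resolve(user: Option<u64>, pool: Option<u64>, general: u64) -> u64 {
        // Option::or picks the first Some, unwrap_or supplies the global default.
        user.or(pool).unwrap_or(general)
    }

    fn main() {
        assert_eq!(resolve(Some(5_000), Some(1_000), 10_000), 5_000); // user wins
        assert_eq!(resolve(None, Some(1_000), 10_000), 1_000); // pool wins
        assert_eq!(resolve(None, None, 10_000), 10_000); // global default
    }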
@@ -374,14 +509,13 @@ impl ConnectionPool {
         }

         let pool = ConnectionPool {
-            databases: shards,
-            stats: pool_stats,
-            addresses,
+            databases: Arc::new(shards),
+            addresses: Arc::new(addresses),
             banlist: Arc::new(RwLock::new(banlist)),
             config_hash: new_pool_hash_value,
-            server_info: Arc::new(RwLock::new(BytesMut::new())),
+            original_server_parameters: Arc::new(RwLock::new(ServerParameters::new())),
             auth_hash: pool_auth_hash,
-            settings: PoolSettings {
+            settings: Arc::new(PoolSettings {
                 pool_mode: match user.pool_mode {
                     Some(pool_mode) => pool_mode,
                     None => pool_config.pool_mode,
@@ -390,6 +524,7 @@ impl ConnectionPool {
                 // shards: pool_config.shards.clone(),
                 shards: shard_ids.len(),
                 user: user.clone(),
+                db: pool_name.clone(),
                 default_role: match pool_config.default_role.as_str() {
                     "any" => None,
                     "replica" => Some(Role::Replica),
@@ -397,6 +532,9 @@ impl ConnectionPool {
                     _ => unreachable!(),
                 },
                 query_parser_enabled: pool_config.query_parser_enabled,
+                query_parser_max_length: pool_config.query_parser_max_length,
+                query_parser_read_write_splitting: pool_config
+                    .query_parser_read_write_splitting,
                 primary_reads_enabled: pool_config.primary_reads_enabled,
                 sharding_function: pool_config.sharding_function,
                 automatic_sharding_key: pool_config.automatic_sharding_key.clone(),
@@ -412,22 +550,35 @@ impl ConnectionPool {
                     .clone()
                     .map(|regex| Regex::new(regex.as_str()).unwrap()),
                 regex_search_limit: pool_config.regex_search_limit.unwrap_or(1000),
+                default_shard: pool_config.default_shard,
                 auth_query: pool_config.auth_query.clone(),
                 auth_query_user: pool_config.auth_query_user.clone(),
                 auth_query_password: pool_config.auth_query_password.clone(),
-            },
+                plugins: match pool_config.plugins {
+                    Some(ref plugins) => Some(plugins.clone()),
+                    None => config.plugins.clone(),
+                },
+            }),
             validated: Arc::new(AtomicBool::new(false)),
             paused: Arc::new(AtomicBool::new(false)),
             paused_waiter: Arc::new(Notify::new()),
+            prepared_statement_cache: match pool_config.prepared_statements_cache_size {
+                0 => None,
+                _ => Some(Arc::new(Mutex::new(PreparedStatementCache::new(
+                    pool_config.prepared_statements_cache_size,
+                )))),
+            },
         };

         // Connect to the servers to make sure pool configuration is valid
         // before setting it globally.
         // Do this async and somewhere else, we don't have to wait here.
-        let mut validate_pool = pool.clone();
-        tokio::task::spawn(async move {
-            let _ = validate_pool.validate().await;
-        });
+        if config.general.validate_config {
+            let validate_pool = pool.clone();
+            tokio::task::spawn(async move {
+                let _ = validate_pool.validate().await;
+            });
+        }

         // There is one pool per database/user pair.
         new_pools.insert(PoolIdentifier::new(pool_name, &user.username), pool);
@@ -444,7 +595,7 @@ impl ConnectionPool {
     /// when they connect.
     /// This also warms up the pool for clients that connect when
     /// the pooler starts up.
-    pub async fn validate(&mut self) -> Result<(), Error> {
+    pub async fn validate(&self) -> Result<(), Error> {
         let mut futures = Vec::new();
         let validated = Arc::clone(&self.validated);

@@ -452,7 +603,7 @@ impl ConnectionPool {
         for server in 0..self.servers(shard) {
             let databases = self.databases.clone();
             let validated = Arc::clone(&validated);
-            let pool_server_info = Arc::clone(&self.server_info);
+            let pool_server_parameters = Arc::clone(&self.original_server_parameters);

             let task = tokio::task::spawn(async move {
                 let connection = match databases[shard][server].get().await {
@@ -465,11 +616,10 @@ impl ConnectionPool {

                 let proxy = connection;
                 let server = &*proxy;
-                let server_info = server.server_info();
+                let server_parameters: ServerParameters = server.server_parameters();

-                let mut guard = pool_server_info.write();
-                guard.clear();
-                guard.put(server_info.clone());
+                let mut guard = pool_server_parameters.write();
+                *guard = server_parameters;
                 validated.store(true, Ordering::Relaxed);
             });

@@ -481,7 +631,7 @@ impl ConnectionPool {

         // TODO: compare server information to make sure
         // all shards are running identical configurations.
-        if self.server_info.read().is_empty() {
+        if !self.validated() {
             error!("Could not validate connection pool");
             return Err(Error::AllServersDown);
         }
@@ -528,19 +678,51 @@ impl ConnectionPool {
     /// Get a connection from the pool.
     pub async fn get(
         &self,
-        shard: usize,               // shard number
+        shard: Option<usize>,       // shard number
         role: Option<Role>,         // primary or replica
         client_stats: &ClientStats, // client id
     ) -> Result<(PooledConnection<'_, ServerPool>, Address), Error> {
-        let mut candidates: Vec<&Address> = self.addresses[shard]
-            .iter()
-            .filter(|address| address.role == role)
-            .collect();
+        let effective_shard_id = if self.shards() == 1 {
+            // The base, unsharded case
+            Some(0)
+        } else {
+            if !self.valid_shard_id(shard) {
+                // None is valid shard ID so it is safe to unwrap here
+                return Err(Error::InvalidShardId(shard.unwrap()));
+            }
+            shard
+        };

-        // We shuffle even if least_outstanding_queries is used to avoid imbalance
-        // in cases where all candidates have more or less the same number of outstanding
-        // queries
+        let mut candidates = self
+            .addresses
+            .iter()
+            .flatten()
+            .filter(|address| address.role == role)
+            .collect::<Vec<&Address>>();
+
+        // We start with a shuffled list of addresses even if we end up resorting
+        // this is meant to avoid hitting instance 0 everytime if the sorting metric
+        // ends up being the same for all instances
         candidates.shuffle(&mut thread_rng());

+        match effective_shard_id {
+            Some(shard_id) => candidates.retain(|address| address.shard == shard_id),
+            None => match self.settings.default_shard {
+                DefaultShard::Shard(shard_id) => {
+                    candidates.retain(|address| address.shard == shard_id)
+                }
+                DefaultShard::Random => (),
+                DefaultShard::RandomHealthy => {
+                    candidates.sort_by(|a, b| {
+                        b.error_count
+                            .load(Ordering::Relaxed)
+                            .partial_cmp(&a.error_count.load(Ordering::Relaxed))
+                            .unwrap()
+                    });
+                }
+            },
+        };
+
         if self.settings.load_balancing_mode == LoadBalancingMode::LeastOutstandingConnections {
             candidates.sort_by(|a, b| {
                 self.busy_connection_count(b)
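
In the rewritten `get()`, candidate addresses are filtered by role, shuffled, and then narrowed to the resolved shard; when the client never picked a shard, the pool's `default_shard` policy decides (a fixed shard, any random shard, or random weighted toward addresses with the fewest recent errors). A toy, std-only sketch of the filter-then-retain pipeline (shuffling omitted; the names here are illustrative, not pgcat's types):

    #[derive(Clone, Copy, PartialEq)]
    enum Role { Primary, Replica }

    #[derive(Clone, Copy)]
    struct Addr { shard: usize, role: Role }

    fn candidates(all: &[Addr], role: Role, shard: Option<usize>) -> Vec<Addr> {
        // Keep only addresses serving the requested role...
        let mut out: Vec<Addr> = all.iter().copied().filter(|a| a.role == role).collect();
        // ...then, if a shard was resolved, only that shard's addresses survive.
        if let Some(shard) = shard {
            out.retain(|a| a.shard == shard);
        }
        out
    }

    fn main() {
        let all = [
            Addr { shard: 0, role: Role::Primary },
            Addr { shard: 0, role: Role::Replica },
            Addr { shard: 1, role: Role::Replica },
        ];
        assert_eq!(candidates(&all, Role::Replica, Some(1)).len(), 1);
        assert_eq!(candidates(&all, Role::Replica, None).len(), 2);
    }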
@@ -549,6 +731,10 @@ impl ConnectionPool {
             });
         }

+        // Indicate we're waiting on a server connection from a pool.
+        let now = Instant::now();
+        client_stats.waiting();
+
         while !candidates.is_empty() {
             // Get the next candidate
             let address = match candidates.pop() {
@@ -559,7 +745,7 @@ impl ConnectionPool {
             let mut force_healthcheck = false;

             if self.is_banned(address) {
-                if self.try_unban(&address).await {
+                if self.try_unban(address).await {
                     force_healthcheck = true;
                 } else {
                     debug!("Address {:?} is banned", address);
@@ -567,21 +753,22 @@ impl ConnectionPool {
                 }
             }

-            // Indicate we're waiting on a server connection from a pool.
-            let now = Instant::now();
-            client_stats.waiting();
-
             // Check if we can connect
             let mut conn = match self.databases[address.shard][address.address_index]
                 .get()
                 .await
             {
-                Ok(conn) => conn,
+                Ok(conn) => {
+                    address.reset_error_count();
+                    conn
+                }
                 Err(err) => {
-                    error!("Banning instance {:?}, error: {:?}", address, err);
+                    error!(
+                        "Connection checkout error for instance {:?}, error: {:?}",
+                        address, err
+                    );
                     self.ban(address, BanReason::FailedCheckout, Some(client_stats));
                     address.stats.error();
-                    client_stats.idle();
                     client_stats.checkout_error();
                     continue;
                 }
@@ -599,13 +786,13 @@ impl ConnectionPool {
             // since we last checked the server is ok.
             // Health checks are pretty expensive.
             if !require_healthcheck {
-                let checkout_time: u64 = now.elapsed().as_micros() as u64;
-                client_stats.checkout_time(checkout_time);
+                let checkout_time = now.elapsed().as_micros() as u64;
+                client_stats.checkout_success();
                 server
                     .stats()
                     .checkout_time(checkout_time, client_stats.application_name());
                 server.stats().active(client_stats.application_name());
+                client_stats.active();
                 return Ok((conn, address.clone()));
             }

@@ -613,11 +800,21 @@ impl ConnectionPool {
                 .run_health_check(address, server, now, client_stats)
                 .await
             {
+                let checkout_time = now.elapsed().as_micros() as u64;
+                client_stats.checkout_success();
+                server
+                    .stats()
+                    .checkout_time(checkout_time, client_stats.application_name());
+                server.stats().active(client_stats.application_name());
+                client_stats.active();
                 return Ok((conn, address.clone()));
             } else {
                 continue;
             }
         }

+        client_stats.checkout_error();
+
         Err(Error::AllServersDown)
     }

@@ -642,7 +839,7 @@ impl ConnectionPool {
             Ok(res) => match res {
                 Ok(_) => {
                     let checkout_time: u64 = start.elapsed().as_micros() as u64;
-                    client_info.checkout_time(checkout_time);
+                    client_info.checkout_success();
                     server
                         .stats()
                         .checkout_time(checkout_time, client_info.application_name());
@@ -654,7 +851,7 @@ impl ConnectionPool {
                 // Health check failed.
                 Err(err) => {
                     error!(
-                        "Banning instance {:?} because of failed health check, {:?}",
+                        "Failed health check on instance {:?}, error: {:?}",
                         address, err
                     );
                 }
@@ -663,35 +860,50 @@ impl ConnectionPool {
                 // Health check timed out.
                 Err(err) => {
                     error!(
-                        "Banning instance {:?} because of health check timeout, {:?}",
+                        "Health check timeout on instance {:?}, error: {:?}",
                         address, err
                     );
                 }
             }

             // Don't leave a bad connection in the pool.
-            server.mark_bad();
+            server.mark_bad("failed health check");

-            self.ban(&address, BanReason::FailedHealthCheck, Some(client_info));
-            return false;
+            self.ban(address, BanReason::FailedHealthCheck, Some(client_info));
+            false
         }

     /// Ban an address (i.e. replica). It no longer will serve
     /// traffic for any new transactions. Existing transactions on that replica
     /// will finish successfully or error out to the clients.
     pub fn ban(&self, address: &Address, reason: BanReason, client_info: Option<&ClientStats>) {
+        // Count the number of errors since the last successful checkout
+        // This is used to determine if the shard is down
+        match reason {
+            BanReason::FailedHealthCheck
+            | BanReason::FailedCheckout
+            | BanReason::MessageSendFailed
+            | BanReason::MessageReceiveFailed => {
+                address.increment_error_count();
+            }
+            _ => (),
+        };
+
         // Primary can never be banned
         if address.role == Role::Primary {
             return;
         }

+        error!("Banning instance {:?}, reason: {:?}", address, reason);
+
         let now = chrono::offset::Utc::now().naive_utc();
         let mut guard = self.banlist.write();
-        error!("Banning {:?}", address);
         if let Some(client_info) = client_info {
             client_info.ban_error();
             address.stats.error();
         }

         guard[address.shard].insert(address.clone(), (reason, now));
     }

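
`ban()` now counts connection-level failures per address; `get()` resets the counter on a successful checkout, and `DefaultShard::RandomHealthy` sorts candidates by it. A sketch of that counter using a relaxed atomic, mirroring the `error_count: Arc<AtomicU64>` field the diff adds to `Address`:

    use std::sync::atomic::{AtomicU64, Ordering};

    struct ErrorCount(AtomicU64);

    impl ErrorCount {
        fn increment(&self) { self.0.fetch_add(1, Ordering::Relaxed); }
        fn reset(&self) { self.0.store(0, Ordering::Relaxed); }
        fn get(&self) -> u64 { self.0.load(Ordering::Relaxed) }
    }

    fn main() {
        let errors = ErrorCount(AtomicU64::new(0));
        errors.increment(); // e.g. BanReason::FailedCheckout
        errors.increment(); // e.g. BanReason::FailedHealthCheck
        assert_eq!(errors.get(), 2);
        errors.reset(); // a successful checkout clears the streak
        assert_eq!(errors.get(), 0);
    }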
@@ -777,19 +989,18 @@ impl ConnectionPool {
         self.databases.len()
     }

-    /// Retrieve all bans for all servers.
     pub fn get_bans(&self) -> Vec<(Address, (BanReason, NaiveDateTime))> {
         let mut bans: Vec<(Address, (BanReason, NaiveDateTime))> = Vec::new();
         let guard = self.banlist.read();
         for banlist in guard.iter() {
             for (address, (reason, timestamp)) in banlist.iter() {
-                bans.push((address.clone(), (reason.clone(), timestamp.clone())));
+                bans.push((address.clone(), (reason.clone(), *timestamp)));
             }
         }
-        return bans;
+        bans
     }

-    /// Get the address from the host url.
+    /// Get the address from the host url
     pub fn get_addresses_from_host(&self, host: &str) -> Vec<Address> {
         let mut addresses = Vec::new();
         for shard in 0..self.shards() {
@@ -828,13 +1039,11 @@ impl ConnectionPool {
         &self.addresses[shard][server]
     }

-    /// Get server settings retrieved at connection setup.
-    pub fn server_info(&self) -> BytesMut {
-        self.server_info.read().clone()
+    pub fn server_parameters(&self) -> ServerParameters {
+        self.original_server_parameters.read().clone()
     }

-    /// Calculate how many used connections in the pool
-    /// for the given server.
+    /// Get the number of checked out connection for an address
     fn busy_connection_count(&self, address: &Address) -> u32 {
         let state = self.pool_state(address.shard, address.address_index);
         let idle = state.idle_connections;
@@ -846,36 +1055,93 @@ impl ConnectionPool {
         }
         let busy = provisioned - idle;
         debug!("{:?} has {:?} busy connections", address, busy);
-        return busy;
+        busy
+    }
+
+    fn valid_shard_id(&self, shard: Option<usize>) -> bool {
+        match shard {
+            None => true,
+            Some(shard) => shard < self.shards(),
+        }
+    }
+
+    /// Register a parse statement to the pool's cache and return the rewritten parse
+    ///
+    /// Do not pass an anonymous parse statement to this function
+    pub fn register_parse_to_cache(&self, hash: u64, parse: &Parse) -> Option<Arc<Parse>> {
+        // We should only be calling this function if the cache is enabled
+        match self.prepared_statement_cache {
+            Some(ref prepared_statement_cache) => {
+                let mut cache = prepared_statement_cache.lock();
+                Some(cache.get_or_insert(parse, hash))
+            }
+            None => None,
+        }
+    }
+
+    /// Promote a prepared statement hash in the LRU
+    pub fn promote_prepared_statement_hash(&self, hash: &u64) {
+        // We should only be calling this function if the cache is enabled
+        if let Some(ref prepared_statement_cache) = self.prepared_statement_cache {
+            let mut cache = prepared_statement_cache.lock();
+            cache.promote(hash);
+        }
     }
 }
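
`register_parse_to_cache` and `promote_prepared_statement_hash` are the two halves of an LRU keyed by the hash of a Parse message: insert-or-fetch on first use, promote to most-recently-used on reuse. A toy LRU sketch showing those semantics (pgcat's `PreparedStatementCache` is its own type; this is illustrative only):

    use std::collections::VecDeque;

    struct ToyLru {
        capacity: usize,
        entries: VecDeque<u64>, // front = most recently used
    }

    impl ToyLru {
        fn get_or_insert(&mut self, hash: u64) {
            if !self.entries.contains(&hash) {
                if self.entries.len() == self.capacity {
                    self.entries.pop_back(); // evict least recently used
                }
                self.entries.push_front(hash);
            }
        }

        fn promote(&mut self, hash: u64) {
            if let Some(pos) = self.entries.iter().position(|&h| h == hash) {
                let hit = self.entries.remove(pos).unwrap();
                self.entries.push_front(hit);
            }
        }
    }

    fn main() {
        let mut lru = ToyLru { capacity: 2, entries: VecDeque::new() };
        lru.get_or_insert(1);
        lru.get_or_insert(2);
        lru.promote(1); // statement 1 becomes most recently used
        lru.get_or_insert(3); // evicts 2, not 1
        assert!(lru.entries.contains(&1) && !lru.entries.contains(&2));
    }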

 /// Wrapper for the bb8 connection pool.
 pub struct ServerPool {
+    /// Server address.
     address: Address,
+
+    /// Server Postgres user.
     user: User,
+
+    /// Server database.
     database: String,
+
+    /// Client/server mapping.
     client_server_map: ClientServerMap,
-    stats: Arc<PoolStats>,
+
+    /// Server auth hash (for auth passthrough).
     auth_hash: Arc<RwLock<Option<String>>>,
+
+    /// Server plugins.
+    plugins: Option<Plugins>,
+
+    /// Should we clean up dirty connections before putting them into the pool?
+    cleanup_connections: bool,
+
+    /// Log client parameter status changes
+    log_client_parameter_status_changes: bool,
+
+    /// Prepared statement cache size
+    prepared_statement_cache_size: usize,
 }

 impl ServerPool {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         address: Address,
         user: User,
         database: &str,
         client_server_map: ClientServerMap,
-        stats: Arc<PoolStats>,
         auth_hash: Arc<RwLock<Option<String>>>,
+        plugins: Option<Plugins>,
+        cleanup_connections: bool,
+        log_client_parameter_status_changes: bool,
+        prepared_statement_cache_size: usize,
     ) -> ServerPool {
         ServerPool {
             address,
-            user: user.clone(),
+            user,
             database: database.to_string(),
             client_server_map,
-            stats,
             auth_hash,
+            plugins,
+            cleanup_connections,
+            log_client_parameter_status_changes,
+            prepared_statement_cache_size,
         }
     }
 }
@@ -891,7 +1157,6 @@ impl ManageConnection for ServerPool {

         let stats = Arc::new(ServerStats::new(
             self.address.clone(),
-            self.stats.clone(),
             tokio::time::Instant::now(),
         ));

@@ -905,10 +1170,25 @@ impl ManageConnection for ServerPool {
             self.client_server_map.clone(),
             stats.clone(),
             self.auth_hash.clone(),
+            self.cleanup_connections,
+            self.log_client_parameter_status_changes,
+            self.prepared_statement_cache_size,
         )
         .await
         {
-            Ok(conn) => {
+            Ok(mut conn) => {
+                if let Some(ref plugins) = self.plugins {
+                    if let Some(ref prewarmer) = plugins.prewarmer {
+                        let mut prewarmer = prewarmer::Prewarmer {
+                            enabled: prewarmer.enabled,
+                            server: &mut conn,
+                            queries: &prewarmer.queries,
+                        };
+
+                        prewarmer.run().await?;
+                    }
+                }
+
                 stats.idle();
                 Ok(conn)
             }
src/prometheus.rs

@@ -1,22 +1,41 @@
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Server, StatusCode};
-use log::{error, info, warn};
+use http_body_util::Full;
+use hyper::body;
+use hyper::body::Bytes;
+
+use hyper::server::conn::http1;
+use hyper::service::service_fn;
+use hyper::{Method, Request, Response, StatusCode};
+use hyper_util::rt::TokioIo;
+use log::{debug, error, info};
 use phf::phf_map;
 use std::collections::HashMap;
 use std::fmt;
 use std::net::SocketAddr;
 use std::sync::atomic::Ordering;
-use std::sync::Arc;
+use tokio::net::TcpListener;

 use crate::config::Address;
-use crate::pool::get_all_pools;
-use crate::stats::{get_pool_stats, get_server_stats, ServerStats};
+use crate::pool::{get_all_pools, PoolIdentifier};
+use crate::stats::get_server_stats;
+use crate::stats::pool::PoolStats;

 struct MetricHelpType {
     help: &'static str,
     ty: &'static str,
 }

+struct ServerPrometheusStats {
+    bytes_received: u64,
+    bytes_sent: u64,
+    transaction_count: u64,
+    query_count: u64,
+    error_count: u64,
+    active_count: u64,
+    idle_count: u64,
+    login_count: u64,
+    tested_count: u64,
+}
+
 // reference for metric types: https://prometheus.io/docs/concepts/metric_types/
 // counters only increase
 // gauges can arbitrarily increase or decrease
@@ -119,22 +138,46 @@ static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = ph
     },
     "servers_bytes_received" => MetricHelpType {
         help: "Volume in bytes of network traffic received by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_bytes_sent" => MetricHelpType {
         help: "Volume in bytes of network traffic sent by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_transaction_count" => MetricHelpType {
         help: "Number of transactions executed by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_query_count" => MetricHelpType {
         help: "Number of queries executed by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_error_count" => MetricHelpType {
         help: "Number of errors",
+        ty: "counter",
+    },
+    "servers_idle_count" => MetricHelpType {
+        help: "Number of server connection in idle state",
+        ty: "gauge",
+    },
+    "servers_active_count" => MetricHelpType {
+        help: "Number of server connection in active state",
+        ty: "gauge",
+    },
+    "servers_tested_count" => MetricHelpType {
+        help: "Number of server connection in tested state",
+        ty: "gauge",
+    },
+    "servers_login_count" => MetricHelpType {
+        help: "Number of server connection in login state",
+        ty: "gauge",
+    },
+    "servers_is_banned" => MetricHelpType {
+        help: "0 if server is not banned, 1 if server is banned",
+        ty: "gauge",
+    },
+    "servers_is_paused" => MetricHelpType {
+        help: "0 if server is not paused, 1 if server is paused",
         ty: "gauge",
     },
     "databases_pool_size" => MetricHelpType {
@@ -157,18 +200,17 @@ struct PrometheusMetric<Value: fmt::Display> {

 impl<Value: fmt::Display> fmt::Display for PrometheusMetric<Value> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let formatted_labels = self
-            .labels
+        let mut sorted_labels: Vec<_> = self.labels.iter().collect();
+        sorted_labels.sort_by_key(|&(key, _)| key);
+        let formatted_labels = sorted_labels
             .iter()
             .map(|(key, value)| format!("{}=\"{}\"", key, value))
             .collect::<Vec<_>>()
             .join(",");
         write!(
             f,
-            "# HELP {name} {help}\n# TYPE {name} {ty}\n{name}{{{formatted_labels}}} {value}\n",
+            "{name}{{{formatted_labels}}} {value}",
             name = format_args!("pgcat_{}", self.name),
-            help = self.help,
-            ty = self.ty,
             formatted_labels = formatted_labels,
             value = self.value
         )
@@ -202,7 +244,9 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
         labels.insert("shard", address.shard.to_string());
         labels.insert("role", address.role.to_string());
         labels.insert("pool", address.pool_name.clone());
+        labels.insert("index", address.address_index.to_string());
         labels.insert("database", address.database.to_string());
+        labels.insert("username", address.username.clone());

         Self::from_name(&format!("databases_{}", name), value, labels)
     }
@@ -217,7 +261,9 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
         labels.insert("shard", address.shard.to_string());
         labels.insert("role", address.role.to_string());
         labels.insert("pool", address.pool_name.clone());
+        labels.insert("index", address.address_index.to_string());
         labels.insert("database", address.database.to_string());
+        labels.insert("username", address.username.clone());

         Self::from_name(&format!("servers_{}", name), value, labels)
     }
@@ -228,21 +274,34 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
         labels.insert("shard", address.shard.to_string());
         labels.insert("pool", address.pool_name.clone());
         labels.insert("role", address.role.to_string());
+        labels.insert("index", address.address_index.to_string());
         labels.insert("database", address.database.to_string());
+        labels.insert("username", address.username.clone());

         Self::from_name(&format!("stats_{}", name), value, labels)
     }

-    fn from_pool(pool: &(String, String), name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
+    fn from_pool(pool_id: PoolIdentifier, name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
         let mut labels = HashMap::new();
-        labels.insert("pool", pool.0.clone());
-        labels.insert("user", pool.1.clone());
+        labels.insert("pool", pool_id.db);
+        labels.insert("user", pool_id.user);

         Self::from_name(&format!("pools_{}", name), value, labels)
     }
+
+    fn get_header(&self) -> String {
+        format!(
+            "\n# HELP {name} {help}\n# TYPE {name} {ty}",
+            name = format_args!("pgcat_{}", self.name),
+            help = self.help,
+            ty = self.ty,
+        )
+    }
 }

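Splitting the `# HELP`/`# TYPE` header out of `Display` and into `get_header()` lets the exporter emit each header once per metric family, followed by one sample line per label set, which is what the Prometheus text exposition format expects. Illustrative output (values are made up and the label sets are abbreviated; the label names come from the inserts above):

    # HELP pgcat_servers_query_count Number of queries executed by server
    # TYPE pgcat_servers_query_count counter
    pgcat_servers_query_count{pool="sharded_db",role="primary",shard="0",...} 1234
    pgcat_servers_query_count{pool="sharded_db",role="replica",shard="1",...} 987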
-async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hyper::http::Error> {
+async fn prometheus_stats(
+    request: Request<body::Incoming>,
+) -> Result<Response<Full<Bytes>>, hyper::http::Error> {
     match (request.method(), request.uri().path()) {
         (&Method::GET, "/metrics") => {
             let mut lines = Vec::new();
@@ -250,6 +309,7 @@ async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hype
             push_pool_stats(&mut lines);
             push_server_stats(&mut lines);
             push_database_stats(&mut lines);
+            lines.push("".to_string()); // Ensure to end the stats with a line terminator as required by the specification.

             Response::builder()
                 .header("content-type", "text/plain; version=0.0.4")
@@ -263,6 +323,7 @@ async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hype

 // Adds metrics shown in a SHOW STATS admin command.
 fn push_address_stats(lines: &mut Vec<String>) {
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
     for (_, pool) in get_all_pools() {
         for shard in 0..pool.shards() {
             for server in 0..pool.servers(shard) {
@@ -272,30 +333,50 @@ fn push_address_stats(lines: &mut Vec<String>) {
                     if let Some(prometheus_metric) =
                         PrometheusMetric::<u64>::from_address(address, &key, value)
                     {
-                        lines.push(prometheus_metric.to_string());
+                        grouped_metrics
+                            .entry(key)
+                            .or_default()
+                            .push(prometheus_metric);
                     } else {
-                        warn!("Metric {} not implemented for {}", key, address.name());
+                        debug!("Metric {} not implemented for {}", key, address.name());
                     }
                 }
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 // Adds relevant metrics shown in a SHOW POOLS admin command.
 fn push_pool_stats(lines: &mut Vec<String>) {
-    let pool_stats = get_pool_stats();
-    for (pool, stats) in pool_stats.iter() {
-        let stats = &**stats;
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
+    let pool_stats = PoolStats::construct_pool_lookup();
+    for (pool_id, stats) in pool_stats.iter() {
         for (name, value) in stats.clone() {
-            if let Some(prometheus_metric) = PrometheusMetric::<u64>::from_pool(pool, &name, value)
+            if let Some(prometheus_metric) =
+                PrometheusMetric::<u64>::from_pool(pool_id.clone(), &name, value)
             {
-                lines.push(prometheus_metric.to_string());
+                grouped_metrics
+                    .entry(name)
+                    .or_default()
+                    .push(prometheus_metric);
             } else {
-                warn!(
-                    "Metric {} not implemented for ({},{})",
-                    name, pool.0, pool.1
-                );
+                debug!("Metric {} not implemented for ({})", name, *pool_id);
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }
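
All of the `push_*_stats` functions now share the same idiom: accumulate samples per metric name with `HashMap::entry(...).or_default()`, then emit one header per group. A condensed, self-contained sketch of that pattern:

    use std::collections::HashMap;

    fn main() {
        let samples = [("query_count", 10u64), ("error_count", 1), ("query_count", 7)];

        // Group samples by metric name first...
        let mut grouped: HashMap<&str, Vec<u64>> = HashMap::new();
        for (name, value) in samples {
            grouped.entry(name).or_default().push(value);
        }

        // ...then emit a single header per metric family, followed by its samples.
        for (name, values) in &grouped {
            println!("# HELP/TYPE header for {} goes here once", name);
            for value in values {
                println!("pgcat_{} {}", name, value);
            }
        }
    }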
@@ -303,13 +384,13 @@ fn push_pool_stats(lines: &mut Vec<String>) {

 // Adds relevant metrics shown in a SHOW DATABASES admin command.
 fn push_database_stats(lines: &mut Vec<String>) {
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u32>>> = HashMap::new();
     for (_, pool) in get_all_pools() {
         let pool_config = pool.settings.clone();
         for shard in 0..pool.shards() {
             for server in 0..pool.servers(shard) {
                 let address = pool.address(shard, server);
                 let pool_state = pool.pool_state(shard, server);

                 let metrics = vec![
                     ("pool_size", pool_config.user.pool_size),
                     ("current_connections", pool_state.connections),
@@ -318,72 +399,132 @@ fn push_database_stats(lines: &mut Vec<String>) {
                 if let Some(prometheus_metric) =
                     PrometheusMetric::<u32>::from_database_info(address, key, value)
                 {
-                    lines.push(prometheus_metric.to_string());
+                    grouped_metrics
+                        .entry(key.to_string())
+                        .or_default()
+                        .push(prometheus_metric);
                 } else {
-                    warn!("Metric {} not implemented for {}", key, address.name());
+                    debug!("Metric {} not implemented for {}", key, address.name());
                 }
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 // Adds relevant metrics shown in a SHOW SERVERS admin command.
 fn push_server_stats(lines: &mut Vec<String>) {
     let server_stats = get_server_stats();
-    let mut server_stats_by_addresses = HashMap::<String, Arc<ServerStats>>::new();
+    let mut prom_stats = HashMap::<String, ServerPrometheusStats>::new();
     for (_, stats) in server_stats {
-        server_stats_by_addresses.insert(stats.address_name(), stats);
+        let entry = prom_stats
+            .entry(stats.address_name())
+            .or_insert(ServerPrometheusStats {
+                bytes_received: 0,
+                bytes_sent: 0,
+                transaction_count: 0,
+                query_count: 0,
+                error_count: 0,
+                active_count: 0,
+                idle_count: 0,
+                login_count: 0,
+                tested_count: 0,
+            });
+        entry.bytes_received += stats.bytes_received.load(Ordering::Relaxed);
+        entry.bytes_sent += stats.bytes_sent.load(Ordering::Relaxed);
+        entry.transaction_count += stats.transaction_count.load(Ordering::Relaxed);
+        entry.query_count += stats.query_count.load(Ordering::Relaxed);
+        entry.error_count += stats.error_count.load(Ordering::Relaxed);
+        match stats.state.load(Ordering::Relaxed) {
+            crate::stats::ServerState::Login => entry.login_count += 1,
+            crate::stats::ServerState::Active => entry.active_count += 1,
+            crate::stats::ServerState::Tested => entry.tested_count += 1,
+            crate::stats::ServerState::Idle => entry.idle_count += 1,
+        }
     }
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
     for (_, pool) in get_all_pools() {
         for shard in 0..pool.shards() {
             for server in 0..pool.servers(shard) {
                 let address = pool.address(shard, server);
-                if let Some(server_info) = server_stats_by_addresses.get(&address.name()) {
+                if let Some(server_info) = prom_stats.get(&address.name()) {
                     let metrics = [
-                        (
-                            "bytes_received",
-                            server_info.bytes_received.load(Ordering::Relaxed),
-                        ),
-                        ("bytes_sent", server_info.bytes_sent.load(Ordering::Relaxed)),
-                        (
-                            "transaction_count",
-                            server_info.transaction_count.load(Ordering::Relaxed),
-                        ),
-                        (
-                            "query_count",
-                            server_info.query_count.load(Ordering::Relaxed),
-                        ),
-                        (
-                            "error_count",
-                            server_info.error_count.load(Ordering::Relaxed),
-                        ),
+                        ("bytes_received", server_info.bytes_received),
+                        ("bytes_sent", server_info.bytes_sent),
+                        ("transaction_count", server_info.transaction_count),
+                        ("query_count", server_info.query_count),
+                        ("error_count", server_info.error_count),
+                        ("idle_count", server_info.idle_count),
+                        ("active_count", server_info.active_count),
+                        ("login_count", server_info.login_count),
+                        ("tested_count", server_info.tested_count),
+                        ("is_banned", if pool.is_banned(address) { 1 } else { 0 }),
+                        ("is_paused", if pool.paused() { 1 } else { 0 }),
                     ];
                     for (key, value) in metrics {
                         if let Some(prometheus_metric) =
                             PrometheusMetric::<u64>::from_server_info(address, key, value)
                         {
-                            lines.push(prometheus_metric.to_string());
+                            grouped_metrics
+                                .entry(key.to_string())
+                                .or_default()
+                                .push(prometheus_metric);
                         } else {
-                            warn!("Metric {} not implemented for {}", key, address.name());
+                            debug!("Metric {} not implemented for {}", key, address.name());
                         }
                     }
                 }
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 pub async fn start_metric_server(http_addr: SocketAddr) {
-    let http_service_factory =
-        make_service_fn(|_conn| async { Ok::<_, hyper::Error>(service_fn(prometheus_stats)) });
-    let server = Server::bind(&http_addr).serve(http_service_factory);
+    let listener = TcpListener::bind(http_addr);
+    let listener = match listener.await {
+        Ok(listener) => listener,
+        Err(e) => {
+            error!("Failed to bind prometheus server to HTTP address: {}.", e);
+            return;
+        }
+    };
     info!(
         "Exposing prometheus metrics on http://{}/metrics.",
         http_addr
     );
-    if let Err(e) = server.await {
-        error!("Failed to run HTTP server: {}.", e);
+    loop {
+        let stream = match listener.accept().await {
+            Ok((stream, _)) => stream,
+            Err(e) => {
+                error!("Error accepting connection: {}", e);
+                continue;
+            }
+        };
+        let io = TokioIo::new(stream);
+
+        tokio::task::spawn(async move {
+            if let Err(err) = http1::Builder::new()
+                .serve_connection(io, service_fn(prometheus_stats))
+                .await
+            {
+                eprintln!("Error serving HTTP connection for metrics: {:?}", err);
+            }
+        });
     }
 }
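
The hyper 1.x rewrite replaces the removed `Server`/`make_service_fn` API with an explicit accept loop that spawns one task per connection. A std-only smoke test for the endpoint (the address is an assumption; use whatever metrics address and port your pgcat config exposes):

    use std::io::{Read, Write};
    use std::net::TcpStream;

    fn main() -> std::io::Result<()> {
        // Assumed address; adjust to your pgcat prometheus settings.
        let mut stream = TcpStream::connect("127.0.0.1:9930")?;
        stream.write_all(b"GET /metrics HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")?;

        // With Connection: close, reading to EOF captures the whole response.
        let mut response = String::new();
        stream.read_to_string(&mut response)?;
        assert!(response.contains("pgcat_"));
        println!("{}", response);
        Ok(())
    }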
src/query_router.rs (1103 lines changed; diff suppressed because it is too large)

src/scram.rs (14 lines changed)
@@ -79,12 +79,12 @@ impl ScramSha256 {
|
|||||||
let server_message = Message::parse(message)?;
|
let server_message = Message::parse(message)?;
|
||||||
|
|
||||||
if !server_message.nonce.starts_with(&self.nonce) {
|
if !server_message.nonce.starts_with(&self.nonce) {
|
||||||
return Err(Error::ProtocolSyncError(format!("SCRAM")));
|
return Err(Error::ProtocolSyncError("SCRAM".to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
let salt = match general_purpose::STANDARD.decode(&server_message.salt) {
|
let salt = match general_purpose::STANDARD.decode(&server_message.salt) {
|
||||||
Ok(salt) => salt,
|
Ok(salt) => salt,
|
||||||
Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
|
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
|
||||||
};
|
};
|
||||||
|
|
||||||
let salted_password = Self::hi(
|
let salted_password = Self::hi(
|
||||||
@@ -166,9 +166,9 @@ impl ScramSha256 {
|
|||||||
pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
|
pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
|
||||||
let final_message = FinalMessage::parse(message)?;
|
let final_message = FinalMessage::parse(message)?;
|
||||||
|
|
||||||
let verifier = match general_purpose::STANDARD.decode(&final_message.value) {
|
let verifier = match general_purpose::STANDARD.decode(final_message.value) {
|
||||||
Ok(verifier) => verifier,
|
Ok(verifier) => verifier,
|
||||||
Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
|
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
|
let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
|
||||||
@@ -230,14 +230,14 @@ impl Message {
             .collect::<Vec<String>>();
 
         if parts.len() != 3 {
-            return Err(Error::ProtocolSyncError(format!("SCRAM")));
+            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
         }
 
         let nonce = str::replace(&parts[0], "r=", "");
         let salt = str::replace(&parts[1], "s=", "");
         let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
             Ok(iterations) => iterations,
-            Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
+            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
         };
 
         Ok(Message {
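For reference, `Message::parse` above is handling the SCRAM server-first-message of RFC 5802, a comma-separated triple of nonce, salt, and iteration count. A worked sketch of its shape (example values taken from the RFC):

    // Worked example of the server-first-message split by Message::parse (RFC 5802):
    let msg = "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096";
    let parts: Vec<&str> = msg.split(',').collect();
    assert_eq!(parts.len(), 3);
    assert!(parts[0].starts_with("r="));  // combined client+server nonce
    assert!(parts[1].starts_with("s="));  // base64-encoded salt
    assert_eq!(parts[2], "i=4096");       // PBKDF2 iteration count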
@@ -257,7 +257,7 @@ impl FinalMessage {
     /// Parse the server final validation message.
     pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
         if !message.starts_with(b"v=") || message.len() < 4 {
-            return Err(Error::ProtocolSyncError(format!("SCRAM")));
+            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
         }
 
         Ok(FinalMessage {
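All of the `format!("SCRAM")` to `"SCRAM".to_string()` swaps in this file are the same mechanical cleanup: `format!` with no interpolation runs a fixed string through the formatting machinery, which clippy flags as `useless_format`. A one-line illustration:

    let a = format!("SCRAM");     // flagged by clippy::useless_format
    let b = "SCRAM".to_string();  // same result, stated directly
    assert_eq!(a, b);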
842  src/server.rs  (diff suppressed because it is too large)

src/sharding.rs
@@ -14,11 +14,11 @@ pub enum ShardingFunction {
     Sha1,
 }
 
-impl ToString for ShardingFunction {
-    fn to_string(&self) -> String {
-        match *self {
-            ShardingFunction::PgBigintHash => "pg_bigint_hash".to_string(),
-            ShardingFunction::Sha1 => "sha1".to_string(),
+impl std::fmt::Display for ShardingFunction {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"),
+            ShardingFunction::Sha1 => write!(f, "sha1"),
         }
     }
 }
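Implementing `Display` instead of `ToString` is the idiomatic direction: the standard library's blanket `impl<T: fmt::Display> ToString for T` keeps `.to_string()` working, and the type becomes usable in format strings directly (clippy also lints against manual `ToString` impls). A minimal sketch of why the swap is free for callers:

    use std::fmt;

    enum ShardingFunction { PgBigintHash, Sha1 }

    impl fmt::Display for ShardingFunction {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self {
                ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"),
                ShardingFunction::Sha1 => write!(f, "sha1"),
            }
        }
    }

    fn main() {
        assert_eq!(ShardingFunction::Sha1.to_string(), "sha1"); // via the blanket impl
        println!("function = {}", ShardingFunction::PgBigintHash);
    }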
@@ -64,7 +64,7 @@ impl Sharder {
     fn sha1(&self, key: i64) -> usize {
         let mut hasher = Sha1::new();
 
-        hasher.update(&key.to_string().as_bytes());
+        hasher.update(key.to_string().as_bytes());
 
         let result = hasher.finalize();
 
@@ -202,10 +202,10 @@ mod test {
     #[test]
     fn test_sha1_hash() {
         let sharder = Sharder::new(12, ShardingFunction::Sha1);
-        let ids = vec![
+        let ids = [
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
         ];
-        let shards = vec![
+        let shards = [
             4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
         ];
 
37  src/stats.rs
@@ -1,4 +1,3 @@
-use crate::pool::PoolIdentifier;
 /// Statistics and reporting.
 use arc_swap::ArcSwap;
 
@@ -16,13 +15,11 @@ pub mod pool;
 pub mod server;
 pub use address::AddressStats;
 pub use client::{ClientState, ClientStats};
-pub use pool::PoolStats;
 pub use server::{ServerState, ServerStats};
 
 /// Convenience types for various stats
 type ClientStatesLookup = HashMap<i32, Arc<ClientStats>>;
 type ServerStatesLookup = HashMap<i32, Arc<ServerStats>>;
-type PoolStatsLookup = HashMap<(String, String), Arc<PoolStats>>;
 
 /// Stats for individual client connections
 /// Used in SHOW CLIENTS.
@@ -34,11 +31,6 @@ static CLIENT_STATS: Lazy<Arc<RwLock<ClientStatesLookup>>> =
 static SERVER_STATS: Lazy<Arc<RwLock<ServerStatesLookup>>> =
     Lazy::new(|| Arc::new(RwLock::new(ServerStatesLookup::default())));
 
-/// Aggregate stats for each pool (a pool is identified by database name and username)
-/// Used in SHOW POOLS.
-static POOL_STATS: Lazy<Arc<RwLock<PoolStatsLookup>>> =
-    Lazy::new(|| Arc::new(RwLock::new(PoolStatsLookup::default())));
-
 /// The statistics reporter. An instance is given to each possible source of statistics,
 /// e.g. client stats, server stats, connection pool stats.
 pub static REPORTER: Lazy<ArcSwap<Reporter>> =
@@ -80,13 +72,6 @@ impl Reporter {
     fn server_disconnecting(&self, server_id: i32) {
         SERVER_STATS.write().remove(&server_id);
     }
-
-    /// Register a pool with the stats system.
-    fn pool_register(&self, identifier: PoolIdentifier, stats: Arc<PoolStats>) {
-        POOL_STATS
-            .write()
-            .insert((identifier.db, identifier.user), stats);
-    }
 }
 
 /// The statistics collector which is used for calculating averages
@@ -107,8 +92,20 @@ impl Collector {
         loop {
             interval.tick().await;
 
-            for stats in SERVER_STATS.read().values() {
-                stats.address_stats().update_averages();
-            }
+            // Hold the read lock for the duration of the update to retain all server stats
+            let server_stats = SERVER_STATS.read();
+
+            for stats in server_stats.values() {
+                if !stats.check_address_stat_average_is_updated_status() {
+                    stats.address_stats().update_averages();
+                    stats.address_stats().reset_current_counts();
+                    stats.set_address_stat_average_is_updated_status(true);
+                }
+            }
+
+            // Reset to false for the next update
+            for stats in server_stats.values() {
+                stats.set_address_stat_average_is_updated_status(false);
+            }
         }
     });
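Several `ServerStats` can share one `AddressStats` (one per server connection to the same address), so without the `averages_updated` flag each address would be rolled over once per connection rather than once per tick. A minimal sketch of the dedupe pattern, with hypothetical names:

    // Many workers share one stats block; a per-block AtomicBool ensures
    // the block is rolled over only once per collection tick.
    use std::sync::atomic::{AtomicBool, Ordering};

    struct Shared {
        averages_updated: AtomicBool,
    }

    fn roll_over(shared: &Shared) {
        // A plain load/store is enough here because the collector is the
        // only writer during a tick; it flips all flags back afterwards.
        if !shared.averages_updated.load(Ordering::Relaxed) {
            // update_averages(); reset_current_counts();
            shared.averages_updated.store(true, Ordering::Relaxed);
        }
    }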
@@ -127,12 +124,6 @@ pub fn get_server_stats() -> ServerStatesLookup {
     SERVER_STATS.read().clone()
 }
 
-/// Get a snapshot of pool statistics. Updated once a second
-/// by the `Collector`.
-pub fn get_pool_stats() -> PoolStatsLookup {
-    POOL_STATS.read().clone()
-}
-
 /// Get the statistics reporter used to update stats across the pools/clients.
 pub fn get_reporter() -> Reporter {
     (*(*REPORTER.load())).clone()
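The double dereference in `get_reporter` peels the arc-swap guard and then the inner `Arc` before cloning out a `Reporter`. A self-contained sketch of the pattern (assuming arc-swap 1.x and once_cell, both of which pgcat already depends on):

    use arc_swap::ArcSwap;
    use once_cell::sync::Lazy;

    #[derive(Clone, Default)]
    struct Reporter;

    static REPORTER: Lazy<ArcSwap<Reporter>> =
        Lazy::new(|| ArcSwap::from_pointee(Reporter::default()));

    fn get_reporter() -> Reporter {
        // load() returns a Guard<Arc<Reporter>>; the first * yields the
        // Arc<Reporter>, the second * yields Reporter, which is cloned out.
        (*(*REPORTER.load())).clone()
    }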
src/stats/address.rs

@@ -1,26 +1,29 @@
-use log::warn;
 use std::sync::atomic::*;
 use std::sync::Arc;
 
+#[derive(Debug, Clone, Default)]
+struct AddressStatFields {
+    xact_count: Arc<AtomicU64>,
+    query_count: Arc<AtomicU64>,
+    bytes_received: Arc<AtomicU64>,
+    bytes_sent: Arc<AtomicU64>,
+    xact_time: Arc<AtomicU64>,
+    query_time: Arc<AtomicU64>,
+    wait_time: Arc<AtomicU64>,
+    errors: Arc<AtomicU64>,
+}
+
 /// Internal address stats
 #[derive(Debug, Clone, Default)]
 pub struct AddressStats {
-    pub total_xact_count: Arc<AtomicU64>,
-    pub total_query_count: Arc<AtomicU64>,
-    pub total_received: Arc<AtomicU64>,
-    pub total_sent: Arc<AtomicU64>,
-    pub total_xact_time: Arc<AtomicU64>,
-    pub total_query_time: Arc<AtomicU64>,
-    pub total_wait_time: Arc<AtomicU64>,
-    pub total_errors: Arc<AtomicU64>,
-    pub avg_query_count: Arc<AtomicU64>,
-    pub avg_query_time: Arc<AtomicU64>,
-    pub avg_recv: Arc<AtomicU64>,
-    pub avg_sent: Arc<AtomicU64>,
-    pub avg_errors: Arc<AtomicU64>,
-    pub avg_xact_time: Arc<AtomicU64>,
-    pub avg_xact_count: Arc<AtomicU64>,
-    pub avg_wait_time: Arc<AtomicU64>,
+    total: AddressStatFields,
+    current: AddressStatFields,
+    averages: AddressStatFields,
+    // Determines if the averages have been updated since the last time they were reported
+    pub averages_updated: Arc<AtomicBool>,
 }
 
 impl IntoIterator for AddressStats {
@@ -31,67 +34,67 @@ impl IntoIterator for AddressStats {
         vec![
             (
                 "total_xact_count".to_string(),
-                self.total_xact_count.load(Ordering::Relaxed),
+                self.total.xact_count.load(Ordering::Relaxed),
             ),
             (
                 "total_query_count".to_string(),
-                self.total_query_count.load(Ordering::Relaxed),
+                self.total.query_count.load(Ordering::Relaxed),
             ),
             (
                 "total_received".to_string(),
-                self.total_received.load(Ordering::Relaxed),
+                self.total.bytes_received.load(Ordering::Relaxed),
             ),
             (
                 "total_sent".to_string(),
-                self.total_sent.load(Ordering::Relaxed),
+                self.total.bytes_sent.load(Ordering::Relaxed),
             ),
             (
                 "total_xact_time".to_string(),
-                self.total_xact_time.load(Ordering::Relaxed),
+                self.total.xact_time.load(Ordering::Relaxed),
             ),
             (
                 "total_query_time".to_string(),
-                self.total_query_time.load(Ordering::Relaxed),
+                self.total.query_time.load(Ordering::Relaxed),
             ),
             (
                 "total_wait_time".to_string(),
-                self.total_wait_time.load(Ordering::Relaxed),
+                self.total.wait_time.load(Ordering::Relaxed),
             ),
             (
                 "total_errors".to_string(),
-                self.total_errors.load(Ordering::Relaxed),
+                self.total.errors.load(Ordering::Relaxed),
             ),
             (
                 "avg_xact_count".to_string(),
-                self.avg_xact_count.load(Ordering::Relaxed),
+                self.averages.xact_count.load(Ordering::Relaxed),
             ),
             (
                 "avg_query_count".to_string(),
-                self.avg_query_count.load(Ordering::Relaxed),
+                self.averages.query_count.load(Ordering::Relaxed),
             ),
             (
                 "avg_recv".to_string(),
-                self.avg_recv.load(Ordering::Relaxed),
+                self.averages.bytes_received.load(Ordering::Relaxed),
             ),
             (
                 "avg_sent".to_string(),
-                self.avg_sent.load(Ordering::Relaxed),
+                self.averages.bytes_sent.load(Ordering::Relaxed),
             ),
             (
                 "avg_errors".to_string(),
-                self.avg_errors.load(Ordering::Relaxed),
+                self.averages.errors.load(Ordering::Relaxed),
             ),
             (
                 "avg_xact_time".to_string(),
-                self.avg_xact_time.load(Ordering::Relaxed),
+                self.averages.xact_time.load(Ordering::Relaxed),
             ),
             (
                 "avg_query_time".to_string(),
-                self.avg_query_time.load(Ordering::Relaxed),
+                self.averages.query_time.load(Ordering::Relaxed),
             ),
             (
                 "avg_wait_time".to_string(),
-                self.avg_wait_time.load(Ordering::Relaxed),
+                self.averages.wait_time.load(Ordering::Relaxed),
             ),
         ]
         .into_iter()
@@ -99,22 +102,120 @@ impl IntoIterator for AddressStats {
 }
 
 impl AddressStats {
+    pub fn xact_count_add(&self) {
+        self.total.xact_count.fetch_add(1, Ordering::Relaxed);
+        self.current.xact_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    pub fn query_count_add(&self) {
+        self.total.query_count.fetch_add(1, Ordering::Relaxed);
+        self.current.query_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    pub fn bytes_received_add(&self, bytes: u64) {
+        self.total
+            .bytes_received
+            .fetch_add(bytes, Ordering::Relaxed);
+        self.current
+            .bytes_received
+            .fetch_add(bytes, Ordering::Relaxed);
+    }
+
+    pub fn bytes_sent_add(&self, bytes: u64) {
+        self.total.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
+        self.current.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
+    }
+
+    pub fn xact_time_add(&self, time: u64) {
+        self.total.xact_time.fetch_add(time, Ordering::Relaxed);
+        self.current.xact_time.fetch_add(time, Ordering::Relaxed);
+    }
+
+    pub fn query_time_add(&self, time: u64) {
+        self.total.query_time.fetch_add(time, Ordering::Relaxed);
+        self.current.query_time.fetch_add(time, Ordering::Relaxed);
+    }
+
+    pub fn wait_time_add(&self, time: u64) {
+        self.total.wait_time.fetch_add(time, Ordering::Relaxed);
+        self.current.wait_time.fetch_add(time, Ordering::Relaxed);
+    }
+
     pub fn error(&self) {
-        self.total_errors.fetch_add(1, Ordering::Relaxed);
+        self.total.errors.fetch_add(1, Ordering::Relaxed);
+        self.current.errors.fetch_add(1, Ordering::Relaxed);
     }
 
     pub fn update_averages(&self) {
-        let (totals, averages) = self.fields_iterators();
-        for data in totals.iter().zip(averages.iter()) {
-            let (total, average) = data;
-            if let Err(err) = average.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |avg| {
-                let total = total.load(Ordering::Relaxed);
-                let avg = (total - avg) / (crate::stats::STAT_PERIOD / 1_000); // Avg / second
-                Some(avg)
-            }) {
-                warn!("Could not update averages for addresses stats, {:?}", err);
-            }
-        }
+        let stat_period_per_second = crate::stats::STAT_PERIOD / 1_000;
+
+        // xact_count
+        let current_xact_count = self.current.xact_count.load(Ordering::Relaxed);
+        let current_xact_time = self.current.xact_time.load(Ordering::Relaxed);
+        self.averages.xact_count.store(
+            current_xact_count / stat_period_per_second,
+            Ordering::Relaxed,
+        );
+        if current_xact_count == 0 {
+            self.averages.xact_time.store(0, Ordering::Relaxed);
+        } else {
+            self.averages
+                .xact_time
+                .store(current_xact_time / current_xact_count, Ordering::Relaxed);
+        }
+
+        // query_count
+        let current_query_count = self.current.query_count.load(Ordering::Relaxed);
+        let current_query_time = self.current.query_time.load(Ordering::Relaxed);
+        self.averages.query_count.store(
+            current_query_count / stat_period_per_second,
+            Ordering::Relaxed,
+        );
+        if current_query_count == 0 {
+            self.averages.query_time.store(0, Ordering::Relaxed);
+        } else {
+            self.averages
+                .query_time
+                .store(current_query_time / current_query_count, Ordering::Relaxed);
+        }
+
+        // bytes_received
+        let current_bytes_received = self.current.bytes_received.load(Ordering::Relaxed);
+        self.averages.bytes_received.store(
+            current_bytes_received / stat_period_per_second,
+            Ordering::Relaxed,
+        );
+
+        // bytes_sent
+        let current_bytes_sent = self.current.bytes_sent.load(Ordering::Relaxed);
+        self.averages.bytes_sent.store(
+            current_bytes_sent / stat_period_per_second,
+            Ordering::Relaxed,
+        );
+
+        // wait_time
+        let current_wait_time = self.current.wait_time.load(Ordering::Relaxed);
+        self.averages.wait_time.store(
+            current_wait_time / stat_period_per_second,
+            Ordering::Relaxed,
+        );
+
+        // errors
+        let current_errors = self.current.errors.load(Ordering::Relaxed);
+        self.averages
+            .errors
+            .store(current_errors / stat_period_per_second, Ordering::Relaxed);
+    }
+
+    pub fn reset_current_counts(&self) {
+        self.current.xact_count.store(0, Ordering::Relaxed);
+        self.current.xact_time.store(0, Ordering::Relaxed);
+        self.current.query_count.store(0, Ordering::Relaxed);
+        self.current.query_time.store(0, Ordering::Relaxed);
+        self.current.bytes_received.store(0, Ordering::Relaxed);
+        self.current.bytes_sent.store(0, Ordering::Relaxed);
+        self.current.wait_time.store(0, Ordering::Relaxed);
+        self.current.errors.store(0, Ordering::Relaxed);
     }
 
     pub fn populate_row(&self, row: &mut Vec<String>) {
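The rewritten `update_averages` derives rates from the per-window `current` counters instead of diffing running totals, which removes the old `fetch_update` bookkeeping. A worked sketch of the arithmetic, assuming pgcat's `STAT_PERIOD` of 15_000 ms (a 15-second window):

    // Worked example of the averaging above (STAT_PERIOD assumed to be 15_000 ms):
    let stat_period_per_second = 15_000 / 1_000;               // 15 seconds per window
    let current_query_count: u64 = 3_000;                      // queries seen this window
    let current_query_time: u64 = 9_000;                       // total ms spent in them
    assert_eq!(current_query_count / stat_period_per_second, 200); // avg_query_count: 200 q/s
    assert_eq!(current_query_time / current_query_count, 3);   // avg_query_time: 3 ms per query

Note the asymmetry: counts and byte totals become per-second rates, while the time fields become per-event averages, guarded against division by zero.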
@@ -122,28 +223,4 @@ impl AddressStats {
             row.push(value.to_string());
         }
     }
-
-    fn fields_iterators(&self) -> (Vec<Arc<AtomicU64>>, Vec<Arc<AtomicU64>>) {
-        let mut totals: Vec<Arc<AtomicU64>> = Vec::new();
-        let mut averages: Vec<Arc<AtomicU64>> = Vec::new();
-
-        totals.push(self.total_xact_count.clone());
-        averages.push(self.avg_xact_count.clone());
-        totals.push(self.total_query_count.clone());
-        averages.push(self.avg_query_count.clone());
-        totals.push(self.total_received.clone());
-        averages.push(self.avg_recv.clone());
-        totals.push(self.total_sent.clone());
-        averages.push(self.avg_sent.clone());
-        totals.push(self.total_xact_time.clone());
-        averages.push(self.avg_xact_time.clone());
-        totals.push(self.total_query_time.clone());
-        averages.push(self.avg_query_time.clone());
-        totals.push(self.total_wait_time.clone());
-        averages.push(self.avg_wait_time.clone());
-        totals.push(self.total_errors.clone());
-        averages.push(self.avg_errors.clone());
-
-        (totals, averages)
-    }
 }
src/stats/client.rs

@@ -1,4 +1,3 @@
-use super::PoolStats;
 use super::{get_reporter, Reporter};
 use atomic_enum::atomic_enum;
 use std::sync::atomic::*;
@@ -34,12 +33,19 @@ pub struct ClientStats {
     pool_name: String,
     connect_time: Instant,
 
-    pool_stats: Arc<PoolStats>,
     reporter: Reporter,
 
     /// Total time spent waiting for a connection from pool, measured in microseconds
     pub total_wait_time: Arc<AtomicU64>,
 
+    /// Maximum time spent waiting for a connection from pool, measured in microseconds
+    pub max_wait_time: Arc<AtomicU64>,
+
+    // Time when the client started waiting for a connection from pool, measured in microseconds.
+    // We use connect_time as the reference point for this value.
+    // A u64 can represent ~5850 centuries in microseconds, so we should be fine.
+    pub wait_start_us: Arc<AtomicU64>,
+
     /// Current state of the client
     pub state: Arc<AtomicClientState>,
 
@@ -61,8 +67,9 @@ impl Default for ClientStats {
             application_name: String::new(),
             username: String::new(),
             pool_name: String::new(),
-            pool_stats: Arc::new(PoolStats::default()),
             total_wait_time: Arc::new(AtomicU64::new(0)),
+            max_wait_time: Arc::new(AtomicU64::new(0)),
+            wait_start_us: Arc::new(AtomicU64::new(0)),
             state: Arc::new(AtomicClientState::new(ClientState::Idle)),
             transaction_count: Arc::new(AtomicU64::new(0)),
             query_count: Arc::new(AtomicU64::new(0)),
@@ -79,11 +86,9 @@ impl ClientStats {
         username: &str,
         pool_name: &str,
         connect_time: Instant,
-        pool_stats: Arc<PoolStats>,
     ) -> Self {
         Self {
             client_id,
-            pool_stats,
             connect_time,
             application_name: application_name.to_string(),
             username: username.to_string(),
@@ -96,8 +101,6 @@ impl ClientStats {
     /// update metrics on the corresponding pool.
     pub fn disconnect(&self) {
         self.reporter.client_disconnecting(self.client_id);
-        self.pool_stats
-            .client_disconnect(self.state.load(Ordering::Relaxed))
     }
 
     /// Register a client with the stats system. The stats system uses client_id
@@ -105,33 +108,36 @@ impl ClientStats {
     pub fn register(&self, stats: Arc<ClientStats>) {
         self.reporter.client_register(self.client_id, stats);
         self.state.store(ClientState::Idle, Ordering::Relaxed);
-        self.pool_stats.cl_idle.fetch_add(1, Ordering::Relaxed);
     }
 
     /// Reports a client is done querying the server and is no longer assigned a server connection
     pub fn idle(&self) {
-        self.pool_stats
-            .client_idle(self.state.load(Ordering::Relaxed));
         self.state.store(ClientState::Idle, Ordering::Relaxed);
     }
 
     /// Reports a client is waiting for a connection
     pub fn waiting(&self) {
-        self.pool_stats
-            .client_waiting(self.state.load(Ordering::Relaxed));
+        let wait_start = self.connect_time.elapsed().as_micros() as u64;
+        self.wait_start_us.store(wait_start, Ordering::Relaxed);
         self.state.store(ClientState::Waiting, Ordering::Relaxed);
     }
 
     /// Reports a client is done waiting for a connection and is about to query the server.
     pub fn active(&self) {
-        self.pool_stats
-            .client_active(self.state.load(Ordering::Relaxed));
         self.state.store(ClientState::Active, Ordering::Relaxed);
     }
 
     /// Reports a client has failed to obtain a connection from a connection pool
     pub fn checkout_error(&self) {
         self.state.store(ClientState::Idle, Ordering::Relaxed);
+        self.update_wait_times();
+    }
+
+    /// Reports a client has succeeded in obtaining a connection from a connection pool
+    pub fn checkout_success(&self) {
+        self.state.store(ClientState::Active, Ordering::Relaxed);
+        self.update_wait_times();
     }
 
     /// Reports a client has had the server assigned to it be banned
@@ -140,10 +146,26 @@ impl ClientStats {
         self.error_count.fetch_add(1, Ordering::Relaxed);
     }
 
-    /// Reports the time spent by a client waiting to get a healthy connection from the pool
-    pub fn checkout_time(&self, microseconds: u64) {
+    fn update_wait_times(&self) {
+        if self.wait_start_us.load(Ordering::Relaxed) == 0 {
+            return;
+        }
+
+        let wait_time_us = self.get_current_wait_time_us();
         self.total_wait_time
-            .fetch_add(microseconds, Ordering::Relaxed);
+            .fetch_add(wait_time_us, Ordering::Relaxed);
+        self.max_wait_time
+            .fetch_max(wait_time_us, Ordering::Relaxed);
+        self.wait_start_us.store(0, Ordering::Relaxed);
+    }
+
+    pub fn get_current_wait_time_us(&self) -> u64 {
+        let wait_start_us = self.wait_start_us.load(Ordering::Relaxed);
+        let microseconds_since_connection_epoch = self.connect_time.elapsed().as_micros() as u64;
+        if wait_start_us == 0 || microseconds_since_connection_epoch < wait_start_us {
+            return 0;
+        }
+        microseconds_since_connection_epoch - wait_start_us
     }
 
     /// Report a query executed by a client against a server
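The wait clock here uses the client's `connect_time` as a private monotonic epoch, so a single `AtomicU64` can carry a timestamp without putting an `Instant` behind a lock. A minimal sketch of the same trick, with hypothetical names:

    // Storing a monotonic timestamp in an AtomicU64 by measuring microseconds
    // elapsed since a fixed per-object Instant (the "epoch"); 0 means "not waiting".
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::time::Instant;

    struct WaitClock {
        epoch: Instant,           // fixed reference point (connect time)
        wait_start_us: AtomicU64, // microseconds since epoch when waiting began
    }

    impl WaitClock {
        fn start_waiting(&self) {
            let now = self.epoch.elapsed().as_micros() as u64;
            self.wait_start_us.store(now, Ordering::Relaxed);
        }

        fn waited_us(&self) -> u64 {
            let start = self.wait_start_us.load(Ordering::Relaxed);
            let now = self.epoch.elapsed().as_micros() as u64;
            if start == 0 || now < start { 0 } else { now - start }
        }
    }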
src/stats/pool.rs

@@ -1,36 +1,134 @@
-use crate::config::Pool;
-use crate::config::PoolMode;
-use crate::pool::PoolIdentifier;
-use std::sync::atomic::*;
-use std::sync::Arc;
-
-use super::get_reporter;
-use super::Reporter;
+use log::debug;
 use super::{ClientState, ServerState};
+use crate::{config::PoolMode, messages::DataType, pool::PoolIdentifier};
+use std::collections::HashMap;
+use std::sync::atomic::*;
 
-#[derive(Debug, Clone, Default)]
+use crate::pool::get_all_pools;
+
+#[derive(Debug, Clone)]
 /// A struct that holds information about a Pool.
 pub struct PoolStats {
-    // Pool identifier, cannot be changed after creating the instance
-    identifier: PoolIdentifier,
-
-    // Pool config, cannot be changed after creating the instance
-    config: Pool,
-
-    // A reference to the global reporter.
-    reporter: Reporter,
-
-    /// Counters (atomics)
-    pub cl_idle: Arc<AtomicU64>,
-    pub cl_active: Arc<AtomicU64>,
-    pub cl_waiting: Arc<AtomicU64>,
-    pub cl_cancel_req: Arc<AtomicU64>,
-    pub sv_active: Arc<AtomicU64>,
-    pub sv_idle: Arc<AtomicU64>,
-    pub sv_used: Arc<AtomicU64>,
-    pub sv_tested: Arc<AtomicU64>,
-    pub sv_login: Arc<AtomicU64>,
-    pub maxwait: Arc<AtomicU64>,
+    pub identifier: PoolIdentifier,
+    pub mode: PoolMode,
+    pub cl_idle: u64,
+    pub cl_active: u64,
+    pub cl_waiting: u64,
+    pub cl_cancel_req: u64,
+    pub sv_active: u64,
+    pub sv_idle: u64,
+    pub sv_used: u64,
+    pub sv_tested: u64,
+    pub sv_login: u64,
+    pub maxwait: u64,
+}
+
+impl PoolStats {
+    pub fn new(identifier: PoolIdentifier, mode: PoolMode) -> Self {
+        PoolStats {
+            identifier,
+            mode,
+            cl_idle: 0,
+            cl_active: 0,
+            cl_waiting: 0,
+            cl_cancel_req: 0,
+            sv_active: 0,
+            sv_idle: 0,
+            sv_used: 0,
+            sv_tested: 0,
+            sv_login: 0,
+            maxwait: 0,
+        }
+    }
+
+    pub fn construct_pool_lookup() -> HashMap<PoolIdentifier, PoolStats> {
+        let mut map: HashMap<PoolIdentifier, PoolStats> = HashMap::new();
+        let client_map = super::get_client_stats();
+        let server_map = super::get_server_stats();
+
+        for (identifier, pool) in get_all_pools() {
+            map.insert(
+                identifier.clone(),
+                PoolStats::new(identifier, pool.settings.pool_mode),
+            );
+        }
+
+        for client in client_map.values() {
+            match map.get_mut(&PoolIdentifier {
+                db: client.pool_name(),
+                user: client.username(),
+            }) {
+                Some(pool_stats) => {
+                    match client.state.load(Ordering::Relaxed) {
+                        ClientState::Active => pool_stats.cl_active += 1,
+                        ClientState::Idle => pool_stats.cl_idle += 1,
+                        ClientState::Waiting => pool_stats.cl_waiting += 1,
+                    }
+                    let wait_start_us = client.wait_start_us.load(Ordering::Relaxed);
+                    if wait_start_us > 0 {
+                        let wait_time_us = client.get_current_wait_time_us();
+                        pool_stats.maxwait = std::cmp::max(pool_stats.maxwait, wait_time_us);
+                    }
+                }
+                None => debug!("Client from an obsolete pool"),
+            }
+        }
+
+        for server in server_map.values() {
+            match map.get_mut(&PoolIdentifier {
+                db: server.pool_name(),
+                user: server.username(),
+            }) {
+                Some(pool_stats) => match server.state.load(Ordering::Relaxed) {
+                    ServerState::Active => pool_stats.sv_active += 1,
+                    ServerState::Idle => pool_stats.sv_idle += 1,
+                    ServerState::Login => pool_stats.sv_login += 1,
+                    ServerState::Tested => pool_stats.sv_tested += 1,
+                },
+                None => debug!("Server from an obsolete pool"),
+            }
+        }
+
+        map
+    }
+
+    pub fn generate_header() -> Vec<(&'static str, DataType)> {
+        vec![
+            ("database", DataType::Text),
+            ("user", DataType::Text),
+            ("pool_mode", DataType::Text),
+            ("cl_idle", DataType::Numeric),
+            ("cl_active", DataType::Numeric),
+            ("cl_waiting", DataType::Numeric),
+            ("cl_cancel_req", DataType::Numeric),
+            ("sv_active", DataType::Numeric),
+            ("sv_idle", DataType::Numeric),
+            ("sv_used", DataType::Numeric),
+            ("sv_tested", DataType::Numeric),
+            ("sv_login", DataType::Numeric),
+            ("maxwait", DataType::Numeric),
+            ("maxwait_us", DataType::Numeric),
+        ]
+    }
+
+    pub fn generate_row(&self) -> Vec<String> {
+        vec![
+            self.identifier.db.clone(),
+            self.identifier.user.clone(),
+            self.mode.to_string(),
+            self.cl_idle.to_string(),
+            self.cl_active.to_string(),
+            self.cl_waiting.to_string(),
+            self.cl_cancel_req.to_string(),
+            self.sv_active.to_string(),
+            self.sv_idle.to_string(),
+            self.sv_used.to_string(),
+            self.sv_tested.to_string(),
+            self.sv_login.to_string(),
+            (self.maxwait / 1_000_000).to_string(),
+            (self.maxwait % 1_000_000).to_string(),
+        ]
+    }
 }
 
 impl IntoIterator for PoolStats {
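`construct_pool_lookup` inverts the old design: instead of every client/server state change pushing increments into shared pool atomics, SHOW POOLS now walks the client and server registries and counts states on demand, so the counters cannot drift. A sketch of a hypothetical caller:

    // Rendering SHOW POOLS from the on-demand snapshot (hypothetical admin handler;
    // the real call sites live in pgcat's admin code).
    fn show_pools() {
        for (id, stats) in PoolStats::construct_pool_lookup() {
            // One row per (database, user) pair, in generate_header() column order.
            println!("{} {} -> {:?}", id.db, id.user, stats.generate_row());
        }
    }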
@@ -39,236 +137,18 @@ impl IntoIterator for PoolStats {
     fn into_iter(self) -> Self::IntoIter {
         vec![
-            ("cl_idle".to_string(), self.cl_idle.load(Ordering::Relaxed)),
-            (
-                "cl_active".to_string(),
-                self.cl_active.load(Ordering::Relaxed),
-            ),
-            (
-                "cl_waiting".to_string(),
-                self.cl_waiting.load(Ordering::Relaxed),
-            ),
-            (
-                "cl_cancel_req".to_string(),
-                self.cl_cancel_req.load(Ordering::Relaxed),
-            ),
-            (
-                "sv_active".to_string(),
-                self.sv_active.load(Ordering::Relaxed),
-            ),
-            ("sv_idle".to_string(), self.sv_idle.load(Ordering::Relaxed)),
-            ("sv_used".to_string(), self.sv_used.load(Ordering::Relaxed)),
-            (
-                "sv_tested".to_string(),
-                self.sv_tested.load(Ordering::Relaxed),
-            ),
-            (
-                "sv_login".to_string(),
-                self.sv_login.load(Ordering::Relaxed),
-            ),
-            (
-                "maxwait".to_string(),
-                self.maxwait.load(Ordering::Relaxed) / 1_000_000,
-            ),
-            (
-                "maxwait_us".to_string(),
-                self.maxwait.load(Ordering::Relaxed) % 1_000_000,
-            ),
+            ("cl_idle".to_string(), self.cl_idle),
+            ("cl_active".to_string(), self.cl_active),
+            ("cl_waiting".to_string(), self.cl_waiting),
+            ("cl_cancel_req".to_string(), self.cl_cancel_req),
+            ("sv_active".to_string(), self.sv_active),
+            ("sv_idle".to_string(), self.sv_idle),
+            ("sv_used".to_string(), self.sv_used),
+            ("sv_tested".to_string(), self.sv_tested),
+            ("sv_login".to_string(), self.sv_login),
+            ("maxwait".to_string(), self.maxwait / 1_000_000),
+            ("maxwait_us".to_string(), self.maxwait % 1_000_000),
         ]
         .into_iter()
     }
 }
 
-impl PoolStats {
-    pub fn new(identifier: PoolIdentifier, config: Pool) -> Self {
-        Self {
-            identifier,
-            config,
-            reporter: get_reporter(),
-            ..Default::default()
-        }
-    }
-
-    // Getters
-    pub fn register(&self, stats: Arc<PoolStats>) {
-        self.reporter.pool_register(self.identifier.clone(), stats);
-    }
-
-    pub fn database(&self) -> String {
-        self.identifier.db.clone()
-    }
-
-    pub fn user(&self) -> String {
-        self.identifier.user.clone()
-    }
-
-    pub fn pool_mode(&self) -> PoolMode {
-        self.config.pool_mode
-    }
-
-    /// Populates an array of strings with counters (used by admin in show pools)
-    pub fn populate_row(&self, row: &mut Vec<String>) {
-        for (_key, value) in self.clone() {
-            row.push(value.to_string());
-        }
-    }
-
-    /// Deletes the maxwait counter, this is done every time we obtain metrics
-    pub fn clear_maxwait(&self) {
-        self.maxwait.store(0, Ordering::Relaxed);
-    }
-
-    /// Notified when a server of the pool enters login state.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the server that notifies.
-    pub fn server_login(&self, from: ServerState) {
-        self.sv_login.fetch_add(1, Ordering::Relaxed);
-        if from != ServerState::Login {
-            self.decrease_from_server_state(from);
-        }
-    }
-
-    /// Notified when a server of the pool becomes 'active'.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the server that notifies.
-    pub fn server_active(&self, from: ServerState) {
-        self.sv_active.fetch_add(1, Ordering::Relaxed);
-        if from != ServerState::Active {
-            self.decrease_from_server_state(from);
-        }
-    }
-
-    /// Notified when a server of the pool becomes 'tested'.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the server that notifies.
-    pub fn server_tested(&self, from: ServerState) {
-        self.sv_tested.fetch_add(1, Ordering::Relaxed);
-        if from != ServerState::Tested {
-            self.decrease_from_server_state(from);
-        }
-    }
-
-    /// Notified when a server of the pool becomes 'idle'.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the server that notifies.
-    pub fn server_idle(&self, from: ServerState) {
-        self.sv_idle.fetch_add(1, Ordering::Relaxed);
-        if from != ServerState::Idle {
-            self.decrease_from_server_state(from);
-        }
-    }
-
-    /// Notified when a client of the pool becomes 'waiting'.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the client that notifies.
-    pub fn client_waiting(&self, from: ClientState) {
-        if from != ClientState::Waiting {
-            self.cl_waiting.fetch_add(1, Ordering::Relaxed);
-            self.decrease_from_client_state(from);
-        }
-    }
-
-    /// Notified when a client of the pool becomes 'active'.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the client that notifies.
-    pub fn client_active(&self, from: ClientState) {
-        if from != ClientState::Active {
-            self.cl_active.fetch_add(1, Ordering::Relaxed);
-            self.decrease_from_client_state(from);
-        }
-    }
-
-    /// Notified when a client of the pool becomes 'idle'.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the client that notifies.
-    pub fn client_idle(&self, from: ClientState) {
-        if from != ClientState::Idle {
-            self.cl_idle.fetch_add(1, Ordering::Relaxed);
-            self.decrease_from_client_state(from);
-        }
-    }
-
-    /// Notified when a client disconnects.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the client that notifies.
-    pub fn client_disconnect(&self, from: ClientState) {
-        let counter = match from {
-            ClientState::Idle => &self.cl_idle,
-            ClientState::Waiting => &self.cl_waiting,
-            ClientState::Active => &self.cl_active,
-        };
-
-        Self::decrease_counter(counter.clone());
-    }
-
-    /// Notified when a server disconnects.
-    ///
-    /// Arguments:
-    ///
-    /// `from`: The state of the server that notifies.
-    pub fn server_disconnect(&self, from: ServerState) {
-        let counter = match from {
-            ServerState::Active => &self.sv_active,
-            ServerState::Idle => &self.sv_idle,
-            ServerState::Login => &self.sv_login,
-            ServerState::Tested => &self.sv_tested,
-        };
-        Self::decrease_counter(counter.clone());
-    }
-
-    // helpers for counter decrease
-    fn decrease_from_server_state(&self, from: ServerState) {
-        let counter = match from {
-            ServerState::Tested => &self.sv_tested,
-            ServerState::Active => &self.sv_active,
-            ServerState::Idle => &self.sv_idle,
-            ServerState::Login => &self.sv_login,
-        };
-        Self::decrease_counter(counter.clone());
-    }
-
-    fn decrease_from_client_state(&self, from: ClientState) {
-        let counter = match from {
-            ClientState::Active => &self.cl_active,
-            ClientState::Idle => &self.cl_idle,
-            ClientState::Waiting => &self.cl_waiting,
-        };
-        Self::decrease_counter(counter.clone());
-    }
-
-    fn decrease_counter(value: Arc<AtomicU64>) {
-        if value.load(Ordering::Relaxed) > 0 {
-            value.fetch_sub(1, Ordering::Relaxed);
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn test_decrease() {
-        let stat: PoolStats = PoolStats::default();
-        stat.server_login(ServerState::Login);
-        stat.server_idle(ServerState::Login);
-        assert_eq!(stat.sv_login.load(Ordering::Relaxed), 0);
-        assert_eq!(stat.sv_idle.load(Ordering::Relaxed), 1);
-    }
-}
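Both `generate_row` and `into_iter` report `maxwait` in two columns, whole seconds plus the leftover microseconds, matching the layout pgbouncer uses for SHOW POOLS. The arithmetic worked through:

    // Worked example of the maxwait split used above:
    let maxwait_us: u64 = 2_345_678;              // longest current wait, in microseconds
    assert_eq!(maxwait_us / 1_000_000, 2);        // "maxwait" column: 2 whole seconds
    assert_eq!(maxwait_us % 1_000_000, 345_678);  // "maxwait_us" column: the remainder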
src/stats/server.rs

@@ -1,5 +1,4 @@
 use super::AddressStats;
-use super::PoolStats;
 use super::{get_reporter, Reporter};
 use crate::config::Address;
 use atomic_enum::atomic_enum;
@@ -38,7 +37,6 @@ pub struct ServerStats {
     address: Address,
     connect_time: Instant,
 
-    pool_stats: Arc<PoolStats>,
     reporter: Reporter,
 
     /// Data
@@ -49,6 +47,10 @@ pub struct ServerStats {
     pub transaction_count: Arc<AtomicU64>,
     pub query_count: Arc<AtomicU64>,
     pub error_count: Arc<AtomicU64>,
+    pub prepared_hit_count: Arc<AtomicU64>,
+    pub prepared_miss_count: Arc<AtomicU64>,
+    pub prepared_eviction_count: Arc<AtomicU64>,
+    pub prepared_cache_size: Arc<AtomicU64>,
 }
 
 impl Default for ServerStats {
@@ -57,7 +59,6 @@ impl Default for ServerStats {
             server_id: 0,
             application_name: Arc::new(RwLock::new(String::new())),
             address: Address::default(),
-            pool_stats: Arc::new(PoolStats::default()),
             connect_time: Instant::now(),
             state: Arc::new(AtomicServerState::new(ServerState::Login)),
             bytes_sent: Arc::new(AtomicU64::new(0)),
@@ -66,15 +67,18 @@ impl Default for ServerStats {
             query_count: Arc::new(AtomicU64::new(0)),
             error_count: Arc::new(AtomicU64::new(0)),
             reporter: get_reporter(),
+            prepared_hit_count: Arc::new(AtomicU64::new(0)),
+            prepared_miss_count: Arc::new(AtomicU64::new(0)),
+            prepared_eviction_count: Arc::new(AtomicU64::new(0)),
+            prepared_cache_size: Arc::new(AtomicU64::new(0)),
         }
     }
 }
 
 impl ServerStats {
-    pub fn new(address: Address, pool_stats: Arc<PoolStats>, connect_time: Instant) -> Self {
+    pub fn new(address: Address, connect_time: Instant) -> Self {
         Self {
             address,
-            pool_stats,
             connect_time,
             server_id: rand::random::<i32>(),
             ..Default::default()
@@ -96,9 +100,6 @@ impl ServerStats {
     /// Reports a server connection is no longer assigned to a client
     /// and is available for the next client to pick it up
     pub fn idle(&self) {
-        self.pool_stats
-            .server_idle(self.state.load(Ordering::Relaxed));
-
         self.state.store(ServerState::Idle, Ordering::Relaxed);
     }
 
@@ -106,22 +107,16 @@ impl ServerStats {
     /// Also updates metrics on the pool regarding server usage.
     pub fn disconnect(&self) {
         self.reporter.server_disconnecting(self.server_id);
-        self.pool_stats
-            .server_disconnect(self.state.load(Ordering::Relaxed))
     }
 
     /// Reports a server connection is being tested before being given to a client.
     pub fn tested(&self) {
         self.set_undefined_application();
-        self.pool_stats
-            .server_tested(self.state.load(Ordering::Relaxed));
         self.state.store(ServerState::Tested, Ordering::Relaxed);
     }
 
     /// Reports a server connection is attempting to login.
     pub fn login(&self) {
-        self.pool_stats
-            .server_login(self.state.load(Ordering::Relaxed));
         self.state.store(ServerState::Login, Ordering::Relaxed);
         self.set_undefined_application();
     }
@@ -129,8 +124,6 @@ impl ServerStats {
     /// Reports a server connection has been assigned to a client that
     /// is about to query the server
     pub fn active(&self, application_name: String) {
-        self.pool_stats
-            .server_active(self.state.load(Ordering::Relaxed));
         self.state.store(ServerState::Active, Ordering::Relaxed);
         self.set_application(application_name);
     }
@@ -139,13 +132,24 @@ impl ServerStats {
         self.address.stats.clone()
     }
 
+    pub fn check_address_stat_average_is_updated_status(&self) -> bool {
+        self.address.stats.averages_updated.load(Ordering::Relaxed)
+    }
+
+    pub fn set_address_stat_average_is_updated_status(&self, is_checked: bool) {
+        self.address
+            .stats
+            .averages_updated
+            .store(is_checked, Ordering::Relaxed);
+    }
+
     // Helper methods for show_servers
     pub fn pool_name(&self) -> String {
-        self.pool_stats.database()
+        self.address.pool_name.clone()
     }
 
     pub fn username(&self) -> String {
-        self.pool_stats.user()
+        self.address.username.clone()
     }
 
     pub fn address_name(&self) -> String {
@@ -166,27 +170,17 @@ impl ServerStats {
     }
 
     pub fn checkout_time(&self, microseconds: u64, application_name: String) {
-        // Update server stats and address aggergation stats
+        // Update server stats and address aggregation stats
         self.set_application(application_name);
-        self.address
-            .stats
-            .total_wait_time
-            .fetch_add(microseconds, Ordering::Relaxed);
-        self.pool_stats
-            .maxwait
-            .fetch_max(microseconds, Ordering::Relaxed);
+        self.address.stats.wait_time_add(microseconds);
     }
 
     /// Report a query executed by a client against a server
     pub fn query(&self, milliseconds: u64, application_name: &str) {
         self.set_application(application_name.to_string());
-        let address_stats = self.address_stats();
-        address_stats
-            .total_query_count
-            .fetch_add(1, Ordering::Relaxed);
-        address_stats
-            .total_query_time
-            .fetch_add(milliseconds, Ordering::Relaxed);
+        self.address.stats.query_count_add();
+        self.address.stats.query_time_add(milliseconds);
+        self.query_count.fetch_add(1, Ordering::Relaxed);
     }
 
     /// Report a transaction executed by a client against a server
@@ -197,29 +191,39 @@ impl ServerStats {
         self.set_application(application_name.to_string());
 
         self.transaction_count.fetch_add(1, Ordering::Relaxed);
-        self.address
-            .stats
-            .total_xact_count
-            .fetch_add(1, Ordering::Relaxed);
+        self.address.stats.xact_count_add();
     }
 
     /// Report data sent to a server
     pub fn data_sent(&self, amount_bytes: usize) {
         self.bytes_sent
             .fetch_add(amount_bytes as u64, Ordering::Relaxed);
-        self.address
-            .stats
-            .total_sent
-            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
+        self.address.stats.bytes_sent_add(amount_bytes as u64);
     }
 
     /// Report data received from a server
     pub fn data_received(&self, amount_bytes: usize) {
         self.bytes_received
             .fetch_add(amount_bytes as u64, Ordering::Relaxed);
-        self.address
-            .stats
-            .total_received
-            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
+        self.address.stats.bytes_received_add(amount_bytes as u64);
+    }
+
+    /// Report a prepared statement that already exists on the server.
+    pub fn prepared_cache_hit(&self) {
+        self.prepared_hit_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Report a prepared statement that does not exist on the server yet.
+    pub fn prepared_cache_miss(&self) {
+        self.prepared_miss_count.fetch_add(1, Ordering::Relaxed);
+    }
+
+    pub fn prepared_cache_add(&self) {
+        self.prepared_cache_size.fetch_add(1, Ordering::Relaxed);
+    }
+
+    pub fn prepared_cache_remove(&self) {
+        self.prepared_eviction_count.fetch_add(1, Ordering::Relaxed);
+        self.prepared_cache_size.fetch_sub(1, Ordering::Relaxed);
+    }
 }
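The four `prepared_*` counters together describe a per-server prepared-statement cache: hits and misses measure reuse, while size and evictions track churn. A sketch of how a caller might drive them (hypothetical call site; the real ones live in pgcat's server code):

    // How the prepared-statement counters above fit together.
    fn on_prepare(stats: &ServerStats, already_prepared: bool, evicted: bool) {
        if already_prepared {
            stats.prepared_cache_hit();        // statement already on the server
        } else {
            stats.prepared_cache_miss();       // must be prepared first
            stats.prepared_cache_add();        // cache grows by one
            if evicted {
                stats.prepared_cache_remove(); // eviction shrinks it and counts once
            }
        }
    }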
23  src/tls.rs
@@ -4,7 +4,12 @@ use rustls_pemfile::{certs, read_one, Item};
 use std::iter;
 use std::path::Path;
 use std::sync::Arc;
-use tokio_rustls::rustls::{self, Certificate, PrivateKey};
+use std::time::SystemTime;
+use tokio_rustls::rustls::{
+    self,
+    client::{ServerCertVerified, ServerCertVerifier},
+    Certificate, PrivateKey, ServerName,
+};
 use tokio_rustls::TlsAcceptor;
 
 use crate::config::get_config;
@@ -64,3 +69,19 @@ impl Tls {
         })
     }
 }
+
+pub struct NoCertificateVerification;
+
+impl ServerCertVerifier for NoCertificateVerification {
+    fn verify_server_cert(
+        &self,
+        _end_entity: &Certificate,
+        _intermediates: &[Certificate],
+        _server_name: &ServerName,
+        _scts: &mut dyn Iterator<Item = &[u8]>,
+        _ocsp_response: &[u8],
+        _now: SystemTime,
+    ) -> Result<ServerCertVerified, rustls::Error> {
+        Ok(ServerCertVerified::assertion())
+    }
+}
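A verifier that unconditionally returns `ServerCertVerified::assertion()` accepts any certificate, including self-signed ones. A sketch of wiring it into a client config (an assumption on my part: rustls 0.20/0.21 built with the `dangerous_configuration` feature, which gates `with_custom_certificate_verifier`):

    // Sketch only: this disables certificate checks entirely, so it should
    // never be used against untrusted networks.
    use std::sync::Arc;
    use tokio_rustls::rustls::ClientConfig;

    fn insecure_client_config() -> ClientConfig {
        ClientConfig::builder()
            .with_safe_defaults()
            .with_custom_certificate_verifier(Arc::new(NoCertificateVerification))
            .with_no_client_auth()
    }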
34  start_test_env.sh  (new executable file)

@@ -0,0 +1,34 @@
+GREEN="\033[0;32m"
+RED="\033[0;31m"
+BLUE="\033[0;34m"
+RESET="\033[0m"
+
+
+cd tests/docker/
+docker compose kill main || true
+docker compose build main
+docker compose down
+docker compose up -d
+# wait for the container to start
+while ! docker compose exec main ls; do
+    echo "Waiting for test environment to start"
+    sleep 1
+done
+echo "==================================="
+docker compose exec -e LOG_LEVEL=error -d main toxiproxy-server
+docker compose exec --workdir /app main cargo build
+docker compose exec -d --workdir /app main ./target/debug/pgcat ./.circleci/pgcat.toml
+docker compose exec --workdir /app/tests/ruby main bundle install
+docker compose exec --workdir /app/tests/python main pip3 install -r requirements.txt
+echo "Interactive test environment ready"
+echo "To run integration tests, you can use the following commands:"
+echo -e "  ${BLUE}Ruby:   ${RED}cd /app/tests/ruby && bundle exec ruby tests.rb --format documentation${RESET}"
+echo -e "  ${BLUE}Python: ${RED}cd /app/ && pytest ${RESET}"
+echo -e "  ${BLUE}Rust:   ${RED}cd /app/tests/rust && cargo run ${RESET}"
+echo -e "  ${BLUE}Go:     ${RED}cd /app/tests/go && /usr/local/go/bin/go test${RESET}"
+echo "The test sources are linked directly to the source code in the container, so you can modify the code and run the tests again"
+echo "You can rebuild PgCat from within the container by running"
+echo -e "  ${GREEN}cargo build${RESET}"
+echo "and then run the tests again"
+echo "==================================="
+docker compose exec --workdir /app/tests main bash
tests/docker/Dockerfile

@@ -1,8 +1,13 @@
 FROM rust:bullseye
 
+COPY --from=sclevine/yj /bin/yj /bin/yj
+RUN /bin/yj -h
 RUN apt-get update && apt-get install llvm-11 psmisc postgresql-contrib postgresql-client ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 -y
 RUN cargo install cargo-binutils rustfilt
 RUN rustup component add llvm-tools-preview
 RUN sudo gem install bundler
 RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
     sudo dpkg -i toxiproxy-2.4.0.deb
+RUN wget -O go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
+    sudo tar -C /usr/local -xzf go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
+    rm go1.21.3.linux-$(dpkg --print-architecture).tar.gz
@@ -1,4 +1,3 @@
-version: "3"
 services:
   pg1:
     image: postgres:14
@@ -48,6 +47,8 @@ services:
   main:
     build: .
     command: ["bash", "/app/tests/docker/run.sh"]
+    environment:
+      - INTERACTIVE_TEST_ENVIRONMENT=true
     volumes:
       - ../../:/app/
       - /app/target/
@@ -5,6 +5,38 @@ rm /app/*.profraw || true
 rm /app/pgcat.profdata || true
 rm -rf /app/cov || true

+# Prepares the interactive test environment
+#
+if [ -n "$INTERACTIVE_TEST_ENVIRONMENT" ]; then
+  ports=(5432 7432 8432 9432 10432)
+  for port in "${ports[@]}"; do
+    is_it_up=0
+    attempts=0
+    while [ $is_it_up -eq 0 ]; do
+      PGPASSWORD=postgres psql -h 127.0.0.1 -p $port -U postgres -c '\q' > /dev/null 2>&1
+      if [ $? -eq 0 ]; then
+        echo "PostgreSQL on port $port is up."
+        is_it_up=1
+      else
+        attempts=$((attempts+1))
+        if [ $attempts -gt 10 ]; then
+          echo "PostgreSQL on port $port is down, giving up."
+          exit 1
+        fi
+        echo "PostgreSQL on port $port is down, waiting for it to start."
+        sleep 1
+      fi
+    done
+  done
+  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
+  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
+  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
+  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
+  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
+  sleep 100000000000000000
+  exit 0
+fi
+
 export LLVM_PROFILE_FILE="/app/pgcat-%m-%p.profraw"
 export RUSTC_BOOTSTRAP=1
 export CARGO_INCREMENTAL=0
tests/go/go.mod (new file, 5 lines)
@@ -0,0 +1,5 @@
+module pgcat
+
+go 1.21
+
+require github.com/lib/pq v1.10.9
tests/go/go.sum (new file, 2 lines)
@@ -0,0 +1,2 @@
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
tests/go/pgcat.toml (new file, 162 lines)
@@ -0,0 +1,162 @@
+#
+# PgCat config example.
+#
+
+#
+# General pooler settings
+[general]
+# What IP to run on, 0.0.0.0 means accessible from everywhere.
+host = "0.0.0.0"
+
+# Port to run on, same as PgBouncer used in this example.
+port = "${PORT}"
+
+# Whether to enable prometheus exporter or not.
+enable_prometheus_exporter = true
+
+# Port at which prometheus exporter listens on.
+prometheus_exporter_port = 9930
+
+# How long to wait before aborting a server connection (ms).
+connect_timeout = 1000
+
+# How much time to give the health check query to return with a result (ms).
+healthcheck_timeout = 1000
+
+# How long to keep connection available for immediate re-use, without running a healthcheck query on it
+healthcheck_delay = 30000
+
+# How much time to give clients during shutdown before forcibly killing client connections (ms).
+shutdown_timeout = 5000
+
+# For how long to ban a server if it fails a health check (seconds).
+ban_time = 60 # Seconds
+
+# If we should log client connections
+log_client_connections = false
+
+# If we should log client disconnections
+log_client_disconnections = false
+
+# Reload config automatically if it changes.
+autoreload = 15000
+
+server_round_robin = false
+
+# TLS
+tls_certificate = "../../.circleci/server.cert"
+tls_private_key = "../../.circleci/server.key"
+
+# Credentials to access the virtual administrative database (pgbouncer or pgcat)
+# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
+admin_username = "admin_user"
+admin_password = "admin_pass"
+
+# pool
+# configs are structured as pool.<pool_name>
+# the pool_name is what clients use as database name when connecting
+# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
+[pools.sharded_db]
+# Pool mode (see PgBouncer docs for more).
+# session: one server connection per connected client
+# transaction: one server connection per client transaction
+pool_mode = "transaction"
+
+# If the client doesn't specify, route traffic to
+# this role by default.
+#
+# any: round-robin between primary and replicas,
+# replica: round-robin between replicas only without touching the primary,
+# primary: all queries go to the primary unless otherwise specified.
+default_role = "any"
+
+# Query parser. If enabled, we'll attempt to parse
+# every incoming query to determine if it's a read or a write.
+# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
+# we'll direct it to the primary.
+query_parser_enabled = true
+
+# If the query parser is enabled and this setting is enabled, we'll attempt to
+# infer the role from the query itself.
+query_parser_read_write_splitting = true
+
+# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
+# load balancing of read queries. Otherwise, the primary will only be used for write
+# queries. The primary can always be explicitly selected with our custom protocol.
+primary_reads_enabled = true
+
+# So what if you wanted to implement a different hashing function,
+# or you've already built one and you want this pooler to use it?
+#
+# Current options:
+#
+# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
+# sha1: A hashing function based on SHA1
+#
+sharding_function = "pg_bigint_hash"
+
+# Prepared statements cache size.
+prepared_statements_cache_size = 500
+
+# Credentials for users that may connect to this cluster
+[pools.sharded_db.users.0]
+username = "sharding_user"
+password = "sharding_user"
+# Maximum number of server connections that can be established for this user
+# The maximum number of connections from a single PgCat process to any database in the cluster
+# is the sum of pool_size across all users.
+pool_size = 5
+statement_timeout = 0
+
+[pools.sharded_db.users.1]
+username = "other_user"
+password = "other_user"
+pool_size = 21
+statement_timeout = 30000
+
+# Shard 0
+[pools.sharded_db.shards.0]
+# [ host, port, role ]
+servers = [
+    [ "127.0.0.1", 5432, "primary" ],
+    [ "localhost", 5432, "replica" ]
+]
+# Database name (e.g. "postgres")
+database = "shard0"
+
+[pools.sharded_db.shards.1]
+servers = [
+    [ "127.0.0.1", 5432, "primary" ],
+    [ "localhost", 5432, "replica" ],
+]
+database = "shard1"
+
+[pools.sharded_db.shards.2]
+servers = [
+    [ "127.0.0.1", 5432, "primary" ],
+    [ "localhost", 5432, "replica" ],
+]
+database = "shard2"
+
+
+[pools.simple_db]
+pool_mode = "session"
+default_role = "primary"
+query_parser_enabled = true
+query_parser_read_write_splitting = true
+primary_reads_enabled = true
+sharding_function = "pg_bigint_hash"
+
+[pools.simple_db.users.0]
+username = "simple_user"
+password = "simple_user"
+pool_size = 5
+statement_timeout = 30000
+
+[pools.simple_db.shards.0]
+servers = [
+    [ "127.0.0.1", 5432, "primary" ],
+    [ "localhost", 5432, "replica" ]
+]
+database = "some_db"
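The comments in this config spell out the client-facing contract: the pool name ("sharded_db") is used as the database name in the connection string. As a minimal sketch of what that looks like from a client, using psycopg2 as the Python suites in this changeset do (the host and port below are hypothetical local values, since this file templates the port as "${PORT}"):

import psycopg2

# Hypothetical local pgcat on 127.0.0.1:6432; the Go tests below
# substitute a random port for "${PORT}" before starting the pooler.
conn = psycopg2.connect(
    "postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db"
)
cur = conn.cursor()
cur.execute("SELECT 1")
print(cur.fetchall())
cur.close()
conn.close()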
tests/go/prepared_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
+package pgcat
+
+import (
+    "context"
+    "database/sql"
+    "fmt"
+    _ "github.com/lib/pq"
+    "testing"
+)
+
+func Test(t *testing.T) {
+    t.Cleanup(setup(t))
+    t.Run("Named parameterized prepared statement works", namedParameterizedPreparedStatement)
+    t.Run("Unnamed parameterized prepared statement works", unnamedParameterizedPreparedStatement)
+}
+
+func namedParameterizedPreparedStatement(t *testing.T) {
+    db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
+    if err != nil {
+        t.Fatalf("could not open connection: %+v", err)
+    }
+
+    stmt, err := db.Prepare("SELECT $1")
+    if err != nil {
+        t.Fatalf("could not prepare: %+v", err)
+    }
+
+    for i := 0; i < 100; i++ {
+        rows, err := stmt.Query(1)
+        if err != nil {
+            t.Fatalf("could not query: %+v", err)
+        }
+        _ = rows.Close()
+    }
+}
+
+func unnamedParameterizedPreparedStatement(t *testing.T) {
+    db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
+    if err != nil {
+        t.Fatalf("could not open connection: %+v", err)
+    }
+
+    for i := 0; i < 100; i++ {
+        // Under the hood QueryContext generates an unnamed parameterized prepared statement
+        rows, err := db.QueryContext(context.Background(), "SELECT $1", 1)
+        if err != nil {
+            t.Fatalf("could not query: %+v", err)
+        }
+        _ = rows.Close()
+    }
+}
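The comment in unnamedParameterizedPreparedStatement pins down what is being exercised: database/sql's QueryContext sends an unnamed parameterized prepared statement over the extended protocol, which pgcat has to route and cache correctly. The same protocol path can be driven from Python; a sketch under the assumption that the sharded_db pool above is reachable on a hypothetical local port (asyncpg also prepares every query under the hood):

import asyncio
import asyncpg

async def main():
    # asyncpg prepares statements implicitly, so this loop exercises
    # pgcat's prepared-statement handling much like the Go test above.
    # Host and port are assumed local values, not from this changeset.
    conn = await asyncpg.connect(
        host="127.0.0.1", port=6432, database="sharded_db",
        user="sharding_user", password="sharding_user",
    )
    for _ in range(100):
        rows = await conn.fetch("SELECT $1::int", 1)
        assert rows[0][0] == 1
    await conn.close()

asyncio.run(main())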
tests/go/setup.go (new file, 81 lines)
@@ -0,0 +1,81 @@
+package pgcat
+
+import (
+    "context"
+    "database/sql"
+    _ "embed"
+    "fmt"
+    "math/rand"
+    "os"
+    "os/exec"
+    "strings"
+    "testing"
+    "time"
+)
+
+//go:embed pgcat.toml
+var pgcatCfg string
+
+var port = rand.Intn(32760-20000) + 20000
+
+func setup(t *testing.T) func() {
+    cfg, err := os.CreateTemp("/tmp", "pgcat_cfg_*.toml")
+    if err != nil {
+        t.Fatalf("could not create temp file: %+v", err)
+    }
+
+    pgcatCfg = strings.Replace(pgcatCfg, "\"${PORT}\"", fmt.Sprintf("%d", port), 1)
+
+    _, err = cfg.Write([]byte(pgcatCfg))
+    if err != nil {
+        t.Fatalf("could not write temp file: %+v", err)
+    }
+
+    commandPath := "../../target/debug/pgcat"
+    if os.Getenv("CARGO_TARGET_DIR") != "" {
+        commandPath = os.Getenv("CARGO_TARGET_DIR") + "/debug/pgcat"
+    }
+
+    cmd := exec.Command(commandPath, cfg.Name())
+    cmd.Stdout = os.Stdout
+    cmd.Stderr = os.Stderr
+    go func() {
+        err = cmd.Run()
+        if err != nil {
+            t.Errorf("could not run pgcat: %+v", err)
+        }
+    }()
+
+    deadline, cancelFunc := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
+    defer cancelFunc()
+    for {
+        select {
+        case <-deadline.Done():
+            break
+        case <-time.After(50 * time.Millisecond):
+            db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=pgcat user=admin_user password=admin_pass sslmode=disable", port))
+            if err != nil {
+                continue
+            }
+            rows, err := db.QueryContext(deadline, "SHOW STATS")
+            if err != nil {
+                continue
+            }
+            _ = rows.Close()
+            _ = db.Close()
+            break
+        }
+        break
+    }
+
+    return func() {
+        err := cmd.Process.Signal(os.Interrupt)
+        if err != nil {
+            t.Fatalf("could not interrupt pgcat: %+v", err)
+        }
+        err = os.Remove(cfg.Name())
+        if err != nil {
+            t.Fatalf("could not remove temp file: %+v", err)
+        }
+    }
+}
@@ -36,4 +36,4 @@ SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
 SET SERVER ROLE TO 'replica';

 -- Read load balancing
 SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
@@ -1,60 +0,0 @@
-import psycopg2
-import asyncio
-import asyncpg
-
-PGCAT_HOST = "127.0.0.1"
-PGCAT_PORT = "6432"
-
-
-def regular_main():
-    # Connect to the PostgreSQL database
-    conn = psycopg2.connect(
-        host=PGCAT_HOST,
-        database="sharded_db",
-        user="sharding_user",
-        password="sharding_user",
-        port=PGCAT_PORT,
-    )
-
-    # Open a cursor to perform database operations
-    cur = conn.cursor()
-
-    # Execute a SQL query
-    cur.execute("SELECT 1")
-
-    # Fetch the results
-    rows = cur.fetchall()
-
-    # Print the results
-    for row in rows:
-        print(row[0])
-
-    # Close the cursor and the database connection
-    cur.close()
-    conn.close()
-
-
-async def main():
-    # Connect to the PostgreSQL database
-    conn = await asyncpg.connect(
-        host=PGCAT_HOST,
-        database="sharded_db",
-        user="sharding_user",
-        password="sharding_user",
-        port=PGCAT_PORT,
-    )
-
-    # Execute a SQL query
-    for _ in range(25):
-        rows = await conn.fetch("SELECT 1")
-
-        # Print the results
-        for row in rows:
-            print(row[0])
-
-    # Close the database connection
-    await conn.close()
-
-
-regular_main()
-asyncio.run(main())
@@ -1,11 +1,3 @@
-asyncio==3.4.3
-asyncpg==0.27.0
-black==23.3.0
-click==8.1.3
-mypy-extensions==1.0.0
-packaging==23.1
-pathspec==0.11.1
-platformdirs==3.2.0
-psutil==5.9.1
+pytest
 psycopg2==2.9.3
-tomli==2.0.1
+psutil==5.9.1
tests/python/test_auth.py (new file, 71 lines)
@@ -0,0 +1,71 @@
+import utils
+import signal
+
+
+class TestTrustAuth:
+    @classmethod
+    def setup_method(cls):
+        config = """
+        [general]
+        host = "0.0.0.0"
+        port = 6432
+        admin_username = "admin_user"
+        admin_password = ""
+        admin_auth_type = "trust"
+
+        [pools.sharded_db.users.0]
+        username = "sharding_user"
+        password = "sharding_user"
+        auth_type = "trust"
+        pool_size = 10
+        min_pool_size = 1
+        pool_mode = "transaction"
+
+        [pools.sharded_db.shards.0]
+        servers = [
+            [ "127.0.0.1", 5432, "primary" ],
+        ]
+        database = "shard0"
+        """
+        utils.pgcat_generic_start(config)
+
+    @classmethod
+    def teardown_method(self):
+        utils.pg_cat_send_signal(signal.SIGTERM)
+
+    def test_admin_trust_auth(self):
+        conn, cur = utils.connect_db_trust(admin=True)
+        cur.execute("SHOW POOLS")
+        res = cur.fetchall()
+        print(res)
+        utils.cleanup_conn(conn, cur)
+
+    def test_normal_trust_auth(self):
+        conn, cur = utils.connect_db_trust(autocommit=False)
+        cur.execute("SELECT 1")
+        res = cur.fetchall()
+        print(res)
+        utils.cleanup_conn(conn, cur)
+
+
+class TestMD5Auth:
+    @classmethod
+    def setup_method(cls):
+        utils.pgcat_start()
+
+    @classmethod
+    def teardown_method(self):
+        utils.pg_cat_send_signal(signal.SIGTERM)
+
+    def test_normal_db_access(self):
+        conn, cur = utils.connect_db(autocommit=False)
+        cur.execute("SELECT 1")
+        res = cur.fetchall()
+        print(res)
+        utils.cleanup_conn(conn, cur)
+
+    def test_admin_db_access(self):
+        conn, cur = utils.connect_db(admin=True)
+
+        cur.execute("SHOW POOLS")
+        res = cur.fetchall()
+        print(res)
+        utils.cleanup_conn(conn, cur)
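The trust-auth config above means pgcat authenticates these users without a password. What connect_db_trust (defined in tests/python/utils.py later in this diff) boils down to is a password-less connection URL; a minimal sketch, assuming the pooler from the config above is up on 127.0.0.1:6432:

import psycopg2

# No password in the URL: with auth_type = "trust" pgcat accepts the
# connection based on the username alone.
conn = psycopg2.connect(
    "postgres://sharding_user@127.0.0.1:6432/sharded_db",
    connect_timeout=2,
)
cur = conn.cursor()
cur.execute("SELECT 1")
print(cur.fetchall())
cur.close()
conn.close()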
@@ -1,83 +1,12 @@
-from typing import Tuple
-import psycopg2
-import psutil
-import os
 import signal
 import time
+
+import psycopg2
+import utils

 SHUTDOWN_TIMEOUT = 5

-PGCAT_HOST = "127.0.0.1"
-PGCAT_PORT = "6432"
-
-
-def pgcat_start():
-    pg_cat_send_signal(signal.SIGTERM)
-    os.system("./target/debug/pgcat .circleci/pgcat.toml &")
-    time.sleep(2)
-
-
-def pg_cat_send_signal(signal: signal.Signals):
-    try:
-        for proc in psutil.process_iter(["pid", "name"]):
-            if "pgcat" == proc.name():
-                os.kill(proc.pid, signal)
-    except Exception as e:
-        # The process can be gone when we send this signal
-        print(e)
-
-    if signal == signal.SIGTERM:
-        # Returns 0 if pgcat process exists
-        time.sleep(2)
-        if not os.system('pgrep pgcat'):
-            raise Exception("pgcat not closed after SIGTERM")
-
-
-def connect_db(
-    autocommit: bool = True,
-    admin: bool = False,
-) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:
-
-    if admin:
-        user = "admin_user"
-        password = "admin_pass"
-        db = "pgcat"
-    else:
-        user = "sharding_user"
-        password = "sharding_user"
-        db = "sharded_db"
-
-    conn = psycopg2.connect(
-        f"postgres://{user}:{password}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat",
-        connect_timeout=2,
-    )
-    conn.autocommit = autocommit
-    cur = conn.cursor()
-
-    return (conn, cur)
-
-
-def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.cursor):
-    cur.close()
-    conn.close()
-
-
-def test_normal_db_access():
-    conn, cur = connect_db(autocommit=False)
-    cur.execute("SELECT 1")
-    res = cur.fetchall()
-    print(res)
-    cleanup_conn(conn, cur)
-
-
-def test_admin_db_access():
-    conn, cur = connect_db(admin=True)
-
-    cur.execute("SHOW POOLS")
-    res = cur.fetchall()
-    print(res)
-    cleanup_conn(conn, cur)
-
-
 def test_shutdown_logic():
@@ -85,17 +14,17 @@ def test_shutdown_logic():
     # NO ACTIVE QUERIES SIGINT HANDLING

     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and send query (not in transaction)
-    conn, cur = connect_db()
+    conn, cur = utils.connect_db()

     cur.execute("BEGIN;")
     cur.execute("SELECT 1;")
     cur.execute("COMMIT;")

     # Send sigint to pgcat
-    pg_cat_send_signal(signal.SIGINT)
+    utils.pg_cat_send_signal(signal.SIGINT)
     time.sleep(1)

     # Check that any new queries fail after sigint since server should close with no active transactions
@@ -107,18 +36,18 @@ def test_shutdown_logic():
         # Fail if query execution succeeded
         raise Exception("Server not closed after sigint")

-    cleanup_conn(conn, cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(conn, cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # NO ACTIVE QUERIES ADMIN SHUTDOWN COMMAND

     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction
-    conn, cur = connect_db()
-    admin_conn, admin_cur = connect_db(admin=True)
+    conn, cur = utils.connect_db()
+    admin_conn, admin_cur = utils.connect_db(admin=True)

     cur.execute("BEGIN;")
     cur.execute("SELECT 1;")
@@ -137,24 +66,24 @@ def test_shutdown_logic():
         # Fail if query execution succeeded
         raise Exception("Server not closed after sigint")

-    cleanup_conn(conn, cur)
-    cleanup_conn(admin_conn, admin_cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(conn, cur)
+    utils.cleanup_conn(admin_conn, admin_cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # HANDLE TRANSACTION WITH SIGINT

     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction
-    conn, cur = connect_db()
+    conn, cur = utils.connect_db()

     cur.execute("BEGIN;")
     cur.execute("SELECT 1;")

     # Send sigint to pgcat while still in transaction
-    pg_cat_send_signal(signal.SIGINT)
+    utils.pg_cat_send_signal(signal.SIGINT)
     time.sleep(1)

     # Check that any new queries succeed after sigint since server should still allow transaction to complete
@@ -164,18 +93,18 @@ def test_shutdown_logic():
         # Fail if query fails since server closed
         raise Exception("Server closed while in transaction", e.pgerror)

-    cleanup_conn(conn, cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(conn, cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # HANDLE TRANSACTION WITH ADMIN SHUTDOWN COMMAND

     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction
-    conn, cur = connect_db()
-    admin_conn, admin_cur = connect_db(admin=True)
+    conn, cur = utils.connect_db()
+    admin_conn, admin_cur = utils.connect_db(admin=True)

     cur.execute("BEGIN;")
     cur.execute("SELECT 1;")
@@ -193,30 +122,30 @@ def test_shutdown_logic():
         # Fail if query fails since server closed
         raise Exception("Server closed while in transaction", e.pgerror)

-    cleanup_conn(conn, cur)
-    cleanup_conn(admin_conn, admin_cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(conn, cur)
+    utils.cleanup_conn(admin_conn, admin_cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # NO NEW NON-ADMIN CONNECTIONS DURING SHUTDOWN
     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction
-    transaction_conn, transaction_cur = connect_db()
+    transaction_conn, transaction_cur = utils.connect_db()

     transaction_cur.execute("BEGIN;")
     transaction_cur.execute("SELECT 1;")

     # Send sigint to pgcat while still in transaction
-    pg_cat_send_signal(signal.SIGINT)
+    utils.pg_cat_send_signal(signal.SIGINT)
     time.sleep(1)

     start = time.perf_counter()
     try:
-        conn, cur = connect_db()
+        conn, cur = utils.connect_db()
         cur.execute("SELECT 1;")
-        cleanup_conn(conn, cur)
+        utils.cleanup_conn(conn, cur)
     except psycopg2.OperationalError as e:
         time_taken = time.perf_counter() - start
         if time_taken > 0.1:
@@ -226,49 +155,49 @@ def test_shutdown_logic():
     else:
         raise Exception("Able connect to database during shutdown")

-    cleanup_conn(transaction_conn, transaction_cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(transaction_conn, transaction_cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # ALLOW NEW ADMIN CONNECTIONS DURING SHUTDOWN
     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction
-    transaction_conn, transaction_cur = connect_db()
+    transaction_conn, transaction_cur = utils.connect_db()

     transaction_cur.execute("BEGIN;")
     transaction_cur.execute("SELECT 1;")

     # Send sigint to pgcat while still in transaction
-    pg_cat_send_signal(signal.SIGINT)
+    utils.pg_cat_send_signal(signal.SIGINT)
     time.sleep(1)

     try:
-        conn, cur = connect_db(admin=True)
+        conn, cur = utils.connect_db(admin=True)
         cur.execute("SHOW DATABASES;")
-        cleanup_conn(conn, cur)
+        utils.cleanup_conn(conn, cur)
     except psycopg2.OperationalError as e:
         raise Exception(e)

-    cleanup_conn(transaction_conn, transaction_cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(transaction_conn, transaction_cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # ADMIN CONNECTIONS CONTINUING TO WORK AFTER SHUTDOWN
     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction
-    transaction_conn, transaction_cur = connect_db()
+    transaction_conn, transaction_cur = utils.connect_db()
     transaction_cur.execute("BEGIN;")
     transaction_cur.execute("SELECT 1;")

-    admin_conn, admin_cur = connect_db(admin=True)
+    admin_conn, admin_cur = utils.connect_db(admin=True)
     admin_cur.execute("SHOW DATABASES;")

     # Send sigint to pgcat while still in transaction
-    pg_cat_send_signal(signal.SIGINT)
+    utils.pg_cat_send_signal(signal.SIGINT)
     time.sleep(1)

     try:
@@ -276,24 +205,24 @@ def test_shutdown_logic():
     except psycopg2.OperationalError as e:
         raise Exception("Could not execute admin command:", e)

-    cleanup_conn(transaction_conn, transaction_cur)
-    cleanup_conn(admin_conn, admin_cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(transaction_conn, transaction_cur)
+    utils.cleanup_conn(admin_conn, admin_cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
     # HANDLE SHUTDOWN TIMEOUT WITH SIGINT

     # Start pgcat
-    pgcat_start()
+    utils.pgcat_start()

     # Create client connection and begin transaction, which should prevent server shutdown unless shutdown timeout is reached
-    conn, cur = connect_db()
+    conn, cur = utils.connect_db()

     cur.execute("BEGIN;")
     cur.execute("SELECT 1;")

     # Send sigint to pgcat while still in transaction
-    pg_cat_send_signal(signal.SIGINT)
+    utils.pg_cat_send_signal(signal.SIGINT)

     # pgcat shutdown timeout is set to SHUTDOWN_TIMEOUT seconds, so we sleep for SHUTDOWN_TIMEOUT + 1 seconds
     time.sleep(SHUTDOWN_TIMEOUT + 1)
@@ -307,12 +236,7 @@ def test_shutdown_logic():
         # Fail if query execution succeeded
         raise Exception("Server not closed after sigint and expected timeout")

-    cleanup_conn(conn, cur)
-    pg_cat_send_signal(signal.SIGTERM)
+    utils.cleanup_conn(conn, cur)
+    utils.pg_cat_send_signal(signal.SIGTERM)

     # - - - - - - - - - - - - - - - - - -
-
-
-test_normal_db_access()
-test_admin_db_access()
-test_shutdown_logic()
tests/python/utils.py (new file, 110 lines)
@@ -0,0 +1,110 @@
+import os
+import signal
+import time
+from typing import Tuple
+import tempfile
+
+import psutil
+import psycopg2
+
+PGCAT_HOST = "127.0.0.1"
+PGCAT_PORT = "6432"
+
+
+def _pgcat_start(config_path: str):
+    pg_cat_send_signal(signal.SIGTERM)
+    os.system(f"./target/debug/pgcat {config_path} &")
+    time.sleep(2)
+
+
+def pgcat_start():
+    _pgcat_start(config_path='.circleci/pgcat.toml')
+
+
+def pgcat_generic_start(config: str):
+    tmp = tempfile.NamedTemporaryFile()
+    with open(tmp.name, 'w') as f:
+        f.write(config)
+    _pgcat_start(config_path=tmp.name)
+
+
+def glauth_send_signal(signal: signal.Signals):
+    try:
+        for proc in psutil.process_iter(["pid", "name"]):
+            if proc.name() == "glauth":
+                os.kill(proc.pid, signal)
+    except Exception as e:
+        # The process can be gone when we send this signal
+        print(e)
+
+    if signal == signal.SIGTERM:
+        # Returns 0 if glauth process exists
+        time.sleep(2)
+        if not os.system('pgrep glauth'):
+            raise Exception("glauth not closed after SIGTERM")
+
+
+def pg_cat_send_signal(signal: signal.Signals):
+    try:
+        for proc in psutil.process_iter(["pid", "name"]):
+            if "pgcat" == proc.name():
+                os.kill(proc.pid, signal)
+    except Exception as e:
+        # The process can be gone when we send this signal
+        print(e)
+
+    if signal == signal.SIGTERM:
+        # Returns 0 if pgcat process exists
+        time.sleep(2)
+        if not os.system('pgrep pgcat'):
+            raise Exception("pgcat not closed after SIGTERM")
+
+
+def connect_db(
+    autocommit: bool = True,
+    admin: bool = False,
+) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:
+
+    if admin:
+        user = "admin_user"
+        password = "admin_pass"
+        db = "pgcat"
+    else:
+        user = "sharding_user"
+        password = "sharding_user"
+        db = "sharded_db"
+
+    conn = psycopg2.connect(
+        f"postgres://{user}:{password}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat",
+        connect_timeout=2,
+    )
+    conn.autocommit = autocommit
+    cur = conn.cursor()
+
+    return (conn, cur)
+
+
+def connect_db_trust(
+    autocommit: bool = True,
+    admin: bool = False,
+) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:
+
+    if admin:
+        user = "admin_user"
+        db = "pgcat"
+    else:
+        user = "sharding_user"
+        db = "sharded_db"
+
+    conn = psycopg2.connect(
+        f"postgres://{user}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat",
+        connect_timeout=2,
+    )
+    conn.autocommit = autocommit
+    cur = conn.cursor()
+
+    return (conn, cur)
+
+
+def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.cursor):
+    cur.close()
+    conn.close()
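These helpers are the building blocks the Python suites above lean on: start pgcat, connect through it, and signal the process to verify shutdown behavior. A sketch of how they compose in a hypothetical test body:

import signal
import utils

# Start pgcat with the checked-in CI config, run one query through
# the pool, then ask the process to exit; pg_cat_send_signal raises
# if a pgcat process is still alive two seconds after SIGTERM.
utils.pgcat_start()
conn, cur = utils.connect_db(autocommit=True)
cur.execute("SELECT 1")
assert cur.fetchall() == [(1,)]
utils.cleanup_conn(conn, cur)
utils.pg_cat_send_signal(signal.SIGTERM)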
@@ -1,22 +1,33 @@
 GEM
   remote: https://rubygems.org/
   specs:
-    activemodel (7.0.4.1)
-      activesupport (= 7.0.4.1)
-    activerecord (7.0.4.1)
-      activemodel (= 7.0.4.1)
-      activesupport (= 7.0.4.1)
-    activesupport (7.0.4.1)
+    activemodel (7.1.4)
+      activesupport (= 7.1.4)
+    activerecord (7.1.4)
+      activemodel (= 7.1.4)
+      activesupport (= 7.1.4)
+      timeout (>= 0.4.0)
+    activesupport (7.1.4)
+      base64
+      bigdecimal
       concurrent-ruby (~> 1.0, >= 1.0.2)
+      connection_pool (>= 2.2.5)
+      drb
       i18n (>= 1.6, < 2)
       minitest (>= 5.1)
+      mutex_m
       tzinfo (~> 2.0)
     ast (2.4.2)
-    concurrent-ruby (1.1.10)
+    base64 (0.2.0)
+    bigdecimal (3.1.8)
+    concurrent-ruby (1.3.4)
+    connection_pool (2.4.1)
     diff-lcs (1.5.0)
-    i18n (1.12.0)
+    drb (2.2.1)
+    i18n (1.14.5)
       concurrent-ruby (~> 1.0)
-    minitest (5.17.0)
+    minitest (5.25.1)
+    mutex_m (0.2.0)
     parallel (1.22.1)
     parser (3.1.2.0)
       ast (~> 2.4.1)
@@ -24,7 +35,8 @@ GEM
     pg (1.3.2)
     rainbow (3.1.1)
     regexp_parser (2.3.1)
-    rexml (3.2.5)
+    rexml (3.3.6)
+      strscan
     rspec (3.11.0)
       rspec-core (~> 3.11.0)
       rspec-expectations (~> 3.11.0)
@@ -50,10 +62,12 @@ GEM
     rubocop-ast (1.17.0)
       parser (>= 3.1.1.0)
     ruby-progressbar (1.11.0)
+    strscan (3.1.0)
+    timeout (0.4.1)
     toml (0.3.0)
       parslet (>= 1.8.0, < 3.0.0)
     toxiproxy (2.0.1)
-    tzinfo (2.0.5)
+    tzinfo (2.0.6)
       concurrent-ruby (~> 1.0)
     unicode-display_width (2.1.0)

@@ -11,323 +11,6 @@ describe "Admin" do
     processes.pgcat.shutdown
   end

-  describe "SHOW STATS" do
-    context "clients connect and make one query" do
-      it "updates *_query_time and *_wait_time" do
-        connection = PG::connect("#{pgcat_conn_str}?application_name=one_query")
-        connection.async_exec("SELECT pg_sleep(0.25)")
-        connection.async_exec("SELECT pg_sleep(0.25)")
-        connection.async_exec("SELECT pg_sleep(0.25)")
-        connection.close
-
-        # wait for averages to be calculated, we shouldn't do this too often
-        sleep(15.5)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW STATS")[0]
-        admin_conn.close
-        expect(results["total_query_time"].to_i).to be_within(200).of(750)
-        expect(results["avg_query_time"].to_i).to_not eq(0)
-
-        expect(results["total_wait_time"].to_i).to_not eq(0)
-        expect(results["avg_wait_time"].to_i).to_not eq(0)
-      end
-    end
-  end
-
-  describe "SHOW POOLS" do
-    context "bad credentials" do
-      it "does not change any stats" do
-        bad_password_url = URI(pgcat_conn_str)
-        bad_password_url.password = "wrong"
-        expect { PG::connect("#{bad_password_url.to_s}?application_name=bad_password") }.to raise_error(PG::ConnectionBad)
-
-        sleep(1)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-
-        expect(results["sv_idle"]).to eq("1")
-      end
-    end
-
-    context "bad database name" do
-      it "does not change any stats" do
-        bad_db_url = URI(pgcat_conn_str)
-        bad_db_url.path = "/wrong_db"
-        expect { PG::connect("#{bad_db_url.to_s}?application_name=bad_db") }.to raise_error(PG::ConnectionBad)
-
-        sleep(1)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-
-        expect(results["sv_idle"]).to eq("1")
-      end
-    end
-
-    context "client connects but issues no queries" do
-      it "only affects cl_idle stats" do
-        connections = Array.new(20) { PG::connect(pgcat_conn_str) }
-        sleep(1)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["cl_idle"]).to eq("20")
-        expect(results["sv_idle"]).to eq("1")
-
-        connections.map(&:close)
-        sleep(1.1)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_active cl_idle cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["sv_idle"]).to eq("1")
-      end
-    end
-
-    context "clients connect and make one query" do
-      it "only affects cl_idle, sv_idle stats" do
-        connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          Thread.new { c.async_exec("SELECT pg_sleep(2.5)") }
-        end
-
-        sleep(1.1)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_idle cl_waiting cl_cancel_req sv_idle sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["cl_active"]).to eq("5")
-        expect(results["sv_active"]).to eq("5")
-
-        sleep(3)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["cl_idle"]).to eq("5")
-        expect(results["sv_idle"]).to eq("5")
-
-        connections.map(&:close)
-        sleep(1)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["sv_idle"]).to eq("5")
-      end
-    end
-
-    context "client connects and opens a transaction and closes connection uncleanly" do
-      it "produces correct statistics" do
-        connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          Thread.new do
-            c.async_exec("BEGIN")
-            c.async_exec("SELECT pg_sleep(0.01)")
-            c.close
-          end
-        end
-
-        sleep(1.1)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["sv_idle"]).to eq("5")
-      end
-    end
-
-    context "client fail to checkout connection from the pool" do
-      it "counts clients as idle" do
-        new_configs = processes.pgcat.current_config
-        new_configs["general"]["connect_timeout"] = 500
-        new_configs["general"]["ban_time"] = 1
-        new_configs["general"]["shutdown_timeout"] = 1
-        new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = 1
-        processes.pgcat.update_config(new_configs)
-        processes.pgcat.reload_config
-
-        threads = []
-        connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          threads << Thread.new { c.async_exec("SELECT pg_sleep(1)") rescue PG::SystemError }
-        end
-
-        sleep(2)
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["cl_idle"]).to eq("5")
-        expect(results["sv_idle"]).to eq("1")
-
-        threads.map(&:join)
-        connections.map(&:close)
-      end
-    end
-
-    context "clients connects and disconnect normally" do
-      let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
-
-      it 'shows the same number of clients before and after' do
-        clients_before = clients_connected_to_pool(processes: processes)
-        threads = []
-        connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          threads << Thread.new { c.async_exec("SELECT 1") }
-        end
-        clients_between = clients_connected_to_pool(processes: processes)
-        expect(clients_before).not_to eq(clients_between)
-        connections.each(&:close)
-        clients_after = clients_connected_to_pool(processes: processes)
-        expect(clients_before).to eq(clients_after)
-      end
-    end
-
-    context "clients connects and disconnect abruptly" do
-      let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 10) }
-
-      it 'shows the same number of clients before and after' do
-        threads = []
-        connections = Array.new(2) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          threads << Thread.new { c.async_exec("SELECT 1") }
-        end
-        clients_before = clients_connected_to_pool(processes: processes)
-        random_string = (0...8).map { (65 + rand(26)).chr }.join
-        connection_string = "#{pgcat_conn_str}?application_name=#{random_string}"
-        faulty_client = Process.spawn("psql -Atx #{connection_string} >/dev/null")
-        sleep(1)
-        # psql starts two processes, we only know the pid of the parent, this
-        # ensure both are killed
-        `pkill -9 -f '#{random_string}'`
-        Process.wait(faulty_client)
-        clients_after = clients_connected_to_pool(processes: processes)
-        expect(clients_before).to eq(clients_after)
-      end
-    end
-
-    context "clients overwhelm server pools" do
-      let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
-
-      it "cl_waiting is updated to show it" do
-        threads = []
-        connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") }
-        end
-
-        sleep(1.1) # Allow time for stats to update
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_idle cl_cancel_req sv_idle sv_used sv_tested sv_login maxwait].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-
-        expect(results["cl_waiting"]).to eq("2")
-        expect(results["cl_active"]).to eq("2")
-        expect(results["sv_active"]).to eq("2")
-
-        sleep(2.5) # Allow time for stats to update
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        %w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login].each do |s|
-          raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
-        end
-        expect(results["cl_idle"]).to eq("4")
-        expect(results["sv_idle"]).to eq("2")
-
-        threads.map(&:join)
-        connections.map(&:close)
-      end
-
-      it "show correct max_wait" do
-        threads = []
-        connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
-        connections.each do |c|
-          threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") }
-        end
-
-        sleep(2.5) # Allow time for stats to update
-        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-
-        expect(results["maxwait"]).to eq("1")
-        expect(results["maxwait_us"].to_i).to be_within(200_000).of(500_000)
-
-        sleep(4.5) # Allow time for stats to update
-        results = admin_conn.async_exec("SHOW POOLS")[0]
-        expect(results["maxwait"]).to eq("0")
-
-        threads.map(&:join)
-        connections.map(&:close)
-      end
-    end
-  end
-
-  describe "SHOW CLIENTS" do
-    it "reports correct number and application names" do
-      conn_str = processes.pgcat.connection_string("sharded_db", "sharding_user")
-      connections = Array.new(20) { |i| PG::connect("#{conn_str}?application_name=app#{i % 5}") }
-
-      admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-      sleep(1) # Wait for stats to be updated
-
-      results = admin_conn.async_exec("SHOW CLIENTS")
-      expect(results.count).to eq(21) # count admin clients
-      expect(results.select { |c| c["application_name"] == "app3" || c["application_name"] == "app4" }.count).to eq(8)
-      expect(results.select { |c| c["database"] == "pgcat" }.count).to eq(1)
-
-      connections[0..5].map(&:close)
-      sleep(1) # Wait for stats to be updated
-      results = admin_conn.async_exec("SHOW CLIENTS")
-      expect(results.count).to eq(15)
-
-      connections[6..].map(&:close)
-      sleep(1) # Wait for stats to be updated
-      expect(admin_conn.async_exec("SHOW CLIENTS").count).to eq(1)
-      admin_conn.close
-    end
-
-    it "reports correct number of queries and transactions" do
-      conn_str = processes.pgcat.connection_string("sharded_db", "sharding_user")
-
-      connections = Array.new(2) { |i| PG::connect("#{conn_str}?application_name=app#{i}") }
-      connections.each do |c|
-        c.async_exec("SELECT 1")
-        c.async_exec("SELECT 2")
-        c.async_exec("SELECT 3")
-        c.async_exec("BEGIN")
-        c.async_exec("SELECT 4")
-        c.async_exec("SELECT 5")
-        c.async_exec("COMMIT")
-      end
-
-      admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-      sleep(1) # Wait for stats to be updated
-
-      results = admin_conn.async_exec("SHOW CLIENTS")
-      expect(results.count).to eq(3)
-      normal_client_results = results.reject { |r| r["database"] == "pgcat" }
-      expect(normal_client_results[0]["transaction_count"]).to eq("4")
-      expect(normal_client_results[1]["transaction_count"]).to eq("4")
-      expect(normal_client_results[0]["query_count"]).to eq("7")
-      expect(normal_client_results[1]["query_count"]).to eq("7")
-
-      admin_conn.close
-      connections.map(&:close)
-    end
-  end
-
   describe "Manual Banning" do
     let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 10) }
     before do
@@ -398,7 +81,7 @@ describe "Admin" do
     end
   end

-  describe "SHOW users" do
+  describe "SHOW USERS" do
     it "returns the right users" do
       admin_conn = PG::connect(processes.pgcat.admin_connection_string)
       results = admin_conn.async_exec("SHOW USERS")[0]
@@ -407,4 +90,49 @@ describe "Admin" do
|
|||||||
expect(results["pool_mode"]).to eq("transaction")
|
expect(results["pool_mode"]).to eq("transaction")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
[
|
||||||
|
"SHOW ME THE MONEY",
|
||||||
|
"SHOW ME THE WAY",
|
||||||
|
"SHOW UP",
|
||||||
|
"SHOWTIME",
|
||||||
|
"HAMMER TIME",
|
||||||
|
"SHOWN TO BE TRUE",
|
||||||
|
"SHOW ",
|
||||||
|
"SHOW ",
|
||||||
|
"SHOW 1",
|
||||||
|
";;;;;"
|
||||||
|
].each do |cmd|
|
||||||
|
describe "Bad command #{cmd}" do
|
||||||
|
it "does not panic and responds with PG::SystemError" do
|
||||||
|
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
|
||||||
|
expect { admin_conn.async_exec(cmd) }.to raise_error(PG::SystemError).with_message(/Unsupported/)
|
||||||
|
admin_conn.close
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
describe "PAUSE" do
|
||||||
|
it "pauses all pools" do
|
||||||
|
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
|
||||||
|
results = admin_conn.async_exec("SHOW DATABASES").to_a
|
||||||
|
expect(results.map{ |r| r["paused"] }.uniq).to eq(["0"])
|
||||||
|
|
||||||
|
admin_conn.async_exec("PAUSE")
|
||||||
|
|
||||||
|
results = admin_conn.async_exec("SHOW DATABASES").to_a
|
||||||
|
expect(results.map{ |r| r["paused"] }.uniq).to eq(["1"])
|
||||||
|
|
||||||
|
admin_conn.async_exec("RESUME")
|
||||||
|
|
||||||
|
results = admin_conn.async_exec("SHOW DATABASES").to_a
|
||||||
|
expect(results.map{ |r| r["paused"] }.uniq).to eq(["0"])
|
||||||
|
end
|
||||||
|
|
||||||
|
it "handles errors" do
|
||||||
|
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
|
||||||
|
expect { admin_conn.async_exec("PAUSE foo").to_a }.to raise_error(PG::SystemError)
|
||||||
|
expect { admin_conn.async_exec("PAUSE foo,bar").to_a }.to raise_error(PG::SystemError)
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|||||||
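The PAUSE spec above doubles as documentation for pgcat's admin commands: connecting to the special pgcat admin database and issuing PAUSE or RESUME flips the paused column in SHOW DATABASES for every pool. A minimal sketch of the same flow outside the suite (the admin credentials and port here are placeholders, not values from this diff):

    require 'pg'

    admin = PG.connect("postgresql://admin_user:admin_pass@127.0.0.1:6432/pgcat")
    admin.async_exec("PAUSE")   # every row of SHOW DATABASES now has paused = "1"
    admin.async_exec("RESUME")  # and back to paused = "0"
    admin.close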
@@ -185,7 +185,7 @@ describe "Auth Query" do
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
context 'and with cleartext passwords set' do
|
context 'and with cleartext passwords set' do
|
||||||
it 'it uses local passwords' do
|
it 'it uses local passwords' do
|
||||||
|
|||||||
tests/ruby/copy_spec.rb (new file, 102 lines)
@@ -0,0 +1,102 @@
+# frozen_string_literal: true
+require_relative 'spec_helper'
+
+
+describe "COPY Handling" do
+  let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 5) }
+  before do
+    new_configs = processes.pgcat.current_config
+
+    # Allow connections in the pool to expire faster
+    new_configs["general"]["idle_timeout"] = 5
+    processes.pgcat.update_config(new_configs)
+    # We need to kill the old process that was using the default configs
+    processes.pgcat.stop
+    processes.pgcat.start
+    processes.pgcat.wait_until_ready
+  end
+
+  before do
+    processes.all_databases.first.with_connection do |conn|
+      conn.async_exec "CREATE TABLE copy_test_table (a TEXT, b TEXT, c TEXT, d TEXT)"
+    end
+  end
+
+  after do
+    processes.all_databases.first.with_connection do |conn|
+      conn.async_exec "DROP TABLE copy_test_table;"
+    end
+  end
+
+  after do
+    processes.all_databases.map(&:reset)
+    processes.pgcat.shutdown
+  end
+
+  describe "COPY FROM" do
+    context "within transaction" do
+      it "finishes within allotted time" do
+        conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+        Timeout.timeout(3) do
+          conn.async_exec("BEGIN")
+          conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
+            sleep 0.5
+            conn.put_copy_data "some,data,to,copy\n"
+            conn.put_copy_data "more,data,to,copy\n"
+          end
+          conn.async_exec("COMMIT")
+        end
+
+        res = conn.async_exec("SELECT * FROM copy_test_table").to_a
+        expect(res).to eq([
+          {"a"=>"some", "b"=>"data", "c"=>"to", "d"=>"copy"},
+          {"a"=>"more", "b"=>"data", "c"=>"to", "d"=>"copy"}
+        ])
+      end
+    end
+
+    context "outside transaction" do
+      it "finishes within allotted time" do
+        conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+        Timeout.timeout(3) do
+          conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
+            sleep 0.5
+            conn.put_copy_data "some,data,to,copy\n"
+            conn.put_copy_data "more,data,to,copy\n"
+          end
+        end
+
+        res = conn.async_exec("SELECT * FROM copy_test_table").to_a
+        expect(res).to eq([
+          {"a"=>"some", "b"=>"data", "c"=>"to", "d"=>"copy"},
+          {"a"=>"more", "b"=>"data", "c"=>"to", "d"=>"copy"}
+        ])
+      end
+    end
+  end
+
+  describe "COPY TO" do
+    before do
+      conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+      conn.async_exec("BEGIN")
+      conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
+        conn.put_copy_data "some,data,to,copy\n"
+        conn.put_copy_data "more,data,to,copy\n"
+      end
+      conn.async_exec("COMMIT")
+      conn.close
+    end
+
+    it "works" do
+      res = []
+      conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+      conn.copy_data "COPY copy_test_table TO STDOUT CSV" do
+        while row = conn.get_copy_data
+          res << row
+        end
+      end
+      expect(res).to eq(["some,data,to,copy\n", "more,data,to,copy\n"])
+    end
+  end
+end
@@ -33,18 +33,18 @@ module Helpers
        "0" => {
          "database" => "shard0",
          "servers" => [
-            ["localhost", primary.port.to_s, "primary"],
-            ["localhost", replica.port.to_s, "replica"],
+            ["localhost", primary.port.to_i, "primary"],
+            ["localhost", replica.port.to_i, "replica"],
          ]
        },
      },
      "users" => { "0" => user.merge(config_user) }
    }
  }
-  pgcat_cfg["general"]["port"] = pgcat.port
+  pgcat_cfg["general"]["port"] = pgcat.port.to_i
  pgcat.update_config(pgcat_cfg)
  pgcat.start

  pgcat.wait_until_ready(
    pgcat.connection_string(
      "sharded_db",
@@ -92,13 +92,13 @@ module Helpers
        "0" => {
          "database" => database,
          "servers" => [
-            ["localhost", primary.port.to_s, "primary"],
-            ["localhost", replica.port.to_s, "replica"],
+            ["localhost", primary.port.to_i, "primary"],
+            ["localhost", replica.port.to_i, "replica"],
          ]
        },
      },
      "users" => { "0" => user.merge(config_user) }
    }
  end
  # Main proxy configs
  pgcat_cfg["pools"] = {
@@ -109,7 +109,7 @@ module Helpers
  pgcat_cfg["general"]["port"] = pgcat.port
  pgcat.update_config(pgcat_cfg.deep_merge(extra_conf))
  pgcat.start

  pgcat.wait_until_ready(pgcat.connection_string("sharded_db0", pg_user['username'], pg_user['password']))

  OpenStruct.new.tap do |struct|
@@ -7,10 +7,24 @@ class PgInstance
  attr_reader :password
  attr_reader :database_name

+  def self.mass_takedown(databases)
+    raise StandardError, "block missing" unless block_given?
+
+    databases.each do |database|
+      database.toxiproxy.toxic(:limit_data, bytes: 1).toxics.each(&:save)
+    end
+    sleep 0.1
+    yield
+  ensure
+    databases.each do |database|
+      database.toxiproxy.toxics.each(&:destroy)
+    end
+  end
+
  def initialize(port, username, password, database_name)
-    @original_port = port
+    @original_port = port.to_i
    @toxiproxy_port = 10000 + port.to_i
-    @port = @toxiproxy_port
+    @port = @toxiproxy_port.to_i

    @username = username
    @password = password
@@ -48,9 +62,9 @@ class PgInstance

  def take_down
    if block_given?
-      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 5).apply { yield }
+      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 1).apply { yield }
    else
-      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 5).toxics.each(&:save)
+      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 1).toxics.each(&:save)
    end
  end

@@ -89,6 +103,6 @@ class PgInstance
  end

  def count_select_1_plus_2
-    with_connection { |c| c.async_exec("SELECT SUM(calls) FROM pg_stat_statements WHERE query = 'SELECT $1 + $2'")[0]["sum"].to_i }
+    with_connection { |c| c.async_exec("SELECT SUM(calls) FROM pg_stat_statements WHERE query LIKE '%SELECT $1 + $2%'")[0]["sum"].to_i }
  end
end
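PgInstance.mass_takedown wraps the same limit_data toxic that take_down uses, but applies it to a whole set of databases at once and tears the toxics down in an ensure block, so connectivity is restored even when the block raises. A hypothetical usage sketch inside a spec like the ones in this diff (the processes[:replicas] collection and the expected error class are assumptions, not shown in this hunk):

    PgInstance.mass_takedown(processes[:replicas]) do
      # every replica now drops its connections after 1 byte, so queries fail
      expect { conn.async_exec("SELECT 1") }.to raise_error(PG::SystemError)
    end
    # the ensure inside mass_takedown has destroyed the toxics by this point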
@@ -34,14 +34,32 @@ module Helpers
      "load_balancing_mode" => lb_mode,
      "primary_reads_enabled" => true,
      "query_parser_enabled" => true,
+      "query_parser_read_write_splitting" => true,
      "automatic_sharding_key" => "data.id",
      "sharding_function" => "pg_bigint_hash",
      "shards" => {
-        "0" => { "database" => "shard0", "servers" => [["localhost", primary0.port.to_s, "primary"]] },
-        "1" => { "database" => "shard1", "servers" => [["localhost", primary1.port.to_s, "primary"]] },
-        "2" => { "database" => "shard2", "servers" => [["localhost", primary2.port.to_s, "primary"]] },
+        "0" => { "database" => "shard0", "servers" => [["localhost", primary0.port.to_i, "primary"]] },
+        "1" => { "database" => "shard1", "servers" => [["localhost", primary1.port.to_i, "primary"]] },
+        "2" => { "database" => "shard2", "servers" => [["localhost", primary2.port.to_i, "primary"]] },
      },
-      "users" => { "0" => user }
+      "users" => { "0" => user },
+      "plugins" => {
+        "intercept" => {
+          "enabled" => true,
+          "queries" => {
+            "0" => {
+              "query" => "select current_database() as a, current_schemas(false) as b",
+              "schema" => [
+                ["a", "text"],
+                ["b", "text"],
+              ],
+              "result" => [
+                ["${DATABASE}", "{public}"],
+              ]
+            }
+          }
+        }
+      }
    }
  }
  pgcat.update_config(pgcat_cfg)
@@ -82,7 +100,7 @@ module Helpers
      "0" => {
        "database" => "shard0",
        "servers" => [
-          ["localhost", primary.port.to_s, "primary"]
+          ["localhost", primary.port.to_i, "primary"]
        ]
      },
    },
@@ -101,7 +119,7 @@ module Helpers
    end
  end

-  def self.single_shard_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="info")
+  def self.single_shard_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="info", pool_settings={})
    user = {
      "password" => "sharding_user",
      "pool_size" => pool_size,
@@ -117,28 +135,32 @@ module Helpers
    replica1 = PgInstance.new(8432, user["username"], user["password"], "shard0")
    replica2 = PgInstance.new(9432, user["username"], user["password"], "shard0")

+    pool_config = {
+      "default_role" => "any",
+      "pool_mode" => pool_mode,
+      "load_balancing_mode" => lb_mode,
+      "primary_reads_enabled" => false,
+      "query_parser_enabled" => false,
+      "sharding_function" => "pg_bigint_hash",
+      "shards" => {
+        "0" => {
+          "database" => "shard0",
+          "servers" => [
+            ["localhost", primary.port.to_i, "primary"],
+            ["localhost", replica0.port.to_i, "replica"],
+            ["localhost", replica1.port.to_i, "replica"],
+            ["localhost", replica2.port.to_i, "replica"]
+          ]
+        },
+      },
+      "users" => { "0" => user }
+    }
+
+    pool_config = pool_config.merge(pool_settings)
+
    # Main proxy configs
    pgcat_cfg["pools"] = {
-      "#{pool_name}" => {
-        "default_role" => "any",
-        "pool_mode" => pool_mode,
-        "load_balancing_mode" => lb_mode,
-        "primary_reads_enabled" => false,
-        "query_parser_enabled" => false,
-        "sharding_function" => "pg_bigint_hash",
-        "shards" => {
-          "0" => {
-            "database" => "shard0",
-            "servers" => [
-              ["localhost", primary.port.to_s, "primary"],
-              ["localhost", replica0.port.to_s, "replica"],
-              ["localhost", replica1.port.to_s, "replica"],
-              ["localhost", replica2.port.to_s, "replica"]
-            ]
-          },
-        },
-        "users" => { "0" => user }
-      }
+      "#{pool_name}" => pool_config,
    }
    pgcat_cfg["general"]["port"] = pgcat.port
    pgcat.update_config(pgcat_cfg)
@@ -1,8 +1,10 @@
 require 'pg'
-require 'toml'
+require 'json'
+require 'tempfile'
 require 'fileutils'
 require 'securerandom'

+class ConfigReloadFailed < StandardError; end
 class PgcatProcess
   attr_reader :port
   attr_reader :pid
@@ -18,7 +20,7 @@ class PgcatProcess
  end

  def initialize(log_level)
-    @env = {"RUST_LOG" => log_level}
+    @env = {}
    @port = rand(20000..32760)
    @log_level = log_level
    @log_filename = "/tmp/pgcat_log_#{SecureRandom.urlsafe_base64}.log"
@@ -30,7 +32,7 @@ class PgcatProcess
      '../../target/debug/pgcat'
    end

-    @command = "#{command_path} #{@config_filename}"
+    @command = "#{command_path} #{@config_filename} --log-level #{@log_level}"

    FileUtils.cp("../../pgcat.toml", @config_filename)
    cfg = current_config
@@ -46,22 +48,34 @@ class PgcatProcess

  def update_config(config_hash)
    @original_config = current_config
-    output_to_write = TOML::Generator.new(config_hash).body
-    output_to_write = output_to_write.gsub(/,\s*["|'](\d+)["|']\s*,/, ',\1,')
-    output_to_write = output_to_write.gsub(/,\s*["|'](\d+)["|']\s*\]/, ',\1]')
-    File.write(@config_filename, output_to_write)
+    Tempfile.create('json_out', '/tmp') do |f|
+      f.write(config_hash.to_json)
+      f.flush
+      `cat #{f.path} | yj -jt > #{@config_filename}`
+    end
  end

  def current_config
-    loadable_string = File.read(@config_filename)
-    loadable_string = loadable_string.gsub(/,\s*(\d+)\s*,/, ', "\1",')
-    loadable_string = loadable_string.gsub(/,\s*(\d+)\s*\]/, ', "\1"]')
-    TOML.load(loadable_string)
+    JSON.parse(`cat #{@config_filename} | yj -tj`)
+  end
+
+  def raw_config_file
+    File.read(@config_filename)
  end

  def reload_config
-    `kill -s HUP #{@pid}`
-    sleep 0.5
+    conn = PG.connect(admin_connection_string)
+
+    conn.async_exec("RELOAD")
+  rescue PG::ConnectionBad => e
+    errors = logs.split("Reloading config").last
+    errors = errors.gsub(/\e\[([;\d]+)?m/, '') # Remove color codes
+    errors = errors.
+      split("\n").select { |line| line.include?("ERROR") }.
+      map { |line| line.split("pgcat::config: ").last }
+    raise ConfigReloadFailed, errors.join("\n")
+  ensure
+    conn&.close
  end

  def start
@@ -112,10 +126,16 @@ class PgcatProcess
    "postgresql://#{username}:#{password}@0.0.0.0:#{@port}/pgcat"
  end

-  def connection_string(pool_name, username, password = nil)
+  def connection_string(pool_name, username, password = nil, parameters: {})
    cfg = current_config
    user_idx, user_obj = cfg["pools"][pool_name]["users"].detect { |k, user| user["username"] == username }
-    "postgresql://#{username}:#{password || user_obj["password"]}@0.0.0.0:#{@port}/#{pool_name}"
+    connection_string = "postgresql://#{username}:#{password || user_obj["password"]}@0.0.0.0:#{@port}/#{pool_name}"
+
+    # Add the additional parameters to the connection string
+    parameter_string = parameters.map { |key, value| "#{key}=#{value}" }.join("&")
+    connection_string += "?#{parameter_string}" unless parameter_string.empty?
+
+    connection_string
  end

  def example_connection_string
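update_config now round-trips the config hash through JSON and the yj converter instead of hand-patching TOML::Generator output with regexes, so the types in the Ruby hash land in pgcat.toml unchanged. That is why ports and mirror indices switch from .to_s to .to_i throughout this diff. A minimal sketch of the round-trip, assuming the yj binary is on PATH:

    require 'json'
    require 'tempfile'

    config = { "general" => { "port" => 6432 } }

    Tempfile.create('json_out', '/tmp') do |f|
      f.write(config.to_json)
      f.flush
      puts `cat #{f.path} | yj -jt`  # port = 6432 stays an integer in the TOML
    end

Relatedly, reload_config now goes through the admin RELOAD command rather than sending SIGHUP, and surfaces any config ERROR lines scraped from the log as a ConfigReloadFailed exception instead of failing silently.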
@@ -56,6 +56,41 @@ describe "Random Load Balancing" do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
context "when all replicas are down " do
|
||||||
|
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 5, "transaction", "random", "debug", {"default_role" => "replica"}) }
|
||||||
|
|
||||||
|
it "unbans them automatically to prevent false positives in health checks that could make all replicas unavailable" do
|
||||||
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
failed_count = 0
|
||||||
|
number_of_replicas = processes[:replicas].length
|
||||||
|
|
||||||
|
# Take down all replicas
|
||||||
|
processes[:replicas].each(&:take_down)
|
||||||
|
|
||||||
|
(number_of_replicas + 1).times do |n|
|
||||||
|
conn.async_exec("SELECT 1 + 2")
|
||||||
|
rescue
|
||||||
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
failed_count += 1
|
||||||
|
end
|
||||||
|
|
||||||
|
expect(failed_count).to eq(number_of_replicas + 1)
|
||||||
|
failed_count = 0
|
||||||
|
|
||||||
|
# Ban_time is configured to 60 so this reset will only work
|
||||||
|
# if the replicas are unbanned automatically
|
||||||
|
processes[:replicas].each(&:reset)
|
||||||
|
|
||||||
|
number_of_replicas.times do
|
||||||
|
conn.async_exec("SELECT 1 + 2")
|
||||||
|
rescue
|
||||||
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
failed_count += 1
|
||||||
|
end
|
||||||
|
expect(failed_count).to eq(0)
|
||||||
|
end
|
||||||
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
describe "Least Outstanding Queries Load Balancing" do
|
describe "Least Outstanding Queries Load Balancing" do
|
||||||
@@ -161,4 +196,3 @@ describe "Least Outstanding Queries Load Balancing" do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
|||||||
@@ -11,9 +11,9 @@ describe "Query Mirroing" do
|
|||||||
before do
|
before do
|
||||||
new_configs = processes.pgcat.current_config
|
new_configs = processes.pgcat.current_config
|
||||||
new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
|
new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
|
||||||
[mirror_host, mirror_pg.port.to_s, "0"],
|
[mirror_host, mirror_pg.port.to_i, 0],
|
||||||
[mirror_host, mirror_pg.port.to_s, "0"],
|
[mirror_host, mirror_pg.port.to_i, 0],
|
||||||
[mirror_host, mirror_pg.port.to_s, "0"],
|
[mirror_host, mirror_pg.port.to_i, 0],
|
||||||
]
|
]
|
||||||
processes.pgcat.update_config(new_configs)
|
processes.pgcat.update_config(new_configs)
|
||||||
processes.pgcat.reload_config
|
processes.pgcat.reload_config
|
||||||
@@ -25,13 +25,14 @@ describe "Query Mirroing" do
|
|||||||
processes.pgcat.shutdown
|
processes.pgcat.shutdown
|
||||||
end
|
end
|
||||||
|
|
||||||
it "can mirror a query" do
|
xit "can mirror a query" do
|
||||||
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
runs = 15
|
runs = 15
|
||||||
runs.times { conn.async_exec("SELECT 1 + 2") }
|
runs.times { conn.async_exec("SELECT 1 + 2") }
|
||||||
sleep 0.5
|
sleep 0.5
|
||||||
expect(processes.all_databases.first.count_select_1_plus_2).to eq(runs)
|
expect(processes.all_databases.first.count_select_1_plus_2).to eq(runs)
|
||||||
expect(mirror_pg.count_select_1_plus_2).to eq(runs * 3)
|
# Allow some slack in mirroring successes
|
||||||
|
expect(mirror_pg.count_select_1_plus_2).to be > ((runs - 5) * 3)
|
||||||
end
|
end
|
||||||
|
|
||||||
context "when main server connection is closed" do
|
context "when main server connection is closed" do
|
||||||
@@ -42,9 +43,9 @@ describe "Query Mirroing" do
|
|||||||
new_configs = processes.pgcat.current_config
|
new_configs = processes.pgcat.current_config
|
||||||
new_configs["pools"]["sharded_db"]["idle_timeout"] = 5000 + i
|
new_configs["pools"]["sharded_db"]["idle_timeout"] = 5000 + i
|
||||||
new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
|
new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
|
||||||
[mirror_host, mirror_pg.port.to_s, "0"],
|
[mirror_host, mirror_pg.port.to_i, 0],
|
||||||
[mirror_host, mirror_pg.port.to_s, "0"],
|
[mirror_host, mirror_pg.port.to_i, 0],
|
||||||
[mirror_host, mirror_pg.port.to_s, "0"],
|
[mirror_host, mirror_pg.port.to_i, 0],
|
||||||
]
|
]
|
||||||
processes.pgcat.update_config(new_configs)
|
processes.pgcat.update_config(new_configs)
|
||||||
processes.pgcat.reload_config
|
processes.pgcat.reload_config
|
||||||
|
|||||||
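With the yj-based serialization above, each mirror entry must be typed as [host, port, shard_index] with an integer port and an integer shard index; string values are no longer coerced to numbers the way the old regex hack in update_config did. A sketch of the shape, using a placeholder port:

    new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
      # [mirror host, mirror Postgres port, index of the shard being mirrored]
      ["localhost", 5433, 0],
    ]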
@@ -221,7 +221,7 @@ describe "Miscellaneous" do
|
|||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
|
|
||||||
it "Does not send DISCARD ALL unless necessary" do
|
it "Does not send RESET ALL unless necessary" do
|
||||||
10.times do
|
10.times do
|
||||||
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
conn.async_exec("SET SERVER ROLE to 'primary'")
|
conn.async_exec("SET SERVER ROLE to 'primary'")
|
||||||
@@ -229,7 +229,7 @@ describe "Miscellaneous" do
|
|||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
|
expect(processes.primary.count_query("RESET ALL")).to eq(0)
|
||||||
|
|
||||||
10.times do
|
10.times do
|
||||||
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
@@ -239,7 +239,19 @@ describe "Miscellaneous" do
|
|||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(processes.primary.count_query("DISCARD ALL")).to eq(10)
|
expect(processes.primary.count_query("RESET ALL")).to eq(10)
|
||||||
|
end
|
||||||
|
|
||||||
|
it "Resets server roles correctly" do
|
||||||
|
10.times do
|
||||||
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
conn.async_exec("SET SERVER ROLE to 'primary'")
|
||||||
|
conn.async_exec("SELECT 1")
|
||||||
|
conn.async_exec("SET statement_timeout to 5000")
|
||||||
|
conn.close
|
||||||
|
end
|
||||||
|
|
||||||
|
expect(processes.primary.count_query("RESET ROLE")).to eq(10)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
@@ -261,7 +273,7 @@ describe "Miscellaneous" do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
it "Does not send DISCARD ALL unless necessary" do
|
it "Does not send RESET ALL unless necessary" do
|
||||||
10.times do
|
10.times do
|
||||||
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
conn.async_exec("SET SERVER ROLE to 'primary'")
|
conn.async_exec("SET SERVER ROLE to 'primary'")
|
||||||
@@ -270,7 +282,7 @@ describe "Miscellaneous" do
|
|||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
|
expect(processes.primary.count_query("RESET ALL")).to eq(0)
|
||||||
|
|
||||||
10.times do
|
10.times do
|
||||||
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
@@ -280,8 +292,32 @@ describe "Miscellaneous" do
|
|||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(processes.primary.count_query("DISCARD ALL")).to eq(10)
|
expect(processes.primary.count_query("RESET ALL")).to eq(10)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
it "Respects tracked parameters on startup" do
|
||||||
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user", parameters: { "application_name" => "my_pgcat_test" }))
|
||||||
|
|
||||||
|
expect(conn.async_exec("SHOW application_name")[0]["application_name"]).to eq("my_pgcat_test")
|
||||||
|
conn.close
|
||||||
|
end
|
||||||
|
|
||||||
|
it "Respect tracked parameter on set statemet" do
|
||||||
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
|
||||||
|
conn.async_exec("SET application_name to 'my_pgcat_test'")
|
||||||
|
expect(conn.async_exec("SHOW application_name")[0]["application_name"]).to eq("my_pgcat_test")
|
||||||
|
end
|
||||||
|
|
||||||
|
|
||||||
|
it "Ignore untracked parameter on set statemet" do
|
||||||
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
orignal_statement_timeout = conn.async_exec("SHOW statement_timeout")[0]["statement_timeout"]
|
||||||
|
|
||||||
|
conn.async_exec("SET statement_timeout to 1500")
|
||||||
|
expect(conn.async_exec("SHOW statement_timeout")[0]["statement_timeout"]).to eq(orignal_statement_timeout)
|
||||||
|
end
|
||||||
|
|
||||||
end
|
end
|
||||||
|
|
||||||
context "transaction mode with transactions" do
|
context "transaction mode with transactions" do
|
||||||
@@ -295,7 +331,7 @@ describe "Miscellaneous" do
|
|||||||
conn.async_exec("COMMIT")
|
conn.async_exec("COMMIT")
|
||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
|
expect(processes.primary.count_query("RESET ALL")).to eq(0)
|
||||||
|
|
||||||
10.times do
|
10.times do
|
||||||
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
@@ -305,7 +341,30 @@ describe "Miscellaneous" do
|
|||||||
conn.async_exec("COMMIT")
|
conn.async_exec("COMMIT")
|
||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
|
expect(processes.primary.count_query("RESET ALL")).to eq(0)
|
||||||
|
end
|
||||||
|
end
|
||||||
|
|
||||||
|
context "server cleanup disabled" do
|
||||||
|
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 1, "transaction", "random", "info", { "cleanup_server_connections" => false }) }
|
||||||
|
|
||||||
|
it "will not clean up connection state" do
|
||||||
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
processes.primary.reset_stats
|
||||||
|
conn.async_exec("SET statement_timeout TO 1000")
|
||||||
|
conn.close
|
||||||
|
|
||||||
|
expect(processes.primary.count_query("RESET ALL")).to eq(0)
|
||||||
|
end
|
||||||
|
|
||||||
|
it "will not clean up prepared statements" do
|
||||||
|
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
processes.primary.reset_stats
|
||||||
|
conn.async_exec("PREPARE prepared_q (int) AS SELECT $1")
|
||||||
|
|
||||||
|
conn.close
|
||||||
|
|
||||||
|
expect(processes.primary.count_query("RESET ALL")).to eq(0)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@@ -315,10 +374,9 @@ describe "Miscellaneous" do
|
|||||||
before do
|
before do
|
||||||
current_configs = processes.pgcat.current_config
|
current_configs = processes.pgcat.current_config
|
||||||
correct_idle_client_transaction_timeout = current_configs["general"]["idle_client_in_transaction_timeout"]
|
correct_idle_client_transaction_timeout = current_configs["general"]["idle_client_in_transaction_timeout"]
|
||||||
puts(current_configs["general"]["idle_client_in_transaction_timeout"])
|
|
||||||
|
|
||||||
current_configs["general"]["idle_client_in_transaction_timeout"] = 0
|
current_configs["general"]["idle_client_in_transaction_timeout"] = 0
|
||||||
|
|
||||||
processes.pgcat.update_config(current_configs) # with timeout 0
|
processes.pgcat.update_config(current_configs) # with timeout 0
|
||||||
processes.pgcat.reload_config
|
processes.pgcat.reload_config
|
||||||
end
|
end
|
||||||
@@ -336,9 +394,9 @@ describe "Miscellaneous" do
|
|||||||
context "idle transaction timeout set to 500ms" do
|
context "idle transaction timeout set to 500ms" do
|
||||||
before do
|
before do
|
||||||
current_configs = processes.pgcat.current_config
|
current_configs = processes.pgcat.current_config
|
||||||
correct_idle_client_transaction_timeout = current_configs["general"]["idle_client_in_transaction_timeout"]
|
correct_idle_client_transaction_timeout = current_configs["general"]["idle_client_in_transaction_timeout"]
|
||||||
current_configs["general"]["idle_client_in_transaction_timeout"] = 500
|
current_configs["general"]["idle_client_in_transaction_timeout"] = 500
|
||||||
|
|
||||||
processes.pgcat.update_config(current_configs) # with timeout 500
|
processes.pgcat.update_config(current_configs) # with timeout 500
|
||||||
processes.pgcat.reload_config
|
processes.pgcat.reload_config
|
||||||
end
|
end
|
||||||
@@ -357,7 +415,7 @@ describe "Miscellaneous" do
|
|||||||
conn.async_exec("BEGIN")
|
conn.async_exec("BEGIN")
|
||||||
conn.async_exec("SELECT 1")
|
conn.async_exec("SELECT 1")
|
||||||
sleep(1) # above 500ms
|
sleep(1) # above 500ms
|
||||||
expect{ conn.async_exec("COMMIT") }.to raise_error(PG::SystemError, /idle transaction timeout/)
|
expect{ conn.async_exec("COMMIT") }.to raise_error(PG::SystemError, /idle transaction timeout/)
|
||||||
conn.async_exec("SELECT 1") # should be able to send another query
|
conn.async_exec("SELECT 1") # should be able to send another query
|
||||||
conn.close
|
conn.close
|
||||||
end
|
end
|
||||||
|
|||||||
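The tracked-parameter specs above rely on the parameters: keyword this diff adds to PgcatProcess#connection_string, which simply appends URL query parameters. A sketch of the resulting string (the listen port is random per process; 6432 is a placeholder):

    processes.pgcat.connection_string(
      "sharded_db", "sharding_user",
      parameters: { "application_name" => "my_pgcat_test" }
    )
    # => "postgresql://sharding_user:sharding_user@0.0.0.0:6432/sharded_db?application_name=my_pgcat_test"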
tests/ruby/plugins_spec.rb (new file, 14 lines)
@@ -0,0 +1,14 @@
+require_relative 'spec_helper'
+
+
+describe "Plugins" do
+  let(:processes) { Helpers::Pgcat.three_shard_setup("sharded_db", 5) }
+
+  context "intercept" do
+    it "will intercept an IntelliJ query" do
+      conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+      res = conn.exec("select current_database() as a, current_schemas(false) as b")
+      expect(res.values).to eq([["sharded_db", "{public}"]])
+    end
+  end
+end
tests/ruby/prepared_spec.rb (new file, 214 lines)
@@ -0,0 +1,214 @@
+require_relative 'spec_helper'
+
+describe 'Prepared statements' do
+  let(:pool_size) { 5 }
+  let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", pool_size) }
+  let(:prepared_statements_cache_size) { 100 }
+  let(:server_round_robin) { false }
+
+  before do
+    new_configs = processes.pgcat.current_config
+    new_configs["general"]["server_round_robin"] = server_round_robin
+    new_configs["pools"]["sharded_db"]["prepared_statements_cache_size"] = prepared_statements_cache_size
+    new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = pool_size
+    processes.pgcat.update_config(new_configs)
+    processes.pgcat.reload_config
+  end
+
+  context 'when trying prepared statements' do
+    it 'allows unparameterized statements to succeed' do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      conn2 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      prepared_query = "SELECT 1"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      conn1.exec_prepared('statement1')
+
+      conn2.transaction do
+        # Claim server 1 with client 2
+        conn2.exec("SELECT 2")
+
+        # Client 1 now runs the prepared query, and it's automatically
+        # prepared on server 2
+        conn1.prepare('statement2', prepared_query)
+        conn1.exec_prepared('statement2')
+
+        # Client 2 now prepares the same query that was already
+        # prepared on server 1. And PgCat reuses that already
+        # prepared query for this different client.
+        conn2.prepare('statement3', prepared_query)
+        conn2.exec_prepared('statement3')
+      end
+    ensure
+      conn1.close if conn1
+      conn2.close if conn2
+    end
+
+    it 'allows parameterized statements to succeed' do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      conn2 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      prepared_query = "SELECT $1"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      conn1.exec_prepared('statement1', [1])
+
+      conn2.transaction do
+        # Claim server 1 with client 2
+        conn2.exec("SELECT 2")
+
+        # Client 1 now runs the prepared query, and it's automatically
+        # prepared on server 2
+        conn1.prepare('statement2', prepared_query)
+        conn1.exec_prepared('statement2', [1])
+
+        # Client 2 now prepares the same query that was already
+        # prepared on server 1. And PgCat reuses that already
+        # prepared query for this different client.
+        conn2.prepare('statement3', prepared_query)
+        conn2.exec_prepared('statement3', [1])
+      end
+    ensure
+      conn1.close if conn1
+      conn2.close if conn2
+    end
+  end
+
+  context 'when trying large packets' do
+    it "works with large parse" do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      long_string = "1" * 4096 * 10
+      prepared_query = "SELECT '#{long_string}'"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      result = conn1.exec_prepared('statement1')
+
+      # assert result matches long_string
+      expect(result.getvalue(0, 0)).to eq(long_string)
+    ensure
+      conn1.close if conn1
+    end
+
+    it "works with large bind" do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      long_string = "1" * 4096 * 10
+      prepared_query = "SELECT $1::text"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      result = conn1.exec_prepared('statement1', [long_string])
+
+      # assert result matches long_string
+      expect(result.getvalue(0, 0)).to eq(long_string)
+    ensure
+      conn1.close if conn1
+    end
+  end
+
+  context 'when statement cache is smaller than set of unique statements' do
+    let(:prepared_statements_cache_size) { 1 }
+    let(:pool_size) { 1 }
+
+    it "evicts all but 1 statement from the server cache" do
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      5.times do |i|
+        prepared_query = "SELECT '#{i}'"
+        conn.prepare("statement#{i}", prepared_query)
+        result = conn.exec_prepared("statement#{i}")
+        expect(result.getvalue(0, 0)).to eq(i.to_s)
+      end
+
+      # Check number of prepared statements (expected: 1)
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(1)
+    end
+  end
+
+  context 'when statement cache is larger than set of unique statements' do
+    let(:pool_size) { 1 }
+
+    it "does not evict any of the statements from the cache" do
+      # default cache size (100) exceeds the 5 unique statements below
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      5.times do |i|
+        prepared_query = "SELECT '#{i}'"
+        conn.prepare("statement#{i}", prepared_query)
+        result = conn.exec_prepared("statement#{i}")
+        expect(result.getvalue(0, 0)).to eq(i.to_s)
+      end
+
+      # Check number of prepared statements (expected: 5)
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(5)
+    end
+  end
+
+  context 'when preparing the same query' do
+    let(:prepared_statements_cache_size) { 5 }
+    let(:pool_size) { 5 }
+
+    it "reuses statement cache when there are different statement names on the same connection" do
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      10.times do |i|
+        statement_name = "statement_#{i}"
+        conn.prepare(statement_name, 'SELECT $1::int')
+        conn.exec_prepared(statement_name, [1])
+      end
+
+      # Check number of prepared statements (expected: 1)
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(1)
+    end
+
+    it "reuses statement cache when there are different statement names on different connections" do
+      10.times do |i|
+        conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+        statement_name = "statement_#{i}"
+        conn.prepare(statement_name, 'SELECT $1::int')
+        conn.exec_prepared(statement_name, [1])
+      end
+
+      # Check number of prepared statements (expected: 1)
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(1)
+    end
+  end
+
+  context 'when reloading config' do
+    let(:pool_size) { 1 }
+
+    it "test_reload_config" do
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      # prepare query
+      conn.prepare('statement1', 'SELECT 1')
+      conn.exec_prepared('statement1')
+
+      # Reload config which triggers pool recreation
+      new_configs = processes.pgcat.current_config
+      new_configs["pools"]["sharded_db"]["prepared_statements_cache_size"] = prepared_statements_cache_size + 1
+      processes.pgcat.update_config(new_configs)
+      processes.pgcat.reload_config
+
+      # check that we're starting with no prepared statements on the server
+      conn_check = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      n_statements = conn_check.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(0)
+
+      # still able to run prepared query
+      conn.exec_prepared('statement1')
+    end
+  end
+end
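The cache-size contexts above all reduce to one invariant: with a single server connection, the number of rows in pg_prepared_statements should be the number of distinct query texts sent, capped by prepared_statements_cache_size. A small helper expressing that expectation (hypothetical, not part of the suite):

    # expected statements left on one server connection
    def expected_server_statements(unique_queries, cache_size)
      [unique_queries, cache_size].min
    end

    expected_server_statements(5, 1)    # => 1, the eviction spec
    expected_server_statements(5, 100)  # => 5, the no-eviction spec
    expected_server_statements(1, 5)    # => 1, the reuse specs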
Some files were not shown because too many files have changed in this diff.