mirror of https://github.com/postgresml/pgcat.git
synced 2026-03-27 02:36:29 +00:00

Compare commits: levkk-tls-... → circleci_O

71 commits (SHA1):

16a2cece21, 0ee59c0c40, b61d2cc6f0, c11418c083, c9544bdff2, cdcfa99fb9,
f27dc6b483, 326efc22b3, 01c6afb2e5, a68071dd28, c27d801abf, 186e72298f,
3935366d86, b575935b1d, efbab1c333, 9f12d7958e, e6634ef461, dab2e58647,
4aaa4378cf, 670311daf9, b9ec7f8036, d91d23848b, bbbc01a467, 9bb71ede9d,
88b2afb19b, f0865ca616, 7d047c6c19, f73d15f82c, 69af6cc5e5, ca34597002,
2def40ea6a, c05129018d, 4a7a6a8e7a, 29a476e190, 81933b918d, 7cbc9178d8,
2c8b2f0776, 8f9a2b8e6f, cbf4d58144, 731aa047ba, 88dbcc21d1, c34b15bddc,
0b034a6831, 966b8e093c, c9270a47d4, 0d94d0b90a, 358724f7a9, e1e4929d43,
dc4d6edf17, ec3920d60f, 4c5498b915, 0e8064b049, 4dbef49ec9, bc07dc9c81,
9b8166b313, e58d69f3de, e76d720ffb, 998cc16a3c, 7c37da2fad, b45c6b1d23,
dae240d30c, b52ea8e7f1, 7d3003a16a, d37df43a90, 2c7bf52c17, de8df29ca4,
c4fb72b9fc, 3371c01e0e, c2a483f36a, 51cd13b8b5, a054b454d2
@@ -63,6 +63,9 @@ jobs:
       - run:
           name: "Lint"
           command: "cargo fmt --check"
+      - run:
+          name: "Clippy"
+          command: "cargo clippy --all --all-targets -- -Dwarnings"
       - run:
          name: "Tests"
          command: "cargo clean && cargo build && cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"
@@ -59,6 +59,7 @@ admin_password = "admin_pass"
 # session: one server connection per connected client
 # transaction: one server connection per client transaction
 pool_mode = "transaction"
+prepared_statements_cache_size = 500

 # If the client doesn't specify, route traffic to
 # this role by default.
@@ -141,6 +142,7 @@ query_parser_enabled = true
 query_parser_read_write_splitting = true
 primary_reads_enabled = true
 sharding_function = "pg_bigint_hash"
+prepared_statements_cache_size = 500

 [pools.simple_db.users.0]
 username = "simple_user"
@@ -26,6 +26,7 @@ PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i
 PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i

 # Start Toxiproxy
+kill -9 $(pgrep toxiproxy) || true
 LOG_LEVEL=error toxiproxy-server &
 sleep 1

@@ -106,10 +107,26 @@ cd ../..
 # These tests will start and stop the pgcat server so it will need to be restarted after the tests
 #
 pip3 install -r tests/python/requirements.txt
-python3 tests/python/tests.py || exit 1
+pytest || exit 1


+#
+# Go tests
+# Starts its own pgcat server
+#
+pushd tests/go
+/usr/local/go/bin/go test || exit 1
+popd
+
 start_pgcat "info"

+#
+# Rust tests
+#
+cd tests/rust
+cargo run
+cd ../../
+
 # Admin tests
 export PGPASSWORD=admin_pass
 psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null
@@ -161,3 +178,6 @@ killall pgcat -s SIGINT

 # Allow for graceful shutdown
 sleep 1
+
+kill -9 $(pgrep toxiproxy)
+sleep 1
4 .github/dependabot.yml vendored
@@ -10,3 +10,7 @@ updates:
     commit-message:
       prefix: "chore(deps)"
     open-pull-requests-limit: 10
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
20 .github/workflows/build-and-push.yaml vendored
@@ -2,7 +2,9 @@ name: Build and Push

 on:
   push:
-    branches:
+    paths:
+      - '!charts/**.md'
+    branches:
       - main
     tags:
       - v*
@@ -21,14 +23,17 @@ jobs:

     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

       - name: Determine tags
         id: metadata
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: ${{ env.registry }}/${{ env.image-name }}
           tags: |
@@ -40,15 +45,18 @@ jobs:
           type=raw,value=latest,enable={{ is_default_branch }}

       - name: Log in to the Container registry
-        uses: docker/login-action@v2.1.0
+        uses: docker/login-action@v3
         with:
           registry: ${{ env.registry }}
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}

       - name: Build and push ${{ env.image-name }}
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v6
         with:
+          context: .
+          platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: ${{ steps.metadata.outputs.tags }}
           labels: ${{ steps.metadata.outputs.labels }}
50 .github/workflows/chart-lint-test.yaml vendored Normal file
@@ -0,0 +1,50 @@
+name: Lint and Test Charts
+
+on:
+  pull_request:
+    paths:
+      - charts/**
+      - '!charts/**.md'
+jobs:
+  lint-test:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3.1.0
+        with:
+          fetch-depth: 0
+
+      - name: Set up Helm
+        uses: azure/setup-helm@v3
+        with:
+          version: v3.8.1
+
+      # Python is required because `ct lint` runs Yamale (https://github.com/23andMe/Yamale) and
+      # yamllint (https://github.com/adrienverge/yamllint) which require Python
+      - name: Set up Python
+        uses: actions/setup-python@v5.1.0
+        with:
+          python-version: 3.7
+
+      - name: Set up chart-testing
+        uses: helm/chart-testing-action@v2.2.1
+        with:
+          version: v3.5.1
+
+      - name: Run chart-testing (list-changed)
+        id: list-changed
+        run: |
+          changed=$(ct list-changed --config ct.yaml)
+          if [[ -n "$changed" ]]; then
+            echo "changed=true" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Run chart-testing (lint)
+        run: ct lint --config ct.yaml
+
+      - name: Create kind cluster
+        uses: helm/kind-action@v1.10.0
+        if: steps.list-changed.outputs.changed == 'true'
+
+      - name: Run chart-testing (install)
+        run: ct install --config ct.yaml
40 .github/workflows/chart-release.yaml vendored Normal file
@@ -0,0 +1,40 @@
+name: Release Charts
+
+on:
+  push:
+    paths:
+      - charts/**
+      - '!**.md'
+    branches:
+      - main
+
+jobs:
+  release:
+    runs-on: ubuntu-latest
+
+    permissions:
+      contents: write
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
+        with:
+          fetch-depth: 0
+
+      - name: Configure Git
+        run: |
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+
+      - name: Install Helm
+        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
+        with:
+          version: v3.13.0
+
+      - name: Run chart-releaser
+        uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0
+        with:
+          charts_dir: charts
+          config: cr.yaml
+        env:
+          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
48 .github/workflows/generate-chart-readme.yaml vendored Normal file
@@ -0,0 +1,48 @@
+name: '[CI/CD] Update README metadata'
+
+on:
+  pull_request_target:
+    branches:
+      - main
+    paths:
+      - 'charts/*/values.yaml'
+# Remove all permissions by default
+permissions: {}
+jobs:
+  update-readme-metadata:
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - name: Install readme-generator-for-helm
+        run: npm install -g @bitnami/readme-generator-for-helm
+      - name: Checkout
+        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
+        with:
+          path: charts
+          ref: ${{github.event.pull_request.head.ref}}
+          repository: ${{github.event.pull_request.head.repo.full_name}}
+          token: ${{ secrets.GITHUB_TOKEN }}
+      - name: Execute readme-generator-for-helm
+        env:
+          DIFF_URL: "${{github.event.pull_request.diff_url}}"
+          TEMP_FILE: "${{runner.temp}}/pr-${{github.event.number}}.diff"
+        run: |
+          # This request doesn't consume API calls.
+          curl -Lkso $TEMP_FILE $DIFF_URL
+          files_changed="$(sed -nr 's/[\-\+]{3} [ab]\/(.*)/\1/p' $TEMP_FILE | sort | uniq)"
+          # Adding || true to avoid "Process exited with code 1" errors
+          charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "pgcat/[^/]*" | sort | uniq || true)"
+          for chart in ${charts_dirs_changed}; do
+            echo "Updating README.md for ${chart}"
+            readme-generator --values "charts/${chart}/values.yaml" --readme "charts/${chart}/README.md" --schema "/tmp/schema.json"
+          done
+      - name: Push changes
+        run: |
+          # Push all the changes
+          cd charts
+          if git status -s | grep pgcat; then
+            git config user.name "$GITHUB_ACTOR"
+            git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+            git add . && git commit -am "Update README.md with readme-generator-for-helm" --signoff && git push
+          fi
17 .github/workflows/publish-deb-package.yml vendored
@@ -1,10 +1,13 @@
 name: pgcat package (deb)

 on:
+  push:
+    tags:
+      - v*
   workflow_dispatch:
     inputs:
       packageVersion:
-        default: "1.1.2-dev"
+        default: "1.1.2-dev1"
 jobs:
   build:
     strategy:
@@ -16,6 +19,14 @@ jobs:
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v3
+      - name: Set package version
+        if: github.event_name == 'push' # For push event
+        run: |
+          TAG=${{ github.ref_name }}
+          echo "packageVersion=${TAG#v}" >> "$GITHUB_ENV"
+      - name: Set package version (manual dispatch)
+        if: github.event_name == 'workflow_dispatch' # For manual dispatch
+        run: echo "packageVersion=${{ github.event.inputs.packageVersion }}" >> "$GITHUB_ENV"
       - uses: actions-rs/toolchain@v1
         with:
           toolchain: stable
@@ -39,10 +50,10 @@ jobs:
           export ARCH=arm64
           fi

-          bash utilities/deb.sh ${{ inputs.packageVersion }}
+          bash utilities/deb.sh ${{ env.packageVersion }}

           deb-s3 upload \
             --lock \
             --bucket apt.postgresml.org \
-            pgcat-${{ inputs.packageVersion }}-ubuntu22.04-${ARCH}.deb \
+            pgcat-${{ env.packageVersion }}-ubuntu22.04-${ARCH}.deb \
             --codename $(lsb_release -cs)
3 .gitignore vendored
@@ -10,3 +10,6 @@ lcov.info
 dev/.bash_history
 dev/cache
 !dev/cache/.keepme
+.venv
+**/__pycache__
+.bundle
60 CONFIG.md
@@ -36,10 +36,11 @@ Port at which prometheus exporter listens on.
 ### connect_timeout
 ```
 path: general.connect_timeout
-default: 5000 # milliseconds
+default: 1000 # milliseconds
 ```

-How long to wait before aborting a server connection (ms).
+How long the client waits to obtain a server connection before aborting (ms).
+This is similar to PgBouncer's `query_wait_timeout`.

 ### idle_timeout
 ```
@@ -259,22 +260,6 @@ Password to be used for connecting to servers to obtain the hash used for md5 au
 specified in `auth_query_user`. The connection will be established using the database configured in the pool.
 This parameter is inherited by every pool and can be redefined in pool configuration.

-### prepared_statements
-```
-path: general.prepared_statements
-default: false
-```
-
-Whether to use prepared statements or not.
-
-### prepared_statements_cache_size
-```
-path: general.prepared_statements_cache_size
-default: 500
-```
-
-Size of the prepared statements cache.
-
 ### dns_cache_enabled
 ```
 path: general.dns_cache_enabled
@@ -324,6 +309,24 @@ If the client doesn't specify, PgCat routes traffic to this role by default.
 `replica` round-robin between replicas only without touching the primary,
 `primary` all queries go to the primary unless otherwise specified.

+### replica_to_primary_failover_enabled
+```
+path: pools.<pool_name>.replica_to_primary_failover_enabled
+default: "false"
+```
+
+If set to true, when the specified role is `replica` (either by setting `default_role` or manually)
+and all replicas are banned, queries will be sent to the primary (until a replica is back online).
+
+### prepared_statements_cache_size
+```
+path: general.prepared_statements_cache_size
+default: 0
+```
+
+Size of the prepared statements cache. 0 means disabled.
+TODO: update documentation
+
 ### query_parser_enabled
 ```
 path: pools.<pool_name>.query_parser_enabled
@@ -469,10 +472,18 @@ path: pools.<pool_name>.users.<user_index>.pool_size
 default: 9
 ```

-Maximum number of server connections that can be established for this user
+Maximum number of server connections that can be established for this user.
 The maximum number of connection from a single Pgcat process to any database in the cluster
 is the sum of pool_size across all users.

+### min_pool_size
+```
+path: pools.<pool_name>.users.<user_index>.min_pool_size
+default: 0
+```
+
+Minimum number of idle server connections to retain for this pool.
+
 ### statement_timeout
 ```
 path: pools.<pool_name>.users.<user_index>.statement_timeout
@@ -482,6 +493,16 @@ default: 0
 Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
 0 means it is disabled.

+### connect_timeout
+```
+path: pools.<pool_name>.users.<user_index>.connect_timeout
+default: <UNSET> # milliseconds
+```
+
+How long the client waits to obtain a server connection before aborting (ms).
+This is similar to PgBouncer's `query_wait_timeout`.
+If unset, uses the `connect_timeout` defined globally.
+
 ## `pools.<pool_name>.shards.<shard_index>` Section

 ### servers
@@ -509,4 +530,3 @@ default: "shard0"
 ```

 Database name (e.g. "postgres")
-
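Taken together, these CONFIG.md changes tighten the default `connect_timeout`, move prepared-statement settings, and add `replica_to_primary_failover_enabled`, `min_pool_size`, and a per-user `connect_timeout`. A minimal pgcat.toml sketch of where the new knobs live — values are illustrative, not recommendations, and note that the new `prepared_statements_cache_size` entry above still lists its path under `general` while the example pgcat.toml in this changeset sets it per pool:

```
[general]
# Global wait for a server connection; a per-user connect_timeout overrides this.
connect_timeout = 1000 # milliseconds

[pools.simple_db]
default_role = "replica"
# Send queries to the primary while all replicas are banned.
replica_to_primary_failover_enabled = true
# 0 disables the prepared statements cache.
prepared_statements_cache_size = 500

[pools.simple_db.users.0]
username = "simple_user"
pool_size = 9
# Minimum number of idle server connections to retain.
min_pool_size = 0
# Per-user override; falls back to general.connect_timeout when unset.
connect_timeout = 3000 # milliseconds (illustrative)
```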
@@ -2,10 +2,36 @@

 Thank you for contributing! Just a few tips here:

-1. `cargo fmt` your code before opening up a PR
+1. `cargo fmt` and `cargo clippy` your code before opening up a PR
 2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`.
 3. Performance is important, make sure there are no regressions in your branch vs. `main`.

+## How to run the integration tests locally and iterate on them
+
+We have integration tests written in Ruby, Python, Go and Rust.
+Below are the steps to run them in a developer-friendly way that allows iterating and quick turnaround.
+Hear me out, this should be easy: it involves opening a shell into a container with all the necessary dependencies available, so you can modify the test code and immediately rerun your test in the interactive shell.
+
+Quite simply, make sure you have docker installed and then run
+`./start_test_env.sh`
+
+That is it!
+
+Within this test environment you can modify the files in your favorite IDE and rerun the tests without having to bootstrap the entire environment again.
+
+Once the environment is ready, you can run the tests by running
+Ruby: `cd /app/tests/ruby && bundle exec ruby <test_name>.rb --format documentation`
+Python: `cd /app/ && pytest`
+Rust: `cd /app/tests/rust && cargo run`
+Go: `cd /app/tests/go && /usr/local/go/bin/go test`
+
+You can also rebuild PgCat directly within the environment and the tests will run against the newly built binary.
+To rebuild PgCat, just run `cargo build` within the container under `/app`.
+
+
+
 Happy hacking!

 ## TODOs
138 Cargo.lock generated
@@ -17,6 +17,17 @@ version = "1.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

+[[package]]
+name = "ahash"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
+dependencies = [
+ "cfg-if",
+ "once_cell",
+ "version_check",
+]
+
 [[package]]
 name = "aho-corasick"
 version = "1.0.2"
@@ -26,6 +37,12 @@ dependencies = [
  "memchr",
 ]

+[[package]]
+name = "allocator-api2"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+
 [[package]]
 name = "android-tzdata"
 version = "0.1.1"
@@ -129,6 +146,12 @@ dependencies = [
  "syn 2.0.26",
 ]

+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
 [[package]]
 name = "atomic_enum"
 version = "0.2.0"
@@ -169,12 +192,11 @@ checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"

 [[package]]
 name = "bb8"
-version = "0.8.1"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "98b4b0f25f18bcdc3ac72bdb486ed0acf7e185221fd4dc985bc15db5800b0ba2"
+checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8"
 dependencies = [
  "async-trait",
- "futures-channel",
  "futures-util",
  "parking_lot",
  "tokio",
@@ -525,34 +547,32 @@ checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"

 [[package]]
 name = "h2"
-version = "0.3.20"
+version = "0.4.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049"
+checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205"
 dependencies = [
+ "atomic-waker",
  "bytes",
  "fnv",
  "futures-core",
  "futures-sink",
- "futures-util",
  "http",
- "indexmap 1.9.3",
+ "indexmap",
  "slab",
  "tokio",
  "tokio-util",
  "tracing",
 ]

-[[package]]
-name = "hashbrown"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-
 [[package]]
 name = "hashbrown"
 version = "0.14.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
+dependencies = [
+ "ahash",
+ "allocator-api2",
+]

 [[package]]
 name = "heck"
@@ -588,9 +608,9 @@ dependencies = [

 [[package]]
 name = "http"
-version = "0.2.9"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482"
+checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258"
 dependencies = [
  "bytes",
  "fnv",
@@ -599,12 +619,24 @@ dependencies = [

 [[package]]
 name = "http-body"
-version = "0.4.5"
+version = "1.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
 dependencies = [
  "bytes",
  "http",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
  "pin-project-lite",
 ]

@@ -622,13 +654,12 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"

 [[package]]
 name = "hyper"
-version = "0.14.27"
+version = "1.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"
+checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05"
 dependencies = [
  "bytes",
  "futures-channel",
- "futures-core",
  "futures-util",
  "h2",
  "http",
@@ -637,13 +668,26 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite",
- "socket2 0.4.9",
+ "smallvec",
  "tokio",
- "tower-service",
- "tracing",
  "want",
 ]

+[[package]]
+name = "hyper-util"
+version = "0.1.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9"
+dependencies = [
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "pin-project-lite",
+ "tokio",
+]
+
 [[package]]
 name = "iana-time-zone"
 version = "0.1.57"
@@ -688,16 +732,6 @@ dependencies = [
  "unicode-normalization",
 ]

-[[package]]
-name = "indexmap"
-version = "1.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
-dependencies = [
- "autocfg",
- "hashbrown 0.12.3",
-]
-
 [[package]]
 name = "indexmap"
 version = "2.0.0"
@@ -705,7 +739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
 dependencies = [
  "equivalent",
- "hashbrown 0.14.0",
+ "hashbrown",
 ]

 [[package]]
@@ -821,6 +855,15 @@ version = "0.4.19"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"

+[[package]]
+name = "lru"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efa59af2ddfad1854ae27d75009d538d0998b4b2fd47083e743ac1a10e46c60"
+dependencies = [
+ "hashbrown",
+]
+
 [[package]]
 name = "lru-cache"
 version = "0.1.2"
@@ -990,7 +1033,7 @@ checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"

 [[package]]
 name = "pgcat"
-version = "1.1.2-dev"
+version = "1.2.0"
 dependencies = [
  "arc-swap",
  "async-trait",
@@ -1004,10 +1047,13 @@ dependencies = [
  "fallible-iterator",
  "futures",
  "hmac",
+ "http-body-util",
  "hyper",
+ "hyper-util",
  "itertools",
  "jemallocator",
  "log",
+ "lru",
  "md-5",
  "nix",
  "num_cpus",
@@ -1447,9 +1493,9 @@ dependencies = [

 [[package]]
 name = "smallvec"
-version = "1.11.0"
+version = "1.13.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
+checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"

 [[package]]
 name = "socket2"
@@ -1479,9 +1525,9 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d"

 [[package]]
 name = "sqlparser"
-version = "0.34.0"
+version = "0.41.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37d3706eefb17039056234df6b566b0014f303f867f2656108334a55b8096f59"
+checksum = "5cc2c25a6c66789625ef164b4c7d2e548d627902280c13710d33da8222169964"
 dependencies = [
  "log",
  "sqlparser_derive",
@@ -1489,13 +1535,13 @@ dependencies = [

 [[package]]
 name = "sqlparser_derive"
-version = "0.1.1"
+version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55fe75cb4a364c7f7ae06c7dbbc8d84bddd85d6cdf9975963c3935bc1991761e"
+checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 1.0.109",
+ "syn 2.0.26",
 ]

 [[package]]
@@ -1710,19 +1756,13 @@ version = "0.19.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f8123f27e969974a3dfba720fdb560be359f57b44302d280ba72e76a74480e8a"
 dependencies = [
- "indexmap 2.0.0",
+ "indexmap",
  "serde",
  "serde_spanned",
  "toml_datetime",
  "winnow",
 ]

-[[package]]
-name = "tower-service"
-version = "0.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"
-
 [[package]]
 name = "tracing"
 version = "0.1.37"
18 Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pgcat"
-version = "1.1.2-dev"
+version = "1.2.0"
 edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -8,7 +8,7 @@ edition = "2021"
 tokio = { version = "1", features = ["full"] }
 bytes = "1"
 md-5 = "0.10"
-bb8 = "0.8.1"
+bb8 = "=0.8.6"
 async-trait = "0.1"
 rand = "0.8"
 chrono = "0.4"
@@ -19,7 +19,7 @@ serde_derive = "1"
 regex = "1"
 num_cpus = "1"
 once_cell = "1"
-sqlparser = {version = "0.34", features = ["visitor"] }
+sqlparser = { version = "0.41", features = ["visitor"] }
 log = "0.4"
 arc-swap = "1"
 parking_lot = "0.12.1"
@@ -29,7 +29,9 @@ base64 = "0.21"
 stringprep = "0.1"
 tokio-rustls = "0.24"
 rustls-pemfile = "1"
-hyper = { version = "0.14", features = ["full"] }
+http-body-util = "0.1.2"
+hyper = { version = "1.4.1", features = ["full"] }
+hyper-util = { version = "0.1.7", features = ["tokio"] }
 phf = { version = "0.11.1", features = ["macros"] }
 exitcode = "1.1.2"
 futures = "0.3"
@@ -47,8 +49,12 @@ serde_json = "1"
 itertools = "0.10"
 clap = { version = "4.3.1", features = ["derive", "env"] }
 tracing = "0.1.37"
-tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter", "std"]}
+tracing-subscriber = { version = "0.3.17", features = [
+    "json",
+    "env-filter",
+    "std",
+] }
+lru = "0.12.0"

 [target.'cfg(not(target_env = "msvc"))'.dependencies]
 jemallocator = "0.5.0"

@@ -1,4 +1,4 @@
-FROM rust:1-slim-bookworm AS builder
+FROM rust:1.79.0-slim-bookworm AS builder

 RUN apt-get update && \
     apt-get install -y build-essential
@@ -8,8 +8,15 @@ WORKDIR /app
 RUN cargo build --release

 FROM debian:bookworm-slim
+RUN apt-get update && apt-get install -o Dpkg::Options::=--force-confdef -yq --no-install-recommends \
+    postgresql-client \
+    # Clean up layer
+    && apt-get clean \
+    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
+    && truncate -s 0 /var/log/*log
 COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
 COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
 WORKDIR /etc/pgcat
 ENV RUST_LOG=info
 CMD ["pgcat"]
+STOPSIGNAL SIGINT
@@ -1,4 +1,4 @@
-FROM cimg/rust:1.67.1
+FROM cimg/rust:1.79.0
 COPY --from=sclevine/yj /bin/yj /bin/yj
 RUN /bin/yj -h
 RUN sudo apt-get update && \
@@ -9,6 +9,9 @@ RUN sudo apt-get update && \
     sudo apt-get upgrade curl && \
     cargo install cargo-binutils rustfilt && \
     rustup component add llvm-tools-preview && \
     pip3 install psycopg2 && sudo gem install bundler && \
     wget -O /tmp/toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
     sudo dpkg -i /tmp/toxiproxy-2.4.0.deb
+RUN wget -O /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
+    sudo tar -C /usr/local -xzf /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
+    rm /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz
@@ -40,7 +40,7 @@ PgCat is stable and used in production to serve hundreds of thousands of queries
 </a>
 </td>
 <td>
-  <a href="https://postgresml.org/blog/scaling-postgresml-to-one-million-requests-per-second">
+  <a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
   <img src="./images/postgresml.webp" height="70" width="auto">
 </a>
 </td>
@@ -57,7 +57,7 @@ PgCat is stable and used in production to serve hundreds of thousands of queries
 </a>
 </td>
 <td>
-  <a href="https://postgresml.org/blog/scaling-postgresml-to-one-million-requests-per-second">
+  <a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
   PostgresML
 </a>
 </td>
@@ -175,7 +175,7 @@ The setting will persist until it's changed again or the client disconnects.
 By default, all queries are routed to the first available server; `default_role` setting controls this behavior.

 ### Failover
-All servers are checked with a `;` (very fast) query before being given to a client. Additionally, the server health is monitored with every client query that it processes. If the server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban. The queries are routed to the remaining servers. If all servers become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.
+All servers are checked with a `;` (very fast) query before being given to a client. Additionally, the server health is monitored with every client query that it processes. If the server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban. The queries are routed to the remaining servers. If `replica_to_primary_failover_enabled` is set to true and all replicas become banned, the query will be routed to the primary. If `replica_to_primary_failover_enabled` is false and all servers (replicas) become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.

 The ban time can be changed with `ban_time`. The default is 60 seconds.

@@ -268,6 +268,8 @@ psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'

 Additionally, Prometheus statistics are available at `/metrics` via HTTP.

+We also have a [basic Grafana dashboard](https://github.com/postgresml/pgcat/blob/main/grafana_dashboard.json) based on Prometheus metrics that you can import into Grafana and build on it or use it for monitoring.
+
 ### Live configuration reloading

 The config can be reloaded by sending a `kill -s SIGHUP` to the process or by querying `RELOAD` to the admin database. All settings except the `host` and `port` can be reloaded without restarting the pooler, including sharding and replicas configurations.
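The rewritten Failover paragraph distinguishes two behaviors when every replica is banned: with `replica_to_primary_failover_enabled = true`, traffic falls back to the primary; with it false (the default), the ban list is cleared as a guard against false positives. A minimal sketch of the settings involved, assuming the pool name from the example config above (values are illustrative):

```
[general]
# Seconds a failed server stays banned; the README default is 60.
ban_time = 60

[pools.simple_db]
# Failover to the primary is only meaningful when the effective role is "replica".
default_role = "replica"
replica_to_primary_failover_enabled = true
```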
23 charts/pgcat/.helmignore Normal file
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
8 charts/pgcat/Chart.yaml Normal file
@@ -0,0 +1,8 @@
+apiVersion: v2
+name: pgcat
+description: A Helm chart for PgCat a PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
+maintainers:
+  - name: Wildcard
+    email: support@w6d.io
+appVersion: "1.2.0"
+version: 0.2.4
22 charts/pgcat/templates/NOTES.txt Normal file
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "pgcat.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+  You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "pgcat.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "pgcat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "pgcat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
3 charts/pgcat/templates/_config.tpl Normal file
@@ -0,0 +1,3 @@
+{{/*
+Configuration template definition
+*/}}
62 charts/pgcat/templates/_helpers.tpl Normal file
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "pgcat.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "pgcat.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "pgcat.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "pgcat.labels" -}}
+helm.sh/chart: {{ include "pgcat.chart" . }}
+{{ include "pgcat.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "pgcat.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "pgcat.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "pgcat.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "pgcat.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
66 charts/pgcat/templates/deployment.yaml Normal file
@@ -0,0 +1,66 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "pgcat.fullname" . }}
+  labels:
+    {{- include "pgcat.labels" . | nindent 4 }}
+spec:
+  replicas: {{ .Values.replicaCount }}
+  selector:
+    matchLabels:
+      {{- include "pgcat.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      annotations:
+        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+      labels:
+        {{- include "pgcat.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.image.pullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "pgcat.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.containerSecurityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: pgcat
+              containerPort: {{ .Values.configuration.general.port }}
+              protocol: TCP
+          livenessProbe:
+            tcpSocket:
+              port: pgcat
+          readinessProbe:
+            tcpSocket:
+              port: pgcat
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            - mountPath: /etc/pgcat
+              name: config
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      volumes:
+        - secret:
+            defaultMode: 420
+            secretName: {{ include "pgcat.fullname" . }}
+          name: config
61 charts/pgcat/templates/ingress.yaml Normal file
@@ -0,0 +1,61 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "pgcat.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "pgcat.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
97
charts/pgcat/templates/secret.yaml
Normal file
97
charts/pgcat/templates/secret.yaml
Normal file
@@ -0,0 +1,97 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
type: Opaque
stringData:
  pgcat.toml: |
    [general]
    host = {{ .Values.configuration.general.host | quote }}
    port = {{ .Values.configuration.general.port }}
    enable_prometheus_exporter = {{ .Values.configuration.general.enable_prometheus_exporter }}
    prometheus_exporter_port = {{ .Values.configuration.general.prometheus_exporter_port }}
    connect_timeout = {{ .Values.configuration.general.connect_timeout }}
    idle_timeout = {{ .Values.configuration.general.idle_timeout | int }}
    server_lifetime = {{ .Values.configuration.general.server_lifetime | int }}
    server_tls = {{ .Values.configuration.general.server_tls }}
    idle_client_in_transaction_timeout = {{ .Values.configuration.general.idle_client_in_transaction_timeout | int }}
    healthcheck_timeout = {{ .Values.configuration.general.healthcheck_timeout }}
    healthcheck_delay = {{ .Values.configuration.general.healthcheck_delay }}
    shutdown_timeout = {{ .Values.configuration.general.shutdown_timeout }}
    ban_time = {{ .Values.configuration.general.ban_time }}
    log_client_connections = {{ .Values.configuration.general.log_client_connections }}
    log_client_disconnections = {{ .Values.configuration.general.log_client_disconnections }}
    tcp_keepalives_idle = {{ .Values.configuration.general.tcp_keepalives_idle }}
    tcp_keepalives_count = {{ .Values.configuration.general.tcp_keepalives_count }}
    tcp_keepalives_interval = {{ .Values.configuration.general.tcp_keepalives_interval }}
    {{- if and (ne .Values.configuration.general.tls_certificate "-") (ne .Values.configuration.general.tls_private_key "-") }}
    tls_certificate = "{{ .Values.configuration.general.tls_certificate }}"
    tls_private_key = "{{ .Values.configuration.general.tls_private_key }}"
    {{- end }}
    admin_username = {{ .Values.configuration.general.admin_username | quote }}
    admin_password = {{ .Values.configuration.general.admin_password | quote }}
    {{- if and .Values.configuration.general.auth_query_user .Values.configuration.general.auth_query_password .Values.configuration.general.auth_query }}
    auth_query = {{ .Values.configuration.general.auth_query | quote }}
    auth_query_user = {{ .Values.configuration.general.auth_query_user | quote }}
    auth_query_password = {{ .Values.configuration.general.auth_query_password | quote }}
    {{- end }}

    {{- range $pool := .Values.configuration.pools }}

    ##
    ## pool for {{ $pool.name }}
    ##
    [pools.{{ $pool.name | quote }}]
    pool_mode = {{ default "transaction" $pool.pool_mode | quote }}
    load_balancing_mode = {{ default "random" $pool.load_balancing_mode | quote }}
    default_role = {{ default "any" $pool.default_role | quote }}
    prepared_statements_cache_size = {{ default 500 $pool.prepared_statements_cache_size }}
    query_parser_enabled = {{ default true $pool.query_parser_enabled }}
    query_parser_read_write_splitting = {{ default true $pool.query_parser_read_write_splitting }}
    primary_reads_enabled = {{ default true $pool.primary_reads_enabled }}
    sharding_function = {{ default "pg_bigint_hash" $pool.sharding_function | quote }}

    {{- range $index, $user := $pool.users }}

    ## pool {{ $pool.name }} user {{ $user.username | quote }}
    ##
    [pools.{{ $pool.name | quote }}.users.{{ $index }}]
    username = {{ $user.username | quote }}
    {{- if $user.password }}
    password = {{ $user.password | quote }}
    {{- else if and $user.passwordSecret.name $user.passwordSecret.key }}
    {{- $secret := (lookup "v1" "Secret" $.Release.Namespace $user.passwordSecret.name) }}
    {{- if $secret }}
    {{- $password := index $secret.data $user.passwordSecret.key | b64dec }}
    password = {{ $password | quote }}
    {{- end }}
    {{- end }}
    pool_size = {{ $user.pool_size }}
    statement_timeout = {{ default 0 $user.statement_timeout }}
    min_pool_size = {{ default 3 $user.min_pool_size }}
    {{- if $user.server_lifetime }}
    server_lifetime = {{ $user.server_lifetime }}
    {{- end }}
    {{- if and $user.server_username $user.server_password }}
    server_username = {{ $user.server_username | quote }}
    server_password = {{ $user.server_password | quote }}
    {{- end }}
    {{- end }}

    {{- range $index, $shard := $pool.shards }}

    ## pool {{ $pool.name }} database {{ $shard.database }}
    ##
    [pools.{{ $pool.name | quote }}.shards.{{ $index }}]
    {{- if gt (len $shard.servers) 0}}
    servers = [
    {{- range $server := $shard.servers }}
      [ {{ $server.host | quote }}, {{ $server.port }}, {{ $server.role | quote }} ],
    {{- end }}
    ]
    {{- end }}
    database = {{ $shard.database | quote }}
    {{- end }}
    {{- end }}
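The Secret renders a plain pgcat.toml into stringData, so any serde-based TOML loader can read it back. As a sanity check, here is a minimal, hypothetical sketch of deserializing a rendered fragment (assumes the toml and serde crates with the derive feature; the structs are illustrative, not pgcat's actual config types):

// Minimal sketch: deserialize a rendered pgcat.toml fragment with serde.
// Struct and field names are illustrative, not pgcat's real config structs.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct General {
    host: String,
    port: u16,
    admin_username: String,
}

#[derive(Debug, Deserialize)]
struct RenderedConfig {
    general: General,
}

fn main() {
    let rendered = r#"
        [general]
        host = "0.0.0.0"
        port = 6432
        admin_username = "postgres"
    "#;
    let cfg: RenderedConfig = toml::from_str(rendered).expect("valid TOML");
    println!("{:?}", cfg.general);
}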
15  charts/pgcat/templates/service.yaml  Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: pgcat
      protocol: TCP
      name: pgcat
  selector:
    {{- include "pgcat.selectorLabels" . | nindent 4 }}
12  charts/pgcat/templates/serviceaccount.yaml  Normal file
@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "pgcat.serviceAccountName" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}
374  charts/pgcat/values.yaml  Normal file
@@ -0,0 +1,374 @@
## String to partially override pgcat.fullname template (will maintain the release name)
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""

## String to fully override pgcat.fullname template
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""

## Number of PgCat replicas to deploy
## @param replicaCount Number of PgCat replicas to deploy
replicaCount: 1

## PgCat image version
## ref: https://hub.docker.com/r/bitnami/kubewatch/tags/
##
## @param image.registry PgCat image registry
## @param image.repository PgCat image name
## @param image.tag PgCat image tag
## @param image.pullPolicy PgCat image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
image:
  repository: ghcr.io/postgresml/pgcat
  # Overrides the image tag whose default is the chart appVersion.
  tag: "main"
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Example:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []

## Specifies whether a ServiceAccount should be created
##
## @param serviceAccount.create Enable the creation of a ServiceAccount for PgCat pods
## @param serviceAccount.name Name of the created ServiceAccount
##
serviceAccount:
  ## Specifies whether a service account should be created
  create: true
  ## Annotations to add to the service account
  annotations: {}
  ## The name of the service account to use.
  ## If not set and create is true, a name is generated using the fullname template
  name: ""

## Annotations for server pods.
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
## @param podAnnotations Annotations for PgCat pods
##
podAnnotations: {}

## PgCat pods' SecurityContext
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
## @param podSecurityContext.enabled Enabled PgCat pods' Security Context
## @param podSecurityContext.fsGroup Set PgCat pod's Security Context fsGroup
##
podSecurityContext: {}
  # fsGroup: 2000

## PgCat containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
##
## @param containerSecurityContext.enabled Enabled PgCat containers' Security Context
## @param containerSecurityContext.runAsUser Set PgCat container's Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set PgCat container's Security Context runAsNonRoot
##
containerSecurityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

## PgCat service
##
## @param service.type PgCat service type
## @param service.port PgCat service port
service:
  type: ClusterIP
  port: 6432

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

## PgCat resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## @skip resources Optional description
## @disabled-param resources.limits The resources limits for the PgCat container
## @disabled-param resources.requests The requested resources for the PgCat container
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
    # cpu: 100m
    # memory: 128Mi
  requests: {}
    # cpu: 100m
    # memory: 128Mi

## Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
## @param nodeSelector Node labels for pod assignment
##
nodeSelector: {}

## Tolerations for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
## @param tolerations Tolerations for pod assignment
##
tolerations: []

## Affinity for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
## @param affinity Affinity for pod assignment
##
affinity: {}

## PgCat configuration
## @param configuration [object]
configuration:
  ## General pooler settings
  ## @param [object]
  general:
    ## @param configuration.general.host What IP to run on, 0.0.0.0 means accessible from everywhere.
    host: "0.0.0.0"

    ## @param configuration.general.port Port to run on, same as PgBouncer used in this example.
    port: 6432

    ## @param configuration.general.enable_prometheus_exporter Whether to enable prometheus exporter or not.
    enable_prometheus_exporter: false

    ## @param configuration.general.prometheus_exporter_port Port at which prometheus exporter listens on.
    prometheus_exporter_port: 9930

    # @param configuration.general.connect_timeout How long to wait before aborting a server connection (ms).
    connect_timeout: 5000

    # How long an idle connection with a server is left open (ms).
    idle_timeout: 30000 # milliseconds

    # Max connection lifetime before it's closed, even if actively used.
    server_lifetime: 86400000 # 24 hours

    # Whether to use TLS for server connections or not.
    server_tls: false

    # How long a client is allowed to be idle while in a transaction (ms).
    idle_client_in_transaction_timeout: 0 # milliseconds

    # @param configuration.general.healthcheck_timeout How much time to give `SELECT 1` health check query to return with a result (ms).
    healthcheck_timeout: 1000

    # @param configuration.general.healthcheck_delay How long to keep connection available for immediate re-use, without running a healthcheck query on it
    healthcheck_delay: 30000

    # @param configuration.general.shutdown_timeout How much time to give clients during shutdown before forcibly killing client connections (ms).
    shutdown_timeout: 60000

    # @param configuration.general.ban_time For how long to ban a server if it fails a health check (seconds).
    ban_time: 60 # seconds

    # @param configuration.general.log_client_connections If we should log client connections
    log_client_connections: false

    # @param configuration.general.log_client_disconnections If we should log client disconnections
    log_client_disconnections: false

    # TLS
    # tls_certificate: "server.cert"
    # tls_private_key: "server.key"
    tls_certificate: "-"
    tls_private_key: "-"

    # Credentials to access the virtual administrative database (pgbouncer or pgcat)
    # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
    admin_username: "postgres"
    admin_password: "postgres"

    # Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
    # established using the database configured in the pool. This parameter is inherited by every pool and
    # can be redefined in pool configuration.
    auth_query: null

    # User to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
    #
    # @param configuration.general.auth_query_user
    auth_query_user: null

    # Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
    #
    # @param configuration.general.auth_query_password
    auth_query_password: null

    # Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
    tcp_keepalives_idle: 5

    # Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
    tcp_keepalives_count: 5

    # Number of seconds between keepalive packets.
    tcp_keepalives_interval: 5

  ## pool
  ## configs are structured as pool.<pool_name>
  ## the pool_name is what clients use as database name when connecting
  ## For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded"
  ## @param [object]
  pools:
    [{
      name: "simple", pool_mode: "transaction",
      users: [{username: "user", password: "pass", pool_size: 5, statement_timeout: 0}],
      shards: [{
        servers: [{host: "postgres", port: 5432, role: "primary"}],
        database: "postgres"
      }]
    }]
    # - ## default values
    #   ##
    #   ##
    #   ##
    #   name: "db"

    #   ## Pool mode (see PgBouncer docs for more).
    #   ## session: one server connection per connected client
    #   ## transaction: one server connection per client transaction
    #   ## @param configuration.poolsPostgres.pool_mode
    #   pool_mode: "transaction"

    #   ## Load balancing mode
    #   ## `random` selects the server at random
    #   ## `loc` selects the server with the least outstanding busy connections
    #   ##
    #   ## @param configuration.poolsPostgres.load_balancing_mode
    #   load_balancing_mode: "random"

    #   ## Prepared statements cache size.
    #   ## TODO: update documentation
    #   ##
    #   ## @param configuration.poolsPostgres.prepared_statements_cache_size
    #   prepared_statements_cache_size: 500

    #   ## If the client doesn't specify, route traffic to
    #   ## this role by default.
    #   ##
    #   ## any: round-robin between primary and replicas,
    #   ## replica: round-robin between replicas only without touching the primary,
    #   ## primary: all queries go to the primary unless otherwise specified.
    #   ## @param configuration.poolsPostgres.default_role
    #   default_role: "any"

    #   ## Query parser. If enabled, we'll attempt to parse
    #   ## every incoming query to determine if it's a read or a write.
    #   ## If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
    #   ## we'll direct it to the primary.
    #   ## @param configuration.poolsPostgres.query_parser_enabled
    #   query_parser_enabled: true

    #   ## If the query parser is enabled and this setting is enabled, we'll attempt to
    #   ## infer the role from the query itself.
    #   ## @param configuration.poolsPostgres.query_parser_read_write_splitting
    #   query_parser_read_write_splitting: true

    #   ## If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
    #   ## load balancing of read queries. Otherwise, the primary will only be used for write
    #   ## queries. The primary can always be explicitly selected with our custom protocol.
    #   ## @param configuration.poolsPostgres.primary_reads_enabled
    #   primary_reads_enabled: true

    #   ## So what if you wanted to implement a different hashing function,
    #   ## or you've already built one and you want this pooler to use it?
    #   ##
    #   ## Current options:
    #   ##
    #   ## pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
    #   ## sha1: A hashing function based on SHA1
    #   ##
    #   ## @param configuration.poolsPostgres.sharding_function
    #   sharding_function: "pg_bigint_hash"

    #   ## Credentials for users that may connect to this cluster
    #   ## @param users [array]
    #   ## @param users[0].username Username (required)
    #   ## @param users[0].password Password (required); leave empty to use an existing secret, see passwordSecret.name and passwordSecret.key
    #   ## @param users[0].passwordSecret.name Name of the secret containing the password
    #   ## @param users[0].passwordSecret.key Key in the secret containing the password
    #   ## @param users[0].pool_size Maximum number of server connections that can be established for this user
    #   ## @param users[0].statement_timeout Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
    #   users: []
    #   # - username: "user"
    #   #   password: "pass"
    #   #
    #   #   # The maximum number of connections from a single PgCat process to any database in the cluster
    #   #   # is the sum of pool_size across all users.
    #   #   pool_size: 9
    #   #
    #   #   # Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
    #   #   statement_timeout: 0
    #   #
    #   #   # PostgreSQL username used to connect to the server.
    #   #   server_username: "postgres"
    #   #
    #   #   # PostgreSQL password used to connect to the server.
    #   #   server_password: "postgres"

    #   ## @param shards [array]
    #   ## @param shards[0].server[0].host Host for this shard
    #   ## @param shards[0].server[0].port Port for this shard
    #   ## @param shards[0].server[0].role Role for this shard
    #   shards: []
    #   # [ host, port, role ]
    #   # - servers:
    #   #   - host: "postgres"
    #   #     port: 5432
    #   #     role: "primary"
    #   #   - host: "postgres"
    #   #     port: 5432
    #   #     role: "replica"
    #   #   database: "postgres"
    #   # # [ host, port, role ]
    #   # - servers:
    #   #   - host: "postgres"
    #   #     port: 5432
    #   #     role: "primary"
    #   #   - host: "postgres"
    #   #     port: 5432
    #   #     role: "replica"
    #   #   database: "postgres"
    #   # # [ host, port, role ]
    #   # - servers:
    #   #   - host: "postgres"
    #   #     port: 5432
    #   #     role: "primary"
    #   #   - host: "postgres"
    #   #     port: 5432
    #   #     role: "replica"
    #   #   database: "postgres"
5  ct.yaml  Normal file
@@ -0,0 +1,5 @@
remote: origin
target-branch: main
chart-dirs:
  - charts
@@ -1,6 +1,8 @@
-FROM rust:1.70-bullseye
+FROM rust:bullseye
 
 # Dependencies
+COPY --from=sclevine/yj /bin/yj /bin/yj
+RUN /bin/yj -h
 RUN apt-get update -y \
     && apt-get install -y \
     llvm-11 psmisc postgresql-contrib postgresql-client \
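The new layers vendor the yj binary from the sclevine/yj image and smoke-test it with `yj -h`; the CI scripts use it to convert between config formats. For intuition only, a rough Rust analogue of a TOML-to-JSON conversion (assumes the toml and serde_json crates; this is not how yj itself is implemented):

// Hypothetical sketch: convert a TOML document to JSON, similar in spirit
// to what the yj utility does. Assumes the toml and serde_json crates.
fn toml_to_json(input: &str) -> Result<String, Box<dyn std::error::Error>> {
    let value: toml::Value = toml::from_str(input)?;
    Ok(serde_json::to_string_pretty(&value)?)
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let json = toml_to_json("port = 6432\n")?;
    println!("{json}"); // { "port": 6432 }
    Ok(())
}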
2124  grafana_dashboard.json  Normal file
(File diff suppressed because it is too large.)
@@ -11,6 +11,7 @@ RestartSec=1
 Environment=RUST_LOG=info
 LimitNOFILE=65536
 ExecStart=/usr/bin/pgcat /etc/pgcat.toml
+ExecReload=/bin/kill -SIGHUP $MAINPID
 
 [Install]
 WantedBy=multi-user.target
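The added ExecReload line maps `systemctl reload pgcat` onto a SIGHUP to the main process, which the pooler treats as a configuration-reload signal. A minimal sketch of the general SIGHUP-handling pattern in a tokio daemon (assumes tokio with the signal feature enabled; this mirrors the pattern, not pgcat's actual reload loop):

// Sketch: react to SIGHUP in a tokio-based daemon (general pattern only).
use tokio::signal::unix::{signal, SignalKind};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Register interest in SIGHUP, the signal systemd sends on `systemctl reload`.
    let mut hup = signal(SignalKind::hangup())?;
    while hup.recv().await.is_some() {
        // A real daemon would re-read and re-apply its config file here.
        println!("SIGHUP received: reloading configuration");
    }
    Ok(())
}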
14  pgcat.toml
@@ -60,12 +60,6 @@ tcp_keepalives_count = 5
 # Number of seconds between keepalive packets.
 tcp_keepalives_interval = 5
 
-# Handle prepared statements.
-prepared_statements = true
-
-# Prepared statements server cache size.
-prepared_statements_cache_size = 500
-
 # Path to TLS Certificate file to use for TLS connections
 # tls_certificate = ".circleci/server.cert"
 # Path to TLS private key file to use for TLS connections
@@ -156,6 +150,10 @@ load_balancing_mode = "random"
 # `primary` all queries go to the primary unless otherwise specified.
 default_role = "any"
 
+# Prepared statements cache size.
+# TODO: update documentation
+prepared_statements_cache_size = 500
+
 # If Query Parser is enabled, we'll attempt to parse
 # every incoming query to determine if it's a read or a write.
 # If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
@@ -181,7 +179,7 @@ primary_reads_enabled = true
 # `random`: picks a shard at random
 # `random_healthy`: picks a shard at random favoring shards with the least number of recent errors
 # `shard_<number>`: e.g. shard_0, shard_4, etc. picks a specific shard, everytime
-# no_shard_specified_behavior = "shard_0"
+# default_shard = "shard_0"
 
 # So what if you wanted to implement a different hashing function,
 # or you've already built one and you want this pooler to use it?
@@ -303,6 +301,8 @@ username = "other_user"
 password = "other_user"
 pool_size = 21
 statement_timeout = 15000
+connect_timeout = 1000
+idle_timeout = 1000
 
 # Shard configs are structured as pool.<pool_name>.shards.<shard_id>
 # Each shard config contains a list of servers that make up the shard
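prepared_statements_cache_size now lives at the pool level and bounds how many prepared statements each server connection keeps; the prepare_cache_eviction counter added in src/admin.rs below reflects entries pushed out when that bound is hit. A toy sketch of a size-bounded cache with FIFO eviction (illustrative only, not pgcat's implementation):

// Toy sketch of a size-bounded prepared-statement cache with FIFO eviction.
// Illustrative only; not pgcat's actual implementation. Capacity must be
// at least 1 in this sketch.
use std::collections::{HashMap, VecDeque};

struct StatementCache {
    capacity: usize,
    order: VecDeque<String>,
    statements: HashMap<String, String>, // statement name -> SQL text
}

impl StatementCache {
    fn new(capacity: usize) -> Self {
        Self { capacity, order: VecDeque::new(), statements: HashMap::new() }
    }

    /// Insert a statement, evicting the oldest entry when full.
    /// Returns the name of the evicted statement, if any.
    fn insert(&mut self, name: String, sql: String) -> Option<String> {
        let mut evicted = None;
        if self.statements.len() >= self.capacity && !self.statements.contains_key(&name) {
            if let Some(old) = self.order.pop_front() {
                self.statements.remove(&old);
                evicted = Some(old);
            }
        }
        if self.statements.insert(name.clone(), sql).is_none() {
            self.order.push_back(name);
        }
        evicted
    }
}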
4  postinst
@@ -7,3 +7,7 @@ systemctl enable pgcat
 if ! id pgcat 2> /dev/null; then
     useradd -s /usr/bin/false pgcat
 fi
+
+if [ -f /etc/pgcat.toml ]; then
+    systemctl start pgcat
+fi
31  src/admin.rs
@@ -55,7 +55,12 @@ where
 
     let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();
 
-    match query_parts[0].to_ascii_uppercase().as_str() {
+    match query_parts
+        .first()
+        .unwrap_or(&"")
+        .to_ascii_uppercase()
+        .as_str()
+    {
         "BAN" => {
             trace!("BAN");
             ban(stream, query_parts).await
@@ -84,7 +89,12 @@ where
             trace!("SHUTDOWN");
             shutdown(stream).await
         }
-        "SHOW" => match query_parts[1].to_ascii_uppercase().as_str() {
+        "SHOW" => match query_parts
+            .get(1)
+            .unwrap_or(&"")
+            .to_ascii_uppercase()
+            .as_str()
+        {
             "HELP" => {
                 trace!("SHOW HELP");
                 show_help(stream).await
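Both rewrites replace direct indexing (`query_parts[0]`, `query_parts[1]`), which panics when the admin query is empty or a bare `SHOW`, with `.first()`/`.get(1)` plus a default. A minimal sketch of the pattern:

// Sketch of the panic-avoidance pattern used above.
fn first_token(parts: &[&str]) -> String {
    // parts[0] would panic if the slice is empty, e.g. for a query of ";".
    parts.first().unwrap_or(&"").to_ascii_uppercase()
}

fn main() {
    assert_eq!(first_token(&["show", "pools"]), "SHOW");
    assert_eq!(first_token(&[]), ""); // no panic on empty input
}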
@@ -283,7 +293,7 @@ where
 {
     let mut res = BytesMut::new();
 
-    let detail_msg = vec![
+    let detail_msg = [
         "",
         "SHOW HELP|CONFIG|DATABASES|POOLS|CLIENTS|SERVERS|USERS|VERSION",
         // "SHOW PEERS|PEER_POOLS", // missing PEERS|PEER_POOLS
@@ -301,7 +311,6 @@ where
         // "KILL <db>",
         // "SUSPEND",
         "SHUTDOWN",
-        // "WAIT_CLOSE [<db>]", // missing
     ];
 
     res.put(notify("Console usage", detail_msg.join("\n\t")));
@@ -691,6 +700,8 @@ where
         ("query_count", DataType::Numeric),
         ("error_count", DataType::Numeric),
         ("age_seconds", DataType::Numeric),
+        ("maxwait", DataType::Numeric),
+        ("maxwait_us", DataType::Numeric),
     ];
 
     let new_map = get_client_stats();
@@ -698,6 +709,7 @@ where
     res.put(row_description(&columns));
 
     for (_, client) in new_map {
+        let max_wait = client.max_wait_time.load(Ordering::Relaxed);
         let row = vec![
             format!("{:#010X}", client.client_id()),
             client.pool_name(),
@@ -711,6 +723,8 @@ where
                 .duration_since(client.connect_time())
                 .as_secs()
                 .to_string(),
+            (max_wait / 1_000_000).to_string(),
+            (max_wait % 1_000_000).to_string(),
         ];
 
         res.put(data_row(&row));
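The two new columns split the microsecond counter into whole seconds (maxwait) and the microsecond remainder (maxwait_us) with integer division and modulo:

// Sketch: split a microsecond counter into (seconds, leftover microseconds),
// matching the maxwait / maxwait_us columns above.
fn split_micros(max_wait_us: u64) -> (u64, u64) {
    (max_wait_us / 1_000_000, max_wait_us % 1_000_000)
}

fn main() {
    assert_eq!(split_micros(2_500_000), (2, 500_000)); // 2.5s -> 2s + 500000us
}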
@@ -745,6 +759,7 @@ where
         ("age_seconds", DataType::Numeric),
         ("prepare_cache_hit", DataType::Numeric),
         ("prepare_cache_miss", DataType::Numeric),
+        ("prepare_cache_eviction", DataType::Numeric),
         ("prepare_cache_size", DataType::Numeric),
     ];
 
@@ -777,6 +792,10 @@ where
                 .prepared_miss_count
                 .load(Ordering::Relaxed)
                 .to_string(),
+            server
+                .prepared_eviction_count
+                .load(Ordering::Relaxed)
+                .to_string(),
             server
                 .prepared_cache_size
                 .load(Ordering::Relaxed)
@@ -802,7 +821,7 @@ where
     T: tokio::io::AsyncWrite + std::marker::Unpin,
 {
     let parts: Vec<&str> = match tokens.len() == 2 {
-        true => tokens[1].split(",").map(|part| part.trim()).collect(),
+        true => tokens[1].split(',').map(|part| part.trim()).collect(),
         false => Vec::new(),
     };
 
@@ -865,7 +884,7 @@ where
     T: tokio::io::AsyncWrite + std::marker::Unpin,
 {
     let parts: Vec<&str> = match tokens.len() == 2 {
-        true => tokens[1].split(",").map(|part| part.trim()).collect(),
+        true => tokens[1].split(',').map(|part| part.trim()).collect(),
         false => Vec::new(),
     };
 
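Both hunks switch `split(",")` to `split(',')`: a char pattern is equivalent here but is the form clippy's single_char_pattern lint prefers, since single-character matching is cheaper. For example:

// Both forms produce the same tokens; the char pattern satisfies clippy.
fn main() {
    let tokens: Vec<&str> = "a, b ,c".split(',').map(|part| part.trim()).collect();
    assert_eq!(tokens, vec!["a", "b", "c"]);
}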
@@ -1,3 +1,4 @@
+use crate::config::AuthType;
 use crate::errors::Error;
 use crate::pool::ConnectionPool;
 use crate::server::Server;
@@ -71,6 +72,7 @@ impl AuthPassthrough {
     pub async fn fetch_hash(&self, address: &crate::config::Address) -> Result<String, Error> {
         let auth_user = crate::config::User {
             username: self.user.clone(),
+            auth_type: AuthType::MD5,
             password: Some(self.password.clone()),
             server_username: None,
             server_password: None,
@@ -79,6 +81,8 @@ impl AuthPassthrough {
             pool_mode: None,
             server_lifetime: None,
             min_pool_size: None,
+            connect_timeout: None,
+            idle_timeout: None,
         };
 
         let user = &address.username;
1199  src/client.rs
(File diff suppressed because it is too large.)
@@ -25,7 +25,7 @@ pub struct Args {
 }
 
 pub fn parse() -> Args {
-    return Args::parse();
+    Args::parse()
 }
 
 #[derive(ValueEnum, Clone, Debug)]
322  src/config.rs
@@ -38,12 +38,12 @@ pub enum Role {
     Mirror,
 }
 
-impl ToString for Role {
-    fn to_string(&self) -> String {
-        match *self {
-            Role::Primary => "primary".to_string(),
-            Role::Replica => "replica".to_string(),
-            Role::Mirror => "mirror".to_string(),
+impl std::fmt::Display for Role {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            Role::Primary => write!(f, "primary"),
+            Role::Replica => write!(f, "replica"),
+            Role::Mirror => write!(f, "mirror"),
         }
     }
 }
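Moving from a manual ToString impl to std::fmt::Display is the idiomatic fix: the standard library has a blanket `impl<T: Display> ToString for T`, so `.to_string()` keeps working and `{}` formatting becomes available. A standalone sketch:

// Sketch: implementing Display gives ToString for free via the blanket impl.
use std::fmt;

enum Role { Primary, Replica }

impl fmt::Display for Role {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Role::Primary => write!(f, "primary"),
            Role::Replica => write!(f, "replica"),
        }
    }
}

fn main() {
    assert_eq!(Role::Primary.to_string(), "primary"); // via blanket ToString
    println!("{}", Role::Replica); // and {} formatting now works too
}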
@@ -116,10 +116,10 @@ impl Default for Address {
             host: String::from("127.0.0.1"),
             port: 5432,
             shard: 0,
-            address_index: 0,
-            replica_number: 0,
             database: String::from("database"),
             role: Role::Replica,
+            replica_number: 0,
+            address_index: 0,
             username: String::from("username"),
             pool_name: String::from("pool_name"),
             mirrors: Vec::new(),
@@ -208,6 +208,9 @@ impl Address {
 pub struct User {
     pub username: String,
     pub password: Option<String>,
+
+    #[serde(default = "User::default_auth_type")]
+    pub auth_type: AuthType,
     pub server_username: Option<String>,
     pub server_password: Option<String>,
     pub pool_size: u32,
@@ -216,6 +219,8 @@ pub struct User {
     pub server_lifetime: Option<u64>,
     #[serde(default)] // 0
     pub statement_timeout: u64,
+    pub connect_timeout: Option<u64>,
+    pub idle_timeout: Option<u64>,
 }
 
 impl Default for User {
@@ -223,6 +228,7 @@ impl Default for User {
         User {
             username: String::from("postgres"),
             password: None,
+            auth_type: AuthType::MD5,
             server_username: None,
             server_password: None,
             pool_size: 15,
@@ -230,24 +236,26 @@ impl Default for User {
             statement_timeout: 0,
             pool_mode: None,
             server_lifetime: None,
+            connect_timeout: None,
+            idle_timeout: None,
         }
     }
 }
 
 impl User {
-    fn validate(&self) -> Result<(), Error> {
-        match self.min_pool_size {
-            Some(min_pool_size) => {
-                if min_pool_size > self.pool_size {
-                    error!(
-                        "min_pool_size of {} cannot be larger than pool_size of {}",
-                        min_pool_size, self.pool_size
-                    );
-                    return Err(Error::BadConfig);
-                }
-            }
-
-            None => (),
+    pub fn default_auth_type() -> AuthType {
+        AuthType::MD5
+    }
+
+    fn validate(&self) -> Result<(), Error> {
+        if let Some(min_pool_size) = self.min_pool_size {
+            if min_pool_size > self.pool_size {
+                error!(
+                    "min_pool_size of {} cannot be larger than pool_size of {}",
+                    min_pool_size, self.pool_size
+                );
+                return Err(Error::BadConfig);
+            }
         };
 
         Ok(())
@@ -334,6 +342,9 @@ pub struct General {
     pub admin_username: String,
     pub admin_password: String,
+
+    #[serde(default = "General::default_admin_auth_type")]
+    pub admin_auth_type: AuthType,
 
     #[serde(default = "General::default_validate_config")]
     pub validate_config: bool,
 
@@ -341,12 +352,6 @@ pub struct General {
     pub auth_query: Option<String>,
     pub auth_query_user: Option<String>,
     pub auth_query_password: Option<String>,
-
-    #[serde(default)]
-    pub prepared_statements: bool,
-
-    #[serde(default = "General::default_prepared_statements_cache_size")]
-    pub prepared_statements_cache_size: usize,
 }
 
 impl General {
@@ -354,6 +359,10 @@ impl General {
         "0.0.0.0".into()
     }
 
+    pub fn default_admin_auth_type() -> AuthType {
+        AuthType::MD5
+    }
+
     pub fn default_port() -> u16 {
         5432
     }
@@ -428,10 +437,6 @@ impl General {
     pub fn default_server_round_robin() -> bool {
         true
     }
-
-    pub fn default_prepared_statements_cache_size() -> usize {
-        500
-    }
 }
 
 impl Default for General {
@@ -443,35 +448,34 @@ impl Default for General {
             prometheus_exporter_port: 9930,
             connect_timeout: General::default_connect_timeout(),
             idle_timeout: General::default_idle_timeout(),
-            shutdown_timeout: Self::default_shutdown_timeout(),
-            healthcheck_timeout: Self::default_healthcheck_timeout(),
-            healthcheck_delay: Self::default_healthcheck_delay(),
-            ban_time: Self::default_ban_time(),
-            worker_threads: Self::default_worker_threads(),
-            idle_client_in_transaction_timeout: Self::default_idle_client_in_transaction_timeout(),
             tcp_keepalives_idle: Self::default_tcp_keepalives_idle(),
             tcp_keepalives_count: Self::default_tcp_keepalives_count(),
             tcp_keepalives_interval: Self::default_tcp_keepalives_interval(),
             tcp_user_timeout: Self::default_tcp_user_timeout(),
             log_client_connections: false,
             log_client_disconnections: false,
-            autoreload: None,
             dns_cache_enabled: false,
             dns_max_ttl: Self::default_dns_max_ttl(),
+            shutdown_timeout: Self::default_shutdown_timeout(),
+            healthcheck_timeout: Self::default_healthcheck_timeout(),
+            healthcheck_delay: Self::default_healthcheck_delay(),
+            ban_time: Self::default_ban_time(),
+            idle_client_in_transaction_timeout: Self::default_idle_client_in_transaction_timeout(),
+            server_lifetime: Self::default_server_lifetime(),
+            server_round_robin: Self::default_server_round_robin(),
+            worker_threads: Self::default_worker_threads(),
+            autoreload: None,
             tls_certificate: None,
             tls_private_key: None,
             server_tls: false,
             verify_server_certificate: false,
             admin_username: String::from("admin"),
             admin_password: String::from("admin"),
+            admin_auth_type: AuthType::MD5,
+            validate_config: true,
             auth_query: None,
             auth_query_user: None,
             auth_query_password: None,
-            server_lifetime: Self::default_server_lifetime(),
-            server_round_robin: Self::default_server_round_robin(),
-            validate_config: true,
-            prepared_statements: false,
-            prepared_statements_cache_size: 500,
         }
     }
 }
@@ -488,11 +492,20 @@ pub enum PoolMode {
     Session,
 }
 
-impl ToString for PoolMode {
-    fn to_string(&self) -> String {
-        match *self {
-            PoolMode::Transaction => "transaction".to_string(),
-            PoolMode::Session => "session".to_string(),
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Copy, Hash)]
+pub enum AuthType {
+    #[serde(alias = "trust", alias = "Trust")]
+    Trust,
+
+    #[serde(alias = "md5", alias = "MD5")]
+    MD5,
+}
+
+impl std::fmt::Display for PoolMode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            PoolMode::Transaction => write!(f, "transaction"),
+            PoolMode::Session => write!(f, "session"),
         }
     }
 }
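The serde aliases let config files spell the auth type either way. A sketch of how the aliases behave when deserializing TOML (assumes the toml crate and serde with the derive feature):

// Sketch: serde aliases accept multiple spellings when deserializing config.
use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
enum AuthType {
    #[serde(alias = "trust", alias = "Trust")]
    Trust,
    #[serde(alias = "md5", alias = "MD5")]
    MD5,
}

#[derive(Deserialize)]
struct UserCfg {
    auth_type: AuthType,
}

fn main() {
    let cfg: UserCfg = toml::from_str(r#"auth_type = "trust""#).unwrap();
    assert_eq!(cfg.auth_type, AuthType::Trust);
}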
@@ -505,12 +518,13 @@ pub enum LoadBalancingMode {
     #[serde(alias = "loc", alias = "LOC", alias = "least_outstanding_connections")]
     LeastOutstandingConnections,
 }
-impl ToString for LoadBalancingMode {
-    fn to_string(&self) -> String {
-        match *self {
-            LoadBalancingMode::Random => "random".to_string(),
+
+impl std::fmt::Display for LoadBalancingMode {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            LoadBalancingMode::Random => write!(f, "random"),
             LoadBalancingMode::LeastOutstandingConnections => {
-                "least_outstanding_connections".to_string()
+                write!(f, "least_outstanding_connections")
             }
         }
     }
@@ -527,6 +541,9 @@ pub struct Pool {
     #[serde(default = "Pool::default_default_role")]
     pub default_role: String,
 
+    #[serde(default)] // False
+    pub replica_to_primary_failover_enabled: bool,
+
     #[serde(default)] // False
     pub query_parser_enabled: bool,
 
@@ -572,6 +589,9 @@ pub struct Pool {
     #[serde(default)] // False
     pub log_client_parameter_status_changes: bool,
 
+    #[serde(default = "Pool::default_prepared_statements_cache_size")]
+    pub prepared_statements_cache_size: usize,
+
     pub plugins: Option<Plugins>,
     pub shards: BTreeMap<String, Shard>,
     pub users: BTreeMap<String, User>,
@@ -621,6 +641,10 @@ impl Pool {
         true
     }
 
+    pub fn default_prepared_statements_cache_size() -> usize {
+        0
+    }
+
     pub fn validate(&mut self) -> Result<(), Error> {
         match self.default_role.as_ref() {
             "any" => (),
@@ -677,9 +701,9 @@ impl Pool {
             Some(key) => {
                 // No quotes in the key so we don't have to compare quoted
                 // to unquoted idents.
-                let key = key.replace("\"", "");
+                let key = key.replace('\"', "");
 
-                if key.split(".").count() != 2 {
+                if key.split('.').count() != 2 {
                     error!(
                         "automatic_sharding_key '{}' must be fully qualified, e.g. t.{}`",
                         key, key
@@ -692,17 +716,14 @@ impl Pool {
             None => None,
         };
 
-        match self.default_shard {
-            DefaultShard::Shard(shard_number) => {
-                if shard_number >= self.shards.len() {
-                    error!("Invalid shard {:?}", shard_number);
-                    return Err(Error::BadConfig);
-                }
+        if let DefaultShard::Shard(shard_number) = self.default_shard {
+            if shard_number >= self.shards.len() {
+                error!("Invalid shard {:?}", shard_number);
+                return Err(Error::BadConfig);
             }
-            _ => (),
         }
 
-        for (_, user) in &self.users {
+        for user in self.users.values() {
             user.validate()?;
         }
 
@@ -715,17 +736,17 @@ impl Default for Pool {
         Pool {
             pool_mode: Self::default_pool_mode(),
             load_balancing_mode: Self::default_load_balancing_mode(),
-            shards: BTreeMap::from([(String::from("1"), Shard::default())]),
-            users: BTreeMap::default(),
             default_role: String::from("any"),
+            replica_to_primary_failover_enabled: false,
             query_parser_enabled: false,
             query_parser_max_length: None,
             query_parser_read_write_splitting: false,
             primary_reads_enabled: false,
-            sharding_function: ShardingFunction::PgBigintHash,
-            automatic_sharding_key: None,
             connect_timeout: None,
             idle_timeout: None,
+            server_lifetime: None,
+            sharding_function: ShardingFunction::PgBigintHash,
+            automatic_sharding_key: None,
             sharding_key_regex: None,
             shard_id_regex: None,
             regex_search_limit: Some(1000),
@@ -733,10 +754,12 @@ impl Default for Pool {
             auth_query: None,
             auth_query_user: None,
             auth_query_password: None,
-            server_lifetime: None,
-            plugins: None,
             cleanup_server_connections: true,
             log_client_parameter_status_changes: false,
+            prepared_statements_cache_size: Self::default_prepared_statements_cache_size(),
+            plugins: None,
+            shards: BTreeMap::from([(String::from("1"), Shard::default())]),
+            users: BTreeMap::default(),
         }
     }
 }
@@ -777,8 +800,8 @@ impl<'de> serde::Deserialize<'de> for DefaultShard {
         D: Deserializer<'de>,
     {
         let s = String::deserialize(deserializer)?;
-        if s.starts_with("shard_") {
-            let shard = s[6..].parse::<usize>().map_err(serde::de::Error::custom)?;
+        if let Some(s) = s.strip_prefix("shard_") {
+            let shard = s.parse::<usize>().map_err(serde::de::Error::custom)?;
             return Ok(DefaultShard::Shard(shard));
         }
 
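strip_prefix folds the starts_with check and the hard-coded `s[6..]` slice into one step, and returns None when the prefix is absent. A sketch of the pattern:

// Sketch of the strip_prefix pattern replacing starts_with + manual slicing.
fn parse_default_shard(s: &str) -> Option<usize> {
    // Old form: if s.starts_with("shard_") { s[6..].parse().ok() } else { None }
    s.strip_prefix("shard_")?.parse().ok()
}

fn main() {
    assert_eq!(parse_default_shard("shard_4"), Some(4));
    assert_eq!(parse_default_shard("random"), None);
}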
@@ -848,13 +871,13 @@ impl Shard {
 impl Default for Shard {
     fn default() -> Shard {
         Shard {
+            database: String::from("postgres"),
+            mirrors: None,
             servers: vec![ServerConfig {
                 host: String::from("localhost"),
                 port: 5432,
                 role: Role::Primary,
             }],
-            mirrors: None,
-            database: String::from("postgres"),
         }
     }
 }
@@ -867,15 +890,26 @@ pub struct Plugins {
     pub prewarmer: Option<Prewarmer>,
 }
 
+pub trait Plugin {
+    fn is_enabled(&self) -> bool;
+}
+
 impl std::fmt::Display for Plugins {
     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        fn is_enabled<T: Plugin>(arg: Option<&T>) -> bool {
+            if let Some(arg) = arg {
+                arg.is_enabled()
+            } else {
+                false
+            }
+        }
         write!(
             f,
             "interceptor: {}, table_access: {}, query_logger: {}, prewarmer: {}",
-            self.intercept.is_some(),
-            self.table_access.is_some(),
-            self.query_logger.is_some(),
-            self.prewarmer.is_some(),
+            is_enabled(self.intercept.as_ref()),
+            is_enabled(self.table_access.as_ref()),
+            is_enabled(self.query_logger.as_ref()),
+            is_enabled(self.prewarmer.as_ref()),
         )
     }
 }
@@ -886,23 +920,47 @@ pub struct Intercept {
     pub queries: BTreeMap<String, Query>,
 }
 
+impl Plugin for Intercept {
+    fn is_enabled(&self) -> bool {
+        self.enabled
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
 pub struct TableAccess {
     pub enabled: bool,
     pub tables: Vec<String>,
 }
 
+impl Plugin for TableAccess {
+    fn is_enabled(&self) -> bool {
+        self.enabled
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
 pub struct QueryLogger {
     pub enabled: bool,
 }
 
+impl Plugin for QueryLogger {
+    fn is_enabled(&self) -> bool {
+        self.enabled
+    }
+}
+
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
 pub struct Prewarmer {
     pub enabled: bool,
     pub queries: Vec<String>,
 }
 
+impl Plugin for Prewarmer {
+    fn is_enabled(&self) -> bool {
+        self.enabled
+    }
+}
+
 impl Intercept {
     pub fn substitute(&mut self, db: &str, user: &str) {
         for (_, query) in self.queries.iter_mut() {
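With the Plugin trait, the Display impl reports whether each plugin is enabled rather than merely configured. A condensed sketch of the pattern:

// Condensed sketch of the Plugin/is_enabled pattern introduced above.
trait Plugin {
    fn is_enabled(&self) -> bool;
}

struct QueryLogger { enabled: bool }

impl Plugin for QueryLogger {
    fn is_enabled(&self) -> bool {
        self.enabled
    }
}

// A configured-but-disabled plugin now reports false instead of true.
fn is_enabled<T: Plugin>(arg: Option<&T>) -> bool {
    arg.map_or(false, |p| p.is_enabled())
}

fn main() {
    let disabled = Some(QueryLogger { enabled: false });
    assert!(!is_enabled(disabled.as_ref())); // is_some() would have said true
    assert!(!is_enabled::<QueryLogger>(None));
}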
@@ -920,6 +978,7 @@ pub struct Query {
 }
 
 impl Query {
+    #[allow(clippy::needless_range_loop)]
     pub fn substitute(&mut self, db: &str, user: &str) {
         for col in self.result.iter_mut() {
             for i in 0..col.len() {
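The allow attribute keeps the index-based loop that clippy's needless_range_loop lint would otherwise flag; the lint's preferred shape iterates directly. A sketch of the iterator form (the `${db}` placeholder below is hypothetical, for illustration only):

// What clippy suggests instead of `for i in 0..col.len()`.
fn substitute(col: &mut [String], db: &str) {
    for item in col.iter_mut() {
        // The "${db}" token is a made-up placeholder for this sketch.
        *item = item.replace("${db}", db);
    }
}

fn main() {
    let mut col = vec![String::from("use ${db}")];
    substitute(&mut col, "postgres");
    assert_eq!(col[0], "use postgres");
}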
@@ -970,15 +1029,17 @@ impl Config {
     pub fn fill_up_auth_query_config(&mut self) {
         for (_name, pool) in self.pools.iter_mut() {
             if pool.auth_query.is_none() {
-                pool.auth_query = self.general.auth_query.clone();
+                pool.auth_query.clone_from(&self.general.auth_query);
             }
 
             if pool.auth_query_user.is_none() {
-                pool.auth_query_user = self.general.auth_query_user.clone();
+                pool.auth_query_user
+                    .clone_from(&self.general.auth_query_user);
             }
 
             if pool.auth_query_password.is_none() {
-                pool.auth_query_password = self.general.auth_query_password.clone();
+                pool.auth_query_password
+                    .clone_from(&self.general.auth_query_password);
             }
         }
     }
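The `clone_from` form is a common Clippy suggestion for `a = b.clone()`: it clones into the existing value, which for types like `String` and `Option<String>` can reuse the destination's allocation instead of building a fresh clone and dropping the old one. A generic illustration, unrelated to pgcat itself:

    let mut dst = String::from("previously allocated value");
    let src = String::from("new value");

    // Overwrites dst in place, reusing its buffer when capacity allows.
    dst.clone_from(&src);
    assert_eq!(dst, "new value");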
@@ -989,8 +1050,8 @@ impl Default for Config {
         Config {
             path: Self::default_path(),
             general: General::default(),
-            pools: HashMap::default(),
             plugins: None,
+            pools: HashMap::default(),
         }
     }
 }
@@ -1044,8 +1105,8 @@ impl From<&Config> for std::collections::HashMap<String, String> {
             (
                 format!("pools.{:?}.users", pool_name),
                 pool.users
-                    .iter()
-                    .map(|(_username, user)| &user.username)
+                    .values()
+                    .map(|user| &user.username)
                     .cloned()
                     .collect::<Vec<String>>()
                     .join(", "),
@@ -1099,6 +1160,7 @@ impl From<&Config> for std::collections::HashMap<String, String> {
 impl Config {
     /// Print current configuration.
     pub fn show(&self) {
+        info!("Config path: {}", self.path);
         info!("Ban time: {}s", self.general.ban_time);
         info!(
             "Idle client in transaction timeout: {}ms",
@@ -1125,18 +1187,14 @@ impl Config {
             "Default max server lifetime: {}ms",
             self.general.server_lifetime
         );
-        info!("Sever round robin: {}", self.general.server_round_robin);
+        info!("Server round robin: {}", self.general.server_round_robin);
         match self.general.tls_certificate.clone() {
             Some(tls_certificate) => {
                 info!("TLS certificate: {}", tls_certificate);
 
-                match self.general.tls_private_key.clone() {
-                    Some(tls_private_key) => {
-                        info!("TLS private key: {}", tls_private_key);
-                        info!("TLS support is enabled");
-                    }
-
-                    None => (),
-                }
+                if let Some(tls_private_key) = self.general.tls_private_key.clone() {
+                    info!("TLS private key: {}", tls_private_key);
+                    info!("TLS support is enabled");
+                }
             }
         }
 
@@ -1149,13 +1207,6 @@ impl Config {
             "Server TLS certificate verification: {}",
             self.general.verify_server_certificate
         );
-        info!("Prepared statements: {}", self.general.prepared_statements);
-        if self.general.prepared_statements {
-            info!(
-                "Prepared statements server cache size: {}",
-                self.general.prepared_statements_cache_size
-            );
-        }
         info!(
             "Plugins: {}",
             match self.plugins {
@@ -1171,8 +1222,8 @@ impl Config {
                 pool_name,
                 pool_config
                     .users
-                    .iter()
-                    .map(|(_, user_cfg)| user_cfg.pool_size)
+                    .values()
+                    .map(|user_cfg| user_cfg.pool_size)
                     .sum::<u32>()
                     .to_string()
             );
@@ -1246,6 +1297,10 @@ impl Config {
                 "[pool: {}] Log client parameter status changes: {}",
                 pool_name, pool_config.log_client_parameter_status_changes
             );
+            info!(
+                "[pool: {}] Prepared statements server cache size: {}",
+                pool_name, pool_config.prepared_statements_cache_size
+            );
             info!(
                 "[pool: {}] Plugins: {}",
                 pool_name,
@@ -1288,6 +1343,24 @@ impl Config {
                     None => "default".to_string(),
                 }
             );
+            info!(
+                "[pool: {}][user: {}] Connection timeout: {}",
+                pool_name,
+                user.1.username,
+                match user.1.connect_timeout {
+                    Some(connect_timeout) => format!("{}ms", connect_timeout),
+                    None => "not set".to_string(),
+                }
+            );
+            info!(
+                "[pool: {}][user: {}] Idle timeout: {}",
+                pool_name,
+                user.1.username,
+                match user.1.idle_timeout {
+                    Some(idle_timeout) => format!("{}ms", idle_timeout),
+                    None => "not set".to_string(),
+                }
+            );
         }
     }
 }
@@ -1342,34 +1415,31 @@ impl Config {
         }
 
         // Validate TLS!
-        match self.general.tls_certificate.clone() {
-            Some(tls_certificate) => {
-                match load_certs(Path::new(&tls_certificate)) {
-                    Ok(_) => {
-                        // Cert is okay, but what about the private key?
-                        match self.general.tls_private_key.clone() {
-                            Some(tls_private_key) => match load_keys(Path::new(&tls_private_key)) {
-                                Ok(_) => (),
-                                Err(err) => {
-                                    error!("tls_private_key is incorrectly configured: {:?}", err);
-                                    return Err(Error::BadConfig);
-                                }
-                            },
-                            None => {
-                                error!("tls_certificate is set, but the tls_private_key is not");
-                                return Err(Error::BadConfig);
-                            }
-                        };
-                    }
-                    Err(err) => {
-                        error!("tls_certificate is incorrectly configured: {:?}", err);
-                        return Err(Error::BadConfig);
-                    }
-                }
-            }
-            None => (),
-        };
+        if let Some(tls_certificate) = self.general.tls_certificate.clone() {
+            match load_certs(Path::new(&tls_certificate)) {
+                Ok(_) => {
+                    // Cert is okay, but what about the private key?
+                    match self.general.tls_private_key.clone() {
+                        Some(tls_private_key) => match load_keys(Path::new(&tls_private_key)) {
+                            Ok(_) => (),
+                            Err(err) => {
+                                error!("tls_private_key is incorrectly configured: {:?}", err);
+                                return Err(Error::BadConfig);
+                            }
+                        },
+                        None => {
+                            error!("tls_certificate is set, but the tls_private_key is not");
+                            return Err(Error::BadConfig);
+                        }
+                    };
+                }
+                Err(err) => {
+                    error!("tls_certificate is incorrectly configured: {:?}", err);
+                    return Err(Error::BadConfig);
+                }
+            }
+        };
 
         for pool in self.pools.values_mut() {
@@ -1391,14 +1461,6 @@ pub fn get_idle_client_in_transaction_timeout() -> u64 {
     CONFIG.load().general.idle_client_in_transaction_timeout
 }
 
-pub fn get_prepared_statements() -> bool {
-    CONFIG.load().general.prepared_statements
-}
-
-pub fn get_prepared_statements_cache_size() -> usize {
-    CONFIG.load().general.prepared_statements_cache_size
-}
-
 /// Parse the configuration file located at the path.
 pub async fn parse(path: &str) -> Result<(), Error> {
     let mut contents = String::new();
@@ -29,6 +29,7 @@ pub enum Error {
     QueryRouterParserError(String),
     QueryRouterError(String),
     InvalidShardId(usize),
+    PreparedStatementError,
 }
 
 #[derive(Clone, PartialEq, Debug)]
311  src/messages.rs
@@ -12,13 +12,16 @@ use crate::config::get_config;
 use crate::errors::Error;
 
 use crate::constants::MESSAGE_TERMINATOR;
+use std::collections::hash_map::DefaultHasher;
 use std::collections::HashMap;
 use std::ffi::CString;
 use std::fmt::{Display, Formatter};
+use std::hash::{Hash, Hasher};
 use std::io::{BufRead, Cursor};
 use std::mem;
 use std::str::FromStr;
 use std::sync::atomic::Ordering;
+use std::sync::Arc;
 use std::time::Duration;
 
 /// Postgres data type mappings
@@ -114,19 +117,11 @@ pub fn simple_query(query: &str) -> BytesMut {
 }
 
 /// Tell the client we're ready for another query.
-pub async fn ready_for_query<S>(stream: &mut S) -> Result<(), Error>
+pub async fn send_ready_for_query<S>(stream: &mut S) -> Result<(), Error>
 where
     S: tokio::io::AsyncWrite + std::marker::Unpin,
 {
-    let mut bytes = BytesMut::with_capacity(
-        mem::size_of::<u8>() + mem::size_of::<i32>() + mem::size_of::<u8>(),
-    );
-
-    bytes.put_u8(b'Z');
-    bytes.put_i32(5);
-    bytes.put_u8(b'I'); // Idle
-
-    write_all(stream, bytes).await
+    write_all(stream, ready_for_query(false)).await
 }
 
 /// Send the startup packet the server. We're pretending we're a Pg client.
@@ -163,12 +158,10 @@ where
 
     match stream.write_all(&startup).await {
         Ok(_) => Ok(()),
-        Err(err) => {
-            return Err(Error::SocketError(format!(
-                "Error writing startup to server socket - Error: {:?}",
-                err
-            )))
-        }
+        Err(err) => Err(Error::SocketError(format!(
+            "Error writing startup to server socket - Error: {:?}",
+            err
+        ))),
     }
 }
@@ -244,8 +237,8 @@ pub fn md5_hash_password(user: &str, password: &str, salt: &[u8]) -> Vec<u8> {
     let mut md5 = Md5::new();
 
     // First pass
-    md5.update(&password.as_bytes());
-    md5.update(&user.as_bytes());
+    md5.update(password.as_bytes());
+    md5.update(user.as_bytes());
 
     let output = md5.finalize_reset();
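For context, `md5_hash_password` implements the classic Postgres md5 scheme: the value sent on the wire is the string "md5" followed by hex(md5(hex(md5(password + user)) + salt)). A standalone sketch of the full computation, using the RustCrypto md-5 crate and the hex crate (not pgcat's exact code):

    use md5::{Digest, Md5};

    fn pg_md5_password(user: &str, password: &str, salt: &[u8]) -> String {
        // First pass: md5(password || user), hex-encoded.
        let first = hex::encode(Md5::digest(format!("{}{}", password, user)));

        // Second pass: md5(first_hex || salt), hex-encoded and prefixed.
        let mut hasher = Md5::new();
        hasher.update(first.as_bytes());
        hasher.update(salt);
        format!("md5{}", hex::encode(hasher.finalize()))
    }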
@@ -281,7 +274,7 @@ where
 {
     let password = md5_hash_password(user, password, salt);
 
-    let mut message = BytesMut::with_capacity(password.len() as usize + 5);
+    let mut message = BytesMut::with_capacity(password.len() + 5);
 
     message.put_u8(b'p');
     message.put_i32(password.len() as i32 + 4);
@@ -295,7 +288,7 @@ where
     S: tokio::io::AsyncWrite + std::marker::Unpin,
 {
     let password = md5_hash_second_pass(hash, salt);
-    let mut message = BytesMut::with_capacity(password.len() as usize + 5);
+    let mut message = BytesMut::with_capacity(password.len() + 5);
 
     message.put_u8(b'p');
     message.put_i32(password.len() as i32 + 4);
@@ -322,7 +315,7 @@ where
     res.put_slice(&set_complete[..]);
 
     write_all_half(stream, &res).await?;
-    ready_for_query(stream).await
+    send_ready_for_query(stream).await
 }
 
 /// Send a custom error message to the client.
@@ -333,7 +326,7 @@ where
     S: tokio::io::AsyncWrite + std::marker::Unpin,
 {
     error_response_terminal(stream, message).await?;
-    ready_for_query(stream).await
+    send_ready_for_query(stream).await
 }
 
 /// Send a custom error message to the client.
@@ -434,7 +427,7 @@ where
     res.put(command_complete("SELECT 1"));
 
     write_all_half(stream, &res).await?;
-    ready_for_query(stream).await
+    send_ready_for_query(stream).await
 }
 
 pub fn row_description(columns: &Vec<(&str, DataType)>) -> BytesMut {
@@ -516,7 +509,7 @@ pub fn data_row_nullable(row: &Vec<Option<String>>) -> BytesMut {
             data_row.put_i32(column.len() as i32);
             data_row.put_slice(column);
         } else {
-            data_row.put_i32(-1 as i32);
+            data_row.put_i32(-1_i32);
         }
     }
@@ -564,6 +557,37 @@ pub fn flush() -> BytesMut {
     bytes
 }
 
+pub fn sync() -> BytesMut {
+    let mut bytes = BytesMut::with_capacity(mem::size_of::<u8>() + mem::size_of::<i32>());
+    bytes.put_u8(b'S');
+    bytes.put_i32(4);
+    bytes
+}
+
+pub fn parse_complete() -> BytesMut {
+    let mut bytes = BytesMut::with_capacity(mem::size_of::<u8>() + mem::size_of::<i32>());
+
+    bytes.put_u8(b'1');
+    bytes.put_i32(4);
+    bytes
+}
+
+pub fn ready_for_query(in_transaction: bool) -> BytesMut {
+    let mut bytes = BytesMut::with_capacity(
+        mem::size_of::<u8>() + mem::size_of::<i32>() + mem::size_of::<u8>(),
+    );
+
+    bytes.put_u8(b'Z');
+    bytes.put_i32(5);
+    if in_transaction {
+        bytes.put_u8(b'T');
+    } else {
+        bytes.put_u8(b'I');
+    }
+
+    bytes
+}
+
 /// Write all data in the buffer to the TcpStream.
 pub async fn write_all<S>(stream: &mut S, buf: BytesMut) -> Result<(), Error>
 where
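For reference, `ready_for_query` builds the ReadyForQuery ('Z') message from the Postgres wire protocol: a one-byte type code, a big-endian int32 length of 5 (the length field itself plus one status byte), and the transaction status, 'I' for idle or 'T' for inside a transaction block. A quick sanity check of the expected bytes:

    let idle = ready_for_query(false);
    assert_eq!(&idle[..], &[b'Z', 0, 0, 0, 5, b'I']);

    let in_transaction = ready_for_query(true);
    assert_eq!(&in_transaction[..], &[b'Z', 0, 0, 0, 5, b'T']);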
@@ -571,12 +595,10 @@ where
 {
     match stream.write_all(&buf).await {
         Ok(_) => Ok(()),
-        Err(err) => {
-            return Err(Error::SocketError(format!(
-                "Error writing to socket - Error: {:?}",
-                err
-            )))
-        }
+        Err(err) => Err(Error::SocketError(format!(
+            "Error writing to socket - Error: {:?}",
+            err
+        ))),
     }
 }
@@ -587,12 +609,10 @@ where
 {
     match stream.write_all(buf).await {
         Ok(_) => Ok(()),
-        Err(err) => {
-            return Err(Error::SocketError(format!(
-                "Error writing to socket - Error: {:?}",
-                err
-            )))
-        }
+        Err(err) => Err(Error::SocketError(format!(
+            "Error writing to socket - Error: {:?}",
+            err
+        ))),
     }
 }
@@ -603,19 +623,15 @@ where
     match stream.write_all(buf).await {
         Ok(_) => match stream.flush().await {
             Ok(_) => Ok(()),
-            Err(err) => {
-                return Err(Error::SocketError(format!(
-                    "Error flushing socket - Error: {:?}",
-                    err
-                )))
-            }
-        },
-        Err(err) => {
-            return Err(Error::SocketError(format!(
-                "Error writing to socket - Error: {:?}",
-                err
-            )))
-        }
+            Err(err) => Err(Error::SocketError(format!(
+                "Error flushing socket - Error: {:?}",
+                err
+            ))),
+        },
+        Err(err) => Err(Error::SocketError(format!(
+            "Error writing to socket - Error: {:?}",
+            err
+        ))),
     }
 }
@@ -717,6 +733,10 @@ pub fn configure_socket(stream: &TcpStream) {
         }
         Err(err) => error!("Could not configure socket: {}", err),
     }
+    match sock_ref.set_nodelay(true) {
+        Ok(_) => (),
+        Err(err) => error!("Could not configure TCP_NODELAY for socket: {}", err),
+    }
 }
 
 pub trait BytesMutReader {
@@ -730,7 +750,7 @@ impl BytesMutReader for Cursor<&BytesMut> {
         let mut buf = vec![];
         match self.read_until(b'\0', &mut buf) {
             Ok(_) => Ok(String::from_utf8_lossy(&buf[..buf.len() - 1]).to_string()),
-            Err(err) => return Err(Error::ParseBytesError(err.to_string())),
+            Err(err) => Err(Error::ParseBytesError(err.to_string())),
         }
     }
 }
@@ -746,10 +766,55 @@ impl BytesMutReader for BytesMut {
                 let string_bytes = self.split_to(index + 1);
                 Ok(String::from_utf8_lossy(&string_bytes[..string_bytes.len() - 1]).to_string())
             }
-            None => return Err(Error::ParseBytesError("Could not read string".to_string())),
+            None => Err(Error::ParseBytesError("Could not read string".to_string())),
         }
     }
 }
 
+pub enum ExtendedProtocolData {
+    Parse {
+        data: BytesMut,
+        metadata: Option<(Arc<Parse>, u64)>,
+    },
+    Bind {
+        data: BytesMut,
+        metadata: Option<String>,
+    },
+    Describe {
+        data: BytesMut,
+        metadata: Option<String>,
+    },
+    Execute {
+        data: BytesMut,
+    },
+    Close {
+        data: BytesMut,
+        close: Close,
+    },
+}
+
+impl ExtendedProtocolData {
+    pub fn create_new_parse(data: BytesMut, metadata: Option<(Arc<Parse>, u64)>) -> Self {
+        Self::Parse { data, metadata }
+    }
+
+    pub fn create_new_bind(data: BytesMut, metadata: Option<String>) -> Self {
+        Self::Bind { data, metadata }
+    }
+
+    pub fn create_new_describe(data: BytesMut, metadata: Option<String>) -> Self {
+        Self::Describe { data, metadata }
+    }
+
+    pub fn create_new_execute(data: BytesMut) -> Self {
+        Self::Execute { data }
+    }
+
+    pub fn create_new_close(data: BytesMut, close: Close) -> Self {
+        Self::Close { data, close }
+    }
+}
+
 /// Parse (F) message.
 /// See: <https://www.postgresql.org/docs/current/protocol-message-formats.html>
 #[derive(Clone, Debug)]
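`ExtendedProtocolData` pairs the raw client bytes with whatever the pooler has already learned about them, so extended-protocol traffic can be buffered and rewritten before it is forwarded. A sketch of the assumed flow (variable names here are illustrative, not the exact client.rs logic):

    let mut buffer: Vec<ExtendedProtocolData> = Vec::new();

    // 'P' (Parse): keep the raw message plus the rewritten Parse and its
    // hash so it can be deduplicated against the pool-level cache.
    buffer.push(ExtendedProtocolData::create_new_parse(
        parse_bytes,
        Some((rewritten_parse, hash)),
    ));

    // 'B' (Bind): remember which client-side statement name it references.
    buffer.push(ExtendedProtocolData::create_new_bind(
        bind_bytes,
        Some(client_statement_name),
    ));

    // 'S' (Sync): drain the buffer, renaming statements as needed, and
    // forward the whole batch to the checked-out server in one write.
    for message in buffer.drain(..) {
        match message {
            ExtendedProtocolData::Parse { data, metadata } => { /* rename, send */ }
            ExtendedProtocolData::Bind { data, metadata } => { /* rename, send */ }
            _ => { /* forward as-is */ }
        }
    }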
@@ -758,7 +823,6 @@ pub struct Parse {
     #[allow(dead_code)]
     len: i32,
     pub name: String,
-    pub generated_name: String,
     query: String,
     num_params: i16,
     param_types: Vec<i32>,
@@ -784,7 +848,6 @@ impl TryFrom<&BytesMut> for Parse {
             code,
             len,
             name,
-            generated_name: prepared_statement_name(),
             query,
             num_params,
             param_types,
@@ -833,11 +896,44 @@ impl TryFrom<&Parse> for BytesMut {
 }
 
 impl Parse {
-    pub fn rename(mut self) -> Self {
-        self.name = self.generated_name.to_string();
+    /// Renames the prepared statement to a new name based on the global counter
+    pub fn rewrite(mut self) -> Self {
+        self.name = format!(
+            "PGCAT_{}",
+            PREPARED_STATEMENT_COUNTER.fetch_add(1, Ordering::SeqCst)
+        );
         self
     }
 
+    /// Gets the name of the prepared statement from the buffer
+    pub fn get_name(buf: &BytesMut) -> Result<String, Error> {
+        let mut cursor = Cursor::new(buf);
+        // Skip the code and length
+        cursor.advance(mem::size_of::<u8>() + mem::size_of::<i32>());
+        cursor.read_string()
+    }
+
+    /// Hashes the parse statement to be used as a key in the global cache
+    pub fn get_hash(&self) -> u64 {
+        // TODO_ZAIN: Take a look at which hashing function is being used
+        let mut hasher = DefaultHasher::new();
+
+        let concatenated = format!(
+            "{}{}{}",
+            self.query,
+            self.num_params,
+            self.param_types
+                .iter()
+                .map(ToString::to_string)
+                .collect::<Vec<_>>()
+                .join(",")
+        );
+
+        concatenated.hash(&mut hasher);
+
+        hasher.finish()
+    }
+
     pub fn anonymous(&self) -> bool {
         self.name.is_empty()
     }
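Hashing the query text, parameter count, and parameter types, but not the client-chosen statement name, is what lets two clients that prepare the same SQL under different names collapse into one server-side statement. A sketch of the intended effect, using the PreparedStatementCache added in src/pool.rs further down (the two Parse values are assumed to be decoded from two different clients):

    // Same SQL and parameter types, different client-side names.
    let hash_a = parse_from_client_a.get_hash();
    let hash_b = parse_from_client_b.get_hash();
    assert_eq!(hash_a, hash_b);

    let mut cache = PreparedStatementCache::new(100);
    let first = cache.get_or_insert(&parse_from_client_a, hash_a);
    let second = cache.get_or_insert(&parse_from_client_b, hash_b);
    // Both clients map to the same rewritten server-side name, e.g. "PGCAT_0".
    assert_eq!(first.name, second.name);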
@@ -968,9 +1064,42 @@ impl TryFrom<Bind> for BytesMut {
 }
 
 impl Bind {
-    pub fn reassign(mut self, parse: &Parse) -> Self {
-        self.prepared_statement = parse.name.clone();
-        self
+    /// Gets the name of the prepared statement from the buffer
+    pub fn get_name(buf: &BytesMut) -> Result<String, Error> {
+        let mut cursor = Cursor::new(buf);
+        // Skip the code and length
+        cursor.advance(mem::size_of::<u8>() + mem::size_of::<i32>());
+        // Skip the portal name, then read the statement name
+        cursor.read_string()?;
+        cursor.read_string()
+    }
+
+    /// Renames the prepared statement to a new name
+    pub fn rename(buf: BytesMut, new_name: &str) -> Result<BytesMut, Error> {
+        let mut cursor = Cursor::new(&buf);
+        // Read basic data from the cursor
+        let code = cursor.get_u8();
+        let current_len = cursor.get_i32();
+        let portal = cursor.read_string()?;
+        let prepared_statement = cursor.read_string()?;
+
+        // Calculate new length
+        let new_len = current_len + new_name.len() as i32 - prepared_statement.len() as i32;
+
+        // Begin building the response buffer
+        let mut response_buf = BytesMut::with_capacity(new_len as usize + 1);
+        response_buf.put_u8(code);
+        response_buf.put_i32(new_len);
+
+        // Put the portal and new name into the buffer
+        // Note: panics if the provided string contains a null byte
+        response_buf.put_slice(CString::new(portal)?.as_bytes_with_nul());
+        response_buf.put_slice(CString::new(new_name)?.as_bytes_with_nul());
+
+        // Add the remainder of the original buffer into the response
+        response_buf.put_slice(&buf[cursor.position() as usize..]);
+
+        // Return the buffer
+        Ok(response_buf)
     }
 
     pub fn anonymous(&self) -> bool {
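`Bind::rename` works because the Postgres length prefix counts everything after the one-byte type code, and the statement name sits at a fixed position right after the portal name; only the length needs adjusting by the size difference of the two names, while the parameter formats and values that follow are copied through untouched. A worked example of the arithmetic:

    // Original message: len = 30, statement name "s1" (2 bytes).
    // New name "PGCAT_7" (7 bytes) grows the message by 5 bytes.
    let current_len = 30_i32;
    let new_len = current_len + "PGCAT_7".len() as i32 - "s1".len() as i32;
    assert_eq!(new_len, 35);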
@@ -984,7 +1113,7 @@ pub struct Describe {
 
     #[allow(dead_code)]
     len: i32,
-    target: char,
+    pub target: char,
     pub statement_name: String,
 }
@@ -1026,6 +1155,15 @@ impl TryFrom<Describe> for BytesMut {
 }
 
 impl Describe {
+    pub fn empty_new() -> Describe {
+        Describe {
+            code: 'D',
+            len: 4 + 1 + 1,
+            target: 'S',
+            statement_name: "".to_string(),
+        }
+    }
+
     pub fn rename(mut self, name: &str) -> Self {
         self.statement_name = name.to_string();
         self
@@ -1114,13 +1252,6 @@ pub fn close_complete() -> BytesMut {
     bytes
 }
 
-pub fn prepared_statement_name() -> String {
-    format!(
-        "P_{}",
-        PREPARED_STATEMENT_COUNTER.fetch_add(1, Ordering::SeqCst)
-    )
-}
-
 // from https://www.postgresql.org/docs/12/protocol-error-fields.html
 #[derive(Debug, Default, PartialEq)]
 pub struct PgErrorMsg {
@@ -1203,7 +1334,7 @@ impl Display for PgErrorMsg {
 }
 
 impl PgErrorMsg {
-    pub fn parse(error_msg: Vec<u8>) -> Result<PgErrorMsg, Error> {
+    pub fn parse(error_msg: &[u8]) -> Result<PgErrorMsg, Error> {
         let mut out = PgErrorMsg {
             severity_localized: "".to_string(),
             severity: "".to_string(),
@@ -1311,38 +1442,38 @@ mod tests {
     fn parse_fields() {
         let mut complete_msg = vec![];
         let severity = "FATAL";
-        complete_msg.extend(field('S', &severity));
-        complete_msg.extend(field('V', &severity));
+        complete_msg.extend(field('S', severity));
+        complete_msg.extend(field('V', severity));
 
         let error_code = "29P02";
-        complete_msg.extend(field('C', &error_code));
+        complete_msg.extend(field('C', error_code));
         let message = "password authentication failed for user \"wrong_user\"";
-        complete_msg.extend(field('M', &message));
+        complete_msg.extend(field('M', message));
         let detail_msg = "super detailed message";
-        complete_msg.extend(field('D', &detail_msg));
+        complete_msg.extend(field('D', detail_msg));
         let hint_msg = "hint detail here";
-        complete_msg.extend(field('H', &hint_msg));
+        complete_msg.extend(field('H', hint_msg));
         complete_msg.extend(field('P', "123"));
         complete_msg.extend(field('p', "234"));
         let internal_query = "SELECT * from foo;";
-        complete_msg.extend(field('q', &internal_query));
+        complete_msg.extend(field('q', internal_query));
         let where_msg = "where goes here";
-        complete_msg.extend(field('W', &where_msg));
+        complete_msg.extend(field('W', where_msg));
        let schema_msg = "schema_name";
-        complete_msg.extend(field('s', &schema_msg));
+        complete_msg.extend(field('s', schema_msg));
         let table_msg = "table_name";
-        complete_msg.extend(field('t', &table_msg));
+        complete_msg.extend(field('t', table_msg));
         let column_msg = "column_name";
-        complete_msg.extend(field('c', &column_msg));
+        complete_msg.extend(field('c', column_msg));
         let data_type_msg = "type_name";
-        complete_msg.extend(field('d', &data_type_msg));
+        complete_msg.extend(field('d', data_type_msg));
         let constraint_msg = "constraint_name";
-        complete_msg.extend(field('n', &constraint_msg));
+        complete_msg.extend(field('n', constraint_msg));
         let file_msg = "pgcat.c";
-        complete_msg.extend(field('F', &file_msg));
+        complete_msg.extend(field('F', file_msg));
         complete_msg.extend(field('L', "335"));
         let routine_msg = "my_failing_routine";
-        complete_msg.extend(field('R', &routine_msg));
+        complete_msg.extend(field('R', routine_msg));
 
         tracing_subscriber::fmt()
             .with_max_level(tracing::Level::INFO)
@@ -1351,7 +1482,7 @@ mod tests {
 
         info!(
             "full message: {}",
-            PgErrorMsg::parse(complete_msg.clone()).unwrap()
+            PgErrorMsg::parse(&complete_msg).unwrap()
         );
         assert_eq!(
             PgErrorMsg {
@@ -1374,17 +1505,17 @@ mod tests {
                 line: Some(335),
                 routine: Some(routine_msg.to_string()),
             },
-            PgErrorMsg::parse(complete_msg).unwrap()
+            PgErrorMsg::parse(&complete_msg).unwrap()
         );
 
         let mut only_mandatory_msg = vec![];
-        only_mandatory_msg.extend(field('S', &severity));
-        only_mandatory_msg.extend(field('V', &severity));
-        only_mandatory_msg.extend(field('C', &error_code));
-        only_mandatory_msg.extend(field('M', &message));
-        only_mandatory_msg.extend(field('D', &detail_msg));
+        only_mandatory_msg.extend(field('S', severity));
+        only_mandatory_msg.extend(field('V', severity));
+        only_mandatory_msg.extend(field('C', error_code));
+        only_mandatory_msg.extend(field('M', message));
+        only_mandatory_msg.extend(field('D', detail_msg));
 
-        let err_fields = PgErrorMsg::parse(only_mandatory_msg.clone()).unwrap();
+        let err_fields = PgErrorMsg::parse(&only_mandatory_msg).unwrap();
         info!("only mandatory fields: {}", &err_fields);
         error!(
             "server error: {}: {}",
@@ -1411,7 +1542,7 @@ mod tests {
                 line: None,
                 routine: None,
             },
-            PgErrorMsg::parse(only_mandatory_msg).unwrap()
+            PgErrorMsg::parse(&only_mandatory_msg).unwrap()
         );
     }
 }
@@ -23,14 +23,15 @@ impl MirroredClient {
     async fn create_pool(&self) -> Pool<ServerPool> {
         let config = get_config();
         let default = std::time::Duration::from_millis(10_000).as_millis() as u64;
-        let (connection_timeout, idle_timeout, _cfg) =
+        let (connection_timeout, idle_timeout, _cfg, prepared_statement_cache_size) =
             match config.pools.get(&self.address.pool_name) {
                 Some(cfg) => (
                     cfg.connect_timeout.unwrap_or(default),
                     cfg.idle_timeout.unwrap_or(default),
                     cfg.clone(),
+                    cfg.prepared_statements_cache_size,
                 ),
-                None => (default, default, crate::config::Pool::default()),
+                None => (default, default, crate::config::Pool::default(), 0),
             };
 
         let manager = ServerPool::new(
@@ -42,6 +43,7 @@ impl MirroredClient {
             None,
             true,
             false,
+            prepared_statement_cache_size,
         );
 
         Pool::builder()
@@ -83,8 +85,9 @@ impl MirroredClient {
                         match recv_result {
                             Ok(message) => trace!("Received from mirror: {} {:?}", String::from_utf8_lossy(&message[..]), address.clone()),
                             Err(err) => {
-                                server.mark_bad();
-                                error!("Failed to receive from mirror {:?} {:?}", err, address.clone());
+                                server.mark_bad(
+                                    format!("Failed to receive from mirror {:?} {:?}", err, address.clone()).as_str()
+                                );
                             }
                         }
@@ -96,8 +99,9 @@ impl MirroredClient {
                         match server.send(&BytesMut::from(&bytes[..])).await {
                             Ok(_) => trace!("Sent to mirror: {} {:?}", String::from_utf8_lossy(&bytes[..]), address.clone()),
                             Err(err) => {
-                                server.mark_bad();
-                                error!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone())
+                                server.mark_bad(
+                                    format!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone()).as_str()
+                                );
                             }
                         }
@@ -137,18 +141,18 @@ impl MirroringManager {
                 bytes_rx,
                 disconnect_rx: exit_rx,
             };
-            exit_senders.push(exit_tx.clone());
-            byte_senders.push(bytes_tx.clone());
+            exit_senders.push(exit_tx);
+            byte_senders.push(bytes_tx);
             client.start();
         });
 
         Self {
-            byte_senders: byte_senders,
+            byte_senders,
             disconnect_senders: exit_senders,
         }
     }
 
-    pub fn send(self: &mut Self, bytes: &BytesMut) {
+    pub fn send(&mut self, bytes: &BytesMut) {
         // We want to avoid performing an allocation if we won't be able to send the message
         // There is a possibility of a race here where we check the capacity and then the channel is
         // closed or the capacity is reduced to 0, but mirroring is best effort anyway
@@ -170,7 +174,7 @@ impl MirroringManager {
         });
     }
 
-    pub fn disconnect(self: &mut Self) {
+    pub fn disconnect(&mut self) {
         self.disconnect_senders
             .iter_mut()
             .for_each(|sender| match sender.try_send(()) {
@@ -92,7 +92,7 @@ impl<'a> Plugin for Intercept<'a> {
             .map(|s| {
                 let s = s.as_str().to_string();
 
-                if s == "" {
+                if s.is_empty() {
                     None
                 } else {
                     Some(s)
@@ -33,6 +33,7 @@ pub enum PluginOutput {
 #[async_trait]
 pub trait Plugin {
     // Run before the query is sent to the server.
+    #[allow(clippy::ptr_arg)]
     async fn run(
         &mut self,
         query_router: &QueryRouter,
@@ -20,7 +20,7 @@ impl<'a> Prewarmer<'a> {
                 self.server.address(),
                 query
             );
-            self.server.query(&query).await?;
+            self.server.query(query).await?;
         }
 
         Ok(())
@@ -34,7 +34,7 @@ impl<'a> Plugin for TableAccess<'a> {
 
         visit_relations(ast, |relation| {
             let relation = relation.to_string();
-            let parts = relation.split(".").collect::<Vec<&str>>();
+            let parts = relation.split('.').collect::<Vec<&str>>();
             let table_name = parts.last().unwrap();
 
             if self.tables.contains(&table_name.to_string()) {
232  src/pool.rs
@@ -3,6 +3,7 @@ use async_trait::async_trait;
 use bb8::{ManageConnection, Pool, PooledConnection, QueueStrategy};
 use chrono::naive::NaiveDateTime;
 use log::{debug, error, info, warn};
+use lru::LruCache;
 use once_cell::sync::Lazy;
 use parking_lot::{Mutex, RwLock};
 use rand::seq::SliceRandom;
@@ -10,6 +11,7 @@ use rand::thread_rng;
 use regex::Regex;
 use std::collections::HashMap;
 use std::fmt::{Display, Formatter};
+use std::num::NonZeroUsize;
 use std::sync::atomic::AtomicU64;
 use std::sync::{
     atomic::{AtomicBool, Ordering},
@@ -24,6 +26,7 @@ use crate::config::{
 use crate::errors::Error;
 
 use crate::auth_passthrough::AuthPassthrough;
+use crate::messages::Parse;
 use crate::plugins::prewarmer;
 use crate::server::{Server, ServerParameters};
 use crate::sharding::ShardingFunction;
@@ -54,6 +57,57 @@ pub enum BanReason {
     AdminBan(i64),
 }
 
+pub type PreparedStatementCacheType = Arc<Mutex<PreparedStatementCache>>;
+
+// TODO: Add stats to this cache
+// TODO: Add application name to the cache value to help identify which application is using the cache
+// TODO: Create admin command to show which statements are in the cache
+#[derive(Debug)]
+pub struct PreparedStatementCache {
+    cache: LruCache<u64, Arc<Parse>>,
+}
+
+impl PreparedStatementCache {
+    pub fn new(mut size: usize) -> Self {
+        // Cannot be zero
+        if size == 0 {
+            size = 1;
+        }
+
+        PreparedStatementCache {
+            cache: LruCache::new(NonZeroUsize::new(size).unwrap()),
+        }
+    }
+
+    /// Adds the prepared statement to the cache under a new name if it doesn't
+    /// exist yet; if it already exists, returns the existing rewritten parse.
+    ///
+    /// Pass the hash in so the computation happens before acquiring the lock.
+    pub fn get_or_insert(&mut self, parse: &Parse, hash: u64) -> Arc<Parse> {
+        match self.cache.get(&hash) {
+            Some(rewritten_parse) => rewritten_parse.clone(),
+            None => {
+                let new_parse = Arc::new(parse.clone().rewrite());
+                let evicted = self.cache.push(hash, new_parse.clone());
+
+                if let Some((_, evicted_parse)) = evicted {
+                    debug!(
+                        "Evicted prepared statement {} from cache",
+                        evicted_parse.name
+                    );
+                }
+
+                new_parse
+            }
+        }
+    }
+
+    /// Marks the hash as most recently used if it exists
+    pub fn promote(&mut self, hash: &u64) {
+        self.cache.promote(hash);
+    }
+}
+
 /// An identifier for a PgCat pool,
 /// a database visible to clients.
 #[derive(Hash, Debug, Clone, PartialEq, Eq, Default)]
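A minimal usage sketch of the cache, assuming `client_parse` was decoded from client bytes with `Parse::try_from`:

    // Compute the hash outside the lock, as the doc comment suggests.
    let hash = client_parse.get_hash();

    let cache: PreparedStatementCacheType =
        Arc::new(Mutex::new(PreparedStatementCache::new(500)));

    // The first caller inserts and receives the rewritten statement
    // (named e.g. "PGCAT_0"); later callers with the same hash get
    // the same Arc<Parse> back.
    let rewritten = cache.lock().get_or_insert(&client_parse, hash);

    // On a cache hit elsewhere, keep the entry warm in the LRU.
    cache.lock().promote(&hash);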
@@ -108,6 +162,9 @@ pub struct PoolSettings {
     // Default server role to connect to.
     pub default_role: Option<Role>,
 
+    // Whether or not we should use primary when replicas are unavailable
+    pub replica_to_primary_failover_enabled: bool,
+
     // Enable/disable query parser.
     pub query_parser_enabled: bool,
@@ -165,6 +222,7 @@ impl Default for PoolSettings {
             user: User::default(),
             db: String::default(),
             default_role: None,
+            replica_to_primary_failover_enabled: false,
             query_parser_enabled: false,
             query_parser_max_length: None,
             query_parser_read_write_splitting: false,
@@ -190,11 +248,11 @@ impl Default for PoolSettings {
 #[derive(Clone, Debug, Default)]
 pub struct ConnectionPool {
     /// The pools handled internally by bb8.
-    databases: Vec<Vec<Pool<ServerPool>>>,
+    databases: Arc<Vec<Vec<Pool<ServerPool>>>>,
 
     /// The addresses (host, port, role) to handle
     /// failover and load balancing deterministically.
-    addresses: Vec<Vec<Address>>,
+    addresses: Arc<Vec<Vec<Address>>>,
 
     /// List of banned addresses (see above)
     /// that should not be queried.
@@ -206,7 +264,7 @@ pub struct ConnectionPool {
     original_server_parameters: Arc<RwLock<ServerParameters>>,
 
     /// Pool configuration.
-    pub settings: PoolSettings,
+    pub settings: Arc<PoolSettings>,
 
     /// If not validated, we need to double check the pool is available before allowing a client
     /// to use it.
@@ -223,6 +281,9 @@ pub struct ConnectionPool {
 
     /// AuthInfo
     pub auth_hash: Arc<RwLock<Option<String>>>,
+
+    /// Cache
+    pub prepared_statement_cache: Option<PreparedStatementCacheType>,
 }
 
 impl ConnectionPool {
@@ -241,20 +302,17 @@ impl ConnectionPool {
             let old_pool_ref = get_pool(pool_name, &user.username);
             let identifier = PoolIdentifier::new(pool_name, &user.username);
 
-            match old_pool_ref {
-                Some(pool) => {
-                    // If the pool hasn't changed, get existing reference and insert it into the new_pools.
-                    // We replace all pools at the end, but if the reference is kept, the pool won't get re-created (bb8).
-                    if pool.config_hash == new_pool_hash_value {
-                        info!(
-                            "[pool: {}][user: {}] has not changed",
-                            pool_name, user.username
-                        );
-                        new_pools.insert(identifier.clone(), pool.clone());
-                        continue;
-                    }
-                }
-                None => (),
-            }
+            if let Some(pool) = old_pool_ref {
+                // If the pool hasn't changed, get existing reference and insert it into the new_pools.
+                // We replace all pools at the end, but if the reference is kept, the pool won't get re-created (bb8).
+                if pool.config_hash == new_pool_hash_value {
+                    info!(
+                        "[pool: {}][user: {}] has not changed",
+                        pool_name, user.username
+                    );
+                    new_pools.insert(identifier.clone(), pool.clone());
+                    continue;
+                }
+            }
 
             info!(
@@ -379,16 +437,23 @@ impl ConnectionPool {
                 },
                 pool_config.cleanup_server_connections,
                 pool_config.log_client_parameter_status_changes,
+                pool_config.prepared_statements_cache_size,
             );
 
-            let connect_timeout = match pool_config.connect_timeout {
+            let connect_timeout = match user.connect_timeout {
                 Some(connect_timeout) => connect_timeout,
-                None => config.general.connect_timeout,
+                None => match pool_config.connect_timeout {
+                    Some(connect_timeout) => connect_timeout,
+                    None => config.general.connect_timeout,
+                },
             };
 
-            let idle_timeout = match pool_config.idle_timeout {
+            let idle_timeout = match user.idle_timeout {
                 Some(idle_timeout) => idle_timeout,
-                None => config.general.idle_timeout,
+                None => match pool_config.idle_timeout {
+                    Some(idle_timeout) => idle_timeout,
+                    None => config.general.idle_timeout,
+                },
             };
 
             let server_lifetime = match user.server_lifetime {
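The nested matches implement a three-level precedence for both timeouts: a per-user setting wins over the pool-level setting, which wins over the general default. The same resolution expressed with Option combinators, purely for clarity:

    // Equivalent, illustrative form of the lookup order above.
    let connect_timeout = user
        .connect_timeout
        .or(pool_config.connect_timeout)
        .unwrap_or(config.general.connect_timeout);

    let idle_timeout = user
        .idle_timeout
        .or(pool_config.idle_timeout)
        .unwrap_or(config.general.idle_timeout);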
@@ -399,7 +464,7 @@ impl ConnectionPool {
                 },
             };
 
-            let reaper_rate = *vec![idle_timeout, server_lifetime, POOL_REAPER_RATE]
+            let reaper_rate = *[idle_timeout, server_lifetime, POOL_REAPER_RATE]
                 .iter()
                 .min()
                 .unwrap();
@@ -448,13 +513,13 @@ impl ConnectionPool {
         }
 
         let pool = ConnectionPool {
-            databases: shards,
-            addresses,
+            databases: Arc::new(shards),
+            addresses: Arc::new(addresses),
             banlist: Arc::new(RwLock::new(banlist)),
             config_hash: new_pool_hash_value,
             original_server_parameters: Arc::new(RwLock::new(ServerParameters::new())),
             auth_hash: pool_auth_hash,
-            settings: PoolSettings {
+            settings: Arc::new(PoolSettings {
                 pool_mode: match user.pool_mode {
                     Some(pool_mode) => pool_mode,
                     None => pool_config.pool_mode,
@@ -470,6 +535,8 @@ impl ConnectionPool {
                     "primary" => Some(Role::Primary),
                     _ => unreachable!(),
                 },
+                replica_to_primary_failover_enabled: pool_config
+                    .replica_to_primary_failover_enabled,
                 query_parser_enabled: pool_config.query_parser_enabled,
                 query_parser_max_length: pool_config.query_parser_max_length,
                 query_parser_read_write_splitting: pool_config
@@ -489,7 +556,7 @@ impl ConnectionPool {
                     .clone()
                     .map(|regex| Regex::new(regex.as_str()).unwrap()),
                 regex_search_limit: pool_config.regex_search_limit.unwrap_or(1000),
-                default_shard: pool_config.default_shard.clone(),
+                default_shard: pool_config.default_shard,
                 auth_query: pool_config.auth_query.clone(),
                 auth_query_user: pool_config.auth_query_user.clone(),
                 auth_query_password: pool_config.auth_query_password.clone(),
@@ -497,17 +564,23 @@ impl ConnectionPool {
                     Some(ref plugins) => Some(plugins.clone()),
                     None => config.plugins.clone(),
                 },
-            },
+            }),
             validated: Arc::new(AtomicBool::new(false)),
             paused: Arc::new(AtomicBool::new(false)),
             paused_waiter: Arc::new(Notify::new()),
+            prepared_statement_cache: match pool_config.prepared_statements_cache_size {
+                0 => None,
+                _ => Some(Arc::new(Mutex::new(PreparedStatementCache::new(
+                    pool_config.prepared_statements_cache_size,
+                )))),
+            },
         };
 
         // Connect to the servers to make sure pool configuration is valid
         // before setting it globally.
         // Do this async and somewhere else, we don't have to wait here.
         if config.general.validate_config {
-            let mut validate_pool = pool.clone();
+            let validate_pool = pool.clone();
             tokio::task::spawn(async move {
                 let _ = validate_pool.validate().await;
             });
@@ -528,7 +601,7 @@ impl ConnectionPool {
     /// when they connect.
     /// This also warms up the pool for clients that connect when
     /// the pooler starts up.
-    pub async fn validate(&mut self) -> Result<(), Error> {
+    pub async fn validate(&self) -> Result<(), Error> {
         let mut futures = Vec::new();
         let validated = Arc::clone(&self.validated);
@@ -664,6 +737,19 @@ impl ConnectionPool {
             });
         }
 
+        // If the role is replica and we allow sending traffic to the primary when
+        // replicas are unavailable, add the primary address to the list of candidates;
+        // this way it will be tried only when all replicas are unavailable.
+        if role == Role::Replica && self.settings.replica_to_primary_failover_enabled {
+            let mut primaries = self
+                .addresses
+                .iter()
+                .flatten()
+                .filter(|address| address.role == Role::Primary)
+                .collect::<Vec<&Address>>();
+            candidates.insert(0, primaries.pop().unwrap());
+        }
+
         // Indicate we're waiting on a server connection from a pool.
         let now = Instant::now();
         client_stats.waiting();
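Note the ordering trick: the checkout loop further down consumes `candidates` from the back of the list, so inserting the primary at index 0 makes it the last address tried, i.e. only after every replica has been exhausted. A toy illustration of that ordering:

    let mut candidates = vec!["replica_1", "replica_2"];
    candidates.insert(0, "primary");

    assert_eq!(candidates.pop(), Some("replica_2")); // tried first
    assert_eq!(candidates.pop(), Some("replica_1"));
    assert_eq!(candidates.pop(), Some("primary"));   // only as a last resort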
@@ -678,7 +764,7 @@ impl ConnectionPool {
             let mut force_healthcheck = false;
 
             if self.is_banned(address) {
-                if self.try_unban(&address).await {
+                if self.try_unban(address).await {
                     force_healthcheck = true;
                 } else {
                     debug!("Address {:?} is banned", address);
@@ -702,7 +788,6 @@ impl ConnectionPool {
                     );
                     self.ban(address, BanReason::FailedCheckout, Some(client_stats));
                     address.stats.error();
-                    client_stats.idle();
                     client_stats.checkout_error();
                     continue;
                 }
@@ -721,7 +806,7 @@ impl ConnectionPool {
             // Health checks are pretty expensive.
             if !require_healthcheck {
                 let checkout_time = now.elapsed().as_micros() as u64;
-                client_stats.checkout_time(checkout_time);
+                client_stats.checkout_success();
                 server
                     .stats()
                     .checkout_time(checkout_time, client_stats.application_name());
@@ -735,7 +820,7 @@ impl ConnectionPool {
                 .await
             {
                 let checkout_time = now.elapsed().as_micros() as u64;
-                client_stats.checkout_time(checkout_time);
+                client_stats.checkout_success();
                 server
                     .stats()
                     .checkout_time(checkout_time, client_stats.application_name());
@@ -747,10 +832,7 @@ impl ConnectionPool {
             }
         }
 
-        client_stats.idle();
-
-        let checkout_time = now.elapsed().as_micros() as u64;
-        client_stats.checkout_time(checkout_time);
+        client_stats.checkout_error();
 
         Err(Error::AllServersDown)
     }
@@ -776,7 +858,7 @@ impl ConnectionPool {
             Ok(res) => match res {
                 Ok(_) => {
                     let checkout_time: u64 = start.elapsed().as_micros() as u64;
-                    client_info.checkout_time(checkout_time);
+                    client_info.checkout_success();
                     server
                         .stats()
                         .checkout_time(checkout_time, client_info.application_name());
@@ -804,10 +886,10 @@ impl ConnectionPool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Don't leave a bad connection in the pool.
|
// Don't leave a bad connection in the pool.
|
||||||
server.mark_bad();
|
server.mark_bad("failed health check");
|
||||||
|
|
||||||
self.ban(&address, BanReason::FailedHealthCheck, Some(client_info));
|
self.ban(address, BanReason::FailedHealthCheck, Some(client_info));
|
||||||
return false;
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Ban an address (i.e. replica). It no longer will serve
|
/// Ban an address (i.e. replica). It no longer will serve
|
||||||
@@ -872,24 +954,28 @@ impl ConnectionPool {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if all replicas are banned, in that case unban all of them
|
// If we have replica to primary failover we should not unban replicas
|
||||||
let replicas_available = self.addresses[address.shard]
|
// as we still have the primary to server traffic.
|
||||||
.iter()
|
if !self.settings.replica_to_primary_failover_enabled {
|
||||||
.filter(|addr| addr.role == Role::Replica)
|
// Check if all replicas are banned, in that case unban all of them
|
||||||
.count();
|
let replicas_available = self.addresses[address.shard]
|
||||||
|
.iter()
|
||||||
|
.filter(|addr| addr.role == Role::Replica)
|
||||||
|
.count();
|
||||||
|
|
||||||
debug!("Available targets: {}", replicas_available);
|
debug!("Available targets: {}", replicas_available);
|
||||||
|
|
||||||
let read_guard = self.banlist.read();
|
let read_guard = self.banlist.read();
|
||||||
let all_replicas_banned = read_guard[address.shard].len() == replicas_available;
|
let all_replicas_banned = read_guard[address.shard].len() == replicas_available;
|
||||||
drop(read_guard);
|
drop(read_guard);
|
||||||
|
|
||||||
if all_replicas_banned {
|
if all_replicas_banned {
|
||||||
let mut write_guard = self.banlist.write();
|
let mut write_guard = self.banlist.write();
|
||||||
warn!("Unbanning all replicas.");
|
warn!("Unbanning all replicas.");
|
||||||
write_guard[address.shard].clear();
|
write_guard[address.shard].clear();
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if ban time is expired
|
// Check if ban time is expired
|
||||||
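`try_unban` now skips the unban-all-replicas escape hatch whenever primary failover is enabled, since the primary can still serve traffic while the replicas sit out their bans. A compact restatement of that decision (function and parameter names here are illustrative):

```rust
// Illustrative restatement of the unban decision above, not pgcat's exact API.
fn should_unban_all_replicas(
    replica_to_primary_failover_enabled: bool,
    banned_replicas: usize,
    replicas_available: usize,
) -> bool {
    // With failover enabled the primary can still serve traffic, so banned
    // replicas stay banned until their individual ban timers expire.
    !replica_to_primary_failover_enabled && banned_replicas == replicas_available
}

fn main() {
    assert!(should_unban_all_replicas(false, 3, 3));
    assert!(!should_unban_all_replicas(true, 3, 3));
}
```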
@@ -931,10 +1017,10 @@ impl ConnectionPool {
         let guard = self.banlist.read();
         for banlist in guard.iter() {
             for (address, (reason, timestamp)) in banlist.iter() {
-                bans.push((address.clone(), (reason.clone(), timestamp.clone())));
+                bans.push((address.clone(), (reason.clone(), *timestamp)));
             }
         }
-        return bans;
+        bans
     }

     /// Get the address from the host url
@@ -992,7 +1078,7 @@ impl ConnectionPool {
         }
         let busy = provisioned - idle;
         debug!("{:?} has {:?} busy connections", address, busy);
-        return busy;
+        busy
     }

     fn valid_shard_id(&self, shard: Option<usize>) -> bool {
@@ -1001,6 +1087,29 @@ impl ConnectionPool {
             Some(shard) => shard < self.shards(),
         }
     }
+
+    /// Register a parse statement to the pool's cache and return the rewritten parse
+    ///
+    /// Do not pass an anonymous parse statement to this function
+    pub fn register_parse_to_cache(&self, hash: u64, parse: &Parse) -> Option<Arc<Parse>> {
+        // We should only be calling this function if the cache is enabled
+        match self.prepared_statement_cache {
+            Some(ref prepared_statement_cache) => {
+                let mut cache = prepared_statement_cache.lock();
+                Some(cache.get_or_insert(parse, hash))
+            }
+            None => None,
+        }
+    }
+
+    /// Promote a prepared statement hash in the LRU
+    pub fn promote_prepared_statement_hash(&self, hash: &u64) {
+        // We should only be calling this function if the cache is enabled
+        if let Some(ref prepared_statement_cache) = self.prepared_statement_cache {
+            let mut cache = prepared_statement_cache.lock();
+            cache.promote(hash);
+        }
+    }
 }

 /// Wrapper for the bb8 connection pool.
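`register_parse_to_cache` deduplicates client `Parse` messages pool-wide: statements are keyed by a hash of their text, and duplicates get the same rewritten, pool-unique name. A minimal sketch of the idea — the `PreparedCache` type, its `get_or_insert`, and the `PGCAT_*` naming are simplifications assumed for illustration:

```rust
use std::collections::HashMap;
use std::sync::Arc;

// Hypothetical, simplified stand-in for pgcat's pool-level statement cache.
struct Parse {
    name: String,
    query: String,
}

#[derive(Default)]
struct PreparedCache {
    by_hash: HashMap<u64, Arc<Parse>>,
    counter: u64,
}

impl PreparedCache {
    /// Return the cached rewrite for this hash, or rewrite the client's
    /// statement to a unique pool-wide name and cache it.
    fn get_or_insert(&mut self, parse: &Parse, hash: u64) -> Arc<Parse> {
        if let Some(cached) = self.by_hash.get(&hash) {
            return cached.clone();
        }
        self.counter += 1;
        let rewritten = Arc::new(Parse {
            name: format!("PGCAT_{}", self.counter), // pool-wide unique name
            query: parse.query.clone(),
        });
        self.by_hash.insert(hash, rewritten.clone());
        rewritten
    }
}

fn main() {
    let mut cache = PreparedCache::default();
    let p = Parse { name: "s1".into(), query: "SELECT 1".into() };
    let a = cache.get_or_insert(&p, 42);
    let b = cache.get_or_insert(&p, 42);
    assert_eq!(a.name, b.name); // same hash -> same server-side name
}
```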
@@ -1028,9 +1137,13 @@ pub struct ServerPool {

     /// Log client parameter status changes
     log_client_parameter_status_changes: bool,
+
+    /// Prepared statement cache size
+    prepared_statement_cache_size: usize,
 }

 impl ServerPool {
+    #[allow(clippy::too_many_arguments)]
     pub fn new(
         address: Address,
         user: User,
@@ -1040,16 +1153,18 @@ impl ServerPool {
         plugins: Option<Plugins>,
         cleanup_connections: bool,
         log_client_parameter_status_changes: bool,
+        prepared_statement_cache_size: usize,
     ) -> ServerPool {
         ServerPool {
             address,
-            user: user.clone(),
+            user,
             database: database.to_string(),
             client_server_map,
             auth_hash,
             plugins,
             cleanup_connections,
             log_client_parameter_status_changes,
+            prepared_statement_cache_size,
         }
     }
 }
@@ -1080,6 +1195,7 @@ impl ManageConnection for ServerPool {
             self.auth_hash.clone(),
             self.cleanup_connections,
             self.log_client_parameter_status_changes,
+            self.prepared_statement_cache_size,
         )
         .await
         {

@@ -1,23 +1,41 @@
-use hyper::service::{make_service_fn, service_fn};
-use hyper::{Body, Method, Request, Response, Server, StatusCode};
+use http_body_util::Full;
+use hyper::body;
+use hyper::body::Bytes;
+
+use hyper::server::conn::http1;
+use hyper::service::service_fn;
+use hyper::{Method, Request, Response, StatusCode};
+use hyper_util::rt::TokioIo;
 use log::{debug, error, info};
 use phf::phf_map;
 use std::collections::HashMap;
 use std::fmt;
 use std::net::SocketAddr;
 use std::sync::atomic::Ordering;
-use std::sync::Arc;
+use tokio::net::TcpListener;

 use crate::config::Address;
 use crate::pool::{get_all_pools, PoolIdentifier};
+use crate::stats::get_server_stats;
 use crate::stats::pool::PoolStats;
-use crate::stats::{get_server_stats, ServerStats};

 struct MetricHelpType {
     help: &'static str,
     ty: &'static str,
 }

+struct ServerPrometheusStats {
+    bytes_received: u64,
+    bytes_sent: u64,
+    transaction_count: u64,
+    query_count: u64,
+    error_count: u64,
+    active_count: u64,
+    idle_count: u64,
+    login_count: u64,
+    tested_count: u64,
+}
+
 // reference for metric types: https://prometheus.io/docs/concepts/metric_types/
 // counters only increase
 // gauges can arbitrarily increase or decrease
@@ -120,22 +138,46 @@ static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = ph
     },
     "servers_bytes_received" => MetricHelpType {
         help: "Volume in bytes of network traffic received by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_bytes_sent" => MetricHelpType {
         help: "Volume in bytes of network traffic sent by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_transaction_count" => MetricHelpType {
         help: "Number of transactions executed by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_query_count" => MetricHelpType {
         help: "Number of queries executed by server",
-        ty: "gauge",
+        ty: "counter",
     },
     "servers_error_count" => MetricHelpType {
         help: "Number of errors",
+        ty: "counter",
+    },
+    "servers_idle_count" => MetricHelpType {
+        help: "Number of server connection in idle state",
+        ty: "gauge",
+    },
+    "servers_active_count" => MetricHelpType {
+        help: "Number of server connection in active state",
+        ty: "gauge",
+    },
+    "servers_tested_count" => MetricHelpType {
+        help: "Number of server connection in tested state",
+        ty: "gauge",
+    },
+    "servers_login_count" => MetricHelpType {
+        help: "Number of server connection in login state",
+        ty: "gauge",
+    },
+    "servers_is_banned" => MetricHelpType {
+        help: "0 if server is not banned, 1 if server is banned",
+        ty: "gauge",
+    },
+    "servers_is_paused" => MetricHelpType {
+        help: "0 if server is not paused, 1 if server is paused",
         ty: "gauge",
     },
     "databases_pool_size" => MetricHelpType {
@@ -158,18 +200,17 @@ struct PrometheusMetric<Value: fmt::Display> {

 impl<Value: fmt::Display> fmt::Display for PrometheusMetric<Value> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let formatted_labels = self
-            .labels
+        let mut sorted_labels: Vec<_> = self.labels.iter().collect();
+        sorted_labels.sort_by_key(|&(key, _)| key);
+        let formatted_labels = sorted_labels
             .iter()
             .map(|(key, value)| format!("{}=\"{}\"", key, value))
             .collect::<Vec<_>>()
             .join(",");
         write!(
             f,
-            "# HELP {name} {help}\n# TYPE {name} {ty}\n{name}{{{formatted_labels}}} {value}\n",
+            "{name}{{{formatted_labels}}} {value}",
             name = format_args!("pgcat_{}", self.name),
-            help = self.help,
-            ty = self.ty,
             formatted_labels = formatted_labels,
             value = self.value
         )
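Sorting the labels before formatting makes each rendered sample deterministic: `HashMap` iteration order is randomized per process, so without the sort the same metric could scrape with labels in a different order every run. A small self-contained illustration of the same formatting:

```rust
use std::collections::HashMap;

fn render(name: &str, labels: &HashMap<&str, String>, value: u64) -> String {
    // Sort by key so the label order never depends on HashMap iteration order.
    let mut sorted: Vec<_> = labels.iter().collect();
    sorted.sort_by_key(|&(key, _)| key);
    let formatted = sorted
        .iter()
        .map(|(key, value)| format!("{}=\"{}\"", key, value))
        .collect::<Vec<_>>()
        .join(",");
    format!("pgcat_{}{{{}}} {}", name, formatted, value)
}

fn main() {
    let mut labels = HashMap::new();
    labels.insert("role", "replica".to_string());
    labels.insert("pool", "simple_db".to_string());
    let line = render("servers_query_count", &labels, 7);
    assert_eq!(
        line,
        "pgcat_servers_query_count{pool=\"simple_db\",role=\"replica\"} 7"
    );
    println!("{line}");
}
```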
@@ -203,7 +244,9 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
         labels.insert("shard", address.shard.to_string());
         labels.insert("role", address.role.to_string());
         labels.insert("pool", address.pool_name.clone());
+        labels.insert("index", address.address_index.to_string());
         labels.insert("database", address.database.to_string());
+        labels.insert("username", address.username.clone());

         Self::from_name(&format!("databases_{}", name), value, labels)
     }
@@ -218,7 +261,9 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
         labels.insert("shard", address.shard.to_string());
         labels.insert("role", address.role.to_string());
         labels.insert("pool", address.pool_name.clone());
+        labels.insert("index", address.address_index.to_string());
         labels.insert("database", address.database.to_string());
+        labels.insert("username", address.username.clone());

         Self::from_name(&format!("servers_{}", name), value, labels)
     }
@@ -229,7 +274,9 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
         labels.insert("shard", address.shard.to_string());
         labels.insert("pool", address.pool_name.clone());
         labels.insert("role", address.role.to_string());
+        labels.insert("index", address.address_index.to_string());
         labels.insert("database", address.database.to_string());
+        labels.insert("username", address.username.clone());

         Self::from_name(&format!("stats_{}", name), value, labels)
     }
@@ -241,9 +288,20 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {

         Self::from_name(&format!("pools_{}", name), value, labels)
     }
+
+    fn get_header(&self) -> String {
+        format!(
+            "\n# HELP {name} {help}\n# TYPE {name} {ty}",
+            name = format_args!("pgcat_{}", self.name),
+            help = self.help,
+            ty = self.ty,
+        )
+    }
 }

-async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hyper::http::Error> {
+async fn prometheus_stats(
+    request: Request<body::Incoming>,
+) -> Result<Response<Full<Bytes>>, hyper::http::Error> {
     match (request.method(), request.uri().path()) {
         (&Method::GET, "/metrics") => {
             let mut lines = Vec::new();
@@ -251,6 +309,7 @@ async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hype
             push_pool_stats(&mut lines);
             push_server_stats(&mut lines);
             push_database_stats(&mut lines);
+            lines.push("".to_string()); // Ensure to end the stats with a line terminator as required by the specification.

             Response::builder()
                 .header("content-type", "text/plain; version=0.0.4")
@@ -264,6 +323,7 @@ async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hype

 // Adds metrics shown in a SHOW STATS admin command.
 fn push_address_stats(lines: &mut Vec<String>) {
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
     for (_, pool) in get_all_pools() {
         for shard in 0..pool.shards() {
             for server in 0..pool.servers(shard) {
@@ -273,7 +333,10 @@ fn push_address_stats(lines: &mut Vec<String>) {
                 if let Some(prometheus_metric) =
                     PrometheusMetric::<u64>::from_address(address, &key, value)
                 {
-                    lines.push(prometheus_metric.to_string());
+                    grouped_metrics
+                        .entry(key)
+                        .or_default()
+                        .push(prometheus_metric);
                 } else {
                     debug!("Metric {} not implemented for {}", key, address.name());
                 }
@@ -281,33 +344,53 @@ fn push_address_stats(lines: &mut Vec<String>) {
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 // Adds relevant metrics shown in a SHOW POOLS admin command.
 fn push_pool_stats(lines: &mut Vec<String>) {
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
     let pool_stats = PoolStats::construct_pool_lookup();
     for (pool_id, stats) in pool_stats.iter() {
         for (name, value) in stats.clone() {
             if let Some(prometheus_metric) =
                 PrometheusMetric::<u64>::from_pool(pool_id.clone(), &name, value)
             {
-                lines.push(prometheus_metric.to_string());
+                grouped_metrics
+                    .entry(name)
+                    .or_default()
+                    .push(prometheus_metric);
             } else {
                 debug!("Metric {} not implemented for ({})", name, *pool_id);
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 // Adds relevant metrics shown in a SHOW DATABASES admin command.
 fn push_database_stats(lines: &mut Vec<String>) {
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u32>>> = HashMap::new();
     for (_, pool) in get_all_pools() {
         let pool_config = pool.settings.clone();
         for shard in 0..pool.shards() {
             for server in 0..pool.servers(shard) {
                 let address = pool.address(shard, server);
                 let pool_state = pool.pool_state(shard, server);

                 let metrics = vec![
                     ("pool_size", pool_config.user.pool_size),
                     ("current_connections", pool_state.connections),
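Grouping samples by metric name before emitting them means each `# HELP`/`# TYPE` header is written exactly once, followed by every labeled sample in that family — the shape the Prometheus text format expects. A sketch of that two-pass emit, with hypothetical sample lines:

```rust
use std::collections::HashMap;

// Sketch of the grouping pass: one HELP/TYPE header per family, then all samples.
fn emit_grouped(samples: Vec<(String, String)>) -> Vec<String> {
    let mut grouped: HashMap<String, Vec<String>> = HashMap::new();
    for (family, line) in samples {
        grouped.entry(family).or_default().push(line);
    }
    let mut lines = Vec::new();
    for (family, samples) in grouped {
        lines.push(format!(
            "\n# HELP pgcat_{family} ...\n# TYPE pgcat_{family} counter"
        ));
        lines.extend(samples); // e.g. pgcat_servers_query_count{shard="0"} 7
    }
    lines
}

fn main() {
    let out = emit_grouped(vec![
        ("servers_query_count".into(), r#"pgcat_servers_query_count{shard="0"} 7"#.into()),
        ("servers_query_count".into(), r#"pgcat_servers_query_count{shard="1"} 3"#.into()),
    ]);
    println!("{}", out.join("\n"));
}
```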
@@ -316,7 +399,10 @@ fn push_database_stats(lines: &mut Vec<String>) {
                 if let Some(prometheus_metric) =
                     PrometheusMetric::<u32>::from_database_info(address, key, value)
                 {
-                    lines.push(prometheus_metric.to_string());
+                    grouped_metrics
+                        .entry(key.to_string())
+                        .or_default()
+                        .push(prometheus_metric);
                 } else {
                     debug!("Metric {} not implemented for {}", key, address.name());
                 }
@@ -324,45 +410,73 @@ fn push_database_stats(lines: &mut Vec<String>) {
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 // Adds relevant metrics shown in a SHOW SERVERS admin command.
 fn push_server_stats(lines: &mut Vec<String>) {
     let server_stats = get_server_stats();
-    let mut server_stats_by_addresses = HashMap::<String, Arc<ServerStats>>::new();
+    let mut prom_stats = HashMap::<String, ServerPrometheusStats>::new();
     for (_, stats) in server_stats {
-        server_stats_by_addresses.insert(stats.address_name(), stats);
+        let entry = prom_stats
+            .entry(stats.address_name())
+            .or_insert(ServerPrometheusStats {
+                bytes_received: 0,
+                bytes_sent: 0,
+                transaction_count: 0,
+                query_count: 0,
+                error_count: 0,
+                active_count: 0,
+                idle_count: 0,
+                login_count: 0,
+                tested_count: 0,
+            });
+        entry.bytes_received += stats.bytes_received.load(Ordering::Relaxed);
+        entry.bytes_sent += stats.bytes_sent.load(Ordering::Relaxed);
+        entry.transaction_count += stats.transaction_count.load(Ordering::Relaxed);
+        entry.query_count += stats.query_count.load(Ordering::Relaxed);
+        entry.error_count += stats.error_count.load(Ordering::Relaxed);
+        match stats.state.load(Ordering::Relaxed) {
+            crate::stats::ServerState::Login => entry.login_count += 1,
+            crate::stats::ServerState::Active => entry.active_count += 1,
+            crate::stats::ServerState::Tested => entry.tested_count += 1,
+            crate::stats::ServerState::Idle => entry.idle_count += 1,
+        }
     }
+    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
     for (_, pool) in get_all_pools() {
         for shard in 0..pool.shards() {
             for server in 0..pool.servers(shard) {
                 let address = pool.address(shard, server);
-                if let Some(server_info) = server_stats_by_addresses.get(&address.name()) {
+                if let Some(server_info) = prom_stats.get(&address.name()) {
                     let metrics = [
-                        (
-                            "bytes_received",
-                            server_info.bytes_received.load(Ordering::Relaxed),
-                        ),
-                        ("bytes_sent", server_info.bytes_sent.load(Ordering::Relaxed)),
-                        (
-                            "transaction_count",
-                            server_info.transaction_count.load(Ordering::Relaxed),
-                        ),
-                        (
-                            "query_count",
-                            server_info.query_count.load(Ordering::Relaxed),
-                        ),
-                        (
-                            "error_count",
-                            server_info.error_count.load(Ordering::Relaxed),
-                        ),
+                        ("bytes_received", server_info.bytes_received),
+                        ("bytes_sent", server_info.bytes_sent),
+                        ("transaction_count", server_info.transaction_count),
+                        ("query_count", server_info.query_count),
+                        ("error_count", server_info.error_count),
+                        ("idle_count", server_info.idle_count),
+                        ("active_count", server_info.active_count),
+                        ("login_count", server_info.login_count),
+                        ("tested_count", server_info.tested_count),
+                        ("is_banned", if pool.is_banned(address) { 1 } else { 0 }),
+                        ("is_paused", if pool.paused() { 1 } else { 0 }),
                     ];
                     for (key, value) in metrics {
                         if let Some(prometheus_metric) =
                             PrometheusMetric::<u64>::from_server_info(address, key, value)
                         {
-                            lines.push(prometheus_metric.to_string());
+                            grouped_metrics
+                                .entry(key.to_string())
+                                .or_default()
+                                .push(prometheus_metric);
                         } else {
                             debug!("Metric {} not implemented for {}", key, address.name());
                         }
@@ -371,17 +485,46 @@ fn push_server_stats(lines: &mut Vec<String>) {
                 }
             }
         }
     }
+    for (_key, metrics) in grouped_metrics {
+        if !metrics.is_empty() {
+            lines.push(metrics[0].get_header());
+            for metric in metrics {
+                lines.push(metric.to_string());
+            }
+        }
+    }
 }

 pub async fn start_metric_server(http_addr: SocketAddr) {
-    let http_service_factory =
-        make_service_fn(|_conn| async { Ok::<_, hyper::Error>(service_fn(prometheus_stats)) });
-    let server = Server::bind(&http_addr).serve(http_service_factory);
+    let listener = TcpListener::bind(http_addr);
+    let listener = match listener.await {
+        Ok(listener) => listener,
+        Err(e) => {
+            error!("Failed to bind prometheus server to HTTP address: {}.", e);
+            return;
+        }
+    };
     info!(
         "Exposing prometheus metrics on http://{}/metrics.",
         http_addr
     );
-    if let Err(e) = server.await {
-        error!("Failed to run HTTP server: {}.", e);
+    loop {
+        let stream = match listener.accept().await {
+            Ok((stream, _)) => stream,
+            Err(e) => {
+                error!("Error accepting connection: {}", e);
+                continue;
+            }
+        };
+        let io = TokioIo::new(stream);
+
+        tokio::task::spawn(async move {
+            if let Err(err) = http1::Builder::new()
+                .serve_connection(io, service_fn(prometheus_stats))
+                .await
+            {
+                eprintln!("Error serving HTTP connection for metrics: {:?}", err);
+            }
+        });
     }
 }
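The rewrite above follows the hyper 0.14 → 1.x migration: hyper 1.x no longer bundles a `Server` or `make_service_fn`, so the caller owns the `TcpListener`, wraps each stream in `hyper_util`'s `TokioIo` adapter, and serves each connection on its own task. A stripped-down, self-contained sketch of the same pattern (the bind address and response body are placeholders):

```rust
use http_body_util::Full;
use hyper::body::{Bytes, Incoming};
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Request, Response};
use hyper_util::rt::TokioIo;
use tokio::net::TcpListener;

async fn hello(_req: Request<Incoming>) -> Result<Response<Full<Bytes>>, hyper::http::Error> {
    Response::builder().body(Full::new(Bytes::from("ok")))
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:9930").await?;
    loop {
        let (stream, _) = listener.accept().await?;
        let io = TokioIo::new(stream); // adapts tokio I/O to hyper's traits
        tokio::task::spawn(async move {
            // One HTTP/1 connection per task; hyper 1.x ships no Server type.
            let _ = http1::Builder::new()
                .serve_connection(io, service_fn(hello))
                .await;
        });
    }
}
```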
File diff suppressed because it is too large

src/scram.rs

@@ -79,12 +79,12 @@ impl ScramSha256 {
         let server_message = Message::parse(message)?;

         if !server_message.nonce.starts_with(&self.nonce) {
-            return Err(Error::ProtocolSyncError(format!("SCRAM")));
+            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
         }

         let salt = match general_purpose::STANDARD.decode(&server_message.salt) {
             Ok(salt) => salt,
-            Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
+            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
         };

         let salted_password = Self::hi(
@@ -166,9 +166,9 @@ impl ScramSha256 {
     pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
         let final_message = FinalMessage::parse(message)?;

-        let verifier = match general_purpose::STANDARD.decode(&final_message.value) {
+        let verifier = match general_purpose::STANDARD.decode(final_message.value) {
             Ok(verifier) => verifier,
-            Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
+            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
         };

         let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
@@ -230,14 +230,14 @@ impl Message {
             .collect::<Vec<String>>();

         if parts.len() != 3 {
-            return Err(Error::ProtocolSyncError(format!("SCRAM")));
+            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
         }

         let nonce = str::replace(&parts[0], "r=", "");
         let salt = str::replace(&parts[1], "s=", "");
         let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
             Ok(iterations) => iterations,
-            Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
+            Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
         };

         Ok(Message {
@@ -257,7 +257,7 @@ impl FinalMessage {
     /// Parse the server final validation message.
     pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
         if !message.starts_with(b"v=") || message.len() < 4 {
-            return Err(Error::ProtocolSyncError(format!("SCRAM")));
+            return Err(Error::ProtocolSyncError("SCRAM".to_string()));
         }

         Ok(FinalMessage {
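These mechanical replacements clear clippy's `useless_format` lint: `format!` with no interpolation just builds a `String` the expensive way, and under `-D warnings` the old form fails the build. A tiny reproduction:

```rust
// `cargo clippy` flags `old()` with warning: useless use of `format!`
// (clippy::useless_format); with -D warnings that warning becomes an error.
fn old() -> String {
    format!("SCRAM")
}

fn new() -> String {
    "SCRAM".to_string()
}

fn main() {
    assert_eq!(old(), new());
}
```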
src/server.rs

@@ -3,12 +3,14 @@
 use bytes::{Buf, BufMut, BytesMut};
 use fallible_iterator::FallibleIterator;
 use log::{debug, error, info, trace, warn};
+use lru::LruCache;
 use once_cell::sync::Lazy;
 use parking_lot::{Mutex, RwLock};
 use postgres_protocol::message;
-use std::collections::{BTreeSet, HashMap, HashSet};
+use std::collections::{HashMap, HashSet, VecDeque};
 use std::mem;
 use std::net::IpAddr;
+use std::num::NonZeroUsize;
 use std::sync::Arc;
 use std::time::SystemTime;
 use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, BufStream};
@@ -16,7 +18,7 @@ use tokio::net::TcpStream;
 use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore};
 use tokio_rustls::{client::TlsStream, TlsConnector};

-use crate::config::{get_config, get_prepared_statements_cache_size, Address, User};
+use crate::config::{get_config, Address, User};
 use crate::constants::*;
 use crate::dns_cache::{AddrSet, CACHED_RESOLVER};
 use crate::errors::{Error, ServerIdentifier};
@@ -197,12 +199,8 @@ impl ServerParameters {
             key = "DateStyle".to_string();
         };

-        if TRACKED_PARAMETERS.contains(&key) {
+        if TRACKED_PARAMETERS.contains(&key) || startup {
             self.parameters.insert(key, value);
-        } else {
-            if startup {
-                self.parameters.insert(key, value);
-            }
         }
     }

@@ -326,12 +324,16 @@ pub struct Server {
     log_client_parameter_status_changes: bool,

     /// Prepared statements
-    prepared_statements: BTreeSet<String>,
+    prepared_statement_cache: Option<LruCache<String, ()>>,
+
+    /// Prepared statement being currently registered on the server.
+    registering_prepared_statement: VecDeque<String>,
 }

 impl Server {
     /// Pretend to be the Postgres client and connect to the server given host, port and credentials.
     /// Perform the authentication and return the server in a ready for query state.
+    #[allow(clippy::too_many_arguments)]
     pub async fn startup(
         address: &Address,
         user: &User,
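A `prepared_statements_cache_size` of 0 disables per-connection caching entirely, which the `Option<LruCache<..>>` field encodes directly (the `lru` crate's `LruCache::new` requires a `NonZeroUsize`). A minimal sketch of the same construction, assuming the `lru` crate:

```rust
use lru::LruCache;
use std::num::NonZeroUsize;

// cache_size == 0 means "caching disabled", so the cache is an Option.
fn build_cache(cache_size: usize) -> Option<LruCache<String, ()>> {
    match cache_size {
        0 => None,
        n => Some(LruCache::new(NonZeroUsize::new(n).unwrap())),
    }
}

fn main() {
    assert!(build_cache(0).is_none());
    let mut cache = build_cache(2).unwrap();
    cache.put("PGCAT_1".to_string(), ());
    cache.put("PGCAT_2".to_string(), ());
    cache.put("PGCAT_3".to_string(), ()); // evicts PGCAT_1 (least recently used)
    assert!(cache.get("PGCAT_1").is_none());
}
```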
@@ -341,6 +343,7 @@ impl Server {
         auth_hash: Arc<RwLock<Option<String>>>,
         cleanup_connections: bool,
         log_client_parameter_status_changes: bool,
+        prepared_statement_cache_size: usize,
     ) -> Result<Server, Error> {
         let cached_resolver = CACHED_RESOLVER.load();
         let mut addr_set: Option<AddrSet> = None;
@@ -440,10 +443,7 @@ impl Server {

                     // Something else?
                     m => {
-                        return Err(Error::SocketError(format!(
-                            "Unknown message: {}",
-                            m as char
-                        )));
+                        return Err(Error::SocketError(format!("Unknown message: {}", { m })));
                     }
                 }
             } else {
@@ -461,26 +461,20 @@ impl Server {
             None => &user.username,
         };

-        let password = match user.server_password {
-            Some(ref server_password) => Some(server_password),
-            None => match user.password {
-                Some(ref password) => Some(password),
-                None => None,
-            },
+        let password = match user.server_password.as_ref() {
+            Some(server_password) => Some(server_password),
+            None => user.password.as_ref(),
         };

         startup(&mut stream, username, database).await?;

         let mut process_id: i32 = 0;
         let mut secret_key: i32 = 0;
-        let server_identifier = ServerIdentifier::new(username, &database);
+        let server_identifier = ServerIdentifier::new(username, database);

         // We'll be handling multiple packets, but they will all be structured the same.
         // We'll loop here until this exchange is complete.
-        let mut scram: Option<ScramSha256> = match password {
-            Some(password) => Some(ScramSha256::new(password)),
-            None => None,
-        };
+        let mut scram: Option<ScramSha256> = password.map(|password| ScramSha256::new(password));

         let mut server_parameters = ServerParameters::new();

@@ -725,7 +719,7 @@ impl Server {
                 }
             };

-            let fields = match PgErrorMsg::parse(error) {
+            let fields = match PgErrorMsg::parse(&error) {
                 Ok(f) => f,
                 Err(err) => {
                     return Err(err);
@@ -830,7 +824,13 @@ impl Server {
             },
             cleanup_connections,
             log_client_parameter_status_changes,
-            prepared_statements: BTreeSet::new(),
+            prepared_statement_cache: match prepared_statement_cache_size {
+                0 => None,
+                _ => Some(LruCache::new(
+                    NonZeroUsize::new(prepared_statement_cache_size).unwrap(),
+                )),
+            },
+            registering_prepared_statement: VecDeque::new(),
         };

         return Ok(server);
@@ -882,7 +882,7 @@ impl Server {
         self.mirror_send(messages);
         self.stats().data_sent(messages.len());

-        match write_all_flush(&mut self.stream, &messages).await {
+        match write_all_flush(&mut self.stream, messages).await {
             Ok(_) => {
                 // Successfully sent to server
                 self.last_activity = SystemTime::now();
@@ -960,7 +960,6 @@ impl Server {

                         // There is no more data available from the server.
                         self.data_available = false;
-
                         break;
                     }

@@ -969,6 +968,37 @@ impl Server {
                     if self.in_copy_mode {
                         self.in_copy_mode = false;
                     }
+
+                    // Remove the prepared statement from the cache, it has a syntax error or something else bad happened.
+                    if let Some(prepared_stmt_name) =
+                        self.registering_prepared_statement.pop_front()
+                    {
+                        if let Some(ref mut cache) = self.prepared_statement_cache {
+                            if let Some(_removed) = cache.pop(&prepared_stmt_name) {
+                                debug!(
+                                    "Removed {} from prepared statement cache",
+                                    prepared_stmt_name
+                                );
+                            } else {
+                                // Shouldn't happen.
+                                debug!("Prepared statement {} was not cached", prepared_stmt_name);
+                            }
+                        }
+                    }
+
+                    if self.prepared_statement_cache.is_some() {
+                        let error_message = PgErrorMsg::parse(&message)?;
+                        if error_message.message == "cached plan must not change result type" {
+                            warn!("Server {:?} changed schema, dropping connection to clean up prepared statements", self.address);
+                            // This will still result in an error to the client, but this server connection will drop all cached prepared statements
+                            // so that any new queries will be re-prepared
+                            // TODO: Other ideas to solve errors when there are DDL changes after a statement has been prepared
+                            // - Recreate entire connection pool to force recreation of all server connections
+                            // - Clear the ConnectionPool's statement cache so that new statement names are generated
+                            // - Implement a retry (re-prepare) so the client doesn't see an error
+                            self.cleanup_state.needs_cleanup_prepare = true;
+                        }
+                    }
                 }

                 // CommandComplete
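Postgres raises `cached plan must not change result type` when a statement prepared before a DDL change is executed after the table's row type has changed — exactly the situation the new error handler detects. A hypothetical reproduction using the `tokio-postgres` crate (connection setup and the `users` table are assumed):

```rust
use tokio_postgres::{Client, Error};

// Hypothetical reproduction: prepare, change the schema, then reuse the statement.
async fn reproduce(client: &Client) -> Result<(), Error> {
    let stmt = client.prepare("SELECT * FROM users").await?;
    client
        .execute("ALTER TABLE users ADD COLUMN nickname TEXT", &[])
        .await?;
    // The server-side plan now returns a different row type, so this fails with:
    // ERROR: cached plan must not change result type
    let _rows = client.query(&stmt, &[]).await?;
    Ok(())
}
```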
@@ -1058,6 +1088,11 @@ impl Server {
                 // Buffer until ReadyForQuery shows up, so don't exit the loop yet.
                 'c' => (),

+                // Parse completed successfully
+                '1' => {
+                    self.registering_prepared_statement.pop_front();
+                }
+
                 // Anything else, e.g. errors, notices, etc.
                 // Keep buffering until ReadyForQuery shows up.
                 _ => (),
@@ -1079,117 +1114,103 @@ impl Server {
         Ok(bytes)
     }

-    /// Add the prepared statement to being tracked by this server.
-    /// The client is processing data that will create a prepared statement on this server.
-    pub fn will_prepare(&mut self, name: &str) {
-        debug!("Will prepare `{}`", name);
-
-        self.prepared_statements.insert(name.to_string());
-        self.stats.prepared_cache_add();
-    }
-
-    /// Check if we should prepare a statement on the server.
-    pub fn should_prepare(&self, name: &str) -> bool {
-        let should_prepare = !self.prepared_statements.contains(name);
-
-        debug!("Should prepare `{}`: {}", name, should_prepare);
-
-        if should_prepare {
-            self.stats.prepared_cache_miss();
-        } else {
-            self.stats.prepared_cache_hit();
-        }
-
-        should_prepare
-    }
+    // Determines if the server already has a prepared statement with the given name
+    // Increments the prepared statement cache hit counter
+    pub fn has_prepared_statement(&mut self, name: &str) -> bool {
+        let cache = match &mut self.prepared_statement_cache {
+            Some(cache) => cache,
+            None => return false,
+        };
+
+        let has_it = cache.get(name).is_some();
+        if has_it {
+            self.stats.prepared_cache_hit();
+        } else {
+            self.stats.prepared_cache_miss();
+        }
+
+        has_it
+    }

-    /// Create a prepared statement on the server.
-    pub async fn prepare(&mut self, parse: &Parse) -> Result<(), Error> {
-        debug!("Preparing `{}`", parse.name);
-
-        let bytes: BytesMut = parse.try_into()?;
-        self.send(&bytes).await?;
-        self.send(&flush()).await?;
-
-        // Read and discard ParseComplete (B)
-        match read_message(&mut self.stream).await {
-            Ok(_) => (),
-            Err(err) => {
-                self.bad = true;
-                return Err(err);
-            }
-        }
-
-        self.prepared_statements.insert(parse.name.to_string());
-        self.stats.prepared_cache_add();
-
-        debug!("Prepared `{}`", parse.name);
-
-        Ok(())
-    }
-
-    /// Maintain adequate cache size on the server.
-    pub async fn maintain_cache(&mut self) -> Result<(), Error> {
-        debug!("Cache maintenance run");
-
-        let max_cache_size = get_prepared_statements_cache_size();
-        let mut names = Vec::new();
-
-        while self.prepared_statements.len() >= max_cache_size {
-            // The prepared statmeents are alphanumerically sorted by the BTree.
-            // FIFO.
-            if let Some(name) = self.prepared_statements.pop_last() {
-                names.push(name);
-            }
-        }
-
-        if !names.is_empty() {
-            self.deallocate(names).await?;
-        }
-
-        Ok(())
-    }
+    fn add_prepared_statement_to_cache(&mut self, name: &str) -> Option<String> {
+        let cache = match &mut self.prepared_statement_cache {
+            Some(cache) => cache,
+            None => return None,
+        };
+
+        self.stats.prepared_cache_add();
+
+        // If we evict something, we need to close it on the server
+        if let Some((evicted_name, _)) = cache.push(name.to_string(), ()) {
+            if evicted_name != name {
+                debug!(
+                    "Evicted prepared statement {} from cache, replaced with {}",
+                    evicted_name, name
+                );
+                return Some(evicted_name);
+            }
+        };
+
+        None
+    }

-    /// Remove the prepared statement from being tracked by this server.
-    /// The client is processing data that will cause the server to close the prepared statement.
-    pub fn will_close(&mut self, name: &str) {
-        debug!("Will close `{}`", name);
-
-        self.prepared_statements.remove(name);
-    }
+    fn remove_prepared_statement_from_cache(&mut self, name: &str) {
+        let cache = match &mut self.prepared_statement_cache {
+            Some(cache) => cache,
+            None => return,
+        };
+
+        self.stats.prepared_cache_remove();
+        cache.pop(name);
+    }

-    /// Close a prepared statement on the server.
-    pub async fn deallocate(&mut self, names: Vec<String>) -> Result<(), Error> {
-        for name in &names {
-            debug!("Deallocating prepared statement `{}`", name);
-
-            let close = Close::new(name);
-            let bytes: BytesMut = close.try_into()?;
-
-            self.send(&bytes).await?;
-        }
-
-        if !names.is_empty() {
-            self.send(&flush()).await?;
-        }
-
-        // Read and discard CloseComplete (3)
-        for name in &names {
-            match read_message(&mut self.stream).await {
-                Ok(_) => {
-                    self.prepared_statements.remove(name);
-                    self.stats.prepared_cache_remove();
-                    debug!("Closed `{}`", name);
-                }
-
-                Err(err) => {
-                    self.bad = true;
-                    return Err(err);
-                }
-            };
-        }
-
-        Ok(())
+    pub async fn register_prepared_statement(
+        &mut self,
+        parse: &Parse,
+        should_send_parse_to_server: bool,
+    ) -> Result<(), Error> {
+        if !self.has_prepared_statement(&parse.name) {
+            self.registering_prepared_statement
+                .push_back(parse.name.clone());
+
+            let mut bytes = BytesMut::new();
+
+            if should_send_parse_to_server {
+                let parse_bytes: BytesMut = parse.try_into()?;
+                bytes.extend_from_slice(&parse_bytes);
+            }
+
+            // If we evict something, we need to close it on the server
+            // We do this by adding it to the messages we're sending to the server before the sync
+            if let Some(evicted_name) = self.add_prepared_statement_to_cache(&parse.name) {
+                self.remove_prepared_statement_from_cache(&evicted_name);
+                let close_bytes: BytesMut = Close::new(&evicted_name).try_into()?;
+                bytes.extend_from_slice(&close_bytes);
+            };
+
+            // If we have a parse or close we need to send to the server, send them and sync
+            if !bytes.is_empty() {
+                bytes.extend_from_slice(&sync());
+
+                self.send(&bytes).await?;
+
+                loop {
+                    self.recv(None).await?;
+
+                    if !self.is_data_available() {
+                        break;
+                    }
+                }
+            }
+        };
+
+        // If it's not there, something went bad, I'm guessing bad syntax or permissions error
+        // on the server.
+        if !self.has_prepared_statement(&parse.name) {
+            Err(Error::PreparedStatementError)
+        } else {
+            Ok(())
+        }
     }

     /// If the server is still inside a transaction.
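`register_prepared_statement` batches everything into one network round trip: the optional `Parse`, a `Close` for any statement the LRU evicted, and a trailing `Sync`, after which the responses are drained. A schematic of the message assembly — the real `Parse`/`Close`/`sync` are PostgreSQL wire-protocol encodings inside pgcat, stubbed here as labeled strings:

```rust
// Schematic only: real Parse/Close/Sync are PostgreSQL wire-protocol messages.
fn assemble(send_parse: bool, evicted: Option<&str>, statement: &str) -> Vec<String> {
    let mut batch = Vec::new();
    if send_parse {
        batch.push(format!("Parse({statement})"));
    }
    if let Some(name) = evicted {
        // Close the evicted statement on the same round trip.
        batch.push(format!("Close({name})"));
    }
    if !batch.is_empty() {
        batch.push("Sync".to_string()); // one flush point for the whole batch
    }
    batch
}

fn main() {
    let batch = assemble(true, Some("PGCAT_1"), "PGCAT_9");
    assert_eq!(batch, ["Parse(PGCAT_9)", "Close(PGCAT_1)", "Sync"]);
}
```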
@@ -1199,6 +1220,7 @@ impl Server {
         self.in_transaction
     }

+    /// Currently copying data from client to server or vice-versa.
     pub fn in_copy_mode(&self) -> bool {
         self.in_copy_mode
     }
@@ -1257,8 +1279,8 @@ impl Server {
     }

     /// Indicate that this server connection cannot be re-used and must be discarded.
-    pub fn mark_bad(&mut self) {
-        error!("Server {:?} marked bad", self.address);
+    pub fn mark_bad(&mut self, reason: &str) {
+        error!("Server {:?} marked bad, reason: {}", self.address, reason);
         self.bad = true;
     }

@@ -1324,6 +1346,10 @@ impl Server {

         if self.cleanup_state.needs_cleanup_prepare {
             reset_string.push_str("DEALLOCATE ALL;");
+            // Since we deallocated all prepared statements, we need to clear the cache
+            if let Some(cache) = &mut self.prepared_statement_cache {
+                cache.clear();
+            }
         };

         self.query(&reset_string).await?;
@@ -1359,16 +1385,14 @@ impl Server {
     }

     pub fn mirror_send(&mut self, bytes: &BytesMut) {
-        match self.mirror_manager.as_mut() {
-            Some(manager) => manager.send(bytes),
-            None => (),
+        if let Some(manager) = self.mirror_manager.as_mut() {
+            manager.send(bytes)
         }
     }

     pub fn mirror_disconnect(&mut self) {
-        match self.mirror_manager.as_mut() {
-            Some(manager) => manager.disconnect(),
-            None => (),
+        if let Some(manager) = self.mirror_manager.as_mut() {
+            manager.disconnect()
         }
     }

@@ -1391,13 +1415,14 @@ impl Server {
             Arc::new(RwLock::new(None)),
             true,
             false,
+            0,
         )
         .await?;
         debug!("Connected!, sending query.");
         server.send(&simple_query(query)).await?;
         let mut message = server.recv(None).await?;

-        Ok(parse_query_message(&mut message).await?)
+        parse_query_message(&mut message).await
     }
 }

@@ -14,11 +14,11 @@ pub enum ShardingFunction {
     Sha1,
 }

-impl ToString for ShardingFunction {
-    fn to_string(&self) -> String {
-        match *self {
-            ShardingFunction::PgBigintHash => "pg_bigint_hash".to_string(),
-            ShardingFunction::Sha1 => "sha1".to_string(),
+impl std::fmt::Display for ShardingFunction {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"),
+            ShardingFunction::Sha1 => write!(f, "sha1"),
         }
     }
 }
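Implementing `Display` instead of `ToString` is the idiomatic fix clippy nudges toward: the standard library's blanket `impl<T: Display> ToString for T` keeps `.to_string()` working, and the type becomes usable directly in `format!`/`{}`. A minimal sketch:

```rust
use std::fmt;

enum ShardingFunction {
    PgBigintHash,
    Sha1,
}

impl fmt::Display for ShardingFunction {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"),
            ShardingFunction::Sha1 => write!(f, "sha1"),
        }
    }
}

fn main() {
    // The blanket impl<T: Display> ToString for T keeps .to_string() working,
    // and the type can now be used directly in format strings.
    assert_eq!(ShardingFunction::Sha1.to_string(), "sha1");
    println!("function = {}", ShardingFunction::PgBigintHash);
}
```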
@@ -64,7 +64,7 @@ impl Sharder {
     fn sha1(&self, key: i64) -> usize {
         let mut hasher = Sha1::new();

-        hasher.update(&key.to_string().as_bytes());
+        hasher.update(key.to_string().as_bytes());

         let result = hasher.finalize();

@@ -202,10 +202,10 @@ mod test {
     #[test]
     fn test_sha1_hash() {
         let sharder = Sharder::new(12, ShardingFunction::Sha1);
-        let ids = vec![
+        let ids = [
             0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
         ];
-        let shards = vec![
+        let shards = [
             4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
         ];

@@ -41,6 +41,11 @@ pub struct ClientStats {
     /// Maximum time spent waiting for a connection from pool, measures in microseconds
     pub max_wait_time: Arc<AtomicU64>,

+    // Time when the client started waiting for a connection from pool, measured in microseconds
+    // We use connect_time as the reference point for this value
+    // U64 can represent ~5850 centuries in microseconds, so we should be fine
+    pub wait_start_us: Arc<AtomicU64>,
+
     /// Current state of the client
     pub state: Arc<AtomicClientState>,

@@ -64,6 +69,7 @@ impl Default for ClientStats {
             pool_name: String::new(),
             total_wait_time: Arc::new(AtomicU64::new(0)),
             max_wait_time: Arc::new(AtomicU64::new(0)),
+            wait_start_us: Arc::new(AtomicU64::new(0)),
             state: Arc::new(AtomicClientState::new(ClientState::Idle)),
             transaction_count: Arc::new(AtomicU64::new(0)),
             query_count: Arc::new(AtomicU64::new(0)),
@@ -111,6 +117,9 @@ impl ClientStats {

     /// Reports a client is waiting for a connection
     pub fn waiting(&self) {
+        let wait_start = self.connect_time.elapsed().as_micros() as u64;
+
+        self.wait_start_us.store(wait_start, Ordering::Relaxed);
         self.state.store(ClientState::Waiting, Ordering::Relaxed);
     }

@@ -122,6 +131,13 @@ impl ClientStats {
     /// Reports a client has failed to obtain a connection from a connection pool
     pub fn checkout_error(&self) {
         self.state.store(ClientState::Idle, Ordering::Relaxed);
+        self.update_wait_times();
+    }
+
+    /// Reports a client has succeeded in obtaining a connection from a connection pool
+    pub fn checkout_success(&self) {
+        self.state.store(ClientState::Active, Ordering::Relaxed);
+        self.update_wait_times();
     }

     /// Reports a client has had the server assigned to it be banned
@@ -130,12 +146,26 @@ impl ClientStats {
         self.error_count.fetch_add(1, Ordering::Relaxed);
     }

-    /// Reporters the time spent by a client waiting to get a healthy connection from the pool
-    pub fn checkout_time(&self, microseconds: u64) {
+    fn update_wait_times(&self) {
+        if self.wait_start_us.load(Ordering::Relaxed) == 0 {
+            return;
+        }
+
+        let wait_time_us = self.get_current_wait_time_us();
         self.total_wait_time
-            .fetch_add(microseconds, Ordering::Relaxed);
+            .fetch_add(wait_time_us, Ordering::Relaxed);
         self.max_wait_time
-            .fetch_max(microseconds, Ordering::Relaxed);
+            .fetch_max(wait_time_us, Ordering::Relaxed);
+        self.wait_start_us.store(0, Ordering::Relaxed);
+    }
+
+    pub fn get_current_wait_time_us(&self) -> u64 {
+        let wait_start_us = self.wait_start_us.load(Ordering::Relaxed);
+        let microseconds_since_connection_epoch = self.connect_time.elapsed().as_micros() as u64;
+        if wait_start_us == 0 || microseconds_since_connection_epoch < wait_start_us {
+            return 0;
+        }
+        microseconds_since_connection_epoch - wait_start_us
     }

     /// Report a query executed by a client against a server
|
|||||||
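The change above is worth spelling out: rather than recording a finished wait duration at checkout time, the client now stores the instant it began waiting, expressed as microseconds since its own connect_time epoch. Zero doubles as a "not waiting" sentinel, which is what lets in-progress waits become observable. A minimal standalone sketch of the same idea; the struct and names here are illustrative, not pgcat's actual types:

use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;

// Illustrative only: epoch-based wait tracking with a single AtomicU64.
// 0 means "not currently waiting"; any other value is the wait start,
// in microseconds since `connect_time`.
struct WaitTracker {
    connect_time: Instant,
    wait_start_us: AtomicU64,
    max_wait_time: AtomicU64,
}

impl WaitTracker {
    fn waiting(&self) {
        let now_us = self.connect_time.elapsed().as_micros() as u64;
        self.wait_start_us.store(now_us, Ordering::Relaxed);
    }

    fn current_wait_us(&self) -> u64 {
        let start = self.wait_start_us.load(Ordering::Relaxed);
        let now_us = self.connect_time.elapsed().as_micros() as u64;
        if start == 0 || now_us < start {
            0
        } else {
            now_us - start
        }
    }

    // Called on both checkout success and checkout error, mirroring
    // update_wait_times() above.
    fn checkout_finished(&self) {
        let waited = self.current_wait_us();
        self.max_wait_time.fetch_max(waited, Ordering::Relaxed);
        self.wait_start_us.store(0, Ordering::Relaxed); // stop the clock
    }
}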
@@ -64,8 +64,11 @@ impl PoolStats {
            ClientState::Idle => pool_stats.cl_idle += 1,
            ClientState::Waiting => pool_stats.cl_waiting += 1,
        }
        let wait_start_us = client.wait_start_us.load(Ordering::Relaxed);
        if wait_start_us > 0 {
            let wait_time_us = client.get_current_wait_time_us();
            pool_stats.maxwait = std::cmp::max(pool_stats.maxwait, wait_time_us);
        }
    }
    None => debug!("Client from an obsolete pool"),
}
@@ -86,11 +89,11 @@ impl PoolStats {
        }
    }

    map
}

pub fn generate_header() -> Vec<(&'static str, DataType)> {
    vec![
        ("database", DataType::Text),
        ("user", DataType::Text),
        ("pool_mode", DataType::Text),
@@ -105,11 +108,11 @@ impl PoolStats {
        ("sv_login", DataType::Numeric),
        ("maxwait", DataType::Numeric),
        ("maxwait_us", DataType::Numeric),
    ]
}

pub fn generate_row(&self) -> Vec<String> {
    vec![
        self.identifier.db.clone(),
        self.identifier.user.clone(),
        self.mode.to_string(),
@@ -124,7 +127,7 @@ impl PoolStats {
        self.sv_login.to_string(),
        (self.maxwait / 1_000_000).to_string(),
        (self.maxwait % 1_000_000).to_string(),
    ]
}
}
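A quick note on the two maxwait columns emitted by generate_row() above, since the split is easy to misread: both come from one microsecond total, PgBouncer-style, with maxwait holding whole seconds and maxwait_us the sub-second remainder. A standalone sketch of the arithmetic:

// Illustrative only: deriving the SHOW POOLS maxwait / maxwait_us pair
// from a single microsecond counter.
fn split_maxwait(total_us: u64) -> (u64, u64) {
    (total_us / 1_000_000, total_us % 1_000_000)
}

fn main() {
    // A 2.345678 s max wait reports as maxwait = 2, maxwait_us = 345678.
    assert_eq!(split_maxwait(2_345_678), (2, 345_678));
}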
@@ -49,6 +49,7 @@ pub struct ServerStats {
    pub error_count: Arc<AtomicU64>,
    pub prepared_hit_count: Arc<AtomicU64>,
    pub prepared_miss_count: Arc<AtomicU64>,
    pub prepared_eviction_count: Arc<AtomicU64>,
    pub prepared_cache_size: Arc<AtomicU64>,
}

@@ -68,6 +69,7 @@ impl Default for ServerStats {
        reporter: get_reporter(),
        prepared_hit_count: Arc::new(AtomicU64::new(0)),
        prepared_miss_count: Arc::new(AtomicU64::new(0)),
        prepared_eviction_count: Arc::new(AtomicU64::new(0)),
        prepared_cache_size: Arc::new(AtomicU64::new(0)),
    }
}
@@ -221,6 +223,7 @@ impl ServerStats {
    }

    pub fn prepared_cache_remove(&self) {
        self.prepared_eviction_count.fetch_add(1, Ordering::Relaxed);
        self.prepared_cache_size.fetch_sub(1, Ordering::Relaxed);
    }
}
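For context on prepared_cache_remove(): the new eviction counter increments whenever a statement is dropped from a server's bounded prepared-statement cache, while the size gauge decrements. A hedged sketch of how such a counter pairs with a capacity-limited cache; the real cache in pgcat is more elaborate, and these names are illustrative:

use std::collections::VecDeque;

// Illustrative only: a bounded FIFO cache that counts evictions the way
// prepared_eviction_count does above.
struct PreparedCache {
    capacity: usize,
    entries: VecDeque<String>, // statement names, oldest first
    evictions: u64,
}

impl PreparedCache {
    fn insert(&mut self, name: String) {
        if self.entries.len() == self.capacity {
            self.entries.pop_front(); // evict the oldest statement
            self.evictions += 1;      // what prepared_cache_remove() records
        }
        self.entries.push_back(name);
    }
}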
start_test_env.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
GREEN="\033[0;32m"
RED="\033[0;31m"
BLUE="\033[0;34m"
RESET="\033[0m"


cd tests/docker/
docker compose kill main || true
docker compose build main
docker compose down
docker compose up -d
# wait for the container to start
while ! docker compose exec main ls; do
  echo "Waiting for test environment to start"
  sleep 1
done
echo "==================================="
docker compose exec -e LOG_LEVEL=error -d main toxiproxy-server
docker compose exec --workdir /app main cargo build
docker compose exec -d --workdir /app main ./target/debug/pgcat ./.circleci/pgcat.toml
docker compose exec --workdir /app/tests/ruby main bundle install
docker compose exec --workdir /app/tests/python main pip3 install -r requirements.txt
echo "Interactive test environment ready"
echo "To run integration tests, you can use the following commands:"
echo -e "  ${BLUE}Ruby:   ${RED}cd /app/tests/ruby && bundle exec ruby tests.rb --format documentation${RESET}"
echo -e "  ${BLUE}Python: ${RED}cd /app/ && pytest ${RESET}"
echo -e "  ${BLUE}Rust:   ${RED}cd /app/tests/rust && cargo run ${RESET}"
echo -e "  ${BLUE}Go:     ${RED}cd /app/tests/go && /usr/local/go/bin/go test${RESET}"
echo "The source code for the tests is linked directly into the container, so you can modify the code and run the tests again"
echo "You can rebuild PgCat from within the container by running"
echo -e "  ${GREEN}cargo build${RESET}"
echo "and then run the tests again"
echo "==================================="
docker compose exec --workdir /app/tests main bash
@@ -8,3 +8,6 @@ RUN rustup component add llvm-tools-preview
RUN sudo gem install bundler
RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i toxiproxy-2.4.0.deb
RUN wget -O go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    sudo tar -C /usr/local -xzf go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    rm go1.21.3.linux-$(dpkg --print-architecture).tar.gz
@@ -1,4 +1,3 @@
services:
  pg1:
    image: postgres:14
@@ -48,6 +47,8 @@ services:
  main:
    build: .
    command: ["bash", "/app/tests/docker/run.sh"]
    environment:
      - INTERACTIVE_TEST_ENVIRONMENT=true
    volumes:
      - ../../:/app/
      - /app/target/
@@ -5,6 +5,38 @@ rm /app/*.profraw || true
rm /app/pgcat.profdata || true
rm -rf /app/cov || true

# Prepares the interactive test environment
#
if [ -n "$INTERACTIVE_TEST_ENVIRONMENT" ]; then
  ports=(5432 7432 8432 9432 10432)
  for port in "${ports[@]}"; do
    is_it_up=0
    attempts=0
    while [ $is_it_up -eq 0 ]; do
      PGPASSWORD=postgres psql -h 127.0.0.1 -p $port -U postgres -c '\q' > /dev/null 2>&1
      if [ $? -eq 0 ]; then
        echo "PostgreSQL on port $port is up."
        is_it_up=1
      else
        attempts=$((attempts+1))
        if [ $attempts -gt 10 ]; then
          echo "PostgreSQL on port $port is down, giving up."
          exit 1
        fi
        echo "PostgreSQL on port $port is down, waiting for it to start."
        sleep 1
      fi
    done
  done
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  sleep 100000000000000000
  exit 0
fi

export LLVM_PROFILE_FILE="/app/pgcat-%m-%p.profraw"
export RUSTC_BOOTSTRAP=1
export CARGO_INCREMENTAL=0
tests/go/go.mod (new file, 5 lines)
@@ -0,0 +1,5 @@
module pgcat

go 1.21

require github.com/lib/pq v1.10.9

tests/go/go.sum (new file, 2 lines)
@@ -0,0 +1,2 @@
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
tests/go/pgcat.toml (new file, 162 lines)
@@ -0,0 +1,162 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = "${PORT}"

# Whether to enable the prometheus exporter or not.
enable_prometheus_exporter = true

# Port on which the prometheus exporter listens.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 1000

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep connections available for immediate re-use, without running a healthcheck query on them
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# If we should log client connections
log_client_connections = false

# If we should log client disconnections
log_client_disconnections = false

# Reload config automatically if it changes.
autoreload = 15000

server_round_robin = false

# TLS
tls_certificate = "../../.circleci/server.cert"
tls_private_key = "../../.circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat)
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
admin_username = "admin_user"
admin_password = "admin_pass"

# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as database name when connecting
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Prepared statements cache size.
prepared_statements_cache_size = 500

# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 5
statement_timeout = 0


[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
query_parser_read_write_splitting = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
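One detail in this config that is easy to miss: port = "${PORT}" is not a valid literal port, it is a placeholder the Go test harness substitutes before launching pgcat (setup.go later in this diff does this with strings.Replace). The equivalent templating step, sketched in Rust with illustrative names:

// Illustrative only: render the "${PORT}" placeholder into a real port
// before writing the config to disk for pgcat.
fn render_config(template: &str, port: u16) -> String {
    template.replace("\"${PORT}\"", &port.to_string())
}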
tests/go/prepared_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package pgcat

import (
	"context"
	"database/sql"
	"fmt"
	_ "github.com/lib/pq"
	"testing"
)

func Test(t *testing.T) {
	t.Cleanup(setup(t))
	t.Run("Named parameterized prepared statement works", namedParameterizedPreparedStatement)
	t.Run("Unnamed parameterized prepared statement works", unnamedParameterizedPreparedStatement)
}

func namedParameterizedPreparedStatement(t *testing.T) {
	db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
	if err != nil {
		t.Fatalf("could not open connection: %+v", err)
	}

	stmt, err := db.Prepare("SELECT $1")

	if err != nil {
		t.Fatalf("could not prepare: %+v", err)
	}

	for i := 0; i < 100; i++ {
		rows, err := stmt.Query(1)
		if err != nil {
			t.Fatalf("could not query: %+v", err)
		}
		_ = rows.Close()
	}
}

func unnamedParameterizedPreparedStatement(t *testing.T) {
	db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
	if err != nil {
		t.Fatalf("could not open connection: %+v", err)
	}

	for i := 0; i < 100; i++ {
		// Under the hood QueryContext generates an unnamed parameterized prepared statement
		rows, err := db.QueryContext(context.Background(), "SELECT $1", 1)
		if err != nil {
			t.Fatalf("could not query: %+v", err)
		}
		_ = rows.Close()
	}
}
tests/go/setup.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package pgcat

import (
	"context"
	"database/sql"
	_ "embed"
	"fmt"
	"math/rand"
	"os"
	"os/exec"
	"strings"
	"testing"
	"time"
)

//go:embed pgcat.toml
var pgcatCfg string

var port = rand.Intn(32760-20000) + 20000

func setup(t *testing.T) func() {
	cfg, err := os.CreateTemp("/tmp", "pgcat_cfg_*.toml")
	if err != nil {
		t.Fatalf("could not create temp file: %+v", err)
	}

	pgcatCfg = strings.Replace(pgcatCfg, "\"${PORT}\"", fmt.Sprintf("%d", port), 1)

	_, err = cfg.Write([]byte(pgcatCfg))
	if err != nil {
		t.Fatalf("could not write temp file: %+v", err)
	}

	commandPath := "../../target/debug/pgcat"
	if os.Getenv("CARGO_TARGET_DIR") != "" {
		commandPath = os.Getenv("CARGO_TARGET_DIR") + "/debug/pgcat"
	}

	cmd := exec.Command(commandPath, cfg.Name())
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	go func() {
		err = cmd.Run()
		if err != nil {
			t.Errorf("could not run pgcat: %+v", err)
		}
	}()

	deadline, cancelFunc := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer cancelFunc()
	for {
		select {
		case <-deadline.Done():
			break
		case <-time.After(50 * time.Millisecond):
			db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=pgcat user=admin_user password=admin_pass sslmode=disable", port))
			if err != nil {
				continue
			}
			rows, err := db.QueryContext(deadline, "SHOW STATS")
			if err != nil {
				continue
			}
			_ = rows.Close()
			_ = db.Close()
			break
		}
		break
	}

	return func() {
		err := cmd.Process.Signal(os.Interrupt)
		if err != nil {
			t.Fatalf("could not interrupt pgcat: %+v", err)
		}
		err = os.Remove(cfg.Name())
		if err != nil {
			t.Fatalf("could not remove temp file: %+v", err)
		}
	}
}
@@ -36,4 +36,4 @@ SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
SET SERVER ROLE TO 'replica';

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
@@ -1,2 +1,3 @@
pytest
psycopg2==2.9.3
psutil==5.9.1
tests/python/test_auth.py (new file, 71 lines)
@@ -0,0 +1,71 @@
import utils
import signal


class TestTrustAuth:
    @classmethod
    def setup_method(cls):
        config = """
[general]
host = "0.0.0.0"
port = 6432
admin_username = "admin_user"
admin_password = ""
admin_auth_type = "trust"

[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
auth_type = "trust"
pool_size = 10
min_pool_size = 1
pool_mode = "transaction"

[pools.sharded_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
]
database = "shard0"
"""
        utils.pgcat_generic_start(config)

    @classmethod
    def teardown_method(self):
        utils.pg_cat_send_signal(signal.SIGTERM)

    def test_admin_trust_auth(self):
        conn, cur = utils.connect_db_trust(admin=True)
        cur.execute("SHOW POOLS")
        res = cur.fetchall()
        print(res)
        utils.cleanup_conn(conn, cur)

    def test_normal_trust_auth(self):
        conn, cur = utils.connect_db_trust(autocommit=False)
        cur.execute("SELECT 1")
        res = cur.fetchall()
        print(res)
        utils.cleanup_conn(conn, cur)


class TestMD5Auth:
    @classmethod
    def setup_method(cls):
        utils.pgcat_start()

    @classmethod
    def teardown_method(self):
        utils.pg_cat_send_signal(signal.SIGTERM)

    def test_normal_db_access(self):
        conn, cur = utils.connect_db(autocommit=False)
        cur.execute("SELECT 1")
        res = cur.fetchall()
        print(res)
        utils.cleanup_conn(conn, cur)

    def test_admin_db_access(self):
        conn, cur = utils.connect_db(admin=True)

        cur.execute("SHOW POOLS")
        res = cur.fetchall()
        print(res)
        utils.cleanup_conn(conn, cur)
@@ -1,84 +1,12 @@
import signal
import time

import psycopg2
import utils

SHUTDOWN_TIMEOUT = 5


def test_shutdown_logic():

@@ -86,17 +14,17 @@ def test_shutdown_logic():
    # NO ACTIVE QUERIES SIGINT HANDLING

    # Start pgcat
    utils.pgcat_start()

    # Create client connection and send query (not in transaction)
    conn, cur = utils.connect_db()

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")
    cur.execute("COMMIT;")

    # Send sigint to pgcat
    utils.pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    # Check that any new queries fail after sigint since server should close with no active transactions
@@ -108,18 +36,18 @@ def test_shutdown_logic():
        # Fail if query execution succeeded
        raise Exception("Server not closed after sigint")

    utils.cleanup_conn(conn, cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # NO ACTIVE QUERIES ADMIN SHUTDOWN COMMAND

    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction
    conn, cur = utils.connect_db()
    admin_conn, admin_cur = utils.connect_db(admin=True)

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")
@@ -138,24 +66,24 @@ def test_shutdown_logic():
        # Fail if query execution succeeded
        raise Exception("Server not closed after sigint")

    utils.cleanup_conn(conn, cur)
    utils.cleanup_conn(admin_conn, admin_cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # HANDLE TRANSACTION WITH SIGINT

    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction
    conn, cur = utils.connect_db()

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")

    # Send sigint to pgcat while still in transaction
    utils.pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    # Check that any new queries succeed after sigint since server should still allow transaction to complete
@@ -165,18 +93,18 @@ def test_shutdown_logic():
        # Fail if query fails since server closed
        raise Exception("Server closed while in transaction", e.pgerror)

    utils.cleanup_conn(conn, cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # HANDLE TRANSACTION WITH ADMIN SHUTDOWN COMMAND

    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction
    conn, cur = utils.connect_db()
    admin_conn, admin_cur = utils.connect_db(admin=True)

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")
@@ -194,30 +122,30 @@ def test_shutdown_logic():
        # Fail if query fails since server closed
        raise Exception("Server closed while in transaction", e.pgerror)

    utils.cleanup_conn(conn, cur)
    utils.cleanup_conn(admin_conn, admin_cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # NO NEW NON-ADMIN CONNECTIONS DURING SHUTDOWN
    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction
    transaction_conn, transaction_cur = utils.connect_db()

    transaction_cur.execute("BEGIN;")
    transaction_cur.execute("SELECT 1;")

    # Send sigint to pgcat while still in transaction
    utils.pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    start = time.perf_counter()
    try:
        conn, cur = utils.connect_db()
        cur.execute("SELECT 1;")
        utils.cleanup_conn(conn, cur)
    except psycopg2.OperationalError as e:
        time_taken = time.perf_counter() - start
        if time_taken > 0.1:
@@ -227,49 +155,49 @@ def test_shutdown_logic():
    else:
        raise Exception("Able to connect to database during shutdown")

    utils.cleanup_conn(transaction_conn, transaction_cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # ALLOW NEW ADMIN CONNECTIONS DURING SHUTDOWN
    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction
    transaction_conn, transaction_cur = utils.connect_db()

    transaction_cur.execute("BEGIN;")
    transaction_cur.execute("SELECT 1;")

    # Send sigint to pgcat while still in transaction
    utils.pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    try:
        conn, cur = utils.connect_db(admin=True)
        cur.execute("SHOW DATABASES;")
        utils.cleanup_conn(conn, cur)
    except psycopg2.OperationalError as e:
        raise Exception(e)

    utils.cleanup_conn(transaction_conn, transaction_cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # ADMIN CONNECTIONS CONTINUING TO WORK AFTER SHUTDOWN
    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction
    transaction_conn, transaction_cur = utils.connect_db()
    transaction_cur.execute("BEGIN;")
    transaction_cur.execute("SELECT 1;")

    admin_conn, admin_cur = utils.connect_db(admin=True)
    admin_cur.execute("SHOW DATABASES;")

    # Send sigint to pgcat while still in transaction
    utils.pg_cat_send_signal(signal.SIGINT)
    time.sleep(1)

    try:
@@ -277,24 +205,24 @@ def test_shutdown_logic():
    except psycopg2.OperationalError as e:
        raise Exception("Could not execute admin command:", e)

    utils.cleanup_conn(transaction_conn, transaction_cur)
    utils.cleanup_conn(admin_conn, admin_cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
    # HANDLE SHUTDOWN TIMEOUT WITH SIGINT

    # Start pgcat
    utils.pgcat_start()

    # Create client connection and begin transaction, which should prevent server shutdown unless shutdown timeout is reached
    conn, cur = utils.connect_db()

    cur.execute("BEGIN;")
    cur.execute("SELECT 1;")

    # Send sigint to pgcat while still in transaction
    utils.pg_cat_send_signal(signal.SIGINT)

    # pgcat shutdown timeout is set to SHUTDOWN_TIMEOUT seconds, so we sleep for SHUTDOWN_TIMEOUT + 1 seconds
    time.sleep(SHUTDOWN_TIMEOUT + 1)
@@ -308,12 +236,7 @@ def test_shutdown_logic():
        # Fail if query execution succeeded
        raise Exception("Server not closed after sigint and expected timeout")

    utils.cleanup_conn(conn, cur)
    utils.pg_cat_send_signal(signal.SIGTERM)

    # - - - - - - - - - - - - - - - - - -
tests/python/utils.py (new file, 110 lines)
@@ -0,0 +1,110 @@
import os
import signal
import time
from typing import Tuple
import tempfile

import psutil
import psycopg2

PGCAT_HOST = "127.0.0.1"
PGCAT_PORT = "6432"


def _pgcat_start(config_path: str):
    pg_cat_send_signal(signal.SIGTERM)
    os.system(f"./target/debug/pgcat {config_path} &")
    time.sleep(2)


def pgcat_start():
    _pgcat_start(config_path='.circleci/pgcat.toml')


def pgcat_generic_start(config: str):
    tmp = tempfile.NamedTemporaryFile()
    with open(tmp.name, 'w') as f:
        f.write(config)
    _pgcat_start(config_path=tmp.name)


def glauth_send_signal(signal: signal.Signals):
    try:
        for proc in psutil.process_iter(["pid", "name"]):
            if proc.name() == "glauth":
                os.kill(proc.pid, signal)
    except Exception as e:
        # The process can be gone when we send this signal
        print(e)

    if signal == signal.SIGTERM:
        # pgrep returns 0 if the glauth process still exists
        time.sleep(2)
        if not os.system('pgrep glauth'):
            raise Exception("glauth not closed after SIGTERM")


def pg_cat_send_signal(signal: signal.Signals):
    try:
        for proc in psutil.process_iter(["pid", "name"]):
            if "pgcat" == proc.name():
                os.kill(proc.pid, signal)
    except Exception as e:
        # The process can be gone when we send this signal
        print(e)

    if signal == signal.SIGTERM:
        # pgrep returns 0 if the pgcat process still exists
        time.sleep(2)
        if not os.system('pgrep pgcat'):
            raise Exception("pgcat not closed after SIGTERM")


def connect_db(
    autocommit: bool = True,
    admin: bool = False,
) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:

    if admin:
        user = "admin_user"
        password = "admin_pass"
        db = "pgcat"
    else:
        user = "sharding_user"
        password = "sharding_user"
        db = "sharded_db"

    conn = psycopg2.connect(
        f"postgres://{user}:{password}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat",
        connect_timeout=2,
    )
    conn.autocommit = autocommit
    cur = conn.cursor()

    return (conn, cur)


def connect_db_trust(
    autocommit: bool = True,
    admin: bool = False,
) -> Tuple[psycopg2.extensions.connection, psycopg2.extensions.cursor]:

    if admin:
        user = "admin_user"
        db = "pgcat"
    else:
        user = "sharding_user"
        db = "sharded_db"

    conn = psycopg2.connect(
        f"postgres://{user}@{PGCAT_HOST}:{PGCAT_PORT}/{db}?application_name=testing_pgcat",
        connect_timeout=2,
    )
    conn.autocommit = autocommit
    cur = conn.cursor()

    return (conn, cur)


def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.cursor):
    cur.close()
    conn.close()
@@ -1,22 +1,33 @@
GEM
  remote: https://rubygems.org/
  specs:
    activemodel (7.1.4)
      activesupport (= 7.1.4)
    activerecord (7.1.4)
      activemodel (= 7.1.4)
      activesupport (= 7.1.4)
      timeout (>= 0.4.0)
    activesupport (7.1.4)
      base64
      bigdecimal
      concurrent-ruby (~> 1.0, >= 1.0.2)
      connection_pool (>= 2.2.5)
      drb
      i18n (>= 1.6, < 2)
      minitest (>= 5.1)
      mutex_m
      tzinfo (~> 2.0)
    ast (2.4.2)
    base64 (0.2.0)
    bigdecimal (3.1.8)
    concurrent-ruby (1.3.4)
    connection_pool (2.4.1)
    diff-lcs (1.5.0)
    drb (2.2.1)
    i18n (1.14.5)
      concurrent-ruby (~> 1.0)
    minitest (5.25.1)
    mutex_m (0.2.0)
    parallel (1.22.1)
    parser (3.1.2.0)
      ast (~> 2.4.1)
@@ -24,7 +35,8 @@ GEM
    pg (1.3.2)
    rainbow (3.1.1)
    regexp_parser (2.3.1)
    rexml (3.3.6)
      strscan
    rspec (3.11.0)
      rspec-core (~> 3.11.0)
      rspec-expectations (~> 3.11.0)
@@ -50,10 +62,12 @@ GEM
    rubocop-ast (1.17.0)
      parser (>= 3.1.1.0)
    ruby-progressbar (1.11.0)
    strscan (3.1.0)
    timeout (0.4.1)
    toml (0.3.0)
      parslet (>= 1.8.0, < 3.0.0)
    toxiproxy (2.0.1)
    tzinfo (2.0.6)
      concurrent-ruby (~> 1.0)
    unicode-display_width (2.1.0)

@@ -91,6 +91,27 @@ describe "Admin" do
    end
  end

  [
    "SHOW ME THE MONEY",
    "SHOW ME THE WAY",
    "SHOW UP",
    "SHOWTIME",
    "HAMMER TIME",
    "SHOWN TO BE TRUE",
    "SHOW ",
    "SHOW ",
    "SHOW 1",
    ";;;;;"
  ].each do |cmd|
    describe "Bad command #{cmd}" do
      it "does not panic and responds with PG::SystemError" do
        admin_conn = PG::connect(processes.pgcat.admin_connection_string)
        expect { admin_conn.async_exec(cmd) }.to raise_error(PG::SystemError).with_message(/Unsupported/)
        admin_conn.close
      end
    end
  end

  describe "PAUSE" do
    it "pauses all pools" do
      admin_conn = PG::connect(processes.pgcat.admin_connection_string)
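The list of bad commands above is a fuzz-lite regression test: anything the admin database does not recognize must come back as a PG::SystemError mentioning "Unsupported", never a panic. A hedged sketch of the defensive parse this implies; pgcat's actual admin parser differs, and these names are illustrative:

// Illustrative only: reject unknown admin commands with an error value
// instead of panicking on arbitrary input.
fn parse_admin_command(query: &str) -> Result<&'static str, String> {
    let mut tokens = query.trim().split_whitespace();
    match tokens.next() {
        Some(verb) if verb.eq_ignore_ascii_case("SHOW") => match tokens.next() {
            Some(t) if t.eq_ignore_ascii_case("POOLS") => Ok("pools"),
            Some(t) if t.eq_ignore_ascii_case("STATS") => Ok("stats"),
            Some(t) if t.eq_ignore_ascii_case("DATABASES") => Ok("databases"),
            _ => Err("Unsupported query against the admin database".into()),
        },
        _ => Err("Unsupported query against the admin database".into()),
    }
}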
@@ -1,5 +1,5 @@
|
|||||||
# frozen_string_literal: true
|
# frozen_string_literal: true
|
||||||
require_relative 'spec_helper'
|
require_relative "spec_helper"
|
||||||
|
|
||||||
describe "Random Load Balancing" do
|
describe "Random Load Balancing" do
|
||||||
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 5) }
|
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 5) }
|
||||||
@@ -8,7 +8,7 @@ describe "Random Load Balancing" do
|
|||||||
processes.pgcat.shutdown
|
processes.pgcat.shutdown
|
||||||
end
|
end
|
||||||
|
|
||||||
context "under regular circumstances" do
|
context("under regular circumstances") do
|
||||||
it "balances query volume between all instances" do
|
it "balances query volume between all instances" do
|
||||||
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
|
||||||
@@ -22,14 +22,14 @@ describe "Random Load Balancing" do
|
|||||||
failed_count += 1
|
failed_count += 1
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(failed_count).to eq(0)
|
expect(failed_count).to(eq(0))
|
||||||
processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share|
|
processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share|
|
||||||
expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
|
expect(instance_share).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
context "when some replicas are down" do
|
context("when some replicas are down") do
|
||||||
it "balances query volume between working instances" do
|
it "balances query volume between working instances" do
|
||||||
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
expected_share = QUERY_COUNT / (processes.all_databases.count - 2)
|
expected_share = QUERY_COUNT / (processes.all_databases.count - 2)
|
||||||
@@ -49,9 +49,9 @@ describe "Random Load Balancing" do
|
|||||||
processes.all_databases.each do |instance|
|
processes.all_databases.each do |instance|
|
||||||
queries_routed = instance.count_select_1_plus_2
|
queries_routed = instance.count_select_1_plus_2
|
||||||
if processes.replicas[0..1].include?(instance)
|
if processes.replicas[0..1].include?(instance)
|
||||||
expect(queries_routed).to eq(0)
|
expect(queries_routed).to(eq(0))
|
||||||
else
|
else
|
||||||
expect(queries_routed).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
|
expect(queries_routed).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
@@ -65,7 +65,7 @@ describe "Least Outstanding Queries Load Balancing" do
|
|||||||
processes.pgcat.shutdown
|
processes.pgcat.shutdown
|
||||||
end
|
end
|
||||||
|
|
||||||
context "under homogeneous load" do
|
context("under homogeneous load") do
|
||||||
it "balances query volume between all instances" do
|
it "balances query volume between all instances" do
|
||||||
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
|
|
||||||
@@ -79,15 +79,15 @@ describe "Least Outstanding Queries Load Balancing" do
|
|||||||
failed_count += 1
|
failed_count += 1
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(failed_count).to eq(0)
|
expect(failed_count).to(eq(0))
|
||||||
processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share|
|
processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share|
|
||||||
expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
|
expect(instance_share).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
context "under heterogeneous load" do
|
context("under heterogeneous load") do
|
||||||
xit "balances query volume between all instances based on how busy they are" do
|
xit("balances query volume between all instances based on how busy they are") do
|
||||||
slow_query_count = 2
|
slow_query_count = 2
|
||||||
threads = Array.new(slow_query_count) do
|
threads = Array.new(slow_query_count) do
|
||||||
Thread.new do
|
Thread.new do
|
||||||
@@ -108,31 +108,32 @@ describe "Least Outstanding Queries Load Balancing" do
|
|||||||
failed_count += 1
|
failed_count += 1
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(failed_count).to eq(0)
|
expect(failed_count).to(eq(0))
|
||||||
# Under LOQ, we expect replicas running the slow pg_sleep
|
# Under LOQ, we expect replicas running the slow pg_sleep
|
||||||
# to get no selects
|
# to get no selects
|
||||||
expect(
|
expect(
|
||||||
processes.
|
processes
|
||||||
all_databases.
|
.all_databases
|
||||||
map(&:count_select_1_plus_2).
|
.map(&:count_select_1_plus_2)
|
||||||
count { |instance_share| instance_share == 0 }
|
.count { |instance_share| instance_share == 0 }
|
||||||
).to eq(slow_query_count)
|
)
|
||||||
|
.to(eq(slow_query_count))
|
||||||
|
|
||||||
# We also expect the quick queries to be spread across
|
# We also expect the quick queries to be spread across
|
||||||
# the idle servers only
|
# the idle servers only
|
||||||
processes.
|
processes
|
||||||
all_databases.
|
.all_databases
|
||||||
map(&:count_select_1_plus_2).
|
.map(&:count_select_1_plus_2)
|
||||||
reject { |instance_share| instance_share == 0 }.
|
.reject { |instance_share| instance_share == 0 }
|
||||||
each do |instance_share|
|
.each do |instance_share|
|
||||||
expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
|
expect(instance_share).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
|
||||||
end
|
end
|
||||||
|
|
||||||
threads.map(&:join)
|
threads.map(&:join)
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
context "when some replicas are down" do
|
context("when some replicas are down") do
|
||||||
it "balances query volume between working instances" do
|
it "balances query volume between working instances" do
|
||||||
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
|
||||||
expected_share = QUERY_COUNT / (processes.all_databases.count - 2)
|
expected_share = QUERY_COUNT / (processes.all_databases.count - 2)
|
||||||
@@ -149,16 +150,106 @@ describe "Least Outstanding Queries Load Balancing" do
|
|||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
expect(failed_count).to be <= 2
|
expect(failed_count).to(be <= 2)
|
||||||
processes.all_databases.each do |instance|
|
processes.all_databases.each do |instance|
|
||||||
queries_routed = instance.count_select_1_plus_2
|
queries_routed = instance.count_select_1_plus_2
|
||||||
if processes.replicas[0..1].include?(instance)
|
if processes.replicas[0..1].include?(instance)
|
||||||
expect(queries_routed).to eq(0)
|
expect(queries_routed).to(eq(0))
|
||||||
else
|
else
|
||||||
expect(queries_routed).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
|
expect(queries_routed).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
+describe "Candidate filtering based on `default_pool`" do
+  let(:processes) {
+    Helpers::Pgcat.single_shard_setup("sharded_db", 5, "transaction", "random", "debug", pool_settings)
+  }
+
+  after do
+    processes.all_databases.map(&:reset)
+    processes.pgcat.shutdown
+  end
+
+  context("with default_pool set to replicas") do
+    context("when all replicas are down") do
+      let(:pool_settings) do
+        {
+          "default_role" => "replica",
+          "replica_to_primary_failover_enabled" => replica_to_primary_failover_enabled
+        }
+      end
+
+      context("with `replica_to_primary_failover_enabled` set to false") do
+        let(:replica_to_primary_failover_enabled) { false }
+
+        it(
+          "unbans them automatically to prevent false positives in health checks that could make all replicas unavailable"
+        ) do
+          conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+          failed_count = 0
+          number_of_replicas = processes[:replicas].length
+
+          # Take down all replicas
+          processes[:replicas].each(&:take_down)
+
+          (number_of_replicas + 1).times do |n|
+            conn.async_exec("SELECT 1 + 2")
+          rescue
+            conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+            failed_count += 1
+          end
+
+          expect(failed_count).to(eq(number_of_replicas + 1))
+          failed_count = 0
+
+          # Ban_time is configured to 60 so this reset will only work
+          # if the replicas are unbanned automatically
+          processes[:replicas].each(&:reset)
+
+          number_of_replicas.times do
+            conn.async_exec("SELECT 1 + 2")
+          rescue
+            conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+            failed_count += 1
+          end
+
+          expect(failed_count).to(eq(0))
+        end
+      end
+
+      context("with `replica_to_primary_failover_enabled` set to true") do
+        let(:replica_to_primary_failover_enabled) { true }
+
+        it "does not unban them automatically" do
+          conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+          failed_count = 0
+          number_of_replicas = processes[:replicas].length
+
+          # We need to allow pgcat to open connections to replicas
+          (number_of_replicas + 10).times do |n|
+            conn.async_exec("SELECT 1 + 2")
+          rescue
+            conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+            failed_count += 1
+          end
+          expect(failed_count).to(eq(0))
+
+          # Take down all replicas
+          processes[:replicas].each(&:take_down)
+
+          (number_of_replicas + 10).times do |n|
+            conn.async_exec("SELECT 1 + 2")
+          rescue
+            conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
+            failed_count += 1
+          end
+
+          expect(failed_count).to(eq(number_of_replicas))
+        end
+      end
+    end
+  end
+end

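The `do ... rescue ... end` shape used throughout these specs is Ruby 2.6+ block-level rescue: a failed `async_exec` is caught inside the `times` block, the connection is reopened, and the failure is tallied. A minimal standalone sketch of that reconnect-and-count pattern (the method name and connection string are illustrative, not part of the repo):

require 'pg'

# Count how many of `attempts` queries fail, reconnecting after each failure.
def count_failures(conn_str, attempts)
  conn = PG.connect(conn_str)
  failed = 0
  attempts.times do
    conn.async_exec("SELECT 1 + 2")
  rescue PG::Error
    # A failed query can leave the connection unusable, so reopen it
    # before the next attempt and record the failure.
    conn = PG.connect(conn_str)
    failed += 1
  end
  failed
end

The specs above use this counter to assert how many attempts fail while replicas are down, with and without failover to the primary.
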
@@ -1,29 +1,214 @@
 require_relative 'spec_helper'

 describe 'Prepared statements' do
-  let(:processes) { Helpers::Pgcat.three_shard_setup('sharded_db', 5) }
+  let(:pool_size) { 5 }
+  let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", pool_size) }
+  let(:prepared_statements_cache_size) { 100 }
+  let(:server_round_robin) { false }

-  context 'enabled' do
-    it 'will work over the same connection' do
+  before do
+    new_configs = processes.pgcat.current_config
+    new_configs["general"]["server_round_robin"] = server_round_robin
+    new_configs["pools"]["sharded_db"]["prepared_statements_cache_size"] = prepared_statements_cache_size
+    new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = pool_size
+    processes.pgcat.update_config(new_configs)
+    processes.pgcat.reload_config
+  end
+
+  context 'when trying prepared statements' do
+    it 'allows unparameterized statements to succeed' do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      conn2 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      prepared_query = "SELECT 1"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      conn1.exec_prepared('statement1')
+
+      conn2.transaction do
+        # Claim server 1 with client 2
+        conn2.exec("SELECT 2")
+
+        # Client 1 now runs the prepared query, and it's automatically
+        # prepared on server 2
+        conn1.prepare('statement2', prepared_query)
+        conn1.exec_prepared('statement2')
+
+        # Client 2 now prepares the same query that was already
+        # prepared on server 1, and PgCat reuses that already
+        # prepared query for this different client.
+        conn2.prepare('statement3', prepared_query)
+        conn2.exec_prepared('statement3')
+      end
+    ensure
+      conn1.close if conn1
+      conn2.close if conn2
+    end
+
+    it 'allows parameterized statements to succeed' do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      conn2 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      prepared_query = "SELECT $1"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      conn1.exec_prepared('statement1', [1])
+
+      conn2.transaction do
+        # Claim server 1 with client 2
+        conn2.exec("SELECT 2")
+
+        # Client 1 now runs the prepared query, and it's automatically
+        # prepared on server 2
+        conn1.prepare('statement2', prepared_query)
+        conn1.exec_prepared('statement2', [1])
+
+        # Client 2 now prepares the same query that was already
+        # prepared on server 1, and PgCat reuses that already
+        # prepared query for this different client.
+        conn2.prepare('statement3', prepared_query)
+        conn2.exec_prepared('statement3', [1])
+      end
+    ensure
+      conn1.close if conn1
+      conn2.close if conn2
+    end
+  end
+
+  context 'when trying large packets' do
+    it "works with large parse" do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      long_string = "1" * 4096 * 10
+      prepared_query = "SELECT '#{long_string}'"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      result = conn1.exec_prepared('statement1')
+
+      # assert result matches long_string
+      expect(result.getvalue(0, 0)).to eq(long_string)
+    ensure
+      conn1.close if conn1
+    end
+
+    it "works with large bind" do
+      conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      long_string = "1" * 4096 * 10
+      prepared_query = "SELECT $1::text"
+
+      # prepare query on server 1 and client 1
+      conn1.prepare('statement1', prepared_query)
+      result = conn1.exec_prepared('statement1', [long_string])
+
+      # assert result matches long_string
+      expect(result.getvalue(0, 0)).to eq(long_string)
+    ensure
+      conn1.close if conn1
+    end
+  end
+
+  context 'when statement cache is smaller than set of unique statements' do
+    let(:prepared_statements_cache_size) { 1 }
+    let(:pool_size) { 1 }
+
+    it "evicts all but 1 statement from the server cache" do
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      5.times do |i|
+        prepared_query = "SELECT '#{i}'"
+        conn.prepare("statement#{i}", prepared_query)
+        result = conn.exec_prepared("statement#{i}")
+        expect(result.getvalue(0, 0)).to eq(i.to_s)
+      end
+
+      # Check number of prepared statements (expected: 1)
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(1)
+    end
+  end
+
+  context 'when statement cache is larger than set of unique statements' do
+    let(:pool_size) { 1 }
+
+    it "does not evict any of the statements from the cache" do
+      # cache size 5
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      5.times do |i|
+        prepared_query = "SELECT '#{i}'"
+        conn.prepare("statement#{i}", prepared_query)
+        result = conn.exec_prepared("statement#{i}")
+        expect(result.getvalue(0, 0)).to eq(i.to_s)
+      end
+
+      # Check number of prepared statements (expected: 5)
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(5)
+    end
+  end
+
+  context 'when preparing the same query' do
+    let(:prepared_statements_cache_size) { 5 }
+    let(:pool_size) { 5 }
+
+    it "reuses statement cache when there are different statement names on the same connection" do
       conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))

       10.times do |i|
         statement_name = "statement_#{i}"
         conn.prepare(statement_name, 'SELECT $1::int')
         conn.exec_prepared(statement_name, [1])
-        conn.describe_prepared(statement_name)
       end
+
+      # Check number of prepared statements (expected: 1)
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(1)
     end

-    it 'will work with new connections' do
-      10.times do
+    it "reuses statement cache when there are different statement names on different connections" do
+      10.times do |i|
         conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))

-        statement_name = 'statement1'
-        conn.prepare('statement1', 'SELECT $1::int')
-        conn.exec_prepared('statement1', [1])
-        conn.describe_prepared('statement1')
+        statement_name = "statement_#{i}"
+        conn.prepare(statement_name, 'SELECT $1::int')
+        conn.exec_prepared(statement_name, [1])
       end
+
+      # Check number of prepared statements (expected: 1)
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(1)
+    end
+  end
+
+  context 'when reloading config' do
+    let(:pool_size) { 1 }
+
+    it "test_reload_config" do
+      conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+
+      # prepare query
+      conn.prepare('statement1', 'SELECT 1')
+      conn.exec_prepared('statement1')
+
+      # Reload config which triggers pool recreation
+      new_configs = processes.pgcat.current_config
+      new_configs["pools"]["sharded_db"]["prepared_statements_cache_size"] = prepared_statements_cache_size + 1
+      processes.pgcat.update_config(new_configs)
+      processes.pgcat.reload_config
+
+      # check that we're starting with no prepared statements on the server
+      conn_check = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
+      n_statements = conn_check.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
+      expect(n_statements).to eq(0)
+
+      # still able to run prepared query
+      conn.exec_prepared('statement1')
     end
   end
 end
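The cache-size specs above all reduce to one observable: `pg_prepared_statements` on the server shows how many statements actually survive behind the pooler. A standalone sketch of that check (the connection string, port, and cache size are assumptions for illustration):

require 'pg'

# Assumes a local pgcat listening on 6432 with the test credentials.
conn = PG.connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db")

# Prepare more unique statements than a prepared_statements_cache_size of 1 can hold.
5.times do |i|
  conn.prepare("statement#{i}", "SELECT '#{i}'")
  conn.exec_prepared("statement#{i}")
end

# Count what is still prepared server-side; with a cache size of 1,
# the specs expect exactly one survivor.
n = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
puts "server-side prepared statements: #{n}"
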

@@ -233,17 +233,19 @@ describe "Stats" do
     sleep(1.1) # Allow time for stats to update
     admin_conn = PG::connect(processes.pgcat.admin_connection_string)
     results = admin_conn.async_exec("SHOW POOLS")[0]
-    %w[cl_idle cl_cancel_req sv_idle sv_used sv_tested sv_login maxwait].each do |s|
+    %w[cl_idle cl_cancel_req sv_idle sv_used sv_tested sv_login].each do |s|
       raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
     end
+
+    expect(results["maxwait"]).to eq("1")
     expect(results["cl_waiting"]).to eq("2")
     expect(results["cl_active"]).to eq("2")
     expect(results["sv_active"]).to eq("2")

     sleep(2.5) # Allow time for stats to update
     results = admin_conn.async_exec("SHOW POOLS")[0]
-    %w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login].each do |s|
+    %w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
       raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
     end
     expect(results["cl_idle"]).to eq("4")
@@ -255,22 +257,23 @@ describe "Stats" do

   it "show correct max_wait" do
     threads = []
+    admin_conn = PG::connect(processes.pgcat.admin_connection_string)
     connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
     connections.each do |c|
       threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") rescue nil }
     end
+    sleep(1.1)
+    results = admin_conn.async_exec("SHOW POOLS")[0]
+    # Value is only reported when there are clients waiting
+    expect(results["maxwait"]).to eq("1")
+    expect(results["maxwait_us"].to_i).to be_within(20_000).of(100_000)
+
     sleep(2.5) # Allow time for stats to update
-    admin_conn = PG::connect(processes.pgcat.admin_connection_string)
-    results = admin_conn.async_exec("SHOW POOLS")[0]

-    expect(results["maxwait"]).to eq("1")
-    expect(results["maxwait_us"].to_i).to be_within(200_000).of(500_000)
-    connections.map(&:close)

-    sleep(4.5) # Allow time for stats to update
     results = admin_conn.async_exec("SHOW POOLS")[0]
+    # no clients are waiting so value is 0
     expect(results["maxwait"]).to eq("0")
+    expect(results["maxwait_us"]).to eq("0")
+    connections.map(&:close)
+
     threads.map(&:join)
   end
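The hunks above read `maxwait`/`maxwait_us` from the admin console's `SHOW POOLS`: the seconds and microseconds parts of the longest current client wait, which drop back to "0" once no client is queued. A minimal sketch of polling it directly (admin credentials and port are placeholders):

require 'pg'

# pgcat's admin console is reached as the "pgcat" database.
admin = PG.connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")

pool = admin.async_exec("SHOW POOLS")[0]
# Both fields read "0" whenever no client is currently waiting for a server.
puts "maxwait: #{pool['maxwait']}s + #{pool['maxwait_us']}us"
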
@@ -329,6 +332,40 @@ describe "Stats" do
     admin_conn.close
     connections.map(&:close)
   end
+
+  context "when client has waited for a server" do
+    let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
+
+    it "shows correct maxwait" do
+      threads = []
+      connections = Array.new(3) { |i| PG::connect("#{pgcat_conn_str}?application_name=app#{i}") }
+      connections.each do |c|
+        threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") rescue nil }
+      end
+
+      sleep(2.5) # Allow time for stats to update
+      admin_conn = PG::connect(processes.pgcat.admin_connection_string)
+      results = admin_conn.async_exec("SHOW CLIENTS")
+
+      normal_client_results = results.reject { |r| r["database"] == "pgcat" }
+
+      non_waiting_clients = normal_client_results.select { |c| c["maxwait"] == "0" }
+      waiting_clients = normal_client_results.select { |c| c["maxwait"].to_i > 0 }
+
+      expect(non_waiting_clients.count).to eq(2)
+      non_waiting_clients.each do |client|
+        expect(client["maxwait_us"].to_i).to be_between(0, 50_000)
+      end
+
+      expect(waiting_clients.count).to eq(1)
+      waiting_clients.each do |client|
+        expect(client["maxwait_us"].to_i).to be_within(200_000).of(500_000)
+      end
+
+      admin_conn.close
+      connections.map(&:close)
+    end
+  end
 end
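`SHOW CLIENTS` also lists the admin session that issues the query, so the new test filters on the `database` column before asserting per-client waits; a minimal standalone version of that filter (connection details are placeholders):

require 'pg'

admin = PG.connect("postgres://admin_user:admin_pass@127.0.0.1:6432/pgcat")

clients = admin.async_exec("SHOW CLIENTS")
# Rows whose database is "pgcat" are admin-console sessions (including this
# one); everything else is a regular pooled client.
app_clients = clients.reject { |row| row["database"] == "pgcat" }
app_clients.each do |row|
  puts "client waited: #{row['maxwait']}s + #{row['maxwait_us']}us"
end
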

tests/rust/Cargo.lock (generated, 682 lines changed): diff suppressed because it is too large.

@@ -15,8 +15,13 @@ async fn test_prepared_statements() {
     for _ in 0..5 {
         let pool = pool.clone();
         let handle = tokio::task::spawn(async move {
-            for _ in 0..1000 {
-                sqlx::query("SELECT 1").fetch_all(&pool).await.unwrap();
+            for i in 0..1000 {
+                match sqlx::query(&format!("SELECT {:?}", i % 5)).fetch_all(&pool).await {
+                    Ok(_) => (),
+                    Err(err) => {
+                        panic!("prepared statement error: {}", err);
+                    }
+                }
             }
         });


@@ -22,7 +22,7 @@ mkdir -p "$deb_dir/etc/systemd/system"
 cp target/release/pgcat "$deb_dir/usr/bin/pgcat"
 chmod +x "$deb_dir/usr/bin/pgcat"

-cp pgcat.toml "$deb_dir/etc/pgcat.toml"
+cp pgcat.toml "$deb_dir/etc/pgcat.example.toml"
 cp pgcat.service "$deb_dir/etc/systemd/system/pgcat.service"

 (cat control | envsubst) > "$deb_dir/DEBIAN/control"