Compare commits


4 Commits

Author       SHA1        Message          Date
Lev Kokotov  e7265cbf91  fix flakey test  2023-05-03 16:01:48 -07:00
Lev Kokotov  d738ba28b6  fix tests        2023-05-03 15:42:16 -07:00
Lev Kokotov  ff80bb75cc  clean up         2023-05-03 15:38:03 -07:00
Lev Kokotov  374a6b138b  more plugins     2023-05-03 15:29:16 -07:00
95 changed files with 2628 additions and 11446 deletions


@@ -9,7 +9,7 @@ jobs:
     # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
     # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
     docker:
-      - image: ghcr.io/postgresml/pgcat-ci:latest
+      - image: ghcr.io/levkk/pgcat-ci:1.67
         environment:
           RUST_LOG: info
           LLVM_PROFILE_FILE: /tmp/pgcat-%m-%p.profraw
@@ -63,9 +63,6 @@ jobs:
       - run:
           name: "Lint"
          command: "cargo fmt --check"
-      - run:
-          name: "Clippy"
-          command: "cargo clippy --all --all-targets -- -Dwarnings"
       - run:
           name: "Tests"
          command: "cargo clean && cargo build && cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"


@@ -74,10 +74,6 @@ default_role = "any"
 # we'll direct it to the primary.
 query_parser_enabled = true
-# If the query parser is enabled and this setting is enabled, we'll attempt to
-# infer the role from the query itself.
-query_parser_read_write_splitting = true
 # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
 # load balancing of read queries. Otherwise, the primary will only be used for write
 # queries. The primary can always be explicitely selected with our custom protocol.
@@ -138,7 +134,6 @@ database = "shard2"
 pool_mode = "session"
 default_role = "primary"
 query_parser_enabled = true
-query_parser_read_write_splitting = true
 primary_reads_enabled = true
 sharding_function = "pg_bigint_hash"
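For reference, the deleted `query_parser_read_write_splitting` knob sat between `query_parser_enabled` and `primary_reads_enabled` in the pool section. A minimal sketch of that routing block, reconstructed from the comments in this hunk (the pool name is illustrative):

```toml
[pools.sharded_db]
# Parse every incoming query to decide whether it is a read or a write.
query_parser_enabled = true
# Removed on this branch: infer the role (replica vs. primary) from the query itself.
query_parser_read_write_splitting = true
# Let the primary also serve read queries alongside the replicas.
primary_reads_enabled = true
```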


@@ -108,24 +108,8 @@ cd ../..
 pip3 install -r tests/python/requirements.txt
 python3 tests/python/tests.py || exit 1
-#
-# Go tests
-# Starts its own pgcat server
-#
-pushd tests/go
-/usr/local/go/bin/go test || exit 1
-popd
 start_pgcat "info"
-#
-# Rust tests
-#
-cd tests/rust
-cargo run
-cd ../../
 # Admin tests
 export PGPASSWORD=admin_pass
 psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null


@@ -10,7 +10,3 @@ updates:
     commit-message:
       prefix: "chore(deps)"
     open-pull-requests-limit: 10
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "weekly"


@@ -1,13 +1,6 @@
 name: Build and Push
-on:
-  push:
-    paths:
-      - '!charts/**.md'
-    branches:
-      - main
-    tags:
-      - v*
+on: push
 env:
   registry: ghcr.io
@@ -23,40 +16,33 @@ jobs:
     steps:
       - name: Checkout Repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v3
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v3
+        uses: docker/setup-buildx-action@v2
       - name: Determine tags
         id: metadata
-        uses: docker/metadata-action@v5
+        uses: docker/metadata-action@v4
         with:
           images: ${{ env.registry }}/${{ env.image-name }}
           tags: |
            type=sha,prefix=,format=long
            type=schedule
-            type=ref,event=tag
            type=ref,event=branch
            type=ref,event=pr
            type=raw,value=latest,enable={{ is_default_branch }}
       - name: Log in to the Container registry
-        uses: docker/login-action@v3
+        uses: docker/login-action@v2.1.0
         with:
           registry: ${{ env.registry }}
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push ${{ env.image-name }}
-        uses: docker/build-push-action@v6
+        uses: docker/build-push-action@v3
         with:
-          context: .
-          platforms: linux/amd64,linux/arm64
-          provenance: false
           push: true
           tags: ${{ steps.metadata.outputs.tags }}
           labels: ${{ steps.metadata.outputs.labels }}


@@ -1,50 +0,0 @@
name: Lint and Test Charts
on:
pull_request:
paths:
- charts/**
- '!charts/**.md'
jobs:
lint-test:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3.1.0
with:
fetch-depth: 0
- name: Set up Helm
uses: azure/setup-helm@v3
with:
version: v3.8.1
# Python is required because `ct lint` runs Yamale (https://github.com/23andMe/Yamale) and
# yamllint (https://github.com/adrienverge/yamllint) which require Python
- name: Set up Python
uses: actions/setup-python@v4.1.0
with:
python-version: 3.7
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.2.1
with:
version: v3.5.1
- name: Run chart-testing (list-changed)
id: list-changed
run: |
changed=$(ct list-changed --config ct.yaml)
if [[ -n "$changed" ]]; then
echo "changed=true" >> $GITHUB_OUTPUT
fi
- name: Run chart-testing (lint)
run: ct lint --config ct.yaml
- name: Create kind cluster
uses: helm/kind-action@v1.7.0
if: steps.list-changed.outputs.changed == 'true'
- name: Run chart-testing (install)
run: ct install --config ct.yaml


@@ -1,40 +0,0 @@
name: Release Charts
on:
push:
paths:
- charts/**
- '!**.md'
branches:
- main
jobs:
release:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
with:
version: v3.13.0
- name: Run chart-releaser
uses: helm/chart-releaser-action@be16258da8010256c6e82849661221415f031968 # v1.5.0
with:
charts_dir: charts
config: cr.yaml
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"


@@ -1,48 +0,0 @@
name: '[CI/CD] Update README metadata'
on:
pull_request_target:
branches:
- main
paths:
- 'charts/*/values.yaml'
# Remove all permissions by default
permissions: {}
jobs:
update-readme-metadata:
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Install readme-generator-for-helm
run: npm install -g @bitnami/readme-generator-for-helm
- name: Checkout
uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
with:
path: charts
ref: ${{github.event.pull_request.head.ref}}
repository: ${{github.event.pull_request.head.repo.full_name}}
token: ${{ secrets.GITHUB_TOKEN }}
- name: Execute readme-generator-for-helm
env:
DIFF_URL: "${{github.event.pull_request.diff_url}}"
TEMP_FILE: "${{runner.temp}}/pr-${{github.event.number}}.diff"
run: |
# This request doesn't consume API calls.
curl -Lkso $TEMP_FILE $DIFF_URL
files_changed="$(sed -nr 's/[\-\+]{3} [ab]\/(.*)/\1/p' $TEMP_FILE | sort | uniq)"
# Adding || true to avoid "Process exited with code 1" errors
charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "pgcat/[^/]*" | sort | uniq || true)"
for chart in ${charts_dirs_changed}; do
echo "Updating README.md for ${chart}"
readme-generator --values "charts/${chart}/values.yaml" --readme "charts/${chart}/README.md" --schema "/tmp/schema.json"
done
- name: Push changes
run: |
# Push all the changes
cd charts
if git status -s | grep pgcat; then
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add . && git commit -am "Update README.md with readme-generator-for-helm" --signoff && git push
fi


@@ -1,48 +0,0 @@
name: pgcat package (deb)
on:
workflow_dispatch:
inputs:
packageVersion:
default: "1.1.2-dev1"
jobs:
build:
strategy:
max-parallel: 1
fail-fast: false # Let the other job finish, or they can lock each other out
matrix:
os: ["buildjet-4vcpu-ubuntu-2204", "buildjet-4vcpu-ubuntu-2204-arm"]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v3
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- name: Install dependencies
env:
DEBIAN_FRONTEND: noninteractive
TZ: Etc/UTC
run: |
curl -sLO https://github.com/deb-s3/deb-s3/releases/download/0.11.4/deb-s3-0.11.4.gem
sudo gem install deb-s3-0.11.4.gem
dpkg-deb --version
- name: Build and release package
env:
AWS_ACCESS_KEY_ID: ${{ vars.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
AWS_DEFAULT_REGION: ${{ vars.AWS_DEFAULT_REGION }}
run: |
if [[ $(arch) == "x86_64" ]]; then
export ARCH=amd64
else
export ARCH=arm64
fi
bash utilities/deb.sh ${{ inputs.packageVersion }}
deb-s3 upload \
--lock \
--bucket apt.postgresml.org \
pgcat-${{ inputs.packageVersion }}-ubuntu22.04-${ARCH}.deb \
--codename $(lsb_release -cs)

.gitignore (vendored): 1 line changed

@@ -10,4 +10,3 @@ lcov.info
 dev/.bash_history
 dev/cache
 !dev/cache/.keepme
-.venv


@@ -57,38 +57,6 @@ default: 86400000 # 24 hours
 Max connection lifetime before it's closed, even if actively used.
-### server_round_robin
-```
-path: general.server_round_robin
-default: false
-```
-Whether to use round robin for server selection or not.
-### server_tls
-```
-path: general.server_tls
-default: false
-```
-Whether to use TLS for server connections or not.
-### verify_server_certificate
-```
-path: general.verify_server_certificate
-default: false
-```
-Whether to verify server certificate or not.
-### verify_config
-```
-path: general.verify_config
-default: true
-```
-Whether to verify config or not.
 ### idle_client_in_transaction_timeout
 ```
 path: general.idle_client_in_transaction_timeout
@@ -148,10 +116,10 @@ If we should log client disconnections
 ### autoreload
 ```
 path: general.autoreload
-default: 15000 # milliseconds
+default: 15000
 ```
-When set, PgCat automatically reloads its configurations at the specified interval (in milliseconds) if it detects changes in the configuration file. The default interval is 15000 milliseconds or 15 seconds.
+When set to true, PgCat reloads configs if it detects a change in the config file.
 ### worker_threads
 ```
@@ -183,13 +151,7 @@ path: general.tcp_keepalives_interval
 default: 5
 ```
-### tcp_user_timeout
-```
-path: general.tcp_user_timeout
-default: 10000
-```
-A linux-only parameters that defines the amount of time in milliseconds that transmitted data may remain unacknowledged or buffered data may remain untransmitted (due to zero window size) before TCP will forcibly disconnect
+Number of seconds between keepalive packets.
 ### tls_certificate
 ```
@@ -226,39 +188,6 @@ default: "admin_pass"
 Password to access the virtual administrative database
-### auth_query
-```
-path: general.auth_query
-default: <UNSET>
-example: "SELECT $1"
-```
-Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
-established using the database configured in the pool. This parameter is inherited by every pool
-and can be redefined in pool configuration.
-### auth_query_user
-```
-path: general.auth_query_user
-default: <UNSET>
-example: "sharding_user"
-```
-User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
-specified in `auth_query_user`. The connection will be established using the database configured in the pool.
-This parameter is inherited by every pool and can be redefined in pool configuration.
-### auth_query_password
-```
-path: general.auth_query_password
-default: <UNSET>
-example: "sharding_user"
-```
-Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
-specified in `auth_query_user`. The connection will be established using the database configured in the pool.
-This parameter is inherited by every pool and can be redefined in pool configuration.
 ### dns_cache_enabled
 ```
 path: general.dns_cache_enabled
@@ -295,7 +224,7 @@ default: "random"
 Load balancing mode
 `random` selects the server at random
-`loc` selects the server with the least outstanding busy connections
+`loc` selects the server with the least outstanding busy conncetions
 ### default_role
 ```
@@ -308,15 +237,6 @@ If the client doesn't specify, PgCat routes traffic to this role by default.
 `replica` round-robin between replicas only without touching the primary,
 `primary` all queries go to the primary unless otherwise specified.
-### prepared_statements_cache_size
-```
-path: general.prepared_statements_cache_size
-default: 0
-```
-Size of the prepared statements cache. 0 means disabled.
-TODO: update documentation
 ### query_parser_enabled
 ```
 path: pools.<pool_name>.query_parser_enabled
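The three `auth_query*` parameters deleted from the docs above work as a unit: PgCat connects as `auth_query_user`/`auth_query_password` and runs `auth_query` to fetch the md5 hash for each incoming user. A minimal sketch of the corresponding `[general]` settings, using the example values from the removed docs and the `pg_shadow` query that appears later in this diff in pgcat.toml:

```toml
[general]
# Run against the pool's database to fetch the md5 hash for incoming users.
auth_query = "SELECT usename, passwd FROM pg_shadow WHERE usename='$1'"
# Credentials used for the auth_query connection itself.
auth_query_user = "sharding_user"
auth_query_password = "sharding_user"
```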


@@ -2,7 +2,7 @@
 Thank you for contributing! Just a few tips here:
-1. `cargo fmt` and `cargo clippy` your code before opening up a PR
+1. `cargo fmt` your code before opening up a PR
 2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`.
 3. Performance is important, make sure there are no regressions in your branch vs. `main`.

Cargo.lock (generated): 1063 lines changed. Diff suppressed because it is too large.

@@ -1,6 +1,6 @@
 [package]
 name = "pgcat"
-version = "1.2.0"
+version = "1.0.2-alpha1"
 edition = "2021"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -8,7 +8,7 @@ edition = "2021"
 tokio = { version = "1", features = ["full"] }
 bytes = "1"
 md-5 = "0.10"
-bb8 = "0.8.1"
+bb8 = "0.8.0"
 async-trait = "0.1"
 rand = "0.8"
 chrono = "0.4"
@@ -19,9 +19,10 @@ serde_derive = "1"
 regex = "1"
 num_cpus = "1"
 once_cell = "1"
-sqlparser = { version = "0.41", features = ["visitor"] }
+sqlparser = {version = "0.33", features = ["visitor"] }
 log = "0.4"
 arc-swap = "1"
+env_logger = "0.10"
 parking_lot = "0.12.1"
 hmac = "0.12"
 sha2 = "0.10"
@@ -29,9 +30,7 @@ base64 = "0.21"
 stringprep = "0.1"
 tokio-rustls = "0.24"
 rustls-pemfile = "1"
-http-body-util = "0.1.2"
-hyper = { version = "1.4.1", features = ["full"] }
-hyper-util = { version = "0.1.7", features = ["tokio"] }
+hyper = { version = "0.14", features = ["full"] }
 phf = { version = "0.11.1", features = ["macros"] }
 exitcode = "1.1.2"
 futures = "0.3"
@@ -46,15 +45,7 @@ rustls = { version = "0.21", features = ["dangerous_configuration"] }
 trust-dns-resolver = "0.22.0"
 tokio-test = "0.4.2"
 serde_json = "1"
-itertools = "0.10"
-clap = { version = "4.3.1", features = ["derive", "env"] }
-tracing = "0.1.37"
-tracing-subscriber = { version = "0.3.17", features = [
-    "json",
-    "env-filter",
-    "std",
-] }
-lru = "0.12.0"
 [target.'cfg(not(target_env = "msvc"))'.dependencies]
 jemallocator = "0.5.0"


@@ -1,22 +1,11 @@
-FROM rust:1.79.0-slim-bookworm AS builder
-RUN apt-get update && \
-    apt-get install -y build-essential
+FROM rust:1 AS builder
 COPY . /app
 WORKDIR /app
 RUN cargo build --release
-FROM debian:bookworm-slim
-RUN apt-get update && apt-get install -o Dpkg::Options::=--force-confdef -yq --no-install-recommends \
-    postgresql-client \
-    # Clean up layer
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
-    && truncate -s 0 /var/log/*log
+FROM debian:bullseye-slim
 COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
 COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
 WORKDIR /etc/pgcat
 ENV RUST_LOG=info
 CMD ["pgcat"]
-STOPSIGNAL SIGINT


@@ -1,6 +1,4 @@
-FROM cimg/rust:1.79.0
-COPY --from=sclevine/yj /bin/yj /bin/yj
-RUN /bin/yj -h
+FROM cimg/rust:1.67.1
 RUN sudo apt-get update && \
     sudo apt-get install -y \
     psmisc postgresql-contrib-14 postgresql-client-14 libpq-dev \
@@ -12,6 +10,3 @@ RUN sudo apt-get update && \
     pip3 install psycopg2 && sudo gem install bundler && \
     wget -O /tmp/toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
     sudo dpkg -i /tmp/toxiproxy-2.4.0.deb
-RUN wget -O /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
-    sudo tar -C /usr/local -xzf /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
-    rm /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz


@@ -1,25 +0,0 @@
FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
RUN apt-get update && \
apt-get install -y build-essential
WORKDIR /app
FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
# Build dependencies - this is the caching Docker layer!
RUN cargo chef cook --release --recipe-path recipe.json
# Build application
COPY . .
RUN cargo build
FROM debian:bookworm-slim
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
CMD ["pgcat"]


@@ -40,7 +40,7 @@ PgCat is stable and used in production to serve hundreds of thousands of queries
 </a>
 </td>
 <td>
-<a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
+<a href="https://postgresml.org/blog/scaling-postgresml-to-one-million-requests-per-second">
 <img src="./images/postgresml.webp" height="70" width="auto">
 </a>
 </td>
@@ -57,7 +57,7 @@ PgCat is stable and used in production to serve hundreds of thousands of queries
 </a>
 </td>
 <td>
-<a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
+<a href="https://postgresml.org/blog/scaling-postgresml-to-one-million-requests-per-second">
 PostgresML
 </a>
 </td>
@@ -268,8 +268,6 @@ psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'
 Additionally, Prometheus statistics are available at `/metrics` via HTTP.
-We also have a [basic Grafana dashboard](https://github.com/postgresml/pgcat/blob/main/grafana_dashboard.json) based on Prometheus metrics that you can import into Grafana and build on it or use it for monitoring.
 ### Live configuration reloading
 The config can be reloaded by sending a `kill -s SIGHUP` to the process or by querying `RELOAD` to the admin database. All settings except the `host` and `port` can be reloaded without restarting the pooler, including sharding and replicas configurations.
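For context, the `/metrics` endpoint mentioned above is switched on in the `[general]` section of pgcat.toml. A minimal sketch using the field names that appear in the Helm values later in this diff; the port shown is that chart's default, so treat both values as assumptions:

```toml
[general]
# Serve Prometheus statistics over HTTP at /metrics.
enable_prometheus_exporter = true
prometheus_exporter_port = 9930
```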


@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -1,8 +0,0 @@
apiVersion: v2
name: pgcat
description: A Helm chart for PgCat a PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
maintainers:
- name: Wildcard
email: support@w6d.io
appVersion: "1.2.0"
version: 0.2.0


@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "pgcat.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "pgcat.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "pgcat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "pgcat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@@ -1,3 +0,0 @@
{{/*
Configuration template definition
*/}}


@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "pgcat.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "pgcat.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "pgcat.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "pgcat.labels" -}}
helm.sh/chart: {{ include "pgcat.chart" . }}
{{ include "pgcat.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "pgcat.selectorLabels" -}}
app.kubernetes.io/name: {{ include "pgcat.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "pgcat.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "pgcat.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@@ -1,66 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "pgcat.fullname" . }}
labels:
{{- include "pgcat.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
{{- include "pgcat.selectorLabels" . | nindent 6 }}
template:
metadata:
annotations:
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "pgcat.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.image.pullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "pgcat.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.containerSecurityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: pgcat
containerPort: {{ .Values.configuration.general.port }}
protocol: TCP
livenessProbe:
tcpSocket:
port: pgcat
readinessProbe:
tcpSocket:
port: pgcat
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- mountPath: /etc/pgcat
name: config
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
- secret:
defaultMode: 420
secretName: {{ include "pgcat.fullname" . }}
name: config


@@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "pgcat.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "pgcat.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,86 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ include "pgcat.fullname" . }}
labels:
{{- include "pgcat.labels" . | nindent 4 }}
type: Opaque
stringData:
pgcat.toml: |
[general]
host = {{ .Values.configuration.general.host | quote }}
port = {{ .Values.configuration.general.port }}
enable_prometheus_exporter = {{ .Values.configuration.general.enable_prometheus_exporter }}
prometheus_exporter_port = {{ .Values.configuration.general.prometheus_exporter_port }}
connect_timeout = {{ .Values.configuration.general.connect_timeout }}
idle_timeout = {{ .Values.configuration.general.idle_timeout | int }}
server_lifetime = {{ .Values.configuration.general.server_lifetime | int }}
idle_client_in_transaction_timeout = {{ .Values.configuration.general.idle_client_in_transaction_timeout | int }}
healthcheck_timeout = {{ .Values.configuration.general.healthcheck_timeout }}
healthcheck_delay = {{ .Values.configuration.general.healthcheck_delay }}
shutdown_timeout = {{ .Values.configuration.general.shutdown_timeout }}
ban_time = {{ .Values.configuration.general.ban_time }}
log_client_connections = {{ .Values.configuration.general.log_client_connections }}
log_client_disconnections = {{ .Values.configuration.general.log_client_disconnections }}
tcp_keepalives_idle = {{ .Values.configuration.general.tcp_keepalives_idle }}
tcp_keepalives_count = {{ .Values.configuration.general.tcp_keepalives_count }}
tcp_keepalives_interval = {{ .Values.configuration.general.tcp_keepalives_interval }}
{{- if and (ne .Values.configuration.general.tls_certificate "-") (ne .Values.configuration.general.tls_private_key "-") }}
tls_certificate = "{{ .Values.configuration.general.tls_certificate }}"
tls_private_key = "{{ .Values.configuration.general.tls_private_key }}"
{{- end }}
admin_username = {{ .Values.configuration.general.admin_username | quote }}
admin_password = {{ .Values.configuration.general.admin_password | quote }}
{{- if and .Values.configuration.general.auth_query_user .Values.configuration.general.auth_query_password .Values.configuration.general.auth_query }}
auth_query = {{ .Values.configuration.general.auth_query | quote }}
auth_query_user = {{ .Values.configuration.general.auth_query_user | quote }}
auth_query_password = {{ .Values.configuration.general.auth_query_password | quote }}
{{- end }}
{{- range $pool := .Values.configuration.pools }}
##
## pool for {{ $pool.name }}
##
[pools.{{ $pool.name | quote }}]
pool_mode = {{ default "transaction" $pool.pool_mode | quote }}
load_balancing_mode = {{ default "random" $pool.load_balancing_mode | quote }}
default_role = {{ default "any" $pool.default_role | quote }}
prepared_statements_cache_size = {{ default 500 $pool.prepared_statements_cache_size }}
query_parser_enabled = {{ default true $pool.query_parser_enabled }}
query_parser_read_write_splitting = {{ default true $pool.query_parser_read_write_splitting }}
primary_reads_enabled = {{ default true $pool.primary_reads_enabled }}
sharding_function = {{ default "pg_bigint_hash" $pool.sharding_function | quote }}
{{- range $index, $user := $pool.users }}
## pool {{ $pool.name }} user {{ $user.username | quote }}
##
[pools.{{ $pool.name | quote }}.users.{{ $index }}]
username = {{ $user.username | quote }}
password = {{ $user.password | quote }}
pool_size = {{ $user.pool_size }}
statement_timeout = {{ $user.statement_timeout }}
min_pool_size = 3
server_lifetime = 60000
{{- if and $user.server_username $user.server_password }}
server_username = {{ $user.server_username | quote }}
server_password = {{ $user.server_password | quote }}
{{- end }}
{{- end }}
{{- range $index, $shard := $pool.shards }}
## pool {{ $pool.name }} database {{ $shard.database }}
##
[pools.{{ $pool.name | quote }}.shards.{{ $index }}]
{{- if gt (len $shard.servers) 0}}
servers = [
{{- range $server := $shard.servers }}
[ {{ $server.host | quote }}, {{ $server.port }}, {{ $server.role | quote }} ],
{{- end }}
]
{{- end }}
database = {{ $shard.database | quote }}
{{- end }}
{{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "pgcat.fullname" . }}
labels:
{{- include "pgcat.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: pgcat
protocol: TCP
name: pgcat
selector:
{{- include "pgcat.selectorLabels" . | nindent 4 }}


@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "pgcat.serviceAccountName" . }}
labels:
{{- include "pgcat.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,369 +0,0 @@
## String to partially override aspnet-core.fullname template (will maintain the release name)
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""
## String to fully override aspnet-core.fullname template
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""
## Number of PgCat replicas to deploy
## @param replicaCount Number of PgCat replicas to deploy
replicaCount: 1
## Bitnami PgCat image version
## ref: https://hub.docker.com/r/bitnami/kubewatch/tags/
##
## @param image.registry PgCat image registry
## @param image.repository PgCat image name
## @param image.tag PgCat image tag
## @param image.pullPolicy PgCat image tag
## @param image.pullSecrets Specify docker-registry secret names as an array
image:
repository: ghcr.io/postgresml/pgcat
# Overrides the image tag whose default is the chart appVersion.
tag: "main"
## Specify a imagePullPolicy
## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
##
pullPolicy: IfNotPresent
## Optionally specify an array of imagePullSecrets.
## Secrets must be manually created in the namespace.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## Example:
## pullSecrets:
## - myRegistryKeySecretName
##
pullSecrets: []
## Specifies whether a ServiceAccount should be created
##
## @param serviceAccount.create Enable the creation of a ServiceAccount for PgCat pods
## @param serviceAccount.name Name of the created ServiceAccount
##
serviceAccount:
## Specifies whether a service account should be created
create: true
## Annotations to add to the service account
annotations: {}
## The name of the service account to use.
## If not set and create is true, a name is generated using the fullname template
name: ""
## Annotations for server pods.
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
## @param podAnnotations Annotations for PgCat pods
##
podAnnotations: {}
## PgCat containers' SecurityContext
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
## @param podSecurityContext.enabled Enabled PgCat pods' Security Context
## @param podSecurityContext.fsGroup Set PgCat pod's Security Context fsGroup
##
podSecurityContext: {}
# fsGroup: 2000
## PgCat pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
##
## @param containerSecurityContext.enabled Enabled PgCat containers' Security Context
## @param containerSecurityContext.runAsUser Set PgCat container's Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set PgCat container's Security Context runAsNonRoot
##
containerSecurityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
## PgCat service
##
## @param service.type PgCat service type
## @param service.port PgCat service port
service:
type: ClusterIP
port: 6432
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
## PgCat resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## @skip resources Optional description
## @disabled-param resources.limits The resources limits for the PgCat container
## @disabled-param resources.requests The requested resources for the PgCat container
##
resources:
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
limits: {}
# cpu: 100m
# memory: 128Mi
requests: {}
# cpu: 100m
# memory: 128Mi
## Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
## @param nodeSelector Node labels for pod assignment
##
nodeSelector: {}
## Tolerations for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
## @param tolerations Tolerations for pod assignment
##
tolerations: []
## Affinity for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
## @param affinity Affinity for pod assignment
##
affinity: {}
## PgCat configuration
## @param configuration [object]
configuration:
## General pooler settings
## @param [object]
general:
## @param configuration.general.host What IP to run on, 0.0.0.0 means accessible from everywhere.
host: "0.0.0.0"
## @param configuration.general.port Port to run on, same as PgBouncer used in this example.
port: 6432
## @param configuration.general.enable_prometheus_exporter Whether to enable prometheus exporter or not.
enable_prometheus_exporter: false
## @param configuration.general.prometheus_exporter_port Port at which prometheus exporter listens on.
prometheus_exporter_port: 9930
# @param configuration.general.connect_timeout How long to wait before aborting a server connection (ms).
connect_timeout: 5000
# How long an idle connection with a server is left open (ms).
idle_timeout: 30000 # milliseconds
# Max connection lifetime before it's closed, even if actively used.
server_lifetime: 86400000 # 24 hours
# How long a client is allowed to be idle while in a transaction (ms).
idle_client_in_transaction_timeout: 0 # milliseconds
# @param configuration.general.healthcheck_timeout How much time to give `SELECT 1` health check query to return with a result (ms).
healthcheck_timeout: 1000
# @param configuration.general.healthcheck_delay How long to keep connection available for immediate re-use, without running a healthcheck query on it
healthcheck_delay: 30000
# @param configuration.general.shutdown_timeout How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout: 60000
# @param configuration.general.ban_time For how long to ban a server if it fails a health check (seconds).
ban_time: 60 # seconds
# @param configuration.general.log_client_connections If we should log client connections
log_client_connections: false
# @param configuration.general.log_client_disconnections If we should log client disconnections
log_client_disconnections: false
# TLS
# tls_certificate: "server.cert"
# tls_private_key: "server.key"
tls_certificate: "-"
tls_private_key: "-"
# Credentials to access the virtual administrative database (pgbouncer or pgcat)
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
admin_username: "postgres"
admin_password: "postgres"
# Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
# established using the database configured in the pool. This parameter is inherited by every pool and
# can be redefined in pool configuration.
auth_query: null
# User to be used for connecting to servers to obtain the hash used for md5 authentication by sending
# the query specified in auth_query_user. The connection will be established using the database configured
# in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
#
# @param configuration.general.auth_query_user
auth_query_user: null
# Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending
# the query specified in auth_query_user. The connection will be established using the database configured
# in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
#
# @param configuration.general.auth_query_password
auth_query_password: null
# Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
tcp_keepalives_idle: 5
# Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
tcp_keepalives_count: 5
# Number of seconds between keepalive packets.
tcp_keepalives_interval: 5
## pool
## configs are structured as pool.<pool_name>
## the pool_name is what clients use as database name when connecting
## For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded"
## @param [object]
pools:
[{
name: "simple", pool_mode: "transaction",
users: [{username: "user", password: "pass", pool_size: 5, statement_timeout: 0}],
shards: [{
servers: [{host: "postgres", port: 5432, role: "primary"}],
database: "postgres"
}]
}]
# - ## default values
# ##
# ##
# ##
# name: "db"
# ## Pool mode (see PgBouncer docs for more).
# ## session: one server connection per connected client
# ## transaction: one server connection per client transaction
# ## @param configuration.poolsPostgres.pool_mode
# pool_mode: "transaction"
# ## Load balancing mode
# ## `random` selects the server at random
# ## `loc` selects the server with the least outstanding busy connections
# ##
# ## @param configuration.poolsPostgres.load_balancing_mode
# load_balancing_mode: "random"
# ## Prepared statements cache size.
# ## TODO: update documentation
# ##
# ## @param configuration.poolsPostgres.prepared_statements_cache_size
# prepared_statements_cache_size: 500
# ## If the client doesn't specify, route traffic to
# ## this role by default.
# ##
# ## any: round-robin between primary and replicas,
# ## replica: round-robin between replicas only without touching the primary,
# ## primary: all queries go to the primary unless otherwise specified.
# ## @param configuration.poolsPostgres.default_role
# default_role: "any"
# ## Query parser. If enabled, we'll attempt to parse
# ## every incoming query to determine if it's a read or a write.
# ## If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# ## we'll direct it to the primary.
# ## @param configuration.poolsPostgres.query_parser_enabled
# query_parser_enabled: true
# ## If the query parser is enabled and this setting is enabled, we'll attempt to
# ## infer the role from the query itself.
# ## @param configuration.poolsPostgres.query_parser_read_write_splitting
# query_parser_read_write_splitting: true
# ## If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# ## load balancing of read queries. Otherwise, the primary will only be used for write
# ## queries. The primary can always be explicitly selected with our custom protocol.
# ## @param configuration.poolsPostgres.primary_reads_enabled
# primary_reads_enabled: true
# ## So what if you wanted to implement a different hashing function,
# ## or you've already built one and you want this pooler to use it?
# ##
# ## Current options:
# ##
# ## pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# ## sha1: A hashing function based on SHA1
# ##
# ## @param configuration.poolsPostgres.sharding_function
# sharding_function: "pg_bigint_hash"
# ## Credentials for users that may connect to this cluster
# ## @param users [array]
# ## @param users[0].username Name of the env var (required)
# ## @param users[0].password Value for the env var (required)
# ## @param users[0].pool_size Maximum number of server connections that can be established for this user
# ## @param users[0].statement_timeout Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
# users: []
# # - username: "user"
# # password: "pass"
# #
# # # The maximum number of connection from a single Pgcat process to any database in the cluster
# # # is the sum of pool_size across all users.
# # pool_size: 9
# #
# # # Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
# # statement_timeout: 0
# #
# # # PostgreSQL username used to connect to the server.
# # server_username: "postgres
# #
# # # PostgreSQL password used to connect to the server.
# # server_password: "postgres
# ## @param shards [array]
# ## @param shards[0].server[0].host Host for this shard
# ## @param shards[0].server[0].port Port for this shard
# ## @param shards[0].server[0].role Role for this shard
# shards: []
# # [ host, port, role ]
# # - servers:
# # - host: "postgres"
# # port: 5432
# # role: "primary"
# # - host: "postgres"
# # port: 5432
# # role: "replica"
# # database: "postgres"
# # # [ host, port, role ]
# # - servers:
# # - host: "postgres"
# # port: 5432
# # role: "primary"
# # - host: "postgres"
# # port: 5432
# # role: "replica"
# # database: "postgres"
# # # [ host, port, role ]
# # - servers:
# # - host: "postgres"
# # port: 5432
# # role: "primary"
# # - host: "postgres"
# # port: 5432
# # role: "replica"
# # database: "postgres"


@@ -1,9 +0,0 @@
Package: pgcat
Version: ${PACKAGE_VERSION}
Section: database
Priority: optional
Architecture: ${ARCH}
Maintainer: PostgresML <team@postgresml.org>
Homepage: https://postgresml.org
Description: PgCat - NextGen PostgreSQL Pooler
PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.


@@ -1 +0,0 @@
sign: false


@@ -1,5 +0,0 @@
remote: origin
target-branch: main
chart-dirs:
- charts


@@ -1,8 +1,6 @@
 FROM rust:bullseye
 # Dependencies
-COPY --from=sclevine/yj /bin/yj /bin/yj
-RUN /bin/yj -h
 RUN apt-get update -y \
     && apt-get install -y \
     llvm-11 psmisc postgresql-contrib postgresql-client \


@@ -25,7 +25,7 @@ x-common-env-pg:
 services:
   main:
-    image: gcr.io/google_containers/pause:3.2
+    image: kubernetes/pause
     ports:
       - 6432
@@ -64,7 +64,7 @@ services:
     <<: *common-env-pg
     POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
     PGPORT: 10432
-    command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
+    command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
   toxiproxy:
     build: .


@@ -71,10 +71,6 @@ default_role = "any"
 # we'll direct it to the primary.
 query_parser_enabled = true
-# If the query parser is enabled and this setting is enabled, we'll attempt to
-# infer the role from the query itself.
-query_parser_read_write_splitting = true
 # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
 # load balancing of read queries. Otherwise, the primary will only be used for write
 # queries. The primary can always be explicitly selected with our custom protocol.

File diff suppressed because it is too large.


@@ -1,22 +0,0 @@
# This is an example of the most basic config
# that will mimic what PgBouncer does in transaction mode with one server.
[general]
host = "0.0.0.0"
port = 6433
admin_username = "pgcat"
admin_password = "pgcat"
[pools.pgml.users.0]
username = "postgres"
password = "postgres"
pool_size = 10
min_pool_size = 1
pool_mode = "transaction"
[pools.pgml.shards.0]
servers = [
["127.0.0.1", 28815, "primary"]
]
database = "postgres"


@@ -1,17 +0,0 @@
[Unit]
Description=PgCat pooler
After=network.target
StartLimitIntervalSec=0
[Service]
User=pgcat
Type=simple
Restart=always
RestartSec=1
Environment=RUST_LOG=info
LimitNOFILE=65536
ExecStart=/usr/bin/pgcat /etc/pgcat.toml
ExecReload=/bin/kill -SIGHUP $MAINPID
[Install]
WantedBy=multi-user.target


@@ -77,58 +77,6 @@ admin_username = "admin_user"
 # Password to access the virtual administrative database
 admin_password = "admin_pass"
-# Default plugins that are configured on all pools.
-[plugins]
-# Prewarmer plugin that runs queries on server startup, before giving the connection
-# to the client.
-[plugins.prewarmer]
-enabled = false
-queries = [
-  "SELECT pg_prewarm('pgbench_accounts')",
-]
-# Log all queries to stdout.
-[plugins.query_logger]
-enabled = false
-# Block access to tables that Postgres does not allow us to control.
-[plugins.table_access]
-enabled = false
-tables = [
-  "pg_user",
-  "pg_roles",
-  "pg_database",
-]
-# Intercept user queries and give a fake reply.
-[plugins.intercept]
-enabled = true
-[plugins.intercept.queries.0]
-query = "select current_database() as a, current_schemas(false) as b"
-schema = [
-  ["a", "text"],
-  ["b", "text"],
-]
-result = [
-  ["${DATABASE}", "{public}"],
-]
-[plugins.intercept.queries.1]
-query = "select current_database(), current_schema(), current_user"
-schema = [
-  ["current_database", "text"],
-  ["current_schema", "text"],
-  ["current_user", "text"],
-]
-result = [
-  ["${DATABASE}", "public", "${USER}"],
-]
 # pool configs are structured as pool.<pool_name>
 # the pool_name is what clients use as database name when connecting.
 # For a pool named `sharded_db`, clients access that pool using connection string like
@@ -150,20 +98,12 @@ load_balancing_mode = "random"
 # `primary` all queries go to the primary unless otherwise specified.
 default_role = "any"
-# Prepared statements cache size.
-# TODO: update documentation
-prepared_statements_cache_size = 500
 # If Query Parser is enabled, we'll attempt to parse
 # every incoming query to determine if it's a read or a write.
 # If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
 # we'll direct it to the primary.
 query_parser_enabled = true
-# If the query parser is enabled and this setting is enabled, we'll attempt to
-# infer the role from the query itself.
-query_parser_read_write_splitting = true
 # If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
 # load balancing of read queries. Otherwise, the primary will only be used for write
 # queries. The primary can always be explicitly selected with our custom protocol.
@@ -175,12 +115,6 @@ primary_reads_enabled = true
 # shard_id_regex = '/\* shard_id: (\d+) \*/'
 # regex_search_limit = 1000 # only look at the first 1000 characters of SQL statements
-# Defines the behavior when no shard is selected in a sharded system.
-# `random`: picks a shard at random
-# `random_healthy`: picks a shard at random favoring shards with the least number of recent errors
-# `shard_<number>`: e.g. shard_0, shard_4, etc. picks a specific shard, everytime
-# no_shard_specified_behavior = "shard_0"
 # So what if you wanted to implement a different hashing function,
 # or you've already built one and you want this pooler to use it?
 # Current options:
@@ -191,7 +125,7 @@ sharding_function = "pg_bigint_hash"
 # Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
 # established using the database configured in the pool. This parameter is inherited by every pool
 # and can be redefined in pool configuration.
-# auth_query="SELECT usename, passwd FROM pg_shadow WHERE usename='$1'"
+# auth_query = "SELECT $1"
 # User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
 # specified in `auth_query_user`. The connection will be established using the database configured in the pool.
@@ -220,20 +154,12 @@ connect_timeout = 3000
 # Specifies how often (in seconds) cached ip addresses for servers are rechecked (see `dns_cache_enabled`).
 # dns_max_ttl = 30
-# Plugins can be configured on a pool-per-pool basis. This overrides the global plugins setting,
-# so all plugins have to be configured here again.
-[pool.sharded_db.plugins]
-[pools.sharded_db.plugins.prewarmer]
-enabled = true
-queries = [
-  "SELECT pg_prewarm('pgbench_accounts')",
-]
-[pools.sharded_db.plugins.query_logger]
+[plugins]
+[plugins.query_logger]
 enabled = false
-[pools.sharded_db.plugins.table_access]
+[plugins.table_access]
 enabled = false
 tables = [
   "pg_user",
@@ -241,10 +167,10 @@ tables = [
   "pg_database",
 ]
-[pools.sharded_db.plugins.intercept]
+[plugins.intercept]
 enabled = true
-[pools.sharded_db.plugins.intercept.queries.0]
+[plugins.intercept.queries.0]
 query = "select current_database() as a, current_schemas(false) as b"
 schema = [
@@ -255,7 +181,7 @@ result = [
   ["${DATABASE}", "{public}"],
 ]
-[pools.sharded_db.plugins.intercept.queries.1]
+[plugins.intercept.queries.1]
 query = "select current_database(), current_schema(), current_user"
 schema = [
@@ -278,7 +204,7 @@ username = "sharding_user"
 # if `server_password` is not set.
 password = "sharding_user"
-pool_mode = "transaction"
+pool_mode = "session"
 # PostgreSQL username used to connect to the server.
 # server_username = "another_user"
@@ -301,8 +227,6 @@ username = "other_user"
 password = "other_user"
 pool_size = 21
 statement_timeout = 15000
-connect_timeout = 1000
-idle_timeout = 1000
 # Shard configs are structured as pool.<pool_name>.shards.<shard_id>
 # Each shard config contains a list of servers that make up the shard

@@ -1,9 +0,0 @@
#!/bin/bash
set -e
systemctl daemon-reload
systemctl enable pgcat
if ! id pgcat 2> /dev/null; then
useradd -s /usr/bin/false pgcat
fi

postrm

@@ -1,4 +0,0 @@
#!/bin/bash
set -e
systemctl daemon-reload

prerm

@@ -1,5 +0,0 @@
#!/bin/bash
set -e
systemctl stop pgcat
systemctl disable pgcat


@@ -1,6 +1,4 @@
use crate::pool::BanReason; use crate::pool::BanReason;
use crate::server::ServerParameters;
use crate::stats::pool::PoolStats;
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use log::{error, info, trace}; use log::{error, info, trace};
use nix::sys::signal::{self, Signal}; use nix::sys::signal::{self, Signal};
@@ -16,18 +14,18 @@ use crate::errors::Error;
use crate::messages::*; use crate::messages::*;
use crate::pool::ClientServerMap; use crate::pool::ClientServerMap;
use crate::pool::{get_all_pools, get_pool}; use crate::pool::{get_all_pools, get_pool};
use crate::stats::{get_client_stats, get_server_stats, ClientState, ServerState}; use crate::stats::{get_client_stats, get_pool_stats, get_server_stats, ClientState, ServerState};
pub fn generate_server_parameters_for_admin() -> ServerParameters { pub fn generate_server_info_for_admin() -> BytesMut {
let mut server_parameters = ServerParameters::new(); let mut server_info = BytesMut::new();
server_parameters.set_param("application_name".to_string(), "".to_string(), true); server_info.put(server_parameter_message("application_name", ""));
server_parameters.set_param("client_encoding".to_string(), "UTF8".to_string(), true); server_info.put(server_parameter_message("client_encoding", "UTF8"));
server_parameters.set_param("server_encoding".to_string(), "UTF8".to_string(), true); server_info.put(server_parameter_message("server_encoding", "UTF8"));
server_parameters.set_param("server_version".to_string(), VERSION.to_string(), true); server_info.put(server_parameter_message("server_version", VERSION));
server_parameters.set_param("DateStyle".to_string(), "ISO, MDY".to_string(), true); server_info.put(server_parameter_message("DateStyle", "ISO, MDY"));
server_parameters server_info
} }
/// Handle admin client. /// Handle admin client.
@@ -55,12 +53,7 @@ where
let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect(); let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();
match query_parts match query_parts[0].to_ascii_uppercase().as_str() {
.first()
.unwrap_or(&"")
.to_ascii_uppercase()
.as_str()
{
"BAN" => { "BAN" => {
trace!("BAN"); trace!("BAN");
ban(stream, query_parts).await ban(stream, query_parts).await
@@ -79,26 +72,17 @@ where
} }
"PAUSE" => { "PAUSE" => {
trace!("PAUSE"); trace!("PAUSE");
pause(stream, query_parts).await pause(stream, query_parts[1]).await
} }
"RESUME" => { "RESUME" => {
trace!("RESUME"); trace!("RESUME");
resume(stream, query_parts).await resume(stream, query_parts[1]).await
} }
"SHUTDOWN" => { "SHUTDOWN" => {
trace!("SHUTDOWN"); trace!("SHUTDOWN");
shutdown(stream).await shutdown(stream).await
} }
"SHOW" => match query_parts "SHOW" => match query_parts[1].to_ascii_uppercase().as_str() {
.get(1)
.unwrap_or(&"")
.to_ascii_uppercase()
.as_str()
{
"HELP" => {
trace!("SHOW HELP");
show_help(stream).await
}
"BANS" => { "BANS" => {
trace!("SHOW BANS"); trace!("SHOW BANS");
show_bans(stream).await show_bans(stream).await
@@ -270,50 +254,39 @@ async fn show_pools<T>(stream: &mut T) -> Result<(), Error>
where where
T: tokio::io::AsyncWrite + std::marker::Unpin, T: tokio::io::AsyncWrite + std::marker::Unpin,
{ {
let pool_lookup = PoolStats::construct_pool_lookup(); let all_pool_stats = get_pool_stats();
let mut res = BytesMut::new();
res.put(row_description(&PoolStats::generate_header()));
pool_lookup.iter().for_each(|(_identifier, pool_stats)| {
res.put(data_row(&pool_stats.generate_row()));
});
res.put(command_complete("SHOW"));
// ReadyForQuery let columns = vec![
res.put_u8(b'Z'); ("database", DataType::Text),
res.put_i32(5); ("user", DataType::Text),
res.put_u8(b'I'); ("pool_mode", DataType::Text),
("cl_idle", DataType::Numeric),
write_all_half(stream, &res).await ("cl_active", DataType::Numeric),
} ("cl_waiting", DataType::Numeric),
("cl_cancel_req", DataType::Numeric),
/// Show all available options. ("sv_active", DataType::Numeric),
async fn show_help<T>(stream: &mut T) -> Result<(), Error> ("sv_idle", DataType::Numeric),
where ("sv_used", DataType::Numeric),
T: tokio::io::AsyncWrite + std::marker::Unpin, ("sv_tested", DataType::Numeric),
{ ("sv_login", DataType::Numeric),
let mut res = BytesMut::new(); ("maxwait", DataType::Numeric),
("maxwait_us", DataType::Numeric),
let detail_msg = [
"",
"SHOW HELP|CONFIG|DATABASES|POOLS|CLIENTS|SERVERS|USERS|VERSION",
// "SHOW PEERS|PEER_POOLS", // missing PEERS|PEER_POOLS
// "SHOW FDS|SOCKETS|ACTIVE_SOCKETS|LISTS|MEM|STATE", // missing FDS|SOCKETS|ACTIVE_SOCKETS|MEM|STATE
"SHOW LISTS",
// "SHOW DNS_HOSTS|DNS_ZONES", // missing DNS_HOSTS|DNS_ZONES
"SHOW STATS", // missing STATS_TOTALS|STATS_AVERAGES|TOTALS
"SET key = arg",
"RELOAD",
"PAUSE [<db>, <user>]",
"RESUME [<db>, <user>]",
// "DISABLE <db>", // missing
// "ENABLE <db>", // missing
// "RECONNECT [<db>]", missing
// "KILL <db>",
// "SUSPEND",
"SHUTDOWN",
]; ];
res.put(notify("Console usage", detail_msg.join("\n\t"))); let mut res = BytesMut::new();
res.put(row_description(&columns));
for ((_user_pool, _pool), pool_stats) in all_pool_stats {
let mut row = vec![
pool_stats.database(),
pool_stats.user(),
pool_stats.pool_mode().to_string(),
];
pool_stats.populate_row(&mut row);
pool_stats.clear_maxwait();
res.put(data_row(&row));
}
res.put(command_complete("SHOW")); res.put(command_complete("SHOW"));
// ReadyForQuery // ReadyForQuery
@@ -367,7 +340,7 @@ where
database_name.to_string(), // database database_name.to_string(), // database
pool_config.user.username.to_string(), // force_user pool_config.user.username.to_string(), // force_user
pool_config.user.pool_size.to_string(), // pool_size pool_config.user.pool_size.to_string(), // pool_size
pool_config.user.min_pool_size.unwrap_or(0).to_string(), // min_pool_size "0".to_string(), // min_pool_size
"0".to_string(), // reserve_pool "0".to_string(), // reserve_pool
pool_config.pool_mode.to_string(), // pool_mode pool_config.pool_mode.to_string(), // pool_mode
pool_config.user.pool_size.to_string(), // max_connections pool_config.user.pool_size.to_string(), // max_connections
@@ -700,8 +673,6 @@ where
("query_count", DataType::Numeric), ("query_count", DataType::Numeric),
("error_count", DataType::Numeric), ("error_count", DataType::Numeric),
("age_seconds", DataType::Numeric), ("age_seconds", DataType::Numeric),
("maxwait", DataType::Numeric),
("maxwait_us", DataType::Numeric),
]; ];
let new_map = get_client_stats(); let new_map = get_client_stats();
@@ -709,7 +680,6 @@ where
res.put(row_description(&columns)); res.put(row_description(&columns));
for (_, client) in new_map { for (_, client) in new_map {
let max_wait = client.max_wait_time.load(Ordering::Relaxed);
let row = vec![ let row = vec![
format!("{:#010X}", client.client_id()), format!("{:#010X}", client.client_id()),
client.pool_name(), client.pool_name(),
@@ -723,8 +693,6 @@ where
.duration_since(client.connect_time()) .duration_since(client.connect_time())
.as_secs() .as_secs()
.to_string(), .to_string(),
(max_wait / 1_000_000).to_string(),
(max_wait % 1_000_000).to_string(),
]; ];
res.put(data_row(&row)); res.put(data_row(&row));
@@ -757,10 +725,6 @@ where
("bytes_sent", DataType::Numeric), ("bytes_sent", DataType::Numeric),
("bytes_received", DataType::Numeric), ("bytes_received", DataType::Numeric),
("age_seconds", DataType::Numeric), ("age_seconds", DataType::Numeric),
("prepare_cache_hit", DataType::Numeric),
("prepare_cache_miss", DataType::Numeric),
("prepare_cache_eviction", DataType::Numeric),
("prepare_cache_size", DataType::Numeric),
]; ];
let new_map = get_server_stats(); let new_map = get_server_stats();
@@ -784,22 +748,6 @@ where
.duration_since(server.connect_time()) .duration_since(server.connect_time())
.as_secs() .as_secs()
.to_string(), .to_string(),
server
.prepared_hit_count
.load(Ordering::Relaxed)
.to_string(),
server
.prepared_miss_count
.load(Ordering::Relaxed)
.to_string(),
server
.prepared_eviction_count
.load(Ordering::Relaxed)
.to_string(),
server
.prepared_cache_size
.load(Ordering::Relaxed)
.to_string(),
]; ];
res.put(data_row(&row)); res.put(data_row(&row));
@@ -816,33 +764,19 @@ where
} }
/// Pause a pool. It won't pass any more queries to the backends. /// Pause a pool. It won't pass any more queries to the backends.
async fn pause<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error> async fn pause<T>(stream: &mut T, query: &str) -> Result<(), Error>
where where
T: tokio::io::AsyncWrite + std::marker::Unpin, T: tokio::io::AsyncWrite + std::marker::Unpin,
{ {
let parts: Vec<&str> = match tokens.len() == 2 { let parts: Vec<&str> = query.split(",").map(|part| part.trim()).collect();
true => tokens[1].split(',').map(|part| part.trim()).collect(),
false => Vec::new(),
};
match parts.len() { if parts.len() != 2 {
0 => { error_response(
for (_, pool) in get_all_pools() { stream,
pool.pause(); "PAUSE requires a database and a user, e.g. PAUSE my_db,my_user",
} )
.await
let mut res = BytesMut::new(); } else {
res.put(command_complete("PAUSE"));
// ReadyForQuery
res.put_u8(b'Z');
res.put_i32(5);
res.put_u8(b'I');
write_all_half(stream, &res).await
}
2 => {
let database = parts[0]; let database = parts[0];
let user = parts[1]; let user = parts[1];
@@ -874,38 +808,22 @@ where
} }
} }
} }
_ => error_response(stream, "usage: PAUSE [db, user]").await,
}
} }
/// Resume a pool. Queries are allowed again. /// Resume a pool. Queries are allowed again.
async fn resume<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error> async fn resume<T>(stream: &mut T, query: &str) -> Result<(), Error>
where where
T: tokio::io::AsyncWrite + std::marker::Unpin, T: tokio::io::AsyncWrite + std::marker::Unpin,
{ {
let parts: Vec<&str> = match tokens.len() == 2 { let parts: Vec<&str> = query.split(",").map(|part| part.trim()).collect();
true => tokens[1].split(',').map(|part| part.trim()).collect(),
false => Vec::new(),
};
match parts.len() { if parts.len() != 2 {
0 => { error_response(
for (_, pool) in get_all_pools() { stream,
pool.resume(); "RESUME requires a database and a user, e.g. RESUME my_db,my_user",
} )
.await
let mut res = BytesMut::new(); } else {
res.put(command_complete("RESUME"));
// ReadyForQuery
res.put_u8(b'Z');
res.put_i32(5);
res.put_u8(b'I');
write_all_half(stream, &res).await
}
2 => {
let database = parts[0]; let database = parts[0];
let user = parts[1]; let user = parts[1];
@@ -937,8 +855,6 @@ where
} }
} }
} }
_ => error_response(stream, "usage: RESUME [db, user]").await,
}
} }
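Both commands are issued over the normal Postgres protocol against the admin console. A minimal client-side sketch with tokio-postgres (the host, port, and credentials below are assumptions for illustration, not part of this diff):

use tokio_postgres::{connect, NoTls};

async fn pause_and_resume() -> Result<(), tokio_postgres::Error> {
    // Connect to the pgcat admin console (assumed to listen on 6432).
    let (client, connection) = connect(
        "host=127.0.0.1 port=6432 user=admin_user password=admin_pass dbname=pgbouncer",
        NoTls,
    )
    .await?;
    tokio::spawn(connection);

    // Pause a specific pool, identified by database and user...
    client.simple_query("PAUSE sharded_db,sharding_user").await?;
    // ...then let queries flow again.
    client.simple_query("RESUME sharded_db,sharding_user").await?;
    Ok(())
}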
/// Send response packets for shutdown. /// Send response packets for shutdown.


@@ -79,8 +79,6 @@ impl AuthPassthrough {
             pool_mode: None,
             server_lifetime: None,
             min_pool_size: None,
-            connect_timeout: None,
-            idle_timeout: None,
         };

         let user = &address.username;

File diff suppressed because it is too large.


@@ -1,36 +0,0 @@
use clap::{Parser, ValueEnum};
use tracing::Level;
/// PgCat: Nextgen PostgreSQL Pooler
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
#[arg(default_value_t = String::from("pgcat.toml"), env)]
pub config_file: String,
#[arg(short, long, default_value_t = tracing::Level::INFO, env)]
pub log_level: Level,
#[clap(short='F', long, value_enum, default_value_t=LogFormat::Text, env)]
pub log_format: LogFormat,
#[arg(
short,
long,
default_value_t = false,
env,
help = "disable colors in the log output"
)]
pub no_color: bool,
}
pub fn parse() -> Args {
Args::parse()
}
#[derive(ValueEnum, Clone, Debug)]
pub enum LogFormat {
Text,
Structured,
Debug,
}
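For reference, the (now removed) argument surface above accepted invocations along these lines; the binary name and paths are assumptions, illustrative only:

// Positional config file plus flags, mirroring the clap attributes above:
//   pgcat /etc/pgcat/pgcat.toml --log-level debug
//   pgcat --log-format structured --no-color
// Every argument can also come from the environment (e.g. CONFIG_FILE,
// LOG_LEVEL, LOG_FORMAT, NO_COLOR) via clap's `env` attribute.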


@@ -3,14 +3,11 @@ use arc_swap::ArcSwap;
use log::{error, info}; use log::{error, info};
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use regex::Regex; use regex::Regex;
use serde::{Deserializer, Serializer};
use serde_derive::{Deserialize, Serialize}; use serde_derive::{Deserialize, Serialize};
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use std::collections::{BTreeMap, HashMap, HashSet}; use std::collections::{BTreeMap, HashMap, HashSet};
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::path::Path; use std::path::Path;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc; use std::sync::Arc;
use tokio::fs::File; use tokio::fs::File;
use tokio::io::AsyncReadExt; use tokio::io::AsyncReadExt;
@@ -38,12 +35,12 @@ pub enum Role {
Mirror, Mirror,
} }
impl std::fmt::Display for Role { impl ToString for Role {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn to_string(&self) -> String {
match self { match *self {
Role::Primary => write!(f, "primary"), Role::Primary => "primary".to_string(),
Role::Replica => write!(f, "replica"), Role::Replica => "replica".to_string(),
Role::Mirror => write!(f, "mirror"), Role::Mirror => "mirror".to_string(),
} }
} }
} }
@@ -104,9 +101,6 @@ pub struct Address {
/// Address stats /// Address stats
pub stats: Arc<AddressStats>, pub stats: Arc<AddressStats>,
/// Number of errors encountered since last successful checkout
pub error_count: Arc<AtomicU64>,
} }
impl Default for Address { impl Default for Address {
@@ -116,29 +110,18 @@ impl Default for Address {
host: String::from("127.0.0.1"), host: String::from("127.0.0.1"),
port: 5432, port: 5432,
shard: 0, shard: 0,
address_index: 0,
replica_number: 0,
database: String::from("database"), database: String::from("database"),
role: Role::Replica, role: Role::Replica,
replica_number: 0,
address_index: 0,
username: String::from("username"), username: String::from("username"),
pool_name: String::from("pool_name"), pool_name: String::from("pool_name"),
mirrors: Vec::new(), mirrors: Vec::new(),
stats: Arc::new(AddressStats::default()), stats: Arc::new(AddressStats::default()),
error_count: Arc::new(AtomicU64::new(0)),
} }
} }
} }
impl std::fmt::Display for Address {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"[address: {}:{}][database: {}][user: {}]",
self.host, self.port, self.database, self.username
)
}
}
// We need to implement PartialEq by ourselves so we skip stats in the comparison // We need to implement PartialEq by ourselves so we skip stats in the comparison
impl PartialEq for Address { impl PartialEq for Address {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
@@ -189,18 +172,6 @@ impl Address {
), ),
} }
} }
pub fn error_count(&self) -> u64 {
self.error_count.load(Ordering::Relaxed)
}
pub fn increment_error_count(&self) {
self.error_count.fetch_add(1, Ordering::Relaxed);
}
pub fn reset_error_count(&self) {
self.error_count.store(0, Ordering::Relaxed);
}
} }
/// PostgreSQL user. /// PostgreSQL user.
@@ -216,8 +187,6 @@ pub struct User {
pub server_lifetime: Option<u64>, pub server_lifetime: Option<u64>,
#[serde(default)] // 0 #[serde(default)] // 0
pub statement_timeout: u64, pub statement_timeout: u64,
pub connect_timeout: Option<u64>,
pub idle_timeout: Option<u64>,
} }
impl Default for User { impl Default for User {
@@ -232,15 +201,14 @@ impl Default for User {
statement_timeout: 0, statement_timeout: 0,
pool_mode: None, pool_mode: None,
server_lifetime: None, server_lifetime: None,
connect_timeout: None,
idle_timeout: None,
} }
} }
} }
impl User { impl User {
fn validate(&self) -> Result<(), Error> { fn validate(&self) -> Result<(), Error> {
if let Some(min_pool_size) = self.min_pool_size { match self.min_pool_size {
Some(min_pool_size) => {
if min_pool_size > self.pool_size { if min_pool_size > self.pool_size {
error!( error!(
"min_pool_size of {} cannot be larger than pool_size of {}", "min_pool_size of {} cannot be larger than pool_size of {}",
@@ -248,6 +216,9 @@ impl User {
); );
return Err(Error::BadConfig); return Err(Error::BadConfig);
} }
}
None => (),
}; };
Ok(()) Ok(())
@@ -264,8 +235,6 @@ pub struct General {
pub port: u16, pub port: u16,
pub enable_prometheus_exporter: Option<bool>, pub enable_prometheus_exporter: Option<bool>,
#[serde(default = "General::default_prometheus_exporter_port")]
pub prometheus_exporter_port: i16, pub prometheus_exporter_port: i16,
#[serde(default = "General::default_connect_timeout")] #[serde(default = "General::default_connect_timeout")]
@@ -280,8 +249,6 @@ pub struct General {
pub tcp_keepalives_count: u32, pub tcp_keepalives_count: u32,
#[serde(default = "General::default_tcp_keepalives_interval")] #[serde(default = "General::default_tcp_keepalives_interval")]
pub tcp_keepalives_interval: u64, pub tcp_keepalives_interval: u64,
#[serde(default = "General::default_tcp_user_timeout")]
pub tcp_user_timeout: u64,
#[serde(default)] // False #[serde(default)] // False
pub log_client_connections: bool, pub log_client_connections: bool,
@@ -313,9 +280,6 @@ pub struct General {
#[serde(default = "General::default_server_lifetime")] #[serde(default = "General::default_server_lifetime")]
pub server_lifetime: u64, pub server_lifetime: u64,
#[serde(default = "General::default_server_round_robin")] // False
pub server_round_robin: bool,
#[serde(default = "General::default_worker_threads")] #[serde(default = "General::default_worker_threads")]
pub worker_threads: usize, pub worker_threads: usize,
@@ -334,9 +298,6 @@ pub struct General {
pub admin_username: String, pub admin_username: String,
pub admin_password: String, pub admin_password: String,
#[serde(default = "General::default_validate_config")]
pub validate_config: bool,
// Support for auth query // Support for auth query
pub auth_query: Option<String>, pub auth_query: Option<String>,
pub auth_query_user: Option<String>, pub auth_query_user: Option<String>,
@@ -353,7 +314,7 @@ impl General {
} }
pub fn default_server_lifetime() -> u64 { pub fn default_server_lifetime() -> u64 {
1000 * 60 * 60 // 1 hour 1000 * 60 * 60 * 24 // 24 hours
} }
pub fn default_connect_timeout() -> u64 { pub fn default_connect_timeout() -> u64 {
@@ -375,12 +336,8 @@ impl General {
5 // 5 seconds 5 // 5 seconds
} }
pub fn default_tcp_user_timeout() -> u64 {
10000 // 10000 milliseconds
}
pub fn default_idle_timeout() -> u64 { pub fn default_idle_timeout() -> u64 {
600000 // 10 minutes 60000 // 10 minutes
} }
pub fn default_shutdown_timeout() -> u64 { pub fn default_shutdown_timeout() -> u64 {
@@ -410,18 +367,6 @@ impl General {
pub fn default_idle_client_in_transaction_timeout() -> u64 { pub fn default_idle_client_in_transaction_timeout() -> u64 {
0 0
} }
pub fn default_validate_config() -> bool {
true
}
pub fn default_prometheus_exporter_port() -> i16 {
9930
}
pub fn default_server_round_robin() -> bool {
true
}
} }
impl Default for General { impl Default for General {
@@ -433,33 +378,30 @@ impl Default for General {
prometheus_exporter_port: 9930, prometheus_exporter_port: 9930,
connect_timeout: General::default_connect_timeout(), connect_timeout: General::default_connect_timeout(),
idle_timeout: General::default_idle_timeout(), idle_timeout: General::default_idle_timeout(),
tcp_keepalives_idle: Self::default_tcp_keepalives_idle(),
tcp_keepalives_count: Self::default_tcp_keepalives_count(),
tcp_keepalives_interval: Self::default_tcp_keepalives_interval(),
tcp_user_timeout: Self::default_tcp_user_timeout(),
log_client_connections: false,
log_client_disconnections: false,
dns_cache_enabled: false,
dns_max_ttl: Self::default_dns_max_ttl(),
shutdown_timeout: Self::default_shutdown_timeout(), shutdown_timeout: Self::default_shutdown_timeout(),
healthcheck_timeout: Self::default_healthcheck_timeout(), healthcheck_timeout: Self::default_healthcheck_timeout(),
healthcheck_delay: Self::default_healthcheck_delay(), healthcheck_delay: Self::default_healthcheck_delay(),
ban_time: Self::default_ban_time(), ban_time: Self::default_ban_time(),
idle_client_in_transaction_timeout: Self::default_idle_client_in_transaction_timeout(),
server_lifetime: Self::default_server_lifetime(),
server_round_robin: Self::default_server_round_robin(),
worker_threads: Self::default_worker_threads(), worker_threads: Self::default_worker_threads(),
idle_client_in_transaction_timeout: Self::default_idle_client_in_transaction_timeout(),
tcp_keepalives_idle: Self::default_tcp_keepalives_idle(),
tcp_keepalives_count: Self::default_tcp_keepalives_count(),
tcp_keepalives_interval: Self::default_tcp_keepalives_interval(),
log_client_connections: false,
log_client_disconnections: false,
autoreload: None, autoreload: None,
dns_cache_enabled: false,
dns_max_ttl: Self::default_dns_max_ttl(),
tls_certificate: None, tls_certificate: None,
tls_private_key: None, tls_private_key: None,
server_tls: false, server_tls: false,
verify_server_certificate: false, verify_server_certificate: false,
admin_username: String::from("admin"), admin_username: String::from("admin"),
admin_password: String::from("admin"), admin_password: String::from("admin"),
validate_config: true,
auth_query: None, auth_query: None,
auth_query_user: None, auth_query_user: None,
auth_query_password: None, auth_query_password: None,
server_lifetime: 1000 * 3600 * 24, // 24 hours,
} }
} }
} }
@@ -476,11 +418,11 @@ pub enum PoolMode {
Session, Session,
} }
impl std::fmt::Display for PoolMode { impl ToString for PoolMode {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn to_string(&self) -> String {
match self { match *self {
PoolMode::Transaction => write!(f, "transaction"), PoolMode::Transaction => "transaction".to_string(),
PoolMode::Session => write!(f, "session"), PoolMode::Session => "session".to_string(),
} }
} }
} }
@@ -493,13 +435,12 @@ pub enum LoadBalancingMode {
#[serde(alias = "loc", alias = "LOC", alias = "least_outstanding_connections")] #[serde(alias = "loc", alias = "LOC", alias = "least_outstanding_connections")]
LeastOutstandingConnections, LeastOutstandingConnections,
} }
impl ToString for LoadBalancingMode {
impl std::fmt::Display for LoadBalancingMode { fn to_string(&self) -> String {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self {
match self { LoadBalancingMode::Random => "random".to_string(),
LoadBalancingMode::Random => write!(f, "random"),
LoadBalancingMode::LeastOutstandingConnections => { LoadBalancingMode::LeastOutstandingConnections => {
write!(f, "least_outstanding_connections") "least_outstanding_connections".to_string()
} }
} }
} }
@@ -513,32 +454,20 @@ pub struct Pool {
#[serde(default = "Pool::default_load_balancing_mode")] #[serde(default = "Pool::default_load_balancing_mode")]
pub load_balancing_mode: LoadBalancingMode, pub load_balancing_mode: LoadBalancingMode,
#[serde(default = "Pool::default_default_role")]
pub default_role: String, pub default_role: String,
#[serde(default)] // False #[serde(default)] // False
pub query_parser_enabled: bool, pub query_parser_enabled: bool,
pub query_parser_max_length: Option<usize>,
#[serde(default)] // False
pub query_parser_read_write_splitting: bool,
#[serde(default)] // False #[serde(default)] // False
pub primary_reads_enabled: bool, pub primary_reads_enabled: bool,
/// Maximum time to allow for establishing a new server connection.
pub connect_timeout: Option<u64>, pub connect_timeout: Option<u64>,
/// Close idle connections that have been opened for longer than this.
pub idle_timeout: Option<u64>, pub idle_timeout: Option<u64>,
/// Close server connections that have been opened for longer than this.
/// Only applied to idle connections. If the connection is actively used for
/// longer than this period, the pool will not interrupt it.
pub server_lifetime: Option<u64>, pub server_lifetime: Option<u64>,
#[serde(default = "Pool::default_sharding_function")]
pub sharding_function: ShardingFunction, pub sharding_function: ShardingFunction,
#[serde(default = "Pool::default_automatic_sharding_key")] #[serde(default = "Pool::default_automatic_sharding_key")]
@@ -548,23 +477,10 @@ pub struct Pool {
pub shard_id_regex: Option<String>, pub shard_id_regex: Option<String>,
pub regex_search_limit: Option<usize>, pub regex_search_limit: Option<usize>,
#[serde(default = "Pool::default_default_shard")]
pub default_shard: DefaultShard,
pub auth_query: Option<String>, pub auth_query: Option<String>,
pub auth_query_user: Option<String>, pub auth_query_user: Option<String>,
pub auth_query_password: Option<String>, pub auth_query_password: Option<String>,
#[serde(default = "Pool::default_cleanup_server_connections")]
pub cleanup_server_connections: bool,
#[serde(default)] // False
pub log_client_parameter_status_changes: bool,
#[serde(default = "Pool::default_prepared_statements_cache_size")]
pub prepared_statements_cache_size: usize,
pub plugins: Option<Plugins>,
pub shards: BTreeMap<String, Shard>, pub shards: BTreeMap<String, Shard>,
pub users: BTreeMap<String, User>, pub users: BTreeMap<String, User>,
// Note, don't put simple fields below these configs. There's a compatibility issue with TOML that makes it // Note, don't put simple fields below these configs. There's a compatibility issue with TOML that makes it
@@ -589,10 +505,6 @@ impl Pool {
PoolMode::Transaction PoolMode::Transaction
} }
pub fn default_default_shard() -> DefaultShard {
DefaultShard::default()
}
pub fn default_load_balancing_mode() -> LoadBalancingMode { pub fn default_load_balancing_mode() -> LoadBalancingMode {
LoadBalancingMode::Random LoadBalancingMode::Random
} }
@@ -601,22 +513,6 @@ impl Pool {
None None
} }
pub fn default_default_role() -> String {
"any".into()
}
pub fn default_sharding_function() -> ShardingFunction {
ShardingFunction::PgBigintHash
}
pub fn default_cleanup_server_connections() -> bool {
true
}
pub fn default_prepared_statements_cache_size() -> usize {
0
}
pub fn validate(&mut self) -> Result<(), Error> { pub fn validate(&mut self) -> Result<(), Error> {
match self.default_role.as_ref() { match self.default_role.as_ref() {
"any" => (), "any" => (),
@@ -657,25 +553,13 @@ impl Pool {
} }
} }
if self.query_parser_read_write_splitting && !self.query_parser_enabled {
error!(
"query_parser_read_write_splitting is only valid when query_parser_enabled is true"
);
return Err(Error::BadConfig);
}
if self.plugins.is_some() && !self.query_parser_enabled {
error!("plugins are only valid when query_parser_enabled is true");
return Err(Error::BadConfig);
}
self.automatic_sharding_key = match &self.automatic_sharding_key { self.automatic_sharding_key = match &self.automatic_sharding_key {
Some(key) => { Some(key) => {
// No quotes in the key so we don't have to compare quoted // No quotes in the key so we don't have to compare quoted
// to unquoted idents. // to unquoted idents.
let key = key.replace('\"', ""); let key = key.replace("\"", "");
if key.split('.').count() != 2 { if key.split(".").count() != 2 {
error!( error!(
"automatic_sharding_key '{}' must be fully qualified, e.g. t.{}`", "automatic_sharding_key '{}' must be fully qualified, e.g. t.{}`",
key, key key, key
@@ -688,14 +572,7 @@ impl Pool {
None => None, None => None,
}; };
if let DefaultShard::Shard(shard_number) = self.default_shard { for (_, user) in &self.users {
if shard_number >= self.shards.len() {
error!("Invalid shard {:?}", shard_number);
return Err(Error::BadConfig);
}
}
for user in self.users.values() {
user.validate()?; user.validate()?;
} }
@@ -708,29 +585,22 @@ impl Default for Pool {
Pool { Pool {
pool_mode: Self::default_pool_mode(), pool_mode: Self::default_pool_mode(),
load_balancing_mode: Self::default_load_balancing_mode(), load_balancing_mode: Self::default_load_balancing_mode(),
shards: BTreeMap::from([(String::from("1"), Shard::default())]),
users: BTreeMap::default(),
default_role: String::from("any"), default_role: String::from("any"),
query_parser_enabled: false, query_parser_enabled: false,
query_parser_max_length: None,
query_parser_read_write_splitting: false,
primary_reads_enabled: false, primary_reads_enabled: false,
connect_timeout: None,
idle_timeout: None,
server_lifetime: None,
sharding_function: ShardingFunction::PgBigintHash, sharding_function: ShardingFunction::PgBigintHash,
automatic_sharding_key: None, automatic_sharding_key: None,
connect_timeout: None,
idle_timeout: None,
sharding_key_regex: None, sharding_key_regex: None,
shard_id_regex: None, shard_id_regex: None,
regex_search_limit: Some(1000), regex_search_limit: Some(1000),
default_shard: Self::default_default_shard(),
auth_query: None, auth_query: None,
auth_query_user: None, auth_query_user: None,
auth_query_password: None, auth_query_password: None,
cleanup_server_connections: true, server_lifetime: None,
log_client_parameter_status_changes: false,
prepared_statements_cache_size: Self::default_prepared_statements_cache_size(),
plugins: None,
shards: BTreeMap::from([(String::from("1"), Shard::default())]),
users: BTreeMap::default(),
} }
} }
} }
@@ -742,50 +612,6 @@ pub struct ServerConfig {
pub role: Role, pub role: Role,
} }
// No Shard Specified handling.
#[derive(Debug, PartialEq, Clone, Eq, Hash, Copy)]
pub enum DefaultShard {
Shard(usize),
Random,
RandomHealthy,
}
impl Default for DefaultShard {
fn default() -> Self {
DefaultShard::Shard(0)
}
}
impl serde::Serialize for DefaultShard {
fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
match self {
DefaultShard::Shard(shard) => {
serializer.serialize_str(&format!("shard_{}", &shard.to_string()))
}
DefaultShard::Random => serializer.serialize_str("random"),
DefaultShard::RandomHealthy => serializer.serialize_str("random_healthy"),
}
}
}
impl<'de> serde::Deserialize<'de> for DefaultShard {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
if let Some(s) = s.strip_prefix("shard_") {
let shard = s.parse::<usize>().map_err(serde::de::Error::custom)?;
return Ok(DefaultShard::Shard(shard));
}
match s.as_str() {
"random" => Ok(DefaultShard::Random),
"random_healthy" => Ok(DefaultShard::RandomHealthy),
_ => Err(serde::de::Error::custom(
"invalid value for no_shard_specified_behavior",
)),
}
}
}
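As an illustration of the accepted values (a hypothetical test sketch, not part of this diff; it assumes the toml crate is available, and the TOML key follows the `default_shard` field above):

#[derive(serde_derive::Deserialize)]
struct PoolSnippet {
    default_shard: DefaultShard,
}

#[test]
fn default_shard_round_trip() {
    // "shard_<n>" pins a fixed shard; "random" and "random_healthy"
    // pick one at query time, per the Deserialize impl above.
    for (toml_value, expected) in [
        ("default_shard = \"shard_4\"", DefaultShard::Shard(4)),
        ("default_shard = \"random\"", DefaultShard::Random),
        ("default_shard = \"random_healthy\"", DefaultShard::RandomHealthy),
    ] {
        let parsed: PoolSnippet = toml::from_str(toml_value).unwrap();
        assert_eq!(parsed.default_shard, expected);
    }
}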
#[derive(Clone, PartialEq, Serialize, Deserialize, Debug, Hash, Eq)] #[derive(Clone, PartialEq, Serialize, Deserialize, Debug, Hash, Eq)]
pub struct MirrorServerConfig { pub struct MirrorServerConfig {
pub host: String, pub host: String,
@@ -842,106 +668,50 @@ impl Shard {
impl Default for Shard { impl Default for Shard {
fn default() -> Shard { fn default() -> Shard {
Shard { Shard {
database: String::from("postgres"),
mirrors: None,
servers: vec![ServerConfig { servers: vec![ServerConfig {
host: String::from("localhost"), host: String::from("localhost"),
port: 5432, port: 5432,
role: Role::Primary, role: Role::Primary,
}], }],
mirrors: None,
database: String::from("postgres"),
} }
} }
} }
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Plugins { pub struct Plugins {
pub intercept: Option<Intercept>, pub intercept: Option<Intercept>,
pub table_access: Option<TableAccess>, pub table_access: Option<TableAccess>,
pub query_logger: Option<QueryLogger>, pub query_logger: Option<QueryLogger>,
pub prewarmer: Option<Prewarmer>,
} }
pub trait Plugin { #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
fn is_enabled(&self) -> bool;
}
impl std::fmt::Display for Plugins {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn is_enabled<T: Plugin>(arg: Option<&T>) -> bool {
if let Some(arg) = arg {
arg.is_enabled()
} else {
false
}
}
write!(
f,
"interceptor: {}, table_access: {}, query_logger: {}, prewarmer: {}",
is_enabled(self.intercept.as_ref()),
is_enabled(self.table_access.as_ref()),
is_enabled(self.query_logger.as_ref()),
is_enabled(self.prewarmer.as_ref()),
)
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
pub struct Intercept { pub struct Intercept {
pub enabled: bool, pub enabled: bool,
pub queries: BTreeMap<String, Query>, pub queries: BTreeMap<String, Query>,
} }
impl Plugin for Intercept { #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
fn is_enabled(&self) -> bool {
self.enabled
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
pub struct TableAccess { pub struct TableAccess {
pub enabled: bool, pub enabled: bool,
pub tables: Vec<String>, pub tables: Vec<String>,
} }
impl Plugin for TableAccess { #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
fn is_enabled(&self) -> bool {
self.enabled
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
pub struct QueryLogger { pub struct QueryLogger {
pub enabled: bool, pub enabled: bool,
} }
impl Plugin for QueryLogger {
fn is_enabled(&self) -> bool {
self.enabled
}
}
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)]
pub struct Prewarmer {
pub enabled: bool,
pub queries: Vec<String>,
}
impl Plugin for Prewarmer {
fn is_enabled(&self) -> bool {
self.enabled
}
}
impl Intercept { impl Intercept {
pub fn substitute(&mut self, db: &str, user: &str) { pub fn substitute(&mut self, db: &str, user: &str) {
for (_, query) in self.queries.iter_mut() { for (_, query) in self.queries.iter_mut() {
query.substitute(db, user); query.substitute(db, user);
query.query = query.query.to_ascii_lowercase();
} }
} }
} }
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, Hash, Eq)] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)]
pub struct Query { pub struct Query {
pub query: String, pub query: String,
pub schema: Vec<Vec<String>>, pub schema: Vec<Vec<String>>,
@@ -949,7 +719,6 @@ pub struct Query {
} }
impl Query { impl Query {
#[allow(clippy::needless_range_loop)]
pub fn substitute(&mut self, db: &str, user: &str) { pub fn substitute(&mut self, db: &str, user: &str) {
for col in self.result.iter_mut() { for col in self.result.iter_mut() {
for i in 0..col.len() { for i in 0..col.len() {
@@ -976,13 +745,8 @@ pub struct Config {
#[serde(default = "Config::default_path")] #[serde(default = "Config::default_path")]
pub path: String, pub path: String,
// General and global settings.
pub general: General, pub general: General,
// Plugins that should run in all pools.
pub plugins: Option<Plugins>, pub plugins: Option<Plugins>,
// Connection pools.
pub pools: HashMap<String, Pool>, pub pools: HashMap<String, Pool>,
} }
@@ -1000,17 +764,15 @@ impl Config {
pub fn fill_up_auth_query_config(&mut self) { pub fn fill_up_auth_query_config(&mut self) {
for (_name, pool) in self.pools.iter_mut() { for (_name, pool) in self.pools.iter_mut() {
if pool.auth_query.is_none() { if pool.auth_query.is_none() {
pool.auth_query.clone_from(&self.general.auth_query); pool.auth_query = self.general.auth_query.clone();
} }
if pool.auth_query_user.is_none() { if pool.auth_query_user.is_none() {
pool.auth_query_user pool.auth_query_user = self.general.auth_query_user.clone();
.clone_from(&self.general.auth_query_user);
} }
if pool.auth_query_password.is_none() { if pool.auth_query_password.is_none() {
pool.auth_query_password pool.auth_query_password = self.general.auth_query_password.clone();
.clone_from(&self.general.auth_query_password);
} }
} }
} }
@@ -1021,8 +783,8 @@ impl Default for Config {
Config { Config {
path: Self::default_path(), path: Self::default_path(),
general: General::default(), general: General::default(),
plugins: None,
pools: HashMap::default(), pools: HashMap::default(),
plugins: None,
} }
} }
} }
@@ -1050,17 +812,6 @@ impl From<&Config> for std::collections::HashMap<String, String> {
format!("pools.{}.query_parser_enabled", pool_name), format!("pools.{}.query_parser_enabled", pool_name),
pool.query_parser_enabled.to_string(), pool.query_parser_enabled.to_string(),
), ),
(
format!("pools.{}.query_parser_max_length", pool_name),
match pool.query_parser_max_length {
Some(max_length) => max_length.to_string(),
None => String::from("unlimited"),
},
),
(
format!("pools.{}.query_parser_read_write_splitting", pool_name),
pool.query_parser_read_write_splitting.to_string(),
),
( (
format!("pools.{}.default_role", pool_name), format!("pools.{}.default_role", pool_name),
pool.default_role.clone(), pool.default_role.clone(),
@@ -1076,8 +827,8 @@ impl From<&Config> for std::collections::HashMap<String, String> {
( (
format!("pools.{:?}.users", pool_name), format!("pools.{:?}.users", pool_name),
pool.users pool.users
.values() .iter()
.map(|user| &user.username) .map(|(_username, user)| &user.username)
.cloned() .cloned()
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join(", "), .join(", "),
@@ -1131,7 +882,6 @@ impl From<&Config> for std::collections::HashMap<String, String> {
impl Config { impl Config {
/// Print current configuration. /// Print current configuration.
pub fn show(&self) { pub fn show(&self) {
info!("Config path: {}", self.path);
info!("Ban time: {}s", self.general.ban_time); info!("Ban time: {}s", self.general.ban_time);
info!( info!(
"Idle client in transaction timeout: {}ms", "Idle client in transaction timeout: {}ms",
@@ -1158,15 +908,18 @@ impl Config {
"Default max server lifetime: {}ms", "Default max server lifetime: {}ms",
self.general.server_lifetime self.general.server_lifetime
); );
info!("Server round robin: {}", self.general.server_round_robin);
match self.general.tls_certificate.clone() { match self.general.tls_certificate.clone() {
Some(tls_certificate) => { Some(tls_certificate) => {
info!("TLS certificate: {}", tls_certificate); info!("TLS certificate: {}", tls_certificate);
if let Some(tls_private_key) = self.general.tls_private_key.clone() { match self.general.tls_private_key.clone() {
Some(tls_private_key) => {
info!("TLS private key: {}", tls_private_key); info!("TLS private key: {}", tls_private_key);
info!("TLS support is enabled"); info!("TLS support is enabled");
} }
None => (),
}
} }
None => { None => {
@@ -1178,13 +931,6 @@ impl Config {
"Server TLS certificate verification: {}", "Server TLS certificate verification: {}",
self.general.verify_server_certificate self.general.verify_server_certificate
); );
info!(
"Plugins: {}",
match self.plugins {
Some(ref plugins) => plugins.to_string(),
None => "not configured".into(),
}
);
for (pool_name, pool_config) in &self.pools { for (pool_name, pool_config) in &self.pools {
// TODO: Make this output prettier (maybe a table?) // TODO: Make this output prettier (maybe a table?)
@@ -1193,8 +939,8 @@ impl Config {
pool_name, pool_name,
pool_config pool_config
.users .users
.values() .iter()
.map(|user_cfg| user_cfg.pool_size) .map(|(_, user_cfg)| user_cfg.pool_size)
.sum::<u32>() .sum::<u32>()
.to_string() .to_string()
); );
@@ -1233,15 +979,6 @@ impl Config {
"[pool: {}] Query router: {}", "[pool: {}] Query router: {}",
pool_name, pool_config.query_parser_enabled pool_name, pool_config.query_parser_enabled
); );
info!(
"[pool: {}] Query parser max length: {:?}",
pool_name, pool_config.query_parser_max_length
);
info!(
"[pool: {}] Infer role from query: {}",
pool_name, pool_config.query_parser_read_write_splitting
);
info!( info!(
"[pool: {}] Number of shards: {}", "[pool: {}] Number of shards: {}",
pool_name, pool_name,
@@ -1260,26 +997,6 @@ impl Config {
None => "default".to_string(), None => "default".to_string(),
} }
); );
info!(
"[pool: {}] Cleanup server connections: {}",
pool_name, pool_config.cleanup_server_connections
);
info!(
"[pool: {}] Log client parameter status changes: {}",
pool_name, pool_config.log_client_parameter_status_changes
);
info!(
"[pool: {}] Prepared statements server cache size: {}",
pool_name, pool_config.prepared_statements_cache_size
);
info!(
"[pool: {}] Plugins: {}",
pool_name,
match pool_config.plugins {
Some(ref plugins) => plugins.to_string(),
None => "not configured".into(),
}
);
for user in &pool_config.users { for user in &pool_config.users {
info!( info!(
@@ -1314,24 +1031,6 @@ impl Config {
None => "default".to_string(), None => "default".to_string(),
} }
); );
info!(
"[pool: {}][user: {}] Connection timeout: {}",
pool_name,
user.1.username,
match user.1.connect_timeout {
Some(connect_timeout) => format!("{}ms", connect_timeout),
None => "not set".to_string(),
}
);
info!(
"[pool: {}][user: {}] Idle timeout: {}",
pool_name,
user.1.username,
match user.1.idle_timeout {
Some(idle_timeout) => format!("{}ms", idle_timeout),
None => "not set".to_string(),
}
);
} }
} }
} }
@@ -1386,7 +1085,8 @@ impl Config {
} }
// Validate TLS! // Validate TLS!
if let Some(tls_certificate) = self.general.tls_certificate.clone() { match self.general.tls_certificate.clone() {
Some(tls_certificate) => {
match load_certs(Path::new(&tls_certificate)) { match load_certs(Path::new(&tls_certificate)) {
Ok(_) => { Ok(_) => {
// Cert is okay, but what about the private key? // Cert is okay, but what about the private key?
@@ -1411,6 +1111,8 @@ impl Config {
return Err(Error::BadConfig); return Err(Error::BadConfig);
} }
} }
}
None => (),
}; };
for pool in self.pools.values_mut() { for pool in self.pools.values_mut() {
@@ -1429,7 +1131,9 @@ pub fn get_config() -> Config {
} }
pub fn get_idle_client_in_transaction_timeout() -> u64 { pub fn get_idle_client_in_transaction_timeout() -> u64 {
CONFIG.load().general.idle_client_in_transaction_timeout (*(*CONFIG.load()))
.general
.idle_client_in_transaction_timeout
} }
/// Parse the configuration file located at the path. /// Parse the configuration file located at the path.


@@ -12,7 +12,6 @@ pub enum Error {
     ProtocolSyncError(String),
     BadQuery(String),
     ServerError,
-    ServerMessageParserError(String),
     ServerStartupError(String, ServerIdentifier),
     ServerAuthError(String, ServerIdentifier),
     BadConfig,
@@ -27,9 +26,6 @@ pub enum Error {
     AuthPassthroughError(String),
     UnsupportedStatement,
     QueryRouterParserError(String),
-    QueryRouterError(String),
-    InvalidShardId(usize),
-    PreparedStatementError,
 }

 #[derive(Clone, PartialEq, Debug)]
@@ -125,9 +121,3 @@ impl std::fmt::Display for Error {
         }
     }
 }
-
-impl From<std::ffi::NulError> for Error {
-    fn from(err: std::ffi::NulError) -> Self {
-        Error::QueryRouterError(err.to_string())
-    }
-}


@@ -1,14 +1,13 @@
 pub mod admin;
 pub mod auth_passthrough;
 pub mod client;
-pub mod cmd_args;
 pub mod config;
 pub mod constants;
 pub mod dns_cache;
 pub mod errors;
-pub mod logger;
 pub mod messages;
 pub mod mirrors;
+pub mod multi_logger;
 pub mod plugins;
 pub mod pool;
 pub mod prometheus;


@@ -1,20 +0,0 @@
use crate::cmd_args::{Args, LogFormat};
use tracing_subscriber;
use tracing_subscriber::EnvFilter;
pub fn init(args: &Args) {
// Initialize a default filter, and then override the built-in default "warning" with our
// command-line setting (default: "info")
let filter = EnvFilter::from_default_env().add_directive(args.log_level.into());
let trace_sub = tracing_subscriber::fmt()
.with_thread_ids(true)
.with_env_filter(filter)
.with_ansi(!args.no_color);
match args.log_format {
LogFormat::Structured => trace_sub.json().init(),
LogFormat::Debug => trace_sub.pretty().init(),
_ => trace_sub.init(),
};
}


@@ -23,6 +23,7 @@ extern crate arc_swap;
 extern crate async_trait;
 extern crate bb8;
 extern crate bytes;
+extern crate env_logger;
 extern crate exitcode;
 extern crate log;
 extern crate md5;
@@ -60,18 +61,15 @@ use std::str::FromStr;
 use std::sync::Arc;
 use tokio::sync::broadcast;

-use pgcat::cmd_args;
 use pgcat::config::{get_config, reload_config, VERSION};
 use pgcat::dns_cache;
-use pgcat::logger;
 use pgcat::messages::configure_socket;
 use pgcat::pool::{ClientServerMap, ConnectionPool};
 use pgcat::prometheus::start_metric_server;
 use pgcat::stats::{Collector, Reporter, REPORTER};

 fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let args = cmd_args::parse();
-    logger::init(&args);
+    pgcat::multi_logger::MultiLogger::init().unwrap();

     info!("Welcome to PgCat! Meow. (Version {})", VERSION);

@@ -80,12 +78,20 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         std::process::exit(exitcode::CONFIG);
     }

+    let args = std::env::args().collect::<Vec<String>>();
+
+    let config_file = if args.len() == 2 {
+        args[1].to_string()
+    } else {
+        String::from("pgcat.toml")
+    };
+
     // Create a transient runtime for loading the config for the first time.
     {
         let runtime = Builder::new_multi_thread().worker_threads(1).build()?;

         runtime.block_on(async {
-            match pgcat::config::parse(args.config_file.as_str()).await {
+            match pgcat::config::parse(&config_file).await {
                 Ok(_) => (),
                 Err(err) => {
                     error!("Config parse error: {:?}", err);

File diff suppressed because it is too large.


@@ -7,7 +7,8 @@ use bytes::{Bytes, BytesMut};
 use parking_lot::RwLock;

 use crate::config::{get_config, Address, Role, User};
-use crate::pool::{ClientServerMap, ServerPool};
+use crate::pool::{ClientServerMap, PoolIdentifier, ServerPool};
+use crate::stats::PoolStats;
 use log::{error, info, trace, warn};
 use tokio::sync::mpsc::{channel, Receiver, Sender};
@@ -23,27 +24,25 @@ impl MirroredClient {
     async fn create_pool(&self) -> Pool<ServerPool> {
         let config = get_config();
         let default = std::time::Duration::from_millis(10_000).as_millis() as u64;
-        let (connection_timeout, idle_timeout, _cfg, prepared_statement_cache_size) =
+        let (connection_timeout, idle_timeout, cfg) =
             match config.pools.get(&self.address.pool_name) {
                 Some(cfg) => (
                     cfg.connect_timeout.unwrap_or(default),
                     cfg.idle_timeout.unwrap_or(default),
                     cfg.clone(),
-                    cfg.prepared_statements_cache_size,
                 ),
-                None => (default, default, crate::config::Pool::default(), 0),
+                None => (default, default, crate::config::Pool::default()),
             };

+        let identifier = PoolIdentifier::new(&self.database, &self.user.username);
         let manager = ServerPool::new(
             self.address.clone(),
             self.user.clone(),
             self.database.as_str(),
             ClientServerMap::default(),
+            Arc::new(PoolStats::new(identifier, cfg.clone())),
             Arc::new(RwLock::new(None)),
-            None,
-            true,
-            false,
-            prepared_statement_cache_size,
         );

         Pool::builder()
@@ -81,13 +80,12 @@ impl MirroredClient {
                 }

                 // Incoming data from server (we read to clear the socket buffer and discard the data)
-                recv_result = server.recv(None) => {
+                recv_result = server.recv() => {
                     match recv_result {
                         Ok(message) => trace!("Received from mirror: {} {:?}", String::from_utf8_lossy(&message[..]), address.clone()),
                         Err(err) => {
-                            server.mark_bad(
-                                format!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone()).as_str()
-                            );
+                            server.mark_bad();
+                            error!("Failed to receive from mirror {:?} {:?}", err, address.clone());
                         }
                     }
                 }
@@ -99,9 +97,8 @@ impl MirroredClient {
                     match server.send(&BytesMut::from(&bytes[..])).await {
                         Ok(_) => trace!("Sent to mirror: {} {:?}", String::from_utf8_lossy(&bytes[..]), address.clone()),
                         Err(err) => {
-                            server.mark_bad(
-                                format!("Failed to receive from mirror {:?} {:?}", err, address.clone()).as_str()
-                            );
+                            server.mark_bad();
+                            error!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone())
                         }
                     }
                 }
@@ -141,18 +138,18 @@ impl MirroringManager {
                 bytes_rx,
                 disconnect_rx: exit_rx,
             };
-            exit_senders.push(exit_tx);
-            byte_senders.push(bytes_tx);
+            exit_senders.push(exit_tx.clone());
+            byte_senders.push(bytes_tx.clone());
             client.start();
         });

         Self {
-            byte_senders,
+            byte_senders: byte_senders,
             disconnect_senders: exit_senders,
         }
     }

-    pub fn send(&mut self, bytes: &BytesMut) {
+    pub fn send(self: &mut Self, bytes: &BytesMut) {
         // We want to avoid performing an allocation if we won't be able to send the message
         // There is a possibility of a race here where we check the capacity and then the channel is
         // closed or the capacity is reduced to 0, but mirroring is best effort anyway
@@ -174,7 +171,7 @@ impl MirroringManager {
         });
     }

-    pub fn disconnect(&mut self) {
+    pub fn disconnect(self: &mut Self) {
         self.disconnect_senders
             .iter_mut()
             .for_each(|sender| match sender.try_send(()) {

src/multi_logger.rs (new file)

@@ -0,0 +1,80 @@
use log::{Level, Log, Metadata, Record, SetLoggerError};
// This is a special kind of logger that allows sending logs to different
// targets depending on the log level.
//
// By default, if nothing is set, it acts as a regular env_logger logger
// and sends everything to standard error.
//
// If the env variable `STDOUT_LOG` is defined, it will be used for
// configuring the standard output logger.
//
// The behavior is:
// - If the message is an error, it is written to standard error.
// - If it is not, and it matches the log level of the standard output logger (`STDOUT_LOG` env var), it is sent to standard output.
// - Otherwise, it is sent to the stderr logger, which will log it or not depending on the value
//   of the RUST_LOG env var.
//
// To summarize: if no `STDOUT_LOG` env var is present, the logger is the default logger. If `STDOUT_LOG` is set, everything
// but errors that matches the log level set in the `STDOUT_LOG` env var is sent to stdout. You can also have a more esoteric
// configuration where you set `RUST_LOG=debug` and `STDOUT_LOG=info`: errors go to stderr, warns and infos to stdout, and debugs to stderr.
//
pub struct MultiLogger {
stderr_logger: env_logger::Logger,
stdout_logger: env_logger::Logger,
}
impl MultiLogger {
fn new() -> Self {
let stderr_logger = env_logger::builder().format_timestamp_micros().build();
let stdout_logger = env_logger::Builder::from_env("STDOUT_LOG")
.format_timestamp_micros()
.target(env_logger::Target::Stdout)
.build();
Self {
stderr_logger,
stdout_logger,
}
}
pub fn init() -> Result<(), SetLoggerError> {
let logger = Self::new();
log::set_max_level(logger.stderr_logger.filter());
log::set_boxed_logger(Box::new(logger))
}
}
impl Log for MultiLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
self.stderr_logger.enabled(metadata) && self.stdout_logger.enabled(metadata)
}
fn log(&self, record: &Record) {
if record.level() == Level::Error {
self.stderr_logger.log(record);
} else {
if self.stdout_logger.matches(record) {
self.stdout_logger.log(record);
} else {
self.stderr_logger.log(record);
}
}
}
fn flush(&self) {
self.stderr_logger.flush();
self.stdout_logger.flush();
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_init() {
MultiLogger::init().unwrap();
}
}
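A usage sketch of the routing described above (hypothetical, not part of this diff; assumes the env vars are set before startup):

// Launched as, e.g.: RUST_LOG=debug STDOUT_LOG=info pgcat
fn logging_example() {
    MultiLogger::init().unwrap();

    log::error!("errors always go to stderr");
    log::info!("matches STDOUT_LOG=info, goes to stdout");
    log::debug!("rejected by STDOUT_LOG, falls through to the stderr logger (RUST_LOG)");
}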


@@ -2,21 +2,52 @@
 //!
 //! It intercepts queries and returns fake results.

+use arc_swap::ArcSwap;
 use async_trait::async_trait;
 use bytes::{BufMut, BytesMut};
+use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};
+use serde_json::{json, Value};
 use sqlparser::ast::Statement;
+use std::collections::HashMap;

-use log::debug;
+use log::{debug, info};
+use std::sync::Arc;

 use crate::{
     config::Intercept as InterceptConfig,
     errors::Error,
     messages::{command_complete, data_row_nullable, row_description, DataType},
     plugins::{Plugin, PluginOutput},
+    pool::{PoolIdentifier, PoolMap},
     query_router::QueryRouter,
 };

+pub static CONFIG: Lazy<ArcSwap<HashMap<PoolIdentifier, InterceptConfig>>> =
+    Lazy::new(|| ArcSwap::from_pointee(HashMap::new()));
+
+/// Check if the interceptor plugin has been enabled.
+pub fn enabled() -> bool {
+    !CONFIG.load().is_empty()
+}
+
+pub fn setup(intercept_config: &InterceptConfig, pools: &PoolMap) {
+    let mut config = HashMap::new();
+
+    for (identifier, _) in pools.iter() {
+        let mut intercept_config = intercept_config.clone();
+        intercept_config.substitute(&identifier.db, &identifier.user);
+        config.insert(identifier.clone(), intercept_config);
+    }
+
+    CONFIG.store(Arc::new(config));
+
+    info!("Intercepting {} queries", intercept_config.queries.len());
+}
+
+pub fn disable() {
+    CONFIG.store(Arc::new(HashMap::new()));
+}
+
 // TODO: use these structs for deserialization
 #[derive(Serialize, Deserialize)]
 pub struct Rule {
@@ -32,35 +63,33 @@ pub struct Column {
 }

 /// The intercept plugin.
-pub struct Intercept<'a> {
-    pub enabled: bool,
-    pub config: &'a InterceptConfig,
-}
+pub struct Intercept;

 #[async_trait]
-impl<'a> Plugin for Intercept<'a> {
+impl Plugin for Intercept {
     async fn run(
         &mut self,
         query_router: &QueryRouter,
         ast: &Vec<Statement>,
     ) -> Result<PluginOutput, Error> {
-        if !self.enabled || ast.is_empty() {
+        if ast.is_empty() {
             return Ok(PluginOutput::Allow);
         }

-        let mut config = self.config.clone();
-        config.substitute(
-            &query_router.pool_settings().db,
-            &query_router.pool_settings().user.username,
-        );
-
-        let mut result = BytesMut::new();
+        let mut result = BytesMut::new();
+        let query_map = match CONFIG.load().get(&PoolIdentifier::new(
+            &query_router.pool_settings().db,
+            &query_router.pool_settings().user.username,
+        )) {
+            Some(query_map) => query_map.clone(),
+            None => return Ok(PluginOutput::Allow),
+        };

         for q in ast {
             // Normalization
             let q = q.to_string().to_ascii_lowercase();

-            for (_, target) in config.queries.iter() {
+            for (_, target) in query_map.queries.iter() {
                 if target.query.as_str() == q {
                     debug!("Intercepting query: {}", q);

@@ -92,7 +121,7 @@ impl<'a> Plugin for Intercept<'a> {
                     .map(|s| {
                         let s = s.as_str().to_string();

-                        if s.is_empty() {
+                        if s == "" {
                             None
                         } else {
                             Some(s)
@@ -118,3 +147,142 @@ impl<'a> Plugin for Intercept<'a> {
         }
     }
 }
/// Make IntelliJ SQL plugin believe it's talking to an actual database
/// instead of PgCat.
#[allow(dead_code)]
fn fool_datagrip(database: &str, user: &str) -> Value {
json!([
{
"query": "select current_database() as a, current_schemas(false) as b",
"schema": [
{
"name": "a",
"data_type": "text",
},
{
"name": "b",
"data_type": "anyarray",
},
],
"result": [
[database, "{public}"],
],
},
{
"query": "select current_database(), current_schema(), current_user",
"schema": [
{
"name": "current_database",
"data_type": "text",
},
{
"name": "current_schema",
"data_type": "text",
},
{
"name": "current_user",
"data_type": "text",
}
],
"result": [
["sharded_db", "public", "sharding_user"],
],
},
{
"query": "select cast(n.oid as bigint) as id, datname as name, d.description, datistemplate as is_template, datallowconn as allow_connections, pg_catalog.pg_get_userbyid(n.datdba) as \"owner\" from pg_catalog.pg_database as n left join pg_catalog.pg_shdescription as d on n.oid = d.objoid order by case when datname = pg_catalog.current_database() then -cast(1 as bigint) else cast(n.oid as bigint) end",
"schema": [
{
"name": "id",
"data_type": "oid",
},
{
"name": "name",
"data_type": "text",
},
{
"name": "description",
"data_type": "text",
},
{
"name": "is_template",
"data_type": "bool",
},
{
"name": "allow_connections",
"data_type": "bool",
},
{
"name": "owner",
"data_type": "text",
}
],
"result": [
["16387", database, "", "f", "t", user],
]
},
{
"query": "select cast(r.oid as bigint) as role_id, rolname as role_name, rolsuper as is_super, rolinherit as is_inherit, rolcreaterole as can_createrole, rolcreatedb as can_createdb, rolcanlogin as can_login, rolreplication as is_replication, rolconnlimit as conn_limit, rolvaliduntil as valid_until, rolbypassrls as bypass_rls, rolconfig as config, d.description from pg_catalog.pg_roles as r left join pg_catalog.pg_shdescription as d on d.objoid = r.oid",
"schema": [
{
"name": "role_id",
"data_type": "oid",
},
{
"name": "role_name",
"data_type": "text",
},
{
"name": "is_super",
"data_type": "bool",
},
{
"name": "is_inherit",
"data_type": "bool",
},
{
"name": "can_createrole",
"data_type": "bool",
},
{
"name": "can_createdb",
"data_type": "bool",
},
{
"name": "can_login",
"data_type": "bool",
},
{
"name": "is_replication",
"data_type": "bool",
},
{
"name": "conn_limit",
"data_type": "int4",
},
{
"name": "valid_until",
"data_type": "text",
},
{
"name": "bypass_rls",
"data_type": "bool",
},
{
"name": "config",
"data_type": "text",
},
{
"name": "description",
"data_type": "text",
},
],
"result": [
["10", "postgres", "f", "t", "f", "f", "t", "f", "-1", "", "f", "", ""],
["16419", user, "f", "t", "f", "f", "t", "f", "-1", "", "f", "", ""],
]
}
])
}
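
These canned responses follow the shape that the `Rule` and `Column` structs at the top of the file are intended to deserialize (per the TODO above). A minimal sketch of that round trip; the field names are inferred from the JSON keys and are an assumption:

```rust
use serde::{Deserialize, Serialize};
use serde_json::json;

#[derive(Serialize, Deserialize)]
struct Column {
    name: String,
    data_type: String, // assumption: eventually maps onto messages::DataType
}

#[derive(Serialize, Deserialize)]
struct Rule {
    query: String,            // normalized query text to match against
    schema: Vec<Column>,      // row description sent to the client
    result: Vec<Vec<String>>, // data rows, one Vec<String> per row
}

fn main() {
    let rule: Rule = serde_json::from_value(json!({
        "query": "select current_database(), current_schema(), current_user",
        "schema": [
            { "name": "current_database", "data_type": "text" },
            { "name": "current_schema",   "data_type": "text" },
            { "name": "current_user",     "data_type": "text" },
        ],
        "result": [["sharded_db", "public", "sharding_user"]],
    }))
    .expect("shape matches the structs");
    println!("{} columns, {} rows", rule.schema.len(), rule.result.len());
}
```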

View File

@@ -9,7 +9,6 @@
//! //!
pub mod intercept; pub mod intercept;
pub mod prewarmer;
pub mod query_logger; pub mod query_logger;
pub mod table_access; pub mod table_access;
@@ -33,7 +32,6 @@ pub enum PluginOutput {
#[async_trait] #[async_trait]
pub trait Plugin { pub trait Plugin {
// Run before the query is sent to the server. // Run before the query is sent to the server.
#[allow(clippy::ptr_arg)]
async fn run( async fn run(
&mut self, &mut self,
query_router: &QueryRouter, query_router: &QueryRouter,

View File

@@ -1,28 +0,0 @@
//! Prewarm new connections before giving them to the client.
use crate::{errors::Error, server::Server};
use log::info;
pub struct Prewarmer<'a> {
pub enabled: bool,
pub server: &'a mut Server,
pub queries: &'a Vec<String>,
}
impl<'a> Prewarmer<'a> {
pub async fn run(&mut self) -> Result<(), Error> {
if !self.enabled {
return Ok(());
}
for query in self.queries {
info!(
"{} Prewarning with query: `{}`",
self.server.address(),
query
);
self.server.query(query).await?;
}
Ok(())
}
}

View File

@@ -5,33 +5,44 @@ use crate::{
plugins::{Plugin, PluginOutput}, plugins::{Plugin, PluginOutput},
query_router::QueryRouter, query_router::QueryRouter,
}; };
use arc_swap::ArcSwap;
use async_trait::async_trait; use async_trait::async_trait;
use log::info; use log::info;
use once_cell::sync::Lazy;
use sqlparser::ast::Statement; use sqlparser::ast::Statement;
use std::sync::Arc;
pub struct QueryLogger<'a> { static ENABLED: Lazy<ArcSwap<bool>> = Lazy::new(|| ArcSwap::from_pointee(false));
pub enabled: bool,
pub user: &'a str, pub struct QueryLogger;
pub db: &'a str,
pub fn setup() {
ENABLED.store(Arc::new(true));
info!("Logging queries to stdout");
}
pub fn disable() {
ENABLED.store(Arc::new(false));
}
pub fn enabled() -> bool {
**ENABLED.load()
} }
#[async_trait] #[async_trait]
impl<'a> Plugin for QueryLogger<'a> { impl Plugin for QueryLogger {
async fn run( async fn run(
&mut self, &mut self,
_query_router: &QueryRouter, _query_router: &QueryRouter,
ast: &Vec<Statement>, ast: &Vec<Statement>,
) -> Result<PluginOutput, Error> { ) -> Result<PluginOutput, Error> {
if !self.enabled {
return Ok(PluginOutput::Allow);
}
let query = ast let query = ast
.iter() .iter()
.map(|q| q.to_string()) .map(|q| q.to_string())
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join("; "); .join("; ");
info!("[pool: {}][user: {}] {}", self.db, self.user, query); info!("{}", query);
Ok(PluginOutput::Allow) Ok(PluginOutput::Allow)
} }
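
Note that the logger prints the statements as re-serialized from the AST, joined with "; ", not the client's original bytes. A minimal sketch of that normalization using sqlparser directly (the generic dialect here is an assumption):

```rust
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn normalize(sql: &str) -> String {
    let ast = Parser::parse_sql(&GenericDialect {}, sql).expect("parse failed");
    ast.iter()
        .map(|q| q.to_string())
        .collect::<Vec<String>>()
        .join("; ")
}

fn main() {
    // Whitespace and keyword casing are canonicalized by the AST round trip.
    assert_eq!(normalize("select  1;   select 2"), "SELECT 1; SELECT 2");
}
```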

View File

@@ -5,39 +5,53 @@ use async_trait::async_trait;
use sqlparser::ast::{visit_relations, Statement}; use sqlparser::ast::{visit_relations, Statement};
use crate::{ use crate::{
config::TableAccess as TableAccessConfig,
errors::Error, errors::Error,
plugins::{Plugin, PluginOutput}, plugins::{Plugin, PluginOutput},
query_router::QueryRouter, query_router::QueryRouter,
}; };
use log::debug; use log::{debug, info};
use arc_swap::ArcSwap;
use core::ops::ControlFlow; use core::ops::ControlFlow;
use once_cell::sync::Lazy;
use std::sync::Arc;
pub struct TableAccess<'a> { static CONFIG: Lazy<ArcSwap<Vec<String>>> = Lazy::new(|| ArcSwap::from_pointee(vec![]));
pub enabled: bool,
pub tables: &'a Vec<String>, pub fn setup(config: &TableAccessConfig) {
CONFIG.store(Arc::new(config.tables.clone()));
info!("Blocking access to {} tables", config.tables.len());
} }
pub fn enabled() -> bool {
!CONFIG.load().is_empty()
}
pub fn disable() {
CONFIG.store(Arc::new(vec![]));
}
pub struct TableAccess;
#[async_trait] #[async_trait]
impl<'a> Plugin for TableAccess<'a> { impl Plugin for TableAccess {
async fn run( async fn run(
&mut self, &mut self,
_query_router: &QueryRouter, _query_router: &QueryRouter,
ast: &Vec<Statement>, ast: &Vec<Statement>,
) -> Result<PluginOutput, Error> { ) -> Result<PluginOutput, Error> {
if !self.enabled {
return Ok(PluginOutput::Allow);
}
let mut found = None; let mut found = None;
let forbidden_tables = CONFIG.load();
visit_relations(ast, |relation| { visit_relations(ast, |relation| {
let relation = relation.to_string(); let relation = relation.to_string();
let parts = relation.split('.').collect::<Vec<&str>>(); let parts = relation.split(".").collect::<Vec<&str>>();
let table_name = parts.last().unwrap(); let table_name = parts.last().unwrap();
if self.tables.contains(&table_name.to_string()) { if forbidden_tables.contains(&table_name.to_string()) {
found = Some(table_name.to_string()); found = Some(table_name.to_string());
ControlFlow::<()>::Break(()) ControlFlow::<()>::Break(())
} else { } else {

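A minimal standalone sketch of the relation check above (assuming sqlparser is built with its `visitor` feature, which provides `visit_relations`). Only the last dotted component of each relation is compared, so blocking `users` also blocks `public.users`:

```rust
use core::ops::ControlFlow;
use sqlparser::ast::visit_relations;
use sqlparser::dialect::GenericDialect;
use sqlparser::parser::Parser;

fn first_forbidden(sql: &str, forbidden: &[&str]) -> Option<String> {
    let ast = Parser::parse_sql(&GenericDialect {}, sql).ok()?;
    let mut found = None;
    let _ = visit_relations(&ast, |relation| {
        let relation = relation.to_string();
        // "schema.table" and "table" both reduce to the table name.
        let table = relation.split('.').last().unwrap().to_string();
        if forbidden.contains(&table.as_str()) {
            found = Some(table);
            ControlFlow::<()>::Break(())
        } else {
            ControlFlow::Continue(())
        }
    });
    found
}

fn main() {
    let hit = first_forbidden("SELECT * FROM public.users JOIN orders USING (id)", &["users"]);
    assert_eq!(hit.as_deref(), Some("users"));
}
```
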
View File

@@ -1,18 +1,15 @@
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
use async_trait::async_trait; use async_trait::async_trait;
use bb8::{ManageConnection, Pool, PooledConnection, QueueStrategy}; use bb8::{ManageConnection, Pool, PooledConnection};
use bytes::{BufMut, BytesMut};
use chrono::naive::NaiveDateTime; use chrono::naive::NaiveDateTime;
use log::{debug, error, info, warn}; use log::{debug, error, info, warn};
use lru::LruCache;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
use rand::seq::SliceRandom; use rand::seq::SliceRandom;
use rand::thread_rng; use rand::thread_rng;
use regex::Regex; use regex::Regex;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::num::NonZeroUsize;
use std::sync::atomic::AtomicU64;
use std::sync::{ use std::sync::{
atomic::{AtomicBool, Ordering}, atomic::{AtomicBool, Ordering},
Arc, Arc,
@@ -20,17 +17,13 @@ use std::sync::{
use std::time::Instant; use std::time::Instant;
use tokio::sync::Notify; use tokio::sync::Notify;
use crate::config::{ use crate::config::{get_config, Address, General, LoadBalancingMode, PoolMode, Role, User};
get_config, Address, DefaultShard, General, LoadBalancingMode, Plugins, PoolMode, Role, User,
};
use crate::errors::Error; use crate::errors::Error;
use crate::auth_passthrough::AuthPassthrough; use crate::auth_passthrough::AuthPassthrough;
use crate::messages::Parse; use crate::server::Server;
use crate::plugins::prewarmer;
use crate::server::{Server, ServerParameters};
use crate::sharding::ShardingFunction; use crate::sharding::ShardingFunction;
use crate::stats::{AddressStats, ClientStats, ServerStats}; use crate::stats::{AddressStats, ClientStats, PoolStats, ServerStats};
pub type ProcessId = i32; pub type ProcessId = i32;
pub type SecretKey = i32; pub type SecretKey = i32;
@@ -57,57 +50,6 @@ pub enum BanReason {
AdminBan(i64), AdminBan(i64),
} }
pub type PreparedStatementCacheType = Arc<Mutex<PreparedStatementCache>>;
// TODO: Add stats the this cache
// TODO: Add application name to the cache value to help identify which application is using the cache
// TODO: Create admin command to show which statements are in the cache
#[derive(Debug)]
pub struct PreparedStatementCache {
cache: LruCache<u64, Arc<Parse>>,
}
impl PreparedStatementCache {
pub fn new(mut size: usize) -> Self {
// Cannot be zeros
if size == 0 {
size = 1;
}
PreparedStatementCache {
cache: LruCache::new(NonZeroUsize::new(size).unwrap()),
}
}
/// Adds the prepared statement to the cache under a new name if it doesn't exist;
/// if it already exists, returns the existing rewritten parse.
///
/// Pass the hash in so that we can compute it before acquiring the lock.
pub fn get_or_insert(&mut self, parse: &Parse, hash: u64) -> Arc<Parse> {
match self.cache.get(&hash) {
Some(rewritten_parse) => rewritten_parse.clone(),
None => {
let new_parse = Arc::new(parse.clone().rewrite());
let evicted = self.cache.push(hash, new_parse.clone());
if let Some((_, evicted_parse)) = evicted {
debug!(
"Evicted prepared statement {} from cache",
evicted_parse.name
);
}
new_parse
}
}
}
/// Marks the hash as most recently used if it exists
pub fn promote(&mut self, hash: &u64) {
self.cache.promote(hash);
}
}
/// An identifier for a PgCat pool, /// An identifier for a PgCat pool,
/// a database visible to clients. /// a database visible to clients.
#[derive(Hash, Debug, Clone, PartialEq, Eq, Default)] #[derive(Hash, Debug, Clone, PartialEq, Eq, Default)]
@@ -131,12 +73,6 @@ impl PoolIdentifier {
} }
} }
impl Display for PoolIdentifier {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}@{}", self.user, self.db)
}
}
impl From<&Address> for PoolIdentifier { impl From<&Address> for PoolIdentifier {
fn from(address: &Address) -> PoolIdentifier { fn from(address: &Address) -> PoolIdentifier {
PoolIdentifier::new(&address.database, &address.username) PoolIdentifier::new(&address.database, &address.username)
@@ -165,12 +101,6 @@ pub struct PoolSettings {
// Enable/disable query parser. // Enable/disable query parser.
pub query_parser_enabled: bool, pub query_parser_enabled: bool,
// Max length of query the parser will parse.
pub query_parser_max_length: Option<usize>,
// Infer role
pub query_parser_read_write_splitting: bool,
// Read from the primary as well or not. // Read from the primary as well or not.
pub primary_reads_enabled: bool, pub primary_reads_enabled: bool,
@@ -195,9 +125,6 @@ pub struct PoolSettings {
// Regex for searching for the shard id in SQL statements // Regex for searching for the shard id in SQL statements
pub shard_id_regex: Option<Regex>, pub shard_id_regex: Option<Regex>,
// What to do when no shard is selected in a sharded system
pub default_shard: DefaultShard,
// Limit how much of each query is searched for a potential shard regex match // Limit how much of each query is searched for a potential shard regex match
pub regex_search_limit: usize, pub regex_search_limit: usize,
@@ -205,9 +132,6 @@ pub struct PoolSettings {
pub auth_query: Option<String>, pub auth_query: Option<String>,
pub auth_query_user: Option<String>, pub auth_query_user: Option<String>,
pub auth_query_password: Option<String>, pub auth_query_password: Option<String>,
/// Plugins
pub plugins: Option<Plugins>,
} }
impl Default for PoolSettings { impl Default for PoolSettings {
@@ -220,8 +144,6 @@ impl Default for PoolSettings {
db: String::default(), db: String::default(),
default_role: None, default_role: None,
query_parser_enabled: false, query_parser_enabled: false,
query_parser_max_length: None,
query_parser_read_write_splitting: false,
primary_reads_enabled: true, primary_reads_enabled: true,
sharding_function: ShardingFunction::PgBigintHash, sharding_function: ShardingFunction::PgBigintHash,
automatic_sharding_key: None, automatic_sharding_key: None,
@@ -231,11 +153,9 @@ impl Default for PoolSettings {
sharding_key_regex: None, sharding_key_regex: None,
shard_id_regex: None, shard_id_regex: None,
regex_search_limit: 1000, regex_search_limit: 1000,
default_shard: DefaultShard::Shard(0),
auth_query: None, auth_query: None,
auth_query_user: None, auth_query_user: None,
auth_query_password: None, auth_query_password: None,
plugins: None,
} }
} }
} }
@@ -244,23 +164,23 @@ impl Default for PoolSettings {
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, Default)]
pub struct ConnectionPool { pub struct ConnectionPool {
/// The pools handled internally by bb8. /// The pools handled internally by bb8.
databases: Arc<Vec<Vec<Pool<ServerPool>>>>, databases: Vec<Vec<Pool<ServerPool>>>,
/// The addresses (host, port, role) to handle /// The addresses (host, port, role) to handle
/// failover and load balancing deterministically. /// failover and load balancing deterministically.
addresses: Arc<Vec<Vec<Address>>>, addresses: Vec<Vec<Address>>,
/// List of banned addresses (see above) /// List of banned addresses (see above)
/// that should not be queried. /// that should not be queried.
banlist: BanList, banlist: BanList,
/// The server information has to be passed to the /// The server information (K messages) has to be passed to the
/// clients on startup. We pre-connect to all shards and replicas /// clients on startup. We pre-connect to all shards and replicas
/// on pool creation and save the startup parameters here. /// on pool creation and save the K messages here.
original_server_parameters: Arc<RwLock<ServerParameters>>, server_info: Arc<RwLock<BytesMut>>,
/// Pool configuration. /// Pool configuration.
pub settings: Arc<PoolSettings>, pub settings: PoolSettings,
/// If not validated, we need to double check the pool is available before allowing a client /// If not validated, we need to double check the pool is available before allowing a client
/// to use it. /// to use it.
@@ -275,11 +195,10 @@ pub struct ConnectionPool {
paused: Arc<AtomicBool>, paused: Arc<AtomicBool>,
paused_waiter: Arc<Notify>, paused_waiter: Arc<Notify>,
pub stats: Arc<PoolStats>,
/// AuthInfo /// AuthInfo
pub auth_hash: Arc<RwLock<Option<String>>>, pub auth_hash: Arc<RwLock<Option<String>>>,
/// Cache
pub prepared_statement_cache: Option<PreparedStatementCacheType>,
} }
impl ConnectionPool { impl ConnectionPool {
@@ -298,7 +217,8 @@ impl ConnectionPool {
let old_pool_ref = get_pool(pool_name, &user.username); let old_pool_ref = get_pool(pool_name, &user.username);
let identifier = PoolIdentifier::new(pool_name, &user.username); let identifier = PoolIdentifier::new(pool_name, &user.username);
if let Some(pool) = old_pool_ref { match old_pool_ref {
Some(pool) => {
// If the pool hasn't changed, get existing reference and insert it into the new_pools. // If the pool hasn't changed, get existing reference and insert it into the new_pools.
// We replace all pools at the end, but if the reference is kept, the pool won't get re-created (bb8). // We replace all pools at the end, but if the reference is kept, the pool won't get re-created (bb8).
if pool.config_hash == new_pool_hash_value { if pool.config_hash == new_pool_hash_value {
@@ -310,6 +230,8 @@ impl ConnectionPool {
continue; continue;
} }
} }
None => (),
}
info!( info!(
"[pool: {}][user: {}] creating new pool", "[pool: {}][user: {}] creating new pool",
@@ -324,6 +246,10 @@ impl ConnectionPool {
.clone() .clone()
.into_keys() .into_keys()
.collect::<Vec<String>>(); .collect::<Vec<String>>();
let pool_stats = Arc::new(PoolStats::new(identifier, pool_config.clone()));
// Allow the pool to be seen in statistics
pool_stats.register(pool_stats.clone());
// Sort by shard number to ensure consistency. // Sort by shard number to ensure consistency.
shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap()); shard_ids.sort_by_key(|k| k.parse::<i64>().unwrap());
@@ -358,7 +284,6 @@ impl ConnectionPool {
pool_name: pool_name.clone(), pool_name: pool_name.clone(),
mirrors: vec![], mirrors: vec![],
stats: Arc::new(AddressStats::default()), stats: Arc::new(AddressStats::default()),
error_count: Arc::new(AtomicU64::new(0)),
}); });
address_id += 1; address_id += 1;
} }
@@ -377,7 +302,6 @@ impl ConnectionPool {
pool_name: pool_name.clone(), pool_name: pool_name.clone(),
mirrors: mirror_addresses, mirrors: mirror_addresses,
stats: Arc::new(AddressStats::default()), stats: Arc::new(AddressStats::default()),
error_count: Arc::new(AtomicU64::new(0)),
}; };
address_id += 1; address_id += 1;
@@ -426,30 +350,18 @@ impl ConnectionPool {
user.clone(), user.clone(),
&shard.database, &shard.database,
client_server_map.clone(), client_server_map.clone(),
pool_stats.clone(),
pool_auth_hash.clone(), pool_auth_hash.clone(),
match pool_config.plugins {
Some(ref plugins) => Some(plugins.clone()),
None => config.plugins.clone(),
},
pool_config.cleanup_server_connections,
pool_config.log_client_parameter_status_changes,
pool_config.prepared_statements_cache_size,
); );
let connect_timeout = match user.connect_timeout { let connect_timeout = match pool_config.connect_timeout {
Some(connect_timeout) => connect_timeout,
None => match pool_config.connect_timeout {
Some(connect_timeout) => connect_timeout, Some(connect_timeout) => connect_timeout,
None => config.general.connect_timeout, None => config.general.connect_timeout,
},
}; };
let idle_timeout = match user.idle_timeout { let idle_timeout = match pool_config.idle_timeout {
Some(idle_timeout) => idle_timeout,
None => match pool_config.idle_timeout {
Some(idle_timeout) => idle_timeout, Some(idle_timeout) => idle_timeout,
None => config.general.idle_timeout, None => config.general.idle_timeout,
},
}; };
let server_lifetime = match user.server_lifetime { let server_lifetime = match user.server_lifetime {
@@ -460,20 +372,12 @@ impl ConnectionPool {
}, },
}; };
let reaper_rate = *[idle_timeout, server_lifetime, POOL_REAPER_RATE] let reaper_rate = *vec![idle_timeout, server_lifetime, POOL_REAPER_RATE]
.iter() .iter()
.min() .min()
.unwrap(); .unwrap();
let queue_strategy = match config.general.server_round_robin { debug!("Pool reaper rate: {}ms", reaper_rate);
true => QueueStrategy::Fifo,
false => QueueStrategy::Lifo,
};
debug!(
"[pool: {}][user: {}] Pool reaper rate: {}ms",
pool_name, user.username, reaper_rate
);
let pool = Pool::builder() let pool = Pool::builder()
.max_size(user.pool_size) .max_size(user.pool_size)
@@ -482,14 +386,9 @@ impl ConnectionPool {
.idle_timeout(Some(std::time::Duration::from_millis(idle_timeout))) .idle_timeout(Some(std::time::Duration::from_millis(idle_timeout)))
.max_lifetime(Some(std::time::Duration::from_millis(server_lifetime))) .max_lifetime(Some(std::time::Duration::from_millis(server_lifetime)))
.reaper_rate(std::time::Duration::from_millis(reaper_rate)) .reaper_rate(std::time::Duration::from_millis(reaper_rate))
.queue_strategy(queue_strategy) .test_on_check_out(false)
.test_on_check_out(false); .build(manager)
.await?;
let pool = if config.general.validate_config {
pool.build(manager).await?
} else {
pool.build_unchecked(manager)
};
pools.push(pool); pools.push(pool);
servers.push(address); servers.push(address);
@@ -509,13 +408,14 @@ impl ConnectionPool {
} }
let pool = ConnectionPool { let pool = ConnectionPool {
databases: Arc::new(shards), databases: shards,
addresses: Arc::new(addresses), stats: pool_stats,
addresses,
banlist: Arc::new(RwLock::new(banlist)), banlist: Arc::new(RwLock::new(banlist)),
config_hash: new_pool_hash_value, config_hash: new_pool_hash_value,
original_server_parameters: Arc::new(RwLock::new(ServerParameters::new())), server_info: Arc::new(RwLock::new(BytesMut::new())),
auth_hash: pool_auth_hash, auth_hash: pool_auth_hash,
settings: Arc::new(PoolSettings { settings: PoolSettings {
pool_mode: match user.pool_mode { pool_mode: match user.pool_mode {
Some(pool_mode) => pool_mode, Some(pool_mode) => pool_mode,
None => pool_config.pool_mode, None => pool_config.pool_mode,
@@ -532,9 +432,6 @@ impl ConnectionPool {
_ => unreachable!(), _ => unreachable!(),
}, },
query_parser_enabled: pool_config.query_parser_enabled, query_parser_enabled: pool_config.query_parser_enabled,
query_parser_max_length: pool_config.query_parser_max_length,
query_parser_read_write_splitting: pool_config
.query_parser_read_write_splitting,
primary_reads_enabled: pool_config.primary_reads_enabled, primary_reads_enabled: pool_config.primary_reads_enabled,
sharding_function: pool_config.sharding_function, sharding_function: pool_config.sharding_function,
automatic_sharding_key: pool_config.automatic_sharding_key.clone(), automatic_sharding_key: pool_config.automatic_sharding_key.clone(),
@@ -550,41 +447,54 @@ impl ConnectionPool {
.clone() .clone()
.map(|regex| Regex::new(regex.as_str()).unwrap()), .map(|regex| Regex::new(regex.as_str()).unwrap()),
regex_search_limit: pool_config.regex_search_limit.unwrap_or(1000), regex_search_limit: pool_config.regex_search_limit.unwrap_or(1000),
default_shard: pool_config.default_shard,
auth_query: pool_config.auth_query.clone(), auth_query: pool_config.auth_query.clone(),
auth_query_user: pool_config.auth_query_user.clone(), auth_query_user: pool_config.auth_query_user.clone(),
auth_query_password: pool_config.auth_query_password.clone(), auth_query_password: pool_config.auth_query_password.clone(),
plugins: match pool_config.plugins {
Some(ref plugins) => Some(plugins.clone()),
None => config.plugins.clone(),
}, },
}),
validated: Arc::new(AtomicBool::new(false)), validated: Arc::new(AtomicBool::new(false)),
paused: Arc::new(AtomicBool::new(false)), paused: Arc::new(AtomicBool::new(false)),
paused_waiter: Arc::new(Notify::new()), paused_waiter: Arc::new(Notify::new()),
prepared_statement_cache: match pool_config.prepared_statements_cache_size {
0 => None,
_ => Some(Arc::new(Mutex::new(PreparedStatementCache::new(
pool_config.prepared_statements_cache_size,
)))),
},
}; };
// Connect to the servers to make sure pool configuration is valid // Connect to the servers to make sure pool configuration is valid
// before setting it globally. // before setting it globally.
// Do this async and somewhere else, we don't have to wait here. // Do this async and somewhere else, we don't have to wait here.
if config.general.validate_config { let mut validate_pool = pool.clone();
let validate_pool = pool.clone();
tokio::task::spawn(async move { tokio::task::spawn(async move {
let _ = validate_pool.validate().await; let _ = validate_pool.validate().await;
}); });
}
// There is one pool per database/user pair. // There is one pool per database/user pair.
new_pools.insert(PoolIdentifier::new(pool_name, &user.username), pool); new_pools.insert(PoolIdentifier::new(pool_name, &user.username), pool);
} }
} }
if let Some(ref plugins) = config.plugins {
if let Some(ref intercept) = plugins.intercept {
if intercept.enabled {
crate::plugins::intercept::setup(intercept, &new_pools);
} else {
crate::plugins::intercept::disable();
}
}
if let Some(ref table_access) = plugins.table_access {
if table_access.enabled {
crate::plugins::table_access::setup(table_access);
} else {
crate::plugins::table_access::disable();
}
}
if let Some(ref query_logger) = plugins.query_logger {
if query_logger.enabled {
crate::plugins::query_logger::setup();
} else {
crate::plugins::query_logger::disable();
}
}
}
POOLS.store(Arc::new(new_pools.clone())); POOLS.store(Arc::new(new_pools.clone()));
Ok(()) Ok(())
} }
@@ -595,7 +505,7 @@ impl ConnectionPool {
/// when they connect. /// when they connect.
/// This also warms up the pool for clients that connect when /// This also warms up the pool for clients that connect when
/// the pooler starts up. /// the pooler starts up.
pub async fn validate(&self) -> Result<(), Error> { pub async fn validate(&mut self) -> Result<(), Error> {
let mut futures = Vec::new(); let mut futures = Vec::new();
let validated = Arc::clone(&self.validated); let validated = Arc::clone(&self.validated);
@@ -603,7 +513,7 @@ impl ConnectionPool {
for server in 0..self.servers(shard) { for server in 0..self.servers(shard) {
let databases = self.databases.clone(); let databases = self.databases.clone();
let validated = Arc::clone(&validated); let validated = Arc::clone(&validated);
let pool_server_parameters = Arc::clone(&self.original_server_parameters); let pool_server_info = Arc::clone(&self.server_info);
let task = tokio::task::spawn(async move { let task = tokio::task::spawn(async move {
let connection = match databases[shard][server].get().await { let connection = match databases[shard][server].get().await {
@@ -616,10 +526,11 @@ impl ConnectionPool {
let proxy = connection; let proxy = connection;
let server = &*proxy; let server = &*proxy;
let server_parameters: ServerParameters = server.server_parameters(); let server_info = server.server_info();
let mut guard = pool_server_parameters.write(); let mut guard = pool_server_info.write();
*guard = server_parameters; guard.clear();
guard.put(server_info.clone());
validated.store(true, Ordering::Relaxed); validated.store(true, Ordering::Relaxed);
}); });
@@ -631,7 +542,7 @@ impl ConnectionPool {
// TODO: compare server information to make sure // TODO: compare server information to make sure
// all shards are running identical configurations. // all shards are running identical configurations.
if !self.validated() { if self.server_info.read().is_empty() {
error!("Could not validate connection pool"); error!("Could not validate connection pool");
return Err(Error::AllServersDown); return Err(Error::AllServersDown);
} }
@@ -678,51 +589,19 @@ impl ConnectionPool {
/// Get a connection from the pool. /// Get a connection from the pool.
pub async fn get( pub async fn get(
&self, &self,
shard: Option<usize>, // shard number shard: usize, // shard number
role: Option<Role>, // primary or replica role: Option<Role>, // primary or replica
client_stats: &ClientStats, // client id client_stats: &ClientStats, // client id
) -> Result<(PooledConnection<'_, ServerPool>, Address), Error> { ) -> Result<(PooledConnection<'_, ServerPool>, Address), Error> {
let effective_shard_id = if self.shards() == 1 { let mut candidates: Vec<&Address> = self.addresses[shard]
// The base, unsharded case
Some(0)
} else {
if !self.valid_shard_id(shard) {
// None is valid shard ID so it is safe to unwrap here
return Err(Error::InvalidShardId(shard.unwrap()));
}
shard
};
let mut candidates = self
.addresses
.iter() .iter()
.flatten()
.filter(|address| address.role == role) .filter(|address| address.role == role)
.collect::<Vec<&Address>>(); .collect();
// We start with a shuffled list of addresses even if we end up resorting // We shuffle even if least_outstanding_queries is used to avoid imbalance
// this is meant to avoid hitting instance 0 every time if the sorting metric // in cases where all candidates have more or less the same number of outstanding
// ends up being the same for all instances // queries
candidates.shuffle(&mut thread_rng()); candidates.shuffle(&mut thread_rng());
match effective_shard_id {
Some(shard_id) => candidates.retain(|address| address.shard == shard_id),
None => match self.settings.default_shard {
DefaultShard::Shard(shard_id) => {
candidates.retain(|address| address.shard == shard_id)
}
DefaultShard::Random => (),
DefaultShard::RandomHealthy => {
candidates.sort_by(|a, b| {
b.error_count
.load(Ordering::Relaxed)
.partial_cmp(&a.error_count.load(Ordering::Relaxed))
.unwrap()
});
}
},
};
if self.settings.load_balancing_mode == LoadBalancingMode::LeastOutstandingConnections { if self.settings.load_balancing_mode == LoadBalancingMode::LeastOutstandingConnections {
candidates.sort_by(|a, b| { candidates.sort_by(|a, b| {
self.busy_connection_count(b) self.busy_connection_count(b)
@@ -731,10 +610,6 @@ impl ConnectionPool {
}); });
} }
// Indicate we're waiting on a server connection from a pool.
let now = Instant::now();
client_stats.waiting();
while !candidates.is_empty() { while !candidates.is_empty() {
// Get the next candidate // Get the next candidate
let address = match candidates.pop() { let address = match candidates.pop() {
@@ -745,7 +620,7 @@ impl ConnectionPool {
let mut force_healthcheck = false; let mut force_healthcheck = false;
if self.is_banned(address) { if self.is_banned(address) {
if self.try_unban(address).await { if self.try_unban(&address).await {
force_healthcheck = true; force_healthcheck = true;
} else { } else {
debug!("Address {:?} is banned", address); debug!("Address {:?} is banned", address);
@@ -753,22 +628,21 @@ impl ConnectionPool {
} }
} }
// Indicate we're waiting on a server connection from a pool.
let now = Instant::now();
client_stats.waiting();
// Check if we can connect // Check if we can connect
let mut conn = match self.databases[address.shard][address.address_index] let mut conn = match self.databases[address.shard][address.address_index]
.get() .get()
.await .await
{ {
Ok(conn) => { Ok(conn) => conn,
address.reset_error_count();
conn
}
Err(err) => { Err(err) => {
error!( error!("Banning instance {:?}, error: {:?}", address, err);
"Connection checkout error for instance {:?}, error: {:?}",
address, err
);
self.ban(address, BanReason::FailedCheckout, Some(client_stats)); self.ban(address, BanReason::FailedCheckout, Some(client_stats));
address.stats.error(); address.stats.error();
client_stats.idle();
client_stats.checkout_error(); client_stats.checkout_error();
continue; continue;
} }
@@ -786,13 +660,13 @@ impl ConnectionPool {
// since we last checked the server is ok. // since we last checked the server is ok.
// Health checks are pretty expensive. // Health checks are pretty expensive.
if !require_healthcheck { if !require_healthcheck {
let checkout_time = now.elapsed().as_micros() as u64; let checkout_time: u64 = now.elapsed().as_micros() as u64;
client_stats.checkout_success(); client_stats.checkout_time(checkout_time);
server server
.stats() .stats()
.checkout_time(checkout_time, client_stats.application_name()); .checkout_time(checkout_time, client_stats.application_name());
server.stats().active(client_stats.application_name()); server.stats().active(client_stats.application_name());
client_stats.active();
return Ok((conn, address.clone())); return Ok((conn, address.clone()));
} }
@@ -800,21 +674,11 @@ impl ConnectionPool {
.run_health_check(address, server, now, client_stats) .run_health_check(address, server, now, client_stats)
.await .await
{ {
let checkout_time = now.elapsed().as_micros() as u64;
client_stats.checkout_success();
server
.stats()
.checkout_time(checkout_time, client_stats.application_name());
server.stats().active(client_stats.application_name());
client_stats.active();
return Ok((conn, address.clone())); return Ok((conn, address.clone()));
} else { } else {
continue; continue;
} }
} }
client_stats.checkout_error();
Err(Error::AllServersDown) Err(Error::AllServersDown)
} }
@@ -839,7 +703,7 @@ impl ConnectionPool {
Ok(res) => match res { Ok(res) => match res {
Ok(_) => { Ok(_) => {
let checkout_time: u64 = start.elapsed().as_micros() as u64; let checkout_time: u64 = start.elapsed().as_micros() as u64;
client_info.checkout_success(); client_info.checkout_time(checkout_time);
server server
.stats() .stats()
.checkout_time(checkout_time, client_info.application_name()); .checkout_time(checkout_time, client_info.application_name());
@@ -851,7 +715,7 @@ impl ConnectionPool {
// Health check failed. // Health check failed.
Err(err) => { Err(err) => {
error!( error!(
"Failed health check on instance {:?}, error: {:?}", "Banning instance {:?} because of failed health check, {:?}",
address, err address, err
); );
} }
@@ -860,50 +724,35 @@ impl ConnectionPool {
// Health check timed out. // Health check timed out.
Err(err) => { Err(err) => {
error!( error!(
"Health check timeout on instance {:?}, error: {:?}", "Banning instance {:?} because of health check timeout, {:?}",
address, err address, err
); );
} }
} }
// Don't leave a bad connection in the pool. // Don't leave a bad connection in the pool.
server.mark_bad("failed health check"); server.mark_bad();
self.ban(address, BanReason::FailedHealthCheck, Some(client_info)); self.ban(&address, BanReason::FailedHealthCheck, Some(client_info));
false return false;
} }
/// Ban an address (i.e. replica). It no longer will serve /// Ban an address (i.e. replica). It no longer will serve
/// traffic for any new transactions. Existing transactions on that replica /// traffic for any new transactions. Existing transactions on that replica
/// will finish successfully or error out to the clients. /// will finish successfully or error out to the clients.
pub fn ban(&self, address: &Address, reason: BanReason, client_info: Option<&ClientStats>) { pub fn ban(&self, address: &Address, reason: BanReason, client_info: Option<&ClientStats>) {
// Count the number of errors since the last successful checkout
// This is used to determine if the shard is down
match reason {
BanReason::FailedHealthCheck
| BanReason::FailedCheckout
| BanReason::MessageSendFailed
| BanReason::MessageReceiveFailed => {
address.increment_error_count();
}
_ => (),
};
// Primary can never be banned // Primary can never be banned
if address.role == Role::Primary { if address.role == Role::Primary {
return; return;
} }
error!("Banning instance {:?}, reason: {:?}", address, reason);
let now = chrono::offset::Utc::now().naive_utc(); let now = chrono::offset::Utc::now().naive_utc();
let mut guard = self.banlist.write(); let mut guard = self.banlist.write();
error!("Banning {:?}", address);
if let Some(client_info) = client_info { if let Some(client_info) = client_info {
client_info.ban_error(); client_info.ban_error();
address.stats.error(); address.stats.error();
} }
guard[address.shard].insert(address.clone(), (reason, now)); guard[address.shard].insert(address.clone(), (reason, now));
} }
@@ -994,10 +843,10 @@ impl ConnectionPool {
let guard = self.banlist.read(); let guard = self.banlist.read();
for banlist in guard.iter() { for banlist in guard.iter() {
for (address, (reason, timestamp)) in banlist.iter() { for (address, (reason, timestamp)) in banlist.iter() {
bans.push((address.clone(), (reason.clone(), *timestamp))); bans.push((address.clone(), (reason.clone(), timestamp.clone())));
} }
} }
bans return bans;
} }
/// Get the address from the host url /// Get the address from the host url
@@ -1039,11 +888,10 @@ impl ConnectionPool {
&self.addresses[shard][server] &self.addresses[shard][server]
} }
pub fn server_parameters(&self) -> ServerParameters { pub fn server_info(&self) -> BytesMut {
self.original_server_parameters.read().clone() self.server_info.read().clone()
} }
/// Get the number of checked out connection for an address
fn busy_connection_count(&self, address: &Address) -> u32 { fn busy_connection_count(&self, address: &Address) -> u32 {
let state = self.pool_state(address.shard, address.address_index); let state = self.pool_state(address.shard, address.address_index);
let idle = state.idle_connections; let idle = state.idle_connections;
@@ -1055,93 +903,36 @@ impl ConnectionPool {
} }
let busy = provisioned - idle; let busy = provisioned - idle;
debug!("{:?} has {:?} busy connections", address, busy); debug!("{:?} has {:?} busy connections", address, busy);
busy return busy;
}
fn valid_shard_id(&self, shard: Option<usize>) -> bool {
match shard {
None => true,
Some(shard) => shard < self.shards(),
}
}
/// Register a parse statement to the pool's cache and return the rewritten parse
///
/// Do not pass an anonymous parse statement to this function
pub fn register_parse_to_cache(&self, hash: u64, parse: &Parse) -> Option<Arc<Parse>> {
// We should only be calling this function if the cache is enabled
match self.prepared_statement_cache {
Some(ref prepared_statement_cache) => {
let mut cache = prepared_statement_cache.lock();
Some(cache.get_or_insert(parse, hash))
}
None => None,
}
}
/// Promote a prepared statement hash in the LRU
pub fn promote_prepared_statement_hash(&self, hash: &u64) {
// We should only be calling this function if the cache is enabled
if let Some(ref prepared_statement_cache) = self.prepared_statement_cache {
let mut cache = prepared_statement_cache.lock();
cache.promote(hash);
}
} }
} }
/// Wrapper for the bb8 connection pool. /// Wrapper for the bb8 connection pool.
pub struct ServerPool { pub struct ServerPool {
/// Server address.
address: Address, address: Address,
/// Server Postgres user.
user: User, user: User,
/// Server database.
database: String, database: String,
/// Client/server mapping.
client_server_map: ClientServerMap, client_server_map: ClientServerMap,
stats: Arc<PoolStats>,
/// Server auth hash (for auth passthrough).
auth_hash: Arc<RwLock<Option<String>>>, auth_hash: Arc<RwLock<Option<String>>>,
/// Server plugins.
plugins: Option<Plugins>,
/// Should we clean up dirty connections before putting them into the pool?
cleanup_connections: bool,
/// Log client parameter status changes
log_client_parameter_status_changes: bool,
/// Prepared statement cache size
prepared_statement_cache_size: usize,
} }
impl ServerPool { impl ServerPool {
#[allow(clippy::too_many_arguments)]
pub fn new( pub fn new(
address: Address, address: Address,
user: User, user: User,
database: &str, database: &str,
client_server_map: ClientServerMap, client_server_map: ClientServerMap,
stats: Arc<PoolStats>,
auth_hash: Arc<RwLock<Option<String>>>, auth_hash: Arc<RwLock<Option<String>>>,
plugins: Option<Plugins>,
cleanup_connections: bool,
log_client_parameter_status_changes: bool,
prepared_statement_cache_size: usize,
) -> ServerPool { ) -> ServerPool {
ServerPool { ServerPool {
address, address,
user, user: user.clone(),
database: database.to_string(), database: database.to_string(),
client_server_map, client_server_map,
stats,
auth_hash, auth_hash,
plugins,
cleanup_connections,
log_client_parameter_status_changes,
prepared_statement_cache_size,
} }
} }
} }
@@ -1157,6 +948,7 @@ impl ManageConnection for ServerPool {
let stats = Arc::new(ServerStats::new( let stats = Arc::new(ServerStats::new(
self.address.clone(), self.address.clone(),
self.stats.clone(),
tokio::time::Instant::now(), tokio::time::Instant::now(),
)); ));
@@ -1170,25 +962,10 @@ impl ManageConnection for ServerPool {
self.client_server_map.clone(), self.client_server_map.clone(),
stats.clone(), stats.clone(),
self.auth_hash.clone(), self.auth_hash.clone(),
self.cleanup_connections,
self.log_client_parameter_status_changes,
self.prepared_statement_cache_size,
) )
.await .await
{ {
Ok(mut conn) => { Ok(conn) => {
if let Some(ref plugins) = self.plugins {
if let Some(ref prewarmer) = plugins.prewarmer {
let mut prewarmer = prewarmer::Prewarmer {
enabled: prewarmer.enabled,
server: &mut conn,
queries: &prewarmer.queries,
};
prewarmer.run().await?;
}
}
stats.idle(); stats.idle();
Ok(conn) Ok(conn)
} }

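Among the pieces this compare removes from pool.rs is the prepared-statement LRU. A minimal sketch of the same get-or-insert pattern with the `lru` crate, using a plain `String` in place of `Parse` so it stays self-contained (the hash is computed by the caller, outside any lock, as the removed comment notes):

```rust
use lru::LruCache;
use std::num::NonZeroUsize;
use std::sync::Arc;

struct StatementCache {
    cache: LruCache<u64, Arc<String>>,
}

impl StatementCache {
    fn new(size: usize) -> Self {
        StatementCache {
            // Capacity cannot be zero, mirroring the guard in the removed code.
            cache: LruCache::new(NonZeroUsize::new(size.max(1)).unwrap()),
        }
    }

    /// Return the cached entry for `hash`, inserting `statement` if absent.
    fn get_or_insert(&mut self, hash: u64, statement: &str) -> Arc<String> {
        match self.cache.get(&hash) {
            Some(existing) => existing.clone(), // get() also promotes the entry
            None => {
                let new = Arc::new(statement.to_string());
                // push() returns the evicted LRU entry when the cache is full.
                if let Some((_, evicted)) = self.cache.push(hash, new.clone()) {
                    eprintln!("evicted prepared statement: {}", evicted);
                }
                new
            }
        }
    }
}
```
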
View File

@@ -1,41 +1,22 @@
use http_body_util::Full; use hyper::service::{make_service_fn, service_fn};
use hyper::body; use hyper::{Body, Method, Request, Response, Server, StatusCode};
use hyper::body::Bytes; use log::{error, info, warn};
use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use log::{debug, error, info};
use phf::phf_map; use phf::phf_map;
use std::collections::HashMap; use std::collections::HashMap;
use std::fmt; use std::fmt;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use tokio::net::TcpListener; use std::sync::Arc;
use crate::config::Address; use crate::config::Address;
use crate::pool::{get_all_pools, PoolIdentifier}; use crate::pool::get_all_pools;
use crate::stats::get_server_stats; use crate::stats::{get_pool_stats, get_server_stats, ServerStats};
use crate::stats::pool::PoolStats;
struct MetricHelpType { struct MetricHelpType {
help: &'static str, help: &'static str,
ty: &'static str, ty: &'static str,
} }
struct ServerPrometheusStats {
bytes_received: u64,
bytes_sent: u64,
transaction_count: u64,
query_count: u64,
error_count: u64,
active_count: u64,
idle_count: u64,
login_count: u64,
tested_count: u64,
}
// reference for metric types: https://prometheus.io/docs/concepts/metric_types/ // reference for metric types: https://prometheus.io/docs/concepts/metric_types/
// counters only increase // counters only increase
// gauges can arbitrarily increase or decrease // gauges can arbitrarily increase or decrease
@@ -138,46 +119,22 @@ static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = ph
}, },
"servers_bytes_received" => MetricHelpType { "servers_bytes_received" => MetricHelpType {
help: "Volume in bytes of network traffic received by server", help: "Volume in bytes of network traffic received by server",
ty: "counter", ty: "gauge",
}, },
"servers_bytes_sent" => MetricHelpType { "servers_bytes_sent" => MetricHelpType {
help: "Volume in bytes of network traffic sent by server", help: "Volume in bytes of network traffic sent by server",
ty: "counter", ty: "gauge",
}, },
"servers_transaction_count" => MetricHelpType { "servers_transaction_count" => MetricHelpType {
help: "Number of transactions executed by server", help: "Number of transactions executed by server",
ty: "counter", ty: "gauge",
}, },
"servers_query_count" => MetricHelpType { "servers_query_count" => MetricHelpType {
help: "Number of queries executed by server", help: "Number of queries executed by server",
ty: "counter", ty: "gauge",
}, },
"servers_error_count" => MetricHelpType { "servers_error_count" => MetricHelpType {
help: "Number of errors", help: "Number of errors",
ty: "counter",
},
"servers_idle_count" => MetricHelpType {
help: "Number of server connection in idle state",
ty: "gauge",
},
"servers_active_count" => MetricHelpType {
help: "Number of server connection in active state",
ty: "gauge",
},
"servers_tested_count" => MetricHelpType {
help: "Number of server connection in tested state",
ty: "gauge",
},
"servers_login_count" => MetricHelpType {
help: "Number of server connection in login state",
ty: "gauge",
},
"servers_is_banned" => MetricHelpType {
help: "0 if server is not banned, 1 if server is banned",
ty: "gauge",
},
"servers_is_paused" => MetricHelpType {
help: "0 if server is not paused, 1 if server is paused",
ty: "gauge", ty: "gauge",
}, },
"databases_pool_size" => MetricHelpType { "databases_pool_size" => MetricHelpType {
@@ -245,9 +202,7 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
labels.insert("shard", address.shard.to_string()); labels.insert("shard", address.shard.to_string());
labels.insert("role", address.role.to_string()); labels.insert("role", address.role.to_string());
labels.insert("pool", address.pool_name.clone()); labels.insert("pool", address.pool_name.clone());
labels.insert("index", address.address_index.to_string());
labels.insert("database", address.database.to_string()); labels.insert("database", address.database.to_string());
labels.insert("user", address.username.clone());
Self::from_name(&format!("databases_{}", name), value, labels) Self::from_name(&format!("databases_{}", name), value, labels)
} }
@@ -262,9 +217,8 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
labels.insert("shard", address.shard.to_string()); labels.insert("shard", address.shard.to_string());
labels.insert("role", address.role.to_string()); labels.insert("role", address.role.to_string());
labels.insert("pool", address.pool_name.clone()); labels.insert("pool", address.pool_name.clone());
labels.insert("index", address.address_index.to_string());
labels.insert("database", address.database.to_string()); labels.insert("database", address.database.to_string());
labels.insert("user", address.username.clone());
Self::from_name(&format!("servers_{}", name), value, labels) Self::from_name(&format!("servers_{}", name), value, labels)
} }
@@ -274,25 +228,21 @@ impl<Value: fmt::Display> PrometheusMetric<Value> {
labels.insert("shard", address.shard.to_string()); labels.insert("shard", address.shard.to_string());
labels.insert("pool", address.pool_name.clone()); labels.insert("pool", address.pool_name.clone());
labels.insert("role", address.role.to_string()); labels.insert("role", address.role.to_string());
labels.insert("index", address.address_index.to_string());
labels.insert("database", address.database.to_string()); labels.insert("database", address.database.to_string());
labels.insert("user", address.username.clone());
Self::from_name(&format!("stats_{}", name), value, labels) Self::from_name(&format!("stats_{}", name), value, labels)
} }
fn from_pool(pool_id: PoolIdentifier, name: &str, value: u64) -> Option<PrometheusMetric<u64>> { fn from_pool(pool: &(String, String), name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
let mut labels = HashMap::new(); let mut labels = HashMap::new();
labels.insert("pool", pool_id.db); labels.insert("pool", pool.0.clone());
labels.insert("user", pool_id.user); labels.insert("user", pool.1.clone());
Self::from_name(&format!("pools_{}", name), value, labels) Self::from_name(&format!("pools_{}", name), value, labels)
} }
} }
async fn prometheus_stats( async fn prometheus_stats(request: Request<Body>) -> Result<Response<Body>, hyper::http::Error> {
request: Request<body::Incoming>,
) -> Result<Response<Full<Bytes>>, hyper::http::Error> {
match (request.method(), request.uri().path()) { match (request.method(), request.uri().path()) {
(&Method::GET, "/metrics") => { (&Method::GET, "/metrics") => {
let mut lines = Vec::new(); let mut lines = Vec::new();
@@ -324,7 +274,7 @@ fn push_address_stats(lines: &mut Vec<String>) {
{ {
lines.push(prometheus_metric.to_string()); lines.push(prometheus_metric.to_string());
} else { } else {
debug!("Metric {} not implemented for {}", key, address.name()); warn!("Metric {} not implemented for {}", key, address.name());
} }
} }
} }
@@ -334,15 +284,18 @@ fn push_address_stats(lines: &mut Vec<String>) {
// Adds relevant metrics shown in a SHOW POOLS admin command. // Adds relevant metrics shown in a SHOW POOLS admin command.
fn push_pool_stats(lines: &mut Vec<String>) { fn push_pool_stats(lines: &mut Vec<String>) {
let pool_stats = PoolStats::construct_pool_lookup(); let pool_stats = get_pool_stats();
for (pool_id, stats) in pool_stats.iter() { for (pool, stats) in pool_stats.iter() {
let stats = &**stats;
for (name, value) in stats.clone() { for (name, value) in stats.clone() {
if let Some(prometheus_metric) = if let Some(prometheus_metric) = PrometheusMetric::<u64>::from_pool(pool, &name, value)
PrometheusMetric::<u64>::from_pool(pool_id.clone(), &name, value)
{ {
lines.push(prometheus_metric.to_string()); lines.push(prometheus_metric.to_string());
} else { } else {
debug!("Metric {} not implemented for ({})", name, *pool_id); warn!(
"Metric {} not implemented for ({},{})",
name, pool.0, pool.1
);
} }
} }
} }
@@ -367,7 +320,7 @@ fn push_database_stats(lines: &mut Vec<String>) {
{ {
lines.push(prometheus_metric.to_string()); lines.push(prometheus_metric.to_string());
} else { } else {
debug!("Metric {} not implemented for {}", key, address.name()); warn!("Metric {} not implemented for {}", key, address.name());
} }
} }
} }
@@ -378,51 +331,34 @@ fn push_database_stats(lines: &mut Vec<String>) {
// Adds relevant metrics shown in a SHOW SERVERS admin command. // Adds relevant metrics shown in a SHOW SERVERS admin command.
fn push_server_stats(lines: &mut Vec<String>) { fn push_server_stats(lines: &mut Vec<String>) {
let server_stats = get_server_stats(); let server_stats = get_server_stats();
let mut prom_stats = HashMap::<String, ServerPrometheusStats>::new(); let mut server_stats_by_addresses = HashMap::<String, Arc<ServerStats>>::new();
for (_, stats) in server_stats { for (_, stats) in server_stats {
let entry = prom_stats server_stats_by_addresses.insert(stats.address_name(), stats);
.entry(stats.address_name())
.or_insert(ServerPrometheusStats {
bytes_received: 0,
bytes_sent: 0,
transaction_count: 0,
query_count: 0,
error_count: 0,
active_count: 0,
idle_count: 0,
login_count: 0,
tested_count: 0,
});
entry.bytes_received += stats.bytes_received.load(Ordering::Relaxed);
entry.bytes_sent += stats.bytes_sent.load(Ordering::Relaxed);
entry.transaction_count += stats.transaction_count.load(Ordering::Relaxed);
entry.query_count += stats.query_count.load(Ordering::Relaxed);
entry.error_count += stats.error_count.load(Ordering::Relaxed);
match stats.state.load(Ordering::Relaxed) {
crate::stats::ServerState::Login => entry.login_count += 1,
crate::stats::ServerState::Active => entry.active_count += 1,
crate::stats::ServerState::Tested => entry.tested_count += 1,
crate::stats::ServerState::Idle => entry.idle_count += 1,
}
} }
for (_, pool) in get_all_pools() { for (_, pool) in get_all_pools() {
for shard in 0..pool.shards() { for shard in 0..pool.shards() {
for server in 0..pool.servers(shard) { for server in 0..pool.servers(shard) {
let address = pool.address(shard, server); let address = pool.address(shard, server);
if let Some(server_info) = prom_stats.get(&address.name()) { if let Some(server_info) = server_stats_by_addresses.get(&address.name()) {
let metrics = [ let metrics = [
("bytes_received", server_info.bytes_received), (
("bytes_sent", server_info.bytes_sent), "bytes_received",
("transaction_count", server_info.transaction_count), server_info.bytes_received.load(Ordering::Relaxed),
("query_count", server_info.query_count), ),
("error_count", server_info.error_count), ("bytes_sent", server_info.bytes_sent.load(Ordering::Relaxed)),
("idle_count", server_info.idle_count), (
("active_count", server_info.active_count), "transaction_count",
("login_count", server_info.login_count), server_info.transaction_count.load(Ordering::Relaxed),
("tested_count", server_info.tested_count), ),
("is_banned", if pool.is_banned(address) { 1 } else { 0 }), (
("is_paused", if pool.paused() { 1 } else { 0 }), "query_count",
server_info.query_count.load(Ordering::Relaxed),
),
(
"error_count",
server_info.error_count.load(Ordering::Relaxed),
),
]; ];
for (key, value) in metrics { for (key, value) in metrics {
if let Some(prometheus_metric) = if let Some(prometheus_metric) =
@@ -430,7 +366,7 @@ fn push_server_stats(lines: &mut Vec<String>) {
{ {
lines.push(prometheus_metric.to_string()); lines.push(prometheus_metric.to_string());
} else { } else {
debug!("Metric {} not implemented for {}", key, address.name()); warn!("Metric {} not implemented for {}", key, address.name());
} }
} }
} }
@@ -440,35 +376,14 @@ fn push_server_stats(lines: &mut Vec<String>) {
} }
pub async fn start_metric_server(http_addr: SocketAddr) { pub async fn start_metric_server(http_addr: SocketAddr) {
let listener = TcpListener::bind(http_addr); let http_service_factory =
let listener = match listener.await { make_service_fn(|_conn| async { Ok::<_, hyper::Error>(service_fn(prometheus_stats)) });
Ok(listener) => listener, let server = Server::bind(&http_addr).serve(http_service_factory);
Err(e) => {
error!("Failed to bind prometheus server to HTTP address: {}.", e);
return;
}
};
info!( info!(
"Exposing prometheus metrics on http://{}/metrics.", "Exposing prometheus metrics on http://{}/metrics.",
http_addr http_addr
); );
loop { if let Err(e) = server.await {
let stream = match listener.accept().await { error!("Failed to run HTTP server: {}.", e);
Ok((stream, _)) => stream,
Err(e) => {
error!("Error accepting connection: {}", e);
continue;
}
};
let io = TokioIo::new(stream);
tokio::task::spawn(async move {
if let Err(err) = http1::Builder::new()
.serve_connection(io, service_fn(prometheus_stats))
.await
{
eprintln!("Error serving HTTP connection for metrics: {:?}", err);
}
});
} }
} }

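The HELP/TYPE metadata looked up above is what the Prometheus text exposition format requires for each metric family, which is also why the counter/gauge distinction in this diff matters: counters must only ever increase. A minimal sketch of how one family renders (label values are illustrative):

```rust
fn render(name: &str, help: &str, ty: &str, labels: &[(&str, &str)], value: u64) -> String {
    let labels = labels
        .iter()
        .map(|(k, v)| format!("{}=\"{}\"", k, v))
        .collect::<Vec<_>>()
        .join(",");
    format!("# HELP {name} {help}\n# TYPE {name} {ty}\n{name}{{{labels}}} {value}")
}

fn main() {
    println!(
        "{}",
        render(
            "servers_bytes_received",
            "Volume in bytes of network traffic received by server",
            "counter",
            &[("host", "10.0.0.1"), ("shard", "0"), ("role", "replica")],
            1048576,
        )
    );
    // # HELP servers_bytes_received Volume in bytes of network traffic received by server
    // # TYPE servers_bytes_received counter
    // servers_bytes_received{host="10.0.0.1",shard="0",role="replica"} 1048576
}
```
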
File diff suppressed because it is too large

View File

@@ -79,12 +79,12 @@ impl ScramSha256 {
let server_message = Message::parse(message)?; let server_message = Message::parse(message)?;
if !server_message.nonce.starts_with(&self.nonce) { if !server_message.nonce.starts_with(&self.nonce) {
return Err(Error::ProtocolSyncError("SCRAM".to_string())); return Err(Error::ProtocolSyncError(format!("SCRAM")));
} }
let salt = match general_purpose::STANDARD.decode(&server_message.salt) { let salt = match general_purpose::STANDARD.decode(&server_message.salt) {
Ok(salt) => salt, Ok(salt) => salt,
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())), Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
}; };
let salted_password = Self::hi( let salted_password = Self::hi(
@@ -166,9 +166,9 @@ impl ScramSha256 {
pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> { pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
let final_message = FinalMessage::parse(message)?; let final_message = FinalMessage::parse(message)?;
let verifier = match general_purpose::STANDARD.decode(final_message.value) { let verifier = match general_purpose::STANDARD.decode(&final_message.value) {
Ok(verifier) => verifier, Ok(verifier) => verifier,
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())), Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
}; };
let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) { let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
@@ -230,14 +230,14 @@ impl Message {
.collect::<Vec<String>>(); .collect::<Vec<String>>();
if parts.len() != 3 { if parts.len() != 3 {
return Err(Error::ProtocolSyncError("SCRAM".to_string())); return Err(Error::ProtocolSyncError(format!("SCRAM")));
} }
let nonce = str::replace(&parts[0], "r=", ""); let nonce = str::replace(&parts[0], "r=", "");
let salt = str::replace(&parts[1], "s=", ""); let salt = str::replace(&parts[1], "s=", "");
let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() { let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
Ok(iterations) => iterations, Ok(iterations) => iterations,
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())), Err(_) => return Err(Error::ProtocolSyncError(format!("SCRAM"))),
}; };
Ok(Message { Ok(Message {
@@ -257,7 +257,7 @@ impl FinalMessage {
/// Parse the server final validation message. /// Parse the server final validation message.
pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> { pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
if !message.starts_with(b"v=") || message.len() < 4 { if !message.starts_with(b"v=") || message.len() < 4 {
return Err(Error::ProtocolSyncError("SCRAM".to_string())); return Err(Error::ProtocolSyncError(format!("SCRAM")));
} }
Ok(FinalMessage { Ok(FinalMessage {
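
`Message::parse` above splits the SCRAM server-first-message on commas into its `r=` (combined nonce), `s=` (base64 salt) and `i=` (iteration count) attributes. A standalone sketch of the same shape; the sample values follow the exchange format shown in RFC 5802 and are illustrative only.

fn parse_server_first(msg: &str) -> Result<(String, String, u32), String> {
    let parts: Vec<&str> = msg.split(',').collect();
    if parts.len() != 3 {
        return Err("SCRAM".to_string());
    }
    // Each attribute is a one-letter key, '=', then the value.
    let nonce = parts[0].strip_prefix("r=").ok_or("SCRAM")?.to_string();
    let salt = parts[1].strip_prefix("s=").ok_or("SCRAM")?.to_string();
    let iterations = parts[2]
        .strip_prefix("i=")
        .ok_or("SCRAM")?
        .parse::<u32>()
        .map_err(|_| "SCRAM")?;
    Ok((nonce, salt, iterations))
}

fn main() {
    let msg = "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096";
    let (nonce, salt, iterations) = parse_server_first(msg).unwrap();
    assert_eq!(iterations, 4096);
    println!("nonce={} salt={} i={}", nonce, salt, iterations);
}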

View File

@@ -3,14 +3,11 @@
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use fallible_iterator::FallibleIterator; use fallible_iterator::FallibleIterator;
use log::{debug, error, info, trace, warn}; use log::{debug, error, info, trace, warn};
use lru::LruCache;
use once_cell::sync::Lazy;
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
use postgres_protocol::message; use postgres_protocol::message;
use std::collections::{HashMap, HashSet, VecDeque}; use std::collections::HashMap;
use std::mem; use std::io::Read;
use std::net::IpAddr; use std::net::IpAddr;
use std::num::NonZeroUsize;
use std::sync::Arc; use std::sync::Arc;
use std::time::SystemTime; use std::time::SystemTime;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, BufStream}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, BufStream};
@@ -22,7 +19,6 @@ use crate::config::{get_config, Address, User};
use crate::constants::*; use crate::constants::*;
use crate::dns_cache::{AddrSet, CACHED_RESOLVER}; use crate::dns_cache::{AddrSet, CACHED_RESOLVER};
use crate::errors::{Error, ServerIdentifier}; use crate::errors::{Error, ServerIdentifier};
use crate::messages::BytesMutReader;
use crate::messages::*; use crate::messages::*;
use crate::mirrors::MirroringManager; use crate::mirrors::MirroringManager;
use crate::pool::ClientServerMap; use crate::pool::ClientServerMap;
@@ -107,162 +103,6 @@ impl StreamInner {
} }
} }
#[derive(Copy, Clone)]
struct CleanupState {
/// If server connection requires RESET ALL before checkin because of set statement
needs_cleanup_set: bool,
/// If server connection requires DEALLOCATE ALL before checkin because of prepare statement
needs_cleanup_prepare: bool,
}
impl CleanupState {
fn new() -> Self {
CleanupState {
needs_cleanup_set: false,
needs_cleanup_prepare: false,
}
}
fn needs_cleanup(&self) -> bool {
self.needs_cleanup_set || self.needs_cleanup_prepare
}
fn set_true(&mut self) {
self.needs_cleanup_set = true;
self.needs_cleanup_prepare = true;
}
fn reset(&mut self) {
self.needs_cleanup_set = false;
self.needs_cleanup_prepare = false;
}
}
impl std::fmt::Display for CleanupState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"SET: {}, PREPARE: {}",
self.needs_cleanup_set, self.needs_cleanup_prepare
)
}
}
static TRACKED_PARAMETERS: Lazy<HashSet<String>> = Lazy::new(|| {
let mut set = HashSet::new();
set.insert("client_encoding".to_string());
set.insert("DateStyle".to_string());
set.insert("TimeZone".to_string());
set.insert("standard_conforming_strings".to_string());
set.insert("application_name".to_string());
set
});
#[derive(Debug, Clone)]
pub struct ServerParameters {
parameters: HashMap<String, String>,
}
impl Default for ServerParameters {
fn default() -> Self {
Self::new()
}
}
impl ServerParameters {
pub fn new() -> Self {
let mut server_parameters = ServerParameters {
parameters: HashMap::new(),
};
server_parameters.set_param("client_encoding".to_string(), "UTF8".to_string(), false);
server_parameters.set_param("DateStyle".to_string(), "ISO, MDY".to_string(), false);
server_parameters.set_param("TimeZone".to_string(), "Etc/UTC".to_string(), false);
server_parameters.set_param(
"standard_conforming_strings".to_string(),
"on".to_string(),
false,
);
server_parameters.set_param("application_name".to_string(), "pgcat".to_string(), false);
server_parameters
}
/// Stores a parameter. If `startup` is true, all parameters are saved;
/// otherwise only tracked parameters will be set.
pub fn set_param(&mut self, mut key: String, value: String, startup: bool) {
// The startup parameter will send uncapitalized keys but parameter status packets will send capitalized keys
if key == "timezone" {
key = "TimeZone".to_string();
} else if key == "datestyle" {
key = "DateStyle".to_string();
};
if TRACKED_PARAMETERS.contains(&key) || startup {
self.parameters.insert(key, value);
}
}
pub fn set_from_hashmap(&mut self, parameters: &HashMap<String, String>, startup: bool) {
// iterate through each and call set_param
for (key, value) in parameters {
self.set_param(key.to_string(), value.to_string(), startup);
}
}
// Gets the diff of the parameters
fn compare_params(&self, incoming_parameters: &ServerParameters) -> HashMap<String, String> {
let mut diff = HashMap::new();
// iterate through tracked parameters
for key in TRACKED_PARAMETERS.iter() {
if let Some(incoming_value) = incoming_parameters.parameters.get(key) {
if let Some(value) = self.parameters.get(key) {
if value != incoming_value {
diff.insert(key.to_string(), incoming_value.to_string());
}
}
}
}
diff
}
pub fn get_application_name(&self) -> &String {
// Can unwrap because we set it in the constructor
self.parameters.get("application_name").unwrap()
}
fn add_parameter_message(key: &str, value: &str, buffer: &mut BytesMut) {
buffer.put_u8(b'S');
// 4 is the length of the i32 length field; the +1s are for the two null terminators
let len = 4 + key.len() + 1 + value.len() + 1;
buffer.put_i32(len as i32);
buffer.put_slice(key.as_bytes());
buffer.put_u8(0);
buffer.put_slice(value.as_bytes());
buffer.put_u8(0);
}
}
impl From<&ServerParameters> for BytesMut {
fn from(server_parameters: &ServerParameters) -> Self {
let mut bytes = BytesMut::new();
for (key, value) in &server_parameters.parameters {
ServerParameters::add_parameter_message(key, value, &mut bytes);
}
bytes
}
}
// pub fn compare
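
`add_parameter_message` above emits a Postgres ParameterStatus ('S') packet: a tag byte, then an i32 length that counts itself plus two NUL-terminated strings. For key "TimeZone" (8 bytes) and value "UTC" (3 bytes) the length field is 4 + 8 + 1 + 3 + 1 = 17, so the whole packet is 18 bytes with the tag. A sketch of that layout using the `bytes` crate:

use bytes::{BufMut, BytesMut};

fn parameter_status(key: &str, value: &str) -> BytesMut {
    let mut buf = BytesMut::new();
    buf.put_u8(b'S');
    // The i32 length counts itself plus both NUL-terminated strings.
    let len = 4 + key.len() + 1 + value.len() + 1;
    buf.put_i32(len as i32);
    buf.put_slice(key.as_bytes());
    buf.put_u8(0);
    buf.put_slice(value.as_bytes());
    buf.put_u8(0);
    buf
}

fn main() {
    let msg = parameter_status("TimeZone", "UTC");
    // 1 tag byte + 17 counted bytes.
    assert_eq!(msg.len(), 18);
}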
/// Server state. /// Server state.
pub struct Server { pub struct Server {
/// Server host, e.g. localhost, /// Server host, e.g. localhost,
@@ -276,7 +116,7 @@ pub struct Server {
buffer: BytesMut, buffer: BytesMut,
/// Server information the server sent us over on startup. /// Server information the server sent us over on startup.
server_parameters: ServerParameters, server_info: BytesMut,
/// Backend id and secret key used for query cancellation. /// Backend id and secret key used for query cancellation.
process_id: i32, process_id: i32,
@@ -288,14 +128,11 @@ pub struct Server {
/// Is there more data for the client to read. /// Is there more data for the client to read.
data_available: bool, data_available: bool,
/// Is the server in copy-in or copy-out modes
in_copy_mode: bool,
/// Is the server broken? We'll remove it from the pool if so. /// Is the server broken? We'll remove it from the pool if so.
bad: bool, bad: bool,
/// If server connection requires reset statements before checkin /// If server connection requires a DISCARD ALL before checkin
cleanup_state: CleanupState, needs_cleanup: bool,
/// Mapping of clients and servers used for query cancellation. /// Mapping of clients and servers used for query cancellation.
client_server_map: ClientServerMap, client_server_map: ClientServerMap,
@@ -309,31 +146,18 @@ pub struct Server {
/// Application name using the server at the moment. /// Application name using the server at the moment.
application_name: String, application_name: String,
/// Last time that a successful server send or response happened // Last time that a successful server send or response happened
last_activity: SystemTime, last_activity: SystemTime,
mirror_manager: Option<MirroringManager>, mirror_manager: Option<MirroringManager>,
/// Associated addresses used // Associated addresses used
addr_set: Option<AddrSet>, addr_set: Option<AddrSet>,
/// Should clean up dirty connections?
cleanup_connections: bool,
/// Log client parameter status changes
log_client_parameter_status_changes: bool,
/// Prepared statements
prepared_statement_cache: Option<LruCache<String, ()>>,
/// Prepared statement being currently registered on the server.
registering_prepared_statement: VecDeque<String>,
} }
impl Server { impl Server {
/// Pretend to be the Postgres client and connect to the server given host, port and credentials. /// Pretend to be the Postgres client and connect to the server given host, port and credentials.
/// Perform the authentication and return the server in a ready for query state. /// Perform the authentication and return the server in a ready for query state.
#[allow(clippy::too_many_arguments)]
pub async fn startup( pub async fn startup(
address: &Address, address: &Address,
user: &User, user: &User,
@@ -341,9 +165,6 @@ impl Server {
client_server_map: ClientServerMap, client_server_map: ClientServerMap,
stats: Arc<ServerStats>, stats: Arc<ServerStats>,
auth_hash: Arc<RwLock<Option<String>>>, auth_hash: Arc<RwLock<Option<String>>>,
cleanup_connections: bool,
log_client_parameter_status_changes: bool,
prepared_statement_cache_size: usize,
) -> Result<Server, Error> { ) -> Result<Server, Error> {
let cached_resolver = CACHED_RESOLVER.load(); let cached_resolver = CACHED_RESOLVER.load();
let mut addr_set: Option<AddrSet> = None; let mut addr_set: Option<AddrSet> = None;
@@ -443,7 +264,10 @@ impl Server {
// Something else? // Something else?
m => { m => {
return Err(Error::SocketError(format!("Unknown message: {}", { m }))); return Err(Error::SocketError(format!(
"Unknown message: {}",
m as char
)));
} }
} }
} else { } else {
@@ -461,22 +285,27 @@ impl Server {
None => &user.username, None => &user.username,
}; };
let password = match user.server_password.as_ref() { let password = match user.server_password {
Some(server_password) => Some(server_password), Some(ref server_password) => Some(server_password),
None => user.password.as_ref(), None => match user.password {
Some(ref password) => Some(password),
None => None,
},
}; };
startup(&mut stream, username, database).await?; startup(&mut stream, username, database).await?;
let mut server_info = BytesMut::new();
let mut process_id: i32 = 0; let mut process_id: i32 = 0;
let mut secret_key: i32 = 0; let mut secret_key: i32 = 0;
let server_identifier = ServerIdentifier::new(username, database); let server_identifier = ServerIdentifier::new(username, &database);
// We'll be handling multiple packets, but they will all be structured the same. // We'll be handling multiple packets, but they will all be structured the same.
// We'll loop here until this exchange is complete. // We'll loop here until this exchange is complete.
let mut scram: Option<ScramSha256> = password.map(|password| ScramSha256::new(password)); let mut scram: Option<ScramSha256> = match password {
Some(password) => Some(ScramSha256::new(password)),
let mut server_parameters = ServerParameters::new(); None => None,
};
loop { loop {
let code = match stream.read_u8().await { let code = match stream.read_u8().await {
@@ -707,7 +536,8 @@ impl Server {
// An error message will be present. // An error message will be present.
_ => { _ => {
let mut error = vec![0u8; len as usize]; // Read the error message without the terminating null character.
let mut error = vec![0u8; len as usize - 4 - 1];
match stream.read_exact(&mut error).await { match stream.read_exact(&mut error).await {
Ok(_) => (), Ok(_) => (),
@@ -719,14 +549,10 @@ impl Server {
} }
}; };
let fields = match PgErrorMsg::parse(&error) { // TODO: the error message contains multiple fields; we can decode them and
Ok(f) => f, // present a prettier message to the user.
Err(err) => { // See: https://www.postgresql.org/docs/12/protocol-error-fields.html
return Err(err); error!("Server error: {}", String::from_utf8_lossy(&error));
}
};
trace!("error fields: {}", &fields);
error!("server error: {}: {}", fields.severity, fields.message);
} }
}; };
@@ -735,10 +561,9 @@ impl Server {
// ParameterStatus // ParameterStatus
'S' => { 'S' => {
let mut bytes = BytesMut::with_capacity(len as usize - 4); let mut param = vec![0u8; len as usize - 4];
bytes.resize(len as usize - mem::size_of::<i32>(), b'0');
match stream.read_exact(&mut bytes[..]).await { match stream.read_exact(&mut param).await {
Ok(_) => (), Ok(_) => (),
Err(_) => { Err(_) => {
return Err(Error::ServerStartupError( return Err(Error::ServerStartupError(
@@ -748,13 +573,12 @@ impl Server {
} }
}; };
let key = bytes.read_string().unwrap();
let value = bytes.read_string().unwrap();
// Save the parameter so we can pass it to the client later. // Save the parameter so we can pass it to the client later.
// These can be server_encoding, client_encoding, server timezone, Postgres version, // These can be server_encoding, client_encoding, server timezone, Postgres version,
// and many more interesting things we should know about the Postgres server we are talking to. // and many more interesting things we should know about the Postgres server we are talking to.
server_parameters.set_param(key, value, true); server_info.put_u8(b'S');
server_info.put_i32(len);
server_info.put_slice(&param[..]);
} }
// BackendKeyData // BackendKeyData
@@ -796,23 +620,22 @@ impl Server {
} }
}; };
let server = Server { let mut server = Server {
address: address.clone(), address: address.clone(),
stream: BufStream::new(stream), stream: BufStream::new(stream),
buffer: BytesMut::with_capacity(8196), buffer: BytesMut::with_capacity(8196),
server_parameters, server_info,
process_id, process_id,
secret_key, secret_key,
in_transaction: false, in_transaction: false,
in_copy_mode: false,
data_available: false, data_available: false,
bad: false, bad: false,
cleanup_state: CleanupState::new(), needs_cleanup: false,
client_server_map, client_server_map,
addr_set, addr_set,
connected_at: chrono::offset::Utc::now().naive_utc(), connected_at: chrono::offset::Utc::now().naive_utc(),
stats, stats,
application_name: "pgcat".to_string(), application_name: String::new(),
last_activity: SystemTime::now(), last_activity: SystemTime::now(),
mirror_manager: match address.mirrors.len() { mirror_manager: match address.mirrors.len() {
0 => None, 0 => None,
@@ -822,17 +645,10 @@ impl Server {
address.mirrors.clone(), address.mirrors.clone(),
)), )),
}, },
cleanup_connections,
log_client_parameter_status_changes,
prepared_statement_cache: match prepared_statement_cache_size {
0 => None,
_ => Some(LruCache::new(
NonZeroUsize::new(prepared_statement_cache_size).unwrap(),
)),
},
registering_prepared_statement: VecDeque::new(),
}; };
server.set_name("pgcat").await?;
return Ok(server); return Ok(server);
} }
@@ -882,17 +698,14 @@ impl Server {
self.mirror_send(messages); self.mirror_send(messages);
self.stats().data_sent(messages.len()); self.stats().data_sent(messages.len());
match write_all_flush(&mut self.stream, messages).await { match write_all_flush(&mut self.stream, &messages).await {
Ok(_) => { Ok(_) => {
// Successfully sent to server // Successfully sent to server
self.last_activity = SystemTime::now(); self.last_activity = SystemTime::now();
Ok(()) Ok(())
} }
Err(err) => { Err(err) => {
error!( error!("Terminating server because of: {:?}", err);
"Terminating server {:?} because of: {:?}",
self.address, err
);
self.bad = true; self.bad = true;
Err(err) Err(err)
} }
@@ -902,18 +715,12 @@ impl Server {
/// Receive data from the server in response to a client request. /// Receive data from the server in response to a client request.
/// This method must be called multiple times while `self.is_data_available()` is true /// This method must be called multiple times while `self.is_data_available()` is true
/// in order to receive all data the server has to offer. /// in order to receive all data the server has to offer.
pub async fn recv( pub async fn recv(&mut self) -> Result<BytesMut, Error> {
&mut self,
mut client_server_parameters: Option<&mut ServerParameters>,
) -> Result<BytesMut, Error> {
loop { loop {
let mut message = match read_message(&mut self.stream).await { let mut message = match read_message(&mut self.stream).await {
Ok(message) => message, Ok(message) => message,
Err(err) => { Err(err) => {
error!( error!("Terminating server because of: {:?}", err);
"Terminating server {:?} because of: {:?}",
self.address, err
);
self.bad = true; self.bad = true;
return Err(err); return Err(err);
} }
@@ -960,73 +767,32 @@ impl Server {
// There is no more data available from the server. // There is no more data available from the server.
self.data_available = false; self.data_available = false;
break; break;
} }
// ErrorResponse
'E' => {
if self.in_copy_mode {
self.in_copy_mode = false;
}
// Remove the prepared statement from the cache, it has a syntax error or something else bad happened.
if let Some(prepared_stmt_name) =
self.registering_prepared_statement.pop_front()
{
if let Some(ref mut cache) = self.prepared_statement_cache {
if let Some(_removed) = cache.pop(&prepared_stmt_name) {
debug!(
"Removed {} from prepared statement cache",
prepared_stmt_name
);
} else {
// Shouldn't happen.
debug!("Prepared statement {} was not cached", prepared_stmt_name);
}
}
}
if self.prepared_statement_cache.is_some() {
let error_message = PgErrorMsg::parse(&message)?;
if error_message.message == "cached plan must not change result type" {
warn!("Server {:?} changed schema, dropping connection to clean up prepared statements", self.address);
// This will still result in an error to the client, but this server connection will drop all cached prepared statements
// so that any new queries will be re-prepared
// TODO: Other ideas to solve errors when there are DDL changes after a statement has been prepared
// - Recreate entire connection pool to force recreation of all server connections
// - Clear the ConnectionPool's statement cache so that new statement names are generated
// - Implement a retry (re-prepare) so the client doesn't see an error
self.cleanup_state.needs_cleanup_prepare = true;
}
}
}
// CommandComplete // CommandComplete
'C' => { 'C' => {
if self.in_copy_mode { let mut command_tag = String::new();
self.in_copy_mode = false; match message.reader().read_to_string(&mut command_tag) {
} Ok(_) => {
match message.read_string() {
Ok(command) => {
// Non-exhaustive list of commands that are likely to change session variables/resources // Non-exhaustive list of commands that are likely to change session variables/resources
// which can leak between clients. This is a best effort to block bad clients // which can leak between clients. This is a best effort to block bad clients
// from poisoning a transaction-mode pool by setting inappropriate session variables // from poisoning a transaction-mode pool by setting inappropriate session variables
match command.as_str() { match command_tag.as_str() {
"SET" => { "SET\0" => {
// We don't detect set statements in transactions // We don't detect set statements in transactions
// No great way to differentiate between set and set local // No great way to differentiate between set and set local
// As a result, we will miss cases when set statements are used in transactions // As a result, we will miss cases when set statements are used in transactions
// This will reduce amount of reset statements sent // This will reduce amount of discard statements sent
if !self.in_transaction { if !self.in_transaction {
debug!("Server connection marked for clean up"); debug!("Server connection marked for clean up");
self.cleanup_state.needs_cleanup_set = true; self.needs_cleanup = true;
} }
} }
"PREPARE\0" => {
"PREPARE" => {
debug!("Server connection marked for clean up"); debug!("Server connection marked for clean up");
self.cleanup_state.needs_cleanup_prepare = true; self.needs_cleanup = true;
} }
_ => (), _ => (),
} }
@@ -1038,20 +804,6 @@ impl Server {
} }
} }
'S' => {
let key = message.read_string().unwrap();
let value = message.read_string().unwrap();
if let Some(client_server_parameters) = client_server_parameters.as_mut() {
client_server_parameters.set_param(key.clone(), value.clone(), false);
if self.log_client_parameter_status_changes {
info!("Client parameter status change: {} = {}", key, value)
}
}
self.server_parameters.set_param(key, value, false);
}
// DataRow // DataRow
'D' => { 'D' => {
// More data is available after this message, this is not the end of the reply. // More data is available after this message, this is not the end of the reply.
@@ -1064,14 +816,10 @@ impl Server {
} }
// CopyInResponse: copy is starting from client to server. // CopyInResponse: copy is starting from client to server.
'G' => { 'G' => break,
self.in_copy_mode = true;
break;
}
// CopyOutResponse: copy is starting from the server to the client. // CopyOutResponse: copy is starting from the server to the client.
'H' => { 'H' => {
self.in_copy_mode = true;
self.data_available = true; self.data_available = true;
break; break;
} }
@@ -1088,11 +836,6 @@ impl Server {
// Buffer until ReadyForQuery shows up, so don't exit the loop yet. // Buffer until ReadyForQuery shows up, so don't exit the loop yet.
'c' => (), 'c' => (),
// Parse complete successfully
'1' => {
self.registering_prepared_statement.pop_front();
}
// Anything else, e.g. errors, notices, etc. // Anything else, e.g. errors, notices, etc.
// Keep buffering until ReadyForQuery shows up. // Keep buffering until ReadyForQuery shows up.
_ => (), _ => (),
@@ -1114,105 +857,6 @@ impl Server {
Ok(bytes) Ok(bytes)
} }
// Determines if the server already has a prepared statement with the given name
// Increments the prepared statement cache hit counter
pub fn has_prepared_statement(&mut self, name: &str) -> bool {
let cache = match &mut self.prepared_statement_cache {
Some(cache) => cache,
None => return false,
};
let has_it = cache.get(name).is_some();
if has_it {
self.stats.prepared_cache_hit();
} else {
self.stats.prepared_cache_miss();
}
has_it
}
fn add_prepared_statement_to_cache(&mut self, name: &str) -> Option<String> {
let cache = match &mut self.prepared_statement_cache {
Some(cache) => cache,
None => return None,
};
self.stats.prepared_cache_add();
// If we evict something, we need to close it on the server
if let Some((evicted_name, _)) = cache.push(name.to_string(), ()) {
if evicted_name != name {
debug!(
"Evicted prepared statement {} from cache, replaced with {}",
evicted_name, name
);
return Some(evicted_name);
}
};
None
}
fn remove_prepared_statement_from_cache(&mut self, name: &str) {
let cache = match &mut self.prepared_statement_cache {
Some(cache) => cache,
None => return,
};
self.stats.prepared_cache_remove();
cache.pop(name);
}
pub async fn register_prepared_statement(
&mut self,
parse: &Parse,
should_send_parse_to_server: bool,
) -> Result<(), Error> {
if !self.has_prepared_statement(&parse.name) {
self.registering_prepared_statement
.push_back(parse.name.clone());
let mut bytes = BytesMut::new();
if should_send_parse_to_server {
let parse_bytes: BytesMut = parse.try_into()?;
bytes.extend_from_slice(&parse_bytes);
}
// If we evict something, we need to close it on the server
// We do this by adding it to the messages we're sending to the server before the sync
if let Some(evicted_name) = self.add_prepared_statement_to_cache(&parse.name) {
self.remove_prepared_statement_from_cache(&evicted_name);
let close_bytes: BytesMut = Close::new(&evicted_name).try_into()?;
bytes.extend_from_slice(&close_bytes);
};
// If we have a parse or close we need to send to the server, send them and sync
if !bytes.is_empty() {
bytes.extend_from_slice(&sync());
self.send(&bytes).await?;
loop {
self.recv(None).await?;
if !self.is_data_available() {
break;
}
}
}
};
// If it's still not there, registration failed, most likely due to a
// syntax or permissions error on the server.
if !self.has_prepared_statement(&parse.name) {
Err(Error::PreparedStatementError)
} else {
Ok(())
}
}
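
`register_prepared_statement` above relies on the `lru` crate's `push` returning the evicted (key, value) pair once the cache is full; that return value is what triggers the Close message for the evicted statement. A small sketch of that semantic (statement names are made up):

use lru::LruCache;
use std::num::NonZeroUsize;

fn main() {
    let mut cache: LruCache<String, ()> = LruCache::new(NonZeroUsize::new(2).unwrap());

    assert!(cache.push("stmt_1".to_string(), ()).is_none());
    assert!(cache.push("stmt_2".to_string(), ()).is_none());

    // The cache is full, so a third push evicts the least recently used
    // entry; the pooler must then Close it on the server connection.
    let evicted = cache.push("stmt_3".to_string(), ());
    assert_eq!(evicted, Some(("stmt_1".to_string(), ())));
}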
/// If the server is still inside a transaction. /// If the server is still inside a transaction.
/// If the client disconnects while the server is in a transaction, we will clean it up. /// If the client disconnects while the server is in a transaction, we will clean it up.
pub fn in_transaction(&self) -> bool { pub fn in_transaction(&self) -> bool {
@@ -1220,11 +864,6 @@ impl Server {
self.in_transaction self.in_transaction
} }
/// Currently copying data from client to server or vice-versa.
pub fn in_copy_mode(&self) -> bool {
self.in_copy_mode
}
/// We don't buffer all of server responses, e.g. COPY OUT produces too much data. /// We don't buffer all of server responses, e.g. COPY OUT produces too much data.
/// The client is responsible to call `self.recv()` while this method returns true. /// The client is responsible to call `self.recv()` while this method returns true.
pub fn is_data_available(&self) -> bool { pub fn is_data_available(&self) -> bool {
@@ -1254,33 +893,14 @@ impl Server {
} }
/// Get server startup information to forward it to the client. /// Get server startup information to forward it to the client.
pub fn server_parameters(&self) -> ServerParameters { /// Not used at the moment.
self.server_parameters.clone() pub fn server_info(&self) -> BytesMut {
} self.server_info.clone()
pub async fn sync_parameters(&mut self, parameters: &ServerParameters) -> Result<(), Error> {
let parameter_diff = self.server_parameters.compare_params(parameters);
if parameter_diff.is_empty() {
return Ok(());
}
let mut query = String::from("");
for (key, value) in parameter_diff {
query.push_str(&format!("SET {} TO '{}';", key, value));
}
let res = self.query(&query).await;
self.cleanup_state.reset();
res
} }
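
`sync_parameters` above replays only the tracked parameters that differ between the client's view and the server's as one batched string of SET statements. A simplified sketch of that diff-then-SET step over plain HashMaps (parameter names and values are examples):

use std::collections::HashMap;

fn sync_query(server: &HashMap<String, String>, client: &HashMap<String, String>) -> String {
    let mut query = String::new();
    for (key, client_value) in client {
        // Only replay parameters the server tracks and that actually differ.
        if server.get(key).map(|v| v != client_value).unwrap_or(false) {
            query.push_str(&format!("SET {} TO '{}';", key, client_value));
        }
    }
    query
}

fn main() {
    let server = HashMap::from([("TimeZone".to_string(), "Etc/UTC".to_string())]);
    let client = HashMap::from([("TimeZone".to_string(), "US/Pacific".to_string())]);
    assert_eq!(sync_query(&server, &client), "SET TimeZone TO 'US/Pacific';");
}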
/// Indicate that this server connection cannot be re-used and must be discarded. /// Indicate that this server connection cannot be re-used and must be discarded.
pub fn mark_bad(&mut self, reason: &str) { pub fn mark_bad(&mut self) {
error!("Server {:?} marked bad, reason: {}", self.address, reason); error!("Server {:?} marked bad", self.address);
self.bad = true; self.bad = true;
} }
@@ -1302,14 +922,12 @@ impl Server {
/// It will use the simple query protocol. /// It will use the simple query protocol.
/// Result will not be returned, so this is useful for things like `SET` or `ROLLBACK`. /// Result will not be returned, so this is useful for things like `SET` or `ROLLBACK`.
pub async fn query(&mut self, query: &str) -> Result<(), Error> { pub async fn query(&mut self, query: &str) -> Result<(), Error> {
debug!("Running `{}` on server {:?}", query, self.address);
let query = simple_query(query); let query = simple_query(query);
self.send(&query).await?; self.send(&query).await?;
loop { loop {
let _ = self.recv(None).await?; let _ = self.recv().await?;
if !self.data_available { if !self.data_available {
break; break;
@@ -1327,42 +945,42 @@ impl Server {
// server connection thrashing if clients repeatedly do this. // server connection thrashing if clients repeatedly do this.
// Instead, we ROLLBACK that transaction before putting the connection back in the pool // Instead, we ROLLBACK that transaction before putting the connection back in the pool
if self.in_transaction() { if self.in_transaction() {
warn!(target: "pgcat::server::cleanup", "Server returned while still in transaction, rolling back transaction"); warn!("Server returned while still in transaction, rolling back transaction");
self.query("ROLLBACK").await?; self.query("ROLLBACK").await?;
} }
// Client disconnected but it performed session-altering operations such as // Client disconnected but it performed session-altering operations such as
// SET statement_timeout to 1 or create a prepared statement. We clear that // SET statement_timeout to 1 or create a prepared statement. We clear that
// to avoid leaking state between clients. For performance reasons we only // to avoid leaking state between clients. For performance reasons we only
// send `RESET ALL` if we think the session is altered instead of just sending // send `DISCARD ALL` if we think the session is altered instead of just sending
// it before each checkin. // it before each checkin.
if self.cleanup_state.needs_cleanup() && self.cleanup_connections { if self.needs_cleanup {
info!(target: "pgcat::server::cleanup", "Server returned with session state altered, discarding state ({}) for application {}", self.cleanup_state, self.application_name); warn!("Server returned with session state altered, discarding state");
let mut reset_string = String::from("RESET ROLE;"); self.query("DISCARD ALL").await?;
self.needs_cleanup = false;
if self.cleanup_state.needs_cleanup_set {
reset_string.push_str("RESET ALL;");
};
if self.cleanup_state.needs_cleanup_prepare {
reset_string.push_str("DEALLOCATE ALL;");
// Since we deallocated all prepared statements, we need to clear the cache
if let Some(cache) = &mut self.prepared_statement_cache {
cache.clear();
}
};
self.query(&reset_string).await?;
self.cleanup_state.reset();
}
if self.in_copy_mode() {
warn!(target: "pgcat::server::cleanup", "Server returned while still in copy-mode");
} }
Ok(()) Ok(())
} }
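
The checkin path above only pays for cleanup when the session was actually altered: RESET ROLE is always prefixed, and RESET ALL / DEALLOCATE ALL are appended per CleanupState flag. A sketch of that assembly:

fn reset_query(needs_cleanup_set: bool, needs_cleanup_prepare: bool) -> String {
    let mut query = String::from("RESET ROLE;");
    if needs_cleanup_set {
        query.push_str("RESET ALL;");
    }
    if needs_cleanup_prepare {
        query.push_str("DEALLOCATE ALL;");
    }
    query
}

fn main() {
    // A client that ran both SET and PREPARE gets the full cleanup string.
    assert_eq!(
        reset_query(true, true),
        "RESET ROLE;RESET ALL;DEALLOCATE ALL;"
    );
}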
/// A shorthand for `SET application_name = $1`.
pub async fn set_name(&mut self, name: &str) -> Result<(), Error> {
if self.application_name != name {
self.application_name = name.to_string();
// We don't want `SET application_name` to mark the server connection
// as needing cleanup
let needs_cleanup_before = self.needs_cleanup;
let result = Ok(self
.query(&format!("SET application_name = '{}'", name))
.await?);
self.needs_cleanup = needs_cleanup_before;
result
} else {
Ok(())
}
}
/// get Server stats /// get Server stats
pub fn stats(&self) -> Arc<ServerStats> { pub fn stats(&self) -> Arc<ServerStats> {
self.stats.clone() self.stats.clone()
@@ -1379,20 +997,22 @@ impl Server {
self.last_activity self.last_activity
} }
// Marks a connection as needing cleanup at checkin // Marks a connection as needing DISCARD ALL at checkin
pub fn mark_dirty(&mut self) { pub fn mark_dirty(&mut self) {
self.cleanup_state.set_true(); self.needs_cleanup = true;
} }
pub fn mirror_send(&mut self, bytes: &BytesMut) { pub fn mirror_send(&mut self, bytes: &BytesMut) {
if let Some(manager) = self.mirror_manager.as_mut() { match self.mirror_manager.as_mut() {
manager.send(bytes) Some(manager) => manager.send(bytes),
None => (),
} }
} }
pub fn mirror_disconnect(&mut self) { pub fn mirror_disconnect(&mut self) {
if let Some(manager) = self.mirror_manager.as_mut() { match self.mirror_manager.as_mut() {
manager.disconnect() Some(manager) => manager.disconnect(),
None => (),
} }
} }
@@ -1413,16 +1033,13 @@ impl Server {
client_server_map, client_server_map,
Arc::new(ServerStats::default()), Arc::new(ServerStats::default()),
Arc::new(RwLock::new(None)), Arc::new(RwLock::new(None)),
true,
false,
0,
) )
.await?; .await?;
debug!("Connected!, sending query."); debug!("Connected!, sending query.");
server.send(&simple_query(query)).await?; server.send(&simple_query(query)).await?;
let mut message = server.recv(None).await?; let mut message = server.recv().await?;
parse_query_message(&mut message).await Ok(parse_query_message(&mut message).await?)
} }
} }
@@ -1518,18 +1135,14 @@ impl Drop for Server {
_ => debug!("Dirty shutdown"), _ => debug!("Dirty shutdown"),
}; };
// Should not matter.
self.bad = true;
let now = chrono::offset::Utc::now().naive_utc(); let now = chrono::offset::Utc::now().naive_utc();
let duration = now - self.connected_at; let duration = now - self.connected_at;
let message = if self.bad {
"Server connection terminated"
} else {
"Server connection closed"
};
info!( info!(
"{} {:?}, session duration: {}", "Server connection closed {:?}, session duration: {}",
message,
self.address, self.address,
crate::format_duration(&duration) crate::format_duration(&duration)
); );

View File

@@ -14,11 +14,11 @@ pub enum ShardingFunction {
Sha1, Sha1,
} }
impl std::fmt::Display for ShardingFunction { impl ToString for ShardingFunction {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn to_string(&self) -> String {
match self { match *self {
ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"), ShardingFunction::PgBigintHash => "pg_bigint_hash".to_string(),
ShardingFunction::Sha1 => write!(f, "sha1"), ShardingFunction::Sha1 => "sha1".to_string(),
} }
} }
} }
@@ -64,7 +64,7 @@ impl Sharder {
fn sha1(&self, key: i64) -> usize { fn sha1(&self, key: i64) -> usize {
let mut hasher = Sha1::new(); let mut hasher = Sha1::new();
hasher.update(key.to_string().as_bytes()); hasher.update(&key.to_string().as_bytes());
let result = hasher.finalize(); let result = hasher.finalize();
@@ -202,10 +202,10 @@ mod test {
#[test] #[test]
fn test_sha1_hash() { fn test_sha1_hash() {
let sharder = Sharder::new(12, ShardingFunction::Sha1); let sharder = Sharder::new(12, ShardingFunction::Sha1);
let ids = [ let ids = vec![
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
]; ];
let shards = [ let shards = vec![
4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3, 4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
]; ];
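
The sha1 sharder above hashes the key's decimal string and maps the digest onto a shard index; the test vectors list the expected shard for keys 0 through 19 with 12 shards. An illustrative sketch using the `sha1` crate; how the 20-byte digest is reduced to an index (leading 8 bytes as a big-endian u64, then modulo) is an assumption here, so its outputs need not match the vectors above.

use sha1::{Digest, Sha1};

fn sha1_shard(key: i64, shards: usize) -> usize {
    let mut hasher = Sha1::new();
    hasher.update(key.to_string().as_bytes());
    let digest = hasher.finalize();
    // Assumed reduction: interpret the first 8 digest bytes as a u64.
    let n = u64::from_be_bytes(digest[..8].try_into().unwrap());
    (n % shards as u64) as usize
}

fn main() {
    for key in 0..5i64 {
        println!("key {} -> shard {}", key, sha1_shard(key, 12));
    }
}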

View File

@@ -1,3 +1,4 @@
use crate::pool::PoolIdentifier;
/// Statistics and reporting. /// Statistics and reporting.
use arc_swap::ArcSwap; use arc_swap::ArcSwap;
@@ -15,11 +16,13 @@ pub mod pool;
pub mod server; pub mod server;
pub use address::AddressStats; pub use address::AddressStats;
pub use client::{ClientState, ClientStats}; pub use client::{ClientState, ClientStats};
pub use pool::PoolStats;
pub use server::{ServerState, ServerStats}; pub use server::{ServerState, ServerStats};
/// Convenience types for various stats /// Convenience types for various stats
type ClientStatesLookup = HashMap<i32, Arc<ClientStats>>; type ClientStatesLookup = HashMap<i32, Arc<ClientStats>>;
type ServerStatesLookup = HashMap<i32, Arc<ServerStats>>; type ServerStatesLookup = HashMap<i32, Arc<ServerStats>>;
type PoolStatsLookup = HashMap<(String, String), Arc<PoolStats>>;
/// Stats for individual client connections /// Stats for individual client connections
/// Used in SHOW CLIENTS. /// Used in SHOW CLIENTS.
@@ -31,6 +34,11 @@ static CLIENT_STATS: Lazy<Arc<RwLock<ClientStatesLookup>>> =
static SERVER_STATS: Lazy<Arc<RwLock<ServerStatesLookup>>> = static SERVER_STATS: Lazy<Arc<RwLock<ServerStatesLookup>>> =
Lazy::new(|| Arc::new(RwLock::new(ServerStatesLookup::default()))); Lazy::new(|| Arc::new(RwLock::new(ServerStatesLookup::default())));
/// Aggregate stats for each pool (a pool is identified by database name and username)
/// Used in SHOW POOLS.
static POOL_STATS: Lazy<Arc<RwLock<PoolStatsLookup>>> =
Lazy::new(|| Arc::new(RwLock::new(PoolStatsLookup::default())));
/// The statistics reporter. An instance is given to each possible source of statistics, /// The statistics reporter. An instance is given to each possible source of statistics,
/// e.g. client stats, server stats, connection pool stats. /// e.g. client stats, server stats, connection pool stats.
pub static REPORTER: Lazy<ArcSwap<Reporter>> = pub static REPORTER: Lazy<ArcSwap<Reporter>> =
@@ -72,6 +80,13 @@ impl Reporter {
fn server_disconnecting(&self, server_id: i32) { fn server_disconnecting(&self, server_id: i32) {
SERVER_STATS.write().remove(&server_id); SERVER_STATS.write().remove(&server_id);
} }
/// Register a pool with the stats system.
fn pool_register(&self, identifier: PoolIdentifier, stats: Arc<PoolStats>) {
POOL_STATS
.write()
.insert((identifier.db, identifier.user), stats);
}
} }
/// The statistics collector which used for calculating averages /// The statistics collector which used for calculating averages
@@ -92,20 +107,8 @@ impl Collector {
loop { loop {
interval.tick().await; interval.tick().await;
// Hold read lock for duration of update to retain all server stats for stats in SERVER_STATS.read().values() {
let server_stats = SERVER_STATS.read();
for stats in server_stats.values() {
if !stats.check_address_stat_average_is_updated_status() {
stats.address_stats().update_averages(); stats.address_stats().update_averages();
stats.address_stats().reset_current_counts();
stats.set_address_stat_average_is_updated_status(true);
}
}
// Reset to false for next update
for stats in server_stats.values() {
stats.set_address_stat_average_is_updated_status(false);
} }
} }
}); });
@@ -124,6 +127,12 @@ pub fn get_server_stats() -> ServerStatesLookup {
SERVER_STATS.read().clone() SERVER_STATS.read().clone()
} }
/// Get a snapshot of pool statistics. Updated once a second
/// by the `Collector`.
pub fn get_pool_stats() -> PoolStatsLookup {
POOL_STATS.read().clone()
}
/// Get the statistics reporter used to update stats across the pools/clients. /// Get the statistics reporter used to update stats across the pools/clients.
pub fn get_reporter() -> Reporter { pub fn get_reporter() -> Reporter {
(*(*REPORTER.load())).clone() (*(*REPORTER.load())).clone()

View File

@@ -1,29 +1,26 @@
use log::warn;
use std::sync::atomic::*; use std::sync::atomic::*;
use std::sync::Arc; use std::sync::Arc;
#[derive(Debug, Clone, Default)]
struct AddressStatFields {
xact_count: Arc<AtomicU64>,
query_count: Arc<AtomicU64>,
bytes_received: Arc<AtomicU64>,
bytes_sent: Arc<AtomicU64>,
xact_time: Arc<AtomicU64>,
query_time: Arc<AtomicU64>,
wait_time: Arc<AtomicU64>,
errors: Arc<AtomicU64>,
}
/// Internal address stats /// Internal address stats
#[derive(Debug, Clone, Default)] #[derive(Debug, Clone, Default)]
pub struct AddressStats { pub struct AddressStats {
total: AddressStatFields, pub total_xact_count: Arc<AtomicU64>,
pub total_query_count: Arc<AtomicU64>,
current: AddressStatFields, pub total_received: Arc<AtomicU64>,
pub total_sent: Arc<AtomicU64>,
averages: AddressStatFields, pub total_xact_time: Arc<AtomicU64>,
pub total_query_time: Arc<AtomicU64>,
// Determines if the averages have been updated since the last time they were reported pub total_wait_time: Arc<AtomicU64>,
pub averages_updated: Arc<AtomicBool>, pub total_errors: Arc<AtomicU64>,
pub avg_query_count: Arc<AtomicU64>,
pub avg_query_time: Arc<AtomicU64>,
pub avg_recv: Arc<AtomicU64>,
pub avg_sent: Arc<AtomicU64>,
pub avg_errors: Arc<AtomicU64>,
pub avg_xact_time: Arc<AtomicU64>,
pub avg_xact_count: Arc<AtomicU64>,
pub avg_wait_time: Arc<AtomicU64>,
} }
impl IntoIterator for AddressStats { impl IntoIterator for AddressStats {
@@ -34,67 +31,67 @@ impl IntoIterator for AddressStats {
vec![ vec![
( (
"total_xact_count".to_string(), "total_xact_count".to_string(),
self.total.xact_count.load(Ordering::Relaxed), self.total_xact_count.load(Ordering::Relaxed),
), ),
( (
"total_query_count".to_string(), "total_query_count".to_string(),
self.total.query_count.load(Ordering::Relaxed), self.total_query_count.load(Ordering::Relaxed),
), ),
( (
"total_received".to_string(), "total_received".to_string(),
self.total.bytes_received.load(Ordering::Relaxed), self.total_received.load(Ordering::Relaxed),
), ),
( (
"total_sent".to_string(), "total_sent".to_string(),
self.total.bytes_sent.load(Ordering::Relaxed), self.total_sent.load(Ordering::Relaxed),
), ),
( (
"total_xact_time".to_string(), "total_xact_time".to_string(),
self.total.xact_time.load(Ordering::Relaxed), self.total_xact_time.load(Ordering::Relaxed),
), ),
( (
"total_query_time".to_string(), "total_query_time".to_string(),
self.total.query_time.load(Ordering::Relaxed), self.total_query_time.load(Ordering::Relaxed),
), ),
( (
"total_wait_time".to_string(), "total_wait_time".to_string(),
self.total.wait_time.load(Ordering::Relaxed), self.total_wait_time.load(Ordering::Relaxed),
), ),
( (
"total_errors".to_string(), "total_errors".to_string(),
self.total.errors.load(Ordering::Relaxed), self.total_errors.load(Ordering::Relaxed),
), ),
( (
"avg_xact_count".to_string(), "avg_xact_count".to_string(),
self.averages.xact_count.load(Ordering::Relaxed), self.avg_xact_count.load(Ordering::Relaxed),
), ),
( (
"avg_query_count".to_string(), "avg_query_count".to_string(),
self.averages.query_count.load(Ordering::Relaxed), self.avg_query_count.load(Ordering::Relaxed),
), ),
( (
"avg_recv".to_string(), "avg_recv".to_string(),
self.averages.bytes_received.load(Ordering::Relaxed), self.avg_recv.load(Ordering::Relaxed),
), ),
( (
"avg_sent".to_string(), "avg_sent".to_string(),
self.averages.bytes_sent.load(Ordering::Relaxed), self.avg_sent.load(Ordering::Relaxed),
), ),
( (
"avg_errors".to_string(), "avg_errors".to_string(),
self.averages.errors.load(Ordering::Relaxed), self.avg_errors.load(Ordering::Relaxed),
), ),
( (
"avg_xact_time".to_string(), "avg_xact_time".to_string(),
self.averages.xact_time.load(Ordering::Relaxed), self.avg_xact_time.load(Ordering::Relaxed),
), ),
( (
"avg_query_time".to_string(), "avg_query_time".to_string(),
self.averages.query_time.load(Ordering::Relaxed), self.avg_query_time.load(Ordering::Relaxed),
), ),
( (
"avg_wait_time".to_string(), "avg_wait_time".to_string(),
self.averages.wait_time.load(Ordering::Relaxed), self.avg_wait_time.load(Ordering::Relaxed),
), ),
] ]
.into_iter() .into_iter()
@@ -102,120 +99,22 @@ impl IntoIterator for AddressStats {
} }
impl AddressStats { impl AddressStats {
pub fn xact_count_add(&self) {
self.total.xact_count.fetch_add(1, Ordering::Relaxed);
self.current.xact_count.fetch_add(1, Ordering::Relaxed);
}
pub fn query_count_add(&self) {
self.total.query_count.fetch_add(1, Ordering::Relaxed);
self.current.query_count.fetch_add(1, Ordering::Relaxed);
}
pub fn bytes_received_add(&self, bytes: u64) {
self.total
.bytes_received
.fetch_add(bytes, Ordering::Relaxed);
self.current
.bytes_received
.fetch_add(bytes, Ordering::Relaxed);
}
pub fn bytes_sent_add(&self, bytes: u64) {
self.total.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
self.current.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
}
pub fn xact_time_add(&self, time: u64) {
self.total.xact_time.fetch_add(time, Ordering::Relaxed);
self.current.xact_time.fetch_add(time, Ordering::Relaxed);
}
pub fn query_time_add(&self, time: u64) {
self.total.query_time.fetch_add(time, Ordering::Relaxed);
self.current.query_time.fetch_add(time, Ordering::Relaxed);
}
pub fn wait_time_add(&self, time: u64) {
self.total.wait_time.fetch_add(time, Ordering::Relaxed);
self.current.wait_time.fetch_add(time, Ordering::Relaxed);
}
pub fn error(&self) { pub fn error(&self) {
self.total.errors.fetch_add(1, Ordering::Relaxed); self.total_errors.fetch_add(1, Ordering::Relaxed);
self.current.errors.fetch_add(1, Ordering::Relaxed);
} }
pub fn update_averages(&self) { pub fn update_averages(&self) {
let stat_period_per_second = crate::stats::STAT_PERIOD / 1_000; let (totals, averages) = self.fields_iterators();
for data in totals.iter().zip(averages.iter()) {
// xact_count let (total, average) = data;
let current_xact_count = self.current.xact_count.load(Ordering::Relaxed); if let Err(err) = average.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |avg| {
let current_xact_time = self.current.xact_time.load(Ordering::Relaxed); let total = total.load(Ordering::Relaxed);
self.averages.xact_count.store( let avg = (total - avg) / (crate::stats::STAT_PERIOD / 1_000); // Avg / second
current_xact_count / stat_period_per_second, Some(avg)
Ordering::Relaxed, }) {
); warn!("Could not update averages for addresses stats, {:?}", err);
if current_xact_count == 0 {
self.averages.xact_time.store(0, Ordering::Relaxed);
} else {
self.averages
.xact_time
.store(current_xact_time / current_xact_count, Ordering::Relaxed);
} }
// query_count
let current_query_count = self.current.query_count.load(Ordering::Relaxed);
let current_query_time = self.current.query_time.load(Ordering::Relaxed);
self.averages.query_count.store(
current_query_count / stat_period_per_second,
Ordering::Relaxed,
);
if current_query_count == 0 {
self.averages.query_time.store(0, Ordering::Relaxed);
} else {
self.averages
.query_time
.store(current_query_time / current_query_count, Ordering::Relaxed);
} }
// bytes_received
let current_bytes_received = self.current.bytes_received.load(Ordering::Relaxed);
self.averages.bytes_received.store(
current_bytes_received / stat_period_per_second,
Ordering::Relaxed,
);
// bytes_sent
let current_bytes_sent = self.current.bytes_sent.load(Ordering::Relaxed);
self.averages.bytes_sent.store(
current_bytes_sent / stat_period_per_second,
Ordering::Relaxed,
);
// wait_time
let current_wait_time = self.current.wait_time.load(Ordering::Relaxed);
self.averages.wait_time.store(
current_wait_time / stat_period_per_second,
Ordering::Relaxed,
);
// errors
let current_errors = self.current.errors.load(Ordering::Relaxed);
self.averages
.errors
.store(current_errors / stat_period_per_second, Ordering::Relaxed);
}
pub fn reset_current_counts(&self) {
self.current.xact_count.store(0, Ordering::Relaxed);
self.current.xact_time.store(0, Ordering::Relaxed);
self.current.query_count.store(0, Ordering::Relaxed);
self.current.query_time.store(0, Ordering::Relaxed);
self.current.bytes_received.store(0, Ordering::Relaxed);
self.current.bytes_sent.store(0, Ordering::Relaxed);
self.current.wait_time.store(0, Ordering::Relaxed);
self.current.errors.store(0, Ordering::Relaxed);
} }
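
`update_averages` above turns each window's counters into per-second rates (count divided by STAT_PERIOD / 1_000) and per-event times (time divided by count, guarding the zero case). A worked sketch assuming a 15-second STAT_PERIOD: 45 queries taking 90,000 µs in one window average out to 3 queries/s at 2,000 µs each.

// Milliseconds; the 15_000 value is an assumption for this sketch.
const STAT_PERIOD: u64 = 15_000;

fn averages(query_count: u64, query_time_us: u64) -> (u64, u64) {
    let period_seconds = STAT_PERIOD / 1_000;
    let avg_query_count = query_count / period_seconds;
    // Average time per query, guarding division by zero like the code above.
    let avg_query_time = if query_count == 0 {
        0
    } else {
        query_time_us / query_count
    };
    (avg_query_count, avg_query_time)
}

fn main() {
    assert_eq!(averages(45, 90_000), (3, 2_000));
}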
pub fn populate_row(&self, row: &mut Vec<String>) { pub fn populate_row(&self, row: &mut Vec<String>) {
@@ -223,4 +122,28 @@ impl AddressStats {
row.push(value.to_string()); row.push(value.to_string());
} }
} }
fn fields_iterators(&self) -> (Vec<Arc<AtomicU64>>, Vec<Arc<AtomicU64>>) {
let mut totals: Vec<Arc<AtomicU64>> = Vec::new();
let mut averages: Vec<Arc<AtomicU64>> = Vec::new();
totals.push(self.total_xact_count.clone());
averages.push(self.avg_xact_count.clone());
totals.push(self.total_query_count.clone());
averages.push(self.avg_query_count.clone());
totals.push(self.total_received.clone());
averages.push(self.avg_recv.clone());
totals.push(self.total_sent.clone());
averages.push(self.avg_sent.clone());
totals.push(self.total_xact_time.clone());
averages.push(self.avg_xact_time.clone());
totals.push(self.total_query_time.clone());
averages.push(self.avg_query_time.clone());
totals.push(self.total_wait_time.clone());
averages.push(self.avg_wait_time.clone());
totals.push(self.total_errors.clone());
averages.push(self.avg_errors.clone());
(totals, averages)
}
} }

View File

@@ -1,3 +1,4 @@
use super::PoolStats;
use super::{get_reporter, Reporter}; use super::{get_reporter, Reporter};
use atomic_enum::atomic_enum; use atomic_enum::atomic_enum;
use std::sync::atomic::*; use std::sync::atomic::*;
@@ -33,19 +34,12 @@ pub struct ClientStats {
pool_name: String, pool_name: String,
connect_time: Instant, connect_time: Instant,
pool_stats: Arc<PoolStats>,
reporter: Reporter, reporter: Reporter,
/// Total time spent waiting for a connection from pool, measured in microseconds /// Total time spent waiting for a connection from pool, measured in microseconds
pub total_wait_time: Arc<AtomicU64>, pub total_wait_time: Arc<AtomicU64>,
/// Maximum time spent waiting for a connection from pool, measured in microseconds
pub max_wait_time: Arc<AtomicU64>,
// Time when the client started waiting for a connection from pool, measured in microseconds
// We use connect_time as the reference point for this value
// U64 can represent ~5850 centuries in microseconds, so we should be fine
pub wait_start_us: Arc<AtomicU64>,
/// Current state of the client /// Current state of the client
pub state: Arc<AtomicClientState>, pub state: Arc<AtomicClientState>,
@@ -67,9 +61,8 @@ impl Default for ClientStats {
application_name: String::new(), application_name: String::new(),
username: String::new(), username: String::new(),
pool_name: String::new(), pool_name: String::new(),
pool_stats: Arc::new(PoolStats::default()),
total_wait_time: Arc::new(AtomicU64::new(0)), total_wait_time: Arc::new(AtomicU64::new(0)),
max_wait_time: Arc::new(AtomicU64::new(0)),
wait_start_us: Arc::new(AtomicU64::new(0)),
state: Arc::new(AtomicClientState::new(ClientState::Idle)), state: Arc::new(AtomicClientState::new(ClientState::Idle)),
transaction_count: Arc::new(AtomicU64::new(0)), transaction_count: Arc::new(AtomicU64::new(0)),
query_count: Arc::new(AtomicU64::new(0)), query_count: Arc::new(AtomicU64::new(0)),
@@ -86,9 +79,11 @@ impl ClientStats {
username: &str, username: &str,
pool_name: &str, pool_name: &str,
connect_time: Instant, connect_time: Instant,
pool_stats: Arc<PoolStats>,
) -> Self { ) -> Self {
Self { Self {
client_id, client_id,
pool_stats,
connect_time, connect_time,
application_name: application_name.to_string(), application_name: application_name.to_string(),
username: username.to_string(), username: username.to_string(),
@@ -101,6 +96,8 @@ impl ClientStats {
/// update metrics on the corresponding pool. /// update metrics on the corresponding pool.
pub fn disconnect(&self) { pub fn disconnect(&self) {
self.reporter.client_disconnecting(self.client_id); self.reporter.client_disconnecting(self.client_id);
self.pool_stats
.client_disconnect(self.state.load(Ordering::Relaxed))
} }
/// Register a client with the stats system. The stats system uses client_id /// Register a client with the stats system. The stats system uses client_id
@@ -108,36 +105,33 @@ impl ClientStats {
pub fn register(&self, stats: Arc<ClientStats>) { pub fn register(&self, stats: Arc<ClientStats>) {
self.reporter.client_register(self.client_id, stats); self.reporter.client_register(self.client_id, stats);
self.state.store(ClientState::Idle, Ordering::Relaxed); self.state.store(ClientState::Idle, Ordering::Relaxed);
self.pool_stats.cl_idle.fetch_add(1, Ordering::Relaxed);
} }
/// Reports a client is done querying the server and is no longer assigned a server connection /// Reports a client is done querying the server and is no longer assigned a server connection
pub fn idle(&self) { pub fn idle(&self) {
self.pool_stats
.client_idle(self.state.load(Ordering::Relaxed));
self.state.store(ClientState::Idle, Ordering::Relaxed); self.state.store(ClientState::Idle, Ordering::Relaxed);
} }
/// Reports a client is waiting for a connection /// Reports a client is waiting for a connection
pub fn waiting(&self) { pub fn waiting(&self) {
let wait_start = self.connect_time.elapsed().as_micros() as u64; self.pool_stats
.client_waiting(self.state.load(Ordering::Relaxed));
self.wait_start_us.store(wait_start, Ordering::Relaxed);
self.state.store(ClientState::Waiting, Ordering::Relaxed); self.state.store(ClientState::Waiting, Ordering::Relaxed);
} }
/// Reports a client is done waiting for a connection and is about to query the server. /// Reports a client is done waiting for a connection and is about to query the server.
pub fn active(&self) { pub fn active(&self) {
self.pool_stats
.client_active(self.state.load(Ordering::Relaxed));
self.state.store(ClientState::Active, Ordering::Relaxed); self.state.store(ClientState::Active, Ordering::Relaxed);
} }
/// Reports a client has failed to obtain a connection from a connection pool /// Reports a client has failed to obtain a connection from a connection pool
pub fn checkout_error(&self) { pub fn checkout_error(&self) {
self.state.store(ClientState::Idle, Ordering::Relaxed); self.state.store(ClientState::Idle, Ordering::Relaxed);
self.update_wait_times();
}
/// Reports a client has succeeded in obtaining a connection from a connection pool
pub fn checkout_success(&self) {
self.state.store(ClientState::Active, Ordering::Relaxed);
self.update_wait_times();
} }
/// Reports a client has had the server assigned to it be banned /// Reports a client has had the server assigned to it be banned
@@ -146,26 +140,10 @@ impl ClientStats {
self.error_count.fetch_add(1, Ordering::Relaxed); self.error_count.fetch_add(1, Ordering::Relaxed);
} }
fn update_wait_times(&self) { /// Reports the time spent by a client waiting to get a healthy connection from the pool
if self.wait_start_us.load(Ordering::Relaxed) == 0 { pub fn checkout_time(&self, microseconds: u64) {
return;
}
let wait_time_us = self.get_current_wait_time_us();
self.total_wait_time self.total_wait_time
.fetch_add(wait_time_us, Ordering::Relaxed); .fetch_add(microseconds, Ordering::Relaxed);
self.max_wait_time
.fetch_max(wait_time_us, Ordering::Relaxed);
self.wait_start_us.store(0, Ordering::Relaxed);
}
pub fn get_current_wait_time_us(&self) -> u64 {
let wait_start_us = self.wait_start_us.load(Ordering::Relaxed);
let microseconds_since_connection_epoch = self.connect_time.elapsed().as_micros() as u64;
if wait_start_us == 0 || microseconds_since_connection_epoch < wait_start_us {
return 0;
}
microseconds_since_connection_epoch - wait_start_us
} }
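
The wait-time bookkeeping above uses the client's connect_time as an epoch: waiting() stores the elapsed microseconds at wait start, and get_current_wait_time_us subtracts that from "elapsed now", with 0 doubling as the "not waiting" sentinel. A standalone sketch of the same clock:

use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};

struct WaitClock {
    connect_time: Instant,
    wait_start_us: AtomicU64,
}

impl WaitClock {
    fn start_waiting(&self) {
        let now = self.connect_time.elapsed().as_micros() as u64;
        self.wait_start_us.store(now, Ordering::Relaxed);
    }

    fn current_wait_us(&self) -> u64 {
        let start = self.wait_start_us.load(Ordering::Relaxed);
        let now = self.connect_time.elapsed().as_micros() as u64;
        // 0 means "not waiting"; also guard against the clock reading
        // lower than the stored start, as the code above does.
        if start == 0 || now < start {
            return 0;
        }
        now - start
    }
}

fn main() {
    let clock = WaitClock {
        connect_time: Instant::now(),
        wait_start_us: AtomicU64::new(0),
    };
    // Sleep first so the stored start is non-zero (0 is the idle sentinel).
    std::thread::sleep(Duration::from_millis(1));
    clock.start_waiting();
    std::thread::sleep(Duration::from_millis(5));
    assert!(clock.current_wait_us() >= 5_000);
}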
/// Report a query executed by a client against a server /// Report a query executed by a client against a server

View File

@@ -1,134 +1,36 @@
use log::debug; use crate::config::Pool;
use crate::config::PoolMode;
use super::{ClientState, ServerState}; use crate::pool::PoolIdentifier;
use crate::{config::PoolMode, messages::DataType, pool::PoolIdentifier};
use std::collections::HashMap;
use std::sync::atomic::*; use std::sync::atomic::*;
use std::sync::Arc;
use crate::pool::get_all_pools; use super::get_reporter;
use super::Reporter;
use super::{ClientState, ServerState};
#[derive(Debug, Clone)] #[derive(Debug, Clone, Default)]
/// A struct that holds information about a Pool. /// A struct that holds information about a Pool.
pub struct PoolStats { pub struct PoolStats {
pub identifier: PoolIdentifier, // Pool identifier, cannot be changed after creating the instance
pub mode: PoolMode, identifier: PoolIdentifier,
pub cl_idle: u64,
pub cl_active: u64,
pub cl_waiting: u64,
pub cl_cancel_req: u64,
pub sv_active: u64,
pub sv_idle: u64,
pub sv_used: u64,
pub sv_tested: u64,
pub sv_login: u64,
pub maxwait: u64,
}
impl PoolStats {
pub fn new(identifier: PoolIdentifier, mode: PoolMode) -> Self {
PoolStats {
identifier,
mode,
cl_idle: 0,
cl_active: 0,
cl_waiting: 0,
cl_cancel_req: 0,
sv_active: 0,
sv_idle: 0,
sv_used: 0,
sv_tested: 0,
sv_login: 0,
maxwait: 0,
}
}
pub fn construct_pool_lookup() -> HashMap<PoolIdentifier, PoolStats> { // Pool Config, cannot be changed after creating the instance
let mut map: HashMap<PoolIdentifier, PoolStats> = HashMap::new(); config: Pool,
let client_map = super::get_client_stats();
let server_map = super::get_server_stats();
for (identifier, pool) in get_all_pools() { // A reference to the global reporter.
map.insert( reporter: Reporter,
identifier.clone(),
PoolStats::new(identifier, pool.settings.pool_mode),
);
}
for client in client_map.values() { /// Counters (atomics)
match map.get_mut(&PoolIdentifier { pub cl_idle: Arc<AtomicU64>,
db: client.pool_name(), pub cl_active: Arc<AtomicU64>,
user: client.username(), pub cl_waiting: Arc<AtomicU64>,
}) { pub cl_cancel_req: Arc<AtomicU64>,
Some(pool_stats) => { pub sv_active: Arc<AtomicU64>,
match client.state.load(Ordering::Relaxed) { pub sv_idle: Arc<AtomicU64>,
ClientState::Active => pool_stats.cl_active += 1, pub sv_used: Arc<AtomicU64>,
ClientState::Idle => pool_stats.cl_idle += 1, pub sv_tested: Arc<AtomicU64>,
ClientState::Waiting => pool_stats.cl_waiting += 1, pub sv_login: Arc<AtomicU64>,
} pub maxwait: Arc<AtomicU64>,
let wait_start_us = client.wait_start_us.load(Ordering::Relaxed);
if wait_start_us > 0 {
let wait_time_us = client.get_current_wait_time_us();
pool_stats.maxwait = std::cmp::max(pool_stats.maxwait, wait_time_us);
}
}
None => debug!("Client from an obselete pool"),
}
}
for server in server_map.values() {
match map.get_mut(&PoolIdentifier {
db: server.pool_name(),
user: server.username(),
}) {
Some(pool_stats) => match server.state.load(Ordering::Relaxed) {
ServerState::Active => pool_stats.sv_active += 1,
ServerState::Idle => pool_stats.sv_idle += 1,
ServerState::Login => pool_stats.sv_login += 1,
ServerState::Tested => pool_stats.sv_tested += 1,
},
None => debug!("Server from an obselete pool"),
}
}
map
}
pub fn generate_header() -> Vec<(&'static str, DataType)> {
vec![
("database", DataType::Text),
("user", DataType::Text),
("pool_mode", DataType::Text),
("cl_idle", DataType::Numeric),
("cl_active", DataType::Numeric),
("cl_waiting", DataType::Numeric),
("cl_cancel_req", DataType::Numeric),
("sv_active", DataType::Numeric),
("sv_idle", DataType::Numeric),
("sv_used", DataType::Numeric),
("sv_tested", DataType::Numeric),
("sv_login", DataType::Numeric),
("maxwait", DataType::Numeric),
("maxwait_us", DataType::Numeric),
]
}
pub fn generate_row(&self) -> Vec<String> {
vec![
self.identifier.db.clone(),
self.identifier.user.clone(),
self.mode.to_string(),
self.cl_idle.to_string(),
self.cl_active.to_string(),
self.cl_waiting.to_string(),
self.cl_cancel_req.to_string(),
self.sv_active.to_string(),
self.sv_idle.to_string(),
self.sv_used.to_string(),
self.sv_tested.to_string(),
self.sv_login.to_string(),
(self.maxwait / 1_000_000).to_string(),
(self.maxwait % 1_000_000).to_string(),
]
}
} }
impl IntoIterator for PoolStats { impl IntoIterator for PoolStats {
@@ -137,18 +39,236 @@ impl IntoIterator for PoolStats {
    fn into_iter(self) -> Self::IntoIter {
        vec![
-            ("cl_idle".to_string(), self.cl_idle),
-            ("cl_active".to_string(), self.cl_active),
-            ("cl_waiting".to_string(), self.cl_waiting),
-            ("cl_cancel_req".to_string(), self.cl_cancel_req),
-            ("sv_active".to_string(), self.sv_active),
-            ("sv_idle".to_string(), self.sv_idle),
-            ("sv_used".to_string(), self.sv_used),
-            ("sv_tested".to_string(), self.sv_tested),
-            ("sv_login".to_string(), self.sv_login),
-            ("maxwait".to_string(), self.maxwait / 1_000_000),
-            ("maxwait_us".to_string(), self.maxwait % 1_000_000),
+            ("cl_idle".to_string(), self.cl_idle.load(Ordering::Relaxed)),
+            (
+                "cl_active".to_string(),
+                self.cl_active.load(Ordering::Relaxed),
+            ),
+            (
+                "cl_waiting".to_string(),
+                self.cl_waiting.load(Ordering::Relaxed),
+            ),
+            (
+                "cl_cancel_req".to_string(),
+                self.cl_cancel_req.load(Ordering::Relaxed),
+            ),
+            (
+                "sv_active".to_string(),
+                self.sv_active.load(Ordering::Relaxed),
+            ),
+            ("sv_idle".to_string(), self.sv_idle.load(Ordering::Relaxed)),
+            ("sv_used".to_string(), self.sv_used.load(Ordering::Relaxed)),
+            (
+                "sv_tested".to_string(),
+                self.sv_tested.load(Ordering::Relaxed),
+            ),
+            (
+                "sv_login".to_string(),
+                self.sv_login.load(Ordering::Relaxed),
+            ),
+            (
+                "maxwait".to_string(),
+                self.maxwait.load(Ordering::Relaxed) / 1_000_000,
+            ),
+            (
+                "maxwait_us".to_string(),
+                self.maxwait.load(Ordering::Relaxed) % 1_000_000,
+            ),
        ]
        .into_iter()
    }
}
impl PoolStats {
pub fn new(identifier: PoolIdentifier, config: Pool) -> Self {
Self {
identifier,
config,
reporter: get_reporter(),
..Default::default()
}
}
// Getters
pub fn register(&self, stats: Arc<PoolStats>) {
self.reporter.pool_register(self.identifier.clone(), stats);
}
pub fn database(&self) -> String {
self.identifier.db.clone()
}
pub fn user(&self) -> String {
self.identifier.user.clone()
}
pub fn pool_mode(&self) -> PoolMode {
self.config.pool_mode
}
/// Populates an array of strings with counters (used by admin in show pools)
pub fn populate_row(&self, row: &mut Vec<String>) {
for (_key, value) in self.clone() {
row.push(value.to_string());
}
}
/// Deletes the maxwait counter, this is done everytime we obtain metrics
pub fn clear_maxwait(&self) {
self.maxwait.store(0, Ordering::Relaxed);
}
/// Notified when a server of the pool enters login state.
///
/// Arguments:
///
/// `from`: The state of the server that notifies.
pub fn server_login(&self, from: ServerState) {
self.sv_login.fetch_add(1, Ordering::Relaxed);
if from != ServerState::Login {
self.decrease_from_server_state(from);
}
}
/// Notified when a server of the pool become 'active'
///
/// Arguments:
///
/// `from`: The state of the server that notifies.
pub fn server_active(&self, from: ServerState) {
self.sv_active.fetch_add(1, Ordering::Relaxed);
if from != ServerState::Active {
self.decrease_from_server_state(from);
}
}
/// Notified when a server of the pool become 'tested'
///
/// Arguments:
///
/// `from`: The state of the server that notifies.
pub fn server_tested(&self, from: ServerState) {
self.sv_tested.fetch_add(1, Ordering::Relaxed);
if from != ServerState::Tested {
self.decrease_from_server_state(from);
}
}
/// Notified when a server of the pool become 'idle'
///
/// Arguments:
///
/// `from`: The state of the server that notifies.
pub fn server_idle(&self, from: ServerState) {
self.sv_idle.fetch_add(1, Ordering::Relaxed);
if from != ServerState::Idle {
self.decrease_from_server_state(from);
}
}
/// Notified when a client of the pool become 'waiting'
///
/// Arguments:
///
/// `from`: The state of the client that notifies.
pub fn client_waiting(&self, from: ClientState) {
if from != ClientState::Waiting {
self.cl_waiting.fetch_add(1, Ordering::Relaxed);
self.decrease_from_client_state(from);
}
}
/// Notified when a client of the pool become 'active'
///
/// Arguments:
///
/// `from`: The state of the client that notifies.
pub fn client_active(&self, from: ClientState) {
if from != ClientState::Active {
self.cl_active.fetch_add(1, Ordering::Relaxed);
self.decrease_from_client_state(from);
}
}
/// Notified when a client of the pool become 'idle'
///
/// Arguments:
///
/// `from`: The state of the client that notifies.
pub fn client_idle(&self, from: ClientState) {
if from != ClientState::Idle {
self.cl_idle.fetch_add(1, Ordering::Relaxed);
self.decrease_from_client_state(from);
}
}
/// Notified when a client disconnects.
///
/// Arguments:
///
/// `from`: The state of the client that notifies.
pub fn client_disconnect(&self, from: ClientState) {
let counter = match from {
ClientState::Idle => &self.cl_idle,
ClientState::Waiting => &self.cl_waiting,
ClientState::Active => &self.cl_active,
};
Self::decrease_counter(counter.clone());
}
/// Notified when a server disconnects.
///
/// Arguments:
///
/// `from`: The state of the client that notifies.
pub fn server_disconnect(&self, from: ServerState) {
let counter = match from {
ServerState::Active => &self.sv_active,
ServerState::Idle => &self.sv_idle,
ServerState::Login => &self.sv_login,
ServerState::Tested => &self.sv_tested,
};
Self::decrease_counter(counter.clone());
}
// helpers for counter decrease
fn decrease_from_server_state(&self, from: ServerState) {
let counter = match from {
ServerState::Tested => &self.sv_tested,
ServerState::Active => &self.sv_active,
ServerState::Idle => &self.sv_idle,
ServerState::Login => &self.sv_login,
};
Self::decrease_counter(counter.clone());
}
fn decrease_from_client_state(&self, from: ClientState) {
let counter = match from {
ClientState::Active => &self.cl_active,
ClientState::Idle => &self.cl_idle,
ClientState::Waiting => &self.cl_waiting,
};
Self::decrease_counter(counter.clone());
}
fn decrease_counter(value: Arc<AtomicU64>) {
if value.load(Ordering::Relaxed) > 0 {
value.fetch_sub(1, Ordering::Relaxed);
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_decrease() {
let stat: PoolStats = PoolStats::default();
stat.server_login(ServerState::Login);
stat.server_idle(ServerState::Login);
assert_eq!(stat.sv_login.load(Ordering::Relaxed), 0);
assert_eq!(stat.sv_idle.load(Ordering::Relaxed), 1);
}
}
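For orientation, a minimal sketch of how the guarded counters above are driven. The method names are the ones defined in this file; the call sequence itself is illustrative and not lifted from pgcat's actual call sites:

// A client goes idle -> waiting -> active, then disconnects.
// decrease_counter() refuses to underflow, so decrementing the
// already-zero cl_idle gauge on the first transition is a no-op.
let stats = PoolStats::default();
stats.client_waiting(ClientState::Idle);      // cl_waiting: 0 -> 1
stats.client_active(ClientState::Waiting);    // cl_waiting: 1 -> 0, cl_active: 0 -> 1
stats.client_disconnect(ClientState::Active); // cl_active: 1 -> 0
assert_eq!(stats.cl_waiting.load(Ordering::Relaxed), 0);
assert_eq!(stats.cl_active.load(Ordering::Relaxed), 0);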

View File

@@ -1,4 +1,5 @@
 use super::AddressStats;
+use super::PoolStats;
 use super::{get_reporter, Reporter};
 use crate::config::Address;
 use atomic_enum::atomic_enum;
@@ -37,6 +38,7 @@ pub struct ServerStats {
    address: Address,
    connect_time: Instant,
+    pool_stats: Arc<PoolStats>,
    reporter: Reporter,

    /// Data
@@ -47,10 +49,6 @@ pub struct ServerStats {
    pub transaction_count: Arc<AtomicU64>,
    pub query_count: Arc<AtomicU64>,
    pub error_count: Arc<AtomicU64>,
-    pub prepared_hit_count: Arc<AtomicU64>,
-    pub prepared_miss_count: Arc<AtomicU64>,
-    pub prepared_eviction_count: Arc<AtomicU64>,
-    pub prepared_cache_size: Arc<AtomicU64>,
}
impl Default for ServerStats {
@@ -59,6 +57,7 @@ impl Default for ServerStats {
            server_id: 0,
            application_name: Arc::new(RwLock::new(String::new())),
            address: Address::default(),
+            pool_stats: Arc::new(PoolStats::default()),
            connect_time: Instant::now(),
            state: Arc::new(AtomicServerState::new(ServerState::Login)),
            bytes_sent: Arc::new(AtomicU64::new(0)),
@@ -67,18 +66,15 @@ impl Default for ServerStats {
            query_count: Arc::new(AtomicU64::new(0)),
            error_count: Arc::new(AtomicU64::new(0)),
            reporter: get_reporter(),
-            prepared_hit_count: Arc::new(AtomicU64::new(0)),
-            prepared_miss_count: Arc::new(AtomicU64::new(0)),
-            prepared_eviction_count: Arc::new(AtomicU64::new(0)),
-            prepared_cache_size: Arc::new(AtomicU64::new(0)),
        }
    }
}

impl ServerStats {
-    pub fn new(address: Address, connect_time: Instant) -> Self {
+    pub fn new(address: Address, pool_stats: Arc<PoolStats>, connect_time: Instant) -> Self {
        Self {
            address,
+            pool_stats,
            connect_time,
            server_id: rand::random::<i32>(),
            ..Default::default()
@@ -100,6 +96,9 @@ impl ServerStats {
    /// Reports a server connection is no longer assigned to a client
    /// and is available for the next client to pick it up
    pub fn idle(&self) {
+        self.pool_stats
+            .server_idle(self.state.load(Ordering::Relaxed));
        self.state.store(ServerState::Idle, Ordering::Relaxed);
    }
@@ -107,16 +106,22 @@ impl ServerStats {
    /// Also updates metrics on the pool regarding server usage.
    pub fn disconnect(&self) {
        self.reporter.server_disconnecting(self.server_id);
+        self.pool_stats
+            .server_disconnect(self.state.load(Ordering::Relaxed))
    }

    /// Reports a server connection is being tested before being given to a client.
    pub fn tested(&self) {
        self.set_undefined_application();
+        self.pool_stats
+            .server_tested(self.state.load(Ordering::Relaxed));
        self.state.store(ServerState::Tested, Ordering::Relaxed);
    }

    /// Reports a server connection is attempting to login.
    pub fn login(&self) {
+        self.pool_stats
+            .server_login(self.state.load(Ordering::Relaxed));
        self.state.store(ServerState::Login, Ordering::Relaxed);
        self.set_undefined_application();
    }
@@ -124,6 +129,8 @@ impl ServerStats {
    /// Reports a server connection has been assigned to a client that
    /// is about to query the server
    pub fn active(&self, application_name: String) {
+        self.pool_stats
+            .server_active(self.state.load(Ordering::Relaxed));
        self.state.store(ServerState::Active, Ordering::Relaxed);
        self.set_application(application_name);
    }
@@ -132,24 +139,13 @@ impl ServerStats {
        self.address.stats.clone()
    }

-    pub fn check_address_stat_average_is_updated_status(&self) -> bool {
-        self.address.stats.averages_updated.load(Ordering::Relaxed)
-    }
-
-    pub fn set_address_stat_average_is_updated_status(&self, is_checked: bool) {
-        self.address
-            .stats
-            .averages_updated
-            .store(is_checked, Ordering::Relaxed);
-    }
-
    // Helper methods for show_servers
    pub fn pool_name(&self) -> String {
-        self.address.pool_name.clone()
+        self.pool_stats.database()
    }

    pub fn username(&self) -> String {
-        self.address.username.clone()
+        self.pool_stats.user()
    }

    pub fn address_name(&self) -> String {
@@ -170,17 +166,27 @@ impl ServerStats {
    }

    pub fn checkout_time(&self, microseconds: u64, application_name: String) {
-        // Update server stats and address aggregation stats
+        // Update server stats and address aggergation stats
        self.set_application(application_name);
-        self.address.stats.wait_time_add(microseconds);
+        self.address
+            .stats
+            .total_wait_time
+            .fetch_add(microseconds, Ordering::Relaxed);
+        self.pool_stats
+            .maxwait
+            .fetch_max(microseconds, Ordering::Relaxed);
    }

    /// Report a query executed by a client against a server
    pub fn query(&self, milliseconds: u64, application_name: &str) {
        self.set_application(application_name.to_string());
-        self.address.stats.query_count_add();
-        self.address.stats.query_time_add(milliseconds);
-        self.query_count.fetch_add(1, Ordering::Relaxed);
+        let address_stats = self.address_stats();
+        address_stats
+            .total_query_count
+            .fetch_add(1, Ordering::Relaxed);
+        address_stats
+            .total_query_time
+            .fetch_add(milliseconds, Ordering::Relaxed);
    }

    /// Report a transaction executed by a client a server
@@ -191,39 +197,29 @@ impl ServerStats {
        self.set_application(application_name.to_string());
        self.transaction_count.fetch_add(1, Ordering::Relaxed);
-        self.address.stats.xact_count_add();
+        self.address
+            .stats
+            .total_xact_count
+            .fetch_add(1, Ordering::Relaxed);
    }

    /// Report data sent to a server
    pub fn data_sent(&self, amount_bytes: usize) {
        self.bytes_sent
            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
-        self.address.stats.bytes_sent_add(amount_bytes as u64);
+        self.address
+            .stats
+            .total_sent
+            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
    }

    /// Report data received from a server
    pub fn data_received(&self, amount_bytes: usize) {
        self.bytes_received
            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
-        self.address.stats.bytes_received_add(amount_bytes as u64);
-    }
-
-    /// Report a prepared statement that already exists on the server.
-    pub fn prepared_cache_hit(&self) {
-        self.prepared_hit_count.fetch_add(1, Ordering::Relaxed);
-    }
-
-    /// Report a prepared statement that does not exist on the server yet.
-    pub fn prepared_cache_miss(&self) {
-        self.prepared_miss_count.fetch_add(1, Ordering::Relaxed);
-    }
-
-    pub fn prepared_cache_add(&self) {
-        self.prepared_cache_size.fetch_add(1, Ordering::Relaxed);
-    }
-
-    pub fn prepared_cache_remove(&self) {
-        self.prepared_eviction_count.fetch_add(1, Ordering::Relaxed);
-        self.prepared_cache_size.fetch_sub(1, Ordering::Relaxed);
-    }
+        self.address
+            .stats
+            .total_received
+            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
+    }
}
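Combined with the PoolStats changes above, the intended wiring is roughly the following. This is a sketch under assumptions: Address::default() stands in for a real server address, and the global reporter behind get_reporter() is assumed to be initialized:

let pool_stats = Arc::new(PoolStats::default());
let server = ServerStats::new(Address::default(), pool_stats.clone(), Instant::now());

server.login();                    // sv_login: 0 -> 1 (initial state is Login)
server.active("psql".to_string()); // sv_login: 1 -> 0, sv_active: 0 -> 1
server.idle();                     // sv_active: 1 -> 0, sv_idle: 0 -> 1

assert_eq!(pool_stats.sv_idle.load(Ordering::Relaxed), 1);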

View File

@@ -1,13 +1,8 @@
 FROM rust:bullseye

-COPY --from=sclevine/yj /bin/yj /bin/yj
-RUN /bin/yj -h
-
 RUN apt-get update && apt-get install llvm-11 psmisc postgresql-contrib postgresql-client ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 -y
 RUN cargo install cargo-binutils rustfilt
 RUN rustup component add llvm-tools-preview
 RUN sudo gem install bundler
 RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
     sudo dpkg -i toxiproxy-2.4.0.deb
-
-RUN wget -O go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
-    sudo tar -C /usr/local -xzf go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
-    rm go1.21.3.linux-$(dpkg --print-architecture).tar.gz

View File

@@ -1,5 +0,0 @@
module pgcat
go 1.21
require github.com/lib/pq v1.10.9

View File

@@ -1,2 +0,0 @@
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=

View File

@@ -1,162 +0,0 @@
#
# PgCat config example.
#
#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"
# Port to run on, same as PgBouncer used in this example.
port = "${PORT}"
# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true
# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930
# How long to wait before aborting a server connection (ms).
connect_timeout = 1000
# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000
# How long to keep connection available for immediate re-use, without running a healthcheck query on it
healthcheck_delay = 30000
# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000
# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds
# If we should log client connections
log_client_connections = false
# If we should log client disconnections
log_client_disconnections = false
# Reload config automatically if it changes.
autoreload = 15000
server_round_robin = false
# TLS
tls_certificate = "../../.circleci/server.cert"
tls_private_key = "../../.circleci/server.key"
# Credentials to access the virtual administrative database (pgbouncer or pgcat)
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
admin_username = "admin_user"
admin_password = "admin_pass"
# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as database name when connecting
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"
# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"
# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true
# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true
# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitely selected with our custom protocol.
primary_reads_enabled = true
# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"
# Prepared statements cache size.
prepared_statements_cache_size = 500
# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user
# The maximum number of connection from a single Pgcat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 5
statement_timeout = 0
[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000
# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
[ "127.0.0.1", 5432, "primary" ],
[ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"
[pools.sharded_db.shards.1]
servers = [
[ "127.0.0.1", 5432, "primary" ],
[ "localhost", 5432, "replica" ],
]
database = "shard1"
[pools.sharded_db.shards.2]
servers = [
[ "127.0.0.1", 5432, "primary" ],
[ "localhost", 5432, "replica" ],
]
database = "shard2"
[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
query_parser_read_write_splitting = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"
[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000
[pools.simple_db.shards.0]
servers = [
[ "127.0.0.1", 5432, "primary" ],
[ "localhost", 5432, "replica" ]
]
database = "some_db"

View File

@@ -1,52 +0,0 @@
package pgcat
import (
"context"
"database/sql"
"fmt"
_ "github.com/lib/pq"
"testing"
)
func Test(t *testing.T) {
t.Cleanup(setup(t))
t.Run("Named parameterized prepared statement works", namedParameterizedPreparedStatement)
t.Run("Unnamed parameterized prepared statement works", unnamedParameterizedPreparedStatement)
}
func namedParameterizedPreparedStatement(t *testing.T) {
db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
if err != nil {
t.Fatalf("could not open connection: %+v", err)
}
stmt, err := db.Prepare("SELECT $1")
if err != nil {
t.Fatalf("could not prepare: %+v", err)
}
for i := 0; i < 100; i++ {
rows, err := stmt.Query(1)
if err != nil {
t.Fatalf("could not query: %+v", err)
}
_ = rows.Close()
}
}
func unnamedParameterizedPreparedStatement(t *testing.T) {
db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
if err != nil {
t.Fatalf("could not open connection: %+v", err)
}
for i := 0; i < 100; i++ {
// Under the hood QueryContext generates an unnamed parameterized prepared statement
rows, err := db.QueryContext(context.Background(), "SELECT $1", 1)
if err != nil {
t.Fatalf("could not query: %+v", err)
}
_ = rows.Close()
}
}
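A rough Rust equivalent of what this removed Go test exercised, sketched under assumptions: the tokio-postgres crate as the client, and pgcat listening on port 6432 with the credentials from the config above:

use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    let (client, connection) = tokio_postgres::connect(
        "host=localhost port=6432 dbname=sharded_db user=sharding_user password=sharding_user",
        NoTls,
    )
    .await?;
    // The connection object drives the socket; poll it on its own task.
    tokio::spawn(connection);

    // Named parameterized prepared statement: parsed once, executed many times.
    let stmt = client.prepare("SELECT $1::INT4").await?;
    for i in 0..100i32 {
        let rows = client.query(&stmt, &[&i]).await?;
        assert_eq!(rows[0].get::<_, i32>(0), i);
    }
    Ok(())
}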

View File

@@ -1,81 +0,0 @@
package pgcat
import (
"context"
"database/sql"
_ "embed"
"fmt"
"math/rand"
"os"
"os/exec"
"strings"
"testing"
"time"
)
//go:embed pgcat.toml
var pgcatCfg string
var port = rand.Intn(32760-20000) + 20000
func setup(t *testing.T) func() {
cfg, err := os.CreateTemp("/tmp", "pgcat_cfg_*.toml")
if err != nil {
t.Fatalf("could not create temp file: %+v", err)
}
pgcatCfg = strings.Replace(pgcatCfg, "\"${PORT}\"", fmt.Sprintf("%d", port), 1)
_, err = cfg.Write([]byte(pgcatCfg))
if err != nil {
t.Fatalf("could not write temp file: %+v", err)
}
commandPath := "../../target/debug/pgcat"
if os.Getenv("CARGO_TARGET_DIR") != "" {
commandPath = os.Getenv("CARGO_TARGET_DIR") + "/debug/pgcat"
}
cmd := exec.Command(commandPath, cfg.Name())
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
go func() {
err = cmd.Run()
if err != nil {
t.Errorf("could not run pgcat: %+v", err)
}
}()
deadline, cancelFunc := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
defer cancelFunc()
for {
select {
case <-deadline.Done():
break
case <-time.After(50 * time.Millisecond):
db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=pgcat user=admin_user password=admin_pass sslmode=disable", port))
if err != nil {
continue
}
rows, err := db.QueryContext(deadline, "SHOW STATS")
if err != nil {
continue
}
_ = rows.Close()
_ = db.Close()
break
}
break
}
return func() {
err := cmd.Process.Signal(os.Interrupt)
if err != nil {
t.Fatalf("could not interrupt pgcat: %+v", err)
}
err = os.Remove(cfg.Name())
if err != nil {
t.Fatalf("could not remove temp file: %+v", err)
}
}
}

View File

@@ -63,7 +63,6 @@ def cleanup_conn(conn: psycopg2.extensions.connection, cur: psycopg2.extensions.
 def test_normal_db_access():
-    pgcat_start()
     conn, cur = connect_db(autocommit=False)
     cur.execute("SELECT 1")
     res = cur.fetchall()

View File

@@ -11,6 +11,325 @@ describe "Admin" do
    processes.pgcat.shutdown
  end
describe "SHOW STATS" do
context "clients connect and make one query" do
it "updates *_query_time and *_wait_time" do
connection = PG::connect("#{pgcat_conn_str}?application_name=one_query")
connection.async_exec("SELECT pg_sleep(0.25)")
connection.async_exec("SELECT pg_sleep(0.25)")
connection.async_exec("SELECT pg_sleep(0.25)")
connection.close
# wait for averages to be calculated, we shouldn't do this too often
sleep(15.5)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW STATS")[0]
admin_conn.close
expect(results["total_query_time"].to_i).to be_within(200).of(750)
expect(results["avg_query_time"].to_i).to_not eq(0)
expect(results["total_wait_time"].to_i).to_not eq(0)
expect(results["avg_wait_time"].to_i).to_not eq(0)
end
end
end
describe "SHOW POOLS" do
context "bad credentials" do
it "does not change any stats" do
bad_password_url = URI(pgcat_conn_str)
bad_password_url.password = "wrong"
expect { PG::connect("#{bad_password_url.to_s}?application_name=bad_password") }.to raise_error(PG::ConnectionBad)
sleep(1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("1")
end
end
context "bad database name" do
it "does not change any stats" do
bad_db_url = URI(pgcat_conn_str)
bad_db_url.path = "/wrong_db"
expect { PG::connect("#{bad_db_url.to_s}?application_name=bad_db") }.to raise_error(PG::ConnectionBad)
sleep(1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("1")
end
end
context "client connects but issues no queries" do
it "only affects cl_idle stats" do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
before_test = admin_conn.async_exec("SHOW POOLS")[0]["sv_idle"]
connections = Array.new(20) { PG::connect(pgcat_conn_str) }
sleep(1)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("20")
expect(results["sv_idle"]).to eq(before_test)
connections.map(&:close)
sleep(1.1)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_idle cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq(before_test)
end
end
context "clients connect and make one query" do
it "only affects cl_idle, sv_idle stats" do
connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
Thread.new { c.async_exec("SELECT pg_sleep(2.5)") }
end
sleep(1.1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_waiting cl_cancel_req sv_idle sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_active"]).to eq("5")
expect(results["sv_active"]).to eq("5")
sleep(3)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("5")
expect(results["sv_idle"]).to eq("5")
connections.map(&:close)
sleep(1)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("5")
end
end
context "client connects and opens a transaction and closes connection uncleanly" do
it "produces correct statistics" do
connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
Thread.new do
c.async_exec("BEGIN")
c.async_exec("SELECT pg_sleep(0.01)")
c.close
end
end
sleep(1.1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("5")
end
end
context "client fail to checkout connection from the pool" do
it "counts clients as idle" do
new_configs = processes.pgcat.current_config
new_configs["general"]["connect_timeout"] = 500
new_configs["general"]["ban_time"] = 1
new_configs["general"]["shutdown_timeout"] = 1
new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = 1
processes.pgcat.update_config(new_configs)
processes.pgcat.reload_config
threads = []
connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1)") rescue PG::SystemError }
end
sleep(2)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("5")
expect(results["sv_idle"]).to eq("1")
threads.map(&:join)
connections.map(&:close)
end
end
context "clients connects and disconnect normally" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
it 'shows the same number of clients before and after' do
clients_before = clients_connected_to_pool(processes: processes)
threads = []
connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT 1") }
end
clients_between = clients_connected_to_pool(processes: processes)
expect(clients_before).not_to eq(clients_between)
connections.each(&:close)
clients_after = clients_connected_to_pool(processes: processes)
expect(clients_before).to eq(clients_after)
end
end
context "clients connects and disconnect abruptly" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 10) }
it 'shows the same number of clients before and after' do
threads = []
connections = Array.new(2) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT 1") }
end
clients_before = clients_connected_to_pool(processes: processes)
random_string = (0...8).map { (65 + rand(26)).chr }.join
connection_string = "#{pgcat_conn_str}?application_name=#{random_string}"
faulty_client = Process.spawn("psql -Atx #{connection_string} >/dev/null")
sleep(1)
# psql starts two processes, we only know the pid of the parent, this
# ensure both are killed
`pkill -9 -f '#{random_string}'`
Process.wait(faulty_client)
clients_after = clients_connected_to_pool(processes: processes)
expect(clients_before).to eq(clients_after)
end
end
context "clients overwhelm server pools" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
it "cl_waiting is updated to show it" do
threads = []
connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") }
end
sleep(1.1) # Allow time for stats to update
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_cancel_req sv_idle sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_waiting"]).to eq("2")
expect(results["cl_active"]).to eq("2")
expect(results["sv_active"]).to eq("2")
sleep(2.5) # Allow time for stats to update
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("4")
expect(results["sv_idle"]).to eq("2")
threads.map(&:join)
connections.map(&:close)
end
it "show correct max_wait" do
threads = []
connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") }
end
sleep(2.5) # Allow time for stats to update
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
expect(results["maxwait"]).to eq("1")
expect(results["maxwait_us"].to_i).to be_within(200_000).of(500_000)
sleep(4.5) # Allow time for stats to update
results = admin_conn.async_exec("SHOW POOLS")[0]
expect(results["maxwait"]).to eq("0")
threads.map(&:join)
connections.map(&:close)
end
end
end
describe "SHOW CLIENTS" do
it "reports correct number and application names" do
conn_str = processes.pgcat.connection_string("sharded_db", "sharding_user")
connections = Array.new(20) { |i| PG::connect("#{conn_str}?application_name=app#{i % 5}") }
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
sleep(1) # Wait for stats to be updated
results = admin_conn.async_exec("SHOW CLIENTS")
expect(results.count).to eq(21) # count admin clients
expect(results.select { |c| c["application_name"] == "app3" || c["application_name"] == "app4" }.count).to eq(8)
expect(results.select { |c| c["database"] == "pgcat" }.count).to eq(1)
connections[0..5].map(&:close)
sleep(1) # Wait for stats to be updated
results = admin_conn.async_exec("SHOW CLIENTS")
expect(results.count).to eq(15)
connections[6..].map(&:close)
sleep(1) # Wait for stats to be updated
expect(admin_conn.async_exec("SHOW CLIENTS").count).to eq(1)
admin_conn.close
end
it "reports correct number of queries and transactions" do
conn_str = processes.pgcat.connection_string("sharded_db", "sharding_user")
connections = Array.new(2) { |i| PG::connect("#{conn_str}?application_name=app#{i}") }
connections.each do |c|
c.async_exec("SELECT 1")
c.async_exec("SELECT 2")
c.async_exec("SELECT 3")
c.async_exec("BEGIN")
c.async_exec("SELECT 4")
c.async_exec("SELECT 5")
c.async_exec("COMMIT")
end
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
sleep(1) # Wait for stats to be updated
results = admin_conn.async_exec("SHOW CLIENTS")
expect(results.count).to eq(3)
normal_client_results = results.reject { |r| r["database"] == "pgcat" }
expect(normal_client_results[0]["transaction_count"]).to eq("4")
expect(normal_client_results[1]["transaction_count"]).to eq("4")
expect(normal_client_results[0]["query_count"]).to eq("7")
expect(normal_client_results[1]["query_count"]).to eq("7")
admin_conn.close
connections.map(&:close)
end
end
describe "Manual Banning" do describe "Manual Banning" do
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 10) } let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 10) }
before do before do
@@ -81,7 +400,7 @@ describe "Admin" do
    end
  end

-  describe "SHOW USERS" do
+  describe "SHOW users" do
    it "returns the right users" do
      admin_conn = PG::connect(processes.pgcat.admin_connection_string)
      results = admin_conn.async_exec("SHOW USERS")[0]
@@ -90,49 +409,4 @@ describe "Admin" do
expect(results["pool_mode"]).to eq("transaction") expect(results["pool_mode"]).to eq("transaction")
end end
end end
[
"SHOW ME THE MONEY",
"SHOW ME THE WAY",
"SHOW UP",
"SHOWTIME",
"HAMMER TIME",
"SHOWN TO BE TRUE",
"SHOW ",
"SHOW ",
"SHOW 1",
";;;;;"
].each do |cmd|
describe "Bad command #{cmd}" do
it "does not panic and responds with PG::SystemError" do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
expect { admin_conn.async_exec(cmd) }.to raise_error(PG::SystemError).with_message(/Unsupported/)
admin_conn.close
end
end
end
describe "PAUSE" do
it "pauses all pools" do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW DATABASES").to_a
expect(results.map{ |r| r["paused"] }.uniq).to eq(["0"])
admin_conn.async_exec("PAUSE")
results = admin_conn.async_exec("SHOW DATABASES").to_a
expect(results.map{ |r| r["paused"] }.uniq).to eq(["1"])
admin_conn.async_exec("RESUME")
results = admin_conn.async_exec("SHOW DATABASES").to_a
expect(results.map{ |r| r["paused"] }.uniq).to eq(["0"])
end
it "handles errors" do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
expect { admin_conn.async_exec("PAUSE foo").to_a }.to raise_error(PG::SystemError)
expect { admin_conn.async_exec("PAUSE foo,bar").to_a }.to raise_error(PG::SystemError)
end
end
end end
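The max_wait expectations in the spec above follow directly from how PoolStats publishes the counter earlier in this diff: the atomic stores microseconds (via fetch_max in checkout_time), and SHOW POOLS reports whole seconds in maxwait with the sub-second remainder in maxwait_us. In short:

// A client that waited 1.5s for a server connection:
let maxwait_total_us: u64 = 1_500_000;
let maxwait = maxwait_total_us / 1_000_000;    // 1       -> "maxwait" column
let maxwait_us = maxwait_total_us % 1_000_000; // 500_000 -> "maxwait_us" column
assert_eq!((maxwait, maxwait_us), (1, 500_000));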

View File

@@ -1,102 +0,0 @@
# frozen_string_literal: true
require_relative 'spec_helper'
describe "COPY Handling" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 5) }
before do
new_configs = processes.pgcat.current_config
# Allow connections in the pool to expire faster
new_configs["general"]["idle_timeout"] = 5
processes.pgcat.update_config(new_configs)
# We need to kill the old process that was using the default configs
processes.pgcat.stop
processes.pgcat.start
processes.pgcat.wait_until_ready
end
before do
processes.all_databases.first.with_connection do |conn|
conn.async_exec "CREATE TABLE copy_test_table (a TEXT,b TEXT,c TEXT,d TEXT)"
end
end
after do
processes.all_databases.first.with_connection do |conn|
conn.async_exec "DROP TABLE copy_test_table;"
end
end
after do
processes.all_databases.map(&:reset)
processes.pgcat.shutdown
end
describe "COPY FROM" do
context "within transaction" do
it "finishes within alloted time" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
Timeout.timeout(3) do
conn.async_exec("BEGIN")
conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
sleep 0.5
conn.put_copy_data "some,data,to,copy\n"
conn.put_copy_data "more,data,to,copy\n"
end
conn.async_exec("COMMIT")
end
res = conn.async_exec("SELECT * FROM copy_test_table").to_a
expect(res).to eq([
{"a"=>"some", "b"=>"data", "c"=>"to", "d"=>"copy"},
{"a"=>"more", "b"=>"data", "c"=>"to", "d"=>"copy"}
])
end
end
context "outside transaction" do
it "finishes within alloted time" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
Timeout.timeout(3) do
conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
sleep 0.5
conn.put_copy_data "some,data,to,copy\n"
conn.put_copy_data "more,data,to,copy\n"
end
end
res = conn.async_exec("SELECT * FROM copy_test_table").to_a
expect(res).to eq([
{"a"=>"some", "b"=>"data", "c"=>"to", "d"=>"copy"},
{"a"=>"more", "b"=>"data", "c"=>"to", "d"=>"copy"}
])
end
end
end
describe "COPY TO" do
before do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
conn.async_exec("BEGIN")
conn.copy_data "COPY copy_test_table FROM STDIN CSV" do
conn.put_copy_data "some,data,to,copy\n"
conn.put_copy_data "more,data,to,copy\n"
end
conn.async_exec("COMMIT")
conn.close
end
it "works" do
res = []
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
conn.copy_data "COPY copy_test_table TO STDOUT CSV" do
while row=conn.get_copy_data
res << row
end
end
expect(res).to eq(["some,data,to,copy\n", "more,data,to,copy\n"])
end
end
end

View File

@@ -33,15 +33,15 @@ module Helpers
"0" => { "0" => {
"database" => "shard0", "database" => "shard0",
"servers" => [ "servers" => [
["localhost", primary.port.to_i, "primary"], ["localhost", primary.port.to_s, "primary"],
["localhost", replica.port.to_i, "replica"], ["localhost", replica.port.to_s, "replica"],
] ]
}, },
}, },
"users" => { "0" => user.merge(config_user) } "users" => { "0" => user.merge(config_user) }
} }
} }
pgcat_cfg["general"]["port"] = pgcat.port.to_i pgcat_cfg["general"]["port"] = pgcat.port
pgcat.update_config(pgcat_cfg) pgcat.update_config(pgcat_cfg)
pgcat.start pgcat.start
@@ -92,8 +92,8 @@ module Helpers
"0" => { "0" => {
"database" => database, "database" => database,
"servers" => [ "servers" => [
["localhost", primary.port.to_i, "primary"], ["localhost", primary.port.to_s, "primary"],
["localhost", replica.port.to_i, "replica"], ["localhost", replica.port.to_s, "replica"],
] ]
}, },
}, },

View File

@@ -7,24 +7,10 @@ class PgInstance
  attr_reader :password
  attr_reader :database_name

-  def self.mass_takedown(databases)
-    raise StandardError "block missing" unless block_given?
-
-    databases.each do |database|
-      database.toxiproxy.toxic(:limit_data, bytes: 1).toxics.each(&:save)
-    end
-    sleep 0.1
-
-    yield
-  ensure
-    databases.each do |database|
-      database.toxiproxy.toxics.each(&:destroy)
-    end
-  end
-
  def initialize(port, username, password, database_name)
-    @original_port = port.to_i
+    @original_port = port
    @toxiproxy_port = 10000 + port.to_i
-    @port = @toxiproxy_port.to_i
+    @port = @toxiproxy_port
    @username = username
    @password = password
@@ -62,9 +48,9 @@ class PgInstance
  def take_down
    if block_given?
-      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 1).apply { yield }
+      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 5).apply { yield }
    else
-      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 1).toxics.each(&:save)
+      Toxiproxy[@toxiproxy_name].toxic(:limit_data, bytes: 5).toxics.each(&:save)
    end
  end
@@ -103,6 +89,6 @@ class PgInstance
  end

  def count_select_1_plus_2
-    with_connection { |c| c.async_exec("SELECT SUM(calls) FROM pg_stat_statements WHERE query LIKE '%SELECT $1 + $2%'")[0]["sum"].to_i }
+    with_connection { |c| c.async_exec("SELECT SUM(calls) FROM pg_stat_statements WHERE query = 'SELECT $1 + $2'")[0]["sum"].to_i }
  end
end

View File

@@ -34,32 +34,14 @@ module Helpers
"load_balancing_mode" => lb_mode, "load_balancing_mode" => lb_mode,
"primary_reads_enabled" => true, "primary_reads_enabled" => true,
"query_parser_enabled" => true, "query_parser_enabled" => true,
"query_parser_read_write_splitting" => true,
"automatic_sharding_key" => "data.id", "automatic_sharding_key" => "data.id",
"sharding_function" => "pg_bigint_hash", "sharding_function" => "pg_bigint_hash",
"shards" => { "shards" => {
"0" => { "database" => "shard0", "servers" => [["localhost", primary0.port.to_i, "primary"]] }, "0" => { "database" => "shard0", "servers" => [["localhost", primary0.port.to_s, "primary"]] },
"1" => { "database" => "shard1", "servers" => [["localhost", primary1.port.to_i, "primary"]] }, "1" => { "database" => "shard1", "servers" => [["localhost", primary1.port.to_s, "primary"]] },
"2" => { "database" => "shard2", "servers" => [["localhost", primary2.port.to_i, "primary"]] }, "2" => { "database" => "shard2", "servers" => [["localhost", primary2.port.to_s, "primary"]] },
}, },
"users" => { "0" => user }, "users" => { "0" => user }
"plugins" => {
"intercept" => {
"enabled" => true,
"queries" => {
"0" => {
"query" => "select current_database() as a, current_schemas(false) as b",
"schema" => [
["a", "text"],
["b", "text"],
],
"result" => [
["${DATABASE}", "{public}"],
]
}
}
}
}
} }
} }
pgcat.update_config(pgcat_cfg) pgcat.update_config(pgcat_cfg)
@@ -100,7 +82,7 @@ module Helpers
"0" => { "0" => {
"database" => "shard0", "database" => "shard0",
"servers" => [ "servers" => [
["localhost", primary.port.to_i, "primary"] ["localhost", primary.port.to_s, "primary"]
] ]
}, },
}, },
@@ -119,7 +101,7 @@ module Helpers
    end
  end

-  def self.single_shard_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="info", pool_settings={})
+  def self.single_shard_setup(pool_name, pool_size, pool_mode="transaction", lb_mode="random", log_level="info")
    user = {
      "password" => "sharding_user",
      "pool_size" => pool_size,
@@ -135,7 +117,9 @@ module Helpers
    replica1 = PgInstance.new(8432, user["username"], user["password"], "shard0")
    replica2 = PgInstance.new(9432, user["username"], user["password"], "shard0")

-    pool_config = {
+    # Main proxy configs
+    pgcat_cfg["pools"] = {
+      "#{pool_name}" => {
        "default_role" => "any",
        "pool_mode" => pool_mode,
        "load_balancing_mode" => lb_mode,
@@ -146,21 +130,15 @@ module Helpers
"0" => { "0" => {
"database" => "shard0", "database" => "shard0",
"servers" => [ "servers" => [
["localhost", primary.port.to_i, "primary"], ["localhost", primary.port.to_s, "primary"],
["localhost", replica0.port.to_i, "replica"], ["localhost", replica0.port.to_s, "replica"],
["localhost", replica1.port.to_i, "replica"], ["localhost", replica1.port.to_s, "replica"],
["localhost", replica2.port.to_i, "replica"] ["localhost", replica2.port.to_s, "replica"]
] ]
}, },
}, },
"users" => { "0" => user } "users" => { "0" => user }
} }
pool_config = pool_config.merge(pool_settings)
# Main proxy configs
pgcat_cfg["pools"] = {
"#{pool_name}" => pool_config,
} }
pgcat_cfg["general"]["port"] = pgcat.port pgcat_cfg["general"]["port"] = pgcat.port
pgcat.update_config(pgcat_cfg) pgcat.update_config(pgcat_cfg)

View File

@@ -1,10 +1,8 @@
 require 'pg'
-require 'json'
-require 'tempfile'
+require 'toml'
 require 'fileutils'
 require 'securerandom'

-class ConfigReloadFailed < StandardError; end
-
 class PgcatProcess
   attr_reader :port
   attr_reader :pid
@@ -20,7 +18,7 @@ class PgcatProcess
  end

  def initialize(log_level)
-    @env = {}
+    @env = {"RUST_LOG" => log_level}
    @port = rand(20000..32760)
    @log_level = log_level
    @log_filename = "/tmp/pgcat_log_#{SecureRandom.urlsafe_base64}.log"
@@ -32,7 +30,7 @@ class PgcatProcess
      '../../target/debug/pgcat'
    end

-    @command = "#{command_path} #{@config_filename} --log-level #{@log_level}"
+    @command = "#{command_path} #{@config_filename}"

    FileUtils.cp("../../pgcat.toml", @config_filename)
    cfg = current_config
@@ -48,34 +46,22 @@ class PgcatProcess
  def update_config(config_hash)
    @original_config = current_config
-    Tempfile.create('json_out', '/tmp') do |f|
-      f.write(config_hash.to_json)
-      f.flush
-      `cat #{f.path} | yj -jt > #{@config_filename}`
-    end
+    output_to_write = TOML::Generator.new(config_hash).body
+    output_to_write = output_to_write.gsub(/,\s*["|'](\d+)["|']\s*,/, ',\1,')
+    output_to_write = output_to_write.gsub(/,\s*["|'](\d+)["|']\s*\]/, ',\1]')
+    File.write(@config_filename, output_to_write)
  end

  def current_config
-    JSON.parse(`cat #{@config_filename} | yj -tj`)
-  end
-
-  def raw_config_file
-    File.read(@config_filename)
+    loadable_string = File.read(@config_filename)
+    loadable_string = loadable_string.gsub(/,\s*(\d+)\s*,/, ', "\1",')
+    loadable_string = loadable_string.gsub(/,\s*(\d+)\s*\]/, ', "\1"]')
+    TOML.load(loadable_string)
  end

  def reload_config
-    conn = PG.connect(admin_connection_string)
-    conn.async_exec("RELOAD")
-  rescue PG::ConnectionBad => e
-    errors = logs.split("Reloading config").last
-    errors = errors.gsub(/\e\[([;\d]+)?m/, '') # Remove color codes
-    errors = errors.
-      split("\n").select{|line| line.include?("ERROR") }.
-      map { |line| line.split("pgcat::config: ").last }
-    raise ConfigReloadFailed, errors.join("\n")
-  ensure
-    conn&.close
+    `kill -s HUP #{@pid}`
+    sleep 0.5
  end

  def start
@@ -126,16 +112,10 @@ class PgcatProcess
"postgresql://#{username}:#{password}@0.0.0.0:#{@port}/pgcat" "postgresql://#{username}:#{password}@0.0.0.0:#{@port}/pgcat"
end end
def connection_string(pool_name, username, password = nil, parameters: {}) def connection_string(pool_name, username, password = nil)
cfg = current_config cfg = current_config
user_idx, user_obj = cfg["pools"][pool_name]["users"].detect { |k, user| user["username"] == username } user_idx, user_obj = cfg["pools"][pool_name]["users"].detect { |k, user| user["username"] == username }
connection_string = "postgresql://#{username}:#{password || user_obj["password"]}@0.0.0.0:#{@port}/#{pool_name}" "postgresql://#{username}:#{password || user_obj["password"]}@0.0.0.0:#{@port}/#{pool_name}"
# Add the additional parameters to the connection string
parameter_string = parameters.map { |key, value| "#{key}=#{value}" }.join("&")
connection_string += "?#{parameter_string}" unless parameter_string.empty?
connection_string
end end
def example_connection_string def example_connection_string

View File

@@ -11,9 +11,9 @@ describe "Query Mirroing" do
  before do
    new_configs = processes.pgcat.current_config
    new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
-      [mirror_host, mirror_pg.port.to_i, 0],
-      [mirror_host, mirror_pg.port.to_i, 0],
-      [mirror_host, mirror_pg.port.to_i, 0],
+      [mirror_host, mirror_pg.port.to_s, "0"],
+      [mirror_host, mirror_pg.port.to_s, "0"],
+      [mirror_host, mirror_pg.port.to_s, "0"],
    ]
    processes.pgcat.update_config(new_configs)
    processes.pgcat.reload_config
@@ -31,8 +31,7 @@ describe "Query Mirroing" do
    runs.times { conn.async_exec("SELECT 1 + 2") }
    sleep 0.5
    expect(processes.all_databases.first.count_select_1_plus_2).to eq(runs)
-    # Allow some slack in mirroring successes
-    expect(mirror_pg.count_select_1_plus_2).to be > ((runs - 5) * 3)
+    expect(mirror_pg.count_select_1_plus_2).to eq(runs * 3)
  end

  context "when main server connection is closed" do
@@ -43,9 +42,9 @@ describe "Query Mirroing" do
      new_configs = processes.pgcat.current_config
      new_configs["pools"]["sharded_db"]["idle_timeout"] = 5000 + i
      new_configs["pools"]["sharded_db"]["shards"]["0"]["mirrors"] = [
-        [mirror_host, mirror_pg.port.to_i, 0],
-        [mirror_host, mirror_pg.port.to_i, 0],
-        [mirror_host, mirror_pg.port.to_i, 0],
+        [mirror_host, mirror_pg.port.to_s, "0"],
+        [mirror_host, mirror_pg.port.to_s, "0"],
+        [mirror_host, mirror_pg.port.to_s, "0"],
      ]
      processes.pgcat.update_config(new_configs)
      processes.pgcat.reload_config

View File

@@ -221,7 +221,7 @@ describe "Miscellaneous" do
      conn.close
    end

-    it "Does not send RESET ALL unless necessary" do
+    it "Does not send DISCARD ALL unless necessary" do
      10.times do
        conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
        conn.async_exec("SET SERVER ROLE to 'primary'")
@@ -229,7 +229,7 @@ describe "Miscellaneous" do
        conn.close
      end

-      expect(processes.primary.count_query("RESET ALL")).to eq(0)
+      expect(processes.primary.count_query("DISCARD ALL")).to eq(0)

      10.times do
        conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
@@ -239,19 +239,7 @@ describe "Miscellaneous" do
        conn.close
      end

-      expect(processes.primary.count_query("RESET ALL")).to eq(10)
+      expect(processes.primary.count_query("DISCARD ALL")).to eq(10)
    end
-
-    it "Resets server roles correctly" do
-      10.times do
-        conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
-        conn.async_exec("SET SERVER ROLE to 'primary'")
-        conn.async_exec("SELECT 1")
-        conn.async_exec("SET statement_timeout to 5000")
-        conn.close
-      end
-
-      expect(processes.primary.count_query("RESET ROLE")).to eq(10)
-    end
  end
@@ -273,7 +261,7 @@ describe "Miscellaneous" do
    end
  end

-  it "Does not send RESET ALL unless necessary" do
+  it "Does not send DISCARD ALL unless necessary" do
    10.times do
      conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
      conn.async_exec("SET SERVER ROLE to 'primary'")
      conn.close
conn.close conn.close
end end
expect(processes.primary.count_query("RESET ALL")).to eq(0) expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
10.times do 10.times do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
@@ -292,32 +280,8 @@ describe "Miscellaneous" do
conn.close conn.close
end end
expect(processes.primary.count_query("RESET ALL")).to eq(10) expect(processes.primary.count_query("DISCARD ALL")).to eq(10)
end end
it "Respects tracked parameters on startup" do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user", parameters: { "application_name" => "my_pgcat_test" }))
expect(conn.async_exec("SHOW application_name")[0]["application_name"]).to eq("my_pgcat_test")
conn.close
end
it "Respect tracked parameter on set statemet" do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
conn.async_exec("SET application_name to 'my_pgcat_test'")
expect(conn.async_exec("SHOW application_name")[0]["application_name"]).to eq("my_pgcat_test")
end
it "Ignore untracked parameter on set statemet" do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
orignal_statement_timeout = conn.async_exec("SHOW statement_timeout")[0]["statement_timeout"]
conn.async_exec("SET statement_timeout to 1500")
expect(conn.async_exec("SHOW statement_timeout")[0]["statement_timeout"]).to eq(orignal_statement_timeout)
end
end end
context "transaction mode with transactions" do context "transaction mode with transactions" do
@@ -331,7 +295,7 @@ describe "Miscellaneous" do
conn.async_exec("COMMIT") conn.async_exec("COMMIT")
conn.close conn.close
end end
expect(processes.primary.count_query("RESET ALL")).to eq(0) expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
10.times do 10.times do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user")) conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
@@ -341,30 +305,7 @@ describe "Miscellaneous" do
conn.async_exec("COMMIT") conn.async_exec("COMMIT")
conn.close conn.close
end end
expect(processes.primary.count_query("RESET ALL")).to eq(0) expect(processes.primary.count_query("DISCARD ALL")).to eq(0)
end
end
context "server cleanup disabled" do
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 1, "transaction", "random", "info", { "cleanup_server_connections" => false }) }
it "will not clean up connection state" do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
processes.primary.reset_stats
conn.async_exec("SET statement_timeout TO 1000")
conn.close
expect(processes.primary.count_query("RESET ALL")).to eq(0)
end
it "will not clean up prepared statements" do
conn = PG::connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
processes.primary.reset_stats
conn.async_exec("PREPARE prepared_q (int) AS SELECT $1")
conn.close
expect(processes.primary.count_query("RESET ALL")).to eq(0)
end end
end end
end end
@@ -374,6 +315,7 @@ describe "Miscellaneous" do
   before do
     current_configs = processes.pgcat.current_config
     correct_idle_client_transaction_timeout = current_configs["general"]["idle_client_in_transaction_timeout"]
+    puts(current_configs["general"]["idle_client_in_transaction_timeout"])
     current_configs["general"]["idle_client_in_transaction_timeout"] = 0


@@ -1,214 +0,0 @@
require_relative 'spec_helper'
describe 'Prepared statements' do
let(:pool_size) { 5 }
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", pool_size) }
let(:prepared_statements_cache_size) { 100 }
let(:server_round_robin) { false }
before do
new_configs = processes.pgcat.current_config
new_configs["general"]["server_round_robin"] = server_round_robin
new_configs["pools"]["sharded_db"]["prepared_statements_cache_size"] = prepared_statements_cache_size
new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = pool_size
processes.pgcat.update_config(new_configs)
processes.pgcat.reload_config
end
context 'when trying prepared statements' do
it 'allows unparameterized statements to succeed' do
conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
conn2 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
prepared_query = "SELECT 1"
# prepare query on server 1 and client 1
conn1.prepare('statement1', prepared_query)
conn1.exec_prepared('statement1')
conn2.transaction do
# Claim server 1 with client 2
conn2.exec("SELECT 2")
# Client 1 now runs the prepared query, and it's automatically
# prepared on server 2
conn1.prepare('statement2', prepared_query)
conn1.exec_prepared('statement2')
# Client 2 now prepares the same query that was already
# prepared on server 1, and PgCat reuses that already
# prepared query for this different client.
conn2.prepare('statement3', prepared_query)
conn2.exec_prepared('statement3')
end
ensure
conn1.close if conn1
conn2.close if conn2
end
it 'allows parameterized statements to succeed' do
conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
conn2 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
prepared_query = "SELECT $1"
# prepare query on server 1 and client 1
conn1.prepare('statement1', prepared_query)
conn1.exec_prepared('statement1', [1])
conn2.transaction do
# Claim server 1 with client 2
conn2.exec("SELECT 2")
# Client 1 now runs the prepared query, and it's automatically
# prepared on server 2
conn1.prepare('statement2', prepared_query)
conn1.exec_prepared('statement2', [1])
# Client 2 now prepares the same query that was already
# prepared on server 1, and PgCat reuses that already
# prepared query for this different client.
conn2.prepare('statement3', prepared_query)
conn2.exec_prepared('statement3', [1])
end
ensure
conn1.close if conn1
conn2.close if conn2
end
end
context 'when trying large packets' do
it "works with large parse" do
conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
long_string = "1" * 4096 * 10
prepared_query = "SELECT '#{long_string}'"
# prepare query on server 1 and client 1
conn1.prepare('statement1', prepared_query)
result = conn1.exec_prepared('statement1')
# assert result matches long_string
expect(result.getvalue(0, 0)).to eq(long_string)
ensure
conn1.close if conn1
end
it "works with large bind" do
conn1 = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
long_string = "1" * 4096 * 10
prepared_query = "SELECT $1::text"
# prepare query on server 1 and client 1
conn1.prepare('statement1', prepared_query)
result = conn1.exec_prepared('statement1', [long_string])
# assert result matches long_string
expect(result.getvalue(0, 0)).to eq(long_string)
ensure
conn1.close if conn1
end
end
context 'when statement cache is smaller than the set of unique statements' do
let(:prepared_statements_cache_size) { 1 }
let(:pool_size) { 1 }
it "evicts all but 1 statement from the server cache" do
conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
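# Five distinct statements against a cache of size 1: each new PREPARE
# should evict the previously cached statement on the server side.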
5.times do |i|
prepared_query = "SELECT '#{i}'"
conn.prepare("statement#{i}", prepared_query)
result = conn.exec_prepared("statement#{i}")
expect(result.getvalue(0, 0)).to eq(i.to_s)
end
# Check number of prepared statements (expected: 1)
n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
expect(n_statements).to eq(1)
end
end
context 'when statement cache is larger than the set of unique statements' do
let(:pool_size) { 1 }
it "does not evict any of the statements from the cache" do
# cache size is the default for these specs (100), larger than the 5 unique statements
conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
5.times do |i|
prepared_query = "SELECT '#{i}'"
conn.prepare("statement#{i}", prepared_query)
result = conn.exec_prepared("statement#{i}")
expect(result.getvalue(0, 0)).to eq(i.to_s)
end
# Check number of prepared statements (expected: 5)
n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
expect(n_statements).to eq(5)
end
end
context 'when preparing the same query' do
let(:prepared_statements_cache_size) { 5 }
let(:pool_size) { 5 }
it "reuses statement cache when there are different statement names on the same connection" do
conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
10.times do |i|
statement_name = "statement_#{i}"
conn.prepare(statement_name, 'SELECT $1::int')
conn.exec_prepared(statement_name, [1])
end
# Check number of prepared statements (expected: 1)
n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
expect(n_statements).to eq(1)
end
it "reuses statement cache when there are different statement names on different connections" do
10.times do |i|
conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
statement_name = "statement_#{i}"
conn.prepare(statement_name, 'SELECT $1::int')
conn.exec_prepared(statement_name, [1])
end
# Check number of prepared statements (expected: 1)
conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
n_statements = conn.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
expect(n_statements).to eq(1)
end
end
context 'when reloading config' do
let(:pool_size) { 1 }
it "test_reload_config" do
conn = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
# prepare query
conn.prepare('statement1', 'SELECT 1')
conn.exec_prepared('statement1')
# Reload config which triggers pool recreation
new_configs = processes.pgcat.current_config
new_configs["pools"]["sharded_db"]["prepared_statements_cache_size"] = prepared_statements_cache_size + 1
processes.pgcat.update_config(new_configs)
processes.pgcat.reload_config
# check that we're starting with no prepared statements on the server
conn_check = PG.connect(processes.pgcat.connection_string('sharded_db', 'sharding_user'))
n_statements = conn_check.exec("SELECT count(*) FROM pg_prepared_statements").getvalue(0, 0).to_i
expect(n_statements).to eq(0)
# still able to run prepared query
conn.exec_prepared('statement1')
end
end
end


@@ -7,11 +7,11 @@ describe "Sharding" do
   before do
     conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))

     # Setup the sharding data
     3.times do |i|
       conn.exec("SET SHARD TO '#{i}'")
-      conn.exec("DELETE FROM data WHERE id > 0")
+      conn.exec("DELETE FROM data WHERE id > 0") rescue nil
     end

     18.times do |i|
@@ -19,11 +19,10 @@ describe "Sharding" do
       conn.exec("SET SHARDING KEY TO '#{i}'")
       conn.exec("INSERT INTO data (id, value) VALUES (#{i}, 'value_#{i}')")
     end
-
-    conn.close
   end

   after do
     processes.all_databases.map(&:reset)
     processes.pgcat.shutdown
   end
@@ -49,148 +48,4 @@ describe "Sharding" do
      end
    end
  end
describe "no_shard_specified_behavior config" do
context "when default shard number is invalid" do
it "prevents config reload" do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
current_configs = processes.pgcat.current_config
current_configs["pools"]["sharded_db"]["default_shard"] = "shard_99"
processes.pgcat.update_config(current_configs)
expect { processes.pgcat.reload_config }.to raise_error(ConfigReloadFailed, /Invalid shard 99/)
end
end
end
describe "comment-based routing" do
context "when no configs are set" do
it "routes queries with a shard_id comment to the default shard" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
10.times { conn.async_exec("/* shard_id: 2 */ SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2)).to eq([10, 0, 0])
end
it "does not honor no_shard_specified_behavior directives" do
end
end
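# The next contexts are parameterized over both routing syntaxes: an explicit
# shard id comment and a sharding key comment that hashes to the target shard.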
[
["shard_id_regex", "/\\* the_shard_id: (\\d+) \\*/", "/* the_shard_id: 1 */"],
["sharding_key_regex", "/\\* the_sharding_key: (\\d+) \\*/", "/* the_sharding_key: 3 */"],
].each do |config_name, config_value, comment_to_use|
context "when #{config_name} config is set" do
let(:no_shard_specified_behavior) { nil }
before do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
current_configs = processes.pgcat.current_config
current_configs["pools"]["sharded_db"][config_name] = config_value
if no_shard_specified_behavior
current_configs["pools"]["sharded_db"]["default_shard"] = no_shard_specified_behavior
else
current_configs["pools"]["sharded_db"].delete("default_shard")
end
processes.pgcat.update_config(current_configs)
processes.pgcat.reload_config
end
it "routes queries with a shard_id comment to the correct shard" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
25.times { conn.async_exec("#{comment_to_use} SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2)).to eq([0, 25, 0])
end
context "when no_shard_specified_behavior config is set to random" do
let(:no_shard_specified_behavior) { "random" }
context "with no shard comment" do
it "sends queries to random shard" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
25.times { conn.async_exec("SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2).all?(&:positive?)).to be true
end
end
context "with a shard comment" do
it "honors the comment" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
25.times { conn.async_exec("#{comment_to_use} SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2)).to eq([0, 25, 0])
end
end
end
context "when no_shard_specified_behavior config is set to random_healthy" do
let(:no_shard_specified_behavior) { "random_healthy" }
context "with no shard comment" do
it "sends queries to random healthy shard" do
good_databases = [processes.all_databases[0], processes.all_databases[2]]
bad_database = processes.all_databases[1]
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
250.times { conn.async_exec("SELECT 99") }
bad_database.take_down do
250.times do
conn.async_exec("SELECT 99")
rescue PG::ConnectionBad => e
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
end
end
# Routes traffic away from bad shard
25.times { conn.async_exec("SELECT 1 + 2") }
expect(good_databases.map(&:count_select_1_plus_2).all?(&:positive?)).to be true
expect(bad_database.count_select_1_plus_2).to eq(0)
# Routes traffic to the bad shard if the shard_id is specified
25.times { conn.async_exec("#{comment_to_use} SELECT 1 + 2") }
bad_database = processes.all_databases[1]
expect(bad_database.count_select_1_plus_2).to eq(25)
end
end
context "with a shard comment" do
it "honors the comment" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
25.times { conn.async_exec("#{comment_to_use} SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2)).to eq([0, 25, 0])
end
end
end
context "when no_shard_specified_behavior config is set to shard_x" do
let(:no_shard_specified_behavior) { "shard_2" }
context "with no shard comment" do
it "sends queries to the specified shard" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
25.times { conn.async_exec("SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2)).to eq([0, 0, 25])
end
end
context "with a shard comment" do
it "honors the comment" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
25.times { conn.async_exec("#{comment_to_use} SELECT 1 + 2") }
expect(processes.all_databases.map(&:count_select_1_plus_2)).to eq([0, 25, 0])
end
end
end
end
end
end
end


@@ -1,406 +0,0 @@
# frozen_string_literal: true
require 'open3'
require_relative 'spec_helper'
describe "Stats" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 10) }
let(:pgcat_conn_str) { processes.pgcat.connection_string("sharded_db", "sharding_user") }
after do
processes.all_databases.map(&:reset)
processes.pgcat.shutdown
end
describe "SHOW STATS" do
context "clients connect and make one query" do
it "updates *_query_time and *_wait_time" do
connections = Array.new(3) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
Thread.new { c.async_exec("SELECT pg_sleep(0.25)") }
end
sleep(1)
connections.map(&:close)
# wait for averages to be calculated, we shouldn't do this too often
sleep(15.5)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW STATS")[0]
admin_conn.close
expect(results["total_query_time"].to_i).to be_within(200).of(750)
expect(results["avg_query_time"].to_i).to be_within(50).of(250)
expect(results["total_wait_time"].to_i).to_not eq(0)
expect(results["avg_wait_time"].to_i).to_not eq(0)
end
end
end
describe "SHOW POOLS" do
context "bad credentials" do
it "does not change any stats" do
bad_password_url = URI(pgcat_conn_str)
bad_password_url.password = "wrong"
expect { PG::connect("#{bad_password_url.to_s}?application_name=bad_password") }.to raise_error(PG::ConnectionBad)
sleep(1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("1")
end
end
context "bad database name" do
it "does not change any stats" do
bad_db_url = URI(pgcat_conn_str)
bad_db_url.path = "/wrong_db"
expect { PG::connect("#{bad_db_url.to_s}?application_name=bad_db") }.to raise_error(PG::ConnectionBad)
sleep(1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("1")
end
end
context "client connects but issues no queries" do
it "only affects cl_idle stats" do
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
before_test = admin_conn.async_exec("SHOW POOLS")[0]["sv_idle"]
connections = Array.new(20) { PG::connect(pgcat_conn_str) }
sleep(1)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("20")
expect(results["sv_idle"]).to eq(before_test)
connections.map(&:close)
sleep(1.1)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_idle cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq(before_test)
end
end
context "clients connect and make one query" do
it "only affects cl_idle, sv_idle stats" do
connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
Thread.new { c.async_exec("SELECT pg_sleep(2.5)") }
end
sleep(1.1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_waiting cl_cancel_req sv_idle sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_active"]).to eq("5")
expect(results["sv_active"]).to eq("5")
sleep(3)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("5")
expect(results["sv_idle"]).to eq("5")
connections.map(&:close)
sleep(1)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("5")
end
end
context "client connects and opens a transaction and closes connection uncleanly" do
it "produces correct statistics" do
connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
Thread.new do
c.async_exec("BEGIN")
c.async_exec("SELECT pg_sleep(0.01)")
c.close
end
end
sleep(1.1)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["sv_idle"]).to eq("5")
end
end
context "client fail to checkout connection from the pool" do
it "counts clients as idle" do
new_configs = processes.pgcat.current_config
new_configs["general"]["connect_timeout"] = 500
new_configs["general"]["ban_time"] = 1
new_configs["general"]["shutdown_timeout"] = 1
new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = 1
processes.pgcat.update_config(new_configs)
processes.pgcat.reload_config
threads = []
connections = Array.new(5) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1)") rescue PG::SystemError }
end
sleep(2)
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("5")
expect(results["sv_idle"]).to eq("1")
threads.map(&:join)
connections.map(&:close)
end
end
context "clients connects and disconnect normally" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
it 'shows the same number of clients before and after' do
clients_before = clients_connected_to_pool(processes: processes)
threads = []
connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT 1") rescue nil }
end
clients_between = clients_connected_to_pool(processes: processes)
expect(clients_before).not_to eq(clients_between)
connections.each(&:close)
clients_after = clients_connected_to_pool(processes: processes)
expect(clients_before).to eq(clients_after)
end
end
context "clients connects and disconnect abruptly" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 10) }
it 'shows the same number of clients before and after' do
threads = []
connections = Array.new(2) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT 1") }
end
clients_before = clients_connected_to_pool(processes: processes)
random_string = (0...8).map { (65 + rand(26)).chr }.join
connection_string = "#{pgcat_conn_str}?application_name=#{random_string}"
faulty_client = Process.spawn("psql -Atx #{connection_string} >/dev/null")
sleep(1)
# psql starts two processes and we only know the pid of the parent;
# this ensures both are killed
`pkill -9 -f '#{random_string}'`
Process.wait(faulty_client)
clients_after = clients_connected_to_pool(processes: processes)
expect(clients_before).to eq(clients_after)
end
end
context "clients overwhelm server pools" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
it "cl_waiting is updated to show it" do
threads = []
connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") }
end
sleep(1.1) # Allow time for stats to update
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_cancel_req sv_idle sv_used sv_tested sv_login].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["maxwait"]).to eq("1")
expect(results["cl_waiting"]).to eq("2")
expect(results["cl_active"]).to eq("2")
expect(results["sv_active"]).to eq("2")
sleep(2.5) # Allow time for stats to update
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_active cl_waiting cl_cancel_req sv_active sv_used sv_tested sv_login maxwait].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
expect(results["cl_idle"]).to eq("4")
expect(results["sv_idle"]).to eq("2")
threads.map(&:join)
connections.map(&:close)
end
it "show correct max_wait" do
threads = []
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
connections = Array.new(4) { PG::connect("#{pgcat_conn_str}?application_name=one_query") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") rescue nil }
end
sleep(1.1)
results = admin_conn.async_exec("SHOW POOLS")[0]
# Value is only reported when there are clients waiting
expect(results["maxwait"]).to eq("1")
expect(results["maxwait_us"].to_i).to be_within(20_000).of(100_000)
sleep(2.5) # Allow time for stats to update
results = admin_conn.async_exec("SHOW POOLS")[0]
# no clients are waiting so value is 0
expect(results["maxwait"]).to eq("0")
expect(results["maxwait_us"]).to eq("0")
connections.map(&:close)
threads.map(&:join)
end
end
end
describe "SHOW CLIENTS" do
it "reports correct number and application names" do
conn_str = processes.pgcat.connection_string("sharded_db", "sharding_user")
connections = Array.new(20) { |i| PG::connect("#{conn_str}?application_name=app#{i % 5}") }
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
sleep(1) # Wait for stats to be updated
results = admin_conn.async_exec("SHOW CLIENTS")
expect(results.count).to eq(21) # 20 application clients plus the admin connection
expect(results.select { |c| c["application_name"] == "app3" || c["application_name"] == "app4" }.count).to eq(8)
expect(results.select { |c| c["database"] == "pgcat" }.count).to eq(1)
connections[0..5].map(&:close)
sleep(1) # Wait for stats to be updated
results = admin_conn.async_exec("SHOW CLIENTS")
expect(results.count).to eq(15)
connections[6..].map(&:close)
sleep(1) # Wait for stats to be updated
expect(admin_conn.async_exec("SHOW CLIENTS").count).to eq(1)
admin_conn.close
end
it "reports correct number of queries and transactions" do
conn_str = processes.pgcat.connection_string("sharded_db", "sharding_user")
connections = Array.new(2) { |i| PG::connect("#{conn_str}?application_name=app#{i}") }
connections.each do |c|
c.async_exec("SELECT 1")
c.async_exec("SELECT 2")
c.async_exec("SELECT 3")
c.async_exec("BEGIN")
c.async_exec("SELECT 4")
c.async_exec("SELECT 5")
c.async_exec("COMMIT")
end
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
sleep(1) # Wait for stats to be updated
results = admin_conn.async_exec("SHOW CLIENTS")
expect(results.count).to eq(3)
normal_client_results = results.reject { |r| r["database"] == "pgcat" }
expect(normal_client_results[0]["transaction_count"]).to eq("4")
expect(normal_client_results[1]["transaction_count"]).to eq("4")
expect(normal_client_results[0]["query_count"]).to eq("7")
expect(normal_client_results[1]["query_count"]).to eq("7")
admin_conn.close
connections.map(&:close)
end
context "when client has waited for a server" do
let(:processes) { Helpers::Pgcat.single_instance_setup("sharded_db", 2) }
it "shows correct maxwait" do
threads = []
connections = Array.new(3) { |i| PG::connect("#{pgcat_conn_str}?application_name=app#{i}") }
connections.each do |c|
threads << Thread.new { c.async_exec("SELECT pg_sleep(1.5)") rescue nil }
end
sleep(2.5) # Allow time for stats to update
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW CLIENTS")
normal_client_results = results.reject { |r| r["database"] == "pgcat" }
non_waiting_clients = normal_client_results.select { |c| c["maxwait"] == "0" }
waiting_clients = normal_client_results.select { |c| c["maxwait"].to_i > 0 }
expect(non_waiting_clients.count).to eq(2)
non_waiting_clients.each do |client|
expect(client["maxwait_us"].to_i).to be_between(0, 50_000)
end
expect(waiting_clients.count).to eq(1)
waiting_clients.each do |client|
expect(client["maxwait_us"].to_i).to be_within(200_000).of(500_000)
end
admin_conn.close
connections.map(&:close)
end
end
end
describe "Query Storm" do
context "when the proxy receives overwhelmingly large number of short quick queries" do
it "should not have lingering clients or active servers" do
new_configs = processes.pgcat.current_config
new_configs["general"]["connect_timeout"] = 500
new_configs["general"]["ban_time"] = 1
new_configs["general"]["shutdown_timeout"] = 1
new_configs["pools"]["sharded_db"]["users"]["0"]["pool_size"] = 1
processes.pgcat.update_config(new_configs)
processes.pgcat.reload_config
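# Hammer a pool of size 1 with 40 concurrent short queries; some clients
# are expected to hit the 500ms connect_timeout and error out.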
Array.new(40) do
Thread.new do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
conn.async_exec("SELECT pg_sleep(0.1)")
rescue PG::SystemError
ensure
conn.close
end
end.each(&:join)
sleep 1
admin_conn = PG::connect(processes.pgcat.admin_connection_string)
results = admin_conn.async_exec("SHOW POOLS")[0]
%w[cl_idle cl_waiting cl_cancel_req sv_used sv_tested sv_login].each do |s|
raise StandardError, "Field #{s} was expected to be 0 but found to be #{results[s]}" if results[s] != "0"
end
admin_conn.close
end
end
end
end


@@ -1 +0,0 @@
target/

tests/rust/Cargo.lock (generated, 1322 lines): diff suppressed because it is too large.


@@ -1,10 +0,0 @@
[package]
name = "rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
sqlx = { version = "0.6.2", features = [ "runtime-tokio-rustls", "postgres", "json", "tls", "migrate", "time", "uuid", "ipnetwork"] }
tokio = { version = "1", features = ["full"] }


@@ -1,36 +0,0 @@
#[tokio::main]
async fn main() {
test_prepared_statements().await;
}
async fn test_prepared_statements() {
let pool = sqlx::postgres::PgPoolOptions::new()
.max_connections(5)
.connect("postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db")
.await
.unwrap();
let mut handles = Vec::new();
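    // Spawn five concurrent tasks so queries interleave across the pooled
    // server connections behind pgcat.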
for _ in 0..5 {
let pool = pool.clone();
let handle = tokio::task::spawn(async move {
for _ in 0..1000 {
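            // "SELECT one" references a nonexistent column, so the query may
            // error; the test only fails if the error mentions a prepared
            // statement, which would indicate a stale statement cache.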
match sqlx::query("SELECT one").fetch_all(&pool).await {
Ok(_) => (),
Err(err) => {
if err.to_string().contains("prepared statement") {
panic!("prepared statement error: {}", err);
}
}
}
}
});
handles.push(handle);
}
for handle in handles {
handle.await.unwrap();
}
}


@@ -1,40 +0,0 @@
#!/bin/bash
#
# Build an Ubuntu deb.
#
script_dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
deb_dir="/tmp/pgcat-build"
export PACKAGE_VERSION=${1:-"1.1.1"}
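# The version string ends up in the output filename and, via envsubst, in DEBIAN/control.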
if [[ $(arch) == "x86_64" ]]; then
export ARCH=amd64
else
export ARCH=arm64
fi
cd "$script_dir/.."
cargo build --release
rm -rf "$deb_dir"
mkdir -p "$deb_dir/DEBIAN"
mkdir -p "$deb_dir/usr/bin"
mkdir -p "$deb_dir/etc/systemd/system"
cp target/release/pgcat "$deb_dir/usr/bin/pgcat"
chmod +x "$deb_dir/usr/bin/pgcat"
cp pgcat.toml "$deb_dir/etc/pgcat.example.toml"
cp pgcat.service "$deb_dir/etc/systemd/system/pgcat.service"
(cat control | envsubst) > "$deb_dir/DEBIAN/control"
cp postinst "$deb_dir/DEBIAN/postinst"
cp postrm "$deb_dir/DEBIAN/postrm"
cp prerm "$deb_dir/DEBIAN/prerm"
chmod +x ${deb_dir}/DEBIAN/post*
chmod +x ${deb_dir}/DEBIAN/pre*
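# -z1 uses the fastest compression level; package size matters less than build time here.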
dpkg-deb \
--root-owner-group \
-z1 \
--build "$deb_dir" \
pgcat-${PACKAGE_VERSION}-ubuntu22.04-${ARCH}.deb