Mirror of https://github.com/postgresml/pgcat.git (synced 2026-03-23 17:36:28 +00:00)

Compare commits: sharded...mostafa_up (499 commits)
```diff
@@ -9,7 +9,51 @@ jobs:
     # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
     # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
     docker:
-      - image: cimg/rust:1.58.1
+      - image: ghcr.io/postgresml/pgcat-ci:latest
+        environment:
+          RUST_LOG: info
+          LLVM_PROFILE_FILE: /tmp/pgcat-%m-%p.profraw
+          RUSTC_BOOTSTRAP: 1
+          CARGO_INCREMENTAL: 0
+          RUSTFLAGS: "-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Cinstrument-coverage"
+          RUSTDOCFLAGS: "-Cpanic=abort"
+      - image: postgres:14
+        command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
+        environment:
+          POSTGRES_USER: postgres
+          POSTGRES_DB: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
+      - image: postgres:14
+        command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
+        environment:
+          POSTGRES_USER: postgres
+          POSTGRES_DB: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
+      - image: postgres:14
+        command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
+        environment:
+          POSTGRES_USER: postgres
+          POSTGRES_DB: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
+      - image: postgres:14
+        command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
+        environment:
+          POSTGRES_USER: postgres
+          POSTGRES_DB: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
+
+      - image: postgres:14
+        command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements"]
+        environment:
+          POSTGRES_USER: postgres
+          POSTGRES_DB: postgres
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
+
     # Add steps to the job
     # See: https://circleci.com/docs/2.0/configuration-reference/#steps
     steps:
@@ -17,11 +61,17 @@ jobs:
       - restore_cache:
           key: cargo-lock-2-{{ checksum "Cargo.lock" }}
       - run:
-          name: "Build"
-          command: "cargo build"
+          name: "Lint"
+          command: "cargo fmt --check"
       - run:
-          name: "Test"
-          command: "cargo test"
+          name: "Clippy"
+          command: "cargo clippy --all --all-targets -- -Dwarnings"
+      - run:
+          name: "Tests"
+          command: "cargo clean && cargo build && cargo test && bash .circleci/run_tests.sh && .circleci/generate_coverage.sh"
+      - store_artifacts:
+          path: /tmp/cov
+          destination: coverage-data
       - save_cache:
           key: cargo-lock-2-{{ checksum "Cargo.lock" }}
           paths:
@@ -34,4 +84,4 @@ jobs:
 workflows:
   build:
     jobs:
       - build
```
.circleci/generate_coverage.sh (new executable file, 15 lines)

```bash
#!/bin/bash

# inspired by https://doc.rust-lang.org/rustc/instrument-coverage.html#tips-for-listing-the-binaries-automatically
TEST_OBJECTS=$( \
  for file in $(cargo test --no-run 2>&1 | grep "target/debug/deps/pgcat-[[:alnum:]]\+" -o); \
  do \
    printf "%s %s " --object $file; \
  done \
)

rust-profdata merge -sparse /tmp/pgcat-*.profraw -o /tmp/pgcat.profdata

bash -c "rust-cov export -ignore-filename-regex='rustc|registry' -Xdemangler=rustfilt -instr-profile=/tmp/pgcat.profdata $TEST_OBJECTS --object ./target/debug/pgcat --format lcov > ./lcov.info"

genhtml lcov.info --title "PgCat Code Coverage" --css-file ./cov-style.css --no-function-coverage --highlight --ignore-errors source --legend --output-directory /tmp/cov --prefix $(pwd)
```
.circleci/pgcat.toml (new file, 158 lines)

```toml
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable prometheus exporter or not.
enable_prometheus_exporter = true

# Port at which prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 1000

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep connection available for immediate re-use, without running a healthcheck query on it
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# If we should log client connections
log_client_connections = false

# If we should log client disconnections
log_client_disconnections = false

# Reload config automatically if it changes.
autoreload = 15000

# TLS
tls_certificate = ".circleci/server.cert"
tls_private_key = ".circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat)
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
admin_password = "admin_pass"

# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as database name when connecting
# For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db"
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"
prepared_statements_cache_size = 500

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Credentials for users that may connect to this cluster
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"


[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
query_parser_read_write_splitting = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"
prepared_statements_cache_size = 500

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
```
.circleci/run_tests.sh (new file, 183 lines)

```bash
#!/bin/bash

set -e
set -o xtrace

# non-zero exit code if we provide bad configs
(! ./target/debug/pgcat "fake_configs" 2>/dev/null)

# Start PgCat with a particular log level
# for inspection.
function start_pgcat() {
    kill -s SIGINT $(pgrep pgcat) || true
    RUST_LOG=${1} ./target/debug/pgcat .circleci/pgcat.toml &
    sleep 1
}

# Setup the database with shards and user
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f tests/sharding/query_routing_setup.sql
PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f tests/sharding/query_routing_setup.sql

PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard0 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard1 -i
PGPASSWORD=sharding_user pgbench -h 127.0.0.1 -U sharding_user shard2 -i

# Start Toxiproxy
kill -9 $(pgrep toxiproxy) || true
LOG_LEVEL=error toxiproxy-server &
sleep 1

# Create a database at port 5433, forward it to Postgres
toxiproxy-cli create -l 127.0.0.1:5433 -u 127.0.0.1:5432 postgres_replica

start_pgcat "info"

# Check that prometheus is running
curl --fail localhost:9930/metrics

export PGPASSWORD=sharding_user
export PGDATABASE=sharded_db

# pgbench test
pgbench -U sharding_user -i -h 127.0.0.1 -p 6432
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol simple -f tests/pgbench/simple.sql
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended

# COPY TO STDOUT test
psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'COPY (SELECT * FROM pgbench_accounts LIMIT 15) TO STDOUT;' > /dev/null

# Query cancellation test
(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Pause/resume test.
# Running benches before, during, and after pause/resume.
pgbench -U sharding_user -t 500 -c 2 -h 127.0.0.1 -p 6432 --protocol extended &
BENCH_ONE=$!
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'PAUSE sharded_db,sharding_user'
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol extended &
BENCH_TWO=$!
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RESUME sharded_db,sharding_user'
wait ${BENCH_ONE}
wait ${BENCH_TWO}

# Reload pool (closing unused server connections)
PGPASSWORD=admin_pass psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD'

(psql -U sharding_user -h 127.0.0.1 -p 6432 -c 'SELECT pg_sleep(50)' || true) &
sleep 1
killall psql -s SIGINT

# Sharding insert
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_insert.sql

# Sharding select
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_select.sql > /dev/null

# Replica/primary selection & more sharding tests
psql -U sharding_user -e -h 127.0.0.1 -p 6432 -f tests/sharding/query_routing_test_primary_replica.sql > /dev/null

# Statement timeout tests
sed -i 's/statement_timeout = 0/statement_timeout = 100/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config
sleep 0.2

# This should timeout
(! psql -U sharding_user -e -h 127.0.0.1 -p 6432 -c 'select pg_sleep(0.5)')

# Disable statement timeout
sed -i 's/statement_timeout = 100/statement_timeout = 0/' .circleci/pgcat.toml
kill -SIGHUP $(pgrep pgcat) # Reload config again

#
# Integration tests and ActiveRecord tests
#
cd tests/ruby
sudo bundle install
bundle exec ruby tests.rb --format documentation || exit 1
bundle exec rspec *_spec.rb --format documentation || exit 1
cd ../..

#
# Python tests
# These tests will start and stop the pgcat server so it will need to be restarted after the tests
#
pip3 install -r tests/python/requirements.txt
pytest || exit 1


#
# Go tests
# Starts its own pgcat server
#
pushd tests/go
/usr/local/go/bin/go test || exit 1
popd

start_pgcat "info"

#
# Rust tests
#
cd tests/rust
cargo run
cd ../../

# Admin tests
export PGPASSWORD=admin_pass
psql -U admin_user -e -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW STATS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'RELOAD' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW CONFIG' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW LISTS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW POOLS' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW VERSION' > /dev/null
psql -U admin_user -h 127.0.0.1 -p 6432 -d pgbouncer -c "SET client_encoding TO 'utf8'" > /dev/null # will ignore
(! psql -U admin_user -e -h 127.0.0.1 -p 6432 -d random_db -c 'SHOW STATS' > /dev/null)
export PGPASSWORD=sharding_user

# Start PgCat in debug to demonstrate failover better
start_pgcat "trace"

# Add latency to the replica at port 5433 slightly above the healthcheck timeout
toxiproxy-cli toxic add -t latency -a latency=300 postgres_replica
sleep 1

# Note the failover in the logs
timeout 5 psql -U sharding_user -e -h 127.0.0.1 -p 6432 <<-EOF
SELECT 1;
SELECT 1;
SELECT 1;
EOF

# Remove latency
toxiproxy-cli toxic remove --toxicName latency_downstream postgres_replica

start_pgcat "info"

# Test session mode (and config reload)
sed -i '0,/simple_db/s/pool_mode = "transaction"/pool_mode = "session"/' .circleci/pgcat.toml

# Reload config test
kill -SIGHUP $(pgrep pgcat)

# Revert settings after reload. Makes test runs idempotent
sed -i '0,/simple_db/s/pool_mode = "session"/pool_mode = "transaction"/' .circleci/pgcat.toml

sleep 1

# Prepared statements that will only work in session mode
pgbench -U sharding_user -h 127.0.0.1 -p 6432 -t 500 -c 2 --protocol prepared

# Attempt clean shut down
killall pgcat -s SIGINT

# Allow for graceful shutdown
sleep 1

kill -9 $(pgrep toxiproxy)
sleep 1
```
.circleci/server.cert (new file, 21 lines)

```
-----BEGIN CERTIFICATE-----
MIIDazCCAlOgAwIBAgIUChIvUGFJGJe5EDch32rchqoxER0wDQYJKoZIhvcNAQEL
BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMjA2MjcyMjI2MDZaFw0yMjA3
MjcyMjI2MDZaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
AQUAA4IBDwAwggEKAoIBAQDdTwrBzV1v79faVckFvIn/9V4fypYs4vDi3X+h3wGn
AjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzSioOv5zsOAvObwrnzbtKSwfs3aP5g
eEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwvGn3A+SVCLyPMTNwnem48+ONh2F9u
FHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHYM5Hx9tzc+NVYdERPtaVcX8ycw1Eh
9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe72loreiwrECUOLAnAfp9Xqc+rMPP
aLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE9nQvksjnAgMBAAGjUzBRMB0GA1Ud
DgQWBBQLDtzexqjx7xPtUZuZB/angU9oSDAfBgNVHSMEGDAWgBQLDtzexqjx7xPt
UZuZB/angU9oSDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC/
mxY/a/WeLENVj2Gg9EUH0CKzfqeTey1mb6YfPGxzrD7oq1m0Vn2MmTbjZrJgh/Ob
QckO3ElF4kC9+6XP+iDPmabGpjeLgllBboT5l2aqnD1syMrf61WPLzgRzRfplYGy
cjBQDDKPu8Lu0QRMWU28tHYN0bMxJoCuXysGGX5WsuFnKCA6f/V+nycJJXxJH3eB
eLjTueD9/RE3OXhi6m8A29Q1E9AE5EF4uRxYXrr91BmYnk4aFvSmBxhUEzE12eSN
lHB/uSc0+Dp+UVmVr6wW8AQfd16UBA0BUf3kSW3aSvirYPYH0rXiOOpEJgOwOMnR
f5+XAbN1Y+3OsFz/ZmP9
-----END CERTIFICATE-----
```
.circleci/server.key (new file, 28 lines)

```
-----BEGIN RSA PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDdTwrBzV1v79fa
VckFvIn/9V4fypYs4vDi3X+h3wGnAjEh6mmizlKCwSwAam07D9Q5zKiXFrzNJqzS
ioOv5zsOAvObwrnzbtKSwfs3aP5geEh2clHCZYx9p06WszPcgSB5nTz1NeY4XAwv
Gn3A+SVCLyPMTNwnem48+ONh2F9uFHtSuIsEVvTjMlH09O7LjwJlODxy3HNv2JHY
M5Hx9tzc+NVYdERPtaVcX8ycw1Eh9hgGSgfaNM52/JfRMIDhENrsn0S1omRUtcJe
72loreiwrECUOLAnAfp9Xqc+rMPPaLA6ElzmYef1+ZEC0p6isCHPhxY5ESVhKYhE
9nQvksjnAgMBAAECggEAbnvddO9frFhivJ+DIhgEFQKcIOb0nigV9kx6QYehvYy8
lp/+aMb0Lk7d9r8rFQdL/icMK5GwZALg2KNKJvEbbF1Q3PwT9VHoUlgBYKJMDEFA
e9GKu7ASuVBjTZzdUUItwkkbe5eS/aQGeSWSjlpTnX0HNCFS72qRymK+scRhsAQf
ZoHyZHDslkvPR3Pos+sndWBYCDHag5/KoPhsMt1+5S9NQcOUHx9Ac0gLHjau3N+P
0FhODHFFGnnpyQvLvj6u3ZOR34ladMgoBglE0O3vPFhckn92EK4teeTWOsUMotiz
qM3QIJTOJjtiY6VDGY93bIa4pFvt7Zi4vIerenKt0QKBgQD/UMFqfevTAMrk10AC
bOa4+cM07ORY4ZwVj5ILhZn+8crDEEtBsUyuEU2FTINtnoEq1yGc/IXpsyS1BHjL
L1xSml5LN3jInbi8z5XQfY5Sj3VOMtwY6yD20jcdeDC44rz3nStXdkcMWxbTMapx
iOPsap5ciUKOMS7LyMidPEG/LQKBgQDd5vHgrLN0FBIIm+vZg6MEm4QyobstVp4l
7V/GZsdL+M8AQv1Rx+5wSUSWKomOIv5lglis7f6g0c9O7Qkr78/wzoyoKC2RRqPp
I90GjY2Iv22N4GIkRrDAgMZbkTitzIB6tbXEVeLAOh3frFJ8IwauRCOiXIjrZdJ4
FvV86+nU4wKBgQDdWTP2kWkMrBk7QOp7r9Jv+AmnLuHhtOdPQgOJ/bA++X2ik9PL
Bl3GY7XjpSwks1CkxZKcucmXjPp7/X6EGXFfI/owF82dkDADca0e7lufdERtIWb0
K5WOpz2lTPhgsiLGQfq7fw2lxqsJOnvcpqOD6gOVkmKjSDyb7F0RBJazmQKBgQDD
a8PQTcesjpBjLI3EfX1vbVY7ENu6zfFxDV+vZoxVh8UlQdm90AlYse3JIaUKnB7W
Xrihcucv0hZ0N6RAIW5LcFvHK7sVmdR4WbEpODhRGeTtcZJ8yBSZM898jKQRy2vK
pYRyaADNsWDlvujVkjMr/a40KrIaPQ3h3LZNUaYYaQKBgQD1x8A5S5SiE1cN1vFr
aACkmA2WqEDKKhUsUigJdwW6WB/B9kWlIlz/iV1H9uwBXtSIYG4VqCSTAvh0z4gX
Qu2SrdPm5PYnKzpdynpz78OnGdflD1RKWFGHItR6GN6tj/VmulO6mlFvT4jzBQ7j
+Hf8m2TcD4U3ksz3xw+YOD+cmA==
-----END RSA PRIVATE KEY-----
```
.dockerignore (new file, 6 lines)

```
target/
tests/
tracing/
.circleci/
.git/
dev/
```
.editorconfig (new file, 14 lines)

```ini
root = true

[*]
trim_trailing_whitespace = true
insert_final_newline = true

[*.rs]
indent_style = space
indent_size = 4
max_line_length = 120

[*.toml]
indent_style = space
indent_size = 2
```
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file, 38 lines)

```markdown
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
 - OS: [e.g. iOS]
 - Browser [e.g. chrome, safari]
 - Version [e.g. 22]

**Smartphone (please complete the following information):**
 - Device: [e.g. iPhone6]
 - OS: [e.g. iOS8.1]
 - Browser [e.g. stock browser, safari]
 - Version [e.g. 22]

**Additional context**
Add any other context about the problem here.
```
.github/ISSUE_TEMPLATE/feature_request.md (vendored, new file, 20 lines)

```markdown
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
```
.github/dependabot.yml (vendored, new file, 16 lines)

```yaml
version: 2
updates:
  - package-ecosystem: "cargo"
    directory: "/"
    schedule:
      interval: "daily"
      time: "04:00" # UTC
    labels:
      - "dependencies"
    commit-message:
      prefix: "chore(deps)"
    open-pull-requests-limit: 10
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
```
.github/workflows/build-and-push.yaml (vendored, new file, 68 lines)

```yaml
name: Build and Push

on:
  push:
    paths:
      - '!charts/**.md'
    branches:
      - main
    tags:
      - v*

env:
  registry: ghcr.io
  image-name: ${{ github.repository }}

jobs:
  build-and-push:
    runs-on: ubuntu-latest

    permissions:
      contents: read
      packages: write

    steps:
      - name: Checkout Repository
        uses: actions/checkout@v4

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Determine tags
        id: metadata
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.registry }}/${{ env.image-name }}
          tags: |
            type=sha,prefix=,format=long
            type=schedule
            type=ref,event=tag
            type=ref,event=branch
            type=ref,event=pr
            type=raw,value=latest,enable={{ is_default_branch }}

      - name: Log in to the Container registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.registry }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push ${{ env.image-name }}
        uses: docker/build-push-action@v6
        with:
          context: .
          platforms: linux/amd64,linux/arm64
          provenance: false
          push: true
          tags: ${{ steps.metadata.outputs.tags }}
          labels: ${{ steps.metadata.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

concurrency:
  group: ${{ github.ref }}
  cancel-in-progress: true
```
.github/workflows/chart-lint-test.yaml (vendored, new file, 50 lines)

```yaml
name: Lint and Test Charts

on:
  pull_request:
    paths:
      - charts/**
      - '!charts/**.md'
jobs:
  lint-test:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3.1.0
        with:
          fetch-depth: 0

      - name: Set up Helm
        uses: azure/setup-helm@v3
        with:
          version: v3.8.1

      # Python is required because `ct lint` runs Yamale (https://github.com/23andMe/Yamale) and
      # yamllint (https://github.com/adrienverge/yamllint) which require Python
      - name: Set up Python
        uses: actions/setup-python@v5.1.0
        with:
          python-version: 3.7

      - name: Set up chart-testing
        uses: helm/chart-testing-action@v2.2.1
        with:
          version: v3.5.1

      - name: Run chart-testing (list-changed)
        id: list-changed
        run: |
          changed=$(ct list-changed --config ct.yaml)
          if [[ -n "$changed" ]]; then
            echo "changed=true" >> $GITHUB_OUTPUT
          fi

      - name: Run chart-testing (lint)
        run: ct lint --config ct.yaml

      - name: Create kind cluster
        uses: helm/kind-action@v1.10.0
        if: steps.list-changed.outputs.changed == 'true'

      - name: Run chart-testing (install)
        run: ct install --config ct.yaml
```
.github/workflows/chart-release.yaml (vendored, new file, 40 lines)

```yaml
name: Release Charts

on:
  push:
    paths:
      - charts/**
      - '!**.md'
    branches:
      - main

jobs:
  release:
    runs-on: ubuntu-latest

    permissions:
      contents: write

    steps:
      - name: Checkout
        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
        with:
          version: v3.13.0

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@a917fd15b20e8b64b94d9158ad54cd6345335584 # v1.6.0
        with:
          charts_dir: charts
          config: cr.yaml
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
```
.github/workflows/generate-chart-readme.yaml (vendored, new file, 48 lines)

```yaml
name: '[CI/CD] Update README metadata'

on:
  pull_request_target:
    branches:
      - main
    paths:
      - 'charts/*/values.yaml'
# Remove all permissions by default
permissions: {}
jobs:
  update-readme-metadata:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Install readme-generator-for-helm
        run: npm install -g @bitnami/readme-generator-for-helm
      - name: Checkout
        uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608
        with:
          path: charts
          ref: ${{github.event.pull_request.head.ref}}
          repository: ${{github.event.pull_request.head.repo.full_name}}
          token: ${{ secrets.GITHUB_TOKEN }}
      - name: Execute readme-generator-for-helm
        env:
          DIFF_URL: "${{github.event.pull_request.diff_url}}"
          TEMP_FILE: "${{runner.temp}}/pr-${{github.event.number}}.diff"
        run: |
          # This request doesn't consume API calls.
          curl -Lkso $TEMP_FILE $DIFF_URL
          files_changed="$(sed -nr 's/[\-\+]{3} [ab]\/(.*)/\1/p' $TEMP_FILE | sort | uniq)"
          # Adding || true to avoid "Process exited with code 1" errors
          charts_dirs_changed="$(echo "$files_changed" | xargs dirname | grep -o "pgcat/[^/]*" | sort | uniq || true)"
          for chart in ${charts_dirs_changed}; do
            echo "Updating README.md for ${chart}"
            readme-generator --values "charts/${chart}/values.yaml" --readme "charts/${chart}/README.md" --schema "/tmp/schema.json"
          done
      - name: Push changes
        run: |
          # Push all the changes
          cd charts
          if git status -s | grep pgcat; then
            git config user.name "$GITHUB_ACTOR"
            git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
            git add . && git commit -am "Update README.md with readme-generator-for-helm" --signoff && git push
          fi
```
.github/workflows/publish-ci-docker-image.yml (vendored, new file, 20 lines)

```yaml
name: publish-ci-docker-image
on:
  push:
    branches: [ main ]
jobs:
  publish-ci-docker-image:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build CI Docker image
        run: |
          docker build . -f Dockerfile.ci --tag ghcr.io/postgresml/pgcat-ci:latest
          docker run ghcr.io/postgresml/pgcat-ci:latest
          docker push ghcr.io/postgresml/pgcat-ci:latest
```
.github/workflows/publish-deb-package.yml (vendored, new file, 59 lines)

```yaml
name: pgcat package (deb)

on:
  push:
    tags:
      - v*
  workflow_dispatch:
    inputs:
      packageVersion:
        default: "1.1.2-dev1"
jobs:
  build:
    strategy:
      max-parallel: 1
      fail-fast: false # Let the other job finish, or they can lock each other out
      matrix:
        os: ["buildjet-4vcpu-ubuntu-2204", "buildjet-4vcpu-ubuntu-2204-arm"]

    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v3
      - name: Set package version
        if: github.event_name == 'push' # For push event
        run: |
          TAG=${{ github.ref_name }}
          echo "packageVersion=${TAG#v}" >> "$GITHUB_ENV"
      - name: Set package version (manual dispatch)
        if: github.event_name == 'workflow_dispatch' # For manual dispatch
        run: echo "packageVersion=${{ github.event.inputs.packageVersion }}" >> "$GITHUB_ENV"
      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
      - name: Install dependencies
        env:
          DEBIAN_FRONTEND: noninteractive
          TZ: Etc/UTC
        run: |
          curl -sLO https://github.com/deb-s3/deb-s3/releases/download/0.11.4/deb-s3-0.11.4.gem
          sudo gem install deb-s3-0.11.4.gem
          dpkg-deb --version
      - name: Build and release package
        env:
          AWS_ACCESS_KEY_ID: ${{ vars.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          AWS_DEFAULT_REGION: ${{ vars.AWS_DEFAULT_REGION }}
        run: |
          if [[ $(arch) == "x86_64" ]]; then
            export ARCH=amd64
          else
            export ARCH=arm64
          fi

          bash utilities/deb.sh ${{ env.packageVersion }}

          deb-s3 upload \
            --lock \
            --bucket apt.postgresml.org \
            pgcat-${{ env.packageVersion }}-ubuntu22.04-${ARCH}.deb \
            --codename $(lsb_release -cs)
```
.gitignore (vendored, 13 changed lines)

```diff
@@ -1,2 +1,15 @@
+.idea
 /target
 *.deb
+.vscode
+*.profraw
+cov/
+lcov.info
+
+# Dev
+dev/.bash_history
+dev/cache
+!dev/cache/.keepme
+.venv
+**/__pycache__
+.bundle
```
.rustfmt.toml (new file, 2 lines)

```toml
edition = "2021"
hard_tabs = false
```
CONFIG.md (new file, 523 lines)

# PgCat Configurations

## `general` Section

### host
```
path: general.host
default: "0.0.0.0"
```

What IP to run on, 0.0.0.0 means accessible from everywhere.

### port
```
path: general.port
default: 6432
```

Port to run on; 6432 is the same port PgBouncer uses in this example.

### enable_prometheus_exporter
```
path: general.enable_prometheus_exporter
default: true
```

Whether to enable the Prometheus exporter or not.

### prometheus_exporter_port
```
path: general.prometheus_exporter_port
default: 9930
```

Port the Prometheus exporter listens on.
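Taken together, the four settings above form the minimal `[general]` preamble. A sketch using only the documented defaults:

```toml
[general]
host = "0.0.0.0"                   # listen on all interfaces
port = 6432                        # same default port as PgBouncer
enable_prometheus_exporter = true
prometheus_exporter_port = 9930    # metrics scraped at localhost:9930/metrics
```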
### connect_timeout
```
path: general.connect_timeout
default: 1000 # milliseconds
```

How long the client waits to obtain a server connection before aborting (ms).
This is similar to PgBouncer's `query_wait_timeout`.

### idle_timeout
```
path: general.idle_timeout
default: 30000 # milliseconds
```

How long an idle connection with a server is left open (ms).

### server_lifetime
```
path: general.server_lifetime
default: 86400000 # 24 hours
```

Max connection lifetime before it's closed, even if actively used.
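These three connection-lifetime knobs compose as shown below; the values are just the documented defaults:

```toml
[general]
connect_timeout = 1000      # ms to wait for a server connection before aborting
idle_timeout = 30000        # ms an idle server connection is kept open
server_lifetime = 86400000  # ms (24 hours) before a connection is closed regardless
```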
### server_round_robin
```
path: general.server_round_robin
default: false
```

Whether to use round robin for server selection or not.

### server_tls
```
path: general.server_tls
default: false
```

Whether to use TLS for server connections or not.

### verify_server_certificate
```
path: general.verify_server_certificate
default: false
```

Whether to verify the server certificate or not.

### verify_config
```
path: general.verify_config
default: true
```

Whether to verify the config or not.

### idle_client_in_transaction_timeout
```
path: general.idle_client_in_transaction_timeout
default: 0 # milliseconds
```

How long a client is allowed to be idle while in a transaction (ms).

### healthcheck_timeout
```
path: general.healthcheck_timeout
default: 1000 # milliseconds
```

How much time to give the health check query to return with a result (ms).

### healthcheck_delay
```
path: general.healthcheck_delay
default: 30000 # milliseconds
```

How long to keep a connection available for immediate re-use, without running a health check query on it (ms).

### shutdown_timeout
```
path: general.shutdown_timeout
default: 60000 # milliseconds
```

How much time to give clients during shutdown before forcibly killing client connections (ms).

### ban_time
```
path: general.ban_time
default: 60 # seconds
```

How long to ban a server if it fails a health check (seconds).
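A sketch of how the health-check and ban settings fit together, using the defaults documented above:

```toml
[general]
healthcheck_timeout = 1000  # ms a health check query may take before it counts as a failure
healthcheck_delay = 30000   # ms a connection may be reused without a fresh health check
ban_time = 60               # seconds a failing server is banned
```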
### log_client_connections
```
path: general.log_client_connections
default: false
```

Whether to log client connections.

### log_client_disconnections
```
path: general.log_client_disconnections
default: false
```

Whether to log client disconnections.

### autoreload
```
path: general.autoreload
default: 15000 # milliseconds
```

When set, PgCat automatically reloads its configuration at the specified interval (in milliseconds) if it detects changes in the configuration file. The default interval is 15000 milliseconds, or 15 seconds.

### worker_threads
```
path: general.worker_threads
default: 5
```

Number of worker threads the runtime will use.

### tcp_keepalives_idle
```
path: general.tcp_keepalives_idle
default: 5
```

Number of seconds of connection idleness to wait before sending a keepalive packet to the server.

### tcp_keepalives_count
```
path: general.tcp_keepalives_count
default: 5
```

Number of unacknowledged keepalive packets allowed before giving up and closing the connection.

### tcp_keepalives_interval
```
path: general.tcp_keepalives_interval
default: 5
```

Number of seconds between individual keepalive probes.

### tcp_user_timeout
```
path: general.tcp_user_timeout
default: 10000
```

A Linux-only parameter that defines the amount of time, in milliseconds, that transmitted data may remain unacknowledged (or buffered data may remain untransmitted due to a zero window size) before TCP forcibly disconnects.
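The TCP keepalive knobs map onto a config block like the following; the values are the documented defaults, and `tcp_user_timeout` applies only on Linux:

```toml
[general]
tcp_keepalives_idle = 5      # seconds idle before the first keepalive probe
tcp_keepalives_count = 5     # unacknowledged probes tolerated before disconnecting
tcp_keepalives_interval = 5  # seconds between probes
tcp_user_timeout = 10000     # ms unacknowledged data may linger (Linux only)
```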
### tls_certificate
```
path: general.tls_certificate
default: <UNSET>
example: "server.cert"
```

Path to the TLS certificate file to use for TLS connections.

### tls_private_key
```
path: general.tls_private_key
default: <UNSET>
example: "server.key"
```

Path to the TLS private key file to use for TLS connections.
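Client-facing TLS therefore needs only these two paths; the file names below are the example values from this document:

```toml
[general]
tls_certificate = "server.cert"  # example path from this document
tls_private_key = "server.key"
```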
### admin_username
|
||||||
|
```
|
||||||
|
path: general.admin_username
|
||||||
|
default: "admin_user"
|
||||||
|
```
|
||||||
|
|
||||||
|
User name to access the virtual administrative database (pgbouncer or pgcat)
|
||||||
|
Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc..
|
||||||
|
|
||||||
|
### admin_password
|
||||||
|
```
|
||||||
|
path: general.admin_password
|
||||||
|
default: "admin_pass"
|
||||||
|
```
|
||||||
|
|
||||||
|
Password to access the virtual administrative database
|
||||||
|
|
||||||
|
### auth_query

```
path: general.auth_query
default: <UNSET>
example: "SELECT $1"
```

Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be established using the database configured in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.

### auth_query_user

```
path: general.auth_query_user
default: <UNSET>
example: "sharding_user"
```

User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query specified in `auth_query`. The connection will be established using the database configured in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.

### auth_query_password

```
path: general.auth_query_password
default: <UNSET>
example: "sharding_user"
```

Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query specified in `auth_query`. The connection will be established using the database configured in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
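A sketch of the three settings together. The query shown, returning the username and password hash from `pg_shadow`, is one plausible shape for such a query, and the credentials are hypothetical; neither is mandated by the documentation above:

```
[general]
# Hypothetical auth query: look up the md5 hash for the user passed as $1.
auth_query = "SELECT usename, passwd FROM pg_shadow WHERE usename = $1"
auth_query_user = "auth_admin"          # hypothetical
auth_query_password = "auth_admin_pass" # hypothetical
```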
### dns_cache_enabled

```
path: general.dns_cache_enabled
default: false
```

When enabled, IP resolutions for server connections specified using hostnames will be cached and checked for changes every `dns_max_ttl` seconds. If the host resolution changes, connections to the old IP are closed gracefully and new connections start using the new IP.

### dns_max_ttl

```
path: general.dns_max_ttl
default: 30
```

Specifies how often (in seconds) cached IP addresses for servers are rechecked (see `dns_cache_enabled`).
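A sketch enabling the cache with a one-minute recheck interval:

```
[general]
dns_cache_enabled = true
dns_max_ttl = 60  # recheck cached server IPs every 60 seconds
```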
## `pools.<pool_name>` Section

### pool_mode

```
path: pools.<pool_name>.pool_mode
default: "transaction"
```

Pool mode (see the PgBouncer docs for more).

`session`: one server connection per connected client.
`transaction`: one server connection per client transaction.

### load_balancing_mode

```
path: pools.<pool_name>.load_balancing_mode
default: "random"
```

Load balancing mode.

`random`: selects a server at random.
`loc`: selects the server with the least outstanding busy connections.

### default_role

```
path: pools.<pool_name>.default_role
default: "any"
```

If the client doesn't specify, PgCat routes traffic to this role by default.

`any`: round-robin between primary and replicas.
`replica`: round-robin between replicas only, without touching the primary.
`primary`: all queries go to the primary unless otherwise specified.
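A sketch of a pool definition using these three settings; `mydb` is a hypothetical pool name:

```
[pools.mydb]
pool_mode = "transaction"
load_balancing_mode = "random"
default_role = "any"  # reads may land on the primary or any replica
```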
### prepared_statements_cache_size

```
path: general.prepared_statements_cache_size
default: 0
```

Size of the prepared statements cache. 0 means disabled.

TODO: update documentation

### query_parser_enabled

```
path: pools.<pool_name>.query_parser_enabled
default: true
```

If the query parser is enabled, we'll attempt to parse every incoming query to determine whether it's a read or a write. If it's a read query, we'll direct it to a replica. Otherwise, if it's a write, we'll direct it to the primary.

### primary_reads_enabled

```
path: pools.<pool_name>.primary_reads_enabled
default: true
```

If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for load balancing of read queries. Otherwise, the primary will only be used for write queries. The primary can always be explicitly selected with our custom protocol.
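For example, to keep the primary out of read load balancing, the two flags can be combined like this sketch (`mydb` is hypothetical):

```
[pools.mydb]
query_parser_enabled = true
primary_reads_enabled = false  # parsed reads go to replicas; the primary only takes writes
```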
### sharding_key_regex

```
path: pools.<pool_name>.sharding_key_regex
default: <UNSET>
example: '/\* sharding_key: (\d+) \*/'
```

Allows sharding commands to be passed as statement comments instead of separate commands, as shown in the sketch below. If unset, this functionality is disabled.

### sharding_function

```
path: pools.<pool_name>.sharding_function
default: "pg_bigint_hash"
```

The hashing function used to map a sharding key to a shard, in case you want a different one or have already built one this pooler should match. Current options:

`pg_bigint_hash`: PARTITION BY HASH (the Postgres hashing function).
`sha1`: a hashing function based on SHA1.
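A sketch combining the two settings: with the regex below, a client can prefix a statement with a comment such as `/* sharding_key: 5 */ SELECT ...`, and the captured key is hashed with the configured function to pick the shard.

```
[pools.mydb]
sharding_key_regex = '/\* sharding_key: (\d+) \*/'
sharding_function = "pg_bigint_hash"  # hash keys the way PARTITION BY HASH does
```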
### auth_query

```
path: pools.<pool_name>.auth_query
default: <UNSET>
example: "SELECT $1"
```

Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be established using the database configured in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.

### auth_query_user

```
path: pools.<pool_name>.auth_query_user
default: <UNSET>
example: "sharding_user"
```

User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query specified in `auth_query`. The connection will be established using the database configured in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.

### auth_query_password

```
path: pools.<pool_name>.auth_query_password
default: <UNSET>
example: "sharding_user"
```

Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query specified in `auth_query`. The connection will be established using the database configured in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.

### automatic_sharding_key

```
path: pools.<pool_name>.automatic_sharding_key
default: <UNSET>
example: "data.id"
```

Automatically parse this key out of queries and route queries to the right shard.

### idle_timeout

```
path: pools.<pool_name>.idle_timeout
default: 40000
```

The idle timeout can be overridden at the pool level.

### connect_timeout

```
path: pools.<pool_name>.connect_timeout
default: 3000
```

The connect timeout can be overridden at the pool level.
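A sketch of the pool-level settings above; the pool name and values are illustrative:

```
[pools.mydb]
automatic_sharding_key = "data.id"  # route queries filtering on data.id to the right shard
idle_timeout = 40000    # milliseconds
connect_timeout = 3000  # milliseconds
```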
## `pools.<pool_name>.users.<user_index>` Section

### username

```
path: pools.<pool_name>.users.<user_index>.username
default: "sharding_user"
```

PostgreSQL username used to authenticate the user and connect to the server if `server_username` is not set.

### password

```
path: pools.<pool_name>.users.<user_index>.password
default: "sharding_user"
```

PostgreSQL password used to authenticate the user and connect to the server if `server_password` is not set.

### server_username

```
path: pools.<pool_name>.users.<user_index>.server_username
default: <UNSET>
example: "another_user"
```

PostgreSQL username used to connect to the server.

### server_password

```
path: pools.<pool_name>.users.<user_index>.server_password
default: <UNSET>
example: "another_password"
```

PostgreSQL password used to connect to the server.

### pool_size

```
path: pools.<pool_name>.users.<user_index>.pool_size
default: 9
```

Maximum number of server connections that can be established for this user. The maximum number of connections from a single PgCat process to any database in the cluster is the sum of `pool_size` across all users.

### min_pool_size

```
path: pools.<pool_name>.users.<user_index>.min_pool_size
default: 0
```

Minimum number of idle server connections to retain for this pool.

### statement_timeout

```
path: pools.<pool_name>.users.<user_index>.statement_timeout
default: 0
```

Maximum query duration. Dangerous, but protects against databases that died in a non-obvious way. 0 means it is disabled.

### connect_timeout

```
path: pools.<pool_name>.users.<user_index>.connect_timeout
default: <UNSET> # milliseconds
```

How long the client waits to obtain a server connection before aborting (in milliseconds). This is similar to PgBouncer's `query_wait_timeout`. If unset, the globally defined `connect_timeout` is used.
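Putting the user-level settings together, a sketch of one user entry (index `0`) in a hypothetical `mydb` pool; all credentials are placeholders:

```
[pools.mydb.users.0]
username = "app_user"                 # credentials clients authenticate with
password = "app_password"
server_username = "another_user"      # optional: different credentials toward the server
server_password = "another_password"
pool_size = 9
min_pool_size = 1
statement_timeout = 0                 # disabled
```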
## `pools.<pool_name>.shards.<shard_index>` Section

### servers

```
path: pools.<pool_name>.shards.<shard_index>.servers
default: [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]
```

Array of servers in the shard; each server entry is an array of `[host, port, role]`.

### mirrors

```
path: pools.<pool_name>.shards.<shard_index>.mirrors
default: <UNSET>
example: [["1.2.3.4", 5432, 0], ["1.2.3.4", 5432, 1]]
```

Array of mirrors for the shard; each mirror entry is an array of `[host, port, index of server in servers array]`. Traffic hitting the server identified by the index will also be sent to the mirror.

### database

```
path: pools.<pool_name>.shards.<shard_index>.database
default: "shard0"
```

Database name (e.g. "postgres").
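A sketch of a shard definition with one mirrored server; the hosts and pool name are illustrative:

```
[pools.mydb.shards.0]
servers = [["10.0.0.1", 5432, "primary"], ["10.0.0.2", 5432, "replica"]]
# Mirror traffic hitting servers[0] (the primary) to another host.
mirrors = [["10.0.0.3", 5432, 0]]
database = "shard0"
```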
CONTRIBUTING.md:

```
@@ -1,6 +1,39 @@
+## Introduction
+
 Thank you for contributing! Just a few tips here:
 
-1. `cargo fmt` your code before opening up a PR
+1. `cargo fmt` and `cargo clippy` your code before opening up a PR
-2. Run the "test suite" (i.e. PgBench) to make sure everything still works.
+2. Run the test suite (e.g. `pgbench`) to make sure everything still works. The tests are in `.circleci/run_tests.sh`.
+3. Performance is important, make sure there are no regressions in your branch vs. `main`.
+
+## How to run the integration tests locally and iterate on them
+
+We have integration tests written in Ruby, Python, Go and Rust.
+Below are the steps to run them in a developer-friendly way that allows for quick iteration and turnaround.
+Hear me out, this should be easy: you open a shell into a container with all the necessary dependencies available, modify the test code, and immediately rerun your test in the interactive shell.
+
+Quite simply, make sure you have Docker installed, then run
+`./start_test_env.sh`
+
+That is it!
+
+Within this test environment you can modify the files in your favorite IDE and rerun the tests without having to bootstrap the entire environment again.
+
+Once the environment is ready, you can run the tests as follows:
+Ruby: `cd /app/tests/ruby && bundle exec ruby <test_name>.rb --format documentation`
+Python: `cd /app/ && pytest`
+Rust: `cd /app/tests/rust && cargo run`
+Go: `cd /app/tests/go && /usr/local/go/bin/go test`
+
+You can also rebuild PgCat directly within the environment, and the tests will run against the newly built binary.
+To rebuild PgCat, just run `cargo build` within the container under `/app`.
+
+
+
 Happy hacking!
+
+## TODOs
+
+See [Issues](https://github.com/levkk/pgcat/issues).
```
Cargo.lock (generated, 1929 lines changed): file diff suppressed because it is too large.

Cargo.toml (50 lines changed):
```
@@ -1,20 +1,60 @@
 [package]
 name = "pgcat"
-version = "0.1.0"
+version = "1.2.0"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
 tokio = { version = "1", features = ["full"] }
 bytes = "1"
 md-5 = "0.10"
-bb8 = "0.7"
+bb8 = "=0.8.6"
 async-trait = "0.1"
 rand = "0.8"
 chrono = "0.4"
 sha-1 = "0.10"
-toml = "0.5"
+toml = "0.7"
-serde = "1"
+serde = { version = "1", features = ["derive"] }
 serde_derive = "1"
 regex = "1"
+num_cpus = "1"
+once_cell = "1"
+sqlparser = { version = "0.41", features = ["visitor"] }
+log = "0.4"
+arc-swap = "1"
+parking_lot = "0.12.1"
+hmac = "0.12"
+sha2 = "0.10"
+base64 = "0.21"
+stringprep = "0.1"
+tokio-rustls = "0.24"
+rustls-pemfile = "1"
+http-body-util = "0.1.2"
+hyper = { version = "1.4.1", features = ["full"] }
+hyper-util = { version = "0.1.7", features = ["tokio"] }
+phf = { version = "0.11.1", features = ["macros"] }
+exitcode = "1.1.2"
+futures = "0.3"
+socket2 = { version = "0.4.7", features = ["all"] }
+nix = "0.26.2"
+atomic_enum = "0.2.0"
+postgres-protocol = "0.6.5"
+fallible-iterator = "0.2"
+pin-project = "1"
+webpki-roots = "0.23"
+rustls = { version = "0.21", features = ["dangerous_configuration"] }
+trust-dns-resolver = "0.22.0"
+tokio-test = "0.4.2"
+serde_json = "1"
+itertools = "0.10"
+clap = { version = "4.3.1", features = ["derive", "env"] }
+tracing = "0.1.37"
+tracing-subscriber = { version = "0.3.17", features = [
+    "json",
+    "env-filter",
+    "std",
+] }
+lru = "0.12.0"
+
+[target.'cfg(not(target_env = "msvc"))'.dependencies]
+jemallocator = "0.5.0"
```
Dockerfile (new file, 22 lines):

```
FROM rust:1.79.0-slim-bookworm AS builder

RUN apt-get update && \
    apt-get install -y build-essential

COPY . /app
WORKDIR /app
RUN cargo build --release

FROM debian:bookworm-slim
RUN apt-get update && apt-get install -o Dpkg::Options::=--force-confdef -yq --no-install-recommends \
    postgresql-client \
    # Clean up layer
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
    && truncate -s 0 /var/log/*log
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
CMD ["pgcat"]
STOPSIGNAL SIGINT
```
Dockerfile.ci (new file, 17 lines):

```
FROM cimg/rust:1.79.0
COPY --from=sclevine/yj /bin/yj /bin/yj
RUN /bin/yj -h
RUN sudo apt-get update && \
    sudo apt-get install -y \
    psmisc postgresql-contrib-14 postgresql-client-14 libpq-dev \
    ruby ruby-dev python3 python3-pip \
    lcov llvm-11 iproute2 && \
    sudo apt-get upgrade curl && \
    cargo install cargo-binutils rustfilt && \
    rustup component add llvm-tools-preview && \
    pip3 install psycopg2 && sudo gem install bundler && \
    wget -O /tmp/toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i /tmp/toxiproxy-2.4.0.deb
RUN wget -O /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    sudo tar -C /usr/local -xzf /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    rm /tmp/go1.21.3.linux-$(dpkg --print-architecture).tar.gz
```
Dockerfile.dev (new file, 25 lines):

```
FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef

RUN apt-get update && \
    apt-get install -y build-essential

WORKDIR /app

FROM chef AS planner
COPY . .
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder
COPY --from=planner /app/recipe.json recipe.json
# Build dependencies - this is the caching Docker layer!
RUN cargo chef cook --release --recipe-path recipe.json
# Build application in release mode, so the binary matches the COPY path below
COPY . .
RUN cargo build --release

FROM debian:bookworm-slim
COPY --from=builder /app/target/release/pgcat /usr/bin/pgcat
COPY --from=builder /app/pgcat.toml /etc/pgcat/pgcat.toml
WORKDIR /etc/pgcat
ENV RUST_LOG=info
CMD ["pgcat"]
```
LICENSE (694 lines changed): relicensed from the GNU GPL v3 to the MIT license.

@@ -1,674 +1,20 @@

[The full 674-line text of the GNU General Public License, version 3, was removed and replaced with the following MIT license text.]

Copyright (c) 2023 PgCat Contributors

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
|
||||||
|

README.md (396 lines changed)

## PgCat: Nextgen PostgreSQL Pooler

[![CircleCI](https://circleci.com/gh/postgresml/pgcat/tree/main.svg?style=svg)](https://circleci.com/gh/postgresml/pgcat/tree/main)
<a href="https://discord.gg/DmyJP3qJ7U" target="_blank">
  <img src="https://img.shields.io/discord/1013868243036930099" alt="Join our Discord!" />
</a>

PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.

## Features

| **Feature** | **Status** | **Comments** |
|-------------|------------|--------------|
| Transaction pooling | **Stable** | Identical to PgBouncer, with notable improvements for handling bad clients and abandoned transactions. |
| Session pooling | **Stable** | Identical to PgBouncer. |
| Multi-threaded runtime | **Stable** | Using the Tokio asynchronous runtime, the pooler takes advantage of multicore machines. |
| Load balancing of read queries | **Stable** | Queries are automatically load balanced between replicas and the primary. |
| Failover | **Stable** | Queries are automatically rerouted around broken replicas, validated by regular health checks. |
| Admin database statistics | **Stable** | Pooler statistics and administration via the `pgbouncer` and `pgcat` databases. |
| Prometheus statistics | **Stable** | Statistics are reported via an HTTP endpoint for Prometheus. |
| SSL/TLS | **Stable** | Clients can connect to the pooler using TLS. The pooler can connect to Postgres servers using TLS. |
| Client/Server authentication | **Stable** | Clients can connect using MD5 authentication, supported by `libpq` and all Postgres client drivers. PgCat can connect to Postgres using MD5 and SCRAM-SHA-256. |
| Live configuration reloading | **Stable** | Identical to PgBouncer; all settings can be reloaded dynamically (except `host` and `port`). |
| Auth passthrough | **Stable** | MD5 password authentication can be configured to use an `auth_query` so no cleartext passwords are needed in the config file. |
| Sharding using extended SQL syntax | **Experimental** | Clients can dynamically configure the pooler to route queries to specific shards. |
| Sharding using comment parsing/regex | **Experimental** | Clients can include shard information (sharding key, shard ID) in query comments. |
| Automatic sharding | **Experimental** | PgCat can parse queries, detect sharding keys automatically, and route queries to the correct shard. |
| Mirroring | **Experimental** | Mirror queries between multiple databases in order to test servers with realistic production traffic. |

## Status

PgCat is stable and used in production to serve hundreds of thousands of queries per second.

<table>
  <tr>
    <td>
      <a href="https://tech.instacart.com/adopting-pgcat-a-nextgen-postgres-proxy-3cf284e68c2f">
        <img src="./images/instacart.webp" height="70" width="auto">
      </a>
    </td>
    <td>
      <a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
        <img src="./images/postgresml.webp" height="70" width="auto">
      </a>
    </td>
    <td>
      <a href="https://onesignal.com">
        <img src="./images/one_signal.webp" height="70" width="auto">
      </a>
    </td>
  </tr>
  <tr>
    <td>
      <a href="https://tech.instacart.com/adopting-pgcat-a-nextgen-postgres-proxy-3cf284e68c2f">
        Instacart
      </a>
    </td>
    <td>
      <a href="https://postgresml.org/blog/scaling-postgresml-to-1-million-requests-per-second">
        PostgresML
      </a>
    </td>
    <td>
      OneSignal
    </td>
  </tr>
</table>

Some features remain experimental and are being actively developed. They are optional and can be enabled through configuration.

## Deployment

See `Dockerfile` for an example deployment using Docker. The pooler is configured to spawn 4 workers, so 4 CPUs are recommended for optimal performance. That setting can be adjusted to spawn as many (or as few) workers as needed.

A Docker image is available via `docker pull ghcr.io/postgresml/pgcat:latest`. See our [Github packages repository](https://github.com/postgresml/pgcat/pkgs/container/pgcat).

For a quick local example, use the Docker Compose environment provided:

```bash
docker-compose up

# In a new terminal:
PGPASSWORD=postgres psql -h 127.0.0.1 -p 6432 -U postgres -c 'SELECT 1'
```

### Config

See **[Configuration](https://github.com/levkk/pgcat/blob/main/CONFIG.md)**.
## Contributing

The project is being actively developed and looking for additional contributors and production deployments.

### Local development

1. Install Rust (latest stable will work great).
2. `cargo build --release` (to get better benchmarks).
3. Change the config in `pgcat.toml` to fit your setup (optional given the next step).
4. Install Postgres and run `psql -f tests/sharding/query_routing_setup.sql` (user/password may be required depending on your setup).
5. `RUST_LOG=info cargo run --release` and you're ready to go!

### Tests

When making substantial modifications to the protocol implementation, make sure to test them with pgbench:

```
pgbench -i -h 127.0.0.1 -p 6432 && \
pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol simple && \
pgbench -t 1000 -p 6432 -h 127.0.0.1 --protocol extended
```

See the [sharding README](./tests/sharding/README.md) for sharding logic testing.

Additionally, all features are tested with Ruby, Python, and Rust unit and integration tests.

Run `cargo test` to run the Rust unit tests.

Run the following commands to run the Ruby and Python integration tests:

```
cd tests/docker/
docker compose up --exit-code-from main # This will also produce a coverage report under ./cov/
```
### Docker-based local development

You can open a Docker development environment where it's easier to debug tests. Run the following command to spin it up:

```
./dev/script/console
```

This will open a terminal in an environment similar to that used in tests. In there, you can compile the pooler, run tests, do some debugging with the test environment, etc. Objects compiled inside the container (and bundled gems) are placed in `dev/cache` so they don't interfere with what you have on your machine.

## Usage

### Session mode

In session mode, a client talks to one server for the duration of the connection. Prepared statements, `SET`, and advisory locks are supported. In terms of supported features, there is very little, if any, difference between session mode and talking directly to the server.

To use session mode, change `pool_mode = "session"`.
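
For example, a minimal pool entry in `pgcat.toml` (a sketch; the pool name is illustrative, and the quoted pool-name form mirrors what the Helm chart's secret template later in this diff generates):

```toml
[pools."postgres"]
# One server connection per connected client, like PgBouncer's session mode.
pool_mode = "session"
```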

### Transaction mode

In transaction mode, a client talks to one server for the duration of a single transaction; once it's over, the server is returned to the pool. Prepared statements, `SET`, and advisory locks are not supported; the alternatives are `SET LOCAL` and `pg_advisory_xact_lock`, which are scoped to the transaction.

This mode is enabled by default.
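
A quick sketch of those transaction-scoped alternatives (plain Postgres SQL; the timeout value and lock key are just examples):

```sql
BEGIN;
-- SET LOCAL reverts automatically at COMMIT or ROLLBACK.
SET LOCAL statement_timeout = '5s';
-- Transaction-level advisory lock, released automatically when the transaction ends.
SELECT pg_advisory_xact_lock(42);
COMMIT;
```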

### Load balancing of read queries

All queries are load balanced against the configured servers using either the random or the least open connections algorithm. The most straightforward configuration example would be to put this pooler in front of several replicas and let it load balance all queries.

If the configuration includes a primary and replicas, queries can be separated with the built-in query parser. The query parser, implemented with the `sqlparser` crate, will interpret the query and route all `SELECT` queries to a replica, while all other queries, including explicit transactions, will be routed to the primary.
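
The algorithm is chosen per pool in `pgcat.toml` (a sketch; the pool name is illustrative, and the `random`/`loc` values mirror the Helm chart defaults documented later in this diff):

```toml
[pools."postgres"]
# "random" picks a server at random; "loc" picks the server
# with the least outstanding busy connections.
load_balancing_mode = "random"
```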

#### Query parser

The query parser will do its best to determine where the query should go, but sometimes that's not possible. In that case, the client can select which server it wants using this custom SQL syntax:

```sql
-- To talk to the primary for the duration of the next transaction:
SET SERVER ROLE TO 'primary';

-- To talk to the replica for the duration of the next transaction:
SET SERVER ROLE TO 'replica';

-- Let the query parser decide:
SET SERVER ROLE TO 'auto';

-- Pick any server at random:
SET SERVER ROLE TO 'any';

-- Reset to the default configured settings:
SET SERVER ROLE TO 'default';
```

The setting will persist until it's changed again or the client disconnects.

By default, all queries are routed to the first available server; the `default_role` setting controls this behavior.

### Failover

All servers are checked with a `;` (very fast) query before being given to a client. Additionally, server health is monitored with every client query it processes. If a server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban. Queries are routed to the remaining servers. If all servers become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.

The ban time can be changed with `ban_time`. The default is 60 seconds.
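
The relevant knobs live in the `[general]` section of `pgcat.toml` (a sketch; the values mirror the Helm chart defaults shown later in this diff):

```toml
[general]
ban_time = 60              # seconds a server stays banned after a failed health check
healthcheck_timeout = 1000 # milliseconds the health check query is given to return
healthcheck_delay = 30000  # milliseconds a connection is reused without a fresh health check
```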

### Sharding

We use the `PARTITION BY HASH` hashing function, the same as used by Postgres for declarative partitioning. This allows sharding the database using Postgres partitions and placing the partitions on different servers (shards). Both read and write queries can be routed to the shards using this pooler.
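
For reference, this is what the matching declarative partitioning looks like on the Postgres side (a minimal two-shard sketch; the table and column names are illustrative):

```sql
-- Postgres picks the partition as hash(id) % 2.
CREATE TABLE users (id BIGINT NOT NULL, email TEXT) PARTITION BY HASH (id);
CREATE TABLE users_0 PARTITION OF users FOR VALUES WITH (MODULUS 2, REMAINDER 0);
CREATE TABLE users_1 PARTITION OF users FOR VALUES WITH (MODULUS 2, REMAINDER 1);
```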

#### Extended syntax

To route queries to a particular shard, we use this custom SQL syntax:

```sql
-- To talk to a shard explicitly:
SET SHARD TO '1';

-- To let the pooler choose based on a value:
SET SHARDING KEY TO '1234';
```

The sharding key is hashed and the pooler selects a shard to use. The active shard will last until it's changed again or the client disconnects. By default, queries are routed to shard 0.

For the hash function implementation, see `src/sharding.rs` and `tests/sharding/partition_hash_test_setup.sql`.

##### ActiveRecord/Rails

```ruby
class User < ActiveRecord::Base
end

# Metadata will be fetched from shard 0
ActiveRecord::Base.establish_connection

# Grab a bunch of users from shard 1
User.connection.execute "SET SHARD TO '1'"
User.take(10)

# Using id as the sharding key
User.connection.execute "SET SHARDING KEY TO '1234'"
User.find_by_id(1234)

# Using geographical sharding
User.connection.execute "SET SERVER ROLE TO 'primary'"
User.connection.execute "SET SHARDING KEY TO '85'"
User.create(name: "test user", email: "test@example.com", zone_id: 85)

# Let the query parser figure out where the query should go.
# We are still on shard = hash(85) % shards.
User.connection.execute "SET SERVER ROLE TO 'auto'"
User.find_by_email("test@example.com")
```

##### Raw SQL

```sql
-- Grab a bunch of users from shard 1
SET SHARD TO '1';
SELECT * FROM users LIMIT 10;

-- Find by id
SET SHARDING KEY TO '1234';
SELECT * FROM users WHERE id = 1234;

-- Writing in a primary/replicas configuration
SET SERVER ROLE TO 'primary';
SET SHARDING KEY TO '85';
INSERT INTO users (name, email, zone_id) VALUES ('test user', 'test@example.com', 85);

SET SERVER ROLE TO 'auto'; -- let the query router figure out where the query should go
SELECT * FROM users WHERE email = 'test@example.com'; -- shard setting lasts until set again; we are reading from the primary
```

#### With comments

Issuing separate `SET` queries through the pooler adds a round trip and therefore latency. To reduce the impact, it's possible to include the sharding information inside SQL comments sent with the query. This is reasonably easy to implement with ORMs like [ActiveRecord](https://api.rubyonrails.org/classes/ActiveRecord/QueryMethods.html#method-i-annotate) and [SQLAlchemy](https://docs.sqlalchemy.org/en/20/core/events.html#sql-execution-and-connection-events).

```
/* shard_id: 5 */ SELECT * FROM foo WHERE id = 1234;

/* sharding_key: 1234 */ SELECT * FROM foo WHERE id = 1234;
```

#### Automatic query parsing

PgCat can use the `sqlparser` crate to parse SQL queries and extract the sharding key. This is configurable with the `automatic_sharding_key` setting. This feature is still experimental, but it's the ideal implementation for sharding, requiring no client modifications.
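
A configuration sketch; the setting name comes from the text above, but the `table.column` value format shown here is an assumption, so check `CONFIG.md` for the exact syntax:

```toml
[pools."postgres"]
# Column whose value is hashed to pick the shard (assumed format).
automatic_sharding_key = "users.id"
```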

### Statistics reporting

The stats are very similar to what PgBouncer reports, and the names are kept comparable. They are accessible by querying the admin database `pgcat` (and `pgbouncer`, for compatibility):

```
psql -h 127.0.0.1 -p 6432 -d pgbouncer -c 'SHOW DATABASES'
```
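
The same admin database accepts other PgBouncer-style commands; for example, `SHOW POOLS` (also mentioned in the Helm chart's default config below):

```
psql -h 127.0.0.1 -p 6432 -d pgcat -c 'SHOW POOLS'
```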

Additionally, Prometheus statistics are available at `/metrics` via HTTP.

We also have a [basic Grafana dashboard](https://github.com/postgresml/pgcat/blob/main/grafana_dashboard.json) based on the Prometheus metrics that you can import into Grafana and build on or use for monitoring.
||||||
vacuuming...
|
### Live configuration reloading
|
||||||
creating primary keys...
|
|
||||||
done.
|
The config can be reloaded by sending a `kill -s SIGHUP` to the process or by querying `RELOAD` to the admin database. All settings except the `host` and `port` can be reloaded without restarting the pooler, including sharding and replicas configurations.
|
||||||
Password:
|
|
||||||
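
Both reload paths, as shell commands (the `pgrep` pattern assumes a single local `pgcat` process):

```bash
# Signal the running process...
kill -s HUP $(pgrep pgcat)

# ...or ask the admin database to reload.
psql -h 127.0.0.1 -p 6432 -d pgcat -c 'RELOAD'
```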

### Mirroring

Mirroring allows routing queries to multiple databases at the same time. This is useful for prewarming replicas before placing them into the active configuration, or for testing different versions of Postgres with live traffic.

## License

PgCat is free and open source, released under the MIT license.

## Contributors

Many thanks to our amazing contributors!

<a href="https://github.com/postgresml/pgcat/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=postgresml/pgcat"/>
</a>

charts/pgcat/.helmignore (new file, 23 lines)

# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

charts/pgcat/Chart.yaml (new file, 8 lines)

apiVersion: v2
name: pgcat
description: A Helm chart for PgCat, a PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
maintainers:
  - name: Wildcard
    email: support@w6d.io
appVersion: "1.2.0"
version: 0.2.3
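
A minimal sketch of installing the chart from a checkout of this repository (the release name and namespace are illustrative):

```bash
helm install pgcat ./charts/pgcat --namespace pgcat --create-namespace
```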

charts/pgcat/templates/NOTES.txt (new file, 22 lines)

1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "pgcat.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
  You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "pgcat.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "pgcat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "pgcat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

charts/pgcat/templates/_config.tpl (new file, 3 lines)

{{/*
Configuration template definition
*/}}

charts/pgcat/templates/_helpers.tpl (new file, 62 lines)

{{/*
Expand the name of the chart.
*/}}
{{- define "pgcat.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "pgcat.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "pgcat.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "pgcat.labels" -}}
helm.sh/chart: {{ include "pgcat.chart" . }}
{{ include "pgcat.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "pgcat.selectorLabels" -}}
app.kubernetes.io/name: {{ include "pgcat.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "pgcat.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "pgcat.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

charts/pgcat/templates/deployment.yaml (new file, 66 lines)

apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "pgcat.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      annotations:
        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
        {{- with .Values.podAnnotations }}
        {{- toYaml . | nindent 8 }}
        {{- end }}
      labels:
        {{- include "pgcat.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.image.pullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "pgcat.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.containerSecurityContext | nindent 12 }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: pgcat
              containerPort: {{ .Values.configuration.general.port }}
              protocol: TCP
          livenessProbe:
            tcpSocket:
              port: pgcat
          readinessProbe:
            tcpSocket:
              port: pgcat
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - mountPath: /etc/pgcat
              name: config
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      volumes:
        - secret:
            defaultMode: 420
            secretName: {{ include "pgcat.fullname" . }}
          name: config

charts/pgcat/templates/ingress.yaml (new file, 61 lines)

{{- if .Values.ingress.enabled -}}
{{- $fullName := include "pgcat.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
  {{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
  ingressClassName: {{ .Values.ingress.className }}
  {{- end }}
  {{- if .Values.ingress.tls }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}
                port:
                  number: {{ $svcPort }}
              {{- else }}
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
              {{- end }}
          {{- end }}
    {{- end }}
{{- end }}

charts/pgcat/templates/secret.yaml (new file, 97 lines)

apiVersion: v1
kind: Secret
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
type: Opaque
stringData:
  pgcat.toml: |
    [general]
    host = {{ .Values.configuration.general.host | quote }}
    port = {{ .Values.configuration.general.port }}
    enable_prometheus_exporter = {{ .Values.configuration.general.enable_prometheus_exporter }}
    prometheus_exporter_port = {{ .Values.configuration.general.prometheus_exporter_port }}
    connect_timeout = {{ .Values.configuration.general.connect_timeout }}
    idle_timeout = {{ .Values.configuration.general.idle_timeout | int }}
    server_lifetime = {{ .Values.configuration.general.server_lifetime | int }}
    server_tls = {{ .Values.configuration.general.server_tls }}
    idle_client_in_transaction_timeout = {{ .Values.configuration.general.idle_client_in_transaction_timeout | int }}
    healthcheck_timeout = {{ .Values.configuration.general.healthcheck_timeout }}
    healthcheck_delay = {{ .Values.configuration.general.healthcheck_delay }}
    shutdown_timeout = {{ .Values.configuration.general.shutdown_timeout }}
    ban_time = {{ .Values.configuration.general.ban_time }}
    log_client_connections = {{ .Values.configuration.general.log_client_connections }}
    log_client_disconnections = {{ .Values.configuration.general.log_client_disconnections }}
    tcp_keepalives_idle = {{ .Values.configuration.general.tcp_keepalives_idle }}
    tcp_keepalives_count = {{ .Values.configuration.general.tcp_keepalives_count }}
    tcp_keepalives_interval = {{ .Values.configuration.general.tcp_keepalives_interval }}
    {{- if and (ne .Values.configuration.general.tls_certificate "-") (ne .Values.configuration.general.tls_private_key "-") }}
    tls_certificate = "{{ .Values.configuration.general.tls_certificate }}"
    tls_private_key = "{{ .Values.configuration.general.tls_private_key }}"
    {{- end }}
    admin_username = {{ .Values.configuration.general.admin_username | quote }}
    admin_password = {{ .Values.configuration.general.admin_password | quote }}
    {{- if and .Values.configuration.general.auth_query_user .Values.configuration.general.auth_query_password .Values.configuration.general.auth_query }}
    auth_query = {{ .Values.configuration.general.auth_query | quote }}
    auth_query_user = {{ .Values.configuration.general.auth_query_user | quote }}
    auth_query_password = {{ .Values.configuration.general.auth_query_password | quote }}
    {{- end }}

    {{- range $pool := .Values.configuration.pools }}

    ##
    ## pool for {{ $pool.name }}
    ##
    [pools.{{ $pool.name | quote }}]
    pool_mode = {{ default "transaction" $pool.pool_mode | quote }}
    load_balancing_mode = {{ default "random" $pool.load_balancing_mode | quote }}
    default_role = {{ default "any" $pool.default_role | quote }}
    prepared_statements_cache_size = {{ default 500 $pool.prepared_statements_cache_size }}
    query_parser_enabled = {{ default true $pool.query_parser_enabled }}
    query_parser_read_write_splitting = {{ default true $pool.query_parser_read_write_splitting }}
    primary_reads_enabled = {{ default true $pool.primary_reads_enabled }}
    sharding_function = {{ default "pg_bigint_hash" $pool.sharding_function | quote }}

    {{- range $index, $user := $pool.users }}

    ## pool {{ $pool.name }} user {{ $user.username | quote }}
    ##
    [pools.{{ $pool.name | quote }}.users.{{ $index }}]
    username = {{ $user.username | quote }}
    {{- if $user.password }}
    password = {{ $user.password | quote }}
    {{- else if and $user.passwordSecret.name $user.passwordSecret.key }}
    {{- $secret := (lookup "v1" "Secret" $.Release.Namespace $user.passwordSecret.name) }}
    {{- if $secret }}
    {{- $password := index $secret.data $user.passwordSecret.key | b64dec }}
    password = {{ $password | quote }}
    {{- end }}
    {{- end }}
    pool_size = {{ $user.pool_size }}
    statement_timeout = {{ default 0 $user.statement_timeout }}
    min_pool_size = {{ default 3 $user.min_pool_size }}
    {{- if $user.server_lifetime }}
    server_lifetime = {{ $user.server_lifetime }}
    {{- end }}
    {{- if and $user.server_username $user.server_password }}
    server_username = {{ $user.server_username | quote }}
    server_password = {{ $user.server_password | quote }}
    {{- end }}
    {{- end }}

    {{- range $index, $shard := $pool.shards }}

    ## pool {{ $pool.name }} database {{ $shard.database }}
    ##
    [pools.{{ $pool.name | quote }}.shards.{{ $index }}]
    {{- if gt (len $shard.servers) 0 }}
    servers = [
      {{- range $server := $shard.servers }}
      [ {{ $server.host | quote }}, {{ $server.port }}, {{ $server.role | quote }} ],
      {{- end }}
    ]
    {{- end }}
    database = {{ $shard.database | quote }}
    {{- end }}
    {{- end }}

charts/pgcat/templates/service.yaml (new file, 15 lines)

apiVersion: v1
kind: Service
metadata:
  name: {{ include "pgcat.fullname" . }}
  labels:
    {{- include "pgcat.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: pgcat
      protocol: TCP
      name: pgcat
  selector:
    {{- include "pgcat.selectorLabels" . | nindent 4 }}
charts/pgcat/templates/serviceaccount.yaml
Normal file
12
charts/pgcat/templates/serviceaccount.yaml
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{{- if .Values.serviceAccount.create -}}
|
||||||
|
apiVersion: v1
|
||||||
|
kind: ServiceAccount
|
||||||
|
metadata:
|
||||||
|
name: {{ include "pgcat.serviceAccountName" . }}
|
||||||
|
labels:
|
||||||
|
{{- include "pgcat.labels" . | nindent 4 }}
|
||||||
|
{{- with .Values.serviceAccount.annotations }}
|
||||||
|
annotations:
|
||||||
|
{{- toYaml . | nindent 4 }}
|
||||||
|
{{- end }}
|
||||||
|
{{- end }}
|
||||||

charts/pgcat/values.yaml (new file, 374 lines)

## String to partially override pgcat.fullname template (will maintain the release name)
## @param nameOverride String to partially override common.names.fullname
##
nameOverride: ""

## String to fully override pgcat.fullname template
## @param fullnameOverride String to fully override common.names.fullname
##
fullnameOverride: ""

## Number of PgCat replicas to deploy
## @param replicaCount Number of PgCat replicas to deploy
replicaCount: 1

## PgCat image version
## ref: https://hub.docker.com/r/bitnami/kubewatch/tags/
##
## @param image.registry PgCat image registry
## @param image.repository PgCat image name
## @param image.tag PgCat image tag
## @param image.pullPolicy PgCat image pull policy
## @param image.pullSecrets Specify docker-registry secret names as an array
image:
  repository: ghcr.io/postgresml/pgcat
  # Overrides the image tag whose default is the chart appVersion.
  tag: "main"
  ## Specify an imagePullPolicy
  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
  ##
  pullPolicy: IfNotPresent
  ## Optionally specify an array of imagePullSecrets.
  ## Secrets must be manually created in the namespace.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
  ## Example:
  ## pullSecrets:
  ##   - myRegistryKeySecretName
  ##
  pullSecrets: []

## Specifies whether a ServiceAccount should be created
##
## @param serviceAccount.create Enable the creation of a ServiceAccount for PgCat pods
## @param serviceAccount.name Name of the created ServiceAccount
##
serviceAccount:
  ## Specifies whether a service account should be created
  create: true
  ## Annotations to add to the service account
  annotations: {}
  ## The name of the service account to use.
  ## If not set and create is true, a name is generated using the fullname template
  name: ""

## Annotations for server pods.
## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
##
## @param podAnnotations Annotations for PgCat pods
##
podAnnotations: {}

## PgCat pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
##
## @param podSecurityContext.enabled Enabled PgCat pods' Security Context
## @param podSecurityContext.fsGroup Set PgCat pod's Security Context fsGroup
##
podSecurityContext: {}
  # fsGroup: 2000

## PgCat containers' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
##
## @param containerSecurityContext.enabled Enabled PgCat containers' Security Context
## @param containerSecurityContext.runAsUser Set PgCat container's Security Context runAsUser
## @param containerSecurityContext.runAsNonRoot Set PgCat container's Security Context runAsNonRoot
##
containerSecurityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

## PgCat service
##
## @param service.type PgCat service type
## @param service.port PgCat service port
service:
  type: ClusterIP
  port: 6432

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

## PgCat resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
## @skip resources Optional description
## @disabled-param resources.limits The resources limits for the PgCat container
## @disabled-param resources.requests The requested resources for the PgCat container
##
resources:
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  limits: {}
    # cpu: 100m
    # memory: 128Mi
  requests: {}
    # cpu: 100m
    # memory: 128Mi

## Node labels for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/user-guide/node-selection/
##
## @param nodeSelector Node labels for pod assignment
##
nodeSelector: {}

## Tolerations for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
## @param tolerations Tolerations for pod assignment
##
tolerations: []

## Affinity for pod assignment. Evaluated as a template.
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
##
## @param affinity Affinity for pod assignment
##
affinity: {}

## PgCat configuration
## @param configuration [object]
configuration:
  ## General pooler settings
  ## @param [object]
  general:
    ## @param configuration.general.host What IP to run on; 0.0.0.0 means accessible from everywhere.
    host: "0.0.0.0"

    ## @param configuration.general.port Port to run on; the same as PgBouncer is used in this example.
    port: 6432

    ## @param configuration.general.enable_prometheus_exporter Whether to enable the prometheus exporter or not.
    enable_prometheus_exporter: false

    ## @param configuration.general.prometheus_exporter_port Port the prometheus exporter listens on.
    prometheus_exporter_port: 9930

    # @param configuration.general.connect_timeout How long to wait before aborting a server connection (ms).
    connect_timeout: 5000

    # How long an idle connection with a server is left open (ms).
    idle_timeout: 30000 # milliseconds

    # Max connection lifetime before it's closed, even if actively used.
    server_lifetime: 86400000 # 24 hours

    # Whether to use TLS for server connections or not.
    server_tls: false

    # How long a client is allowed to be idle while in a transaction (ms).
    idle_client_in_transaction_timeout: 0 # milliseconds

    # @param configuration.general.healthcheck_timeout How much time to give the `SELECT 1` health check query to return with a result (ms).
    healthcheck_timeout: 1000

    # @param configuration.general.healthcheck_delay How long to keep a connection available for immediate re-use, without running a healthcheck query on it.
    healthcheck_delay: 30000

    # @param configuration.general.shutdown_timeout How much time to give clients during shutdown before forcibly killing client connections (ms).
    shutdown_timeout: 60000

    # @param configuration.general.ban_time For how long to ban a server if it fails a health check (seconds).
    ban_time: 60 # seconds

    # @param configuration.general.log_client_connections Whether to log client connections.
    log_client_connections: false

    # @param configuration.general.log_client_disconnections Whether to log client disconnections.
    log_client_disconnections: false

    # TLS
    # tls_certificate: "server.cert"
    # tls_private_key: "server.key"
    tls_certificate: "-"
    tls_private_key: "-"

    # Credentials to access the virtual administrative database (pgbouncer or pgcat).
    # Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
    admin_username: "postgres"
    admin_password: "postgres"

    # Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
    # established using the database configured in the pool. This parameter is inherited by every pool and
    # can be redefined in pool configuration.
    auth_query: null

    # User to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
    #
    # @param configuration.general.auth_query_user
    auth_query_user: null

    # Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending
    # the query specified in auth_query. The connection will be established using the database configured
    # in the pool. This parameter is inherited by every pool and can be redefined in pool configuration.
    #
    # @param configuration.general.auth_query_password
    auth_query_password: null

    # Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
    tcp_keepalives_idle: 5

    # Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
    tcp_keepalives_count: 5

    # Number of seconds between keepalive packets.
    tcp_keepalives_interval: 5

  ## pool
  ## configs are structured as pools.<pool_name>
  ## the pool_name is what clients use as the database name when connecting.
  ## For the example below a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded"
  ## @param [object]
  pools:
    [{
      name: "simple", pool_mode: "transaction",
      users: [{username: "user", password: "pass", pool_size: 5, statement_timeout: 0}],
      shards: [{
        servers: [{host: "postgres", port: 5432, role: "primary"}],
        database: "postgres"
      }]
    }]
|
# - ## default values
|
||||||
|
# ##
|
||||||
|
# ##
|
||||||
|
# ##
|
||||||
|
# name: "db"
|
||||||
|
|
||||||
|
# ## Pool mode (see PgBouncer docs for more).
|
||||||
|
# ## session: one server connection per connected client
|
||||||
|
# ## transaction: one server connection per client transaction
|
||||||
|
# ## @param configuration.poolsPostgres.pool_mode
|
||||||
|
# pool_mode: "transaction"
|
||||||
|
|
||||||
|
# ## Load balancing mode
|
||||||
|
# ## `random` selects the server at random
|
||||||
|
# ## `loc` selects the server with the least outstanding busy connections
|
||||||
|
# ##
|
||||||
|
# ## @param configuration.poolsPostgres.load_balancing_mode
|
||||||
|
# load_balancing_mode: "random"
|
||||||
|
|
||||||
|
# ## Prepared statements cache size.
|
||||||
|
# ## TODO: update documentation
|
||||||
|
# ##
|
||||||
|
# ## @param configuration.poolsPostgres.prepared_statements_cache_size
|
||||||
|
# prepared_statements_cache_size: 500
|
||||||
|
|
||||||
|
# ## If the client doesn't specify, route traffic to
|
||||||
|
# ## this role by default.
|
||||||
|
# ##
|
||||||
|
# ## any: round-robin between primary and replicas,
|
||||||
|
# ## replica: round-robin between replicas only without touching the primary,
|
||||||
|
# ## primary: all queries go to the primary unless otherwise specified.
|
||||||
|
# ## @param configuration.poolsPostgres.default_role
|
||||||
|
# default_role: "any"
|
||||||
|
|
||||||
|
# ## Query parser. If enabled, we'll attempt to parse
|
||||||
|
# ## every incoming query to determine if it's a read or a write.
|
||||||
|
# ## If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
|
||||||
|
# ## we'll direct it to the primary.
|
||||||
|
# ## @param configuration.poolsPostgres.query_parser_enabled
|
||||||
|
# query_parser_enabled: true
|
||||||
|
|
||||||
|
# ## If the query parser is enabled and this setting is enabled, we'll attempt to
|
||||||
|
# ## infer the role from the query itself.
|
||||||
|
# ## @param configuration.poolsPostgres.query_parser_read_write_splitting
|
||||||
|
# query_parser_read_write_splitting: true
|
||||||
|
|
||||||
|
# ## If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
|
||||||
|
# ## load balancing of read queries. Otherwise, the primary will only be used for write
|
||||||
|
# ## queries. The primary can always be explicitly selected with our custom protocol.
|
||||||
|
# ## @param configuration.poolsPostgres.primary_reads_enabled
|
||||||
|
# primary_reads_enabled: true
|
||||||
|
|
||||||
|
# ## So what if you wanted to implement a different hashing function,
|
||||||
|
# ## or you've already built one and you want this pooler to use it?
|
||||||
|
# ##
|
||||||
|
# ## Current options:
|
||||||
|
# ##
|
||||||
|
# ## pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
|
||||||
|
# ## sha1: A hashing function based on SHA1
|
||||||
|
# ##
|
||||||
|
# ## @param configuration.poolsPostgres.sharding_function
|
||||||
|
# sharding_function: "pg_bigint_hash"
|
||||||
|
|
||||||
|
# ## Credentials for users that may connect to this cluster
|
||||||
|
# ## @param users [array]
|
||||||
|
# ## @param users[0].username Name of the env var (required)
|
||||||
|
# ## @param users[0].password Value for the env var (required) leave empty to use existing secret see passwordSecret.name and passwordSecret.key
|
||||||
|
# ## @param users[0].passwordSecret.name Name of the secret containing the password
|
||||||
|
# ## @param users[0].passwordSecret.key Key in the secret containing the password
|
||||||
|
# ## @param users[0].pool_size Maximum number of server connections that can be established for this user
|
||||||
|
# ## @param users[0].statement_timeout Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
|
||||||
|
# users: []
|
||||||
|
# # - username: "user"
|
||||||
|
# # password: "pass"
|
||||||
|
# #
|
||||||
|
# # # The maximum number of connection from a single Pgcat process to any database in the cluster
|
||||||
|
# # # is the sum of pool_size across all users.
|
||||||
|
# # pool_size: 9
|
||||||
|
# #
|
||||||
|
# # # Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
|
||||||
|
# # statement_timeout: 0
|
||||||
|
# #
|
||||||
|
# # # PostgreSQL username used to connect to the server.
|
||||||
|
# # server_username: "postgres
|
||||||
|
# #
|
||||||
|
# # # PostgreSQL password used to connect to the server.
|
||||||
|
# # server_password: "postgres
|
||||||
|
|
||||||
|
# ## @param shards [array]
|
||||||
|
# ## @param shards[0].server[0].host Host for this shard
|
||||||
|
# ## @param shards[0].server[0].port Port for this shard
|
||||||
|
# ## @param shards[0].server[0].role Role for this shard
|
||||||
|
# shards: []
|
||||||
|
# # [ host, port, role ]
|
||||||
|
# # - servers:
|
||||||
|
# # - host: "postgres"
|
||||||
|
# # port: 5432
|
||||||
|
# # role: "primary"
|
||||||
|
# # - host: "postgres"
|
||||||
|
# # port: 5432
|
||||||
|
# # role: "replica"
|
||||||
|
# # database: "postgres"
|
||||||
|
# # # [ host, port, role ]
|
||||||
|
# # - servers:
|
||||||
|
# # - host: "postgres"
|
||||||
|
# # port: 5432
|
||||||
|
# # role: "primary"
|
||||||
|
# # - host: "postgres"
|
||||||
|
# # port: 5432
|
||||||
|
# # role: "replica"
|
||||||
|
# # database: "postgres"
|
||||||
|
# # # [ host, port, role ]
|
||||||
|
# # - servers:
|
||||||
|
# # - host: "postgres"
|
||||||
|
# # port: 5432
|
||||||
|
# # role: "primary"
|
||||||
|
# # - host: "postgres"
|
||||||
|
# # port: 5432
|
||||||
|
# # role: "replica"
|
||||||
|
# # database: "postgres"
|
||||||
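For the "simple" pool above, clients use the pool name as the database name. A minimal sketch, assuming the chart is deployed and the service is reachable as pgcat_host (hypothetical hostname):

    psql "postgres://user:pass@pgcat_host:6432/simple" -c "SELECT 1"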
control · Normal file · 9 lines
@@ -0,0 +1,9 @@
Package: pgcat
Version: ${PACKAGE_VERSION}
Section: database
Priority: optional
Architecture: ${ARCH}
Maintainer: PostgresML <team@postgresml.org>
Homepage: https://postgresml.org
Description: PgCat - NextGen PostgreSQL Pooler
 PostgreSQL pooler and proxy (like PgBouncer) with support for sharding, load balancing, failover and mirroring.
cov-style.css · Normal file · 158 lines
@@ -0,0 +1,158 @@
/*
 * Copyright 2021 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

body {
  background-color: #f2f2f2;
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto,
    "Noto Sans", Ubuntu, Cantarell, "Helvetica Neue", sans-serif,
    "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol",
    "Noto Color Emoji";
}

.sourceHeading, .source, .coverFn,
.testName, .testPer, .testNum,
.coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo,
.coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed,
.coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi,
.coverFile {
  font-family: "Menlo", "DejaVu Sans Mono", "Liberation Mono",
    "Consolas", "Ubuntu Mono", "Courier New", "andale mono",
    "lucida console", monospace;
}

pre {
  font-size: 0.7875rem;
}

.headerCovTableEntry, .testPer, .testNum, .testName,
.coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo,
.coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed,
.coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi {
  text-align: right;
  white-space: nowrap;
}

.coverPerLo, .coverPerMed, .coverPerHi, .testPer {
  /* font-weight: bold;*/
}

.coverNumLo, .coverNumMed, .coverNumHi, .testNum {
  font-style: italic;
  font-size: 90%;
  padding-left: 1em;
}

.title {
  font-size: 200%;
}

.tableHead {
  text-align: center;
  font-weight: bold;
  background-color: #bfbfbf;
}

.coverFile, .coverBar, .coverFn {
  background-color: #d9d9d9;
}

.headerCovTableHead {
  font-weight: bold;
  text-align: right;
}

.headerCovTableEntry {
  background-color: #d9d9d9;
}

.coverFnLo,
.coverLegendCovLo, .headerCovTableEntryLo, .coverPerLo, .coverNumLo {
  background-color: #f2dada;
}

.coverFnHi,
.coverLegendCovMed, .headerCovTableEntryMed, .coverPerMed, .coverNumMed {
  background-color: #add9ad;
}

.coverLegendCovHi, .headerCovTableEntryHi, .coverPerHi, .coverNumHi {
  background-color: #59b359;
}

.coverBarOutline {
  border-style: solid;
  border-width: 1px;
  border-color: black;
  padding: 0px;
}

.coverFnLo, .coverFnHi {
  text-align: right;
}

.lineNum {
  background-color: #d9d9d9;
}

.coverLegendCov, .lineCov, .branchCov {
  background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAABCAIAAABsYngUAAADAXpUWHRSYXcgcHJvZmlsZSB0eXBlIGV4aWYAAHjazZVbktwgDEX/WUWWgCSExHIwj6rsIMvPxcY9PY9MzVTyEVMNtCwkoZNwGL9+zvADDxHHkNQ8l5wjnlRS4YqJx+upZ08xnf313O/otTw8FBgzwShbP2/5gJyhz1vetp0KuT4ZKmO/OF6/qNsQ+3ZwO9yOhC4HcRsOdRsS3p7T9f+4thVzcXveQtv6sz5t1dfW0CUxzprJEvrE0SwXzJ1jMuStr0CPvhfqdvTmf7hVGTHxEJKI3leEsn4kFWNCT/CGfUnBXDEuyd4yaHGIhnm58/r581nk4Q59Y32N+p69Qc3xPelwJvRWkTeE8mP8UE76Ig/PSE9uT55z3jN+LZ/pJaibXLjxzdl9znHtrqaMLee9qXuL5wx6x8rWuSqjGX4afSV7tYLmKImGc9RxyA60RoUYGCcl6lRp0jjHRg0hJh4MjszcALcFCB0wCjcgJYBGo8kGzF0cB6DhOAik/IiFTrfldNfI4biTB5wegjHCkr9q4StKc66CIlq55CtXiItXwhHFIkeE6ocaiNDcSdUzwXd7+yyuAoJ6ptmxwRqPZQH4D6WXwyUnaIGiYrwKmKxvA0gRIlAEQwICMZMoZYrGHIwIiXQAqgidJfEBLKTKHUFyEsmAgyqAb6wxOlVZ+RLjIgQIlRzEwAaFCFgpKc6PJccZqiqaVDWrqWvRmiWvCsvZ8rpRq4klU8tm5lasBhdPrp7d3L14LVwEN64W1GPxUkqtcFphuWJ1hUKtBx9ypEOPfNjhRzlq49CkpaYtN2veSqudu3TUcc/duvfS66CBozTS0JGHDR9l1ImjNmWmqTNPmz5LmPVBbWN9175BjTY1PkktRXtQg9TsNkHrOtHFDMQ4EYDbIkASmBez6JQSL3KLWSyMqlBGkLrgdFrEQDANYp30YPdCToPkf8MtAAT/C3JhofsCuffcPqLW6/mhk5PQKsOV1CiovpHgnx3LcCvhwlnz9dF8P4Y/vfju+J8aQpZK+A373P3XzDqcKwAAAAZiS0dEAAAAAAAA+UO7fwAAAAlwSFlzAAAOxAAADsQBlSsOGwAAAAd0SU1FB+UEEQYyDQA04tUAAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAADklEQVQI12PULVBlwAYAEagAxGHRDdwAAAAASUVORK5CYII=');
  background-repeat: repeat-y;
  background-position: left top;
  background-color: #c6ffb8;
}

.coverLegendNoCov, .lineNoCov, .branchNoCov, .branchNoExec {
  background-image: url('data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAABCAIAAABsYngUAAAACXBIWXMAAA7EAAAOxAGVKw4bAAAAB3RJTUUH5QMUCiMidNgp2gAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAAPSURBVAjXY/wZIcWADQAAIa4BbZaExr0AAAAASUVORK5CYII=');
  background-repeat: repeat-y;
  background-position: left top;
  background-color: #ffcfbb;
}

.coverLegendCov, .coverLegendNoCov {
  padding: 0em 1em 0em 1em;
}

.headerItem, .headerValue, .headerValueLeg {
  white-space: nowrap;
}

.headerItem {
  text-align: right;
  font-weight: bold;
}

.ruler {
  background-color: #d9d9d9;
}

.detail {
  font-size: 80%;
}

.versionInfo {
  font-size: 80%;
  text-align: right;
}
ct.yaml · Normal file · 5 lines
@@ -0,0 +1,5 @@
remote: origin
target-branch: main
chart-dirs:
  - charts
dev/Dockerfile · Normal file · 35 lines
@@ -0,0 +1,35 @@
FROM rust:bullseye

# Dependencies
COPY --from=sclevine/yj /bin/yj /bin/yj
RUN /bin/yj -h
RUN apt-get update -y \
    && apt-get install -y \
    llvm-11 psmisc postgresql-contrib postgresql-client \
    ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 \
    strace ngrep dnsutils lsof net-tools telnet

# Rust
RUN cargo install cargo-binutils rustfilt
RUN rustup component add llvm-tools-preview

# Ruby
RUN sudo gem install bundler

# Toxiproxy
RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i toxiproxy-2.4.0.deb

# Config
ENV APP_ROOT=/app
ARG APP_USER=pgcat
COPY dev_bashrc /etc/bash.bashrc

RUN useradd -m -o -u 999 ${APP_USER} || exit 0 && mkdir ${APP_ROOT} && chown ${APP_USER} ${APP_ROOT}
RUN adduser ${APP_USER} sudo \
    && echo "${APP_USER} ALL=NOPASSWD: ALL" > /etc/sudoers.d/${APP_USER} \
    && chmod ugo+s /usr/sbin/usermod /usr/sbin/groupmod
ENV HOME=${APP_ROOT}
WORKDIR ${APP_ROOT}

ENTRYPOINT ["/bin/bash"]
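A build-and-run sketch for this image (the pgcat-dev tag is hypothetical; the volume mount matches APP_ROOT):

    docker build -t pgcat-dev dev/
    docker run --rm -it -v "$PWD:/app" pgcat-dev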
dev/dev_bashrc · Normal file · 120 lines
@@ -0,0 +1,120 @@
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples

# FIX USER NEEDED SO WE CAN SHARE UID BETWEEN HOST AND DEV ENV
usermod -o -u $(id -u) pgcat
groupmod -o -g $(id -g) pgcat

# We remove the setuid bit from those commands as we now have sudo
sudo chmod ugo-s /usr/sbin/usermod /usr/sbin/groupmod

# Environment customization
export DEV_ROOT="${APP_ROOT}/dev"
export HISTFILE="${DEV_ROOT}/.bash_history"
export CARGO_TARGET_DIR="${DEV_ROOT}/cache/target"
export CARGO_HOME="${DEV_ROOT}/cache/target/.cargo"
export BUNDLE_PATH="${DEV_ROOT}/cache/bundle"

# Regular bashrc
# If not running interactively, don't do anything
case $- in
    *i*) ;;
    *) return;;
esac

# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth

# append to the history file, don't overwrite it
shopt -s histappend

# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000

# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize

# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar

# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"

# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
    debian_chroot=$(cat /etc/debian_chroot)
fi

# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
    xterm-color|*-256color) color_prompt=yes;;
esac

# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes

if [ -n "$force_color_prompt" ]; then
    if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
        # We have color support; assume it's compliant with Ecma-48
        # (ISO/IEC-6429). (Lack of such support is extremely rare, and such
        # a case would tend to support setf rather than setaf.)
        color_prompt=yes
    else
        color_prompt=
    fi
fi

PS1='\[\e]0;pgcat@dev-container\h: \w\a\]${debian_chroot:+($debian_chroot)}\[\033[01;32m\]pgcat\[\033[00m\]@\[\033[01;32m\]dev-container\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\[\033[01;31m\]$(git branch &>/dev/null; if [ $? -eq 0 ]; then echo " ($(git branch | grep ^* |sed s/\*\ //))"; fi)\[\033[00m\]\$ '

unset color_prompt force_color_prompt

# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
    test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
    alias ls='ls --color=auto'
    #alias dir='dir --color=auto'
    #alias vdir='vdir --color=auto'

    alias grep='grep --color=auto'
    alias fgrep='fgrep --color=auto'
    alias egrep='egrep --color=auto'
fi

# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'

# some more ls aliases
alias ll='ls -alF'
alias la='ls -A'
alias l='ls -CF'

# Add an "alert" alias for long running commands. Use like so:
#   sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'

# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.

if [ -f ~/.bash_aliases ]; then
    . ~/.bash_aliases
fi

# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
    if [ -f /usr/share/bash-completion/bash_completion ]; then
        . /usr/share/bash-completion/bash_completion
    elif [ -f /etc/bash_completion ]; then
        . /etc/bash_completion
    fi
fi
dev/docker-compose.yaml · Normal file · 94 lines
@@ -0,0 +1,94 @@
version: "3"

x-common-definition-pg: &common-definition-pg
  image: postgres:14
  network_mode: "service:main"
  healthcheck:
    test: [ "CMD-SHELL", "pg_isready -U postgres -d postgres" ]
    interval: 5s
    timeout: 5s
    retries: 5
  volumes:
    - type: bind
      source: ../tests/sharding/query_routing_setup.sql
      target: /docker-entrypoint-initdb.d/query_routing_setup.sql
    - type: bind
      source: ../tests/sharding/partition_hash_test_setup.sql
      target: /docker-entrypoint-initdb.d/partition_hash_test_setup.sql

x-common-env-pg: &common-env-pg
  POSTGRES_USER: postgres
  POSTGRES_DB: postgres
  POSTGRES_PASSWORD: postgres

services:
  main:
    image: gcr.io/google_containers/pause:3.2
    ports:
      - 6432

  pg1:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
      PGPORT: 5432
    command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  pg2:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      PGPORT: 7432
    command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  pg3:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      PGPORT: 8432
    command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  pg4:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
      PGPORT: 9432
    command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  pg5:
    <<: *common-definition-pg
    environment:
      <<: *common-env-pg
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
      PGPORT: 10432
    command: ["postgres", "-p", "10432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]

  toxiproxy:
    build: .
    network_mode: "service:main"
    container_name: toxiproxy
    environment:
      LOG_LEVEL: info
    entrypoint: toxiproxy-server
    depends_on:
      - pg1
      - pg2
      - pg3
      - pg4
      - pg5

  pgcat-shell:
    stdin_open: true
    user: "${HOST_UID}:${HOST_GID}"
    build: .
    network_mode: "service:main"
    depends_on:
      - toxiproxy
    volumes:
      - ../:/app/
    entrypoint:
      - /bin/bash
      - -i
dev/script/console · Executable file · 12 lines
@@ -0,0 +1,12 @@
#!/bin/bash

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export HOST_UID="$(id -u)"
export HOST_GID="$(id -g)"

if [[ "${1}" == "down" ]]; then
    docker-compose -f "${DIR}/../docker-compose.yaml" down
    exit 0
else
    docker-compose -f "${DIR}/../docker-compose.yaml" run --rm pgcat-shell
fi
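Usage sketch, from the repo root:

    dev/script/console        # bring up the dev stack and open a shell in the pgcat-shell container
    dev/script/console down   # tear the dev stack down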
docker-compose.yml · Normal file · 17 lines
@@ -0,0 +1,17 @@
version: "3"
services:
  postgres:
    image: postgres:14
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_HOST_AUTH_METHOD: md5
  pgcat:
    build: .
    command:
      - "pgcat"
      - "/etc/pgcat/pgcat.toml"
    volumes:
      - "${PWD}/examples/docker/pgcat.toml:/etc/pgcat/pgcat.toml"
    ports:
      - "6432:6432"
      - "9930:9930"
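A quick smoke test for this compose file, assuming Docker Compose is installed (user, password and pool name come from examples/docker/pgcat.toml below):

    docker-compose up -d
    psql "postgres://postgres:postgres@127.0.0.1:6432/postgres" -c "SELECT 1"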
examples/docker/pgcat.toml · Normal file · 127 lines
@@ -0,0 +1,127 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = 6432

# Whether to enable the prometheus exporter or not.
enable_prometheus_exporter = true

# Port the prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 5000

# How much time to give the `SELECT 1` health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep a connection available for immediate re-use, without running a healthcheck query on it.
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 60000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # seconds

# Whether to log client connections.
log_client_connections = false

# Whether to log client disconnections.
log_client_disconnections = false

# TLS
# tls_certificate = "server.cert"
# tls_private_key = "server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "postgres"
admin_password = "postgres"

# pool
# configs are structured as pool.<pool_name>
# the pool_name is what clients use as the database name when connecting
# For the example below a client can connect using "postgres://postgres:postgres@pgcat_host:pgcat_port/postgres"
[pools.postgres]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: A hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Credentials for users that may connect to this cluster
[pools.postgres.users.0]
username = "postgres"
password = "postgres"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 9

# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
statement_timeout = 0

# Shard 0
[pools.postgres.shards.0]
# [ host, port, role ]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "postgres"

[pools.postgres.shards.1]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ],
]
database = "postgres"

[pools.postgres.shards.2]
servers = [
    [ "postgres", 5432, "primary" ],
    [ "postgres", 5432, "replica" ],
]
database = "postgres"
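The admin credentials above expose PgCat's virtual admin database; a sketch assuming the pooler from docker-compose.yml is up on localhost:

    PGPASSWORD=postgres psql -h 127.0.0.1 -p 6432 -U postgres -d pgcat -c "SHOW POOLS"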
grafana_dashboard.json · Normal file · 2124 lines (diff suppressed because it is too large)
images/instacart.webp · Normal file · binary, 3.4 KiB (not shown)
images/one_signal.webp · Normal file · binary, 16 KiB (not shown)
images/postgresml.webp · Normal file · binary, 4.7 KiB (not shown)
pgcat.minimal.toml · Normal file · 22 lines
@@ -0,0 +1,22 @@
# This is an example of the most basic config
# that will mimic what PgBouncer does in transaction mode with one server.

[general]

host = "0.0.0.0"
port = 6433
admin_username = "pgcat"
admin_password = "pgcat"

[pools.pgml.users.0]
username = "postgres"
password = "postgres"
pool_size = 10
min_pool_size = 1
pool_mode = "transaction"

[pools.pgml.shards.0]
servers = [
    ["127.0.0.1", 28815, "primary"]
]
database = "postgres"
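A connection sketch for this minimal config (note the non-default port 6433 and the pool name pgml; a Postgres server must actually be listening on 127.0.0.1:28815):

    psql "postgres://postgres:postgres@127.0.0.1:6433/pgml" -c "SELECT 1"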
pgcat.service · Normal file · 17 lines
@@ -0,0 +1,17 @@
[Unit]
Description=PgCat pooler
After=network.target
StartLimitIntervalSec=0

[Service]
User=pgcat
Type=simple
Restart=always
RestartSec=1
Environment=RUST_LOG=info
LimitNOFILE=65536
ExecStart=/usr/bin/pgcat /etc/pgcat.toml
ExecReload=/bin/kill -SIGHUP $MAINPID

[Install]
WantedBy=multi-user.target
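Typical lifecycle with this unit (the postinst script below performs the first two steps automatically when the Debian package is installed):

    sudo systemctl daemon-reload
    sudo systemctl enable --now pgcat
    sudo systemctl reload pgcat   # ExecReload sends SIGHUP, which reloads the config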
pgcat.toml · modified · 356 lines
@@ -5,64 +5,346 @@
 #
 # General pooler settings
 [general]
 
 # What IP to run on, 0.0.0.0 means accessible from everywhere.
 host = "0.0.0.0"
 
 # Port to run on, same as PgBouncer used in this example.
 port = 6432
 
-# How many connections to allocate per server.
-pool_size = 15
-
-# Pool mode (see PgBouncer docs for more).
-# session: one server connection per connected client
-# transaction: one server connection per client transaction
-pool_mode = "transaction"
+# Whether to enable the prometheus exporter or not.
+enable_prometheus_exporter = true
+
+# Port the prometheus exporter listens on.
+prometheus_exporter_port = 9930
 
 # How long to wait before aborting a server connection (ms).
-connect_timeout = 5000
+connect_timeout = 5000 # milliseconds
 
-# How much time to give `SELECT 1` health check query to return with a result (ms).
-healthcheck_timeout = 1000
+# How long an idle connection with a server is left open (ms).
+idle_timeout = 30000 # milliseconds
 
-# For how long to ban a server if it fails a health check (seconds).
-ban_time = 60 # Seconds
+# Max connection lifetime before it's closed, even if actively used.
+server_lifetime = 86400000 # 24 hours
 
-#
-# User to use for authentication against the server.
-[user]
-name = "sharding_user"
+# How long a client is allowed to be idle while in a transaction (ms).
+idle_client_in_transaction_timeout = 0 # milliseconds
+
+# How much time to give the health check query to return with a result (ms).
+healthcheck_timeout = 1000 # milliseconds
 
+# How long to keep a connection available for immediate re-use, without running a healthcheck query on it.
+healthcheck_delay = 30000 # milliseconds
+
+# How much time to give clients during shutdown before forcibly killing client connections (ms).
+shutdown_timeout = 60000 # milliseconds
+
+# How long to ban a server if it fails a health check (seconds).
+ban_time = 60 # seconds
+
+# Whether to log client connections.
+log_client_connections = false
+
+# Whether to log client disconnections.
+log_client_disconnections = false
+
+# Automatically reload the config if it changed, checking at this interval (ms).
+autoreload = 15000
+
+# Number of worker threads the Runtime will use (4 by default).
+worker_threads = 5
+
+# Number of seconds of connection idleness to wait before sending a keepalive packet to the server.
+tcp_keepalives_idle = 5
+# Number of unacknowledged keepalive packets allowed before giving up and closing the connection.
+tcp_keepalives_count = 5
+# Number of seconds between keepalive packets.
+tcp_keepalives_interval = 5
+
+# Path to TLS certificate file to use for TLS connections
+# tls_certificate = ".circleci/server.cert"
+# Path to TLS private key file to use for TLS connections
+# tls_private_key = ".circleci/server.key"
+
+# Enable/disable server TLS
+server_tls = false
+
+# Verify that the server certificate is completely authentic.
+verify_server_certificate = false
+
+# User name to access the virtual administrative database (pgbouncer or pgcat)
+# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
+admin_username = "admin_user"
+# Password to access the virtual administrative database
+admin_password = "admin_pass"
+
+# Default plugins that are configured on all pools.
+[plugins]
+
+# Prewarmer plugin that runs queries on server startup, before giving the connection
+# to the client.
+[plugins.prewarmer]
+enabled = false
+queries = [
+    "SELECT pg_prewarm('pgbench_accounts')",
+]
+
+# Log all queries to stdout.
+[plugins.query_logger]
+enabled = false
+
+# Block access to tables that Postgres does not allow us to control.
+[plugins.table_access]
+enabled = false
+tables = [
+    "pg_user",
+    "pg_roles",
+    "pg_database",
+]
+
+# Intercept user queries and give a fake reply.
+[plugins.intercept]
+enabled = true
+
+[plugins.intercept.queries.0]
+
+query = "select current_database() as a, current_schemas(false) as b"
+schema = [
+    ["a", "text"],
+    ["b", "text"],
+]
+result = [
+    ["${DATABASE}", "{public}"],
+]
+
+[plugins.intercept.queries.1]
+
+query = "select current_database(), current_schema(), current_user"
+schema = [
+    ["current_database", "text"],
+    ["current_schema", "text"],
+    ["current_user", "text"],
+]
+result = [
+    ["${DATABASE}", "public", "${USER}"],
+]
+
+
+# pool configs are structured as pool.<pool_name>
+# the pool_name is what clients use as the database name when connecting.
+# For a pool named `sharded_db`, clients access that pool using a connection string like
+# `postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db`
+[pools.sharded_db]
+# Pool mode (see PgBouncer docs for more).
+# `session` one server connection per connected client
+# `transaction` one server connection per client transaction
+pool_mode = "transaction"
+
+# Load balancing mode
+# `random` selects the server at random
+# `loc` selects the server with the least outstanding busy connections
+load_balancing_mode = "random"
+
+# If the client doesn't specify, PgCat routes traffic to this role by default.
+# `any` round-robin between primary and replicas,
+# `replica` round-robin between replicas only without touching the primary,
+# `primary` all queries go to the primary unless otherwise specified.
+default_role = "any"
+
+# Prepared statements cache size.
+# TODO: update documentation
+prepared_statements_cache_size = 500
+
+# If the Query Parser is enabled, we'll attempt to parse
+# every incoming query to determine if it's a read or a write.
+# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
+# we'll direct it to the primary.
+query_parser_enabled = true
+
+# If the query parser is enabled and this setting is enabled, we'll attempt to
+# infer the role from the query itself.
+query_parser_read_write_splitting = true
+
+# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
+# load balancing of read queries. Otherwise, the primary will only be used for write
+# queries. The primary can always be explicitly selected with our custom protocol.
+primary_reads_enabled = true
+
+# Allow sharding commands to be passed as statement comments instead of
+# separate commands. If these are unset, this functionality is disabled.
+# sharding_key_regex = '/\* sharding_key: (\d+) \*/'
+# shard_id_regex = '/\* shard_id: (\d+) \*/'
+# regex_search_limit = 1000 # only look at the first 1000 characters of SQL statements
+
+# Defines the behavior when no shard is selected in a sharded system.
+# `random`: picks a shard at random
+# `random_healthy`: picks a shard at random, favoring shards with the least number of recent errors
+# `shard_<number>`: e.g. shard_0, shard_4, etc. picks a specific shard, every time
+# default_shard = "shard_0"
+
+# So what if you wanted to implement a different hashing function,
+# or you've already built one and you want this pooler to use it?
+# Current options:
+# `pg_bigint_hash`: PARTITION BY HASH (Postgres hashing function)
+# `sha1`: A hashing function based on SHA1
+sharding_function = "pg_bigint_hash"
+
+# Query to be sent to servers to obtain the hash used for md5 authentication. The connection will be
+# established using the database configured in the pool. This parameter is inherited by every pool
+# and can be redefined in pool configuration.
+# auth_query = "SELECT usename, passwd FROM pg_shadow WHERE usename='$1'"
+
+# User to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
+# specified in `auth_query`. The connection will be established using the database configured in the pool.
+# This parameter is inherited by every pool and can be redefined in pool configuration.
+# auth_query_user = "sharding_user"
+
+# Password to be used for connecting to servers to obtain the hash used for md5 authentication by sending the query
+# specified in `auth_query`. The connection will be established using the database configured in the pool.
+# This parameter is inherited by every pool and can be redefined in pool configuration.
+# auth_query_password = "sharding_user"
+
+# Automatically parse this from queries and route queries to the right shard!
+# automatic_sharding_key = "data.id"
+
+# Idle timeout can be overwritten in the pool
+idle_timeout = 40000
+
+# Connect timeout can be overwritten in the pool
+connect_timeout = 3000
+
+# When enabled, IP resolutions for server connections specified using hostnames will be cached
+# and checked for changes every `dns_max_ttl` seconds. If a change in the host resolution is found,
+# old IP connections are closed (gracefully) and new connections will start using the new IP.
+# dns_cache_enabled = false
+
+# Specifies how often (in seconds) cached IP addresses for servers are rechecked (see `dns_cache_enabled`).
+# dns_max_ttl = 30
+
+# Plugins can be configured on a per-pool basis. This overrides the global plugins setting,
+# so all plugins have to be configured here again.
+[pools.sharded_db.plugins]
+
+[pools.sharded_db.plugins.prewarmer]
+enabled = true
+queries = [
+    "SELECT pg_prewarm('pgbench_accounts')",
+]
+
+[pools.sharded_db.plugins.query_logger]
+enabled = false
+
+[pools.sharded_db.plugins.table_access]
+enabled = false
+tables = [
+    "pg_user",
+    "pg_roles",
+    "pg_database",
+]
+
+[pools.sharded_db.plugins.intercept]
+enabled = true
+
+[pools.sharded_db.plugins.intercept.queries.0]
+
+query = "select current_database() as a, current_schemas(false) as b"
+schema = [
+    ["a", "text"],
+    ["b", "text"],
+]
+result = [
+    ["${DATABASE}", "{public}"],
+]
+
+[pools.sharded_db.plugins.intercept.queries.1]
+
+query = "select current_database(), current_schema(), current_user"
+schema = [
+    ["current_database", "text"],
+    ["current_schema", "text"],
+    ["current_user", "text"],
+]
+result = [
+    ["${DATABASE}", "public", "${USER}"],
+]
+
+# User configs are structured as pool.<pool_name>.users.<user_index>
+# This section holds the credentials for users that may connect to this cluster
+[pools.sharded_db.users.0]
+# PostgreSQL username used to authenticate the user and connect to the server
+# if `server_username` is not set.
+username = "sharding_user"
+
+# PostgreSQL password used to authenticate the user and connect to the server
+# if `server_password` is not set.
 password = "sharding_user"
 
-#
-# Shards in the cluster
-[shards]
-
-# Shard 0
-[shards.0]
-# [ host, port ]
-servers = [
-    [ "127.0.0.1", 5432 ],
-    [ "localhost", 5432 ],
-]
+pool_mode = "transaction"
+
+# PostgreSQL username used to connect to the server.
+# server_username = "another_user"
+
+# PostgreSQL password used to connect to the server.
+# server_password = "another_password"
+
+# Maximum number of server connections that can be established for this user.
+# The maximum number of connections from a single PgCat process to any database in the cluster
+# is the sum of pool_size across all users.
+pool_size = 9
+
+# Maximum query duration. Dangerous, but protects against DBs that died in a non-obvious way.
+# 0 means it is disabled.
+statement_timeout = 0
+
+[pools.sharded_db.users.1]
+username = "other_user"
+password = "other_user"
+pool_size = 21
+statement_timeout = 15000
+connect_timeout = 1000
+idle_timeout = 1000
+
+# Shard configs are structured as pool.<pool_name>.shards.<shard_id>
+# Each shard config contains a list of servers that make up the shard
+# and the database name to use.
+[pools.sharded_db.shards.0]
+# Array of servers in the shard; each server entry is an array of `[host, port, role]`
+servers = [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]
+
+# Array of mirrors for the shard; each mirror entry is an array of `[host, port, index of server in servers array]`
+# Traffic hitting the server identified by the index will be sent to the mirror.
+# mirrors = [["1.2.3.4", 5432, 0], ["1.2.3.4", 5432, 1]]
+
 # Database name (e.g. "postgres")
 database = "shard0"
 
-[shards.1]
-# [ host, port ]
-servers = [
-    [ "127.0.0.1", 5432 ],
-    [ "localhost", 5432 ],
-]
+[pools.sharded_db.shards.1]
+servers = [["127.0.0.1", 5432, "primary"], ["localhost", 5432, "replica"]]
 database = "shard1"
 
-[shards.2]
-# [ host, port ]
+[pools.sharded_db.shards.2]
+servers = [["127.0.0.1", 5432, "primary" ], ["localhost", 5432, "replica" ]]
+database = "shard2"
+
+[pools.simple_db]
+pool_mode = "session"
+default_role = "primary"
+query_parser_enabled = true
+primary_reads_enabled = true
+sharding_function = "pg_bigint_hash"
+
+[pools.simple_db.users.0]
+username = "simple_user"
+password = "simple_user"
+pool_size = 5
+min_pool_size = 3
+server_lifetime = 60000
+statement_timeout = 0
+
+[pools.simple_db.shards.0]
 servers = [
-    [ "127.0.0.1", 5432 ],
-    [ "localhost", 5432 ],
+    [ "127.0.0.1", 5432, "primary" ],
+    [ "localhost", 5432, "replica" ]
 ]
-database = "shard2"
+database = "some_db"
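The commented sharding_key_regex / shard_id_regex settings above let clients pick a shard inline with a statement comment; a hypothetical session, assuming those two regexes are uncommented and the sharded_db pool is running:

    psql "postgres://sharding_user:sharding_user@127.0.0.1:6432/sharded_db" \
      -c "/* shard_id: 1 */ SELECT 1" \
      -c "/* sharding_key: 3 */ SELECT 1"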
pgcat3.png · binary file removed (was 44 KiB), not shown
postinst · Normal file · 13 lines
@@ -0,0 +1,13 @@
#!/bin/bash
set -e

systemctl daemon-reload
systemctl enable pgcat

if ! id pgcat 2> /dev/null; then
    useradd -s /usr/bin/false pgcat
fi

if [ -f /etc/pgcat.toml ]; then
    systemctl start pgcat
fi
prerm · Normal file · 5 lines
@@ -0,0 +1,5 @@
#!/bin/bash
set -e

systemctl stop pgcat
systemctl disable pgcat
999
src/admin.rs
Normal file
999
src/admin.rs
Normal file
@@ -0,0 +1,999 @@
|
|||||||
|
use crate::pool::BanReason;
|
||||||
|
use crate::server::ServerParameters;
|
||||||
|
use crate::stats::pool::PoolStats;
|
||||||
|
use bytes::{Buf, BufMut, BytesMut};
|
||||||
|
use log::{error, info, trace};
|
||||||
|
use nix::sys::signal::{self, Signal};
|
||||||
|
use nix::unistd::Pid;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
/// Admin database.
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
use std::time::{SystemTime, UNIX_EPOCH};
|
||||||
|
use tokio::time::Instant;
|
||||||
|
|
||||||
|
use crate::config::{get_config, reload_config, VERSION};
|
||||||
|
use crate::errors::Error;
|
||||||
|
use crate::messages::*;
|
||||||
|
use crate::pool::ClientServerMap;
|
||||||
|
use crate::pool::{get_all_pools, get_pool};
|
||||||
|
use crate::stats::{get_client_stats, get_server_stats, ClientState, ServerState};
|
||||||
|
|
||||||
|
pub fn generate_server_parameters_for_admin() -> ServerParameters {
|
||||||
|
let mut server_parameters = ServerParameters::new();
|
||||||
|
|
||||||
|
server_parameters.set_param("application_name".to_string(), "".to_string(), true);
|
||||||
|
server_parameters.set_param("client_encoding".to_string(), "UTF8".to_string(), true);
|
||||||
|
server_parameters.set_param("server_encoding".to_string(), "UTF8".to_string(), true);
|
||||||
|
server_parameters.set_param("server_version".to_string(), VERSION.to_string(), true);
|
||||||
|
server_parameters.set_param("DateStyle".to_string(), "ISO, MDY".to_string(), true);
|
||||||
|
|
||||||
|
server_parameters
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle admin client.
|
||||||
|
pub async fn handle_admin<T>(
|
||||||
|
stream: &mut T,
|
||||||
|
mut query: BytesMut,
|
||||||
|
client_server_map: ClientServerMap,
|
||||||
|
) -> Result<(), Error>
|
||||||
|
where
|
||||||
|
T: tokio::io::AsyncWrite + std::marker::Unpin,
|
||||||
|
{
|
||||||
|
let code = query.get_u8() as char;
|
||||||
|
|
||||||
|
if code != 'Q' {
|
||||||
|
return Err(Error::ProtocolSyncError(format!(
|
||||||
|
"Invalid code, expected 'Q' but got '{}'",
|
||||||
|
code
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
    let len = query.get_i32() as usize;
    let query = String::from_utf8_lossy(&query[..len - 5]).to_string();

    trace!("Admin query: {}", query);

    let query_parts: Vec<&str> = query.trim_end_matches(';').split_whitespace().collect();

    match query_parts
        .first()
        .unwrap_or(&"")
        .to_ascii_uppercase()
        .as_str()
    {
        "BAN" => {
            trace!("BAN");
            ban(stream, query_parts).await
        }
        "UNBAN" => {
            trace!("UNBAN");
            unban(stream, query_parts).await
        }
        "RELOAD" => {
            trace!("RELOAD");
            reload(stream, client_server_map).await
        }
        "SET" => {
            trace!("SET");
            ignore_set(stream).await
        }
        "PAUSE" => {
            trace!("PAUSE");
            pause(stream, query_parts).await
        }
        "RESUME" => {
            trace!("RESUME");
            resume(stream, query_parts).await
        }
        "SHUTDOWN" => {
            trace!("SHUTDOWN");
            shutdown(stream).await
        }
        "SHOW" => match query_parts
            .get(1)
            .unwrap_or(&"")
            .to_ascii_uppercase()
            .as_str()
        {
            "HELP" => {
                trace!("SHOW HELP");
                show_help(stream).await
            }
            "BANS" => {
                trace!("SHOW BANS");
                show_bans(stream).await
            }
            "CONFIG" => {
                trace!("SHOW CONFIG");
                show_config(stream).await
            }
            "DATABASES" => {
                trace!("SHOW DATABASES");
                show_databases(stream).await
            }
            "LISTS" => {
                trace!("SHOW LISTS");
                show_lists(stream).await
            }
            "POOLS" => {
                trace!("SHOW POOLS");
                show_pools(stream).await
            }
            "CLIENTS" => {
                trace!("SHOW CLIENTS");
                show_clients(stream).await
            }
            "SERVERS" => {
                trace!("SHOW SERVERS");
                show_servers(stream).await
            }
            "STATS" => {
                trace!("SHOW STATS");
                show_stats(stream).await
            }
            "VERSION" => {
                trace!("SHOW VERSION");
                show_version(stream).await
            }
            "USERS" => {
                trace!("SHOW USERS");
                show_users(stream).await
            }
            _ => error_response(stream, "Unsupported SHOW query against the admin database").await,
        },
        _ => error_response(stream, "Unsupported query against the admin database").await,
    }
}

/// Column-oriented statistics.
async fn show_lists<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let client_stats = get_client_stats();
    let server_stats = get_server_stats();

    let columns = vec![("list", DataType::Text), ("items", DataType::Int4)];

    let mut users = 1;
    let mut databases = 1;
    for (_, pool) in get_all_pools() {
        databases += pool.databases();
        users += 1; // One user per pool
    }
    let mut res = BytesMut::new();
    res.put(row_description(&columns));
    res.put(data_row(&vec![
        "databases".to_string(),
        databases.to_string(),
    ]));
    res.put(data_row(&vec!["users".to_string(), users.to_string()]));
    res.put(data_row(&vec!["pools".to_string(), databases.to_string()]));
    res.put(data_row(&vec![
        "free_clients".to_string(),
        client_stats
            .keys()
            .filter(|client_id| {
                client_stats
                    .get(client_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ClientState::Idle
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "used_clients".to_string(),
        client_stats
            .keys()
            .filter(|client_id| {
                client_stats
                    .get(client_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ClientState::Active
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "login_clients".to_string(),
        "0".to_string(),
    ]));
    res.put(data_row(&vec![
        "free_servers".to_string(),
        server_stats
            .keys()
            .filter(|server_id| {
                server_stats
                    .get(server_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ServerState::Idle
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec![
        "used_servers".to_string(),
        server_stats
            .keys()
            .filter(|server_id| {
                server_stats
                    .get(server_id)
                    .unwrap()
                    .state
                    .load(Ordering::Relaxed)
                    == ServerState::Active
            })
            .count()
            .to_string(),
    ]));
    res.put(data_row(&vec!["dns_names".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_zones".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_queries".to_string(), "0".to_string()]));
    res.put(data_row(&vec!["dns_pending".to_string(), "0".to_string()]));

    res.put(command_complete("SHOW"));

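    // ReadyForQuery: type byte 'Z', length 5 (the i32 length field counts itself
    // plus the status byte), transaction status 'I' for idle.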
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show PgCat version.
async fn show_version<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    res.put(row_description(&vec![("version", DataType::Text)]));
    res.put(data_row(&vec![format!("PgCat {}", VERSION)]));
    res.put(command_complete("SHOW"));

    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show utilization of connection pools for each shard and replicas.
async fn show_pools<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let pool_lookup = PoolStats::construct_pool_lookup();
    let mut res = BytesMut::new();
    res.put(row_description(&PoolStats::generate_header()));
    pool_lookup.iter().for_each(|(_identifier, pool_stats)| {
        res.put(data_row(&pool_stats.generate_row()));
    });
    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show all available options.
async fn show_help<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    let detail_msg = [
        "",
        "SHOW HELP|CONFIG|DATABASES|POOLS|CLIENTS|SERVERS|USERS|VERSION",
        // "SHOW PEERS|PEER_POOLS", // missing PEERS|PEER_POOLS
        // "SHOW FDS|SOCKETS|ACTIVE_SOCKETS|LISTS|MEM|STATE", // missing FDS|SOCKETS|ACTIVE_SOCKETS|MEM|STATE
        "SHOW LISTS",
        // "SHOW DNS_HOSTS|DNS_ZONES", // missing DNS_HOSTS|DNS_ZONES
        "SHOW STATS", // missing STATS_TOTALS|STATS_AVERAGES|TOTALS
        "SET key = arg",
        "RELOAD",
        "PAUSE [<db>, <user>]",
        "RESUME [<db>, <user>]",
        // "DISABLE <db>", // missing
        // "ENABLE <db>", // missing
        // "RECONNECT [<db>]", // missing
        // "KILL <db>",
        // "SUSPEND",
        "SHUTDOWN",
    ];

    res.put(notify("Console usage", detail_msg.join("\n\t")));
    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show shards and replicas.
async fn show_databases<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    // Columns
    let columns = vec![
        ("name", DataType::Text),
        ("host", DataType::Text),
        ("port", DataType::Text),
        ("database", DataType::Text),
        ("force_user", DataType::Text),
        ("pool_size", DataType::Int4),
        ("min_pool_size", DataType::Int4),
        ("reserve_pool", DataType::Int4),
        ("pool_mode", DataType::Text),
        ("max_connections", DataType::Int4),
        ("current_connections", DataType::Int4),
        ("paused", DataType::Int4),
        ("disabled", DataType::Int4),
    ];

    let mut res = BytesMut::new();

    res.put(row_description(&columns));

    for (_, pool) in get_all_pools() {
        let pool_config = pool.settings.clone();
        for shard in 0..pool.shards() {
            let database_name = &pool.address(shard, 0).database;
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let pool_state = pool.pool_state(shard, server);
                let banned = pool.is_banned(address);
                let paused = pool.paused();

                res.put(data_row(&vec![
                    address.name(), // name
                    address.host.to_string(), // host
                    address.port.to_string(), // port
                    database_name.to_string(), // database
                    pool_config.user.username.to_string(), // force_user
                    pool_config.user.pool_size.to_string(), // pool_size
                    pool_config.user.min_pool_size.unwrap_or(0).to_string(), // min_pool_size
                    "0".to_string(), // reserve_pool
                    pool_config.pool_mode.to_string(), // pool_mode
                    pool_config.user.pool_size.to_string(), // max_connections
                    pool_state.connections.to_string(), // current_connections
                    match paused {
                        // paused
                        true => "1".to_string(),
                        false => "0".to_string(),
                    },
                    match banned {
                        // disabled
                        true => "1".to_string(),
                        false => "0".to_string(),
                    },
                ]));
            }
        }
    }
    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Ignore any SET commands the client sends.
/// This is common initialization done by ORMs.
async fn ignore_set<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    custom_protocol_response_ok(stream, "SET").await
}

/// Bans a host from being used
async fn ban<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let host = match tokens.get(1) {
        Some(host) => host,
        None => return error_response(stream, "usage: BAN hostname duration_seconds").await,
    };

    let duration_seconds = match tokens.get(2) {
        Some(duration_seconds) => match duration_seconds.parse::<i64>() {
            Ok(duration_seconds) => duration_seconds,
            Err(_) => {
                return error_response(stream, "duration_seconds must be an integer").await;
            }
        },
        None => return error_response(stream, "usage: BAN hostname duration_seconds").await,
    };

    if duration_seconds <= 0 {
        return error_response(stream, "duration_seconds must be > 0").await;
    }

    let columns = vec![
        ("db", DataType::Text),
        ("user", DataType::Text),
        ("role", DataType::Text),
        ("host", DataType::Text),
    ];
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    for (id, pool) in get_all_pools().iter() {
        for address in pool.get_addresses_from_host(host) {
            if !pool.is_banned(&address) {
                pool.ban(&address, BanReason::AdminBan(duration_seconds), None);
                res.put(data_row(&vec![
                    id.db.clone(),
                    id.user.clone(),
                    address.role.to_string(),
                    address.host,
                ]));
            }
        }
    }

    res.put(command_complete("BAN"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Clear a host for use
async fn unban<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let host = match tokens.get(1) {
        Some(host) => host,
        None => return error_response(stream, "UNBAN command requires a hostname to unban").await,
    };

    let columns = vec![
        ("db", DataType::Text),
        ("user", DataType::Text),
        ("role", DataType::Text),
        ("host", DataType::Text),
    ];
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    for (id, pool) in get_all_pools().iter() {
        for address in pool.get_addresses_from_host(host) {
            if pool.is_banned(&address) {
                pool.unban(&address);
                res.put(data_row(&vec![
                    id.db.clone(),
                    id.user.clone(),
                    address.role.to_string(),
                    address.host,
                ]));
            }
        }
    }

    res.put(command_complete("UNBAN"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Shows all the bans
async fn show_bans<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let columns = vec![
        ("db", DataType::Text),
        ("user", DataType::Text),
        ("role", DataType::Text),
        ("host", DataType::Text),
        ("reason", DataType::Text),
        ("ban_time", DataType::Text),
        ("ban_duration_seconds", DataType::Text),
        ("ban_remaining_seconds", DataType::Text),
    ];
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    // The block should be pretty quick so we cache the time outside
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("Time went backwards")
        .as_secs() as i64;

    for (id, pool) in get_all_pools().iter() {
        for (address, (ban_reason, ban_time)) in pool.get_bans().iter() {
            let ban_duration = match ban_reason {
                BanReason::AdminBan(duration) => *duration,
                _ => pool.settings.ban_time,
            };
            let remaining = ban_duration - (now - ban_time.timestamp());
            if remaining <= 0 {
                continue;
            }
            res.put(data_row(&vec![
                id.db.clone(),
                id.user.clone(),
                address.role.to_string(),
                address.host.clone(),
                format!("{:?}", ban_reason),
                ban_time.to_string(),
                ban_duration.to_string(),
                remaining.to_string(),
            ]));
        }
    }

    res.put(command_complete("SHOW BANS"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Reload the configuration file without restarting the process.
async fn reload<T>(stream: &mut T, client_server_map: ClientServerMap) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    info!("Reloading config");

    reload_config(client_server_map).await?;

    get_config().show();

    let mut res = BytesMut::new();

    res.put(command_complete("RELOAD"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Shows current configuration.
async fn show_config<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let config = &get_config();
    let config: HashMap<String, String> = config.into();

    // Configs that cannot be changed without restarting.
    let immutables = ["host", "port", "connect_timeout"];

    // Columns
    let columns = vec![
        ("key", DataType::Text),
        ("value", DataType::Text),
        ("default", DataType::Text),
        ("changeable", DataType::Text),
    ];

    // Response data
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    // DataRow rows
    for (key, value) in config {
        let changeable = if immutables.iter().filter(|col| *col == &key).count() == 1 {
            "no".to_string()
        } else {
            "yes".to_string()
        };

        let row = vec![key, value, "-".to_string(), changeable];

        res.put(data_row(&row));
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show shard and replicas statistics.
async fn show_stats<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let columns = vec![
        ("instance", DataType::Text),
        ("database", DataType::Text),
        ("user", DataType::Text),
        ("total_xact_count", DataType::Numeric),
        ("total_query_count", DataType::Numeric),
        ("total_received", DataType::Numeric),
        ("total_sent", DataType::Numeric),
        ("total_xact_time", DataType::Numeric),
        ("total_query_time", DataType::Numeric),
        ("total_wait_time", DataType::Numeric),
        ("total_errors", DataType::Numeric),
        ("avg_xact_count", DataType::Numeric),
        ("avg_query_count", DataType::Numeric),
        ("avg_recv", DataType::Numeric),
        ("avg_sent", DataType::Numeric),
        ("avg_errors", DataType::Numeric),
        ("avg_xact_time", DataType::Numeric),
        ("avg_query_time", DataType::Numeric),
        ("avg_wait_time", DataType::Numeric),
    ];

    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    for (user_pool, pool) in get_all_pools() {
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);

                let mut row = vec![address.name(), user_pool.db.clone(), user_pool.user.clone()];
                let stats = address.stats.clone();
                stats.populate_row(&mut row);

                res.put(data_row(&row));
            }
        }
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show currently connected clients
async fn show_clients<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let columns = vec![
        ("client_id", DataType::Text),
        ("database", DataType::Text),
        ("user", DataType::Text),
        ("application_name", DataType::Text),
        ("state", DataType::Text),
        ("transaction_count", DataType::Numeric),
        ("query_count", DataType::Numeric),
        ("error_count", DataType::Numeric),
        ("age_seconds", DataType::Numeric),
        ("maxwait", DataType::Numeric),
        ("maxwait_us", DataType::Numeric),
    ];

    let new_map = get_client_stats();
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    for (_, client) in new_map {
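        // max_wait_time is tracked in microseconds; report whole seconds
        // (maxwait) and the microsecond remainder (maxwait_us) separately.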
        let max_wait = client.max_wait_time.load(Ordering::Relaxed);
        let row = vec![
            format!("{:#010X}", client.client_id()),
            client.pool_name(),
            client.username(),
            client.application_name(),
            client.state.load(Ordering::Relaxed).to_string(),
            client.transaction_count.load(Ordering::Relaxed).to_string(),
            client.query_count.load(Ordering::Relaxed).to_string(),
            client.error_count.load(Ordering::Relaxed).to_string(),
            Instant::now()
                .duration_since(client.connect_time())
                .as_secs()
                .to_string(),
            (max_wait / 1_000_000).to_string(),
            (max_wait % 1_000_000).to_string(),
        ];

        res.put(data_row(&row));
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show currently connected servers
async fn show_servers<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let columns = vec![
        ("server_id", DataType::Text),
        ("database_name", DataType::Text),
        ("user", DataType::Text),
        ("address_id", DataType::Text),
        ("application_name", DataType::Text),
        ("state", DataType::Text),
        ("transaction_count", DataType::Numeric),
        ("query_count", DataType::Numeric),
        ("bytes_sent", DataType::Numeric),
        ("bytes_received", DataType::Numeric),
        ("age_seconds", DataType::Numeric),
        ("prepare_cache_hit", DataType::Numeric),
        ("prepare_cache_miss", DataType::Numeric),
        ("prepare_cache_eviction", DataType::Numeric),
        ("prepare_cache_size", DataType::Numeric),
    ];

    let new_map = get_server_stats();
    let mut res = BytesMut::new();
    res.put(row_description(&columns));

    for (_, server) in new_map {
        let application_name = server.application_name.read();
        let row = vec![
            format!("{:#010X}", server.server_id()),
            server.pool_name(),
            server.username(),
            server.address_name(),
            application_name.clone(),
            server.state.load(Ordering::Relaxed).to_string(),
            server.transaction_count.load(Ordering::Relaxed).to_string(),
            server.query_count.load(Ordering::Relaxed).to_string(),
            server.bytes_sent.load(Ordering::Relaxed).to_string(),
            server.bytes_received.load(Ordering::Relaxed).to_string(),
            Instant::now()
                .duration_since(server.connect_time())
                .as_secs()
                .to_string(),
            server
                .prepared_hit_count
                .load(Ordering::Relaxed)
                .to_string(),
            server
                .prepared_miss_count
                .load(Ordering::Relaxed)
                .to_string(),
            server
                .prepared_eviction_count
                .load(Ordering::Relaxed)
                .to_string(),
            server
                .prepared_cache_size
                .load(Ordering::Relaxed)
                .to_string(),
        ];

        res.put(data_row(&row));
    }

    res.put(command_complete("SHOW"));

    // ReadyForQuery
    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Pause a pool. It won't pass any more queries to the backends.
async fn pause<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let parts: Vec<&str> = match tokens.len() == 2 {
        true => tokens[1].split(',').map(|part| part.trim()).collect(),
        false => Vec::new(),
    };

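    // `PAUSE` with no arguments pauses every pool; `PAUSE <db>,<user>` pauses one.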
    match parts.len() {
        0 => {
            for (_, pool) in get_all_pools() {
                pool.pause();
            }

            let mut res = BytesMut::new();

            res.put(command_complete("PAUSE"));

            // ReadyForQuery
            res.put_u8(b'Z');
            res.put_i32(5);
            res.put_u8(b'I');

            write_all_half(stream, &res).await
        }
        2 => {
            let database = parts[0];
            let user = parts[1];

            match get_pool(database, user) {
                Some(pool) => {
                    pool.pause();

                    let mut res = BytesMut::new();

                    res.put(command_complete(&format!("PAUSE {},{}", database, user)));

                    // ReadyForQuery
                    res.put_u8(b'Z');
                    res.put_i32(5);
                    res.put_u8(b'I');

                    write_all_half(stream, &res).await
                }

                None => {
                    error_response(
                        stream,
                        &format!(
                            "No pool configured for database: {}, user: {}",
                            database, user
                        ),
                    )
                    .await
                }
            }
        }
        _ => error_response(stream, "usage: PAUSE [db, user]").await,
    }
}

/// Resume a pool. Queries are allowed again.
async fn resume<T>(stream: &mut T, tokens: Vec<&str>) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let parts: Vec<&str> = match tokens.len() == 2 {
        true => tokens[1].split(',').map(|part| part.trim()).collect(),
        false => Vec::new(),
    };

    match parts.len() {
        0 => {
            for (_, pool) in get_all_pools() {
                pool.resume();
            }

            let mut res = BytesMut::new();

            res.put(command_complete("RESUME"));

            // ReadyForQuery
            res.put_u8(b'Z');
            res.put_i32(5);
            res.put_u8(b'I');

            write_all_half(stream, &res).await
        }
        2 => {
            let database = parts[0];
            let user = parts[1];

            match get_pool(database, user) {
                Some(pool) => {
                    pool.resume();

                    let mut res = BytesMut::new();

                    res.put(command_complete(&format!("RESUME {},{}", database, user)));

                    // ReadyForQuery
                    res.put_u8(b'Z');
                    res.put_i32(5);
                    res.put_u8(b'I');

                    write_all_half(stream, &res).await
                }

                None => {
                    error_response(
                        stream,
                        &format!(
                            "No pool configured for database: {}, user: {}",
                            database, user
                        ),
                    )
                    .await
                }
            }
        }
        _ => error_response(stream, "usage: RESUME [db, user]").await,
    }
}

/// Send response packets for shutdown.
async fn shutdown<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    res.put(row_description(&vec![("success", DataType::Text)]));

    let mut shutdown_success = "t";

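    // Deliver SIGINT to our own process so the regular shutdown path runs.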
    let pid = std::process::id();
    if signal::kill(Pid::from_raw(pid.try_into().unwrap()), Signal::SIGINT).is_err() {
        error!("Unable to send SIGINT to PID: {}", pid);
        shutdown_success = "f";
    }

    res.put(data_row(&vec![shutdown_success.to_string()]));

    res.put(command_complete("SHUTDOWN"));

    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

/// Show Users.
async fn show_users<T>(stream: &mut T) -> Result<(), Error>
where
    T: tokio::io::AsyncWrite + std::marker::Unpin,
{
    let mut res = BytesMut::new();

    res.put(row_description(&vec![
        ("name", DataType::Text),
        ("pool_mode", DataType::Text),
    ]));

    for (user_pool, pool) in get_all_pools() {
        let pool_config = &pool.settings;
        res.put(data_row(&vec![
            user_pool.user.clone(),
            pool_config.pool_mode.to_string(),
        ]));
    }

    res.put(command_complete("SHOW"));

    res.put_u8(b'Z');
    res.put_i32(5);
    res.put_u8(b'I');

    write_all_half(stream, &res).await
}

src/auth_passthrough.rs (new file, 138 lines)
@@ -0,0 +1,138 @@
use crate::config::AuthType;
use crate::errors::Error;
use crate::pool::ConnectionPool;
use crate::server::Server;
use log::debug;

#[derive(Clone, Debug)]
pub struct AuthPassthrough {
    password: String,
    query: String,
    user: String,
}

impl AuthPassthrough {
    /// Initializes an AuthPassthrough.
    pub fn new(query: &str, user: &str, password: &str) -> Self {
        AuthPassthrough {
            password: password.to_string(),
            query: query.to_string(),
            user: user.to_string(),
        }
    }

    /// Returns an AuthPassthrough given the pool configuration.
    /// If any of the required values is not set, None is returned.
    pub fn from_pool_config(pool_config: &crate::config::Pool) -> Option<Self> {
        if pool_config.is_auth_query_configured() {
            return Some(AuthPassthrough::new(
                pool_config.auth_query.as_ref().unwrap(),
                pool_config.auth_query_user.as_ref().unwrap(),
                pool_config.auth_query_password.as_ref().unwrap(),
            ));
        }

        None
    }

    /// Returns an AuthPassthrough given the pool settings.
    /// If any of the required values is not set, None is returned.
    pub fn from_pool_settings(pool_settings: &crate::pool::PoolSettings) -> Option<Self> {
        let pool_config = crate::config::Pool {
            auth_query: pool_settings.auth_query.clone(),
            auth_query_password: pool_settings.auth_query_password.clone(),
            auth_query_user: pool_settings.auth_query_user.clone(),
            ..Default::default()
        };

        AuthPassthrough::from_pool_config(&pool_config)
    }

    /// Connects to the server and executes the auth_query for the specified address.
    /// If the response is a row with two columns containing the username set in the address
    /// and its MD5 hash, the MD5 hash is returned.
    ///
    /// Note that the query is executed replacing $1 with the name of the user;
    /// this is so we only hold in memory (and transfer) the least amount of 'sensitive' data.
    /// It is also compatible with pgbouncer.
    ///
    /// # Arguments
    ///
    /// * `address` - An Address of the server we want to connect to. The username for the hash will be obtained from this value.
    ///
    /// # Examples
    ///
    /// ```
    /// use pgcat::auth_passthrough::AuthPassthrough;
    /// use pgcat::config::Address;
    /// let auth_passthrough = AuthPassthrough::new("SELECT * FROM public.user_lookup('$1');", "postgres", "postgres");
    /// auth_passthrough.fetch_hash(&Address::default());
    /// ```
    ///
    pub async fn fetch_hash(&self, address: &crate::config::Address) -> Result<String, Error> {
        let auth_user = crate::config::User {
            username: self.user.clone(),
            auth_type: AuthType::MD5,
            password: Some(self.password.clone()),
            server_username: None,
            server_password: None,
            pool_size: 1,
            statement_timeout: 0,
            pool_mode: None,
            server_lifetime: None,
            min_pool_size: None,
            connect_timeout: None,
            idle_timeout: None,
        };

        let user = &address.username;

        debug!("Connecting to server to obtain auth hashes");

        let auth_query = self.query.replace("$1", user);

        match Server::exec_simple_query(address, &auth_user, &auth_query).await {
            Ok(password_data) => {
                if password_data.len() == 2 && password_data.first().unwrap() == user {
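                    // PostgreSQL MD5 verifiers are stored as "md5" followed by
                    // hex(md5(password || username)); strip the prefix, keep the digest.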
                    if let Some(stripped_hash) = password_data
                        .last()
                        .unwrap()
                        .to_string()
                        .strip_prefix("md5")
                    {
                        Ok(stripped_hash.to_string())
                    } else {
                        Err(Error::AuthPassthroughError(
                            "Obtained hash from auth_query does not seem to be in md5 format."
                                .to_string(),
                        ))
                    }
                } else {
                    Err(Error::AuthPassthroughError(
                        "Data obtained from query does not follow the scheme 'user','hash'."
                            .to_string(),
                    ))
                }
            }
            Err(err) => Err(Error::AuthPassthroughError(format!(
                "Error trying to obtain password from auth_query, ignoring hash for user '{}'. Error: {:?}",
                user, err
            ))),
        }
    }
}

pub async fn refetch_auth_hash(pool: &ConnectionPool) -> Result<String, Error> {
    let address = pool.address(0, 0);
    if let Some(apt) = AuthPassthrough::from_pool_settings(&pool.settings) {
        let hash = apt.fetch_hash(address).await?;

        return Ok(hash);
    }

    Err(Error::ClientError(format!(
        "Could not obtain hash for {{ username: {:?}, database: {:?} }}. Auth passthrough not enabled.",
        address.username, address.database
    )))
}

src/client.rs (2255 lines): file diff suppressed because it is too large
src/cmd_args.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
use clap::{Parser, ValueEnum};
use tracing::Level;

/// PgCat: Nextgen PostgreSQL Pooler
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
pub struct Args {
    #[arg(default_value_t = String::from("pgcat.toml"), env)]
    pub config_file: String,

    #[arg(short, long, default_value_t = tracing::Level::INFO, env)]
    pub log_level: Level,

    #[clap(short='F', long, value_enum, default_value_t=LogFormat::Text, env)]
    pub log_format: LogFormat,

    #[arg(
        short,
        long,
        default_value_t = false,
        env,
        help = "disable colors in the log output"
    )]
    pub no_color: bool,
}

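// Example invocation (inferred from the arg definitions above; the config file
// is a positional argument defaulting to "pgcat.toml"):
//
//     pgcat /etc/pgcat.toml --log-level DEBUG -F structured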
pub fn parse() -> Args {
    Args::parse()
}

#[derive(ValueEnum, Clone, Debug)]
pub enum LogFormat {
    Text,
    Structured,
    Debug,
}

src/config.rs (1598 lines): file diff suppressed because it is too large
src/constants.rs (new file, 33 lines)
@@ -0,0 +1,33 @@
/// Various protocol constants, as defined in
/// <https://www.postgresql.org/docs/12/protocol-message-formats.html>
/// and elsewhere in the source code.

// Used in the StartupMessage to indicate regular handshake.
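// 196608 = (3 << 16) | 0, i.e. wire protocol version 3.0.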
pub const PROTOCOL_VERSION_NUMBER: i32 = 196608;

// SSLRequest: used to indicate we want an SSL connection.
pub const SSL_REQUEST_CODE: i32 = 80877103;

// CancelRequest: the cancel request code.
pub const CANCEL_REQUEST_CODE: i32 = 80877102;

// AuthenticationMD5Password
pub const MD5_ENCRYPTED_PASSWORD: i32 = 5;

// SASL
pub const SASL: i32 = 10;
pub const SASL_CONTINUE: i32 = 11;
pub const SASL_FINAL: i32 = 12;
pub const SCRAM_SHA_256: &str = "SCRAM-SHA-256";
pub const NONCE_LENGTH: usize = 24;

// AuthenticationOk
pub const AUTHENTICATION_SUCCESSFUL: i32 = 0;

// ErrorResponse: A code identifying the field type; if zero, this is the message terminator and no string follows.
pub const MESSAGE_TERMINATOR: u8 = 0;

//
// Data types
//
pub const _OID_INT8: i32 = 20; // bigint

src/dns_cache.rs (new file, 410 lines)
@@ -0,0 +1,410 @@
use crate::config::get_config;
use crate::errors::Error;
use arc_swap::ArcSwap;
use log::{debug, error, info, warn};
use once_cell::sync::Lazy;
use std::collections::{HashMap, HashSet};
use std::io;
use std::net::IpAddr;
use std::sync::Arc;
use std::sync::RwLock;
use tokio::time::{sleep, Duration};
use trust_dns_resolver::error::{ResolveError, ResolveResult};
use trust_dns_resolver::lookup_ip::LookupIp;
use trust_dns_resolver::TokioAsyncResolver;

/// Cached Resolver Globally available
pub static CACHED_RESOLVER: Lazy<ArcSwap<CachedResolver>> =
    Lazy::new(|| ArcSwap::from_pointee(CachedResolver::default()));
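// Note: the ArcSwap wrapper lets `from_config` below swap in a fresh resolver
// atomically on config reload while readers keep using the old one.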

// IP addresses are returned as a set of addresses
// so we can compare.
#[derive(Clone, PartialEq, Debug)]
pub struct AddrSet {
    set: HashSet<IpAddr>,
}

impl AddrSet {
    fn new() -> AddrSet {
        AddrSet {
            set: HashSet::new(),
        }
    }
}

impl From<LookupIp> for AddrSet {
    fn from(lookup_ip: LookupIp) -> Self {
        let mut addr_set = AddrSet::new();
        for address in lookup_ip.iter() {
            addr_set.set.insert(address);
        }
        addr_set
    }
}

///
/// A CachedResolver is a DNS resolution cache mechanism with customizable expiration time.
///
/// The system works as follows:
///
/// When a host is to be resolved, if we have not resolved it before, a new resolution is
/// executed and stored in the internal cache. Concurrently, every `dns_max_ttl` time, the
/// cache is refreshed.
///
/// # Example:
///
/// ```
/// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
///
/// # tokio_test::block_on(async {
/// let config = CachedResolverConfig::default();
/// let resolver = CachedResolver::new(config, None).await.unwrap();
/// let addrset = resolver.lookup_ip("www.example.com.").await.unwrap();
/// # })
/// ```
///
/// // Now the IP resolution is stored in the local cache and subsequent
/// // calls will be returned from cache. Also, the cache is refreshed
/// // and updated every 10 seconds.
///
/// // You can now check if an 'old' lookup differs from what is currently
/// // stored in cache by using `has_changed`.
/// resolver.has_changed("www.example.com.", addrset)
#[derive(Default)]
pub struct CachedResolver {
    // The configuration of the cached_resolver.
    config: CachedResolverConfig,

    // The cache itself: hostname -> set of resolved addresses.
    data: Option<RwLock<HashMap<String, AddrSet>>>,

    // The resolver to be used for DNS queries.
    resolver: Option<TokioAsyncResolver>,

    // The RefreshLoop
    refresh_loop: RwLock<Option<tokio::task::JoinHandle<()>>>,
}

///
/// Configuration
#[derive(Clone, Debug, Default, PartialEq)]
pub struct CachedResolverConfig {
    /// Amount of time in seconds that a resolved DNS address is considered stale.
    dns_max_ttl: u64,

    /// Enabled or disabled? (this is so we can reload config)
    enabled: bool,
}

impl CachedResolverConfig {
    fn new(dns_max_ttl: u64, enabled: bool) -> Self {
        CachedResolverConfig {
            dns_max_ttl,
            enabled,
        }
    }
}

impl From<crate::config::Config> for CachedResolverConfig {
    fn from(config: crate::config::Config) -> Self {
        CachedResolverConfig::new(config.general.dns_max_ttl, config.general.dns_cache_enabled)
    }
}

impl CachedResolver {
    ///
    /// Returns a new Arc<CachedResolver> based on the passed configuration.
    /// It also starts the loop that will refresh cache entries.
    ///
    /// # Arguments:
    ///
    /// * `config` - The `CachedResolverConfig` to be used to create the resolver.
    ///
    /// # Example:
    ///
    /// ```
    /// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
    ///
    /// # tokio_test::block_on(async {
    /// let config = CachedResolverConfig::default();
    /// let resolver = CachedResolver::new(config, None).await.unwrap();
    /// # })
    /// ```
    ///
    pub async fn new(
        config: CachedResolverConfig,
        data: Option<HashMap<String, AddrSet>>,
    ) -> Result<Arc<Self>, io::Error> {
        // Construct a new Resolver with default configuration options
        let resolver = Some(TokioAsyncResolver::tokio_from_system_conf()?);

        let data = if let Some(hash) = data {
            Some(RwLock::new(hash))
        } else {
            Some(RwLock::new(HashMap::new()))
        };

        let instance = Arc::new(Self {
            config,
            resolver,
            data,
            refresh_loop: RwLock::new(None),
        });

        if instance.enabled() {
            info!("Scheduling DNS refresh loop");
            let refresh_loop = tokio::task::spawn({
                let instance = instance.clone();
                async move {
                    instance.refresh_dns_entries_loop().await;
                }
            });
            *(instance.refresh_loop.write().unwrap()) = Some(refresh_loop);
        }

        Ok(instance)
    }

    pub fn enabled(&self) -> bool {
        self.config.enabled
    }

    // Schedules the refresher
    async fn refresh_dns_entries_loop(&self) {
        let resolver = TokioAsyncResolver::tokio_from_system_conf().unwrap();
        let interval = Duration::from_secs(self.config.dns_max_ttl);
        loop {
            debug!("Begin refreshing cached DNS addresses.");
            // To minimize the time we hold the lock, we first create
            // an array with keys.
            let mut hostnames: Vec<String> = Vec::new();
            {
                if let Some(ref data) = self.data {
                    for hostname in data.read().unwrap().keys() {
                        hostnames.push(hostname.clone());
                    }
                }
            }

            for hostname in hostnames.iter() {
                let addrset = self
                    .fetch_from_cache(hostname.as_str())
                    .expect("Could not obtain expected address from cache, this should not happen");

                match resolver.lookup_ip(hostname).await {
                    Ok(lookup_ip) => {
                        let new_addrset = AddrSet::from(lookup_ip);
                        debug!(
                            "Obtained address for host ({}) -> ({:?})",
                            hostname, new_addrset
                        );

                        if addrset != new_addrset {
                            debug!(
                                "Addr changed from {:?} to {:?}, updating cache.",
                                addrset, new_addrset
                            );
                            self.store_in_cache(hostname, new_addrset);
                        }
                    }
                    Err(err) => {
                        error!(
                            "There was an error trying to resolve {}: ({}).",
                            hostname, err
                        );
                    }
                }
            }
            debug!("Finished refreshing cached DNS addresses.");
            sleep(interval).await;
        }
    }

    /// Returns an `AddrSet` given the specified hostname.
    ///
    /// This method first tries to fetch the value from the cache; if it misses,
    /// the name is resolved and stored in the cache. TTL from records is ignored.
    ///
    /// # Arguments
    ///
    /// * `host` - A string slice referencing the hostname to be resolved.
    ///
    /// # Example:
    ///
    /// ```
    /// use pgcat::dns_cache::{CachedResolverConfig, CachedResolver};
    ///
    /// # tokio_test::block_on(async {
    /// let config = CachedResolverConfig::default();
    /// let resolver = CachedResolver::new(config, None).await.unwrap();
    /// let response = resolver.lookup_ip("www.google.com.");
    /// # })
    /// ```
    ///
    pub async fn lookup_ip(&self, host: &str) -> ResolveResult<AddrSet> {
        debug!("Looking up {} in cache", host);
        match self.fetch_from_cache(host) {
            Some(addr_set) => {
                debug!("Cache hit!");
                Ok(addr_set)
            }
            None => {
                debug!("Not found, executing a dns query!");
                if let Some(ref resolver) = self.resolver {
                    let addr_set = AddrSet::from(resolver.lookup_ip(host).await?);
                    debug!("Obtained: {:?}", addr_set);
                    self.store_in_cache(host, addr_set.clone());
                    Ok(addr_set)
                } else {
                    Err(ResolveError::from("No resolver available"))
                }
            }
        }
    }

    //
    // Returns true if the stored host resolution differs from the AddrSet passed.
    pub fn has_changed(&self, host: &str, addr_set: &AddrSet) -> bool {
        if let Some(fetched_addr_set) = self.fetch_from_cache(host) {
            return fetched_addr_set != *addr_set;
        }
        false
    }

    // Fetches an AddrSet from the inner cache, acquiring the read lock.
    fn fetch_from_cache(&self, key: &str) -> Option<AddrSet> {
        if let Some(ref hash) = self.data {
            if let Some(addr_set) = hash.read().unwrap().get(key) {
                return Some(addr_set.clone());
            }
        }
        None
    }

    // Sets up the global CACHED_RESOLVER static variable so we can use the DNS
    // cache globally.
    pub async fn from_config() -> Result<(), Error> {
        let cached_resolver = CACHED_RESOLVER.load();
        let desired_config = CachedResolverConfig::from(get_config());

        if cached_resolver.config != desired_config {
            if let Some(ref refresh_loop) = *(cached_resolver.refresh_loop.write().unwrap()) {
                warn!("Killing DNS cache refresh loop as its configuration is being reloaded");
                refresh_loop.abort()
            }
            let new_resolver = if let Some(ref data) = cached_resolver.data {
                let data = Some(data.read().unwrap().clone());
                CachedResolver::new(desired_config, data).await
            } else {
                CachedResolver::new(desired_config, None).await
            };

            match new_resolver {
                Ok(ok) => {
                    CACHED_RESOLVER.store(ok);
                    Ok(())
                }
                Err(err) => {
                    let message = format!("Error setting up cached_resolver. Error: {:?}, will continue without this feature.", err);
                    Err(Error::DNSCachedError(message))
                }
            }
        } else {
            Ok(())
        }
    }

    // Stores the AddrSet in cache, acquiring the write lock.
    fn store_in_cache(&self, host: &str, addr_set: AddrSet) {
        if let Some(ref data) = self.data {
            data.write().unwrap().insert(host.to_string(), addr_set);
        } else {
            error!("Could not insert, Hash not initialized");
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use trust_dns_resolver::error::ResolveError;

    #[tokio::test]
    async fn new() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await;
        assert!(resolver.is_ok());
    }

    #[tokio::test]
    async fn lookup_ip() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let response = resolver.lookup_ip("www.google.com.").await;
        assert!(response.is_ok());
    }

    #[tokio::test]
    async fn has_changed() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.google.com.";
        let response = resolver.lookup_ip(hostname).await;
        let addr_set = response.unwrap();
        assert!(!resolver.has_changed(hostname, &addr_set));
    }

    #[tokio::test]
    async fn unknown_host() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.idontexists.";
        let response = resolver.lookup_ip(hostname).await;
        assert!(matches!(response, Err(ResolveError { .. })));
    }

    #[tokio::test]
    async fn incorrect_address() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "w ww.idontexists.";
        let response = resolver.lookup_ip(hostname).await;
        assert!(matches!(response, Err(ResolveError { .. })));
        assert!(!resolver.has_changed(hostname, &AddrSet::new()));
    }

    #[tokio::test]
    // Ok, this test is based on the fact that google does DNS round-robin
    // and does not respond with every available IP every time, so
    // if I cache here, it will miss after one cache iteration or two.
    async fn thread() {
        let config = CachedResolverConfig {
            dns_max_ttl: 10,
            enabled: true,
        };
        let resolver = CachedResolver::new(config, None).await.unwrap();
        let hostname = "www.google.com.";
        let response = resolver.lookup_ip(hostname).await;
        let addr_set = response.unwrap();
        assert!(!resolver.has_changed(hostname, &addr_set));
        let resolver_for_refresher = resolver.clone();
        let _thread_handle = tokio::task::spawn(async move {
            resolver_for_refresher.refresh_dns_entries_loop().await;
        });
        assert!(!resolver.has_changed(hostname, &addr_set));
    }
}

src/errors.rs (134 lines)
@@ -1,11 +1,133 @@
-#[derive(Debug, PartialEq)]
+//! Errors.
+
+/// Various errors.
+#[derive(Debug, PartialEq, Clone)]
 pub enum Error {
-    SocketError,
-    // ClientDisconnected,
+    SocketError(String),
+    ClientSocketError(String, ClientIdentifier),
+    ClientGeneralError(String, ClientIdentifier),
+    ClientAuthImpossible(String),
+    ClientAuthPassthroughError(String, ClientIdentifier),
     ClientBadStartup,
-    ProtocolSyncError,
+    ProtocolSyncError(String),
+    BadQuery(String),
     ServerError,
-    // ServerTimeout,
-    // DirtyServer,
+    ServerMessageParserError(String),
+    ServerStartupError(String, ServerIdentifier),
+    ServerAuthError(String, ServerIdentifier),
     BadConfig,
+    AllServersDown,
+    ClientError(String),
+    TlsError,
+    StatementTimeout,
+    DNSCachedError(String),
+    ShuttingDown,
+    ParseBytesError(String),
+    AuthError(String),
+    AuthPassthroughError(String),
+    UnsupportedStatement,
+    QueryRouterParserError(String),
+    QueryRouterError(String),
+    InvalidShardId(usize),
+    PreparedStatementError,
 }
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct ClientIdentifier {
+    pub application_name: String,
+    pub username: String,
+    pub pool_name: String,
+}
+
+impl ClientIdentifier {
+    pub fn new(application_name: &str, username: &str, pool_name: &str) -> ClientIdentifier {
+        ClientIdentifier {
+            application_name: application_name.into(),
+            username: username.into(),
+            pool_name: pool_name.into(),
+        }
+    }
+}
+
+impl std::fmt::Display for ClientIdentifier {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(
+            f,
+            "{{ application_name: {}, username: {}, pool_name: {} }}",
+            self.application_name, self.username, self.pool_name
+        )
+    }
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub struct ServerIdentifier {
+    pub username: String,
+    pub database: String,
+}
+
+impl ServerIdentifier {
+    pub fn new(username: &str, database: &str) -> ServerIdentifier {
+        ServerIdentifier {
+            username: username.into(),
+            database: database.into(),
+        }
+    }
+}
+
+impl std::fmt::Display for ServerIdentifier {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        write!(
+            f,
+            "{{ username: {}, database: {} }}",
+            self.username, self.database
+        )
+    }
+}
+
+impl std::fmt::Display for Error {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        match &self {
+            &Error::ClientSocketError(error, client_identifier) => write!(
+                f,
+                "Error reading {} from client {}",
+                error, client_identifier
+            ),
+            &Error::ClientGeneralError(error, client_identifier) => {
+                write!(f, "{} {}", error, client_identifier)
+            }
+            &Error::ClientAuthImpossible(username) => write!(
+                f,
+                "Client auth not possible, \
+                no cleartext password set for username: {} \
+                in config and auth passthrough (query_auth) \
+                is not set up.",
+                username
+            ),
+            &Error::ClientAuthPassthroughError(error, client_identifier) => write!(
+                f,
+                "No cleartext password set, \
+                and auth passthrough could not \
+                obtain the hash from the server for {}, \
+                the error was: {}",
+                client_identifier, error
+            ),
+            &Error::ServerStartupError(error, server_identifier) => write!(
+                f,
+                "Error reading {} on server startup {}",
+                error, server_identifier,
+            ),
+            &Error::ServerAuthError(error, server_identifier) => {
+                write!(f, "{} for {}", error, server_identifier,)
+            }
+
+            // The rest can use Debug.
+            err => write!(f, "{:?}", err),
+        }
+    }
+}
+
+impl From<std::ffi::NulError> for Error {
+    fn from(err: std::ffi::NulError) -> Self {
+        Error::QueryRouterError(err.to_string())
+    }
+}
src/lib.rs | 42 lines (new file)
@@ -0,0 +1,42 @@

pub mod admin;
pub mod auth_passthrough;
pub mod client;
pub mod cmd_args;
pub mod config;
pub mod constants;
pub mod dns_cache;
pub mod errors;
pub mod logger;
pub mod messages;
pub mod mirrors;
pub mod plugins;
pub mod pool;
pub mod prometheus;
pub mod query_router;
pub mod scram;
pub mod server;
pub mod sharding;
pub mod stats;
pub mod tls;

/// Format chrono::Duration to be more human-friendly.
///
/// # Arguments
///
/// * `duration` - A duration of time
pub fn format_duration(duration: &chrono::Duration) -> String {
    let milliseconds = format!("{:0>3}", duration.num_milliseconds() % 1000);
    let seconds = format!("{:0>2}", duration.num_seconds() % 60);
    let minutes = format!("{:0>2}", duration.num_minutes() % 60);
    let hours = format!("{:0>2}", duration.num_hours() % 24);
    let days = duration.num_days().to_string();

    format!(
        "{}d {}:{}:{}.{}",
        days, hours, minutes, seconds, milliseconds
    )
}
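
A quick sanity check of the format above (a hypothetical standalone usage, not part of the diff; assumes the pgcat library crate and chrono are available):

fn main() {
    // 1 day, 2 hours, 3 minutes, 4 seconds and 567 milliseconds.
    let duration = chrono::Duration::milliseconds((26 * 3600 + 3 * 60 + 4) * 1000 + 567);
    assert_eq!(pgcat::format_duration(&duration), "1d 02:03:04.567");
}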

src/logger.rs | 20 lines (new file)
@@ -0,0 +1,20 @@

use crate::cmd_args::{Args, LogFormat};
use tracing_subscriber;
use tracing_subscriber::EnvFilter;

pub fn init(args: &Args) {
    // Initialize a default filter, then override the built-in default ("warn")
    // with the level given on the command line (default: "info").
    let filter = EnvFilter::from_default_env().add_directive(args.log_level.into());

    let trace_sub = tracing_subscriber::fmt()
        .with_thread_ids(true)
        .with_env_filter(filter)
        .with_ansi(!args.no_color);

    match args.log_format {
        LogFormat::Structured => trace_sub.json().init(),
        LogFormat::Debug => trace_sub.pretty().init(),
        _ => trace_sub.init(),
    };
}
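
A small sketch of how the filter composes (assuming tracing-subscriber's EnvFilter): directives parsed from the RUST_LOG environment variable are kept, and the command-line level is layered on top as one more directive. The directive string here is illustrative.

use tracing_subscriber::EnvFilter;

// With RUST_LOG=tokio=warn in the environment, this yields: tokio at warn,
// everything else at info.
fn build_filter() -> EnvFilter {
    EnvFilter::from_default_env().add_directive("info".parse().unwrap())
}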

src/main.rs | 409 lines
@@ -1,123 +1,340 @@

--- src/main.rs (before: GPL-3.0 header, single #[tokio::main] entrypoint)

// PgCat, a PostgreSQL pooler with load balancing, failover, and sharding support.
// Copyright (c) 2022 Lev Kokotov <hi@levthe.dev>

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

extern crate async_trait;
extern crate bb8;
extern crate bytes;
extern crate md5;
extern crate serde;
extern crate serde_derive;
extern crate tokio;
extern crate toml;

use tokio::net::TcpListener;

use std::collections::HashMap;
use std::sync::{Arc, Mutex};

mod client;
mod config;
mod errors;
mod messages;
mod pool;
mod server;
mod sharding;

// Support for query cancellation: this maps our process_ids and
// secret keys to the backend's.
use pool::{ClientServerMap, ConnectionPool};

/// Main!
#[tokio::main]
async fn main() {
    println!("> Welcome to PgCat! Meow.");

    let config = match config::parse("pgcat.toml").await {
        Ok(config) => config,
        Err(err) => {
            println!("> Config parse error: {:?}", err);
            return;
        }
    };

    let addr = format!("{}:{}", config.general.host, config.general.port);
    let listener = match TcpListener::bind(&addr).await {
        Ok(sock) => sock,
        Err(err) => {
            println!("> Error: {:?}", err);
            return;
        }
    };

    println!("> Running on {}", addr);

    // Tracks which client is connected to which server for query cancellation.
    let client_server_map: ClientServerMap = Arc::new(Mutex::new(HashMap::new()));

    println!("> Pool size: {}", config.general.pool_size);
    println!("> Pool mode: {}", config.general.pool_mode);
    println!("> Ban time: {}s", config.general.ban_time);
    println!(
        "> Healthcheck timeout: {}ms",
        config.general.healthcheck_timeout
    );

    let pool = ConnectionPool::from_config(config.clone(), client_server_map.clone()).await;
    let transaction_mode = config.general.pool_mode == "transaction";

    println!("> Waiting for clients...");

    loop {
        let pool = pool.clone();
        let client_server_map = client_server_map.clone();

        let (socket, addr) = match listener.accept().await {
            Ok((socket, addr)) => (socket, addr),
            Err(err) => {
                println!("> Listener: {:?}", err);
                continue;
            }
        };

        // Client goes to another thread, bye.
        tokio::task::spawn(async move {
            println!(
                ">> Client {:?} connected, transaction pooling: {}",
                addr, transaction_mode
            );

            match client::Client::startup(socket, client_server_map, transaction_mode).await {
                Ok(mut client) => {
                    println!(">> Client {:?} authenticated successfully!", addr);

                    match client.handle(pool).await {
                        Ok(()) => {
                            println!(">> Client {:?} disconnected.", addr);
                        }

                        Err(err) => {
                            println!(">> Client disconnected with error: {:?}", err);
                            client.release();
                        }
                    }
                }

                Err(err) => {
                    println!(">> Error: {:?}", err);
                }
            };
        });
    }
}

+++ src/main.rs (after: MIT header, explicit runtimes, Prometheus exporter, signal handling, graceful shutdown)

// PgCat, a PostgreSQL pooler with load balancing, failover, and sharding support.
// Copyright (C) 2022 Lev Kokotov <lev@levthe.dev>

// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:

// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

extern crate arc_swap;
extern crate async_trait;
extern crate bb8;
extern crate bytes;
extern crate exitcode;
extern crate log;
extern crate md5;
extern crate num_cpus;
extern crate once_cell;
extern crate rustls_pemfile;
extern crate serde;
extern crate serde_derive;
extern crate sqlparser;
extern crate tokio;
extern crate tokio_rustls;
extern crate toml;
extern crate trust_dns_resolver;

#[cfg(not(target_env = "msvc"))]
use jemallocator::Jemalloc;

#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;

use log::{debug, error, info, warn};
use parking_lot::Mutex;
use pgcat::format_duration;
use tokio::net::TcpListener;
#[cfg(not(windows))]
use tokio::signal::unix::{signal as unix_signal, SignalKind};
#[cfg(windows)]
use tokio::signal::windows as win_signal;
use tokio::{runtime::Builder, sync::mpsc};

use std::collections::HashMap;
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::Arc;
use tokio::sync::broadcast;

use pgcat::cmd_args;
use pgcat::config::{get_config, reload_config, VERSION};
use pgcat::dns_cache;
use pgcat::logger;
use pgcat::messages::configure_socket;
use pgcat::pool::{ClientServerMap, ConnectionPool};
use pgcat::prometheus::start_metric_server;
use pgcat::stats::{Collector, Reporter, REPORTER};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = cmd_args::parse();
    logger::init(&args);

    info!("Welcome to PgCat! Meow. (Version {})", VERSION);

    if !pgcat::query_router::QueryRouter::setup() {
        error!("Could not setup query router");
        std::process::exit(exitcode::CONFIG);
    }

    // Create a transient runtime for loading the config for the first time.
    {
        let runtime = Builder::new_multi_thread().worker_threads(1).build()?;

        runtime.block_on(async {
            match pgcat::config::parse(args.config_file.as_str()).await {
                Ok(_) => (),
                Err(err) => {
                    error!("Config parse error: {:?}", err);
                    std::process::exit(exitcode::CONFIG);
                }
            };
        });
    }

    let config = get_config();

    // Create the runtime now that we know the required worker_threads.
    let runtime = Builder::new_multi_thread()
        .worker_threads(config.general.worker_threads)
        .enable_all()
        .build()?;

    runtime.block_on(async move {
        if let Some(true) = config.general.enable_prometheus_exporter {
            let http_addr_str = format!(
                "{}:{}",
                config.general.host, config.general.prometheus_exporter_port
            );

            let http_addr = match SocketAddr::from_str(&http_addr_str) {
                Ok(addr) => addr,
                Err(err) => {
                    error!("Invalid http address: {}", err);
                    std::process::exit(exitcode::CONFIG);
                }
            };

            tokio::task::spawn(async move {
                start_metric_server(http_addr).await;
            });
        }

        let addr = format!("{}:{}", config.general.host, config.general.port);

        let listener = match TcpListener::bind(&addr).await {
            Ok(sock) => sock,
            Err(err) => {
                error!("Listener socket error: {:?}", err);
                std::process::exit(exitcode::CONFIG);
            }
        };

        info!("Running on {}", addr);

        config.show();

        // Tracks which client is connected to which server for query cancellation.
        let client_server_map: ClientServerMap = Arc::new(Mutex::new(HashMap::new()));

        // Statistics reporting.
        REPORTER.store(Arc::new(Reporter::default()));

        // Start (if enabled) the DNS cache before pool initialization.
        match dns_cache::CachedResolver::from_config().await {
            Ok(_) => (),
            Err(err) => error!("DNS cache initialization error: {:?}", err),
        };

        // Connection pool that allows querying all shards and replicas.
        match ConnectionPool::from_config(client_server_map.clone()).await {
            Ok(_) => (),
            Err(err) => {
                error!("Pool error: {:?}", err);
                std::process::exit(exitcode::CONFIG);
            }
        };

        tokio::task::spawn(async move {
            let mut stats_collector = Collector::default();
            stats_collector.collect().await;
        });

        info!("Config autoreloader: {}", match config.general.autoreload {
            Some(interval) => format!("{} ms", interval),
            None => "disabled".into(),
        });

        if let Some(interval) = config.general.autoreload {
            let mut autoreload_interval = tokio::time::interval(tokio::time::Duration::from_millis(interval));
            let autoreload_client_server_map = client_server_map.clone();

            tokio::task::spawn(async move {
                loop {
                    autoreload_interval.tick().await;
                    debug!("Automatically reloading config");

                    if let Ok(changed) = reload_config(autoreload_client_server_map.clone()).await {
                        if changed {
                            get_config().show()
                        }
                    };
                }
            });
        };

        #[cfg(windows)]
        let mut term_signal = win_signal::ctrl_close().unwrap();
        #[cfg(windows)]
        let mut interrupt_signal = win_signal::ctrl_c().unwrap();
        #[cfg(windows)]
        let mut sighup_signal = win_signal::ctrl_shutdown().unwrap();

        #[cfg(not(windows))]
        let mut term_signal = unix_signal(SignalKind::terminate()).unwrap();
        #[cfg(not(windows))]
        let mut interrupt_signal = unix_signal(SignalKind::interrupt()).unwrap();
        #[cfg(not(windows))]
        let mut sighup_signal = unix_signal(SignalKind::hangup()).unwrap();

        let (shutdown_tx, _) = broadcast::channel::<()>(1);
        let (drain_tx, mut drain_rx) = mpsc::channel::<i32>(2048);
        let (exit_tx, mut exit_rx) = mpsc::channel::<()>(1);
        let mut admin_only = false;
        let mut total_clients = 0;

        info!("Waiting for clients");

        loop {
            tokio::select! {
                // Reload config:
                // kill -SIGHUP $(pgrep pgcat)
                _ = sighup_signal.recv() => {
                    info!("Reloading config");

                    _ = reload_config(client_server_map.clone()).await;

                    get_config().show();
                },

                // Initiate graceful shutdown sequence on sig int
                _ = interrupt_signal.recv() => {
                    info!("Got SIGINT");

                    // Don't want this to happen more than once
                    if admin_only {
                        continue;
                    }

                    admin_only = true;

                    // Broadcast that client tasks need to finish
                    let _ = shutdown_tx.send(());
                    let exit_tx = exit_tx.clone();
                    let _ = drain_tx.send(0).await;

                    tokio::task::spawn(async move {
                        let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(config.general.shutdown_timeout));

                        // First tick fires immediately.
                        interval.tick().await;

                        // Second one in the interval time.
                        interval.tick().await;

                        // We're done waiting.
                        error!("Graceful shutdown timed out. {} active clients being closed", total_clients);

                        let _ = exit_tx.send(()).await;
                    });
                },

                _ = term_signal.recv() => {
                    info!("Got SIGTERM, closing with {} clients active", total_clients);
                    break;
                },

                new_client = listener.accept() => {
                    let (socket, addr) = match new_client {
                        Ok((socket, addr)) => (socket, addr),
                        Err(err) => {
                            error!("{:?}", err);
                            continue;
                        }
                    };

                    let shutdown_rx = shutdown_tx.subscribe();
                    let drain_tx = drain_tx.clone();
                    let client_server_map = client_server_map.clone();

                    let tls_certificate = get_config().general.tls_certificate.clone();

                    configure_socket(&socket);

                    tokio::task::spawn(async move {
                        let start = chrono::offset::Utc::now().naive_utc();

                        match pgcat::client::client_entrypoint(
                            socket,
                            client_server_map,
                            shutdown_rx,
                            drain_tx,
                            admin_only,
                            tls_certificate,
                            config.general.log_client_connections,
                        )
                        .await
                        {
                            Ok(()) => {
                                let duration = chrono::offset::Utc::now().naive_utc() - start;

                                if get_config().general.log_client_disconnections {
                                    info!(
                                        "Client {:?} disconnected, session duration: {}",
                                        addr,
                                        format_duration(&duration)
                                    );
                                } else {
                                    debug!(
                                        "Client {:?} disconnected, session duration: {}",
                                        addr,
                                        format_duration(&duration)
                                    );
                                }
                            }

                            Err(err) => {
                                match err {
                                    pgcat::errors::Error::ClientBadStartup => debug!("Client disconnected with error {:?}", err),
                                    _ => warn!("Client disconnected with error {:?}", err),
                                }
                            }
                        };
                    });
                }

                _ = exit_rx.recv() => {
                    break;
                }

                client_ping = drain_rx.recv() => {
                    let client_ping = client_ping.unwrap();
                    total_clients += client_ping;

                    if total_clients == 0 && admin_only {
                        let _ = exit_tx.send(()).await;
                    }
                }
            }
        }

        info!("Shutting down...");
    });

    Ok(())
}
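
The drain bookkeeping in the new entrypoint reduces to a small pattern: each client task reports +1 on connect and -1 on disconnect over an mpsc channel, and the accept loop can exit once the count reaches zero after shutdown starts. A self-contained sketch of just that pattern (names and channel sizes are illustrative):

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (drain_tx, mut drain_rx) = mpsc::channel::<i32>(16);

    // A hypothetical client task: +1 on connect, -1 on disconnect.
    let tx = drain_tx.clone();
    tokio::spawn(async move {
        tx.send(1).await.unwrap();
        // ... serve the client ...
        tx.send(-1).await.unwrap();
    });
    drop(drain_tx);

    // The accept loop's side: exit once the count drains back to zero.
    let mut total_clients = 0;
    while let Some(delta) = drain_rx.recv().await {
        total_clients += delta;
        if delta < 0 && total_clients == 0 {
            break;
        }
    }
    println!("all clients drained");
}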

src/messages.rs | 1510 lines (diff suppressed because it is too large)

src/mirrors.rs | 190 lines (new file)
@@ -0,0 +1,190 @@

use std::sync::Arc;

/// A mirrored PostgreSQL client.
/// Packets arrive to us through a channel from the main client and we send them to the server.
use bb8::Pool;
use bytes::{Bytes, BytesMut};
use parking_lot::RwLock;

use crate::config::{get_config, Address, Role, User};
use crate::pool::{ClientServerMap, ServerPool};
use log::{error, info, trace, warn};
use tokio::sync::mpsc::{channel, Receiver, Sender};

pub struct MirroredClient {
    address: Address,
    user: User,
    database: String,
    bytes_rx: Receiver<Bytes>,
    disconnect_rx: Receiver<()>,
}

impl MirroredClient {
    async fn create_pool(&self) -> Pool<ServerPool> {
        let config = get_config();
        let default = std::time::Duration::from_millis(10_000).as_millis() as u64;
        let (connection_timeout, idle_timeout, _cfg, prepared_statement_cache_size) =
            match config.pools.get(&self.address.pool_name) {
                Some(cfg) => (
                    cfg.connect_timeout.unwrap_or(default),
                    cfg.idle_timeout.unwrap_or(default),
                    cfg.clone(),
                    cfg.prepared_statements_cache_size,
                ),
                None => (default, default, crate::config::Pool::default(), 0),
            };

        let manager = ServerPool::new(
            self.address.clone(),
            self.user.clone(),
            self.database.as_str(),
            ClientServerMap::default(),
            Arc::new(RwLock::new(None)),
            None,
            true,
            false,
            prepared_statement_cache_size,
        );

        Pool::builder()
            .max_size(1)
            .connection_timeout(std::time::Duration::from_millis(connection_timeout))
            .idle_timeout(Some(std::time::Duration::from_millis(idle_timeout)))
            .test_on_check_out(false)
            .build(manager)
            .await
            .unwrap()
    }

    pub fn start(mut self) {
        tokio::spawn(async move {
            let pool = self.create_pool().await;
            let address = self.address.clone();
            loop {
                let mut server = match pool.get().await {
                    Ok(server) => server,
                    Err(err) => {
                        error!(
                            "Failed to get connection from pool, discarding message {:?}, {:?}",
                            err,
                            address.clone()
                        );
                        continue;
                    }
                };

                tokio::select! {
                    // Exit channel events
                    _ = self.disconnect_rx.recv() => {
                        info!("Got mirror exit signal, exiting {:?}", address.clone());
                        break;
                    }

                    // Incoming data from server (we read to clear the socket buffer and discard the data)
                    recv_result = server.recv(None) => {
                        match recv_result {
                            Ok(message) => trace!("Received from mirror: {} {:?}", String::from_utf8_lossy(&message[..]), address.clone()),
                            Err(err) => {
                                server.mark_bad(
                                    format!("Failed to receive from mirror {:?} {:?}", err, address.clone()).as_str()
                                );
                            }
                        }
                    }

                    // Messages to send to the server
                    message = self.bytes_rx.recv() => {
                        match message {
                            Some(bytes) => {
                                match server.send(&BytesMut::from(&bytes[..])).await {
                                    Ok(_) => trace!("Sent to mirror: {} {:?}", String::from_utf8_lossy(&bytes[..]), address.clone()),
                                    Err(err) => {
                                        server.mark_bad(
                                            format!("Failed to send to mirror, discarding message {:?}, {:?}", err, address.clone()).as_str()
                                        );
                                    }
                                }
                            }
                            None => {
                                info!("Mirror channel closed, exiting {:?}", address.clone());
                                break;
                            },
                        }
                    }
                }
            }
        });
    }
}

pub struct MirroringManager {
    pub byte_senders: Vec<Sender<Bytes>>,
    pub disconnect_senders: Vec<Sender<()>>,
}

impl MirroringManager {
    pub fn from_addresses(
        user: User,
        database: String,
        addresses: Vec<Address>,
    ) -> MirroringManager {
        let mut byte_senders: Vec<Sender<Bytes>> = vec![];
        let mut exit_senders: Vec<Sender<()>> = vec![];

        addresses.iter().for_each(|mirror| {
            let (bytes_tx, bytes_rx) = channel::<Bytes>(10);
            let (exit_tx, exit_rx) = channel::<()>(1);
            let mut addr = mirror.clone();
            addr.role = Role::Mirror;
            let client = MirroredClient {
                user: user.clone(),
                database: database.to_owned(),
                address: addr,
                bytes_rx,
                disconnect_rx: exit_rx,
            };
            exit_senders.push(exit_tx);
            byte_senders.push(bytes_tx);
            client.start();
        });

        Self {
            byte_senders,
            disconnect_senders: exit_senders,
        }
    }

    pub fn send(&mut self, bytes: &BytesMut) {
        // We want to avoid performing an allocation if we won't be able to send the message.
        // There is a possibility of a race here where we check the capacity and then the channel is
        // closed or the capacity is reduced to 0, but mirroring is best effort anyway.
        if self
            .byte_senders
            .iter()
            .all(|sender| sender.capacity() == 0 || sender.is_closed())
        {
            return;
        }
        let immutable_bytes = bytes.clone().freeze();
        self.byte_senders.iter_mut().for_each(|sender| {
            match sender.try_send(immutable_bytes.clone()) {
                Ok(_) => {}
                Err(err) => {
                    warn!("Failed to send bytes to a mirror channel {}", err);
                }
            }
        });
    }

    pub fn disconnect(&mut self) {
        self.disconnect_senders
            .iter_mut()
            .for_each(|sender| match sender.try_send(()) {
                Ok(_) => {}
                Err(err) => {
                    warn!(
                        "Failed to send disconnect signal to a mirror channel {}",
                        err
                    );
                }
            });
    }
}
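
The manager is a best-effort fan-out: every mirror gets its own bounded channel, and try_send drops traffic instead of blocking the primary client. A stripped-down sketch of that pattern with plain tokio channels (no pgcat types; names are illustrative):

use bytes::Bytes;
use tokio::sync::mpsc::{channel, Sender};

// Copy a payload to every mirror channel; a full or closed channel
// just loses its copy, which is acceptable for mirroring.
fn fan_out(senders: &[Sender<Bytes>], payload: &Bytes) {
    for sender in senders {
        if let Err(err) = sender.try_send(payload.clone()) {
            eprintln!("mirror send failed: {}", err);
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = channel::<Bytes>(10);
    fan_out(&[tx], &Bytes::from_static(b"SELECT 1"));
    assert_eq!(rx.recv().await.unwrap(), Bytes::from_static(b"SELECT 1"));
}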

src/plugins/intercept.rs | 120 lines (new file)
@@ -0,0 +1,120 @@

//! The intercept plugin.
//!
//! It intercepts queries and returns fake results.

use async_trait::async_trait;
use bytes::{BufMut, BytesMut};
use serde::{Deserialize, Serialize};
use sqlparser::ast::Statement;

use log::debug;

use crate::{
    config::Intercept as InterceptConfig,
    errors::Error,
    messages::{command_complete, data_row_nullable, row_description, DataType},
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

// TODO: use these structs for deserialization
#[derive(Serialize, Deserialize)]
pub struct Rule {
    query: String,
    schema: Vec<Column>,
    result: Vec<Vec<String>>,
}

#[derive(Serialize, Deserialize)]
pub struct Column {
    name: String,
    data_type: String,
}

/// The intercept plugin.
pub struct Intercept<'a> {
    pub enabled: bool,
    pub config: &'a InterceptConfig,
}

#[async_trait]
impl<'a> Plugin for Intercept<'a> {
    async fn run(
        &mut self,
        query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled || ast.is_empty() {
            return Ok(PluginOutput::Allow);
        }

        let mut config = self.config.clone();
        config.substitute(
            &query_router.pool_settings().db,
            &query_router.pool_settings().user.username,
        );

        let mut result = BytesMut::new();

        for q in ast {
            // Normalization
            let q = q.to_string().to_ascii_lowercase();

            for (_, target) in config.queries.iter() {
                if target.query.as_str() == q {
                    debug!("Intercepting query: {}", q);

                    let rd = target
                        .schema
                        .iter()
                        .map(|row| {
                            let name = &row[0];
                            let data_type = &row[1];
                            (
                                name.as_str(),
                                match data_type.as_str() {
                                    "text" => DataType::Text,
                                    "anyarray" => DataType::AnyArray,
                                    "oid" => DataType::Oid,
                                    "bool" => DataType::Bool,
                                    "int4" => DataType::Int4,
                                    _ => DataType::Any,
                                },
                            )
                        })
                        .collect::<Vec<(&str, DataType)>>();

                    result.put(row_description(&rd));

                    target.result.iter().for_each(|row| {
                        let row = row
                            .iter()
                            .map(|s| {
                                let s = s.as_str().to_string();

                                if s.is_empty() {
                                    None
                                } else {
                                    Some(s)
                                }
                            })
                            .collect::<Vec<Option<String>>>();
                        result.put(data_row_nullable(&row));
                    });

                    result.put(command_complete("SELECT"));
                }
            }
        }

        if !result.is_empty() {
            result.put_u8(b'Z');
            result.put_i32(5);
            result.put_u8(b'I');

            return Ok(PluginOutput::Intercept(result));
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
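
The matching step above relies on re-rendering each parsed statement to lowercase SQL before comparing it with the configured query string, so the client's formatting does not matter. A standalone sketch of just that normalization (assuming the sqlparser crate):

use sqlparser::dialect::PostgreSqlDialect;
use sqlparser::parser::Parser;

fn main() {
    let ast = Parser::parse_sql(&PostgreSqlDialect {}, "SELECT   VERSION()").unwrap();
    for statement in &ast {
        // to_string() re-renders the AST, erasing whitespace differences.
        let normalized = statement.to_string().to_ascii_lowercase();
        assert_eq!(normalized, "select version()");
    }
}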

src/plugins/mod.rs | 45 lines (new file)
@@ -0,0 +1,45 @@

//! The plugin ecosystem.
//!
//! Currently plugins only grant access or deny access to the database for a particular query.
//! Example use cases:
//! - block known bad queries
//! - block access to system catalogs
//! - block dangerous modifications like `DROP TABLE`
//! - etc
//!

pub mod intercept;
pub mod prewarmer;
pub mod query_logger;
pub mod table_access;

use crate::{errors::Error, query_router::QueryRouter};
use async_trait::async_trait;
use bytes::BytesMut;
use sqlparser::ast::Statement;

pub use intercept::Intercept;
pub use query_logger::QueryLogger;
pub use table_access::TableAccess;

#[derive(Clone, Debug, PartialEq)]
pub enum PluginOutput {
    Allow,
    Deny(String),
    Overwrite(Vec<Statement>),
    Intercept(BytesMut),
}

#[async_trait]
pub trait Plugin {
    // Run before the query is sent to the server.
    #[allow(clippy::ptr_arg)]
    async fn run(
        &mut self,
        query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error>;

    // TODO: run after the result is returned
    // async fn callback(&mut self, query_router: &QueryRouter);
}
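
For orientation, a hypothetical plugin against this trait (the DenyAll name and its wiring are illustrative, not part of the diff, and assume the code lives inside this crate): it denies every non-empty statement list.

use async_trait::async_trait;
use sqlparser::ast::Statement;

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

pub struct DenyAll {
    pub enabled: bool,
}

#[async_trait]
impl Plugin for DenyAll {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled || ast.is_empty() {
            return Ok(PluginOutput::Allow);
        }

        Ok(PluginOutput::Deny("all queries are denied".into()))
    }
}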

src/plugins/prewarmer.rs | 28 lines (new file)
@@ -0,0 +1,28 @@

//! Prewarm new connections before giving them to the client.
use crate::{errors::Error, server::Server};
use log::info;

pub struct Prewarmer<'a> {
    pub enabled: bool,
    pub server: &'a mut Server,
    pub queries: &'a Vec<String>,
}

impl<'a> Prewarmer<'a> {
    pub async fn run(&mut self) -> Result<(), Error> {
        if !self.enabled {
            return Ok(());
        }

        for query in self.queries {
            info!(
                "{} Prewarming with query: `{}`",
                self.server.address(),
                query
            );
            self.server.query(query).await?;
        }

        Ok(())
    }
}

src/plugins/query_logger.rs | 38 lines (new file)
@@ -0,0 +1,38 @@

//! Log all queries to stdout (or somewhere else, why not).

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};
use async_trait::async_trait;
use log::info;
use sqlparser::ast::Statement;

pub struct QueryLogger<'a> {
    pub enabled: bool,
    pub user: &'a str,
    pub db: &'a str,
}

#[async_trait]
impl<'a> Plugin for QueryLogger<'a> {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled {
            return Ok(PluginOutput::Allow);
        }

        let query = ast
            .iter()
            .map(|q| q.to_string())
            .collect::<Vec<String>>()
            .join("; ");
        info!("[pool: {}][user: {}] {}", self.db, self.user, query);

        Ok(PluginOutput::Allow)
    }
}

src/plugins/table_access.rs | 59 lines (new file)
@@ -0,0 +1,59 @@

//! This query router plugin will check if the user can access a particular
//! table as part of their query. If they can't, the query will not be routed.

use async_trait::async_trait;
use sqlparser::ast::{visit_relations, Statement};

use crate::{
    errors::Error,
    plugins::{Plugin, PluginOutput},
    query_router::QueryRouter,
};

use log::debug;

use core::ops::ControlFlow;

pub struct TableAccess<'a> {
    pub enabled: bool,
    pub tables: &'a Vec<String>,
}

#[async_trait]
impl<'a> Plugin for TableAccess<'a> {
    async fn run(
        &mut self,
        _query_router: &QueryRouter,
        ast: &Vec<Statement>,
    ) -> Result<PluginOutput, Error> {
        if !self.enabled {
            return Ok(PluginOutput::Allow);
        }

        let mut found = None;

        visit_relations(ast, |relation| {
            let relation = relation.to_string();
            let parts = relation.split('.').collect::<Vec<&str>>();
            let table_name = parts.last().unwrap();

            if self.tables.contains(&table_name.to_string()) {
                found = Some(table_name.to_string());
                ControlFlow::<()>::Break(())
            } else {
                ControlFlow::<()>::Continue(())
            }
        });

        if let Some(found) = found {
            debug!("Blocking access to table \"{}\"", found);

            Ok(PluginOutput::Deny(format!(
                "permission for table \"{}\" denied",
                found
            )))
        } else {
            Ok(PluginOutput::Allow)
        }
    }
}
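
The relation walk is the interesting part: sqlparser's visit_relations visits every table reference in the AST. A standalone sketch (assuming the sqlparser crate with its visitor feature enabled):

use core::ops::ControlFlow;
use sqlparser::ast::visit_relations;
use sqlparser::dialect::PostgreSqlDialect;
use sqlparser::parser::Parser;

fn main() {
    let ast = Parser::parse_sql(&PostgreSqlDialect {}, "SELECT * FROM pg_database").unwrap();

    // Prints "references table: pg_database".
    let _ = visit_relations(&ast, |relation| {
        println!("references table: {}", relation);
        ControlFlow::<()>::Continue(())
    });
}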

src/pool.rs | 1260 lines (diff suppressed because it is too large)

src/prometheus.rs | 530 lines (new file)
@@ -0,0 +1,530 @@

use http_body_util::Full;
use hyper::body;
use hyper::body::Bytes;

use hyper::server::conn::http1;
use hyper::service::service_fn;
use hyper::{Method, Request, Response, StatusCode};
use hyper_util::rt::TokioIo;
use log::{debug, error, info};
use phf::phf_map;
use std::collections::HashMap;
use std::fmt;
use std::net::SocketAddr;
use std::sync::atomic::Ordering;
use tokio::net::TcpListener;

use crate::config::Address;
use crate::pool::{get_all_pools, PoolIdentifier};
use crate::stats::get_server_stats;
use crate::stats::pool::PoolStats;

struct MetricHelpType {
    help: &'static str,
    ty: &'static str,
}

struct ServerPrometheusStats {
    bytes_received: u64,
    bytes_sent: u64,
    transaction_count: u64,
    query_count: u64,
    error_count: u64,
    active_count: u64,
    idle_count: u64,
    login_count: u64,
    tested_count: u64,
}

// Reference for metric types: https://prometheus.io/docs/concepts/metric_types/
// Counters only increase; gauges can arbitrarily increase or decrease.
static METRIC_HELP_AND_TYPES_LOOKUP: phf::Map<&'static str, MetricHelpType> = phf_map! {
    "stats_total_query_count" => MetricHelpType {
        help: "Number of queries sent by all clients",
        ty: "counter",
    },
    "stats_total_query_time" => MetricHelpType {
        help: "Total amount of time for queries to execute",
        ty: "counter",
    },
    "stats_total_received" => MetricHelpType {
        help: "Number of bytes received from the server",
        ty: "counter",
    },
    "stats_total_sent" => MetricHelpType {
        help: "Number of bytes sent to the server",
        ty: "counter",
    },
    "stats_total_xact_count" => MetricHelpType {
        help: "Total number of transactions started by the client",
        ty: "counter",
    },
    "stats_total_xact_time" => MetricHelpType {
        help: "Total amount of time for all transactions to execute",
        ty: "counter",
    },
    "stats_total_wait_time" => MetricHelpType {
        help: "Total time client waited for a server connection",
        ty: "counter",
    },
    "stats_avg_query_count" => MetricHelpType {
        help: "Average of total_query_count every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_query_time" => MetricHelpType {
        help: "Average time taken for queries to execute every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_recv" => MetricHelpType {
        help: "Average of total_received bytes every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_sent" => MetricHelpType {
        help: "Average of total_sent bytes every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_errors" => MetricHelpType {
        help: "Average number of errors every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_xact_count" => MetricHelpType {
        help: "Average of total_xact_count every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_xact_time" => MetricHelpType {
        help: "Average of total_xact_time every 15 seconds",
        ty: "gauge",
    },
    "stats_avg_wait_time" => MetricHelpType {
        help: "Average of total_wait_time every 15 seconds",
        ty: "gauge",
    },
    "pools_maxwait_us" => MetricHelpType {
        help: "The time a client waited for a server connection in microseconds",
        ty: "gauge",
    },
    "pools_maxwait" => MetricHelpType {
        help: "The time a client waited for a server connection in seconds",
        ty: "gauge",
    },
    "pools_cl_waiting" => MetricHelpType {
        help: "How many clients are waiting for a connection from the pool",
        ty: "gauge",
    },
    "pools_cl_active" => MetricHelpType {
        help: "How many clients are actively communicating with a server",
        ty: "gauge",
    },
    "pools_cl_idle" => MetricHelpType {
        help: "How many clients are idle",
        ty: "gauge",
    },
    "pools_sv_idle" => MetricHelpType {
        help: "How many server connections are idle",
        ty: "gauge",
    },
    "pools_sv_active" => MetricHelpType {
        help: "How many server connections are actively communicating with a client",
        ty: "gauge",
    },
    "pools_sv_login" => MetricHelpType {
        help: "How many server connections are currently being created",
        ty: "gauge",
    },
    "pools_sv_tested" => MetricHelpType {
        help: "How many server connections are currently waiting on a health check to succeed",
        ty: "gauge",
    },
    "servers_bytes_received" => MetricHelpType {
        help: "Volume in bytes of network traffic received by server",
        ty: "counter",
    },
    "servers_bytes_sent" => MetricHelpType {
        help: "Volume in bytes of network traffic sent by server",
        ty: "counter",
    },
    "servers_transaction_count" => MetricHelpType {
        help: "Number of transactions executed by server",
        ty: "counter",
    },
    "servers_query_count" => MetricHelpType {
        help: "Number of queries executed by server",
        ty: "counter",
    },
    "servers_error_count" => MetricHelpType {
        help: "Number of errors",
        ty: "counter",
    },
    "servers_idle_count" => MetricHelpType {
        help: "Number of server connections in idle state",
        ty: "gauge",
    },
    "servers_active_count" => MetricHelpType {
        help: "Number of server connections in active state",
        ty: "gauge",
    },
    "servers_tested_count" => MetricHelpType {
        help: "Number of server connections in tested state",
        ty: "gauge",
    },
    "servers_login_count" => MetricHelpType {
        help: "Number of server connections in login state",
        ty: "gauge",
    },
    "servers_is_banned" => MetricHelpType {
        help: "0 if server is not banned, 1 if server is banned",
        ty: "gauge",
    },
    "servers_is_paused" => MetricHelpType {
        help: "0 if server is not paused, 1 if server is paused",
        ty: "gauge",
    },
    "databases_pool_size" => MetricHelpType {
        help: "Maximum number of server connections",
        ty: "gauge",
    },
    "databases_current_connections" => MetricHelpType {
        help: "Current number of connections for this database",
        ty: "gauge",
    },
};

struct PrometheusMetric<Value: fmt::Display> {
    name: String,
    help: String,
    ty: String,
    labels: HashMap<&'static str, String>,
    value: Value,
}

impl<Value: fmt::Display> fmt::Display for PrometheusMetric<Value> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut sorted_labels: Vec<_> = self.labels.iter().collect();
        sorted_labels.sort_by_key(|&(key, _)| key);
        let formatted_labels = sorted_labels
            .iter()
            .map(|(key, value)| format!("{}=\"{}\"", key, value))
            .collect::<Vec<_>>()
            .join(",");
        write!(
            f,
            "{name}{{{formatted_labels}}} {value}",
            name = format_args!("pgcat_{}", self.name),
            formatted_labels = formatted_labels,
            value = self.value
        )
    }
}
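
For reference, one metric rendered through this Display impl, together with the header emitted by get_header below, looks like the following (label values are hypothetical; note the labels come out sorted by key):

# HELP pgcat_stats_total_query_count Number of queries sent by all clients
# TYPE pgcat_stats_total_query_count counter
pgcat_stats_total_query_count{database="postgres",host="127.0.0.1",index="0",pool="main",role="primary",shard="0",username="app"} 42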
impl<Value: fmt::Display> PrometheusMetric<Value> {
    fn from_name<V: fmt::Display>(
        name: &str,
        value: V,
        labels: HashMap<&'static str, String>,
    ) -> Option<PrometheusMetric<V>> {
        METRIC_HELP_AND_TYPES_LOOKUP
            .get(name)
            .map(|metric| PrometheusMetric::<V> {
                name: name.to_owned(),
                help: metric.help.to_owned(),
                ty: metric.ty.to_owned(),
                value,
                labels,
            })
    }

    fn from_database_info(
        address: &Address,
        name: &str,
        value: u32,
    ) -> Option<PrometheusMetric<u32>> {
        let mut labels = HashMap::new();
        labels.insert("host", address.host.clone());
        labels.insert("shard", address.shard.to_string());
        labels.insert("role", address.role.to_string());
        labels.insert("pool", address.pool_name.clone());
        labels.insert("index", address.address_index.to_string());
        labels.insert("database", address.database.to_string());
        labels.insert("username", address.username.clone());

        Self::from_name(&format!("databases_{}", name), value, labels)
    }

    fn from_server_info(
        address: &Address,
        name: &str,
        value: u64,
    ) -> Option<PrometheusMetric<u64>> {
        let mut labels = HashMap::new();
        labels.insert("host", address.host.clone());
        labels.insert("shard", address.shard.to_string());
        labels.insert("role", address.role.to_string());
        labels.insert("pool", address.pool_name.clone());
        labels.insert("index", address.address_index.to_string());
        labels.insert("database", address.database.to_string());
        labels.insert("username", address.username.clone());

        Self::from_name(&format!("servers_{}", name), value, labels)
    }

    fn from_address(address: &Address, name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
        let mut labels = HashMap::new();
        labels.insert("host", address.host.clone());
        labels.insert("shard", address.shard.to_string());
        labels.insert("pool", address.pool_name.clone());
        labels.insert("role", address.role.to_string());
        labels.insert("index", address.address_index.to_string());
        labels.insert("database", address.database.to_string());
        labels.insert("username", address.username.clone());

        Self::from_name(&format!("stats_{}", name), value, labels)
    }

    fn from_pool(pool_id: PoolIdentifier, name: &str, value: u64) -> Option<PrometheusMetric<u64>> {
        let mut labels = HashMap::new();
        labels.insert("pool", pool_id.db);
        labels.insert("user", pool_id.user);

        Self::from_name(&format!("pools_{}", name), value, labels)
    }

    fn get_header(&self) -> String {
        format!(
            "\n# HELP {name} {help}\n# TYPE {name} {ty}",
            name = format_args!("pgcat_{}", self.name),
            help = self.help,
            ty = self.ty,
        )
    }
}

async fn prometheus_stats(
    request: Request<body::Incoming>,
) -> Result<Response<Full<Bytes>>, hyper::http::Error> {
    match (request.method(), request.uri().path()) {
        (&Method::GET, "/metrics") => {
            let mut lines = Vec::new();
            push_address_stats(&mut lines);
            push_pool_stats(&mut lines);
            push_server_stats(&mut lines);
            push_database_stats(&mut lines);
            lines.push("".to_string()); // End the stats with a line terminator, as required by the specification.

            Response::builder()
                .header("content-type", "text/plain; version=0.0.4")
                .body(lines.join("\n").into())
        }
        _ => Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body("".into()),
    }
}

// Adds metrics shown in a SHOW STATS admin command.
fn push_address_stats(lines: &mut Vec<String>) {
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
    for (_, pool) in get_all_pools() {
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let stats = &*address.stats;
                for (key, value) in stats.clone() {
                    if let Some(prometheus_metric) =
                        PrometheusMetric::<u64>::from_address(address, &key, value)
                    {
                        grouped_metrics
                            .entry(key)
                            .or_default()
                            .push(prometheus_metric);
                    } else {
                        debug!("Metric {} not implemented for {}", key, address.name());
                    }
                }
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

// Adds relevant metrics shown in a SHOW POOLS admin command.
fn push_pool_stats(lines: &mut Vec<String>) {
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
    let pool_stats = PoolStats::construct_pool_lookup();
    for (pool_id, stats) in pool_stats.iter() {
        for (name, value) in stats.clone() {
            if let Some(prometheus_metric) =
                PrometheusMetric::<u64>::from_pool(pool_id.clone(), &name, value)
            {
                grouped_metrics
                    .entry(name)
                    .or_default()
                    .push(prometheus_metric);
            } else {
                debug!("Metric {} not implemented for ({})", name, *pool_id);
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

// Adds relevant metrics shown in a SHOW DATABASES admin command.
fn push_database_stats(lines: &mut Vec<String>) {
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u32>>> = HashMap::new();
    for (_, pool) in get_all_pools() {
        let pool_config = pool.settings.clone();
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                let pool_state = pool.pool_state(shard, server);
                let metrics = vec![
                    ("pool_size", pool_config.user.pool_size),
                    ("current_connections", pool_state.connections),
                ];
                for (key, value) in metrics {
                    if let Some(prometheus_metric) =
                        PrometheusMetric::<u32>::from_database_info(address, key, value)
                    {
                        grouped_metrics
                            .entry(key.to_string())
                            .or_default()
                            .push(prometheus_metric);
                    } else {
                        debug!("Metric {} not implemented for {}", key, address.name());
                    }
                }
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

// Adds relevant metrics shown in a SHOW SERVERS admin command.
fn push_server_stats(lines: &mut Vec<String>) {
    let server_stats = get_server_stats();
    let mut prom_stats = HashMap::<String, ServerPrometheusStats>::new();
    for (_, stats) in server_stats {
        let entry = prom_stats
            .entry(stats.address_name())
            .or_insert(ServerPrometheusStats {
                bytes_received: 0,
                bytes_sent: 0,
                transaction_count: 0,
                query_count: 0,
                error_count: 0,
                active_count: 0,
                idle_count: 0,
                login_count: 0,
                tested_count: 0,
            });
        entry.bytes_received += stats.bytes_received.load(Ordering::Relaxed);
        entry.bytes_sent += stats.bytes_sent.load(Ordering::Relaxed);
        entry.transaction_count += stats.transaction_count.load(Ordering::Relaxed);
        entry.query_count += stats.query_count.load(Ordering::Relaxed);
        entry.error_count += stats.error_count.load(Ordering::Relaxed);
        match stats.state.load(Ordering::Relaxed) {
            crate::stats::ServerState::Login => entry.login_count += 1,
            crate::stats::ServerState::Active => entry.active_count += 1,
            crate::stats::ServerState::Tested => entry.tested_count += 1,
            crate::stats::ServerState::Idle => entry.idle_count += 1,
        }
    }
    let mut grouped_metrics: HashMap<String, Vec<PrometheusMetric<u64>>> = HashMap::new();
    for (_, pool) in get_all_pools() {
        for shard in 0..pool.shards() {
            for server in 0..pool.servers(shard) {
                let address = pool.address(shard, server);
                if let Some(server_info) = prom_stats.get(&address.name()) {
                    let metrics = [
                        ("bytes_received", server_info.bytes_received),
                        ("bytes_sent", server_info.bytes_sent),
                        ("transaction_count", server_info.transaction_count),
                        ("query_count", server_info.query_count),
                        ("error_count", server_info.error_count),
                        ("idle_count", server_info.idle_count),
                        ("active_count", server_info.active_count),
                        ("login_count", server_info.login_count),
                        ("tested_count", server_info.tested_count),
                        ("is_banned", if pool.is_banned(address) { 1 } else { 0 }),
                        ("is_paused", if pool.paused() { 1 } else { 0 }),
                    ];
                    for (key, value) in metrics {
                        if let Some(prometheus_metric) =
                            PrometheusMetric::<u64>::from_server_info(address, key, value)
                        {
                            grouped_metrics
                                .entry(key.to_string())
                                .or_default()
                                .push(prometheus_metric);
                        } else {
                            debug!("Metric {} not implemented for {}", key, address.name());
                        }
                    }
                }
            }
        }
    }
    for (_key, metrics) in grouped_metrics {
        if !metrics.is_empty() {
            lines.push(metrics[0].get_header());
            for metric in metrics {
                lines.push(metric.to_string());
            }
        }
    }
}

pub async fn start_metric_server(http_addr: SocketAddr) {
    let listener = TcpListener::bind(http_addr);
    let listener = match listener.await {
        Ok(listener) => listener,
        Err(e) => {
            error!("Failed to bind prometheus server to HTTP address: {}.", e);
            return;
        }
    };
    info!(
        "Exposing prometheus metrics on http://{}/metrics.",
        http_addr
    );
    loop {
        let stream = match listener.accept().await {
            Ok((stream, _)) => stream,
            Err(e) => {
                error!("Error accepting connection: {}", e);
                continue;
            }
        };
        let io = TokioIo::new(stream);

        tokio::task::spawn(async move {
            if let Err(err) = http1::Builder::new()
                .serve_connection(io, service_fn(prometheus_stats))
                .await
            {
                eprintln!("Error serving HTTP connection for metrics: {:?}", err);
            }
        });
    }
}

src/query_router.rs | 1973 lines (new file; diff suppressed because it is too large)

src/scram.rs | 325 lines (new file)
@@ -0,0 +1,325 @@

// SCRAM-SHA-256 authentication. Heavily inspired by
// https://github.com/sfackler/rust-postgres/
// SASL implementation.

use base64::{engine::general_purpose, Engine as _};
use bytes::BytesMut;
use hmac::{Hmac, Mac};
use rand::{self, Rng};
use sha2::digest::FixedOutput;
use sha2::{Digest, Sha256};

use std::fmt::Write;

use crate::constants::*;
use crate::errors::Error;

/// Normalize a password string. Postgres
/// passwords don't have to be UTF-8.
fn normalize(pass: &[u8]) -> Vec<u8> {
    let pass = match std::str::from_utf8(pass) {
        Ok(pass) => pass,
        Err(_) => return pass.to_vec(),
    };

    match stringprep::saslprep(pass) {
        Ok(pass) => pass.into_owned().into_bytes(),
        Err(_) => pass.as_bytes().to_vec(),
    }
}
/// Keep the SASL state through the exchange.
|
||||||
|
/// It takes 3 messages to complete the authentication.
|
||||||
|
pub struct ScramSha256 {
|
||||||
|
password: String,
|
||||||
|
salted_password: [u8; 32],
|
||||||
|
auth_message: String,
|
||||||
|
message: BytesMut,
|
||||||
|
nonce: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ScramSha256 {
|
||||||
|
/// Create the Scram state from a password. It'll automatically
|
||||||
|
/// generate a nonce.
|
||||||
|
pub fn new(password: &str) -> ScramSha256 {
|
||||||
|
let mut rng = rand::thread_rng();
|
||||||
|
let nonce = (0..NONCE_LENGTH)
|
||||||
|
.map(|_| {
|
||||||
|
let mut v = rng.gen_range(0x21u8..0x7e);
|
||||||
|
if v == 0x2c {
|
||||||
|
v = 0x7e
|
||||||
|
}
|
||||||
|
v as char
|
||||||
|
})
|
||||||
|
.collect::<String>();
|
||||||
|
|
||||||
|
Self::from_nonce(password, &nonce)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Used for testing.
|
||||||
|
pub fn from_nonce(password: &str, nonce: &str) -> ScramSha256 {
|
||||||
|
let message = BytesMut::from(format!("{}n=,r={}", "n,,", nonce).as_bytes());
|
||||||
|
|
||||||
|
ScramSha256 {
|
||||||
|
password: password.to_string(),
|
||||||
|
nonce: String::from(nonce),
|
||||||
|
message,
|
||||||
|
salted_password: [0u8; 32],
|
||||||
|
auth_message: String::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the current state of the SASL authentication.
|
||||||
|
pub fn message(&mut self) -> BytesMut {
|
||||||
|
self.message.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Update the state with message received from server.
|
||||||
|
pub fn update(&mut self, message: &BytesMut) -> Result<BytesMut, Error> {
|
||||||
|
let server_message = Message::parse(message)?;
|
||||||
|
|
||||||
|
if !server_message.nonce.starts_with(&self.nonce) {
|
||||||
|
return Err(Error::ProtocolSyncError("SCRAM".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
let salt = match general_purpose::STANDARD.decode(&server_message.salt) {
|
||||||
|
Ok(salt) => salt,
|
||||||
|
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
|
||||||
|
};
|
||||||
|
|
||||||
|
let salted_password = Self::hi(
|
||||||
|
&normalize(self.password.as_bytes()),
|
||||||
|
&salt,
|
||||||
|
server_message.iterations,
|
||||||
|
);
|
||||||
|
|
||||||
|
// Save for verification of final server message.
|
||||||
|
self.salted_password = salted_password;
|
||||||
|
|
||||||
|
let mut hmac = match Hmac::<Sha256>::new_from_slice(&salted_password) {
|
||||||
|
Ok(hmac) => hmac,
|
||||||
|
Err(_) => return Err(Error::ServerError),
|
||||||
|
};
|
||||||
|
|
||||||
|
hmac.update(b"Client Key");
|
||||||
|
|
||||||
|
let client_key = hmac.finalize().into_bytes();
|
||||||
|
|
||||||
|
let mut hash = Sha256::default();
|
||||||
|
hash.update(client_key.as_slice());
|
||||||
|
|
||||||
|
let stored_key = hash.finalize_fixed();
|
||||||
|
let mut cbind_input = vec![];
|
||||||
|
cbind_input.extend("n,,".as_bytes());
|
||||||
|
|
||||||
|
let cbind_input = general_purpose::STANDARD.encode(&cbind_input);
|
||||||
|
|
||||||
|
self.message.clear();
|
||||||
|
|
||||||
|
// Start writing the client reply.
|
||||||
|
match write!(
|
||||||
|
&mut self.message,
|
||||||
|
"c={},r={}",
|
||||||
|
cbind_input, server_message.nonce
|
||||||
|
) {
|
||||||
|
Ok(_) => (),
|
||||||
|
Err(_) => return Err(Error::ServerError),
|
||||||
|
};
|
||||||
|
|
||||||
|
let auth_message = format!(
|
||||||
|
"n=,r={},{},{}",
|
||||||
|
self.nonce,
|
||||||
|
String::from_utf8_lossy(&message[..]),
|
||||||
|
String::from_utf8_lossy(&self.message[..])
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut hmac = match Hmac::<Sha256>::new_from_slice(&stored_key) {
|
||||||
|
Ok(hmac) => hmac,
|
||||||
|
Err(_) => return Err(Error::ServerError),
|
||||||
|
};
|
||||||
|
hmac.update(auth_message.as_bytes());
|
||||||
|
|
||||||
|
// Save the auth message for server final message verification.
|
||||||
|
self.auth_message = auth_message;
|
||||||
|
|
||||||
|
let client_signature = hmac.finalize().into_bytes();
|
||||||
|
|
||||||
|
// Sign the client proof.
|
||||||
|
let mut client_proof = client_key;
|
||||||
|
for (proof, signature) in client_proof.iter_mut().zip(client_signature) {
|
||||||
|
*proof ^= signature;
|
||||||
|
}
|
||||||
|
|
||||||
|
match write!(
|
||||||
|
&mut self.message,
|
||||||
|
",p={}",
|
||||||
|
general_purpose::STANDARD.encode(&*client_proof)
|
||||||
|
) {
|
||||||
|
Ok(_) => (),
|
||||||
|
Err(_) => return Err(Error::ServerError),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(self.message.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Verify final server message.
|
||||||
|
pub fn finish(&mut self, message: &BytesMut) -> Result<(), Error> {
|
||||||
|
let final_message = FinalMessage::parse(message)?;
|
||||||
|
|
||||||
|
let verifier = match general_purpose::STANDARD.decode(final_message.value) {
|
||||||
|
Ok(verifier) => verifier,
|
||||||
|
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut hmac = match Hmac::<Sha256>::new_from_slice(&self.salted_password) {
|
||||||
|
Ok(hmac) => hmac,
|
||||||
|
Err(_) => return Err(Error::ServerError),
|
||||||
|
};
|
||||||
|
hmac.update(b"Server Key");
|
||||||
|
let server_key = hmac.finalize().into_bytes();
|
||||||
|
|
||||||
|
let mut hmac = match Hmac::<Sha256>::new_from_slice(&server_key) {
|
||||||
|
Ok(hmac) => hmac,
|
||||||
|
Err(_) => return Err(Error::ServerError),
|
||||||
|
};
|
||||||
|
hmac.update(self.auth_message.as_bytes());
|
||||||
|
|
||||||
|
match hmac.verify_slice(&verifier) {
|
||||||
|
Ok(_) => Ok(()),
|
||||||
|
Err(_) => Err(Error::ServerError),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Hash the password with the salt i-times.
|
||||||
|
fn hi(str: &[u8], salt: &[u8], i: u32) -> [u8; 32] {
|
||||||
|
let mut hmac =
|
||||||
|
Hmac::<Sha256>::new_from_slice(str).expect("HMAC is able to accept all key sizes");
|
||||||
|
hmac.update(salt);
|
||||||
|
hmac.update(&[0, 0, 0, 1]);
|
||||||
|
let mut prev = hmac.finalize().into_bytes();
|
||||||
|
|
||||||
|
let mut hi = prev;
|
||||||
|
|
||||||
|
for _ in 1..i {
|
||||||
|
let mut hmac = Hmac::<Sha256>::new_from_slice(str).expect("already checked above");
|
||||||
|
hmac.update(&prev);
|
||||||
|
prev = hmac.finalize().into_bytes();
|
||||||
|
|
||||||
|
for (hi, prev) in hi.iter_mut().zip(prev) {
|
||||||
|
*hi ^= prev;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
hi.into()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse the server challenge.
|
||||||
|
struct Message {
|
||||||
|
nonce: String,
|
||||||
|
salt: String,
|
||||||
|
iterations: u32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Message {
|
||||||
|
/// Parse the server SASL challenge.
|
||||||
|
fn parse(message: &BytesMut) -> Result<Message, Error> {
|
||||||
|
let parts = String::from_utf8_lossy(&message[..])
|
||||||
|
.split(',')
|
||||||
|
.map(|s| s.to_string())
|
||||||
|
.collect::<Vec<String>>();
|
||||||
|
|
||||||
|
if parts.len() != 3 {
|
||||||
|
return Err(Error::ProtocolSyncError("SCRAM".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
let nonce = str::replace(&parts[0], "r=", "");
|
||||||
|
let salt = str::replace(&parts[1], "s=", "");
|
||||||
|
let iterations = match str::replace(&parts[2], "i=", "").parse::<u32>() {
|
||||||
|
Ok(iterations) => iterations,
|
||||||
|
Err(_) => return Err(Error::ProtocolSyncError("SCRAM".to_string())),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Message {
|
||||||
|
nonce,
|
||||||
|
salt,
|
||||||
|
iterations,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Parse server final validation message.
|
||||||
|
struct FinalMessage {
|
||||||
|
value: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FinalMessage {
|
||||||
|
/// Parse the server final validation message.
|
||||||
|
pub fn parse(message: &BytesMut) -> Result<FinalMessage, Error> {
|
||||||
|
if !message.starts_with(b"v=") || message.len() < 4 {
|
||||||
|
return Err(Error::ProtocolSyncError("SCRAM".to_string()));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(FinalMessage {
|
||||||
|
value: String::from_utf8_lossy(&message[2..]).to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_server_first_message() {
|
||||||
|
let message = BytesMut::from(
|
||||||
|
"r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096".as_bytes(),
|
||||||
|
);
|
||||||
|
let message = Message::parse(&message).unwrap();
|
||||||
|
assert_eq!(message.nonce, "fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j");
|
||||||
|
assert_eq!(message.salt, "QSXCR+Q6sek8bf92");
|
||||||
|
assert_eq!(message.iterations, 4096);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn parse_server_last_message() {
|
||||||
|
let f = FinalMessage::parse(&BytesMut::from(
|
||||||
|
"v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".as_bytes(),
|
||||||
|
))
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
f.value,
|
||||||
|
"U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw".to_string()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// recorded auth exchange from psql
|
||||||
|
#[test]
|
||||||
|
fn exchange() {
|
||||||
|
let password = "foobar";
|
||||||
|
let nonce = "9IZ2O01zb9IgiIZ1WJ/zgpJB";
|
||||||
|
|
||||||
|
let client_first = "n,,n=,r=9IZ2O01zb9IgiIZ1WJ/zgpJB";
|
||||||
|
let server_first =
|
||||||
|
"r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,s=fs3IXBy7U7+IvVjZ,i\
|
||||||
|
=4096";
|
||||||
|
let client_final =
|
||||||
|
"c=biws,r=9IZ2O01zb9IgiIZ1WJ/zgpJBjx/oIRLs02gGSHcw1KEty3eY,p=AmNKosjJzS3\
|
||||||
|
1NTlQYNs5BTeQjdHdk7lOflDo5re2an8=";
|
||||||
|
let server_final = "v=U+ppxD5XUKtradnv8e2MkeupiA8FU87Sg8CXzXHDAzw=";
|
||||||
|
|
||||||
|
let mut scram = ScramSha256::from_nonce(password, nonce);
|
||||||
|
|
||||||
|
let message = scram.message();
|
||||||
|
assert_eq!(std::str::from_utf8(&message).unwrap(), client_first);
|
||||||
|
|
||||||
|
let result = scram
|
||||||
|
.update(&BytesMut::from(server_first.as_bytes()))
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(std::str::from_utf8(&result).unwrap(), client_final);
|
||||||
|
|
||||||
|
scram
|
||||||
|
.finish(&BytesMut::from(server_final.as_bytes()))
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
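As the comments above note, the exchange completes in three messages. A sketch of the client-side call sequence, mirroring the `exchange()` test; `send_to_server` and `read_from_server` are hypothetical placeholders for the pooler's actual socket I/O:

fn authenticate(password: &str) -> Result<(), Error> {
    let mut scram = ScramSha256::new(password);

    send_to_server(scram.message());              // 1. client-first: "n,,n=,r=<nonce>"
    let challenge = read_from_server();           // server-first: nonce, salt, iterations
    send_to_server(scram.update(&challenge)?);    // 2. client-final, carrying the proof
    let signature = read_from_server();           // server-final: "v=<signature>"
    scram.finish(&signature)                      // 3. verify the server's signature
}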
src/server.rs (1441 lines): file diff suppressed because it is too large.
src/sharding.rs (100 lines)
@@ -1,24 +1,80 @@
-// https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20
+use serde_derive::{Deserialize, Serialize};
+/// Implements various sharding functions.
+use sha1::{Digest, Sha1};
+
+/// See: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/include/catalog/partition.h#L20>.
 const PARTITION_HASH_SEED: u64 = 0x7A5B22367996DCFD;
+
+/// The sharding functions we support.
+#[derive(Debug, PartialEq, Copy, Clone, Serialize, Deserialize, Hash, std::cmp::Eq)]
+pub enum ShardingFunction {
+    #[serde(alias = "pg_bigint_hash", alias = "PgBigintHash")]
+    PgBigintHash,
+    #[serde(alias = "sha1", alias = "Sha1")]
+    Sha1,
+}
+
+impl std::fmt::Display for ShardingFunction {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            ShardingFunction::PgBigintHash => write!(f, "pg_bigint_hash"),
+            ShardingFunction::Sha1 => write!(f, "sha1"),
+        }
+    }
+}
+
+/// The sharder.
 pub struct Sharder {
+    /// Number of shards in the cluster.
     shards: usize,
+
+    /// The sharding function in use.
+    sharding_function: ShardingFunction,
 }
 
 impl Sharder {
-    pub fn new(shards: usize) -> Sharder {
-        Sharder { shards: shards }
-    }
+    /// Create new instance of the sharder.
+    pub fn new(shards: usize, sharding_function: ShardingFunction) -> Sharder {
+        Sharder {
+            shards,
+            sharding_function,
+        }
+    }
+
+    /// Compute the shard given sharding key.
+    pub fn shard(&self, key: i64) -> usize {
+        match self.sharding_function {
+            ShardingFunction::PgBigintHash => self.pg_bigint_hash(key),
+            ShardingFunction::Sha1 => self.sha1(key),
+        }
+    }
 
     /// Hash function used by Postgres to determine which partition
     /// to put the row in when using HASH(column) partitioning.
-    /// Source: https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631
+    /// Source: <https://github.com/postgres/postgres/blob/27b77ecf9f4d5be211900eda54d8155ada50d696/src/common/hashfn.c#L631>.
     /// Supports only 1 bigint at the moment, but we can add more later.
-    pub fn pg_bigint_hash(&self, key: i64) -> usize {
+    fn pg_bigint_hash(&self, key: i64) -> usize {
         let mut lohalf = key as u32;
         let hihalf = (key >> 32) as u32;
         lohalf ^= if key >= 0 { hihalf } else { !hihalf };
-        Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards as usize
+        Self::combine(0, Self::pg_u32_hash(lohalf)) as usize % self.shards
+    }
+
+    /// Example of a hashing function based on SHA1.
+    fn sha1(&self, key: i64) -> usize {
+        let mut hasher = Sha1::new();
+
+        hasher.update(key.to_string().as_bytes());
+
+        let result = hasher.finalize();
+
+        // Convert the SHA1 hash into hex so we can parse it as a large integer.
+        let hex = format!("{:x}", result);
+
+        // Parse the last 8 bytes as an integer (8 bytes = bigint).
+        let key = i64::from_str_radix(&hex[hex.len() - 8..], 16).unwrap() as usize;
+
+        key % self.shards
     }
 
     #[inline]
@@ -77,14 +133,15 @@ impl Sharder {
     #[inline]
     fn combine(mut a: u64, b: u64) -> u64 {
         a ^= b
-            .wrapping_add(0x49a0f4dd15e5a8e3 as u64)
+            .wrapping_add(0x49a0f4dd15e5a8e3_u64)
             .wrapping_add(a << 54)
            .wrapping_add(a >> 7);
         a
     }
 
+    #[inline]
     fn pg_u32_hash(k: u32) -> u64 {
-        let mut a: u32 = 0x9e3779b9 as u32 + std::mem::size_of::<u32>() as u32 + 3923095 as u32;
+        let mut a: u32 = 0x9e3779b9_u32 + std::mem::size_of::<u32>() as u32 + 3923095_u32;
         let mut b = a;
         let c = a;
 
@@ -109,36 +166,51 @@ mod test {
     // confirming that we implemented Postgres BIGINT hashing correctly.
     #[test]
     fn test_pg_bigint_hash() {
-        let sharder = Sharder::new(5);
+        let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
 
         let shard_0 = vec![1, 4, 5, 14, 19, 39, 40, 46, 47, 53];
 
         for v in shard_0 {
-            assert_eq!(sharder.pg_bigint_hash(v), 0);
+            assert_eq!(sharder.shard(v), 0);
         }
 
         let shard_1 = vec![2, 3, 11, 17, 21, 23, 30, 49, 51, 54];
 
         for v in shard_1 {
-            assert_eq!(sharder.pg_bigint_hash(v), 1);
+            assert_eq!(sharder.shard(v), 1);
         }
 
         let shard_2 = vec![6, 7, 15, 16, 18, 20, 25, 28, 34, 35];
 
         for v in shard_2 {
-            assert_eq!(sharder.pg_bigint_hash(v), 2);
+            assert_eq!(sharder.shard(v), 2);
         }
 
         let shard_3 = vec![8, 12, 13, 22, 29, 31, 33, 36, 41, 43];
 
         for v in shard_3 {
-            assert_eq!(sharder.pg_bigint_hash(v), 3);
+            assert_eq!(sharder.shard(v), 3);
         }
 
         let shard_4 = vec![9, 10, 24, 26, 27, 32, 37, 38, 42, 45];
 
         for v in shard_4 {
-            assert_eq!(sharder.pg_bigint_hash(v), 4);
+            assert_eq!(sharder.shard(v), 4);
         }
     }
+
+    #[test]
+    fn test_sha1_hash() {
+        let sharder = Sharder::new(12, ShardingFunction::Sha1);
+        let ids = [
+            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+        ];
+        let shards = [
+            4, 7, 8, 3, 6, 0, 0, 10, 3, 11, 1, 7, 4, 4, 11, 2, 5, 0, 8, 3,
+        ];
+
+        for (i, id) in ids.iter().enumerate() {
+            assert_eq!(sharder.shard(*id), shards[i]);
        }
     }
 }
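A short usage sketch of the reworked interface; the shard count and key below are made up for illustration:

// Route a bigint sharding key to one of 5 shards, the same way Postgres
// HASH(column) partitioning would place the row.
let sharder = Sharder::new(5, ShardingFunction::PgBigintHash);
let shard = sharder.shard(1234567890);
assert!(shard < 5);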
|
|||||||
130
src/stats.rs
Normal file
130
src/stats.rs
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
/// Statistics and reporting.
|
||||||
|
use arc_swap::ArcSwap;
|
||||||
|
|
||||||
|
use log::{info, warn};
|
||||||
|
use once_cell::sync::Lazy;
|
||||||
|
use parking_lot::RwLock;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
// Structs that hold stats for different resources
|
||||||
|
pub mod address;
|
||||||
|
pub mod client;
|
||||||
|
pub mod pool;
|
||||||
|
pub mod server;
|
||||||
|
pub use address::AddressStats;
|
||||||
|
pub use client::{ClientState, ClientStats};
|
||||||
|
pub use server::{ServerState, ServerStats};
|
||||||
|
|
||||||
|
/// Convenience types for various stats
|
||||||
|
type ClientStatesLookup = HashMap<i32, Arc<ClientStats>>;
|
||||||
|
type ServerStatesLookup = HashMap<i32, Arc<ServerStats>>;
|
||||||
|
|
||||||
|
/// Stats for individual client connections
|
||||||
|
/// Used in SHOW CLIENTS.
|
||||||
|
static CLIENT_STATS: Lazy<Arc<RwLock<ClientStatesLookup>>> =
|
||||||
|
Lazy::new(|| Arc::new(RwLock::new(ClientStatesLookup::default())));
|
||||||
|
|
||||||
|
/// Stats for individual server connections
|
||||||
|
/// Used in SHOW SERVERS.
|
||||||
|
static SERVER_STATS: Lazy<Arc<RwLock<ServerStatesLookup>>> =
|
||||||
|
Lazy::new(|| Arc::new(RwLock::new(ServerStatesLookup::default())));
|
||||||
|
|
||||||
|
/// The statistics reporter. An instance is given to each possible source of statistics,
|
||||||
|
/// e.g. client stats, server stats, connection pool stats.
|
||||||
|
pub static REPORTER: Lazy<ArcSwap<Reporter>> =
|
||||||
|
Lazy::new(|| ArcSwap::from_pointee(Reporter::default()));
|
||||||
|
|
||||||
|
/// Statistics period used for average calculations.
|
||||||
|
/// 15 seconds.
|
||||||
|
static STAT_PERIOD: u64 = 15000;
|
||||||
|
|
||||||
|
/// The statistics reporter. An instance is given
|
||||||
|
/// to each possible source of statistics,
|
||||||
|
/// e.g. clients, servers, connection pool.
|
||||||
|
#[derive(Clone, Debug, Default)]
|
||||||
|
pub struct Reporter {}
|
||||||
|
|
||||||
|
impl Reporter {
|
||||||
|
/// Register a client with the stats system. The stats system uses client_id
|
||||||
|
/// to track and aggregate statistics from all source that relate to that client
|
||||||
|
fn client_register(&self, client_id: i32, stats: Arc<ClientStats>) {
|
||||||
|
if CLIENT_STATS.read().get(&client_id).is_some() {
|
||||||
|
warn!("Client {:?} was double registered!", client_id);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
CLIENT_STATS.write().insert(client_id, stats);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reports a client is disconnecting from the pooler.
|
||||||
|
fn client_disconnecting(&self, client_id: i32) {
|
||||||
|
CLIENT_STATS.write().remove(&client_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Register a server connection with the stats system. The stats system uses server_id
|
||||||
|
/// to track and aggregate statistics from all source that relate to that server
|
||||||
|
fn server_register(&self, server_id: i32, stats: Arc<ServerStats>) {
|
||||||
|
SERVER_STATS.write().insert(server_id, stats);
|
||||||
|
}
|
||||||
|
/// Reports a server connection is disconnecting from the pooler.
|
||||||
|
fn server_disconnecting(&self, server_id: i32) {
|
||||||
|
SERVER_STATS.write().remove(&server_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The statistics collector which used for calculating averages
|
||||||
|
/// There is only one collector (kind of like a singleton)
|
||||||
|
/// it updates averages every 15 seconds.
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct Collector {}
|
||||||
|
|
||||||
|
impl Collector {
|
||||||
|
/// The statistics collection handler. It will collect statistics
|
||||||
|
/// for `address_id`s starting at 0 up to `addresses`.
|
||||||
|
pub async fn collect(&mut self) {
|
||||||
|
info!("Events reporter started");
|
||||||
|
|
||||||
|
tokio::task::spawn(async move {
|
||||||
|
let mut interval =
|
||||||
|
tokio::time::interval(tokio::time::Duration::from_millis(STAT_PERIOD));
|
||||||
|
loop {
|
||||||
|
interval.tick().await;
|
||||||
|
|
||||||
|
// Hold read lock for duration of update to retain all server stats
|
||||||
|
let server_stats = SERVER_STATS.read();
|
||||||
|
|
||||||
|
for stats in server_stats.values() {
|
||||||
|
if !stats.check_address_stat_average_is_updated_status() {
|
||||||
|
stats.address_stats().update_averages();
|
||||||
|
stats.address_stats().reset_current_counts();
|
||||||
|
stats.set_address_stat_average_is_updated_status(true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset to false for next update
|
||||||
|
for stats in server_stats.values() {
|
||||||
|
stats.set_address_stat_average_is_updated_status(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a snapshot of client statistics.
|
||||||
|
/// by the `Collector`.
|
||||||
|
pub fn get_client_stats() -> ClientStatesLookup {
|
||||||
|
CLIENT_STATS.read().clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a snapshot of server statistics.
|
||||||
|
/// by the `Collector`.
|
||||||
|
pub fn get_server_stats() -> ServerStatesLookup {
|
||||||
|
SERVER_STATS.read().clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the statistics reporter used to update stats across the pools/clients.
|
||||||
|
pub fn get_reporter() -> Reporter {
|
||||||
|
(*(*REPORTER.load())).clone()
|
||||||
|
}
|
||||||
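To make the flow concrete, a hedged sketch of how a client connection might be wired into this registry. The identifiers and field values are placeholders, and `ClientStats::new` is defined in `src/stats/client.rs` further down:

use std::sync::Arc;
use std::sync::atomic::Ordering;
use tokio::time::Instant;

// On connect: build the per-client stats and register them under client_id.
let stats = Arc::new(ClientStats::new(42, "psql", "alice", "main_db", Instant::now()));
stats.register(stats.clone());

// Later, an admin command such as SHOW CLIENTS renders a snapshot.
for (client_id, client) in get_client_stats() {
    println!("client {}: {}", client_id, client.state.load(Ordering::Relaxed));
}

// On disconnect: remove the entry again.
stats.disconnect();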
src/stats/address.rs (new file, 226 lines)
@@ -0,0 +1,226 @@
use std::sync::atomic::*;
use std::sync::Arc;

#[derive(Debug, Clone, Default)]
struct AddressStatFields {
    xact_count: Arc<AtomicU64>,
    query_count: Arc<AtomicU64>,
    bytes_received: Arc<AtomicU64>,
    bytes_sent: Arc<AtomicU64>,
    xact_time: Arc<AtomicU64>,
    query_time: Arc<AtomicU64>,
    wait_time: Arc<AtomicU64>,
    errors: Arc<AtomicU64>,
}

/// Internal address stats.
#[derive(Debug, Clone, Default)]
pub struct AddressStats {
    total: AddressStatFields,

    current: AddressStatFields,

    averages: AddressStatFields,

    // Determines if the averages have been updated since the last time they were reported.
    pub averages_updated: Arc<AtomicBool>,
}

impl IntoIterator for AddressStats {
    type Item = (String, u64);
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        vec![
            (
                "total_xact_count".to_string(),
                self.total.xact_count.load(Ordering::Relaxed),
            ),
            (
                "total_query_count".to_string(),
                self.total.query_count.load(Ordering::Relaxed),
            ),
            (
                "total_received".to_string(),
                self.total.bytes_received.load(Ordering::Relaxed),
            ),
            (
                "total_sent".to_string(),
                self.total.bytes_sent.load(Ordering::Relaxed),
            ),
            (
                "total_xact_time".to_string(),
                self.total.xact_time.load(Ordering::Relaxed),
            ),
            (
                "total_query_time".to_string(),
                self.total.query_time.load(Ordering::Relaxed),
            ),
            (
                "total_wait_time".to_string(),
                self.total.wait_time.load(Ordering::Relaxed),
            ),
            (
                "total_errors".to_string(),
                self.total.errors.load(Ordering::Relaxed),
            ),
            (
                "avg_xact_count".to_string(),
                self.averages.xact_count.load(Ordering::Relaxed),
            ),
            (
                "avg_query_count".to_string(),
                self.averages.query_count.load(Ordering::Relaxed),
            ),
            (
                "avg_recv".to_string(),
                self.averages.bytes_received.load(Ordering::Relaxed),
            ),
            (
                "avg_sent".to_string(),
                self.averages.bytes_sent.load(Ordering::Relaxed),
            ),
            (
                "avg_errors".to_string(),
                self.averages.errors.load(Ordering::Relaxed),
            ),
            (
                "avg_xact_time".to_string(),
                self.averages.xact_time.load(Ordering::Relaxed),
            ),
            (
                "avg_query_time".to_string(),
                self.averages.query_time.load(Ordering::Relaxed),
            ),
            (
                "avg_wait_time".to_string(),
                self.averages.wait_time.load(Ordering::Relaxed),
            ),
        ]
        .into_iter()
    }
}

impl AddressStats {
    pub fn xact_count_add(&self) {
        self.total.xact_count.fetch_add(1, Ordering::Relaxed);
        self.current.xact_count.fetch_add(1, Ordering::Relaxed);
    }

    pub fn query_count_add(&self) {
        self.total.query_count.fetch_add(1, Ordering::Relaxed);
        self.current.query_count.fetch_add(1, Ordering::Relaxed);
    }

    pub fn bytes_received_add(&self, bytes: u64) {
        self.total
            .bytes_received
            .fetch_add(bytes, Ordering::Relaxed);
        self.current
            .bytes_received
            .fetch_add(bytes, Ordering::Relaxed);
    }

    pub fn bytes_sent_add(&self, bytes: u64) {
        self.total.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
        self.current.bytes_sent.fetch_add(bytes, Ordering::Relaxed);
    }

    pub fn xact_time_add(&self, time: u64) {
        self.total.xact_time.fetch_add(time, Ordering::Relaxed);
        self.current.xact_time.fetch_add(time, Ordering::Relaxed);
    }

    pub fn query_time_add(&self, time: u64) {
        self.total.query_time.fetch_add(time, Ordering::Relaxed);
        self.current.query_time.fetch_add(time, Ordering::Relaxed);
    }

    pub fn wait_time_add(&self, time: u64) {
        self.total.wait_time.fetch_add(time, Ordering::Relaxed);
        self.current.wait_time.fetch_add(time, Ordering::Relaxed);
    }

    pub fn error(&self) {
        self.total.errors.fetch_add(1, Ordering::Relaxed);
        self.current.errors.fetch_add(1, Ordering::Relaxed);
    }

    pub fn update_averages(&self) {
        let stat_period_per_second = crate::stats::STAT_PERIOD / 1_000;

        // xact_count
        let current_xact_count = self.current.xact_count.load(Ordering::Relaxed);
        let current_xact_time = self.current.xact_time.load(Ordering::Relaxed);
        self.averages.xact_count.store(
            current_xact_count / stat_period_per_second,
            Ordering::Relaxed,
        );
        if current_xact_count == 0 {
            self.averages.xact_time.store(0, Ordering::Relaxed);
        } else {
            self.averages
                .xact_time
                .store(current_xact_time / current_xact_count, Ordering::Relaxed);
        }

        // query_count
        let current_query_count = self.current.query_count.load(Ordering::Relaxed);
        let current_query_time = self.current.query_time.load(Ordering::Relaxed);
        self.averages.query_count.store(
            current_query_count / stat_period_per_second,
            Ordering::Relaxed,
        );
        if current_query_count == 0 {
            self.averages.query_time.store(0, Ordering::Relaxed);
        } else {
            self.averages
                .query_time
                .store(current_query_time / current_query_count, Ordering::Relaxed);
        }

        // bytes_received
        let current_bytes_received = self.current.bytes_received.load(Ordering::Relaxed);
        self.averages.bytes_received.store(
            current_bytes_received / stat_period_per_second,
            Ordering::Relaxed,
        );

        // bytes_sent
        let current_bytes_sent = self.current.bytes_sent.load(Ordering::Relaxed);
        self.averages.bytes_sent.store(
            current_bytes_sent / stat_period_per_second,
            Ordering::Relaxed,
        );

        // wait_time
        let current_wait_time = self.current.wait_time.load(Ordering::Relaxed);
        self.averages.wait_time.store(
            current_wait_time / stat_period_per_second,
            Ordering::Relaxed,
        );

        // errors
        let current_errors = self.current.errors.load(Ordering::Relaxed);
        self.averages
            .errors
            .store(current_errors / stat_period_per_second, Ordering::Relaxed);
    }

    pub fn reset_current_counts(&self) {
        self.current.xact_count.store(0, Ordering::Relaxed);
        self.current.xact_time.store(0, Ordering::Relaxed);
        self.current.query_count.store(0, Ordering::Relaxed);
        self.current.query_time.store(0, Ordering::Relaxed);
        self.current.bytes_received.store(0, Ordering::Relaxed);
        self.current.bytes_sent.store(0, Ordering::Relaxed);
        self.current.wait_time.store(0, Ordering::Relaxed);
        self.current.errors.store(0, Ordering::Relaxed);
    }

    pub fn populate_row(&self, row: &mut Vec<String>) {
        for (_key, value) in self.clone() {
            row.push(value.to_string());
        }
    }
}
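Note the two averaging modes in `update_averages` above: event counters (`xact_count`, `query_count`, bytes, errors, `wait_time`) are divided by `STAT_PERIOD / 1_000` = 15 to give per-second rates over the window, while `xact_time` and `query_time` are divided by their event counts to give per-operation latencies. For example, 3,000 queries and 60,000 units of accumulated query time in one window yield `avg_query_count` = 200 per second and `avg_query_time` = 20 per query (in whatever time unit the callers report).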
src/stats/client.rs (new file, 204 lines)
@@ -0,0 +1,204 @@
use super::{get_reporter, Reporter};
use atomic_enum::atomic_enum;
use std::sync::atomic::*;
use std::sync::Arc;
use tokio::time::Instant;

/// The various states that a client can be in.
#[atomic_enum]
#[derive(PartialEq)]
pub enum ClientState {
    Idle = 0,
    Waiting,
    Active,
}

impl std::fmt::Display for ClientState {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            ClientState::Idle => write!(f, "idle"),
            ClientState::Waiting => write!(f, "waiting"),
            ClientState::Active => write!(f, "active"),
        }
    }
}

#[derive(Debug, Clone)]
/// Information we keep track of which can be queried by SHOW CLIENTS.
pub struct ClientStats {
    /// A random integer assigned to the client and used by stats to track the client.
    client_id: i32,

    /// Data associated with the client, not writable, only set when we construct the ClientStats.
    application_name: String,
    username: String,
    pool_name: String,
    connect_time: Instant,

    reporter: Reporter,

    /// Total time spent waiting for a connection from the pool, measured in microseconds.
    pub total_wait_time: Arc<AtomicU64>,

    /// Maximum time spent waiting for a connection from the pool, measured in microseconds.
    pub max_wait_time: Arc<AtomicU64>,

    // Time when the client started waiting for a connection from the pool, measured in microseconds.
    // We use connect_time as the reference point for this value.
    // U64 can represent ~5850 centuries in microseconds, so we should be fine.
    pub wait_start_us: Arc<AtomicU64>,

    /// Current state of the client.
    pub state: Arc<AtomicClientState>,

    /// Number of transactions executed by this client.
    pub transaction_count: Arc<AtomicU64>,

    /// Number of queries executed by this client.
    pub query_count: Arc<AtomicU64>,

    /// Number of errors made by this client.
    pub error_count: Arc<AtomicU64>,
}

impl Default for ClientStats {
    fn default() -> Self {
        ClientStats {
            client_id: 0,
            connect_time: Instant::now(),
            application_name: String::new(),
            username: String::new(),
            pool_name: String::new(),
            total_wait_time: Arc::new(AtomicU64::new(0)),
            max_wait_time: Arc::new(AtomicU64::new(0)),
            wait_start_us: Arc::new(AtomicU64::new(0)),
            state: Arc::new(AtomicClientState::new(ClientState::Idle)),
            transaction_count: Arc::new(AtomicU64::new(0)),
            query_count: Arc::new(AtomicU64::new(0)),
            error_count: Arc::new(AtomicU64::new(0)),
            reporter: get_reporter(),
        }
    }
}

impl ClientStats {
    pub fn new(
        client_id: i32,
        application_name: &str,
        username: &str,
        pool_name: &str,
        connect_time: Instant,
    ) -> Self {
        Self {
            client_id,
            connect_time,
            application_name: application_name.to_string(),
            username: username.to_string(),
            pool_name: pool_name.to_string(),
            ..Default::default()
        }
    }

    /// Reports a client is disconnecting from the pooler and
    /// updates metrics on the corresponding pool.
    pub fn disconnect(&self) {
        self.reporter.client_disconnecting(self.client_id);
    }

    /// Register a client with the stats system. The stats system uses client_id
    /// to track and aggregate statistics from all sources that relate to that client.
    pub fn register(&self, stats: Arc<ClientStats>) {
        self.reporter.client_register(self.client_id, stats);
        self.state.store(ClientState::Idle, Ordering::Relaxed);
    }

    /// Reports a client is done querying the server and is no longer assigned a server connection.
    pub fn idle(&self) {
        self.state.store(ClientState::Idle, Ordering::Relaxed);
    }

    /// Reports a client is waiting for a connection.
    pub fn waiting(&self) {
        let wait_start = self.connect_time.elapsed().as_micros() as u64;

        self.wait_start_us.store(wait_start, Ordering::Relaxed);
        self.state.store(ClientState::Waiting, Ordering::Relaxed);
    }

    /// Reports a client is done waiting for a connection and is about to query the server.
    pub fn active(&self) {
        self.state.store(ClientState::Active, Ordering::Relaxed);
    }

    /// Reports a client has failed to obtain a connection from a connection pool.
    pub fn checkout_error(&self) {
        self.state.store(ClientState::Idle, Ordering::Relaxed);
        self.update_wait_times();
    }

    /// Reports a client has succeeded in obtaining a connection from a connection pool.
    pub fn checkout_success(&self) {
        self.state.store(ClientState::Active, Ordering::Relaxed);
        self.update_wait_times();
    }

    /// Reports a client has had the server assigned to it be banned.
    pub fn ban_error(&self) {
        self.state.store(ClientState::Idle, Ordering::Relaxed);
        self.error_count.fetch_add(1, Ordering::Relaxed);
    }

    fn update_wait_times(&self) {
        if self.wait_start_us.load(Ordering::Relaxed) == 0 {
            return;
        }

        let wait_time_us = self.get_current_wait_time_us();
        self.total_wait_time
            .fetch_add(wait_time_us, Ordering::Relaxed);
        self.max_wait_time
            .fetch_max(wait_time_us, Ordering::Relaxed);
        self.wait_start_us.store(0, Ordering::Relaxed);
    }

    pub fn get_current_wait_time_us(&self) -> u64 {
        let wait_start_us = self.wait_start_us.load(Ordering::Relaxed);
        let microseconds_since_connection_epoch = self.connect_time.elapsed().as_micros() as u64;
        if wait_start_us == 0 || microseconds_since_connection_epoch < wait_start_us {
            return 0;
        }
        microseconds_since_connection_epoch - wait_start_us
    }

    /// Report a query executed by a client against a server.
    pub fn query(&self) {
        self.query_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Report a transaction executed by a client against a server.
    /// Each individual query outside an explicit transaction is reported as its own
    /// transaction; within an explicit transaction, only the initial BEGIN is counted
    /// and the queries inside do not count as transactions.
    pub fn transaction(&self) {
        self.transaction_count.fetch_add(1, Ordering::Relaxed);
    }

    // Helper methods for SHOW CLIENTS.
    pub fn connect_time(&self) -> Instant {
        self.connect_time
    }

    pub fn client_id(&self) -> i32 {
        self.client_id
    }

    pub fn application_name(&self) -> String {
        self.application_name.clone()
    }

    pub fn username(&self) -> String {
        self.username.clone()
    }

    pub fn pool_name(&self) -> String {
        self.pool_name.clone()
    }
}
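A hedged sketch of the checkout path these counters are built around; the call order is the informative part, the values are invented:

use std::sync::Arc;
use tokio::time::Instant;

let stats = Arc::new(ClientStats::new(7, "app", "bob", "main_db", Instant::now()));
stats.register(stats.clone());

stats.waiting();           // client asks for a server: wait_start_us is stamped
// ... the pool finds a free server connection ...
stats.checkout_success();  // state -> Active; total_wait_time and max_wait_time updated
stats.query();             // one query executed
stats.idle();              // server connection released
stats.disconnect();        // client goes away; entry removed from CLIENT_STATS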
src/stats/pool.rs (new file, 154 lines)
@@ -0,0 +1,154 @@
use log::debug;

use super::{ClientState, ServerState};
use crate::{config::PoolMode, messages::DataType, pool::PoolIdentifier};
use std::collections::HashMap;
use std::sync::atomic::*;

use crate::pool::get_all_pools;

#[derive(Debug, Clone)]
/// A struct that holds information about a pool.
pub struct PoolStats {
    pub identifier: PoolIdentifier,
    pub mode: PoolMode,
    pub cl_idle: u64,
    pub cl_active: u64,
    pub cl_waiting: u64,
    pub cl_cancel_req: u64,
    pub sv_active: u64,
    pub sv_idle: u64,
    pub sv_used: u64,
    pub sv_tested: u64,
    pub sv_login: u64,
    pub maxwait: u64,
}

impl PoolStats {
    pub fn new(identifier: PoolIdentifier, mode: PoolMode) -> Self {
        PoolStats {
            identifier,
            mode,
            cl_idle: 0,
            cl_active: 0,
            cl_waiting: 0,
            cl_cancel_req: 0,
            sv_active: 0,
            sv_idle: 0,
            sv_used: 0,
            sv_tested: 0,
            sv_login: 0,
            maxwait: 0,
        }
    }

    pub fn construct_pool_lookup() -> HashMap<PoolIdentifier, PoolStats> {
        let mut map: HashMap<PoolIdentifier, PoolStats> = HashMap::new();
        let client_map = super::get_client_stats();
        let server_map = super::get_server_stats();

        for (identifier, pool) in get_all_pools() {
            map.insert(
                identifier.clone(),
                PoolStats::new(identifier, pool.settings.pool_mode),
            );
        }

        for client in client_map.values() {
            match map.get_mut(&PoolIdentifier {
                db: client.pool_name(),
                user: client.username(),
            }) {
                Some(pool_stats) => {
                    match client.state.load(Ordering::Relaxed) {
                        ClientState::Active => pool_stats.cl_active += 1,
                        ClientState::Idle => pool_stats.cl_idle += 1,
                        ClientState::Waiting => pool_stats.cl_waiting += 1,
                    }
                    let wait_start_us = client.wait_start_us.load(Ordering::Relaxed);
                    if wait_start_us > 0 {
                        let wait_time_us = client.get_current_wait_time_us();
                        pool_stats.maxwait = std::cmp::max(pool_stats.maxwait, wait_time_us);
                    }
                }
                None => debug!("Client from an obsolete pool"),
            }
        }

        for server in server_map.values() {
            match map.get_mut(&PoolIdentifier {
                db: server.pool_name(),
                user: server.username(),
            }) {
                Some(pool_stats) => match server.state.load(Ordering::Relaxed) {
                    ServerState::Active => pool_stats.sv_active += 1,
                    ServerState::Idle => pool_stats.sv_idle += 1,
                    ServerState::Login => pool_stats.sv_login += 1,
                    ServerState::Tested => pool_stats.sv_tested += 1,
                },
                None => debug!("Server from an obsolete pool"),
            }
        }

        map
    }

    pub fn generate_header() -> Vec<(&'static str, DataType)> {
        vec![
            ("database", DataType::Text),
            ("user", DataType::Text),
            ("pool_mode", DataType::Text),
            ("cl_idle", DataType::Numeric),
            ("cl_active", DataType::Numeric),
            ("cl_waiting", DataType::Numeric),
            ("cl_cancel_req", DataType::Numeric),
            ("sv_active", DataType::Numeric),
            ("sv_idle", DataType::Numeric),
            ("sv_used", DataType::Numeric),
            ("sv_tested", DataType::Numeric),
            ("sv_login", DataType::Numeric),
            ("maxwait", DataType::Numeric),
            ("maxwait_us", DataType::Numeric),
        ]
    }

    pub fn generate_row(&self) -> Vec<String> {
        vec![
            self.identifier.db.clone(),
            self.identifier.user.clone(),
            self.mode.to_string(),
            self.cl_idle.to_string(),
            self.cl_active.to_string(),
            self.cl_waiting.to_string(),
            self.cl_cancel_req.to_string(),
            self.sv_active.to_string(),
            self.sv_idle.to_string(),
            self.sv_used.to_string(),
            self.sv_tested.to_string(),
            self.sv_login.to_string(),
            (self.maxwait / 1_000_000).to_string(),
            (self.maxwait % 1_000_000).to_string(),
        ]
    }
}

impl IntoIterator for PoolStats {
    type Item = (String, u64);
    type IntoIter = std::vec::IntoIter<Self::Item>;

    fn into_iter(self) -> Self::IntoIter {
        vec![
            ("cl_idle".to_string(), self.cl_idle),
            ("cl_active".to_string(), self.cl_active),
            ("cl_waiting".to_string(), self.cl_waiting),
            ("cl_cancel_req".to_string(), self.cl_cancel_req),
            ("sv_active".to_string(), self.sv_active),
            ("sv_idle".to_string(), self.sv_idle),
            ("sv_used".to_string(), self.sv_used),
            ("sv_tested".to_string(), self.sv_tested),
            ("sv_login".to_string(), self.sv_login),
            ("maxwait".to_string(), self.maxwait / 1_000_000),
            ("maxwait_us".to_string(), self.maxwait % 1_000_000),
        ]
        .into_iter()
    }
}
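Note that `maxwait` is tracked internally in microseconds and split for display into two columns, matching PgBouncer's SHOW POOLS convention: `maxwait / 1_000_000` gives whole seconds and `maxwait % 1_000_000` gives the microsecond remainder, so a 2,500,000 µs wait renders as `maxwait = 2`, `maxwait_us = 500000`.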
src/stats/server.rs (new file, 229 lines)
@@ -0,0 +1,229 @@
use super::AddressStats;
use super::{get_reporter, Reporter};
use crate::config::Address;
use atomic_enum::atomic_enum;
use parking_lot::RwLock;
use std::sync::atomic::*;
use std::sync::Arc;
use tokio::time::Instant;

/// The various states that a server can be in.
#[atomic_enum]
#[derive(PartialEq)]
pub enum ServerState {
    Login = 0,
    Active,
    Tested,
    Idle,
}

impl std::fmt::Display for ServerState {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match *self {
            ServerState::Login => write!(f, "login"),
            ServerState::Active => write!(f, "active"),
            ServerState::Tested => write!(f, "tested"),
            ServerState::Idle => write!(f, "idle"),
        }
    }
}

/// Information we keep track of which can be queried by SHOW SERVERS.
#[derive(Debug, Clone)]
pub struct ServerStats {
    /// A random integer assigned to the server and used by stats to track the server.
    server_id: i32,

    /// Context information, only to be read.
    address: Address,
    connect_time: Instant,

    reporter: Reporter,

    /// Data
    pub application_name: Arc<RwLock<String>>,
    pub state: Arc<AtomicServerState>,
    pub bytes_sent: Arc<AtomicU64>,
    pub bytes_received: Arc<AtomicU64>,
    pub transaction_count: Arc<AtomicU64>,
    pub query_count: Arc<AtomicU64>,
    pub error_count: Arc<AtomicU64>,
    pub prepared_hit_count: Arc<AtomicU64>,
    pub prepared_miss_count: Arc<AtomicU64>,
    pub prepared_eviction_count: Arc<AtomicU64>,
    pub prepared_cache_size: Arc<AtomicU64>,
}

impl Default for ServerStats {
    fn default() -> Self {
        ServerStats {
            server_id: 0,
            application_name: Arc::new(RwLock::new(String::new())),
            address: Address::default(),
            connect_time: Instant::now(),
            state: Arc::new(AtomicServerState::new(ServerState::Login)),
            bytes_sent: Arc::new(AtomicU64::new(0)),
            bytes_received: Arc::new(AtomicU64::new(0)),
            transaction_count: Arc::new(AtomicU64::new(0)),
            query_count: Arc::new(AtomicU64::new(0)),
            error_count: Arc::new(AtomicU64::new(0)),
            reporter: get_reporter(),
            prepared_hit_count: Arc::new(AtomicU64::new(0)),
            prepared_miss_count: Arc::new(AtomicU64::new(0)),
            prepared_eviction_count: Arc::new(AtomicU64::new(0)),
            prepared_cache_size: Arc::new(AtomicU64::new(0)),
        }
    }
}

impl ServerStats {
    pub fn new(address: Address, connect_time: Instant) -> Self {
        Self {
            address,
            connect_time,
            server_id: rand::random::<i32>(),
            ..Default::default()
        }
    }

    pub fn server_id(&self) -> i32 {
        self.server_id
    }

    /// Register a server connection with the stats system. The stats system uses server_id
    /// to track and aggregate statistics from all sources that relate to that server.
    // Delegates to the reporter.
    pub fn register(&self, stats: Arc<ServerStats>) {
        self.reporter.server_register(self.server_id, stats);
        self.login();
    }

    /// Reports a server connection is no longer assigned to a client
    /// and is available for the next client to pick it up.
    pub fn idle(&self) {
        self.state.store(ServerState::Idle, Ordering::Relaxed);
    }

    /// Reports a server connection is disconnecting from the pooler.
    /// Also updates metrics on the pool regarding server usage.
    pub fn disconnect(&self) {
        self.reporter.server_disconnecting(self.server_id);
    }

    /// Reports a server connection is being tested before being given to a client.
    pub fn tested(&self) {
        self.set_undefined_application();
        self.state.store(ServerState::Tested, Ordering::Relaxed);
    }

    /// Reports a server connection is attempting to login.
    pub fn login(&self) {
        self.state.store(ServerState::Login, Ordering::Relaxed);
        self.set_undefined_application();
    }

    /// Reports a server connection has been assigned to a client that
    /// is about to query the server.
    pub fn active(&self, application_name: String) {
        self.state.store(ServerState::Active, Ordering::Relaxed);
        self.set_application(application_name);
    }

    pub fn address_stats(&self) -> Arc<AddressStats> {
        self.address.stats.clone()
    }

    pub fn check_address_stat_average_is_updated_status(&self) -> bool {
        self.address.stats.averages_updated.load(Ordering::Relaxed)
    }

    pub fn set_address_stat_average_is_updated_status(&self, is_checked: bool) {
        self.address
            .stats
            .averages_updated
            .store(is_checked, Ordering::Relaxed);
    }

    // Helper methods for SHOW SERVERS.
    pub fn pool_name(&self) -> String {
        self.address.pool_name.clone()
    }

    pub fn username(&self) -> String {
        self.address.username.clone()
    }

    pub fn address_name(&self) -> String {
        self.address.name()
    }

    pub fn connect_time(&self) -> Instant {
        self.connect_time
    }

    fn set_application(&self, name: String) {
        let mut application_name = self.application_name.write();
        *application_name = name;
    }

    fn set_undefined_application(&self) {
        self.set_application(String::from("Undefined"))
    }

    pub fn checkout_time(&self, microseconds: u64, application_name: String) {
        // Update server stats and address aggregation stats.
        self.set_application(application_name);
        self.address.stats.wait_time_add(microseconds);
    }

    /// Report a query executed by a client against a server.
    pub fn query(&self, milliseconds: u64, application_name: &str) {
        self.set_application(application_name.to_string());
        self.address.stats.query_count_add();
        self.address.stats.query_time_add(milliseconds);
        self.query_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Report a transaction executed by a client against a server.
    /// Each individual query outside an explicit transaction is reported as its own
    /// transaction; within an explicit transaction, only the initial BEGIN is counted
    /// and the queries inside do not count as transactions.
    pub fn transaction(&self, application_name: &str) {
        self.set_application(application_name.to_string());

        self.transaction_count.fetch_add(1, Ordering::Relaxed);
        self.address.stats.xact_count_add();
    }

    /// Report data sent to a server.
    pub fn data_sent(&self, amount_bytes: usize) {
        self.bytes_sent
            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
        self.address.stats.bytes_sent_add(amount_bytes as u64);
    }

    /// Report data received from a server.
    pub fn data_received(&self, amount_bytes: usize) {
        self.bytes_received
            .fetch_add(amount_bytes as u64, Ordering::Relaxed);
        self.address.stats.bytes_received_add(amount_bytes as u64);
    }

    /// Report a prepared statement that already exists on the server.
    pub fn prepared_cache_hit(&self) {
        self.prepared_hit_count.fetch_add(1, Ordering::Relaxed);
    }

    /// Report a prepared statement that does not exist on the server yet.
    pub fn prepared_cache_miss(&self) {
        self.prepared_miss_count.fetch_add(1, Ordering::Relaxed);
    }

    pub fn prepared_cache_add(&self) {
        self.prepared_cache_size.fetch_add(1, Ordering::Relaxed);
    }

    pub fn prepared_cache_remove(&self) {
        self.prepared_eviction_count.fetch_add(1, Ordering::Relaxed);
        self.prepared_cache_size.fetch_sub(1, Ordering::Relaxed);
    }
}
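A hedged sketch of one server connection's pass through these states; the values are illustrative, not from the diff:

use std::sync::Arc;
use tokio::time::Instant;

let stats = Arc::new(ServerStats::new(Address::default(), Instant::now()));
stats.register(stats.clone());      // state -> Login
// ... authentication succeeds, connection enters the pool ...
stats.idle();                       // waiting for a client
stats.active("psql".to_string());   // checked out by a client
stats.query(3, "psql");             // record one query and its duration
stats.transaction("psql");          // record the enclosing transaction
stats.idle();                       // checked back in
stats.disconnect();                 // removed from SERVER_STATS on close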
87
src/tls.rs
Normal file
87
src/tls.rs
Normal file
@@ -0,0 +1,87 @@
// Stream wrapper.

use rustls_pemfile::{certs, read_one, Item};
use std::iter;
use std::path::Path;
use std::sync::Arc;
use std::time::SystemTime;
use tokio_rustls::rustls::{
    self,
    client::{ServerCertVerified, ServerCertVerifier},
    Certificate, PrivateKey, ServerName,
};
use tokio_rustls::TlsAcceptor;

use crate::config::get_config;
use crate::errors::Error;

// TLS
pub fn load_certs(path: &Path) -> std::io::Result<Vec<Certificate>> {
    certs(&mut std::io::BufReader::new(std::fs::File::open(path)?))
        .map_err(|_| std::io::Error::new(std::io::ErrorKind::InvalidInput, "invalid cert"))
        .map(|mut certs| certs.drain(..).map(Certificate).collect())
}

pub fn load_keys(path: &Path) -> std::io::Result<Vec<PrivateKey>> {
    let mut rd = std::io::BufReader::new(std::fs::File::open(path)?);

    iter::from_fn(|| read_one(&mut rd).transpose())
        .filter_map(|item| match item {
            Err(err) => Some(Err(err)),
            Ok(Item::RSAKey(key)) => Some(Ok(PrivateKey(key))),
            Ok(Item::ECKey(key)) => Some(Ok(PrivateKey(key))),
            Ok(Item::PKCS8Key(key)) => Some(Ok(PrivateKey(key))),
            _ => None,
        })
        .collect()
}

pub struct Tls {
    pub acceptor: TlsAcceptor,
}

impl Tls {
    pub fn new() -> Result<Self, Error> {
        let config = get_config();

        let certs = match load_certs(Path::new(&config.general.tls_certificate.unwrap())) {
            Ok(certs) => certs,
            Err(_) => return Err(Error::TlsError),
        };

        let mut keys = match load_keys(Path::new(&config.general.tls_private_key.unwrap())) {
            Ok(keys) => keys,
            Err(_) => return Err(Error::TlsError),
        };

        let config = match rustls::ServerConfig::builder()
            .with_safe_defaults()
            .with_no_client_auth()
            .with_single_cert(certs, keys.remove(0))
            .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidInput, err))
        {
            Ok(c) => c,
            Err(_) => return Err(Error::TlsError),
        };

        Ok(Tls {
            acceptor: TlsAcceptor::from(Arc::new(config)),
        })
    }
}

pub struct NoCertificateVerification;

impl ServerCertVerifier for NoCertificateVerification {
    fn verify_server_cert(
        &self,
        _end_entity: &Certificate,
        _intermediates: &[Certificate],
        _server_name: &ServerName,
        _scts: &mut dyn Iterator<Item = &[u8]>,
        _ocsp_response: &[u8],
        _now: SystemTime,
    ) -> Result<ServerCertVerified, rustls::Error> {
        Ok(ServerCertVerified::assertion())
    }
}
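NoCertificateVerification is only defined in this file; it takes effect when installed on the client side of a TLS connection. As a hedged sketch (not code from this diff, written against the rustls 0.20-era API that the builder calls above target), a permissive client config would be wired up through the dangerous() escape hatch:

use std::sync::Arc;
use tokio_rustls::rustls::{self, RootCertStore};

// Hypothetical helper: a client config that accepts any server certificate,
// e.g. for self-signed certs in a test environment. Not for production use.
fn insecure_client_config() -> rustls::ClientConfig {
    let mut config = rustls::ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(RootCertStore::empty())
        .with_no_client_auth();
    // Swap the default verifier for the permissive one defined above.
    config
        .dangerous()
        .set_certificate_verifier(Arc::new(NoCertificateVerification));
    config
}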
start_test_env.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/bash
GREEN="\033[0;32m"
RED="\033[0;31m"
BLUE="\033[0;34m"
RESET="\033[0m"

cd tests/docker/
docker compose kill main || true
docker compose build main
docker compose down
docker compose up -d
# Wait for the container to start
while ! docker compose exec main ls; do
  echo "Waiting for test environment to start"
  sleep 1
done
echo "==================================="
docker compose exec -e LOG_LEVEL=error -d main toxiproxy-server
docker compose exec --workdir /app main cargo build
docker compose exec -d --workdir /app main ./target/debug/pgcat ./.circleci/pgcat.toml
docker compose exec --workdir /app/tests/ruby main bundle install
docker compose exec --workdir /app/tests/python main pip3 install -r requirements.txt
echo "Interactive test environment ready"
echo "To run integration tests, you can use the following commands:"
echo -e "  ${BLUE}Ruby:   ${RED}cd /app/tests/ruby && bundle exec ruby tests.rb --format documentation${RESET}"
echo -e "  ${BLUE}Python: ${RED}cd /app/ && pytest ${RESET}"
echo -e "  ${BLUE}Rust:   ${RED}cd /app/tests/rust && cargo run ${RESET}"
echo -e "  ${BLUE}Go:     ${RED}cd /app/tests/go && /usr/local/go/bin/go test${RESET}"
echo "The test sources are mounted directly from the host, so you can modify the code and run the tests again"
echo "You can rebuild PgCat from within the container by running"
echo -e "  ${GREEN}cargo build${RESET}"
echo "and then run the tests again"
echo "==================================="
docker compose exec --workdir /app/tests main bash
tests/docker/Dockerfile (new file, 13 lines)
@@ -0,0 +1,13 @@
FROM rust:bullseye

COPY --from=sclevine/yj /bin/yj /bin/yj
RUN /bin/yj -h
RUN apt-get update && apt-get install llvm-11 psmisc postgresql-contrib postgresql-client ruby ruby-dev libpq-dev python3 python3-pip lcov curl sudo iproute2 -y
RUN cargo install cargo-binutils rustfilt
RUN rustup component add llvm-tools-preview
RUN sudo gem install bundler
RUN wget -O toxiproxy-2.4.0.deb https://github.com/Shopify/toxiproxy/releases/download/v2.4.0/toxiproxy_2.4.0_linux_$(dpkg --print-architecture).deb && \
    sudo dpkg -i toxiproxy-2.4.0.deb
RUN wget -O go1.21.3.linux-$(dpkg --print-architecture).tar.gz https://go.dev/dl/go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    sudo tar -C /usr/local -xzf go1.21.3.linux-$(dpkg --print-architecture).tar.gz && \
    rm go1.21.3.linux-$(dpkg --print-architecture).tar.gz
tests/docker/docker-compose.yml (new file, 54 lines)
@@ -0,0 +1,54 @@
services:
  # All Postgres containers join the main container's network namespace
  # (network_mode: "service:main"), so each must listen on its own port.
  pg1:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
    command: ["postgres", "-p", "5432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg2:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
    command: ["postgres", "-p", "7432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg3:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
    command: ["postgres", "-p", "8432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg4:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=scram-sha-256 --auth-host=scram-sha-256 --auth=scram-sha-256
    command: ["postgres", "-p", "9432", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-c", "pg_stat_statements.max=100000"]
  pg5:
    image: postgres:14
    network_mode: "service:main"
    environment:
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_INITDB_ARGS: --auth-local=md5 --auth-host=md5 --auth=md5
    command: ["postgres", "-c", "shared_preload_libraries=pg_stat_statements", "-c", "pg_stat_statements.track=all", "-p", "10432"]
  main:
    build: .
    command: ["bash", "/app/tests/docker/run.sh"]
    environment:
      - INTERACTIVE_TEST_ENVIRONMENT=true
    volumes:
      - ../../:/app/
      - /app/target/
tests/docker/run.sh (new file, 69 lines)
@@ -0,0 +1,69 @@
#!/bin/bash

rm -rf /app/target/ || true
rm /app/*.profraw || true
rm /app/pgcat.profdata || true
rm -rf /app/cov || true

# Prepares the interactive test environment
#
if [ -n "$INTERACTIVE_TEST_ENVIRONMENT" ]; then
  ports=(5432 7432 8432 9432 10432)
  for port in "${ports[@]}"; do
    is_it_up=0
    attempts=0
    while [ $is_it_up -eq 0 ]; do
      PGPASSWORD=postgres psql -h 127.0.0.1 -p $port -U postgres -c '\q' > /dev/null 2>&1
      if [ $? -eq 0 ]; then
        echo "PostgreSQL on port $port is up."
        is_it_up=1
      else
        attempts=$((attempts+1))
        if [ $attempts -gt 10 ]; then
          echo "PostgreSQL on port $port is down, giving up."
          exit 1
        fi
        echo "PostgreSQL on port $port is down, waiting for it to start."
        sleep 1
      fi
    done
  done
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 5432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 7432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 8432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 9432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  PGPASSWORD=postgres psql -e -h 127.0.0.1 -p 10432 -U postgres -f /app/tests/sharding/query_routing_setup.sql
  sleep 100000000000000000
  exit 0
fi

export LLVM_PROFILE_FILE="/app/pgcat-%m-%p.profraw"
export RUSTC_BOOTSTRAP=1
export CARGO_INCREMENTAL=0
export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort -Cinstrument-coverage"
export RUSTDOCFLAGS="-Cpanic=abort"

cd /app/
cargo clean
cargo build
cargo test --tests

bash .circleci/run_tests.sh

TEST_OBJECTS=$( \
  for file in $(cargo test --no-run 2>&1 | grep "target/debug/deps/pgcat-[[:alnum:]]\+" -o); \
  do \
    printf "%s %s " --object $file; \
  done \
)

echo "Generating coverage report"

rust-profdata merge -sparse /app/pgcat-*.profraw -o /app/pgcat.profdata

bash -c "rust-cov export -ignore-filename-regex='rustc|registry' -Xdemangler=rustfilt -instr-profile=/app/pgcat.profdata $TEST_OBJECTS --object ./target/debug/pgcat --format lcov > ./lcov.info"

genhtml lcov.info --title "PgCat Code Coverage" --css-file ./cov-style.css --highlight --no-function-coverage --ignore-errors source --legend --output-directory cov --prefix $(pwd)

rm /app/*.profraw
rm /app/pgcat.profdata
tests/go/go.mod (new file, 5 lines)
@@ -0,0 +1,5 @@
module pgcat

go 1.21

require github.com/lib/pq v1.10.9
tests/go/go.sum (new file, 2 lines)
@@ -0,0 +1,2 @@
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
tests/go/pgcat.toml (new file, 162 lines)
@@ -0,0 +1,162 @@
#
# PgCat config example.
#

#
# General pooler settings
[general]
# What IP to run on, 0.0.0.0 means accessible from everywhere.
host = "0.0.0.0"

# Port to run on, same as PgBouncer used in this example.
port = "${PORT}"

# Whether to enable the prometheus exporter or not.
enable_prometheus_exporter = true

# Port the prometheus exporter listens on.
prometheus_exporter_port = 9930

# How long to wait before aborting a server connection (ms).
connect_timeout = 1000

# How much time to give the health check query to return with a result (ms).
healthcheck_timeout = 1000

# How long to keep a connection available for immediate re-use without running a health check query on it (ms).
healthcheck_delay = 30000

# How much time to give clients during shutdown before forcibly killing client connections (ms).
shutdown_timeout = 5000

# For how long to ban a server if it fails a health check (seconds).
ban_time = 60 # Seconds

# Whether to log client connections.
log_client_connections = false

# Whether to log client disconnections.
log_client_disconnections = false

# Reload config automatically if it changes.
autoreload = 15000

server_round_robin = false

# TLS
tls_certificate = "../../.circleci/server.cert"
tls_private_key = "../../.circleci/server.key"

# Credentials to access the virtual administrative database (pgbouncer or pgcat).
# Connecting to that database allows running commands like `SHOW POOLS`, `SHOW DATABASES`, etc.
admin_username = "admin_user"
admin_password = "admin_pass"

# Pool configs are structured as pool.<pool_name>.
# The pool_name is what clients use as the database name when connecting.
# For the example below, a client can connect using "postgres://sharding_user:sharding_user@pgcat_host:pgcat_port/sharded_db".
[pools.sharded_db]
# Pool mode (see PgBouncer docs for more).
# session: one server connection per connected client
# transaction: one server connection per client transaction
pool_mode = "transaction"

# If the client doesn't specify, route traffic to
# this role by default.
#
# any: round-robin between primary and replicas,
# replica: round-robin between replicas only, without touching the primary,
# primary: all queries go to the primary unless otherwise specified.
default_role = "any"

# Query parser. If enabled, we'll attempt to parse
# every incoming query to determine if it's a read or a write.
# If it's a read query, we'll direct it to a replica. Otherwise, if it's a write,
# we'll direct it to the primary.
query_parser_enabled = true

# If the query parser is enabled and this setting is enabled, we'll attempt to
# infer the role from the query itself.
query_parser_read_write_splitting = true

# If the query parser is enabled and this setting is enabled, the primary will be part of the pool of databases used for
# load balancing of read queries. Otherwise, the primary will only be used for write
# queries. The primary can always be explicitly selected with our custom protocol.
primary_reads_enabled = true

# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
#
# Current options:
#
# pg_bigint_hash: PARTITION BY HASH (Postgres hashing function)
# sha1: a hashing function based on SHA1
#
sharding_function = "pg_bigint_hash"

# Prepared statements cache size.
prepared_statements_cache_size = 500

# Credentials for users that may connect to this cluster.
[pools.sharded_db.users.0]
username = "sharding_user"
password = "sharding_user"
# Maximum number of server connections that can be established for this user.
# The maximum number of connections from a single PgCat process to any database in the cluster
# is the sum of pool_size across all users.
pool_size = 5
statement_timeout = 0

[pools.sharded_db.users.1]
username = "other_user"
password = "other_user"
pool_size = 21
statement_timeout = 30000

# Shard 0
[pools.sharded_db.shards.0]
# [ host, port, role ]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
# Database name (e.g. "postgres")
database = "shard0"

[pools.sharded_db.shards.1]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard1"

[pools.sharded_db.shards.2]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ],
]
database = "shard2"

[pools.simple_db]
pool_mode = "session"
default_role = "primary"
query_parser_enabled = true
query_parser_read_write_splitting = true
primary_reads_enabled = true
sharding_function = "pg_bigint_hash"

[pools.simple_db.users.0]
username = "simple_user"
password = "simple_user"
pool_size = 5
statement_timeout = 30000

[pools.simple_db.shards.0]
servers = [
    [ "127.0.0.1", 5432, "primary" ],
    [ "localhost", 5432, "replica" ]
]
database = "some_db"
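The "${PORT}" placeholder is not something the pooler resolves itself; the Go harness rewrites it before handing the file over (see setup.go below). For illustration only, here is a minimal sketch of reading just the [general] section of this template from Rust, assuming the serde and toml crates; the struct is a trimmed-down stand-in, not PgCat's real config type:

use serde::Deserialize;

// Hypothetical, trimmed mirror of the [general] section above.
#[derive(Deserialize, Debug)]
struct General {
    host: String,
    // Kept as a string because the template holds the "${PORT}" placeholder
    // until the test harness substitutes a real port number.
    port: String,
}

#[derive(Deserialize, Debug)]
struct Config {
    general: General,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let raw = std::fs::read_to_string("tests/go/pgcat.toml")?;
    // Unknown keys (pools, users, shards, ...) are ignored by default.
    let config: Config = toml::from_str(&raw)?;
    println!("listening on {}:{}", config.general.host, config.general.port);
    Ok(())
}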
tests/go/prepared_test.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package pgcat

import (
	"context"
	"database/sql"
	"fmt"
	_ "github.com/lib/pq"
	"testing"
)

func Test(t *testing.T) {
	t.Cleanup(setup(t))
	t.Run("Named parameterized prepared statement works", namedParameterizedPreparedStatement)
	t.Run("Unnamed parameterized prepared statement works", unnamedParameterizedPreparedStatement)
}

func namedParameterizedPreparedStatement(t *testing.T) {
	db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
	if err != nil {
		t.Fatalf("could not open connection: %+v", err)
	}

	stmt, err := db.Prepare("SELECT $1")

	if err != nil {
		t.Fatalf("could not prepare: %+v", err)
	}

	for i := 0; i < 100; i++ {
		rows, err := stmt.Query(1)
		if err != nil {
			t.Fatalf("could not query: %+v", err)
		}
		_ = rows.Close()
	}
}

func unnamedParameterizedPreparedStatement(t *testing.T) {
	db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=sharded_db user=sharding_user password=sharding_user sslmode=disable", port))
	if err != nil {
		t.Fatalf("could not open connection: %+v", err)
	}

	for i := 0; i < 100; i++ {
		// Under the hood, QueryContext generates an unnamed parameterized prepared statement
		rows, err := db.QueryContext(context.Background(), "SELECT $1", 1)
		if err != nil {
			t.Fatalf("could not query: %+v", err)
		}
		_ = rows.Close()
	}
}
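For comparison, roughly the same exercise can be driven from Rust with tokio-postgres; this is an illustrative sketch, not part of the diff, and the port is a placeholder (the Go harness picks a random one at startup). It assumes the tokio and tokio-postgres crates:

use tokio_postgres::NoTls;

#[tokio::main]
async fn main() -> Result<(), tokio_postgres::Error> {
    // Placeholder port; connection parameters mirror the Go tests above.
    let (client, connection) = tokio_postgres::connect(
        "host=localhost port=6432 dbname=sharded_db user=sharding_user password=sharding_user",
        NoTls,
    )
    .await?;
    // Drive the connection on a background task.
    tokio::spawn(connection);

    // Named prepared statement, re-executed many times through the pooler,
    // like the Go test above.
    let stmt = client.prepare("SELECT $1::INT").await?;
    for i in 0..100i32 {
        let rows = client.query(&stmt, &[&i]).await?;
        assert_eq!(rows.len(), 1);
    }
    Ok(())
}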
tests/go/setup.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package pgcat

import (
	"context"
	"database/sql"
	_ "embed"
	"fmt"
	"math/rand"
	"os"
	"os/exec"
	"strings"
	"testing"
	"time"
)

//go:embed pgcat.toml
var pgcatCfg string

var port = rand.Intn(32760-20000) + 20000

func setup(t *testing.T) func() {
	cfg, err := os.CreateTemp("/tmp", "pgcat_cfg_*.toml")
	if err != nil {
		t.Fatalf("could not create temp file: %+v", err)
	}

	pgcatCfg = strings.Replace(pgcatCfg, "\"${PORT}\"", fmt.Sprintf("%d", port), 1)

	_, err = cfg.Write([]byte(pgcatCfg))
	if err != nil {
		t.Fatalf("could not write temp file: %+v", err)
	}

	commandPath := "../../target/debug/pgcat"
	if os.Getenv("CARGO_TARGET_DIR") != "" {
		commandPath = os.Getenv("CARGO_TARGET_DIR") + "/debug/pgcat"
	}

	cmd := exec.Command(commandPath, cfg.Name())
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	go func() {
		err = cmd.Run()
		if err != nil {
			t.Errorf("could not run pgcat: %+v", err)
		}
	}()

	deadline, cancelFunc := context.WithDeadline(context.Background(), time.Now().Add(5*time.Second))
	defer cancelFunc()
	for {
		select {
		case <-deadline.Done():
			break
		case <-time.After(50 * time.Millisecond):
			db, err := sql.Open("postgres", fmt.Sprintf("host=localhost port=%d database=pgcat user=admin_user password=admin_pass sslmode=disable", port))
			if err != nil {
				continue
			}
			rows, err := db.QueryContext(deadline, "SHOW STATS")
			if err != nil {
				continue
			}
			_ = rows.Close()
			_ = db.Close()
			break
		}
		// Note: the breaks inside the select only exit the select statement;
		// this unconditional break is what actually leaves the retry loop.
		break
	}

	return func() {
		err := cmd.Process.Signal(os.Interrupt)
		if err != nil {
			t.Fatalf("could not interrupt pgcat: %+v", err)
		}
		err = os.Remove(cfg.Name())
		if err != nil {
			t.Fatalf("could not remove temp file: %+v", err)
		}
	}
}
tests/pgbench/simple.sql (new file, 39 lines)
@@ -0,0 +1,39 @@
-- \setrandom aid 1 :naccounts
\set aid random(1, 100000)
-- \setrandom bid 1 :nbranches
\set bid random(1, 100000)
-- \setrandom tid 1 :ntellers
\set tid random(1, 100000)
-- \setrandom delta -5000 5000
\set delta random(-5000, 5000)

\set shard random(0, 2)

SET SHARD TO :shard;

SET SERVER ROLE TO 'auto';

BEGIN;

UPDATE pgbench_accounts SET abalance = abalance + :delta WHERE aid = :aid;

SELECT abalance FROM pgbench_accounts WHERE aid = :aid;

UPDATE pgbench_tellers SET tbalance = tbalance + :delta WHERE tid = :tid;

UPDATE pgbench_branches SET bbalance = bbalance + :delta WHERE bid = :bid;

INSERT INTO pgbench_history (tid, bid, aid, delta, mtime) VALUES (:tid, :bid, :aid, :delta, CURRENT_TIMESTAMP);

END;

SET SHARDING KEY TO :aid;

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;

SET SERVER ROLE TO 'replica';

-- Read load balancing
SELECT abalance FROM pgbench_accounts WHERE aid = :aid;
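A script like this is typically driven with pgbench's custom-script mode pointed at the pooler, e.g. pgbench -h 127.0.0.1 -p 6432 -U sharding_user -f tests/pgbench/simple.sql sharded_db (host, port, and database here are illustrative, matching the test configs above). The commented-out \setrandom lines preserve the pre-PostgreSQL-9.6 pgbench syntax that the \set ... random(...) lines replace.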
Some files were not shown because too many files have changed in this diff.