2022-03-10 01:33:29 -08:00
/// Implementation of the PostgreSQL server (database) protocol.
/// Here we are pretending to the a Postgres client.
2022-02-20 22:47:08 -08:00
use bytes ::{ Buf , BufMut , BytesMut } ;
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
use fallible_iterator ::FallibleIterator ;
2022-09-01 22:06:55 -05:00
use log ::{ debug , error , info , trace , warn } ;
2023-10-25 18:11:57 -04:00
use lru ::LruCache ;
2023-08-10 11:18:46 -04:00
use once_cell ::sync ::Lazy ;
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
use parking_lot ::{ Mutex , RwLock } ;
use postgres_protocol ::message ;
2023-11-28 21:13:30 -08:00
use std ::collections ::{ HashMap , HashSet , VecDeque } ;
2023-08-10 11:18:46 -04:00
use std ::mem ;
2023-05-02 10:26:40 +02:00
use std ::net ::IpAddr ;
2023-10-25 18:11:57 -04:00
use std ::num ::NonZeroUsize ;
2023-08-10 12:25:43 -07:00
use std ::sync ::Arc ;
2022-08-11 17:42:40 -04:00
use std ::time ::SystemTime ;
2023-04-30 09:41:46 -07:00
use tokio ::io ::{ AsyncRead , AsyncReadExt , AsyncWrite , BufStream } ;
use tokio ::net ::TcpStream ;
use tokio_rustls ::rustls ::{ OwnedTrustAnchor , RootCertStore } ;
use tokio_rustls ::{ client ::TlsStream , TlsConnector } ;
2022-02-03 15:17:04 -08:00
2023-10-25 18:11:57 -04:00
use crate ::config ::{ get_config , Address , User } ;
2022-02-15 22:45:45 -08:00
use crate ::constants ::* ;
2023-05-02 10:26:40 +02:00
use crate ::dns_cache ::{ AddrSet , CACHED_RESOLVER } ;
2023-04-10 14:51:01 -07:00
use crate ::errors ::{ Error , ServerIdentifier } ;
2023-08-10 11:18:46 -04:00
use crate ::messages ::BytesMutReader ;
2022-02-03 15:17:04 -08:00
use crate ::messages ::* ;
2023-03-10 06:23:51 -06:00
use crate ::mirrors ::MirroringManager ;
2022-09-20 21:47:32 -04:00
use crate ::pool ::ClientServerMap ;
2022-06-18 18:36:00 -07:00
use crate ::scram ::ScramSha256 ;
2023-03-28 17:19:37 +02:00
use crate ::stats ::ServerStats ;
2023-04-30 09:41:46 -07:00
use std ::io ::Write ;
use pin_project ::pin_project ;
#[ pin_project(project = SteamInnerProj) ]
pub enum StreamInner {
Plain {
#[ pin ]
stream : TcpStream ,
} ,
Tls {
#[ pin ]
stream : TlsStream < TcpStream > ,
} ,
}
impl AsyncWrite for StreamInner {
fn poll_write (
self : std ::pin ::Pin < & mut Self > ,
cx : & mut std ::task ::Context < '_ > ,
buf : & [ u8 ] ,
) -> std ::task ::Poll < Result < usize , std ::io ::Error > > {
let this = self . project ( ) ;
match this {
SteamInnerProj ::Tls { stream } = > stream . poll_write ( cx , buf ) ,
SteamInnerProj ::Plain { stream } = > stream . poll_write ( cx , buf ) ,
}
}
fn poll_flush (
self : std ::pin ::Pin < & mut Self > ,
cx : & mut std ::task ::Context < '_ > ,
) -> std ::task ::Poll < Result < ( ) , std ::io ::Error > > {
let this = self . project ( ) ;
match this {
SteamInnerProj ::Tls { stream } = > stream . poll_flush ( cx ) ,
SteamInnerProj ::Plain { stream } = > stream . poll_flush ( cx ) ,
}
}
fn poll_shutdown (
self : std ::pin ::Pin < & mut Self > ,
cx : & mut std ::task ::Context < '_ > ,
) -> std ::task ::Poll < Result < ( ) , std ::io ::Error > > {
let this = self . project ( ) ;
match this {
SteamInnerProj ::Tls { stream } = > stream . poll_shutdown ( cx ) ,
SteamInnerProj ::Plain { stream } = > stream . poll_shutdown ( cx ) ,
}
}
}
impl AsyncRead for StreamInner {
fn poll_read (
self : std ::pin ::Pin < & mut Self > ,
cx : & mut std ::task ::Context < '_ > ,
buf : & mut tokio ::io ::ReadBuf < '_ > ,
) -> std ::task ::Poll < std ::io ::Result < ( ) > > {
let this = self . project ( ) ;
match this {
SteamInnerProj ::Tls { stream } = > stream . poll_read ( cx , buf ) ,
SteamInnerProj ::Plain { stream } = > stream . poll_read ( cx , buf ) ,
}
}
}
impl StreamInner {
pub fn try_write ( & mut self , buf : & [ u8 ] ) -> std ::io ::Result < usize > {
match self {
StreamInner ::Tls { stream } = > {
let r = stream . get_mut ( ) ;
let mut w = r . 1. writer ( ) ;
w . write ( buf )
}
StreamInner ::Plain { stream } = > stream . try_write ( buf ) ,
}
}
}
2022-02-03 15:17:04 -08:00
2023-05-11 20:40:10 -04:00
/// Tracks which cleanup statements a server connection needs before
/// it can be checked back into the pool.
#[derive(Copy, Clone)]
struct CleanupState {
    /// If server connection requires RESET ALL before checkin because of set statement.
    needs_cleanup_set: bool,

    /// If server connection requires DEALLOCATE ALL before checkin because of prepare statement.
    needs_cleanup_prepare: bool,
}

impl CleanupState {
    /// A fresh connection starts with no cleanup required.
    fn new() -> Self {
        Self {
            needs_cleanup_set: false,
            needs_cleanup_prepare: false,
        }
    }

    /// True when at least one cleanup statement must run before checkin.
    fn needs_cleanup(&self) -> bool {
        self.needs_cleanup_set || self.needs_cleanup_prepare
    }

    /// Flag the connection as requiring full cleanup (both RESET and DEALLOCATE).
    fn set_true(&mut self) {
        *self = Self {
            needs_cleanup_set: true,
            needs_cleanup_prepare: true,
        };
    }

    /// Clear every cleanup flag, e.g. after the reset statements have run.
    fn reset(&mut self) {
        *self = Self::new();
    }
}

impl std::fmt::Display for CleanupState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let set = self.needs_cleanup_set;
        let prepare = self.needs_cleanup_prepare;
        write!(f, "SET: {}, PREPARE: {}", set, prepare)
    }
}
2023-08-10 11:18:46 -04:00
/// Server parameters whose values we track (and replay to clients when
/// they differ from the server's).
static TRACKED_PARAMETERS: Lazy<HashSet<String>> = Lazy::new(|| {
    [
        "client_encoding",
        "DateStyle",
        "TimeZone",
        "standard_conforming_strings",
        "application_name",
    ]
    .iter()
    .map(|name| name.to_string())
    .collect()
});
#[ derive(Debug, Clone) ]
pub struct ServerParameters {
parameters : HashMap < String , String > ,
}
impl Default for ServerParameters {
fn default ( ) -> Self {
Self ::new ( )
}
}
impl ServerParameters {
pub fn new ( ) -> Self {
let mut server_parameters = ServerParameters {
parameters : HashMap ::new ( ) ,
} ;
server_parameters . set_param ( " client_encoding " . to_string ( ) , " UTF8 " . to_string ( ) , false ) ;
server_parameters . set_param ( " DateStyle " . to_string ( ) , " ISO, MDY " . to_string ( ) , false ) ;
server_parameters . set_param ( " TimeZone " . to_string ( ) , " Etc/UTC " . to_string ( ) , false ) ;
server_parameters . set_param (
" standard_conforming_strings " . to_string ( ) ,
" on " . to_string ( ) ,
false ,
) ;
server_parameters . set_param ( " application_name " . to_string ( ) , " pgcat " . to_string ( ) , false ) ;
server_parameters
}
/// returns true if a tracked parameter was set, false if it was a non-tracked parameter
/// if startup is false, then then only tracked parameters will be set
pub fn set_param ( & mut self , mut key : String , value : String , startup : bool ) {
// The startup parameter will send uncapitalized keys but parameter status packets will send capitalized keys
if key = = " timezone " {
key = " TimeZone " . to_string ( ) ;
} else if key = = " datestyle " {
key = " DateStyle " . to_string ( ) ;
} ;
2023-10-10 09:18:21 -07:00
if TRACKED_PARAMETERS . contains ( & key ) | | startup {
2023-08-10 11:18:46 -04:00
self . parameters . insert ( key , value ) ;
}
}
pub fn set_from_hashmap ( & mut self , parameters : & HashMap < String , String > , startup : bool ) {
// iterate through each and call set_param
for ( key , value ) in parameters {
self . set_param ( key . to_string ( ) , value . to_string ( ) , startup ) ;
}
}
// Gets the diff of the parameters
fn compare_params ( & self , incoming_parameters : & ServerParameters ) -> HashMap < String , String > {
let mut diff = HashMap ::new ( ) ;
// iterate through tracked parameters
for key in TRACKED_PARAMETERS . iter ( ) {
if let Some ( incoming_value ) = incoming_parameters . parameters . get ( key ) {
if let Some ( value ) = self . parameters . get ( key ) {
if value ! = incoming_value {
diff . insert ( key . to_string ( ) , incoming_value . to_string ( ) ) ;
}
}
}
}
diff
}
pub fn get_application_name ( & self ) -> & String {
// Can unwrap because we set it in the constructor
self . parameters . get ( " application_name " ) . unwrap ( )
}
fn add_parameter_message ( key : & str , value : & str , buffer : & mut BytesMut ) {
buffer . put_u8 ( b 'S' ) ;
// 4 is len of i32, the plus for the null terminator
let len = 4 + key . len ( ) + 1 + value . len ( ) + 1 ;
buffer . put_i32 ( len as i32 ) ;
buffer . put_slice ( key . as_bytes ( ) ) ;
buffer . put_u8 ( 0 ) ;
buffer . put_slice ( value . as_bytes ( ) ) ;
buffer . put_u8 ( 0 ) ;
}
}
impl From<&ServerParameters> for BytesMut {
    /// Serialize every stored parameter as a sequence of ParameterStatus
    /// ('S') wire messages.
    fn from(server_parameters: &ServerParameters) -> Self {
        let mut bytes = BytesMut::new();

        server_parameters
            .parameters
            .iter()
            .for_each(|(key, value)| {
                ServerParameters::add_parameter_message(key, value, &mut bytes)
            });

        bytes
    }
}
2022-02-04 09:28:52 -08:00
/// Server state.
pub struct Server {
    /// Server host, e.g. localhost,
    /// port, e.g. 5432, and role, e.g. primary or replica.
    address: Address,

    /// Buffered connection to the server, either plain TCP or TLS.
    stream: BufStream<StreamInner>,

    /// Our server response buffer. We buffer data before we give it to the client.
    buffer: BytesMut,

    /// Server information the server sent us over on startup.
    server_parameters: ServerParameters,

    /// Backend id and secret key used for query cancellation.
    process_id: i32,
    secret_key: i32,

    /// Is the server inside a transaction or idle.
    in_transaction: bool,

    /// Is there more data for the client to read.
    data_available: bool,

    /// Is the server in copy-in or copy-out modes.
    in_copy_mode: bool,

    /// Is the server broken? We'll remove it from the pool if so.
    bad: bool,

    /// If server connection requires reset statements before checkin.
    cleanup_state: CleanupState,

    /// Mapping of clients and servers used for query cancellation.
    client_server_map: ClientServerMap,

    /// Server connected at.
    connected_at: chrono::naive::NaiveDateTime,

    /// Reports various metrics, e.g. data sent & received.
    stats: Arc<ServerStats>,

    /// Application name using the server at the moment.
    application_name: String,

    /// Last time that a successful server send or response happened.
    last_activity: SystemTime,

    /// Query mirroring manager, present when mirroring is configured.
    mirror_manager: Option<MirroringManager>,

    /// Associated addresses used (resolved for the server host, if DNS caching is enabled).
    addr_set: Option<AddrSet>,

    /// Should clean up dirty connections?
    cleanup_connections: bool,

    /// Log client parameter status changes.
    log_client_parameter_status_changes: bool,

    /// Prepared statements cached on this connection, keyed by statement
    /// name (LRU); `None` when the cache is disabled.
    prepared_statement_cache: Option<LruCache<String, ()>>,

    /// Prepared statement being currently registered on the server.
    registering_prepared_statement: VecDeque<String>,
}
impl Server {
2022-02-04 09:28:52 -08:00
/// Pretend to be the Postgres client and connect to the server given host, port and credentials.
2022-02-15 22:45:45 -08:00
/// Perform the authentication and return the server in a ready for query state.
2023-10-10 09:18:21 -07:00
#[ allow(clippy::too_many_arguments) ]
2022-02-03 15:17:04 -08:00
pub async fn startup (
2022-02-15 08:18:01 -08:00
address : & Address ,
user : & User ,
2022-02-03 15:17:04 -08:00
database : & str ,
2022-02-04 16:01:35 -08:00
client_server_map : ClientServerMap ,
2023-03-28 17:19:37 +02:00
stats : Arc < ServerStats > ,
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
auth_hash : Arc < RwLock < Option < String > > > ,
2023-05-18 10:46:55 -07:00
cleanup_connections : bool ,
2023-08-16 14:01:21 -04:00
log_client_parameter_status_changes : bool ,
2023-10-25 18:11:57 -04:00
prepared_statement_cache_size : usize ,
2022-02-03 15:17:04 -08:00
) -> Result < Server , Error > {
2023-05-02 10:26:40 +02:00
let cached_resolver = CACHED_RESOLVER . load ( ) ;
let mut addr_set : Option < AddrSet > = None ;
// If we are caching addresses and hostname is not an IP
if cached_resolver . enabled ( ) & & address . host . parse ::< IpAddr > ( ) . is_err ( ) {
debug! ( " Resolving {} " , & address . host ) ;
addr_set = match cached_resolver . lookup_ip ( & address . host ) . await {
Ok ( ok ) = > {
debug! ( " Obtained: {:?} " , ok ) ;
Some ( ok )
}
Err ( err ) = > {
warn! ( " Error trying to resolve {}, ({:?}) " , & address . host , err ) ;
None
}
}
} ;
2022-02-15 08:18:01 -08:00
let mut stream =
2022-08-25 06:40:56 -07:00
match TcpStream ::connect ( & format! ( " {} : {} " , & address . host , address . port ) ) . await {
2022-02-15 08:18:01 -08:00
Ok ( stream ) = > stream ,
Err ( err ) = > {
2022-02-20 22:47:08 -08:00
error! ( " Could not connect to server: {} " , err ) ;
2022-11-17 09:24:39 -08:00
return Err ( Error ::SocketError ( format! (
" Could not connect to server: {} " ,
err
) ) ) ;
2022-02-15 08:18:01 -08:00
}
} ;
2023-04-30 09:41:46 -07:00
// TCP timeouts.
2023-02-08 11:35:38 -06:00
configure_socket ( & stream ) ;
2022-02-03 15:17:04 -08:00
2023-04-30 09:41:46 -07:00
let config = get_config ( ) ;
let mut stream = if config . general . server_tls {
// Request a TLS connection
ssl_request ( & mut stream ) . await ? ;
let response = match stream . read_u8 ( ) . await {
Ok ( response ) = > response as char ,
Err ( err ) = > {
return Err ( Error ::SocketError ( format! (
" Server socket error: {:?} " ,
err
) ) )
}
} ;
match response {
// Server supports TLS
'S' = > {
debug! ( " Connecting to server using TLS " ) ;
let mut root_store = RootCertStore ::empty ( ) ;
root_store . add_server_trust_anchors (
webpki_roots ::TLS_SERVER_ROOTS . 0. iter ( ) . map ( | ta | {
OwnedTrustAnchor ::from_subject_spki_name_constraints (
ta . subject ,
ta . spki ,
ta . name_constraints ,
)
} ) ,
) ;
let mut tls_config = rustls ::ClientConfig ::builder ( )
. with_safe_defaults ( )
. with_root_certificates ( root_store )
. with_no_client_auth ( ) ;
// Equivalent to sslmode=prefer which is fine most places.
// If you want verify-full, change `verify_server_certificate` to true.
if ! config . general . verify_server_certificate {
let mut dangerous = tls_config . dangerous ( ) ;
dangerous . set_certificate_verifier ( Arc ::new (
crate ::tls ::NoCertificateVerification { } ,
) ) ;
}
let connector = TlsConnector ::from ( Arc ::new ( tls_config ) ) ;
let stream = match connector
. connect ( address . host . as_str ( ) . try_into ( ) . unwrap ( ) , stream )
. await
{
Ok ( stream ) = > stream ,
Err ( err ) = > {
return Err ( Error ::SocketError ( format! ( " Server TLS error: {:?} " , err ) ) )
}
} ;
StreamInner ::Tls { stream }
}
// Server does not support TLS
'N' = > StreamInner ::Plain { stream } ,
// Something else?
m = > {
2023-10-10 09:18:21 -07:00
return Err ( Error ::SocketError ( format! ( " Unknown message: {} " , { m } ) ) ) ;
2023-04-30 09:41:46 -07:00
}
}
} else {
StreamInner ::Plain { stream }
} ;
// let (read, write) = split(stream);
// let (mut read, mut write) = (ReadInner::Plain { stream: read }, WriteInner::Plain { stream: write });
2022-02-24 08:44:41 -08:00
trace! ( " Sending StartupMessage " ) ;
2022-02-22 19:26:08 -08:00
2022-03-10 01:33:29 -08:00
// StartupMessage
2023-04-18 09:57:17 -07:00
let username = match user . server_username {
Some ( ref server_username ) = > server_username ,
None = > & user . username ,
} ;
2023-10-11 10:13:23 -07:00
let password = match user . server_password . as_ref ( ) {
Some ( server_password ) = > Some ( server_password ) ,
None = > user . password . as_ref ( ) ,
2023-04-18 09:57:17 -07:00
} ;
startup ( & mut stream , username , database ) . await ? ;
2022-02-03 15:17:04 -08:00
2022-02-20 22:47:08 -08:00
let mut process_id : i32 = 0 ;
2022-02-03 17:48:37 -08:00
let mut secret_key : i32 = 0 ;
2023-10-10 09:18:21 -07:00
let server_identifier = ServerIdentifier ::new ( username , database ) ;
2022-02-03 17:48:37 -08:00
2022-02-15 22:45:45 -08:00
// We'll be handling multiple packets, but they will all be structured the same.
// We'll loop here until this exchange is complete.
2023-10-10 09:18:21 -07:00
let mut scram : Option < ScramSha256 > = password . map ( | password | ScramSha256 ::new ( password ) ) ;
2022-06-18 18:36:00 -07:00
2023-08-10 11:18:46 -04:00
let mut server_parameters = ServerParameters ::new ( ) ;
2022-02-03 15:17:04 -08:00
loop {
let code = match stream . read_u8 ( ) . await {
Ok ( code ) = > code as char ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" message code " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
let len = match stream . read_i32 ( ) . await {
Ok ( len ) = > len ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" message len " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
2022-02-24 08:44:41 -08:00
trace! ( " Message: {} " , code ) ;
2022-02-22 19:26:08 -08:00
2022-02-03 15:17:04 -08:00
match code {
2022-02-15 22:45:45 -08:00
// Authentication
2022-02-03 15:17:04 -08:00
'R' = > {
2022-02-15 22:45:45 -08:00
// Determine which kind of authentication is required, if any.
let auth_code = match stream . read_i32 ( ) . await {
Ok ( auth_code ) = > auth_code ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" auth code " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
2022-02-24 08:44:41 -08:00
trace! ( " Auth: {} " , auth_code ) ;
2022-02-22 19:26:08 -08:00
2022-02-15 22:45:45 -08:00
match auth_code {
MD5_ENCRYPTED_PASSWORD = > {
// The salt is 4 bytes.
// See: https://www.postgresql.org/docs/12/protocol-message-formats.html
2022-02-03 15:17:04 -08:00
let mut salt = vec! [ 0 u8 ; 4 ] ;
match stream . read_exact ( & mut salt ) . await {
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" salt " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
2023-04-18 09:57:17 -07:00
match password {
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
// Using plaintext password
Some ( password ) = > {
2023-04-18 09:57:17 -07:00
md5_password ( & mut stream , username , password , & salt [ .. ] ) . await ?
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
}
// Using auth passthrough, in this case we should already have a
// hash obtained when the pool was validated. If we reach this point
// and don't have a hash, we return an error.
None = > {
let option_hash = ( * auth_hash . read ( ) ) . clone ( ) ;
match option_hash {
Some ( hash ) = >
md5_password_with_hash (
& mut stream ,
& hash ,
& salt [ .. ] ,
)
. await ? ,
2023-04-10 14:51:01 -07:00
None = > return Err (
Error ::ServerAuthError (
" Auth passthrough (auth_query) failed and no user password is set in cleartext " . into ( ) ,
server_identifier
)
) ,
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
}
}
}
2022-02-03 15:17:04 -08:00
}
2022-02-15 22:45:45 -08:00
AUTHENTICATION_SUCCESSFUL = > ( ) ,
2022-02-03 15:17:04 -08:00
2022-06-18 18:36:00 -07:00
SASL = > {
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
if scram . is_none ( ) {
2023-04-10 14:51:01 -07:00
return Err ( Error ::ServerAuthError (
" SASL auth required and no password specified. \
Auth passthrough ( auth_query ) method is currently \
unsupported for SASL auth "
. into ( ) ,
server_identifier ,
) ) ;
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
}
2022-06-18 18:36:00 -07:00
debug! ( " Starting SASL authentication " ) ;
2023-04-10 14:51:01 -07:00
2022-06-18 18:36:00 -07:00
let sasl_len = ( len - 8 ) as usize ;
let mut sasl_auth = vec! [ 0 u8 ; sasl_len ] ;
2022-06-20 06:15:54 -07:00
2022-06-18 18:36:00 -07:00
match stream . read_exact ( & mut sasl_auth ) . await {
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" sasl message " . into ( ) ,
server_identifier ,
) )
}
2022-06-18 18:36:00 -07:00
} ;
let sasl_type = String ::from_utf8_lossy ( & sasl_auth [ .. sasl_len - 2 ] ) ;
2023-04-30 09:41:46 -07:00
if sasl_type . contains ( SCRAM_SHA_256 ) {
2022-06-18 18:36:00 -07:00
debug! ( " Using {} " , SCRAM_SHA_256 ) ;
2022-06-20 06:15:54 -07:00
// Generate client message.
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
let sasl_response = scram . as_mut ( ) . unwrap ( ) . message ( ) ;
2022-06-20 06:15:54 -07:00
// SASLInitialResponse (F)
2022-06-18 18:36:00 -07:00
let mut res = BytesMut ::new ( ) ;
res . put_u8 ( b 'p' ) ;
2022-06-20 06:15:54 -07:00
// length + String length + length + length of sasl response
2022-06-18 18:36:00 -07:00
res . put_i32 (
2022-06-20 06:15:54 -07:00
4 // i32 size
+ SCRAM_SHA_256 . len ( ) as i32 // length of SASL version string,
+ 1 // Null terminator for the SASL version string,
+ 4 // i32 size
+ sasl_response . len ( ) as i32 , // length of SASL response
2022-06-18 18:36:00 -07:00
) ;
2022-06-20 06:15:54 -07:00
2022-11-10 02:04:31 +08:00
res . put_slice ( format! ( " {} \0 " , SCRAM_SHA_256 ) . as_bytes ( ) ) ;
2022-06-18 18:36:00 -07:00
res . put_i32 ( sasl_response . len ( ) as i32 ) ;
res . put ( sasl_response ) ;
2023-04-30 09:41:46 -07:00
write_all_flush ( & mut stream , & res ) . await ? ;
2022-06-18 18:36:00 -07:00
} else {
error! ( " Unsupported SCRAM version: {} " , sasl_type ) ;
return Err ( Error ::ServerError ) ;
}
}
SASL_CONTINUE = > {
trace! ( " Continuing SASL " ) ;
let mut sasl_data = vec! [ 0 u8 ; ( len - 8 ) as usize ] ;
match stream . read_exact ( & mut sasl_data ) . await {
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" sasl cont message " . into ( ) ,
server_identifier ,
) )
}
2022-06-18 18:36:00 -07:00
} ;
let msg = BytesMut ::from ( & sasl_data [ .. ] ) ;
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
let sasl_response = scram . as_mut ( ) . unwrap ( ) . update ( & msg ) ? ;
2022-06-18 18:36:00 -07:00
2022-06-20 06:15:54 -07:00
// SASLResponse
2022-06-18 18:36:00 -07:00
let mut res = BytesMut ::new ( ) ;
res . put_u8 ( b 'p' ) ;
res . put_i32 ( 4 + sasl_response . len ( ) as i32 ) ;
res . put ( sasl_response ) ;
2023-04-30 09:41:46 -07:00
write_all_flush ( & mut stream , & res ) . await ? ;
2022-06-18 18:36:00 -07:00
}
SASL_FINAL = > {
trace! ( " Final SASL " ) ;
let mut sasl_final = vec! [ 0 u8 ; len as usize - 8 ] ;
match stream . read_exact ( & mut sasl_final ) . await {
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" sasl final message " . into ( ) ,
server_identifier ,
) )
}
2022-06-18 18:36:00 -07:00
} ;
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
match scram
. as_mut ( )
. unwrap ( )
. finish ( & BytesMut ::from ( & sasl_final [ .. ] ) )
{
2022-06-18 18:36:00 -07:00
Ok ( _ ) = > {
debug! ( " SASL authentication successful " ) ;
}
Err ( err ) = > {
debug! ( " SASL authentication failed " ) ;
return Err ( err ) ;
}
} ;
}
2022-02-03 15:17:04 -08:00
_ = > {
2022-02-20 22:47:08 -08:00
error! ( " Unsupported authentication mechanism: {} " , auth_code ) ;
2022-02-03 15:17:04 -08:00
return Err ( Error ::ServerError ) ;
}
}
}
2022-02-15 22:45:45 -08:00
// ErrorResponse
2022-02-03 15:17:04 -08:00
'E' = > {
2022-02-07 11:15:33 -08:00
let error_code = match stream . read_u8 ( ) . await {
Ok ( error_code ) = > error_code ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" error code message " . into ( ) ,
server_identifier ,
) )
}
2022-02-07 11:15:33 -08:00
} ;
2022-02-24 08:44:41 -08:00
trace! ( " Error: {} " , error_code ) ;
2022-02-22 19:26:08 -08:00
2022-02-07 11:15:33 -08:00
match error_code {
2022-02-15 22:45:45 -08:00
// No error message is present in the message.
MESSAGE_TERMINATOR = > ( ) ,
// An error message will be present.
2022-02-07 11:15:33 -08:00
_ = > {
2023-08-09 13:14:05 -03:00
let mut error = vec! [ 0 u8 ; len as usize ] ;
2022-02-15 22:45:45 -08:00
2022-02-07 11:15:33 -08:00
match stream . read_exact ( & mut error ) . await {
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" error message " . into ( ) ,
server_identifier ,
) )
}
2022-02-07 11:15:33 -08:00
} ;
2023-10-25 18:11:57 -04:00
let fields = match PgErrorMsg ::parse ( & error ) {
2023-08-09 13:14:05 -03:00
Ok ( f ) = > f ,
Err ( err ) = > {
return Err ( err ) ;
}
} ;
trace! ( " error fields: {} " , & fields ) ;
error! ( " server error: {}: {} " , fields . severity , fields . message ) ;
2022-02-07 11:15:33 -08:00
}
} ;
2022-02-15 22:45:45 -08:00
2022-02-03 15:17:04 -08:00
return Err ( Error ::ServerError ) ;
}
2022-02-15 22:45:45 -08:00
// ParameterStatus
2022-02-03 15:17:04 -08:00
'S' = > {
2023-08-10 11:18:46 -04:00
let mut bytes = BytesMut ::with_capacity ( len as usize - 4 ) ;
bytes . resize ( len as usize - mem ::size_of ::< i32 > ( ) , b '0' ) ;
2022-02-15 22:45:45 -08:00
2023-08-10 11:18:46 -04:00
match stream . read_exact ( & mut bytes [ .. ] ) . await {
2022-02-03 15:17:04 -08:00
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" parameter status message " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
2022-02-03 17:48:37 -08:00
2023-08-10 11:18:46 -04:00
let key = bytes . read_string ( ) . unwrap ( ) ;
let value = bytes . read_string ( ) . unwrap ( ) ;
2022-02-15 22:45:45 -08:00
// Save the parameter so we can pass it to the client later.
// These can be server_encoding, client_encoding, server timezone, Postgres version,
// and many more interesting things we should know about the Postgres server we are talking to.
2023-08-10 11:18:46 -04:00
server_parameters . set_param ( key , value , true ) ;
2022-02-03 15:17:04 -08:00
}
2022-02-15 22:45:45 -08:00
// BackendKeyData
2022-02-03 15:17:04 -08:00
'K' = > {
2022-02-15 22:45:45 -08:00
// The frontend must save these values if it wishes to be able to issue CancelRequest messages later.
2022-03-10 01:33:29 -08:00
// See: <https://www.postgresql.org/docs/12/protocol-message-formats.html>.
2022-02-20 22:47:08 -08:00
process_id = match stream . read_i32 ( ) . await {
2022-02-03 17:48:37 -08:00
Ok ( id ) = > id ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" process id message " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 17:48:37 -08:00
} ;
secret_key = match stream . read_i32 ( ) . await {
Ok ( id ) = > id ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" secret key message " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
}
2022-02-15 22:45:45 -08:00
// ReadyForQuery
2022-02-03 15:17:04 -08:00
'Z' = > {
let mut idle = vec! [ 0 u8 ; len as usize - 4 ] ;
match stream . read_exact ( & mut idle ) . await {
Ok ( _ ) = > ( ) ,
2023-04-10 14:51:01 -07:00
Err ( _ ) = > {
return Err ( Error ::ServerStartupError (
" transaction status message " . into ( ) ,
server_identifier ,
) )
}
2022-02-03 15:17:04 -08:00
} ;
2023-08-10 11:18:46 -04:00
let server = Server {
2022-02-15 08:18:01 -08:00
address : address . clone ( ) ,
2023-04-30 09:41:46 -07:00
stream : BufStream ::new ( stream ) ,
2022-02-03 15:17:04 -08:00
buffer : BytesMut ::with_capacity ( 8196 ) ,
2023-08-10 11:18:46 -04:00
server_parameters ,
2022-11-10 02:04:31 +08:00
process_id ,
secret_key ,
2022-02-03 16:25:05 -08:00
in_transaction : false ,
2023-07-21 01:06:01 -05:00
in_copy_mode : false ,
2022-02-04 08:26:50 -08:00
data_available : false ,
2022-02-03 17:06:19 -08:00
bad : false ,
2023-05-11 20:40:10 -04:00
cleanup_state : CleanupState ::new ( ) ,
2022-11-10 02:04:31 +08:00
client_server_map ,
2023-05-02 10:26:40 +02:00
addr_set ,
2022-02-12 10:16:05 -08:00
connected_at : chrono ::offset ::Utc ::now ( ) . naive_utc ( ) ,
2022-11-10 02:04:31 +08:00
stats ,
2023-08-10 11:18:46 -04:00
application_name : " pgcat " . to_string ( ) ,
2022-08-11 17:42:40 -04:00
last_activity : SystemTime ::now ( ) ,
2023-03-10 06:23:51 -06:00
mirror_manager : match address . mirrors . len ( ) {
0 = > None ,
_ = > Some ( MirroringManager ::from_addresses (
user . clone ( ) ,
database . to_owned ( ) ,
address . mirrors . clone ( ) ,
) ) ,
} ,
2023-05-18 10:46:55 -07:00
cleanup_connections ,
2023-08-16 14:01:21 -04:00
log_client_parameter_status_changes ,
2023-10-25 18:11:57 -04:00
prepared_statement_cache : match prepared_statement_cache_size {
0 = > None ,
_ = > Some ( LruCache ::new (
NonZeroUsize ::new ( prepared_statement_cache_size ) . unwrap ( ) ,
) ) ,
} ,
2023-11-28 21:13:30 -08:00
registering_prepared_statement : VecDeque ::new ( ) ,
2022-06-05 09:48:06 -07:00
} ;
return Ok ( server ) ;
2022-02-03 15:17:04 -08:00
}
2022-02-15 22:45:45 -08:00
// We have an unexpected message from the server during this exchange.
// Means we implemented the protocol wrong or we're not talking to a Postgres server.
2022-02-03 15:17:04 -08:00
_ = > {
2022-02-20 22:47:08 -08:00
error! ( " Unknown code: {} " , code ) ;
2022-11-17 09:24:39 -08:00
return Err ( Error ::ProtocolSyncError ( format! (
" Unknown server code: {} " ,
code
) ) ) ;
2022-02-03 15:17:04 -08:00
}
} ;
}
}
2022-02-15 22:45:45 -08:00
/// Issue a query cancellation request to the server.
2022-02-04 16:01:35 -08:00
/// Uses a separate connection that's not part of the connection pool.
pub async fn cancel (
host : & str ,
2022-08-25 06:40:56 -07:00
port : u16 ,
2022-02-04 16:01:35 -08:00
process_id : i32 ,
secret_key : i32 ,
) -> Result < ( ) , Error > {
let mut stream = match TcpStream ::connect ( & format! ( " {} : {} " , host , port ) ) . await {
Ok ( stream ) = > stream ,
Err ( err ) = > {
2022-02-20 22:47:08 -08:00
error! ( " Could not connect to server: {} " , err ) ;
2023-04-10 14:51:01 -07:00
return Err ( Error ::SocketError ( " Error reading cancel message " . into ( ) ) ) ;
2022-02-04 16:01:35 -08:00
}
} ;
2023-02-08 11:35:38 -06:00
configure_socket ( & stream ) ;
2022-02-04 16:01:35 -08:00
2022-02-22 19:26:08 -08:00
debug! ( " Sending CancelRequest " ) ;
2022-02-04 16:01:35 -08:00
let mut bytes = BytesMut ::with_capacity ( 16 ) ;
bytes . put_i32 ( 16 ) ;
2022-02-15 22:45:45 -08:00
bytes . put_i32 ( CANCEL_REQUEST_CODE ) ;
2022-02-04 16:01:35 -08:00
bytes . put_i32 ( process_id ) ;
bytes . put_i32 ( secret_key ) ;
2023-04-30 09:41:46 -07:00
write_all_flush ( & mut stream , & bytes ) . await
2022-02-04 16:01:35 -08:00
}
2022-02-15 22:45:45 -08:00
/// Send messages to the server from the client.
2023-01-17 20:39:55 -05:00
pub async fn send ( & mut self , messages : & BytesMut ) -> Result < ( ) , Error > {
2023-03-10 06:23:51 -06:00
self . mirror_send ( messages ) ;
2023-03-28 17:19:37 +02:00
self . stats ( ) . data_sent ( messages . len ( ) ) ;
2022-02-14 10:00:55 -08:00
2023-10-10 09:18:21 -07:00
match write_all_flush ( & mut self . stream , messages ) . await {
2022-08-11 17:42:40 -04:00
Ok ( _ ) = > {
// Successfully sent to server
self . last_activity = SystemTime ::now ( ) ;
Ok ( ( ) )
}
2022-02-03 17:06:19 -08:00
Err ( err ) = > {
2023-05-05 08:27:19 -07:00
error! (
" Terminating server {:?} because of: {:?} " ,
self . address , err
) ;
2022-02-03 17:06:19 -08:00
self . bad = true ;
Err ( err )
}
}
2022-02-03 15:17:04 -08:00
}
2022-02-15 22:45:45 -08:00
    /// Receive data from the server in response to a client request.
    /// This method must be called multiple times while `self.is_data_available()` is true
    /// in order to receive all data the server has to offer.
    ///
    /// `client_server_parameters`: when present, ParameterStatus ('S') updates from the
    /// server are also recorded into the client's parameter set.
    ///
    /// Returns the buffered bytes to forward to the client, or an error if the
    /// connection failed (in which case this connection is marked bad).
    pub async fn recv(
        &mut self,
        mut client_server_parameters: Option<&mut ServerParameters>,
    ) -> Result<BytesMut, Error> {
        loop {
            let mut message = match read_message(&mut self.stream).await {
                Ok(message) => message,
                Err(err) => {
                    error!(
                        "Terminating server {:?} because of: {:?}",
                        self.address, err
                    );
                    self.bad = true;
                    return Err(err);
                }
            };

            // Buffer the message we'll forward to the client later.
            self.buffer.put(&message[..]);

            let code = message.get_u8() as char;
            let _len = message.get_i32();

            trace!("Message: {}", code);

            match code {
                // ReadyForQuery
                'Z' => {
                    let transaction_state = message.get_u8() as char;

                    match transaction_state {
                        // In transaction.
                        'T' => {
                            self.in_transaction = true;
                        }

                        // Idle, transaction over.
                        'I' => {
                            self.in_transaction = false;
                        }

                        // Some error occurred, the transaction was rolled back.
                        // Still "in transaction" until the client ends it.
                        'E' => {
                            self.in_transaction = true;
                        }

                        // Something totally unexpected, this is not a Postgres server we know.
                        _ => {
                            self.bad = true;
                            return Err(Error::ProtocolSyncError(format!(
                                "Unknown transaction state: {}",
                                transaction_state
                            )));
                        }
                    };

                    // There is no more data available from the server.
                    self.data_available = false;

                    break;
                }

                // ErrorResponse
                'E' => {
                    // An error aborts any in-progress COPY.
                    if self.in_copy_mode {
                        self.in_copy_mode = false;
                    }

                    // Remove the prepared statement from the cache, it has a syntax error or something else bad happened.
                    if let Some(prepared_stmt_name) =
                        self.registering_prepared_statement.pop_front()
                    {
                        if let Some(ref mut cache) = self.prepared_statement_cache {
                            if let Some(_removed) = cache.pop(&prepared_stmt_name) {
                                debug!(
                                    "Removed {} from prepared statement cache",
                                    prepared_stmt_name
                                );
                            } else {
                                // Shouldn't happen.
                                debug!("Prepared statement {} was not cached", prepared_stmt_name);
                            }
                        }
                    }

                    if self.prepared_statement_cache.is_some() {
                        let error_message = PgErrorMsg::parse(&message)?;
                        if error_message.message == "cached plan must not change result type" {
                            warn!("Server {:?} changed schema, dropping connection to clean up prepared statements", self.address);
                            // This will still result in an error to the client, but this server connection will drop all cached prepared statements
                            // so that any new queries will be re-prepared
                            // TODO: Other ideas to solve errors when there are DDL changes after a statement has been prepared
                            // - Recreate entire connection pool to force recreation of all server connections
                            // - Clear the ConnectionPool's statement cache so that new statement names are generated
                            // - Implement a retry (re-prepare) so the client doesn't see an error
                            self.cleanup_state.needs_cleanup_prepare = true;
                        }
                    }
                }

                // CommandComplete
                'C' => {
                    if self.in_copy_mode {
                        self.in_copy_mode = false;
                    }

                    match message.read_string() {
                        Ok(command) => {
                            // Non-exhaustive list of commands that are likely to change session variables/resources
                            // which can leak between clients. This is a best effort to block bad clients
                            // from poisoning a transaction-mode pool by setting inappropriate session variables
                            match command.as_str() {
                                "SET" => {
                                    // We don't detect set statements in transactions
                                    // No great way to differentiate between set and set local
                                    // As a result, we will miss cases when set statements are used in transactions
                                    // This will reduce amount of reset statements sent
                                    if !self.in_transaction {
                                        debug!("Server connection marked for clean up");
                                        self.cleanup_state.needs_cleanup_set = true;
                                    }
                                }
                                "PREPARE" => {
                                    debug!("Server connection marked for clean up");
                                    self.cleanup_state.needs_cleanup_prepare = true;
                                }
                                _ => (),
                            }
                        }
                        Err(err) => {
                            warn!("Encountered an error while parsing CommandTag {}", err);
                        }
                    }
                }

                // ParameterStatus: record on this connection, and optionally on
                // the client's parameter set so it can be forwarded.
                'S' => {
                    let key = message.read_string().unwrap();
                    let value = message.read_string().unwrap();
                    if let Some(client_server_parameters) = client_server_parameters.as_mut() {
                        client_server_parameters.set_param(key.clone(), value.clone(), false);
                        if self.log_client_parameter_status_changes {
                            info!("Client parameter status change: {} = {}", key, value)
                        }
                    }
                    self.server_parameters.set_param(key, value, false);
                }

                // DataRow
                'D' => {
                    // More data is available after this message, this is not the end of the reply.
                    self.data_available = true;

                    // Don't flush yet, the more we buffer, the faster this goes...up to a limit.
                    if self.buffer.len() >= 8196 {
                        break;
                    }
                }

                // CopyInResponse: copy is starting from client to server.
                'G' => {
                    self.in_copy_mode = true;
                    break;
                }

                // CopyOutResponse: copy is starting from the server to the client.
                'H' => {
                    self.in_copy_mode = true;
                    self.data_available = true;
                    break;
                }

                // CopyData
                'd' => {
                    // Don't flush yet, buffer until we reach limit
                    if self.buffer.len() >= 8196 {
                        break;
                    }
                }

                // CopyDone
                // Buffer until ReadyForQuery shows up, so don't exit the loop yet.
                'c' => (),

                // ParseComplete: the statement registered successfully, so it can
                // stay in the prepared statement cache.
                '1' => {
                    self.registering_prepared_statement.pop_front();
                }

                // Anything else, e.g. errors, notices, etc.
                // Keep buffering until ReadyForQuery shows up.
                _ => (),
            };
        }

        let bytes = self.buffer.clone();

        // Keep track of how much data we got from the server for stats.
        self.stats().data_received(bytes.len());

        // Clear the buffer for next query.
        self.buffer.clear();

        // Successfully received data from server
        self.last_activity = SystemTime::now();

        // Pass the data back to the client.
        Ok(bytes)
    }
2022-02-03 16:25:05 -08:00
2023-10-25 18:11:57 -04:00
// Determines if the server already has a prepared statement with the given name
// Increments the prepared statement cache hit counter
pub fn has_prepared_statement ( & mut self , name : & str ) -> bool {
let cache = match & mut self . prepared_statement_cache {
Some ( cache ) = > cache ,
None = > return false ,
} ;
2023-06-16 12:57:44 -07:00
2023-10-25 18:11:57 -04:00
let has_it = cache . get ( name ) . is_some ( ) ;
if has_it {
2023-06-16 12:57:44 -07:00
self . stats . prepared_cache_hit ( ) ;
2023-10-25 18:11:57 -04:00
} else {
self . stats . prepared_cache_miss ( ) ;
2023-06-16 12:57:44 -07:00
}
2023-10-25 18:11:57 -04:00
has_it
2023-06-16 12:57:44 -07:00
}
2023-11-28 21:13:30 -08:00
fn add_prepared_statement_to_cache ( & mut self , name : & str ) -> Option < String > {
2023-10-25 18:11:57 -04:00
let cache = match & mut self . prepared_statement_cache {
Some ( cache ) = > cache ,
None = > return None ,
} ;
2023-06-16 12:57:44 -07:00
2023-06-18 23:02:34 -07:00
self . stats . prepared_cache_add ( ) ;
2023-06-16 12:57:44 -07:00
2023-10-25 18:11:57 -04:00
// If we evict something, we need to close it on the server
if let Some ( ( evicted_name , _ ) ) = cache . push ( name . to_string ( ) , ( ) ) {
if evicted_name ! = name {
debug! (
" Evicted prepared statement {} from cache, replaced with {} " ,
evicted_name , name
) ;
return Some ( evicted_name ) ;
2023-06-18 23:02:34 -07:00
}
2023-10-25 18:11:57 -04:00
} ;
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
None
2023-06-18 23:02:34 -07:00
}
2023-11-28 21:13:30 -08:00
fn remove_prepared_statement_from_cache ( & mut self , name : & str ) {
2023-10-25 18:11:57 -04:00
let cache = match & mut self . prepared_statement_cache {
Some ( cache ) = > cache ,
None = > return ,
} ;
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
self . stats . prepared_cache_remove ( ) ;
cache . pop ( name ) ;
2023-06-18 23:02:34 -07:00
}
2023-10-25 18:11:57 -04:00
pub async fn register_prepared_statement (
& mut self ,
parse : & Parse ,
should_send_parse_to_server : bool ,
) -> Result < ( ) , Error > {
if ! self . has_prepared_statement ( & parse . name ) {
2023-11-28 21:13:30 -08:00
self . registering_prepared_statement
. push_back ( parse . name . clone ( ) ) ;
2023-10-25 18:11:57 -04:00
let mut bytes = BytesMut ::new ( ) ;
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
if should_send_parse_to_server {
let parse_bytes : BytesMut = parse . try_into ( ) ? ;
bytes . extend_from_slice ( & parse_bytes ) ;
}
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
// If we evict something, we need to close it on the server
// We do this by adding it to the messages we're sending to the server before the sync
if let Some ( evicted_name ) = self . add_prepared_statement_to_cache ( & parse . name ) {
self . remove_prepared_statement_from_cache ( & evicted_name ) ;
let close_bytes : BytesMut = Close ::new ( & evicted_name ) . try_into ( ) ? ;
bytes . extend_from_slice ( & close_bytes ) ;
} ;
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
// If we have a parse or close we need to send to the server, send them and sync
if ! bytes . is_empty ( ) {
bytes . extend_from_slice ( & sync ( ) ) ;
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
self . send ( & bytes ) . await ? ;
2023-06-18 23:02:34 -07:00
2023-10-25 18:11:57 -04:00
loop {
self . recv ( None ) . await ? ;
if ! self . is_data_available ( ) {
break ;
}
2023-06-18 23:02:34 -07:00
}
2023-10-25 18:11:57 -04:00
}
} ;
2023-06-18 23:02:34 -07:00
2023-11-28 21:13:30 -08:00
// If it's not there, something went bad, I'm guessing bad syntax or permissions error
// on the server.
if ! self . has_prepared_statement ( & parse . name ) {
Err ( Error ::PreparedStatementError )
} else {
Ok ( ( ) )
}
2023-06-18 23:02:34 -07:00
}
2022-02-04 09:28:52 -08:00
/// If the server is still inside a transaction.
/// If the client disconnects while the server is in a transaction, we will clean it up.
2022-02-03 16:25:05 -08:00
pub fn in_transaction ( & self ) -> bool {
2023-01-28 15:36:35 -08:00
debug! ( " Server in transaction: {} " , self . in_transaction ) ;
2022-02-03 16:25:05 -08:00
self . in_transaction
}
2022-02-03 17:06:19 -08:00
2023-11-28 21:13:30 -08:00
/// Currently copying data from client to server or vice-versa.
2023-07-21 01:06:01 -05:00
pub fn in_copy_mode ( & self ) -> bool {
self . in_copy_mode
}
2022-02-04 09:28:52 -08:00
/// We don't buffer all of server responses, e.g. COPY OUT produces too much data.
/// The client is responsible to call `self.recv()` while this method returns true.
2022-02-04 08:26:50 -08:00
pub fn is_data_available ( & self ) -> bool {
self . data_available
}
2022-02-04 09:28:52 -08:00
/// Server & client are out of sync, we must discard this connection.
/// This happens with clients that misbehave.
2022-02-03 17:06:19 -08:00
pub fn is_bad ( & self ) -> bool {
2023-05-02 10:26:40 +02:00
if self . bad {
return self . bad ;
} ;
let cached_resolver = CACHED_RESOLVER . load ( ) ;
if cached_resolver . enabled ( ) {
if let Some ( addr_set ) = & self . addr_set {
if cached_resolver . has_changed ( self . address . host . as_str ( ) , addr_set ) {
warn! (
" DNS changed for {}, it was {:?}. Dropping server connection. " ,
self . address . host . as_str ( ) ,
addr_set
) ;
return true ;
}
}
}
false
2022-02-03 17:06:19 -08:00
}
2022-02-04 09:28:52 -08:00
/// Get server startup information to forward it to the client.
2023-08-10 11:18:46 -04:00
pub fn server_parameters ( & self ) -> ServerParameters {
self . server_parameters . clone ( )
}
pub async fn sync_parameters ( & mut self , parameters : & ServerParameters ) -> Result < ( ) , Error > {
let parameter_diff = self . server_parameters . compare_params ( parameters ) ;
if parameter_diff . is_empty ( ) {
return Ok ( ( ) ) ;
}
let mut query = String ::from ( " " ) ;
for ( key , value ) in parameter_diff {
query . push_str ( & format! ( " SET {} TO ' {} '; " , key , value ) ) ;
}
let res = self . query ( & query ) . await ;
self . cleanup_state . reset ( ) ;
res
2022-02-03 17:48:37 -08:00
}
2022-02-04 09:28:52 -08:00
/// Indicate that this server connection cannot be re-used and must be discarded.
2023-12-04 19:09:41 -05:00
pub fn mark_bad ( & mut self , reason : & str ) {
error! ( " Server {:?} marked bad, reason: {} " , self . address , reason ) ;
2022-02-03 17:06:19 -08:00
self . bad = true ;
}
2022-02-04 16:08:18 -08:00
/// Claim this server as mine for the purposes of query cancellation.
2022-02-04 16:01:35 -08:00
pub fn claim ( & mut self , process_id : i32 , secret_key : i32 ) {
2022-02-24 08:44:41 -08:00
let mut guard = self . client_server_map . lock ( ) ;
2022-02-05 10:02:13 -08:00
guard . insert (
( process_id , secret_key ) ,
(
2022-02-20 22:47:08 -08:00
self . process_id ,
2022-02-05 10:02:13 -08:00
self . secret_key ,
2022-02-15 08:18:01 -08:00
self . address . host . clone ( ) ,
2022-08-25 06:40:56 -07:00
self . address . port ,
2022-02-05 10:02:13 -08:00
) ,
) ;
2022-02-04 16:01:35 -08:00
}
2022-02-04 09:28:52 -08:00
/// Execute an arbitrary query against the server.
2022-02-15 22:45:45 -08:00
/// It will use the simple query protocol.
2022-02-04 09:28:52 -08:00
/// Result will not be returned, so this is useful for things like `SET` or `ROLLBACK`.
2022-02-03 17:32:04 -08:00
pub async fn query ( & mut self , query : & str ) -> Result < ( ) , Error > {
2023-05-12 09:50:52 -07:00
debug! ( " Running `{}` on server {:?} " , query , self . address ) ;
2022-02-24 08:44:41 -08:00
let query = simple_query ( query ) ;
2022-02-03 17:06:19 -08:00
2023-01-17 20:39:55 -05:00
self . send ( & query ) . await ? ;
2022-02-15 22:45:45 -08:00
2022-02-05 14:38:41 -08:00
loop {
2023-08-10 11:18:46 -04:00
let _ = self . recv ( None ) . await ? ;
2022-02-15 22:45:45 -08:00
2022-02-05 14:38:41 -08:00
if ! self . data_available {
break ;
}
}
2022-02-03 17:06:19 -08:00
Ok ( ( ) )
}
2022-02-03 17:32:04 -08:00
2022-09-01 22:06:55 -05:00
/// Perform any necessary cleanup before putting the server
/// connection back in the pool
pub async fn checkin_cleanup ( & mut self ) -> Result < ( ) , Error > {
// Client disconnected with an open transaction on the server connection.
// Pgbouncer behavior is to close the server connection but that can cause
// server connection thrashing if clients repeatedly do this.
// Instead, we ROLLBACK that transaction before putting the connection back in the pool
if self . in_transaction ( ) {
2023-07-27 17:51:23 +02:00
warn! ( target : " pgcat::server::cleanup " , " Server returned while still in transaction, rolling back transaction " ) ;
2022-09-01 22:06:55 -05:00
self . query ( " ROLLBACK " ) . await ? ;
}
2022-10-13 22:33:12 -04:00
// Client disconnected but it performed session-altering operations such as
2022-09-01 22:06:55 -05:00
// SET statement_timeout to 1 or create a prepared statement. We clear that
// to avoid leaking state between clients. For performance reasons we only
2023-08-16 13:08:48 -04:00
// send `RESET ALL` if we think the session is altered instead of just sending
2022-09-01 22:06:55 -05:00
// it before each checkin.
2023-05-18 10:46:55 -07:00
if self . cleanup_state . needs_cleanup ( ) & & self . cleanup_connections {
2023-07-27 17:51:23 +02:00
info! ( target : " pgcat::server::cleanup " , " Server returned with session state altered, discarding state ({}) for application {} " , self . cleanup_state , self . application_name ) ;
2023-08-16 13:08:48 -04:00
let mut reset_string = String ::from ( " RESET ROLE; " ) ;
if self . cleanup_state . needs_cleanup_set {
reset_string . push_str ( " RESET ALL; " ) ;
} ;
if self . cleanup_state . needs_cleanup_prepare {
reset_string . push_str ( " DEALLOCATE ALL; " ) ;
2023-10-25 18:11:57 -04:00
// Since we deallocated all prepared statements, we need to clear the cache
if let Some ( cache ) = & mut self . prepared_statement_cache {
cache . clear ( ) ;
}
2023-08-16 13:08:48 -04:00
} ;
self . query ( & reset_string ) . await ? ;
2023-05-11 20:40:10 -04:00
self . cleanup_state . reset ( ) ;
2022-09-01 22:06:55 -05:00
}
2023-07-21 01:06:01 -05:00
if self . in_copy_mode ( ) {
2023-07-27 17:51:23 +02:00
warn! ( target : " pgcat::server::cleanup " , " Server returned while still in copy-mode " ) ;
2023-07-21 01:06:01 -05:00
}
2022-11-10 02:04:31 +08:00
Ok ( ( ) )
2022-09-01 22:06:55 -05:00
}
2023-03-28 17:19:37 +02:00
/// Get a handle to this server's stats.
pub fn stats(&self) -> Arc<ServerStats> {
    self.stats.clone()
}
2022-02-15 22:45:45 -08:00
/// Get the servers address.
#[ allow(dead_code) ]
2022-02-05 13:15:53 -08:00
pub fn address ( & self ) -> Address {
2022-02-15 08:18:01 -08:00
self . address . clone ( )
2022-02-05 13:15:53 -08:00
}
2022-02-20 22:47:08 -08:00
2022-08-11 17:42:40 -04:00
/// Timestamp of the server's latest response.
pub fn last_activity(&self) -> SystemTime {
    self.last_activity
}
2022-09-07 22:37:17 -05:00
2023-08-16 13:08:48 -04:00
// Marks a connection as needing cleanup at checkin
2022-09-07 22:37:17 -05:00
pub fn mark_dirty ( & mut self ) {
2023-05-11 20:40:10 -04:00
self . cleanup_state . set_true ( ) ;
2022-09-07 22:37:17 -05:00
}
2023-03-10 06:23:51 -06:00
/// Forward bytes to the mirror manager, if mirroring is configured.
pub fn mirror_send(&mut self, bytes: &BytesMut) {
    if let Some(mirror) = &mut self.mirror_manager {
        mirror.send(bytes);
    }
}
pub fn mirror_disconnect ( & mut self ) {
2023-10-10 09:18:21 -07:00
if let Some ( manager ) = self . mirror_manager . as_mut ( ) {
manager . disconnect ( )
2023-03-10 06:23:51 -06:00
}
}
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
// This is so we can execute out of band queries to the server.
// The connection will be opened, the query executed and closed.
pub async fn exec_simple_query (
address : & Address ,
user : & User ,
query : & str ,
) -> Result < Vec < String > , Error > {
let client_server_map : ClientServerMap = Arc ::new ( Mutex ::new ( HashMap ::new ( ) ) ) ;
debug! ( " Connecting to server to obtain auth hashes. " ) ;
let mut server = Server ::startup (
address ,
user ,
& address . database ,
client_server_map ,
Arc ::new ( ServerStats ::default ( ) ) ,
Arc ::new ( RwLock ::new ( None ) ) ,
2023-05-18 10:46:55 -07:00
true ,
2023-08-16 14:01:21 -04:00
false ,
2023-10-25 18:11:57 -04:00
0 ,
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
)
. await ? ;
debug! ( " Connected!, sending query. " ) ;
server . send ( & simple_query ( query ) ) . await ? ;
2023-08-10 11:18:46 -04:00
let mut message = server . recv ( None ) . await ? ;
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
2023-10-10 09:18:21 -07:00
parse_query_message ( & mut message ) . await
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
}
}
async fn parse_query_message ( message : & mut BytesMut ) -> Result < Vec < String > , Error > {
let mut pair = Vec ::< String > ::new ( ) ;
match message ::backend ::Message ::parse ( message ) {
Ok ( Some ( message ::backend ::Message ::RowDescription ( _description ) ) ) = > { }
Ok ( Some ( message ::backend ::Message ::ErrorResponse ( err ) ) ) = > {
return Err ( Error ::ProtocolSyncError ( format! (
" Protocol error parsing response. Err: {:?} " ,
err . fields ( )
. iterator ( )
. fold ( String ::default ( ) , | acc , element | acc
+ element . unwrap ( ) . value ( ) )
) ) )
}
Ok ( _ ) = > {
return Err ( Error ::ProtocolSyncError (
" Protocol error, expected Row Description. " . to_string ( ) ,
) )
}
Err ( err ) = > {
return Err ( Error ::ProtocolSyncError ( format! (
" Protocol error parsing response. Err: {:?} " ,
err
) ) )
}
}
while ! message . is_empty ( ) {
match message ::backend ::Message ::parse ( message ) {
Ok ( postgres_message ) = > {
match postgres_message {
Some ( message ::backend ::Message ::DataRow ( data ) ) = > {
let buf = data . buffer ( ) ;
trace! ( " Data: {:?} " , buf ) ;
for item in data . ranges ( ) . iterator ( ) {
match item . as_ref ( ) {
Ok ( range ) = > match range {
Some ( range ) = > {
pair . push ( String ::from_utf8_lossy ( & buf [ range . clone ( ) ] ) . to_string ( ) ) ;
}
None = > return Err ( Error ::ProtocolSyncError ( String ::from (
" Data expected while receiving query auth data, found nothing. " ,
) ) ) ,
} ,
Err ( err ) = > {
return Err ( Error ::ProtocolSyncError ( format! (
" Data error, err: {:?} " ,
err
) ) )
}
}
}
}
Some ( message ::backend ::Message ::CommandComplete ( _ ) ) = > { }
Some ( message ::backend ::Message ::ReadyForQuery ( _ ) ) = > { }
_ = > {
return Err ( Error ::ProtocolSyncError (
" Unexpected message while receiving auth query data. " . to_string ( ) ,
) )
}
}
}
Err ( err ) = > {
return Err ( Error ::ProtocolSyncError ( format! (
" Parse error, err: {:?} " ,
err
) ) )
}
} ;
}
Ok ( pair )
2022-02-03 15:17:04 -08:00
}
2022-02-12 10:16:05 -08:00
impl Drop for Server {
2022-02-15 22:45:45 -08:00
/// Try to do a clean shut down. Best effort because
/// the socket is in non-blocking mode, so it may not be ready
/// for a write.
2022-02-12 10:16:05 -08:00
fn drop ( & mut self ) {
2023-03-10 06:23:51 -06:00
self . mirror_disconnect ( ) ;
2023-03-28 17:19:37 +02:00
// Update statistics
self . stats . disconnect ( ) ;
2022-02-20 22:47:08 -08:00
2023-04-30 09:41:46 -07:00
let mut bytes = BytesMut ::with_capacity ( 5 ) ;
2022-02-12 10:16:05 -08:00
bytes . put_u8 ( b 'X' ) ;
bytes . put_i32 ( 4 ) ;
2023-04-30 09:41:46 -07:00
match self . stream . get_mut ( ) . try_write ( & bytes ) {
Ok ( 5 ) = > ( ) ,
_ = > debug! ( " Dirty shutdown " ) ,
2022-02-12 10:16:05 -08:00
} ;
let now = chrono ::offset ::Utc ::now ( ) . naive_utc ( ) ;
let duration = now - self . connected_at ;
2023-05-05 08:27:19 -07:00
let message = if self . bad {
" Server connection terminated "
} else {
" Server connection closed "
} ;
2022-02-20 22:47:08 -08:00
info! (
2023-05-05 08:27:19 -07:00
" {} {:?}, session duration: {} " ,
message ,
2022-09-01 13:16:22 -05:00
self . address ,
2022-02-12 10:16:05 -08:00
crate ::format_duration ( & duration )
) ;
}
}