2023-03-28 17:19:37 +02:00
|
|
|
use std::sync::Arc;
|
|
|
|
|
|
2023-03-10 06:23:51 -06:00
|
|
|
/// A mirrored PostgreSQL client.
|
|
|
|
|
/// Packets arrive to us through a channel from the main client and we send them to the server.
|
|
|
|
|
use bb8::Pool;
|
|
|
|
|
use bytes::{Bytes, BytesMut};
|
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
|
|
|
use parking_lot::RwLock;
|
2023-03-10 06:23:51 -06:00
|
|
|
|
|
|
|
|
use crate::config::{get_config, Address, Role, User};
|
2023-05-23 08:44:49 -05:00
|
|
|
use crate::pool::{ClientServerMap, ServerPool};
|
2023-03-10 06:23:51 -06:00
|
|
|
use log::{error, info, trace, warn};
|
|
|
|
|
use tokio::sync::mpsc::{channel, Receiver, Sender};
|
|
|
|
|
|
|
|
|
|
pub struct MirroredClient {
    // Address of the mirror server; `role` is overwritten to `Role::Mirror`
    // by `MirroringManager::from_addresses` before construction.
    address: Address,

    // Credentials used to open the mirror's server connection.
    user: User,

    // Database name to connect to on the mirror.
    database: String,

    // Raw packets copied from the primary client connection; replayed
    // verbatim against the mirror server.
    bytes_rx: Receiver<Bytes>,

    // Shutdown signal: any message here makes the mirror task exit its loop.
    disconnect_rx: Receiver<()>,
}
|
|
|
|
|
|
|
|
|
|
impl MirroredClient {
|
|
|
|
|
async fn create_pool(&self) -> Pool<ServerPool> {
|
|
|
|
|
let config = get_config();
|
|
|
|
|
let default = std::time::Duration::from_millis(10_000).as_millis() as u64;
|
2023-10-25 18:11:57 -04:00
|
|
|
let (connection_timeout, idle_timeout, _cfg, prepared_statement_cache_size) =
|
2023-03-28 17:19:37 +02:00
|
|
|
match config.pools.get(&self.address.pool_name) {
|
|
|
|
|
Some(cfg) => (
|
|
|
|
|
cfg.connect_timeout.unwrap_or(default),
|
|
|
|
|
cfg.idle_timeout.unwrap_or(default),
|
|
|
|
|
cfg.clone(),
|
2023-10-25 18:11:57 -04:00
|
|
|
cfg.prepared_statements_cache_size,
|
2023-03-28 17:19:37 +02:00
|
|
|
),
|
2023-10-25 18:11:57 -04:00
|
|
|
None => (default, default, crate::config::Pool::default(), 0),
|
2023-03-28 17:19:37 +02:00
|
|
|
};
|
|
|
|
|
|
2023-03-10 06:23:51 -06:00
|
|
|
let manager = ServerPool::new(
|
|
|
|
|
self.address.clone(),
|
|
|
|
|
self.user.clone(),
|
|
|
|
|
self.database.as_str(),
|
|
|
|
|
ClientServerMap::default(),
|
Auth passthrough (auth_query) (#266)
* Add a new exec_simple_query method
This adds a new `exec_simple_query` method so we can make 'out of band'
queries to servers that don't interfere with pools at all.
In order to reuse startup code for making these simple queries,
we need to set the stats (`Reporter`) optional, so using these
simple queries wont interfere with stats.
* Add auth passthough (auth_query)
Adds a feature that allows setting auth passthrough for md5 auth.
It adds 3 new (general and pool) config parameters:
- `auth_query`: An string containing a query that will be executed on boot
to obtain the hash of a given user. This query have to use a placeholder `$1`,
so pgcat can replace it with the user its trying to fetch the hash from.
- `auth_query_user`: The user to use for connecting to the server and executing the
auth_query.
- `auth_query_password`: The password to use for connecting to the server and executing the
auth_query.
The configuration can be done either on the general config (so pools share them) or in a per-pool basis.
The behavior is, at boot time, when validating server connections, a hash is fetched per server
and stored in the pool. When new server connections are created, and no cleartext password is specified,
the obtained hash is used for creating them, if the hash could not be obtained for whatever reason, it retries
it.
When client authentication is tried, it uses cleartext passwords if specified, it not, it checks whether
we have query_auth set up, if so, it tries to use the obtained hash for making client auth. If there is no
hash (we could not obtain one when validating the connection), a new fetch is tried.
Once we have a hash, we authenticate using it against whathever the client has sent us, if there is a failure
we refetch the hash and retry auth (so password changes can be done).
The idea with this 'retrial' mechanism is to make it fault tolerant, so if for whatever reason hash could not be
obtained during connection validation, or the password has change, we can still connect later.
* Add documentation for Auth passthrough
2023-03-30 22:29:23 +02:00
|
|
|
Arc::new(RwLock::new(None)),
|
2023-05-12 09:50:52 -07:00
|
|
|
None,
|
2023-05-18 10:46:55 -07:00
|
|
|
true,
|
2023-08-16 14:01:21 -04:00
|
|
|
false,
|
2023-10-25 18:11:57 -04:00
|
|
|
prepared_statement_cache_size,
|
2023-03-10 06:23:51 -06:00
|
|
|
);
|
|
|
|
|
|
|
|
|
|
Pool::builder()
|
|
|
|
|
.max_size(1)
|
|
|
|
|
.connection_timeout(std::time::Duration::from_millis(connection_timeout))
|
|
|
|
|
.idle_timeout(Some(std::time::Duration::from_millis(idle_timeout)))
|
|
|
|
|
.test_on_check_out(false)
|
|
|
|
|
.build(manager)
|
|
|
|
|
.await
|
|
|
|
|
.unwrap()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn start(mut self) {
|
|
|
|
|
tokio::spawn(async move {
|
|
|
|
|
let pool = self.create_pool().await;
|
|
|
|
|
let address = self.address.clone();
|
|
|
|
|
loop {
|
|
|
|
|
let mut server = match pool.get().await {
|
|
|
|
|
Ok(server) => server,
|
|
|
|
|
Err(err) => {
|
|
|
|
|
error!(
|
|
|
|
|
"Failed to get connection from pool, Discarding message {:?}, {:?}",
|
|
|
|
|
err,
|
|
|
|
|
address.clone()
|
|
|
|
|
);
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
tokio::select! {
|
|
|
|
|
// Exit channel events
|
|
|
|
|
_ = self.disconnect_rx.recv() => {
|
|
|
|
|
info!("Got mirror exit signal, exiting {:?}", address.clone());
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Incoming data from server (we read to clear the socket buffer and discard the data)
|
2023-08-10 11:18:46 -04:00
|
|
|
recv_result = server.recv(None) => {
|
2023-03-10 06:23:51 -06:00
|
|
|
match recv_result {
|
|
|
|
|
Ok(message) => trace!("Received from mirror: {} {:?}", String::from_utf8_lossy(&message[..]), address.clone()),
|
|
|
|
|
Err(err) => {
|
2023-12-04 19:09:41 -05:00
|
|
|
server.mark_bad(
|
|
|
|
|
format!("Failed to send to mirror, Discarding message {:?}, {:?}", err, address.clone()).as_str()
|
|
|
|
|
);
|
2023-03-10 06:23:51 -06:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Messages to send to the server
|
|
|
|
|
message = self.bytes_rx.recv() => {
|
|
|
|
|
match message {
|
|
|
|
|
Some(bytes) => {
|
|
|
|
|
match server.send(&BytesMut::from(&bytes[..])).await {
|
|
|
|
|
Ok(_) => trace!("Sent to mirror: {} {:?}", String::from_utf8_lossy(&bytes[..]), address.clone()),
|
|
|
|
|
Err(err) => {
|
2023-12-04 19:09:41 -05:00
|
|
|
server.mark_bad(
|
|
|
|
|
format!("Failed to receive from mirror {:?} {:?}", err, address.clone()).as_str()
|
|
|
|
|
);
|
2023-03-10 06:23:51 -06:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
None => {
|
|
|
|
|
info!("Mirror channel closed, exiting {:?}", address.clone());
|
|
|
|
|
break;
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
/// Fans traffic out from one client to a set of `MirroredClient` tasks.
pub struct MirroringManager {
    // One packet sender per mirror task (paired with its `bytes_rx`).
    pub byte_senders: Vec<Sender<Bytes>>,

    // One shutdown sender per mirror task (paired with its `disconnect_rx`).
    pub disconnect_senders: Vec<Sender<()>>,
}
|
|
|
|
|
impl MirroringManager {
|
|
|
|
|
pub fn from_addresses(
|
|
|
|
|
user: User,
|
|
|
|
|
database: String,
|
|
|
|
|
addresses: Vec<Address>,
|
|
|
|
|
) -> MirroringManager {
|
|
|
|
|
let mut byte_senders: Vec<Sender<Bytes>> = vec![];
|
|
|
|
|
let mut exit_senders: Vec<Sender<()>> = vec![];
|
|
|
|
|
|
|
|
|
|
addresses.iter().for_each(|mirror| {
|
2023-03-15 17:58:45 -05:00
|
|
|
let (bytes_tx, bytes_rx) = channel::<Bytes>(10);
|
2023-03-10 06:23:51 -06:00
|
|
|
let (exit_tx, exit_rx) = channel::<()>(1);
|
|
|
|
|
let mut addr = mirror.clone();
|
|
|
|
|
addr.role = Role::Mirror;
|
|
|
|
|
let client = MirroredClient {
|
|
|
|
|
user: user.clone(),
|
|
|
|
|
database: database.to_owned(),
|
|
|
|
|
address: addr,
|
|
|
|
|
bytes_rx,
|
|
|
|
|
disconnect_rx: exit_rx,
|
|
|
|
|
};
|
2023-10-10 09:18:21 -07:00
|
|
|
exit_senders.push(exit_tx);
|
|
|
|
|
byte_senders.push(bytes_tx);
|
2023-03-10 06:23:51 -06:00
|
|
|
client.start();
|
|
|
|
|
});
|
|
|
|
|
|
|
|
|
|
Self {
|
2023-10-10 09:18:21 -07:00
|
|
|
byte_senders,
|
2023-03-10 06:23:51 -06:00
|
|
|
disconnect_senders: exit_senders,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2023-10-10 09:18:21 -07:00
|
|
|
pub fn send(&mut self, bytes: &BytesMut) {
|
2023-03-15 17:58:45 -05:00
|
|
|
// We want to avoid performing an allocation if we won't be able to send the message
|
|
|
|
|
// There is a possibility of a race here where we check the capacity and then the channel is
|
|
|
|
|
// closed or the capacity is reduced to 0, but mirroring is best effort anyway
|
|
|
|
|
if self
|
|
|
|
|
.byte_senders
|
|
|
|
|
.iter()
|
|
|
|
|
.all(|sender| sender.capacity() == 0 || sender.is_closed())
|
|
|
|
|
{
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
let immutable_bytes = bytes.clone().freeze();
|
|
|
|
|
self.byte_senders.iter_mut().for_each(|sender| {
|
|
|
|
|
match sender.try_send(immutable_bytes.clone()) {
|
2023-03-10 06:23:51 -06:00
|
|
|
Ok(_) => {}
|
|
|
|
|
Err(err) => {
|
|
|
|
|
warn!("Failed to send bytes to a mirror channel {}", err);
|
|
|
|
|
}
|
2023-03-15 17:58:45 -05:00
|
|
|
}
|
|
|
|
|
});
|
2023-03-10 06:23:51 -06:00
|
|
|
}
|
|
|
|
|
|
2023-10-10 09:18:21 -07:00
|
|
|
pub fn disconnect(&mut self) {
|
2023-03-10 06:23:51 -06:00
|
|
|
self.disconnect_senders
|
|
|
|
|
.iter_mut()
|
|
|
|
|
.for_each(|sender| match sender.try_send(()) {
|
|
|
|
|
Ok(_) => {}
|
|
|
|
|
Err(err) => {
|
|
|
|
|
warn!(
|
|
|
|
|
"Failed to send disconnect signal to a mirror channel {}",
|
|
|
|
|
err
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
}
|
|
|
|
|
}
|