Compare commits


1 Commit

Author: Javier Goday
SHA1: f782206578
Message: #829: read/write splitting on CTE mutable statements
Date: 2024-10-16 20:12:25 +02:00
13 changed files with 55 additions and 191 deletions

.gitignore vendored

@@ -12,4 +12,3 @@ dev/cache
!dev/cache/.keepme
.venv
**/__pycache__
.bundle


@@ -309,15 +309,6 @@ If the client doesn't specify, PgCat routes traffic to this role by default.
`replica` round-robin between replicas only without touching the primary,
`primary` all queries go to the primary unless otherwise specified.
### replica_to_primary_failover_enabled
```
path: pools.<pool_name>.replica_to_primary_failover_enabled
default: "false"
```
If set to true, when the specified role is `replica` (either by setting `default_role` or manually)
and all replicas are banned, queries will be sent to the primary (until a replica is back online).
### prepared_statements_cache_size
```
path: general.prepared_statements_cache_size

Cargo.lock generated

@@ -192,11 +192,12 @@ checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"
[[package]]
name = "bb8"
version = "0.8.6"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d89aabfae550a5c44b43ab941844ffcd2e993cb6900b342debf59e9ea74acdb8"
checksum = "98b4b0f25f18bcdc3ac72bdb486ed0acf7e185221fd4dc985bc15db5800b0ba2"
dependencies = [
"async-trait",
"futures-channel",
"futures-util",
"parking_lot",
"tokio",


@@ -8,7 +8,7 @@ edition = "2021"
tokio = { version = "1", features = ["full"] }
bytes = "1"
md-5 = "0.10"
bb8 = "=0.8.6"
bb8 = "0.8.1"
async-trait = "0.1"
rand = "0.8"
chrono = "0.4"


@@ -175,7 +175,7 @@ The setting will persist until it's changed again or the client disconnects.
By default, all queries are routed to the first available server; `default_role` setting controls this behavior.
### Failover
All servers are checked with a `;` (very fast) query before being given to a client. Additionally, the server health is monitored with every client query that it processes. If the server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban. The queries are routed to the remaining servers. If `replica_to_primary_failover_enabled` is set to true and all replicas become banned, the query will be routed to the primary. If `replica_to_primary_failover_enabled` is false and all servers (replicas) become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.
All servers are checked with a `;` (very fast) query before being given to a client. Additionally, the server health is monitored with every client query that it processes. If the server is not reachable, it will be banned and cannot serve any more transactions for the duration of the ban. The queries are routed to the remaining servers. If all servers become banned, the ban list is cleared: this is a safety precaution against false positives. The primary can never be banned.
The ban time can be changed with `ban_time`. The default is 60 seconds.
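The replica-to-primary failover and ban-clearing rules described in this paragraph can be sketched as a small, self-contained model. The sketch below is illustrative only and does not mirror PgCat's pool implementation (the real logic appears in the `pool.rs` hunks later in this diff); the `Address` struct, the `candidates` function, and the plain ban vector are invented for the example.
```rust
// Simplified model of the failover rules described above (illustrative only).
#[derive(Clone, Copy, PartialEq, Debug)]
enum Role {
    Primary,
    Replica,
}

#[derive(Clone, Copy, Debug)]
struct Address {
    id: usize,
    role: Role,
}

/// Pick candidate servers for a query that should go to replicas.
/// With failover enabled, the primary is used once every replica is banned;
/// with it disabled, a fully banned replica set is unbanned instead.
fn candidates(
    addresses: &[Address],
    banned: &mut Vec<usize>,
    replica_to_primary_failover_enabled: bool,
) -> Vec<Address> {
    let mut replicas: Vec<Address> = addresses
        .iter()
        .copied()
        .filter(|a| a.role == Role::Replica && !banned.contains(&a.id))
        .collect();

    if replicas.is_empty() {
        if replica_to_primary_failover_enabled {
            // All replicas banned: route to the primary until a replica recovers.
            return addresses
                .iter()
                .copied()
                .filter(|a| a.role == Role::Primary)
                .collect();
        }
        // Safety precaution against false positives: clear the ban list.
        banned.clear();
        replicas = addresses
            .iter()
            .copied()
            .filter(|a| a.role == Role::Replica)
            .collect();
    }
    replicas
}

fn main() {
    let servers = [
        Address { id: 0, role: Role::Primary },
        Address { id: 1, role: Role::Replica },
        Address { id: 2, role: Role::Replica },
    ];
    let banned = vec![1, 2]; // both replicas currently banned

    // Failover enabled: the primary serves reads while replicas are down.
    println!("{:?}", candidates(&servers, &mut banned.clone(), true));
    // Failover disabled: the ban list is cleared and replicas are retried.
    println!("{:?}", candidates(&servers, &mut banned.clone(), false));
}
```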


@@ -5,4 +5,4 @@ maintainers:
- name: Wildcard
email: support@w6d.io
appVersion: "1.2.0"
version: 0.2.4
version: 0.2.1


@@ -1,2 +1 @@
sign: false
pages_branch: main


@@ -179,7 +179,7 @@ primary_reads_enabled = true
# `random`: picks a shard at random
# `random_healthy`: picks a shard at random favoring shards with the least number of recent errors
# `shard_<number>`: e.g. shard_0, shard_4, etc. picks a specific shard, every time
# default_shard = "shard_0"
# no_shard_specified_behavior = "shard_0"
# So what if you wanted to implement a different hashing function,
# or you've already built one and you want this pooler to use it?
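The `no_shard_specified_behavior` values documented in the comment above (`random`, `random_healthy`, `shard_<number>`) can be illustrated with a short, hypothetical sketch. This is not PgCat's router code: the enum, the function names, and the tie-breaking rule used for `random_healthy` are assumptions made for the example (the `rand` crate is already a dependency of this repository).
```rust
// Hypothetical interpretation of `no_shard_specified_behavior` (not PgCat's code).
use rand::Rng;

enum NoShardBehavior {
    Random,
    RandomHealthy,
    Shard(usize),
}

/// Parse the configured value, e.g. "random", "random_healthy" or "shard_3".
fn parse_behavior(value: &str) -> Option<NoShardBehavior> {
    match value {
        "random" => Some(NoShardBehavior::Random),
        "random_healthy" => Some(NoShardBehavior::RandomHealthy),
        other => other
            .strip_prefix("shard_")
            .and_then(|n| n.parse().ok())
            .map(NoShardBehavior::Shard),
    }
}

/// Pick a shard when the client did not name one. The shard count is taken
/// from the length of the per-shard recent-error counters for simplicity.
fn pick_shard(behavior: &NoShardBehavior, recent_errors: &[u64]) -> usize {
    let mut rng = rand::thread_rng();
    match behavior {
        NoShardBehavior::Shard(n) => *n,
        NoShardBehavior::Random => rng.gen_range(0..recent_errors.len()),
        NoShardBehavior::RandomHealthy => {
            // Favor healthy shards: pick randomly among those tied for the
            // fewest recent errors (one possible reading of "favoring").
            let min = *recent_errors.iter().min().unwrap();
            let healthiest: Vec<usize> = recent_errors
                .iter()
                .enumerate()
                .filter(|(_, e)| **e == min)
                .map(|(i, _)| i)
                .collect();
            healthiest[rng.gen_range(0..healthiest.len())]
        }
    }
}

fn main() {
    let behavior = parse_behavior("random_healthy").expect("unknown setting");
    // Shards 1 and 2 have no recent errors, so one of them is chosen.
    println!("chosen shard: {}", pick_shard(&behavior, &[3, 0, 0, 7]));
}
```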


@@ -881,7 +881,6 @@ where
};
query_router.update_pool_settings(&pool.settings);
query_router.set_default_role();
// Our custom protocol loop.
// We expect the client to either start a transaction with regular queries


@@ -541,9 +541,6 @@ pub struct Pool {
#[serde(default = "Pool::default_default_role")]
pub default_role: String,
#[serde(default)] // False
pub replica_to_primary_failover_enabled: bool,
#[serde(default)] // False
pub query_parser_enabled: bool,
@@ -737,7 +734,6 @@ impl Default for Pool {
pool_mode: Self::default_pool_mode(),
load_balancing_mode: Self::default_load_balancing_mode(),
default_role: String::from("any"),
replica_to_primary_failover_enabled: false,
query_parser_enabled: false,
query_parser_max_length: None,
query_parser_read_write_splitting: false,


@@ -162,9 +162,6 @@ pub struct PoolSettings {
// Default server role to connect to.
pub default_role: Option<Role>,
// Whether or not we should use primary when replicas are unavailable
pub replica_to_primary_failover_enabled: bool,
// Enable/disable query parser.
pub query_parser_enabled: bool,
@@ -222,7 +219,6 @@ impl Default for PoolSettings {
user: User::default(),
db: String::default(),
default_role: None,
replica_to_primary_failover_enabled: false,
query_parser_enabled: false,
query_parser_max_length: None,
query_parser_read_write_splitting: false,
@@ -535,8 +531,6 @@ impl ConnectionPool {
"primary" => Some(Role::Primary),
_ => unreachable!(),
},
replica_to_primary_failover_enabled: pool_config
.replica_to_primary_failover_enabled,
query_parser_enabled: pool_config.query_parser_enabled,
query_parser_max_length: pool_config.query_parser_max_length,
query_parser_read_write_splitting: pool_config
@@ -737,19 +731,6 @@ impl ConnectionPool {
});
}
// If the role is replica and we allow sending traffic to primary when replicas are unavailable,
// we add primary address at the end of the list of candidates, this way it will be tried when
// replicas are all unavailable.
if role == Role::Replica && self.settings.replica_to_primary_failover_enabled {
let mut primaries = self
.addresses
.iter()
.flatten()
.filter(|address| address.role == Role::Primary)
.collect::<Vec<&Address>>();
candidates.insert(0, primaries.pop().unwrap());
}
// Indicate we're waiting on a server connection from a pool.
let now = Instant::now();
client_stats.waiting();
@@ -954,28 +935,24 @@ impl ConnectionPool {
return true;
}
// If we have replica to primary failover we should not unban replicas
// as we still have the primary to serve traffic.
if !self.settings.replica_to_primary_failover_enabled {
// Check if all replicas are banned, in that case unban all of them
let replicas_available = self.addresses[address.shard]
.iter()
.filter(|addr| addr.role == Role::Replica)
.count();
// Check if all replicas are banned, in that case unban all of them
let replicas_available = self.addresses[address.shard]
.iter()
.filter(|addr| addr.role == Role::Replica)
.count();
debug!("Available targets: {}", replicas_available);
debug!("Available targets: {}", replicas_available);
let read_guard = self.banlist.read();
let all_replicas_banned = read_guard[address.shard].len() == replicas_available;
drop(read_guard);
let read_guard = self.banlist.read();
let all_replicas_banned = read_guard[address.shard].len() == replicas_available;
drop(read_guard);
if all_replicas_banned {
let mut write_guard = self.banlist.write();
warn!("Unbanning all replicas.");
write_guard[address.shard].clear();
if all_replicas_banned {
let mut write_guard = self.banlist.write();
warn!("Unbanning all replicas.");
write_guard[address.shard].clear();
return true;
}
return true;
}
// Check if ban time is expired


@@ -386,14 +386,14 @@ impl QueryRouter {
}
}
/// Determines if a query is a mutation or not.
fn is_mutation_query(q: &sqlparser::ast::Query) -> bool {
/// Determines if a query is mutable or not.
fn query_is_mutable_statement(q: &sqlparser::ast::Query) -> bool {
use sqlparser::ast::*;
match q.body.as_ref() {
SetExpr::Insert(_) => true,
SetExpr::Update(_) => true,
SetExpr::Query(q) => Self::is_mutation_query(q),
SetExpr::Query(q) => Self::query_is_mutable_statement(q),
_ => false,
}
}
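The renamed helper above is the core of change #829: a query whose body is an `INSERT`, an `UPDATE`, or a nested query containing one must be routed to the primary, even when it appears inside a CTE. As a standalone illustration, the sketch below parses a data-modifying CTE with `sqlparser` and applies the same check to the outer body and to each CTE; the walk over `query.with` is an assumption for the example, and exact AST field names depend on the `sqlparser` release in use.
```rust
// Standalone sketch: detecting a mutating CTE with sqlparser (not PgCat's router).
use sqlparser::ast::{SetExpr, Statement};
use sqlparser::dialect::PostgreSqlDialect;
use sqlparser::parser::Parser;

// Same recursive check as the function in the diff above.
fn query_is_mutable_statement(q: &sqlparser::ast::Query) -> bool {
    match q.body.as_ref() {
        SetExpr::Insert(_) | SetExpr::Update(_) => true,
        SetExpr::Query(inner) => query_is_mutable_statement(inner),
        _ => false,
    }
}

fn main() {
    // The outer statement is a SELECT, but the CTE body writes to `archive`.
    let sql = "WITH moved AS (INSERT INTO archive SELECT * FROM events RETURNING id) \
               SELECT count(*) FROM moved";
    let ast = Parser::parse_sql(&PostgreSqlDialect {}, sql).expect("parse failed");

    if let Statement::Query(query) = &ast[0] {
        // The top-level body alone looks read-only...
        let outer_mutates = query_is_mutable_statement(query);
        // ...so the CTEs in the WITH clause have to be checked as well.
        let cte_mutates = query
            .with
            .as_ref()
            .map(|w| {
                w.cte_tables
                    .iter()
                    .any(|cte| query_is_mutable_statement(&cte.query))
            })
            .unwrap_or(false);

        let route_to_primary = outer_mutates || cte_mutates;
        println!("route to primary: {}", route_to_primary); // prints: true
    }
}
```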
@@ -440,9 +440,9 @@ impl QueryRouter {
};
let has_locks = !query.locks.is_empty();
let has_mutation = Self::is_mutation_query(query);
let is_mutable_statement = Self::query_is_mutable_statement(query);
if has_locks || has_mutation {
if has_locks || is_mutable_statement {
self.active_role = Some(Role::Primary);
} else if !visited_write_statement {
// If we already visited a write statement, we should be going to the primary.
@@ -1061,11 +1061,6 @@ impl QueryRouter {
self.active_shard
}
/// Set active_role as the default_role specified in the pool.
pub fn set_default_role(&mut self) {
self.active_role = self.pool_settings.default_role;
}
/// Get the current desired server role we should be talking to.
pub fn role(&self) -> Option<Role> {
self.active_role
@@ -1459,7 +1454,6 @@ mod test {
load_balancing_mode: crate::config::LoadBalancingMode::Random,
shards: 2,
user: crate::config::User::default(),
replica_to_primary_failover_enabled: false,
default_role: Some(Role::Replica),
query_parser_enabled: true,
query_parser_max_length: None,
@@ -1539,7 +1533,6 @@ mod test {
shards: 5,
user: crate::config::User::default(),
default_role: Some(Role::Replica),
replica_to_primary_failover_enabled: false,
query_parser_enabled: true,
query_parser_max_length: None,
query_parser_read_write_splitting: true,


@@ -1,5 +1,5 @@
# frozen_string_literal: true
require_relative "spec_helper"
require_relative 'spec_helper'
describe "Random Load Balancing" do
let(:processes) { Helpers::Pgcat.single_shard_setup("sharded_db", 5) }
@@ -8,7 +8,7 @@ describe "Random Load Balancing" do
processes.pgcat.shutdown
end
context("under regular circumstances") do
context "under regular circumstances" do
it "balances query volume between all instances" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
@@ -22,14 +22,14 @@ describe "Random Load Balancing" do
failed_count += 1
end
expect(failed_count).to(eq(0))
expect(failed_count).to eq(0)
processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share|
expect(instance_share).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
end
end
end
context("when some replicas are down") do
context "when some replicas are down" do
it "balances query volume between working instances" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
expected_share = QUERY_COUNT / (processes.all_databases.count - 2)
@@ -49,9 +49,9 @@ describe "Random Load Balancing" do
processes.all_databases.each do |instance|
queries_routed = instance.count_select_1_plus_2
if processes.replicas[0..1].include?(instance)
expect(queries_routed).to(eq(0))
expect(queries_routed).to eq(0)
else
expect(queries_routed).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
expect(queries_routed).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
end
end
end
@@ -65,7 +65,7 @@ describe "Least Outstanding Queries Load Balancing" do
processes.pgcat.shutdown
end
context("under homogeneous load") do
context "under homogeneous load" do
it "balances query volume between all instances" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
@@ -79,15 +79,15 @@ describe "Least Outstanding Queries Load Balancing" do
failed_count += 1
end
expect(failed_count).to(eq(0))
expect(failed_count).to eq(0)
processes.all_databases.map(&:count_select_1_plus_2).each do |instance_share|
expect(instance_share).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
end
end
end
context("under heterogeneous load") do
xit("balances query volume between all instances based on how busy they are") do
context "under heterogeneous load" do
xit "balances query volume between all instances based on how busy they are" do
slow_query_count = 2
threads = Array.new(slow_query_count) do
Thread.new do
@@ -108,32 +108,31 @@ describe "Least Outstanding Queries Load Balancing" do
failed_count += 1
end
expect(failed_count).to(eq(0))
expect(failed_count).to eq(0)
# Under LOQ, we expect replicas running the slow pg_sleep
# to get no selects
expect(
processes
.all_databases
.map(&:count_select_1_plus_2)
.count { |instance_share| instance_share == 0 }
)
.to(eq(slow_query_count))
processes.
all_databases.
map(&:count_select_1_plus_2).
count { |instance_share| instance_share == 0 }
).to eq(slow_query_count)
# We also expect the quick queries to be spread across
# the idle servers only
processes
.all_databases
.map(&:count_select_1_plus_2)
.reject { |instance_share| instance_share == 0 }
.each do |instance_share|
expect(instance_share).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
end
processes.
all_databases.
map(&:count_select_1_plus_2).
reject { |instance_share| instance_share == 0 }.
each do |instance_share|
expect(instance_share).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
end
threads.map(&:join)
end
end
context("when some replicas are down") do
context "when some replicas are down" do
it "balances query volume between working instances" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
expected_share = QUERY_COUNT / (processes.all_databases.count - 2)
@@ -150,106 +149,16 @@ describe "Least Outstanding Queries Load Balancing" do
end
end
expect(failed_count).to(be <= 2)
expect(failed_count).to be <= 2
processes.all_databases.each do |instance|
queries_routed = instance.count_select_1_plus_2
if processes.replicas[0..1].include?(instance)
expect(queries_routed).to(eq(0))
expect(queries_routed).to eq(0)
else
expect(queries_routed).to(be_within(expected_share * MARGIN_OF_ERROR).of(expected_share))
expect(queries_routed).to be_within(expected_share * MARGIN_OF_ERROR).of(expected_share)
end
end
end
end
end
describe "Candidate filtering based on `default_pool`" do
let(:processes) {
Helpers::Pgcat.single_shard_setup("sharded_db", 5, "transaction", "random", "debug", pool_settings)
}
after do
processes.all_databases.map(&:reset)
processes.pgcat.shutdown
end
context("with default_pool set to replicas") do
context("when all replicas are down ") do
let(:pool_settings) do
{
"default_role" => "replica",
"replica_to_primary_failover_enabled" => replica_to_primary_failover_enabled
}
end
context("with `replica_to_primary_failover_enabled` set to false`") do
let(:replica_to_primary_failover_enabled) { false }
it(
"unbans them automatically to prevent false positives in health checks that could make all replicas unavailable"
) do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
failed_count = 0
number_of_replicas = processes[:replicas].length
# Take down all replicas
processes[:replicas].each(&:take_down)
(number_of_replicas + 1).times do |n|
conn.async_exec("SELECT 1 + 2")
rescue
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
failed_count += 1
end
expect(failed_count).to(eq(number_of_replicas + 1))
failed_count = 0
# Ban_time is configured to 60 so this reset will only work
# if the replicas are unbanned automatically
processes[:replicas].each(&:reset)
number_of_replicas.times do
conn.async_exec("SELECT 1 + 2")
rescue
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
failed_count += 1
end
expect(failed_count).to(eq(0))
end
end
context("with `replica_to_primary_failover_enabled` set to true`") do
let(:replica_to_primary_failover_enabled) { true }
it "does not unbans them automatically" do
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
failed_count = 0
number_of_replicas = processes[:replicas].length
# We need to allow pgcat to open connections to replicas
(number_of_replicas + 10).times do |n|
conn.async_exec("SELECT 1 + 2")
rescue
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
failed_count += 1
end
expect(failed_count).to(eq(0))
# Take down all replicas
processes[:replicas].each(&:take_down)
(number_of_replicas + 10).times do |n|
conn.async_exec("SELECT 1 + 2")
rescue
conn = PG.connect(processes.pgcat.connection_string("sharded_db", "sharding_user"))
failed_count += 1
end
expect(failed_count).to(eq(number_of_replicas))
end
end
end
end
end