Revert "[objects pruner] live pruner support (#7923)" #8196

Merged 1 commit on Feb 9, 2023
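
In effect, this revert removes the `enable-live-pruner` configuration flag, the checkpoint-stream plumbing that fed `AuthorityStorePruner`, and the `test_simulated_load_pruning` simtest, returning object pruning to a purely periodic mechanism that is disabled by default.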
1 change: 0 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default.

1 change: 0 additions & 1 deletion crates/sui-benchmark/Cargo.toml
@@ -56,7 +56,6 @@ sysinfo = "0.27.5"
 [target.'cfg(msim)'.dependencies]
 sui-macros = { path = "../sui-macros" }
 sui-simulator = { path = "../sui-simulator" }
-typed-store = { path = "../typed-store" }

 [features]
 benchmark = ["narwhal-node/benchmark"]

40 changes: 2 additions & 38 deletions crates/sui-benchmark/tests/simtest.rs
@@ -4,7 +4,6 @@
 #[cfg(msim)]
 mod test {

-    use itertools::Itertools;
     use rand::{thread_rng, Rng};
     use std::str::FromStr;
     use std::sync::{Arc, Mutex};
@@ -21,19 +20,13 @@ mod test {
         workloads::make_combination_workload,
         LocalValidatorAggregatorProxy, ValidatorProxy,
     };
-    use sui_config::{AUTHORITIES_DB_NAME, SUI_KEYSTORE_FILENAME};
+    use sui_config::SUI_KEYSTORE_FILENAME;
     use sui_macros::{register_fail_points, sim_test};
     use sui_simulator::{configs::*, SimConfig};
-    use sui_types::object::{Object, Owner};
-    use sui_types::storage::ObjectKey;
+    use sui_types::object::Owner;
     use test_utils::messages::get_sui_gas_object_with_wallet_context;
     use test_utils::network::{TestCluster, TestClusterBuilder};
     use tracing::info;
-    use typed_store::rocks::ReadWriteOptions;
-    use typed_store::{
-        rocks::{DBMap, MetricConf},
-        traits::Map,
-    };

     fn test_config() -> SimConfig {
         env_config(
@@ -147,35 +140,6 @@ mod test {
         test_simulated_load(test_cluster, 120).await;
     }

-    #[sim_test(config = "test_config()")]
-    async fn test_simulated_load_pruning() {
-        let epoch_duration_ms = 1000;
-        let test_cluster = build_test_cluster(7, epoch_duration_ms).await;
-        test_simulated_load(test_cluster.clone(), 5).await;
-        // waiting enough time to get all transactions into checkpoints
-        tokio::time::sleep(Duration::from_millis(2 * epoch_duration_ms)).await;
-
-        let swarm_dir = test_cluster.swarm.dir().join(AUTHORITIES_DB_NAME);
-        let validator_path = std::fs::read_dir(swarm_dir).unwrap().next().unwrap();
-        let db_path = validator_path.unwrap().path().join("store");
-
-        let db = typed_store::rocks::open_cf(&db_path, None, MetricConf::default(), &["objects"]);
-        let objects = DBMap::<ObjectKey, Object>::reopen(
-            &db.unwrap(),
-            Some("objects"),
-            &ReadWriteOptions {
-                ignore_range_deletions: false,
-            },
-        )
-        .unwrap();
-
-        let iter = objects.iter().skip_to_last().reverse();
-        for (_, group) in &iter.group_by(|item| item.0 .0) {
-            // assure only last version is kept
-            assert_eq!(group.count(), 1);
-        }
-    }
-
     async fn build_test_cluster(
         default_num_validators: usize,
         default_epoch_duration_ms: u64,

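For context, the deleted `test_simulated_load_pruning` test exercised live pruning end to end: it ran simulated load, waited for checkpointing, reopened a validator's `objects` column family, and asserted that at most one version per object survived. A minimal sketch of that final assertion, assuming `itertools::Itertools::group_by` (as the removed test used) over `(object_id, version)` keys sorted by id; the tuple keys are simplified stand-ins, not the real `ObjectKey` type:

use itertools::Itertools;

// Keys of a pruned `objects` table, simplified to (object_id, version) pairs
// sorted so that all versions of one object are adjacent.
fn assert_one_version_per_object(keys: &[(u32, u64)]) {
    for (_id, group) in &keys.iter().group_by(|&&(id, _version)| id) {
        // After pruning, only the latest version of each object should remain.
        assert_eq!(group.count(), 1);
    }
}

fn main() {
    // Three objects, one surviving version each: the pruner's expected end state.
    assert_one_version_per_object(&[(1, 7), (2, 3), (3, 42)]);
}
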
1 change: 0 additions & 1 deletion crates/sui-config/data/fullnode-template-with-path.yaml
@@ -17,7 +17,6 @@ authority-store-pruning-config:
   objects-pruning-initial-delay-secs: 3600
   num-latest-epoch-dbs-to-retain: 3
   epoch-db-pruning-period-secs: 3600
-  enable-live-pruner: true

 protocol-key-pair:
   path: "protocol.key"

1 change: 0 additions & 1 deletion crates/sui-config/data/fullnode-template.yaml
@@ -17,4 +17,3 @@ authority-store-pruning-config:
   objects-pruning-initial-delay-secs: 3600
   num-latest-epoch-dbs-to-retain: 3
   epoch-db-pruning-period-secs: 3600
-  enable-live-pruner: true

8 changes: 2 additions & 6 deletions crates/sui-config/src/node.rs
@@ -277,18 +277,16 @@ pub struct AuthorityStorePruningConfig {
     pub objects_pruning_initial_delay_secs: u64,
     pub num_latest_epoch_dbs_to_retain: usize,
     pub epoch_db_pruning_period_secs: u64,
-    pub enable_live_pruner: bool,
 }

 impl Default for AuthorityStorePruningConfig {
     fn default() -> Self {
         Self {
             objects_num_latest_versions_to_retain: u64::MAX,
-            objects_pruning_period_secs: 24 * 60 * 60,
-            objects_pruning_initial_delay_secs: 60 * 60,
+            objects_pruning_period_secs: u64::MAX,
+            objects_pruning_initial_delay_secs: u64::MAX,
             num_latest_epoch_dbs_to_retain: usize::MAX,
             epoch_db_pruning_period_secs: u64::MAX,
-            enable_live_pruner: cfg!(test) || cfg!(msim),
         }
     }
 }
@@ -303,7 +301,6 @@ impl AuthorityStorePruningConfig {
             objects_pruning_initial_delay_secs: 60 * 60,
             num_latest_epoch_dbs_to_retain: 3,
             epoch_db_pruning_period_secs: 60 * 60,
-            enable_live_pruner: cfg!(test) || cfg!(msim),
         }
     }
     pub fn fullnode_config() -> Self {
@@ -313,7 +310,6 @@
             objects_pruning_initial_delay_secs: 60 * 60,
             num_latest_epoch_dbs_to_retain: 3,
             epoch_db_pruning_period_secs: 60 * 60,
-            enable_live_pruner: cfg!(test) || cfg!(msim),
         }
     }
 }

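With the revert in place, the stock `Default` goes back to `u64::MAX` sentinels, so periodic object pruning is effectively off unless a preset such as `validator_config()` or `fullnode_config()` (daily period, one-hour initial delay) is used. A small illustrative sketch of treating `u64::MAX` as a disabled sentinel; the helper is hypothetical, not Sui code:

use std::time::Duration;

// Hypothetical helper: a u64::MAX period means "never schedule pruning".
fn pruning_interval(period_secs: u64) -> Option<Duration> {
    (period_secs != u64::MAX).then(|| Duration::from_secs(period_secs))
}

fn main() {
    // Default config after the revert: pruning never fires.
    assert_eq!(pruning_interval(u64::MAX), None);
    // validator_config()/fullnode_config(): prune once a day.
    assert_eq!(pruning_interval(24 * 60 * 60), Some(Duration::from_secs(86_400)));
}
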
11 changes: 1 addition & 10 deletions crates/sui-config/src/swarm.rs
@@ -85,7 +85,6 @@ pub struct FullnodeConfigBuilder<'a> {
     rpc_port: Option<u16>,
     // port for admin interface
     admin_port: Option<u16>,
-    enable_pruner: bool,
 }

 impl<'a> FullnodeConfigBuilder<'a> {
@@ -99,7 +98,6 @@ impl<'a> FullnodeConfigBuilder<'a> {
             p2p_port: None,
             rpc_port: None,
             admin_port: None,
-            enable_pruner: true,
         }
     }

@@ -158,11 +156,6 @@ impl<'a> FullnodeConfigBuilder<'a> {
         self
     }

-    pub fn set_enable_pruner(mut self, status: bool) -> Self {
-        self.enable_pruner = status;
-        self
-    }
-
     pub fn build(self) -> Result<NodeConfig, anyhow::Error> {
         let protocol_key_pair = get_key_pair_from_rng::<AuthorityKeyPair, _>(&mut OsRng).1;
         let worker_key_pair = get_key_pair_from_rng::<NetworkKeyPair, _>(&mut OsRng).1;
@@ -223,8 +216,6 @@
         let rpc_port = self.rpc_port.unwrap_or_else(|| get_available_port(9000));
         let jsonrpc_server_url = format!("{}:{}", listen_ip, rpc_port);
         let json_rpc_address: SocketAddr = jsonrpc_server_url.parse().unwrap();
-        let mut authority_store_pruning_config = AuthorityStorePruningConfig::fullnode_config();
-        authority_store_pruning_config.enable_live_pruner = self.enable_pruner;

         Ok(NodeConfig {
             protocol_key_pair: AuthorityKeyPairWithPath::new(protocol_key_pair),
@@ -246,7 +237,7 @@
             grpc_load_shed: None,
             grpc_concurrency_limit: None,
             p2p_config,
-            authority_store_pruning_config,
+            authority_store_pruning_config: AuthorityStorePruningConfig::fullnode_config(),
             end_of_epoch_broadcast_channel_capacity:
                 default_end_of_epoch_broadcast_channel_capacity(),
             checkpoint_executor_config: Default::default(),

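With the toggle gone, `build()` no longer constructs a mutable pruning config just to flip one flag; it passes `AuthorityStorePruningConfig::fullnode_config()` into `NodeConfig` directly, and test clusters lose the ability to opt individual fullnodes out of pruning. The shape of the simplification, with hypothetical stand-in types:

#[derive(Debug, Clone, PartialEq)]
struct PruningConfig {
    enable_live_pruner: bool, // flag removed by this revert
}

impl PruningConfig {
    fn fullnode() -> Self {
        Self { enable_live_pruner: false }
    }
}

fn build_before(enable_pruner: bool) -> PruningConfig {
    // Pre-revert: start from the preset, then apply a per-builder override.
    let mut cfg = PruningConfig::fullnode();
    cfg.enable_live_pruner = enable_pruner;
    cfg
}

fn build_after() -> PruningConfig {
    // Post-revert: the canonical fullnode preset is used unchanged.
    PruningConfig::fullnode()
}

fn main() {
    assert_ne!(build_before(true), build_after());
}
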
Seven identical changes (0 additions & 1 deletion each) to a YAML network-config snapshot file; every hunk targets an entry under validator_configs:
@@ -67,7 +67,6 @@ validator_configs:
       objects-pruning-initial-delay-secs: 3600
       num-latest-epoch-dbs-to-retain: 3
       epoch-db-pruning-period-secs: 3600
-      enable-live-pruner: false
       end-of-epoch-broadcast-channel-capacity: 128
       checkpoint-executor-config:
         checkpoint-execution-max-concurrency: 100
The same one-line deletion repeats at @@ -136,7 +135,6 @@, @@ -205,7 +203,6 @@, @@ -274,7 +271,6 @@, @@ -343,7 +339,6 @@, @@ -412,7 +407,6 @@, and @@ -481,7 +475,6 @@ for the remaining validator configs.

26 changes: 4 additions & 22 deletions crates/sui-core/src/authority/authority_store.rs
@@ -4,7 +4,6 @@
 use super::authority_store_pruner::AuthorityStorePruner;
 use super::{authority_store_tables::AuthorityPerpetualTables, *};
 use crate::authority::authority_per_epoch_store::AuthorityPerEpochStore;
-use crate::checkpoints::checkpoint_executor::CheckpointExecutionMessage;
 use once_cell::sync::OnceCell;
 use rocksdb::Options;
 use serde::{Deserialize, Serialize};
@@ -19,7 +18,7 @@ use sui_types::object::Owner;
 use sui_types::object::PACKAGE_VERSION;
 use sui_types::storage::{ChildObjectResolver, ObjectKey};
 use sui_types::{base_types::SequenceNumber, fp_bail, fp_ensure, storage::ParentSync};
-use tokio::sync::{mpsc, RwLock, RwLockReadGuard, RwLockWriteGuard};
+use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
 use tracing::{debug, info, trace};
 use typed_store::rocks::DBBatch;
 use typed_store::traits::Map;
@@ -61,7 +60,6 @@ impl AuthorityStore {
         genesis: &Genesis,
         committee_store: &Arc<CommitteeStore>,
         pruning_config: &AuthorityStorePruningConfig,
-        checkpoint_stream: mpsc::Receiver<CheckpointExecutionMessage>,
     ) -> SuiResult<Self> {
         let perpetual_tables = Arc::new(AuthorityPerpetualTables::open(path, db_options.clone()));
         if perpetual_tables.database_is_empty()? {
@@ -71,14 +69,7 @@ impl AuthorityStore {
         let committee = committee_store
             .get_committee(&cur_epoch)?
             .expect("Committee of the current epoch must exist");
-        Self::open_inner(
-            genesis,
-            perpetual_tables,
-            committee,
-            pruning_config,
-            checkpoint_stream,
-        )
-        .await
+        Self::open_inner(genesis, perpetual_tables, committee, pruning_config).await
     }

     pub async fn open_with_committee_for_testing(
@@ -92,27 +83,18 @@ impl AuthorityStore {
         // as the genesis committee.
         assert_eq!(committee.epoch, 0);
         let perpetual_tables = Arc::new(AuthorityPerpetualTables::open(path, db_options.clone()));
-        Self::open_inner(
-            genesis,
-            perpetual_tables,
-            committee.clone(),
-            pruning_config,
-            mpsc::channel(1).1,
-        )
-        .await
+        Self::open_inner(genesis, perpetual_tables, committee.clone(), pruning_config).await
     }

     async fn open_inner(
         genesis: &Genesis,
         perpetual_tables: Arc<AuthorityPerpetualTables>,
         committee: Committee,
         pruning_config: &AuthorityStorePruningConfig,
-        checkpoint_stream: mpsc::Receiver<CheckpointExecutionMessage>,
     ) -> SuiResult<Self> {
         let epoch = committee.epoch;

-        let _store_pruner =
-            AuthorityStorePruner::new(perpetual_tables.clone(), pruning_config, checkpoint_stream);
+        let _store_pruner = AuthorityStorePruner::new(perpetual_tables.clone(), pruning_config);

         let store = Self {
             mutex_table: MutexTable::new(NUM_SHARDS, SHARD_SIZE),

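The signature changes summarize the revert: `AuthorityStorePruner` no longer consumes a stream of executed-checkpoint messages (the "live" pruning input) and is once again constructed from the perpetual tables and pruning config alone. A rough sketch of the two shapes, using simplified stand-in types rather than the real Sui ones:

use std::sync::{mpsc, Arc};

struct PerpetualTables;
struct PruningConfig;
struct StorePruner;

impl StorePruner {
    // Post-revert: a periodic pruner, fully described by its config.
    fn new(_tables: Arc<PerpetualTables>, _config: &PruningConfig) -> Self {
        StorePruner
    }

    // Pre-revert (removed by this PR): the pruner also received executed-checkpoint
    // notifications so it could prune old object versions as execution progressed.
    fn new_live(
        _tables: Arc<PerpetualTables>,
        _config: &PruningConfig,
        _checkpoint_stream: mpsc::Receiver<u64>,
    ) -> Self {
        StorePruner
    }
}

fn main() {
    let tables = Arc::new(PerpetualTables);
    let config = PruningConfig;
    let _periodic = StorePruner::new(tables.clone(), &config);
    let (_tx, rx) = mpsc::channel::<u64>();
    let _live = StorePruner::new_live(tables, &config, rx);
}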